Merge tag 'v5.2-rc1' into spi-5.3
Linux 5.2-rc1
Diffstat
-rw-r--r--.clang-format8
-rw-r--r--.get_maintainer.ignore1
-rw-r--r--.gitignore24
-rw-r--r--.mailmap16
-rw-r--r--Documentation/ABI/obsolete/sysfs-class-net-batman-adv (renamed from Documentation/ABI/testing/sysfs-class-net-batman-adv)2
-rw-r--r--Documentation/ABI/obsolete/sysfs-class-net-mesh (renamed from Documentation/ABI/testing/sysfs-class-net-mesh)2
-rw-r--r--Documentation/ABI/stable/sysfs-bus-nvmem2
-rw-r--r--Documentation/ABI/stable/sysfs-bus-vmbus12
-rw-r--r--Documentation/ABI/stable/sysfs-devices-node87
-rw-r--r--Documentation/ABI/testing/debugfs-wilco-ec45
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter230
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter-104-quad-836
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec16
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x20
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-816
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-impedance-analyzer-ad5933 (renamed from drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933)19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-sps302
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-temperature-max3185624
-rw-r--r--Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc8
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei15
-rw-r--r--Documentation/ABI/testing/sysfs-class-power51
-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-ipmi2
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu32
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ucsi-ccg6
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-livepatch2
-rw-r--r--Documentation/ABI/testing/usb-uevent27
-rw-r--r--Documentation/DMA-API-HOWTO.txt15
-rw-r--r--Documentation/Makefile9
-rw-r--r--Documentation/RCU/Design/Data-Structures/Data-Structures.html3
-rw-r--r--Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html4
-rw-r--r--Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html5
-rw-r--r--Documentation/RCU/NMI-RCU.txt13
-rw-r--r--Documentation/RCU/UP.txt6
-rw-r--r--Documentation/RCU/checklist.txt91
-rw-r--r--Documentation/RCU/rcu.txt8
-rw-r--r--Documentation/RCU/rcu_dereference.txt103
-rw-r--r--Documentation/RCU/rcubarrier.txt27
-rw-r--r--Documentation/RCU/whatisRCU.txt10
-rw-r--r--Documentation/accounting/psi.txt107
-rw-r--r--Documentation/acpi/aml-debugger.txt66
-rw-r--r--Documentation/acpi/apei/output_format.txt147
-rw-r--r--Documentation/acpi/dsd/leds.txt99
-rw-r--r--Documentation/acpi/i2c-muxes.txt58
-rw-r--r--Documentation/acpi/initrd_table_override.txt111
-rw-r--r--Documentation/acpi/method-customizing.txt73
-rw-r--r--Documentation/acpi/method-tracing.txt192
-rw-r--r--Documentation/acpi/ssdt-overlays.txt172
-rw-r--r--Documentation/admin-guide/acpi/cppc_sysfs.rst (renamed from Documentation/acpi/cppc_sysfs.txt)71
-rw-r--r--Documentation/admin-guide/acpi/dsdt-override.rst (renamed from Documentation/acpi/dsdt-override.txt)8
-rw-r--r--Documentation/admin-guide/acpi/index.rst14
-rw-r--r--Documentation/admin-guide/acpi/initrd_table_override.rst115
-rw-r--r--Documentation/admin-guide/acpi/ssdt-overlays.rst180
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst27
-rw-r--r--Documentation/admin-guide/ext4.rst38
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst13
-rw-r--r--Documentation/admin-guide/hw-vuln/l1tf.rst (renamed from Documentation/admin-guide/l1tf.rst)1
-rw-r--r--Documentation/admin-guide/hw-vuln/mds.rst308
-rw-r--r--Documentation/admin-guide/index.rst7
-rw-r--r--Documentation/admin-guide/kernel-parameters.rst1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt123
-rw-r--r--Documentation/admin-guide/mm/numaperf.rst169
-rw-r--r--Documentation/admin-guide/pm/cpufreq.rst18
-rw-r--r--Documentation/admin-guide/pm/cpuidle.rst8
-rw-r--r--Documentation/admin-guide/pm/index.rst2
-rw-r--r--Documentation/admin-guide/pm/intel_epb.rst41
-rw-r--r--Documentation/admin-guide/pm/intel_pstate.rst32
-rw-r--r--Documentation/admin-guide/pm/sleep-states.rst8
-rw-r--r--Documentation/admin-guide/pm/strategies.rst8
-rw-r--r--Documentation/admin-guide/pm/system-wide.rst2
-rw-r--r--Documentation/admin-guide/pm/working-state.rst3
-rw-r--r--Documentation/arm64/cpu-feature-registers.txt16
-rw-r--r--Documentation/arm64/elf_hwcaps.txt41
-rw-r--r--Documentation/arm64/perf.txt85
-rw-r--r--Documentation/arm64/pointer-authentication.txt22
-rw-r--r--Documentation/arm64/silicon-errata.txt2
-rw-r--r--Documentation/arm64/sve.txt17
-rw-r--r--Documentation/atomic_bitops.txt6
-rw-r--r--Documentation/atomic_t.txt17
-rw-r--r--Documentation/block/bfq-iosched.txt29
-rw-r--r--Documentation/block/null_blk.txt4
-rw-r--r--Documentation/bpf/bpf_design_QA.rst29
-rw-r--r--Documentation/bpf/btf.rst59
-rw-r--r--Documentation/bpf/index.rst10
-rw-r--r--Documentation/bpf/prog_cgroup_sysctl.rst125
-rw-r--r--Documentation/bpf/prog_flow_dissector.rst (renamed from Documentation/networking/bpf_flow_dissector.rst)6
-rw-r--r--Documentation/clearing-warn-once.txt4
-rw-r--r--Documentation/core-api/cachetlb.rst10
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/kernel-api.rst4
-rw-r--r--Documentation/core-api/printk-formats.rst8
-rw-r--r--Documentation/cputopology.txt46
-rw-r--r--Documentation/crypto/api-samples.rst1
-rw-r--r--Documentation/dev-tools/gcov.rst18
-rw-r--r--Documentation/dev-tools/kselftest.rst136
-rw-r--r--Documentation/device-mapper/dm-dust.txt272
-rw-r--r--Documentation/device-mapper/dm-integrity.txt32
-rw-r--r--Documentation/devicetree/bindings/arm/altera/socfpga-system.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt5
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt60
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml1
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt29
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.yaml36
-rw-r--r--Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml22
-rw-r--r--Documentation/devicetree/bindings/arm/keystone/ti,sci.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt22
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt43
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/rockchip.yaml25
-rw-r--r--Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.txt23
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi.yaml807
-rw-r--r--Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt36
-rw-r--r--Documentation/devicetree/bindings/bus/ti-sysc.txt6
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/at91-clock.txt33
-rw-r--r--Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt93
-rw-r--r--Documentation/devicetree/bindings/clock/milbeaut-clock.yaml73
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,turingcc.txt19
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/sifive/fu540-prci.txt46
-rw-r--r--Documentation/devicetree/bindings/clock/st,stm32-rcc.txt6
-rw-r--r--Documentation/devicetree/bindings/clock/xlnx,zynqmp-clk.txt63
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.txt2
-rw-r--r--Documentation/devicetree/bindings/counter/ftm-quaddec.txt18
-rw-r--r--Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt (renamed from Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt)8
-rw-r--r--Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt31
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt4
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt9
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt33
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt32
-rw-r--r--Documentation/devicetree/bindings/display/msm/gmu.txt10
-rw-r--r--Documentation/devicetree/bindings/display/msm/gpu.txt11
-rw-r--r--Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt20
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt7
-rw-r--r--Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt12
-rw-r--r--Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt18
-rw-r--r--Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml51
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt2
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt72
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt36
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer.txt91
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer.yaml160
-rw-r--r--Documentation/devicetree/bindings/display/ste,mcde.txt104
-rw-r--r--Documentation/devicetree/bindings/dma/adi,axi-dmac.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt4
-rw-r--r--Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt135
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.txt1
-rw-r--r--Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt71
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml44
-rw-r--r--Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt54
-rw-r--r--Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt25
-rw-r--r--Documentation/devicetree/bindings/gnss/u-blox.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-pca953x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.txt92
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt14
-rw-r--r--Documentation/devicetree/bindings/gpu/aspeed-gfx.txt41
-rw-r--r--Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt11
-rw-r--r--Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt26
-rw-r--r--Documentation/devicetree/bindings/hwmon/g762.txt2
-rw-r--r--Documentation/devicetree/bindings/hwmon/lm75.txt1
-rw-r--r--Documentation/devicetree/bindings/hwmon/pwm-fan.txt23
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt17
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-designware.txt9
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-riic.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-stm32.txt37
-rw-r--r--Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt17
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,ad7780.txt48
-rw-r--r--Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/avia-hx711.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml66
-rw-r--r--Documentation/devicetree/bindings/iio/adc/imx7d-adc.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt5
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti-ads8344.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt8
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt31
-rw-r--r--Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt85
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt3
-rw-r--r--Documentation/devicetree/bindings/iio/light/vcnl4000.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/bmp085.txt27
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/bmp085.yaml70
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt28
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml66
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt29
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt1
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/max31856.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/temperature-bindings.txt7
-rw-r--r--Documentation/devicetree/bindings/input/gpio-vibrator.yaml37
-rw-r--r--Documentation/devicetree/bindings/input/lpc32xx-key.txt5
-rw-r--r--Documentation/devicetree/bindings/input/max77650-onkey.txt26
-rw-r--r--Documentation/devicetree/bindings/input/microchip,qt1050.txt78
-rw-r--r--Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt6
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.txt3
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/iqs5xx.txt80
-rw-r--r--Documentation/devicetree/bindings/interconnect/interconnect.txt4
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml54
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt7
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt66
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt82
-rw-r--r--Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml129
-rw-r--r--Documentation/devicetree/bindings/leds/leds-lm3532.txt101
-rw-r--r--Documentation/devicetree/bindings/leds/leds-max77650.txt57
-rw-r--r--Documentation/devicetree/bindings/mailbox/marvell,armada-3700-rwtm-mailbox.txt16
-rw-r--r--Documentation/devicetree/bindings/media/aspeed-video.txt6
-rw-r--r--Documentation/devicetree/bindings/media/cedrus.txt1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt82
-rw-r--r--Documentation/devicetree/bindings/media/meson-ao-cec.txt13
-rw-r--r--Documentation/devicetree/bindings/media/rcar_imr.txt31
-rw-r--r--Documentation/devicetree/bindings/media/rcar_vin.txt1
-rw-r--r--Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt4
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt1
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/fsl/mmdc.txt35
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/axp20x.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt17
-rw-r--r--Documentation/devicetree/bindings/mfd/max77620.txt9
-rw-r--r--Documentation/devicetree/bindings/mfd/max77650.txt46
-rw-r--r--Documentation/devicetree/bindings/mfd/stm32-lptimer.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/stm32-timers.txt7
-rw-r--r--Documentation/devicetree/bindings/mfd/stmfx.txt28
-rw-r--r--Documentation/devicetree/bindings/mfd/ti-lmu.txt24
-rw-r--r--Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt47
-rw-r--r--Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml49
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt1
-rw-r--r--Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt1
-rw-r--r--Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml97
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt1
-rw-r--r--Documentation/devicetree/bindings/mtd/denali-nand.txt40
-rw-r--r--Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt32
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt16
-rw-r--r--Documentation/devicetree/bindings/mtd/nand-controller.yaml143
-rw-r--r--Documentation/devicetree/bindings/mtd/nand.txt75
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt17
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-cfe-nor-partitions.txt24
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-imagetag.txt45
-rw-r--r--Documentation/devicetree/bindings/mtd/sunxi-nand.txt48
-rw-r--r--Documentation/devicetree/bindings/net/altera_tse.txt5
-rw-r--r--Documentation/devicetree/bindings/net/amd-xgbe.txt5
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt4
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt5
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt160
-rw-r--r--Documentation/devicetree/bindings/net/dsa/sja1105.txt156
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt8
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-femac.txt4
-rw-r--r--Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt4
-rw-r--r--Documentation/devicetree/bindings/net/keystone-netcp.txt10
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt5
-rw-r--r--Documentation/devicetree/bindings/net/marvell-pxa168.txt4
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt48
-rw-r--r--Documentation/devicetree/bindings/net/microchip,enc28j60.txt3
-rw-r--r--Documentation/devicetree/bindings/net/microchip,lan78xx.txt5
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt6
-rw-r--r--Documentation/devicetree/bindings/net/qca,qca7000.txt4
-rw-r--r--Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt5
-rw-r--r--Documentation/devicetree/bindings/net/samsung-sxgbe.txt4
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt5
-rw-r--r--Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt4
-rw-r--r--Documentation/devicetree/bindings/net/socionext-netsec.txt5
-rw-r--r--Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt5
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt4
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt3
-rw-r--r--Documentation/devicetree/bindings/nvmem/imx-ocotp.txt4
-rw-r--r--Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt31
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/pci-keystone.txt58
-rw-r--r--Documentation/devicetree/bindings/pci/pci.txt50
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt32
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt3
-rw-r--r--Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt22
-rw-r--r--Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt22
-rw-r--r--Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt9
-rw-r--r--Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt26
-rw-r--r--Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt38
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt14
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt57
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt14
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt8
-rw-r--r--Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt82
-rw-r--r--Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt98
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt141
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt6
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8183.txt132
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt116
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt4
-rw-r--r--Documentation/devicetree/bindings/power/reset/syscon-reboot.txt11
-rw-r--r--Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt1
-rw-r--r--Documentation/devicetree/bindings/power/supply/gpio-charger.txt8
-rw-r--r--Documentation/devicetree/bindings/power/supply/ingenic,battery.txt31
-rw-r--r--Documentation/devicetree/bindings/power/supply/lt3651-charger.txt (renamed from Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt)10
-rw-r--r--Documentation/devicetree/bindings/power/supply/max77650-charger.txt28
-rw-r--r--Documentation/devicetree/bindings/power/supply/microchip,ucs1002.txt27
-rw-r--r--Documentation/devicetree/bindings/power/supply/olpc_battery.txt2
-rw-r--r--Documentation/devicetree/bindings/pps/pps-gpio.txt7
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt22
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-meson.txt3
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt1
-rw-r--r--Documentation/devicetree/bindings/regulator/gpio-regulator.txt30
-rw-r--r--Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt43
-rw-r--r--Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt7
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt51
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt6
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-aspeed.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc.txt1
-rw-r--r--Documentation/devicetree/bindings/serial/cdns,uart.txt5
-rw-r--r--Documentation/devicetree/bindings/serial/mtk-uart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/sifive-serial.txt33
-rw-r--r--Documentation/devicetree/bindings/serial/sprd-uart.txt17
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/pwrap.txt1
-rw-r--r--Documentation/devicetree/bindings/soc/mediatek/scpsys.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/adi,axi-i2s.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt4
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt39
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l51.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/da7219.txt8
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,audmix.txt50
-rw-r--r--Documentation/devicetree/bindings/sound/mchp-i2s-mcc.txt43
-rw-r--r--Documentation/devicetree/bindings/sound/mt8183-da7219-max98357.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt8
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,pdm.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/rt5651.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/simple-amplifier.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/simple-card.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sprd-mcdt.txt19
-rw-r--r--Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt33
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt62
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.txt14
-rw-r--r--Documentation/devicetree/bindings/thermal/rockchip-thermal.txt1
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt10
-rw-r--r--Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt4
-rw-r--r--Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml42
-rw-r--r--Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml4
-rw-r--r--Documentation/devicetree/bindings/ufs/cdns,ufshc.txt5
-rw-r--r--Documentation/devicetree/bindings/ufs/ufs-mediatek.txt43
-rw-r--r--Documentation/devicetree/bindings/ufs/ufs-qcom.txt5
-rw-r--r--Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/amlogic,dwc3.txt88
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt7
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml95
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ohci.yaml89
-rw-r--r--Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt8
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt4
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ehci.txt46
-rw-r--r--Documentation/devicetree/bindings/usb/usb-hcd.txt9
-rw-r--r--Documentation/devicetree/bindings/usb/usb-hcd.yaml25
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ohci.txt35
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt462
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml977
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx-sc-wdt.txt24
-rw-r--r--Documentation/devicetree/bindings/watchdog/mtk-wdt.txt1
-rw-r--r--Documentation/devicetree/bindings/writing-bindings.txt60
-rw-r--r--Documentation/devicetree/writing-schema.md2
-rw-r--r--Documentation/doc-guide/index.rst6
-rw-r--r--Documentation/dontdiff11
-rw-r--r--Documentation/driver-api/acpi/index.rst9
-rw-r--r--Documentation/driver-api/acpi/linuxized-acpica.rst (renamed from Documentation/acpi/linuxized-acpica.txt)109
-rw-r--r--Documentation/driver-api/acpi/scan_handlers.rst (renamed from Documentation/acpi/scan_handlers.txt)24
-rw-r--r--Documentation/driver-api/component.rst2
-rw-r--r--Documentation/driver-api/device-io.rst45
-rw-r--r--Documentation/driver-api/generic-counter.rst342
-rw-r--r--Documentation/driver-api/gpio/driver.rst361
-rw-r--r--Documentation/driver-api/index.rst2
-rw-r--r--Documentation/driver-api/pci/p2pdma.rst4
-rw-r--r--Documentation/driver-api/pm/cpuidle.rst7
-rw-r--r--Documentation/driver-api/pm/devices.rst12
-rw-r--r--Documentation/driver-api/pm/index.rst2
-rw-r--r--Documentation/driver-api/pm/notifiers.rst8
-rw-r--r--Documentation/driver-api/pm/types.rst2
-rw-r--r--Documentation/driver-api/soundwire/stream.rst16
-rw-r--r--Documentation/driver-api/usb/power-management.rst14
-rw-r--r--Documentation/driver-model/devres.txt6
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt2
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt2
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt2
-rw-r--r--Documentation/features/time/modern-timekeeping/arch-support.txt2
-rw-r--r--Documentation/filesystems/Locking4
-rw-r--r--Documentation/filesystems/autofs-mount-control.txt6
-rw-r--r--Documentation/filesystems/autofs.txt66
-rw-r--r--Documentation/filesystems/debugfs.txt16
-rw-r--r--Documentation/filesystems/porting35
-rw-r--r--Documentation/filesystems/vfs.txt8
-rw-r--r--Documentation/firmware-guide/acpi/DSD-properties-rules.rst (renamed from Documentation/acpi/DSD-properties-rules.txt)21
-rw-r--r--Documentation/firmware-guide/acpi/acpi-lid.rst (renamed from Documentation/acpi/acpi-lid.txt)40
-rw-r--r--Documentation/firmware-guide/acpi/aml-debugger.rst75
-rw-r--r--Documentation/firmware-guide/acpi/apei/einj.rst (renamed from Documentation/acpi/apei/einj.txt)94
-rw-r--r--Documentation/firmware-guide/acpi/apei/output_format.rst150
-rw-r--r--Documentation/firmware-guide/acpi/debug.rst (renamed from Documentation/acpi/debug.txt)31
-rw-r--r--Documentation/firmware-guide/acpi/dsd/data-node-references.rst (renamed from Documentation/acpi/dsd/data-node-references.txt)42
-rw-r--r--Documentation/firmware-guide/acpi/dsd/graph.rst (renamed from Documentation/acpi/dsd/graph.txt)53
-rw-r--r--Documentation/firmware-guide/acpi/enumeration.rst (renamed from Documentation/acpi/enumeration.txt)161
-rw-r--r--Documentation/firmware-guide/acpi/gpio-properties.rst (renamed from Documentation/acpi/gpio-properties.txt)78
-rw-r--r--Documentation/firmware-guide/acpi/i2c-muxes.rst61
-rw-r--r--Documentation/firmware-guide/acpi/index.rst26
-rw-r--r--Documentation/firmware-guide/acpi/lpit.rst (renamed from Documentation/acpi/lpit.txt)18
-rw-r--r--Documentation/firmware-guide/acpi/method-customizing.rst89
-rw-r--r--Documentation/firmware-guide/acpi/method-tracing.rst238
-rw-r--r--Documentation/firmware-guide/acpi/namespace.rst (renamed from Documentation/acpi/namespace.txt)294
-rw-r--r--Documentation/firmware-guide/acpi/osi.rst (renamed from Documentation/acpi/osi.txt)15
-rw-r--r--Documentation/firmware-guide/acpi/video_extension.rst (renamed from Documentation/acpi/video_extension.txt)83
-rw-r--r--Documentation/firmware-guide/index.rst13
-rw-r--r--Documentation/gpio/index.rst17
-rw-r--r--Documentation/gpio/sysfs.rst (renamed from Documentation/gpio/sysfs.txt)39
-rw-r--r--Documentation/gpu/drm-internals.rst5
-rw-r--r--Documentation/gpu/drm-kms-helpers.rst18
-rw-r--r--Documentation/gpu/kms-properties.csv1
-rw-r--r--Documentation/gpu/meson.rst6
-rw-r--r--Documentation/gpu/tinydrm.rst30
-rw-r--r--Documentation/gpu/todo.rst12
-rw-r--r--Documentation/hwmon/ab8500.rst (renamed from Documentation/hwmon/ab8500)10
-rw-r--r--Documentation/hwmon/abituguru92
-rw-r--r--Documentation/hwmon/abituguru-datasheet.rst (renamed from Documentation/hwmon/abituguru-datasheet)160
-rw-r--r--Documentation/hwmon/abituguru.rst113
-rw-r--r--Documentation/hwmon/abituguru3.rst (renamed from Documentation/hwmon/abituguru3)36
-rw-r--r--Documentation/hwmon/abx500.rst (renamed from Documentation/hwmon/abx500)8
-rw-r--r--Documentation/hwmon/acpi_power_meter.rst (renamed from Documentation/hwmon/acpi_power_meter)25
-rw-r--r--Documentation/hwmon/ad7314.rst (renamed from Documentation/hwmon/ad7314)9
-rw-r--r--Documentation/hwmon/adc128d818.rst (renamed from Documentation/hwmon/adc128d818)7
-rw-r--r--Documentation/hwmon/adm1021.rst (renamed from Documentation/hwmon/adm1021)44
-rw-r--r--Documentation/hwmon/adm1025.rst (renamed from Documentation/hwmon/adm1025)13
-rw-r--r--Documentation/hwmon/adm1026.rst (renamed from Documentation/hwmon/adm1026)24
-rw-r--r--Documentation/hwmon/adm1031.rst (renamed from Documentation/hwmon/adm1031)16
-rw-r--r--Documentation/hwmon/adm1275.rst (renamed from Documentation/hwmon/adm1275)30
-rw-r--r--Documentation/hwmon/adm9240.rst (renamed from Documentation/hwmon/adm9240)50
-rw-r--r--Documentation/hwmon/ads1015.rst (renamed from Documentation/hwmon/ads1015)74
-rw-r--r--Documentation/hwmon/ads7828.rst (renamed from Documentation/hwmon/ads7828)29
-rw-r--r--Documentation/hwmon/adt7410.rst (renamed from Documentation/hwmon/adt7410)49
-rw-r--r--Documentation/hwmon/adt7411.rst (renamed from Documentation/hwmon/adt7411)20
-rw-r--r--Documentation/hwmon/adt7462.rst (renamed from Documentation/hwmon/adt7462)11
-rw-r--r--Documentation/hwmon/adt7470.rst (renamed from Documentation/hwmon/adt7470)8
-rw-r--r--Documentation/hwmon/adt7475.rst (renamed from Documentation/hwmon/adt7475)38
-rw-r--r--Documentation/hwmon/amc6821.rst (renamed from Documentation/hwmon/amc6821)18
-rw-r--r--Documentation/hwmon/asb100.rst (renamed from Documentation/hwmon/asb100)55
-rw-r--r--Documentation/hwmon/asc7621.rst (renamed from Documentation/hwmon/asc7621)146
-rw-r--r--Documentation/hwmon/aspeed-pwm-tacho.rst (renamed from Documentation/hwmon/aspeed-pwm-tacho)2
-rw-r--r--Documentation/hwmon/coretemp.rst (renamed from Documentation/hwmon/coretemp)46
-rw-r--r--Documentation/hwmon/da9052.rst (renamed from Documentation/hwmon/da9052)41
-rw-r--r--Documentation/hwmon/da9055.rst (renamed from Documentation/hwmon/da9055)20
-rw-r--r--Documentation/hwmon/dme1737.rst (renamed from Documentation/hwmon/dme1737)88
-rw-r--r--Documentation/hwmon/ds1621.rst (renamed from Documentation/hwmon/ds1621)156
-rw-r--r--Documentation/hwmon/ds620.rst (renamed from Documentation/hwmon/ds620)12
-rw-r--r--Documentation/hwmon/emc1403.rst (renamed from Documentation/hwmon/emc1403)33
-rw-r--r--Documentation/hwmon/emc2103.rst (renamed from Documentation/hwmon/emc2103)6
-rw-r--r--Documentation/hwmon/emc6w201.rst (renamed from Documentation/hwmon/emc6w201)5
-rw-r--r--Documentation/hwmon/f71805f.rst (renamed from Documentation/hwmon/f71805f)36
-rw-r--r--Documentation/hwmon/f71882fg.rst (renamed from Documentation/hwmon/f71882fg)56
-rw-r--r--Documentation/hwmon/fam15h_power.rst (renamed from Documentation/hwmon/fam15h_power)85
-rw-r--r--Documentation/hwmon/ftsteutates.rst (renamed from Documentation/hwmon/ftsteutates)14
-rw-r--r--Documentation/hwmon/g760a.rst (renamed from Documentation/hwmon/g760a)4
-rw-r--r--Documentation/hwmon/g762.rst (renamed from Documentation/hwmon/g762)51
-rw-r--r--Documentation/hwmon/gl518sm.rst (renamed from Documentation/hwmon/gl518sm)21
-rw-r--r--Documentation/hwmon/hih6130.rst (renamed from Documentation/hwmon/hih6130)14
-rw-r--r--Documentation/hwmon/hwmon-kernel-api.rst (renamed from Documentation/hwmon/hwmon-kernel-api.txt)337
-rw-r--r--Documentation/hwmon/ibm-cffps.rst (renamed from Documentation/hwmon/ibm-cffps)3
-rw-r--r--Documentation/hwmon/ibmaem.rst (renamed from Documentation/hwmon/ibmaem)10
-rw-r--r--Documentation/hwmon/ibmpowernv.rst (renamed from Documentation/hwmon/ibmpowernv)31
-rw-r--r--Documentation/hwmon/ina209.rst (renamed from Documentation/hwmon/ina209)18
-rw-r--r--Documentation/hwmon/ina2xx.rst (renamed from Documentation/hwmon/ina2xx)41
-rw-r--r--Documentation/hwmon/ina3221.rst (renamed from Documentation/hwmon/ina3221)35
-rw-r--r--Documentation/hwmon/index.rst182
-rw-r--r--Documentation/hwmon/ir35221.rst (renamed from Documentation/hwmon/ir35221)13
-rw-r--r--Documentation/hwmon/ir38064.rst66
-rw-r--r--Documentation/hwmon/isl68137.rst80
-rw-r--r--Documentation/hwmon/it87.rst (renamed from Documentation/hwmon/it87)102
-rw-r--r--Documentation/hwmon/jc42.rst (renamed from Documentation/hwmon/jc42)55
-rw-r--r--Documentation/hwmon/k10temp.rst (renamed from Documentation/hwmon/k10temp)37
-rw-r--r--Documentation/hwmon/k8temp.rst (renamed from Documentation/hwmon/k8temp)17
-rw-r--r--Documentation/hwmon/lineage-pem.rst (renamed from Documentation/hwmon/lineage-pem)16
-rw-r--r--Documentation/hwmon/lm25066.rst (renamed from Documentation/hwmon/lm25066)32
-rw-r--r--Documentation/hwmon/lm63.rst (renamed from Documentation/hwmon/lm63)24
-rw-r--r--Documentation/hwmon/lm70.rst (renamed from Documentation/hwmon/lm70)13
-rw-r--r--Documentation/hwmon/lm73.rst (renamed from Documentation/hwmon/lm73)16
-rw-r--r--Documentation/hwmon/lm75.rst (renamed from Documentation/hwmon/lm75)102
-rw-r--r--Documentation/hwmon/lm77.rst (renamed from Documentation/hwmon/lm77)9
-rw-r--r--Documentation/hwmon/lm78.rst (renamed from Documentation/hwmon/lm78)20
-rw-r--r--Documentation/hwmon/lm80.rst (renamed from Documentation/hwmon/lm80)19
-rw-r--r--Documentation/hwmon/lm83.rst (renamed from Documentation/hwmon/lm83)16
-rw-r--r--Documentation/hwmon/lm85.rst (renamed from Documentation/hwmon/lm85)97
-rw-r--r--Documentation/hwmon/lm87.rst (renamed from Documentation/hwmon/lm87)23
-rw-r--r--Documentation/hwmon/lm90.rst (renamed from Documentation/hwmon/lm90)174
-rw-r--r--Documentation/hwmon/lm92.rst (renamed from Documentation/hwmon/lm92)17
-rw-r--r--Documentation/hwmon/lm93.rst (renamed from Documentation/hwmon/lm93)157
-rw-r--r--Documentation/hwmon/lm95234.rst (renamed from Documentation/hwmon/lm95234)11
-rw-r--r--Documentation/hwmon/lm95245.rst (renamed from Documentation/hwmon/lm95245)13
-rw-r--r--Documentation/hwmon/lochnagar.rst83
-rw-r--r--Documentation/hwmon/ltc2945.rst (renamed from Documentation/hwmon/ltc2945)16
-rw-r--r--Documentation/hwmon/ltc2978.rst (renamed from Documentation/hwmon/ltc2978)267
-rw-r--r--Documentation/hwmon/ltc2990.rst (renamed from Documentation/hwmon/ltc2990)23
-rw-r--r--Documentation/hwmon/ltc3815.rst (renamed from Documentation/hwmon/ltc3815)12
-rw-r--r--Documentation/hwmon/ltc4151.rst (renamed from Documentation/hwmon/ltc4151)16
-rw-r--r--Documentation/hwmon/ltc4215.rst (renamed from Documentation/hwmon/ltc4215)16
-rw-r--r--Documentation/hwmon/ltc4245.rst (renamed from Documentation/hwmon/ltc4245)17
-rw-r--r--Documentation/hwmon/ltc4260.rst (renamed from Documentation/hwmon/ltc4260)16
-rw-r--r--Documentation/hwmon/ltc4261.rst (renamed from Documentation/hwmon/ltc4261)16
-rw-r--r--Documentation/hwmon/max16064.rst (renamed from Documentation/hwmon/max16064)17
-rw-r--r--Documentation/hwmon/max16065.rst (renamed from Documentation/hwmon/max16065)24
-rw-r--r--Documentation/hwmon/max1619.rst (renamed from Documentation/hwmon/max1619)12
-rw-r--r--Documentation/hwmon/max1668.rst (renamed from Documentation/hwmon/max1668)14
-rw-r--r--Documentation/hwmon/max197.rst (renamed from Documentation/hwmon/max197)36
-rw-r--r--Documentation/hwmon/max20751.rst (renamed from Documentation/hwmon/max20751)9
-rw-r--r--Documentation/hwmon/max31722.rst (renamed from Documentation/hwmon/max31722)12
-rw-r--r--Documentation/hwmon/max31785.rst (renamed from Documentation/hwmon/max31785)6
-rw-r--r--Documentation/hwmon/max31790.rst (renamed from Documentation/hwmon/max31790)6
-rw-r--r--Documentation/hwmon/max34440.rst (renamed from Documentation/hwmon/max34440)90
-rw-r--r--Documentation/hwmon/max6639.rst (renamed from Documentation/hwmon/max6639)16
-rw-r--r--Documentation/hwmon/max6642.rst (renamed from Documentation/hwmon/max6642)10
-rw-r--r--Documentation/hwmon/max6650.rst (renamed from Documentation/hwmon/max6650)17
-rw-r--r--Documentation/hwmon/max6697.rst (renamed from Documentation/hwmon/max6697)33
-rw-r--r--Documentation/hwmon/max8688.rst (renamed from Documentation/hwmon/max8688)20
-rw-r--r--Documentation/hwmon/mc13783-adc.rst (renamed from Documentation/hwmon/mc13783-adc)27
-rw-r--r--Documentation/hwmon/mcp3021.rst (renamed from Documentation/hwmon/mcp3021)15
-rw-r--r--Documentation/hwmon/menf21bmc.rst (renamed from Documentation/hwmon/menf21bmc)5
-rw-r--r--Documentation/hwmon/mlxreg-fan.rst (renamed from Documentation/hwmon/mlxreg-fan)60
-rw-r--r--Documentation/hwmon/nct6683.rst (renamed from Documentation/hwmon/nct6683)11
-rw-r--r--Documentation/hwmon/nct6775.rst (renamed from Documentation/hwmon/nct6775)114
-rw-r--r--Documentation/hwmon/nct7802.rst (renamed from Documentation/hwmon/nct7802)11
-rw-r--r--Documentation/hwmon/nct7904.rst (renamed from Documentation/hwmon/nct7904)9
-rw-r--r--Documentation/hwmon/npcm750-pwm-fan.rst (renamed from Documentation/hwmon/npcm750-pwm-fan)4
-rw-r--r--Documentation/hwmon/nsa320.rst (renamed from Documentation/hwmon/nsa320)15
-rw-r--r--Documentation/hwmon/ntc_thermistor.rst (renamed from Documentation/hwmon/ntc_thermistor)123
-rw-r--r--Documentation/hwmon/occ.rst (renamed from Documentation/hwmon/occ)93
-rw-r--r--Documentation/hwmon/pc87360.rst (renamed from Documentation/hwmon/pc87360)38
-rw-r--r--Documentation/hwmon/pc87427.rst (renamed from Documentation/hwmon/pc87427)4
-rw-r--r--Documentation/hwmon/pcf8591.rst (renamed from Documentation/hwmon/pcf8591)52
-rw-r--r--Documentation/hwmon/pmbus-core.rst (renamed from Documentation/hwmon/pmbus-core)173
-rw-r--r--Documentation/hwmon/pmbus.rst (renamed from Documentation/hwmon/pmbus)90
-rw-r--r--Documentation/hwmon/powr1220.rst (renamed from Documentation/hwmon/powr1220)12
-rw-r--r--Documentation/hwmon/pwm-fan.rst (renamed from Documentation/hwmon/pwm-fan)3
-rw-r--r--Documentation/hwmon/raspberrypi-hwmon.rst (renamed from Documentation/hwmon/raspberrypi-hwmon)3
-rw-r--r--Documentation/hwmon/sch5627.rst (renamed from Documentation/hwmon/sch5627)4
-rw-r--r--Documentation/hwmon/sch5636.rst (renamed from Documentation/hwmon/sch5636)3
-rw-r--r--Documentation/hwmon/scpi-hwmon.rst (renamed from Documentation/hwmon/scpi-hwmon)7
-rw-r--r--Documentation/hwmon/sht15.rst (renamed from Documentation/hwmon/sht15)28
-rw-r--r--Documentation/hwmon/sht21.rst (renamed from Documentation/hwmon/sht21)24
-rw-r--r--Documentation/hwmon/sht3x.rst (renamed from Documentation/hwmon/sht3x)42
-rw-r--r--Documentation/hwmon/shtc1.rst (renamed from Documentation/hwmon/shtc1)19
-rw-r--r--Documentation/hwmon/sis5595.rst (renamed from Documentation/hwmon/sis5595)41
-rw-r--r--Documentation/hwmon/smm665.rst (renamed from Documentation/hwmon/smm665)42
-rw-r--r--Documentation/hwmon/smsc47b397.rst (renamed from Documentation/hwmon/smsc47b397)162
-rw-r--r--Documentation/hwmon/smsc47m1.rst (renamed from Documentation/hwmon/smsc47m1)43
-rw-r--r--Documentation/hwmon/smsc47m192103
-rw-r--r--Documentation/hwmon/smsc47m192.rst116
-rw-r--r--Documentation/hwmon/submitting-patches.rst (renamed from Documentation/hwmon/submitting-patches)21
-rw-r--r--Documentation/hwmon/sysfs-interface.rst (renamed from Documentation/hwmon/sysfs-interface)721
-rw-r--r--Documentation/hwmon/tc654.rst (renamed from Documentation/hwmon/tc654)9
-rw-r--r--Documentation/hwmon/tc74.rst (renamed from Documentation/hwmon/tc74)3
-rw-r--r--Documentation/hwmon/thmc50.rst (renamed from Documentation/hwmon/thmc50)37
-rw-r--r--Documentation/hwmon/tmp102.rst (renamed from Documentation/hwmon/tmp102)7
-rw-r--r--Documentation/hwmon/tmp103.rst (renamed from Documentation/hwmon/tmp103)7
-rw-r--r--Documentation/hwmon/tmp108.rst (renamed from Documentation/hwmon/tmp108)7
-rw-r--r--Documentation/hwmon/tmp401.rst (renamed from Documentation/hwmon/tmp401)32
-rw-r--r--Documentation/hwmon/tmp421.rst (renamed from Documentation/hwmon/tmp421)26
-rw-r--r--Documentation/hwmon/tps40422.rst (renamed from Documentation/hwmon/tps40422)25
-rw-r--r--Documentation/hwmon/twl4030-madc-hwmon.rst (renamed from Documentation/hwmon/twl4030-madc-hwmon)8
-rw-r--r--Documentation/hwmon/ucd9000.rst (renamed from Documentation/hwmon/ucd9000)35
-rw-r--r--Documentation/hwmon/ucd9200.rst (renamed from Documentation/hwmon/ucd9200)46
-rw-r--r--Documentation/hwmon/userspace-tools.rst (renamed from Documentation/hwmon/userspace-tools)3
-rw-r--r--Documentation/hwmon/vexpress.rst (renamed from Documentation/hwmon/vexpress)13
-rw-r--r--Documentation/hwmon/via686a.rst (renamed from Documentation/hwmon/via686a)30
-rw-r--r--Documentation/hwmon/vt1211.rst (renamed from Documentation/hwmon/vt1211)84
-rw-r--r--Documentation/hwmon/w83627ehf.rst (renamed from Documentation/hwmon/w83627ehf)162
-rw-r--r--Documentation/hwmon/w83627hf.rst (renamed from Documentation/hwmon/w83627hf)65
-rw-r--r--Documentation/hwmon/w83773g.rst (renamed from Documentation/hwmon/w83773g)12
-rw-r--r--Documentation/hwmon/w83781d.rst (renamed from Documentation/hwmon/w83781d)330
-rw-r--r--Documentation/hwmon/w83791d.rst (renamed from Documentation/hwmon/w83791d)123
-rw-r--r--Documentation/hwmon/w83792d.rst (renamed from Documentation/hwmon/w83792d)112
-rw-r--r--Documentation/hwmon/w83793106
-rw-r--r--Documentation/hwmon/w83793.rst113
-rw-r--r--Documentation/hwmon/w83795127
-rw-r--r--Documentation/hwmon/w83795.rst142
-rw-r--r--Documentation/hwmon/w83l785ts.rst (renamed from Documentation/hwmon/w83l785ts)9
-rw-r--r--Documentation/hwmon/w83l786ng.rst (renamed from Documentation/hwmon/w83l786ng)42
-rw-r--r--Documentation/hwmon/wm831x.rst (renamed from Documentation/hwmon/wm831x)9
-rw-r--r--Documentation/hwmon/wm8350.rst (renamed from Documentation/hwmon/wm8350)10
-rw-r--r--Documentation/hwmon/xgene-hwmon.rst (renamed from Documentation/hwmon/xgene-hwmon)24
-rw-r--r--Documentation/hwmon/zl6100.rst (renamed from Documentation/hwmon/zl6100)71
-rw-r--r--Documentation/i2c/busses/i2c-amd-mp223
-rw-r--r--Documentation/i2c/busses/i2c-piix42
-rw-r--r--Documentation/index.rst13
-rw-r--r--Documentation/kbuild/kbuild.txt5
-rw-r--r--Documentation/kprobes.txt7
-rw-r--r--Documentation/livepatch/callbacks.rst (renamed from Documentation/livepatch/callbacks.txt)45
-rw-r--r--Documentation/livepatch/cumulative-patches.rst (renamed from Documentation/livepatch/cumulative-patches.txt)14
-rw-r--r--Documentation/livepatch/index.rst21
-rw-r--r--Documentation/livepatch/livepatch.rst (renamed from Documentation/livepatch/livepatch.txt)62
-rw-r--r--Documentation/livepatch/module-elf-format.rst (renamed from Documentation/livepatch/module-elf-format.txt)353
-rw-r--r--Documentation/livepatch/shadow-vars.rst (renamed from Documentation/livepatch/shadow-vars.txt)65
-rw-r--r--Documentation/media/index.rst2
-rw-r--r--Documentation/media/kapi/mc-core.rst41
-rw-r--r--Documentation/media/lirc.h.rst.exceptions1
-rw-r--r--Documentation/media/uapi/mediactl/request-api.rst2
-rw-r--r--Documentation/media/uapi/v4l/buffer.rst21
-rw-r--r--Documentation/media/uapi/v4l/colorspaces-defs.rst4
-rw-r--r--Documentation/media/uapi/v4l/colorspaces.rst4
-rw-r--r--Documentation/media/uapi/v4l/dev-raw-vbi.rst4
-rw-r--r--Documentation/media/uapi/v4l/dev-rds.rst2
-rw-r--r--Documentation/media/uapi/v4l/dev-sliced-vbi.rst22
-rw-r--r--Documentation/media/uapi/v4l/dev-subdev.rst82
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-camera.rst17
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-codec.rst235
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-detect.rst2
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-dv.rst2
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-flash.rst4
-rw-r--r--Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst2
-rw-r--r--Documentation/media/uapi/v4l/field-order.rst20
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-compressed.rst6
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst9
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst9
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst2
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst440
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst23
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-srggb10p.rst2
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-srggb12p.rst3
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-srggb14p.rst24
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst17
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-y10p.rst10
-rw-r--r--Documentation/media/uapi/v4l/subdev-formats.rst113
-rw-r--r--Documentation/media/uapi/v4l/vidioc-qbuf.rst10
-rw-r--r--Documentation/media/v4l-drivers/index.rst1
-rw-r--r--Documentation/media/v4l-drivers/zoran.rst583
-rw-r--r--Documentation/memory-barriers.txt249
-rw-r--r--Documentation/networking/batman-adv.rst110
-rw-r--r--Documentation/networking/devlink-info-versions.rst5
-rw-r--r--Documentation/networking/dsa/bcm_sf2.rst (renamed from Documentation/networking/dsa/bcm_sf2.txt)27
-rw-r--r--Documentation/networking/dsa/dsa.rst (renamed from Documentation/networking/dsa/dsa.txt)279
-rw-r--r--Documentation/networking/dsa/index.rst11
-rw-r--r--Documentation/networking/dsa/lan9303.rst (renamed from Documentation/networking/dsa/lan9303.txt)6
-rw-r--r--Documentation/networking/dsa/sja1105.rst220
-rw-r--r--Documentation/networking/index.rst2
-rw-r--r--Documentation/networking/ip-sysctl.txt34
-rw-r--r--Documentation/networking/netdev-FAQ.rst2
-rw-r--r--Documentation/networking/rxrpc.txt21
-rw-r--r--Documentation/ntb.txt14
-rw-r--r--Documentation/packing.txt149
-rw-r--r--Documentation/powerpc/DAWR-POWER9.txt40
-rw-r--r--Documentation/preempt-locking.txt1
-rw-r--r--Documentation/process/5.Posting.rst10
-rw-r--r--Documentation/process/coding-style.rst6
-rw-r--r--Documentation/process/deprecated.rst2
-rw-r--r--Documentation/process/howto.rst2
-rw-r--r--Documentation/process/kernel-docs.rst12
-rw-r--r--Documentation/process/license-rules.rst61
-rw-r--r--Documentation/process/maintainer-pgp-guide.rst2
-rw-r--r--Documentation/process/submit-checklist.rst27
-rw-r--r--Documentation/process/submitting-patches.rst46
-rw-r--r--Documentation/robust-futexes.txt3
-rw-r--r--Documentation/rtc.txt2
-rw-r--r--Documentation/serial/cyclades_z.rst (renamed from Documentation/serial/README.cycladesZ)5
-rw-r--r--Documentation/serial/driver.rst (renamed from Documentation/serial/driver)115
-rw-r--r--Documentation/serial/index.rst32
-rw-r--r--Documentation/serial/moxa-smartio523
-rw-r--r--Documentation/serial/moxa-smartio.rst615
-rw-r--r--Documentation/serial/n_gsm.rst103
-rw-r--r--Documentation/serial/n_gsm.txt96
-rw-r--r--Documentation/serial/rocket.rst (renamed from Documentation/serial/rocket.txt)152
-rw-r--r--Documentation/serial/serial-iso7816.rst (renamed from Documentation/serial/serial-iso7816.txt)21
-rw-r--r--Documentation/serial/serial-rs485.rst (renamed from Documentation/serial/serial-rs485.txt)22
-rw-r--r--Documentation/serial/tty.rst (renamed from Documentation/serial/tty.txt)111
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst4
-rw-r--r--Documentation/sparc/adi.rst (renamed from Documentation/sparc/adi.txt)188
-rw-r--r--Documentation/sparc/console.rst (renamed from Documentation/sparc/console.txt)4
-rw-r--r--Documentation/sparc/index.rst13
-rw-r--r--Documentation/sparc/oradax/oracle-dax.rst (renamed from Documentation/sparc/oradax/oracle-dax.txt)58
-rw-r--r--Documentation/speculation.txt8
-rw-r--r--Documentation/sysctl/kernel.txt2
-rw-r--r--Documentation/sysctl/vm.txt12
-rw-r--r--Documentation/thermal/sysfs-api.txt2
-rw-r--r--Documentation/trace/ftrace.rst32
-rw-r--r--Documentation/trace/histogram.rst121
-rw-r--r--Documentation/trace/intel_th.rst2
-rw-r--r--Documentation/trace/postprocess/trace-vmscan-postprocess.pl7
-rw-r--r--Documentation/translations/index.rst40
-rw-r--r--Documentation/translations/it_IT/core-api/memory-allocation.rst13
-rw-r--r--Documentation/translations/it_IT/disclaimer-ita.rst13
-rw-r--r--Documentation/translations/it_IT/doc-guide/index.rst6
-rw-r--r--Documentation/translations/it_IT/index.rst65
-rw-r--r--Documentation/translations/it_IT/networking/netdev-FAQ.rst13
-rw-r--r--Documentation/translations/it_IT/process/5.Posting.rst10
-rw-r--r--Documentation/translations/it_IT/process/coding-style.rst8
-rw-r--r--Documentation/translations/it_IT/process/deprecated.rst129
-rw-r--r--Documentation/translations/it_IT/process/kernel-enforcement-statement.rst168
-rw-r--r--Documentation/translations/it_IT/process/license-rules.rst500
-rw-r--r--Documentation/translations/it_IT/process/maintainer-pgp-guide.rst939
-rw-r--r--Documentation/translations/it_IT/process/stable-kernel-rules.rst194
-rw-r--r--Documentation/translations/it_IT/process/submitting-patches.rst47
-rw-r--r--Documentation/translations/ja_JP/SubmitChecklist22
-rw-r--r--Documentation/translations/ja_JP/SubmittingPatches6
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt49
-rw-r--r--Documentation/translations/zh_CN/SubmittingPatches412
-rw-r--r--Documentation/translations/zh_CN/disclaimer-zh_CN.rst9
-rw-r--r--Documentation/translations/zh_CN/index.rst17
-rw-r--r--Documentation/translations/zh_CN/magic-number.txt153
-rw-r--r--Documentation/translations/zh_CN/oops-tracing.txt2
-rw-r--r--Documentation/translations/zh_CN/process/1.Intro.rst186
-rw-r--r--Documentation/translations/zh_CN/process/2.Process.rst360
-rw-r--r--Documentation/translations/zh_CN/process/3.Early-stage.rst161
-rw-r--r--Documentation/translations/zh_CN/process/4.Coding.rst290
-rw-r--r--Documentation/translations/zh_CN/process/5.Posting.rst240
-rw-r--r--Documentation/translations/zh_CN/process/6.Followthrough.rst145
-rw-r--r--Documentation/translations/zh_CN/process/7.AdvancedTopics.rst124
-rw-r--r--Documentation/translations/zh_CN/process/8.Conclusion.rst64
-rw-r--r--Documentation/translations/zh_CN/process/code-of-conduct-interpretation.rst108
-rw-r--r--Documentation/translations/zh_CN/process/code-of-conduct.rst72
-rw-r--r--Documentation/translations/zh_CN/process/coding-style.rst (renamed from Documentation/translations/zh_CN/coding-style.rst)21
-rw-r--r--Documentation/translations/zh_CN/process/development-process.rst26
-rw-r--r--Documentation/translations/zh_CN/process/email-clients.rst (renamed from Documentation/translations/zh_CN/email-clients.txt)104
-rw-r--r--Documentation/translations/zh_CN/process/howto.rst (renamed from Documentation/translations/zh_CN/HOWTO)265
-rw-r--r--Documentation/translations/zh_CN/process/index.rst60
-rw-r--r--Documentation/translations/zh_CN/process/license-rules.rst370
-rw-r--r--Documentation/translations/zh_CN/process/magic-number.rst151
-rw-r--r--Documentation/translations/zh_CN/process/management-style.rst207
-rw-r--r--Documentation/translations/zh_CN/process/programming-language.rst41
-rw-r--r--Documentation/translations/zh_CN/process/stable-api-nonsense.rst (renamed from Documentation/translations/zh_CN/stable_api_nonsense.txt)68
-rw-r--r--Documentation/translations/zh_CN/process/stable-kernel-rules.rst (renamed from Documentation/translations/zh_CN/stable_kernel_rules.txt)34
-rw-r--r--Documentation/translations/zh_CN/process/submit-checklist.rst107
-rw-r--r--Documentation/translations/zh_CN/process/submitting-drivers.rst (renamed from Documentation/translations/zh_CN/SubmittingDrivers)36
-rw-r--r--Documentation/translations/zh_CN/process/submitting-patches.rst682
-rw-r--r--Documentation/translations/zh_CN/process/volatile-considered-harmful.rst (renamed from Documentation/translations/zh_CN/volatile-considered-harmful.txt)35
-rw-r--r--Documentation/translations/zh_CN/sparse.txt6
-rw-r--r--Documentation/unaligned-memory-access.txt2
-rw-r--r--Documentation/usb/WUSB-Design-overview.txt56
-rw-r--r--Documentation/usb/acm.txt164
-rw-r--r--Documentation/usb/authorization.txt75
-rw-r--r--Documentation/usb/chipidea.txt101
-rw-r--r--Documentation/usb/dwc3.txt12
-rw-r--r--Documentation/usb/ehci.txt42
-rw-r--r--Documentation/usb/functionfs.txt17
-rw-r--r--Documentation/usb/gadget-testing.txt611
-rw-r--r--Documentation/usb/gadget_configfs.txt306
-rw-r--r--Documentation/usb/gadget_hid.txt175
-rw-r--r--Documentation/usb/gadget_multi.txt43
-rw-r--r--Documentation/usb/gadget_printer.txt155
-rw-r--r--Documentation/usb/gadget_serial.txt75
-rw-r--r--Documentation/usb/iuu_phoenix.txt34
-rw-r--r--Documentation/usb/mass-storage.txt19
-rw-r--r--Documentation/usb/misc_usbsevseg.txt9
-rw-r--r--Documentation/usb/mtouchusb.txt42
-rw-r--r--Documentation/usb/ohci.txt5
-rw-r--r--Documentation/usb/rio.txt83
-rw-r--r--Documentation/usb/usb-help.txt21
-rw-r--r--Documentation/usb/usb-serial.txt205
-rw-r--r--Documentation/usb/usbip_protocol.txt552
-rw-r--r--Documentation/usb/usbmon.txt100
-rw-r--r--Documentation/userspace-api/seccomp_filter.rst8
-rw-r--r--Documentation/video-output.txt52
-rw-r--r--Documentation/virtual/kvm/api.txt236
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt3
-rw-r--r--Documentation/virtual/kvm/devices/xive.txt197
-rw-r--r--Documentation/vm/hmm.rst94
-rw-r--r--Documentation/vm/hugetlbfs_reserv.rst17
-rw-r--r--Documentation/vm/index.rst1
-rw-r--r--Documentation/vm/memory-model.rst183
-rw-r--r--Documentation/vm/numa.rst4
-rw-r--r--Documentation/vm/transhuge.rst81
-rw-r--r--Documentation/x86/amd-memory-encryption.rst (renamed from Documentation/x86/amd-memory-encryption.txt)13
-rw-r--r--Documentation/x86/boot.rst (renamed from Documentation/x86/boot.txt)530
-rw-r--r--Documentation/x86/conf.py10
-rw-r--r--Documentation/x86/earlyprintk.rst (renamed from Documentation/x86/earlyprintk.txt)122
-rw-r--r--Documentation/x86/entry_64.rst (renamed from Documentation/x86/entry_64.txt)12
-rw-r--r--Documentation/x86/exception-tables.rst (renamed from Documentation/x86/exception-tables.txt)247
-rw-r--r--Documentation/x86/i386/IO-APIC.rst (renamed from Documentation/x86/i386/IO-APIC.txt)28
-rw-r--r--Documentation/x86/i386/index.rst10
-rw-r--r--Documentation/x86/index.rst31
-rw-r--r--Documentation/x86/intel_mpx.rst (renamed from Documentation/x86/intel_mpx.txt)120
-rw-r--r--Documentation/x86/kernel-stacks.rst (renamed from Documentation/x86/kernel-stacks)33
-rw-r--r--Documentation/x86/mds.rst193
-rw-r--r--Documentation/x86/microcode.rst (renamed from Documentation/x86/microcode.txt)62
-rw-r--r--Documentation/x86/mtrr.rst354
-rw-r--r--Documentation/x86/mtrr.txt329
-rw-r--r--Documentation/x86/orc-unwinder.rst (renamed from Documentation/x86/orc-unwinder.txt)27
-rw-r--r--Documentation/x86/pat.rst242
-rw-r--r--Documentation/x86/pat.txt230
-rw-r--r--Documentation/x86/protection-keys.rst (renamed from Documentation/x86/protection-keys.txt)33
-rw-r--r--Documentation/x86/pti.rst (renamed from Documentation/x86/pti.txt)17
-rw-r--r--Documentation/x86/resctrl_ui.rst (renamed from Documentation/x86/resctrl_ui.txt)916
-rw-r--r--Documentation/x86/tlb.rst (renamed from Documentation/x86/tlb.txt)30
-rw-r--r--Documentation/x86/topology.rst (renamed from Documentation/x86/topology.txt)94
-rw-r--r--Documentation/x86/usb-legacy-support.rst (renamed from Documentation/x86/usb-legacy-support.txt)40
-rw-r--r--Documentation/x86/x86_64/5level-paging.rst (renamed from Documentation/x86/x86_64/5level-paging.txt)16
-rw-r--r--Documentation/x86/x86_64/boot-options.rst335
-rw-r--r--Documentation/x86/x86_64/boot-options.txt278
-rw-r--r--Documentation/x86/x86_64/cpu-hotplug-spec.rst (renamed from Documentation/x86/x86_64/cpu-hotplug-spec)5
-rw-r--r--Documentation/x86/x86_64/fake-numa-for-cpusets.rst (renamed from Documentation/x86/x86_64/fake-numa-for-cpusets)25
-rw-r--r--Documentation/x86/x86_64/index.rst16
-rw-r--r--Documentation/x86/x86_64/machinecheck.rst (renamed from Documentation/x86/x86_64/machinecheck)12
-rw-r--r--Documentation/x86/x86_64/mm.rst161
-rw-r--r--Documentation/x86/x86_64/mm.txt153
-rw-r--r--Documentation/x86/x86_64/uefi.rst (renamed from Documentation/x86/x86_64/uefi.txt)30
-rw-r--r--Documentation/x86/zero-page.rst45
-rw-r--r--Documentation/x86/zero-page.txt40
-rw-r--r--Documentation/xilinx/eemi.txt4
-rw-r--r--LICENSES/deprecated/GPL-1.0 (renamed from LICENSES/other/GPL-1.0)0
-rw-r--r--LICENSES/deprecated/ISC (renamed from LICENSES/other/ISC)0
-rw-r--r--LICENSES/deprecated/Linux-OpenIB (renamed from LICENSES/other/Linux-OpenIB)0
-rw-r--r--LICENSES/deprecated/X11 (renamed from LICENSES/other/X11)0
-rw-r--r--LICENSES/dual/Apache-2.0 (renamed from LICENSES/other/Apache-2.0)4
-rw-r--r--LICENSES/dual/CDDL-1.0 (renamed from LICENSES/other/CDDL-1.0)4
-rw-r--r--LICENSES/dual/MPL-1.1 (renamed from LICENSES/other/MPL-1.1)4
-rw-r--r--MAINTAINERS498
-rw-r--r--Makefile183
-rw-r--r--arch/Kconfig30
-rw-r--r--arch/alpha/Kconfig8
-rw-r--r--arch/alpha/Makefile2
-rw-r--r--arch/alpha/configs/defconfig (renamed from arch/alpha/defconfig)0
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/io.h2
-rw-r--r--arch/alpha/include/asm/rwsem.h211
-rw-r--r--arch/alpha/include/asm/segment.h6
-rw-r--r--arch/alpha/include/asm/syscall.h2
-rw-r--r--arch/alpha/include/asm/tlb.h6
-rw-r--r--arch/alpha/include/uapi/asm/sockios.h4
-rw-r--r--arch/alpha/kernel/pci_iommu.c20
-rw-r--r--arch/alpha/kernel/smc37c669.c1
-rw-r--r--arch/alpha/kernel/smc37c93x.c1
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/alpha/mm/init.c14
-rw-r--r--arch/arc/Kconfig3
-rw-r--r--arch/arc/boot/dts/hsdk.dts13
-rw-r--r--arch/arc/configs/haps_hs_defconfig1
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig1
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/elf.h6
-rw-r--r--arch/arc/include/asm/syscall.h11
-rw-r--r--arch/arc/include/asm/tlb.h32
-rw-r--r--arch/arc/include/asm/uaccess.h1
-rw-r--r--arch/arc/lib/memset-archs.S4
-rw-r--r--arch/arc/mm/cache.c31
-rw-r--r--arch/arc/mm/init.c15
-rw-r--r--arch/arm/Kconfig14
-rw-r--r--arch/arm/Kconfig.debug2
-rw-r--r--arch/arm/Makefile4
-rw-r--r--arch/arm/boot/dts/Makefile16
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir2110.dts16
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir3220.dts38
-rw-r--r--arch/arm/boot/dts/am335x-baltos-ir5221.dts42
-rw-r--r--arch/arm/boot/dts/am335x-baltos-leds.dtsi6
-rw-r--r--arch/arm/boot/dts/am335x-baltos.dtsi140
-rw-r--r--arch/arm/boot/dts/am335x-base0033.dts48
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi116
-rw-r--r--arch/arm/boot/dts/am335x-boneblack-common.dtsi54
-rw-r--r--arch/arm/boot/dts/am335x-boneblack-wireless.dts28
-rw-r--r--arch/arm/boot/dts/am335x-boneblue.dts104
-rw-r--r--arch/arm/boot/dts/am335x-bonegreen-common.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-bonegreen-wireless.dts28
-rw-r--r--arch/arm/boot/dts/am335x-chiliboard.dts66
-rw-r--r--arch/arm/boot/dts/am335x-chilisom.dtsi34
-rw-r--r--arch/arm/boot/dts/am335x-cm-t335.dts190
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts234
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts292
-rw-r--r--arch/arm/boot/dts/am335x-icev2.dts116
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi40
-rw-r--r--arch/arm/boot/dts/am335x-lxm.dts120
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi42
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-2101.dts24
-rw-r--r--arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts116
-rw-r--r--arch/arm/boot/dts/am335x-nano.dts140
-rw-r--r--arch/arm/boot/dts/am335x-osd3358-sm-red.dts168
-rw-r--r--arch/arm/boot/dts/am335x-osd335x-common.dtsi4
-rw-r--r--arch/arm/boot/dts/am335x-pcm-953.dtsi74
-rw-r--r--arch/arm/boot/dts/am335x-pdu001.dts170
-rw-r--r--arch/arm/boot/dts/am335x-pepper.dts200
-rw-r--r--arch/arm/boot/dts/am335x-phycore-som.dtsi60
-rw-r--r--arch/arm/boot/dts/am335x-pocketbeagle.dts56
-rw-r--r--arch/arm/boot/dts/am335x-sancloud-bbe.dts62
-rw-r--r--arch/arm/boot/dts/am335x-sbc-t335.dts152
-rw-r--r--arch/arm/boot/dts/am335x-shc.dts226
-rw-r--r--arch/arm/boot/dts/am335x-sl50.dts208
-rw-r--r--arch/arm/boot/dts/am335x-wega.dtsi68
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts11
-rw-r--r--arch/arm/boot/dts/am5718.dtsi32
-rw-r--r--arch/arm/boot/dts/am571x-idk.dts2
-rw-r--r--arch/arm/boot/dts/am5728.dtsi33
-rw-r--r--arch/arm/boot/dts/am572x-idk.dts5
-rw-r--r--arch/arm/boot/dts/am5748.dtsi33
-rw-r--r--arch/arm/boot/dts/am574x-idk.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi2
-rw-r--r--arch/arm/boot/dts/am57xx-cl-som-am57x.dts2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi2
-rw-r--r--arch/arm/boot/dts/aspeed-ast2500-evb.dts21
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts6
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts5
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts4
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts8
-rw-r--r--arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts52
-rw-r--r--arch/arm/boot/dts/aspeed-g4.dtsi6
-rw-r--r--arch/arm/boot/dts/aspeed-g5.dtsi20
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi39
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1_ek.dts39
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts93
-rw-r--r--arch/arm/boot/dts/at91-sama5d4_xplained.dts39
-rw-r--r--arch/arm/boot/dts/at91-sama5d4ek.dts39
-rw-r--r--arch/arm/boot/dts/at91-vinco.dts39
-rw-r--r--arch/arm/boot/dts/at91sam9260ek.dts39
-rw-r--r--arch/arm/boot/dts/at91sam9xe.dtsi39
-rw-r--r--arch/arm/boot/dts/axp81x.dtsi4
-rw-r--r--arch/arm/boot/dts/dra7-l4.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/emev2-kzm9d.dts2
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi72
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi14
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts21
-rw-r--r--arch/arm/boot/dts/exynos4412-odroidu3.dts7
-rw-r--r--arch/arm/boot/dts/exynos4412-origen.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412-smdk4412.dts4
-rw-r--r--arch/arm/boot/dts/exynos4412-trats2.dts3
-rw-r--r--arch/arm/boot/dts/exynos4412.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts3
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi40
-rw-r--r--arch/arm/boot/dts/exynos5260-pinctrl.dtsi16
-rw-r--r--arch/arm/boot/dts/exynos5260-xyref5260.dts2
-rw-r--r--arch/arm/boot/dts/exynos5260.dtsi82
-rw-r--r--arch/arm/boot/dts/exynos5410-odroidxu.dts2
-rw-r--r--arch/arm/boot/dts/exynos5410-smdk5410.dts2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts364
-rw-r--r--arch/arm/boot/dts/exynos5420-smdk5420.dts3
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi5
-rw-r--r--arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos54xx.dtsi38
-rw-r--r--arch/arm/boot/dts/gemini-dlink-dir-685.dts82
-rw-r--r--arch/arm/boot/dts/imx35.dtsi6
-rw-r--r--arch/arm/boot/dts/imx50-kobo-aura.dts258
-rw-r--r--arch/arm/boot/dts/imx50.dtsi23
-rw-r--r--arch/arm/boot/dts/imx51-zii-rdu1.dts38
-rw-r--r--arch/arm/boot/dts/imx51.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-m53.dtsi2
-rw-r--r--arch/arm/boot/dts/imx53-m53menlo.dts311
-rw-r--r--arch/arm/boot/dts/imx53.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts381
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts2
-rw-r--r--arch/arm/boot/dts/imx6dl-sabreauto.dts15
-rw-r--r--arch/arm/boot/dts/imx6q-ba16.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-gw54xx.dts105
-rw-r--r--arch/arm/boot/dts/imx6q-logicpd.dts4
-rw-r--r--arch/arm/boot/dts/imx6q-marsboard.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-zii-rdu2.dts38
-rw-r--r--arch/arm/boot/dts/imx6qdl-apf6.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-emcon.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi29
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw551x.dtsi138
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw5903.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-sr-som.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-var-dart.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi50
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi18
-rw-r--r--arch/arm/boot/dts/imx6qp-zii-rdu2.dts38
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6sll.dtsi3
-rw-r--r--arch/arm/boot/dts/imx6sx-sabreauto.dts2
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6ul.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7-mba7.dtsi550
-rw-r--r--arch/arm/boot/dts/imx7-tqma7.dtsi249
-rw-r--r--arch/arm/boot/dts/imx7d-mba7.dts119
-rw-r--r--arch/arm/boot/dts/imx7d-pico.dtsi2
-rw-r--r--arch/arm/boot/dts/imx7d-tqma7.dtsi11
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rpu2.dts941
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi1
-rw-r--r--arch/arm/boot/dts/imx7s-mba7.dts18
-rw-r--r--arch/arm/boot/dts/imx7s-tqma7.dtsi11
-rw-r--r--arch/arm/boot/dts/imx7s-warp.dts61
-rw-r--r--arch/arm/boot/dts/imx7s.dtsi98
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi12
-rw-r--r--arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts109
-rw-r--r--arch/arm/boot/dts/intel-ixp42x.dtsi25
-rw-r--r--arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts94
-rw-r--r--arch/arm/boot/dts/intel-ixp43x.dtsi15
-rw-r--r--arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi34
-rw-r--r--arch/arm/boot/dts/intel-ixp4xx.dtsi69
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/lpc3250-ea3250.dts1
-rw-r--r--arch/arm/boot/dts/lpc3250-phy3250.dts3
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi38
-rw-r--r--arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts1
-rw-r--r--arch/arm/boot/dts/ls1021a-qds.dts4
-rw-r--r--arch/arm/boot/dts/ls1021a-twr.dts9
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi40
-rw-r--r--arch/arm/boot/dts/meson.dtsi9
-rw-r--r--arch/arm/boot/dts/meson8.dtsi10
-rw-r--r--arch/arm/boot/dts/meson8b-ec100.dts14
-rw-r--r--arch/arm/boot/dts/meson8b-odroidc1.dts66
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi10
-rw-r--r--arch/arm/boot/dts/omap2420-n810.dts2
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts27
-rw-r--r--arch/arm/boot/dts/omap4-duovero.dtsi21
-rw-r--r--arch/arm/boot/dts/omap4-l4-abe.dtsi501
-rw-r--r--arch/arm/boot/dts/omap4-mcpdm.dtsi44
-rw-r--r--arch/arm/boot/dts/omap4-panda-common.dtsi21
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts21
-rw-r--r--arch/arm/boot/dts/omap4-var-som-om44.dtsi21
-rw-r--r--arch/arm/boot/dts/omap4.dtsi192
-rw-r--r--arch/arm/boot/dts/omap5-board-common.dtsi8
-rw-r--r--arch/arm/boot/dts/omap5-l4-abe.dtsi447
-rw-r--r--arch/arm/boot/dts/omap5.dtsi115
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom-ipq4019.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom-mdm9615.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom-msm8660.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom-pma8084.dtsi1
-rw-r--r--arch/arm/boot/dts/r7s72100-rskrza1.dts46
-rw-r--r--arch/arm/boot/dts/r8a73a4-ape6evm.dts29
-rw-r--r--arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts123
-rw-r--r--arch/arm/boot/dts/r8a77470.dtsi313
-rw-r--r--arch/arm/boot/dts/r8a7778-bockw.dts2
-rw-r--r--arch/arm/boot/dts/r8a7779-marzen.dts2
-rw-r--r--arch/arm/boot/dts/r8a7792-blanche.dts20
-rw-r--r--arch/arm/boot/dts/r8a7792.dtsi18
-rw-r--r--arch/arm/boot/dts/r8a7794-alt.dts47
-rw-r--r--arch/arm/boot/dts/rk3036-kylin.dts10
-rw-r--r--arch/arm/boot/dts/rk3036.dtsi136
-rw-r--r--arch/arm/boot/dts/rk3066a-marsboard.dts2
-rw-r--r--arch/arm/boot/dts/rk3066a-mk808.dts37
-rw-r--r--arch/arm/boot/dts/rk3066a-rayeager.dts26
-rw-r--r--arch/arm/boot/dts/rk3066a.dtsi239
-rw-r--r--arch/arm/boot/dts/rk3188-px3-evb.dts4
-rw-r--r--arch/arm/boot/dts/rk3188-radxarock.dts14
-rw-r--r--arch/arm/boot/dts/rk3188.dtsi210
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi170
-rw-r--r--arch/arm/boot/dts/rk3288-evb-act8846.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi26
-rw-r--r--arch/arm/boot/dts/rk3288-fennec.dts10
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-beta.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi10
-rw-r--r--arch/arm/boot/dts/rk3288-firefly-reload.dts36
-rw-r--r--arch/arm/boot/dts/rk3288-firefly.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-firefly.dtsi38
-rw-r--r--arch/arm/boot/dts/rk3288-miqi.dts28
-rw-r--r--arch/arm/boot/dts/rk3288-phycore-rdk.dts28
-rw-r--r--arch/arm/boot/dts/rk3288-phycore-som.dtsi30
-rw-r--r--arch/arm/boot/dts/rk3288-r89.dts14
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-som.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3288-rock2-square.dts18
-rw-r--r--arch/arm/boot/dts/rk3288-tinker-s.dts5
-rw-r--r--arch/arm/boot/dts/rk3288-tinker.dtsi67
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi8
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-brain.dts8
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi25
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-jaq.dts14
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-jerry.dts23
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-mickey.dts6
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-mighty.dts34
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-minnie.dts24
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-pinky.dts6
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi16
-rw-r--r--arch/arm/boot/dts/rk3288-veyron-speedy.dts14
-rw-r--r--arch/arm/boot/dts/rk3288-veyron.dtsi91
-rw-r--r--arch/arm/boot/dts/rk3288-vyasa.dts6
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi324
-rw-r--r--arch/arm/boot/dts/rv1108-elgin-r1.dts1
-rw-r--r--arch/arm/boot/dts/rv1108.dtsi138
-rw-r--r--arch/arm/boot/dts/s5pv210-goni.dts2
-rw-r--r--arch/arm/boot/dts/s5pv210.dtsi6
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi45
-rw-r--r--arch/arm/boot/dts/sama5d36ek_cmp.dts39
-rw-r--r--arch/arm/boot/dts/sama5d3xcm_cmp.dtsi39
-rw-r--r--arch/arm/boot/dts/sama5d3xmb_cmp.dtsi39
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi39
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts1
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi74
-rw-r--r--arch/arm/boot/dts/ste-href-stuib.dtsi13
-rw-r--r--arch/arm/boot/dts/ste-href-tvk1281618.dtsi13
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi13
-rw-r--r--arch/arm/boot/dts/stm32f769-disco.dts4
-rw-r--r--arch/arm/boot/dts/stm32h743-pinctrl.dtsi68
-rw-r--r--arch/arm/boot/dts/stm32h743.dtsi14
-rw-r--r--arch/arm/boot/dts/stm32h743i-disco.dts20
-rw-r--r--arch/arm/boot/dts/stm32h743i-eval.dts23
-rw-r--r--arch/arm/boot/dts/stm32mp157-pinctrl.dtsi269
-rw-r--r--arch/arm/boot/dts/stm32mp157a-dk1.dts250
-rw-r--r--arch/arm/boot/dts/stm32mp157c-dk2.dts76
-rw-r--r--arch/arm/boot/dts/stm32mp157c-ed1.dts175
-rw-r--r--arch/arm/boot/dts/stm32mp157c.dtsi56
-rw-r--r--arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10-cubieboard.dts10
-rw-r--r--arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet1.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet97fv2.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts72
-rw-r--r--arch/arm/boot/dts/sun4i-a10-marsboard.dts12
-rw-r--r--arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts18
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pcduino.dts12
-rw-r--r--arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts20
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi11
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts10
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts10
-rw-r--r--arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts20
-rw-r--r--arch/arm/boot/dts/sun5i-a13-hsg-h702.dts12
-rw-r--r--arch/arm/boot/dts/sun5i-a13-licheepi-one.dts5
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts18
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts20
-rw-r--r--arch/arm/boot/dts/sun5i-a13-q8-tablet.dts11
-rw-r--r--arch/arm/boot/dts/sun5i-a13-utoo-p66.dts16
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-chip-pro.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-gr8-evb.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-r8-chip.dts14
-rw-r--r--arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi20
-rw-r--r--arch/arm/boot/dts/sun5i.dtsi66
-rw-r--r--arch/arm/boot/dts/sun6i-a31-colombus.dts14
-rw-r--r--arch/arm/boot/dts/sun6i-a31-hummingbird.dts16
-rw-r--r--arch/arm/boot/dts/sun6i-a31-i7.dts1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi22
-rw-r--r--arch/arm/boot/dts/sun6i-a31s-primo81.dts2
-rw-r--r--arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts10
-rw-r--r--arch/arm/boot/dts/sun7i-a20-cubieboard2.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts4
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts18
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts24
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts18
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-orangepi.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-pcduino3.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts12
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi125
-rw-r--r--arch/arm/boot/dts/sun8i-a23-a33.dtsi74
-rw-r--r--arch/arm/boot/dts/sun8i-a23-q8-tablet.dts6
-rw-r--r--arch/arm/boot/dts/sun8i-a33-q8-tablet.dts7
-rw-r--r--arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts11
-rw-r--r--arch/arm/boot/dts/sun8i-a33.dtsi20
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts12
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts12
-rw-r--r--arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts73
-rw-r--r--arch/arm/boot/dts/sun8i-a83t.dtsi111
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts20
-rw-r--r--arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-beelink-x2.dts6
-rw-r--r--arch/arm/boot/dts/sun8i-h3-mapleboard-mp130.dts7
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts3
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-h3-nanopi.dtsi25
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-2.dts34
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts27
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-one.dts25
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts27
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts9
-rw-r--r--arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts3
-rw-r--r--arch/arm/boot/dts/sun8i-h3-rervision-dvk.dts114
-rw-r--r--arch/arm/boot/dts/sun8i-h3.dtsi4
-rw-r--r--arch/arm/boot/dts/sun8i-q8-common.dtsi18
-rw-r--r--arch/arm/boot/dts/sun8i-r16-nintendo-nes-classic.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-r16-parrot.dts12
-rw-r--r--arch/arm/boot/dts/sun8i-r40.dtsi13
-rw-r--r--arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi12
-rw-r--r--arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts2
-rw-r--r--arch/arm/boot/dts/sun8i-v3s.dtsi13
-rw-r--r--arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts36
-rw-r--r--arch/arm/boot/dts/sun9i-a80-cubieboard4.dts15
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts4
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi84
-rw-r--r--arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi7
-rw-r--r--arch/arm/boot/dts/sunxi-h3-h5.dtsi50
-rw-r--r--arch/arm/boot/dts/sunxi-libretech-all-h3-cc.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-emc.dtsi39
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-eval.dts40
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts2
-rw-r--r--arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi9
-rw-r--r--arch/arm/boot/dts/tegra124-apalis.dtsi45
-rw-r--r--arch/arm/boot/dts/tegra124-jetson-tk1.dts5
-rw-r--r--arch/arm/boot/dts/tegra124-nyan.dtsi5
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts5
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi11
-rw-r--r--arch/arm/boot/dts/vf610-zii-cfu1.dts26
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-b.dts57
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev-rev-c.dts49
-rw-r--r--arch/arm/boot/dts/vf610-zii-dev.dtsi6
-rw-r--r--arch/arm/boot/dts/vf610-zii-scu4-aib.dts14
-rw-r--r--arch/arm/boot/dts/vf610-zii-spb4.dts359
-rw-r--r--arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts5
-rw-r--r--arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts17
-rw-r--r--arch/arm/common/sa1111.c2
-rw-r--r--arch/arm/configs/aspeed_g4_defconfig2
-rw-r--r--arch/arm/configs/aspeed_g5_defconfig2
-rw-r--r--arch/arm/configs/at91_dt_defconfig3
-rw-r--r--arch/arm/configs/clps711x_defconfig3
-rw-r--r--arch/arm/configs/cm_x2xx_defconfig2
-rw-r--r--arch/arm/configs/cm_x300_defconfig2
-rw-r--r--arch/arm/configs/colibri_pxa270_defconfig2
-rw-r--r--arch/arm/configs/corgi_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/efm32_defconfig1
-rw-r--r--arch/arm/configs/em_x270_defconfig2
-rw-r--r--arch/arm/configs/ep93xx_defconfig2
-rw-r--r--arch/arm/configs/eseries_pxa_defconfig2
-rw-r--r--arch/arm/configs/exynos_defconfig1
-rw-r--r--arch/arm/configs/ezx_defconfig1
-rw-r--r--arch/arm/configs/h3600_defconfig1
-rw-r--r--arch/arm/configs/imote2_defconfig1
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/ixp4xx_defconfig2
-rw-r--r--arch/arm/configs/keystone_defconfig2
-rw-r--r--arch/arm/configs/lpc32xx_defconfig2
-rw-r--r--arch/arm/configs/mini2440_defconfig4
-rw-r--r--arch/arm/configs/mmp2_defconfig2
-rw-r--r--arch/arm/configs/moxart_defconfig1
-rw-r--r--arch/arm/configs/multi_v4t_defconfig3
-rw-r--r--arch/arm/configs/multi_v5_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig153
-rw-r--r--arch/arm/configs/mv78xx0_defconfig2
-rw-r--r--arch/arm/configs/mvebu_v5_defconfig2
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig2
-rw-r--r--arch/arm/configs/mxs_defconfig2
-rw-r--r--arch/arm/configs/nhk8815_defconfig4
-rw-r--r--arch/arm/configs/omap1_defconfig3
-rw-r--r--arch/arm/configs/omap2plus_defconfig4
-rw-r--r--arch/arm/configs/orion5x_defconfig2
-rw-r--r--arch/arm/configs/oxnas_v6_defconfig2
-rw-r--r--arch/arm/configs/pxa3xx_defconfig2
-rw-r--r--arch/arm/configs/pxa_defconfig6
-rw-r--r--arch/arm/configs/qcom_defconfig15
-rw-r--r--arch/arm/configs/s3c2410_defconfig2
-rw-r--r--arch/arm/configs/s3c6400_defconfig2
-rw-r--r--arch/arm/configs/sama5_defconfig4
-rw-r--r--arch/arm/configs/shmobile_defconfig13
-rw-r--r--arch/arm/configs/socfpga_defconfig6
-rw-r--r--arch/arm/configs/spear13xx_defconfig2
-rw-r--r--arch/arm/configs/spear3xx_defconfig2
-rw-r--r--arch/arm/configs/spear6xx_defconfig2
-rw-r--r--arch/arm/configs/spitz_defconfig2
-rw-r--r--arch/arm/configs/stm32_defconfig1
-rw-r--r--arch/arm/configs/tango4_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig36
-rw-r--r--arch/arm/configs/trizeps4_defconfig2
-rw-r--r--arch/arm/configs/u300_defconfig3
-rw-r--r--arch/arm/configs/vexpress_defconfig1
-rw-r--r--arch/arm/crypto/aes-neonbs-glue.c2
-rw-r--r--arch/arm/crypto/chacha-neon-glue.c5
-rw-r--r--arch/arm/crypto/crc32-ce-glue.c5
-rw-r--r--arch/arm/crypto/crct10dif-ce-glue.c3
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c10
-rw-r--r--arch/arm/crypto/nhpoly1305-neon-glue.c3
-rw-r--r--arch/arm/crypto/sha1-ce-glue.c5
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c5
-rw-r--r--arch/arm/crypto/sha2-ce-glue.c5
-rw-r--r--arch/arm/crypto/sha256_neon_glue.c5
-rw-r--r--arch/arm/crypto/sha512-neon-glue.c5
-rw-r--r--arch/arm/firmware/Kconfig29
-rw-r--r--arch/arm/firmware/Makefile4
-rw-r--r--arch/arm/include/asm/Kbuild4
-rw-r--r--arch/arm/include/asm/arch_timer.h18
-rw-r--r--arch/arm/include/asm/cp15.h2
-rw-r--r--arch/arm/include/asm/domain.h6
-rw-r--r--arch/arm/include/asm/firmware.h2
-rw-r--r--arch/arm/include/asm/futex.h3
-rw-r--r--arch/arm/include/asm/hardirq.h1
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/kvm_emulate.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h26
-rw-r--r--arch/arm/include/asm/limits.h12
-rw-r--r--arch/arm/include/asm/processor.h4
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h4
-rw-r--r--arch/arm/include/asm/syscall.h2
-rw-r--r--arch/arm/include/asm/tlb.h255
-rw-r--r--arch/arm/include/asm/uaccess.h3
-rw-r--r--arch/arm/kernel/atags.h2
-rw-r--r--arch/arm/kernel/dma-isa.c8
-rw-r--r--arch/arm/kernel/signal.c3
-rw-r--r--arch/arm/kernel/smp.c30
-rw-r--r--arch/arm/kernel/stacktrace.c6
-rw-r--r--arch/arm/kvm/Kconfig1
-rw-r--r--arch/arm/mach-at91/Kconfig26
-rw-r--r--arch/arm/mach-at91/at91sam9.c18
-rw-r--r--arch/arm/mach-at91/generic.h2
-rw-r--r--arch/arm/mach-at91/pm.c193
-rw-r--r--arch/arm/mach-at91/pm_suspend.S111
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c51
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c50
-rw-r--r--arch/arm/mach-davinci/da830.c1
-rw-r--r--arch/arm/mach-davinci/da850.c1
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c1
-rw-r--r--arch/arm/mach-davinci/dm355.c1
-rw-r--r--arch/arm/mach-davinci/dm365.c1
-rw-r--r--arch/arm/mach-davinci/dm644x.c1
-rw-r--r--arch/arm/mach-davinci/dm646x.c1
-rw-r--r--arch/arm/mach-dove/common.c1
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c2
-rw-r--r--arch/arm/mach-ep93xx/clock.c3
-rw-r--r--arch/arm/mach-ep93xx/core.c8
-rw-r--r--arch/arm/mach-ep93xx/dma.c2
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c4
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c2
-rw-r--r--arch/arm/mach-ep93xx/gpio-ep93xx.h (renamed from arch/arm/mach-ep93xx/include/mach/gpio-ep93xx.h)0
-rw-r--r--arch/arm/mach-ep93xx/hardware.h (renamed from arch/arm/mach-ep93xx/include/mach/hardware.h)2
-rw-r--r--arch/arm/mach-ep93xx/micro9.c2
-rw-r--r--arch/arm/mach-ep93xx/platform.h (renamed from arch/arm/mach-ep93xx/include/mach/platform.h)23
-rw-r--r--arch/arm/mach-ep93xx/simone.c4
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c4
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c4
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c4
-rw-r--r--arch/arm/mach-exynos/Kconfig12
-rw-r--r--arch/arm/mach-exynos/Makefile2
-rw-r--r--arch/arm/mach-exynos/common.h1
-rw-r--r--arch/arm/mach-exynos/exynos.c3
-rw-r--r--arch/arm/mach-exynos/firmware.c1
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c13
-rw-r--r--arch/arm/mach-exynos/platsmp.c9
-rw-r--r--arch/arm/mach-exynos/smc.h7
-rw-r--r--arch/arm/mach-exynos/suspend.c49
-rw-r--r--arch/arm/mach-imx/devices/platform-fec.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-gpio_keys.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-imx2-wdt.c2
-rw-r--r--arch/arm/mach-imx/devices/platform-mxc_nand.c2
-rw-r--r--arch/arm/mach-imx/hardware.h2
-rw-r--r--arch/arm/mach-imx/pm-imx6.c8
-rw-r--r--arch/arm/mach-integrator/impd1.c2
-rw-r--r--arch/arm/mach-iop13xx/pci.c2
-rw-r--r--arch/arm/mach-iop13xx/tpmi.c2
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig27
-rw-r--r--arch/arm/mach-ixp4xx/Makefile5
-rw-r--r--arch/arm/mach-ixp4xx/avila-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/avila-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/common-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c483
-rw-r--r--arch/arm/mach-ixp4xx/coyote-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/coyote-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c5
-rw-r--r--arch/arm/mach-ixp4xx/fsg-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/fsg-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/gateway7001-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/gtwx5715-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/entry-macro.S41
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/irqs.h75
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h94
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/qmgr.h204
-rw-r--r--arch/arm/mach-ixp4xx/irqs.h68
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c2
-rw-r--r--arch/arm/mach-ixp4xx/ixdpg425-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/ixp4xx-of.c60
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c5
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c12
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-pci.c2
-rw-r--r--arch/arm/mach-ixp4xx/wg302v2-setup.c2
-rw-r--r--arch/arm/mach-ks8695/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c42
-rw-r--r--arch/arm/mach-mediatek/mediatek.c1
-rw-r--r--arch/arm/mach-mv78xx0/common.c1
-rw-r--r--arch/arm/mach-mvebu/board-v7.c1
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S2
-rw-r--r--arch/arm/mach-mvebu/kirkwood.c5
-rw-r--r--arch/arm/mach-mvebu/pm-board.c11
-rw-r--r--arch/arm/mach-mvebu/pmsu_ll.S3
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c2
-rw-r--r--arch/arm/mach-omap1/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-omap2/clock.c3
-rw-r--r--arch/arm/mach-omap2/common.h9
-rw-r--r--arch/arm/mach-omap2/i2c.c11
-rw-r--r--arch/arm/mach-omap2/io.c7
-rw-r--r--arch/arm/mach-omap2/mmc.h8
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c214
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h29
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c103
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c255
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c64
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_43xx_data.c113
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c740
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c748
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c848
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_81xx_data.c1
-rw-r--r--arch/arm/mach-omap2/pm33xx-core.c84
-rw-r--r--arch/arm/mach-omap2/sleep43xx.S3
-rw-r--r--arch/arm/mach-omap2/sr_device.c5
-rw-r--r--arch/arm/mach-orion5x/common.c1
-rw-r--r--arch/arm/mach-prima2/common.c2
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa270.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa3xx.c2
-rw-r--r--arch/arm/mach-pxa/gumstix.c2
-rw-r--r--arch/arm/mach-pxa/lpd270.c2
-rw-r--r--arch/arm/mach-pxa/lubbock.c2
-rw-r--r--arch/arm/mach-pxa/mainstone.c2
-rw-r--r--arch/arm/mach-pxa/trizeps4.c2
-rw-r--r--arch/arm/mach-pxa/viper.c2
-rw-r--r--arch/arm/mach-rockchip/platsmp.c1
-rw-r--r--arch/arm/mach-rockchip/pm.c4
-rw-r--r--arch/arm/mach-rockchip/rockchip.c1
-rw-r--r--arch/arm/mach-rpc/dma.c8
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c14
-rw-r--r--arch/arm/mach-sa1100/include/mach/memory.h2
-rw-r--r--arch/arm/mach-sa1100/neponset.c2
-rw-r--r--arch/arm/mach-shmobile/pm-rcar-gen2.c1
-rw-r--r--arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c6
-rw-r--r--arch/arm/mach-stm32/Kconfig5
-rw-r--r--arch/arm/mach-sunxi/mc_smp.c5
-rw-r--r--arch/arm/mach-sunxi/platsmp.c4
-rw-r--r--arch/arm/mach-tegra/Kconfig4
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra114.c4
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra20.c11
-rw-r--r--arch/arm/mach-tegra/cpuidle-tegra30.c9
-rw-r--r--arch/arm/mach-tegra/iomap.h11
-rw-r--r--arch/arm/mach-tegra/irammap.h2
-rw-r--r--arch/arm/mach-tegra/pm.c50
-rw-r--r--arch/arm/mach-tegra/reset-handler.S50
-rw-r--r--arch/arm/mach-tegra/reset.c4
-rw-r--r--arch/arm/mach-tegra/reset.h9
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S4
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S21
-rw-r--r--arch/arm/mach-tegra/sleep.S14
-rw-r--r--arch/arm/mach-tegra/tegra.c5
-rw-r--r--arch/arm/mach-u300/regulator.c2
-rw-r--r--arch/arm/mach-w90x900/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-zynq/common.c1
-rw-r--r--arch/arm/mm/alignment.c2
-rw-r--r--arch/arm/mm/dma-mapping.c22
-rw-r--r--arch/arm/mm/init.c42
-rw-r--r--arch/arm/nwfpe/fpmodule.c2
-rw-r--r--arch/arm/plat-omap/dma.c1
-rw-r--r--arch/arm/plat-pxa/ssp.c6
-rw-r--r--arch/arm/tools/syscall.tbl6
-rw-r--r--arch/arm/vdso/Makefile21
-rw-r--r--arch/arm/vdso/vgettimeofday.c5
-rw-r--r--arch/arm/xen/p2m.c4
-rw-r--r--arch/arm64/Kconfig147
-rw-r--r--arch/arm64/Kconfig.platforms11
-rw-r--r--arch/arm64/boot/dts/Makefile1
-rw-r--r--arch/arm64/boot/dts/allwinner/Makefile3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts65
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts68
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts13
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi75
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts5
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts3
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts260
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts215
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts4
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi41
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi27
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/Makefile1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts185
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts147
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts140
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a.dtsi465
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts10
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxm.dtsi27
-rw-r--r--arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts143
-rw-r--r--arch/arm64/boot/dts/bitmain/bm1880.dtsi68
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433-tm2.dts6
-rw-r--r--arch/arm64/boot/dts/exynos/exynos5433.dtsi83
-rw-r--r--arch/arm64/boot/dts/exynos/exynos7.dtsi57
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts62
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts63
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi64
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts16
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi69
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dts235
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi733
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-evk.dts129
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts95
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts24
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi725
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi309
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp-mek.dts95
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qxp.dtsi89
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660.dtsi20
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts75
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3670.dtsi62
-rw-r--r--arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi115
-rw-r--r--arch/arm64/boot/dts/intel/Makefile1
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi444
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts75
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts13
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt2712e.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8173.dtsi35
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h1120
-rw-r--r--arch/arm64/boot/dts/nvidia/Makefile1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts115
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi42
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi147
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts12
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi5
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi6
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts650
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210-smaug.dts12
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi41
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi52
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi121
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi46
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-pins.dtsi43
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi558
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi60
-rw-r--r--arch/arm64/boot/dts/qcom/msm8998.dtsi315
-rw-r--r--arch/arm64/boot/dts/qcom/pm8005.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pm8998.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8994.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pmi8998.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/pms405.dtsi11
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb-1000.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb-4000.dts85
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi95
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404.dtsi23
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-mtp.dts8
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi427
-rw-r--r--arch/arm64/boot/dts/renesas/cat875.dtsi22
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774a1.dtsi12
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts62
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi44
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7795.dtsi72
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a7796.dtsi13
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77965.dtsi324
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77980.dtsi16
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts53
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi74
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77995-draak.dts32
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi73
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-evb.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-evb.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts53
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328-rock64.dts33
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi7
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi34
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts46
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-r88.dts36
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi240
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-evb.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-ficus.dts18
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi14
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts8
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi68
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi56
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts69
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi-neo4.dts50
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi27
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts790
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts12
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi21
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts5
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi77
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi314
-rw-r--r--arch/arm64/boot/dts/sprd/whale2.dtsi16
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts1
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts1
-rw-r--r--arch/arm64/configs/defconfig103
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-glue.c9
-rw-r--r--arch/arm64/crypto/aes-ce-glue.c5
-rw-r--r--arch/arm64/crypto/aes-glue.c6
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c6
-rw-r--r--arch/arm64/crypto/chacha-neon-glue.c7
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c9
-rw-r--r--arch/arm64/crypto/ghash-ce-glue.c25
-rw-r--r--arch/arm64/crypto/nhpoly1305-neon-glue.c5
-rw-r--r--arch/arm64/crypto/sha1-ce-glue.c7
-rw-r--r--arch/arm64/crypto/sha2-ce-glue.c7
-rw-r--r--arch/arm64/crypto/sha256-glue.c9
-rw-r--r--arch/arm64/crypto/sha3-ce-glue.c5
-rw-r--r--arch/arm64/crypto/sha512-ce-glue.c7
-rw-r--r--arch/arm64/crypto/sm3-ce-glue.c7
-rw-r--r--arch/arm64/crypto/sm4-ce-glue.c5
-rw-r--r--arch/arm64/include/asm/Kbuild4
-rw-r--r--arch/arm64/include/asm/arch_timer.h119
-rw-r--r--arch/arm64/include/asm/assembler.h8
-rw-r--r--arch/arm64/include/asm/barrier.h24
-rw-r--r--arch/arm64/include/asm/boot.h2
-rw-r--r--arch/arm64/include/asm/brk-imm.h5
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h29
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/debug-monitors.h25
-rw-r--r--arch/arm64/include/asm/elf.h6
-rw-r--r--arch/arm64/include/asm/esr.h7
-rw-r--r--arch/arm64/include/asm/fpsimd.h29
-rw-r--r--arch/arm64/include/asm/futex.h59
-rw-r--r--arch/arm64/include/asm/hugetlb.h4
-rw-r--r--arch/arm64/include/asm/hwcap.h60
-rw-r--r--arch/arm64/include/asm/insn.h8
-rw-r--r--arch/arm64/include/asm/io.h2
-rw-r--r--arch/arm64/include/asm/irqflags.h8
-rw-r--r--arch/arm64/include/asm/kprobes.h2
-rw-r--r--arch/arm64/include/asm/kvm_asm.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h16
-rw-r--r--arch/arm64/include/asm/kvm_host.h101
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h5
-rw-r--r--arch/arm64/include/asm/kvm_ptrauth.h111
-rw-r--r--arch/arm64/include/asm/memory.h4
-rw-r--r--arch/arm64/include/asm/pgalloc.h12
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/pointer_auth.h2
-rw-r--r--arch/arm64/include/asm/processor.h8
-rw-r--r--arch/arm64/include/asm/ptrace.h22
-rw-r--r--arch/arm64/include/asm/sdei.h2
-rw-r--r--arch/arm64/include/asm/signal32.h2
-rw-r--r--arch/arm64/include/asm/stage2_pgtable.h4
-rw-r--r--arch/arm64/include/asm/syscall.h4
-rw-r--r--arch/arm64/include/asm/sysreg.h62
-rw-r--r--arch/arm64/include/asm/system_misc.h1
-rw-r--r--arch/arm64/include/asm/tlb.h6
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h12
-rw-r--r--arch/arm64/include/asm/vdso_datapage.h1
-rw-r--r--arch/arm64/include/asm/vmap_stack.h2
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h13
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h43
-rw-r--r--arch/arm64/kernel/Makefile11
-rw-r--r--arch/arm64/kernel/acpi_numa.c2
-rw-r--r--arch/arm64/kernel/asm-offsets.c9
-rw-r--r--arch/arm64/kernel/cpu_errata.c256
-rw-r--r--arch/arm64/kernel/cpu_ops.c1
-rw-r--r--arch/arm64/kernel/cpufeature.c195
-rw-r--r--arch/arm64/kernel/cpuinfo.c9
-rw-r--r--arch/arm64/kernel/debug-monitors.c115
-rw-r--r--arch/arm64/kernel/entry.S19
-rw-r--r--arch/arm64/kernel/fpsimd.c183
-rw-r--r--arch/arm64/kernel/head.S12
-rw-r--r--arch/arm64/kernel/insn.c40
-rw-r--r--arch/arm64/kernel/kgdb.c30
-rw-r--r--arch/arm64/kernel/kuser32.S66
-rw-r--r--arch/arm64/kernel/perf_event.c54
-rw-r--r--arch/arm64/kernel/probes/kprobes.c22
-rw-r--r--arch/arm64/kernel/probes/uprobes.c19
-rw-r--r--arch/arm64/kernel/signal.c5
-rw-r--r--arch/arm64/kernel/signal32.c3
-rw-r--r--arch/arm64/kernel/sigreturn32.S46
-rw-r--r--arch/arm64/kernel/smp.c4
-rw-r--r--arch/arm64/kernel/stacktrace.c4
-rw-r--r--arch/arm64/kernel/sys.c2
-rw-r--r--arch/arm64/kernel/traps.c33
-rw-r--r--arch/arm64/kernel/vdso.c139
-rw-r--r--arch/arm64/kernel/vdso/Makefile19
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S22
-rw-r--r--arch/arm64/kvm/Kconfig1
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/fpsimd.c17
-rw-r--r--arch/arm64/kvm/guest.c415
-rw-r--r--arch/arm64/kvm/handle_exit.c36
-rw-r--r--arch/arm64/kvm/hyp/entry.S15
-rw-r--r--arch/arm64/kvm/hyp/switch.c80
-rw-r--r--arch/arm64/kvm/pmu.c239
-rw-r--r--arch/arm64/kvm/reset.c167
-rw-r--r--arch/arm64/kvm/sys_regs.c183
-rw-r--r--arch/arm64/kvm/sys_regs.h25
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/mm/fault.c16
-rw-r--r--arch/arm64/mm/init.c24
-rw-r--r--arch/arm64/mm/mmu.c55
-rw-r--r--arch/arm64/mm/numa.c25
-rw-r--r--arch/arm64/mm/proc.S34
-rw-r--r--arch/arm64/net/bpf_jit.h8
-rw-r--r--arch/arm64/net/bpf_jit_comp.c29
-rw-r--r--arch/c6x/Kconfig4
-rw-r--r--arch/c6x/include/asm/Kbuild2
-rw-r--r--arch/c6x/include/asm/syscall.h7
-rw-r--r--arch/c6x/include/asm/tlb.h2
-rw-r--r--arch/c6x/mm/init.c12
-rw-r--r--arch/csky/Kconfig11
-rw-r--r--arch/csky/Makefile2
-rw-r--r--arch/csky/abiv1/inc/abi/ckmmu.h24
-rw-r--r--arch/csky/abiv1/inc/abi/entry.h41
-rw-r--r--arch/csky/abiv1/inc/abi/regdef.h5
-rw-r--r--arch/csky/abiv2/cacheflush.c13
-rw-r--r--arch/csky/abiv2/inc/abi/ckmmu.h34
-rw-r--r--arch/csky/abiv2/inc/abi/entry.h87
-rw-r--r--arch/csky/abiv2/inc/abi/regdef.h5
-rw-r--r--arch/csky/abiv2/mcount.S39
-rw-r--r--arch/csky/abiv2/memmove.S6
l---------arch/csky/boot/dts/include/dt-bindings1
-rw-r--r--arch/csky/include/asm/Kbuild6
-rw-r--r--arch/csky/include/asm/ftrace.h18
-rw-r--r--arch/csky/include/asm/mmu_context.h17
-rw-r--r--arch/csky/include/asm/page.h39
-rw-r--r--arch/csky/include/asm/perf_event.h8
-rw-r--r--arch/csky/include/asm/ptrace.h41
-rw-r--r--arch/csky/include/asm/syscall.h11
-rw-r--r--arch/csky/include/asm/thread_info.h27
-rw-r--r--arch/csky/include/asm/unistd.h2
-rw-r--r--arch/csky/include/uapi/asm/perf_regs.h51
-rw-r--r--arch/csky/include/uapi/asm/ptrace.h15
-rw-r--r--arch/csky/kernel/Makefile2
-rw-r--r--arch/csky/kernel/atomic.S26
-rw-r--r--arch/csky/kernel/entry.S77
-rw-r--r--arch/csky/kernel/ftrace.c148
-rw-r--r--arch/csky/kernel/head.S60
-rw-r--r--arch/csky/kernel/perf_callchain.c119
-rw-r--r--arch/csky/kernel/perf_regs.c40
-rw-r--r--arch/csky/kernel/ptrace.c51
-rw-r--r--arch/csky/kernel/setup.c12
-rw-r--r--arch/csky/kernel/signal.c348
-rw-r--r--arch/csky/mm/fault.c15
-rw-r--r--arch/h8300/Kconfig4
-rw-r--r--arch/h8300/include/asm/Kbuild4
-rw-r--r--arch/h8300/include/asm/syscall.h6
-rw-r--r--arch/h8300/include/asm/tlb.h2
-rw-r--r--arch/h8300/include/asm/uaccess.h55
-rw-r--r--arch/h8300/kernel/setup.c1
-rw-r--r--arch/h8300/mm/init.c14
-rw-r--r--arch/hexagon/Kconfig7
-rw-r--r--arch/hexagon/include/asm/Kbuild4
-rw-r--r--arch/hexagon/include/asm/elf.h6
-rw-r--r--arch/hexagon/include/asm/io.h2
-rw-r--r--arch/hexagon/include/asm/syscall.h8
-rw-r--r--arch/hexagon/include/asm/tlb.h12
-rw-r--r--arch/hexagon/include/asm/uaccess.h1
-rw-r--r--arch/hexagon/mm/init.c10
-rw-r--r--arch/ia64/Kconfig5
-rw-r--r--arch/ia64/include/asm/io.h17
-rw-r--r--arch/ia64/include/asm/machvec.h13
-rw-r--r--arch/ia64/include/asm/machvec_sn2.h2
-rw-r--r--arch/ia64/include/asm/mmiowb.h25
-rw-r--r--arch/ia64/include/asm/rwsem.h172
-rw-r--r--arch/ia64/include/asm/segment.h6
-rw-r--r--arch/ia64/include/asm/spinlock.h2
-rw-r--r--arch/ia64/include/asm/syscall.h2
-rw-r--r--arch/ia64/include/asm/tlb.h259
-rw-r--r--arch/ia64/include/asm/tlbflush.h25
-rw-r--r--arch/ia64/include/uapi/asm/sockios.h21
-rw-r--r--arch/ia64/kernel/Makefile.gate2
-rw-r--r--arch/ia64/kernel/acpi.c14
-rw-r--r--arch/ia64/kernel/machvec.c4
-rw-r--r--arch/ia64/kernel/setup.c4
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/ia64/mm/init.c17
-rw-r--r--arch/ia64/mm/tlb.c23
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c7
-rw-r--r--arch/m68k/Kconfig10
-rw-r--r--arch/m68k/amiga/cia.c9
-rw-r--r--arch/m68k/amiga/config.c49
-rw-r--r--arch/m68k/apollo/config.c7
-rw-r--r--arch/m68k/atari/ataints.c4
-rw-r--r--arch/m68k/atari/config.c2
-rw-r--r--arch/m68k/atari/time.c70
-rw-r--r--arch/m68k/bvme6000/config.c77
-rw-r--r--arch/m68k/configs/amcore_defconfig1
-rw-r--r--arch/m68k/configs/amiga_defconfig14
-rw-r--r--arch/m68k/configs/apollo_defconfig14
-rw-r--r--arch/m68k/configs/atari_defconfig14
-rw-r--r--arch/m68k/configs/bvme6000_defconfig14
-rw-r--r--arch/m68k/configs/hp300_defconfig14
-rw-r--r--arch/m68k/configs/m5475evb_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig14
-rw-r--r--arch/m68k/configs/multi_defconfig14
-rw-r--r--arch/m68k/configs/mvme147_defconfig14
-rw-r--r--arch/m68k/configs/mvme16x_defconfig14
-rw-r--r--arch/m68k/configs/q40_defconfig14
-rw-r--r--arch/m68k/configs/stmark2_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig14
-rw-r--r--arch/m68k/configs/sun3x_defconfig14
-rw-r--r--arch/m68k/hp300/config.c1
-rw-r--r--arch/m68k/hp300/time.c73
-rw-r--r--arch/m68k/hp300/time.h1
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/io_mm.h2
-rw-r--r--arch/m68k/include/asm/mvme147hw.h2
-rw-r--r--arch/m68k/include/asm/syscall.h12
-rw-r--r--arch/m68k/include/asm/tlb.h14
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/m68k/mac/config.c3
-rw-r--r--arch/m68k/mac/via.c146
-rw-r--r--arch/m68k/mm/init.c7
-rw-r--r--arch/m68k/mvme147/config.c73
-rw-r--r--arch/m68k/mvme16x/config.c97
-rw-r--r--arch/m68k/q40/config.c9
-rw-r--r--arch/m68k/q40/q40ints.c19
-rw-r--r--arch/m68k/sun3/config.c2
-rw-r--r--arch/m68k/sun3/intersil.c7
-rw-r--r--arch/m68k/sun3/sun3ints.c3
-rw-r--r--arch/m68k/sun3x/config.c1
-rw-r--r--arch/m68k/sun3x/time.c21
-rw-r--r--arch/m68k/sun3x/time.h1
-rw-r--r--arch/microblaze/Kconfig7
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/syscall.h2
-rw-r--r--arch/microblaze/include/asm/tlb.h9
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/microblaze/mm/init.c12
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/mips/Kconfig78
-rw-r--r--arch/mips/alchemy/common/clock.c2
-rw-r--r--arch/mips/alchemy/common/platform.c22
-rw-r--r--arch/mips/ath79/clock.c1
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/bcm47xx/Kconfig8
-rw-r--r--arch/mips/bcm63xx/boards/Kconfig2
-rw-r--r--arch/mips/configs/ar7_defconfig1
-rw-r--r--arch/mips/configs/bcm47xx_defconfig2
-rw-r--r--arch/mips/configs/ci20_defconfig2
-rw-r--r--arch/mips/configs/db1xxx_defconfig4
-rw-r--r--arch/mips/configs/decstation_defconfig1
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig1
-rw-r--r--arch/mips/configs/generic/board-ni169445.config6
-rw-r--r--arch/mips/configs/generic/board-ocelot.config2
-rw-r--r--arch/mips/configs/generic_defconfig1
-rw-r--r--arch/mips/configs/ip22_defconfig2
-rw-r--r--arch/mips/configs/ip27_defconfig2
-rw-r--r--arch/mips/configs/loongson1b_defconfig3
-rw-r--r--arch/mips/configs/loongson1c_defconfig3
-rw-r--r--arch/mips/configs/qi_lb60_defconfig2
-rw-r--r--arch/mips/configs/rb532_defconfig3
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig3
-rw-r--r--arch/mips/configs/xway_defconfig2
-rw-r--r--arch/mips/generic/init.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/bitops.h4
-rw-r--r--arch/mips/include/asm/bootinfo.h1
-rw-r--r--arch/mips/include/asm/io.h3
-rw-r--r--arch/mips/include/asm/jump_label.h15
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h11
-rw-r--r--arch/mips/include/asm/mmiowb.h11
-rw-r--r--arch/mips/include/asm/pci/bridge.h14
-rw-r--r--arch/mips/include/asm/sn/irq_alloc.h11
-rw-r--r--arch/mips/include/asm/spinlock.h15
-rw-r--r--arch/mips/include/asm/syscall.h6
-rw-r--r--arch/mips/include/asm/tlb.h17
-rw-r--r--arch/mips/include/asm/uasm.h8
-rw-r--r--arch/mips/include/asm/xtalk/xtalk.h9
-rw-r--r--arch/mips/include/uapi/asm/inst.h6
-rw-r--r--arch/mips/include/uapi/asm/sockios.h4
-rw-r--r--arch/mips/kernel/cpu-bugs64.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c8
-rw-r--r--arch/mips/kernel/entry.S5
-rw-r--r--arch/mips/kernel/jump_label.c30
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c21
-rw-r--r--arch/mips/kernel/prom.c18
-rw-r--r--arch/mips/kernel/ptrace.c2
-rw-r--r--arch/mips/kernel/setup.c129
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl6
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl6
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl6
-rw-r--r--arch/mips/kernel/traps.c63
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/emulate.c4
-rw-r--r--arch/mips/mm/gup.c11
-rw-r--r--arch/mips/mm/init.c8
-rw-r--r--arch/mips/mm/uasm-mips.c14
-rw-r--r--arch/mips/mm/uasm.c39
-rw-r--r--arch/mips/net/Makefile1
-rw-r--r--arch/mips/net/bpf_jit.c1270
-rw-r--r--arch/mips/net/bpf_jit_asm.S285
-rw-r--r--arch/mips/net/ebpf_jit.c196
-rw-r--r--arch/mips/pci/Makefile3
-rw-r--r--arch/mips/pci/ops-bridge.c302
-rw-r--r--arch/mips/pci/pci-ip27.c181
-rw-r--r--arch/mips/pci/pci-xtalk-bridge.c610
-rw-r--r--arch/mips/pic32/Kconfig8
-rw-r--r--arch/mips/pnx833x/Platform2
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c13
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c190
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c61
-rw-r--r--arch/mips/txx9/generic/setup.c1
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/nds32/Kconfig19
-rw-r--r--arch/nds32/include/asm/Kbuild7
-rw-r--r--arch/nds32/include/asm/assembler.h2
-rw-r--r--arch/nds32/include/asm/barrier.h2
-rw-r--r--arch/nds32/include/asm/bitfield.h2
-rw-r--r--arch/nds32/include/asm/cache.h2
-rw-r--r--arch/nds32/include/asm/cache_info.h2
-rw-r--r--arch/nds32/include/asm/cacheflush.h2
-rw-r--r--arch/nds32/include/asm/current.h2
-rw-r--r--arch/nds32/include/asm/delay.h2
-rw-r--r--arch/nds32/include/asm/elf.h5
-rw-r--r--arch/nds32/include/asm/fixmap.h2
-rw-r--r--arch/nds32/include/asm/futex.h2
-rw-r--r--arch/nds32/include/asm/highmem.h2
-rw-r--r--arch/nds32/include/asm/io.h4
-rw-r--r--arch/nds32/include/asm/irqflags.h2
-rw-r--r--arch/nds32/include/asm/l2_cache.h2
-rw-r--r--arch/nds32/include/asm/linkage.h2
-rw-r--r--arch/nds32/include/asm/memory.h10
-rw-r--r--arch/nds32/include/asm/mmu.h2
-rw-r--r--arch/nds32/include/asm/mmu_context.h2
-rw-r--r--arch/nds32/include/asm/module.h2
-rw-r--r--arch/nds32/include/asm/nds32.h2
-rw-r--r--arch/nds32/include/asm/page.h2
-rw-r--r--arch/nds32/include/asm/pgalloc.h2
-rw-r--r--arch/nds32/include/asm/pgtable.h4
-rw-r--r--arch/nds32/include/asm/proc-fns.h2
-rw-r--r--arch/nds32/include/asm/processor.h2
-rw-r--r--arch/nds32/include/asm/ptrace.h2
-rw-r--r--arch/nds32/include/asm/shmparam.h2
-rw-r--r--arch/nds32/include/asm/string.h2
-rw-r--r--arch/nds32/include/asm/swab.h2
-rw-r--r--arch/nds32/include/asm/syscall.h11
-rw-r--r--arch/nds32/include/asm/syscalls.h2
-rw-r--r--arch/nds32/include/asm/thread_info.h4
-rw-r--r--arch/nds32/include/asm/tlb.h18
-rw-r--r--arch/nds32/include/asm/tlbflush.h3
-rw-r--r--arch/nds32/include/asm/uaccess.h2
-rw-r--r--arch/nds32/include/asm/unistd.h2
-rw-r--r--arch/nds32/include/asm/vdso.h2
-rw-r--r--arch/nds32/include/asm/vdso_datapage.h3
-rw-r--r--arch/nds32/include/asm/vdso_timer_info.h2
-rw-r--r--arch/nds32/include/uapi/asm/auxvec.h2
-rw-r--r--arch/nds32/include/uapi/asm/byteorder.h2
-rw-r--r--arch/nds32/include/uapi/asm/cachectl.h2
-rw-r--r--arch/nds32/include/uapi/asm/param.h2
-rw-r--r--arch/nds32/include/uapi/asm/ptrace.h2
-rw-r--r--arch/nds32/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h2
-rw-r--r--arch/nds32/kernel/.gitignore1
-rw-r--r--arch/nds32/kernel/cacheinfo.c2
-rw-r--r--arch/nds32/kernel/ex-exit.S4
-rw-r--r--arch/nds32/kernel/ftrace.c1
-rw-r--r--arch/nds32/kernel/head.S2
-rw-r--r--arch/nds32/kernel/nds32_ksyms.c6
-rw-r--r--arch/nds32/kernel/vdso.c1
-rw-r--r--arch/nds32/kernel/vdso/.gitignore1
-rw-r--r--arch/nds32/kernel/vdso/Makefile14
-rw-r--r--arch/nds32/kernel/vdso/gettimeofday.c4
-rw-r--r--arch/nds32/mm/init.c14
-rw-r--r--arch/nios2/Kconfig5
-rw-r--r--arch/nios2/include/asm/Kbuild2
-rw-r--r--arch/nios2/include/asm/syscall.h6
-rw-r--r--arch/nios2/include/asm/tlb.h14
-rw-r--r--arch/nios2/mm/init.c12
-rw-r--r--arch/openrisc/Kconfig7
-rw-r--r--arch/openrisc/include/asm/Kbuild2
-rw-r--r--arch/openrisc/include/asm/syscall.h2
-rw-r--r--arch/openrisc/include/asm/tlb.h8
-rw-r--r--arch/openrisc/kernel/ptrace.c1
-rw-r--r--arch/openrisc/kernel/setup.c1
-rw-r--r--arch/openrisc/kernel/traps.c1
-rw-r--r--arch/openrisc/mm/init.c13
-rw-r--r--arch/openrisc/mm/tlb.c1
-rw-r--r--arch/parisc/Kconfig23
-rw-r--r--arch/parisc/boot/compressed/head.S6
-rw-r--r--arch/parisc/boot/compressed/misc.c31
-rw-r--r--arch/parisc/configs/generic-32bit_defconfig1
-rw-r--r--arch/parisc/include/asm/Kbuild3
-rw-r--r--arch/parisc/include/asm/assembly.h6
-rw-r--r--arch/parisc/include/asm/cache.h13
-rw-r--r--arch/parisc/include/asm/fixmap.h19
-rw-r--r--arch/parisc/include/asm/hardware.h2
-rw-r--r--arch/parisc/include/asm/io.h2
-rw-r--r--arch/parisc/include/asm/jump_label.h43
-rw-r--r--arch/parisc/include/asm/kgdb.h68
-rw-r--r--arch/parisc/include/asm/kprobes.h55
-rw-r--r--arch/parisc/include/asm/mmzone.h58
-rw-r--r--arch/parisc/include/asm/page.h4
-rw-r--r--arch/parisc/include/asm/patch.h11
-rw-r--r--arch/parisc/include/asm/pgalloc.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h69
-rw-r--r--arch/parisc/include/asm/ptrace.h13
-rw-r--r--arch/parisc/include/asm/sparsemem.h14
-rw-r--r--arch/parisc/include/asm/spinlock.h4
-rw-r--r--arch/parisc/include/asm/syscall.h4
-rw-r--r--arch/parisc/include/asm/tlb.h18
-rw-r--r--arch/parisc/include/asm/tlbflush.h24
-rw-r--r--arch/parisc/include/uapi/asm/sockios.h14
-rw-r--r--arch/parisc/kernel/Makefile6
-rw-r--r--arch/parisc/kernel/cache.c29
-rw-r--r--arch/parisc/kernel/drivers.c27
-rw-r--r--arch/parisc/kernel/entry.S51
-rw-r--r--arch/parisc/kernel/firmware.c2
-rw-r--r--arch/parisc/kernel/ftrace.c1
-rw-r--r--arch/parisc/kernel/head.S19
-rw-r--r--arch/parisc/kernel/inventory.c15
-rw-r--r--arch/parisc/kernel/jump_label.c55
-rw-r--r--arch/parisc/kernel/kgdb.c209
-rw-r--r--arch/parisc/kernel/kprobes.c291
-rw-r--r--arch/parisc/kernel/pacache.S43
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c6
-rw-r--r--arch/parisc/kernel/patch.c77
-rw-r--r--arch/parisc/kernel/pci.c8
-rw-r--r--arch/parisc/kernel/perf_images.h4
-rw-r--r--arch/parisc/kernel/process.c3
-rw-r--r--arch/parisc/kernel/processor.c7
-rw-r--r--arch/parisc/kernel/ptrace.c35
-rw-r--r--arch/parisc/kernel/setup.c6
-rw-r--r--arch/parisc/kernel/stacktrace.c5
-rw-r--r--arch/parisc/kernel/sys_parisc.c3
-rw-r--r--arch/parisc/kernel/syscall.S22
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/parisc/kernel/time.c2
-rw-r--r--arch/parisc/kernel/traps.c31
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/parisc/mm/Makefile2
-rw-r--r--arch/parisc/mm/fixmap.c41
-rw-r--r--arch/parisc/mm/hugetlbpage.c19
-rw-r--r--arch/parisc/mm/init.c200
-rw-r--r--arch/powerpc/Kconfig25
-rw-r--r--arch/powerpc/Kconfig.debug32
-rw-r--r--arch/powerpc/Makefile17
-rw-r--r--arch/powerpc/boot/addnote.c6
-rw-r--r--arch/powerpc/boot/dts/fsl/b4qds.dtsi1
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig2
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig2
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig2
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig2
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc8313_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/83xx/mpc8315_rdb_defconfig2
-rw-r--r--arch/powerpc/configs/85xx-hw.config2
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/socrates_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/tqm8548_defconfig4
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig2
-rw-r--r--arch/powerpc/configs/86xx-hw.config2
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig2
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig2
-rw-r--r--arch/powerpc/configs/pasemi_defconfig2
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/skiroot_defconfig2
-rw-r--r--arch/powerpc/crypto/crc-vpmsum_test.c10
-rw-r--r--arch/powerpc/crypto/crc32c-vpmsum_glue.c4
-rw-r--r--arch/powerpc/crypto/crct10dif-vpmsum_glue.c4
-rw-r--r--arch/powerpc/include/asm/Kbuild2
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h145
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h9
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h41
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h13
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h23
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h21
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h97
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h77
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup-radix.h108
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h70
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h104
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h52
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h12
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix-4k.h9
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix-64k.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h40
-rw-r--r--arch/powerpc/include/asm/book3s/64/slice.h13
-rw-r--r--arch/powerpc/include/asm/cpuidle.h19
-rw-r--r--arch/powerpc/include/asm/drmem.h21
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/fadump.h1
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h3
-rw-r--r--arch/powerpc/include/asm/fixmap.h5
-rw-r--r--arch/powerpc/include/asm/futex.h4
-rw-r--r--arch/powerpc/include/asm/hugetlb.h87
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h8
-rw-r--r--arch/powerpc/include/asm/imc-pmu.h39
-rw-r--r--arch/powerpc/include/asm/io.h33
-rw-r--r--arch/powerpc/include/asm/kasan.h40
-rw-r--r--arch/powerpc/include/asm/kup.h73
-rw-r--r--arch/powerpc/include/asm/kvm_host.h11
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h41
-rw-r--r--arch/powerpc/include/asm/livepatch.h5
-rw-r--r--arch/powerpc/include/asm/mce.h97
-rw-r--r--arch/powerpc/include/asm/mmiowb.h18
-rw-r--r--arch/powerpc/include/asm/mmu.h28
-rw-r--r--arch/powerpc/include/asm/mmu_context.h8
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h44
-rw-r--r--arch/powerpc/include/asm/nohash/32/kup-8xx.h58
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h102
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu.h25
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgalloc.h123
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h13
-rw-r--r--arch/powerpc/include/asm/nohash/32/slice.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/mmu.h12
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h117
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h12
-rw-r--r--arch/powerpc/include/asm/nohash/64/slice.h12
-rw-r--r--arch/powerpc/include/asm/nohash/hugetlb-book3e.h45
-rw-r--r--arch/powerpc/include/asm/nohash/mmu-book3e.h2
-rw-r--r--arch/powerpc/include/asm/nohash/mmu.h16
-rw-r--r--arch/powerpc/include/asm/nohash/pgalloc.h56
-rw-r--r--arch/powerpc/include/asm/nohash/pte-book3e.h5
-rw-r--r--arch/powerpc/include/asm/opal-api.h18
-rw-r--r--arch/powerpc/include/asm/opal.h9
-rw-r--r--arch/powerpc/include/asm/paca.h46
-rw-r--r--arch/powerpc/include/asm/page.h23
-rw-r--r--arch/powerpc/include/asm/pgalloc.h51
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h9
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h9
-rw-r--r--arch/powerpc/include/asm/pgtable.h9
-rw-r--r--arch/powerpc/include/asm/processor.h12
-rw-r--r--arch/powerpc/include/asm/ptrace.h11
-rw-r--r--arch/powerpc/include/asm/reg.h8
-rw-r--r--arch/powerpc/include/asm/reg_booke.h2
-rw-r--r--arch/powerpc/include/asm/slice.h9
-rw-r--r--arch/powerpc/include/asm/sparsemem.h4
-rw-r--r--arch/powerpc/include/asm/spinlock.h17
-rw-r--r--arch/powerpc/include/asm/string.h32
-rw-r--r--arch/powerpc/include/asm/syscall.h10
-rw-r--r--arch/powerpc/include/asm/task_size_64.h2
-rw-r--r--arch/powerpc/include/asm/time.h2
-rw-r--r--arch/powerpc/include/asm/tlb.h18
-rw-r--r--arch/powerpc/include/asm/trace.h16
-rw-r--r--arch/powerpc/include/asm/uaccess.h38
-rw-r--r--arch/powerpc/include/asm/xive.h17
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h46
-rw-r--r--arch/powerpc/kernel/Makefile14
-rw-r--r--arch/powerpc/kernel/asm-offsets.c25
-rw-r--r--arch/powerpc/kernel/cacheinfo.c12
-rw-r--r--arch/powerpc/kernel/cputable.c13
-rw-r--r--arch/powerpc/kernel/dbell.c3
-rw-r--r--arch/powerpc/kernel/early_32.c36
-rw-r--r--arch/powerpc/kernel/entry_32.S186
-rw-r--r--arch/powerpc/kernel/entry_64.S35
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S26
-rw-r--r--arch/powerpc/kernel/fadump.c1
-rw-r--r--arch/powerpc/kernel/fpu.S1
-rw-r--r--arch/powerpc/kernel/head_32.S258
-rw-r--r--arch/powerpc/kernel/head_32.h203
-rw-r--r--arch/powerpc/kernel/head_40x.S155
-rw-r--r--arch/powerpc/kernel/head_44x.S12
-rw-r--r--arch/powerpc/kernel/head_64.S4
-rw-r--r--arch/powerpc/kernel/head_8xx.S136
-rw-r--r--arch/powerpc/kernel/head_booke.h131
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S32
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c62
-rw-r--r--arch/powerpc/kernel/idle_book3s.S1060
-rw-r--r--arch/powerpc/kernel/irq.c16
-rw-r--r--arch/powerpc/kernel/mce.c106
-rw-r--r--arch/powerpc/kernel/mce_power.c253
-rw-r--r--arch/powerpc/kernel/paca.c12
-rw-r--r--arch/powerpc/kernel/process.c35
-rw-r--r--arch/powerpc/kernel/prom_init.c254
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh12
-rw-r--r--arch/powerpc/kernel/ptrace.c3
-rw-r--r--arch/powerpc/kernel/security.c14
-rw-r--r--arch/powerpc/kernel/setup-common.c116
-rw-r--r--arch/powerpc/kernel/setup_32.c28
-rw-r--r--arch/powerpc/kernel/setup_64.c12
-rw-r--r--arch/powerpc/kernel/signal_64.c27
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/powerpc/kernel/sysfs.c8
-rw-r--r--arch/powerpc/kernel/time.c10
-rw-r--r--arch/powerpc/kernel/traps.c10
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile5
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile5
-rw-r--r--arch/powerpc/kernel/vector.S1
-rw-r--r--arch/powerpc/kernel/watchdog.c81
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/Makefile2
-rw-r--r--arch/powerpc/kvm/book3s.c42
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c102
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c105
-rw-r--r--arch/powerpc/kvm/book3s_hv.c159
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c57
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c144
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S227
-rw-r--r--arch/powerpc/kvm/book3s_xive.c250
-rw-r--r--arch/powerpc/kvm/book3s_xive.h37
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c1249
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c78
-rw-r--r--arch/powerpc/kvm/e500_mmu.c2
-rw-r--r--arch/powerpc/kvm/powerpc.c40
-rw-r--r--arch/powerpc/lib/Makefile19
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c4
-rw-r--r--arch/powerpc/lib/code-patching.c5
-rw-r--r--arch/powerpc/lib/copy_32.S12
-rw-r--r--arch/powerpc/lib/mem_64.S9
-rw-r--r--arch/powerpc/lib/memcpy_64.S4
-rw-r--r--arch/powerpc/mm/Makefile47
-rw-r--r--arch/powerpc/mm/book3s32/Makefile9
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S (renamed from arch/powerpc/mm/hash_low_32.S)9
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c (renamed from arch/powerpc/mm/ppc_mmu_32.c)94
-rw-r--r--arch/powerpc/mm/book3s32/mmu_context.c (renamed from arch/powerpc/mm/mmu_context_hash32.c)0
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c (renamed from arch/powerpc/mm/tlb_hash32.c)2
-rw-r--r--arch/powerpc/mm/book3s64/Makefile24
-rw-r--r--arch/powerpc/mm/book3s64/hash_4k.c (renamed from arch/powerpc/mm/hash64_4k.c)2
-rw-r--r--arch/powerpc/mm/book3s64/hash_64k.c (renamed from arch/powerpc/mm/hash64_64k.c)2
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugepage.c (renamed from arch/powerpc/mm/hugepage-hash64.c)2
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugetlbpage.c (renamed from arch/powerpc/mm/hugetlbpage-hash64.c)31
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c (renamed from arch/powerpc/mm/hash_native_64.c)0
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c (renamed from arch/powerpc/mm/pgtable-hash64.c)15
-rw-r--r--arch/powerpc/mm/book3s64/hash_tlb.c (renamed from arch/powerpc/mm/tlb_hash64.c)18
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c (renamed from arch/powerpc/mm/hash_utils_64.c)145
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c (renamed from arch/powerpc/mm/mmu_context_iommu.c)5
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c (renamed from arch/powerpc/mm/mmu_context_book3s64.c)29
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c (renamed from arch/powerpc/mm/pgtable-book3s64.c)2
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c (renamed from arch/powerpc/mm/pkeys.c)1
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c (renamed from arch/powerpc/mm/hugetlbpage-radix.c)0
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c (renamed from arch/powerpc/mm/pgtable-radix.c)117
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c (renamed from arch/powerpc/mm/tlb-radix.c)10
-rw-r--r--arch/powerpc/mm/book3s64/slb.c (renamed from arch/powerpc/mm/slb.c)31
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c (renamed from arch/powerpc/mm/subpage-prot.c)39
-rw-r--r--arch/powerpc/mm/book3s64/vphn.c (renamed from arch/powerpc/mm/vphn.c)6
-rw-r--r--arch/powerpc/mm/book3s64/vphn.h (renamed from arch/powerpc/mm/vphn.h)3
-rw-r--r--arch/powerpc/mm/copro_fault.c18
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/drmem.c6
-rw-r--r--arch/powerpc/mm/fault.c49
-rw-r--r--arch/powerpc/mm/highmem.c14
-rw-r--r--arch/powerpc/mm/hugetlbpage.c242
-rw-r--r--arch/powerpc/mm/init-common.c26
-rw-r--r--arch/powerpc/mm/init_32.c8
-rw-r--r--arch/powerpc/mm/init_64.c2
-rw-r--r--arch/powerpc/mm/kasan/Makefile5
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c183
-rw-r--r--arch/powerpc/mm/mem.c33
-rw-r--r--arch/powerpc/mm/mmu_context.c2
-rw-r--r--arch/powerpc/mm/mmu_decl.h9
-rw-r--r--arch/powerpc/mm/nohash/40x.c (renamed from arch/powerpc/mm/40x_mmu.c)2
-rw-r--r--arch/powerpc/mm/nohash/44x.c (renamed from arch/powerpc/mm/44x_mmu.c)2
-rw-r--r--arch/powerpc/mm/nohash/8xx.c (renamed from arch/powerpc/mm/8xx_mmu.c)26
-rw-r--r--arch/powerpc/mm/nohash/Makefile18
-rw-r--r--arch/powerpc/mm/nohash/book3e_hugetlbpage.c (renamed from arch/powerpc/mm/hugetlbpage-book3e.c)52
-rw-r--r--arch/powerpc/mm/nohash/book3e_pgtable.c (renamed from arch/powerpc/mm/pgtable-book3e.c)9
-rw-r--r--arch/powerpc/mm/nohash/fsl_booke.c (renamed from arch/powerpc/mm/fsl_booke_mmu.c)2
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c (renamed from arch/powerpc/mm/mmu_context_nohash.c)2
-rw-r--r--arch/powerpc/mm/nohash/tlb.c (renamed from arch/powerpc/mm/tlb_nohash.c)19
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S (renamed from arch/powerpc/mm/tlb_nohash_low.S)0
-rw-r--r--arch/powerpc/mm/nohash/tlb_low_64e.S (renamed from arch/powerpc/mm/tlb_low_64e.S)31
-rw-r--r--arch/powerpc/mm/numa.c35
-rw-r--r--arch/powerpc/mm/pgtable.c114
-rw-r--r--arch/powerpc/mm/pgtable_32.c47
-rw-r--r--arch/powerpc/mm/pgtable_64.c13
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c2
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c86
-rw-r--r--arch/powerpc/mm/slice.c109
-rw-r--r--arch/powerpc/perf/Makefile3
-rw-r--r--arch/powerpc/perf/core-book3s.c28
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c234
-rw-r--r--arch/powerpc/perf/imc-pmu.c347
-rw-r--r--arch/powerpc/perf/internal.h12
-rw-r--r--arch/powerpc/perf/power5+-pmu.c4
-rw-r--r--arch/powerpc/perf/power5-pmu.c4
-rw-r--r--arch/powerpc/perf/power6-pmu.c4
-rw-r--r--arch/powerpc/perf/power7-pmu.c4
-rw-r--r--arch/powerpc/perf/power8-pmu.c3
-rw-r--r--arch/powerpc/perf/power9-events-list.h2
-rw-r--r--arch/powerpc/perf/power9-pmu.c3
-rw-r--r--arch/powerpc/perf/ppc970-pmu.c4
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c9
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c2
-rw-r--r--arch/powerpc/platforms/83xx/usb.c4
-rw-r--r--arch/powerpc/platforms/8xx/pic.c3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype47
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c4
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c19
-rw-r--r--arch/powerpc/platforms/powermac/Makefile6
-rw-r--r--arch/powerpc/platforms/powernv/idle.c902
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c14
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal.c23
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c35
-rw-r--r--arch/powerpc/platforms/powernv/pci.h2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c5
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c17
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c13
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c3
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c3
-rw-r--r--arch/powerpc/platforms/pseries/ras.c135
-rw-r--r--arch/powerpc/purgatory/Makefile3
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c5
-rw-r--r--arch/powerpc/sysdev/xive/native.c110
-rw-r--r--arch/powerpc/xmon/Makefile1
-rw-r--r--arch/powerpc/xmon/xmon.c71
-rw-r--r--arch/riscv/Kconfig10
-rw-r--r--arch/riscv/Makefile5
-rw-r--r--arch/riscv/include/asm/Kbuild5
-rw-r--r--arch/riscv/include/asm/bug.h35
-rw-r--r--arch/riscv/include/asm/cacheflush.h2
-rw-r--r--arch/riscv/include/asm/csr.h123
-rw-r--r--arch/riscv/include/asm/elf.h6
-rw-r--r--arch/riscv/include/asm/futex.h13
-rw-r--r--arch/riscv/include/asm/io.h15
-rw-r--r--arch/riscv/include/asm/irqflags.h10
-rw-r--r--arch/riscv/include/asm/mmiowb.h14
-rw-r--r--arch/riscv/include/asm/mmu_context.h59
-rw-r--r--arch/riscv/include/asm/ptrace.h21
-rw-r--r--arch/riscv/include/asm/sbi.h19
-rw-r--r--arch/riscv/include/asm/sifive_l2_cache.h16
-rw-r--r--arch/riscv/include/asm/syscall.h2
-rw-r--r--arch/riscv/include/asm/thread_info.h4
-rw-r--r--arch/riscv/include/asm/tlb.h1
-rw-r--r--arch/riscv/include/asm/uaccess.h28
-rw-r--r--arch/riscv/kernel/asm-offsets.c3
-rw-r--r--arch/riscv/kernel/cpu.c3
-rw-r--r--arch/riscv/kernel/entry.S22
-rw-r--r--arch/riscv/kernel/head.S33
-rw-r--r--arch/riscv/kernel/irq.c19
-rw-r--r--arch/riscv/kernel/perf_event.c4
-rw-r--r--arch/riscv/kernel/reset.c15
-rw-r--r--arch/riscv/kernel/setup.c6
-rw-r--r--arch/riscv/kernel/signal.c6
-rw-r--r--arch/riscv/kernel/smp.c61
-rw-r--r--arch/riscv/kernel/smpboot.c22
-rw-r--r--arch/riscv/kernel/stacktrace.c16
-rw-r--r--arch/riscv/kernel/traps.c30
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/riscv/mm/Makefile2
-rw-r--r--arch/riscv/mm/cacheflush.c61
-rw-r--r--arch/riscv/mm/context.c69
-rw-r--r--arch/riscv/mm/fault.c9
-rw-r--r--arch/riscv/mm/init.c5
-rw-r--r--arch/riscv/mm/sifive_l2_cache.c175
-rw-r--r--arch/s390/Kconfig67
-rw-r--r--arch/s390/Makefile12
-rw-r--r--arch/s390/boot/Makefile30
-rw-r--r--arch/s390/boot/als.c2
-rw-r--r--arch/s390/boot/boot.h5
-rw-r--r--arch/s390/boot/compressed/decompressor.h5
-rw-r--r--arch/s390/boot/compressed/vmlinux.lds.S24
-rw-r--r--arch/s390/boot/head.S48
-rw-r--r--arch/s390/boot/ipl_parm.c54
-rw-r--r--arch/s390/boot/ipl_report.c165
-rw-r--r--arch/s390/boot/kaslr.c144
-rw-r--r--arch/s390/boot/machine_kexec_reloc.c2
-rw-r--r--arch/s390/boot/startup.c121
-rw-r--r--arch/s390/boot/text_dma.S184
-rw-r--r--arch/s390/boot/uv.c24
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/defconfig (renamed from arch/s390/defconfig)1
-rw-r--r--arch/s390/configs/performance_defconfig1
-rw-r--r--arch/s390/crypto/crc32be-vx.S1
-rw-r--r--arch/s390/crypto/crc32le-vx.S6
-rw-r--r--arch/s390/crypto/des_s390.c21
-rw-r--r--arch/s390/crypto/prng.c135
-rw-r--r--arch/s390/hypfs/hypfs_diag0c.c18
-rw-r--r--arch/s390/include/asm/Kbuild2
-rw-r--r--arch/s390/include/asm/airq.h12
-rw-r--r--arch/s390/include/asm/bitops.h12
-rw-r--r--arch/s390/include/asm/boot_data.h11
-rw-r--r--arch/s390/include/asm/bug.h24
-rw-r--r--arch/s390/include/asm/cpacf.h3
-rw-r--r--arch/s390/include/asm/diag.h13
-rw-r--r--arch/s390/include/asm/ebcdic.h2
-rw-r--r--arch/s390/include/asm/elf.h4
-rw-r--r--arch/s390/include/asm/extable.h5
-rw-r--r--arch/s390/include/asm/ftrace.h7
-rw-r--r--arch/s390/include/asm/hugetlb.h8
-rw-r--r--arch/s390/include/asm/io.h17
-rw-r--r--arch/s390/include/asm/ipl.h132
-rw-r--r--arch/s390/include/asm/irq.h9
-rw-r--r--arch/s390/include/asm/kexec.h26
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/linkage.h7
-rw-r--r--arch/s390/include/asm/livepatch.h5
-rw-r--r--arch/s390/include/asm/lowcore.h2
-rw-r--r--arch/s390/include/asm/nospec-insn.h10
-rw-r--r--arch/s390/include/asm/pci.h12
-rw-r--r--arch/s390/include/asm/pci_clp.h20
-rw-r--r--arch/s390/include/asm/pci_insn.h97
-rw-r--r--arch/s390/include/asm/pci_io.h49
-rw-r--r--arch/s390/include/asm/pgtable.h112
-rw-r--r--arch/s390/include/asm/processor.h82
-rw-r--r--arch/s390/include/asm/sclp.h3
-rw-r--r--arch/s390/include/asm/sections.h22
-rw-r--r--arch/s390/include/asm/segment.h5
-rw-r--r--arch/s390/include/asm/setup.h21
-rw-r--r--arch/s390/include/asm/stacktrace.h114
-rw-r--r--arch/s390/include/asm/syscall.h13
-rw-r--r--arch/s390/include/asm/syscall_wrapper.h4
-rw-r--r--arch/s390/include/asm/tlb.h130
-rw-r--r--arch/s390/include/asm/uaccess.h2
-rw-r--r--arch/s390/include/asm/unwind.h101
-rw-r--r--arch/s390/include/asm/uv.h132
-rw-r--r--arch/s390/include/asm/vmlinux.lds.h13
-rw-r--r--arch/s390/include/uapi/asm/ipl.h154
-rw-r--r--arch/s390/include/uapi/asm/kvm.h5
-rw-r--r--arch/s390/kernel/Makefile7
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/base.S71
-rw-r--r--arch/s390/kernel/diag.c67
-rw-r--r--arch/s390/kernel/dumpstack.c167
-rw-r--r--arch/s390/kernel/early.c9
-rw-r--r--arch/s390/kernel/early_nobss.c2
-rw-r--r--arch/s390/kernel/entry.S42
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/ftrace.c9
-rw-r--r--arch/s390/kernel/head64.S26
-rw-r--r--arch/s390/kernel/ima_arch.c14
-rw-r--r--arch/s390/kernel/ipl.c370
-rw-r--r--arch/s390/kernel/ipl_vmparm.c8
-rw-r--r--arch/s390/kernel/irq.c49
-rw-r--r--arch/s390/kernel/kexec_elf.c63
-rw-r--r--arch/s390/kernel/kexec_image.c49
-rw-r--r--arch/s390/kernel/kprobes.c37
-rw-r--r--arch/s390/kernel/machine_kexec.c8
-rw-r--r--arch/s390/kernel/machine_kexec_file.c268
-rw-r--r--arch/s390/kernel/machine_kexec_reloc.c53
-rw-r--r--arch/s390/kernel/mcount.S12
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/nospec-branch.c9
-rw-r--r--arch/s390/kernel/nospec-sysfs.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c15
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c9
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c107
-rw-r--r--arch/s390/kernel/perf_event.c16
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/processor.c3
-rw-r--r--arch/s390/kernel/ptrace.c1
-rw-r--r--arch/s390/kernel/reipl.S1
-rw-r--r--arch/s390/kernel/relocate_kernel.S4
-rw-r--r--arch/s390/kernel/setup.c71
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/kernel/stacktrace.c81
-rw-r--r--arch/s390/kernel/swsusp.S17
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/s390/kernel/traps.c3
-rw-r--r--arch/s390/kernel/unwind_bc.c155
-rw-r--r--arch/s390/kernel/vdso.c10
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S19
-rw-r--r--arch/s390/kvm/Kconfig2
-rw-r--r--arch/s390/kvm/interrupt.c15
-rw-r--r--arch/s390/kvm/kvm-s390.c120
-rw-r--r--arch/s390/kvm/vsie.c13
-rw-r--r--arch/s390/lib/mem.S1
-rw-r--r--arch/s390/mm/Makefile2
-rw-r--r--arch/s390/mm/fault.c14
-rw-r--r--arch/s390/mm/gup.c300
-rw-r--r--arch/s390/mm/init.c22
-rw-r--r--arch/s390/mm/kasan_init.c2
-rw-r--r--arch/s390/mm/maccess.c1
-rw-r--r--arch/s390/mm/pgalloc.c63
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c6
-rw-r--r--arch/s390/oprofile/init.c22
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci.c366
-rw-r--r--arch/s390/pci/pci_clp.c25
-rw-r--r--arch/s390/pci/pci_insn.c169
-rw-r--r--arch/s390/pci/pci_irq.c486
-rw-r--r--arch/s390/purgatory/Makefile20
-rw-r--r--arch/s390/purgatory/kexec-purgatory.S14
-rw-r--r--arch/s390/purgatory/purgatory.lds.S54
-rw-r--r--arch/s390/scripts/Makefile.chkbss3
-rw-r--r--arch/s390/tools/gen_facilities.c3
-rw-r--r--arch/s390/tools/opcodes.txt11
-rw-r--r--arch/sh/Kconfig8
-rw-r--r--arch/sh/Makefile4
-rw-r--r--arch/sh/boards/board-apsh4a3a.c2
-rw-r--r--arch/sh/boards/board-apsh4ad0a.c2
-rw-r--r--arch/sh/boards/board-edosk7705.c2
-rw-r--r--arch/sh/boards/board-edosk7760.c2
-rw-r--r--arch/sh/boards/board-espt.c2
-rw-r--r--arch/sh/boards/board-urquell.c2
-rw-r--r--arch/sh/boards/mach-dreamcast/irq.c1
-rw-r--r--arch/sh/boards/mach-microdev/setup.c2
-rw-r--r--arch/sh/boards/mach-sdk7786/fpga.c2
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c2
-rw-r--r--arch/sh/boards/mach-sdk7786/sram.c2
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c2
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c2
-rw-r--r--arch/sh/boot/.gitignore1
-rw-r--r--arch/sh/configs/ap325rxa_defconfig2
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig1
-rw-r--r--arch/sh/configs/ecovec24-romimage_defconfig1
-rw-r--r--arch/sh/configs/ecovec24_defconfig2
-rw-r--r--arch/sh/configs/migor_defconfig2
-rw-r--r--arch/sh/configs/rsk7264_defconfig1
-rw-r--r--arch/sh/configs/rsk7269_defconfig1
-rw-r--r--arch/sh/configs/sdk7786_defconfig2
-rw-r--r--arch/sh/configs/se7724_defconfig2
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig1
-rw-r--r--arch/sh/configs/titan_defconfig2
-rw-r--r--arch/sh/drivers/pci/pci-sh7751.c2
-rw-r--r--arch/sh/drivers/pci/pci-sh7780.c2
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c2
-rw-r--r--arch/sh/include/asm/Kbuild2
-rw-r--r--arch/sh/include/asm/io.h3
-rw-r--r--arch/sh/include/asm/mmiowb.h12
-rw-r--r--arch/sh/include/asm/pgalloc.h9
-rw-r--r--arch/sh/include/asm/spinlock-llsc.h2
-rw-r--r--arch/sh/include/asm/syscall_32.h2
-rw-r--r--arch/sh/include/asm/syscall_64.h2
-rw-r--r--arch/sh/include/asm/tlb.h132
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7786.h2
-rw-r--r--arch/sh/include/uapi/asm/sockios.h5
-rw-r--r--arch/sh/kernel/stacktrace.c4
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/sh/kernel/vsyscall/Makefile3
-rw-r--r--arch/sh/mm/gup.c11
-rw-r--r--arch/sh/mm/init.c31
-rw-r--r--arch/sh/mm/pmb.c2
-rw-r--r--arch/sh/mm/uncached.c2
-rw-r--r--arch/sparc/Kconfig10
-rw-r--r--arch/sparc/crypto/des_glue.c11
-rw-r--r--arch/sparc/include/asm/Kbuild2
-rw-r--r--arch/sparc/include/asm/io_64.h2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h30
-rw-r--r--arch/sparc/include/asm/syscall.h5
-rw-r--r--arch/sparc/include/asm/tlb_32.h18
-rw-r--r--arch/sparc/include/uapi/asm/sockios.h15
-rw-r--r--arch/sparc/kernel/cpumap.c3
-rw-r--r--arch/sparc/kernel/ds.c2
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/sparc/kernel/time_64.c28
-rw-r--r--arch/sparc/kernel/uprobes.c1
-rw-r--r--arch/sparc/mm/gup.c9
-rw-r--r--arch/sparc/mm/init_32.c13
-rw-r--r--arch/sparc/mm/init_64.c50
-rw-r--r--arch/sparc/mm/iommu.c142
-rw-r--r--arch/sparc/vdso/Makefile2
-rw-r--r--arch/um/Kconfig58
-rw-r--r--arch/um/drivers/Kconfig352
-rw-r--r--arch/um/drivers/harddog_kern.c2
-rw-r--r--arch/um/drivers/ubd_kern.c4
-rw-r--r--arch/um/drivers/vector_kern.c2
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/mmu_context.h1
-rw-r--r--arch/um/include/asm/pgtable.h7
-rw-r--r--arch/um/include/asm/tlb.h158
-rw-r--r--arch/um/kernel/irq.c2
-rw-r--r--arch/um/kernel/mem.c7
-rw-r--r--arch/um/kernel/skas/uaccess.c1
-rw-r--r--arch/um/kernel/stacktrace.c2
-rw-r--r--arch/um/kernel/sysrq.c2
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/um/os-Linux/signal.c28
-rw-r--r--arch/um/os-Linux/umid.c36
-rw-r--r--arch/unicore32/Kconfig9
-rw-r--r--arch/unicore32/configs/unicore32_defconfig2
-rw-r--r--arch/unicore32/include/asm/Kbuild3
-rw-r--r--arch/unicore32/include/asm/elf.h3
-rw-r--r--arch/unicore32/include/asm/memory.h2
-rw-r--r--arch/unicore32/include/asm/mmu_context.h1
-rw-r--r--arch/unicore32/include/asm/syscall.h12
-rw-r--r--arch/unicore32/include/asm/tlb.h7
-rw-r--r--arch/unicore32/kernel/stacktrace.c2
-rw-r--r--arch/unicore32/mm/init.c26
-rw-r--r--arch/unicore32/mm/ioremap.c2
-rw-r--r--arch/unicore32/mm/mmu.c2
-rw-r--r--arch/x86/Kconfig65
-rw-r--r--arch/x86/Kconfig.debug14
-rw-r--r--arch/x86/Makefile2
-rw-r--r--arch/x86/boot/compressed/acpi.c2
-rw-r--r--arch/x86/configs/i386_defconfig12
-rw-r--r--arch/x86/configs/x86_64_defconfig12
-rw-r--r--arch/x86/crypto/aegis128-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aegis128l-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aegis256-aesni-glue.c157
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c212
-rw-r--r--arch/x86/crypto/chacha_glue.c6
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c5
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c7
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c20
-rw-r--r--arch/x86/crypto/ghash-clmulni-intel_glue.c11
-rw-r--r--arch/x86/crypto/morus1280-avx2-glue.c12
-rw-r--r--arch/x86/crypto/morus1280-sse2-glue.c12
-rw-r--r--arch/x86/crypto/morus1280_glue.c85
-rw-r--r--arch/x86/crypto/morus640-sse2-glue.c12
-rw-r--r--arch/x86/crypto/morus640_glue.c85
-rw-r--r--arch/x86/crypto/nhpoly1305-avx2-glue.c5
-rw-r--r--arch/x86/crypto/nhpoly1305-sse2-glue.c5
-rw-r--r--arch/x86/crypto/poly1305_glue.c4
-rw-r--r--arch/x86/crypto/sha1_ssse3_glue.c7
-rw-r--r--arch/x86/crypto/sha256_ssse3_glue.c7
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c10
-rw-r--r--arch/x86/entry/common.c13
-rw-r--r--arch/x86/entry/entry_32.S5
-rw-r--r--arch/x86/entry/entry_64.S35
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl7
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl6
-rw-r--r--arch/x86/entry/vdso/Makefile2
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c4
-rw-r--r--arch/x86/entry/vdso/vdso2c.c3
-rw-r--r--arch/x86/entry/vdso/vdso2c.h13
-rw-r--r--arch/x86/events/amd/core.c111
-rw-r--r--arch/x86/events/amd/iommu.c2
-rw-r--r--arch/x86/events/core.c95
-rw-r--r--arch/x86/events/intel/bts.c2
-rw-r--r--arch/x86/events/intel/core.c314
-rw-r--r--arch/x86/events/intel/cstate.c2
-rw-r--r--arch/x86/events/intel/ds.c505
-rw-r--r--arch/x86/events/intel/lbr.c35
-rw-r--r--arch/x86/events/intel/pt.c3
-rw-r--r--arch/x86/events/intel/rapl.c2
-rw-r--r--arch/x86/events/intel/uncore.c6
-rw-r--r--arch/x86/events/intel/uncore.h1
-rw-r--r--arch/x86/events/intel/uncore_snb.c91
-rw-r--r--arch/x86/events/msr.c1
-rw-r--r--arch/x86/events/perf_event.h100
-rw-r--r--arch/x86/hyperv/hv_apic.c5
-rw-r--r--arch/x86/hyperv/hv_spinlock.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c46
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/alternative-asm.h11
-rw-r--r--arch/x86/include/asm/alternative.h10
-rw-r--r--arch/x86/include/asm/arch_hweight.h2
-rw-r--r--arch/x86/include/asm/asm.h24
-rw-r--r--arch/x86/include/asm/cpu_entry_area.h69
-rw-r--r--arch/x86/include/asm/cpufeature.h11
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/debugreg.h2
-rw-r--r--arch/x86/include/asm/dma-mapping.h10
-rw-r--r--arch/x86/include/asm/e820/api.h1
-rw-r--r--arch/x86/include/asm/fixmap.h2
-rw-r--r--arch/x86/include/asm/fpu/api.h31
-rw-r--r--arch/x86/include/asm/fpu/internal.h140
-rw-r--r--arch/x86/include/asm/fpu/signal.h2
-rw-r--r--arch/x86/include/asm/fpu/types.h9
-rw-r--r--arch/x86/include/asm/fpu/xstate.h8
-rw-r--r--arch/x86/include/asm/ftrace.h8
-rw-r--r--arch/x86/include/asm/hugetlb.h4
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h2
-rw-r--r--arch/x86/include/asm/intel_ds.h2
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/irq.h6
-rw-r--r--arch/x86/include/asm/irq_vectors.h4
-rw-r--r--arch/x86/include/asm/irqflags.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h8
-rw-r--r--arch/x86/include/asm/livepatch.h8
-rw-r--r--arch/x86/include/asm/mce.h25
-rw-r--r--arch/x86/include/asm/mmu_context.h62
-rw-r--r--arch/x86/include/asm/mpx.h15
-rw-r--r--arch/x86/include/asm/msr-index.h48
-rw-r--r--arch/x86/include/asm/mwait.h7
-rw-r--r--arch/x86/include/asm/nospec-branch.h78
-rw-r--r--arch/x86/include/asm/page_32_types.h8
-rw-r--r--arch/x86/include/asm/page_64_types.h16
-rw-r--r--arch/x86/include/asm/perf_event.h57
-rw-r--r--arch/x86/include/asm/pgtable.h34
-rw-r--r--arch/x86/include/asm/processor.h49
-rw-r--r--arch/x86/include/asm/rwsem.h237
-rw-r--r--arch/x86/include/asm/set_memory.h3
-rw-r--r--arch/x86/include/asm/smap.h37
-rw-r--r--arch/x86/include/asm/smp.h2
-rw-r--r--arch/x86/include/asm/special_insns.h19
-rw-r--r--arch/x86/include/asm/stackprotector.h6
-rw-r--r--arch/x86/include/asm/stacktrace.h15
-rw-r--r--arch/x86/include/asm/switch_to.h1
-rw-r--r--arch/x86/include/asm/sync_bitops.h31
-rw-r--r--arch/x86/include/asm/syscall.h8
-rw-r--r--arch/x86/include/asm/text-patching.h37
-rw-r--r--arch/x86/include/asm/thread_info.h2
-rw-r--r--arch/x86/include/asm/tlb.h1
-rw-r--r--arch/x86/include/asm/tlbflush.h4
-rw-r--r--arch/x86/include/asm/trace/exceptions.h2
-rw-r--r--arch/x86/include/asm/trace/fpu.h13
-rw-r--r--arch/x86/include/asm/uaccess.h15
-rw-r--r--arch/x86/include/asm/uaccess_64.h3
-rw-r--r--arch/x86/include/asm/vdso.h1
-rw-r--r--arch/x86/include/asm/xen/hypercall.h24
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/include/uapi/asm/perf_regs.h23
-rw-r--r--arch/x86/include/uapi/asm/sockios.h1
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/boot.c36
-rw-r--r--arch/x86/kernel/acpi/cstate.c12
-rw-r--r--arch/x86/kernel/alternative.c201
-rw-r--r--arch/x86/kernel/amd_gart_64.c6
-rw-r--r--arch/x86/kernel/apic/apic.c57
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/asm-offsets_64.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c5
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c6
-rw-r--r--arch/x86/kernel/cpu/bugs.c146
-rw-r--r--arch/x86/kernel/cpu/common.c203
-rw-r--r--arch/x86/kernel/cpu/cpu.h1
-rw-r--r--arch/x86/kernel/cpu/hygon.c5
-rw-r--r--arch/x86/kernel/cpu/intel.c34
-rw-r--r--arch/x86/kernel/cpu/intel_epb.c236
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c52
-rw-r--r--arch/x86/kernel/cpu/mce/core.c102
-rw-r--r--arch/x86/kernel/cpu/mce/genpool.c3
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c16
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h9
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c5
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c71
-rw-r--r--arch/x86/kernel/cpu/proc.c10
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c4
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c173
-rw-r--r--arch/x86/kernel/crash.c3
-rw-r--r--arch/x86/kernel/dumpstack_32.c8
-rw-r--r--arch/x86/kernel/dumpstack_64.c99
-rw-r--r--arch/x86/kernel/e820.c18
-rw-r--r--arch/x86/kernel/early-quirks.c4
-rw-r--r--arch/x86/kernel/fpu/core.c195
-rw-r--r--arch/x86/kernel/fpu/init.c2
-rw-r--r--arch/x86/kernel/fpu/regset.c24
-rw-r--r--arch/x86/kernel/fpu/signal.c202
-rw-r--r--arch/x86/kernel/fpu/xstate.c42
-rw-r--r--arch/x86/kernel/ftrace.c54
-rw-r--r--arch/x86/kernel/ftrace_32.S75
-rw-r--r--arch/x86/kernel/ftrace_64.S28
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/idt.c19
-rw-r--r--arch/x86/kernel/ima_arch.c10
-rw-r--r--arch/x86/kernel/irq_32.c41
-rw-r--r--arch/x86/kernel/irq_64.c89
-rw-r--r--arch/x86/kernel/irqinit.c4
-rw-r--r--arch/x86/kernel/jump_label.c21
-rw-r--r--arch/x86/kernel/kgdb.c25
-rw-r--r--arch/x86/kernel/kprobes/core.c22
-rw-r--r--arch/x86/kernel/kvm.c2
-rw-r--r--arch/x86/kernel/ldt.c14
-rw-r--r--arch/x86/kernel/module.c2
-rw-r--r--arch/x86/kernel/nmi.c24
-rw-r--r--arch/x86/kernel/paravirt.c2
-rw-r--r--arch/x86/kernel/pci-dma.c20
-rw-r--r--arch/x86/kernel/perf_regs.c27
-rw-r--r--arch/x86/kernel/process.c6
-rw-r--r--arch/x86/kernel/process_32.c18
-rw-r--r--arch/x86/kernel/process_64.c12
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c38
-rw-r--r--arch/x86/kernel/setup_percpu.c5
-rw-r--r--arch/x86/kernel/signal.c55
-rw-r--r--arch/x86/kernel/smpboot.c21
-rw-r--r--arch/x86/kernel/stacktrace.c128
-rw-r--r--arch/x86/kernel/topology.c2
-rw-r--r--arch/x86/kernel/traps.c2
-rw-r--r--arch/x86/kernel/tsc.c36
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S13
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c15
-rw-r--r--arch/x86/kvm/hyperv.c35
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h42
-rw-r--r--arch/x86/kvm/lapic.c111
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu.c24
-rw-r--r--arch/x86/kvm/mtrr.c10
-rw-r--r--arch/x86/kvm/paging_tmpl.h40
-rw-r--r--arch/x86/kvm/svm.c130
-rw-r--r--arch/x86/kvm/vmx/capabilities.h2
-rw-r--r--arch/x86/kvm/vmx/nested.c352
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c8
-rw-r--r--arch/x86/kvm/vmx/vmenter.S12
-rw-r--r--arch/x86/kvm/vmx/vmx.c106
-rw-r--r--arch/x86/kvm/vmx/vmx.h11
-rw-r--r--arch/x86/kvm/x86.c314
-rw-r--r--arch/x86/kvm/x86.h12
-rw-r--r--arch/x86/lib/Makefile13
-rw-r--r--arch/x86/lib/copy_user_64.S48
-rw-r--r--arch/x86/lib/delay.c2
-rw-r--r--arch/x86/lib/error-inject.c1
-rw-r--r--arch/x86/lib/memcpy_64.S3
-rw-r--r--arch/x86/lib/rwsem.S156
-rw-r--r--arch/x86/lib/usercopy_64.c20
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/mm/Makefile2
-rw-r--r--arch/x86/mm/cpu_entry_area.c64
-rw-r--r--arch/x86/mm/dump_pagetables.c4
-rw-r--r--arch/x86/mm/extable.c4
-rw-r--r--arch/x86/mm/fault.c58
-rw-r--r--arch/x86/mm/hugetlbpage.c2
-rw-r--r--arch/x86/mm/init.c37
-rw-r--r--arch/x86/mm/init_32.c11
-rw-r--r--arch/x86/mm/init_64.c164
-rw-r--r--arch/x86/mm/kaslr.c94
-rw-r--r--arch/x86/mm/mem_encrypt.c10
-rw-r--r--arch/x86/mm/mm_internal.h3
-rw-r--r--arch/x86/mm/mpx.c16
-rw-r--r--arch/x86/mm/pageattr.c16
-rw-r--r--arch/x86/mm/pgtable.c14
-rw-r--r--arch/x86/mm/pkeys.c21
-rw-r--r--arch/x86/mm/pti.c6
-rw-r--r--arch/x86/mm/tlb.c116
-rw-r--r--arch/x86/net/bpf_jit_comp32.c236
-rw-r--r--arch/x86/pci/irq.c10
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c101
-rw-r--r--arch/x86/platform/pvh/enlighten.c8
-rw-r--r--arch/x86/platform/uv/tlb_uv.c7
-rw-r--r--arch/x86/power/hibernate.c1
-rw-r--r--arch/x86/tools/relocs.c76
-rw-r--r--arch/x86/um/Kconfig6
-rw-r--r--arch/x86/um/Makefile4
-rw-r--r--arch/x86/um/asm/syscall.h2
-rw-r--r--arch/x86/um/vdso/Makefile2
-rw-r--r--arch/x86/xen/efi.c12
-rw-r--r--arch/x86/xen/enlighten_pv.c2
-rw-r--r--arch/x86/xen/enlighten_pvh.c7
-rw-r--r--arch/x86/xen/mmu_pv.c2
-rw-r--r--arch/x86/xen/multicalls.c2
-rw-r--r--arch/x86/xen/smp_pv.c4
-rw-r--r--arch/x86/xen/time.c20
-rw-r--r--arch/x86/xen/xen-head.S10
-rw-r--r--arch/x86/xen/xen-ops.h4
-rw-r--r--arch/xtensa/Kconfig29
-rw-r--r--arch/xtensa/boot/boot-redboot/bootstrap.S2
-rw-r--r--arch/xtensa/boot/lib/Makefile2
-rw-r--r--arch/xtensa/include/asm/Kbuild3
-rw-r--r--arch/xtensa/include/asm/asmmacro.h2
-rw-r--r--arch/xtensa/include/asm/atomic.h66
-rw-r--r--arch/xtensa/include/asm/barrier.h4
-rw-r--r--arch/xtensa/include/asm/bitops.h125
-rw-r--r--arch/xtensa/include/asm/cache.h2
-rw-r--r--arch/xtensa/include/asm/checksum.h2
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h36
-rw-r--r--arch/xtensa/include/asm/coprocessor.h2
-rw-r--r--arch/xtensa/include/asm/core.h21
-rw-r--r--arch/xtensa/include/asm/futex.h122
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h38
-rw-r--r--arch/xtensa/include/asm/io.h3
-rw-r--r--arch/xtensa/include/asm/irq.h2
-rw-r--r--arch/xtensa/include/asm/irqflags.h2
-rw-r--r--arch/xtensa/include/asm/pci-bridge.h3
-rw-r--r--arch/xtensa/include/asm/pci.h4
-rw-r--r--arch/xtensa/include/asm/pgalloc.h3
-rw-r--r--arch/xtensa/include/asm/processor.h2
-rw-r--r--arch/xtensa/include/asm/ptrace.h2
-rw-r--r--arch/xtensa/include/asm/segment.h16
-rw-r--r--arch/xtensa/include/asm/syscall.h2
-rw-r--r--arch/xtensa/include/asm/tlb.h26
-rw-r--r--arch/xtensa/include/asm/vectors.h2
-rw-r--r--arch/xtensa/include/uapi/asm/sockios.h4
-rw-r--r--arch/xtensa/kernel/hw_breakpoint.c2
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--arch/xtensa/kernel/smp.c2
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl6
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S2
-rw-r--r--arch/xtensa/lib/checksum.S2
-rw-r--r--arch/xtensa/lib/memcopy.S2
-rw-r--r--arch/xtensa/lib/memset.S2
-rw-r--r--arch/xtensa/lib/strncpy_user.S2
-rw-r--r--arch/xtensa/lib/strnlen_user.S2
-rw-r--r--arch/xtensa/lib/usercopy.S2
-rw-r--r--arch/xtensa/mm/init.c5
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c3
-rw-r--r--arch/xtensa/platforms/xt2000/include/platform/hardware.h2
-rw-r--r--arch/xtensa/platforms/xt2000/include/platform/serial.h2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c1
-rw-r--r--block/Kconfig24
-rw-r--r--block/badblocks.c10
-rw-r--r--block/bfq-cgroup.c16
-rw-r--r--block/bfq-iosched.c811
-rw-r--r--block/bfq-iosched.h107
-rw-r--r--block/bfq-wf2q.c23
-rw-r--r--block/bio-integrity.c19
-rw-r--r--block/bio.c286
-rw-r--r--block/blk-cgroup.c1
-rw-r--r--block/blk-core.c24
-rw-r--r--block/blk-exec.c1
-rw-r--r--block/blk-flush.c3
-rw-r--r--block/blk-integrity.c19
-rw-r--r--block/blk-iolatency.c1
-rw-r--r--block/blk-merge.c147
-rw-r--r--block/blk-mq-cpumap.c1
-rw-r--r--block/blk-mq-debugfs.c13
-rw-r--r--block/blk-mq-pci.c10
-rw-r--r--block/blk-mq-rdma.c10
-rw-r--r--block/blk-mq-sched.c13
-rw-r--r--block/blk-mq-sysfs.c17
-rw-r--r--block/blk-mq-tag.c1
-rw-r--r--block/blk-mq-virtio.c10
-rw-r--r--block/blk-mq.c192
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-rq-qos.c2
-rw-r--r--block/blk-rq-qos.h1
-rw-r--r--block/blk-settings.c17
-rw-r--r--block/blk-stat.c1
-rw-r--r--block/blk-sysfs.c30
-rw-r--r--block/blk-timeout.c1
-rw-r--r--block/blk-wbt.c1
-rw-r--r--block/blk-zoned.c1
-rw-r--r--block/blk.h2
-rw-r--r--block/bounce.c3
-rw-r--r--block/bsg-lib.c16
-rw-r--r--block/bsg.c9
-rw-r--r--block/elevator.c7
-rw-r--r--block/genhd.c68
-rw-r--r--block/ioctl.c1
-rw-r--r--block/ioprio.c1
-rw-r--r--block/kyber-iosched.c13
-rw-r--r--block/mq-deadline.c1
-rw-r--r--block/opal_proto.h12
-rw-r--r--block/partition-generic.c7
-rw-r--r--block/partitions/acorn.c7
-rw-r--r--block/partitions/aix.h1
-rw-r--r--block/partitions/amiga.h1
-rw-r--r--block/partitions/efi.c16
-rw-r--r--block/partitions/efi.h16
-rw-r--r--block/partitions/ibm.h1
-rw-r--r--block/partitions/karma.h1
-rw-r--r--block/partitions/ldm.c16
-rw-r--r--block/partitions/ldm.h16
-rw-r--r--block/partitions/msdos.h1
-rw-r--r--block/partitions/osf.h1
-rw-r--r--block/partitions/sgi.h1
-rw-r--r--block/partitions/sun.h1
-rw-r--r--block/partitions/sysv68.h1
-rw-r--r--block/partitions/ultrix.h1
-rw-r--r--block/scsi_ioctl.c16
-rw-r--r--block/sed-opal.c726
-rw-r--r--block/t10-pi.c19
-rw-r--r--crypto/842.c2
-rw-r--r--crypto/Kconfig85
-rw-r--r--crypto/Makefile10
-rw-r--r--crypto/adiantum.c3
-rw-r--r--crypto/aegis128.c2
-rw-r--r--crypto/aegis128l.c2
-rw-r--r--crypto/aegis256.c2
-rw-r--r--crypto/aes_generic.c10
-rw-r--r--crypto/akcipher.c14
-rw-r--r--crypto/algboss.c8
-rw-r--r--crypto/ansi_cprng.c2
-rw-r--r--crypto/anubis.c2
-rw-r--r--crypto/arc4.c2
-rw-r--r--crypto/asymmetric_keys/asym_tpm.c43
-rw-r--r--crypto/asymmetric_keys/pkcs7_verify.c1
-rw-r--r--crypto/asymmetric_keys/public_key.c105
-rw-r--r--crypto/asymmetric_keys/verify_pefile.c1
-rw-r--r--crypto/asymmetric_keys/x509.asn1 2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c57
-rw-r--r--crypto/asymmetric_keys/x509_public_key.c1
-rw-r--r--crypto/authenc.c2
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/blowfish_generic.c2
-rw-r--r--crypto/camellia_generic.c2
-rw-r--r--crypto/cast5_generic.c2
-rw-r--r--crypto/cast6_generic.c2
-rw-r--r--crypto/cbc.c2
-rw-r--r--crypto/ccm.c46
-rw-r--r--crypto/cfb.c2
-rw-r--r--crypto/chacha20poly1305.c6
-rw-r--r--crypto/chacha_generic.c12
-rw-r--r--crypto/cmac.c2
-rw-r--r--crypto/crc32_generic.c2
-rw-r--r--crypto/crc32c_generic.c2
-rw-r--r--crypto/crct10dif_generic.c13
-rw-r--r--crypto/cryptd.c252
-rw-r--r--crypto/crypto_null.c2
-rw-r--r--crypto/crypto_user_base.c4
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/cts.c20
-rw-r--r--crypto/deflate.c2
-rw-r--r--crypto/des_generic.c13
-rw-r--r--crypto/dh.c2
-rw-r--r--crypto/drbg.c3
-rw-r--r--crypto/ecb.c2
-rw-r--r--crypto/ecc.c417
-rw-r--r--crypto/ecc.h153
-rw-r--r--crypto/ecc_curve_defs.h15
-rw-r--r--crypto/ecdh.c2
-rw-r--r--crypto/echainiv.c2
-rw-r--r--crypto/ecrdsa.c296
-rw-r--r--crypto/ecrdsa_defs.h225
-rw-r--r--crypto/ecrdsa_params.asn1 4
-rw-r--r--crypto/ecrdsa_pub_key.asn1 1
-rw-r--r--crypto/fcrypt.c2
-rw-r--r--crypto/fips.c2
-rw-r--r--crypto/gcm.c36
-rw-r--r--crypto/ghash-generic.c2
-rw-r--r--crypto/hmac.c13
-rw-r--r--crypto/jitterentropy-kcapi.c2
-rw-r--r--crypto/keywrap.c2
-rw-r--r--crypto/khazad.c2
-rw-r--r--crypto/lrw.c6
-rw-r--r--crypto/lz4.c2
-rw-r--r--crypto/lz4hc.c2
-rw-r--r--crypto/lzo-rle.c2
-rw-r--r--crypto/lzo.c2
-rw-r--r--crypto/md4.c2
-rw-r--r--crypto/md5.c2
-rw-r--r--crypto/michael_mic.c2
-rw-r--r--crypto/morus1280.c2
-rw-r--r--crypto/morus640.c2
-rw-r--r--crypto/nhpoly1305.c2
-rw-r--r--crypto/ofb.c2
-rw-r--r--crypto/pcbc.c2
-rw-r--r--crypto/pcrypt.c2
-rw-r--r--crypto/poly1305_generic.c2
-rw-r--r--crypto/rmd128.c2
-rw-r--r--crypto/rmd160.c2
-rw-r--r--crypto/rmd256.c2
-rw-r--r--crypto/rmd320.c2
-rw-r--r--crypto/rsa-pkcs1pad.c33
-rw-r--r--crypto/rsa.c111
-rw-r--r--crypto/salsa20_generic.c13
-rw-r--r--crypto/scompress.c129
-rw-r--r--crypto/seed.c2
-rw-r--r--crypto/seqiv.c2
-rw-r--r--crypto/serpent_generic.c2
-rw-r--r--crypto/sha1_generic.c2
-rw-r--r--crypto/sha256_generic.c2
-rw-r--r--crypto/sha3_generic.c2
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--crypto/shash.c7
-rw-r--r--crypto/simd.c273
-rw-r--r--crypto/skcipher.c9
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--crypto/sm4_generic.c2
-rw-r--r--crypto/streebog_generic.c27
-rw-r--r--crypto/tcrypt.c2
-rw-r--r--crypto/tea.c2
-rw-r--r--crypto/testmgr.c1242
-rw-r--r--crypto/testmgr.h181
-rw-r--r--crypto/tgr192.c2
-rw-r--r--crypto/twofish_generic.c2
-rw-r--r--crypto/vmac.c2
-rw-r--r--crypto/wp512.c2
-rw-r--r--crypto/xcbc.c2
-rw-r--r--crypto/xts.c2
-rw-r--r--crypto/zstd.c2
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_apd.c1
-rw-r--r--drivers/acpi/acpi_configfs.c4
-rw-r--r--drivers/acpi/acpi_dbg.c2
-rw-r--r--drivers/acpi/acpi_lpat.c2
-rw-r--r--drivers/acpi/acpi_lpss.c4
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c6
-rw-r--r--drivers/acpi/acpica/evgpeinit.c4
-rw-r--r--drivers/acpi/acpica/exnames.c6
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c4
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c8
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c4
-rw-r--r--drivers/acpi/acpica/nsutils.c14
-rw-r--r--drivers/acpi/acpica/nsxfname.c4
-rw-r--r--drivers/acpi/acpica/psargs.c8
-rw-r--r--drivers/acpi/acpica/rsxface.c8
-rw-r--r--drivers/acpi/acpica/tbdata.c3
-rw-r--r--drivers/acpi/acpica/tbfind.c20
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c10
-rw-r--r--drivers/acpi/acpica/tbutils.c6
-rw-r--r--drivers/acpi/acpica/tbxface.c4
-rw-r--r--drivers/acpi/acpica/tbxfload.c15
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c4
-rw-r--r--drivers/acpi/acpica/utmisc.c8
-rw-r--r--drivers/acpi/acpica/utpredef.c4
-rw-r--r--drivers/acpi/acpica/utstring.c6
-rw-r--r--drivers/acpi/arm64/iort.c161
-rw-r--r--drivers/acpi/button.c5
-rw-r--r--drivers/acpi/cppc_acpi.c34
-rw-r--r--drivers/acpi/device_pm.c5
-rw-r--r--drivers/acpi/device_sysfs.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c3
-rw-r--r--drivers/acpi/event.c4
-rw-r--r--drivers/acpi/hmat/Kconfig11
-rw-r--r--drivers/acpi/hmat/Makefile1
-rw-r--r--drivers/acpi/hmat/hmat.c666
-rw-r--r--drivers/acpi/numa.c16
-rw-r--r--drivers/acpi/pci_mcfg.c12
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/pptt.c48
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/property.c9
-rw-r--r--drivers/acpi/scan.c25
-rw-r--r--drivers/acpi/sleep.c4
-rw-r--r--drivers/acpi/spcr.c2
-rw-r--r--drivers/acpi/sysfs.c14
-rw-r--r--drivers/acpi/tables.c98
-rw-r--r--drivers/acpi/utils.c16
-rw-r--r--drivers/acpi/video_detect.c10
-rw-r--r--drivers/amba/tegra-ahb.c6
-rw-r--r--drivers/android/binder.c12
-rw-r--r--drivers/android/binder_alloc.c8
-rw-r--r--drivers/ata/ahci_qoriq.c55
-rw-r--r--drivers/ata/pata_ep93xx.c2
-rw-r--r--drivers/ata/sata_rcar.c1
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/base/Kconfig10
-rw-r--r--drivers/base/arch_topology.c36
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/firmware_loader/Kconfig1
-rw-r--r--drivers/base/firmware_loader/builtin/.gitignore1
-rw-r--r--drivers/base/firmware_loader/fallback.c6
-rw-r--r--drivers/base/memory.c24
-rw-r--r--drivers/base/node.c352
-rw-r--r--drivers/base/platform.c12
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/common.c4
-rw-r--r--drivers/base/power/domain.c130
-rw-r--r--drivers/base/power/domain_governor.c71
-rw-r--r--drivers/base/power/generic_ops.c4
-rw-r--r--drivers/base/power/main.c80
-rw-r--r--drivers/base/power/qos.c6
-rw-r--r--drivers/base/power/runtime.c4
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/base/power/wakeirq.c15
-rw-r--r--drivers/base/power/wakeup.c10
-rw-r--r--drivers/base/property.c75
-rw-r--r--drivers/base/regmap/internal.h5
-rw-r--r--drivers/base/regmap/regcache-flat.c18
-rw-r--r--drivers/base/regmap/regcache-lzo.c18
-rw-r--r--drivers/base/regmap/regcache-rbtree.c18
-rw-r--r--drivers/base/regmap/regcache.c18
-rw-r--r--drivers/base/regmap/regmap-ac97.c22
-rw-r--r--drivers/base/regmap/regmap-debugfs.c48
-rw-r--r--drivers/base/regmap/regmap-i2c.c18
-rw-r--r--drivers/base/regmap/regmap-irq.c21
-rw-r--r--drivers/base/regmap/regmap-mmio.c22
-rw-r--r--drivers/base/regmap/regmap-spi.c18
-rw-r--r--drivers/base/regmap/regmap-spmi.c29
-rw-r--r--drivers/base/regmap/regmap-w1.c16
-rw-r--r--drivers/base/regmap/regmap.c27
-rw-r--r--drivers/base/syscore.c12
-rw-r--r--drivers/base/test/Makefile1
-rw-r--r--drivers/block/amiflop.c1
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/brd.c13
-rw-r--r--drivers/block/drbd/drbd_int.h7
-rw-r--r--drivers/block/drbd/drbd_nl.c8
-rw-r--r--drivers/block/drbd/drbd_nla.c3
-rw-r--r--drivers/block/drbd/drbd_receiver.c7
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c11
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c89
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h17
-rw-r--r--drivers/block/nbd.c38
-rw-r--r--drivers/block/paride/pcd.c1
-rw-r--r--drivers/block/paride/pd.c1
-rw-r--r--drivers/block/paride/pf.c1
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/ps3disk.c4
-rw-r--r--drivers/block/rbd.c24
-rw-r--r--drivers/block/rsxx/core.c1
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/swim3.c1
-rw-r--r--drivers/block/virtio_blk.c3
-rw-r--r--drivers/block/xsysace.c1
-rw-r--r--drivers/bluetooth/Kconfig15
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c10
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c36
-rw-r--r--drivers/bluetooth/btmtksdio.c1101
-rw-r--r--drivers/bluetooth/btmtkuart.c2
-rw-r--r--drivers/bluetooth/btqca.c7
-rw-r--r--drivers/bluetooth/btqca.h13
-rw-r--r--drivers/bluetooth/btsdio.c15
-rw-r--r--drivers/bluetooth/hci_bcm.c20
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_qca.c91
-rw-r--r--drivers/bus/tegra-aconnect.c66
-rw-r--r--drivers/bus/ti-sysc.c661
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/ds1620.c2
-rw-r--r--drivers/char/dtlk.c3
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/omap-rng.c1
-rw-r--r--drivers/char/hw_random/stm32-rng.c9
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c3
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_plat_data.c27
-rw-r--r--drivers/char/ipmi/ipmi_plat_data.h3
-rw-r--r--drivers/char/ipmi/ipmi_si_hardcode.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_hotmod.c1
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c6
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c11
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c2
-rw-r--r--drivers/char/pcmcia/scr24x_cs.c2
-rw-r--r--drivers/char/random.c199
-rw-r--r--drivers/char/tb0219.c2
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/char/virtio_console.c3
-rw-r--r--drivers/clk/Kconfig11
-rw-r--r--drivers/clk/Makefile4
-rw-r--r--drivers/clk/actions/owl-common.h2
-rw-r--r--drivers/clk/actions/owl-composite.h2
-rw-r--r--drivers/clk/actions/owl-divider.h2
-rw-r--r--drivers/clk/actions/owl-factor.h2
-rw-r--r--drivers/clk/actions/owl-fixed-factor.h2
-rw-r--r--drivers/clk/actions/owl-gate.h2
-rw-r--r--drivers/clk/actions/owl-mux.h2
-rw-r--r--drivers/clk/actions/owl-pll.h2
-rw-r--r--drivers/clk/actions/owl-reset.h2
-rw-r--r--drivers/clk/analogbits/Kconfig2
-rw-r--r--drivers/clk/analogbits/Makefile3
-rw-r--r--drivers/clk/analogbits/wrpll-cln28hpc.c364
-rw-r--r--drivers/clk/at91/Makefile2
-rw-r--r--drivers/clk/at91/at91sam9260.c14
-rw-r--r--drivers/clk/at91/at91sam9rl.c2
-rw-r--r--drivers/clk/at91/at91sam9x5.c11
-rw-r--r--drivers/clk/at91/clk-generated.c48
-rw-r--r--drivers/clk/at91/clk-master.c8
-rw-r--r--drivers/clk/at91/clk-peripheral.c46
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c330
-rw-r--r--drivers/clk/at91/clk-usb.c33
-rw-r--r--drivers/clk/at91/dt-compat.c12
-rw-r--r--drivers/clk/at91/pmc.h25
-rw-r--r--drivers/clk/at91/sam9x60.c307
-rw-r--r--drivers/clk/at91/sama5d2.c12
-rw-r--r--drivers/clk/at91/sama5d4.c10
-rw-r--r--drivers/clk/at91/sckc.c134
-rw-r--r--drivers/clk/axs10x/i2s_pll_clock.c1
-rw-r--r--drivers/clk/axs10x/pll_clock.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c1
-rw-r--r--drivers/clk/bcm/clk-kona.c3
-rw-r--r--drivers/clk/berlin/berlin2-div.c1
-rw-r--r--drivers/clk/berlin/bg2.c1
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-aspeed.c42
-rw-r--r--drivers/clk/clk-composite.c2
-rw-r--r--drivers/clk/clk-divider.c26
-rw-r--r--drivers/clk/clk-fixed-factor.c57
-rw-r--r--drivers/clk/clk-fixed-mmio.c3
-rw-r--r--drivers/clk/clk-fixed-rate.c2
-rw-r--r--drivers/clk/clk-fractional-divider.c25
-rw-r--r--drivers/clk/clk-gate.c24
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-highbank.c23
-rw-r--r--drivers/clk/clk-hsdk-pll.c1
-rw-r--r--drivers/clk/clk-lochnagar.c336
-rw-r--r--drivers/clk/clk-milbeaut.c663
-rw-r--r--drivers/clk/clk-multiplier.c23
-rw-r--r--drivers/clk/clk-mux.c24
-rw-r--r--drivers/clk/clk-pwm.c2
-rw-r--r--drivers/clk/clk-qoriq.c77
-rw-r--r--drivers/clk/clk-stm32f4.c307
-rw-r--r--drivers/clk/clk-stm32mp1.c3
-rw-r--r--drivers/clk/clk-xgene.c6
-rw-r--r--drivers/clk/clk.c392
-rw-r--r--drivers/clk/clk.h2
-rw-r--r--drivers/clk/clkdev.c30
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c4
-rw-r--r--drivers/clk/davinci/pll-da850.c1
-rw-r--r--drivers/clk/davinci/pll.h2
-rw-r--r--drivers/clk/davinci/psc.h2
-rw-r--r--drivers/clk/h8300/clk-div.c1
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c3
-rw-r--r--drivers/clk/hisilicon/clk-hi3660-stub.c1
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c6
-rw-r--r--drivers/clk/hisilicon/clk-hisi-phase.c4
-rw-r--r--drivers/clk/imx/Makefile2
-rw-r--r--drivers/clk/imx/clk-composite-8m.c3
-rw-r--r--drivers/clk/imx/clk-divider-gate.c20
-rw-r--r--drivers/clk/imx/clk-frac-pll.c1
-rw-r--r--drivers/clk/imx/clk-imx21.c1
-rw-r--r--drivers/clk/imx/clk-imx27.c1
-rw-r--r--drivers/clk/imx/clk-imx5.c (renamed from drivers/clk/imx/clk-imx51-imx53.c)59
-rw-r--r--drivers/clk/imx/clk-imx6sll.c18
-rw-r--r--drivers/clk/imx/clk-imx7d.c4
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c1
-rw-r--r--drivers/clk/imx/clk-imx8mq.c1
-rw-r--r--drivers/clk/imx/clk-pfdv2.c11
-rw-r--r--drivers/clk/imx/clk-pll14xx.c6
-rw-r--r--drivers/clk/imx/clk-pllv3.c31
-rw-r--r--drivers/clk/imx/clk-pllv4.c73
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c13
-rw-r--r--drivers/clk/imx/clk.h6
-rw-r--r--drivers/clk/ingenic/cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c6
-rw-r--r--drivers/clk/ingenic/jz4740-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4770-cgu.c1
-rw-r--r--drivers/clk/ingenic/jz4780-cgu.c1
-rw-r--r--drivers/clk/loongson1/clk-loongson1c.c1
-rw-r--r--drivers/clk/mediatek/Kconfig83
-rw-r--r--drivers/clk/mediatek/Makefile16
-rw-r--r--drivers/clk/mediatek/clk-gate.h14
-rw-r--r--drivers/clk/mediatek/clk-mt8183-audio.c105
-rw-r--r--drivers/clk/mediatek/clk-mt8183-cam.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8183-img.c63
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu0.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu1.c56
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_adl.c54
-rw-r--r--drivers/clk/mediatek/clk-mt8183-ipu_conn.c123
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mfgcfg.c54
-rw-r--r--drivers/clk/mediatek/clk-mt8183-mm.c111
-rw-r--r--drivers/clk/mediatek/clk-mt8183-vdec.c67
-rw-r--r--drivers/clk/mediatek/clk-mt8183-venc.c59
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c1284
-rw-r--r--drivers/clk/mediatek/clk-mt8516.c815
-rw-r--r--drivers/clk/mediatek/clk-mtk.h3
-rw-r--r--drivers/clk/mediatek/clk-mux.c223
-rw-r--r--drivers/clk/mediatek/clk-mux.h89
-rw-r--r--drivers/clk/mediatek/clk-pll.c87
-rw-r--r--drivers/clk/meson/axg-audio.c1219
-rw-r--r--drivers/clk/meson/axg-audio.h16
-rw-r--r--drivers/clk/meson/clk-pll.c26
-rw-r--r--drivers/clk/meson/clk-pll.h1
-rw-r--r--drivers/clk/meson/g12a-aoclk.h2
-rw-r--r--drivers/clk/meson/g12a.c631
-rw-r--r--drivers/clk/meson/g12a.h31
-rw-r--r--drivers/clk/meson/meson8b.c734
-rw-r--r--drivers/clk/meson/meson8b.h27
-rw-r--r--drivers/clk/microchip/clk-core.c1
-rw-r--r--drivers/clk/microchip/clk-pic32mzda.c1
-rw-r--r--drivers/clk/mmp/clk-gate.c2
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-tbg.c1
-rw-r--r--drivers/clk/mvebu/clk-corediv.c1
-rw-r--r--drivers/clk/mvebu/common.c2
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-ccu.c7
-rw-r--r--drivers/clk/nxp/clk-lpc18xx-cgu.c25
-rw-r--r--drivers/clk/nxp/clk-lpc32xx.c8
-rw-r--r--drivers/clk/pxa/clk-pxa.c1
-rw-r--r--drivers/clk/qcom/Kconfig6
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-branch.c6
-rw-r--r--drivers/clk/qcom/clk-branch.h1
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.h2
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c2
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c90
-rw-r--r--drivers/clk/qcom/turingcc-qcs404.c170
-rw-r--r--drivers/clk/renesas/clk-r8a73a4.c1
-rw-r--r--drivers/clk/renesas/clk-r8a7740.c1
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c1
-rw-r--r--drivers/clk/renesas/clk-rz.c1
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c1
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c3
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c18
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c7
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c41
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c35
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c33
-rw-r--r--drivers/clk/renesas/r8a77980-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c25
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c2
-rw-r--r--drivers/clk/renesas/rcar-gen2-cpg.h4
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c71
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h9
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c1
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h4
-rw-r--r--drivers/clk/rockchip/clk-ddr.c2
-rw-r--r--drivers/clk/rockchip/clk-half-divider.c9
-rw-r--r--drivers/clk/rockchip/clk-px30.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c37
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c19
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c1
-rw-r--r--drivers/clk/rockchip/clk-rv1108.c1
-rw-r--r--drivers/clk/rockchip/clk.c10
-rw-r--r--drivers/clk/rockchip/clk.h23
-rw-r--r--drivers/clk/samsung/clk-cpu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c1
-rw-r--r--drivers/clk/samsung/clk-exynos3250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos4.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5-subcmu.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c1
-rw-r--r--drivers/clk/samsung/clk-exynos5410.c1
-rw-r--r--drivers/clk/samsung/clk-pll.c3
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c1
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c1
-rw-r--r--drivers/clk/samsung/clk.c1
-rw-r--r--drivers/clk/sifive/Kconfig18
-rw-r--r--drivers/clk/sifive/Makefile1
-rw-r--r--drivers/clk/sifive/fu540-prci.c627
-rw-r--r--drivers/clk/socfpga/clk-gate-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-periph-s10.c1
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c1
-rw-r--r--drivers/clk/sprd/common.h2
-rw-r--r--drivers/clk/sprd/composite.h2
-rw-r--r--drivers/clk/sprd/div.h2
-rw-r--r--drivers/clk/sprd/gate.h2
-rw-r--r--drivers/clk/sprd/mux.h2
-rw-r--r--drivers/clk/sprd/pll.h2
-rw-r--r--drivers/clk/st/clkgen-mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c20
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.h2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_frac.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_gate.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mmc_timing.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nk.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkmp.c25
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_phase.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu_sdm.c1
-rw-r--r--drivers/clk/sunxi/Kconfig43
-rw-r--r--drivers/clk/sunxi/Makefile49
-rw-r--r--drivers/clk/sunxi/clk-a10-mod1.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-pll2.c1
-rw-r--r--drivers/clk/sunxi/clk-a10-ve.c1
-rw-r--r--drivers/clk/sunxi/clk-a20-gmac.c1
-rw-r--r--drivers/clk/sunxi/clk-mod0.c1
-rw-r--r--drivers/clk/sunxi/clk-simple-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-display.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-pll3.c1
-rw-r--r--drivers/clk/sunxi/clk-sun4i-tcon-ch1.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-bus-gates.c1
-rw-r--r--drivers/clk/sunxi/clk-sun8i-mbus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-cpus.c1
-rw-r--r--drivers/clk/sunxi/clk-sun9i-mmc.c1
-rw-r--r--drivers/clk/sunxi/clk-sunxi.c1
-rw-r--r--drivers/clk/sunxi/clk-usb.c1
-rw-r--r--drivers/clk/tegra/clk-divider.c3
-rw-r--r--drivers/clk/tegra/clk-emc.c58
-rw-r--r--drivers/clk/tegra/clk-periph-fixed.c1
-rw-r--r--drivers/clk/tegra/clk-pll.c54
-rw-r--r--drivers/clk/tegra/clk-sdmmc-mux.c1
-rw-r--r--drivers/clk/tegra/clk-super.c2
-rw-r--r--drivers/clk/tegra/clk-tegra124.c7
-rw-r--r--drivers/clk/tegra/clk-tegra210.c6
-rw-r--r--drivers/clk/tegra/clk.c1
-rw-r--r--drivers/clk/ti/adpll.c1
-rw-r--r--drivers/clk/ti/clk-7xx-compat.c6
-rw-r--r--drivers/clk/ti/clk-7xx.c6
-rw-r--r--drivers/clk/ti/clk.c1
-rw-r--r--drivers/clk/ti/clkctrl.c17
-rw-r--r--drivers/clk/ti/clock.h8
-rw-r--r--drivers/clk/ti/fapll.c1
-rw-r--r--drivers/clk/ux500/clk-sysctrl.c3
-rw-r--r--drivers/clk/versatile/clk-sp810.c1
-rw-r--r--drivers/clk/x86/clk-pmc-atom.c1
-rw-r--r--drivers/clk/zynq/clkc.c6
-rw-r--r--drivers/clk/zynq/pll.c18
-rw-r--r--drivers/clk/zynqmp/clk-mux-zynqmp.c1
-rw-r--r--drivers/clk/zynqmp/clk-zynqmp.h6
-rw-r--r--drivers/clk/zynqmp/clkc.c184
-rw-r--r--drivers/clk/zynqmp/divider.c17
-rw-r--r--drivers/clocksource/Kconfig21
-rw-r--r--drivers/clocksource/Makefile3
-rw-r--r--drivers/clocksource/arm_arch_timer.c138
-rw-r--r--drivers/clocksource/timer-atmel-tcb.c (renamed from drivers/clocksource/tcb_clksrc.c)126
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c15
-rw-r--r--drivers/clocksource/timer-ixp4xx.c282
-rw-r--r--drivers/clocksource/timer-milbeaut.c66
-rw-r--r--drivers/clocksource/timer-sun4i.c5
-rw-r--r--drivers/clocksource/timer-tegra20.c63
-rw-r--r--drivers/clocksource/timer-ti-dm.c1
-rw-r--r--drivers/counter/104-quad-8.c1367
-rw-r--r--drivers/counter/Kconfig60
-rw-r--r--drivers/counter/Makefile10
-rw-r--r--drivers/counter/counter.c1567
-rw-r--r--drivers/counter/ftm-quaddec.c356
-rw-r--r--drivers/counter/stm32-lptimer-cnt.c754
-rw-r--r--drivers/counter/stm32-timer-cnt.c390
-rw-r--r--drivers/cpufreq/Kconfig4
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c19
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c2
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c22
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq.c242
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/cpufreq_stats.c15
-rw-r--r--drivers/cpufreq/freq_table.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c65
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c19
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c1
-rw-r--r--drivers/cpufreq/maple-cpufreq.c6
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernow-k8.c2
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c2
-rw-r--r--drivers/cpufreq/speedstep-centrino.c2
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c2
-rw-r--r--drivers/cpuidle/cpuidle.c19
-rw-r--r--drivers/crypto/Kconfig9
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c24
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c48
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h3
-rw-r--r--drivers/crypto/atmel-tdes.c106
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c2
-rw-r--r--drivers/crypto/bcm/cipher.c22
-rw-r--r--drivers/crypto/bcm/spu.c3
-rw-r--r--drivers/crypto/bcm/util.c1
-rw-r--r--drivers/crypto/caam/caamalg.c84
-rw-r--r--drivers/crypto/caam/caamalg_qi.c73
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c251
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h2
-rw-r--r--drivers/crypto/caam/caampkc.c2
-rw-r--r--drivers/crypto/caam/ctrl.c20
-rw-r--r--drivers/crypto/caam/error.c4
-rw-r--r--drivers/crypto/caam/intern.h4
-rw-r--r--drivers/crypto/caam/jr.c31
-rw-r--r--drivers/crypto/caam/qi.c4
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c2
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_mbox.c17
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c65
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h46
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c8
-rw-r--r--drivers/crypto/cavium/zip/zip_crypto.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-des3.c21
-rw-r--r--drivers/crypto/ccp/ccp-crypto-rsa.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-sha.c2
-rw-r--r--drivers/crypto/ccp/psp-dev.c69
-rw-r--r--drivers/crypto/ccree/Makefile1
-rw-r--r--drivers/crypto/ccree/cc_aead.c118
-rw-r--r--drivers/crypto/ccree/cc_aead.h3
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.c341
-rw-r--r--drivers/crypto/ccree/cc_buffer_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_cipher.c585
-rw-r--r--drivers/crypto/ccree/cc_cipher.h3
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h10
-rw-r--r--drivers/crypto/ccree/cc_debugfs.c44
-rw-r--r--drivers/crypto/ccree/cc_debugfs.h2
-rw-r--r--drivers/crypto/ccree/cc_driver.c120
-rw-r--r--drivers/crypto/ccree/cc_driver.h36
-rw-r--r--drivers/crypto/ccree/cc_fips.c29
-rw-r--r--drivers/crypto/ccree/cc_fips.h4
-rw-r--r--drivers/crypto/ccree/cc_hash.c64
-rw-r--r--drivers/crypto/ccree/cc_hash.h2
-rw-r--r--drivers/crypto/ccree/cc_host_regs.h123
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h35
-rw-r--r--drivers/crypto/ccree/cc_ivgen.c11
-rw-r--r--drivers/crypto/ccree/cc_ivgen.h2
-rw-r--r--drivers/crypto/ccree/cc_kernel_regs.h2
-rw-r--r--drivers/crypto/ccree/cc_lli_defs.h4
-rw-r--r--drivers/crypto/ccree/cc_pm.c11
-rw-r--r--drivers/crypto/ccree/cc_pm.h2
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.c116
-rw-r--r--drivers/crypto/ccree/cc_request_mgr.h2
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.c7
-rw-r--r--drivers/crypto/ccree/cc_sram_mgr.h2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c19
-rw-r--r--drivers/crypto/chelsio/chcr_core.c4
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c3
-rw-r--r--drivers/crypto/hifn_795x.c31
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c12
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c11
-rw-r--r--drivers/crypto/ixp4xx_crypto.c68
-rw-r--r--drivers/crypto/marvell/cipher.c11
-rw-r--r--drivers/crypto/marvell/hash.c3
-rw-r--r--drivers/crypto/mediatek/mtk-sha.c3
-rw-r--r--drivers/crypto/mxc-scc.c767
-rw-r--r--drivers/crypto/mxs-dcp.c14
-rw-r--r--drivers/crypto/n2_core.c15
-rw-r--r--drivers/crypto/nx/nx-842-pseries.c6
-rw-r--r--drivers/crypto/nx/nx-842.c3
-rw-r--r--drivers/crypto/nx/nx-aes-xcbc.c12
-rw-r--r--drivers/crypto/nx/nx-sha256.c6
-rw-r--r--drivers/crypto/nx/nx-sha512.c6
-rw-r--r--drivers/crypto/omap-des.c29
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/crypto/padlock-sha.c5
-rw-r--r--drivers/crypto/picoxcell_crypto.c35
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c2
-rw-r--r--drivers/crypto/qce/ablkcipher.c22
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c61
-rw-r--r--drivers/crypto/s5p-sss.c1
-rw-r--r--drivers/crypto/sahara.c6
-rw-r--r--drivers/crypto/stm32/Kconfig1
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c74
-rw-r--r--drivers/crypto/stm32/stm32-hash.c4
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c78
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-core.c19
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-hash.c5
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss.h2
-rw-r--r--drivers/crypto/talitos.c108
-rw-r--r--drivers/crypto/ux500/cryp/Makefile6
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c86
-rw-r--r--drivers/crypto/vmx/aes.c14
-rw-r--r--drivers/crypto/vmx/aes_cbc.c14
-rw-r--r--drivers/crypto/vmx/aes_ctr.c10
-rw-r--r--drivers/crypto/vmx/aes_xts.c14
-rw-r--r--drivers/crypto/vmx/aesp8-ppc.pl4
-rw-r--r--drivers/crypto/vmx/ghash.c10
-rw-r--r--drivers/crypto/vmx/vmx.c4
-rw-r--r--drivers/dax/Kconfig3
-rw-r--r--drivers/dax/device.c6
-rw-r--r--drivers/dax/pmem/core.c6
-rw-r--r--drivers/dax/super.c7
-rw-r--r--drivers/devfreq/devfreq-event.c2
-rw-r--r--drivers/devfreq/devfreq.c90
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c2
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c25
-rw-r--r--drivers/devfreq/exynos-bus.c8
-rw-r--r--drivers/devfreq/rk3399_dmc.c73
-rw-r--r--drivers/devfreq/tegra-devfreq.c7
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/Makefile3
-rw-r--r--drivers/dma-buf/dma-fence-chain.c242
-rw-r--r--drivers/dma-buf/dma-fence.c1
-rw-r--r--drivers/dma-buf/reservation.c8
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c3
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/amba-pl08x.c22
-rw-r--r--drivers/dma/at_xdmac.c67
-rw-r--r--drivers/dma/bcm-sba-raid.c3
-rw-r--r--drivers/dma/bcm2835-dma.c1
-rw-r--r--drivers/dma/dma-axi-dmac.c116
-rw-r--r--drivers/dma/fsl-edma-common.h2
-rw-r--r--drivers/dma/fsl-edma.c6
-rw-r--r--drivers/dma/idma64.c15
-rw-r--r--drivers/dma/idma64.h2
-rw-r--r--drivers/dma/imx-sdma.c15
-rw-r--r--drivers/dma/nbpfaxi.c4
-rw-r--r--drivers/dma/pl330.c61
-rw-r--r--drivers/dma/sh/rcar-dmac.c4
-rw-r--r--drivers/dma/stm32-dma.c103
-rw-r--r--drivers/dma/tegra210-adma.c269
-rw-r--r--drivers/dma/txx9dmac.c3
-rw-r--r--drivers/dma/xgene-dma.c6
-rw-r--r--drivers/edac/Kconfig4
-rw-r--r--drivers/edac/altera_edac.c262
-rw-r--r--drivers/edac/altera_edac.h69
-rw-r--r--drivers/edac/amd64_edac.c135
-rw-r--r--drivers/edac/amd64_edac.h11
-rw-r--r--drivers/edac/edac_mc.c12
-rw-r--r--drivers/edac/i10nm_base.c52
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/skx_base.c50
-rw-r--r--drivers/edac/skx_common.c57
-rw-r--r--drivers/edac/skx_common.h8
-rw-r--r--drivers/extcon/Kconfig9
-rw-r--r--drivers/extcon/Makefile1
-rw-r--r--drivers/extcon/devres.c2
-rw-r--r--drivers/extcon/extcon-arizona.c10
-rw-r--r--drivers/extcon/extcon-axp288.c9
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c81
-rw-r--r--drivers/extcon/extcon-intel-mrfld.c284
-rw-r--r--drivers/extcon/extcon-intel.h20
-rw-r--r--drivers/firewire/core-iso.c15
-rw-r--r--drivers/firewire/nosy.c2
-rw-r--r--drivers/firewire/ohci.c1
-rw-r--r--drivers/firmware/Kconfig31
-rw-r--r--drivers/firmware/Makefile4
-rw-r--r--drivers/firmware/arm_scmi/driver.c8
-rw-r--r--drivers/firmware/arm_sdei.c3
-rw-r--r--drivers/firmware/dmi_scan.c28
-rw-r--r--drivers/firmware/efi/arm-runtime.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile20
-rw-r--r--drivers/firmware/google/vpd.c4
-rw-r--r--drivers/firmware/imx/Makefile2
-rw-r--r--drivers/firmware/imx/imx-scu-irq.c168
-rw-r--r--drivers/firmware/imx/imx-scu.c6
-rw-r--r--drivers/firmware/imx/scu-pd.c121
-rw-r--r--drivers/firmware/iscsi_ibft.c2
-rw-r--r--drivers/firmware/psci/Kconfig13
-rw-r--r--drivers/firmware/psci/Makefile4
-rw-r--r--drivers/firmware/psci/psci.c (renamed from drivers/firmware/psci.c)110
-rw-r--r--drivers/firmware/psci/psci_checker.c (renamed from drivers/firmware/psci_checker.c)0
-rw-r--r--drivers/firmware/ti_sci.c651
-rw-r--r--drivers/firmware/ti_sci.h102
-rw-r--r--drivers/firmware/trusted_foundations.c (renamed from arch/arm/firmware/trusted_foundations.c)79
-rw-r--r--drivers/firmware/xilinx/zynqmp-debug.c18
-rw-r--r--drivers/firmware/xilinx/zynqmp.c56
-rw-r--r--drivers/fpga/Kconfig9
-rw-r--r--drivers/fpga/Makefile1
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/fpga/zynqmp-fpga.c159
-rw-r--r--drivers/gnss/core.c2
-rw-r--r--drivers/gnss/ubx.c1
-rw-r--r--drivers/gpio/Kconfig104
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-74x164.c22
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c4
-rw-r--r--drivers/gpio/gpio-amdpt.c8
-rw-r--r--drivers/gpio/gpio-aspeed.c4
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-cadence.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c7
-rw-r--r--drivers/gpio/gpio-dwapb.c4
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-hlwd.c4
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-ixp4xx.c474
-rw-r--r--drivers/gpio/gpio-janz-ttl.c4
-rw-r--r--drivers/gpio/gpio-loongson1.c4
-rw-r--r--drivers/gpio/gpio-lpc18xx.c5
-rw-r--r--drivers/gpio/gpio-max77650.c190
-rw-r--r--drivers/gpio/gpio-mb86s7x.c4
-rw-r--r--drivers/gpio/gpio-merrifield.c18
-rw-r--r--drivers/gpio/gpio-mlxbf.c152
-rw-r--r--drivers/gpio/gpio-mmio.c99
-rw-r--r--drivers/gpio/gpio-mt7621.c3
-rw-r--r--drivers/gpio/gpio-mvebu.c7
-rw-r--r--drivers/gpio/gpio-mxc.c4
-rw-r--r--drivers/gpio/gpio-octeon.c4
-rw-r--r--drivers/gpio/gpio-omap.c644
-rw-r--r--drivers/gpio/gpio-pca953x.c25
-rw-r--r--drivers/gpio/gpio-pxa.c12
-rw-r--r--drivers/gpio/gpio-rcar.c5
-rw-r--r--drivers/gpio/gpio-sch.c5
-rw-r--r--drivers/gpio/gpio-spear-spics.c4
-rw-r--r--drivers/gpio/gpio-sprd.c4
-rw-r--r--drivers/gpio/gpio-sta2x11.c5
-rw-r--r--drivers/gpio/gpio-stp-xway.c4
-rw-r--r--drivers/gpio/gpio-tb10x.c4
-rw-r--r--drivers/gpio/gpio-tegra.c4
-rw-r--r--drivers/gpio/gpio-thunderx.c16
-rw-r--r--drivers/gpio/gpio-timberdale.c4
-rw-r--r--drivers/gpio/gpio-ts4800.c4
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpio-vf610.c92
-rw-r--r--drivers/gpio/gpio-xgene-sb.c4
-rw-r--r--drivers/gpio/gpio-xlp.c7
-rw-r--r--drivers/gpio/gpio-zx.c4
-rw-r--r--drivers/gpio/gpio-zynq.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c115
-rw-r--r--drivers/gpio/gpiolib-devprop.c2
-rw-r--r--drivers/gpio/gpiolib-of.c24
-rw-r--r--drivers/gpio/gpiolib.c43
-rw-r--r--drivers/gpio/gpiolib.h22
-rw-r--r--drivers/gpu/drm/Kconfig22
-rw-r--r--drivers/gpu/drm/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c531
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c318
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1482
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h294
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c977
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c270
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c652
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c81
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c238
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c117
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1823
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c53
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c194
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c164
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c196
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c146
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c213
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c174
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h98
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h16
-rw-r--r--drivers/gpu/drm/amd/include/linux/chash.h366
-rw-r--r--drivers/gpu/drm/amd/lib/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile32
-rw-r--r--drivers/gpu/drm/amd/lib/chash.c638
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1253
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c127
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h770
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu10.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h128
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h147
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c1977
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c2413
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.h129
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_product.h12
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_utils.h31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c685
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c431
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h50
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h530
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c407
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c118
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h95
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c77
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h26
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c113
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h129
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c610
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c139
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c220
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c48
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h6
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c249
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h31
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c271
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h20
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c6
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig14
-rw-r--r--drivers/gpu/drm/aspeed/Makefile3
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h104
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c241
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c269
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c42
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h4
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c7
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c10
-rw-r--r--drivers/gpu/drm/bochs/bochs.h9
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c194
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c10
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c6
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c140
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig2
-rw-r--r--drivers/gpu/drm/cirrus/Makefile3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c657
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c161
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c315
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c328
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c621
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c10
-rw-r--r--drivers/gpu/drm/drm_atomic.c45
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c19
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c71
-rw-r--r--drivers/gpu/drm/drm_auth.c21
-rw-r--r--drivers/gpu/drm/drm_bufs.c8
-rw-r--r--drivers/gpu/drm/drm_client.c11
-rw-r--r--drivers/gpu/drm/drm_connector.c97
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c223
-rw-r--r--drivers/gpu/drm/drm_dsc.c269
-rw-r--r--drivers/gpu/drm/drm_edid.c105
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c302
-rw-r--r--drivers/gpu/drm/drm_file.c26
-rw-r--r--drivers/gpu/drm/drm_format_helper.c324
-rw-r--r--drivers/gpu/drm/drm_fourcc.c27
-rw-r--r--drivers/gpu/drm/drm_gem.c320
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c625
-rw-r--r--drivers/gpu/drm/drm_internal.h10
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c86
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c2
-rw-r--r--drivers/gpu/drm/drm_lease.c13
-rw-r--r--drivers/gpu/drm/drm_legacy.h87
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c82
-rw-r--r--drivers/gpu/drm/drm_lock.c19
-rw-r--r--drivers/gpu/drm/drm_memory.c26
-rw-r--r--drivers/gpu/drm/drm_mm.c25
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_mode_object.c5
-rw-r--r--drivers/gpu/drm/drm_modes.c12
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c13
-rw-r--r--drivers/gpu/drm/drm_plane.c8
-rw-r--r--drivers/gpu/drm/drm_prime.c1
-rw-r--r--drivers/gpu/drm/drm_print.c28
-rw-r--r--drivers/gpu/drm/drm_syncobj.c462
-rw-r--r--drivers/gpu/drm/drm_vm.c6
-rw-r--r--drivers/gpu/drm/drm_writeback.c73
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c97
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c48
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c49
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c75
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c43
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c12
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test47
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c74
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h18
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h17
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c189
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c248
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h31
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c41
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_active.c23
-rw-r--r--drivers/gpu/drm/i915/i915_active.h16
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c173
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c622
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h408
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c782
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1097
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h260
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context_types.h175
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c156
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c141
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.c42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c8
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c125
-rw-r--r--drivers/gpu/drm/i915/i915_globals.h35
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c183
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h51
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c665
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c262
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c114
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c67
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h42
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h2
-rw-r--r--drivers/gpu/drm/i915/i915_query.c39
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h571
-rw-r--r--drivers/gpu/drm/i915/i915_request.c554
-rw-r--r--drivers/gpu/drm/i915/i915_request.h87
-rw-r--r--drivers/gpu/drm/i915/i915_reset.c621
-rw-r--r--drivers/gpu/drm/i915/i915_reset.h16
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c112
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h95
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h72
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c43
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h16
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.c301
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h89
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h70
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h106
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.c61
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.h20
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h31
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c11
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c62
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h3
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c51
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c59
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.h40
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c95
-rw-r--r--drivers/gpu/drm/i915/intel_audio.h24
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c133
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c92
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c382
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.h46
-rw-r--r--drivers/gpu/drm/i915/intel_color.c1131
-rw-r--r--drivers/gpu/drm/i915/intel_color.h17
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.c3
-rw-r--r--drivers/gpu/drm/i915/intel_connector.c19
-rw-r--r--drivers/gpu/drm/i915/intel_connector.h35
-rw-r--r--drivers/gpu/drm/i915/intel_context.c270
-rw-r--r--drivers/gpu/drm/i915/intel_context.h87
-rw-r--r--drivers/gpu/drm/i915/intel_context_types.h77
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_crt.h21
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_csr.h17
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c321
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.h53
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c136
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h46
-rw-r--r--drivers/gpu/drm/i915/intel_display.c813
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c583
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h122
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c154
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c770
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h5
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h666
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c24
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.h13
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c491
-rw-r--r--drivers/gpu/drm/i915/intel_engine_types.h546
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c10
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.h42
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c243
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.h53
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h10
-rw-r--r--drivers/gpu/drm/i915/intel_gpu_commands.h9
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c45
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c99
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c134
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c26
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c1179
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.h33
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c810
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.h51
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c27
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c904
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c19
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.h38
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c101
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.h22
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c14
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c150
-rw-r--r--drivers/gpu/drm/i915/intel_panel.h65
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c243
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c555
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h71
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c318
-rw-r--r--drivers/gpu/drm/i915/intel_psr.h40
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c435
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h650
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c124
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c169
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c12
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c260
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.h55
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c5
-rw-r--r--drivers/gpu/drm/i915/intel_tv.h13
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c25
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c996
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h286
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c133
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c187
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.h19
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds_types.h27
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c457
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c37
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_timeline.c120
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c301
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c446
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c166
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c423
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c145
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c84
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi_pll.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/lima/Kconfig13
-rw-r--r--drivers/gpu/drm/lima/Makefile21
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c47
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h14
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c98
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h30
-rw-r--r--drivers/gpu/drm/lima/lima_device.c385
-rw-r--r--drivers/gpu/drm/lima/lima_device.h131
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.c58
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.h18
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c376
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h45
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c349
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h25
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c47
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c283
-rw-r--r--drivers/gpu/drm/lima/lima_gp.h16
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c80
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.h14
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c142
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h16
-rw-r--r--drivers/gpu/drm/lima/lima_object.c122
-rw-r--r--drivers/gpu/drm/lima/lima_object.h36
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c60
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.h12
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c427
-rw-r--r--drivers/gpu/drm/lima/lima_pp.h19
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h298
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c362
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h102
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c282
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h62
-rw-r--r--drivers/gpu/drm/meson/Makefile2
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c73
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h51
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c353
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c83
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h5
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c163
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h32
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c18
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c21
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h247
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c123
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c11
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c25
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c85
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c51
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig5
-rw-r--r--drivers/gpu/drm/msm/Makefile9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c109
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c216
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c63
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c69
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c177
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c4
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h10
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c72
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h10
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c52
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c13
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c41
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig17
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c330
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c39
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c170
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c221
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c140
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c61
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c55
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c144
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c64
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c110
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h76
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c68
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c229
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c181
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c236
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c211
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig31
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c6
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c272
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c6
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c20
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c3
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c387
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c258
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c84
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c12
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig14
-rw-r--r--drivers/gpu/drm/panfrost/Makefile12
-rw-r--r--drivers/gpu/drm/panfrost/TODO27
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c219
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c257
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h125
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c475
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h309
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c95
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h29
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c367
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h176
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c564
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h51
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c386
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h298
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c7
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c17
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig4
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c64
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c54
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c37
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c122
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c243
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.h39
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig8
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c876
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h229
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c17
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c20
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c12
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c35
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c28
-rw-r--r--drivers/gpu/drm/stm/ltdc.c24
-rw-r--r--drivers/gpu/drm/stm/ltdc.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c63
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c40
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c74
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c179
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h11
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c21
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tinydrm/core/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c183
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c160
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c24
-rw-r--r--drivers/gpu/drm/tinydrm/hx8357d.c59
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c87
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c59
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c67
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c185
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c147
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c148
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c10
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c57
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h9
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c20
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c35
-rw-r--r--drivers/gpu/drm/v3d/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c314
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c65
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h37
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c110
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c67
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h2
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c25
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig (renamed from drivers/staging/vboxvideo/Kconfig)1
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile (renamed from drivers/staging/vboxvideo/Makefile)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c (renamed from drivers/staging/vboxvideo/hgsmi_base.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h (renamed from drivers/staging/vboxvideo/hgsmi_ch_setup.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_channels.h (renamed from drivers/staging/vboxvideo/hgsmi_channels.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_defs.h (renamed from drivers/staging/vboxvideo/hgsmi_defs.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/modesetting.c (renamed from drivers/staging/vboxvideo/modesetting.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c (renamed from drivers/staging/vboxvideo/vbox_drv.c)25
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h (renamed from drivers/staging/vboxvideo/vbox_drv.h)9
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c (renamed from drivers/staging/vboxvideo/vbox_fb.c)8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_hgsmi.c (renamed from drivers/staging/vboxvideo/vbox_hgsmi.c)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c (renamed from drivers/staging/vboxvideo/vbox_irq.c)10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c (renamed from drivers/staging/vboxvideo/vbox_main.c)6
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c (renamed from drivers/staging/vboxvideo/vbox_mode.c)24
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_prime.c (renamed from drivers/staging/vboxvideo/vbox_prime.c)10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c (renamed from drivers/staging/vboxvideo/vbox_ttm.c)12
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h (renamed from drivers/staging/vboxvideo/vboxvideo.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h (renamed from drivers/staging/vboxvideo/vboxvideo_guest.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h (renamed from drivers/staging/vboxvideo/vboxvideo_vbe.h)0
-rw-r--r--drivers/gpu/drm/vboxvideo/vbva_base.c (renamed from drivers/staging/vboxvideo/vbva_base.c)0
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c69
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c105
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c90
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c39
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c42
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h77
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c176
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c162
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c180
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c123
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c59
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h51
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c240
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c83
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h45
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c35
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c107
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c74
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c25
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c102
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c36
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c98
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1505
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c18
-rw-r--r--drivers/hid/Kconfig27
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-core.c53
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-input.c83
-rw-r--r--drivers/hid/hid-lg.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c1142
-rw-r--r--drivers/hid/hid-logitech-hidpp.c736
-rw-r--r--drivers/hid/hid-macally.c45
-rw-r--r--drivers/hid/hid-picolcd_core.c18
-rw-r--r--drivers/hid/hid-quirks.c6
-rw-r--r--drivers/hid/hid-sensor-custom.c12
-rw-r--r--drivers/hid/hid-u2fzero.c374
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig15
-rw-r--r--drivers/hid/intel-ish-hid/Makefile5
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c1085
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c168
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c96
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h37
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c60
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h31
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hv/channel_mgmt.c3
-rw-r--r--drivers/hv/hv.c1
-rw-r--r--drivers/hv/hyperv_vmbus.h3
-rw-r--r--drivers/hv/ring_buffer.c22
-rw-r--r--drivers/hv/vmbus_drv.c166
-rw-r--r--drivers/hwmon/Kconfig19
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/ad7414.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/adm1025.c98
-rw-r--r--drivers/hwmon/adm1026.c416
-rw-r--r--drivers/hwmon/adm1029.c41
-rw-r--r--drivers/hwmon/adm1031.c201
-rw-r--r--drivers/hwmon/adm9240.c135
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/adt7411.c48
-rw-r--r--drivers/hwmon/adt7475.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c6
-rw-r--r--drivers/hwmon/f71805f.c15
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/gpio-fan.c25
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c9
-rw-r--r--drivers/hwmon/iio_hwmon.c27
-rw-r--r--drivers/hwmon/ina209.c2
-rw-r--r--drivers/hwmon/ina2xx.c2
-rw-r--r--drivers/hwmon/ina3221.c176
-rw-r--r--drivers/hwmon/jc42.c18
-rw-r--r--drivers/hwmon/jz4740-hwmon.c4
-rw-r--r--drivers/hwmon/lm63.c2
-rw-r--r--drivers/hwmon/lm75.c45
-rw-r--r--drivers/hwmon/lm78.c114
-rw-r--r--drivers/hwmon/lm85.c342
-rw-r--r--drivers/hwmon/lm87.c165
-rw-r--r--drivers/hwmon/lm90.c15
-rw-r--r--drivers/hwmon/lm95241.c34
-rw-r--r--drivers/hwmon/lm95245.c49
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c412
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4245.c73
-rw-r--r--drivers/hwmon/ltq-cputemp.c26
-rw-r--r--drivers/hwmon/max197.c2
-rw-r--r--drivers/hwmon/max31790.c58
-rw-r--r--drivers/hwmon/max6621.c44
-rw-r--r--drivers/hwmon/max6650.c90
-rw-r--r--drivers/hwmon/max6697.c2
-rw-r--r--drivers/hwmon/menf21bmc_hwmon.c43
-rw-r--r--drivers/hwmon/mlxreg-fan.c152
-rw-r--r--drivers/hwmon/nct7904.c128
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c76
-rw-r--r--drivers/hwmon/ntc_thermistor.c24
-rw-r--r--drivers/hwmon/occ/Kconfig17
-rw-r--r--drivers/hwmon/occ/Makefile6
-rw-r--r--drivers/hwmon/occ/common.c11
-rw-r--r--drivers/hwmon/occ/common.h3
-rw-r--r--drivers/hwmon/occ/sysfs.c29
-rw-r--r--drivers/hwmon/pc87427.c14
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ir38064.c65
-rw-r--r--drivers/hwmon/pmbus/isl68137.c169
-rw-r--r--drivers/hwmon/pmbus/lm25066.c17
-rw-r--r--drivers/hwmon/pmbus/pmbus.h18
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c129
-rw-r--r--drivers/hwmon/pmbus/tps53679.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c2
-rw-r--r--drivers/hwmon/pmbus/ucd9200.c2
-rw-r--r--drivers/hwmon/pwm-fan.c169
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c13
-rw-r--r--drivers/hwmon/s3c-hwmon.c4
-rw-r--r--drivers/hwmon/sht15.c2
-rw-r--r--drivers/hwmon/sis5595.c92
-rw-r--r--drivers/hwmon/smsc47b397.c13
-rw-r--r--drivers/hwmon/smsc47m1.c106
-rw-r--r--drivers/hwmon/smsc47m192.c146
-rw-r--r--drivers/hwmon/stts751.c2
-rw-r--r--drivers/hwmon/thmc50.c83
-rw-r--r--drivers/hwmon/tmp102.c28
-rw-r--r--drivers/hwmon/tmp103.c2
-rw-r--r--drivers/hwmon/tmp108.c29
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/via686a.c148
-rw-r--r--drivers/hwmon/vt1211.c15
-rw-r--r--drivers/hwmon/vt8231.c166
-rw-r--r--drivers/hwmon/w83627hf.c299
-rw-r--r--drivers/hwmon/w83773g.c32
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig9
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c255
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c97
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c37
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c114
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c116
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c238
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c82
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c266
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c17
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h12
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c18
-rw-r--r--drivers/hwtracing/coresight/coresight.c29
-rw-r--r--drivers/hwtracing/intel_th/acpi.c10
-rw-r--r--drivers/hwtracing/intel_th/core.c139
-rw-r--r--drivers/hwtracing/intel_th/gth.c125
-rw-r--r--drivers/hwtracing/intel_th/gth.h19
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h30
-rw-r--r--drivers/hwtracing/intel_th/msu.c407
-rw-r--r--drivers/hwtracing/intel_th/msu.h10
-rw-r--r--drivers/hwtracing/intel_th/pci.c37
-rw-r--r--drivers/hwtracing/stm/core.c9
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c22
-rw-r--r--drivers/i2c/busses/Kconfig25
-rw-r--r--drivers/i2c/busses/Makefile5
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c483
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-plat.c367
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h219
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c376
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c (renamed from drivers/i2c/busses/i2c-at91.c)480
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c143
-rw-r--r--drivers/i2c/busses/i2c-at91.h174
-rw-r--r--drivers/i2c/busses/i2c-axxia.c57
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c764
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c18
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c3
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c5
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c11
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-isch.c1
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c255
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c3
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c7
-rw-r--r--drivers/i2c/busses/i2c-ocores.c16
-rw-r--r--drivers/i2c/busses/i2c-omap.c76
-rw-r--r--drivers/i2c/busses/i2c-piix4.c15
-rw-r--r--drivers/i2c/busses/i2c-rcar.c30
-rw-r--r--drivers/i2c/busses/i2c-riic.c43
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c4
-rw-r--r--drivers/i2c/busses/i2c-stu300.c25
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c25
-rw-r--r--drivers/i2c/i2c-core-base.c147
-rw-r--r--drivers/i2c/i2c-core-smbus.c29
-rw-r--r--drivers/i2c/i2c-core.h36
-rw-r--r--drivers/i2c/i2c-mux.c6
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c6
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c8
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c106
-rw-r--r--drivers/i3c/master.c5
-rw-r--r--drivers/i3c/master/dw-i3c-master.c10
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-cd_ioctl.c5
-rw-r--r--drivers/ide/ide-gd.c6
-rw-r--r--drivers/ide/ide-probe.c3
-rw-r--r--drivers/ide/tx4939ide.c2
-rw-r--r--drivers/iio/Kconfig26
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig50
-rw-r--r--drivers/iio/accel/bma180.c18
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c23
-rw-r--r--drivers/iio/accel/cros_ec_accel_legacy.c40
-rw-r--r--drivers/iio/accel/kxcjk-1013.c13
-rw-r--r--drivers/iio/accel/kxsd9.c4
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/st_accel.h2
-rw-r--r--drivers/iio/accel/st_accel_core.c78
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/adc/Kconfig48
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/ad7606.c120
-rw-r--r--drivers/iio/adc/ad7606.h25
-rw-r--r--drivers/iio/adc/ad7606_spi.c2
-rw-r--r--drivers/iio/adc/ad7780.c (renamed from drivers/staging/iio/adc/ad7780.c)179
-rw-r--r--drivers/iio/adc/ad7923.c24
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c16
-rw-r--r--drivers/iio/adc/imx7d_adc.c175
-rw-r--r--drivers/iio/adc/ingenic-adc.c4
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c60
-rw-r--r--drivers/iio/adc/meson_saradc.c8
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c1
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c628
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c180
-rw-r--r--drivers/iio/adc/stmpe-adc.c5
-rw-r--r--drivers/iio/adc/ti-ads7950.c219
-rw-r--r--drivers/iio/adc/ti-ads8344.c204
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c10
-rw-r--r--drivers/iio/chemical/Kconfig12
-rw-r--r--drivers/iio/chemical/pms7003.c5
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c12
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c22
-rw-r--r--drivers/iio/common/ms_sensors/Kconfig2
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_iio.c2
-rw-r--r--drivers/iio/counter/104-quad-8.c631
-rw-r--r--drivers/iio/counter/Kconfig34
-rw-r--r--drivers/iio/counter/Makefile8
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c382
-rw-r--r--drivers/iio/dac/ad5064.c15
-rw-r--r--drivers/iio/dac/ad5758.c55
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c5
-rw-r--r--drivers/iio/frequency/ad9523.c16
-rw-r--r--drivers/iio/gyro/Kconfig22
-rw-r--r--drivers/iio/gyro/Makefile3
-rw-r--r--drivers/iio/gyro/bmg160_core.c21
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c9
-rw-r--r--drivers/iio/gyro/fxas21002c.h150
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c1004
-rw-r--r--drivers/iio/gyro/fxas21002c_i2c.c69
-rw-r--r--drivers/iio/gyro/fxas21002c_spi.c70
-rw-r--r--drivers/iio/gyro/itg3200_core.c20
-rw-r--r--drivers/iio/gyro/mpu3050-core.c5
-rw-r--r--drivers/iio/humidity/Kconfig20
-rw-r--r--drivers/iio/imu/Makefile2
-rw-r--r--drivers/iio/imu/adis16400.c (renamed from drivers/iio/imu/adis16400_core.c)232
-rw-r--r--drivers/iio/imu/adis16400.h215
-rw-r--r--drivers/iio/imu/adis16400_buffer.c101
-rw-r--r--drivers/iio/imu/adis16480.c435
-rw-r--r--drivers/iio/imu/adis_buffer.c40
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c10
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h6
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c12
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c157
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c15
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c15
-rw-r--r--drivers/iio/industrialio-buffer.c20
-rw-r--r--drivers/iio/industrialio-core.c46
-rw-r--r--drivers/iio/industrialio-trigger.c5
-rw-r--r--drivers/iio/inkern.c22
-rw-r--r--drivers/iio/light/Kconfig274
-rw-r--r--drivers/iio/light/cros_ec_light_prox.c12
-rw-r--r--drivers/iio/light/vcnl4000.c77
-rw-r--r--drivers/iio/magnetometer/ak8974.c5
-rw-r--r--drivers/iio/magnetometer/ak8975.c13
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c21
-rw-r--r--drivers/iio/magnetometer/hmc5843.h1
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c20
-rw-r--r--drivers/iio/magnetometer/hmc5843_i2c.c7
-rw-r--r--drivers/iio/magnetometer/hmc5843_spi.c7
-rw-r--r--drivers/iio/potentiometer/Kconfig34
-rw-r--r--drivers/iio/potentiostat/lmp91000.c14
-rw-r--r--drivers/iio/pressure/bmp280-core.c6
-rw-r--r--drivers/iio/pressure/cros_ec_baro.c11
-rw-r--r--drivers/iio/proximity/Kconfig23
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/as3935.c50
-rw-r--r--drivers/iio/proximity/mb1232.c272
-rw-r--r--drivers/iio/proximity/srf04.c38
-rw-r--r--drivers/iio/temperature/Kconfig24
-rw-r--r--drivers/iio/temperature/Makefile1
-rw-r--r--drivers/iio/temperature/max31856.c356
-rw-r--r--drivers/iio/trigger/iio-trig-loop.c2
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/addr.c25
-rw-r--r--drivers/infiniband/core/cache.c145
-rw-r--r--drivers/infiniband/core/cm.c94
-rw-r--r--drivers/infiniband/core/cm_msgs.h22
-rw-r--r--drivers/infiniband/core/cma.c83
-rw-r--r--drivers/infiniband/core/core_priv.h18
-rw-r--r--drivers/infiniband/core/cq.c21
-rw-r--r--drivers/infiniband/core/device.c632
-rw-r--r--drivers/infiniband/core/iwcm.c35
-rw-r--r--drivers/infiniband/core/iwpm_util.c8
-rw-r--r--drivers/infiniband/core/mad.c87
-rw-r--r--drivers/infiniband/core/mad_priv.h4
-rw-r--r--drivers/infiniband/core/multicast.c1
-rw-r--r--drivers/infiniband/core/nldev.c160
-rw-r--r--drivers/infiniband/core/rdma_core.c200
-rw-r--r--drivers/infiniband/core/rdma_core.h11
-rw-r--r--drivers/infiniband/core/sa_query.c52
-rw-r--r--drivers/infiniband/core/sysfs.c93
-rw-r--r--drivers/infiniband/core/ucm.c37
-rw-r--r--drivers/infiniband/core/ucma.c2
-rw-r--r--drivers/infiniband/core/umem.c182
-rw-r--r--drivers/infiniband/core/umem_odp.c25
-rw-r--r--drivers/infiniband/core/user_mad.c24
-rw-r--r--drivers/infiniband/core/uverbs.h7
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c99
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c29
-rw-r--r--drivers/infiniband/core/uverbs_main.c73
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c52
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c6
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c12
-rw-r--r--drivers/infiniband/core/uverbs_std_types_dm.c10
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c6
-rw-r--r--drivers/infiniband/core/uverbs_std_types_mr.c9
-rw-r--r--drivers/infiniband/core/verbs.c233
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig12
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c194
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h36
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c39
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h10
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c56
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h38
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_ev.c18
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c97
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c69
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c23
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c210
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h96
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c25
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c77
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c77
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c8
-rw-r--r--drivers/infiniband/hw/efa/Kconfig15
-rw-r--r--drivers/infiniband/hw/efa/Makefile9
-rw-r--r--drivers/infiniband/hw/efa/efa.h163
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h794
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h136
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c1160
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h144
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c692
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h270
-rw-r--r--drivers/infiniband/hw/efa/efa_common_defs.h18
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c533
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h113
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c1825
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c57
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h3
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h3
-rw-r--r--drivers/infiniband/hw/hfi1/common.h2
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c82
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c19
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c3
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h8
-rw-r--r--drivers/infiniband/hw/hfi1/init.c59
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.h6
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c2
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c27
-rw-r--r--drivers/infiniband/hw/hfi1/rc.h8
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c274
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.h2
-rw-r--r--drivers/infiniband/hw/hfi1/trace_dbg.h4
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tid.h12
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c15
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h1
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c19
-rw-r--r--drivers/infiniband/hw/hns/Makefile4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c36
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h33
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c68
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h52
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c398
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c320
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c50
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c126
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c52
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c21
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c123
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c103
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c36
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c40
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c33
-rw-r--r--drivers/infiniband/hw/mlx4/main.c13
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h41
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c7
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c56
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c59
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c33
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c159
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h8
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c47
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c45
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c99
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c109
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.h13
-rw-r--r--drivers/infiniband/hw/mlx5/main.c680
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h118
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c53
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c132
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c164
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c76
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h7
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c179
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c23
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c5
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c168
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c37
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c23
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c25
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c128
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h24
-rw-r--r--drivers/infiniband/hw/qedr/main.c57
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h11
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c10
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c11
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c141
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h27
-rw-r--r--drivers/infiniband/hw/qib/qib.h4
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c20
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c56
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c28
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c43
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c56
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h27
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c38
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h8
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.h3
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.c16
-rw-r--r--drivers/infiniband/sw/rdmavt/mmap.h6
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c9
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.h7
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.c7
-rw-r--r--drivers/infiniband/sw/rdmavt/pd.h5
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c27
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/rc.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c49
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.h7
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_rc.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hdr.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c46
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c22
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c14
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c90
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/iser/Kconfig4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c7
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c6
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c60
-rw-r--r--drivers/input/evdev.c9
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/keyboard/Kconfig13
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c8
-rw-r--r--drivers/input/keyboard/qt1050.c598
-rw-r--r--drivers/input/keyboard/snvs_pwrkey.c30
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c38
-rw-r--r--drivers/input/misc/Kconfig21
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/gpio-vibra.c207
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c20
-rw-r--r--drivers/input/misc/max77650-onkey.c121
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/rmi4/rmi_f54.c21
-rw-r--r--drivers/input/serio/Kconfig1
-rw-r--r--drivers/input/serio/hyperv-keyboard.c2
-rw-r--r--drivers/input/serio/i8042.c3
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/touchscreen/Kconfig10
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c23
-rw-r--r--drivers/input/touchscreen/goodix.c54
-rw-r--r--drivers/input/touchscreen/iqs5xx.c1133
-rw-r--r--drivers/interconnect/core.c13
-rw-r--r--drivers/iommu/Kconfig26
-rw-r--r--drivers/iommu/amd_iommu.c54
-rw-r--r--drivers/iommu/amd_iommu_init.c8
-rw-r--r--drivers/iommu/amd_iommu_types.h6
-rw-r--r--drivers/iommu/arm-smmu-regs.h2
-rw-r--r--drivers/iommu/arm-smmu-v3.c355
-rw-r--r--drivers/iommu/arm-smmu.c11
-rw-r--r--drivers/iommu/dma-iommu.c95
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/intel-iommu.c586
-rw-r--r--drivers/iommu/intel-pasid.c4
-rw-r--r--drivers/iommu/intel-svm.c19
-rw-r--r--drivers/iommu/intel_irq_remapping.c9
-rw-r--r--drivers/iommu/io-pgtable-arm.c91
-rw-r--r--drivers/iommu/io-pgtable.c1
-rw-r--r--drivers/iommu/iommu.c211
-rw-r--r--drivers/iommu/msm_iommu.c2
-rw-r--r--drivers/iommu/mtk_iommu.c8
-rw-r--r--drivers/iommu/tegra-smmu.c41
-rw-r--r--drivers/irqchip/Kconfig33
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c2
-rw-r--r--drivers/irqchip/irq-gic-pm.c76
-rw-r--r--drivers/irqchip/irq-gic-v2m.c10
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c90
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c10
-rw-r--r--drivers/irqchip/irq-gic-v3.c10
-rw-r--r--drivers/irqchip/irq-gic.c4
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c4
-rw-r--r--drivers/irqchip/irq-ixp4xx.c403
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c7
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c233
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c615
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c275
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/hisax/config.c6
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/leds/Kconfig39
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-core.c5
-rw-r--r--drivers/leds/leds-as3645a.c93
-rw-r--r--drivers/leds/leds-blinkm.c1
-rw-r--r--drivers/leds/leds-lm3532.c683
-rw-r--r--drivers/leds/leds-lt3593.c64
-rw-r--r--drivers/leds/leds-max77650.c147
-rw-r--r--drivers/leds/leds-pca955x.c57
-rw-r--r--drivers/leds/leds-pca963x.c66
-rw-r--r--drivers/leds/uleds.c2
-rw-r--r--drivers/lightnvm/core.c82
-rw-r--r--drivers/lightnvm/pblk-cache.c8
-rw-r--r--drivers/lightnvm/pblk-core.c65
-rw-r--r--drivers/lightnvm/pblk-gc.c52
-rw-r--r--drivers/lightnvm/pblk-init.c65
-rw-r--r--drivers/lightnvm/pblk-map.c1
-rw-r--r--drivers/lightnvm/pblk-rb.c13
-rw-r--r--drivers/lightnvm/pblk-read.c394
-rw-r--r--drivers/lightnvm/pblk-recovery.c74
-rw-r--r--drivers/lightnvm/pblk-write.c1
-rw-r--r--drivers/lightnvm/pblk.h28
-rw-r--r--drivers/mailbox/Kconfig10
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/armada-37xx-rwtm-mailbox.c225
-rw-r--r--drivers/mailbox/imx-mailbox.c4
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c1
-rw-r--r--drivers/mailbox/pcc.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c13
-rw-r--r--drivers/md/Kconfig9
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/alloc.c5
-rw-r--r--drivers/md/bcache/btree.c12
-rw-r--r--drivers/md/bcache/journal.c42
-rw-r--r--drivers/md/bcache/request.c41
-rw-r--r--drivers/md/bcache/request.h2
-rw-r--r--drivers/md/bcache/super.c84
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/util.h26
-rw-r--r--drivers/md/dm-bufio.c15
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-crypt.c32
-rw-r--r--drivers/md/dm-delay.c3
-rw-r--r--drivers/md/dm-dust.c515
-rw-r--r--drivers/md/dm-exception-store.h31
-rw-r--r--drivers/md/dm-init.c8
-rw-r--r--drivers/md/dm-integrity.c727
-rw-r--r--drivers/md/dm-ioctl.c6
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-rq.c8
-rw-r--r--drivers/md/dm-snap.c359
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin-metadata.c139
-rw-r--r--drivers/md/dm-writecache.c29
-rw-r--r--drivers/md/dm-zoned-metadata.c5
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c12
-rw-r--r--drivers/md/md-bitmap.c8
-rw-r--r--drivers/md/md.c199
-rw-r--r--drivers/md/md.h25
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c19
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c2
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/Kconfig20
-rw-r--r--drivers/media/Makefile6
-rw-r--r--drivers/media/cec/Kconfig4
-rw-r--r--drivers/media/cec/cec-core.c1
-rw-r--r--drivers/media/cec/cec-notifier.c30
-rw-r--r--drivers/media/common/b2c2/Makefile4
-rw-r--r--drivers/media/common/cx2341x.c151
-rw-r--r--drivers/media/common/siano/Kconfig4
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c53
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c6
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c22
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c24
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c10
-rw-r--r--drivers/media/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb-frontends/as102_fe.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2880/Makefile2
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c3
-rw-r--r--drivers/media/dvb-frontends/dib8000.c4
-rw-r--r--drivers/media/dvb-frontends/dib9000.c6
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c30
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c2
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c11
-rw-r--r--drivers/media/dvb-frontends/si2165.c9
-rw-r--r--drivers/media/dvb-frontends/ts2020.c3
-rw-r--r--drivers/media/i2c/Kconfig271
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/cx25840/Kconfig2
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig2
-rw-r--r--drivers/media/i2c/imx214.c10
-rw-r--r--drivers/media/i2c/m5mols/Kconfig2
-rw-r--r--drivers/media/i2c/ov2659.c8
-rw-r--r--drivers/media/i2c/ov6650.c43
-rw-r--r--drivers/media/i2c/ov7670.c32
-rw-r--r--drivers/media/i2c/ov7740.c28
-rw-r--r--drivers/media/i2c/smiapp/Kconfig2
-rw-r--r--drivers/media/i2c/smiapp/Makefile2
-rw-r--r--drivers/media/i2c/st-mipid02.c1033
-rw-r--r--drivers/media/media-dev-allocator.c135
-rw-r--r--drivers/media/media-devnode.c4
-rw-r--r--drivers/media/media-entity.c33
-rw-r--r--drivers/media/media-request.c20
-rw-r--r--drivers/media/mmc/siano/Kconfig2
-rw-r--r--drivers/media/mmc/siano/Makefile3
-rw-r--r--drivers/media/pci/b2c2/Makefile2
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/Makefile5
-rw-r--r--drivers/media/pci/bt8xx/dst.c3
-rw-r--r--drivers/media/pci/bt8xx/dst_common.h2
-rw-r--r--drivers/media/pci/cobalt/Kconfig2
-rw-r--r--drivers/media/pci/cobalt/cobalt-irq.c2
-rw-r--r--drivers/media/pci/cx18/Kconfig4
-rw-r--r--drivers/media/pci/cx18/Makefile4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c2
-rw-r--r--drivers/media/pci/cx23885/Kconfig4
-rw-r--r--drivers/media/pci/cx23885/Makefile4
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c5
-rw-r--r--drivers/media/pci/cx25821/Kconfig4
-rw-r--r--drivers/media/pci/cx88/Kconfig10
-rw-r--r--drivers/media/pci/cx88/Makefile4
-rw-r--r--drivers/media/pci/ddbridge/Kconfig4
-rw-r--r--drivers/media/pci/ddbridge/Makefile4
-rw-r--r--drivers/media/pci/dm1105/Makefile2
-rw-r--r--drivers/media/pci/dt3155/Kconfig2
-rw-r--r--drivers/media/pci/dt3155/dt3155.c8
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c1
-rw-r--r--drivers/media/pci/ivtv/Kconfig10
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c2
-rw-r--r--drivers/media/pci/mantis/Makefile2
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/meye/Kconfig2
-rw-r--r--drivers/media/pci/netup_unidvb/Kconfig2
-rw-r--r--drivers/media/pci/netup_unidvb/Makefile2
-rw-r--r--drivers/media/pci/ngene/Kconfig2
-rw-r--r--drivers/media/pci/ngene/Makefile4
-rw-r--r--drivers/media/pci/pluto2/Makefile2
-rw-r--r--drivers/media/pci/pt1/Makefile4
-rw-r--r--drivers/media/pci/pt3/Makefile4
-rw-r--r--drivers/media/pci/saa7134/Kconfig10
-rw-r--r--drivers/media/pci/saa7134/saa7134-go7007.c2
-rw-r--r--drivers/media/pci/saa7146/Kconfig6
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c5
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c5
-rw-r--r--drivers/media/pci/saa7164/Kconfig2
-rw-r--r--drivers/media/pci/smipcie/Makefile5
-rw-r--r--drivers/media/pci/solo6x10/Kconfig2
-rw-r--r--drivers/media/pci/ttpci/Makefile4
-rw-r--r--drivers/media/pci/tw5864/Kconfig2
-rw-r--r--drivers/media/pci/tw5864/tw5864-video.c4
-rw-r--r--drivers/media/pci/tw68/Kconfig2
-rw-r--r--drivers/media/platform/Kconfig90
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c2
-rw-r--r--drivers/media/platform/aspeed-video.c33
-rw-r--r--drivers/media/platform/atmel/Kconfig2
-rw-r--r--drivers/media/platform/atmel/atmel-isc-regs.h21
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c952
-rw-r--r--drivers/media/platform/coda/coda-bit.c3
-rw-r--r--drivers/media/platform/coda/coda-common.c130
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c1
-rw-r--r--drivers/media/platform/davinci/isif.c9
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c3
-rw-r--r--drivers/media/platform/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/marvell-ccic/Kconfig4
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec-g12a.c779
-rw-r--r--drivers/media/platform/meson/ao-cec.c16
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c75
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h16
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c27
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/Kconfig2
-rw-r--r--drivers/media/platform/omap/omap_vout.c15
-rw-r--r--drivers/media/platform/pxa_camera.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h4
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig3
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c47
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c184
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c2
-rw-r--r--drivers/media/platform/rcar_drif.c8
-rw-r--r--drivers/media/platform/rcar_fdp1.c28
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c16
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c6
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c1
-rw-r--r--drivers/media/platform/sh_veu.c6
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Kconfig2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/Makefile5
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c21
-rw-r--r--drivers/media/platform/sti/delta/delta-ipc.c6
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c11
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c60
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c4
-rw-r--r--drivers/media/platform/tegra-cec/tegra_cec.c16
-rw-r--r--drivers/media/platform/ti-vpe/cal.c12
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c6
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.c121
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.h12
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c431
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.h7
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c762
-rw-r--r--drivers/media/platform/video-mux.c5
-rw-r--r--drivers/media/platform/vim2m.c69
-rw-r--r--drivers/media/platform/vimc/Kconfig2
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c93
-rw-r--r--drivers/media/platform/vimc/vimc-common.c313
-rw-r--r--drivers/media/platform/vimc/vimc-common.h60
-rw-r--r--drivers/media/platform/vimc/vimc-core.c2
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c98
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c78
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c70
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.c40
-rw-r--r--drivers/media/platform/vimc/vimc-streamer.h22
-rw-r--r--drivers/media/platform/vivid/Kconfig6
-rw-r--r--drivers/media/platform/vivid/vivid-core.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c14
-rw-r--r--drivers/media/platform/vsp1/vsp1_brx.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_clu.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c84
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c94
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c3
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgo.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_hgt.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c62
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_uif.c1
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c16
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c83
-rw-r--r--drivers/media/platform/xilinx/Kconfig6
-rw-r--r--drivers/media/radio/Kconfig54
-rw-r--r--drivers/media/radio/Makefile2
-rw-r--r--drivers/media/radio/si470x/Kconfig6
-rw-r--r--drivers/media/radio/si4713/Kconfig6
-rw-r--r--drivers/media/radio/si4713/si4713.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c14
-rw-r--r--drivers/media/rc/Kconfig80
-rw-r--r--drivers/media/rc/bpf-lirc.c6
-rw-r--r--drivers/media/rc/ir-rcmm-decoder.c1
-rw-r--r--drivers/media/rc/keymaps/Kconfig2
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-dvd.c2
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/rc/serial_ir.c9
-rw-r--r--drivers/media/rc/xbox_remote.c6
-rw-r--r--drivers/media/spi/Kconfig2
-rw-r--r--drivers/media/spi/Makefile4
-rw-r--r--drivers/media/usb/airspy/Kconfig2
-rw-r--r--drivers/media/usb/as102/Makefile2
-rw-r--r--drivers/media/usb/au0828/Kconfig8
-rw-r--r--drivers/media/usb/au0828/Makefile4
-rw-r--r--drivers/media/usb/au0828/au0828-core.c196
-rw-r--r--drivers/media/usb/au0828/au0828-video.c20
-rw-r--r--drivers/media/usb/au0828/au0828.h6
-rw-r--r--drivers/media/usb/b2c2/Makefile2
-rw-r--r--drivers/media/usb/cpia2/Kconfig2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c3
-rw-r--r--drivers/media/usb/cx231xx/Kconfig8
-rw-r--r--drivers/media/usb/cx231xx/Makefile5
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c104
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h12
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c18
-rw-r--r--drivers/media/usb/em28xx/Kconfig8
-rw-r--r--drivers/media/usb/em28xx/Makefile4
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/usb/go7007/Kconfig8
-rw-r--r--drivers/media/usb/go7007/Makefile2
-rw-r--r--drivers/media/usb/go7007/go7007-fw.c4
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c16
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/gspca/Kconfig2
-rw-r--r--drivers/media/usb/gspca/gspca.c12
-rw-r--r--drivers/media/usb/hackrf/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/Kconfig2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c10
-rw-r--r--drivers/media/usb/pulse8-cec/Kconfig2
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c4
-rw-r--r--drivers/media/usb/pvrusb2/Kconfig8
-rw-r--r--drivers/media/usb/pvrusb2/Makefile4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.h1
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/pwc/Kconfig4
-rw-r--r--drivers/media/usb/pwc/pwc-ctrl.c17
-rw-r--r--drivers/media/usb/rainshadow-cec/Kconfig2
-rw-r--r--drivers/media/usb/siano/Kconfig2
-rw-r--r--drivers/media/usb/siano/Makefile2
-rw-r--r--drivers/media/usb/stk1160/Kconfig2
-rw-r--r--drivers/media/usb/stkwebcam/Kconfig2
-rw-r--r--drivers/media/usb/tm6000/Kconfig4
-rw-r--r--drivers/media/usb/tm6000/Makefile4
-rw-r--r--drivers/media/usb/ttusb-budget/Makefile2
-rw-r--r--drivers/media/usb/usbtv/Kconfig2
-rw-r--r--drivers/media/usb/usbvision/Kconfig2
-rw-r--r--drivers/media/usb/usbvision/Makefile2
-rw-r--r--drivers/media/usb/usbvision/usbvision-core.c3
-rw-r--r--drivers/media/usb/uvc/Kconfig4
-rw-r--r--drivers/media/usb/zr364xx/Kconfig2
-rw-r--r--drivers/media/v4l2-core/Kconfig8
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c186
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c38
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c19
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c17
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c22
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c6
-rw-r--r--drivers/memory/Makefile7
-rw-r--r--drivers/memory/Makefile.asm-offsets4
-rw-r--r--drivers/memory/atmel-ebi.c37
-rw-r--r--drivers/memory/emif.h4
-rw-r--r--drivers/memory/tegra/mc.c34
-rw-r--r--drivers/memory/tegra/mc.h2
-rw-r--r--drivers/memory/tegra/tegra114.c4
-rw-r--r--drivers/memory/tegra/tegra124-emc.c1
-rw-r--r--drivers/memory/tegra/tegra124.c4
-rw-r--r--drivers/memory/tegra/tegra20.c28
-rw-r--r--drivers/memory/tegra/tegra210.c2
-rw-r--r--drivers/memory/tegra/tegra30.c4
-rw-r--r--drivers/memory/ti-emif-pm.c3
-rw-r--r--drivers/memory/ti-emif-sram-pm.S41
-rw-r--r--drivers/memstick/host/jmb38x_ms.c9
-rw-r--r--drivers/memstick/host/tifm_ms.c5
-rw-r--r--drivers/message/fusion/mptbase.c2
-rw-r--r--drivers/message/fusion/mptctl.c2
-rw-r--r--drivers/message/fusion/mptsas.c36
-rw-r--r--drivers/message/fusion/mptscsih.c4
-rw-r--r--drivers/message/fusion/mptspi.c5
-rw-r--r--drivers/mfd/Kconfig99
-rw-r--r--drivers/mfd/Makefile4
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c211
-rw-r--r--drivers/mfd/atmel-hlcdc.c1
-rw-r--r--drivers/mfd/axp20x-i2c.c2
-rw-r--r--drivers/mfd/axp20x.c16
-rw-r--r--drivers/mfd/cros_ec.c39
-rw-r--r--drivers/mfd/cros_ec_dev.c36
-rw-r--r--drivers/mfd/cs47l35-tables.c2
-rw-r--r--drivers/mfd/cs47l90-tables.c2
-rw-r--r--drivers/mfd/da9063-core.c28
-rw-r--r--drivers/mfd/da9063-i2c.c10
-rw-r--r--drivers/mfd/da9063-irq.c10
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel-lpss.c8
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c10
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c1
-rw-r--r--drivers/mfd/max77620.c87
-rw-r--r--drivers/mfd/max77650.c232
-rw-r--r--drivers/mfd/mfd-core.c13
-rw-r--r--drivers/mfd/omap-usb-tll.c1
-rw-r--r--drivers/mfd/rk808.c9
-rw-r--r--drivers/mfd/sec-core.c59
-rw-r--r--drivers/mfd/sec-irq.c3
-rw-r--r--drivers/mfd/ssbi.c6
-rw-r--r--drivers/mfd/stmfx.c545
-rw-r--r--drivers/mfd/sun6i-prcm.c3
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/t7l66xb.c12
-rw-r--r--drivers/mfd/tc6387xb.c12
-rw-r--r--drivers/mfd/tc6393xb.c23
-rw-r--r--drivers/mfd/ti-lmu.c11
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl6040.c13
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/mfd/wm8400-core.c6
-rw-r--r--drivers/misc/Kconfig40
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/atmel_tclib.c5
-rw-r--r--drivers/misc/cardreader/rts5260.c11
-rw-r--r--drivers/misc/cxl/fault.c2
-rw-r--r--drivers/misc/fastrpc.c235
-rw-r--r--drivers/misc/genwqe/card_debugfs.c4
-rw-r--r--drivers/misc/genwqe/card_utils.c2
-rw-r--r--drivers/misc/habanalabs/Makefile2
-rw-r--r--drivers/misc/habanalabs/command_buffer.c13
-rw-r--r--drivers/misc/habanalabs/command_submission.c22
-rw-r--r--drivers/misc/habanalabs/context.c4
-rw-r--r--drivers/misc/habanalabs/debugfs.c96
-rw-r--r--drivers/misc/habanalabs/device.c93
-rw-r--r--drivers/misc/habanalabs/firmware_if.c322
-rw-r--r--drivers/misc/habanalabs/goya/Makefile3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c1209
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h81
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c628
-rw-r--r--drivers/misc/habanalabs/goya/goya_security.c15
-rw-r--r--drivers/misc/habanalabs/habanalabs.h220
-rw-r--r--drivers/misc/habanalabs/habanalabs_drv.c9
-rw-r--r--drivers/misc/habanalabs/habanalabs_ioctl.c139
-rw-r--r--drivers/misc/habanalabs/hw_queue.c46
-rw-r--r--drivers/misc/habanalabs/include/armcp_if.h2
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h12
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h3
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h306
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h1
-rw-r--r--drivers/misc/habanalabs/include/goya/goya.h4
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_async_events.h9
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_coresight.h199
-rw-r--r--drivers/misc/habanalabs/include/goya/goya_fw_if.h2
-rw-r--r--drivers/misc/habanalabs/include/hl_boot_if.h3
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h16
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h23
-rw-r--r--drivers/misc/habanalabs/irq.c14
-rw-r--r--drivers/misc/habanalabs/memory.c197
-rw-r--r--drivers/misc/habanalabs/mmu.c600
-rw-r--r--drivers/misc/habanalabs/pci.c408
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/kgdbts.c4
-rw-r--r--drivers/misc/mei/Kconfig12
-rw-r--r--drivers/misc/mei/Makefile2
-rw-r--r--drivers/misc/mei/bus-fixup.c14
-rw-r--r--drivers/misc/mei/bus.c13
-rw-r--r--drivers/misc/mei/client.c16
-rw-r--r--drivers/misc/mei/client.h14
-rw-r--r--drivers/misc/mei/debugfs.c15
-rw-r--r--drivers/misc/mei/dma-ring.c2
-rw-r--r--drivers/misc/mei/hbm.c15
-rw-r--r--drivers/misc/mei/hbm.h14
-rw-r--r--drivers/misc/mei/hdcp/Kconfig13
-rw-r--r--drivers/misc/mei/hdcp/Makefile2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h68
-rw-r--r--drivers/misc/mei/hw-me.c17
-rw-r--r--drivers/misc/mei/hw-me.h16
-rw-r--r--drivers/misc/mei/hw-txe-regs.h63
-rw-r--r--drivers/misc/mei/hw-txe.c14
-rw-r--r--drivers/misc/mei/hw-txe.h14
-rw-r--r--drivers/misc/mei/hw.h14
-rw-r--r--drivers/misc/mei/init.c34
-rw-r--r--drivers/misc/mei/interrupt.c15
-rw-r--r--drivers/misc/mei/main.c80
-rw-r--r--drivers/misc/mei/mei-trace.c14
-rw-r--r--drivers/misc/mei/mei-trace.h14
-rw-r--r--drivers/misc/mei/mei_dev.h17
-rw-r--r--drivers/misc/mei/pci-me.c15
-rw-r--r--drivers/misc/mei/pci-txe.c14
-rw-r--r--drivers/misc/mic/Kconfig4
-rw-r--r--drivers/misc/ocxl/Makefile3
-rw-r--r--drivers/misc/ocxl/afu_irq.c102
-rw-r--r--drivers/misc/ocxl/config.c13
-rw-r--r--drivers/misc/ocxl/context.c31
-rw-r--r--drivers/misc/ocxl/core.c574
-rw-r--r--drivers/misc/ocxl/file.c182
-rw-r--r--drivers/misc/ocxl/link.c42
-rw-r--r--drivers/misc/ocxl/mmio.c234
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h94
-rw-r--r--drivers/misc/ocxl/pci.c565
-rw-r--r--drivers/misc/ocxl/sysfs.c54
-rw-r--r--drivers/misc/ocxl/trace.h12
-rw-r--r--drivers/misc/pci_endpoint_test.c18
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c1
-rw-r--r--drivers/misc/tifm_7xx1.c1
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc_ops.c16
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c38
-rw-r--r--drivers/mmc/core/queue.c1
-rw-r--r--drivers/mmc/core/quirks.h2
-rw-r--r--drivers/mmc/core/sd.c8
-rw-r--r--drivers/mmc/host/Kconfig46
-rw-r--r--drivers/mmc/host/alcor.c73
-rw-r--r--drivers/mmc/host/cqhci.c2
-rw-r--r--drivers/mmc/host/cqhci.h4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c419
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c1
-rw-r--r--drivers/mmc/host/mmc_spi.c98
-rw-r--r--drivers/mmc/host/mmci.c82
-rw-r--r--drivers/mmc/host/mmci.h32
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.c17
-rw-r--r--drivers/mmc/host/mmci_qcom_dml.h30
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c18
-rw-r--r--drivers/mmc/host/mtk-sd.c97
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c3
-rw-r--r--drivers/mmc/host/of_mmc_spi.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c4
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c12
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c11
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c6
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c41
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c5
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c47
-rw-r--r--drivers/mmc/host/sdhci-omap.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c98
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c362
-rw-r--r--drivers/mmc/host/sdhci.c177
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/mmc/host/sdhci_am654.c22
-rw-r--r--drivers/mmc/host/tifm_sd.c3
-rw-r--r--drivers/mmc/host/tmio_mmc.h6
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c6
-rw-r--r--drivers/mmc/host/usdhi6rol0.c9
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mtd/Kconfig20
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c266
-rw-r--r--drivers/mtd/bcm63xxpart.c163
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c2
-rw-r--r--drivers/mtd/chips/cfi_util.c6
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/devices/phram.c2
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c1
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/maps/physmap-gemini.c2
-rw-r--r--drivers/mtd/maps/sa1100-flash.c2
-rw-r--r--drivers/mtd/maps/uclinux.c8
-rw-r--r--drivers/mtd/mtdpart.c2
-rw-r--r--drivers/mtd/nand/Kconfig1
-rw-r--r--drivers/mtd/nand/core.c34
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c5
-rw-r--r--drivers/mtd/nand/onenand/onenand_bbt.c3
-rw-r--r--drivers/mtd/nand/raw/Kconfig393
-rw-r--r--drivers/mtd/nand/raw/Makefile9
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c127
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c5
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.h6
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c7
-rw-r--r--drivers/mtd/nand/raw/denali.c1152
-rw-r--r--drivers/mtd/nand/raw/denali.h117
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c98
-rw-r--r--drivers/mtd/nand/raw/denali_pci.c38
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c7
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c201
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c4
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h1
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/Kconfig50
-rw-r--r--drivers/mtd/nand/raw/ingenic/Makefile7
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c166
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.h83
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand.c530
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4725b_bch.c295
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_ecc.c197
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4740_nand.c (renamed from drivers/mtd/nand/raw/jz4740_nand.c)7
-rw-r--r--drivers/mtd/nand/raw/ingenic/jz4780_bch.c (renamed from drivers/mtd/nand/raw/jz4780_bch.c)182
-rw-r--r--drivers/mtd/nand/raw/internals.h3
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.h43
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c415
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c63
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c30
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/raw/nand_amd.c19
-rw-r--r--drivers/mtd/nand/raw/nand_base.c324
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c73
-rw-r--r--drivers/mtd/nand/raw/nand_esmt.c19
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c94
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c27
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c16
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c35
-rw-r--r--drivers/mtd/nand/raw/nand_samsung.c46
-rw-r--r--drivers/mtd/nand/raw/nand_toshiba.c21
-rw-r--r--drivers/mtd/nand/raw/nandsim.c144
-rw-r--r--drivers/mtd/nand/raw/nuc900_nand.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c4
-rw-r--r--drivers/mtd/nand/raw/omap_elm.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c8
-rw-r--r--drivers/mtd/nand/raw/r852.c2
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c13
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c90
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c8
-rw-r--r--drivers/mtd/nand/raw/txx9ndfmc.c1
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c5
-rw-r--r--drivers/mtd/nand/spi/core.c169
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c8
-rw-r--r--drivers/mtd/nand/spi/macronix.c4
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/mtd/nand/spi/toshiba.c12
-rw-r--r--drivers/mtd/nand/spi/winbond.c4
-rw-r--r--drivers/mtd/parsers/Kconfig27
-rw-r--r--drivers/mtd/parsers/Makefile2
-rw-r--r--drivers/mtd/parsers/afs.c410
-rw-r--r--drivers/mtd/parsers/parser_imagetag.c222
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c1
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c8
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c10
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c16
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/ipddp.c6
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/bonding/bond_options.c7
-rw-r--r--drivers/net/dsa/Kconfig4
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c34
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c9
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c6
-rw-r--r--drivers/net/dsa/lantiq_gswip.c812
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c20
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c217
-rw-r--r--drivers/net/dsa/mv88e6060.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c287
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c158
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.h59
-rw-r--r--drivers/net/dsa/sja1105/Kconfig17
-rw-r--r--drivers/net/dsa/sja1105/Makefile9
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h159
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c601
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c532
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.h43
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c419
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1679
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c591
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c987
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h253
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c3
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c12
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c130
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c41
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c121
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c48
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c188
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c56
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h37
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c36
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c1
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c71
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c327
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h263
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c25
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c13
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c85
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn66xx_device.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c1
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c10
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/Makefile2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h25
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c97
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h49
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c953
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c207
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1017
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c910
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h71
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c148
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c30
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c2
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c170
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h22
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c355
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c481
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c413
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c58
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c171
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h110
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c335
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c1392
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c551
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c366
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c768
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c824
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c524
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c711
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c720
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h68
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c839
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c456
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c19
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h52
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c704
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h138
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c253
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c32
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c280
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c403
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c474
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c388
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c29
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c541
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c236
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h33
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h83
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c203
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c155
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h103
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c117
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c618
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c366
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c133
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c131
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h8
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c32
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c17
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c838
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c29
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c3
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c9
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c1
-rw-r--r--drivers/net/ethernet/sfc/falcon/io.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c4
-rw-r--r--drivers/net/ethernet/sfc/io.h2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c12
-rw-r--r--drivers/net/ethernet/silan/sc92031.c15
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig19
-rw-r--r--drivers/net/ethernet/ti/Makefile11
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c9
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1544
-rw-r--r--drivers/net/ethernet/ti/cpsw.h9
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h12
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c719
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c132
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h429
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.c328
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.h73
-rw-r--r--drivers/net/ethernet/ti/cpts.c14
-rw-r--r--drivers/net/ethernet/ti/cpts.h14
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c37
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c32
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c45
-rw-r--r--drivers/net/ethernet/ti/netcp.h10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_xgbepcsr.c9
-rw-r--r--drivers/net/ethernet/via/via-rhine.c3
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c8
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c15
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h26
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c531
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c53
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c44
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c14
-rw-r--r--drivers/net/geneve.c3
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c10
-rw-r--r--drivers/net/ieee802154/ca8210.c1
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c31
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/loopback.c14
-rw-r--r--drivers/net/macsec.c78
-rw-r--r--drivers/net/macvlan.c52
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/Makefile6
-rw-r--r--drivers/net/netdevsim/bpf.c107
-rw-r--r--drivers/net/netdevsim/bus.c341
-rw-r--r--drivers/net/netdevsim/dev.c447
-rw-r--r--drivers/net/netdevsim/devlink.c295
-rw-r--r--drivers/net/netdevsim/fib.c102
-rw-r--r--drivers/net/netdevsim/ipsec.c3
-rw-r--r--drivers/net/netdevsim/netdev.c428
-rw-r--r--drivers/net/netdevsim/netdevsim.h145
-rw-r--r--drivers/net/netdevsim/sdev.c69
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia_main.c526
-rw-r--r--drivers/net/phy/asix.c2
-rw-r--r--drivers/net/phy/at803x.c32
-rw-r--r--drivers/net/phy/bcm-cygnus.c149
-rw-r--r--drivers/net/phy/bcm-phy-lib.c52
-rw-r--r--drivers/net/phy/bcm-phy-lib.h20
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c82
-rw-r--r--drivers/net/phy/broadcom.c34
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c8
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/intel-xway.c20
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c148
-rw-r--r--drivers/net/phy/marvell10g.c15
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c7
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c380
-rw-r--r--drivers/net/phy/mdio_bus.c33
-rw-r--r--drivers/net/phy/mdio_device.c13
-rw-r--r--drivers/net/phy/meson-gxl.c19
-rw-r--r--drivers/net/phy/micrel.c72
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/mscc.c479
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy-c45.c37
-rw-r--r--drivers/net/phy/phy-core.c272
-rw-r--r--drivers/net/phy/phy.c47
-rw-r--r--drivers/net/phy/phy_device.c208
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c106
-rw-r--r--drivers/net/phy/rockchip.c33
-rw-r--r--drivers/net/phy/smsc.c12
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/ppp/ppp_mppe.c1
-rw-r--r--drivers/net/sb1000.c9
-rw-r--r--drivers/net/team/team.c34
-rw-r--r--drivers/net/thunderbolt.c3
-rw-r--r--drivers/net/tun.c37
-rw-r--r--drivers/net/usb/aqc111.c31
-rw-r--r--drivers/net/usb/cdc_mbim.c1
-rw-r--r--drivers/net/usb/ipheth.c60
-rw-r--r--drivers/net/usb/qmi_wwan.c75
-rw-r--r--drivers/net/usb/r8152.c53
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/veth.c14
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vrf.c12
-rw-r--r--drivers/net/vxlan.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wimax/i2400m/control.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c39
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c38
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c78
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c35
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c74
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h47
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h91
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c7
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/ilt.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c20
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/pio.h1
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c4
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c36
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c58
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c605
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c10
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c1
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c75
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c205
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c98
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c775
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h300
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c499
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1655
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h520
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c286
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c148
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h25
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c86
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c91
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/debug.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c31
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h87
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c117
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h19
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c628
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c124
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c28
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c22
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/realtek/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig54
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile22
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c637
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h52
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c633
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h222
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h211
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c965
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h35
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c481
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h1104
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h237
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c1727
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h134
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c166
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h421
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c391
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h67
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1594
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h170
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.c20783
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1890
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c11753
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c151
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h41
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c120
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.h39
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c367
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h89
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c72
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h34
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c199
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c232
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c129
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c96
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h63
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h44
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h26
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h5
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/st/cw1200/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/xen-netback/common.h18
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/mei_phy.c18
-rw-r--r--drivers/nfc/microread/mei.c17
-rw-r--r--drivers/nfc/pn533/pn533.c2
-rw-r--r--drivers/nfc/pn544/mei.c15
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c1
-rw-r--r--drivers/nfc/st21nfca/dep.c2
-rw-r--r--drivers/nfc/st95hf/core.c4
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c7
-rw-r--r--drivers/ntb/test/ntb_perf.c3
-rw-r--r--drivers/nvdimm/bus.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c2
-rw-r--r--drivers/nvdimm/label.c29
-rw-r--r--drivers/nvdimm/namespace_devs.c15
-rw-r--r--drivers/nvdimm/nd.h4
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvme/host/core.c125
-rw-r--r--drivers/nvme/host/fabrics.c5
-rw-r--r--drivers/nvme/host/fc.c16
-rw-r--r--drivers/nvme/host/lightnvm.c1
-rw-r--r--drivers/nvme/host/multipath.c12
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/pci.c304
-rw-r--r--drivers/nvme/host/rdma.c44
-rw-r--r--drivers/nvme/host/tcp.c21
-rw-r--r--drivers/nvme/host/trace.h1
-rw-r--r--drivers/nvme/target/Kconfig1
-rw-r--r--drivers/nvme/target/configfs.c4
-rw-r--r--drivers/nvme/target/core.c38
-rw-r--r--drivers/nvme/target/discovery.c9
-rw-r--r--drivers/nvme/target/fabrics-cmd.c16
-rw-r--r--drivers/nvme/target/fc.c9
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c6
-rw-r--r--drivers/nvme/target/io-cmd-file.c7
-rw-r--r--drivers/nvme/target/loop.c22
-rw-r--r--drivers/nvme/target/nvmet.h4
-rw-r--r--drivers/nvme/target/rdma.c21
-rw-r--r--drivers/nvme/target/tcp.c38
-rw-r--r--drivers/nvmem/Kconfig24
-rw-r--r--drivers/nvmem/Makefile5
-rw-r--r--drivers/nvmem/core.c316
-rw-r--r--drivers/nvmem/imx-iim.c4
-rw-r--r--drivers/nvmem/imx-ocotp.c11
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/nvmem/nvmem-sysfs.c256
-rw-r--r--drivers/nvmem/nvmem.h62
-rw-r--r--drivers/nvmem/stm32-romem.c202
-rw-r--r--drivers/nvmem/sunxi_sid.c115
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c10
-rw-r--r--drivers/of/address.c40
-rw-r--r--drivers/of/base.c5
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_net.c40
-rw-r--r--drivers/of/of_reserved_mem.c22
-rw-r--r--drivers/of/property.c2
-rw-r--r--drivers/of/unittest.c13
-rw-r--r--drivers/opp/core.c54
-rw-r--r--drivers/parisc/led.c3
-rw-r--r--drivers/parport/ieee1284.c2
-rw-r--r--drivers/parport/parport_cs.c5
-rw-r--r--drivers/parport/parport_ip32.c18
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/bus.c5
-rw-r--r--drivers/pci/controller/Kconfig1
-rw-r--r--drivers/pci/controller/dwc/Kconfig29
-rw-r--r--drivers/pci/controller/dwc/Makefile1
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c3
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c144
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c926
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape-ep.c2
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c93
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c55
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c157
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c64
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h26
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c11
-rw-r--r--drivers/pci/controller/pci-aardvark.c13
-rw-r--r--drivers/pci/controller/pci-host-generic.c2
-rw-r--r--drivers/pci/controller/pci-hyperv.c23
-rw-r--r--drivers/pci/controller/pci-tegra.c37
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/controller/pcie-iproc.c98
-rw-r--r--drivers/pci/controller/pcie-mediatek.c51
-rw-r--r--drivers/pci/controller/pcie-rcar.c85
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c1
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c9
-rw-r--r--drivers/pci/controller/pcie-xilinx.c12
-rw-r--r--drivers/pci/controller/vmd.c7
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c10
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c10
-rw-r--r--drivers/pci/hotplug/pciehp.h31
-rw-r--r--drivers/pci/hotplug/pciehp_core.c18
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c17
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c3
-rw-r--r--drivers/pci/msi.c6
-rw-r--r--drivers/pci/of.c58
-rw-r--r--drivers/pci/p2pdma.c38
-rw-r--r--drivers/pci/pci-acpi.c183
-rw-r--r--drivers/pci/pci-driver.c14
-rw-r--r--drivers/pci/pci-stub.c10
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/pci/pci.c363
-rw-r--r--drivers/pci/pci.h2
-rw-r--r--drivers/pci/pcie/Kconfig8
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/aer.c30
-rw-r--r--drivers/pci/pcie/aer_inject.c20
-rw-r--r--drivers/pci/pcie/aspm.c47
-rw-r--r--drivers/pci/pcie/bw_notification.c14
-rw-r--r--drivers/pci/pcie/dpc.c37
-rw-r--r--drivers/pci/pcie/pme.c10
-rw-r--r--drivers/pci/pcie/portdrv.h4
-rw-r--r--drivers/pci/pcie/portdrv_core.c3
-rw-r--r--drivers/pci/probe.c230
-rw-r--r--drivers/pci/proc.c1
-rw-r--r--drivers/pci/quirks.c96
-rw-r--r--drivers/pci/search.c10
-rw-r--r--drivers/pci/setup-bus.c526
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/switch/switchtec.c44
-rw-r--r--drivers/pci/xen-pcifront.c9
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-cci.c21
-rw-r--r--drivers/perf/arm-ccn.c25
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c865
-rw-r--r--drivers/phy/allwinner/Kconfig9
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c4
-rw-r--r--drivers/phy/amlogic/Kconfig22
-rw-r--r--drivers/phy/amlogic/Makefile2
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c341
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c413
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c11
-rw-r--r--drivers/phy/broadcom/Kconfig11
-rw-r--r--drivers/phy/broadcom/Makefile1
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c394
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c12
-rw-r--r--drivers/phy/hisilicon/Kconfig10
-rw-r--r--drivers/phy/hisilicon/Makefile1
-rw-r--r--drivers/phy/hisilicon/phy-hi3660-usb3.c233
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c1
-rw-r--r--drivers/phy/mediatek/Kconfig10
-rw-r--r--drivers/phy/mediatek/Makefile1
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c10
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c245
-rw-r--r--drivers/phy/motorola/Kconfig2
-rw-r--r--drivers/phy/mscc/phy-ocelot-serdes.c240
-rw-r--r--drivers/phy/phy-core.c11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c222
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h12
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c11
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-14nm.c25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-qmp-20nm.c25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c59
-rw-r--r--drivers/phy/renesas/Kconfig2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c130
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c232
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c30
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c4
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c10
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c10
-rw-r--r--drivers/phy/tegra/Makefile1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c899
-rw-r--r--drivers/phy/tegra/xusb.c67
-rw-r--r--drivers/phy/tegra/xusb.h35
-rw-r--r--drivers/phy/ti/Kconfig14
-rw-r--r--drivers/phy/ti/Makefile1
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c658
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c362
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c35
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/Kconfig10
-rw-r--r--drivers/pinctrl/cirrus/Makefile2
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c1235
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c25
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx8mq.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-scu.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c66
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h11
-rw-r--r--drivers/pinctrl/mediatek/Kconfig7
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8183.c50
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8516.c362
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c49
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h11
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt8516.h1182
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c21
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c19
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c2
-rw-r--r--drivers/pinctrl/pinctrl-bm1880.c965
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c113
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c2
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c18
-rw-r--r--drivers/pinctrl/pinctrl-st.c15
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c819
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c1
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig204
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile15
-rw-r--r--drivers/pinctrl/sh-pfc/core.c130
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c8
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-emev2.c67
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c64
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c56
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77470.c136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c101
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c235
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c132
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c156
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c134
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c127
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c222
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c201
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c225
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77965.c294
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c123
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77980.c135
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77990.c214
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c120
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c152
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c232
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c252
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c52
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c144
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c220
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c200
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c204
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c140
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c244
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c136
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c80
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c32
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h68
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c105
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h14
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp157.c1089
-rw-r--r--drivers/pinctrl/sunxi/Kconfig57
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c96
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h18
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c1
-rw-r--r--drivers/platform/chrome/Kconfig24
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c76
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c21
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c258
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c80
-rw-r--r--drivers/platform/chrome/cros_ec_trace.c124
-rw-r--r--drivers/platform/chrome/cros_ec_trace.h51
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c262
-rw-r--r--drivers/platform/chrome/wilco_ec/debugfs.c89
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c53
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/mellanox/Kconfig12
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo-regs.h63
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c1281
-rw-r--r--drivers/platform/x86/Kconfig11
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/alienware-wmi.c19
-rw-r--r--drivers/platform/x86/asus-wmi.c37
-rw-r--r--drivers/platform/x86/dell-laptop.c6
-rw-r--r--drivers/platform/x86/dell-rbtn.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c321
-rw-r--r--drivers/platform/x86/intel_mrfld_pwrbtn.c107
-rw-r--r--drivers/platform/x86/intel_pmc_core.c172
-rw-r--r--drivers/platform/x86/intel_pmc_core.h7
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c46
-rw-r--r--drivers/platform/x86/intel_punit_ipc.c8
-rw-r--r--drivers/platform/x86/mlx-platform.c228
-rw-r--r--drivers/platform/x86/sony-laptop.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c146
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c51
-rw-r--r--drivers/pnp/quirks.c2
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c25
-rw-r--r--drivers/power/reset/syscon-reboot.c19
-rw-r--r--drivers/power/supply/Kconfig36
-rw-r--r--drivers/power/supply/Makefile5
-rw-r--r--drivers/power/supply/ab8500_bmdata.c1
-rw-r--r--drivers/power/supply/axp20x_usb_power.c179
-rw-r--r--drivers/power/supply/axp288_charger.c4
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c20
-rw-r--r--drivers/power/supply/bq27xxx_battery.c3
-rw-r--r--drivers/power/supply/charger-manager.c3
-rw-r--r--drivers/power/supply/cpcap-battery.c45
-rw-r--r--drivers/power/supply/cpcap-charger.c5
-rw-r--r--drivers/power/supply/gpio-charger.c57
-rw-r--r--drivers/power/supply/ingenic-battery.c184
-rw-r--r--drivers/power/supply/lt3651-charger.c (renamed from drivers/power/supply/ltc3651-charger.c)123
-rw-r--r--drivers/power/supply/max14656_charger_detector.c27
-rw-r--r--drivers/power/supply/max77650-charger.c368
-rw-r--r--drivers/power/supply/olpc_battery.c171
-rw-r--r--drivers/power/supply/power_supply_core.c38
-rw-r--r--drivers/power/supply/power_supply_sysfs.c12
-rw-r--r--drivers/power/supply/ucs1002_power.c646
-rw-r--r--drivers/pps/clients/pps-gpio.c153
-rw-r--r--drivers/ptp/ptp_qoriq.c3
-rw-r--r--drivers/pwm/Kconfig16
-rw-r--r--drivers/pwm/Makefile1
-rw-r--r--drivers/pwm/core.c11
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c2
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-ep93xx.c2
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c44
-rw-r--r--drivers/pwm/pwm-img.c2
-rw-r--r--drivers/pwm/pwm-imx-tpm.c449
-rw-r--r--drivers/pwm/pwm-imx27.c4
-rw-r--r--drivers/pwm/pwm-meson.c66
-rw-r--r--drivers/pwm/pwm-pca9685.c1
-rw-r--r--drivers/pwm/pwm-samsung.c5
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/sysfs.c16
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c4
-rw-r--r--drivers/rapidio/rio_cm.c8
-rw-r--r--drivers/ras/cec.c4
-rw-r--r--drivers/regulator/88pm800.c18
-rw-r--r--drivers/regulator/88pm8607.c43
-rw-r--r--drivers/regulator/Kconfig11
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/ab3100.c45
-rw-r--r--drivers/regulator/ab8500-ext.c49
-rw-r--r--drivers/regulator/ab8500.c20
-rw-r--r--drivers/regulator/act8865-regulator.c147
-rw-r--r--drivers/regulator/anatop-regulator.c63
-rw-r--r--drivers/regulator/arizona-ldo1.c19
-rw-r--r--drivers/regulator/arizona-micsupp.c19
-rw-r--r--drivers/regulator/as3711-regulator.c37
-rw-r--r--drivers/regulator/as3722-regulator.c287
-rw-r--r--drivers/regulator/axp20x-regulator.c23
-rw-r--r--drivers/regulator/bcm590xx-regulator.c105
-rw-r--r--drivers/regulator/bd718x7-regulator.c4
-rw-r--r--drivers/regulator/core.c30
-rw-r--r--drivers/regulator/cpcap-regulator.c15
-rw-r--r--drivers/regulator/da903x.c16
-rw-r--r--drivers/regulator/da9052-regulator.c55
-rw-r--r--drivers/regulator/da9055-regulator.c89
-rw-r--r--drivers/regulator/da9062-regulator.c146
-rw-r--r--drivers/regulator/da9063-regulator.c134
-rw-r--r--drivers/regulator/da9210-regulator.c23
-rw-r--r--drivers/regulator/da9210-regulator.h17
-rw-r--r--drivers/regulator/da9211-regulator.c24
-rw-r--r--drivers/regulator/da9211-regulator.h11
-rw-r--r--drivers/regulator/db8500-prcmu.c143
-rw-r--r--drivers/regulator/dbx500-prcmu.h4
-rw-r--r--drivers/regulator/fan53555.c60
-rw-r--r--drivers/regulator/gpio-regulator.c22
-rw-r--r--drivers/regulator/hi6421-regulator.c232
-rw-r--r--drivers/regulator/hi6421v530-regulator.c26
-rw-r--r--drivers/regulator/hi655x-regulator.c37
-rw-r--r--drivers/regulator/lm363x-regulator.c8
-rw-r--r--drivers/regulator/lp8755.c15
-rw-r--r--drivers/regulator/lp87565-regulator.c49
-rw-r--r--drivers/regulator/ltc3589.c269
-rw-r--r--drivers/regulator/ltc3676.c10
-rw-r--r--drivers/regulator/max14577-regulator.c55
-rw-r--r--drivers/regulator/max77620-regulator.c2
-rw-r--r--drivers/regulator/max77650-regulator.c2
-rw-r--r--drivers/regulator/max8925-regulator.c76
-rw-r--r--drivers/regulator/max8998.c300
-rw-r--r--drivers/regulator/mcp16502.c67
-rw-r--r--drivers/regulator/mt6311-regulator.c17
-rw-r--r--drivers/regulator/mt6311-regulator.h10
-rw-r--r--drivers/regulator/mt6323-regulator.c32
-rw-r--r--drivers/regulator/mt6380-regulator.c25
-rw-r--r--drivers/regulator/mt6397-regulator.c33
-rw-r--r--drivers/regulator/of_regulator.c5
-rw-r--r--drivers/regulator/palmas-regulator.c12
-rw-r--r--drivers/regulator/pv88060-regulator.c22
-rw-r--r--drivers/regulator/pv88060-regulator.h11
-rw-r--r--drivers/regulator/pv88080-regulator.c22
-rw-r--r--drivers/regulator/pv88080-regulator.h11
-rw-r--r--drivers/regulator/pv88090-regulator.c22
-rw-r--r--drivers/regulator/pv88090-regulator.h11
-rw-r--r--drivers/regulator/rc5t583-regulator.c25
-rw-r--r--drivers/regulator/rn5t618-regulator.c8
-rw-r--r--drivers/regulator/s2mpa01.c41
-rw-r--r--drivers/regulator/sc2731-regulator.c2
-rw-r--r--drivers/regulator/sky81452-regulator.c26
-rw-r--r--drivers/regulator/stm32-pwr.c186
-rw-r--r--drivers/regulator/sy8106a-regulator.c40
-rw-r--r--drivers/regulator/tps6507x-regulator.c113
-rw-r--r--drivers/regulator/tps65086-regulator.c4
-rw-r--r--drivers/regulator/tps65132-regulator.c29
-rw-r--r--drivers/regulator/tps65217-regulator.c9
-rw-r--r--drivers/regulator/tps65218-regulator.c56
-rw-r--r--drivers/regulator/tps6524x-regulator.c11
-rw-r--r--drivers/regulator/tps80031-regulator.c48
-rw-r--r--drivers/regulator/twl-regulator.c6
-rw-r--r--drivers/regulator/vctrl-regulator.c4
-rw-r--r--drivers/regulator/vexpress-regulator.c72
-rw-r--r--drivers/regulator/wm831x-dcdc.c23
-rw-r--r--drivers/regulator/wm831x-isink.c66
-rw-r--r--drivers/regulator/wm831x-ldo.c21
-rw-r--r--drivers/regulator/wm8350-regulator.c102
-rw-r--r--drivers/regulator/wm8400-regulator.c39
-rw-r--r--drivers/regulator/wm8994-regulator.c19
-rw-r--r--drivers/reset/core.c180
-rw-r--r--drivers/reset/reset-zynqmp.c8
-rw-r--r--drivers/rtc/Kconfig19
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/class.c21
-rw-r--r--drivers/rtc/dev.c20
-rw-r--r--drivers/rtc/hctosys.c10
-rw-r--r--drivers/rtc/interface.c107
-rw-r--r--drivers/rtc/lib.c30
-rw-r--r--drivers/rtc/nvmem.c7
-rw-r--r--drivers/rtc/proc.c21
-rw-r--r--drivers/rtc/rtc-88pm80x.c14
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c189
-rw-r--r--drivers/rtc/rtc-ab3100.c24
-rw-r--r--drivers/rtc/rtc-abx80x.c43
-rw-r--r--drivers/rtc/rtc-aspeed.c136
-rw-r--r--drivers/rtc/rtc-at91sam9.c108
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c37
-rw-r--r--drivers/rtc/rtc-da9063.c27
-rw-r--r--drivers/rtc/rtc-digicolor.c25
-rw-r--r--drivers/rtc/rtc-dm355evm.c24
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1672.c127
-rw-r--r--drivers/rtc/rtc-ds1685.c262
-rw-r--r--drivers/rtc/rtc-ds2404.c73
-rw-r--r--drivers/rtc/rtc-ds3232.c40
-rw-r--r--drivers/rtc/rtc-ep93xx.c70
-rw-r--r--drivers/rtc/rtc-goldfish.c50
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c3
-rw-r--r--drivers/rtc/rtc-imxdi.c50
-rw-r--r--drivers/rtc/rtc-jz4740.c95
-rw-r--r--drivers/rtc/rtc-lpc32xx.c59
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-mc13xxx.c25
-rw-r--r--drivers/rtc/rtc-mt6397.c9
-rw-r--r--drivers/rtc/rtc-mv.c33
-rw-r--r--drivers/rtc/rtc-mxc.c86
-rw-r--r--drivers/rtc/rtc-mxc_v2.c29
-rw-r--r--drivers/rtc/rtc-omap.c81
-rw-r--r--drivers/rtc/rtc-opal.c2
-rw-r--r--drivers/rtc/rtc-pcap.c28
-rw-r--r--drivers/rtc/rtc-pcf85063.c446
-rw-r--r--drivers/rtc/rtc-pcf85363.c20
-rw-r--r--drivers/rtc/rtc-ps3.c30
-rw-r--r--drivers/rtc/rtc-pxa.c3
-rw-r--r--drivers/rtc/rtc-rk808.c6
-rw-r--r--drivers/rtc/rtc-rx6110.c9
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-sh.c30
-rw-r--r--drivers/rtc/rtc-sirfsoc.c2
-rw-r--r--drivers/rtc/rtc-snvs.c48
-rw-r--r--drivers/rtc/rtc-stm32.c9
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c34
-rw-r--r--drivers/rtc/rtc-sun4v.c21
-rw-r--r--drivers/rtc/rtc-tegra.c47
-rw-r--r--drivers/rtc/rtc-test.c11
-rw-r--r--drivers/rtc/rtc-tx4939.c17
-rw-r--r--drivers/rtc/rtc-wilco-ec.c63
-rw-r--r--drivers/rtc/rtc-wm831x.c69
-rw-r--r--drivers/rtc/rtc-wm8350.c12
-rw-r--r--drivers/rtc/rtc-x1205.c7
-rw-r--r--drivers/rtc/rtc-xgene.c61
-rw-r--r--drivers/rtc/rtc-zynqmp.c13
-rw-r--r--drivers/rtc/sysfs.c23
-rw-r--r--drivers/rtc/systohc.c13
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/char/fs3270.c2
-rw-r--r--drivers/s390/char/sclp.c14
-rw-r--r--drivers/s390/char/sclp.h10
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/char/sclp_early_core.c20
-rw-r--r--drivers/s390/char/sclp_sdias.c74
-rw-r--r--drivers/s390/char/tape_char.c2
-rw-r--r--drivers/s390/char/zcore.c24
-rw-r--r--drivers/s390/cio/Makefile3
-rw-r--r--drivers/s390/cio/airq.c41
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/cio.h4
-rw-r--r--drivers/s390/cio/ioasm.c1
-rw-r--r--drivers/s390/cio/qdio.h6
-rw-r--r--drivers/s390/cio/qdio_debug.c9
-rw-r--r--drivers/s390/cio/qdio_main.c230
-rw-r--r--drivers/s390/cio/qdio_setup.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/cio/trace.c1
-rw-r--r--drivers/s390/cio/trace.h23
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c88
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c21
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c81
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c143
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c227
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h48
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c6
-rw-r--r--drivers/s390/net/ism.h29
-rw-r--r--drivers/s390/net/ism_drv.c20
-rw-r--r--drivers/s390/net/qeth_core.h131
-rw-r--r--drivers/s390/net/qeth_core_main.c919
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_ethtool.c17
-rw-r--r--drivers/s390/net/qeth_l2_main.c99
-rw-r--r--drivers/s390/net/qeth_l3_main.c263
-rw-r--r--drivers/s390/net/qeth_l3_sys.c26
-rw-r--r--drivers/s390/virtio/virtio_ccw.c54
-rw-r--r--drivers/sbus/char/oradax.c4
-rw-r--r--drivers/scsi/NCR5380.c11
-rw-r--r--drivers/scsi/aic7xxx/Kconfig.aic7xxx2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c14
-rw-r--r--drivers/scsi/atp870u.c7
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c1
-rw-r--r--drivers/scsi/bfa/bfa.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c6
-rw-r--r--drivers/scsi/bfa/bfa_hw_cb.c2
-rw-r--r--drivers/scsi/bfa/bfa_hw_ct.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c3
-rw-r--r--drivers/scsi/csiostor/csio_isr.c28
-rw-r--r--drivers/scsi/csiostor/csio_wr.c1
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c14
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c22
-rw-r--r--drivers/scsi/dpt_i2o.c12
-rw-r--r--drivers/scsi/esp_scsi.c2
-rw-r--r--drivers/scsi/gdth.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c104
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c49
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c473
-rw-r--r--drivers/scsi/hpsa.c27
-rw-r--r--drivers/scsi/imm.c33
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c83
-rw-r--r--drivers/scsi/libsas/sas_init.c42
-rw-r--r--drivers/scsi/libsas/sas_phy.c7
-rw-r--r--drivers/scsi/libsas/sas_port.c24
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c243
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c486
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c40
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h42
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c137
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c64
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c23
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c350
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h54
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c3
-rw-r--r--drivers/scsi/mpt3sas/Kconfig1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c179
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h22
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c3
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c3
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c6
-rw-r--r--drivers/scsi/osst.c6
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c55
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf.h57
-rw-r--r--drivers/scsi/qedf/qedf_dbg.c32
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c57
-rw-r--r--drivers/scsi/qedf/qedf_els.c82
-rw-r--r--drivers/scsi/qedf/qedf_fip.c95
-rw-r--r--drivers/scsi/qedf/qedf_io.c753
-rw-r--r--drivers/scsi/qedf/qedf_main.c281
-rw-r--r--drivers/scsi/qedf/qedf_version.h6
-rw-r--r--drivers/scsi/qedi/qedi_fw.c5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c12
-rw-r--r--drivers/scsi/qla1280.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c329
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c84
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c192
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h267
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_dsd.h30
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h98
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h107
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c139
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1490
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h69
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c360
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c144
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c336
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c111
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c115
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c607
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c973
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c201
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h33
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c447
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.h76
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c58
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/qlogicfas408.c4
-rw-r--r--drivers/scsi/scsi_error.c1
-rw-r--r--drivers/scsi/scsi_lib.c30
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_transport_fc.c119
-rw-r--r--drivers/scsi/sd.c33
-rw-r--r--drivers/scsi/smartpqi/Makefile1
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c51
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h15
-rw-r--r--drivers/scsi/sr.c1
-rw-r--r--drivers/scsi/st.c3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_nvram.c1
-rw-r--r--drivers/scsi/ufs/Kconfig15
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/cdns-pltfrm.c74
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c113
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c368
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h53
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c216
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h4
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c112
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.h21
-rw-r--r--drivers/scsi/ufs/ufshcd.c41
-rw-r--r--drivers/scsi/ufs/unipro.h2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/sh/intc/userimask.c2
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c4
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c160
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c43
-rw-r--r--drivers/soc/aspeed/Kconfig31
-rw-r--r--drivers/soc/aspeed/Makefile3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c (renamed from drivers/misc/aspeed-lpc-ctrl.c)0
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c (renamed from drivers/misc/aspeed-lpc-snoop.c)0
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c444
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/imx/gpc.c17
-rw-r--r--drivers/soc/imx/gpcv2.c43
-rw-r--r--drivers/soc/imx/soc-imx8.c115
-rw-r--r--drivers/soc/ixp4xx/Kconfig16
-rw-r--r--drivers/soc/ixp4xx/Makefile2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c (renamed from arch/arm/mach-ixp4xx/ixp4xx_npe.c)66
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-qmgr.c (renamed from arch/arm/mach-ixp4xx/ixp4xx_qmgr.c)186
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c111
-rw-r--r--drivers/soc/qcom/cmd-db.c4
-rw-r--r--drivers/soc/qcom/qmi_interface.c7
-rw-r--r--drivers/soc/qcom/rmtfs_mem.c21
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c2
-rw-r--r--drivers/soc/renesas/renesas-soc.c3
-rw-r--r--drivers/soc/rockchip/grf.c2
-rw-r--r--drivers/soc/sunxi/Kconfig1
-rw-r--r--drivers/soc/tegra/pmc.c171
-rw-r--r--drivers/soc/ti/Kconfig11
-rw-r--r--drivers/soc/ti/Makefile1
-rw-r--r--drivers/soc/ti/pm33xx.c273
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c146
-rw-r--r--drivers/soc/xilinx/zynqmp_pm_domains.c18
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c10
-rw-r--r--drivers/soundwire/Kconfig4
-rw-r--r--drivers/soundwire/bus.c152
-rw-r--r--drivers/soundwire/bus.h16
-rw-r--r--drivers/soundwire/bus_type.c4
-rw-r--r--drivers/soundwire/cadence_master.c100
-rw-r--r--drivers/soundwire/cadence_master.h22
-rw-r--r--drivers/soundwire/intel.c138
-rw-r--r--drivers/soundwire/intel.h4
-rw-r--r--drivers/soundwire/intel_init.c15
-rw-r--r--drivers/soundwire/mipi_disco.c122
-rw-r--r--drivers/soundwire/slave.c10
-rw-r--r--drivers/soundwire/stream.c285
-rw-r--r--drivers/spi/spi-mt7621.c7
-rw-r--r--drivers/spi/spi-pxa2xx.c7
-rw-r--r--drivers/spi/spi-rockchip.c1
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c6
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c9
-rw-r--r--drivers/ssb/pci.c1
-rw-r--r--drivers/ssb/pcmcia.c4
-rw-r--r--drivers/staging/Kconfig16
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/android/Kconfig3
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/ion/Kconfig1
-rw-r--r--drivers/staging/android/vsoc.c3
-rw-r--r--drivers/staging/axis-fifo/Kconfig8
-rw-r--r--drivers/staging/axis-fifo/Makefile1
-rw-r--r--drivers/staging/board/Kconfig1
-rw-r--r--drivers/staging/board/Makefile1
-rw-r--r--drivers/staging/clocking-wizard/Kconfig1
-rw-r--r--drivers/staging/clocking-wizard/Makefile1
-rw-r--r--drivers/staging/comedi/Kconfig255
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c32
-rw-r--r--drivers/staging/comedi/drivers.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c2
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.c17
-rw-r--r--drivers/staging/comedi/drivers/comedi_isadma.h3
-rw-r--r--drivers/staging/comedi/drivers/das08.c4
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c2
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c5
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c12
-rw-r--r--drivers/staging/comedi/drivers/mite.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_routing/tools/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c15
-rw-r--r--drivers/staging/comedi/drivers/ni_usb6501.c4
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c1
-rw-r--r--drivers/staging/comedi/drivers/s626.c2
-rw-r--r--drivers/staging/comedi/drivers/tests/ni_routes_test.c2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c2
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c2
-rw-r--r--drivers/staging/comedi/kcomedilib/Makefile1
-rw-r--r--drivers/staging/emxx_udc/Kconfig1
-rw-r--r--drivers/staging/emxx_udc/Makefile1
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c57
-rw-r--r--drivers/staging/erofs/Documentation/filesystems/erofs.txt1
-rw-r--r--drivers/staging/erofs/data.c21
-rw-r--r--drivers/staging/erofs/inode.c18
-rw-r--r--drivers/staging/erofs/internal.h19
-rw-r--r--drivers/staging/erofs/namei.c3
-rw-r--r--drivers/staging/erofs/super.c56
-rw-r--r--drivers/staging/erofs/unzip_pagevec.h6
-rw-r--r--drivers/staging/erofs/unzip_vle.c99
-rw-r--r--drivers/staging/erofs/utils.c4
-rw-r--r--drivers/staging/erofs/xattr.c50
-rw-r--r--drivers/staging/fbtft/Kconfig1
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c4
-rw-r--r--drivers/staging/fbtft/fb_ra8875.c2
-rw-r--r--drivers/staging/fbtft/fb_ssd1306.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1331.c3
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c4
-rw-r--r--drivers/staging/fbtft/fb_watterott.c15
-rw-r--r--drivers/staging/fbtft/fbtft-io.c12
-rw-r--r--drivers/staging/fbtft/fbtft.h1
-rw-r--r--drivers/staging/fbtft/fbtft_device.c2
-rw-r--r--drivers/staging/fbtft/flexfb.c7
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/fieldbus-dev-cdev31
-rw-r--r--drivers/staging/fieldbus/Documentation/ABI/sysfs-class-fieldbus-dev62
-rw-r--r--drivers/staging/fieldbus/Documentation/fieldbus_dev.txt66
-rw-r--r--drivers/staging/fieldbus/Kconfig18
-rw-r--r--drivers/staging/fieldbus/Makefile7
-rw-r--r--drivers/staging/fieldbus/TODO5
-rw-r--r--drivers/staging/fieldbus/anybuss/Kconfig39
-rw-r--r--drivers/staging/fieldbus/anybuss/Makefile10
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-client.h102
-rw-r--r--drivers/staging/fieldbus/anybuss/anybuss-controller.h47
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c399
-rw-r--r--drivers/staging/fieldbus/anybuss/hms-profinet.c228
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c1458
-rw-r--r--drivers/staging/fieldbus/dev_core.c351
-rw-r--r--drivers/staging/fieldbus/fieldbus_dev.h108
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig1
-rw-r--r--drivers/staging/fsl-dpaa2/Makefile1
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c4
-rw-r--r--drivers/staging/fwserial/Kconfig1
-rw-r--r--drivers/staging/fwserial/Makefile1
-rw-r--r--drivers/staging/fwserial/fwserial.c5
-rw-r--r--drivers/staging/gasket/Kconfig1
-rw-r--r--drivers/staging/gasket/Makefile1
-rw-r--r--drivers/staging/gasket/apex_driver.c8
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c6
-rw-r--r--drivers/staging/gasket/gasket_page_table.c9
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c9
-rw-r--r--drivers/staging/gasket/gasket_sysfs.h4
-rw-r--r--drivers/staging/gdm724x/Kconfig1
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c1
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c7
-rw-r--r--drivers/staging/gdm724x/hci_packet.h2
-rw-r--r--drivers/staging/goldfish/Kconfig1
-rw-r--r--drivers/staging/goldfish/Makefile1
-rw-r--r--drivers/staging/greybus/Kconfig1
-rw-r--r--drivers/staging/greybus/audio_codec.h122
-rw-r--r--drivers/staging/greybus/audio_manager.c3
-rw-r--r--drivers/staging/greybus/bundle.c2
-rw-r--r--drivers/staging/greybus/hid.c1
-rw-r--r--drivers/staging/greybus/power_supply.c4
-rw-r--r--drivers/staging/greybus/sdio.c8
-rw-r--r--drivers/staging/gs_fpgaboot/Kconfig2
-rw-r--r--drivers/staging/gs_fpgaboot/Makefile1
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/accel/Kconfig1
-rw-r--r--drivers/staging/iio/accel/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16203.c3
-rw-r--r--drivers/staging/iio/accel/adis16240.c3
-rw-r--r--drivers/staging/iio/adc/Kconfig14
-rw-r--r--drivers/staging/iio/adc/Makefile1
-rw-r--r--drivers/staging/iio/adc/ad7192.c236
-rw-r--r--drivers/staging/iio/adc/ad7192.h12
-rw-r--r--drivers/staging/iio/adc/ad7280a.c112
-rw-r--r--drivers/staging/iio/adc/ad7280a.h3
-rw-r--r--drivers/staging/iio/adc/ad7816.c5
-rw-r--r--drivers/staging/iio/addac/Kconfig1
-rw-r--r--drivers/staging/iio/addac/Makefile1
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c3
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c3
-rw-r--r--drivers/staging/iio/addac/adt7316.c7
-rw-r--r--drivers/staging/iio/addac/adt7316.h3
-rw-r--r--drivers/staging/iio/cdc/Kconfig1
-rw-r--r--drivers/staging/iio/cdc/Makefile1
-rw-r--r--drivers/staging/iio/cdc/ad7150.c3
-rw-r--r--drivers/staging/iio/cdc/ad7746.c3
-rw-r--r--drivers/staging/iio/cdc/ad7746.h3
-rw-r--r--drivers/staging/iio/frequency/Kconfig1
-rw-r--r--drivers/staging/iio/frequency/Makefile1
-rw-r--r--drivers/staging/iio/frequency/ad9832.c54
-rw-r--r--drivers/staging/iio/frequency/ad9832.h4
-rw-r--r--drivers/staging/iio/frequency/ad9834.c5
-rw-r--r--drivers/staging/iio/frequency/ad9834.h3
-rw-r--r--drivers/staging/iio/frequency/dds.h3
-rw-r--r--drivers/staging/iio/impedance-analyzer/Kconfig3
-rw-r--r--drivers/staging/iio/impedance-analyzer/Makefile1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c58
-rw-r--r--drivers/staging/iio/meter/Kconfig1
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c3
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c3
-rw-r--r--drivers/staging/iio/meter/ade7854.c3
-rw-r--r--drivers/staging/iio/resolver/Kconfig1
-rw-r--r--drivers/staging/iio/resolver/Makefile1
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c6
-rw-r--r--drivers/staging/kpc2000/Kconfig57
-rw-r--r--drivers/staging/kpc2000/Makefile6
-rw-r--r--drivers/staging/kpc2000/TODO8
-rw-r--r--drivers/staging/kpc2000/kpc.h23
-rw-r--r--drivers/staging/kpc2000/kpc2000/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc2000/cell_probe.c471
-rw-r--r--drivers/staging/kpc2000/kpc2000/core.c437
-rw-r--r--drivers/staging/kpc2000/kpc2000/dma_common_defs.h43
-rw-r--r--drivers/staging/kpc2000/kpc2000/fileops.c131
-rw-r--r--drivers/staging/kpc2000/kpc2000/kp2000_module.c54
-rw-r--r--drivers/staging/kpc2000/kpc2000/pcie.h112
-rw-r--r--drivers/staging/kpc2000/kpc2000/uapi.h22
-rw-r--r--drivers/staging/kpc2000/kpc_dma/Makefile6
-rw-r--r--drivers/staging/kpc2000/kpc_dma/dma.c264
-rw-r--r--drivers/staging/kpc2000/kpc_dma/fileops.c420
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.c248
-rw-r--r--drivers/staging/kpc2000/kpc_dma/kpc_dma_driver.h220
-rw-r--r--drivers/staging/kpc2000/kpc_dma/uapi.h11
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/fileops.c181
-rw-r--r--drivers/staging/kpc2000/kpc_i2c/i2c_driver.c699
-rw-r--r--drivers/staging/kpc2000/kpc_spi/Makefile4
-rw-r--r--drivers/staging/kpc2000/kpc_spi/spi_driver.c507
-rw-r--r--drivers/staging/kpc2000/kpc_spi/spi_parts.h48
-rw-r--r--drivers/staging/ks7010/Kconfig1
-rw-r--r--drivers/staging/ks7010/Makefile1
-rw-r--r--drivers/staging/ks7010/ks_hostif.c17
-rw-r--r--drivers/staging/media/Kconfig5
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/Kconfig3
-rw-r--r--drivers/staging/media/bcm2048/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/Kconfig3
-rw-r--r--drivers/staging/media/davinci_vpfe/Makefile1
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.c6
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipe_hw.c24
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.c41
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_ipipeif.h2
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_isif.c20
-rw-r--r--drivers/staging/media/davinci_vpfe/dm365_resizer.c6
-rw-r--r--drivers/staging/media/imx/Kconfig5
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c10
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c6
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c8
-rw-r--r--drivers/staging/media/imx/imx-ic.h6
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c17
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c26
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c4
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c34
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c6
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c38
-rw-r--r--drivers/staging/media/imx/imx-media-of.c79
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c6
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c18
-rw-r--r--drivers/staging/media/imx/imx-media.h16
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c10
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c8
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c26
-rw-r--r--drivers/staging/media/ipu3/Kconfig3
-rw-r--r--drivers/staging/media/ipu3/Makefile1
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c40
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c126
-rw-r--r--drivers/staging/media/ipu3/ipu3.c33
-rw-r--r--drivers/staging/media/mt9t031/Kconfig5
-rw-r--r--drivers/staging/media/mt9t031/Makefile1
-rw-r--r--drivers/staging/media/mt9t031/TODO5
-rw-r--r--drivers/staging/media/omap4iss/Kconfig2
-rw-r--r--drivers/staging/media/rockchip/vpu/Kconfig2
-rw-r--r--drivers/staging/media/rockchip/vpu/Makefile1
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c17
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c15
-rw-r--r--drivers/staging/media/soc_camera/Kconfig1
-rw-r--r--drivers/staging/media/soc_camera/TODO4
-rw-r--r--drivers/staging/media/soc_camera/imx074.c6
-rw-r--r--drivers/staging/media/soc_camera/mt9t031.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_camera.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_mediabus.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_mt9v022.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_ov5642.c6
-rw-r--r--drivers/staging/media/soc_camera/soc_ov9740.c6
-rw-r--r--drivers/staging/media/sunxi/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/Makefile1
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/cedrus/Makefile1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c33
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c1
-rw-r--r--drivers/staging/media/tegra-vde/Kconfig1
-rw-r--r--drivers/staging/media/tegra-vde/Makefile1
-rw-r--r--drivers/staging/media/tegra-vde/tegra-vde.c5
-rw-r--r--drivers/staging/media/tegra-vde/uapi.h11
-rw-r--r--drivers/staging/media/zoran/Kconfig75
-rw-r--r--drivers/staging/media/zoran/Makefile7
-rw-r--r--drivers/staging/media/zoran/TODO4
-rw-r--r--drivers/staging/media/zoran/videocodec.c391
-rw-r--r--drivers/staging/media/zoran/videocodec.h349
-rw-r--r--drivers/staging/media/zoran/zoran.h402
-rw-r--r--drivers/staging/media/zoran/zoran_card.c1524
-rw-r--r--drivers/staging/media/zoran/zoran_card.h50
-rw-r--r--drivers/staging/media/zoran/zoran_device.c1619
-rw-r--r--drivers/staging/media/zoran/zoran_device.h91
-rw-r--r--drivers/staging/media/zoran/zoran_driver.c2850
-rw-r--r--drivers/staging/media/zoran/zoran_procfs.c221
-rw-r--r--drivers/staging/media/zoran/zoran_procfs.h32
-rw-r--r--drivers/staging/media/zoran/zr36016.c516
-rw-r--r--drivers/staging/media/zoran/zr36016.h107
-rw-r--r--drivers/staging/media/zoran/zr36050.c896
-rw-r--r--drivers/staging/media/zoran/zr36050.h179
-rw-r--r--drivers/staging/media/zoran/zr36057.h164
-rw-r--r--drivers/staging/media/zoran/zr36060.c1006
-rw-r--r--drivers/staging/media/zoran/zr36060.h216
-rw-r--r--drivers/staging/most/Documentation/ABI/configfs-most.txt204
-rw-r--r--drivers/staging/most/Documentation/driver_usage.txt131
-rw-r--r--drivers/staging/most/Kconfig3
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/cdev/Kconfig1
-rw-r--r--drivers/staging/most/cdev/Makefile1
-rw-r--r--drivers/staging/most/cdev/cdev.c10
-rw-r--r--drivers/staging/most/configfs.c676
-rw-r--r--drivers/staging/most/core.c305
-rw-r--r--drivers/staging/most/core.h20
-rw-r--r--drivers/staging/most/dim2/Kconfig1
-rw-r--r--drivers/staging/most/dim2/Makefile1
-rw-r--r--drivers/staging/most/dim2/errors.h2
-rw-r--r--drivers/staging/most/dim2/hal.h2
-rw-r--r--drivers/staging/most/dim2/reg.h2
-rw-r--r--drivers/staging/most/dim2/sysfs.h2
-rw-r--r--drivers/staging/most/i2c/Kconfig3
-rw-r--r--drivers/staging/most/i2c/Makefile1
-rw-r--r--drivers/staging/most/net/Kconfig3
-rw-r--r--drivers/staging/most/net/Makefile1
-rw-r--r--drivers/staging/most/net/net.c3
-rw-r--r--drivers/staging/most/sound/Kconfig3
-rw-r--r--drivers/staging/most/sound/Makefile1
-rw-r--r--drivers/staging/most/sound/sound.c61
-rw-r--r--drivers/staging/most/usb/Kconfig3
-rw-r--r--drivers/staging/most/usb/Makefile1
-rw-r--r--drivers/staging/most/usb/usb.c2
-rw-r--r--drivers/staging/most/video/Kconfig3
-rw-r--r--drivers/staging/most/video/Makefile1
-rw-r--r--drivers/staging/most/video/video.c3
-rw-r--r--drivers/staging/mt7621-dma/Kconfig1
-rw-r--r--drivers/staging/mt7621-dma/Makefile1
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c7
-rw-r--r--drivers/staging/mt7621-dts/Kconfig1
-rw-r--r--drivers/staging/mt7621-dts/Makefile1
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi23
-rw-r--r--drivers/staging/mt7621-mmc/Kconfig16
-rw-r--r--drivers/staging/mt7621-mmc/Makefile42
-rw-r--r--drivers/staging/mt7621-mmc/TODO8
-rw-r--r--drivers/staging/mt7621-mmc/board.h63
-rw-r--r--drivers/staging/mt7621-mmc/dbg.c304
-rw-r--r--drivers/staging/mt7621-mmc/dbg.h101
-rw-r--r--drivers/staging/mt7621-mmc/mt6575_sd.h488
-rw-r--r--drivers/staging/mt7621-mmc/sd.c1855
-rw-r--r--drivers/staging/mt7621-pci-phy/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci-phy/Makefile1
-rw-r--r--drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.txt44
-rw-r--r--drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c284
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/mt7621-pci/Makefile1
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c2
-rw-r--r--drivers/staging/mt7621-pinctrl/Kconfig1
-rw-r--r--drivers/staging/mt7621-pinctrl/Makefile1
-rw-r--r--drivers/staging/netlogic/Kconfig1
-rw-r--r--drivers/staging/netlogic/Makefile1
-rw-r--r--drivers/staging/netlogic/xlr_net.c11
-rw-r--r--drivers/staging/nvec/Kconfig1
-rw-r--r--drivers/staging/octeon-usb/Kconfig1
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c4
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.h8
-rw-r--r--drivers/staging/octeon/Kconfig1
-rw-r--r--drivers/staging/octeon/TODO9
-rw-r--r--drivers/staging/octeon/ethernet-tx.c20
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/olpc_dcon/Kconfig2
-rw-r--r--drivers/staging/olpc_dcon/Makefile1
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h5
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c7
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c56
-rw-r--r--drivers/staging/pi433/Kconfig1
-rw-r--r--drivers/staging/pi433/Makefile1
-rw-r--r--drivers/staging/pi433/pi433_if.c40
-rw-r--r--drivers/staging/pi433/rf69.c49
-rw-r--r--drivers/staging/ralink-gdma/Kconfig1
-rw-r--r--drivers/staging/ralink-gdma/Makefile1
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c105
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c28
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c70
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_hwconfig.c6
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c10
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h4
-rw-r--r--drivers/staging/rtl8188eu/include/phydm_reg.h (renamed from drivers/staging/rtlwifi/phydm/phydm_features.h)12
-rw-r--r--drivers/staging/rtl8188eu/include/phydm_regdefine11n.h53
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h130
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c1
-rw-r--r--drivers/staging/rtl8192e/Kconfig10
-rw-r--r--drivers/staging/rtl8192e/dot11d.c9
-rw-r--r--drivers/staging/rtl8192e/license339
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c44
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h20
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h16
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c18
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h19
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h17
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BA.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h16
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c16
-rw-r--r--drivers/staging/rtl8192e/rtllib.h6
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c7
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h22
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c20
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c5
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c22
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c19
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.c7
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt.h6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c10
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c11
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c76
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c41
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c40
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c22
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HT.h4
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c114
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c26
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c16
-rw-r--r--drivers/staging/rtl8192u/r8192U_dm.c38
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8712/Kconfig11
-rw-r--r--drivers/staging/rtl8712/drv_types.h6
-rw-r--r--drivers/staging/rtl8712/hal_init.c3
-rw-r--r--drivers/staging/rtl8712/ieee80211.c3
-rw-r--r--drivers/staging/rtl8712/ieee80211.h3
-rw-r--r--drivers/staging/rtl8712/os_intfs.c8
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c6
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h1
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c6
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_io.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_led.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c28
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c12
-rw-r--r--drivers/staging/rtl8712/rtl871x_eeprom.c23
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c18
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c14
-rw-r--r--drivers/staging/rtl8712/rtl871x_mp.c5
-rw-r--r--drivers/staging/rtl8712/rtl871x_pwrctrl.c4
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c12
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c26
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c51
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_debug.c11
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c33
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c79
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c59
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c18
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c11
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c32
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c20
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h20
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_HWConfig.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_dm.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_phycfg.c13
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c8
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c9
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c22
-rw-r--r--drivers/staging/rtl8723bs/include/cmd_osdep.h6
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h10
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types_sdio.h4
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_cmd.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h31
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_pwrctrl.h30
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h86
-rw-r--r--drivers/staging/rtl8723bs/include/wlan_bssdef.h56
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c57
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c22
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c29
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c10
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c30
-rw-r--r--drivers/staging/rtlwifi/Kconfig12
-rw-r--r--drivers/staging/rtlwifi/Makefile70
-rw-r--r--drivers/staging/rtlwifi/TODO11
-rw-r--r--drivers/staging/rtlwifi/base.c2815
-rw-r--r--drivers/staging/rtlwifi/base.h175
-rw-r--r--drivers/staging/rtlwifi/btcoexist/Makefile8
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h74
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c5233
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h433
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c5210
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h487
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c54
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h24
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c1837
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h791
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c517
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h64
-rw-r--r--drivers/staging/rtlwifi/cam.c315
-rw-r--r--drivers/staging/rtlwifi/cam.h39
-rw-r--r--drivers/staging/rtlwifi/core.c1996
-rw-r--r--drivers/staging/rtlwifi/core.h71
-rw-r--r--drivers/staging/rtlwifi/debug.c624
-rw-r--r--drivers/staging/rtlwifi/debug.h223
-rw-r--r--drivers/staging/rtlwifi/efuse.c1329
-rw-r--r--drivers/staging/rtlwifi/efuse.h109
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h41
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h121
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c95
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c552
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h29
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c332
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h33
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c312
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h42
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c173
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h31
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c174
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h34
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c403
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h27
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h160
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c5970
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h385
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c318
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h60
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c963
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h73
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c543
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h62
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c4465
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h310
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c415
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h70
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13396
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h12092
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h111
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h173
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h504
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h104
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h43
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h392
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h1000
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h105
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h1121
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h717
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h37
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h37
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h107
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h122
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h51
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h107
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h112
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h433
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h495
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h1923
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h17
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c1373
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h83
-rw-r--r--drivers/staging/rtlwifi/pci.c2496
-rw-r--r--drivers/staging/rtlwifi/pci.h319
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c954
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h74
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h13
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c1975
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h935
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c189
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h46
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c930
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h108
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c616
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c72
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h290
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c447
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h72
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c332
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h49
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c2888
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h164
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c1521
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h230
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h26
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c118
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h39
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c91
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h53
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c128
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h33
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c1848
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h487
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c307
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h183
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h65
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c217
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h31
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c319
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h35
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c633
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h282
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h602
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h74
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c406
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h56
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c1196
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h258
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h140
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h202
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h119
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c1956
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h43
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c211
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h27
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c4730
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h118
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c340
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c1804
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h73
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c1399
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c157
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h43
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c214
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h19
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h23
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c865
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h56
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h28
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h27
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h25
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h30
-rw-r--r--drivers/staging/rtlwifi/ps.c996
-rw-r--r--drivers/staging/rtlwifi/ps.h39
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h83
-rw-r--r--drivers/staging/rtlwifi/rc.c309
-rw-r--r--drivers/staging/rtlwifi/rc.h38
-rw-r--r--drivers/staging/rtlwifi/regd.c458
-rw-r--r--drivers/staging/rtlwifi/regd.h52
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/Makefile7
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h71
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c964
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h187
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2430
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h55
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c116
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h23
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c2223
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h134
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h1642
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c470
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h21
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c1004
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h154
-rw-r--r--drivers/staging/rtlwifi/stats.c249
-rw-r--r--drivers/staging/rtlwifi/stats.h31
-rw-r--r--drivers/staging/rtlwifi/wifi.h3362
-rw-r--r--drivers/staging/rts5208/Kconfig1
-rw-r--r--drivers/staging/rts5208/Makefile1
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h2
-rw-r--r--drivers/staging/sm750fb/Kconfig1
-rw-r--r--drivers/staging/sm750fb/Makefile1
-rw-r--r--drivers/staging/sm750fb/ddk750.h1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c27
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_display.c10
-rw-r--r--drivers/staging/sm750fb/ddk750_display.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c40
-rw-r--r--drivers/staging/sm750fb/ddk750_power.h2
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c114
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_swi2c.h1
-rw-r--r--drivers/staging/sm750fb/sm750.c7
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c32
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c2
-rw-r--r--drivers/staging/speakup/Kconfig1
-rw-r--r--drivers/staging/speakup/kobjects.c14
-rw-r--r--drivers/staging/speakup/main.c1
-rw-r--r--drivers/staging/speakup/selection.c212
-rw-r--r--drivers/staging/speakup/speakup.h1
-rw-r--r--drivers/staging/speakup/speakup_decpc.c2
-rw-r--r--drivers/staging/speakup/speakup_keypc.c6
-rw-r--r--drivers/staging/speakup/spk_ttyio.c2
-rw-r--r--drivers/staging/unisys/Kconfig1
-rw-r--r--drivers/staging/unisys/Makefile1
-rw-r--r--drivers/staging/unisys/include/iochannel.h2
-rw-r--r--drivers/staging/unisys/visorhba/Kconfig13
-rw-r--r--drivers/staging/unisys/visorhba/Makefile1
-rw-r--r--drivers/staging/unisys/visorinput/Kconfig15
-rw-r--r--drivers/staging/unisys/visorinput/Makefile1
-rw-r--r--drivers/staging/unisys/visornic/Kconfig15
-rw-r--r--drivers/staging/unisys/visornic/Makefile1
-rw-r--r--drivers/staging/vboxvideo/TODO10
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile1
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-ctl.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig1
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c55
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c75
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi.h38
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_cfg.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchi/vchi_common.h176
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c37
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c56
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c50
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c32
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_if.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_pagelist.h34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_shim.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h34
-rw-r--r--drivers/staging/vme/Makefile1
-rw-r--r--drivers/staging/vme/devices/Kconfig1
-rw-r--r--drivers/staging/vme/devices/Makefile1
-rw-r--r--drivers/staging/vt6655/Kconfig1
-rw-r--r--drivers/staging/vt6655/card.h6
-rw-r--r--drivers/staging/vt6655/channel.h2
-rw-r--r--drivers/staging/vt6655/desc.h2
-rw-r--r--drivers/staging/vt6655/device.h2
-rw-r--r--drivers/staging/vt6655/device_cfg.h2
-rw-r--r--drivers/staging/vt6655/dpc.h2
-rw-r--r--drivers/staging/vt6655/key.h2
-rw-r--r--drivers/staging/vt6655/mac.c25
-rw-r--r--drivers/staging/vt6655/mac.h9
-rw-r--r--drivers/staging/vt6655/power.c7
-rw-r--r--drivers/staging/vt6655/power.h2
-rw-r--r--drivers/staging/vt6655/rf.c35
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/vt6655/rxtx.c18
-rw-r--r--drivers/staging/vt6655/rxtx.h2
-rw-r--r--drivers/staging/vt6655/srom.h2
-rw-r--r--drivers/staging/vt6655/tmacro.h2
-rw-r--r--drivers/staging/vt6655/upc.h4
-rw-r--r--drivers/staging/vt6656/Kconfig1
-rw-r--r--drivers/staging/vt6656/baseband.h2
-rw-r--r--drivers/staging/vt6656/card.h2
-rw-r--r--drivers/staging/vt6656/channel.h2
-rw-r--r--drivers/staging/vt6656/desc.h2
-rw-r--r--drivers/staging/vt6656/device.h2
-rw-r--r--drivers/staging/vt6656/dpc.h2
-rw-r--r--drivers/staging/vt6656/firmware.h2
-rw-r--r--drivers/staging/vt6656/int.h2
-rw-r--r--drivers/staging/vt6656/key.h2
-rw-r--r--drivers/staging/vt6656/power.h2
-rw-r--r--drivers/staging/vt6656/rf.h2
-rw-r--r--drivers/staging/vt6656/rxtx.h2
-rw-r--r--drivers/staging/vt6656/usbpipe.h2
-rw-r--r--drivers/staging/vt6656/wcmd.h2
-rw-r--r--drivers/staging/wilc1000/Kconfig2
-rw-r--r--drivers/staging/wilc1000/host_interface.c71
-rw-r--r--drivers/staging/wilc1000/host_interface.h14
-rw-r--r--drivers/staging/wilc1000/wilc_netdev.c2
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c13
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c72
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c8
-rw-r--r--drivers/staging/wlan-ng/Kconfig2
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c2
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile6
-rw-r--r--drivers/target/iscsi/iscsi_target.c118
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c9
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c5
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c163
-rw-r--r--drivers/target/target_core_device.c4
-rw-r--r--drivers/target/target_core_pr.c33
-rw-r--r--drivers/target/target_core_pr.h1
-rw-r--r--drivers/target/target_core_tmr.c2
-rw-r--r--drivers/target/target_core_user.c9
-rw-r--r--drivers/target/target_core_xcopy.c92
-rw-r--r--drivers/tee/optee/core.c80
-rw-r--r--drivers/tee/tee_shm.c2
-rw-r--r--drivers/thermal/Kconfig19
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c8
-rw-r--r--drivers/thermal/cpu_cooling.c30
-rw-r--r--drivers/thermal/intel/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c16
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c13
-rw-r--r--drivers/thermal/of-thermal.c3
-rw-r--r--drivers/thermal/qcom/Kconfig1
-rw-r--r--drivers/thermal/qcom/Makefile4
-rw-r--r--drivers/thermal/qcom/tsens-8916.c105
-rw-r--r--drivers/thermal/qcom/tsens-8960.c84
-rw-r--r--drivers/thermal/qcom/tsens-common.c159
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c (renamed from drivers/thermal/qcom/tsens-8974.c)166
-rw-r--r--drivers/thermal/qcom/tsens-v1.c193
-rw-r--r--drivers/thermal/qcom/tsens-v2.c111
-rw-r--r--drivers/thermal/qcom/tsens.c100
-rw-r--r--drivers/thermal/qcom/tsens.h291
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c51
-rw-r--r--drivers/thermal/rcar_thermal.c11
-rw-r--r--drivers/thermal/rockchip_thermal.c74
-rw-r--r--drivers/thermal/st/Kconfig22
-rw-r--r--drivers/thermal/st/stm_thermal.c6
-rw-r--r--drivers/thermal/tegra/Kconfig4
-rw-r--r--drivers/thermal/tegra/soctherm.c961
-rw-r--r--drivers/thermal/tegra/soctherm.h16
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c7
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c15
-rw-r--r--drivers/thermal/thermal-generic-adc.c9
-rw-r--r--drivers/thermal/thermal_core.c80
-rw-r--r--drivers/thermal/thermal_mmio.c129
-rw-r--r--drivers/thunderbolt/Makefile4
-rw-r--r--drivers/thunderbolt/cap.c85
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/domain.c1
-rw-r--r--drivers/thunderbolt/icm.c65
-rw-r--r--drivers/thunderbolt/lc.c179
-rw-r--r--drivers/thunderbolt/nhi.c3
-rw-r--r--drivers/thunderbolt/path.c420
-rw-r--r--drivers/thunderbolt/property.c16
-rw-r--r--drivers/thunderbolt/switch.c557
-rw-r--r--drivers/thunderbolt/tb.c608
-rw-r--r--drivers/thunderbolt/tb.h227
-rw-r--r--drivers/thunderbolt/tb_msgs.h11
-rw-r--r--drivers/thunderbolt/tb_regs.h50
-rw-r--r--drivers/thunderbolt/tunnel.c691
-rw-r--r--drivers/thunderbolt/tunnel.h78
-rw-r--r--drivers/thunderbolt/tunnel_pci.c226
-rw-r--r--drivers/thunderbolt/tunnel_pci.h31
-rw-r--r--drivers/thunderbolt/xdomain.c170
-rw-r--r--drivers/tty/Kconfig22
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/hvc/Kconfig3
-rw-r--r--drivers/tty/hvc/hvc_riscv_sbi.c1
-rw-r--r--drivers/tty/ipwireless/Makefile1
-rw-r--r--drivers/tty/ipwireless/main.c8
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/rocket.c16
-rw-r--r--drivers/tty/rocket.h1
-rw-r--r--drivers/tty/serdev/Kconfig1
-rw-r--r--drivers/tty/serdev/Makefile1
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_exar.c7
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c5
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c1
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c162
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/Kconfig54
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile1
-rw-r--r--drivers/tty/serial/jsm/Makefile1
-rw-r--r--drivers/tty/serial/men_z135_uart.c1
-rw-r--r--drivers/tty/serial/milbeaut_usio.c614
-rw-r--r--drivers/tty/serial/sc16is7xx.c34
-rw-r--r--drivers/tty/serial/serial_core.c30
-rw-r--r--drivers/tty/serial/serial_txx9.c1
-rw-r--r--drivers/tty/serial/sifive.c1056
-rw-r--r--drivers/tty/serial/sn_console.c1
-rw-r--r--drivers/tty/serial/sprd_serial.c501
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/sysrq.c12
-rw-r--r--drivers/tty/tty_io.c2
-rw-r--r--drivers/tty/tty_jobctrl.c4
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/tty/ttynull.c109
-rw-r--r--drivers/tty/vcc.c1
-rw-r--r--drivers/tty/vt/.gitignore1
-rw-r--r--drivers/tty/vt/consolemap.c8
-rw-r--r--drivers/tty/vt/cp437.uni1
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped1
-rw-r--r--drivers/tty/vt/defkeymap.map1
-rw-r--r--drivers/tty/vt/keyboard.c35
-rw-r--r--drivers/tty/vt/selection.c46
-rw-r--r--drivers/tty/vt/vc_screen.c2
-rw-r--r--drivers/tty/vt/vt.c9
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c4
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c9
-rw-r--r--drivers/usb/class/cdc-acm.c63
-rw-r--r--drivers/usb/class/cdc-acm.h3
-rw-r--r--drivers/usb/common/common.c16
-rw-r--r--drivers/usb/core/driver.c13
-rw-r--r--drivers/usb/core/hcd.c44
-rw-r--r--drivers/usb/core/hub.c25
-rw-r--r--drivers/usb/core/message.c4
-rw-r--r--drivers/usb/dwc2/core.c199
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c12
-rw-r--r--drivers/usb/dwc2/gadget.c101
-rw-r--r--drivers/usb/dwc2/hcd.c326
-rw-r--r--drivers/usb/dwc2/hw.h8
-rw-r--r--drivers/usb/dwc2/params.c35
-rw-r--r--drivers/usb/dwc2/platform.c20
-rw-r--r--drivers/usb/dwc3/Kconfig16
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/core.c18
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/debug.h3
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c604
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c98
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/early/xhci-dbc.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/function/f_ncm.c57
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c6
-rw-r--r--drivers/usb/gadget/function/u_ncm.h3
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c6
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c84
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.h1
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c35
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c167
-rw-r--r--drivers/usb/gadget/udc/net2272.c3
-rw-r--r--drivers/usb/gadget/udc/net2280.c5
-rw-r--r--drivers/usb/host/fhci-sched.c10
-rw-r--r--drivers/usb/host/ohci-da8xx.c42
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-dbgcap.c2
-rw-r--r--drivers/usb/host/xhci-hub.c44
-rw-r--r--drivers/usb/host/xhci-mtk.c19
-rw-r--r--drivers/usb/host/xhci-plat.c39
-rw-r--r--drivers/usb/host/xhci-ring.c24
-rw-r--r--drivers/usb/host/xhci-tegra.c68
-rw-r--r--drivers/usb/host/xhci-trace.h30
-rw-r--r--drivers/usb/host/xhci.c40
-rw-r--r--drivers/usb/host/xhci.h46
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c4
-rw-r--r--drivers/usb/misc/Kconfig1
-rw-r--r--drivers/usb/misc/ldusb.c2
-rw-r--r--drivers/usb/misc/usb251xb.c135
-rw-r--r--drivers/usb/misc/usb3503.c48
-rw-r--r--drivers/usb/misc/yurex.c1
-rw-r--r--drivers/usb/mtu3/Makefile11
-rw-r--r--drivers/usb/mtu3/mtu3.h57
-rw-r--r--drivers/usb/mtu3/mtu3_core.c27
-rw-r--r--drivers/usb/mtu3/mtu3_debug.h50
-rw-r--r--drivers/usb/mtu3/mtu3_debugfs.c539
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c156
-rw-r--r--drivers/usb/mtu3/mtu3_dr.h4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c20
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c4
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h48
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c47
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c118
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.h1
-rw-r--r--drivers/usb/mtu3/mtu3_trace.c23
-rw-r--r--drivers/usb/mtu3/mtu3_trace.h279
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/jz4740.c19
-rw-r--r--drivers/usb/musb/musb_core.c9
-rw-r--r--drivers/usb/musb/musb_dsps.c6
-rw-r--r--drivers/usb/musb/omap2430.c6
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/serial/ark3116.c11
-rw-r--r--drivers/usb/serial/cypress_m8.c49
-rw-r--r--drivers/usb/serial/digi_acceleport.c41
-rw-r--r--drivers/usb/serial/f81232.c198
-rw-r--r--drivers/usb/serial/generic.c76
-rw-r--r--drivers/usb/serial/io_edgeport.c37
-rw-r--r--drivers/usb/serial/iuu_phoenix.c4
-rw-r--r--drivers/usb/serial/oti6858.c5
-rw-r--r--drivers/usb/serial/pl2303.c58
-rw-r--r--drivers/usb/serial/spcp8x5.c5
-rw-r--r--drivers/usb/serial/usb-serial.c11
-rw-r--r--drivers/usb/storage/Makefile2
-rw-r--r--drivers/usb/storage/realtek_cr.c13
-rw-r--r--drivers/usb/storage/scsiglue.c26
-rw-r--r--drivers/usb/storage/sierra_ms.c4
-rw-r--r--drivers/usb/storage/uas.c35
-rw-r--r--drivers/usb/typec/altmodes/Kconfig10
-rw-r--r--drivers/usb/typec/altmodes/Makefile2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c14
-rw-r--r--drivers/usb/typec/altmodes/displayport.h8
-rw-r--r--drivers/usb/typec/altmodes/nvidia.c44
-rw-r--r--drivers/usb/typec/mux/pi3usb30532.c3
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c438
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c10
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c32
-rw-r--r--drivers/usb/typec/tcpm/wcove.c39
-rw-r--r--drivers/usb/typec/ucsi/Makefile15
-rw-r--r--drivers/usb/typec/ucsi/displayport.c315
-rw-r--r--drivers/usb/typec/ucsi/trace.c12
-rw-r--r--drivers/usb/typec/ucsi/trace.h26
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c404
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h118
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c884
-rw-r--r--drivers/usb/usbip/stub_rx.c18
-rw-r--r--drivers/usb/usbip/usbip_common.h7
-rw-r--r--drivers/usb/usbip/vhci_hcd.c9
-rw-r--r--drivers/vfio/Kconfig1
-rw-r--r--drivers/vfio/mdev/mdev_core.c36
-rw-r--r--drivers/vfio/mdev/mdev_private.h2
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c23
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c29
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c5
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c12
-rw-r--r--drivers/vfio/vfio.c59
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c142
-rw-r--r--drivers/vhost/scsi.c1
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/backlight/Kconfig35
-rw-r--r--drivers/video/backlight/lm3630a_bl.c153
-rw-r--r--drivers/video/backlight/pwm_bl.c15
-rw-r--r--drivers/video/fbdev/Kconfig309
-rw-r--r--drivers/video/fbdev/Makefile2
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.c251
-rw-r--r--drivers/video/fbdev/amba-clcd-nomadik.h24
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c567
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.h17
-rw-r--r--drivers/video/fbdev/amba-clcd.c98
-rw-r--r--drivers/video/fbdev/atafb.c67
-rw-r--r--drivers/video/fbdev/atafb_iplan2p2.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p4.c23
-rw-r--r--drivers/video/fbdev/atafb_iplan2p8.c23
-rw-r--r--drivers/video/fbdev/atafb_mfb.c23
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c116
-rw-r--r--drivers/video/fbdev/core/fbcmap.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c2
-rw-r--r--drivers/video/fbdev/core/fbmem.c29
-rw-r--r--drivers/video/fbdev/core/modedb.c3
-rw-r--r--drivers/video/fbdev/da8xx-fb.c13
-rw-r--r--drivers/video/fbdev/efifb.c11
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/gbefb.c24
-rw-r--r--drivers/video/fbdev/hgafb.c2
-rw-r--r--drivers/video/fbdev/imsttfb.c5
-rw-r--r--drivers/video/fbdev/macfb.c29
-rw-r--r--drivers/video/fbdev/mmp/Kconfig6
-rw-r--r--drivers/video/fbdev/mxsfb.c14
-rw-r--r--drivers/video/fbdev/nuc900fb.c2
-rw-r--r--drivers/video/fbdev/omap/Kconfig20
-rw-r--r--drivers/video/fbdev/omap2/omapfb/Kconfig18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/Kconfig40
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/Kconfig6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c6
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/s3c2410fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c6
-rw-r--r--drivers/video/fbdev/sm712.h12
-rw-r--r--drivers/video/fbdev/sm712fb.c243
-rw-r--r--drivers/video/fbdev/udlfb.c114
-rw-r--r--drivers/video/fbdev/uvesafb.c16
-rw-r--r--drivers/video/fbdev/vesafb.c4
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c31
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c31
-rw-r--r--drivers/virtio/virtio_ring.c28
-rw-r--r--drivers/w1/masters/ds2482.c18
-rw-r--r--drivers/w1/masters/ds2490.c6
-rw-r--r--drivers/w1/slaves/w1_ds2408.c76
-rw-r--r--drivers/w1/w1_io.c3
-rw-r--r--drivers/watchdog/Kconfig161
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/acquirewdt.c2
-rw-r--r--drivers/watchdog/advantechwdt.c2
-rw-r--r--drivers/watchdog/alim1535_wdt.c2
-rw-r--r--drivers/watchdog/alim7101_wdt.c4
-rw-r--r--drivers/watchdog/ar7_wdt.c2
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c43
-rw-r--r--drivers/watchdog/asm9260_wdt.c77
-rw-r--r--drivers/watchdog/aspeed_wdt.c25
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c2
-rw-r--r--drivers/watchdog/at91sam9_wdt.c4
-rw-r--r--drivers/watchdog/ath79_wdt.c6
-rw-r--r--drivers/watchdog/atlas7_wdt.c65
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c2
-rw-r--r--drivers/watchdog/bcm7038_wdt.c42
-rw-r--r--drivers/watchdog/bcm_kona_wdt.c18
-rw-r--r--drivers/watchdog/bd70528_wdt.c290
-rw-r--r--drivers/watchdog/cadence_wdt.c90
-rw-r--r--drivers/watchdog/coh901327_wdt.c28
-rw-r--r--drivers/watchdog/cpu5wdt.c2
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/da9052_wdt.c13
-rw-r--r--drivers/watchdog/da9055_wdt.c12
-rw-r--r--drivers/watchdog/da9062_wdt.c20
-rw-r--r--drivers/watchdog/da9063_wdt.c21
-rw-r--r--drivers/watchdog/davinci_wdt.c45
-rw-r--r--drivers/watchdog/digicolor_wdt.c4
-rw-r--r--drivers/watchdog/dw_wdt.c4
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c5
-rw-r--r--drivers/watchdog/ep93xx_wdt.c17
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c20
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c6
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/gpio_wdt.c16
-rw-r--r--drivers/watchdog/hpwdt.c3
-rw-r--r--drivers/watchdog/i6300esb.c9
-rw-r--r--drivers/watchdog/iTCO_wdt.c13
-rw-r--r--drivers/watchdog/ib700wdt.c2
-rw-r--r--drivers/watchdog/ibmasr.c2
-rw-r--r--drivers/watchdog/imgpdc_wdt.c95
-rw-r--r--drivers/watchdog/imx2_wdt.c8
-rw-r--r--drivers/watchdog/imx_sc_wdt.c175
-rw-r--r--drivers/watchdog/indydog.c2
-rw-r--r--drivers/watchdog/intel-mid_wdt.c22
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c22
-rw-r--r--drivers/watchdog/iop_wdt.c2
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c11
-rw-r--r--drivers/watchdog/jz4740_wdt.c17
-rw-r--r--drivers/watchdog/kempld_wdt.c28
-rw-r--r--drivers/watchdog/ks8695_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c4
-rw-r--r--drivers/watchdog/loongson1_wdt.c52
-rw-r--r--drivers/watchdog/lpc18xx_wdt.c47
-rw-r--r--drivers/watchdog/m54xx_wdt.c2
-rw-r--r--drivers/watchdog/machzwd.c5
-rw-r--r--drivers/watchdog/max63xx_wdt.c24
-rw-r--r--drivers/watchdog/max77620_wdt.c23
-rw-r--r--drivers/watchdog/mena21_wdt.c28
-rw-r--r--drivers/watchdog/menf21bmc_wdt.c33
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c49
-rw-r--r--drivers/watchdog/meson_wdt.c19
-rw-r--r--drivers/watchdog/mixcomwd.c2
-rw-r--r--drivers/watchdog/mlx_wdt.c14
-rw-r--r--drivers/watchdog/moxart_wdt.c20
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c20
-rw-r--r--drivers/watchdog/mt7621_wdt.c12
-rw-r--r--drivers/watchdog/mtk_wdt.c37
-rw-r--r--drivers/watchdog/mtx-1_wdt.c2
-rw-r--r--drivers/watchdog/mv64x60_wdt.c2
-rw-r--r--drivers/watchdog/ni903x_wdt.c4
-rw-r--r--drivers/watchdog/nic7018_wdt.c5
-rw-r--r--drivers/watchdog/npcm_wdt.c10
-rw-r--r--drivers/watchdog/nuc900_wdt.c6
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c62
-rw-r--r--drivers/watchdog/omap_wdt.c4
-rw-r--r--drivers/watchdog/orion_wdt.c14
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd.c4
-rw-r--r--drivers/watchdog/pcwd_pci.c4
-rw-r--r--drivers/watchdog/pcwd_usb.c4
-rw-r--r--drivers/watchdog/pic32-dmt.c50
-rw-r--r--drivers/watchdog/pic32-wdt.c62
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c21
-rw-r--r--drivers/watchdog/pnx4008_wdt.c45
-rw-r--r--drivers/watchdog/pnx833x_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c55
-rw-r--r--drivers/watchdog/rc32434_wdt.c2
-rw-r--r--drivers/watchdog/rdc321x_wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c9
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rn5t618_wdt.c9
-rw-r--r--drivers/watchdog/rt2880_wdt.c32
-rw-r--r--drivers/watchdog/rtd119x_wdt.c47
-rw-r--r--drivers/watchdog/rza_wdt.c25
-rw-r--r--drivers/watchdog/s3c2410_wdt.c4
-rw-r--r--drivers/watchdog/sa1100_wdt.c2
-rw-r--r--drivers/watchdog/sama5d4_wdt.c39
-rw-r--r--drivers/watchdog/sb_wdog.c6
-rw-r--r--drivers/watchdog/sbc60xxwdt.c2
-rw-r--r--drivers/watchdog/sbc7240_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c2
-rw-r--r--drivers/watchdog/sbc_epx_c3.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/sbsa_gwdt.c30
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sc520_wdt.c2
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/scx200_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c4
-rw-r--r--drivers/watchdog/sirfsoc_wdt.c28
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c4
-rw-r--r--drivers/watchdog/sprd_wdt.c42
-rw-r--r--drivers/watchdog/st_lpc_wdt.c53
-rw-r--r--drivers/watchdog/stm32_iwdg.c150
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c16
-rw-r--r--drivers/watchdog/stpmic1_wdt.c13
-rw-r--r--drivers/watchdog/sunxi_wdt.c19
-rw-r--r--drivers/watchdog/tangox_wdt.c41
-rw-r--r--drivers/watchdog/tegra_wdt.c30
-rw-r--r--drivers/watchdog/tqmx86_wdt.c14
-rw-r--r--drivers/watchdog/ts4800_wdt.c33
-rw-r--r--drivers/watchdog/ts72xx_wdt.c18
-rw-r--r--drivers/watchdog/twl4030_wdt.c22
-rw-r--r--drivers/watchdog/txx9wdt.c4
-rw-r--r--drivers/watchdog/uniphier_wdt.c2
-rw-r--r--drivers/watchdog/ux500_wdt.c17
-rw-r--r--drivers/watchdog/w83877f_wdt.c2
-rw-r--r--drivers/watchdog/w83977f_wdt.c2
-rw-r--r--drivers/watchdog/wafer5823wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c42
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/watchdog/wdat_wdt.c29
-rw-r--r--drivers/watchdog/wdrtas.c4
-rw-r--r--drivers/watchdog/wdt.c4
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c19
-rw-r--r--drivers/watchdog/xen_wdt.c18
-rw-r--r--drivers/watchdog/ziirave_wdt.c6
-rw-r--r--drivers/watchdog/zx2967_wdt.c37
-rw-r--r--drivers/xen/biomerge.c5
-rw-r--r--drivers/xen/events/events_base.c1
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntdev.c19
-rw-r--r--drivers/xen/privcmd-buf.c8
-rw-r--r--drivers/xen/swiotlb-xen.c196
-rw-r--r--drivers/xen/xen-pciback/xenbus.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/9p/v9fs_vfs.h2
-rw-r--r--fs/9p/vfs_inode.c10
-rw-r--r--fs/9p/vfs_super.c4
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile5
-rw-r--r--fs/adfs/dir_f.c6
-rw-r--r--fs/adfs/super.c10
-rw-r--r--fs/affs/super.c13
-rw-r--r--fs/afs/Makefile1
-rw-r--r--fs/afs/addr_list.c2
-rw-r--r--fs/afs/afs.h16
-rw-r--r--fs/afs/afs_fs.h2
-rw-r--r--fs/afs/callback.c29
-rw-r--r--fs/afs/cell.c187
-rw-r--r--fs/afs/cmservice.c22
-rw-r--r--fs/afs/dir.c528
-rw-r--r--fs/afs/dir_silly.c254
-rw-r--r--fs/afs/dynroot.c5
-rw-r--r--fs/afs/file.c31
-rw-r--r--fs/afs/flock.c658
-rw-r--r--fs/afs/fs_probe.c17
-rw-r--r--fs/afs/fsclient.c962
-rw-r--r--fs/afs/inode.c490
-rw-r--r--fs/afs/internal.h253
-rw-r--r--fs/afs/misc.c9
-rw-r--r--fs/afs/proc.c8
-rw-r--r--fs/afs/protocol_yfs.h6
-rw-r--r--fs/afs/rotate.c47
-rw-r--r--fs/afs/rxrpc.c54
-rw-r--r--fs/afs/security.c19
-rw-r--r--fs/afs/server.c17
-rw-r--r--fs/afs/super.c63
-rw-r--r--fs/afs/vl_list.c20
-rw-r--r--fs/afs/vl_probe.c18
-rw-r--r--fs/afs/vl_rotate.c28
-rw-r--r--fs/afs/vlclient.c82
-rw-r--r--fs/afs/write.c100
-rw-r--r--fs/afs/xattr.c322
-rw-r--r--fs/afs/yfsclient.c987
-rw-r--r--fs/autofs/autofs_i.h1
-rw-r--r--fs/autofs/inode.c2
-rw-r--r--fs/befs/linuxvfs.c12
-rw-r--r--fs/bfs/inode.c10
-rw-r--r--fs/binfmt_elf.c180
-rw-r--r--fs/block_dev.c25
-rw-r--r--fs/btrfs/acl.c6
-rw-r--r--fs/btrfs/backref.c38
-rw-r--r--fs/btrfs/btrfs_inode.h8
-rw-r--r--fs/btrfs/compression.c5
-rw-r--r--fs/btrfs/ctree.c254
-rw-r--r--fs/btrfs/ctree.h79
-rw-r--r--fs/btrfs/delayed-inode.c5
-rw-r--r--fs/btrfs/delayed-ref.c46
-rw-r--r--fs/btrfs/delayed-ref.h122
-rw-r--r--fs/btrfs/dev-replace.c8
-rw-r--r--fs/btrfs/dev-replace.h3
-rw-r--r--fs/btrfs/dir-item.c5
-rw-r--r--fs/btrfs/disk-io.c229
-rw-r--r--fs/btrfs/disk-io.h7
-rw-r--r--fs/btrfs/extent-tree.c651
-rw-r--r--fs/btrfs/extent_io.c366
-rw-r--r--fs/btrfs/extent_io.h89
-rw-r--r--fs/btrfs/extent_map.c38
-rw-r--r--fs/btrfs/file-item.c32
-rw-r--r--fs/btrfs/file.c47
-rw-r--r--fs/btrfs/free-space-cache.c45
-rw-r--r--fs/btrfs/free-space-cache.h18
-rw-r--r--fs/btrfs/free-space-tree.c24
-rw-r--r--fs/btrfs/free-space-tree.h1
-rw-r--r--fs/btrfs/inode-item.c8
-rw-r--r--fs/btrfs/inode.c346
-rw-r--r--fs/btrfs/ioctl.c181
-rw-r--r--fs/btrfs/locking.c157
-rw-r--r--fs/btrfs/ordered-data.c14
-rw-r--r--fs/btrfs/ordered-data.h3
-rw-r--r--fs/btrfs/print-tree.c2
-rw-r--r--fs/btrfs/props.c242
-rw-r--r--fs/btrfs/props.h7
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/ref-verify.c69
-rw-r--r--fs/btrfs/ref-verify.h10
-rw-r--r--fs/btrfs/relocation.c123
-rw-r--r--fs/btrfs/root-tree.c13
-rw-r--r--fs/btrfs/scrub.c6
-rw-r--r--fs/btrfs/send.c114
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/btrfs/tests/btrfs-tests.c17
-rw-r--r--fs/btrfs/tests/btrfs-tests.h17
-rw-r--r--fs/btrfs/tests/extent-buffer-tests.c8
-rw-r--r--fs/btrfs/tests/extent-io-tests.c35
-rw-r--r--fs/btrfs/tests/extent-map-tests.c213
-rw-r--r--fs/btrfs/tests/free-space-tests.c11
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c18
-rw-r--r--fs/btrfs/tests/inode-tests.c34
-rw-r--r--fs/btrfs/tests/qgroup-tests.c20
-rw-r--r--fs/btrfs/transaction.c64
-rw-r--r--fs/btrfs/transaction.h4
-rw-r--r--fs/btrfs/tree-checker.c513
-rw-r--r--fs/btrfs/tree-checker.h11
-rw-r--r--fs/btrfs/tree-log.c289
-rw-r--r--fs/btrfs/tree-log.h10
-rw-r--r--fs/btrfs/uuid-tree.c6
-rw-r--r--fs/btrfs/volumes.c469
-rw-r--r--fs/btrfs/volumes.h40
-rw-r--r--fs/btrfs/xattr.c65
-rw-r--r--fs/btrfs/xattr.h7
-rw-r--r--fs/btrfs/zstd.c11
-rw-r--r--fs/buffer.c8
-rw-r--r--fs/cachefiles/namei.c1
-rw-r--r--fs/ceph/caps.c93
-rw-r--r--fs/ceph/debugfs.c40
-rw-r--r--fs/ceph/export.c356
-rw-r--r--fs/ceph/file.c3
-rw-r--r--fs/ceph/inode.c90
-rw-r--r--fs/ceph/locks.c13
-rw-r--r--fs/ceph/mds_client.c205
-rw-r--r--fs/ceph/mds_client.h33
-rw-r--r--fs/ceph/mdsmap.c2
-rw-r--r--fs/ceph/quota.c177
-rw-r--r--fs/ceph/super.c8
-rw-r--r--fs/ceph/super.h3
-rw-r--r--fs/char_dev.c78
-rw-r--r--fs/cifs/cifs_debug.c35
-rw-r--r--fs/cifs/cifsfs.c29
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/cifsglob.h29
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c98
-rw-r--r--fs/cifs/connect.c38
-rw-r--r--fs/cifs/dfs_cache.c140
-rw-r--r--fs/cifs/dfs_cache.h5
-rw-r--r--fs/cifs/dns_resolve.c2
-rw-r--r--fs/cifs/file.c5
-rw-r--r--fs/cifs/inode.c37
-rw-r--r--fs/cifs/link.c13
-rw-r--r--fs/cifs/misc.c1
-rw-r--r--fs/cifs/smb1ops.c9
-rw-r--r--fs/cifs/smb2ops.c443
-rw-r--r--fs/cifs/smb2pdu.c93
-rw-r--r--fs/cifs/smb2pdu.h71
-rw-r--r--fs/cifs/smb2status.h3480
-rw-r--r--fs/cifs/smbdirect.c298
-rw-r--r--fs/cifs/smbdirect.h19
-rw-r--r--fs/cifs/smbfsctl.h29
-rw-r--r--fs/cifs/transport.c50
-rw-r--r--fs/coda/inode.c10
-rw-r--r--fs/coda/psdev.c1
-rw-r--r--fs/configfs/dir.c19
-rw-r--r--fs/crypto/bio.c11
-rw-r--r--fs/crypto/crypto.c74
-rw-r--r--fs/crypto/fname.c5
-rw-r--r--fs/crypto/hooks.c68
-rw-r--r--fs/crypto/keyinfo.c26
-rw-r--r--fs/crypto/policy.c6
-rw-r--r--fs/dax.c8
-rw-r--r--fs/dcache.c44
-rw-r--r--fs/debugfs/file.c77
-rw-r--r--fs/debugfs/inode.c12
-rw-r--r--fs/direct-io.c3
-rw-r--r--fs/dlm/netlink.c1
-rw-r--r--fs/ecryptfs/crypto.c1
-rw-r--r--fs/ecryptfs/keystore.c1
-rw-r--r--fs/ecryptfs/super.c5
-rw-r--r--fs/efs/super.c10
-rw-r--r--fs/eventfd.c8
-rw-r--r--fs/exec.c4
-rw-r--r--fs/ext2/inode.c4
-rw-r--r--fs/ext2/super.c10
-rw-r--r--fs/ext4/block_validity.c55
-rw-r--r--fs/ext4/dir.c48
-rw-r--r--fs/ext4/ext4.h108
-rw-r--r--fs/ext4/extents.c17
-rw-r--r--fs/ext4/extents_status.c4
-rw-r--r--fs/ext4/file.c7
-rw-r--r--fs/ext4/fsmap.c2
-rw-r--r--fs/ext4/hash.c34
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inline.c2
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/ioctl.c22
-rw-r--r--fs/ext4/mballoc.c4
-rw-r--r--fs/ext4/namei.c188
-rw-r--r--fs/ext4/page-io.c3
-rw-r--r--fs/ext4/readpage.c8
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c193
-rw-r--r--fs/ext4/sysfs.c6
-rw-r--r--fs/ext4/xattr.c2
-rw-r--r--fs/f2fs/acl.c4
-rw-r--r--fs/f2fs/checkpoint.c108
-rw-r--r--fs/f2fs/data.c294
-rw-r--r--fs/f2fs/f2fs.h130
-rw-r--r--fs/f2fs/file.c76
-rw-r--r--fs/f2fs/gc.c16
-rw-r--r--fs/f2fs/inline.c17
-rw-r--r--fs/f2fs/inode.c12
-rw-r--r--fs/f2fs/namei.c19
-rw-r--r--fs/f2fs/node.c45
-rw-r--r--fs/f2fs/recovery.c37
-rw-r--r--fs/f2fs/segment.c71
-rw-r--r--fs/f2fs/segment.h16
-rw-r--r--fs/f2fs/super.c81
-rw-r--r--fs/f2fs/xattr.c36
-rw-r--r--fs/f2fs/xattr.h2
-rw-r--r--fs/fat/file.c11
-rw-r--r--fs/fat/inode.c10
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/file_table.c9
-rw-r--r--fs/freevxfs/vxfs_super.c11
-rw-r--r--fs/fs-writeback.c11
-rw-r--r--fs/fs_context.c160
-rw-r--r--fs/fsopen.c477
-rw-r--r--fs/fuse/control.c20
-rw-r--r--fs/fuse/cuse.c13
-rw-r--r--fs/fuse/dev.c16
-rw-r--r--fs/fuse/file.c22
-rw-r--r--fs/fuse/fuse_i.h7
-rw-r--r--fs/fuse/inode.c47
-rw-r--r--fs/gfs2/Kconfig1
-rw-r--r--fs/gfs2/aops.c14
-rw-r--r--fs/gfs2/bmap.c135
-rw-r--r--fs/gfs2/bmap.h1
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/glock.c25
-rw-r--r--fs/gfs2/glops.c3
-rw-r--r--fs/gfs2/incore.h9
-rw-r--r--fs/gfs2/log.c47
-rw-r--r--fs/gfs2/log.h5
-rw-r--r--fs/gfs2/lops.c263
-rw-r--r--fs/gfs2/lops.h11
-rw-r--r--fs/gfs2/main.c1
-rw-r--r--fs/gfs2/meta_io.c3
-rw-r--r--fs/gfs2/ops_fstype.c7
-rw-r--r--fs/gfs2/recovery.c135
-rw-r--r--fs/gfs2/recovery.h4
-rw-r--r--fs/gfs2/rgrp.c56
-rw-r--r--fs/gfs2/super.c32
-rw-r--r--fs/gfs2/sys.c8
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/gfs2/trans.h2
-rw-r--r--fs/gfs2/xattr.c6
-rw-r--r--fs/hfs/super.c10
-rw-r--r--fs/hfsplus/super.c13
-rw-r--r--fs/hostfs/hostfs.h2
-rw-r--r--fs/hostfs/hostfs_kern.c10
-rw-r--r--fs/hpfs/super.c10
-rw-r--r--fs/hugetlbfs/inode.c23
-rw-r--r--fs/inode.c69
-rw-r--r--fs/internal.h9
-rw-r--r--fs/io_uring.c514
-rw-r--r--fs/iomap.c109
-rw-r--r--fs/isofs/inode.c10
-rw-r--r--fs/jbd2/checkpoint.c1
-rw-r--r--fs/jbd2/journal.c53
-rw-r--r--fs/jbd2/revoke.c32
-rw-r--r--fs/jbd2/transaction.c8
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/jffs2/super.c10
-rw-r--r--fs/jfs/acl.c3
-rw-r--r--fs/jfs/inode.c13
-rw-r--r--fs/jfs/jfs_incore.h6
-rw-r--r--fs/jfs/jfs_logmgr.c18
-rw-r--r--fs/jfs/jfs_logmgr.h10
-rw-r--r--fs/jfs/jfs_mount.c4
-rw-r--r--fs/jfs/jfs_superblock.h8
-rw-r--r--fs/jfs/jfs_txnmgr.c3
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/jfs/super.c32
-rw-r--r--fs/kernfs/dir.c29
-rw-r--r--fs/kernfs/file.c6
-rw-r--r--fs/kernfs/inode.c162
-rw-r--r--fs/kernfs/kernfs-internal.h8
-rw-r--r--fs/kernfs/symlink.c4
-rw-r--r--fs/libfs.c16
-rw-r--r--fs/lockd/clntlock.c6
-rw-r--r--fs/lockd/clntproc.c4
-rw-r--r--fs/lockd/host.c10
-rw-r--r--fs/lockd/mon.c1
-rw-r--r--fs/lockd/svc.c33
-rw-r--r--fs/locks.c14
-rw-r--r--fs/minix/inode.c10
-rw-r--r--fs/mpage.c3
-rw-r--r--fs/namei.c8
-rw-r--r--fs/namespace.c477
-rw-r--r--fs/nfs/Kconfig1
-rw-r--r--fs/nfs/callback.c9
-rw-r--r--fs/nfs/callback_xdr.c2
-rw-r--r--fs/nfs/client.c17
-rw-r--r--fs/nfs/delegation.c12
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c7
-rw-r--r--fs/nfs/direct.c11
-rw-r--r--fs/nfs/dns_resolve.c2
-rw-r--r--fs/nfs/file.c31
-rw-r--r--fs/nfs/filelayout/filelayout.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c14
-rw-r--r--fs/nfs/inode.c23
-rw-r--r--fs/nfs/internal.h10
-rw-r--r--fs/nfs/mount_clnt.c2
-rw-r--r--fs/nfs/nfs2xdr.c58
-rw-r--r--fs/nfs/nfs3client.c1
-rw-r--r--fs/nfs/nfs3xdr.c142
-rw-r--r--fs/nfs/nfs4_fs.h1
-rw-r--r--fs/nfs/nfs4client.c6
-rw-r--r--fs/nfs/nfs4file.c4
-rw-r--r--fs/nfs/nfs4idmap.c27
-rw-r--r--fs/nfs/nfs4proc.c159
-rw-r--r--fs/nfs/nfs4state.c7
-rw-r--r--fs/nfs/nfs4super.c2
-rw-r--r--fs/nfs/pagelist.c123
-rw-r--r--fs/nfs/pnfs.c4
-rw-r--r--fs/nfs/pnfs.h4
-rw-r--r--fs/nfs/read.c6
-rw-r--r--fs/nfs/super.c34
-rw-r--r--fs/nfs/symlink.c7
-rw-r--r--fs/nfs/write.c70
-rw-r--r--fs/nfsd/export.c18
-rw-r--r--fs/nfsd/netns.h11
-rw-r--r--fs/nfsd/nfs3xdr.c21
-rw-r--r--fs/nfsd/nfs4callback.c14
-rw-r--r--fs/nfsd/nfs4idmap.c8
-rw-r--r--fs/nfsd/nfs4layouts.c2
-rw-r--r--fs/nfsd/nfs4proc.c4
-rw-r--r--fs/nfsd/nfs4recover.c437
-rw-r--r--fs/nfsd/nfs4state.c69
-rw-r--r--fs/nfsd/nfs4xdr.c9
-rw-r--r--fs/nfsd/nfsctl.c42
-rw-r--r--fs/nfsd/nfsd.h17
-rw-r--r--fs/nfsd/nfssvc.c271
-rw-r--r--fs/nfsd/nfsxdr.c17
-rw-r--r--fs/nfsd/state.h8
-rw-r--r--fs/nfsd/vfs.c8
-rw-r--r--fs/nfsd/vfs.h5
-rw-r--r--fs/nilfs2/nilfs.h2
-rw-r--r--fs/nilfs2/super.c11
-rw-r--r--fs/notify/dnotify/dnotify.c2
-rw-r--r--fs/notify/fanotify/Kconfig1
-rw-r--r--fs/notify/fanotify/fanotify.c16
-rw-r--r--fs/notify/fsnotify.c49
-rw-r--r--fs/notify/inotify/Kconfig1
-rw-r--r--fs/notify/inotify/inotify.h2
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c6
-rw-r--r--fs/notify/mark.c17
-rw-r--r--fs/nsfs.c26
-rw-r--r--fs/ntfs/inode.c17
-rw-r--r--fs/ntfs/inode.h2
-rw-r--r--fs/ntfs/super.c2
-rw-r--r--fs/ocfs2/cluster/quorum.c1
-rw-r--r--fs/ocfs2/dir.c20
-rw-r--r--fs/ocfs2/dlm/Makefile3
-rw-r--r--fs/ocfs2/dlmfs/Makefile2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c10
-rw-r--r--fs/ocfs2/export.c30
-rw-r--r--fs/ocfs2/ocfs2_fs.h28
-rw-r--r--fs/ocfs2/super.c22
-rw-r--r--fs/open.c5
-rw-r--r--fs/openpromfs/inode.c10
-rw-r--r--fs/orangefs/acl.c4
-rw-r--r--fs/orangefs/file.c389
-rw-r--r--fs/orangefs/inode.c914
-rw-r--r--fs/orangefs/namei.c40
-rw-r--r--fs/orangefs/orangefs-bufmap.c15
-rw-r--r--fs/orangefs/orangefs-bufmap.h2
-rw-r--r--fs/orangefs/orangefs-debugfs.c4
-rw-r--r--fs/orangefs/orangefs-kernel.h56
-rw-r--r--fs/orangefs/orangefs-mod.c1
-rw-r--r--fs/orangefs/orangefs-sysfs.c22
-rw-r--r--fs/orangefs/orangefs-utils.c179
-rw-r--r--fs/orangefs/super.c43
-rw-r--r--fs/orangefs/waitqueue.c18
-rw-r--r--fs/orangefs/xattr.c106
-rw-r--r--fs/overlayfs/copy_up.c6
-rw-r--r--fs/overlayfs/dir.c2
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/file.c133
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/overlayfs.h2
-rw-r--r--fs/overlayfs/super.c13
-rw-r--r--fs/proc/base.c24
-rw-r--r--fs/proc/inode.c10
-rw-r--r--fs/proc/proc_sysctl.c25
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/qnx4/inode.c12
-rw-r--r--fs/qnx6/inode.c12
-rw-r--r--fs/quota/dquot.c37
-rw-r--r--fs/quota/quota_v1.c2
-rw-r--r--fs/quota/quota_v2.c2
-rw-r--r--fs/read_write.c113
-rw-r--r--fs/reiserfs/journal.c2
-rw-r--r--fs/reiserfs/super.c10
-rw-r--r--fs/reiserfs/xattr.c9
-rw-r--r--fs/romfs/super.c11
-rw-r--r--fs/seq_file.c1
-rw-r--r--fs/signalfd.c1
-rw-r--r--fs/squashfs/super.c11
-rw-r--r--fs/stack.c15
-rw-r--r--fs/super.c5
-rw-r--r--fs/sync.c146
-rw-r--r--fs/sysv/inode.c10
-rw-r--r--fs/sysv/namei.c15
-rw-r--r--fs/sysv/super.c3
-rw-r--r--fs/sysv/sysv.h3
-rw-r--r--fs/ubifs/auth.c39
-rw-r--r--fs/ubifs/debug.c1
-rw-r--r--fs/ubifs/dir.c37
-rw-r--r--fs/ubifs/file.c16
-rw-r--r--fs/ubifs/find.c9
-rw-r--r--fs/ubifs/ioctl.c11
-rw-r--r--fs/ubifs/journal.c72
-rw-r--r--fs/ubifs/misc.h8
-rw-r--r--fs/ubifs/orphan.c208
-rw-r--r--fs/ubifs/replay.c2
-rw-r--r--fs/ubifs/sb.c7
-rw-r--r--fs/ubifs/super.c35
-rw-r--r--fs/ubifs/tnc.c15
-rw-r--r--fs/ubifs/ubifs.h10
-rw-r--r--fs/ubifs/xattr.c71
-rw-r--r--fs/udf/namei.c15
-rw-r--r--fs/udf/super.c15
-rw-r--r--fs/ufs/super.c10
-rw-r--r--fs/ufs/util.h6
-rw-r--r--fs/unicode/.gitignore2
-rw-r--r--fs/unicode/Kconfig13
-rw-r--r--fs/unicode/Makefile38
-rw-r--r--fs/unicode/README.utf8data57
-rw-r--r--fs/unicode/mkutf8data.c3419
-rw-r--r--fs/unicode/utf8-core.c187
-rw-r--r--fs/unicode/utf8-norm.c801
-rw-r--r--fs/unicode/utf8-selftest.c320
-rw-r--r--fs/unicode/utf8data.h_shipped4109
-rw-r--r--fs/unicode/utf8n.h117
-rw-r--r--fs/userfaultfd.c5
-rw-r--r--fs/xfs/Kconfig1
-rw-r--r--fs/xfs/Makefile7
-rw-r--r--fs/xfs/libxfs/xfs_ag.c54
-rw-r--r--fs/xfs/libxfs/xfs_ag.h2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c13
-rw-r--r--fs/xfs/libxfs/xfs_attr.c35
-rw-r--r--fs/xfs/libxfs/xfs_attr.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c17
-rw-r--r--fs/xfs/libxfs/xfs_defer.c14
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c4
-rw-r--r--fs/xfs/libxfs/xfs_fs.h139
-rw-r--r--fs/xfs/libxfs/xfs_health.h190
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h2
-rw-r--r--fs/xfs/libxfs/xfs_sb.c18
-rw-r--r--fs/xfs/libxfs/xfs_sb.h2
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c6
-rw-r--r--fs/xfs/libxfs/xfs_types.c2
-rw-r--r--fs/xfs/libxfs/xfs_types.h2
-rw-r--r--fs/xfs/scrub/agheader.c20
-rw-r--r--fs/xfs/scrub/common.c47
-rw-r--r--fs/xfs/scrub/common.h4
-rw-r--r--fs/xfs/scrub/fscounters.c366
-rw-r--r--fs/xfs/scrub/health.c237
-rw-r--r--fs/xfs/scrub/health.h14
-rw-r--r--fs/xfs/scrub/ialloc.c4
-rw-r--r--fs/xfs/scrub/parent.c2
-rw-r--r--fs/xfs/scrub/quota.c2
-rw-r--r--fs/xfs/scrub/repair.c34
-rw-r--r--fs/xfs/scrub/repair.h5
-rw-r--r--fs/xfs/scrub/scrub.c49
-rw-r--r--fs/xfs/scrub/scrub.h27
-rw-r--r--fs/xfs/scrub/trace.h63
-rw-r--r--fs/xfs/xfs_aops.c138
-rw-r--r--fs/xfs/xfs_aops.h1
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_buf_item.c4
-rw-r--r--fs/xfs/xfs_discard.c3
-rw-r--r--fs/xfs/xfs_dquot.c17
-rw-r--r--fs/xfs/xfs_file.c6
-rw-r--r--fs/xfs/xfs_fsops.c3
-rw-r--r--fs/xfs/xfs_fsops.h2
-rw-r--r--fs/xfs/xfs_health.c392
-rw-r--r--fs/xfs/xfs_icache.c11
-rw-r--r--fs/xfs/xfs_icache.h4
-rw-r--r--fs/xfs/xfs_inode.c31
-rw-r--r--fs/xfs/xfs_inode.h17
-rw-r--r--fs/xfs/xfs_ioctl.c62
-rw-r--r--fs/xfs/xfs_ioctl32.c9
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_log.c3
-rw-r--r--fs/xfs/xfs_log_cil.c21
-rw-r--r--fs/xfs/xfs_log_recover.c10
-rw-r--r--fs/xfs/xfs_mount.c35
-rw-r--r--fs/xfs/xfs_mount.h32
-rw-r--r--fs/xfs/xfs_qm.c3
-rw-r--r--fs/xfs/xfs_qm.h8
-rw-r--r--fs/xfs/xfs_quota.h37
-rw-r--r--fs/xfs/xfs_super.c51
-rw-r--r--fs/xfs/xfs_trace.h76
-rw-r--r--fs/xfs/xfs_trans_dquot.c52
-rw-r--r--include/acpi/acpi_bus.h10
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl.h4
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actypes.h12
-rw-r--r--include/acpi/platform/aclinux.h10
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/asm-generic/hugetlb.h7
-rw-r--r--include/asm-generic/io.h7
-rw-r--r--include/asm-generic/mm_hooks.h1
-rw-r--r--include/asm-generic/mmiowb.h63
-rw-r--r--include/asm-generic/mmiowb_types.h12
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/asm-generic/rwsem.h140
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/asm-generic/segment.h9
-rw-r--r--include/asm-generic/shmparam.h2
-rw-r--r--include/asm-generic/sizes.h2
-rw-r--r--include/asm-generic/syscall.h5
-rw-r--r--include/asm-generic/tlb.h297
-rw-r--r--include/asm-generic/uaccess.h58
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/aes.h8
-rw-r--r--include/crypto/akcipher.h54
-rw-r--r--include/crypto/cryptd.h18
-rw-r--r--include/crypto/des.h43
-rw-r--r--include/crypto/hash.h10
-rw-r--r--include/crypto/internal/simd.h44
-rw-r--r--include/crypto/morus1280_glue.h79
-rw-r--r--include/crypto/morus640_glue.h79
-rw-r--r--include/crypto/public_key.h4
-rw-r--r--include/crypto/streebog.h5
-rw-r--r--include/drm/drm_atomic.h6
-rw-r--r--include/drm/drm_audio_component.h7
-rw-r--r--include/drm/drm_auth.h6
-rw-r--r--include/drm/drm_bridge.h11
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_connector.h136
-rw-r--r--include/drm/drm_crtc.h4
-rw-r--r--include/drm/drm_device.h3
-rw-r--r--include/drm/drm_drv.h16
-rw-r--r--include/drm/drm_dsc.h9
-rw-r--r--include/drm/drm_edid.h6
-rw-r--r--include/drm/drm_fb_helper.h48
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/drm/drm_format_helper.h35
-rw-r--r--include/drm/drm_framebuffer.h1
-rw-r--r--include/drm/drm_gem.h32
-rw-r--r--include/drm/drm_gem_shmem_helper.h159
-rw-r--r--include/drm/drm_hdcp.h7
-rw-r--r--include/drm/drm_legacy.h2
-rw-r--r--include/drm/drm_modes.h17
-rw-r--r--include/drm/drm_modeset_helper_vtables.h7
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_syncobj.h5
-rw-r--r--include/drm/drm_utils.h4
-rw-r--r--include/drm/drm_vma_manager.h12
-rw-r--r--include/drm/drm_writeback.h30
-rw-r--r--include/drm/i915_pciids.h217
-rw-r--r--include/drm/tinydrm/mipi-dbi.h32
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h21
-rw-r--r--include/drm/tinydrm/tinydrm.h75
-rw-r--r--include/drm/ttm/ttm_bo_driver.h2
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h30
-rw-r--r--include/dt-bindings/clock/exynos5410.h3
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h2
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h5
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h1
-rw-r--r--include/dt-bindings/clock/jz4725b-cgu.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h6
-rw-r--r--include/dt-bindings/clock/mt8183-clk.h422
-rw-r--r--include/dt-bindings/clock/mt8516-clk.h211
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h5
-rw-r--r--include/dt-bindings/clock/qcom,turingcc-qcs404.h15
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h7
-rw-r--r--include/dt-bindings/clock/sun5i-ccu.h2
-rw-r--r--include/dt-bindings/clock/xlnx-zynqmp-clk.h (renamed from include/dt-bindings/clock/xlnx,zynqmp-clk.h)26
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h25
-rw-r--r--include/dt-bindings/iio/temperature/thermocouple.h16
-rw-r--r--include/dt-bindings/phy/phy-am654-serdes.h13
-rw-r--r--include/dt-bindings/pinctrl/am33xx.h130
-rw-r--r--include/dt-bindings/pinctrl/omap.h1
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h6
-rw-r--r--include/dt-bindings/power/r8a77965-sysc.h1
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h8
-rw-r--r--include/linux/acpi.h15
-rw-r--r--include/linux/acpi_iort.h8
-rw-r--r--include/linux/alcor_pci.h2
-rw-r--r--include/linux/amba/clcd.h31
-rw-r--r--include/linux/armada-37xx-rwtm-mailbox.h23
-rw-r--r--include/linux/audit.h75
-rw-r--r--include/linux/balloon_compaction.h15
-rw-r--r--include/linux/binfmts.h3
-rw-r--r--include/linux/bio.h20
-rw-r--r--include/linux/bitops.h16
-rw-r--r--include/linux/blk-mq-rdma.h1
-rw-r--r--include/linux/blk-mq.h2
-rw-r--r--include/linux/blk_types.h29
-rw-r--r--include/linux/blkdev.h42
-rw-r--r--include/linux/bpf-cgroup.h21
-rw-r--r--include/linux/bpf.h86
-rw-r--r--include/linux/bpf_types.h3
-rw-r--r--include/linux/bpf_verifier.h32
-rw-r--r--include/linux/bsg-lib.h16
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/bvec.h36
-rw-r--r--include/linux/ceph/ceph_fs.h6
-rw-r--r--include/linux/ceph/messenger.h3
-rw-r--r--include/linux/ceph/osdmap.h13
-rw-r--r--include/linux/cgroup-defs.h33
-rw-r--r--include/linux/cgroup.h43
-rw-r--r--include/linux/clk-provider.h113
-rw-r--r--include/linux/clk.h16
-rw-r--r--include/linux/clk/analogbits-wrpll-cln28hpc.h79
-rw-r--r--include/linux/clk/at91_pmc.h13
-rw-r--r--include/linux/clk/ti.h2
-rw-r--r--include/linux/compiler.h35
-rw-r--r--include/linux/compiler_types.h3
-rw-r--r--include/linux/console.h7
-rw-r--r--include/linux/coresight-pmu.h2
-rw-r--r--include/linux/coresight.h7
-rw-r--r--include/linux/counter.h510
-rw-r--r--include/linux/counter_enum.h45
-rw-r--r--include/linux/cper.h336
-rw-r--r--include/linux/cpu.h46
-rw-r--r--include/linux/cpufreq.h28
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cpumask.h3
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/device.h18
-rw-r--r--include/linux/dma-fence-chain.h81
-rw-r--r--include/linux/dma-fence.h21
-rw-r--r--include/linux/dma-iommu.h24
-rw-r--r--include/linux/dma-mapping.h6
-rw-r--r--include/linux/dma-noncoherent.h6
-rw-r--r--include/linux/dma/idma64.h14
-rw-r--r--include/linux/dmi.h8
-rw-r--r--include/linux/dns_resolver.h3
-rw-r--r--include/linux/dsa/8021q.h76
-rw-r--r--include/linux/dsa/sja1105.h40
-rw-r--r--include/linux/dynamic_debug.h11
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/f2fs_fs.h11
-rw-r--r--include/linux/filter.h34
-rw-r--r--include/linux/firmware/imx/sci.h5
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h19
-rw-r--r--include/linux/firmware/trusted_foundations.h (renamed from arch/arm/include/asm/trusted_foundations.h)27
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h14
-rw-r--r--include/linux/fs.h20
-rw-r--r--include/linux/fs_context.h38
-rw-r--r--include/linux/fscrypt.h85
-rw-r--r--include/linux/fsl/ftm.h88
-rw-r--r--include/linux/fsnotify.h41
-rw-r--r--include/linux/fsnotify_backend.h10
-rw-r--r--include/linux/ftrace.h20
-rw-r--r--include/linux/genhd.h20
-rw-r--r--include/linux/genl_magic_func.h4
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/gpio/driver.h18
-rw-r--r--include/linux/gpio/machine.h26
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hmm.h310
-rw-r--r--include/linux/huge_mm.h6
-rw-r--r--include/linux/hugetlb.h4
-rw-r--r--include/linux/hwmon.h18
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/i2c-algo-bit.h1
-rw-r--r--include/linux/i2c.h44
-rw-r--r--include/linux/ieee80211.h14
-rw-r--r--include/linux/if_bridge.h3
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/consumer.h14
-rw-r--r--include/linux/iio/driver.h1
-rw-r--r--include/linux/iio/frequency/ad9523.h8
-rw-r--r--include/linux/iio/gyro/itg3200.h1
-rw-r--r--include/linux/iio/iio.h4
-rw-r--r--include/linux/iio/imu/adis.h14
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h11
-rw-r--r--include/linux/ima.h2
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/intel-iommu.h13
-rw-r--r--include/linux/intel-ish-client-if.h112
-rw-r--r--include/linux/interrupt.h25
-rw-r--r--include/linux/io-pgtable.h7
-rw-r--r--include/linux/iomap.h22
-rw-r--r--include/linux/iommu.h144
-rw-r--r--include/linux/iova.h16
-rw-r--r--include/linux/ipc_namespace.h1
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h12
-rw-r--r--include/linux/irqchip/irq-ixp4xx.h12
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/jbd2.h9
-rw-r--r--include/linux/jiffies.h1
-rw-r--r--include/linux/jump_label_ratelimit.h64
-rw-r--r--include/linux/kernel.h15
-rw-r--r--include/linux/kernfs.h15
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kthread.h3
-rw-r--r--include/linux/kvm_host.h48
-rw-r--r--include/linux/latencytop.h4
-rw-r--r--include/linux/lightnvm.h2
-rw-r--r--include/linux/list.h37
-rw-r--r--include/linux/list_bl.h26
-rw-r--r--include/linux/list_sort.h1
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/lockd/bind.h3
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h11
-rw-r--r--include/linux/lsm_hooks.h189
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/math64.h13
-rw-r--r--include/linux/mdev.h35
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/mei_cl_bus.h3
-rw-r--r--include/linux/memblock.h44
-rw-r--r--include/linux/memcontrol.h142
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/memory_hotplug.h42
-rw-r--r--include/linux/mfd/altera-sysmgr.h29
-rw-r--r--include/linux/mfd/cros_ec.h5
-rw-r--r--include/linux/mfd/cros_ec_commands.h91
-rw-r--r--include/linux/mfd/da9063/core.h7
-rw-r--r--include/linux/mfd/da9063/registers.h13
-rw-r--r--include/linux/mfd/max77620.h5
-rw-r--r--include/linux/mfd/max77650.h59
-rw-r--r--include/linux/mfd/palmas.h1
-rw-r--r--include/linux/mfd/stmfx.h123
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h6
-rw-r--r--include/linux/mfd/syscon/atmel-mc.h6
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h5
-rw-r--r--include/linux/mfd/syscon/atmel-st.h6
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h9
-rw-r--r--include/linux/mfd/ti-lmu-register.h44
-rw-r--r--include/linux/mfd/ti-lmu.h1
-rw-r--r--include/linux/mfd/wm831x/regulator.h2
-rw-r--r--include/linux/mfd/wm8400-private.h8
-rw-r--r--include/linux/mlx5/cq.h2
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/doorbell.h39
-rw-r--r--include/linux/mlx5/driver.h14
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/fs.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h185
-rw-r--r--include/linux/mlx5/port.h1
-rw-r--r--include/linux/mlx5/qp.h1
-rw-r--r--include/linux/mlx5/transobj.h3
-rw-r--r--include/linux/mlx5/vport.h4
-rw-r--r--include/linux/mm.h135
-rw-r--r--include/linux/mm_inline.h2
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmc/sdio_func.h12
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmu_notifier.h63
-rw-r--r--include/linux/mmzone.h64
-rw-r--r--include/linux/module.h24
-rw-r--r--include/linux/moduleparam.h12
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/msi.h54
-rw-r--r--include/linux/mtd/bbm.h14
-rw-r--r--include/linux/mtd/nand.h32
-rw-r--r--include/linux/mtd/nand_bch.h6
-rw-r--r--include/linux/mtd/onenand.h3
-rw-r--r--include/linux/mtd/rawnand.h122
-rw-r--r--include/linux/mtd/spinand.h7
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h95
-rw-r--r--include/linux/netfilter.h18
-rw-r--r--include/linux/netfilter/ipset/ip_set.h11
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h11
-rw-r--r--include/linux/netfilter/x_tables.h1
-rw-r--r--include/linux/netfilter_ipv6.h15
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_page.h12
-rw-r--r--include/linux/node.h71
-rw-r--r--include/linux/nvme-fc-driver.h6
-rw-r--r--include/linux/nvme-rdma.h2
-rw-r--r--include/linux/nvme.h4
-rw-r--r--include/linux/nvmem-consumer.h7
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/oid_registry.h18
-rw-r--r--include/linux/overflow.h20
-rw-r--r--include/linux/packing.h49
-rw-r--r--include/linux/pagemap.h26
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h2
-rw-r--r--include/linux/pci-epf.h3
-rw-r--r--include/linux/pci.h43
-rw-r--r--include/linux/pci_hotplug.h66
-rw-r--r--include/linux/percpu.h12
-rw-r--r--include/linux/perf_event.h21
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/platform_data/ads7828.h2
-rw-r--r--include/linux/platform_data/ds620.h2
-rw-r--r--include/linux/platform_data/elm.h2
-rw-r--r--include/linux/platform_data/eth-ep93xx.h10
-rw-r--r--include/linux/platform_data/gpio-omap.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h4
-rw-r--r--include/linux/platform_data/lm3630a_bl.h4
-rw-r--r--include/linux/platform_data/macb.h9
-rw-r--r--include/linux/platform_data/max197.h2
-rw-r--r--include/linux/platform_data/mv88e6xxx.h1
-rw-r--r--include/linux/platform_data/ntc_thermistor.h2
-rw-r--r--include/linux/platform_data/pca954x.h48
-rw-r--r--include/linux/platform_data/pm33xx.h5
-rw-r--r--include/linux/platform_data/ti-sysc.h9
-rw-r--r--include/linux/platform_data/timer-ixp4xx.h11
-rw-r--r--include/linux/platform_data/wilco-ec.h22
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h1
-rw-r--r--include/linux/platform_data/xilinx-ll-temac.h32
-rw-r--r--include/linux/platform_data/xtalk-bridge.h22
-rw-r--r--include/linux/plist.h4
-rw-r--r--include/linux/pm_domain.h26
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/poll.h4
-rw-r--r--include/linux/power_supply.h11
-rw-r--r--include/linux/pps-gpio.h5
-rw-r--r--include/linux/printk.h6
-rw-r--r--include/linux/property.h18
-rw-r--r--include/linux/psi.h9
-rw-r--r--include/linux/psi_types.h105
-rw-r--r--include/linux/psp-sev.h3
-rw-r--r--include/linux/pwm.h5
-rw-r--r--include/linux/qcom-geni-se.h2
-rw-r--r--include/linux/qed/qed_if.h2
-rw-r--r--include/linux/random.h3
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/rcuwait.h2
-rw-r--r--include/linux/reboot.h2
-rw-r--r--include/linux/regulator/consumer.h5
-rw-r--r--include/linux/reservation.h3
-rw-r--r--include/linux/reset.h115
-rw-r--r--include/linux/rhashtable-types.h2
-rw-r--r--include/linux/rhashtable.h358
-rw-r--r--include/linux/rtc.h6
-rw-r--r--include/linux/rtc/ds1685.h2
-rw-r--r--include/linux/rtc/rtc-omap.h7
-rw-r--r--include/linux/rwsem-spinlock.h47
-rw-r--r--include/linux/rwsem.h37
-rw-r--r--include/linux/scatterlist.h10
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/sched/jobctl.h2
-rw-r--r--include/linux/sched/signal.h51
-rw-r--r--include/linux/sched/task.h1
-rw-r--r--include/linux/sched/topology.h4
-rw-r--r--include/linux/sched/user.h7
-rw-r--r--include/linux/security.h16
-rw-r--r--include/linux/sed-opal.h10
-rw-r--r--include/linux/selection.h7
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/set_memory.h11
-rw-r--r--include/linux/siphash.h5
-rw-r--r--include/linux/skbuff.h48
-rw-r--r--include/linux/slab_def.h3
-rw-r--r--include/linux/smpboot.h2
-rw-r--r--include/linux/soc/cirrus/ep93xx.h37
-rw-r--r--include/linux/soc/ixp4xx/npe.h (renamed from arch/arm/mach-ixp4xx/include/mach/npe.h)2
-rw-r--r--include/linux/soc/ixp4xx/qmgr.h91
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h23
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h124
-rw-r--r--include/linux/soundwire/sdw.h16
-rw-r--r--include/linux/soundwire/sdw_intel.h6
-rw-r--r--include/linux/soundwire/sdw_registers.h5
-rw-r--r--include/linux/soundwire/sdw_type.h6
-rw-r--r--include/linux/spinlock.h11
-rw-r--r--include/linux/srcu.h36
-rw-r--r--include/linux/stackdepot.h8
-rw-r--r--include/linux/stacktrace.h81
-rw-r--r--include/linux/string.h4
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/sched.h20
-rw-r--r--include/linux/sunrpc/svc.h33
-rw-r--r--include/linux/sunrpc/svc_xprt.h4
-rw-r--r--include/linux/sunrpc/svcsock.h3
-rw-r--r--include/linux/sunrpc/xprt.h6
-rw-r--r--include/linux/suspend.h3
-rw-r--r--include/linux/switchtec.h2
-rw-r--r--include/linux/syscalls.h9
-rw-r--r--include/linux/thermal.h19
-rw-r--r--include/linux/thunderbolt.h8
-rw-r--r--include/linux/ti-emif-sram.h3
-rw-r--r--include/linux/tick.h13
-rw-r--r--include/linux/time64.h21
-rw-r--r--include/linux/tracepoint-defs.h1
-rw-r--r--include/linux/tracepoint.h15
-rw-r--r--include/linux/types.h5
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/uio.h2
-rw-r--r--include/linux/unicode.h30
-rw-r--r--include/linux/uprobes.h5
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/hcd.h6
-rw-r--r--include/linux/usb/serial.h8
-rw-r--r--include/linux/usb/tcpm.h13
-rw-r--r--include/linux/usb/typec_dp.h5
-rw-r--r--include/linux/userfaultfd_k.h2
-rw-r--r--include/linux/virtio.h17
-rw-r--r--include/linux/vmalloc.h21
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--include/linux/vmw_vmci_defs.h35
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/linux/wait_bit.h13
-rw-r--r--include/media/cec-notifier.h19
-rw-r--r--include/media/davinci/vpbe.h2
-rw-r--r--include/media/dvb-usb-ids.h1
-rw-r--r--include/media/fwht-ctrls.h31
-rw-r--r--include/media/media-dev-allocator.h63
-rw-r--r--include/media/media-entity.h24
-rw-r--r--include/media/media-request.h4
-rw-r--r--include/media/rc-map.h4
-rw-r--r--include/media/v4l2-common.h33
-rw-r--r--include/media/v4l2-ctrls.h11
-rw-r--r--include/media/v4l2-subdev.h15
-rw-r--r--include/media/videobuf2-core.h25
-rw-r--r--include/media/vsp1.h19
-rw-r--r--include/misc/ocxl.h359
-rw-r--r--include/net/addrconf.h55
-rw-r--r--include/net/af_rxrpc.h3
-rw-r--r--include/net/arp.h8
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/bpf_sk_storage.h13
-rw-r--r--include/net/cfg80211.h104
-rw-r--r--include/net/compat.h3
-rw-r--r--include/net/devlink.h534
-rw-r--r--include/net/dsa.h208
-rw-r--r--include/net/dst.h11
-rw-r--r--include/net/fib_notifier.h3
-rw-r--r--include/net/flow_dissector.h7
-rw-r--r--include/net/flow_offload.h23
-rw-r--r--include/net/fq_impl.h18
-rw-r--r--include/net/genetlink.h34
-rw-r--r--include/net/geneve.h2
-rw-r--r--include/net/ife.h1
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_fib.h62
-rw-r--r--include/net/ip6_route.h20
-rw-r--r--include/net/ip_fib.h124
-rw-r--r--include/net/ip_vs.h5
-rw-r--r--include/net/ipv6_frag.h1
-rw-r--r--include/net/ipv6_stubs.h68
-rw-r--r--include/net/lwtunnel.h7
-rw-r--r--include/net/mac80211.h30
-rw-r--r--include/net/ndisc.h40
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h15
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h11
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h2
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h24
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h15
-rw-r--r--include/net/netfilter/nf_nat.h7
-rw-r--r--include/net/netfilter/nf_nat_masquerade.h19
-rw-r--r--include/net/netfilter/nf_queue.h3
-rw-r--r--include/net/netfilter/nf_tables.h24
-rw-r--r--include/net/netlink.h372
-rw-r--r--include/net/netns/conntrack.h6
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h5
-rw-r--r--include/net/pkt_cls.h33
-rw-r--r--include/net/psample.h1
-rw-r--r--include/net/request_sock.h10
-rw-r--r--include/net/route.h43
-rw-r--r--include/net/rtnh.h (renamed from include/net/nexthop.h)4
-rw-r--r--include/net/sch_generic.h100
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/ulpqueue.h2
-rw-r--r--include/net/sock.h27
-rw-r--r--include/net/tc_act/tc_ife.h3
-rw-r--r--include/net/tc_act/tc_police.h70
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/tls.h36
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/udp_tunnel.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xfrm.h138
-rw-r--r--include/rdma/ib_cache.h4
-rw-r--r--include/rdma/ib_mad.h4
-rw-r--r--include/rdma/ib_smi.h2
-rw-r--r--include/rdma/ib_umem.h12
-rw-r--r--include/rdma/ib_umem_odp.h1
-rw-r--r--include/rdma/ib_verbs.h430
-rw-r--r--include/rdma/iw_cm.h25
-rw-r--r--include/rdma/opa_port_info.h2
-rw-r--r--include/rdma/opa_smi.h4
-rw-r--r--include/rdma/rdma_vt.h78
-rw-r--r--include/rdma/rdmavt_qp.h89
-rw-r--r--include/rdma/uverbs_std_types.h42
-rw-r--r--include/rdma/uverbs_types.h18
-rw-r--r--include/scsi/libsas.h13
-rw-r--r--include/scsi/osd_attributes.h398
-rw-r--r--include/scsi/osd_protocol.h676
-rw-r--r--include/scsi/osd_sec.h45
-rw-r--r--include/scsi/osd_sense.h263
-rw-r--r--include/scsi/osd_types.h45
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_transport_fc.h18
-rw-r--r--include/soc/at91/atmel-sfr.h34
-rw-r--r--include/soc/at91/atmel_tcb.h (renamed from include/linux/atmel_tc.h)4
-rw-r--r--include/soc/rockchip/rk3399_grf.h21
-rw-r--r--include/soc/rockchip/rockchip_sip.h1
-rw-r--r--include/sound/core.h16
-rw-r--r--include/sound/da7219.h8
-rw-r--r--include/sound/hdaudio.h10
-rw-r--r--include/sound/memalloc.h4
-rw-r--r--include/sound/seq_kernel.h3
-rw-r--r--include/sound/simple_card_utils.h238
-rw-r--r--include/sound/sof.h100
-rw-r--r--include/sound/sof/control.h158
-rw-r--r--include/sound/sof/dai-intel.h178
-rw-r--r--include/sound/sof/dai.h75
-rw-r--r--include/sound/sof/header.h158
-rw-r--r--include/sound/sof/info.h118
-rw-r--r--include/sound/sof/pm.h48
-rw-r--r--include/sound/sof/stream.h148
-rw-r--r--include/sound/sof/topology.h256
-rw-r--r--include/sound/sof/trace.h67
-rw-r--r--include/sound/sof/xtensa.h44
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_base.h4
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/bpf_probe.h27
-rw-r--r--include/trace/define_trace.h8
-rw-r--r--include/trace/events/afs.h385
-rw-r--r--include/trace/events/bpf_test_run.h50
-rw-r--r--include/trace/events/btrfs.h245
-rw-r--r--include/trace/events/cgroup.h55
-rw-r--r--include/trace/events/compaction.h10
-rw-r--r--include/trace/events/cpuhp.h4
-rw-r--r--include/trace/events/devfreq.h40
-rw-r--r--include/trace/events/f2fs.h57
-rw-r--r--include/trace/events/fib.h44
-rw-r--r--include/trace/events/fib6.h16
-rw-r--r--include/trace/events/gpio.h4
-rw-r--r--include/trace/events/ib_mad.h390
-rw-r--r--include/trace/events/ib_umad.h126
-rw-r--r--include/trace/events/mlxsw.h2
-rw-r--r--include/trace/events/nbd.h107
-rw-r--r--include/trace/events/net.h23
-rw-r--r--include/trace/events/preemptirq.h2
-rw-r--r--include/trace/events/random.h13
-rw-r--r--include/trace/events/rcu.h85
-rw-r--r--include/trace/events/rpcrdma.h27
-rw-r--r--include/trace/events/sched.h21
-rw-r--r--include/trace/events/sunrpc.h10
-rw-r--r--include/trace/events/timer.h17
-rw-r--r--include/trace/events/vmscan.h102
-rw-r--r--include/trace/events/workqueue.h4
-rw-r--r--include/trace/events/writeback.h16
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/uapi/asm-generic/sockios.h4
-rw-r--r--include/uapi/asm-generic/unistd.h14
-rw-r--r--include/uapi/drm/amdgpu_drm.h43
-rw-r--r--include/uapi/drm/drm.h37
-rw-r--r--include/uapi/drm/drm_fourcc.h51
-rw-r--r--include/uapi/drm/drm_mode.h4
-rw-r--r--include/uapi/drm/i915_drm.h254
-rw-r--r--include/uapi/drm/lima_drm.h169
-rw-r--r--include/uapi/drm/msm_drm.h14
-rw-r--r--include/uapi/drm/panfrost_drm.h142
-rw-r--r--include/uapi/linux/aspeed-p2a-ctrl.h62
-rw-r--r--include/uapi/linux/audit.h14
-rw-r--r--include/uapi/linux/batadv_packet.h12
-rw-r--r--include/uapi/linux/batman_adv.h25
-rw-r--r--include/uapi/linux/bpf.h364
-rw-r--r--include/uapi/linux/btf.h32
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/elf-em.h6
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/fcntl.h2
-rw-r--r--include/uapi/linux/fou.h6
-rw-r--r--include/uapi/linux/fs.h3
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/icmpv6.h4
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_tun.h1
-rw-r--r--include/uapi/linux/if_vlan.h9
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/io_uring.h5
-rw-r--r--include/uapi/linux/ip_vs.h11
-rw-r--r--include/uapi/linux/kfd_ioctl.h12
-rw-r--r--include/uapi/linux/kvm.h15
-rw-r--r--include/uapi/linux/lirc.h2
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h25
-rw-r--r--include/uapi/linux/mei.h67
-rw-r--r--include/uapi/linux/mount.h62
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h12
-rw-r--r--include/uapi/linux/nfs_mount.h9
-rw-r--r--include/uapi/linux/nfsd/cld.h1
-rw-r--r--include/uapi/linux/nl80211.h86
-rw-r--r--include/uapi/linux/openvswitch.h46
-rw-r--r--include/uapi/linux/pci_regs.h138
-rw-r--r--include/uapi/linux/pkt_sched.h13
-rw-r--r--include/uapi/linux/psci.h7
-rw-r--r--include/uapi/linux/psp-sev.h18
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sed-opal.h11
-rw-r--r--include/uapi/linux/serial_core.h6
-rw-r--r--include/uapi/linux/sockios.h21
-rw-r--r--include/uapi/linux/switchtec_ioctl.h13
-rw-r--r--include/uapi/linux/tcp.h27
-rw-r--r--include/uapi/linux/tipc.h1
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--include/uapi/linux/tls.h15
-rw-r--r--include/uapi/linux/v4l2-controls.h8
-rw-r--r--include/uapi/linux/vfio.h4
-rw-r--r--include/uapi/linux/vfio_ccw.h12
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/linux/virtio_gpu.h12
-rw-r--r--include/uapi/misc/habanalabs.h161
-rw-r--r--include/uapi/rdma/efa-abi.h101
-rw-r--r--include/uapi/rdma/mlx5-abi.h2
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h2
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h7
-rw-r--r--include/uapi/rdma/rdma_netlink.h31
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h1
-rw-r--r--include/uapi/scsi/fc/fc_els.h33
-rw-r--r--include/uapi/sound/sof/abi.h62
-rw-r--r--include/uapi/sound/sof/eq.h172
-rw-r--r--include/uapi/sound/sof/fw.h78
-rw-r--r--include/uapi/sound/sof/header.h27
-rw-r--r--include/uapi/sound/sof/manifest.h188
-rw-r--r--include/uapi/sound/sof/tokens.h107
-rw-r--r--include/uapi/sound/sof/tone.h21
-rw-r--r--include/uapi/sound/sof/trace.h66
-rw-r--r--include/video/udlfb.h7
-rw-r--r--include/xen/xen.h4
-rw-r--r--init/Kconfig45
-rw-r--r--init/initramfs.c147
-rw-r--r--init/main.c38
-rw-r--r--ipc/ipc_sysctl.c14
-rw-r--r--ipc/mqueue.c82
-rw-r--r--ipc/msgutil.c6
-rw-r--r--ipc/util.c49
-rw-r--r--ipc/util.h47
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/Kconfig.locks9
-rw-r--r--kernel/Makefile11
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/async.c4
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_fsnotify.c2
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/audit_watch.c4
-rw-r--r--kernel/auditfilter.c20
-rw-r--r--kernel/auditsc.c119
-rw-r--r--kernel/backtracetest.c11
-rw-r--r--kernel/bpf/arraymap.c53
-rw-r--r--kernel/bpf/btf.c419
-rw-r--r--kernel/bpf/cgroup.c370
-rw-r--r--kernel/bpf/core.c19
-rw-r--r--kernel/bpf/cpumap.c53
-rw-r--r--kernel/bpf/disasm.c5
-rw-r--r--kernel/bpf/hashtab.c6
-rw-r--r--kernel/bpf/helpers.c131
-rw-r--r--kernel/bpf/inode.c10
-rw-r--r--kernel/bpf/local_storage.c6
-rw-r--r--kernel/bpf/lpm_trie.c3
-rw-r--r--kernel/bpf/queue_stack_maps.c6
-rw-r--r--kernel/bpf/syscall.c164
-rw-r--r--kernel/bpf/verifier.c696
-rw-r--r--kernel/cgroup/Makefile4
-rw-r--r--kernel/cgroup/cgroup-internal.h8
-rw-r--r--kernel/cgroup/cgroup-v1.c16
-rw-r--r--kernel/cgroup/cgroup.c241
-rw-r--r--kernel/cgroup/cpuset.c11
-rw-r--r--kernel/cgroup/debug.c8
-rw-r--r--kernel/cgroup/freezer.c639
-rw-r--r--kernel/cgroup/legacy_freezer.c481
-rw-r--r--kernel/compat.c3
-rw-r--r--kernel/cpu.c91
-rw-r--r--kernel/debug/gdbstub.c9
-rw-r--r--kernel/debug/kdb/Makefile1
-rw-r--r--kernel/debug/kdb/kdb_io.c2
-rw-r--r--kernel/debug/kdb/kdb_main.c3
-rw-r--r--kernel/debug/kdb/kdb_support.c2
-rw-r--r--kernel/dma/Kconfig4
-rw-r--r--kernel/dma/debug.c14
-rw-r--r--kernel/dma/direct.c2
-rw-r--r--kernel/dma/mapping.c9
-rw-r--r--kernel/dma/swiotlb.c6
-rw-r--r--kernel/events/core.c12
-rw-r--r--kernel/events/ring_buffer.c3
-rw-r--r--kernel/events/uprobes.c13
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fail_function.c2
-rw-r--r--kernel/fork.c170
-rw-r--r--kernel/futex.c190
-rw-r--r--kernel/gcov/Kconfig3
-rw-r--r--kernel/gcov/Makefile5
-rw-r--r--kernel/gcov/base.c86
-rw-r--r--kernel/gcov/clang.c581
-rw-r--r--kernel/gcov/gcc_3_4.c12
-rw-r--r--kernel/gcov/gcc_4_7.c12
-rw-r--r--kernel/gcov/gcc_base.c86
-rw-r--r--kernel/gcov/gcov.h5
-rwxr-xr-xkernel/gen_ikh_data.sh89
-rw-r--r--kernel/iomem.c4
-rw-r--r--kernel/irq/Kconfig3
-rw-r--r--kernel/irq/chip.c27
-rw-r--r--kernel/irq/debugfs.c2
-rw-r--r--kernel/irq/devres.c3
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/irq/irqdesc.c3
-rw-r--r--kernel/irq/irqdomain.c2
-rw-r--r--kernel/irq/manage.c6
-rw-r--r--kernel/irq/spurious.c4
-rw-r--r--kernel/irq/timings.c522
-rw-r--r--kernel/irq_work.c75
-rw-r--r--kernel/jump_label.c63
-rw-r--r--kernel/kexec_core.c4
-rw-r--r--kernel/kexec_file.c17
-rw-r--r--kernel/kheaders.c74
-rw-r--r--kernel/kthread.c1
-rw-r--r--kernel/latencytop.c37
-rw-r--r--kernel/livepatch/core.c102
-rw-r--r--kernel/livepatch/transition.c22
-rw-r--r--kernel/locking/Makefile5
-rw-r--r--kernel/locking/lock_events.c179
-rw-r--r--kernel/locking/lock_events.h59
-rw-r--r--kernel/locking/lock_events_list.h67
-rw-r--r--kernel/locking/lockdep.c357
-rw-r--r--kernel/locking/lockdep_internals.h34
-rw-r--r--kernel/locking/locktorture.c2
-rw-r--r--kernel/locking/percpu-rwsem.c2
-rw-r--r--kernel/locking/qspinlock.c8
-rw-r--r--kernel/locking/qspinlock_paravirt.h19
-rw-r--r--kernel/locking/qspinlock_stat.h242
-rw-r--r--kernel/locking/rwsem-spinlock.c339
-rw-r--r--kernel/locking/rwsem-xadd.c246
-rw-r--r--kernel/locking/rwsem.c25
-rw-r--r--kernel/locking/rwsem.h174
-rw-r--r--kernel/locking/spinlock.c7
-rw-r--r--kernel/locking/spinlock_debug.c6
-rw-r--r--kernel/memremap.c13
-rw-r--r--kernel/module-internal.h2
-rw-r--r--kernel/module.c108
-rw-r--r--kernel/notifier.c1
-rw-r--r--kernel/padata.c3
-rw-r--r--kernel/panic.c18
-rw-r--r--kernel/pid.c1
-rw-r--r--kernel/power/Kconfig9
-rw-r--r--kernel/power/hibernate.c17
-rw-r--r--kernel/power/main.c14
-rw-r--r--kernel/power/snapshot.c5
-rw-r--r--kernel/power/suspend.c17
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/printk/printk.c22
-rw-r--r--kernel/rcu/rcu.h10
-rw-r--r--kernel/rcu/rcuperf.c5
-rw-r--r--kernel/rcu/rcutorture.c21
-rw-r--r--kernel/rcu/srcutiny.c9
-rw-r--r--kernel/rcu/srcutree.c32
-rw-r--r--kernel/rcu/tiny.c2
-rw-r--r--kernel/rcu/tree.c518
-rw-r--r--kernel/rcu/tree.h14
-rw-r--r--kernel/rcu/tree_exp.h36
-rw-r--r--kernel/rcu/tree_plugin.h257
-rw-r--r--kernel/rcu/tree_stall.h709
-rw-r--r--kernel/rcu/update.c59
-rw-r--r--kernel/reboot.c20
-rw-r--r--kernel/resource.c11
-rw-r--r--kernel/rseq.c9
-rw-r--r--kernel/sched/core.c128
-rw-r--r--kernel/sched/cpufreq.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c27
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/fair.c25
-rw-r--r--kernel/sched/isolation.c18
-rw-r--r--kernel/sched/psi.c617
-rw-r--r--kernel/sched/rt.c5
-rw-r--r--kernel/sched/sched.h18
-rw-r--r--kernel/sched/topology.c31
-rw-r--r--kernel/seccomp.c23
-rw-r--r--kernel/signal.c80
-rw-r--r--kernel/softirq.c51
-rw-r--r--kernel/stacktrace.c333
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/sysctl.c59
-rw-r--r--kernel/taskstats.c33
-rw-r--r--kernel/time/clockevents.c18
-rw-r--r--kernel/time/jiffies.c2
-rw-r--r--kernel/time/ntp.c24
-rw-r--r--kernel/time/ntp_internal.h4
-rw-r--r--kernel/time/sched_clock.c6
-rw-r--r--kernel/time/tick-broadcast.c48
-rw-r--r--kernel/time/tick-common.c52
-rw-r--r--kernel/time/tick-internal.h10
-rw-r--r--kernel/time/tick-sched.c49
-rw-r--r--kernel/time/tick-sched.h13
-rw-r--r--kernel/time/time.c12
-rw-r--r--kernel/time/timekeeping.c37
-rw-r--r--kernel/time/timer.c32
-rw-r--r--kernel/torture.c2
-rw-r--r--kernel/trace/Kconfig7
-rw-r--r--kernel/trace/bpf_trace.c38
-rw-r--r--kernel/trace/ftrace.c9
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/ring_buffer_benchmark.c2
-rw-r--r--kernel/trace/trace.c522
-rw-r--r--kernel/trace/trace.h21
-rw-r--r--kernel/trace/trace_branch.c4
-rw-r--r--kernel/trace/trace_events.c4
-rw-r--r--kernel/trace/trace_events_filter.c84
-rw-r--r--kernel/trace/trace_events_hist.c282
-rw-r--r--kernel/trace/trace_events_trigger.c3
-rw-r--r--kernel/trace/trace_kdb.c61
-rw-r--r--kernel/trace/trace_kprobe.c77
-rw-r--r--kernel/trace/trace_probe.c291
-rw-r--r--kernel/trace/trace_probe.h78
-rw-r--r--kernel/trace/trace_probe_tmpl.h2
-rw-r--r--kernel/trace/trace_selftest.c5
-rw-r--r--kernel/trace/trace_stack.c85
-rw-r--r--kernel/trace/trace_uprobe.c57
-rw-r--r--kernel/user.c7
-rw-r--r--kernel/watchdog.c2
-rw-r--r--kernel/workqueue.c161
-rw-r--r--kernel/workqueue_internal.h5
-rw-r--r--lib/Kconfig39
-rw-r--r--lib/Kconfig.debug41
-rw-r--r--lib/Makefile29
-rw-r--r--lib/asn1_decoder.c4
-rw-r--r--lib/bitmap.c280
-rw-r--r--lib/cmdline.c5
-rw-r--r--lib/crc-t10dif.c1
-rw-r--r--lib/digsig.c1
-rw-r--r--lib/dynamic_debug.c37
-rw-r--r--lib/error-inject.c2
-rw-r--r--lib/fault-inject.c12
-rw-r--r--lib/hweight.c4
-rw-r--r--lib/iov_iter.c7
-rw-r--r--lib/kobject.c93
-rw-r--r--lib/kobject_uevent.c11
-rw-r--r--lib/libcrc32c.c1
-rw-r--r--lib/list_sort.c242
-rw-r--r--lib/math/Kconfig11
-rw-r--r--lib/math/Makefile5
-rw-r--r--lib/math/cordic.c (renamed from lib/cordic.c)0
-rw-r--r--lib/math/div64.c (renamed from lib/div64.c)2
-rw-r--r--lib/math/gcd.c (renamed from lib/gcd.c)0
-rw-r--r--lib/math/int_pow.c32
-rw-r--r--lib/math/int_sqrt.c (renamed from lib/int_sqrt.c)0
-rw-r--r--lib/math/lcm.c (renamed from lib/lcm.c)0
-rw-r--r--lib/math/prime_numbers.c (renamed from lib/prime_numbers.c)0
-rw-r--r--lib/math/rational.c (renamed from lib/rational.c)0
-rw-r--r--lib/math/reciprocal_div.c (renamed from lib/reciprocal_div.c)0
-rw-r--r--lib/nlattr.c200
-rw-r--r--lib/packing.c213
-rw-r--r--lib/percpu-refcount.c4
-rw-r--r--lib/plist.c4
-rw-r--r--lib/rhashtable.c210
-rw-r--r--lib/siphash.c36
-rw-r--r--lib/sort.c254
-rw-r--r--lib/stackdepot.c54
-rw-r--r--lib/string.c47
-rw-r--r--lib/strncpy_from_user.c5
-rw-r--r--lib/strnlen_user.c4
-rw-r--r--lib/test_bitmap.c87
-rw-r--r--lib/test_printf.c46
-rw-r--r--lib/test_rhashtable.c2
-rw-r--r--lib/test_strscpy.c150
-rw-r--r--lib/test_sysctl.c18
-rw-r--r--lib/test_vmalloc.c8
-rw-r--r--lib/ubsan.c69
-rw-r--r--lib/ubsan.h5
-rw-r--r--lib/vsprintf.c428
-rw-r--r--lib/zstd/bitstream.h5
-rw-r--r--lib/zstd/compress.c1
-rw-r--r--lib/zstd/decompress.c5
-rw-r--r--lib/zstd/huf_compress.c2
-rw-r--r--mm/Kconfig82
-rw-r--r--mm/Kconfig.debug1
-rw-r--r--mm/Makefile7
-rw-r--r--mm/cma.c23
-rw-r--r--mm/cma_debug.c2
-rw-r--r--mm/compaction.c12
-rw-r--r--mm/debug.c2
-rw-r--r--mm/filemap.c216
-rw-r--r--mm/gup.c393
-rw-r--r--mm/gup_benchmark.c5
-rw-r--r--mm/hmm.c1086
-rw-r--r--mm/huge_memory.c39
-rw-r--r--mm/hugetlb.c182
-rw-r--r--mm/kasan/Makefile9
-rw-r--r--mm/kasan/common.c43
-rw-r--r--mm/kasan/report.c10
-rw-r--r--mm/khugepaged.c7
-rw-r--r--mm/kmemleak.c24
-rw-r--r--mm/ksm.c6
-rw-r--r--mm/madvise.c5
-rw-r--r--mm/memblock.c82
-rw-r--r--mm/memcontrol.c385
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory.c114
-rw-r--r--mm/memory_hotplug.c132
-rw-r--r--mm/migrate.c7
-rw-r--r--mm/mincore.c23
-rw-r--r--mm/mmap.c15
-rw-r--r--mm/mmu_gather.c129
-rw-r--r--mm/mmu_notifier.c12
-rw-r--r--mm/mprotect.c9
-rw-r--r--mm/mremap.c3
-rw-r--r--mm/nommu.c14
-rw-r--r--mm/oom_kill.c3
-rw-r--r--mm/page-writeback.c12
-rw-r--r--mm/page_alloc.c359
-rw-r--r--mm/page_isolation.c2
-rw-r--r--mm/page_owner.c82
-rw-r--r--mm/percpu-internal.h15
-rw-r--r--mm/percpu-km.c2
-rw-r--r--mm/percpu-stats.c5
-rw-r--r--mm/percpu.c549
-rw-r--r--mm/rmap.c10
-rw-r--r--mm/shmem.c7
-rw-r--r--mm/shuffle.c207
-rw-r--r--mm/shuffle.h64
-rw-r--r--mm/slab.c321
-rw-r--r--mm/slob.c59
-rw-r--r--mm/slub.c93
-rw-r--r--mm/sparse.c16
-rw-r--r--mm/swap.c2
-rw-r--r--mm/swap_state.c4
-rw-r--r--mm/userfaultfd.c3
-rw-r--r--mm/util.c59
-rw-r--r--mm/vmalloc.c1240
-rw-r--r--mm/vmscan.c209
-rw-r--r--mm/workingset.c10
-rw-r--r--mm/z3fold.c638
-rw-r--r--net/6lowpan/nhc.c2
-rw-r--r--net/8021q/vlan.c18
-rw-r--r--net/8021q/vlan_dev.c26
-rw-r--r--net/8021q/vlan_netlink.c11
-rw-r--r--net/Kconfig7
-rw-r--r--net/appletalk/ddp.c8
-rw-r--r--net/atm/clip.c4
-rw-r--r--net/atm/ioctl.c16
-rw-r--r--net/atm/lec.c4
-rw-r--r--net/atm/pvc.c1
-rw-r--r--net/atm/svc.c1
-rw-r--r--net/ax25/af_ax25.c13
-rw-r--r--net/batman-adv/Kconfig24
-rw-r--r--net/batman-adv/Makefile15
-rw-r--r--net/batman-adv/bat_algo.c12
-rw-r--r--net/batman-adv/bat_algo.h12
-rw-r--r--net/batman-adv/bat_iv_ogm.c12
-rw-r--r--net/batman-adv/bat_iv_ogm.h12
-rw-r--r--net/batman-adv/bat_v.c12
-rw-r--r--net/batman-adv/bat_v.h12
-rw-r--r--net/batman-adv/bat_v_elp.c12
-rw-r--r--net/batman-adv/bat_v_elp.h12
-rw-r--r--net/batman-adv/bat_v_ogm.c12
-rw-r--r--net/batman-adv/bat_v_ogm.h12
-rw-r--r--net/batman-adv/bitarray.c12
-rw-r--r--net/batman-adv/bitarray.h12
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c13
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h12
-rw-r--r--net/batman-adv/debugfs.c12
-rw-r--r--net/batman-adv/debugfs.h12
-rw-r--r--net/batman-adv/distributed-arp-table.c40
-rw-r--r--net/batman-adv/distributed-arp-table.h12
-rw-r--r--net/batman-adv/fragmentation.c12
-rw-r--r--net/batman-adv/fragmentation.h12
-rw-r--r--net/batman-adv/gateway_client.c13
-rw-r--r--net/batman-adv/gateway_client.h12
-rw-r--r--net/batman-adv/gateway_common.c12
-rw-r--r--net/batman-adv/gateway_common.h12
-rw-r--r--net/batman-adv/hard-interface.c12
-rw-r--r--net/batman-adv/hard-interface.h12
-rw-r--r--net/batman-adv/hash.c12
-rw-r--r--net/batman-adv/hash.h12
-rw-r--r--net/batman-adv/icmp_socket.c14
-rw-r--r--net/batman-adv/icmp_socket.h12
-rw-r--r--net/batman-adv/log.c14
-rw-r--r--net/batman-adv/log.h12
-rw-r--r--net/batman-adv/main.c86
-rw-r--r--net/batman-adv/main.h16
-rw-r--r--net/batman-adv/multicast.c222
-rw-r--r--net/batman-adv/multicast.h30
-rw-r--r--net/batman-adv/netlink.c60
-rw-r--r--net/batman-adv/netlink.h12
-rw-r--r--net/batman-adv/network-coding.c12
-rw-r--r--net/batman-adv/network-coding.h12
-rw-r--r--net/batman-adv/originator.c12
-rw-r--r--net/batman-adv/originator.h12
-rw-r--r--net/batman-adv/routing.c12
-rw-r--r--net/batman-adv/routing.h12
-rw-r--r--net/batman-adv/send.c12
-rw-r--r--net/batman-adv/send.h12
-rw-r--r--net/batman-adv/soft-interface.c20
-rw-r--r--net/batman-adv/soft-interface.h12
-rw-r--r--net/batman-adv/sysfs.c128
-rw-r--r--net/batman-adv/sysfs.h50
-rw-r--r--net/batman-adv/tp_meter.c12
-rw-r--r--net/batman-adv/tp_meter.h12
-rw-r--r--net/batman-adv/trace.c12
-rw-r--r--net/batman-adv/trace.h12
-rw-r--r--net/batman-adv/translation-table.c17
-rw-r--r--net/batman-adv/translation-table.h16
-rw-r--r--net/batman-adv/tvlv.c12
-rw-r--r--net/batman-adv/tvlv.h12
-rw-r--r--net/batman-adv/types.h23
-rw-r--r--net/bluetooth/af_bluetooth.c8
-rw-r--r--net/bluetooth/amp.c1
-rw-r--r--net/bluetooth/hci_conn.c8
-rw-r--r--net/bluetooth/hci_core.c7
-rw-r--r--net/bluetooth/hci_event.c14
-rw-r--r--net/bluetooth/hci_request.c5
-rw-r--r--net/bluetooth/hci_request.h1
-rw-r--r--net/bluetooth/hidp/sock.c1
-rw-r--r--net/bluetooth/l2cap_core.c9
-rw-r--r--net/bluetooth/l2cap_sock.c8
-rw-r--r--net/bluetooth/mgmt.c11
-rw-r--r--net/bluetooth/rfcomm/sock.c1
-rw-r--r--net/bluetooth/sco.c1
-rw-r--r--net/bluetooth/smp.c1
-rw-r--r--net/bpf/Makefile2
-rw-r--r--net/bpf/test_run.c203
-rw-r--r--net/bpfilter/Makefile2
-rw-r--r--net/bridge/br.c14
-rw-r--r--net/bridge/br_arp_nd_proxy.c19
-rw-r--r--net/bridge/br_fdb.c1
-rw-r--r--net/bridge/br_forward.c15
-rw-r--r--net/bridge/br_if.c15
-rw-r--r--net/bridge/br_input.c77
-rw-r--r--net/bridge/br_mdb.c21
-rw-r--r--net/bridge/br_multicast.c26
-rw-r--r--net/bridge/br_netlink.c16
-rw-r--r--net/bridge/br_netlink_tunnel.c6
-rw-r--r--net/bridge/br_private.h35
-rw-r--r--net/bridge/br_stp_if.c3
-rw-r--r--net/bridge/br_vlan.c215
-rw-r--r--net/bridge/br_vlan_tunnel.c1
-rw-r--r--net/bridge/netfilter/ebtable_broute.c63
-rw-r--r--net/bridge/netfilter/ebtables.c11
-rw-r--r--net/caif/caif_dev.c12
-rw-r--r--net/can/af_can.c6
-rw-r--r--net/can/bcm.c1
-rw-r--r--net/can/gw.c4
-rw-r--r--net/can/raw.c1
-rw-r--r--net/ceph/cls_lock_client.c2
-rw-r--r--net/ceph/debugfs.c4
-rw-r--r--net/ceph/messenger.c123
-rw-r--r--net/ceph/mon_client.c6
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/ceph/pagevec.c2
-rw-r--r--net/compat.c57
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/bpf_sk_storage.c804
-rw-r--r--net/core/datagram.c29
-rw-r--r--net/core/datagram.h15
-rw-r--r--net/core/dev.c88
-rw-r--r--net/core/dev_ioctl.c3
-rw-r--r--net/core/devlink.c422
-rw-r--r--net/core/drop_monitor.c3
-rw-r--r--net/core/dst.c17
-rw-r--r--net/core/ethtool.c2
-rw-r--r--net/core/fib_rules.c12
-rw-r--r--net/core/filter.c579
-rw-r--r--net/core/flow_dissector.c133
-rw-r--r--net/core/gen_stats.c2
-rw-r--r--net/core/lwt_bpf.c10
-rw-r--r--net/core/lwtunnel.c9
-rw-r--r--net/core/neighbour.c43
-rw-r--r--net/core/net-procfs.c2
-rw-r--r--net/core/net-sysfs.c10
-rw-r--r--net/core/net_namespace.c21
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/netprio_cgroup.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/rtnetlink.c164
-rw-r--r--net/core/skbuff.c76
-rw-r--r--net/core/sock.c56
-rw-r--r--net/core/sock_reuseport.c2
-rw-r--r--net/core/sysctl_net_core.c8
-rw-r--r--net/dcb/dcbnl.c130
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/proto.c3
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/dn_dev.c8
-rw-r--r--net/decnet/dn_fib.c10
-rw-r--r--net/decnet/dn_route.c4
-rw-r--r--net/decnet/dn_table.c3
-rw-r--r--net/dns_resolver/dns_query.c8
-rw-r--r--net/dsa/Kconfig109
-rw-r--r--net/dsa/Makefile22
-rw-r--r--net/dsa/dsa.c177
-rw-r--r--net/dsa/dsa2.c64
-rw-r--r--net/dsa/dsa_priv.h50
-rw-r--r--net/dsa/legacy.c745
-rw-r--r--net/dsa/port.c87
-rw-r--r--net/dsa/slave.c141
-rw-r--r--net/dsa/switch.c31
-rw-r--r--net/dsa/tag_8021q.c222
-rw-r--r--net/dsa/tag_brcm.c41
-rw-r--r--net/dsa/tag_dsa.c15
-rw-r--r--net/dsa/tag_edsa.c15
-rw-r--r--net/dsa/tag_gswip.c9
-rw-r--r--net/dsa/tag_ksz.c29
-rw-r--r--net/dsa/tag_lan9303.c20
-rw-r--r--net/dsa/tag_mtk.c18
-rw-r--r--net/dsa/tag_qca.c19
-rw-r--r--net/dsa/tag_sja1105.c131
-rw-r--r--net/dsa/tag_trailer.c15
-rw-r--r--net/ethernet/eth.c16
-rw-r--r--net/hsr/Makefile1
-rw-r--r--net/hsr/hsr_debugfs.c119
-rw-r--r--net/hsr/hsr_device.c66
-rw-r--r--net/hsr/hsr_device.h6
-rw-r--r--net/hsr/hsr_forward.c97
-rw-r--r--net/hsr/hsr_forward.h6
-rw-r--r--net/hsr/hsr_framereg.c127
-rw-r--r--net/hsr/hsr_framereg.h18
-rw-r--r--net/hsr/hsr_main.c16
-rw-r--r--net/hsr/hsr_main.h64
-rw-r--r--net/hsr/hsr_netlink.c64
-rw-r--r--net/hsr/hsr_netlink.h6
-rw-r--r--net/hsr/hsr_slave.c19
-rw-r--r--net/hsr/hsr_slave.h7
-rw-r--r--net/ieee802154/ieee802154.h2
-rw-r--r--net/ieee802154/netlink.c1
-rw-r--r--net/ieee802154/nl802154.c139
-rw-r--r--net/ieee802154/socket.c6
-rw-r--r--net/ipv4/Kconfig29
-rw-r--r--net/ipv4/Makefile3
-rw-r--r--net/ipv4/af_inet.c15
-rw-r--r--net/ipv4/bpfilter/sockopt.c3
-rw-r--r--net/ipv4/devinet.c27
-rw-r--r--net/ipv4/esp4.c20
-rw-r--r--net/ipv4/esp4_offload.c58
-rw-r--r--net/ipv4/fib_frontend.c89
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c1000
-rw-r--r--net/ipv4/fib_trie.c33
-rw-r--r--net/ipv4/fou.c153
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ip_forward.c2
-rw-r--r--net/ipv4/ip_input.c11
-rw-r--r--net/ipv4/ip_output.c21
-rw-r--r--net/ipv4/ip_tunnel_core.c8
-rw-r--r--net/ipv4/ip_vti.c75
-rw-r--r--net/ipv4/ipmr.c21
-rw-r--r--net/ipv4/ipmr_base.c5
-rw-r--r--net/ipv4/netfilter/Kconfig20
-rw-r--r--net/ipv4/netfilter/Makefile2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/netfilter/nft_chain_route_ipv4.c89
-rw-r--r--net/ipv4/raw.c4
-rw-r--r--net/ipv4/route.c229
-rw-r--r--net/ipv4/syncookies.c17
-rw-r--r--net/ipv4/sysctl_net_ipv4.c9
-rw-r--r--net/ipv4/tcp.c61
-rw-r--r--net/ipv4/tcp_input.c127
-rw-r--r--net/ipv4/tcp_ipv4.c24
-rw-r--r--net/ipv4/tcp_metrics.c17
-rw-r--r--net/ipv4/tcp_minisocks.c5
-rw-r--r--net/ipv4/tcp_output.c19
-rw-r--r--net/ipv4/tcp_timer.c3
-rw-r--r--net/ipv4/udp.c19
-rw-r--r--net/ipv4/udp_offload.c16
-rw-r--r--net/ipv4/xfrm4_mode_beet.c155
-rw-r--r--net/ipv4/xfrm4_mode_transport.c114
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c152
-rw-r--r--net/ipv4/xfrm4_output.c27
-rw-r--r--net/ipv4/xfrm4_policy.c134
-rw-r--r--net/ipv4/xfrm4_protocol.c3
-rw-r--r--net/ipv6/Kconfig35
-rw-r--r--net/ipv6/Makefile4
-rw-r--r--net/ipv6/addrconf.c65
-rw-r--r--net/ipv6/addrconf_core.c40
-rw-r--r--net/ipv6/addrlabel.c12
-rw-r--r--net/ipv6/af_inet6.c24
-rw-r--r--net/ipv6/esp6_offload.c48
-rw-r--r--net/ipv6/fib6_rules.c53
-rw-r--r--net/ipv6/icmp.c59
-rw-r--r--net/ipv6/ila/ila_lwt.c3
-rw-r--r--net/ipv6/ila/ila_main.c9
-rw-r--r--net/ipv6/ip6_fib.c54
-rw-r--r--net/ipv6/ip6_flowlabel.c22
-rw-r--r--net/ipv6/ip6_input.c12
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/ip6_vti.c6
-rw-r--r--net/ipv6/ip6mr.c1
-rw-r--r--net/ipv6/ndisc.c25
-rw-r--r--net/ipv6/netfilter/Kconfig19
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c81
-rw-r--r--net/ipv6/netfilter/nft_chain_route_ipv6.c91
-rw-r--r--net/ipv6/output_core.c30
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/route.c1219
-rw-r--r--net/ipv6/seg6.c9
-rw-r--r--net/ipv6/seg6_iptunnel.c4
-rw-r--r--net/ipv6/seg6_local.c11
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/ipv6/tcp_ipv6.c62
-rw-r--r--net/ipv6/udp.c15
-rw-r--r--net/ipv6/xfrm6_mode_beet.c131
-rw-r--r--net/ipv6/xfrm6_mode_ro.c85
-rw-r--r--net/ipv6/xfrm6_mode_transport.c121
-rw-r--r--net/ipv6/xfrm6_mode_tunnel.c151
-rw-r--r--net/ipv6/xfrm6_output.c36
-rw-r--r--net/ipv6/xfrm6_policy.c126
-rw-r--r--net/ipv6/xfrm6_protocol.c3
-rw-r--r--net/ipv6/xfrm6_tunnel.c6
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/l2tp_core.c13
-rw-r--r--net/l2tp/l2tp_ip.c1
-rw-r--r--net/l2tp/l2tp_ip6.c1
-rw-r--r--net/l2tp/l2tp_netlink.c23
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/cfg.c46
-rw-r--r--net/mac80211/debugfs.c54
-rw-r--r--net/mac80211/debugfs_netdev.c2
-rw-r--r--net/mac80211/driver-ops.c21
-rw-r--r--net/mac80211/driver-ops.h5
-rw-r--r--net/mac80211/ht.c5
-rw-r--r--net/mac80211/ieee80211_i.h9
-rw-r--r--net/mac80211/iface.c20
-rw-r--r--net/mac80211/key.c87
-rw-r--r--net/mac80211/key.h2
-rw-r--r--net/mac80211/main.c16
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c34
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mlme.c3
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c124
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h2
-rw-r--r--net/mac80211/rx.c74
-rw-r--r--net/mac80211/sta_info.c15
-rw-r--r--net/mac80211/tdls.c2
-rw-r--r--net/mac80211/trace.h30
-rw-r--r--net/mac80211/tx.c113
-rw-r--r--net/mac80211/util.c162
-rw-r--r--net/mac80211/wme.c82
-rw-r--r--net/mac80211/wme.h2
-rw-r--r--net/mpls/af_mpls.c32
-rw-r--r--net/mpls/mpls_iptunnel.c16
-rw-r--r--net/ncsi/ncsi-netlink.c29
-rw-r--r--net/netfilter/Kconfig19
-rw-r--r--net/netfilter/Makefile4
-rw-r--r--net/netfilter/core.c3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_gen.h14
-rw-r--r--net/netfilter/ipset/ip_set_core.c36
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h14
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c87
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c84
-rw-r--r--net/netfilter/nf_conntrack_amanda.c8
-rw-r--r--net/netfilter/nf_conntrack_expect.c2
-rw-r--r--net/netfilter/nf_conntrack_ftp.c18
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c11
-rw-r--r--net/netfilter/nf_conntrack_helper.c86
-rw-r--r--net/netfilter/nf_conntrack_irc.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c103
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c6
-rw-r--r--net/netfilter/nf_conntrack_sane.c12
-rw-r--r--net/netfilter/nf_conntrack_sip.c30
-rw-r--r--net/netfilter/nf_conntrack_standalone.c48
-rw-r--r--net/netfilter/nf_conntrack_tftp.c18
-rw-r--r--net/netfilter/nf_conntrack_timeout.c89
-rw-r--r--net/netfilter/nf_flow_table_core.c34
-rw-r--r--net/netfilter/nf_flow_table_ip.c16
-rw-r--r--net/netfilter/nf_internals.h3
-rw-r--r--net/netfilter/nf_nat_amanda.c9
-rw-r--r--net/netfilter/nf_nat_core.c23
-rw-r--r--net/netfilter/nf_nat_ftp.c9
-rw-r--r--net/netfilter/nf_nat_irc.c9
-rw-r--r--net/netfilter/nf_nat_masquerade.c104
-rw-r--r--net/netfilter/nf_nat_proto.c59
-rw-r--r--net/netfilter/nf_nat_sip.c9
-rw-r--r--net/netfilter/nf_nat_tftp.c9
-rw-r--r--net/netfilter/nf_queue.c1
-rw-r--r--net/netfilter/nf_tables_api.c153
-rw-r--r--net/netfilter/nf_tables_set_core.c1
-rw-r--r--net/netfilter/nfnetlink.c15
-rw-r--r--net/netfilter/nfnetlink_acct.c4
-rw-r--r--net/netfilter/nfnetlink_cthelper.c29
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c11
-rw-r--r--net/netfilter/nfnetlink_osf.c14
-rw-r--r--net/netfilter/nfnetlink_queue.c7
-rw-r--r--net/netfilter/nft_chain_nat.c36
-rw-r--r--net/netfilter/nft_chain_route.c169
-rw-r--r--net/netfilter/nft_compat.c4
-rw-r--r--net/netfilter/nft_ct.c18
-rw-r--r--net/netfilter/nft_dynset.c17
-rw-r--r--net/netfilter/nft_flow_offload.c4
-rw-r--r--net/netfilter/nft_masq.c83
-rw-r--r--net/netfilter/nft_nat.c58
-rw-r--r--net/netfilter/nft_osf.c30
-rw-r--r--net/netfilter/nft_redir.c63
-rw-r--r--net/netfilter/nft_tunnel.c27
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netfilter/xt_CT.c93
-rw-r--r--net/netfilter/xt_MASQUERADE.c (renamed from net/ipv4/netfilter/ipt_MASQUERADE.c)84
-rw-r--r--net/netfilter/xt_connlabel.c2
-rw-r--r--net/netfilter/xt_hashlimit.c3
-rw-r--r--net/netlabel/netlabel_calipso.c9
-rw-r--r--net/netlabel/netlabel_cipso_v4.c59
-rw-r--r--net/netlabel/netlabel_mgmt.c25
-rw-r--r--net/netlabel/netlabel_unlabeled.c17
-rw-r--r--net/netlink/genetlink.c52
-rw-r--r--net/netrom/af_netrom.c14
-rw-r--r--net/nfc/netlink.c55
-rw-r--r--net/openvswitch/actions.c48
-rw-r--r--net/openvswitch/conntrack.c97
-rw-r--r--net/openvswitch/datapath.c48
-rw-r--r--net/openvswitch/flow_netlink.c245
-rw-r--r--net/openvswitch/meter.c23
-rw-r--r--net/openvswitch/vport-vxlan.c6
-rw-r--r--net/openvswitch/vport.c2
-rw-r--r--net/packet/af_packet.c85
-rw-r--r--net/packet/diag.c2
-rw-r--r--net/phonet/pn_netlink.c8
-rw-r--r--net/psample/psample.c1
-rw-r--r--net/qrtr/qrtr.c14
-rw-r--r--net/rds/ib_recv.c8
-rw-r--r--net/rds/info.c2
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rds/tcp.c2
-rw-r--r--net/rfkill/core.c2
-rw-r--r--net/rose/af_rose.c7
-rw-r--r--net/rxrpc/af_rxrpc.c28
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/call_object.c34
-rw-r--r--net/rxrpc/conn_client.c8
-rw-r--r--net/rxrpc/local_object.c2
-rw-r--r--net/rxrpc/sendmsg.c4
-rw-r--r--net/sched/act_api.c40
-rw-r--r--net/sched/act_bpf.c3
-rw-r--r--net/sched/act_connmark.c4
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_gact.c3
-rw-r--r--net/sched/act_ife.c10
-rw-r--r--net/sched/act_ipt.c3
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_nat.c3
-rw-r--r--net/sched/act_pedit.c13
-rw-r--r--net/sched/act_police.c55
-rw-r--r--net/sched/act_sample.c3
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c3
-rw-r--r--net/sched/act_skbmod.c3
-rw-r--r--net/sched/act_tunnel_key.c17
-rw-r--r--net/sched/act_vlan.c3
-rw-r--r--net/sched/cls_api.c39
-rw-r--r--net/sched/cls_basic.c6
-rw-r--r--net/sched/cls_bpf.c9
-rw-r--r--net/sched/cls_cgroup.c9
-rw-r--r--net/sched/cls_flow.c5
-rw-r--r--net/sched/cls_flower.c620
-rw-r--r--net/sched/cls_fw.c5
-rw-r--r--net/sched/cls_matchall.c65
-rw-r--r--net/sched/cls_route.c5
-rw-r--r--net/sched/cls_rsvp.h5
-rw-r--r--net/sched/cls_tcindex.c5
-rw-r--r--net/sched/cls_u32.c7
-rw-r--r--net/sched/em_ipt.c4
-rw-r--r--net/sched/em_meta.c3
-rw-r--r--net/sched/ematch.c7
-rw-r--r--net/sched/sch_api.c36
-rw-r--r--net/sched/sch_atm.c5
-rw-r--r--net/sched/sch_cake.c13
-rw-r--r--net/sched/sch_cbq.c10
-rw-r--r--net/sched/sch_cbs.c103
-rw-r--r--net/sched/sch_choke.c5
-rw-r--r--net/sched/sch_codel.c5
-rw-r--r--net/sched/sch_drr.c5
-rw-r--r--net/sched/sch_dsmark.c10
-rw-r--r--net/sched/sch_etf.c5
-rw-r--r--net/sched/sch_fq.c115
-rw-r--r--net/sched/sch_fq_codel.c6
-rw-r--r--net/sched/sch_generic.c72
-rw-r--r--net/sched/sch_gred.c25
-rw-r--r--net/sched/sch_hfsc.c5
-rw-r--r--net/sched/sch_hhf.c5
-rw-r--r--net/sched/sch_htb.c19
-rw-r--r--net/sched/sch_ingress.c2
-rw-r--r--net/sched/sch_mqprio.c9
-rw-r--r--net/sched/sch_netem.c7
-rw-r--r--net/sched/sch_pie.c5
-rw-r--r--net/sched/sch_qfq.c6
-rw-r--r--net/sched/sch_red.c5
-rw-r--r--net/sched/sch_sfb.c5
-rw-r--r--net/sched/sch_taprio.c747
-rw-r--r--net/sched/sch_tbf.c5
-rw-r--r--net/sctp/auth.c1
-rw-r--r--net/sctp/ipv6.c1
-rw-r--r--net/sctp/protocol.c1
-rw-r--r--net/sctp/sm_make_chunk.c2
-rw-r--r--net/sctp/sm_sideeffect.c29
-rw-r--r--net/sctp/sm_statefuns.c41
-rw-r--r--net/sctp/socket.c10
-rw-r--r--net/sctp/stream_interleave.c60
-rw-r--r--net/sctp/ulpevent.c19
-rw-r--r--net/sctp/ulpqueue.c53
-rw-r--r--net/smc/af_smc.c355
-rw-r--r--net/smc/smc.h11
-rw-r--r--net/smc/smc_clc.c10
-rw-r--r--net/smc/smc_clc.h20
-rw-r--r--net/smc/smc_core.c93
-rw-r--r--net/smc/smc_core.h25
-rw-r--r--net/smc/smc_ib.c16
-rw-r--r--net/smc/smc_pnet.c56
-rw-r--r--net/smc/smc_pnet.h7
-rw-r--r--net/socket.c85
-rw-r--r--net/strparser/strparser.c16
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c71
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c1
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c6
-rw-r--r--net/sunrpc/auth_unix.c9
-rw-r--r--net/sunrpc/cache.c4
-rw-r--r--net/sunrpc/clnt.c132
-rw-r--r--net/sunrpc/debugfs.c2
-rw-r--r--net/sunrpc/rpc_pipe.c11
-rw-r--r--net/sunrpc/rpcb_clnt.c12
-rw-r--r--net/sunrpc/sched.c158
-rw-r--r--net/sunrpc/socklib.c2
-rw-r--r--net/sunrpc/svc.c298
-rw-r--r--net/sunrpc/svc_xprt.c17
-rw-r--r--net/sunrpc/svcauth_unix.c15
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xprt.c154
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c120
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c63
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c115
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c2
-rw-r--r--net/sunrpc/xprtrdma/transport.c105
-rw-r--r--net/sunrpc/xprtrdma/verbs.c338
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h121
-rw-r--r--net/sunrpc/xprtsock.c9
-rw-r--r--net/tipc/bcast.c275
-rw-r--r--net/tipc/bcast.h12
-rw-r--r--net/tipc/bearer.c50
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/core.h3
-rw-r--r--net/tipc/group.c2
-rw-r--r--net/tipc/link.c325
-rw-r--r--net/tipc/link.h2
-rw-r--r--net/tipc/monitor.c4
-rw-r--r--net/tipc/msg.h62
-rw-r--r--net/tipc/name_table.c4
-rw-r--r--net/tipc/net.c8
-rw-r--r--net/tipc/netlink.c51
-rw-r--r--net/tipc/netlink_compat.c88
-rw-r--r--net/tipc/node.c119
-rw-r--r--net/tipc/node.h8
-rw-r--r--net/tipc/socket.c29
-rw-r--r--net/tipc/udp_media.c27
-rw-r--r--net/tls/tls_device.c89
-rw-r--r--net/tls/tls_device_fallback.c3
-rw-r--r--net/tls/tls_main.c31
-rw-r--r--net/tls/tls_sw.c103
-rw-r--r--net/unix/af_unix.c6
-rw-r--r--net/wimax/stack.c9
-rw-r--r--net/wireless/lib80211_crypt_tkip.c1
-rw-r--r--net/wireless/nl80211.c923
-rw-r--r--net/wireless/pmsr.c30
-rw-r--r--net/wireless/rdev-ops.h29
-rw-r--r--net/wireless/reg.c28
-rw-r--r--net/wireless/scan.c173
-rw-r--r--net/wireless/trace.h87
-rw-r--r--net/wireless/util.c21
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/x25/af_x25.c27
-rw-r--r--net/xdp/xdp_umem.c4
-rw-r--r--net/xdp/xsk_queue.h56
-rw-r--r--net/xfrm/Kconfig8
-rw-r--r--net/xfrm/xfrm_device.c63
-rw-r--r--net/xfrm/xfrm_inout.h38
-rw-r--r--net/xfrm/xfrm_input.c299
-rw-r--r--net/xfrm/xfrm_interface.c23
-rw-r--r--net/xfrm/xfrm_output.c381
-rw-r--r--net/xfrm/xfrm_policy.c282
-rw-r--r--net/xfrm/xfrm_state.c218
-rw-r--r--net/xfrm/xfrm_user.c26
-rw-r--r--samples/Kconfig16
-rw-r--r--samples/Makefile24
-rw-r--r--samples/bpf/.gitignore2
-rw-r--r--samples/bpf/Makefile3
-rw-r--r--samples/bpf/asm_goto_workaround.h1
-rw-r--r--samples/bpf/ibumad_kern.c144
-rw-r--r--samples/bpf/ibumad_user.c122
-rw-r--r--samples/bpf/offwaketime_user.c5
-rw-r--r--samples/bpf/sampleip_user.c5
-rw-r--r--samples/bpf/spintest_user.c7
-rw-r--r--samples/bpf/trace_event_user.c5
-rw-r--r--samples/kobject/kset-example.c3
-rw-r--r--samples/pidfd/.gitignore1
-rw-r--r--samples/pidfd/Makefile6
-rw-r--r--samples/pidfd/pidfd-metadata.c112
-rw-r--r--samples/seccomp/Makefile4
-rw-r--r--samples/vfs/.gitignore2
-rw-r--r--samples/vfs/Makefile (renamed from samples/statx/Makefile)5
-rw-r--r--samples/vfs/test-fsmount.c133
-rw-r--r--samples/vfs/test-statx.c (renamed from samples/statx/test-statx.c)11
-rw-r--r--scripts/Kbuild.include8
-rw-r--r--scripts/Kconfig.include8
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.extrawarn25
-rw-r--r--scripts/Makefile.host14
-rw-r--r--scripts/Makefile.lib28
-rw-r--r--scripts/Makefile.modbuiltin2
-rw-r--r--scripts/Makefile.modpost2
-rw-r--r--scripts/Makefile.ubsan1
-rwxr-xr-xscripts/bpf_helpers_doc.py8
-rwxr-xr-xscripts/checkpatch.pl18
-rwxr-xr-xscripts/documentation-file-ref-check32
-rw-r--r--scripts/dtc/Makefile6
-rw-r--r--scripts/gcc-plugins/Kconfig126
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c2
-rw-r--r--scripts/gdb/linux/Makefile2
-rw-r--r--scripts/gdb/linux/clk.py76
-rw-r--r--scripts/gdb/linux/config.py44
-rw-r--r--scripts/gdb/linux/constants.py.in17
-rw-r--r--scripts/gdb/linux/cpus.py1
-rw-r--r--scripts/gdb/linux/lists.py26
-rw-r--r--scripts/gdb/linux/proc.py10
-rw-r--r--scripts/gdb/linux/rbtree.py177
-rw-r--r--scripts/gdb/linux/symbols.py6
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/gdb/linux/timerlist.py219
-rw-r--r--scripts/gdb/linux/utils.py7
-rw-r--r--scripts/gdb/vmlinux-gdb.py4
-rw-r--r--scripts/genksyms/Makefile4
-rw-r--r--scripts/kconfig/Makefile8
-rw-r--r--scripts/kconfig/confdata.c134
-rw-r--r--scripts/kconfig/gconf.c2
-rw-r--r--scripts/kconfig/lexer.l3
-rw-r--r--scripts/kconfig/lkc.h1
-rw-r--r--scripts/kconfig/lxdialog/BIG.FAT.WARNING2
-rw-r--r--scripts/kconfig/mconf.c2
-rwxr-xr-x[-rw-r--r--]scripts/kconfig/nconf-cfg.sh0
-rw-r--r--scripts/kconfig/nconf.c3
-rwxr-xr-xscripts/link-vmlinux.sh28
-rwxr-xr-xscripts/mkmakefile26
-rwxr-xr-xscripts/modules-check.sh16
-rwxr-xr-xscripts/recordmcount.pl5
-rw-r--r--scripts/selinux/genheaders/genheaders.c1
-rwxr-xr-xscripts/selinux/install_policy.sh92
-rw-r--r--scripts/selinux/mdp/Makefile2
-rw-r--r--scripts/selinux/mdp/mdp.c166
-rwxr-xr-xscripts/sphinx-pre-install1
-rwxr-xr-xscripts/tags.sh2
-rw-r--r--security/Kconfig2
-rw-r--r--security/Kconfig.hardening164
-rw-r--r--security/apparmor/apparmorfs.c7
-rw-r--r--security/apparmor/crypto.c2
-rw-r--r--security/inode.c7
-rw-r--r--security/integrity/Kconfig11
-rw-r--r--security/integrity/Makefile8
-rw-r--r--security/integrity/digsig_asymmetric.c11
-rw-r--r--security/integrity/evm/evm_crypto.c1
-rw-r--r--security/integrity/evm/evm_secfs.c10
-rw-r--r--security/integrity/ima/ima_crypto.c4
-rw-r--r--security/integrity/platform_certs/load_ipl_s390.c36
-rw-r--r--security/keys/dh.c1
-rw-r--r--security/keys/encrypted-keys/encrypted.c1
-rw-r--r--security/keys/process_keys.c41
-rw-r--r--security/keys/request_key.c14
-rw-r--r--security/keys/trusted.c1
-rw-r--r--security/security.c11
-rw-r--r--security/selinux/hooks.c229
-rw-r--r--security/selinux/include/classmap.h1
-rw-r--r--security/selinux/include/security.h1
-rw-r--r--security/selinux/netlabel.c14
-rw-r--r--security/selinux/ss/services.c7
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_lsm.c34
-rw-r--r--security/smack/smackfs.c55
-rw-r--r--security/tomoyo/Kconfig10
-rw-r--r--security/tomoyo/common.c13
-rw-r--r--security/tomoyo/network.c4
-rw-r--r--security/tomoyo/realpath.c3
-rw-r--r--security/tomoyo/util.c2
-rw-r--r--sound/core/control.c2
-rw-r--r--sound/core/init.c23
-rw-r--r--sound/core/memalloc.c53
-rw-r--r--sound/core/oss/mixer_oss.c16
-rw-r--r--sound/core/pcm.c14
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_device.h10
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c11
-rw-r--r--sound/core/seq/oss/seq_oss_writeq.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c111
-rw-r--r--sound/core/seq/seq_clientmgr.h8
-rw-r--r--sound/core/seq/seq_fifo.c14
-rw-r--r--sound/core/seq/seq_memory.c30
-rw-r--r--sound/core/seq/seq_ports.c30
-rw-r--r--sound/core/seq/seq_ports.h5
-rw-r--r--sound/core/sound.c5
-rw-r--r--sound/core/timer.c183
-rw-r--r--sound/drivers/aloop.c4
-rw-r--r--sound/firewire/amdtp-stream.c44
-rw-r--r--sound/firewire/motu/amdtp-motu.c6
-rw-r--r--sound/firewire/motu/motu-protocol-v2.c43
-rw-r--r--sound/firewire/motu/motu.c15
-rw-r--r--sound/firewire/motu/motu.h1
-rw-r--r--sound/hda/ext/hdac_ext_bus.c2
-rw-r--r--sound/hda/hdac_bus.c1
-rw-r--r--sound/hda/hdac_component.c18
-rw-r--r--sound/hda/hdac_device.c7
-rw-r--r--sound/hda/hdac_sysfs.c3
-rw-r--r--sound/isa/gus/gus_mem.c2
-rw-r--r--sound/last.c10
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c16
-rw-r--r--sound/pci/hda/hda_codec.c8
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/patch_hdmi.c11
-rw-r--r--sound/pci/hda/patch_realtek.c191
-rw-r--r--sound/ppc/snd_ps3.c4
-rw-r--r--sound/sh/aica.c14
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/adi/axi-i2s.c68
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c68
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c2
-rw-r--r--sound/soc/atmel/Kconfig14
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/mchp-i2s-mcc.c974
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c16
-rw-r--r--sound/soc/cirrus/edb93xx.c2
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c1
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c3
-rw-r--r--sound/soc/cirrus/simone.c2
-rw-r--r--sound/soc/cirrus/snappercl15.c2
-rw-r--r--sound/soc/codecs/Kconfig15
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/ab8500-codec.c4
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c13
-rw-r--r--sound/soc/codecs/cs42l51.c225
-rw-r--r--sound/soc/codecs/cs42l51.h3
-rw-r--r--sound/soc/codecs/cs43130.c2
-rw-r--r--sound/soc/codecs/cs47l24.c4
-rw-r--r--sound/soc/codecs/da7213.c5
-rw-r--r--sound/soc/codecs/da7213.h2
-rw-r--r--sound/soc/codecs/da7219.c542
-rw-r--r--sound/soc/codecs/da7219.h6
-rw-r--r--sound/soc/codecs/es8316.c14
-rw-r--r--sound/soc/codecs/hdac_hda.c6
-rw-r--r--sound/soc/codecs/hdac_hdmi.c24
-rw-r--r--sound/soc/codecs/hdmi-codec.c12
-rw-r--r--sound/soc/codecs/lochnagar-sc.c266
-rw-r--r--sound/soc/codecs/max98090.c12
-rw-r--r--sound/soc/codecs/max98357a.c3
-rw-r--r--sound/soc/codecs/mt6358.c131
-rw-r--r--sound/soc/codecs/nau8810.c25
-rw-r--r--sound/soc/codecs/pcm3168a.c7
-rw-r--r--sound/soc/codecs/rt5645.c17
-rw-r--r--sound/soc/codecs/rt5651.c47
-rw-r--r--sound/soc/codecs/rt5651.h1
-rw-r--r--sound/soc/codecs/rt5677-spi.c43
-rw-r--r--sound/soc/codecs/rt5682.c2
-rw-r--r--sound/soc/codecs/simple-amplifier.c3
-rw-r--r--sound/soc/codecs/sirf-audio-codec.c3
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c61
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h12
-rw-r--r--sound/soc/codecs/tlv320aic32x4-clk.c483
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c14
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c14
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c441
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h13
-rw-r--r--sound/soc/codecs/wcd9335.c1
-rw-r--r--sound/soc/codecs/wm5102.c4
-rw-r--r--sound/soc/codecs/wm5110.c4
-rw-r--r--sound/soc/codecs/wm_adsp.c1032
-rw-r--r--sound/soc/codecs/wm_adsp.h50
-rw-r--r--sound/soc/codecs/wmfw.h30
-rw-r--r--sound/soc/fsl/Kconfig25
-rw-r--r--sound/soc/fsl/Makefile5
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c30
-rw-r--r--sound/soc/fsl/fsl_audmix.c578
-rw-r--r--sound/soc/fsl/fsl_audmix.h102
-rw-r--r--sound/soc/fsl/fsl_dma.c26
-rw-r--r--sound/soc/fsl/fsl_dma.h5
-rw-r--r--sound/soc/fsl/fsl_esai.c23
-rw-r--r--sound/soc/fsl/fsl_micfil.c3
-rw-r--r--sound/soc/fsl/fsl_sai.c26
-rw-r--r--sound/soc/fsl/fsl_utils.c1
-rw-r--r--sound/soc/fsl/imx-audmix.c331
-rw-r--r--sound/soc/fsl/imx-audmux.c26
-rw-r--r--sound/soc/fsl/imx-es8328.c15
-rw-r--r--sound/soc/fsl/imx-mc13783.c22
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c21
-rw-r--r--sound/soc/fsl/imx-pcm.h6
-rw-r--r--sound/soc/fsl/imx-spdif.c13
-rw-r--r--sound/soc/fsl/imx-ssi.c57
-rw-r--r--sound/soc/fsl/imx-ssi.h6
-rw-r--r--sound/soc/fsl/mpc5200_dma.c14
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c16
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c14
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c18
-rw-r--r--sound/soc/fsl/mx27vis-aic32x4.c29
-rw-r--r--sound/soc/fsl/p1022_ds.c18
-rw-r--r--sound/soc/fsl/p1022_rdk.c32
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c18
-rw-r--r--sound/soc/fsl/phycore-ac97.c16
-rw-r--r--sound/soc/fsl/wm1133-ev1.c21
-rw-r--r--sound/soc/generic/audio-graph-card.c429
-rw-r--r--sound/soc/generic/simple-card-utils.c440
-rw-r--r--sound/soc/generic/simple-card.c383
-rw-r--r--sound/soc/intel/Kconfig7
-rw-r--r--sound/soc/intel/boards/Kconfig117
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c4
-rw-r--r--sound/soc/intel/boards/broadwell.c4
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c9
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c51
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c16
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c42
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c9
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c9
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c52
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c22
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h2
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c19
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c563
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cht-match.c6
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-cnl-match.c19
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-glk-match.c9
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c6
-rw-r--r--sound/soc/intel/common/sst-firmware.c8
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c8
-rw-r--r--sound/soc/jz4740/Kconfig2
-rw-r--r--sound/soc/mediatek/Kconfig27
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c51
-rw-r--r--sound/soc/mediatek/common/mtk-btcvsd.c4
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c13
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c16
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c16
-rw-r--r--sound/soc/mediatek/mt8183/Makefile2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c18
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c471
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c423
-rw-r--r--sound/soc/meson/axg-fifo.c34
-rw-r--r--sound/soc/meson/axg-fifo.h2
-rw-r--r--sound/soc/meson/axg-frddr.c143
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c6
-rw-r--r--sound/soc/meson/axg-tdm-formatter.h11
-rw-r--r--sound/soc/meson/axg-tdmin.c16
-rw-r--r--sound/soc/meson/axg-tdmout.c29
-rw-r--r--sound/soc/meson/axg-toddr.c53
-rw-r--r--sound/soc/mxs/mxs-saif.c1
-rw-r--r--sound/soc/qcom/Kconfig2
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c205
-rw-r--r--sound/soc/rockchip/rockchip_pdm.h12
-rw-r--r--sound/soc/samsung/arndale_rt5631.c17
-rw-r--r--sound/soc/samsung/bells.c15
-rw-r--r--sound/soc/samsung/dma.h8
-rw-r--r--sound/soc/samsung/dmaengine.c22
-rw-r--r--sound/soc/samsung/h1940_uda1380.c22
-rw-r--r--sound/soc/samsung/i2s-regs.h8
-rw-r--r--sound/soc/samsung/i2s.c27
-rw-r--r--sound/soc/samsung/i2s.h8
-rw-r--r--sound/soc/samsung/idma.c20
-rw-r--r--sound/soc/samsung/idma.h9
-rw-r--r--sound/soc/samsung/jive_wm8750.c19
-rw-r--r--sound/soc/samsung/littlemill.c15
-rw-r--r--sound/soc/samsung/lowland.c15
-rw-r--r--sound/soc/samsung/neo1973_wm8753.c25
-rw-r--r--sound/soc/samsung/odroid.c10
-rw-r--r--sound/soc/samsung/pcm.c19
-rw-r--r--sound/soc/samsung/pcm.h8
-rw-r--r--sound/soc/samsung/regs-i2s-v2.h10
-rw-r--r--sound/soc/samsung/regs-iis.h7
-rw-r--r--sound/soc/samsung/rx1950_uda1380.c30
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.c26
-rw-r--r--sound/soc/samsung/s3c-i2s-v2.h11
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c28
-rw-r--r--sound/soc/samsung/s3c2412-i2s.h11
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c25
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.h6
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.c11
-rw-r--r--sound/soc/samsung/s3c24xx_simtec.h10
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_hermes.c11
-rw-r--r--sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c11
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c20
-rw-r--r--sound/soc/samsung/smartq_wm8987.c21
-rw-r--r--sound/soc/samsung/smdk_spdif.c16
-rw-r--r--sound/soc/samsung/smdk_wm8580.c15
-rw-r--r--sound/soc/samsung/smdk_wm8994.c9
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c16
-rw-r--r--sound/soc/samsung/snow.c15
-rw-r--r--sound/soc/samsung/spdif.c17
-rw-r--r--sound/soc/samsung/spdif.h8
-rw-r--r--sound/soc/samsung/speyside.c15
-rw-r--r--sound/soc/samsung/tm2_wm5110.c17
-rw-r--r--sound/soc/samsung/tobermory.c15
-rw-r--r--sound/soc/sh/rcar/core.c18
-rw-r--r--sound/soc/sh/rcar/rsnd.h1
-rw-r--r--sound/soc/sh/rcar/ssi.c2
-rw-r--r--sound/soc/soc-core.c18
-rw-r--r--sound/soc/soc-dapm.c5
-rw-r--r--sound/soc/soc-pcm.c14
-rw-r--r--sound/soc/soc-topology.c330
-rw-r--r--sound/soc/sof/Kconfig156
-rw-r--r--sound/soc/sof/Makefile18
-rw-r--r--sound/soc/sof/control.c552
-rw-r--r--sound/soc/sof/core.c508
-rw-r--r--sound/soc/sof/debug.c232
-rw-r--r--sound/soc/sof/intel/Kconfig230
-rw-r--r--sound/soc/sof/intel/Makefile19
-rw-r--r--sound/soc/sof/intel/apl.c113
-rw-r--r--sound/soc/sof/intel/bdw.c713
-rw-r--r--sound/soc/sof/intel/byt.c874
-rw-r--r--sound/soc/sof/intel/cnl.c268
-rw-r--r--sound/soc/sof/intel/hda-bus.c111
-rw-r--r--sound/soc/sof/intel/hda-codec.c171
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c181
-rw-r--r--sound/soc/sof/intel/hda-dai.c356
-rw-r--r--sound/soc/sof/intel/hda-dsp.c471
-rw-r--r--sound/soc/sof/intel/hda-ipc.c455
-rw-r--r--sound/soc/sof/intel/hda-loader.c382
-rw-r--r--sound/soc/sof/intel/hda-pcm.c239
-rw-r--r--sound/soc/sof/intel/hda-stream.c701
-rw-r--r--sound/soc/sof/intel/hda-trace.c94
-rw-r--r--sound/soc/sof/intel/hda.c689
-rw-r--r--sound/soc/sof/intel/hda.h583
-rw-r--r--sound/soc/sof/intel/intel-ipc.c92
-rw-r--r--sound/soc/sof/intel/shim.h185
-rw-r--r--sound/soc/sof/ipc.c846
-rw-r--r--sound/soc/sof/loader.c400
-rw-r--r--sound/soc/sof/nocodec.c109
-rw-r--r--sound/soc/sof/ops.c163
-rw-r--r--sound/soc/sof/ops.h411
-rw-r--r--sound/soc/sof/pcm.c767
-rw-r--r--sound/soc/sof/pm.c388
-rw-r--r--sound/soc/sof/sof-acpi-dev.c312
-rw-r--r--sound/soc/sof/sof-pci-dev.c373
-rw-r--r--sound/soc/sof/sof-priv.h635
-rw-r--r--sound/soc/sof/topology.c3179
-rw-r--r--sound/soc/sof/trace.c297
-rw-r--r--sound/soc/sof/utils.c112
-rw-r--r--sound/soc/sof/xtensa/Kconfig2
-rw-r--r--sound/soc/sof/xtensa/Makefile5
-rw-r--r--sound/soc/sof/xtensa/core.c138
-rw-r--r--sound/soc/sprd/Kconfig9
-rw-r--r--sound/soc/sprd/Makefile6
-rw-r--r--sound/soc/sprd/sprd-mcdt.c1011
-rw-r--r--sound/soc/sprd/sprd-mcdt.h107
-rw-r--r--sound/soc/sprd/sprd-pcm-compress.c674
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.c9
-rw-r--r--sound/soc/sprd/sprd-pcm-dma.h43
-rw-r--r--sound/soc/stm/stm32_adfsdm.c2
-rw-r--r--sound/soc/stm/stm32_i2s.c33
-rw-r--r--sound/soc/stm/stm32_sai.c80
-rw-r--r--sound/soc/stm/stm32_sai.h2
-rw-r--r--sound/soc/stm/stm32_sai_sub.c88
-rw-r--r--sound/soc/stm/stm32_spdifrx.c47
-rw-r--r--sound/soc/ti/Kconfig4
-rw-r--r--sound/soc/ti/ams-delta.c2
-rw-r--r--sound/soc/ti/davinci-mcasp.c2
-rw-r--r--sound/soc/ti/edma-pcm.c5
-rw-r--r--sound/soc/ti/sdma-pcm.c9
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c1
-rw-r--r--sound/synth/emux/emux_hwdep.c5
-rw-r--r--sound/synth/emux/soundfont.c2
-rw-r--r--sound/usb/Kconfig4
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/card.c14
-rw-r--r--sound/usb/card.h3
-rw-r--r--sound/usb/line6/driver.c60
-rw-r--r--sound/usb/line6/podhd.c21
-rw-r--r--sound/usb/line6/toneport.c48
-rw-r--r--sound/usb/media.c327
-rw-r--r--sound/usb/media.h74
-rw-r--r--sound/usb/mixer.c6
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/pcm.c29
-rw-r--r--sound/usb/quirks-table.h85
-rw-r--r--sound/usb/stream.c2
-rw-r--r--sound/usb/usbaudio.h6
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c3
-rw-r--r--sound/usb/usx2y/usb_stream.c20
-rw-r--r--sound/usb/usx2y/usbusx2y.c7
-rw-r--r--sound/usb/usx2y/usx2yhwdeppcm.c6
-rw-r--r--tools/arch/arc/include/uapi/asm/unistd.h51
-rw-r--r--tools/arch/arm64/include/asm/barrier.h10
-rw-r--r--tools/arch/csky/include/uapi/asm/perf_regs.h51
-rw-r--r--tools/arch/hexagon/include/uapi/asm/unistd.h40
-rw-r--r--tools/arch/riscv/include/uapi/asm/unistd.h42
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--tools/arch/x86/include/asm/barrier.h7
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/x86/include/uapi/asm/perf_regs.h23
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/arch/x86/lib/memcpy_64.S3
-rw-r--r--tools/bpf/bpftool/.gitignore2
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst222
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst10
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst18
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst5
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool75
-rw-r--r--tools/bpf/bpftool/btf.c586
-rw-r--r--tools/bpf/bpftool/btf_dumper.c59
-rw-r--r--tools/bpf/bpftool/cgroup.c10
-rw-r--r--tools/bpf/bpftool/main.c3
-rw-r--r--tools/bpf/bpftool/main.h2
-rw-r--r--tools/bpf/bpftool/map.c67
-rw-r--r--tools/bpf/bpftool/net.c54
-rw-r--r--tools/bpf/bpftool/prog.c8
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c3
-rw-r--r--tools/build/Makefile.feature2
-rw-r--r--tools/build/feature/Makefile6
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libzstd.c12
-rw-r--r--tools/include/linux/coresight-pmu.h2
-rw-r--r--tools/include/linux/filter.h21
-rw-r--r--tools/include/nolibc/nolibc.h194
-rw-r--r--tools/include/uapi/linux/bpf.h364
-rw-r--r--tools/include/uapi/linux/btf.h32
-rw-r--r--tools/lib/bpf/.gitignore2
-rw-r--r--tools/lib/bpf/Makefile21
-rw-r--r--tools/lib/bpf/bpf.c38
-rw-r--r--tools/lib/bpf/bpf.h9
-rw-r--r--tools/lib/bpf/btf.c126
-rw-r--r--tools/lib/bpf/btf.h3
-rw-r--r--tools/lib/bpf/libbpf.c782
-rw-r--r--tools/lib/bpf/libbpf.h6
-rw-r--r--tools/lib/bpf/libbpf.map7
-rw-r--r--tools/lib/bpf/libbpf.pc.template12
-rw-r--r--tools/lib/bpf/libbpf_internal.h27
-rw-r--r--tools/lib/bpf/libbpf_probes.c85
-rw-r--r--tools/lib/bpf/libbpf_util.h30
-rw-r--r--tools/lib/bpf/xsk.c193
-rw-r--r--tools/lib/bpf/xsk.h22
-rw-r--r--tools/lib/traceevent/Documentation/Makefile207
-rw-r--r--tools/lib/traceevent/Documentation/asciidoc.conf120
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-commands.txt153
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-cpus.txt77
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_find.txt103
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_get.txt99
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_list.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_find.txt118
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_print.txt126
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_read.txt81
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-fields.txt105
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt91
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-filter.txt209
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt183
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_find.txt88
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-handle.txt101
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-header_page.txt102
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-long_size.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-page_size.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt90
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt137
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt156
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt155
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-strerror.txt85
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-tseq.txt158
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent.txt203
-rw-r--r--tools/lib/traceevent/Documentation/manpage-1.72.xsl14
-rw-r--r--tools/lib/traceevent/Documentation/manpage-base.xsl35
-rw-r--r--tools/lib/traceevent/Documentation/manpage-bold-literal.xsl17
-rw-r--r--tools/lib/traceevent/Documentation/manpage-normal.xsl13
-rw-r--r--tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl21
-rw-r--r--tools/lib/traceevent/Makefile46
-rw-r--r--tools/lib/traceevent/event-parse-api.c278
-rw-r--r--tools/lib/traceevent/event-parse-local.h6
-rw-r--r--tools/lib/traceevent/event-parse.c909
-rw-r--r--tools/lib/traceevent/event-parse.h154
-rw-r--r--tools/lib/traceevent/event-plugin.c32
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c49
-rw-r--r--tools/lib/traceevent/kbuffer.h13
-rw-r--r--tools/lib/traceevent/libtraceevent.pc.template4
-rw-r--r--tools/lib/traceevent/parse-filter.c216
-rw-r--r--tools/lib/traceevent/parse-utils.c2
-rw-r--r--tools/lib/traceevent/plugin_cfg80211.c8
-rw-r--r--tools/lib/traceevent/plugin_function.c14
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c12
-rw-r--r--tools/lib/traceevent/plugin_jbd2.c12
-rw-r--r--tools/lib/traceevent/plugin_kmem.c32
-rw-r--r--tools/lib/traceevent/plugin_kvm.c48
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c8
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c18
-rw-r--r--tools/lib/traceevent/plugin_scsi.c8
-rw-r--r--tools/lib/traceevent/plugin_xen.c8
-rw-r--r--tools/memory-model/Documentation/explanation.txt289
-rw-r--r--tools/memory-model/README33
-rw-r--r--tools/memory-model/linux-kernel.bell35
-rw-r--r--tools/memory-model/linux-kernel.cat39
-rw-r--r--tools/memory-model/linux-kernel.def6
-rw-r--r--tools/memory-model/lock.cat3
-rw-r--r--tools/objtool/Documentation/stack-validation.txt4
-rw-r--r--tools/objtool/Makefile3
-rw-r--r--tools/objtool/arch.h8
-rw-r--r--tools/objtool/arch/x86/decode.c21
-rw-r--r--tools/objtool/builtin-check.c4
-rw-r--r--tools/objtool/builtin.h2
-rw-r--r--tools/objtool/check.c411
-rw-r--r--tools/objtool/check.h4
-rw-r--r--tools/objtool/elf.c15
-rw-r--r--tools/objtool/elf.h3
-rw-r--r--tools/objtool/special.c18
-rw-r--r--tools/objtool/special.h1
-rw-r--r--tools/objtool/warn.h8
-rw-r--r--tools/pci/Makefile10
-rw-r--r--tools/pci/pcitest.c8
-rw-r--r--tools/perf/Documentation/perf-list.txt12
-rw-r--r--tools/perf/Documentation/perf-record.txt27
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt24
-rw-r--r--tools/perf/Documentation/perf.txt2
-rw-r--r--tools/perf/Makefile.config28
-rw-r--r--tools/perf/Makefile.perf3
-rw-r--r--tools/perf/arch/csky/Build1
-rw-r--r--tools/perf/arch/csky/Makefile3
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h100
-rw-r--r--tools/perf/arch/csky/util/Build2
-rw-r--r--tools/perf/arch/csky/util/dwarf-regs.c49
-rw-r--r--tools/perf/arch/csky/util/unwind-libdw.c77
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h26
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c44
-rw-r--r--tools/perf/bench/numa.c4
-rw-r--r--tools/perf/builtin-annotate.c4
-rw-r--r--tools/perf/builtin-inject.c4
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-list.c6
-rw-r--r--tools/perf/builtin-record.c292
-rw-r--r--tools/perf/builtin-report.c16
-rw-r--r--tools/perf/builtin-stat.c49
-rw-r--r--tools/perf/builtin-version.c2
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c196
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json179
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv5
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json1630
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json51
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json1640
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json161
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json148
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json304
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json980
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json260
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json175
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json33
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json252
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json172
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json250
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json256
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json6
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json666
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json15
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json680
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json126
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json268
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1328
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json144
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json2157
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json1121
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json39
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json274
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json786
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json234
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json751
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json304
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py340
-rw-r--r--tools/perf/tests/dso-data.c4
-rw-r--r--tools/perf/tests/make2
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh34
-rw-r--r--tools/perf/trace/beauty/renameat.c1
-rw-r--r--tools/perf/trace/strace/groups/string65
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate.c10
-rw-r--r--tools/perf/util/cloexec.c1
-rw-r--r--tools/perf/util/compress.h53
-rw-r--r--tools/perf/util/cs-etm.c14
-rw-r--r--tools/perf/util/data-convert-bt.c4
-rw-r--r--tools/perf/util/env.c2
-rw-r--r--tools/perf/util/env.h11
-rw-r--r--tools/perf/util/event.c1
-rw-r--r--tools/perf/util/event.h8
-rw-r--r--tools/perf/util/evlist.c8
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c13
-rw-r--r--tools/perf/util/evsel.h9
-rw-r--r--tools/perf/util/header.c53
-rw-r--r--tools/perf/util/header.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c31
-rw-r--r--tools/perf/util/machine.c3
-rw-r--r--tools/perf/util/mmap.c106
-rw-r--r--tools/perf/util/mmap.h17
-rw-r--r--tools/perf/util/parse-events.c85
-rw-r--r--tools/perf/util/parse-events.h6
-rw-r--r--tools/perf/util/parse-events.l12
-rw-r--r--tools/perf/util/parse-events.y12
-rw-r--r--tools/perf/util/parse-regs-options.c33
-rw-r--r--tools/perf/util/parse-regs-options.h3
-rw-r--r--tools/perf/util/perf_regs.c10
-rw-r--r--tools/perf/util/perf_regs.h3
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c2
-rw-r--r--tools/perf/util/session.c141
-rw-r--r--tools/perf/util/session.h14
-rw-r--r--tools/perf/util/stat-display.c125
-rw-r--r--tools/perf/util/stat.c8
-rw-r--r--tools/perf/util/thread.c3
-rw-r--r--tools/perf/util/tool.h2
-rw-r--r--tools/perf/util/trace-event-parse.c2
-rw-r--r--tools/perf/util/trace-event-read.c2
-rw-r--r--tools/perf/util/trace-event.c4
-rw-r--r--tools/perf/util/unwind-libunwind-local.c6
-rw-r--r--tools/perf/util/unwind-libunwind.c10
-rw-r--r--tools/perf/util/zstd.c111
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c48
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c8
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c8
-rw-r--r--tools/power/x86/turbostat/Makefile2
-rw-r--r--tools/power/x86/x86_energy_perf_policy/Makefile2
-rwxr-xr-xtools/testing/ktest/ktest.pl122
-rw-r--r--tools/testing/ktest/sample.conf24
-rw-r--r--tools/testing/nvdimm/Kbuild3
-rw-r--r--tools/testing/nvdimm/dax_pmem_compat_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_core_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_test.c8
-rw-r--r--tools/testing/nvdimm/test/nfit.c3
-rw-r--r--tools/testing/nvdimm/watermark.h3
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile98
-rw-r--r--tools/testing/selftests/bpf/.gitignore3
-rw-r--r--tools/testing/selftests/bpf/Makefile41
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h58
-rw-r--r--tools/testing/selftests/bpf/config11
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c4
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h24
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c629
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c296
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data.c157
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c80
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c79
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c106
-rw-r--r--tools/testing/selftests/bpf/progs/test_jhash.h70
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_edt.c109
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c536
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c129
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale2.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale3.c30
-rw-r--r--tools/testing/selftests/bpf/test_btf.c799
-rw-r--r--tools/testing/selftests/bpf/test_btf.h69
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh134
-rw-r--r--tools/testing/selftests/bpf/test_maps.c18
-rw-r--r--tools/testing/selftests/bpf/test_maps.h17
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py227
-rw-r--r--tools/testing/selftests/bpf/test_progs.c6
-rw-r--r--tools/testing/selftests/bpf/test_progs.h1
-rw-r--r--tools/testing/selftests/bpf/test_section_names.c5
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c115
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c1567
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_edt.sh99
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh290
-rwxr-xr-xtools/testing/selftests/bpf/test_tcp_check_syncookie.sh81
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c212
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c215
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c4
-rw-r--r--tools/testing/selftests/bpf/urandom_read.c15
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c159
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c25
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c1
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_value_access.c347
-rw-r--r--tools/testing/selftests/bpf/verifier/int_ptr.c160
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c19
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c195
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_dw.c9
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_tp_writable.c34
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c126
-rw-r--r--tools/testing/selftests/bpf/verifier/scale.c18
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c116
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c186
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c15
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c3
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c8
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c6
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c58
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h5
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c851
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c38
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh311
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh98
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh122
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh26
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_resources.sh3
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh5
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh28
-rw-r--r--tools/testing/selftests/exec/.gitignore3
-rw-r--r--tools/testing/selftests/exec/Makefile4
-rw-r--r--tools/testing/selftests/exec/recursion-depth.c67
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions12
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc85
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc28
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c1
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c1
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c1
-rw-r--r--tools/testing/selftests/ima/config4
-rwxr-xr-xtools/testing/selftests/ima/test_kexec_load.sh54
-rw-r--r--tools/testing/selftests/ipc/msgque.c11
-rw-r--r--tools/testing/selftests/kexec/Makefile (renamed from tools/testing/selftests/ima/Makefile)5
-rw-r--r--tools/testing/selftests/kexec/config3
-rwxr-xr-xtools/testing/selftests/kexec/kexec_common_lib.sh220
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_file_load.sh208
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_load.sh47
-rw-r--r--tools/testing/selftests/kselftest.h17
-rwxr-xr-xtools/testing/selftests/kselftest/prefix.pl23
-rw-r--r--tools/testing/selftests/kselftest/runner.sh86
-rw-r--r--tools/testing/selftests/kselftest_harness.h2
-rw-r--r--tools/testing/selftests/kselftest_module.h48
-rwxr-xr-xtools/testing/selftests/kselftest_module.sh84
-rw-r--r--tools/testing/selftests/kvm/.gitignore7
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c13
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h4
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c32
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_create_max_vcpus.c70
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c280
-rw-r--r--tools/testing/selftests/lib.mk96
-rw-r--r--tools/testing/selftests/lib/Makefile2
-rwxr-xr-xtools/testing/selftests/lib/bitmap.sh18
-rw-r--r--tools/testing/selftests/lib/config1
-rwxr-xr-xtools/testing/selftests/lib/prime_numbers.sh17
-rwxr-xr-xtools/testing/selftests/lib/printf.sh19
-rwxr-xr-xtools/testing/selftests/lib/strscpy.sh3
-rw-r--r--tools/testing/selftests/livepatch/Makefile3
-rwxr-xr-xtools/testing/selftests/media_tests/media_dev_allocator.sh85
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c1
-rw-r--r--tools/testing/selftests/net/config2
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh10
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh70
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh152
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh110
-rwxr-xr-xtools/testing/selftests/net/forwarding/loopback.sh94
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh107
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh59
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_vlan_modify.sh164
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh213
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh11
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rwxr-xr-xtools/testing/selftests/netfilter/bridge_brouter.sh146
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh146
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/export.h1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/kasan.h0
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h1
-rw-r--r--tools/testing/selftests/powerpc/harness.c6
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h2
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/signal/sigfuz.c325
l---------tools/testing/selftests/powerpc/vphn/vphn.c2
l---------tools/testing/selftests/powerpc/vphn/vphn.h2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configNR_CPUS.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh19
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh17
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh15
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh17
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh17
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h4
-rw-r--r--tools/testing/selftests/rseq/Makefile8
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h132
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h74
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h115
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h90
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h78
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h264
-rw-r--r--tools/testing/selftests/rseq/rseq.c55
-rw-r--r--tools/testing/selftests/rseq/rseq.h2
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh7
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c77
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c1
-rw-r--r--tools/testing/selftests/sync/sync_test.c1
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh161
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json903
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json20
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c1
-rw-r--r--tools/testing/selftests/x86/mpx-dig.c2
-rw-r--r--tools/virtio/ringtest/ptr_ring.c1
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/arm/arch_timer.c17
-rw-r--r--virt/kvm/arm/arm.c54
-rw-r--r--virt/kvm/arm/mmu.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c3
-rw-r--r--virt/kvm/arm/vgic/vgic.c21
-rw-r--r--virt/kvm/arm/vgic/vgic.h1
-rw-r--r--virt/kvm/kvm_main.c113
11722 files changed, 530463 insertions, 324379 deletions
diff --git a/.clang-format b/.clang-format
index f3923a1f9858..2ffd69afc1a8 100644
--- a/.clang-format
+++ b/.clang-format
@@ -387,14 +387,14 @@ ForEachMacros:
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- - 'rht_for_each_continue'
+ - 'rht_for_each_from'
- 'rht_for_each_entry'
- - 'rht_for_each_entry_continue'
+ - 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- - 'rht_for_each_entry_rcu_continue'
+ - 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_rcu'
- - 'rht_for_each_rcu_continue'
+ - 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
index cca6d870f7a5..a64d21913745 100644
--- a/.get_maintainer.ignore
+++ b/.get_maintainer.ignore
@@ -1 +1,2 @@
Christoph Hellwig <hch@lst.de>
+Marc Gonzalez <marc.w.gonzalez@free.fr>
diff --git a/.gitignore b/.gitignore
index a20ac26aa2f5..7587ef56b92d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -58,6 +58,7 @@ modules.builtin
/vmlinuz
/System.map
/Module.markers
+/modules.builtin.modinfo
#
# RPM spec file (make rpm-pkg)
@@ -80,20 +81,22 @@ modules.builtin
/tar-install/
#
-# git files that we don't want to ignore even if they are dot-files
+# We don't want to ignore the following even if they are dot-files
#
+!.clang-format
+!.cocciconfig
+!.get_maintainer.ignore
+!.gitattributes
!.gitignore
!.mailmap
-!.cocciconfig
-!.clang-format
#
# Generated include files
#
-include/config
-include/generated
-include/ksym
-arch/*/include/generated
+/include/config/
+/include/generated/
+/include/ksym/
+/arch/*/include/generated/
# stgit generated dirs
patches-*
@@ -129,7 +132,12 @@ signing_key.x509
x509.genkey
# Kconfig presets
-all.config
+/all.config
+/alldef.config
+/allmod.config
+/allno.config
+/allrandom.config
+/allyes.config
# Kdevelop4
*.kdev4
diff --git a/.mailmap b/.mailmap
index ae2bcad06f4b..07a777f9d687 100644
--- a/.mailmap
+++ b/.mailmap
@@ -16,6 +16,11 @@ Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksey Gorelov <aleksey_gorelov@phoenix.com>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@intel.com>
+Alex Shi <alex.shi@linux.alibaba.com> <alex.shi@linaro.org>
+Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
+Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
Al Viro <viro@ftp.linux.org.uk>
Al Viro <viro@zenIV.linux.org.uk>
Andi Shyti <andi@etezian.org> <andi.shyti@samsung.com>
@@ -46,6 +51,12 @@ Christoph Hellwig <hch@lst.de>
Christophe Ricard <christophe.ricard@gmail.com>
Corey Minyard <minyard@acm.org>
Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <dborkmann@redhat.com>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@iogearbox.net>
+Daniel Borkmann <daniel@iogearbox.net> <daniel.borkmann@tik.ee.ethz.ch>
+Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
+Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
David Brownell <david-b@pacbell.net>
David Woodhouse <dwmw2@shinybook.infradead.org>
Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
@@ -117,6 +128,8 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
+Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
+Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
@@ -189,6 +202,7 @@ Santosh Shilimkar <ssantosh@kernel.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
Sascha Hauer <s.hauer@pengutronix.de>
S.Çağlar Onur <caglar@pardus.org.tr>
+Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
Sebastian Reichel <sre@kernel.org> <sre@debian.org>
Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
@@ -207,6 +221,8 @@ Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
Thomas Pedersen <twp@codeaurora.org>
Tony Luck <tony.luck@intel.com>
+TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
+TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
diff --git a/Documentation/ABI/testing/sysfs-class-net-batman-adv b/Documentation/ABI/obsolete/sysfs-class-net-batman-adv
index 898106849e27..5bdbc8d40256 100644
--- a/Documentation/ABI/testing/sysfs-class-net-batman-adv
+++ b/Documentation/ABI/obsolete/sysfs-class-net-batman-adv
@@ -1,3 +1,5 @@
+This ABI is deprecated and will be removed after 2021. It is
+replaced with the batadv generic netlink family.
What: /sys/class/net/<iface>/batman-adv/elp_interval
Date: Feb 2014
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/obsolete/sysfs-class-net-mesh
index c2b956d44a95..04c1a2932507 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/obsolete/sysfs-class-net-mesh
@@ -1,3 +1,5 @@
+This ABI is deprecated and will be removed after 2021. It is
+replaced with the batadv generic netlink family.
What: /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
Date: May 2010
diff --git a/Documentation/ABI/stable/sysfs-bus-nvmem b/Documentation/ABI/stable/sysfs-bus-nvmem
index 5923ab4620c5..9ffba8576f7b 100644
--- a/Documentation/ABI/stable/sysfs-bus-nvmem
+++ b/Documentation/ABI/stable/sysfs-bus-nvmem
@@ -6,6 +6,8 @@ Description:
This file allows the user to read/write the raw NVMEM contents.
Permission to write to this file depends on the nvmem
provider configuration.
+ Note: This file is only present if CONFIG_NVMEM_SYSFS
+ is enabled
ex:
hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
index 826689dcc2e6..8e8d167eca31 100644
--- a/Documentation/ABI/stable/sysfs-bus-vmbus
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -81,7 +81,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/latency
Date: September. 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
-Description: Channel signaling latency
+Description: Channel signaling latency. This file is available only for
+ performance critical channels (storage, network, etc.) that use
+ the monitor page mechanism.
Users: Debugging tools
What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/out_mask
@@ -95,7 +97,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/pending
Date: September. 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
-Description: Channel interrupt pending state
+Description: Channel interrupt pending state. This file is available only for
+ performance critical channels (storage, network, etc.) that use
+ the monitor page mechanism.
Users: Debugging tools
What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/read_avail
@@ -137,7 +141,9 @@ What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/monitor_id
Date: January. 2018
KernelVersion: 4.16
Contact: Stephen Hemminger <sthemmin@microsoft.com>
-Description: Monitor bit associated with channel
+Description: Monitor bit associated with channel. This file is available only
+ for performance critical channels (storage, network, etc.) that
+ use the monitor page mechanism.
Users: Debugging tools and userspace drivers
What: /sys/bus/vmbus/devices/<UUID>/channels/<N>/ring
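A minimal shell sketch of how the conditional attributes above can be checked from userspace. The directory layout comes from the entries in this file; the glob pattern and the presence test are illustrative assumptions, not part of the patch.

# Sketch: list the vmbus channels that expose the monitor-page attributes.
# Per the descriptions above, "latency", "pending" and "monitor_id" are only
# present for performance critical channels that use the monitor page mechanism.
for ch in /sys/bus/vmbus/devices/*/channels/*; do
    [ -e "$ch/latency" ] && printf '%s: monitor page in use\n' "$ch"
done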
diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node
index 3e90e1f3bf0a..f7ce68fbd4b9 100644
--- a/Documentation/ABI/stable/sysfs-devices-node
+++ b/Documentation/ABI/stable/sysfs-devices-node
@@ -90,4 +90,89 @@ Date: December 2009
Contact: Lee Schermerhorn <lee.schermerhorn@hp.com>
Description:
The node's huge page size control/query attributes.
- See Documentation/admin-guide/mm/hugetlbpage.rst
\ No newline at end of file
+ See Documentation/admin-guide/mm/hugetlbpage.rst
+
+What: /sys/devices/system/node/nodeX/accessY/
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The node's relationship to other nodes for access class "Y".
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The directory containing symlinks to memory initiator
+ nodes that have class "Y" access to this target node's
+ memory. CPUs and other memory initiators in nodes not in
+ the list accessing this node's memory may have different
+ performance.
+
+What: /sys/devices/system/node/nodeX/accessY/targets/
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The directory containing symlinks to memory targets that
+ this initiator node has class "Y" access to.
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/read_bandwidth
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ This node's read bandwidth in MB/s when accessed from
+ nodes found in this access class's linked initiators.
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/read_latency
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ This node's read latency in nanoseconds when accessed
+ from nodes found in this access class's linked initiators.
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/write_bandwidth
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ This node's write bandwidth in MB/s when accessed from
+ nodes found in this access class's linked initiators.
+
+What: /sys/devices/system/node/nodeX/accessY/initiators/write_latency
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ This node's write latency in nanoseconds when accessed
+ from nodes found in this access class's linked initiators.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The directory containing attributes for the memory-side cache
+ level 'Y'.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/indexing
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The cache's associativity indexing: 0 for direct mapped,
+ non-zero if indexed.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/line_size
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The number of bytes accessed from the next cache level on a
+ cache miss.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/size
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The size of this memory side cache in bytes.
+
+What: /sys/devices/system/node/nodeX/memory_side_cache/indexY/write_policy
+Date: December 2018
+Contact: Keith Busch <keith.busch@intel.com>
+Description:
+ The cache write policy: 0 for write-back, 1 for write-through,
+ other or unknown.
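A minimal shell sketch for reading the access class attributes documented above. The paths follow the ABI entries in this file; node0 and access0 are assumptions, so substitute whatever nodes and access classes the platform actually exposes.

# Sketch: dump the initiator-relative performance attributes for one
# node/access class pair described above.
d=/sys/devices/system/node/node0/access0/initiators
for f in read_bandwidth read_latency write_bandwidth write_latency; do
    [ -r "$d/$f" ] && printf '%s: %s\n' "$f" "$(cat "$d/$f")"
done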
diff --git a/Documentation/ABI/testing/debugfs-wilco-ec b/Documentation/ABI/testing/debugfs-wilco-ec
index f814f112e213..73a5a66ddca6 100644
--- a/Documentation/ABI/testing/debugfs-wilco-ec
+++ b/Documentation/ABI/testing/debugfs-wilco-ec
@@ -1,23 +1,46 @@
+What: /sys/kernel/debug/wilco_ec/h1_gpio
+Date: April 2019
+KernelVersion: 5.2
+Description:
+ As part of Chrome OS's FAFT (Fully Automated Firmware Testing)
+ tests, we need to ensure that the H1 chip is properly setting
+ some GPIO lines. The h1_gpio attribute exposes the state
+ of the lines:
+ - ENTRY_TO_FACT_MODE in BIT(0)
+ - SPI_CHROME_SEL in BIT(1)
+
+ Output will be formatted with "0x%02x\n".
+
What: /sys/kernel/debug/wilco_ec/raw
Date: January 2019
KernelVersion: 5.1
Description:
Write and read raw mailbox commands to the EC.
- For writing:
- Bytes 0-1 indicate the message type:
- 00 F0 = Execute Legacy Command
- 00 F2 = Read/Write NVRAM Property
- Byte 2 provides the command code
- Bytes 3+ consist of the data passed in the request
+ You can write a hexadecimal sentence to raw, and that series of
+ bytes will be sent to the EC. Then, you can read the bytes of
+ response by reading from raw.
- At least three bytes are required, for the msg type and command,
- with additional bytes optional for additional data.
+ For writing, bytes 0-1 indicate the message type, one of enum
+ wilco_ec_msg_type. Bytes 2+ consist of the data passed in the
+ request, starting at MBOX[0].
+
+ At least three bytes are required for writing: two for the type
+ and at least one byte of data. Only the first
+ EC_MAILBOX_DATA_SIZE bytes of MBOX will be used.
Example:
// Request EC info type 3 (EC firmware build date)
- $ echo 00 f0 38 00 03 00 > raw
+ // Corresponds with sending type 0x00f0 with
+ // MBOX = [38, 00, 03, 00]
+ $ echo 00 f0 38 00 03 00 > /sys/kernel/debug/wilco_ec/raw
// View the result. The decoded ASCII result "12/21/18" is
// included after the raw hex.
- $ cat raw
- 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8...
+ // Corresponds with MBOX = [00, 00, 31, 32, 2f, 32, 31, 38, ...]
+ $ cat /sys/kernel/debug/wilco_ec/raw
+ 00 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 ..12/21/18.8...
+
+ Note that the first 32 bytes of the received MBOX[] will be
+ printed, even if some of the data is junk. It is up to you to
+ know how many of the first bytes of data are the actual
+ response.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
new file mode 100644
index 000000000000..566bd99fe0a5
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -0,0 +1,230 @@
+What: /sys/bus/counter/devices/counterX/countY/count
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count data of Count Y represented as a string.
+
+What: /sys/bus/counter/devices/counterX/countY/ceiling
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count value ceiling for Count Y. This is the upper limit for the
+ respective counter.
+
+What: /sys/bus/counter/devices/counterX/countY/floor
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count value floor for Count Y. This is the lower limit for the
+ respective counter.
+
+What: /sys/bus/counter/devices/counterX/countY/count_mode
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count mode for channel Y. The ceiling and floor values for
+ Count Y are used by the count mode where required. The following
+ count modes are available:
+
+ normal:
+ Counting is continuous in either direction.
+
+ range limit:
+ An upper or lower limit is set, mimicking limit switches
+ in the mechanical counterpart. The upper limit is set to
+ the Count Y ceiling value, while the lower limit is set
+ to the Count Y floor value. The counter freezes at
+ count = ceiling when counting up, and at count = floor
+ when counting down. At either of these limits, the
+ counting is resumed only when the count direction is
+ reversed.
+
+ non-recycle:
+ The counter is disabled whenever a counter overflow or
+ underflow takes place. The counter is re-enabled when a
+ new count value is loaded to the counter via a preset
+ operation or direct write.
+
+ modulo-n:
+ A count value boundary is set between the Count Y floor
+ value and the Count Y ceiling value. The counter is
+ reset to the Count Y floor value at count = ceiling when
+ counting up, while the counter is set to the Count Y
+ ceiling value at count = floor when counting down; the
+ counter does not freeze at the boundary points, but
+ counts continuously throughout.
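+
+ As a sketch, assuming a hypothetical counter0 device whose
+ count0 channel supports all four modes, the available modes
+ could be listed and one of them selected as follows:
+
+ $ cat /sys/bus/counter/devices/counter0/count0/count_mode_available
+ normal
+ range limit
+ non-recycle
+ modulo-n
+ $ echo "range limit" > /sys/bus/counter/devices/counter0/count0/count_mode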
+
+What: /sys/bus/counter/devices/counterX/countY/count_mode_available
+What: /sys/bus/counter/devices/counterX/countY/error_noise_available
+What: /sys/bus/counter/devices/counterX/countY/function_available
+What: /sys/bus/counter/devices/counterX/countY/signalZ_action_available
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ The discrete set of available values for the respective Count Y
+ configuration is listed in this file. Values are delimited by
+ newline characters.
+
+What: /sys/bus/counter/devices/counterX/countY/direction
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the count direction of Count
+ Y. Two count directions are available: forward and backward.
+
+ Some counter devices are able to determine the direction of
+ their counting. For example, quadrature encoding counters can
+ determine the direction of movement by evaluating the leading
+ phase of the respective A and B quadrature encoding signals.
+ This attribute exposes such count directions.
+
+What: /sys/bus/counter/devices/counterX/countY/enable
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Whether channel Y counter is enabled. Valid attribute values are
+ boolean.
+
+ This attribute is intended to serve as a pause/unpause mechanism
+ for Count Y. Suppose a counter device is used to count the total
+ movement of a conveyor belt: this attribute allows an operator
+ to temporarily pause the counter, service the conveyor belt,
+ and then finally unpause the counter to continue where it had
+ left off.
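+
+ For example, assuming a hypothetical counter0 device, an
+ operator could pause Count 0 before servicing and resume it
+ afterwards:
+
+ $ echo 0 > /sys/bus/counter/devices/counter0/count0/enable
+ $ echo 1 > /sys/bus/counter/devices/counter0/count0/enable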
+
+What: /sys/bus/counter/devices/counterX/countY/error_noise
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates whether excessive noise is
+ present at the channel Y counter inputs.
+
+What: /sys/bus/counter/devices/counterX/countY/function
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Count function mode of Count Y; count function evaluation is
+ triggered by conditions specified by the Count Y signalZ_action
+ attributes. The following count functions are available:
+
+ increase:
+ Accumulated count is incremented.
+
+ decrease:
+ Accumulated count is decremented.
+
+ pulse-direction:
+ Rising edges on signal A update the respective count.
+ The input level of signal B determines direction.
+
+ quadrature x1 a:
+ If direction is forward, rising edges on quadrature pair
+ signal A update the respective count; if the direction
+ is backward, falling edges on quadrature pair signal A
+ update the respective count. Quadrature encoding
+ determines the direction.
+
+ quadrature x1 b:
+ If direction is forward, rising edges on quadrature pair
+ signal B update the respective count; if the direction
+ is backward, falling edges on quadrature pair signal B
+ update the respective count. Quadrature encoding
+ determines the direction.
+
+ quadrature x2 a:
+ Any state transition on quadrature pair signal A updates
+ the respective count. Quadrature encoding determines the
+ direction.
+
+ quadrature x2 b:
+ Any state transition on quadrature pair signal B updates
+ the respective count. Quadrature encoding determines the
+ direction.
+
+ quadrature x4:
+ Any state transition on either quadrature pair signal
+ updates the respective count. Quadrature encoding
+ determines the direction.
+
+What: /sys/bus/counter/devices/counterX/countY/name
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the device-specific name of
+ Count Y. If possible, this should match the name of the
+ respective channel as it appears in the device datasheet.
+
+What: /sys/bus/counter/devices/counterX/countY/preset
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ If the counter device supports preset registers -- registers
+ used to load counter channels to a set count upon device-defined
+ preset operation trigger events -- the preset count for channel
+ Y is provided by this attribute.
+
+What: /sys/bus/counter/devices/counterX/countY/preset_enable
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Whether channel Y counter preset operation is enabled. Valid
+ attribute values are boolean.
+
+What: /sys/bus/counter/devices/counterX/countY/signalZ_action
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Action mode of Count Y for Signal Z. This attribute indicates
+ the condition of Signal Z that triggers the count function
+ evaluation for Count Y. The following action modes are
+ available:
+
+ none:
+ Signal does not trigger the count function. In
+ Pulse-Direction count function mode, this Signal is
+ evaluated as Direction.
+
+ rising edge:
+ Low state transitions to high state.
+
+ falling edge:
+ High state transitions to low state.
+
+ both edges:
+ Any state transition.
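+
+ For example, assuming a hypothetical counter0 device, the
+ action mode of Count 0 for Signal 1 could be queried and
+ changed as follows:
+
+ $ cat /sys/bus/counter/devices/counter0/count0/signal1_action
+ none
+ $ echo "rising edge" > /sys/bus/counter/devices/counter0/count0/signal1_action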
+
+What: /sys/bus/counter/devices/counterX/name
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the device-specific name of
+ the Counter. This should match the name of the device as it
+ appears in its respective datasheet.
+
+What: /sys/bus/counter/devices/counterX/num_counts
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the total number of Counts
+ belonging to the Counter.
+
+What: /sys/bus/counter/devices/counterX/num_signals
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the total number of Signals
+ belonging to the Counter.
+
+What: /sys/bus/counter/devices/counterX/signalY/signal
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Signal data of Signal Y represented as a string.
+
+What: /sys/bus/counter/devices/counterX/signalY/name
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read-only attribute that indicates the device-specific name of
+ Signal Y. If possible, this should match the name of the
+ respective signal as it appears in the device datasheet.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
new file mode 100644
index 000000000000..46b1f33b2fce
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
@@ -0,0 +1,36 @@
+What: /sys/bus/counter/devices/counterX/signalY/index_polarity
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Active level of index input Signal Y; irrelevant in
+ non-synchronous load mode.
+
+What: /sys/bus/counter/devices/counterX/signalY/index_polarity_available
+What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode_available
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ The discrete set of available values for the respective Signal Y
+ configuration is listed in this file.
+
+What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure the counter associated with Signal Y for
+ non-synchronous or synchronous load mode. Synchronous load mode
+ cannot be selected in non-quadrature (Pulse-Direction) clock
+ mode.
+
+ non-synchronous:
+ A logic low level is the active level at this index
+ input. The index function (as enabled via preset_enable)
+ is performed directly on the active level of the index
+ input.
+
+ synchronous:
+ Intended for interfacing with encoder Index output in
+ quadrature clock mode. The active level is configured
+ via index_polarity. The index function (as enabled via
+ preset_enable) is performed synchronously with the
+ quadrature clock on the active level of the index input.
diff --git a/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec b/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
new file mode 100644
index 000000000000..7d2e7b363467
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
@@ -0,0 +1,16 @@
+What: /sys/bus/counter/devices/counterX/countY/prescaler_available
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ The discrete set of available values for the respective Count Y
+ configuration is listed in this file. Values are delimited by
+ newline characters.
+
+What: /sys/bus/counter/devices/counterX/countY/prescaler
+KernelVersion: 5.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ Configure the prescaler value associated with Count Y.
+ On the FlexTimer, the counter clock source passes through a
+ prescaler (i.e. a counter). This acts like a clock
+ divider.
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x b/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x
new file mode 100644
index 000000000000..0b0de8cd0d13
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x
@@ -0,0 +1,20 @@
+What: /sys/bus/i2c/.../idle_state
+Date: January 2019
+KernelVersion: 5.2
+Contact: Robert Shearman <robert.shearman@att.com>
+Description:
+ This value exists only for mux devices. It can be
+ written to control the behaviour of the multiplexer
+ when idle. Possible values:
+ -2 - disconnect on idle, i.e. deselect the last used
+ channel, which is useful when there is a device
+ with an address that conflicts with another
+ device on another mux on the same parent bus.
+ -1 - leave the mux as-is, which is the optimal setting
+ in terms of I2C operations and is the default
+ mode.
+ 0..<nchans> - set the mux to a predetermined channel,
+ which is useful if there is one channel that is
+ used almost always, and you want to reduce the
+ latency for normal operations after rare
+ transactions on other channels.
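+
+ For example, to make a mux disconnect its last used channel
+ when idle (the device path below is hypothetical):
+
+ $ echo -2 > /sys/bus/i2c/devices/0-0070/idle_state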
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 864f8efd12e5..6aef7dbbde44 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1656,6 +1656,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_raw
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Raw counter device counts from channel Y. For quadrature
counters, multiplication by an available [Y]_scale results in
the counts of a single quadrature signal phase from channel Y.
@@ -1664,6 +1666,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_indexY_raw
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Raw counter device index value from channel Y. This attribute
provides an absolute positional reference (e.g. a pulse once per
revolution) which may be used to home positional systems as
@@ -1673,6 +1677,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_count_count_direction_available
KernelVersion: 4.12
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
A list of possible counting directions which are:
- "up" : counter device is increasing.
- "down": counter device is decreasing.
@@ -1681,6 +1687,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_direction
KernelVersion: 4.12
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Raw counter device counters direction for channel Y.
What: /sys/bus/iio/devices/iio:deviceX/in_phaseY_raw
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8 b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
index 7fac2c268d9a..bac3d0d48b7b 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
+++ b/Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
@@ -6,6 +6,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_index_synchronous_mode_available
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Discrete set of available values for the respective counter
configuration are listed in this file.
@@ -13,6 +15,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_count_mode
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Count mode for channel Y. Four count modes are available:
normal, range limit, non-recycle, and modulo-n. The preset value
for channel Y is used by the count mode where required.
@@ -47,6 +51,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_noise_error
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Read-only attribute that indicates whether excessive noise is
present at the channel Y count inputs in quadrature clock mode;
irrelevant in non-quadrature clock mode.
@@ -55,6 +61,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_preset
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
If the counter device supports preset registers, the preset
count for channel Y is provided by this attribute.
@@ -62,6 +70,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_quadrature_mode
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Configure channel Y counter for non-quadrature or quadrature
clock mode. Selecting non-quadrature clock mode will disable
synchronous load mode. In quadrature clock mode, the channel Y
@@ -83,6 +93,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_countY_set_to_preset_on_index
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Whether to set channel Y counter with channel Y preset value
when channel Y index input is active, or continuously count.
Valid attribute values are boolean.
@@ -91,6 +103,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_indexY_index_polarity
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Active level of channel Y index input; irrelevant in
non-synchronous load mode.
@@ -98,6 +112,8 @@ What: /sys/bus/iio/devices/iio:deviceX/in_indexY_synchronous_mode
KernelVersion: 4.10
Contact: linux-iio@vger.kernel.org
Description:
+ This interface is deprecated; please use the Counter subsystem.
+
Configure channel Y counter for non-synchronous or synchronous
load mode. Synchronous load mode cannot be selected in
non-quadrature clock mode.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933 b/Documentation/ABI/testing/sysfs-bus-iio-impedance-analyzer-ad5933
index 79c7e88c64cd..0e86747c67f8 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio-impedance-analyzer-ad5933
+++ b/Documentation/ABI/testing/sysfs-bus-iio-impedance-analyzer-ad5933
@@ -1,26 +1,31 @@
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_start
+What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency_start
+Date: March 2019
KernelVersion: 3.1.0
Contact: linux-iio@vger.kernel.org
Description:
Frequency sweep start frequency in Hz.
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_increment
+What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency_increment
+Date: March 2019
KernelVersion: 3.1.0
Contact: linux-iio@vger.kernel.org
Description:
Frequency increment in Hz (step size) between consecutive
frequency points along the sweep.
-What: /sys/bus/iio/devices/iio:deviceX/outY_freq_points
+What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency_points
+Date: March 2019
KernelVersion: 3.1.0
Contact: linux-iio@vger.kernel.org
Description:
Number of frequency points (steps) in the frequency sweep.
- This value, in conjunction with the outY_freq_start and the
- outY_freq_increment, determines the frequency sweep range
- for the sweep operation.
+ This value, in conjunction with the
+ out_altvoltageY_frequency_start and the
+ out_altvoltageY_frequency_increment, determines the frequency
+ sweep range for the sweep operation.
-What: /sys/bus/iio/devices/iio:deviceX/outY_settling_cycles
+What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_settling_cycles
+Date: March 2019
KernelVersion: 3.1.0
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-sps30 b/Documentation/ABI/testing/sysfs-bus-iio-sps30
index 143df8e89d08..06e1c272537b 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-sps30
+++ b/Documentation/ABI/testing/sysfs-bus-iio-sps30
@@ -1,6 +1,6 @@
What: /sys/bus/iio/devices/iio:deviceX/start_cleaning
Date: December 2018
-KernelVersion: 4.22
+KernelVersion: 5.0
Contact: linux-iio@vger.kernel.org
Description:
Writing 1 starts sensor self cleaning. Internal fan accelerates
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-temperature-max31856 b/Documentation/ABI/testing/sysfs-bus-iio-temperature-max31856
new file mode 100644
index 000000000000..3b3509a3ef2f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-temperature-max31856
@@ -0,0 +1,24 @@
+What: /sys/bus/iio/devices/iio:deviceX/fault_oc
+KernelVersion: 5.1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Open-circuit fault. Indicates the detection of open-circuit
+ faults, such as those caused by broken thermocouple wires.
+ Reading returns either '1' or '0'.
+ '1' = An open circuit, such as broken thermocouple wires,
+ has been detected.
+ '0' = No open circuit or broken thermocouple wires are detected.
+
+What: /sys/bus/iio/devices/iio:deviceX/fault_ovuv
+KernelVersion: 5.1
+Contact: linux-iio@vger.kernel.org
+Description:
+ Overvoltage or Undervoltage Input Fault. The internal circuitry
+ is protected from excessive voltages applied to the thermocouple
+ cables by integrated MOSFETs at the T+ and T- inputs, and the
+ BIAS output. These MOSFETs turn off when the input voltage is
+ negative or greater than VDD.
+ Reading returns either '1' or '0'.
+ '1' = The input voltage is negative or greater than VDD.
+ '0' = The input voltage is positive and less than VDD (normal
+ state).
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
index b940c5d91cf7..f54ae244f3f1 100644
--- a/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
+++ b/Documentation/ABI/testing/sysfs-bus-intel_th-devices-msc
@@ -30,4 +30,12 @@ Description: (RW) Configure MSC buffer size for "single" or "multi" modes.
there are no active users and tracing is not enabled) and then
allocates a new one.
+What: /sys/bus/intel_th/devices/<intel_th_id>-msc<msc-id>/win_switch
+Date: May 2019
+KernelVersion: 5.2
+Contact: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Description: (RW) Trigger a window switch for the MSC's buffer, in
+ multi-window mode. In "multi" mode, accepts writes of "1", thereby
+ triggering a window switch for the buffer. Returns an error in any
+ other operating mode or on attempts to write something other than "1".
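+
+ For example, with a hypothetical device 0-msc0 operating in
+ "multi" mode, a window switch could be triggered with:
+
+ $ echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch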
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 17d7444a2397..a92d844f806e 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -65,3 +65,18 @@ Description: Display the ME firmware version.
<platform>:<major>.<minor>.<milestone>.<build_no>.
There can be up to three such blocks for different
FW components.
+
+What: /sys/class/mei/meiN/dev_state
+Date: Mar 2019
+KernelVersion: 5.1
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display the ME device state.
+
+ The device state can have following values:
+ INITIALIZING
+ INIT_CLIENTS
+ ENABLED
+ RESETTING
+ DISABLED
+ POWER_DOWN
+ POWER_UP
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index 5e23e22dce1b..b77e30b9014e 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -114,15 +114,60 @@ Description:
Access: Read
Valid values: Represented in microamps
+What: /sys/class/power_supply/<supply_name>/charge_control_limit
+Date: Oct 2012
+Contact: linux-pm@vger.kernel.org
+Description:
+ Maximum allowable charging current. Used for charge rate
+ throttling for thermal cooling or improving battery health.
+
+ Access: Read, Write
+ Valid values: Represented in microamps
+
+What: /sys/class/power_supply/<supply_name>/charge_control_limit_max
+Date: Oct 2012
+Contact: linux-pm@vger.kernel.org
+Description:
+ Maximum legal value for the charge_control_limit property.
+
+ Access: Read
+ Valid values: Represented in microamps
+
+What: /sys/class/power_supply/<supply_name>/charge_control_start_threshold
+Date: April 2019
+Contact: linux-pm@vger.kernel.org
+Description:
+ Represents a battery percentage level, below which charging will
+ begin.
+
+ Access: Read, Write
+ Valid values: 0 - 100 (percent)
+
+What: /sys/class/power_supply/<supply_name>/charge_control_end_threshold
+Date: April 2019
+Contact: linux-pm@vger.kernel.org
+Description:
+ Represents a battery percentage level, above which charging will
+ stop.
+
+ Access: Read, Write
+ Valid values: 0 - 100 (percent)
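+
+ For example, to keep a battery (here the hypothetical BAT0)
+ between 40% and 80% charge:
+
+ $ echo 40 > /sys/class/power_supply/BAT0/charge_control_start_threshold
+ $ echo 80 > /sys/class/power_supply/BAT0/charge_control_end_threshold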
+
What: /sys/class/power_supply/<supply_name>/charge_type
Date: July 2009
Contact: linux-pm@vger.kernel.org
Description:
Represents the type of charging currently being applied to the
- battery.
+ battery. "Trickle", "Fast", and "Standard" all mean different
+ charging speeds. "Adaptive" means that the charger uses some
+ algorithm to adjust the charge rate dynamically, without
+ any user configuration required. "Custom" means that the charger
+ uses the charge_control_* properties as configuration for some
+ different algorithm.
- Access: Read
- Valid values: "Unknown", "N/A", "Trickle", "Fast"
+ Access: Read, Write
+ Valid values: "Unknown", "N/A", "Trickle", "Fast", "Standard",
+ "Adaptive", "Custom"
What: /sys/class/power_supply/<supply_name>/charge_term_current
Date: July 2014
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-ipmi b/Documentation/ABI/testing/sysfs-devices-platform-ipmi
index 2a781e7513b7..afb5db856e1c 100644
--- a/Documentation/ABI/testing/sysfs-devices-platform-ipmi
+++ b/Documentation/ABI/testing/sysfs-devices-platform-ipmi
@@ -212,7 +212,7 @@ Description:
Messages may be broken into parts if
they are long.
- receieved_messages: (RO) Number of message responses
+ received_messages: (RO) Number of message responses
received.
received_message_parts: (RO) Number of message fragments
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 9605dbd4b5b5..1528239f69b2 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -484,6 +484,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
+ /sys/devices/system/cpu/vulnerabilities/mds
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
@@ -496,8 +497,7 @@ Description: Information about CPU vulnerabilities
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
- Details about the l1tf file can be found in
- Documentation/admin-guide/l1tf.rst
+ See also: Documentation/admin-guide/hw-vuln/index.rst
What: /sys/devices/system/cpu/smt
/sys/devices/system/cpu/smt/active
@@ -511,10 +511,30 @@ Description: Control Symetric Multi Threading (SMT)
control: Read/write interface to control SMT. Possible
values:
- "on" SMT is enabled
- "off" SMT is disabled
- "forceoff" SMT is force disabled. Cannot be changed.
- "notsupported" SMT is not supported by the CPU
+ "on" SMT is enabled
+ "off" SMT is disabled
+ "forceoff" SMT is force disabled. Cannot be changed.
+ "notsupported" SMT is not supported by the CPU
+ "notimplemented" SMT runtime toggling is not
+ implemented for the architecture
If control status is "forceoff" or "notsupported" writes
are rejected.
+
+What: /sys/devices/system/cpu/cpu#/power/energy_perf_bias
+Date: March 2019
+Contact: linux-pm@vger.kernel.org
+Description: Intel Energy and Performance Bias Hint (EPB)
+
+ EPB for the given CPU on a sliding scale of 0 - 15, where a
+ value of 0 corresponds to a hint preference for highest
+ performance and a value of 15 corresponds to the maximum energy
+ savings.
+
+ In order to change the EPB value for the CPU, write either
+ a number on the 0 - 15 sliding scale above, or one of the
+ strings "performance", "balance-performance", "normal",
+ "balance-power", or "power" (each of which maps to a
+ representative value on that scale) to this attribute.
+
+ This attribute is present for all online CPUs supporting the
+ Intel EPB feature.
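+
+ For example, the EPB of cpu0 could be inspected and biased
+ towards energy saving as follows (the value read back is
+ illustrative only):
+
+ $ cat /sys/devices/system/cpu/cpu0/power/energy_perf_bias
+ 6
+ $ echo balance-power > /sys/devices/system/cpu/cpu0/power/energy_perf_bias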
diff --git a/Documentation/ABI/testing/sysfs-driver-ucsi-ccg b/Documentation/ABI/testing/sysfs-driver-ucsi-ccg
new file mode 100644
index 000000000000..45cf62ad89e9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-ucsi-ccg
@@ -0,0 +1,6 @@
+What: /sys/bus/i2c/drivers/ucsi_ccg/.../do_flash
+Date: May 2019
+Contact: Ajay Gupta <ajayg@nvidia.com>
+Description:
+ Tell the driver for Cypress CCGx Type-C controller to attempt
+ firmware upgrade by writing [Yy1] to the file.
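+
+ For example (with <i2c-device> standing in for the CCGx
+ controller's I2C device name):
+
+ $ echo 1 > /sys/bus/i2c/drivers/ucsi_ccg/<i2c-device>/do_flash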
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index 85db352f68f9..bea7bd5a1d5f 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -45,7 +45,7 @@ Description:
use this feature without a clearance from a patch
distributor. Removal (rmmod) of patch modules is permanently
disabled when the feature is used. See
- Documentation/livepatch/livepatch.txt for more information.
+ Documentation/livepatch/livepatch.rst for more information.
What: /sys/kernel/livepatch/<patch>/<object>
Date: Nov 2014
diff --git a/Documentation/ABI/testing/usb-uevent b/Documentation/ABI/testing/usb-uevent
new file mode 100644
index 000000000000..d35c3cad892c
--- /dev/null
+++ b/Documentation/ABI/testing/usb-uevent
@@ -0,0 +1,27 @@
+What: Raise a uevent when a USB Host Controller has died
+Date: 2019-04-17
+KernelVersion: 5.2
+Contact: linux-usb@vger.kernel.org
+Description: When the USB Host Controller has entered a state where it is no
+ longer functional, a uevent will be raised. The uevent will
+ contain ACTION=offline and ERROR=DEAD.
+
+ Here is an example taken using udevadm monitor -p:
+
+ KERNEL[130.428945] offline /devices/pci0000:00/0000:00:10.0/usb2 (usb)
+ ACTION=offline
+ BUSNUM=002
+ DEVNAME=/dev/bus/usb/002/001
+ DEVNUM=001
+ DEVPATH=/devices/pci0000:00/0000:00:10.0/usb2
+ DEVTYPE=usb_device
+ DRIVER=usb
+ ERROR=DEAD
+ MAJOR=189
+ MINOR=128
+ PRODUCT=1d6b/2/414
+ SEQNUM=2168
+ SUBSYSTEM=usb
+ TYPE=9/0/1
+
+Users: chromium-os-dev@chromium.org
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 1a721d0f35c8..cb712a02f59f 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -147,7 +147,7 @@ networking subsystems make sure that the buffers they use are valid
for you to DMA from/to.
DMA addressing capabilities
-==========================
+===========================
By default, the kernel assumes that your device can address 32-bits of DMA
addressing. For a 64-bit capable device, this needs to be increased, and for
@@ -365,13 +365,12 @@ __get_free_pages() (but takes size instead of a page order). If your
driver needs regions sized smaller than a page, you may prefer using
the dma_pool interface, described below.
-The consistent DMA mapping interfaces, for non-NULL dev, will by
-default return a DMA address which is 32-bit addressable. Even if the
-device indicates (via DMA mask) that it may address the upper 32-bits,
-consistent allocation will only return > 32-bit addresses for DMA if
-the consistent DMA mask has been explicitly changed via
-dma_set_coherent_mask(). This is true of the dma_pool interface as
-well.
+The consistent DMA mapping interfaces will by default return a DMA address
+which is 32-bit addressable. Even if the device indicates (via the DMA mask)
+that it may address the upper 32-bits, consistent allocation will only
+return > 32-bit addresses for DMA if the consistent DMA mask has been
+explicitly changed via dma_set_coherent_mask(). This is true of the
+dma_pool interface as well.
dma_alloc_coherent() returns two values: the virtual address which you
can use to access it from the CPU and dma_handle which you pass to the
diff --git a/Documentation/Makefile b/Documentation/Makefile
index 9786957c6a35..e889e7cb8511 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -28,8 +28,13 @@ ifeq ($(HAVE_SPHINX),0)
else # HAVE_SPHINX
-# User-friendly check for pdflatex
+# User-friendly check for pdflatex and latexmk
HAVE_PDFLATEX := $(shell if which $(PDFLATEX) >/dev/null 2>&1; then echo 1; else echo 0; fi)
+HAVE_LATEXMK := $(shell if which latexmk >/dev/null 2>&1; then echo 1; else echo 0; fi)
+
+ifeq ($(HAVE_LATEXMK),1)
+ PDFLATEX := latexmk -$(PDFLATEX)
+endif #HAVE_LATEXMK
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
@@ -82,7 +87,7 @@ pdfdocs:
else # HAVE_PDFLATEX
pdfdocs: latexdocs
- $(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX=$(PDFLATEX) LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex || exit;)
+ $(foreach var,$(SPHINXDIRS), $(MAKE) PDFLATEX="$(PDFLATEX)" LATEXOPTS="$(LATEXOPTS)" -C $(BUILDDIR)/$(var)/latex || exit;)
endif # HAVE_PDFLATEX
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 18f179807563..c30c1957c7e6 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -155,8 +155,7 @@ keeping lock contention under control at all tree levels regardless
of the level of loading on the system.
</p><p>RCU updaters wait for normal grace periods by registering
-RCU callbacks, either directly via <tt>call_rcu()</tt> and
-friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>),
+RCU callbacks, either directly via <tt>call_rcu()</tt>
or indirectly via <tt>synchronize_rcu()</tt> and friends.
RCU callbacks are represented by <tt>rcu_head</tt> structures,
which are queued on <tt>rcu_data</tt> structures while they are
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index 19e7a5fb6b73..57300db4b5ff 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -56,6 +56,7 @@ sections.
RCU-preempt Expedited Grace Periods</a></h2>
<p>
+<tt>CONFIG_PREEMPT=y</tt> kernels implement RCU-preempt.
The overall flow of the handling of a given CPU by an RCU-preempt
expedited grace period is shown in the following diagram:
@@ -139,6 +140,7 @@ or offline, among other things.
RCU-sched Expedited Grace Periods</a></h2>
<p>
+<tt>CONFIG_PREEMPT=n</tt> kernels implement RCU-sched.
The overall flow of the handling of a given CPU by an RCU-sched
expedited grace period is shown in the following diagram:
@@ -146,7 +148,7 @@ expedited grace period is shown in the following diagram:
<p>
As with RCU-preempt, RCU-sched's
-<tt>synchronize_sched_expedited()</tt> ignores offline and
+<tt>synchronize_rcu_expedited()</tt> ignores offline and
idle CPUs, again because they are in remotely detectable
quiescent states.
However, because the
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8d21af02b1f0..c64f8d26609f 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -34,12 +34,11 @@ Similarly, any code that happens before the beginning of a given RCU grace
period is guaranteed to see the effects of all accesses following the end
of that grace period that are within RCU read-side critical sections.
-<p>This guarantee is particularly pervasive for <tt>synchronize_sched()</tt>,
-for which RCU-sched read-side critical sections include any region
+<p>Note well that RCU-sched read-side critical sections include any region
of code for which preemption is disabled.
Given that each individual machine instruction can be thought of as
an extremely small region of preemption-disabled code, one can think of
-<tt>synchronize_sched()</tt> as <tt>smp_mb()</tt> on steroids.
+<tt>synchronize_rcu()</tt> as <tt>smp_mb()</tt> on steroids.
<p>RCU updaters use this guarantee by splitting their updates into
two phases, one of which is executed before the grace period and
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
index 687777f83b23..881353fd5bff 100644
--- a/Documentation/RCU/NMI-RCU.txt
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -81,18 +81,19 @@ currently executing on some other CPU. We therefore cannot free
up any data structures used by the old NMI handler until execution
of it completes on all other CPUs.
-One way to accomplish this is via synchronize_sched(), perhaps as
+One way to accomplish this is via synchronize_rcu(), perhaps as
follows:
unset_nmi_callback();
- synchronize_sched();
+ synchronize_rcu();
kfree(my_nmi_data);
-This works because synchronize_sched() blocks until all CPUs complete
-any preemption-disabled segments of code that they were executing.
-Since NMI handlers disable preemption, synchronize_sched() is guaranteed
+This works because (as of v4.20) synchronize_rcu() blocks until all
+CPUs complete any preemption-disabled segments of code that they were
+executing.
+Since NMI handlers disable preemption, synchronize_rcu() is guaranteed
not to return until all ongoing NMI handlers exit. It is therefore safe
-to free up the handler's data as soon as synchronize_sched() returns.
+to free up the handler's data as soon as synchronize_rcu() returns.
Important note: for this to work, the architecture in question must
invoke nmi_enter() and nmi_exit() on NMI entry and exit, respectively.
diff --git a/Documentation/RCU/UP.txt b/Documentation/RCU/UP.txt
index 90ec5341ee98..53bde717017b 100644
--- a/Documentation/RCU/UP.txt
+++ b/Documentation/RCU/UP.txt
@@ -86,10 +86,8 @@ even on a UP system. So do not do it! Even on a UP system, the RCU
infrastructure -must- respect grace periods, and -must- invoke callbacks
from a known environment in which no locks are held.
-It -is- safe for synchronize_sched() and synchronize_rcu_bh() to return
-immediately on an UP system. It is also safe for synchronize_rcu()
-to return immediately on UP systems, except when running preemptable
-RCU.
+Note that it -is- safe for synchronize_rcu() to return immediately on
+UP systems, including !PREEMPT SMP builds running on UP systems.
Quick Quiz #3: Why can't synchronize_rcu() return immediately on
UP systems running preemptable RCU?
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 6f469864d9f5..e98ff261a438 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -182,16 +182,13 @@ over a rather long period of time, but improvements are always welcome!
when publicizing a pointer to a structure that can
be traversed by an RCU read-side critical section.
-5. If call_rcu(), or a related primitive such as call_rcu_bh(),
- call_rcu_sched(), or call_srcu() is used, the callback function
- will be called from softirq context. In particular, it cannot
- block.
+5. If call_rcu() or call_srcu() is used, the callback function will
+ be called from softirq context. In particular, it cannot block.
-6. Since synchronize_rcu() can block, it cannot be called from
- any sort of irq context. The same rule applies for
- synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
- synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(),
- synchronize_sched_expedite(), and synchronize_srcu_expedited().
+6. Since synchronize_rcu() can block, it cannot be called
+ from any sort of irq context. The same rule applies
+ for synchronize_srcu(), synchronize_rcu_expedited(), and
+ synchronize_srcu_expedited().
The expedited forms of these primitives have the same semantics
as the non-expedited forms, but expediting is both expensive and
@@ -212,20 +209,20 @@ over a rather long period of time, but improvements are always welcome!
of the system, especially to real-time workloads running on
the rest of the system.
-7. If the updater uses call_rcu() or synchronize_rcu(), then the
- corresponding readers must use rcu_read_lock() and
- rcu_read_unlock(). If the updater uses call_rcu_bh() or
- synchronize_rcu_bh(), then the corresponding readers must
- use rcu_read_lock_bh() and rcu_read_unlock_bh(). If the
- updater uses call_rcu_sched() or synchronize_sched(), then
- the corresponding readers must disable preemption, possibly
- by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu() or call_srcu(), then
- the corresponding readers must use srcu_read_lock() and
+7. As of v4.20, a given kernel implements only one RCU flavor,
+ which is RCU-sched for PREEMPT=n and RCU-preempt for PREEMPT=y.
+ If the updater uses call_rcu() or synchronize_rcu(),
+ then the corresponding readers may use rcu_read_lock() and
+ rcu_read_unlock(), rcu_read_lock_bh() and rcu_read_unlock_bh(),
+ or any pair of primitives that disables and re-enables preemption,
+ for example, rcu_read_lock_sched() and rcu_read_unlock_sched().
+ If the updater uses synchronize_srcu() or call_srcu(),
+ then the corresponding readers must use srcu_read_lock() and
srcu_read_unlock(), and with the same srcu_struct. The rules for
the expedited primitives are the same as for their non-expedited
counterparts. Mixing things up will result in confusion and
- broken kernels.
+ broken kernels, and has even resulted in an exploitable security
+ issue.
One exception to this rule: rcu_read_lock() and rcu_read_unlock()
may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -288,8 +285,7 @@ over a rather long period of time, but improvements are always welcome!
d. Periodically invoke synchronize_rcu(), permitting a limited
number of updates per grace period.
- The same cautions apply to call_rcu_bh(), call_rcu_sched(),
- call_srcu(), and kfree_rcu().
+ The same cautions apply to call_srcu() and kfree_rcu().
Note that although these primitives do take action to avoid memory
exhaustion when any given CPU has too many callbacks, a determined
@@ -322,7 +318,7 @@ over a rather long period of time, but improvements are always welcome!
11. Any lock acquired by an RCU callback must be acquired elsewhere
with softirq disabled, e.g., via spin_lock_irqsave(),
- spin_lock_bh(), etc. Failing to disable irq on a given
+ spin_lock_bh(), etc. Failing to disable softirq on a given
acquisition of that lock will result in deadlock as soon as
the RCU softirq handler happens to run your RCU callback while
interrupting that acquisition's critical section.
@@ -335,13 +331,16 @@ over a rather long period of time, but improvements are always welcome!
must use whatever locking or other synchronization is required
to safely access and/or modify that data structure.
- RCU callbacks are -usually- executed on the same CPU that executed
- the corresponding call_rcu(), call_rcu_bh(), or call_rcu_sched(),
- but are by -no- means guaranteed to be. For example, if a given
- CPU goes offline while having an RCU callback pending, then that
- RCU callback will execute on some surviving CPU. (If this was
- not the case, a self-spawning RCU callback would prevent the
- victim CPU from ever going offline.)
+ Do not assume that RCU callbacks will be executed on the same
+ CPU that executed the corresponding call_rcu() or call_srcu().
+ For example, if a given CPU goes offline while having an RCU
+ callback pending, then that RCU callback will execute on some
+ surviving CPU. (If this was not the case, a self-spawning RCU
+ callback would prevent the victim CPU from ever going offline.)
+ Furthermore, CPUs designated by rcu_nocbs= might well -always-
+ have their RCU callbacks executed on some other CPUs; in fact,
+ for some real-time workloads, this is the whole point of using
+ the rcu_nocbs= kernel boot parameter.
13. Unlike other forms of RCU, it -is- permissible to block in an
SRCU read-side critical section (demarked by srcu_read_lock()
@@ -381,11 +380,11 @@ over a rather long period of time, but improvements are always welcome!
SRCU's expedited primitive (synchronize_srcu_expedited())
never sends IPIs to other CPUs, so it is easier on
- real-time workloads than is synchronize_rcu_expedited(),
- synchronize_rcu_bh_expedited() or synchronize_sched_expedited().
+ real-time workloads than is synchronize_rcu_expedited().
- Note that rcu_dereference() and rcu_assign_pointer() relate to
- SRCU just as they do to other forms of RCU.
+ Note that rcu_assign_pointer() relates to SRCU just as it does to
+ other forms of RCU, but instead of rcu_dereference() you should
+ use srcu_dereference() in order to avoid lockdep splats.
14. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before
@@ -405,6 +404,9 @@ over a rather long period of time, but improvements are always welcome!
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
+ For SRCU readers, you can use smp_mb__after_srcu_read_unlock()
+ immediately after an srcu_read_unlock() to get a full barrier.
+
16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
__rcu sparse checks to validate your RCU code. These can help
find problems as follows:
@@ -428,22 +430,19 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are
otherwise extremely difficult to spot.
-17. If you register a callback using call_rcu(), call_rcu_bh(),
- call_rcu_sched(), or call_srcu(), and pass in a function defined
- within a loadable module, then it in necessary to wait for
- all pending callbacks to be invoked after the last invocation
- and before unloading that module. Note that it is absolutely
- -not- sufficient to wait for a grace period! The current (say)
- synchronize_rcu() implementation waits only for all previous
- callbacks registered on the CPU that synchronize_rcu() is running
- on, but it is -not- guaranteed to wait for callbacks registered
- on other CPUs.
+17. If you register a callback using call_rcu() or call_srcu(), and
+ pass in a function defined within a loadable module, then it in
+ necessary to wait for all pending callbacks to be invoked after
+ the last invocation and before unloading that module. Note that
+ it is absolutely -not- sufficient to wait for a grace period!
+ The current (say) synchronize_rcu() implementation is -not-
+ guaranteed to wait for callbacks registered on other CPUs.
+ Or even on the current CPU if that CPU recently went offline
+ and came back online.
You instead need to use one of the barrier functions:
o call_rcu() -> rcu_barrier()
- o call_rcu_bh() -> rcu_barrier()
- o call_rcu_sched() -> rcu_barrier()
o call_srcu() -> srcu_barrier()
However, these barrier functions are absolutely -not- guaranteed
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 721b3e426515..c818cf65c5a9 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -52,10 +52,10 @@ o If I am running on a uniprocessor kernel, which can only do one
o How can I see where RCU is currently used in the Linux kernel?
Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu",
- "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh",
- "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu",
- "synchronize_net", "synchronize_srcu", and the other RCU
- primitives. Or grab one of the cscope databases from:
+ "rcu_read_lock_bh", "rcu_read_unlock_bh", "srcu_read_lock",
+ "srcu_read_unlock", "synchronize_rcu", "synchronize_net",
+ "synchronize_srcu", and the other RCU primitives. Or grab one
+ of the cscope databases from:
http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index ab96227bad42..bf699e8cfc75 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -351,3 +351,106 @@ garbage values.
In short, rcu_dereference() is -not- optional when you are going to
dereference the resulting pointer.
+
+
+WHICH MEMBER OF THE rcu_dereference() FAMILY SHOULD YOU USE?
+
+First, please avoid using rcu_dereference_raw() and also please avoid
+using rcu_dereference_check() and rcu_dereference_protected() with a
+second argument with a constant value of 1 (or true, for that matter).
+With that caution out of the way, here is some guidance for which
+member of the rcu_dereference() family to use in various situations:
+
+1. If the access needs to be within an RCU read-side critical
+ section, use rcu_dereference(). With the new consolidated
+ RCU flavors, an RCU read-side critical section is entered
+ using rcu_read_lock(), anything that disables bottom halves,
+ anything that disables interrupts, or anything that disables
+ preemption.
+
+2. If the access might be within an RCU read-side critical section
+ on the one hand, or protected by (say) my_lock on the other,
+ use rcu_dereference_check(), for example:
+
+ p1 = rcu_dereference_check(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock));
+
+
+3. If the access might be within an RCU read-side critical section
+ on the one hand, or protected by either my_lock or your_lock on
+ the other, again use rcu_dereference_check(), for example:
+
+ p1 = rcu_dereference_check(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock) ||
+ lockdep_is_held(&your_lock));
+
+4. If the access is on the update side, so that it is always protected
+ by my_lock, use rcu_dereference_protected():
+
+ p1 = rcu_dereference_protected(p->rcu_protected_pointer,
+ lockdep_is_held(&my_lock));
+
+ This can be extended to handle multiple locks as in #3 above,
+ and both can be extended to check other conditions as well.
+
+5. If the protection is supplied by the caller, and is thus unknown
+ to this code, that is the rare case when rcu_dereference_raw()
+ is appropriate. In addition, rcu_dereference_raw() might be
+ appropriate when the lockdep expression would be excessively
+ complex, except that a better approach in that case might be to
+ take a long hard look at your synchronization design. Still,
+ there are data-locking cases where any one of a very large number
+ of locks or reference counters suffices to protect the pointer,
+ so rcu_dereference_raw() does have its place.
+
+ However, its place is probably quite a bit smaller than one
+ might expect given the number of uses in the current kernel.
+ Ditto for its synonym, rcu_dereference_check( ... , 1), and
+ its close relative, rcu_dereference_protected(... , 1).
+
+
+SPARSE CHECKING OF RCU-PROTECTED POINTERS
+
+The sparse static-analysis tool checks for direct access to RCU-protected
+pointers, which can result in "interesting" bugs due to compiler
+optimizations involving invented loads and perhaps also load tearing.
+For example, suppose someone mistakenly does something like this:
+
+ p = q->rcu_protected_pointer;
+ do_something_with(p->a);
+ do_something_else_with(p->b);
+
+If register pressure is high, the compiler might optimize "p" out
+of existence, transforming the code to something like this:
+
+ do_something_with(q->rcu_protected_pointer->a);
+ do_something_else_with(q->rcu_protected_pointer->b);
+
+This could fatally disappoint your code if q->rcu_protected_pointer
+changed in the meantime. Nor is this a theoretical problem: Exactly
+this sort of bug cost Paul E. McKenney (and several of his innocent
+colleagues) a three-day weekend back in the early 1990s.
+
+Load tearing could of course result in dereferencing a mashup of a pair
+of pointers, which also might fatally disappoint your code.
+
+These problems could have been avoided simply by making the code instead
+read as follows:
+
+ p = rcu_dereference(q->rcu_protected_pointer);
+ do_something_with(p->a);
+ do_something_else_with(p->b);
+
+Unfortunately, these sorts of bugs can be extremely hard to spot during
+review. This is where the sparse tool comes into play, along with the
+"__rcu" marker. If you mark a pointer declaration, whether in a structure
+or as a formal parameter, with "__rcu", which tells sparse to complain if
+this pointer is accessed directly. It will also cause sparse to complain
+if a pointer not marked with "__rcu" is accessed using rcu_dereference()
+and friends. For example, ->rcu_protected_pointer might be declared as
+follows:
+
+ struct foo __rcu *rcu_protected_pointer;
+
+Use of "__rcu" is opt-in. If you choose not to use it, then you should
+ignore the sparse warnings.
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 5d7759071a3e..a2782df69732 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -83,16 +83,15 @@ Pseudo-code using rcu_barrier() is as follows:
2. Execute rcu_barrier().
3. Allow the module to be unloaded.
-There are also rcu_barrier_bh(), rcu_barrier_sched(), and srcu_barrier()
-functions for the other flavors of RCU, and you of course must match
-the flavor of rcu_barrier() with that of call_rcu(). If your module
-uses multiple flavors of call_rcu(), then it must also use multiple
+There is also an srcu_barrier() function for SRCU, and you of course
+must match the flavor of rcu_barrier() with that of call_rcu(). If your
+module uses multiple flavors of call_rcu(), then it must also use multiple
flavors of rcu_barrier() when unloading that module. For example, if
-it uses call_rcu_bh(), call_srcu() on srcu_struct_1, and call_srcu() on
+it uses call_rcu(), call_srcu() on srcu_struct_1, and call_srcu() on
srcu_struct_2(), then the following three lines of code will be required
when unloading:
- 1 rcu_barrier_bh();
+ 1 rcu_barrier();
2 srcu_barrier(&srcu_struct_1);
3 srcu_barrier(&srcu_struct_2);
@@ -185,12 +184,12 @@ module invokes call_rcu() from timers, you will need to first cancel all
the timers, and only then invoke rcu_barrier() to wait for any remaining
RCU callbacks to complete.
-Of course, if you module uses call_rcu_bh(), you will need to invoke
-rcu_barrier_bh() before unloading. Similarly, if your module uses
-call_rcu_sched(), you will need to invoke rcu_barrier_sched() before
-unloading. If your module uses call_rcu(), call_rcu_bh(), -and-
-call_rcu_sched(), then you will need to invoke each of rcu_barrier(),
-rcu_barrier_bh(), and rcu_barrier_sched().
+Of course, if your module uses call_rcu(), you will need to invoke
+rcu_barrier() before unloading. Similarly, if your module uses
+call_srcu(), you will need to invoke srcu_barrier() before unloading,
+and on the same srcu_struct structure. If your module uses call_rcu()
+-and- call_srcu(), then you will need to invoke rcu_barrier() -and-
+srcu_barrier().
Implementing rcu_barrier()
@@ -223,8 +222,8 @@ shown below. Note that the final "1" in on_each_cpu()'s argument list
ensures that all the calls to rcu_barrier_func() will have completed
before on_each_cpu() returns. Line 9 then waits for the completion.
-This code was rewritten in 2008 to support rcu_barrier_bh() and
-rcu_barrier_sched() in addition to the original rcu_barrier().
+This code was rewritten in 2008 and several times thereafter, but this
+still gives the general idea.
The rcu_barrier_func() runs on each CPU, where it invokes call_rcu()
to post an RCU callback, as follows:
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 1ace20815bb1..981651a8b65d 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -310,7 +310,7 @@ reader, updater, and reclaimer.
rcu_assign_pointer()
- +--------+
+ +--------+
+---------------------->| reader |---------+
| +--------+ |
| | |
@@ -318,12 +318,12 @@ reader, updater, and reclaimer.
| | | rcu_read_lock()
| | | rcu_read_unlock()
| rcu_dereference() | |
- +---------+ | |
- | updater |<---------------------+ |
- +---------+ V
+ +---------+ | |
+ | updater |<----------------+ |
+ +---------+ V
| +-----------+
+----------------------------------->| reclaimer |
- +-----------+
+ +-----------+
Defer:
synchronize_rcu() & call_rcu()
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index 7e71c9c1d8e9..5cbe5659e3b7 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -63,6 +63,110 @@ as well as medium and long term trends. The total absolute stall time
spikes which wouldn't necessarily make a dent in the time averages,
or to average trends over custom time frames.
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger, the user has to open the psi interface file under
+/proc/pressure/ representing the resource to be monitored and write the
+desired threshold and time window. The open file descriptor should be
+used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example, writing "some 150000 1000000" into /proc/pressure/memory
+would add a 150ms threshold for partial memory stalls measured within
+a 1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add a 50ms threshold for full io stalls measured within a 1sec
+time window.
+
+Triggers can be set on more than one psi metric, and more than one trigger
+for the same psi metric can be specified. However, each trigger needs a
+separate file descriptor so that it can be polled independently of the
+others; therefore a separate open() syscall should be made for each
+trigger, even when opening the same psi interface file.
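+
+For instance, the following sketch (illustrative threshold values, error
+handling omitted; see the complete example below) registers two triggers
+on the same interface file through two separate descriptors:
+
+  int fd_some = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+  int fd_full = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+
+  write(fd_some, "some 150000 1000000", strlen("some 150000 1000000") + 1);
+  write(fd_full, "full 100000 1000000", strlen("full 100000 1000000") + 1);
+  /* fd_some and fd_full can now be polled independently. */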
+
+A monitor activates only when the system enters a stall state for the
+monitored psi metric and deactivates upon exit from the stall state.
+While the system is in the stall state, psi signal growth is monitored
+at a rate of 10 times per tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, so the minimum
+monitoring update interval is 50ms and the maximum is 1s. The lower limit
+prevents overly frequent polling; the upper limit is chosen as a point
+beyond which monitors are most likely not needed and the psi averages can
+be used instead.
+
+When activated, a psi monitor stays active for at least the duration of one
+tracking window to avoid repeated activations/deactivations when the system
+is bouncing in and out of the stall state.
+
+Notifications to userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+ const char trig[] = "some 150000 1000000";
+ struct pollfd fds;
+ int n;
+
+ fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+ if (fds.fd < 0) {
+ printf("/proc/pressure/memory open error: %s\n",
+ strerror(errno));
+ return 1;
+ }
+ fds.events = POLLPRI;
+
+ if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+ printf("/proc/pressure/memory write error: %s\n",
+ strerror(errno));
+ return 1;
+ }
+
+ printf("waiting for events...\n");
+ while (1) {
+ n = poll(&fds, 1, -1);
+ if (n < 0) {
+ printf("poll error: %s\n", strerror(errno));
+ return 1;
+ }
+ if (fds.revents & POLLERR) {
+ printf("got POLLERR, event source is gone\n");
+ return 0;
+ }
+ if (fds.revents & POLLPRI) {
+ printf("event triggered!\n");
+ } else {
+ printf("unknown event received: 0x%x\n", fds.revents);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
Cgroup2 interface
=================
@@ -71,3 +175,6 @@ mounted, pressure stall information is also tracked for tasks grouped
into cgroups. Each subdirectory in the cgroupfs mountpoint contains
cpu.pressure, memory.pressure, and io.pressure files; the format is
the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
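+
+For example, a sketch of attaching the same kind of trigger to a cgroup
+(the cgroup2 mount point and the "workload" group name are illustrative):
+
+  int fd = open("/sys/fs/cgroup/workload/memory.pressure",
+                O_RDWR | O_NONBLOCK);
+
+  write(fd, "some 150000 1000000", strlen("some 150000 1000000") + 1);
+  /* Wait for POLLPRI events on fd, as in the example above. */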
diff --git a/Documentation/acpi/aml-debugger.txt b/Documentation/acpi/aml-debugger.txt
deleted file mode 100644
index 75ebeb64ab29..000000000000
--- a/Documentation/acpi/aml-debugger.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-The AML Debugger
-
-Copyright (C) 2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
-
-
-This document describes the usage of the AML debugger embedded in the Linux
-kernel.
-
-1. Build the debugger
-
- The following kernel configuration items are required to enable the AML
- debugger interface from the Linux kernel:
-
- CONFIG_ACPI_DEBUGGER=y
- CONFIG_ACPI_DEBUGGER_USER=m
-
- The userspace utilities can be built from the kernel source tree using
- the following commands:
-
- $ cd tools
- $ make acpi
-
- The resultant userspace tool binary is then located at:
-
- tools/power/acpi/acpidbg
-
- It can be installed to system directories by running "make install" (as a
- sufficiently privileged user).
-
-2. Start the userspace debugger interface
-
- After booting the kernel with the debugger built-in, the debugger can be
- started by using the following commands:
-
- # mount -t debugfs none /sys/kernel/debug
- # modprobe acpi_dbg
- # tools/power/acpi/acpidbg
-
- That spawns the interactive AML debugger environment where you can execute
- debugger commands.
-
- The commands are documented in the "ACPICA Overview and Programmer Reference"
- that can be downloaded from
-
- https://acpica.org/documentation
-
- The detailed debugger commands reference is located in Chapter 12 "ACPICA
- Debugger Reference". The "help" command can be used for a quick reference.
-
-3. Stop the userspace debugger interface
-
- The interactive debugger interface can be closed by pressing Ctrl+C or using
- the "quit" or "exit" commands. When finished, unload the module with:
-
- # rmmod acpi_dbg
-
- The module unloading may fail if there is an acpidbg instance running.
-
-4. Run the debugger in a script
-
- It may be useful to run the AML debugger in a test script. "acpidbg" supports
- this in a special "batch" mode. For example, the following command outputs
- the entire ACPI namespace:
-
- # acpidbg -b "namespace"
diff --git a/Documentation/acpi/apei/output_format.txt b/Documentation/acpi/apei/output_format.txt
deleted file mode 100644
index 0c49c197c47a..000000000000
--- a/Documentation/acpi/apei/output_format.txt
+++ /dev/null
@@ -1,147 +0,0 @@
- APEI output format
- ~~~~~~~~~~~~~~~~~~
-
-APEI uses printk as hardware error reporting interface, the output
-format is as follow.
-
-<error record> :=
-APEI generic hardware error status
-severity: <integer>, <severity string>
-section: <integer>, severity: <integer>, <severity string>
-flags: <integer>
-<section flags strings>
-fru_id: <uuid string>
-fru_text: <string>
-section_type: <section type string>
-<section data>
-
-<severity string>* := recoverable | fatal | corrected | info
-
-<section flags strings># :=
-[primary][, containment warning][, reset][, threshold exceeded]\
-[, resource not accessible][, latent error]
-
-<section type string> := generic processor error | memory error | \
-PCIe error | unknown, <uuid string>
-
-<section data> :=
-<generic processor section data> | <memory section data> | \
-<pcie section data> | <null>
-
-<generic processor section data> :=
-[processor_type: <integer>, <proc type string>]
-[processor_isa: <integer>, <proc isa string>]
-[error_type: <integer>
-<proc error type strings>]
-[operation: <integer>, <proc operation string>]
-[flags: <integer>
-<proc flags strings>]
-[level: <integer>]
-[version_info: <integer>]
-[processor_id: <integer>]
-[target_address: <integer>]
-[requestor_id: <integer>]
-[responder_id: <integer>]
-[IP: <integer>]
-
-<proc type string>* := IA32/X64 | IA64
-
-<proc isa string>* := IA32 | IA64 | X64
-
-<processor error type strings># :=
-[cache error][, TLB error][, bus error][, micro-architectural error]
-
-<proc operation string>* := unknown or generic | data read | data write | \
-instruction execution
-
-<proc flags strings># :=
-[restartable][, precise IP][, overflow][, corrected]
-
-<memory section data> :=
-[error_status: <integer>]
-[physical_address: <integer>]
-[physical_address_mask: <integer>]
-[node: <integer>]
-[card: <integer>]
-[module: <integer>]
-[bank: <integer>]
-[device: <integer>]
-[row: <integer>]
-[column: <integer>]
-[bit_position: <integer>]
-[requestor_id: <integer>]
-[responder_id: <integer>]
-[target_id: <integer>]
-[error_type: <integer>, <mem error type string>]
-
-<mem error type string>* :=
-unknown | no error | single-bit ECC | multi-bit ECC | \
-single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
-target abort | parity error | watchdog timeout | invalid address | \
-mirror Broken | memory sparing | scrub corrected error | \
-scrub uncorrected error
-
-<pcie section data> :=
-[port_type: <integer>, <pcie port type string>]
-[version: <integer>.<integer>]
-[command: <integer>, status: <integer>]
-[device_id: <integer>:<integer>:<integer>.<integer>
-slot: <integer>
-secondary_bus: <integer>
-vendor_id: <integer>, device_id: <integer>
-class_code: <integer>]
-[serial number: <integer>, <integer>]
-[bridge: secondary_status: <integer>, control: <integer>]
-[aer_status: <integer>, aer_mask: <integer>
-<aer status string>
-[aer_uncor_severity: <integer>]
-aer_layer=<aer layer string>, aer_agent=<aer agent string>
-aer_tlp_header: <integer> <integer> <integer> <integer>]
-
-<pcie port type string>* := PCIe end point | legacy PCI end point | \
-unknown | unknown | root port | upstream switch port | \
-downstream switch port | PCIe to PCI/PCI-X bridge | \
-PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
-root complex event collector
-
-if section severity is fatal or recoverable
-<aer status string># :=
-unknown | unknown | unknown | unknown | Data Link Protocol | \
-unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
-Poisoned TLP | Flow Control Protocol | Completion Timeout | \
-Completer Abort | Unexpected Completion | Receiver Overflow | \
-Malformed TLP | ECRC | Unsupported Request
-else
-<aer status string># :=
-Receiver Error | unknown | unknown | unknown | unknown | unknown | \
-Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
-Replay Timer Timeout | Advisory Non-Fatal
-fi
-
-<aer layer string> :=
-Physical Layer | Data Link Layer | Transaction Layer
-
-<aer agent string> :=
-Receiver ID | Requester ID | Completer ID | Transmitter ID
-
-Where, [] designate corresponding content is optional
-
-All <field string> description with * has the following format:
-
-field: <integer>, <field string>
-
-Where value of <integer> should be the position of "string" in <field
-string> description. Otherwise, <field string> will be "unknown".
-
-All <field strings> description with # has the following format:
-
-field: <integer>
-<field strings>
-
-Where each string in <fields strings> corresponding to one set bit of
-<integer>. The bit position is the position of "string" in <field
-strings> description.
-
-For more detailed explanation of every field, please refer to UEFI
-specification version 2.3 or later, section Appendix N: Common
-Platform Error Record.
diff --git a/Documentation/acpi/dsd/leds.txt b/Documentation/acpi/dsd/leds.txt
new file mode 100644
index 000000000000..81a63af42ed2
--- /dev/null
+++ b/Documentation/acpi/dsd/leds.txt
@@ -0,0 +1,99 @@
+Describing and referring to LEDs in ACPI
+
+Individual LEDs are described by hierarchical data extension [6] nodes under
+the device node of the LED driver chip. The "reg" property in the LED
+specific nodes gives the numerical ID of each individual LED output to
+which the LEDs are connected. [3] The hierarchical data nodes are named
+"led@X", where X is the number of the LED output.
+
+Referring to LEDs in Device tree is documented in [4], in "flash-leds" property
+documentation. In short, LEDs are directly referred to by using phandles.
+
+While Device tree allows referring to any node in the tree [1], in ACPI
+references are limited to device nodes only [2]. For this reason the same
+mechanism cannot be used on ACPI. A mechanism to refer to non-device ACPI
+nodes is documented in [7].
+
+ACPI allows (as does DT) using integer arguments after the reference. A
+combination of the LED driver device reference and an integer argument,
+referring to the "reg" property of the relevant LED, is used to identify
+individual LEDs. The value of the "reg" property is a contract between the
+firmware and software; it uniquely identifies the LED driver outputs.
+
+Under the LED driver device, the first hierarchical data extension package list
+entry shall contain the string "led@" followed by the number of the LED,
+followed by the referred object name. That object shall be named "LED" followed
+by the number of the LED.
+
+An ASL example of a camera sensor device and an LED driver device for two
+LEDs follows. Objects not relevant to the LEDs or the references to them
+have been omitted.
+
+ Device (LED)
+ {
+ Name (_DSD, Package () {
+ ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
+ Package () {
+ Package () { "led@0", LED0 },
+ Package () { "led@1", LED1 },
+ }
+ })
+ Name (LED0, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 0 },
+ Package () { "flash-max-microamp", 1000000 },
+ Package () { "flash-timeout-us", 200000 },
+ Package () { "led-max-microamp", 100000 },
+ Package () { "label", "white:flash" },
+ }
+ })
+ Name (LED1, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () { "reg", 1 },
+ Package () { "led-max-microamp", 10000 },
+ Package () { "label", "red:indicator" },
+ }
+ })
+ }
+
+ Device (SEN)
+ {
+ Name (_DSD, Package () {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package () {
+ "flash-leds",
+ Package () { ^LED, "led@0", ^LED, "led@1" },
+ }
+ }
+ })
+ }
+
+where
+
+ LED LED driver device
+ LED0 First LED
+ LED1 Second LED
+ SEN Camera sensor device (or another device the LED is
+ related to)
+
+[1] Device tree. <URL:http://www.devicetree.org>, referenced 2019-02-21.
+
+[2] Advanced Configuration and Power Interface Specification.
+ <URL:https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf>,
+ referenced 2019-02-21.
+
+[3] Documentation/devicetree/bindings/leds/common.txt
+
+[4] Documentation/devicetree/bindings/media/video-interfaces.txt
+
+[5] Device Properties UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+ referenced 2019-02-21.
+
+[6] Hierarchical Data Extension UUID For _DSD.
+ <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+ referenced 2019-02-21.
+
+[7] Documentation/acpi/dsd/data-node-reference.txt
diff --git a/Documentation/acpi/i2c-muxes.txt b/Documentation/acpi/i2c-muxes.txt
deleted file mode 100644
index 9fcc4f0b885e..000000000000
--- a/Documentation/acpi/i2c-muxes.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-ACPI I2C Muxes
---------------
-
-Describing an I2C device hierarchy that includes I2C muxes requires an ACPI
-Device () scope per mux channel.
-
-Consider this topology:
-
-+------+ +------+
-| SMB1 |-->| MUX0 |--CH00--> i2c client A (0x50)
-| | | 0x70 |--CH01--> i2c client B (0x50)
-+------+ +------+
-
-which corresponds to the following ASL:
-
-Device (SMB1)
-{
- Name (_HID, ...)
- Device (MUX0)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^SMB1", 0x00,
- ResourceConsumer,,)
- }
-
- Device (CH00)
- {
- Name (_ADR, 0)
-
- Device (CLIA)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^CH00", 0x00,
- ResourceConsumer,,)
- }
- }
- }
-
- Device (CH01)
- {
- Name (_ADR, 1)
-
- Device (CLIB)
- {
- Name (_HID, ...)
- Name (_CRS, ResourceTemplate () {
- I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
- AddressingMode7Bit, "^CH01", 0x00,
- ResourceConsumer,,)
- }
- }
- }
- }
-}
diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt
deleted file mode 100644
index 30437a6db373..000000000000
--- a/Documentation/acpi/initrd_table_override.txt
+++ /dev/null
@@ -1,111 +0,0 @@
-Upgrading ACPI tables via initrd
-================================
-
-1) Introduction (What is this about)
-2) What is this for
-3) How does it work
-4) References (Where to retrieve userspace tools)
-
-1) What is this about
----------------------
-
-If the ACPI_TABLE_UPGRADE compile option is true, it is possible to
-upgrade the ACPI execution environment that is defined by the ACPI tables
-via upgrading the ACPI tables provided by the BIOS with an instrumented,
-modified, more recent version one, or installing brand new ACPI tables.
-
-When building initrd with kernel in a single image, option
-ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD should also be true for this
-feature to work.
-
-For a full list of ACPI tables that can be upgraded/installed, take a look
-at the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in
-drivers/acpi/tables.c.
-All ACPI tables iasl (Intel's ACPI compiler and disassembler) knows should
-be overridable, except:
- - ACPI_SIG_RSDP (has a signature of 6 bytes)
- - ACPI_SIG_FACS (does not have an ordinary ACPI table header)
-Both could get implemented as well.
-
-
-2) What is this for
--------------------
-
-Complain to your platform/BIOS vendor if you find a bug which is so severe
-that a workaround is not accepted in the Linux kernel. And this facility
-allows you to upgrade the buggy tables before your platform/BIOS vendor
-releases an upgraded BIOS binary.
-
-This facility can be used by platform/BIOS vendors to provide a Linux
-compatible environment without modifying the underlying platform firmware.
-
-This facility also provides a powerful feature to easily debug and test
-ACPI BIOS table compatibility with the Linux kernel by modifying old
-platform provided ACPI tables or inserting new ACPI tables.
-
-It can and should be enabled in any kernel because there is no functional
-change with not instrumented initrds.
-
-
-3) How does it work
--------------------
-
-# Extract the machine's ACPI tables:
-cd /tmp
-acpidump >acpidump
-acpixtract -a acpidump
-# Disassemble, modify and recompile them:
-iasl -d *.dat
-# For example add this statement into a _PRT (PCI Routing Table) function
-# of the DSDT:
-Store("HELLO WORLD", debug)
-# And increase the OEM Revision. For example, before modification:
-DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000000)
-# After modification:
-DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000001)
-iasl -sa dsdt.dsl
-# Add the raw ACPI tables to an uncompressed cpio archive.
-# They must be put into a /kernel/firmware/acpi directory inside the cpio
-# archive. Note that if the table put here matches a platform table
-# (similar Table Signature, and similar OEMID, and similar OEM Table ID)
-# with a more recent OEM Revision, the platform table will be upgraded by
-# this table. If the table put here doesn't match a platform table
-# (dissimilar Table Signature, or dissimilar OEMID, or dissimilar OEM Table
-# ID), this table will be appended.
-mkdir -p kernel/firmware/acpi
-cp dsdt.aml kernel/firmware/acpi
-# A maximum of "NR_ACPI_INITRD_TABLES (64)" tables are currently allowed
-# (see osl.c):
-iasl -sa facp.dsl
-iasl -sa ssdt1.dsl
-cp facp.aml kernel/firmware/acpi
-cp ssdt1.aml kernel/firmware/acpi
-# The uncompressed cpio archive must be the first. Other, typically
-# compressed cpio archives, must be concatenated on top of the uncompressed
-# one. Following command creates the uncompressed cpio archive and
-# concatenates the original initrd on top:
-find kernel | cpio -H newc --create > /boot/instrumented_initrd
-cat /boot/initrd >>/boot/instrumented_initrd
-# reboot with increased acpi debug level, e.g. boot params:
-acpi.debug_level=0x2 acpi.debug_layer=0xFFFFFFFF
-# and check your syslog:
-[ 1.268089] ACPI: PCI Interrupt Routing Table [\_SB_.PCI0._PRT]
-[ 1.272091] [ACPI Debug] String [0x0B] "HELLO WORLD"
-
-iasl is able to disassemble and recompile quite a lot different,
-also static ACPI tables.
-
-
-4) Where to retrieve userspace tools
-------------------------------------
-
-iasl and acpixtract are part of Intel's ACPICA project:
-http://acpica.org/
-and should be packaged by distributions (for example in the acpica package
-on SUSE).
-
-acpidump can be found in Len Browns pmtools:
-ftp://kernel.org/pub/linux/kernel/people/lenb/acpi/utils/pmtools/acpidump
-This tool is also part of the acpica package on SUSE.
-Alternatively, used ACPI tables can be retrieved via sysfs in latest kernels:
-/sys/firmware/acpi/tables
diff --git a/Documentation/acpi/method-customizing.txt b/Documentation/acpi/method-customizing.txt
deleted file mode 100644
index 7235da975f23..000000000000
--- a/Documentation/acpi/method-customizing.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Linux ACPI Custom Control Method How To
-=======================================
-
-Written by Zhang Rui <rui.zhang@intel.com>
-
-
-Linux supports customizing ACPI control methods at runtime.
-
-Users can use this to
-1. override an existing method which may not work correctly,
- or just for debugging purposes.
-2. insert a completely new method in order to create a missing
- method such as _OFF, _ON, _STA, _INI, etc.
-For these cases, it is far simpler to dynamically install a single
-control method rather than override the entire DSDT, because kernel
-rebuild/reboot is not needed and test result can be got in minutes.
-
-Note: Only ACPI METHOD can be overridden, any other object types like
- "Device", "OperationRegion", are not recognized. Methods
- declared inside scope operators are also not supported.
-Note: The same ACPI control method can be overridden for many times,
- and it's always the latest one that used by Linux/kernel.
-Note: To get the ACPI debug object output (Store (AAAA, Debug)),
- please run "echo 1 > /sys/module/acpi/parameters/aml_debug_output".
-
-1. override an existing method
- a) get the ACPI table via ACPI sysfs I/F. e.g. to get the DSDT,
- just run "cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat"
- b) disassemble the table by running "iasl -d dsdt.dat".
- c) rewrite the ASL code of the method and save it in a new file,
- d) package the new file (psr.asl) to an ACPI table format.
- Here is an example of a customized \_SB._AC._PSR method,
-
- DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
- {
- Method (\_SB_.AC._PSR, 0, NotSerialized)
- {
- Store ("In AC _PSR", Debug)
- Return (ACON)
- }
- }
- Note that the full pathname of the method in ACPI namespace
- should be used.
- e) assemble the file to generate the AML code of the method.
- e.g. "iasl -vw 6084 psr.asl" (psr.aml is generated as a result)
- If parameter "-vw 6084" is not supported by your iASL compiler,
- please try a newer version.
- f) mount debugfs by "mount -t debugfs none /sys/kernel/debug"
- g) override the old method via the debugfs by running
- "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method"
-
-2. insert a new method
- This is easier than overriding an existing method.
- We just need to create the ASL code of the method we want to
- insert and then follow the step c) ~ g) in section 1.
-
-3. undo your changes
- The "undo" operation is not supported for a new inserted method
- right now, i.e. we can not remove a method currently.
- For an overridden method, in order to undo your changes, please
- save a copy of the method original ASL code in step c) section 1,
- and redo step c) ~ g) to override the method with the original one.
-
-
-Note: We can use a kernel with multiple custom ACPI method running,
- But each individual write to debugfs can implement a SINGLE
- method override. i.e. if we want to insert/override multiple
- ACPI methods, we need to redo step c) ~ g) for multiple times.
-
-Note: Be aware that root can mis-use this driver to modify arbitrary
- memory and gain additional rights, if root's privileges got
- restricted (for example if root is not allowed to load additional
- modules after boot).
diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt
deleted file mode 100644
index 0aba14c8f459..000000000000
--- a/Documentation/acpi/method-tracing.txt
+++ /dev/null
@@ -1,192 +0,0 @@
-ACPICA Trace Facility
-
-Copyright (C) 2015, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
-
-
-Abstract:
-
-This document describes the functions and the interfaces of the method
-tracing facility.
-
-1. Functionalities and usage examples:
-
- ACPICA provides method tracing capability. And two functions are
- currently implemented using this capability.
-
- A. Log reducer
- ACPICA subsystem provides debugging outputs when CONFIG_ACPI_DEBUG is
- enabled. The debugging messages which are deployed via
- ACPI_DEBUG_PRINT() macro can be reduced at 2 levels - per-component
- level (known as debug layer, configured via
- /sys/module/acpi/parameters/debug_layer) and per-type level (known as
- debug level, configured via /sys/module/acpi/parameters/debug_level).
-
- But when the particular layer/level is applied to the control method
- evaluations, the quantity of the debugging outputs may still be too
- large to be put into the kernel log buffer. The idea thus is worked out
- to only enable the particular debug layer/level (normally more detailed)
- logs when the control method evaluation is started, and disable the
- detailed logging when the control method evaluation is stopped.
-
- The following command examples illustrate the usage of the "log reducer"
- functionality:
- a. Filter out the debug layer/level matched logs when control methods
- are being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "enable" > trace_state
- b. Filter out the debug layer/level matched logs when the specified
- control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method" > /sys/module/acpi/parameters/trace_state
- c. Filter out the debug layer/level matched logs when the specified
- control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0xXXXXXXXX" > trace_debug_layer
- # echo "0xYYYYYYYY" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method-once" > /sys/module/acpi/parameters/trace_state
- Where:
- 0xXXXXXXXX/0xYYYYYYYY: Refer to Documentation/acpi/debug.txt for
- possible debug layer/level masking values.
- \PPPP.AAAA.TTTT.HHHH: Full path of a control method that can be found
- in the ACPI namespace. It needn't be an entry
- of a control method evaluation.
-
- B. AML tracer
-
- There are special log entries added by the method tracing facility at
- the "trace points" the AML interpreter starts/stops to execute a control
- method, or an AML opcode. Note that the format of the log entries are
- subject to change:
- [ 0.186427] exdebug-0398 ex_trace_point : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
- [ 0.186630] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905c88:If] execution.
- [ 0.186820] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:LEqual] execution.
- [ 0.187010] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905a20:-NamePath-] execution.
- [ 0.187214] exdebug-0398 ex_trace_point : Opcode End [0xf5905a20:-NamePath-] execution.
- [ 0.187407] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
- [ 0.187594] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
- [ 0.187789] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:LEqual] execution.
- [ 0.187980] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:Return] execution.
- [ 0.188146] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
- [ 0.188334] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
- [ 0.188524] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:Return] execution.
- [ 0.188712] exdebug-0398 ex_trace_point : Opcode End [0xf5905c88:If] execution.
- [ 0.188903] exdebug-0398 ex_trace_point : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
-
- Developers can utilize these special log entries to track the AML
- interpretion, thus can aid issue debugging and performance tuning. Note
- that, as the "AML tracer" logs are implemented via ACPI_DEBUG_PRINT()
- macro, CONFIG_ACPI_DEBUG is also required to be enabled for enabling
- "AML tracer" logs.
-
- The following command examples illustrate the usage of the "AML tracer"
- functionality:
- a. Filter out the method start/stop "AML tracer" logs when control
- methods are being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "enable" > trace_state
- b. Filter out the method start/stop "AML tracer" when the specified
- control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method" > trace_state
- c. Filter out the method start/stop "AML tracer" logs when the specified
- control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "method-once" > trace_state
- d. Filter out the method/opcode start/stop "AML tracer" when the
- specified control method is being evaluated:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "opcode" > trace_state
- e. Filter out the method/opcode start/stop "AML tracer" when the
- specified control method is being evaluated for the first time:
- # cd /sys/module/acpi/parameters
- # echo "0x80" > trace_debug_layer
- # echo "0x10" > trace_debug_level
- # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
- # echo "opcode-opcode" > trace_state
-
- Note that all above method tracing facility related module parameters can
- be used as the boot parameters, for example:
- acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
- acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
-
-2. Interface descriptions:
-
- All method tracing functions can be configured via ACPI module
- parameters that are accessible at /sys/module/acpi/parameters/:
-
- trace_method_name
- The full path of the AML method that the user wants to trace.
- Note that the full path shouldn't contain the trailing "_"s in its
- name segments but may contain "\" to form an absolute path.
-
- trace_debug_layer
- The temporary debug_layer used when the tracing feature is enabled.
- Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
- used to match all "AML tracer" logs.
-
- trace_debug_level
- The temporary debug_level used when the tracing feature is enabled.
- Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
- debug_level used to match all "AML tracer" logs.
-
- trace_state
- The status of the tracing feature.
- Users can enable/disable this debug tracing feature by executing
- the following command:
- # echo string > /sys/module/acpi/parameters/trace_state
- Where "string" should be one of the following:
- "disable"
- Disable the method tracing feature.
- "enable"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during any method
- execution will be logged.
- "method"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method execution
- of "trace_method_name" will be logged.
- "method-once"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method execution
- of "trace_method_name" will be logged only once.
- "opcode"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method/opcode
- execution of "trace_method_name" will be logged.
- "opcode-once"
- Enable the method tracing feature.
- ACPICA debugging messages matching
- "trace_debug_layer/trace_debug_level" during method/opcode
- execution of "trace_method_name" will be logged only once.
- Note that, the difference between the "enable" and other feature
- enabling options are:
- 1. When "enable" is specified, since
- "trace_debug_layer/trace_debug_level" shall apply to all control
- method evaluations, after configuring "trace_state" to "enable",
- "trace_method_name" will be reset to NULL.
- 2. When "method/opcode" is specified, if
- "trace_method_name" is NULL when "trace_state" is configured to
- these options, the "trace_debug_layer/trace_debug_level" will
- apply to all control method evaluations.
diff --git a/Documentation/acpi/ssdt-overlays.txt b/Documentation/acpi/ssdt-overlays.txt
deleted file mode 100644
index 5ae13f161ea2..000000000000
--- a/Documentation/acpi/ssdt-overlays.txt
+++ /dev/null
@@ -1,172 +0,0 @@
-
-In order to support ACPI open-ended hardware configurations (e.g. development
-boards) we need a way to augment the ACPI configuration provided by the firmware
-image. A common example is connecting sensors on I2C / SPI buses on development
-boards.
-
-Although this can be accomplished by creating a kernel platform driver or
-recompiling the firmware image with updated ACPI tables, neither is practical:
-the former proliferates board specific kernel code while the latter requires
-access to firmware tools which are often not publicly available.
-
-Because ACPI supports external references in AML code a more practical
-way to augment firmware ACPI configuration is by dynamically loading
-user defined SSDT tables that contain the board specific information.
-
-For example, to enumerate a Bosch BMA222E accelerometer on the I2C bus of the
-Minnowboard MAX development board exposed via the LSE connector [1], the
-following ASL code can be used:
-
-DefinitionBlock ("minnowmax.aml", "SSDT", 1, "Vendor", "Accel", 0x00000003)
-{
- External (\_SB.I2C6, DeviceObj)
-
- Scope (\_SB.I2C6)
- {
- Device (STAC)
- {
- Name (_ADR, Zero)
- Name (_HID, "BMA222E")
-
- Method (_CRS, 0, Serialized)
- {
- Name (RBUF, ResourceTemplate ()
- {
- I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
- AddressingMode7Bit, "\\_SB.I2C6", 0x00,
- ResourceConsumer, ,)
- GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
- "\\_SB.GPO2", 0x00, ResourceConsumer, , )
- { // Pin list
- 0
- }
- })
- Return (RBUF)
- }
- }
- }
-}
-
-which can then be compiled to AML binary format:
-
-$ iasl minnowmax.asl
-
-Intel ACPI Component Architecture
-ASL Optimizing Compiler version 20140214-64 [Mar 29 2014]
-Copyright (c) 2000 - 2014 Intel Corporation
-
-ASL Input: minnomax.asl - 30 lines, 614 bytes, 7 keywords
-AML Output: minnowmax.aml - 165 bytes, 6 named objects, 1 executable opcodes
-
-[1] http://wiki.minnowboard.org/MinnowBoard_MAX#Low_Speed_Expansion_Connector_.28Top.29
-
-The resulting AML code can then be loaded by the kernel using one of the methods
-below.
-
-== Loading ACPI SSDTs from initrd ==
-
-This option allows loading of user defined SSDTs from initrd and it is useful
-when the system does not support EFI or when there is not enough EFI storage.
-
-It works in a similar way with initrd based ACPI tables override/upgrade: SSDT
-aml code must be placed in the first, uncompressed, initrd under the
-"kernel/firmware/acpi" path. Multiple files can be used and this will translate
-in loading multiple tables. Only SSDT and OEM tables are allowed. See
-initrd_table_override.txt for more details.
-
-Here is an example:
-
-# Add the raw ACPI tables to an uncompressed cpio archive.
-# They must be put into a /kernel/firmware/acpi directory inside the
-# cpio archive.
-# The uncompressed cpio archive must be the first.
-# Other, typically compressed cpio archives, must be
-# concatenated on top of the uncompressed one.
-mkdir -p kernel/firmware/acpi
-cp ssdt.aml kernel/firmware/acpi
-
-# Create the uncompressed cpio archive and concatenate the original initrd
-# on top:
-find kernel | cpio -H newc --create > /boot/instrumented_initrd
-cat /boot/initrd >>/boot/instrumented_initrd
-
-== Loading ACPI SSDTs from EFI variables ==
-
-This is the preferred method, when EFI is supported on the platform, because it
-allows a persistent, OS independent way of storing the user defined SSDTs. There
-is also work underway to implement EFI support for loading user defined SSDTs
-and using this method will make it easier to convert to the EFI loading
-mechanism when that will arrive.
-
-In order to load SSDTs from an EFI variable the efivar_ssdt kernel command line
-parameter can be used. The argument for the option is the variable name to
-use. If there are multiple variables with the same name but with different
-vendor GUIDs, all of them will be loaded.
-
-In order to store the AML code in an EFI variable the efivarfs filesystem can be
-used. It is enabled and mounted by default in /sys/firmware/efi/efivars in all
-recent distribution.
-
-Creating a new file in /sys/firmware/efi/efivars will automatically create a new
-EFI variable. Updating a file in /sys/firmware/efi/efivars will update the EFI
-variable. Please note that the file name needs to be specially formatted as
-"Name-GUID" and that the first 4 bytes in the file (little-endian format)
-represent the attributes of the EFI variable (see EFI_VARIABLE_MASK in
-include/linux/efi.h). Writing to the file must also be done with one write
-operation.
-
-For example, you can use the following bash script to create/update an EFI
-variable with the content from a given file:
-
-#!/bin/sh -e
-
-while ! [ -z "$1" ]; do
- case "$1" in
- "-f") filename="$2"; shift;;
- "-g") guid="$2"; shift;;
- *) name="$1";;
- esac
- shift
-done
-
-usage()
-{
- echo "Syntax: ${0##*/} -f filename [ -g guid ] name"
- exit 1
-}
-
-[ -n "$name" -a -f "$filename" ] || usage
-
-EFIVARFS="/sys/firmware/efi/efivars"
-
-[ -d "$EFIVARFS" ] || exit 2
-
-if stat -tf $EFIVARFS | grep -q -v de5e81e4; then
- mount -t efivarfs none $EFIVARFS
-fi
-
-# try to pick up an existing GUID
-[ -n "$guid" ] || guid=$(find "$EFIVARFS" -name "$name-*" | head -n1 | cut -f2- -d-)
-
-# use a randomly generated GUID
-[ -n "$guid" ] || guid="$(cat /proc/sys/kernel/random/uuid)"
-
-# efivarfs expects all of the data in one write
-tmp=$(mktemp)
-/bin/echo -ne "\007\000\000\000" | cat - $filename > $tmp
-dd if=$tmp of="$EFIVARFS/$name-$guid" bs=$(stat -c %s $tmp)
-rm $tmp
-
-== Loading ACPI SSDTs from configfs ==
-
-This option allows loading of user defined SSDTs from userspace via the configfs
-interface. The CONFIG_ACPI_CONFIGFS option must be select and configfs must be
-mounted. In the following examples, we assume that configfs has been mounted in
-/config.
-
-New tables can be loading by creating new directories in /config/acpi/table/ and
-writing the SSDT aml code in the aml attribute:
-
-cd /config/acpi/table
-mkdir my_ssdt
-cat ~/ssdt.aml > my_ssdt/aml
diff --git a/Documentation/acpi/cppc_sysfs.txt b/Documentation/admin-guide/acpi/cppc_sysfs.rst
index f20fb445135d..a4b99afbe331 100644
--- a/Documentation/acpi/cppc_sysfs.txt
+++ b/Documentation/admin-guide/acpi/cppc_sysfs.rst
@@ -1,5 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
- Collaborative Processor Performance Control (CPPC)
+==================================================
+Collaborative Processor Performance Control (CPPC)
+==================================================
+
+CPPC
+====
CPPC defined in the ACPI spec describes a mechanism for the OS to manage the
performance of a logical processor on a contiguous and abstract performance
@@ -10,31 +16,28 @@ For more details on CPPC please refer to the ACPI specification at:
http://uefi.org/specifications
-Some of the CPPC registers are exposed via sysfs under:
-
-/sys/devices/system/cpu/cpuX/acpi_cppc/
-
-for each cpu X
+Some of the CPPC registers are exposed via sysfs under::
---------------------------------------------------------------------------------
+ /sys/devices/system/cpu/cpuX/acpi_cppc/
-$ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
-/sys/devices/system/cpu/cpu0/acpi_cppc/:
-total 0
--r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
--r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
--r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
--r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time
+for each cpu X::
---------------------------------------------------------------------------------
+ $ ls -lR /sys/devices/system/cpu/cpu0/acpi_cppc/
+ /sys/devices/system/cpu/cpu0/acpi_cppc/:
+ total 0
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 feedback_ctrs
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 highest_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_freq
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_nonlinear_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 lowest_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_freq
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 nominal_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 reference_perf
+ -r--r--r-- 1 root root 65536 Mar 5 19:38 wraparound_time
* highest_perf : Highest performance of this processor (abstract scale).
-* nominal_perf : Highest sustained performance of this processor (abstract scale).
+* nominal_perf : Highest sustained performance of this processor
+ (abstract scale).
* lowest_nonlinear_perf : Lowest performance of this processor with nonlinear
power savings (abstract scale).
* lowest_perf : Lowest performance of this processor (abstract scale).
@@ -48,22 +51,26 @@ total 0
* feedback_ctrs : Includes both Reference and delivered performance counter.
Reference counter ticks up proportional to processor's reference performance.
Delivered counter ticks up proportional to processor's delivered performance.
-* wraparound_time: Minimum time for the feedback counters to wraparound (seconds).
+* wraparound_time: Minimum time for the feedback counters to wraparound
+ (seconds).
* reference_perf : Performance level at which reference performance counter
accumulates (abstract scale).
---------------------------------------------------------------------------------
- Computing Average Delivered Performance
+Computing Average Delivered Performance
+=======================================
+
+The following describes the steps to compute the average delivered
+performance by taking two snapshots of the feedback counters at times T1
+and T2.
+
+ T1: Read feedback_ctrs as fbc_t1
+ Wait or run some workload
-Below describes the steps to compute the average performance delivered by taking
-two different snapshots of feedback counters at time T1 and T2.
+ T2: Read feedback_ctrs as fbc_t2
-T1: Read feedback_ctrs as fbc_t1
- Wait or run some workload
-T2: Read feedback_ctrs as fbc_t2
+::
-delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
-reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]
+ delivered_counter_delta = fbc_t2[del] - fbc_t1[del]
+ reference_counter_delta = fbc_t2[ref] - fbc_t1[ref]
-delivered_perf = (refernce_perf x delivered_counter_delta) / reference_counter_delta
+  delivered_perf = (reference_perf x delivered_counter_delta) / reference_counter_delta
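+
+A minimal userspace sketch of this computation (it assumes that
+feedback_ctrs prints the counters in the "ref:<value> del:<value>" form
+and that reference_perf holds a single number; error handling and
+overflow concerns are omitted)::
+
+  #include <stdio.h>
+  #include <unistd.h>
+
+  #define CPPC "/sys/devices/system/cpu/cpu0/acpi_cppc/"
+
+  static void read_ctrs(unsigned long long *ref, unsigned long long *del)
+  {
+          FILE *f = fopen(CPPC "feedback_ctrs", "r");
+
+          fscanf(f, "ref:%llu del:%llu", ref, del);
+          fclose(f);
+  }
+
+  int main(void)
+  {
+          unsigned long long ref1, del1, ref2, del2, reference_perf;
+          FILE *f = fopen(CPPC "reference_perf", "r");
+
+          fscanf(f, "%llu", &reference_perf);
+          fclose(f);
+
+          read_ctrs(&ref1, &del1);   /* snapshot at T1 */
+          sleep(1);                  /* wait or run some workload */
+          read_ctrs(&ref2, &del2);   /* snapshot at T2 */
+
+          printf("delivered_perf: %llu\n",
+                 reference_perf * (del2 - del1) / (ref2 - ref1));
+          return 0;
+  }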
diff --git a/Documentation/acpi/dsdt-override.txt b/Documentation/admin-guide/acpi/dsdt-override.rst
index 784841caa6e6..50bd7f194bf4 100644
--- a/Documentation/acpi/dsdt-override.txt
+++ b/Documentation/admin-guide/acpi/dsdt-override.rst
@@ -1,6 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Overriding DSDT
+===============
+
Linux supports a method of overriding the BIOS DSDT:
-CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
+CONFIG_ACPI_CUSTOM_DSDT - builds the image into the kernel.
When to use this method is described in detail on the
Linux/ACPI home page:
diff --git a/Documentation/admin-guide/acpi/index.rst b/Documentation/admin-guide/acpi/index.rst
new file mode 100644
index 000000000000..4d13eeea1eca
--- /dev/null
+++ b/Documentation/admin-guide/acpi/index.rst
@@ -0,0 +1,14 @@
+============
+ACPI Support
+============
+
+Here we document in detail how to interact with various mechanisms in
+the Linux ACPI support.
+
+.. toctree::
+ :maxdepth: 1
+
+ initrd_table_override
+ dsdt-override
+ ssdt-overlays
+ cppc_sysfs
diff --git a/Documentation/admin-guide/acpi/initrd_table_override.rst b/Documentation/admin-guide/acpi/initrd_table_override.rst
new file mode 100644
index 000000000000..cbd768207631
--- /dev/null
+++ b/Documentation/admin-guide/acpi/initrd_table_override.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+Upgrading ACPI tables via initrd
+================================
+
+What is this about
+==================
+
+If the ACPI_TABLE_UPGRADE compile option is true, it is possible to
+upgrade the ACPI execution environment that is defined by the ACPI tables
+by upgrading the ACPI tables provided by the BIOS with an instrumented,
+modified, more recent version, or by installing brand new ACPI tables.
+
+When building the initrd into the kernel as a single image, the
+ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD option should also be true for
+this feature to work.
+
+For a full list of ACPI tables that can be upgraded/installed, take a look
+at the `char *table_sigs[MAX_ACPI_SIGNATURE];` definition in
+drivers/acpi/tables.c.
+
+All ACPI tables that iasl (Intel's ACPI compiler and disassembler) knows
+about should be overridable, except:
+
+ - ACPI_SIG_RSDP (has a signature of 6 bytes)
+ - ACPI_SIG_FACS (does not have an ordinary ACPI table header)
+
+Support for both could be implemented as well.
+
+
+What is this for
+================
+
+Complain to your platform/BIOS vendor if you find a bug which is so severe
+that a workaround is not accepted in the Linux kernel. This facility then
+allows you to upgrade the buggy tables before your platform/BIOS vendor
+releases an upgraded BIOS binary.
+
+This facility can be used by platform/BIOS vendors to provide a Linux
+compatible environment without modifying the underlying platform firmware.
+
+This facility also provides a powerful feature to easily debug and test
+ACPI BIOS table compatibility with the Linux kernel by modifying old
+platform provided ACPI tables or inserting new ACPI tables.
+
+It can and should be enabled in any kernel because there is no functional
+change with uninstrumented initrds.
+
+
+How does it work
+================
+::
+
+ # Extract the machine's ACPI tables:
+ cd /tmp
+ acpidump >acpidump
+ acpixtract -a acpidump
+ # Disassemble, modify and recompile them:
+ iasl -d *.dat
+ # For example add this statement into a _PRT (PCI Routing Table) function
+ # of the DSDT:
+ Store("HELLO WORLD", debug)
+ # And increase the OEM Revision. For example, before modification:
+ DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000000)
+ # After modification:
+ DefinitionBlock ("DSDT.aml", "DSDT", 2, "INTEL ", "TEMPLATE", 0x00000001)
+ iasl -sa dsdt.dsl
+ # Add the raw ACPI tables to an uncompressed cpio archive.
+ # They must be put into a /kernel/firmware/acpi directory inside the cpio
+ # archive. Note that if the table put here matches a platform table
+ # (similar Table Signature, and similar OEMID, and similar OEM Table ID)
+ # with a more recent OEM Revision, the platform table will be upgraded by
+ # this table. If the table put here doesn't match a platform table
+ # (dissimilar Table Signature, or dissimilar OEMID, or dissimilar OEM Table
+ # ID), this table will be appended.
+ mkdir -p kernel/firmware/acpi
+ cp dsdt.aml kernel/firmware/acpi
+ # A maximum of "NR_ACPI_INITRD_TABLES (64)" tables are currently allowed
+ # (see osl.c):
+ iasl -sa facp.dsl
+ iasl -sa ssdt1.dsl
+ cp facp.aml kernel/firmware/acpi
+ cp ssdt1.aml kernel/firmware/acpi
+ # The uncompressed cpio archive must be the first. Other, typically
+ # compressed cpio archives, must be concatenated on top of the uncompressed
+ # one. Following command creates the uncompressed cpio archive and
+ # concatenates the original initrd on top:
+ find kernel | cpio -H newc --create > /boot/instrumented_initrd
+ cat /boot/initrd >>/boot/instrumented_initrd
+ # reboot with increased acpi debug level, e.g. boot params:
+ acpi.debug_level=0x2 acpi.debug_layer=0xFFFFFFFF
+ # and check your syslog:
+ [ 1.268089] ACPI: PCI Interrupt Routing Table [\_SB_.PCI0._PRT]
+ [ 1.272091] [ACPI Debug] String [0x0B] "HELLO WORLD"
+
+iasl is able to disassemble and recompile quite a lot of different ACPI
+tables, including static ones.
+
+
+Where to retrieve userspace tools
+=================================
+
+iasl and acpixtract are part of Intel's ACPICA project:
+http://acpica.org/
+
+and should be packaged by distributions (for example in the acpica package
+on SUSE).
+
+acpidump can be found in Len Brown's pmtools:
+ftp://kernel.org/pub/linux/kernel/people/lenb/acpi/utils/pmtools/acpidump
+
+This tool is also part of the acpica package on SUSE.
+Alternatively, the ACPI tables in use can be retrieved via sysfs in recent
+kernels:
+/sys/firmware/acpi/tables
diff --git a/Documentation/admin-guide/acpi/ssdt-overlays.rst b/Documentation/admin-guide/acpi/ssdt-overlays.rst
new file mode 100644
index 000000000000..da37455f96c9
--- /dev/null
+++ b/Documentation/admin-guide/acpi/ssdt-overlays.rst
@@ -0,0 +1,180 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+SSDT Overlays
+=============
+
+In order to support ACPI open-ended hardware configurations (e.g. development
+boards) we need a way to augment the ACPI configuration provided by the firmware
+image. A common example is connecting sensors on I2C / SPI buses on development
+boards.
+
+Although this can be accomplished by creating a kernel platform driver or
+recompiling the firmware image with updated ACPI tables, neither is practical:
+the former proliferates board specific kernel code while the latter requires
+access to firmware tools which are often not publicly available.
+
+Because ACPI supports external references in AML code a more practical
+way to augment firmware ACPI configuration is by dynamically loading
+user defined SSDT tables that contain the board specific information.
+
+For example, to enumerate a Bosch BMA222E accelerometer on the I2C bus of the
+Minnowboard MAX development board exposed via the LSE connector [1], the
+following ASL code can be used::
+
+ DefinitionBlock ("minnowmax.aml", "SSDT", 1, "Vendor", "Accel", 0x00000003)
+ {
+ External (\_SB.I2C6, DeviceObj)
+
+ Scope (\_SB.I2C6)
+ {
+ Device (STAC)
+ {
+ Name (_ADR, Zero)
+ Name (_HID, "BMA222E")
+
+ Method (_CRS, 0, Serialized)
+ {
+ Name (RBUF, ResourceTemplate ()
+ {
+ I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
+ AddressingMode7Bit, "\\_SB.I2C6", 0x00,
+ ResourceConsumer, ,)
+ GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
+ "\\_SB.GPO2", 0x00, ResourceConsumer, , )
+ { // Pin list
+ 0
+ }
+ })
+ Return (RBUF)
+ }
+ }
+ }
+ }
+
+which can then be compiled to AML binary format::
+
+ $ iasl minnowmax.asl
+
+ Intel ACPI Component Architecture
+ ASL Optimizing Compiler version 20140214-64 [Mar 29 2014]
+ Copyright (c) 2000 - 2014 Intel Corporation
+
+ ASL Input: minnomax.asl - 30 lines, 614 bytes, 7 keywords
+ AML Output: minnowmax.aml - 165 bytes, 6 named objects, 1 executable opcodes
+
+[1] http://wiki.minnowboard.org/MinnowBoard_MAX#Low_Speed_Expansion_Connector_.28Top.29
+
+The resulting AML code can then be loaded by the kernel using one of the methods
+below.
+
+Loading ACPI SSDTs from initrd
+==============================
+
+This option allows loading of user defined SSDTs from initrd and it is useful
+when the system does not support EFI or when there is not enough EFI storage.
+
+It works in a similar way to the initrd based ACPI table override/upgrade:
+the SSDT AML code must be placed in the first, uncompressed, initrd under
+the "kernel/firmware/acpi" path. Multiple files can be used and this will
+translate into loading multiple tables. Only SSDT and OEM tables are
+allowed. See initrd_table_override.rst for more details.
+
+Here is an example::
+
+ # Add the raw ACPI tables to an uncompressed cpio archive.
+ # They must be put into a /kernel/firmware/acpi directory inside the
+ # cpio archive.
+ # The uncompressed cpio archive must be the first.
+ # Other, typically compressed cpio archives, must be
+ # concatenated on top of the uncompressed one.
+ mkdir -p kernel/firmware/acpi
+ cp ssdt.aml kernel/firmware/acpi
+
+ # Create the uncompressed cpio archive and concatenate the original initrd
+ # on top:
+ find kernel | cpio -H newc --create > /boot/instrumented_initrd
+ cat /boot/initrd >>/boot/instrumented_initrd
+
+Loading ACPI SSDTs from EFI variables
+=====================================
+
+This is the preferred method, when EFI is supported on the platform, because it
+allows a persistent, OS independent way of storing the user defined SSDTs. There
+is also work underway to implement EFI support for loading user defined SSDTs
+and using this method will make it easier to convert to the EFI loading
+mechanism when it arrives.
+
+In order to load SSDTs from an EFI variable the efivar_ssdt kernel command line
+parameter can be used. The argument for the option is the variable name to
+use. If there are multiple variables with the same name but with different
+vendor GUIDs, all of them will be loaded.
+
+In order to store the AML code in an EFI variable the efivarfs filesystem can be
+used. It is enabled and mounted by default in /sys/firmware/efi/efivars in all
+recent distributions.
+
+Creating a new file in /sys/firmware/efi/efivars will automatically create a new
+EFI variable. Updating a file in /sys/firmware/efi/efivars will update the EFI
+variable. Please note that the file name needs to be specially formatted as
+"Name-GUID" and that the first 4 bytes in the file (little-endian format)
+represent the attributes of the EFI variable (see EFI_VARIABLE_MASK in
+include/linux/efi.h). Writing to the file must also be done with one write
+operation.
+
+For example, you can use the following bash script to create/update an EFI
+variable with the content from a given file::
+
+ #!/bin/sh -e
+
+ while ! [ -z "$1" ]; do
+ case "$1" in
+ "-f") filename="$2"; shift;;
+ "-g") guid="$2"; shift;;
+ *) name="$1";;
+ esac
+ shift
+ done
+
+ usage()
+ {
+ echo "Syntax: ${0##*/} -f filename [ -g guid ] name"
+ exit 1
+ }
+
+ [ -n "$name" -a -f "$filename" ] || usage
+
+ EFIVARFS="/sys/firmware/efi/efivars"
+
+ [ -d "$EFIVARFS" ] || exit 2
+
+ if stat -tf $EFIVARFS | grep -q -v de5e81e4; then
+ mount -t efivarfs none $EFIVARFS
+ fi
+
+ # try to pick up an existing GUID
+ [ -n "$guid" ] || guid=$(find "$EFIVARFS" -name "$name-*" | head -n1 | cut -f2- -d-)
+
+ # use a randomly generated GUID
+ [ -n "$guid" ] || guid="$(cat /proc/sys/kernel/random/uuid)"
+
+ # efivarfs expects all of the data in one write
+ tmp=$(mktemp)
+ /bin/echo -ne "\007\000\000\000" | cat - $filename > $tmp
+ dd if=$tmp of="$EFIVARFS/$name-$guid" bs=$(stat -c %s $tmp)
+ rm $tmp
+
+Loading ACPI SSDTs from configfs
+================================
+
+This option allows loading of user defined SSDTs from userspace via the configfs
+interface. The CONFIG_ACPI_CONFIGFS option must be selected and configfs must be
+mounted. In the following examples, we assume that configfs has been mounted in
+/config.
+
+New tables can be loaded by creating new directories in /config/acpi/table/
+and writing the SSDT AML code to the aml attribute::
+
+ cd /config/acpi/table
+ mkdir my_ssdt
+ cat ~/ssdt.aml > my_ssdt/aml
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 20f92c16ffbf..88e746074252 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -864,6 +864,8 @@ All cgroup core files are prefixed with "cgroup."
populated
1 if the cgroup or its descendants contains any live
processes; otherwise, 0.
+ frozen
+ 1 if the cgroup is frozen; otherwise, 0.
cgroup.max.descendants
A read-write single value files. The default is "max".
@@ -897,6 +899,31 @@ All cgroup core files are prefixed with "cgroup."
A dying cgroup can consume system resources not exceeding
limits, which were active at the moment of cgroup deletion.
+ cgroup.freeze
+ A read-write single value file which exists on non-root cgroups.
+ Allowed values are "0" and "1". The default is "0".
+
+    Writing "1" to the file causes freezing of the cgroup and all
+    descendant cgroups. This means that all processes belonging to them
+    will be stopped and will not run until the cgroup is explicitly
+    unfrozen. Freezing of the cgroup may take some time; when this action
+    is completed, the "frozen" value in the cgroup.events control file
+    will be updated to "1" and the corresponding notification will be
+    issued.
+
+    A cgroup can be frozen either by its own settings, or by the settings
+    of any ancestor cgroups. If any of its ancestor cgroups is frozen,
+    the cgroup will remain frozen.
+
+ Processes in a frozen cgroup can be killed by a fatal signal. They
+ can also enter and leave a frozen cgroup: either through an explicit
+ move by a user, or if freezing of the cgroup races with fork().
+ If a process is moved into a frozen cgroup, it stops; if a process
+ is moved out of a frozen cgroup, it resumes running.
+
+ Frozen status of a cgroup doesn't affect any cgroup tree operations:
+ it's possible to delete a frozen (and empty) cgroup, as well as
+ create new sub-cgroups.
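+
+ As an illustrative sketch (assuming the cgroup2 hierarchy is mounted
+ at /sys/fs/cgroup and a child cgroup named "jobs" already exists;
+ both are placeholders), a freeze/thaw cycle looks like this::
+
+    # freeze the "jobs" cgroup (placeholder name) and check cgroup.events
+    echo 1 > /sys/fs/cgroup/jobs/cgroup.freeze
+    grep frozen /sys/fs/cgroup/jobs/cgroup.events   # reports "frozen 1" once done
+
+    # thaw it again
+    echo 0 > /sys/fs/cgroup/jobs/cgroup.freeze
+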
Controllers
===========
diff --git a/Documentation/admin-guide/ext4.rst b/Documentation/admin-guide/ext4.rst
index e506d3dae510..059ddcbe769d 100644
--- a/Documentation/admin-guide/ext4.rst
+++ b/Documentation/admin-guide/ext4.rst
@@ -91,10 +91,48 @@ Currently Available
* large block (up to pagesize) support
* efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
the ordering)
+* Case-insensitive file name lookups
[1] Filesystems with a block size of 1k may see a limit imposed by the
directory hash tree having a maximum depth of two.
+Case-insensitive file name lookups
+==================================
+
+The case-insensitive file name lookup feature is supported on a
+per-directory basis, allowing the user to mix case-insensitive and
+case-sensitive directories in the same filesystem. It is enabled by
+flipping the +F inode attribute of an empty directory. The
+case-insensitive string match operation is only defined when we know how
+the text is encoded in a byte sequence. For that reason, in order to
+enable case-insensitive directories, the filesystem must have the
+casefold feature, which stores the filesystem-wide encoding
+model used. By default, the charset adopted is the latest version of
+Unicode (12.1.0 at the time of this writing), encoded in the UTF-8
+form. The comparison algorithm is implemented by normalizing the
+strings to the Canonical decomposition form, as defined by Unicode,
+followed by a byte-by-byte comparison.
+
+The case-awareness is name-preserving on the disk, meaning that the file
+name provided by userspace is a byte-per-byte match to what is actually
+written to the disk. The Unicode normalization format used by the
+kernel is thus an internal representation, neither exposed to
+userspace nor stored on the disk, with the important exception of disk
+hashes, used on large case-insensitive directories with the DX feature.
+On DX directories, the hash must be calculated using the casefolded
+version of the filename, meaning that the normalization format used
+actually has an impact on where the directory entry is stored.
+
+When we change from viewing filenames as opaque byte sequences to seeing
+them as encoded strings, we need to address what happens when a program
+tries to create a file with an invalid name. The Unicode subsystem
+within the kernel leaves the decision of what to do in this case to the
+filesystem, which selects its preferred behavior by enabling/disabling
+strict mode. When ext4 encounters one of those strings and the
+filesystem does not require strict mode, it falls back to treating the
+entire string as an opaque byte sequence, which still allows the user to
+operate on that file, but case-insensitive lookups won't work.
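+
+A brief sketch of how this can be set up (the device, mount point and
+directory names are placeholders, and an e2fsprogs version that knows
+about the casefold feature and the +F attribute is assumed)::
+
+    # /dev/vdb, /mnt and "folded" are placeholders
+    mkfs.ext4 -O casefold /dev/vdb     # store the encoding model on the filesystem
+    mount /dev/vdb /mnt
+    mkdir /mnt/folded
+    chattr +F /mnt/folded              # enable case-insensitive lookups in this directory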
+
Options
=======
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
new file mode 100644
index 000000000000..ffc064c1ec68
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -0,0 +1,13 @@
+========================
+Hardware vulnerabilities
+========================
+
+This section describes CPU vulnerabilities and provides an overview of the
+possible mitigations along with guidance for selecting mitigations if they
+are configurable at compile, boot or run time.
+
+.. toctree::
+ :maxdepth: 1
+
+ l1tf
+ mds
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/hw-vuln/l1tf.rst
index 9af977384168..31653a9f0e1b 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/hw-vuln/l1tf.rst
@@ -445,6 +445,7 @@ The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
module parameter is ignored and writes to the sysfs file are rejected.
+.. _mitigation_selection:
Mitigation selection guide
--------------------------
diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
new file mode 100644
index 000000000000..e3a796c0d3a2
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/mds.rst
@@ -0,0 +1,308 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+ - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+ ============== ===== ===================================================
+ CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling
+ CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling
+ CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling
+ CVE-2019-11091 MDSUM Microarchitectural Data Sampling Uncacheable Memory
+ ============== ===== ===================================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in these
+buffers can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget which
+allows in turn to infer the value via a cache side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads cross
+Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious
+non-privileged user space applications running on hosts or guests.
+Malicious guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation-based vulnerabilities, the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence the attacks are purely sampling based, but, as demonstrated with
+the TLBleed attack, samples can be postprocessed successfully.
+
+Web browsers
+^^^^^^^^^^^^
+
+ It's unclear whether attacks through web browsers are possible at
+ all. Exploitation through JavaScript is considered very unlikely,
+ but other widely used web technologies like WebAssembly could possibly
+ be abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+ .. list-table::
+
+ * - 'Not affected'
+ - The processor is not vulnerable
+ * - 'Vulnerable'
+ - The processor is vulnerable, but no mitigation enabled
+ * - 'Vulnerable: Clear CPU buffers attempted, no microcode'
+ - The processor is vulnerable but microcode is not updated.
+
+ The mitigation is enabled on a best effort basis. See :ref:`vmwerv`
+ * - 'Mitigation: Clear CPU buffers'
+ - The processor is vulnerable and the CPU buffer clearing mitigation is
+ enabled.
+
+If the processor is vulnerable then the following information is appended
+to the above information:
+
+ ======================== ============================================
+ 'SMT vulnerable' SMT is enabled
+ 'SMT mitigated' SMT is enabled and mitigated
+ 'SMT disabled' SMT is disabled
+ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown
+ ======================== ============================================
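+
+For example, the current status can be queried from a shell::
+
+    cat /sys/devices/system/cpu/vulnerabilities/mds
+
+The output is one of the strings listed above, followed by the SMT state
+information if the processor is vulnerable.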
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the processor is vulnerable, but the availability of the microcode-based
+ mitigation mechanism is not advertised via CPUID, the kernel selects a best
+ effort mitigation mode. This mode invokes the mitigation instructions
+ without a guarantee that they clear the CPU buffers.
+
+ This is done to address virtualization scenarios where the host has the
+ microcode update applied, but the hypervisor is not yet updated to expose
+ the CPUID to the guest. If the host has updated microcode, the protection
+ takes effect; otherwise a few CPU cycles are wasted pointlessly.
+
+ The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+-------------------------
+
+The kernel detects the affected CPUs and the presence of the microcode
+which is required.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+ The mitigation for MDS clears the affected CPU buffers on return to user
+ space and when entering a guest.
+
+ If SMT is enabled it also clears the buffers on idle entry when the CPU
+ is only affected by MSBDS and not any other MDS variant, because the
+ other variants cannot be protected against cross Hyper-Thread attacks.
+
+ For CPUs which are only affected by MSBDS the user space, guest and idle
+ transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection for host to guest transition depends on the L1TF
+ vulnerability of the CPU:
+
+ - CPU is affected by L1TF:
+
+ If the L1D flush mitigation is enabled and up to date microcode is
+ available, the L1D flush mitigation is automatically protecting the
+ guest transition.
+
+ If the L1D flush mitigation is disabled then the MDS mitigation is
+ invoked explicitly when the host MDS mitigation is enabled.
+
+ For details on L1TF and virtualization see:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+ - CPU is not affected by L1TF:
+
+ CPU buffers are flushed before entering the guest when the host MDS
+ mitigation is enabled.
+
+ The resulting MDS protection matrix for the host to guest transition:
+
+ ============ ===== ============= ============ =================
+ L1TF MDS VMX-L1FLUSH Host MDS MDS-State
+
+ Don't care No Don't care N/A Not affected
+
+ Yes Yes Disabled Off Vulnerable
+
+ Yes Yes Disabled Full Mitigated
+
+ Yes Yes Enabled Don't care Mitigated
+
+ No Yes N/A Off Vulnerable
+
+ No Yes N/A Full Mitigated
+ ============ ===== ============= ============ =================
+
+ This only covers the host to guest transition, i.e. prevents leakage from
+ host to guest, but does not protect the guest internally. Guests need to
+ have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The XEON PHI processor family is affected by MSBDS which can be exploited
+ cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+ the use of MWAIT in user space (Ring 3), which opens a potential attack
+ vector for malicious user space. The exposure can be disabled on the kernel
+ command line with the 'ring3mwait=disable' command line option.
+
+ XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+ before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+ either, disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+ All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+ means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+ disable SMT for full protection. These are most of the affected CPUs; the
+ exception is XEON PHI, see :ref:`xeon_phi`.
+
+ Disabling SMT can have a significant performance impact, but the impact
+ depends on the type of workloads.
+
+ See the relevant chapter in the L1TF mitigation documentation for details:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The MDS mitigations can be controlled at boot time with the kernel command
+line option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full If the CPU is vulnerable, enable all available mitigations
+ for the MDS vulnerability, CPU buffer clearing on exit to
+ userspace and when entering a VM. Idle transitions are
+ protected as well if SMT is enabled.
+
+ It does not automatically disable SMT.
+
+ full,nosmt The same as mds=full, with SMT disabled on vulnerable
+ CPUs. This is the complete mitigation.
+
+ off Disables MDS mitigations completely.
+
+ ============ =============================================================
+
+Not specifying this option is equivalent to "mds=full".
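+
+For example, to get the complete mitigation on a vulnerable SMT system, the
+kernel can be booted with the following option appended to its command line
+(how the command line is edited is boot loader and distribution specific)::
+
+    mds=full,nosmt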
+
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+ If all userspace applications are from a trusted source and do not
+ execute untrusted code which is supplied externally, then the mitigation
+ can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The same considerations as for trusted user space above apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection depends on the state of the L1TF mitigations.
+ See :ref:`virt_mechanism`.
+
+ If the MDS mitigation is enabled and SMT is disabled, guest to host and
+ guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - Enable CPU buffer clearing
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted code. The same rationale as
+ for L1TF applies.
+ See :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <default_mitigations>`.
diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst
index 0a491676685e..8001917ee012 100644
--- a/Documentation/admin-guide/index.rst
+++ b/Documentation/admin-guide/index.rst
@@ -17,14 +17,12 @@ etc.
kernel-parameters
devices
-This section describes CPU vulnerabilities and provides an overview of the
-possible mitigations along with guidance for selecting mitigations if they
-are configurable at compile, boot or run time.
+This section describes CPU vulnerabilities and their mitigations.
.. toctree::
:maxdepth: 1
- l1tf
+ hw-vuln/index
Here is a set of documents aimed at users who are trying to track down
problems and bugs in particular.
@@ -77,6 +75,7 @@ configure specific aspects of kernel behavior to your liking.
LSM/index
mm/index
perf-security
+ acpi/index
.. only:: subproject and html
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index b8d0bc07ed0a..0124980dca2d 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -88,6 +88,7 @@ parameter is applicable::
APIC APIC support is enabled.
APM Advanced Power Management support is enabled.
ARM ARM architecture is enabled.
+ ARM64 ARM64 architecture is enabled.
AX25 Appropriate AX.25 support is enabled.
CLK Common clock infrastructure is enabled.
CMA Contiguous Memory Area support is enabled.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2b8ee90bb644..138f6664b2e2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -704,8 +704,11 @@
upon panic. This parameter reserves the physical
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
- is selected automatically. Check
- Documentation/kdump/kdump.txt for further details.
+ is selected automatically.
+ [KNL, x86_64] select a region under 4G first, and
+ fall back to reserve region above 4G when '@offset'
+ hasn't been specified.
+ See Documentation/kdump/kdump.txt for further details.
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
@@ -1585,7 +1588,7 @@
Format: { "off" | "enforce" | "fix" | "log" }
default: "enforce"
- ima_appraise_tcb [IMA]
+ ima_appraise_tcb [IMA] Deprecated. Use ima_policy= instead.
The builtin appraise policy appraises all files
owned by uid=0.
@@ -1612,8 +1615,7 @@
uid=0.
The "appraise_tcb" policy appraises the integrity of
- all files owned by root. (This is the equivalent
- of ima_appraise_tcb.)
+ all files owned by root.
The "secure_boot" policy appraises the integrity
of files (eg. kexec kernel image, kernel modules,
@@ -1828,6 +1830,9 @@
ip= [IP_PNP]
See Documentation/filesystems/nfs/nfsroot.txt.
+ ipcmni_extend [KNL] Extend the maximum number of unique System V
+ IPC identifiers from 32,768 to 16,777,216.
+
irqaffinity= [SMP] Set the default irq affinity mask
The argument is a cpu list, as described above.
@@ -2141,7 +2146,7 @@
Default is 'flush'.
- For details see: Documentation/admin-guide/l1tf.rst
+ For details see: Documentation/admin-guide/hw-vuln/l1tf.rst
l2cr= [PPC]
@@ -2387,6 +2392,32 @@
Format: <first>,<last>
Specifies range of consoles to be captured by the MDA.
+ mds= [X86,INTEL]
+ Control mitigation for the Micro-architectural Data
+ Sampling (MDS) vulnerability.
+
+ Certain CPUs are vulnerable to an exploit against CPU
+ internal buffers which can forward information to a
+ disclosure gadget under certain conditions.
+
+ In vulnerable processors, the speculatively
+ forwarded data can be used in a cache side channel
+ attack, to access data to which the attacker does
+ not have direct access.
+
+ This parameter controls the MDS mitigation. The
+ options are:
+
+ full - Enable MDS mitigation on vulnerable CPUs
+ full,nosmt - Enable MDS mitigation and disable
+ SMT on vulnerable CPUs
+ off - Unconditionally disable MDS mitigation
+
+ Not specifying this option is equivalent to
+ mds=full.
+
+ For details see: Documentation/admin-guide/hw-vuln/mds.rst
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
@@ -2544,6 +2575,42 @@
in the "bleeding edge" mini2440 support kernel at
http://repo.or.cz/w/linux-2.6/mini2440.git
+ mitigations=
+ [X86,PPC,S390,ARM64] Control optional mitigations for
+ CPU vulnerabilities. This is a set of curated,
+ arch-independent options, each of which is an
+ aggregation of existing arch-specific options.
+
+ off
+ Disable all optional CPU mitigations. This
+ improves system performance, but it may also
+ expose users to several CPU vulnerabilities.
+ Equivalent to: nopti [X86,PPC]
+ kpti=0 [ARM64]
+ nospectre_v1 [PPC]
+ nobp=0 [S390]
+ nospectre_v2 [X86,PPC,S390,ARM64]
+ spectre_v2_user=off [X86]
+ spec_store_bypass_disable=off [X86,PPC]
+ ssbd=force-off [ARM64]
+ l1tf=off [X86]
+ mds=off [X86]
+
+ auto (default)
+ Mitigate all CPU vulnerabilities, but leave SMT
+ enabled, even if it's vulnerable. This is for
+ users who don't want to be surprised by SMT
+ getting disabled across kernel upgrades, or who
+ have other ways of avoiding SMT-based attacks.
+ Equivalent to: (default behavior)
+
+ auto,nosmt
+ Mitigate all CPU vulnerabilities, disabling SMT
+ if needed. This is for users who always want to
+ be fully mitigated, even if it means losing SMT.
+ Equivalent to: l1tf=flush,nosmt [X86]
+ mds=full,nosmt [X86]
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -2839,11 +2906,11 @@
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
- nosmap [X86]
+ nosmap [X86,PPC]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
- nosmep [X86]
+ nosmep [X86,PPC]
Disable SMEP (Supervisor Mode Execution Prevention)
even if it is supported by processor.
@@ -2873,10 +2940,10 @@
check bypass). With this option data leaks are possible
in the system.
- nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
- (indirect branch prediction) vulnerability. System may
- allow data leaks with this option, which is equivalent
- to spectre_v2=off.
+ nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+ the Spectre variant 2 (indirect branch prediction)
+ vulnerability. System may allow data leaks with this
+ option.
nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
@@ -3110,6 +3177,16 @@
This will also cause panics on machine check exceptions.
Useful together with panic=30 to trigger a reboot.
+ page_alloc.shuffle=
+ [KNL] Boolean flag to control whether the page allocator
+ should randomize its free lists. The randomization may
+ be automatically enabled if the kernel detects it is
+ running on a platform with a direct-mapped memory-side
+ cache, and this parameter can be used to
+ override/disable that behavior. The state of the flag
+ can be read from sysfs at:
+ /sys/module/page_alloc/parameters/shuffle.
+
page_owner= [KNL] Boot-time page_owner enabling option.
Storage of the information about who allocated
each page is disabled in default. With this switch,
@@ -3135,6 +3212,7 @@
bit 2: print timer info
bit 3: print locks info if CONFIG_LOCKDEP is on
bit 4: print ftrace buffer
+ bit 5: print all printk messages in buffer
panic_on_warn panic() instead of WARN(). Useful to cause kdump
on a WARN().
@@ -3394,6 +3472,8 @@
bridges without forcing it upstream. Note:
this removes isolation between devices and
may put more devices in an IOMMU group.
+ force_floating [S390] Force usage of floating interrupts.
+ nomio [S390] Do not use MIO instructions.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
@@ -3623,7 +3703,9 @@
see CONFIG_RAS_CEC help text.
rcu_nocbs= [KNL]
- The argument is a cpu list, as described above.
+ The argument is a cpu list, as described above,
+ except that the string "all" can be used to
+ specify every CPU on the system.
In kernels built with CONFIG_RCU_NOCB_CPU=y, set
the specified list of CPUs to be no-callback CPUs.
@@ -3986,7 +4068,9 @@
[[,]s[mp]#### \
[[,]b[ios] | a[cpi] | k[bd] | t[riple] | e[fi] | p[ci]] \
[[,]f[orce]
- Where reboot_mode is one of warm (soft) or cold (hard) or gpio,
+ Where reboot_mode is one of warm (soft) or cold (hard) or gpio
+ (prefix with 'panic_' to set mode for panic
+ reboot only),
reboot_type is one of bios, acpi, kbd, triple, efi, or pci,
reboot_force is either force or not specified,
reboot_cpu is s[mp]#### with #### being the processor
@@ -4703,6 +4787,10 @@
[x86] unstable: mark the TSC clocksource as unstable, this
marks the TSC unconditionally unstable at bootup and
avoids any further wobbles once the TSC watchdog notices.
+ [x86] nowatchdog: disable clocksource watchdog. Used
+ in situations with strict latency requirements (where
+ interruptions from clocksource watchdog are not
+ acceptable).
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
@@ -5173,6 +5261,13 @@
with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+ xen_timer_slop= [X86-64,XEN]
+ Set the timer slop (in nanoseconds) for the virtual Xen
+ timers (default is 100000). This adjusts the minimum
+ delta of virtualized Xen timers, where lower values
+ improve timer resolution at the expense of processing
+ more timer interrupts.
+
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/admin-guide/mm/numaperf.rst b/Documentation/admin-guide/mm/numaperf.rst
new file mode 100644
index 000000000000..b79f70c04397
--- /dev/null
+++ b/Documentation/admin-guide/mm/numaperf.rst
@@ -0,0 +1,169 @@
+.. _numaperf:
+
+=============
+NUMA Locality
+=============
+
+Some platforms may have multiple types of memory attached to a compute
+node. These disparate memory ranges may share some characteristics, such
+as CPU cache coherence, but may have different performance. For example,
+different media types and buses affect bandwidth and latency.
+
+A system supports such heterogeneous memory by grouping each memory type
+under different domains, or "nodes", based on locality and performance
+characteristics. Some memory may share the same node as a CPU, and others
+are provided as memory only nodes. While memory only nodes do not provide
+CPUs, they may still be local to one or more compute nodes relative to
+other nodes. The following diagram shows one such example of two compute
+nodes with local memory and a memory only node for each compute node::
+
+ +------------------+ +------------------+
+ | Compute Node 0 +-----+ Compute Node 1 |
+ | Local Node0 Mem | | Local Node1 Mem |
+ +--------+---------+ +--------+---------+
+ | |
+ +--------+---------+ +--------+---------+
+ | Slower Node2 Mem | | Slower Node3 Mem |
+ +------------------+ +--------+---------+
+
+A "memory initiator" is a node containing one or more devices such as
+CPUs or separate memory I/O devices that can initiate memory requests.
+A "memory target" is a node containing one or more physical address
+ranges accessible from one or more memory initiators.
+
+When multiple memory initiators exist, they may not all have the same
+performance when accessing a given memory target. Each initiator-target
+pair may be organized into different ranked access classes to represent
+this relationship. The highest performing initiator to a given target
+is considered to be one of that target's local initiators, and given
+the highest access class, 0. Any given target may have one or more
+local initiators, and any given initiator may have multiple local
+memory targets.
+
+To aid applications matching memory targets with their initiators, the
+kernel provides symlinks to each other. The following example lists the
+relationship for the access class "0" memory initiators and targets::
+
+ # symlinks -v /sys/devices/system/node/nodeX/access0/targets/
+ relative: /sys/devices/system/node/nodeX/access0/targets/nodeY -> ../../nodeY
+
+ # symlinks -v /sys/devices/system/node/nodeY/access0/initiators/
+ relative: /sys/devices/system/node/nodeY/access0/initiators/nodeX -> ../../nodeX
+
+A memory initiator may have multiple memory targets in the same access
+class. The target memory's initiators in a given class indicate that the
+nodes' access characteristics share the same performance relative to other
+linked initiator nodes. Each target within an initiator's access class,
+though, does not necessarily perform the same as the others.
+
+================
+NUMA Performance
+================
+
+Applications may wish to consider which node they want their memory to
+be allocated from based on the node's performance characteristics. If
+the system provides these attributes, the kernel exports them under the
+node sysfs hierarchy by appending the attributes directory under the
+memory node's access class 0 initiators as follows::
+
+ /sys/devices/system/node/nodeY/access0/initiators/
+
+These attributes apply only when accessed from nodes that are linked
+under this access class's initiators.
+
+The performance characteristics the kernel provides for the local initiators
+are exported as follows::
+
+ # tree -P "read*|write*" /sys/devices/system/node/nodeY/access0/initiators/
+ /sys/devices/system/node/nodeY/access0/initiators/
+ |-- read_bandwidth
+ |-- read_latency
+ |-- write_bandwidth
+ `-- write_latency
+
+The bandwidth attributes are provided in MiB/second.
+
+The latency attributes are provided in nanoseconds.
+
+The values reported here correspond to the rated latency and bandwidth
+for the platform.
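+
+For example, the rated read bandwidth of node 1, as seen from its access
+class 0 initiators, could be queried like this (node 1 is a placeholder;
+the attributes are only present if the platform provides the data)::
+
+    # "node1" is a placeholder node
+    cat /sys/devices/system/node/node1/access0/initiators/read_bandwidth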
+
+==========
+NUMA Cache
+==========
+
+System memory may be constructed in a hierarchy of elements with various
+performance characteristics in order to provide a large address space of
+slower performing memory cached by smaller, higher performing memory. The
+system physical addresses that memory initiators are aware of are provided
+by the last memory level in the hierarchy. The system meanwhile uses
+higher performing memory to transparently cache access to progressively
+slower levels.
+
+The term "far memory" is used to denote the last level memory in the
+hierarchy. Each increasing cache level provides higher performing
+initiator access, and the term "near memory" represents the fastest
+cache provided by the system.
+
+This numbering is different than CPU caches where the cache level (ex:
+L1, L2, L3) uses the CPU-side view where each increased level is lower
+performing. In contrast, the memory cache level is centric to the last
+level memory, so the higher numbered cache level corresponds to memory
+nearer to the CPU, and further from far memory.
+
+The memory-side caches are not directly addressable by software. When
+software accesses a system address, the system will return it from the
+near memory cache if it is present. If it is not present, the system
+accesses the next level of memory until there is either a hit in that
+cache level, or it reaches far memory.
+
+An application does not need to know about caching attributes in order
+to use the system. Software may optionally query the memory cache
+attributes in order to maximize the performance out of such a setup.
+If the system provides a way for the kernel to discover this information,
+for example with ACPI HMAT (Heterogeneous Memory Attribute Table),
+the kernel will append these attributes to the NUMA node memory target.
+
+When the kernel first registers a memory cache with a node, the kernel
+will create the following directory::
+
+ /sys/devices/system/node/nodeX/memory_side_cache/
+
+If that directory is not present, the system either does not provide a
+memory-side cache, or that information is not accessible to the kernel.
+
+The attributes for each level of cache are provided under its cache
+level index::
+
+ /sys/devices/system/node/nodeX/memory_side_cache/indexA/
+ /sys/devices/system/node/nodeX/memory_side_cache/indexB/
+ /sys/devices/system/node/nodeX/memory_side_cache/indexC/
+
+Each cache level's directory provides its attributes. For example, the
+following shows a single cache level and the attributes available for
+software to query::
+
+ # tree sys/devices/system/node/node0/memory_side_cache/
+ /sys/devices/system/node/node0/memory_side_cache/
+ |-- index1
+ | |-- indexing
+ | |-- line_size
+ | |-- size
+ | `-- write_policy
+
+The "indexing" will be 0 if it is a direct-mapped cache, and non-zero
+for any other indexing, such as multi-way set associativity.
+
+The "line_size" is the number of bytes accessed from the next cache
+level on a miss.
+
+The "size" is the number of bytes provided by this cache level.
+
+The "write_policy" will be 0 for write-back, and non-zero for
+write-through caching.
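+
+For example, the capacity of the first cache level of node 0 from the tree
+above could be read like this (the directory is only present when the
+platform reports a memory-side cache)::
+
+    # "node0" and "index1" follow the example tree above
+    cat /sys/devices/system/node/node0/memory_side_cache/index1/size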
+
+========
+See Also
+========
+.. [1] https://www.uefi.org/sites/default/files/resources/ACPI_6_2.pdf
+ Section 5.2.27
diff --git a/Documentation/admin-guide/pm/cpufreq.rst b/Documentation/admin-guide/pm/cpufreq.rst
index 7eca9026a9ed..0c74a7784964 100644
--- a/Documentation/admin-guide/pm/cpufreq.rst
+++ b/Documentation/admin-guide/pm/cpufreq.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpufreq_policy| replace:: :c:type:`struct cpufreq_policy <cpufreq_policy>`
.. |intel_pstate| replace:: :doc:`intel_pstate <intel_pstate>`
@@ -5,9 +8,10 @@
CPU Performance Scaling
=======================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
The Concept of CPU Performance Scaling
======================================
@@ -396,8 +400,8 @@ RT or deadline scheduling classes, the governor will increase the frequency to
the allowed maximum (that is, the ``scaling_max_freq`` policy limit). In turn,
if it is invoked by the CFS scheduling class, the governor will use the
Per-Entity Load Tracking (PELT) metric for the root control group of the
-given CPU as the CPU utilization estimate (see the `Per-entity load tracking`_
-LWN.net article for a description of the PELT mechanism). Then, the new
+given CPU as the CPU utilization estimate (see the *Per-entity load tracking*
+LWN.net article [1]_ for a description of the PELT mechanism). Then, the new
CPU frequency to apply is computed in accordance with the formula
f = 1.25 * ``f_0`` * ``util`` / ``max``
@@ -698,4 +702,8 @@ hardware feature (e.g. all Intel ones), even if the
:c:macro:`CONFIG_X86_ACPI_CPUFREQ_CPB` configuration option is set.
-.. _Per-entity load tracking: https://lwn.net/Articles/531853/
+References
+==========
+
+.. [1] Jonathan Corbet, *Per-entity load tracking*,
+ https://lwn.net/Articles/531853/
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
index 9c58b35a81cb..e70b365dbc60 100644
--- a/Documentation/admin-guide/pm/cpuidle.rst
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
.. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>`
@@ -5,9 +8,10 @@
CPU Idle Time Management
========================
-::
+:Copyright: |copy| 2018 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2018 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Concepts
========
diff --git a/Documentation/admin-guide/pm/index.rst b/Documentation/admin-guide/pm/index.rst
index 49237ac73442..39f8f9f81e7a 100644
--- a/Documentation/admin-guide/pm/index.rst
+++ b/Documentation/admin-guide/pm/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
================
Power Management
================
diff --git a/Documentation/admin-guide/pm/intel_epb.rst b/Documentation/admin-guide/pm/intel_epb.rst
new file mode 100644
index 000000000000..005121167af7
--- /dev/null
+++ b/Documentation/admin-guide/pm/intel_epb.rst
@@ -0,0 +1,41 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+======================================
+Intel Performance and Energy Bias Hint
+======================================
+
+:Copyright: |copy| 2019 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+.. kernel-doc:: arch/x86/kernel/cpu/intel_epb.c
+ :doc: overview
+
+Intel Performance and Energy Bias Attribute in ``sysfs``
+========================================================
+
+The Intel Performance and Energy Bias Hint (EPB) value for a given (logical) CPU
+can be checked or updated through a ``sysfs`` attribute (file) under
+:file:`/sys/devices/system/cpu/cpu<N>/power/`, where the CPU number ``<N>``
+is allocated at the system initialization time:
+
+``energy_perf_bias``
+ Shows the current EPB value for the CPU in a sliding scale 0 - 15, where
+ a value of 0 corresponds to a hint preference for highest performance
+ and a value of 15 corresponds to the maximum energy savings.
+
+ In order to update the EPB value for the CPU, this attribute can be
+ written to, either with a number in the 0 - 15 sliding scale above, or
+ with one of the strings: "performance", "balance-performance", "normal",
+ "balance-power", "power", each of which maps to a specific value in that
+ scale.
+
+ This attribute is present for all online CPUs supporting the EPB
+ feature.
+
+Note that while the EPB interface to the processor is defined at the logical CPU
+level, the physical register backing it may be shared by multiple CPUs (for
+example, SMT siblings or cores in one package). For this reason, updating the
+EPB value for one CPU may cause the EPB values for other CPUs to change.
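+
+For example, the EPB of CPU0 can be inspected and then biased towards energy
+saving from a shell (CPU0 is just a placeholder; any online CPU supporting
+the EPB feature can be used)::
+
+    # "cpu0" is a placeholder CPU
+    cat /sys/devices/system/cpu/cpu0/power/energy_perf_bias
+    echo "balance-power" > /sys/devices/system/cpu/cpu0/power/energy_perf_bias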
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ec0f7c111f65..67e414e34f37 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -1,10 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===============================================
``intel_pstate`` CPU Performance Scaling Driver
===============================================
-::
+:Copyright: |copy| 2017 Intel Corporation
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
General Information
@@ -20,11 +23,10 @@ you have not done that yet.]
For the processors supported by ``intel_pstate``, the P-state concept is broader
than just an operating frequency or an operating performance point (see the
-`LinuxCon Europe 2015 presentation by Kristen Accardi <LCEU2015_>`_ for more
+LinuxCon Europe 2015 presentation by Kristen Accardi [1]_ for more
information about that). For this reason, the representation of P-states used
by ``intel_pstate`` internally follows the hardware specification (for details
-refer to `Intel® 64 and IA-32 Architectures Software Developer’s Manual
-Volume 3: System Programming Guide <SDM_>`_). However, the ``CPUFreq`` core
+refer to Intel Software Developer’s Manual [2]_). However, the ``CPUFreq`` core
uses frequencies for identifying operating performance points of CPUs and
frequencies are involved in the user space interface exposed by it, so
``intel_pstate`` maps its internal representation of P-states to frequencies too
@@ -561,9 +563,9 @@ or to pin every task potentially sensitive to them to a specific CPU.]
On the majority of systems supported by ``intel_pstate``, the ACPI tables
provided by the platform firmware contain ``_PSS`` objects returning information
-that can be used for CPU performance scaling (refer to the `ACPI specification`_
-for details on the ``_PSS`` objects and the format of the information returned
-by them).
+that can be used for CPU performance scaling (refer to the ACPI specification
+[3]_ for details on the ``_PSS`` objects and the format of the information
+returned by them).
The information returned by the ACPI ``_PSS`` objects is used by the
``acpi-cpufreq`` scaling driver. On systems supported by ``intel_pstate``
@@ -728,6 +730,14 @@ P-state is called, the ``ftrace`` filter can be set to to
<idle>-0 [000] ..s. 2537.654843: intel_pstate_set_pstate <-intel_pstate_timer_func
-.. _LCEU2015: http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
-.. _SDM: http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
-.. _ACPI specification: http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf
+References
+==========
+
+.. [1] Kristen Accardi, *Balancing Power and Performance in the Linux Kernel*,
+ http://events.linuxfoundation.org/sites/events/files/slides/LinuxConEurope_2015.pdf
+
+.. [2] *Intel® 64 and IA-32 Architectures Software Developer’s Manual Volume 3: System Programming Guide*,
+ http://www.intel.com/content/www/us/en/architecture-and-technology/64-ia-32-architectures-software-developer-system-programming-manual-325384.html
+
+.. [3] *Advanced Configuration and Power Interface Specification*,
+ https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
diff --git a/Documentation/admin-guide/pm/sleep-states.rst b/Documentation/admin-guide/pm/sleep-states.rst
index dbf5acd49f35..cd3a28cb81f4 100644
--- a/Documentation/admin-guide/pm/sleep-states.rst
+++ b/Documentation/admin-guide/pm/sleep-states.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===================
System Sleep States
===================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Sleep states are global low-power states of the entire system in which user
space code cannot be executed and the overall system activity is significantly
diff --git a/Documentation/admin-guide/pm/strategies.rst b/Documentation/admin-guide/pm/strategies.rst
index afe4d3f831fe..dd0362e32fa5 100644
--- a/Documentation/admin-guide/pm/strategies.rst
+++ b/Documentation/admin-guide/pm/strategies.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
===========================
Power Management Strategies
===========================
-::
+:Copyright: |copy| 2017 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2017 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
The Linux kernel supports two major high-level power management strategies.
diff --git a/Documentation/admin-guide/pm/system-wide.rst b/Documentation/admin-guide/pm/system-wide.rst
index 0c81e4c5de39..2b1f987b34f0 100644
--- a/Documentation/admin-guide/pm/system-wide.rst
+++ b/Documentation/admin-guide/pm/system-wide.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
============================
System-Wide Power Management
============================
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
index b6cef9b5e961..fc298eb1234b 100644
--- a/Documentation/admin-guide/pm/working-state.rst
+++ b/Documentation/admin-guide/pm/working-state.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==============================
Working-State Power Management
==============================
@@ -8,3 +10,4 @@ Working-State Power Management
cpuidle
cpufreq
intel_pstate
+ intel_epb
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index d4b4dd1fe786..684a0da39378 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -209,6 +209,22 @@ infrastructure:
| AT | [35-32] | y |
x--------------------------------------------------x
+ 6) ID_AA64ZFR0_EL1 - SVE feature ID register 0
+
+ x--------------------------------------------------x
+ | Name | bits | visible |
+ |--------------------------------------------------|
+ | SM4 | [43-40] | y |
+ |--------------------------------------------------|
+ | SHA3 | [35-32] | y |
+ |--------------------------------------------------|
+ | BitPerm | [19-16] | y |
+ |--------------------------------------------------|
+ | AES | [7-4] | y |
+ |--------------------------------------------------|
+ | SVEVer | [3-0] | y |
+ x--------------------------------------------------x
+
Appendix I: Example
---------------------------
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index 13d6691b37be..b73a2519ecf2 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -13,9 +13,9 @@ architected discovery mechanism available to userspace code at EL0. The
kernel exposes the presence of these features to userspace through a set
of flags called hwcaps, exposed in the auxilliary vector.
-Userspace software can test for features by acquiring the AT_HWCAP entry
-of the auxilliary vector, and testing whether the relevant flags are
-set, e.g.
+Userspace software can test for features by acquiring the AT_HWCAP or
+AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant
+flags are set, e.g.
bool floating_point_is_present(void)
{
@@ -135,6 +135,10 @@ HWCAP_DCPOP
Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0001.
+HWCAP2_DCPODP
+
+ Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
+
HWCAP_SHA3
Functionality implied by ID_AA64ISAR0_EL1.SHA3 == 0b0001.
@@ -159,6 +163,30 @@ HWCAP_SVE
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
+HWCAP2_SVE2
+
+ Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
+
+HWCAP2_SVEAES
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
+
+HWCAP2_SVEPMULL
+
+ Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0010.
+
+HWCAP2_SVEBITPERM
+
+ Functionality implied by ID_AA64ZFR0_EL1.BitPerm == 0b0001.
+
+HWCAP2_SVESHA3
+
+ Functionality implied by ID_AA64ZFR0_EL1.SHA3 == 0b0001.
+
+HWCAP2_SVESM4
+
+ Functionality implied by ID_AA64ZFR0_EL1.SM4 == 0b0001.
+
HWCAP_ASIMDFHM
Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
@@ -194,3 +222,10 @@ HWCAP_PACG
Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or
ID_AA64ISAR1_EL1.GPI == 0b0001, as described by
Documentation/arm64/pointer-authentication.txt.
+
+
+4. Unused AT_HWCAP bits
+-----------------------
+
+For interoperation with userspace, the kernel guarantees that bits 62
+and 63 of AT_HWCAP will always be returned as 0.
diff --git a/Documentation/arm64/perf.txt b/Documentation/arm64/perf.txt
new file mode 100644
index 000000000000..0d6a7d87d49e
--- /dev/null
+++ b/Documentation/arm64/perf.txt
@@ -0,0 +1,85 @@
+Perf Event Attributes
+=====================
+
+Author: Andrew Murray <andrew.murray@arm.com>
+Date: 2019-03-06
+
+exclude_user
+------------
+
+This attribute excludes userspace.
+
+Userspace always runs at EL0 and thus this attribute will exclude EL0.
+
+
+exclude_kernel
+--------------
+
+This attribute excludes the kernel.
+
+The kernel runs at EL2 with VHE and EL1 without. Guest kernels always run
+at EL1.
+
+For the host this attribute will exclude EL1 and additionally EL2 on a VHE
+system.
+
+For the guest this attribute will exclude EL1. Please note that EL2 is
+never counted within a guest.
+
+
+exclude_hv
+----------
+
+This attribute excludes the hypervisor.
+
+For a VHE host this attribute is ignored as we consider the host kernel to
+be the hypervisor.
+
+For a non-VHE host this attribute will exclude EL2 as we consider the
+hypervisor to be any code that runs at EL2 which is predominantly used for
+guest/host transitions.
+
+For the guest this attribute has no effect. Please note that EL2 is
+never counted within a guest.
+
+
+exclude_host / exclude_guest
+----------------------------
+
+These attributes exclude the KVM host and guest, respectively.
+
+The KVM host may run at EL0 (userspace), EL1 (non-VHE kernel) and EL2 (VHE
+kernel or non-VHE hypervisor).
+
+The KVM guest may run at EL0 (userspace) and EL1 (kernel).
+
+Due to the overlapping exception levels between host and guests we cannot
+exclusively rely on the PMU's hardware exception filtering - therefore we
+must enable/disable counting on the entry and exit to the guest. This is
+performed differently on VHE and non-VHE systems.
+
+For non-VHE systems we exclude EL2 for exclude_host - upon entering and
+exiting the guest we disable/enable the event as appropriate based on the
+exclude_host and exclude_guest attributes.
+
+For VHE systems we exclude EL1 for exclude_guest and exclude both EL0,EL2
+for exclude_host. Upon entering and exiting the guest we modify the event
+to include/exclude EL0 as appropriate based on the exclude_host and
+exclude_guest attributes.
+
+The statements above also apply when these attributes are used within a
+non-VHE guest; however, please note that EL2 is never counted within a guest.
+
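+As an illustration, the perf tool selects these attributes through its
+event modifiers; the mapping below is only a sketch (./workload is a
+placeholder), see the perf documentation for the exact semantics:
+
+    perf stat -e cycles:u -- ./workload    # count user space only
+    perf stat -e cycles:k -- ./workload    # count kernel only
+    perf stat -a -e cycles:G sleep 1       # count guest only
+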
+
+Accuracy
+--------
+
+On non-VHE hosts we enable/disable counters on the entry/exit of host/guest
+transition at EL2 - however there is a period of time between
+enabling/disabling the counters and entering/exiting the guest. We are
+able to eliminate counters counting host events on the boundaries of guest
+entry/exit when counting guest events by filtering out EL2 for
+exclude_host. However when using !exclude_hv there is a small blackout
+window at the guest entry/exit where host events are not captured.
+
+On VHE systems there are no blackout windows.
diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt
index 5baca42ba146..fc71b33de87e 100644
--- a/Documentation/arm64/pointer-authentication.txt
+++ b/Documentation/arm64/pointer-authentication.txt
@@ -87,7 +87,21 @@ used to get and set the keys for a thread.
Virtualization
--------------
-Pointer authentication is not currently supported in KVM guests. KVM
-will mask the feature bits from ID_AA64ISAR1_EL1, and attempted use of
-the feature will result in an UNDEFINED exception being injected into
-the guest.
+Pointer authentication is enabled in KVM guests when each virtual cpu is
+initialised by passing the flags KVM_ARM_VCPU_PTRAUTH_[ADDRESS/GENERIC] and
+requesting these two separate cpu features to be enabled. The current KVM
+guest implementation works by enabling both features together, so both
+these userspace flags are checked before enabling pointer authentication.
+Using separate userspace flags avoids userspace ABI changes if support
+is added in the future to allow these two features to be enabled
+independently of one another.
+
+As the Arm architecture specifies that the Pointer Authentication feature
+is implemented along with the VHE feature, the KVM arm64 ptrauth code
+relies on VHE mode being present.
+
+Additionally, when these vcpu feature flags are not set, KVM will
+filter out the Pointer Authentication system key registers from the
+KVM_GET/SET_REG_* ioctls and mask those features from the cpufeature ID
+register. Any attempt to use the Pointer Authentication instructions will
+result in an UNDEFINED exception being injected into the guest.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index d1e2bb801e1b..68d9b74fd751 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -61,6 +61,7 @@ stable kernels.
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
+| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
@@ -77,6 +78,7 @@ stable kernels.
| Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
+| Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
| | | | |
| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
diff --git a/Documentation/arm64/sve.txt b/Documentation/arm64/sve.txt
index 7169a0ec41d8..9940e924a47e 100644
--- a/Documentation/arm64/sve.txt
+++ b/Documentation/arm64/sve.txt
@@ -34,6 +34,23 @@ model features for SVE is included in Appendix A.
following sections: software that needs to verify that those interfaces are
present must check for HWCAP_SVE instead.
+* On hardware that supports the SVE2 extensions, HWCAP2_SVE2 will also
+ be reported in the AT_HWCAP2 aux vector entry. In addition to this,
+ optional extensions to SVE2 may be reported by the presence of:
+
+ HWCAP2_SVE2
+ HWCAP2_SVEAES
+ HWCAP2_SVEPMULL
+ HWCAP2_SVEBITPERM
+ HWCAP2_SVESHA3
+ HWCAP2_SVESM4
+
+ This list may be extended over time as the SVE architecture evolves.
+
+ These extensions are also reported via the CPU ID register ID_AA64ZFR0_EL1,
+ which userspace can read using an MRS instruction. See elf_hwcaps.txt and
+ cpu-feature-registers.txt for details.
+
* Debuggers should restrict themselves to interacting with the target via the
NT_ARM_SVE regset. The recommended way of detecting support for this regset
is to connect to a target process first and then attempt a
diff --git a/Documentation/atomic_bitops.txt b/Documentation/atomic_bitops.txt
index be70b32c95d9..093cdaefdb37 100644
--- a/Documentation/atomic_bitops.txt
+++ b/Documentation/atomic_bitops.txt
@@ -1,6 +1,6 @@
-
-On atomic bitops.
-
+=============
+Atomic bitops
+=============
While our bitmap_{}() functions are non-atomic, we have a number of operations
operating on single bits in a bitmap that are atomic.
diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
index 913396ac5824..dca3fb0554db 100644
--- a/Documentation/atomic_t.txt
+++ b/Documentation/atomic_t.txt
@@ -56,6 +56,23 @@ Barriers:
smp_mb__{before,after}_atomic()
+TYPES (signed vs unsigned)
+--------------------------
+
+While atomic_t, atomic_long_t and atomic64_t use int, long and s64
+respectively (for hysterical raisins), the kernel uses -fno-strict-overflow
+(which implies -fwrapv) and defines signed overflow to behave like
+2s-complement.
+
+Therefore, an explicitly unsigned variant of the atomic ops is strictly
+unnecessary and we can simply cast; there is no UB.
+
+There was a bug in UBSAN prior to GCC-8 that would generate UB warnings for
+signed types.
+
+With this we also conform to the C/C++ _Atomic behaviour and things like
+P1236R1.
+
SEMANTICS
---------
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 98a8dd5ee385..1a0f2ac02eb6 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -20,13 +20,26 @@ for that device, by setting low_latency to 0. See Section 3 for
details on how to configure BFQ for the desired tradeoff between
latency and throughput, or on how to maximize throughput.
-BFQ has a non-null overhead, which limits the maximum IOPS that a CPU
-can process for a device scheduled with BFQ. To give an idea of the
-limits on slow or average CPUs, here are, first, the limits of BFQ for
-three different CPUs, on, respectively, an average laptop, an old
-desktop, and a cheap embedded system, in case full hierarchical
-support is enabled (i.e., CONFIG_BFQ_GROUP_IOSCHED is set), but
-CONFIG_DEBUG_BLK_CGROUP is not set (Section 4-2):
+Like every I/O scheduler, BFQ adds some overhead to per-I/O-request
+processing. To give an idea of this overhead, the total,
+single-lock-protected, per-request processing time of BFQ---i.e., the
+sum of the execution times of the request insertion, dispatch and
+completion hooks---is, e.g., 1.9 us on an Intel Core i7-2760QM@2.40GHz
+(dated CPU for notebooks; time measured with simple code
+instrumentation, and using the throughput-sync.sh script of the S
+suite [1], in performance-profiling mode). To put this result into
+context, the total, single-lock-protected, per-request execution time
+of the lightest I/O scheduler available in blk-mq, mq-deadline, is 0.7
+us (mq-deadline is ~800 LOC, against ~10500 LOC for BFQ).
+
+Scheduling overhead further limits the maximum IOPS that a CPU can
+process (already limited by the execution of the rest of the I/O
+stack). To give an idea of the limits with BFQ, on slow or average
+CPUs, here are, first, the limits of BFQ for three different CPUs, on,
+respectively, an average laptop, an old desktop, and a cheap embedded
+system, in case full hierarchical support is enabled (i.e.,
+CONFIG_BFQ_GROUP_IOSCHED is set), but CONFIG_DEBUG_BLK_CGROUP is not
+set (Section 4-2):
- Intel i7-4850HQ: 400 KIOPS
- AMD A8-3850: 250 KIOPS
- ARM CortexTM-A53 Octa-core: 80 KIOPS
@@ -566,3 +579,5 @@ applications. Unset this tunable if you need/want to control weights.
Slightly extended version:
http://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite-
results.pdf
+
+[3] https://github.com/Algodev-github/S
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 4cad1024fff7..41f0a3d33bbd 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -93,3 +93,7 @@ zoned=[0/1]: Default: 0
zone_size=[MB]: Default: 256
Per zone size when exposed as a zoned block device. Must be a power of two.
+
+zone_nr_conv=[nr_conv]: Default: 0
+ The number of conventional zones to create when the block device is zoned. If
+ zone_nr_conv >= nr_zones, it will be reduced to nr_zones - 1.
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 10453c627135..cb402c59eca5 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -85,8 +85,33 @@ Q: Can loops be supported in a safe way?
A: It's not clear yet.
BPF developers are trying to find a way to
-support bounded loops where the verifier can guarantee that
-the program terminates in less than 4096 instructions.
+support bounded loops.
+
+Q: What are the verifier limits?
+--------------------------------
+A: The only limit known to user space is BPF_MAXINSNS (4096). It is
+the maximum number of instructions that an unprivileged bpf program
+can have. The verifier has various internal limits, such as the
+maximum number of instructions that can be explored during program
+analysis. Currently, that limit is set to 1 million, which essentially
+means that the largest program can consist of 1 million NOP
+instructions. There are also limits on the maximum number of
+subsequent branches, the number of nested bpf-to-bpf calls, the number
+of verifier states per instruction, and the number of maps used by the
+program. All these limits can be hit with a sufficiently complex
+program. There are also non-numerical limits that can cause the
+program to be rejected. The verifier used to recognize only pointer +
+constant expressions. Now it can recognize pointer + bounded_register.
+bpf_lookup_map_elem(key) used to require that 'key' must be a pointer
+to the stack. Now, 'key' can be a pointer to a map value. The verifier
+is steadily getting 'smarter'; the limits are being removed. The only
+way to know whether a program is going to be accepted by the verifier
+is to try to load it. The bpf development process guarantees that
+future kernel versions will accept all bpf programs that were accepted
+by earlier versions.
+
Instruction level questions
---------------------------
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 7313d354f20e..8820360d00da 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -82,6 +82,8 @@ sequentially and type id is assigned to each recognized type starting from id
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+ #define BTF_KIND_VAR 14 /* Variable */
+ #define BTF_KIND_DATASEC 15 /* Section */
Note that the type section encodes debug info, not just pure types.
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@@ -393,6 +395,61 @@ refers to parameter type.
If the function has variable arguments, the last parameter is encoded with
``name_off = 0`` and ``type = 0``.
+2.2.14 BTF_KIND_VAR
+~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_VAR
+ * ``info.vlen``: 0
+ * ``type``: the type of the variable
+
+``btf_type`` is followed by a single ``struct btf_var`` with the
+following data::
+
+ struct btf_var {
+ __u32 linkage;
+ };
+
+``struct btf_var`` encoding:
+ * ``linkage``: currently only 0 for a static variable, or 1 for a
+ globally allocated variable in ELF sections
+
+Not all types of global variables are supported by LLVM at this point.
+The following is currently available:
+
+ * static variables with or without section attributes
+ * global variables with section attributes
+
+The latter is for future extraction of map key/value type ids from a
+map definition.
+
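+As a rough illustration (not taken from the kernel sources), the following C
+globals would each be described by a ``BTF_KIND_VAR`` entry; the mapping to
+sections is indicative only::
+
+ static int dbg_level;                      /* linkage 0, ".bss" DATASEC */
+ static const char tag[] = "v1";            /* linkage 0, ".rodata" DATASEC */
+ int cfg __attribute__((section(".data"))); /* linkage 1, ".data" DATASEC */
+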
+2.2.15 BTF_KIND_DATASEC
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid name associated with a variable or
+ one of .data/.bss/.rodata
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_DATASEC
+ * ``info.vlen``: # of variables
+ * ``size``: total section size in bytes (0 at compilation time, patched
+ to actual size by BPF loaders such as libbpf)
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_var_secinfo``::
+
+ struct btf_var_secinfo {
+ __u32 type;
+ __u32 offset;
+ __u32 size;
+ };
+
+``struct btf_var_secinfo`` encoding:
+ * ``type``: the type of the BTF_KIND_VAR variable
+ * ``offset``: the in-section offset of the variable
+ * ``size``: the size of the variable in bytes
+
3. BTF Kernel API
*****************
@@ -521,6 +578,7 @@ For line_info, the line number and column number are defined as below:
#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
3.4 BPF_{PROG,MAP}_GET_NEXT_ID
+==============================
In kernel, every loaded program, map or btf has a unique id. The id won't
change during the lifetime of a program, map, or btf.
@@ -530,6 +588,7 @@ each command, to user space, for bpf program or maps, respectively, so an
inspection tool can inspect all programs and maps.
3.5 BPF_{PROG,MAP}_GET_FD_BY_ID
+===============================
An introspection tool cannot use id to get details about program or maps.
A file descriptor needs to be obtained first for reference-counting purpose.
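+
+As a rough sketch (not part of the kernel sources), an introspection tool can
+combine the two commands through the libbpf wrappers
+``bpf_prog_get_next_id()`` and ``bpf_prog_get_fd_by_id()``; error handling is
+mostly elided here::
+
+ __u32 id = 0;
+ int fd;
+
+ while (!bpf_prog_get_next_id(id, &id)) {
+ 	fd = bpf_prog_get_fd_by_id(id);	/* takes its own reference */
+ 	if (fd < 0)
+ 		continue;	/* the program may have been unloaded meanwhile */
+ 	/* ... inspect it, e.g. via bpf_obj_get_info_by_fd() ... */
+ 	close(fd);
+ }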
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 4e77932959cc..d3fe4cac0c90 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -36,6 +36,16 @@ Two sets of Questions and Answers (Q&A) are maintained.
bpf_devel_QA
+Program types
+=============
+
+.. toctree::
+ :maxdepth: 1
+
+ prog_cgroup_sysctl
+ prog_flow_dissector
+
+
.. Links:
.. _Documentation/networking/filter.txt: ../networking/filter.txt
.. _man-pages: https://www.kernel.org/doc/man-pages/
diff --git a/Documentation/bpf/prog_cgroup_sysctl.rst b/Documentation/bpf/prog_cgroup_sysctl.rst
new file mode 100644
index 000000000000..677d6c637cf3
--- /dev/null
+++ b/Documentation/bpf/prog_cgroup_sysctl.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+===========================
+BPF_PROG_TYPE_CGROUP_SYSCTL
+===========================
+
+This document describes the ``BPF_PROG_TYPE_CGROUP_SYSCTL`` program type,
+which provides a cgroup-bpf hook for sysctl.
+
+The hook has to be attached to a cgroup and will be called every time a
+process inside that cgroup tries to read from or write to a sysctl knob in
+proc.
+
+1. Attach type
+**************
+
+The ``BPF_CGROUP_SYSCTL`` attach type has to be used to attach a
+``BPF_PROG_TYPE_CGROUP_SYSCTL`` program to a cgroup.
+
+2. Context
+**********
+
+``BPF_PROG_TYPE_CGROUP_SYSCTL`` provides access to the following context from
+a BPF program::
+
+ struct bpf_sysctl {
+ __u32 write;
+ __u32 file_pos;
+ };
+
+* ``write`` indicates whether the sysctl value is being read (``0``) or
+ written (``1``). This field is read-only.
+
+* ``file_pos`` indicates the file position the sysctl is being accessed at,
+ read or written. This field is read-write. Writing to the field sets the
+ starting position in the sysctl proc file that ``read(2)`` will be reading
+ from or ``write(2)`` will be writing to. Writing zero to the field can be
+ used e.g. to override the whole sysctl value by ``bpf_sysctl_set_new_value()``
+ on ``write(2)`` even when it's called by user space with ``file_pos > 0``.
+ Writing a non-zero value to the field can be used to access part of the
+ sysctl value starting from the specified ``file_pos``. Not all sysctls
+ support access with ``file_pos != 0``, e.g. writes to numeric sysctl entries
+ must always be at file position ``0``. See also the
+ ``kernel.sysctl_writes_strict`` sysctl.
+
+See `linux/bpf.h`_ for more details on how the context fields can be accessed.
+
+3. Return code
+**************
+
+A ``BPF_PROG_TYPE_CGROUP_SYSCTL`` program must return one of the following
+return codes:
+
+* ``0`` means "reject access to sysctl";
+* ``1`` means "proceed with access".
+
+If the program returns ``0``, user space will get ``-1`` from ``read(2)`` or
+``write(2)`` and ``errno`` will be set to ``EPERM``.
+
+4. Helpers
+**********
+
+Since a sysctl knob is represented by a name and a value, sysctl-specific BPF
+helpers focus on providing access to these properties:
+
+* ``bpf_sysctl_get_name()`` to get the sysctl name, as it is visible in
+ ``/proc/sys``, into a buffer provided by the BPF program;
+
+* ``bpf_sysctl_get_current_value()`` to get the string value currently held
+ by the sysctl into a buffer provided by the BPF program. This helper is
+ available on both ``read(2)`` from and ``write(2)`` to the sysctl;
+
+* ``bpf_sysctl_get_new_value()`` to get the new string value currently being
+ written to the sysctl before the actual write happens. This helper can be
+ used only when ``ctx->write == 1``;
+
+* ``bpf_sysctl_set_new_value()`` to override the new string value currently
+ being written to the sysctl before the actual write happens. The sysctl
+ value will be overridden starting from the current ``ctx->file_pos``. If the
+ whole value has to be overridden, the BPF program can set ``file_pos`` to
+ zero before calling the helper. This helper can be used only when
+ ``ctx->write == 1``. The new string value set by the helper is treated and
+ verified by the kernel the same way as an equivalent string passed by user
+ space.
+
+A BPF program sees the sysctl value the same way as user space does in the
+proc filesystem, i.e. as a string. Since many sysctl values represent an
+integer or a vector of integers, the following helpers can be used to get a
+numeric value from the string:
+
+* ``bpf_strtol()`` to convert the initial part of the string to a long
+ integer, similar to the user space `strtol(3)`_;
+* ``bpf_strtoul()`` to convert the initial part of the string to an unsigned
+ long integer, similar to the user space `strtoul(3)`_;
+
+See `linux/bpf.h`_ for more details on the helpers described here.
+
+5. Examples
+***********
+
+See `test_sysctl_prog.c`_ for an example of a BPF program in C that accesses
+the sysctl name and value, parses the string value to get a vector of
+integers, and uses the result to decide whether to allow or deny access to
+the sysctl.
+
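+A minimal sketch of such a program (not taken from the kernel tree; header
+paths and names are assumptions used only for illustration) could deny all
+writes from the attached cgroup while allowing reads::
+
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/bpf.h>
+ #include "bpf_helpers.h" /* for SEC(); the exact path depends on the build */
+
+ SEC("cgroup/sysctl")
+ int sysctl_guard(struct bpf_sysctl *ctx)
+ {
+ 	/* ctx->write is 1 for write(2) and 0 for read(2) */
+ 	return ctx->write ? 0 /* reject */ : 1 /* proceed */;
+ }
+
+ char _license[] SEC("license") = "GPL";
+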
+6. Notes
+********
+
+``BPF_PROG_TYPE_CGROUP_SYSCTL`` is intended to be used in a **trusted** root
+environment, for example to monitor sysctl usage or to catch unreasonable
+values that an application, running as root in a separate cgroup, is trying
+to set.
+
+Since `task_dfl_cgroup(current)` is called at `sys_read` / `sys_write` time,
+it may return results different from those at `sys_open` time, i.e. the
+process that opened the sysctl file in the proc filesystem may differ from
+the process that is trying to read from / write to it, and the two processes
+may run in different cgroups. This means ``BPF_PROG_TYPE_CGROUP_SYSCTL``
+should not be used as a security mechanism to limit sysctl usage.
+
+As with any cgroup-bpf program, additional care should be taken if an
+application running as root in a cgroup should not be allowed to
+detach/replace the BPF program attached by the administrator.
+
+.. Links
+.. _linux/bpf.h: ../../include/uapi/linux/bpf.h
+.. _strtol(3): http://man7.org/linux/man-pages/man3/strtol.3p.html
+.. _strtoul(3): http://man7.org/linux/man-pages/man3/strtoul.3p.html
+.. _test_sysctl_prog.c:
+ ../../tools/testing/selftests/bpf/progs/test_sysctl_prog.c
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/bpf/prog_flow_dissector.rst
index b375ae2ec2c4..ed343abe541e 100644
--- a/Documentation/networking/bpf_flow_dissector.rst
+++ b/Documentation/bpf/prog_flow_dissector.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-==================
-BPF Flow Dissector
-==================
+============================
+BPF_PROG_TYPE_FLOW_DISSECTOR
+============================
Overview
========
diff --git a/Documentation/clearing-warn-once.txt b/Documentation/clearing-warn-once.txt
index 5b1f5d547be1..211fd926cf00 100644
--- a/Documentation/clearing-warn-once.txt
+++ b/Documentation/clearing-warn-once.txt
@@ -1,5 +1,7 @@
+Clearing WARN_ONCE
+------------------
-WARN_ONCE / WARN_ON_ONCE only print a warning once.
+WARN_ONCE / WARN_ON_ONCE / printk_once only emit a message once.
echo 1 > /sys/kernel/debug/clear_warn_once
diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst
index 6eb9d3f090cd..93cb65d52720 100644
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -101,16 +101,6 @@ changes occur:
translations for software managed TLB configurations.
The sparc64 port currently does this.
-6) ``void tlb_migrate_finish(struct mm_struct *mm)``
-
- This interface is called at the end of an explicit
- process migration. This interface provides a hook
- to allow a platform to update TLB or context-specific
- information for the address space.
-
- The ia64 sn2 platform is one example of a platform
- that uses this interface.
-
Next, we have the cache flushing interfaces. In general, when Linux
is changing an existing virtual-->physical mapping to a new value,
the sequence will be in one of the following forms::
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 6870baffef82..ee1bb8983a88 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -22,7 +22,6 @@ Core utilities
workqueue
genericirq
xarray
- flexible-arrays
librs
genalloc
errseq
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 71f5d2fe39b7..a29c99d13331 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -147,10 +147,10 @@ Division Functions
.. kernel-doc:: include/linux/math64.h
:internal:
-.. kernel-doc:: lib/div64.c
+.. kernel-doc:: lib/math/div64.c
:functions: div_s64_rem div64_u64_rem div64_u64 div64_s64
-.. kernel-doc:: lib/gcd.c
+.. kernel-doc:: lib/math/gcd.c
:export:
UUID/GUID
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index c37ec7cd9c06..75d2bbe9813f 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -58,6 +58,14 @@ A raw pointer value may be printed with %p which will hash the address
before printing. The kernel also supports extended specifiers for printing
pointers of different types.
+Some of the extended specifiers print the data pointed to by the given
+address instead of printing the address itself. In this case, the following
+error messages might be printed instead of the unreachable information::
+
+ (null) data on plain NULL address
+ (efault) data on invalid address
+ (einval) invalid data on a valid address
+
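+For example, with a specifier that dereferences its argument, a NULL pointer
+is reported instead of being dereferenced (a minimal sketch, not from the
+kernel sources)::
+
+	struct in_addr *addr = NULL;
+
+	/* %pI4 dereferences 'addr', so this prints "peer: (null)" */
+	pr_info("peer: %pI4\n", addr);
+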
Plain Pointers
--------------
diff --git a/Documentation/cputopology.txt b/Documentation/cputopology.txt
index c6e7e9196a8b..cb61277e2308 100644
--- a/Documentation/cputopology.txt
+++ b/Documentation/cputopology.txt
@@ -3,79 +3,79 @@ How CPU topology info is exported via sysfs
===========================================
Export CPU topology info via sysfs. Items (attributes) are similar
-to /proc/cpuinfo output of some architectures:
+to /proc/cpuinfo output of some architectures. They reside in
+/sys/devices/system/cpu/cpuX/topology/:
-1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
+physical_package_id:
physical package id of cpuX. Typically corresponds to a physical
socket number, but the actual value is architecture and platform
dependent.
-2) /sys/devices/system/cpu/cpuX/topology/core_id:
+core_id:
the CPU core ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-3) /sys/devices/system/cpu/cpuX/topology/book_id:
+book_id:
the book ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-4) /sys/devices/system/cpu/cpuX/topology/drawer_id:
+drawer_id:
the drawer ID of cpuX. Typically it is the hardware platform's
identifier (rather than the kernel's). The actual value is
architecture and platform dependent.
-5) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
+thread_siblings:
internal kernel map of cpuX's hardware threads within the same
core as cpuX.
-6) /sys/devices/system/cpu/cpuX/topology/thread_siblings_list:
+thread_siblings_list:
human-readable list of cpuX's hardware threads within the same
core as cpuX.
-7) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+core_siblings:
internal kernel map of cpuX's hardware threads within the same
physical_package_id.
-8) /sys/devices/system/cpu/cpuX/topology/core_siblings_list:
+core_siblings_list:
human-readable list of cpuX's hardware threads within the same
physical_package_id.
-9) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+book_siblings:
internal kernel map of cpuX's hardware threads within the same
book_id.
-10) /sys/devices/system/cpu/cpuX/topology/book_siblings_list:
+book_siblings_list:
human-readable list of cpuX's hardware threads within the same
book_id.
-11) /sys/devices/system/cpu/cpuX/topology/drawer_siblings:
+drawer_siblings:
internal kernel map of cpuX's hardware threads within the same
drawer_id.
-12) /sys/devices/system/cpu/cpuX/topology/drawer_siblings_list:
+drawer_siblings_list:
human-readable list of cpuX's hardware threads within the same
drawer_id.
-To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 6 to 12 attributes. The book
-and drawer related sysfs files will only be created if CONFIG_SCHED_BOOK
-and CONFIG_SCHED_DRAWER are selected.
+The architecture-neutral drivers/base/topology.c exports these attributes.
+However, the book and drawer related sysfs files will only be created if
+CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are selected, respectively.
-CONFIG_SCHED_BOOK and CONFIG_DRAWER are currently only used on s390, where
-they reflect the cpu and cache hierarchy.
+CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are currently only used on s390,
+where they reflect the cpu and cache hierarchy.
For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h::
@@ -98,10 +98,10 @@ To be consistent on all architectures, include/linux/topology.h
provides default definitions for any of the above macros that are
not defined by include/asm-XXX/topology.h:
-1) physical_package_id: -1
-2) core_id: 0
-3) sibling_cpumask: just the given CPU
-4) core_cpumask: just the given CPU
+1) topology_physical_package_id: -1
+2) topology_core_id: 0
+3) topology_sibling_cpumask: just the given CPU
+4) topology_core_cpumask: just the given CPU
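+
+For reference, these default definitions look roughly as follows (simplified;
+see include/linux/topology.h for the authoritative versions)::
+
+	#define topology_physical_package_id(cpu)	((void)(cpu), -1)
+	#define topology_core_id(cpu)			((void)(cpu), 0)
+	#define topology_sibling_cpumask(cpu)		cpumask_of(cpu)
+	#define topology_core_cpumask(cpu)		cpumask_of(cpu)
+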
For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
default definitions for topology_book_id() and topology_book_cpumask().
diff --git a/Documentation/crypto/api-samples.rst b/Documentation/crypto/api-samples.rst
index 0f6ca8b7261e..f14afaaf2f32 100644
--- a/Documentation/crypto/api-samples.rst
+++ b/Documentation/crypto/api-samples.rst
@@ -133,7 +133,6 @@ Code Example For Use of Operational State Memory With SHASH
if (!sdesc)
return ERR_PTR(-ENOMEM);
sdesc->shash.tfm = alg;
- sdesc->shash.flags = 0x0;
return sdesc;
}
diff --git a/Documentation/dev-tools/gcov.rst b/Documentation/dev-tools/gcov.rst
index 69a7d90c320a..46aae52a41d0 100644
--- a/Documentation/dev-tools/gcov.rst
+++ b/Documentation/dev-tools/gcov.rst
@@ -34,10 +34,6 @@ Configure the kernel with::
CONFIG_DEBUG_FS=y
CONFIG_GCOV_KERNEL=y
-select the gcc's gcov format, default is autodetect based on gcc version::
-
- CONFIG_GCOV_FORMAT_AUTODETECT=y
-
and to get coverage data for the entire kernel::
CONFIG_GCOV_PROFILE_ALL=y
@@ -169,6 +165,20 @@ b) gcov is run on the BUILD machine
[user@build] gcov -o /tmp/coverage/tmp/out/init main.c
+Note on compilers
+-----------------
+
+GCC and LLVM gcov tools are not necessarily compatible. Use gcov_ to work with
+GCC-generated .gcno and .gcda files, and use llvm-cov_ for Clang.
+
+.. _gcov: http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
+.. _llvm-cov: https://llvm.org/docs/CommandGuide/llvm-cov.html
+
+Build differences between GCC and Clang gcov are handled by Kconfig. It
+automatically selects the appropriate gcov format depending on the detected
+toolchain.
+
+
Troubleshooting
---------------
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index 7756f7a7c23b..25604904fa6e 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -7,6 +7,11 @@ directory. These are intended to be small tests to exercise individual code
paths in the kernel. Tests are intended to be run after building, installing
and booting a kernel.
+You can find additional information on the Kselftest framework, and on how
+to write new tests using it, on the Kselftest wiki:
+
+https://kselftest.wiki.kernel.org/
+
On some systems, hot-plug tests could hang forever waiting for cpu and
memory to be ready to be offlined. A special hot-plug target is created
to run the full range of hot-plug tests. In default mode, hot-plug tests run
@@ -14,6 +19,10 @@ in safe mode with a limited scope. In limited mode, cpu-hotplug test is
run on a single cpu as opposed to all hotplug capable cpus, and memory
hotplug test is run on 2% of hotplug capable memory instead of 10%.
+kselftest runs as a userspace process. Tests that can be written/run in
+userspace may wish to use the `Test Harness`_. Tests that need to be
+run in kernel space may wish to use a `Test Module`_.
+
Running the selftests (hotplug tests are run in limited mode)
=============================================================
@@ -31,17 +40,32 @@ To build and run the tests with a single command, use::
Note that some tests will require root privileges.
-Build and run from user specific object directory (make O=dir)::
+Kselftest supports saving output files in a separate directory and then
+running tests. To locate output files in a separate directory, two syntaxes
+are supported. In both cases the working directory must be the root of the
+kernel source tree. This also applies to the "Running a subset of selftests"
+section below.
+
+To build, save output files in a separate directory with O= ::
$ make O=/tmp/kselftest kselftest
-Build and run KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+To build, save output files in a separate directory with KBUILD_OUTPUT ::
+
+ $ export KBUILD_OUTPUT=/tmp/kselftest; make kselftest
- $ make KBUILD_OUTPUT=/tmp/kselftest kselftest
+The O= assignment takes precedence over the KBUILD_OUTPUT environment
+variable.
-The above commands run the tests and print pass/fail summary to make it
-easier to understand the test results. Please find the detailed individual
-test results for each test in /tmp/testname file(s).
+By default, the above commands run the tests and print a full pass/fail
+report. Kselftest supports a "summary" option to make it easier to understand
+the test results. When the summary option is specified, the detailed
+individual test results for each test can be found in /tmp/testname file(s).
+This also applies to the "Running a subset of selftests" section below.
+
+To run kselftest with summary option enabled ::
+
+ $ make summary=1 kselftest
Running a subset of selftests
=============================
@@ -57,17 +81,13 @@ You can specify multiple tests to build and run::
$ make TARGETS="size timers" kselftest
-Build and run from user specific object directory (make O=dir)::
+To build, save output files in a separate directory with O= ::
$ make O=/tmp/kselftest TARGETS="size timers" kselftest
-Build and run KBUILD_OUTPUT directory (make KBUILD_OUTPUT=)::
+To build, save output files in a separate directory with KBUILD_OUTPUT ::
- $ make KBUILD_OUTPUT=/tmp/kselftest TARGETS="size timers" kselftest
-
-The above commands run the tests and print pass/fail summary to make it
-easier to understand the test results. Please find the detailed individual
-test results for each test in /tmp/testname file(s).
+ $ export KBUILD_OUTPUT=/tmp/kselftest; make TARGETS="size timers" kselftest
See the top-level tools/testing/selftests/Makefile for the list of all
possible targets.
@@ -161,11 +181,97 @@ Contributing new tests (details)
e.g: tools/testing/selftests/android/config
+Test Module
+===========
+
+Kselftest tests the kernel from userspace. Sometimes things need
+testing from within the kernel; one method of doing this is to create a
+test module. We can tie the module into the kselftest framework by
+using a shell script test runner. ``kselftest_module.sh`` is designed
+to facilitate this process. There is also a header file provided to
+assist in writing kernel modules that are for use with kselftest:
+
+- ``tools/testing/selftests/kselftest_module.h``
+- ``tools/testing/selftests/kselftest_module.sh``
+
+How to use
+----------
+
+Here we show the typical steps to create a test module and tie it into
+kselftest. We use kselftests for lib/ as an example.
+
+1. Create the test module
+
+2. Create the test script that will run (load/unload) the module
+ e.g. ``tools/testing/selftests/lib/printf.sh``
+
+3. Add line to config file e.g. ``tools/testing/selftests/lib/config``
+
+4. Add test script to makefile e.g. ``tools/testing/selftests/lib/Makefile``
+
+5. Verify it works:
+
+.. code-block:: sh
+
+ # Assumes you have booted a fresh build of this kernel tree
+ cd /path/to/linux/tree
+ make kselftest-merge
+ make modules
+ sudo make modules_install
+ make TARGETS=lib kselftest
+
+Example Module
+--------------
+
+A bare bones test module might look like this:
+
+.. code-block:: c
+
+ // SPDX-License-Identifier: GPL-2.0+
+
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include "../tools/testing/selftests/kselftest_module.h"
+
+ KSTM_MODULE_GLOBALS();
+
+ /*
+ * Kernel module for testing the foobinator
+ */
+
+ static int __init test_function(void)
+ {
+ ...
+ }
+
+ static void __init selftest(void)
+ {
+ KSTM_CHECK_ZERO(do_test_case("", 0));
+ }
+
+ KSTM_MODULE_LOADERS(test_foo);
+ MODULE_AUTHOR("John Developer <jd@fooman.org>");
+ MODULE_LICENSE("GPL");
+
+Example test script
+-------------------
+
+.. code-block:: sh
+
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0+
+ $(dirname $0)/../kselftest_module.sh "foo" test_foo
+
+
Test Harness
============
-The kselftest_harness.h file contains useful helpers to build tests. The tests
-from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as example.
+The kselftest_harness.h file contains useful helpers to build tests. The
+test harness is for userspace testing; for kernel space testing see `Test
+Module`_ above.
+
+The tests from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as
+an example.
Example
-------
diff --git a/Documentation/device-mapper/dm-dust.txt b/Documentation/device-mapper/dm-dust.txt
new file mode 100644
index 000000000000..954d402a1f6a
--- /dev/null
+++ b/Documentation/device-mapper/dm-dust.txt
@@ -0,0 +1,272 @@
+dm-dust
+=======
+
+This target emulates the behavior of bad sectors at arbitrary
+locations, and provides the ability to enable the emulation of the
+failures at an arbitrary time.
+
+This target behaves similarly to a linear target. At a given time,
+the user can send a message to the target to start failing read
+requests on specific blocks (to emulate the behavior of a hard disk
+drive with bad sectors).
+
+When the failure behavior is enabled (i.e.: when the output of
+"dmsetup status" displays "fail_read_on_bad_block"), reads of blocks
+in the "bad block list" will fail with EIO ("Input/output error").
+
+Writes of blocks in the "bad block list" will result in the following:
+
+1. Remove the block from the "bad block list".
+2. Successfully complete the write.
+
+This emulates the "remapped sector" behavior of a drive with bad
+sectors.
+
+Normally, a drive that is encountering bad sectors will most likely
+encounter more bad sectors, at an unknown time or location.
+With dm-dust, the user can use the "addbadblock" and "removebadblock"
+messages to add arbitrary bad blocks at new locations, and the
+"enable" and "disable" messages to modulate the state of whether the
+configured "bad blocks" will be treated as bad, or bypassed.
+This allows the pre-writing of test data and metadata prior to
+simulating a "failure" event where bad sectors start to appear.
+
+Table parameters:
+-----------------
+<device_path> <offset> <blksz>
+
+Mandatory parameters:
+ <device_path>: path to the block device.
+ <offset>: offset to data area from start of device_path
+ <blksz>: block size in bytes
+ (minimum 512, maximum 1073741824, must be a power of 2)
+
+Usage instructions:
+-------------------
+
+First, find the size (in 512-byte sectors) of the device to be used:
+
+$ sudo blockdev --getsz /dev/vdb1
+33552384
+
+Create the dm-dust device:
+(For a device with a block size of 512 bytes)
+$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 512'
+
+(For a device with a block size of 4096 bytes)
+$ sudo dmsetup create dust1 --table '0 33552384 dust /dev/vdb1 0 4096'
+
+Check the status of the read behavior ("bypass" indicates that all I/O
+will be passed through to the underlying device):
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 bypass
+
+$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=128 iflag=direct
+128+0 records in
+128+0 records out
+
+$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+128+0 records in
+128+0 records out
+
+Adding and removing bad blocks:
+-------------------------------
+
+At any time (i.e.: whether the device has the "bad block" emulation
+enabled or disabled), bad blocks may be added or removed from the
+device via the "addbadblock" and "removebadblock" messages:
+
+$ sudo dmsetup message dust1 0 addbadblock 60
+kernel: device-mapper: dust: badblock added at block 60
+
+$ sudo dmsetup message dust1 0 addbadblock 67
+kernel: device-mapper: dust: badblock added at block 67
+
+$ sudo dmsetup message dust1 0 addbadblock 72
+kernel: device-mapper: dust: badblock added at block 72
+
+These bad blocks will be stored in the "bad block list".
+While the device is in "bypass" mode, reads and writes will succeed:
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 bypass
+
+Enabling block read failures:
+-----------------------------
+
+To enable the "fail read on bad block" behavior, send the "enable" message:
+
+$ sudo dmsetup message dust1 0 enable
+kernel: device-mapper: dust: enabling read failures on bad sectors
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block
+
+With the device in "fail read on bad block" mode, attempting to read a
+block will encounter an "Input/output error":
+
+$ sudo dd if=/dev/mapper/dust1 of=/dev/null bs=512 count=1 skip=67 iflag=direct
+dd: error reading '/dev/mapper/dust1': Input/output error
+0+0 records in
+0+0 records out
+0 bytes copied, 0.00040651 s, 0.0 kB/s
+
+...and writing to the bad blocks will remove the blocks from the list,
+therefore emulating the "remap" behavior of hard disk drives:
+
+$ sudo dd if=/dev/zero of=/dev/mapper/dust1 bs=512 count=128 oflag=direct
+128+0 records in
+128+0 records out
+
+kernel: device-mapper: dust: block 60 removed from badblocklist by write
+kernel: device-mapper: dust: block 67 removed from badblocklist by write
+kernel: device-mapper: dust: block 72 removed from badblocklist by write
+kernel: device-mapper: dust: block 87 removed from badblocklist by write
+
+Bad block add/remove error handling:
+------------------------------------
+
+Attempting to add a bad block that already exists in the list will
+result in an "Invalid argument" error, as well as a helpful message:
+
+$ sudo dmsetup message dust1 0 addbadblock 88
+device-mapper: message ioctl on dust1 failed: Invalid argument
+kernel: device-mapper: dust: block 88 already in badblocklist
+
+Attempting to remove a bad block that doesn't exist in the list will
+result in an "Invalid argument" error, as well as a helpful message:
+
+$ sudo dmsetup message dust1 0 removebadblock 87
+device-mapper: message ioctl on dust1 failed: Invalid argument
+kernel: device-mapper: dust: block 87 not found in badblocklist
+
+Counting the number of bad blocks in the bad block list:
+--------------------------------------------------------
+
+To count the number of bad blocks configured in the device, run the
+following message command:
+
+$ sudo dmsetup message dust1 0 countbadblocks
+
+A message will print with the number of bad blocks currently
+configured on the device:
+
+kernel: device-mapper: dust: countbadblocks: 895 badblock(s) found
+
+Querying for specific bad blocks:
+---------------------------------
+
+To find out if a specific block is in the bad block list, run the
+following message command:
+
+$ sudo dmsetup message dust1 0 queryblock 72
+
+The following message will print if the block is in the list:
+device-mapper: dust: queryblock: block 72 found in badblocklist
+
+The following message will print if the block is not in the list:
+device-mapper: dust: queryblock: block 72 not found in badblocklist
+
+The "queryblock" message command will work in both the "enabled"
+and "disabled" modes, allowing the verification of whether a block
+will be treated as "bad" without having to issue I/O to the device,
+or having to "enable" the bad block emulation.
+
+Clearing the bad block list:
+----------------------------
+
+To clear the bad block list (without needing to individually run
+a "removebadblock" message command for every block), run the
+following message command:
+
+$ sudo dmsetup message dust1 0 clearbadblocks
+
+After clearing the bad block list, the following message will appear:
+
+kernel: device-mapper: dust: clearbadblocks: badblocks cleared
+
+If there were no bad blocks to clear, the following message will
+appear:
+
+kernel: device-mapper: dust: clearbadblocks: no badblocks found
+
+Message commands list:
+----------------------
+
+Below is a list of the messages that can be sent to a dust device:
+
+Operations on blocks (requires a <blknum> argument):
+
+addbadblock <blknum>
+queryblock <blknum>
+removebadblock <blknum>
+
+...where <blknum> is a block number within range of the device
+ (corresponding to the block size of the device.)
+
+Single argument message commands:
+
+countbadblocks
+clearbadblocks
+disable
+enable
+quiet
+
+Device removal:
+---------------
+
+When finished, remove the device via the "dmsetup remove" command:
+
+$ sudo dmsetup remove dust1
+
+Quiet mode:
+-----------
+
+On test runs with many bad blocks, it may be desirable to avoid
+excessive logging (from bad blocks added, removed, or "remapped").
+This can be done by enabling "quiet mode" via the following message:
+
+$ sudo dmsetup message dust1 0 quiet
+
+This will suppress log messages from add / remove / removed by write
+operations. Log messages from "countbadblocks" or "queryblock"
+message commands will still print in quiet mode.
+
+The status of quiet mode can be seen by running "dmsetup status":
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block quiet
+
+To disable quiet mode, send the "quiet" message again:
+
+$ sudo dmsetup message dust1 0 quiet
+
+$ sudo dmsetup status dust1
+0 33552384 dust 252:17 fail_read_on_bad_block verbose
+
+(The presence of "verbose" indicates normal logging.)
+
+"Why not...?"
+-------------
+
+scsi_debug has a "medium error" mode that can fail reads on one
+specified sector (sector 0x1234, hardcoded in the source code), but
+it uses RAM for the persistent storage, which drastically decreases
+the potential device size.
+
+dm-flakey fails all I/O from all block locations at a specified time
+frequency, and not at a given point in time.
+
+When a bad sector occurs on a hard disk drive, reads to that sector
+are failed by the device, usually resulting in an error code of EIO
+("I/O error") or ENODATA ("No data available"). However, a write to
+the sector may succeed, and result in the sector becoming readable
+after the device controller no longer experiences errors reading the
+sector (or after a reallocation of the sector). However, there may
+be bad sectors that occur on the device in the future, in a different,
+unpredictable location.
+
+This target seeks to provide a device that can exhibit the behavior
+of a bad sector at a known sector location, at a known time, based
+on a large storage device (at least tens of gigabytes, not occupying
+system memory).
diff --git a/Documentation/device-mapper/dm-integrity.txt b/Documentation/device-mapper/dm-integrity.txt
index 297251b0d2d5..d63d78ffeb73 100644
--- a/Documentation/device-mapper/dm-integrity.txt
+++ b/Documentation/device-mapper/dm-integrity.txt
@@ -21,6 +21,13 @@ mode it calculates and verifies the integrity tag internally. In this
mode, the dm-integrity target can be used to detect silent data
corruption on the disk or in the I/O path.
+There's an alternate mode of operation where dm-integrity uses a bitmap
+instead of a journal. If a bit in the bitmap is 1, the corresponding
+region's data and integrity tags are not synchronized - if the machine
+crashes, the unsynchronized regions will be recalculated. The bitmap mode
+is faster than the journal mode, because we don't have to write the data
+twice, but it is also less reliable, because if data corruption happens
+when the machine crashes, it may not be detected.
When loading the target for the first time, the kernel driver will format
the device. But it will only format the device if the superblock contains
@@ -59,6 +66,10 @@ Target arguments:
either both data and tag or none of them are written. The
journaled mode degrades write throughput twice because the
data have to be written twice.
+ B - bitmap mode - data and metadata are written without any
+ synchronization; the driver maintains a bitmap of dirty
+ regions where data and metadata don't match. This mode can
+ only be used with internal hash.
R - recovery mode - in this mode, journal is not replayed,
checksums are not checked and writes to the device are not
allowed. This mode is useful for data recovery if the
@@ -79,6 +90,10 @@ interleave_sectors:number
a power of two. If the device is already formatted, the value from
the superblock is used.
+meta_device:device
+ Don't interleave the data and metadata on the same device. Use a
+ separate device for metadata.
+
buffer_sectors:number
The number of sectors in one buffer. The value is rounded down to
a power of two.
@@ -146,6 +161,15 @@ block_size:number
Supported values are 512, 1024, 2048 and 4096 bytes. If not
specified the default block size is 512 bytes.
+sectors_per_bit:number
+ In the bitmap mode, this parameter specifies the number of
+ 512-byte sectors that corresponds to one bitmap bit.
+
+bitmap_flush_interval:number
+ The bitmap flush interval in milliseconds. The metadata buffers
+ are synchronized when this interval expires.
+
+
The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
be changed when reloading the target (load an inactive table and swap the
tables with suspend and resume). The other arguments should not be changed
@@ -167,7 +191,13 @@ The layout of the formatted block device:
provides (i.e. the size of the device minus the size of all
metadata and padding). The user of this target should not send
bios that access data beyond the "provided data sectors" limit.
- * flags - a flag is set if journal_mac is used
+ * flags
+ SB_FLAG_HAVE_JOURNAL_MAC - a flag is set if journal_mac is used
+ SB_FLAG_RECALCULATING - recalculating is in progress
+ SB_FLAG_DIRTY_BITMAP - journal area contains the bitmap of dirty
+ blocks
+ * log2(sectors per block)
+ * a position where recalculating finished
* journal
The journal is divided into sections, each section contains:
* metadata area (4kiB), it contains journal entries
diff --git a/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt b/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
index f4d04a067282..82edbaaa3f85 100644
--- a/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
+++ b/Documentation/devicetree/bindings/arm/altera/socfpga-system.txt
@@ -11,3 +11,15 @@ Example:
reg = <0xffd08000 0x1000>;
cpu1-start-addr = <0xffd080c4>;
};
+
+ARM64 - Stratix10
+Required properties:
+- compatible : "altr,sys-mgr-s10"
+- reg : Should contain 1 register range (address and length)
+ for system manager register.
+
+Example:
+ sysmgr@ffd12000 {
+ compatible = "altr,sys-mgr-s10";
+ reg = <0xffd12000 0x228>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt
index 7f40cb5f490b..061f7b98a07f 100644
--- a/Documentation/devicetree/bindings/arm/amlogic.txt
+++ b/Documentation/devicetree/bindings/arm/amlogic.txt
@@ -110,6 +110,7 @@ Board compatible values (alphabetically, grouped by SoC):
- "amlogic,u200" (Meson g12a s905d2)
- "amediatech,x96-max" (Meson g12a s905x2)
+ - "seirobotics,sei510" (Meson g12a s905x2)
Amlogic Meson Firmware registers Interface
------------------------------------------
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 4bf1b4da7659..99dee23c74a4 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -25,6 +25,7 @@ compatible: must be one of:
o "atmel,at91sam9n12"
o "atmel,at91sam9rl"
o "atmel,at91sam9xe"
+ o "microchip,sam9x60"
* "atmel,sama5" for SoCs using a Cortex-A5, shall be extended with the specific
SoC family:
o "atmel,sama5d2" shall be extended with the specific SoC compatible:
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
index e61d00e25b95..9fbde401a090 100644
--- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -84,7 +84,7 @@ SHDWC SAMA5D2-Compatible Shutdown Controller
1) shdwc node
required properties:
-- compatible: should be "atmel,sama5d2-shdwc".
+- compatible: should be "atmel,sama5d2-shdwc" or "microchip,sam9x60-shdwc".
- reg: should contain registers location and length
- clocks: phandle to input clock.
- #address-cells: should be one. The cell is the wake-up input index.
@@ -96,6 +96,9 @@ optional properties:
microseconds. It's usually a board-related property.
- atmel,wakeup-rtc-timer: boolean to enable Real-Time Clock wake-up.
+optional microchip,sam9x60-shdwc properties:
+- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
+
The node contains child nodes for each wake-up input that the platform uses.
2) input nodes
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index f8aff65ab921..8a88ddebc1a2 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -8,7 +8,8 @@ through the intermediate links connecting the source to the currently selected
sink. Each CoreSight component device should use these properties to describe
its hardware characteristcs.
-* Required properties for all components *except* non-configurable replicators:
+* Required properties for all components *except* non-configurable replicators
+ and non-configurable funnels:
* compatible: These have to be supplemented with "arm,primecell" as
drivers are using the AMBA bus interface. Possible values include:
@@ -24,8 +25,10 @@ its hardware characteristcs.
discovered at boot time when the device is probed.
"arm,coresight-tmc", "arm,primecell";
- - Trace Funnel:
- "arm,coresight-funnel", "arm,primecell";
+ - Trace Programmable Funnel:
+ "arm,coresight-dynamic-funnel", "arm,primecell";
+ "arm,coresight-funnel", "arm,primecell"; (OBSOLETE. For
+ backward compatibility and will be removed)
- Embedded Trace Macrocell (version 3.x) and
Program Flow Trace Macrocell:
@@ -65,11 +68,17 @@ its hardware characteristcs.
"stm-stimulus-base", each corresponding to the areas defined in "reg".
* Required properties for devices that don't show up on the AMBA bus, such as
- non-configurable replicators:
+ non-configurable replicators and non-configurable funnels:
* compatible: Currently supported value is (note the absence of the
AMBA markee):
- - "arm,coresight-replicator"
+ - Coresight Non-configurable Replicator:
+ "arm,coresight-static-replicator";
+ "arm,coresight-replicator"; (OBSOLETE. For backward
+ compatibility and will be removed)
+
+ - Coresight Non-configurable Funnel:
+ "arm,coresight-static-funnel";
* port or ports: see "Graph bindings for Coresight" below.
@@ -169,7 +178,7 @@ Example:
/* non-configurable replicators don't show up on the
* AMBA bus. As such no need to add "arm,primecell".
*/
- compatible = "arm,coresight-replicator";
+ compatible = "arm,coresight-static-replicator";
out-ports {
#address-cells = <1>;
@@ -200,8 +209,45 @@ Example:
};
};
+ funnel {
+ /*
+ * non-configurable funnels don't show up on the AMBA
+ * bus. As such no need to add "arm,primecell".
+ */
+ compatible = "arm,coresight-static-funnel";
+ clocks = <&crg_ctrl HI3660_PCLK>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ combo_funnel_out: endpoint {
+ remote-endpoint = <&top_funnel_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ combo_funnel_in0: endpoint {
+ remote-endpoint = <&cluster0_etf_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ combo_funnel_in1: endpoint {
+ remote-endpoint = <&cluster1_etf_out>;
+ };
+ };
+ };
+ };
+
funnel@20040000 {
- compatible = "arm,coresight-funnel", "arm,primecell";
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
reg = <0 0x20040000 0 0x1000>;
clocks = <&oscclk6a>;
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 82dd7582e945..591bbd012d63 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -67,6 +67,7 @@ properties:
patternProperties:
'^cpu@[0-9a-f]+$':
+ type: object
properties:
device_type:
const: cpu
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index 72d481c8dd48..5d7dbabbb784 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -22,9 +22,11 @@ Required properties:
-------------------
- compatible: should be "fsl,imx-scu".
- mbox-names: should include "tx0", "tx1", "tx2", "tx3",
- "rx0", "rx1", "rx2", "rx3".
-- mboxes: List of phandle of 4 MU channels for tx and 4 MU channels
- for rx. All 8 MU channels must be in the same MU instance.
+ "rx0", "rx1", "rx2", "rx3";
+ include "gip3" if want to support general MU interrupt.
+- mboxes: List of phandle of 4 MU channels for tx, 4 MU channels for
+ rx, and 1 optional MU channel for general interrupt.
+ All MU channels must be in the same MU instance.
Cross instances are not allowed. The MU instance can only
be one of LSIO MU0~M4 for imx8qxp and imx8qm. Users need
to make sure use the one which is not conflict with other
@@ -34,6 +36,7 @@ Required properties:
Channel 1 must be "tx1" or "rx1".
Channel 2 must be "tx2" or "rx2".
Channel 3 must be "tx3" or "rx3".
+ General interrupt rx channel must be "gip3".
e.g.
mboxes = <&lsio_mu1 0 0
&lsio_mu1 0 1
@@ -42,10 +45,18 @@ Required properties:
&lsio_mu1 1 0
&lsio_mu1 1 1
&lsio_mu1 1 2
- &lsio_mu1 1 3>;
+ &lsio_mu1 1 3
+ &lsio_mu1 3 3>;
See Documentation/devicetree/bindings/mailbox/fsl,mu.txt
for detailed mailbox binding.
+Note: Each MU which supports the general interrupt should have an alias
+correctly numbered in the "aliases" node.
+e.g.
+aliases {
+ mu1 = &lsio_mu1;
+};
+
i.MX SCU Client Device Node:
============================================================
@@ -124,6 +135,10 @@ Required properties:
Example (imx8qxp):
-------------
+aliases {
+ mu1 = &lsio_mu1;
+};
+
lsio_mu1: mailbox@5d1c0000 {
...
#mbox-cells = <2>;
@@ -133,7 +148,8 @@ firmware {
scu {
compatible = "fsl,imx-scu";
mbox-names = "tx0", "tx1", "tx2", "tx3",
- "rx0", "rx1", "rx2", "rx3";
+ "rx0", "rx1", "rx2", "rx3",
+ "gip3";
mboxes = <&lsio_mu1 0 0
&lsio_mu1 0 1
&lsio_mu1 0 2
@@ -141,7 +157,8 @@ firmware {
&lsio_mu1 1 0
&lsio_mu1 1 1
&lsio_mu1 1 2
- &lsio_mu1 1 3>;
+ &lsio_mu1 1 3
+ &lsio_mu1 3 3>;
clk: clk {
compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index 7e2cd6ad26bd..407138ebc0d0 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -51,6 +51,13 @@ properties:
- const: i2se,duckbill-2
- const: fsl,imx28
+ - description: i.MX50 based Boards
+ items:
+ - enum:
+ - fsl,imx50-evk
+ - kobo,aura
+ - const: fsl,imx50
+
- description: i.MX51 Babbage Board
items:
- enum:
@@ -67,6 +74,7 @@ properties:
- fsl,imx53-evk
- fsl,imx53-qsb
- fsl,imx53-smd
+ - menlo,m53menlo
- const: fsl,imx53
- description: i.MX6Q based Boards
@@ -90,6 +98,7 @@ properties:
- description: i.MX6DL based Boards
items:
- enum:
+ - eckelmann,imx6dl-ci4x10
- fsl,imx6dl-sabreauto # i.MX6 DualLite/Solo SABRE Automotive Board
- fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board
- technologic,imx6dl-ts4900
@@ -137,10 +146,18 @@ properties:
- const: fsl,imx6ull # This seems odd. Should be last?
- const: fsl,imx6ulz
+ - description: i.MX7S based Boards
+ items:
+ - enum:
+ - tq,imx7s-mba7 # i.MX7S TQ MBa7 with TQMa7S SoM
+ - const: fsl,imx7s
+
- description: i.MX7D based Boards
items:
- enum:
- fsl,imx7d-sdb # i.MX7 SabreSD Board
+ - tq,imx7d-mba7 # i.MX7D TQ MBa7 with TQMa7D SoM
+ - zii,imx7d-rpu2 # ZII RPU2 Board
- const: fsl,imx7d
- description:
@@ -154,6 +171,12 @@ properties:
- const: compulab,cl-som-imx7
- const: fsl,imx7d
+ - description: i.MX8MM based Boards
+ items:
+ - enum:
+ - fsl,imx8mm-evk # i.MX8MM EVK Board
+ - const: fsl,imx8mm
+
- description: i.MX8QXP based Boards
items:
- enum:
@@ -176,6 +199,19 @@ properties:
- fsl,vf610
- fsl,vf610m4
+ - description: ZII's VF610 based Boards
+ items:
+ - enum:
+ - zii,vf610cfu1 # ZII VF610 CFU1 Board
+ - zii,vf610dev-c # ZII VF610 Development Board, Rev C
+ - zii,vf610dev-b # ZII VF610 Development Board, Rev B
+ - zii,vf610scu4-aib # ZII VF610 SCU4 AIB
+ - zii,vf610dtu # ZII VF610 SSMB DTU Board
+ - zii,vf610spu3 # ZII VF610 SSMB SPU3 Board
+ - zii,vf610spb4 # ZII VF610 SPB4 Board
+ - const: zii,vf610dev
+ - const: fsl,vf610
+
- description: LS1012A based Boards
items:
- enum:
diff --git a/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml b/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
new file mode 100644
index 000000000000..f4f7451e5e8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/intel-ixp4xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel IXP4xx Device Tree Bindings
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - linksys,nslu2
+ - const: intel,ixp42x
+ - items:
+ - enum:
+ - gateworks,gw2358
+ - const: intel,ixp43x
diff --git a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
index b56a02c10ae6..6f0cd31c1520 100644
--- a/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+++ b/Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
@@ -24,7 +24,8 @@ relationship between the TI-SCI parent node to the child node.
Required properties:
-------------------
-- compatible: should be "ti,k2g-sci"
+- compatible: should be "ti,k2g-sci" for TI 66AK2G SoC
+ should be "ti,am654-sci" for for TI AM654 SoC
- mbox-names:
"rx" - Mailbox corresponding to receive path
"tx" - Mailbox corresponding to transmit path
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index de4075413d91..161e63a6c254 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -14,6 +14,8 @@ Required Properties:
- "mediatek,mt7629-apmixedsys"
- "mediatek,mt8135-apmixedsys"
- "mediatek,mt8173-apmixedsys"
+ - "mediatek,mt8183-apmixedsys", "syscon"
+ - "mediatek,mt8516-apmixedsys"
- #clock-cells: Must be 1
The apmixedsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
index d1606b2c3e63..f3cef1a6d95c 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,audsys.txt
@@ -9,6 +9,7 @@ Required Properties:
- "mediatek,mt2701-audsys", "syscon"
- "mediatek,mt7622-audsys", "syscon"
- "mediatek,mt7623-audsys", "mediatek,mt2701-audsys", "syscon"
+ - "mediatek,mt8183-audiosys", "syscon"
- #clock-cells: Must be 1
The AUDSYS controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
new file mode 100644
index 000000000000..d8930f64aa98
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,camsys.txt
@@ -0,0 +1,22 @@
+MediaTek CAMSYS controller
+============================
+
+The MediaTek camsys controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+ - "mediatek,mt8183-camsys", "syscon"
+- #clock-cells: Must be 1
+
+The camsys controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+camsys: camsys@1a000000 {
+ compatible = "mediatek,mt8183-camsys", "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
index 3f99672163e3..e3bc4a1e7a6e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,imgsys.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt6797-imgsys", "syscon"
- "mediatek,mt7623-imgsys", "mediatek,mt2701-imgsys", "syscon"
- "mediatek,mt8173-imgsys", "syscon"
+ - "mediatek,mt8183-imgsys", "syscon"
- #clock-cells: Must be 1
The imgsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index 417bd83d1378..a90913988d7e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -15,6 +15,8 @@ Required Properties:
- "mediatek,mt7629-infracfg", "syscon"
- "mediatek,mt8135-infracfg", "syscon"
- "mediatek,mt8173-infracfg", "syscon"
+ - "mediatek,mt8183-infracfg", "syscon"
+ - "mediatek,mt8516-infracfg", "syscon"
- #clock-cells: Must be 1
- #reset-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
new file mode 100644
index 000000000000..aabc8c5c8ed2
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ipu.txt
@@ -0,0 +1,43 @@
+Mediatek IPU controller
+============================
+
+The Mediatek ipu controller provides various clocks to the system.
+
+Required Properties:
+
+- compatible: Should be one of:
+ - "mediatek,mt8183-ipu_conn", "syscon"
+ - "mediatek,mt8183-ipu_adl", "syscon"
+ - "mediatek,mt8183-ipu_core0", "syscon"
+ - "mediatek,mt8183-ipu_core1", "syscon"
+- #clock-cells: Must be 1
+
+The ipu controller uses the common clk binding from
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+The available clocks are defined in dt-bindings/clock/mt*-clk.h.
+
+Example:
+
+ipu_conn: syscon@19000000 {
+ compatible = "mediatek,mt8183-ipu_conn", "syscon";
+ reg = <0 0x19000000 0 0x1000>;
+ #clock-cells = <1>;
+};
+
+ipu_adl: syscon@19010000 {
+ compatible = "mediatek,mt8183-ipu_adl", "syscon";
+ reg = <0 0x19010000 0 0x1000>;
+ #clock-cells = <1>;
+};
+
+ipu_core0: syscon@19180000 {
+ compatible = "mediatek,mt8183-ipu_core0", "syscon";
+ reg = <0 0x19180000 0 0x1000>;
+ #clock-cells = <1>;
+};
+
+ipu_core1: syscon@19280000 {
+ compatible = "mediatek,mt8183-ipu_core1", "syscon";
+ reg = <0 0x19280000 0 0x1000>;
+ #clock-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
index b8fb03f3613e..2b882b7ca72e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mcucfg.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2712-mcucfg", "syscon"
+ - "mediatek,mt8183-mcucfg", "syscon"
- #clock-cells: Must be 1
The mcucfg controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
index 859e67b416d5..72787e7dd227 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mfgcfg.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be one of:
- "mediatek,mt2712-mfgcfg", "syscon"
+ - "mediatek,mt8183-mfgcfg", "syscon"
- #clock-cells: Must be 1
The mfgcfg controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
index 15d977afad31..545eab717c96 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mmsys.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt6797-mmsys", "syscon"
- "mediatek,mt7623-mmsys", "mediatek,mt2701-mmsys", "syscon"
- "mediatek,mt8173-mmsys", "syscon"
+ - "mediatek,mt8183-mmsys", "syscon"
- #clock-cells: Must be 1
The mmsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index d160c2b4b6fe..a023b8338960 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -14,6 +14,8 @@ Required Properties:
- "mediatek,mt7629-topckgen"
- "mediatek,mt8135-topckgen"
- "mediatek,mt8173-topckgen"
+ - "mediatek,mt8183-topckgen", "syscon"
+ - "mediatek,mt8516-topckgen"
- #clock-cells: Must be 1
The topckgen controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
index 3212afc753c8..57176bb8dbb5 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vdecsys.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt6797-vdecsys", "syscon"
- "mediatek,mt7623-vdecsys", "mediatek,mt2701-vdecsys", "syscon"
- "mediatek,mt8173-vdecsys", "syscon"
+ - "mediatek,mt8183-vdecsys", "syscon"
- #clock-cells: Must be 1
The vdecsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
index 851545357e94..c9faa6269087 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,vencsys.txt
@@ -9,6 +9,7 @@ Required Properties:
- "mediatek,mt2712-vencsys", "syscon"
- "mediatek,mt6797-vencsys", "syscon"
- "mediatek,mt8173-vencsys", "syscon"
+ - "mediatek,mt8183-vencsys", "syscon"
- #clock-cells: Must be 1
The vencsys controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 2ecc712bf707..1c1e48fd94b5 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -92,6 +92,9 @@ SoCs:
- DRA718
compatible = "ti,dra718", "ti,dra722", "ti,dra72", "ti,dra7"
+- AM5748
+ compatible = "ti,am5748", "ti,dra762", "ti,dra7"
+
- AM5728
compatible = "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
@@ -184,6 +187,9 @@ Boards:
- AM57XX SBC-AM57x
compatible = "compulab,sbc-am57x", "compulab,cl-som-am57x", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
+- AM5748 IDK
+ compatible = "ti,am5748-idk", "ti,am5748", "ti,dra762", "ti,dra7";
+
- AM5728 IDK
compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74", "ti,dra7"
diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml
index 061a03edf9c8..5c6bbf10abc9 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.yaml
+++ b/Documentation/devicetree/bindings/arm/rockchip.yaml
@@ -97,6 +97,7 @@ properties:
- enum:
- friendlyarm,nanopc-t4
- friendlyarm,nanopi-m4
+ - friendlyarm,nanopi-neo4
- const: rockchip,rk3399
- description: GeekBuying GeekBox
@@ -146,7 +147,7 @@ properties:
- const: google,gru
- const: rockchip,rk3399
- - description: Google Jaq (Haier Chromebook 11 and more)
+ - description: Google Jaq (Haier Chromebook 11 and more w/ uSD)
items:
- const: google,veyron-jaq-rev5
- const: google,veyron-jaq-rev4
@@ -159,6 +160,12 @@ properties:
- description: Google Jerry (Hisense Chromebook C11 and more)
items:
+ - const: google,veyron-jerry-rev15
+ - const: google,veyron-jerry-rev14
+ - const: google,veyron-jerry-rev13
+ - const: google,veyron-jerry-rev12
+ - const: google,veyron-jerry-rev11
+ - const: google,veyron-jerry-rev10
- const: google,veyron-jerry-rev7
- const: google,veyron-jerry-rev6
- const: google,veyron-jerry-rev5
@@ -199,6 +206,17 @@ properties:
- const: google,veyron
- const: rockchip,rk3288
+ - description: Google Mighty (Haier Chromebook 11 and more w/ SD)
+ items:
+ - const: google,veyron-mighty-rev5
+ - const: google,veyron-mighty-rev4
+ - const: google,veyron-mighty-rev3
+ - const: google,veyron-mighty-rev2
+ - const: google,veyron-mighty-rev1
+ - const: google,veyron-mighty
+ - const: google,veyron
+ - const: rockchip,rk3288
+
- description: Google Minnie (Asus Chromebook Flip C100P)
items:
- const: google,veyron-minnie-rev4
@@ -308,6 +326,11 @@ properties:
- const: netxeon,r89
- const: rockchip,rk3288
+ - description: Orange Pi RK3399 board
+ items:
+ - const: rockchip,rk3399-orangepi
+ - const: rockchip,rk3399
+
- description: Phytec phyCORE-RK3288 Rapid Development Kit
items:
- const: phytec,rk3288-pcm-947
diff --git a/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt b/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt
index 99980aee26e5..c92d411fd023 100644
--- a/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt
+++ b/Documentation/devicetree/bindings/arm/stm32/stm32-syscon.txt
@@ -5,10 +5,12 @@ Properties:
- " st,stm32mp157-syscfg " - for stm32mp157 based SoCs,
second value must be always "syscon".
- reg : offset and length of the register set.
+ - clocks: phandle to the syscfg clock
Example:
syscfg: syscon@50020000 {
compatible = "st,stm32mp157-syscfg", "syscon";
reg = <0x50020000 0x400>;
+ clocks = <&rcc SYSCFG>;
};
diff --git a/Documentation/devicetree/bindings/arm/sunxi.txt b/Documentation/devicetree/bindings/arm/sunxi.txt
deleted file mode 100644
index 9254cbe7d516..000000000000
--- a/Documentation/devicetree/bindings/arm/sunxi.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Allwinner sunXi Platforms Device Tree Bindings
-
-Each device tree must specify which Allwinner SoC it uses,
-using one of the following compatible strings:
-
- allwinner,sun4i-a10
- allwinner,sun5i-a10s
- allwinner,sun5i-a13
- allwinner,sun5i-r8
- allwinner,sun6i-a31
- allwinner,sun7i-a20
- allwinner,sun8i-a23
- allwinner,sun8i-a33
- allwinner,sun8i-a83t
- allwinner,sun8i-h2-plus
- allwinner,sun8i-h3
- allwinner,sun8i-r40
- allwinner,sun8i-t3
- allwinner,sun8i-v3s
- allwinner,sun9i-a80
- allwinner,sun50i-a64
- allwinner,suniv-f1c100s
- nextthing,gr8
diff --git a/Documentation/devicetree/bindings/arm/sunxi.yaml b/Documentation/devicetree/bindings/arm/sunxi.yaml
new file mode 100644
index 000000000000..285f4fc8519d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sunxi.yaml
@@ -0,0 +1,807 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR X11)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/arm/sunxi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner platforms device tree bindings
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ $nodename:
+ const: '/'
+ compatible:
+ oneOf:
+
+ - description: Allwinner A23 Evaluation Board
+ items:
+ - const: allwinner,sun8i-a23-evb
+ - const: allwinner,sun8i-a23
+
+ - description: Allwinner A31 APP4 Evaluation Board
+ items:
+ - const: allwinner,app4-evb1
+ - const: allwinner,sun6i-a31
+
+ - description: Allwinner A83t Homlet Evaluation Board v2
+ items:
+ - const: allwinner,h8homlet-v2
+ - const: allwinner,sun8i-a83t
+
+ - description: Allwinner GA10H Quad Core Tablet v1.1
+ items:
+ - const: allwinner,ga10h-v1.1
+ - const: allwinner,sun8i-a33
+
+ - description: Allwinner GT90H Tablet v4
+ items:
+ - const: allwinner,gt90h-v4
+ - const: allwinner,sun8i-a23
+
+ - description: Allwinner R16 EVB (Parrot)
+ items:
+ - const: allwinner,parrot
+ - const: allwinner,sun8i-a33
+
+ - description: Amarula A64 Relic
+ items:
+ - const: amarula,a64-relic
+ - const: allwinner,sun50i-a64
+
+ - description: Auxtek T003 A10s HDMI TV Stick
+ items:
+ - const: allwinner,auxtek-t003
+ - const: allwinner,sun5i-a10s
+
+ - description: Auxtek T004 A10s HDMI TV Stick
+ items:
+ - const: allwinner,auxtek-t004
+ - const: allwinner,sun5i-a10s
+
+ - description: BA10 TV Box
+ items:
+ - const: allwinner,ba10-tvbox
+ - const: allwinner,sun4i-a10
+
+ - description: BananaPi
+ items:
+ - const: lemaker,bananapi
+ - const: allwinner,sun7i-a20
+
+ - description: BananaPi M1 Plus
+ items:
+ - const: sinovoip,bpi-m1-plus
+ - const: allwinner,sun7i-a20
+
+ - description: BananaPi M2
+ items:
+ - const: sinovoip,bpi-m2
+ - const: allwinner,sun6i-a31s
+
+ - description: BananaPi M2 Berry
+ items:
+ - const: sinovoip,bpi-m2-berry
+ - const: allwinner,sun8i-r40
+
+ - description: BananaPi M2 Plus
+ items:
+ - const: sinovoip,bpi-m2-plus
+ - const: allwinner,sun8i-h3
+
+ - description: BananaPi M2 Plus
+ items:
+ - const: sinovoip,bpi-m2-plus
+ - const: allwinner,sun50i-h5
+
+ - description: BananaPi M2 Plus v1.2
+ items:
+ - const: bananapi,bpi-m2-plus-v1.2
+ - const: allwinner,sun8i-h3
+
+ - description: BananaPi M2 Plus v1.2
+ items:
+ - const: bananapi,bpi-m2-plus-v1.2
+ - const: allwinner,sun50i-h5
+
+ - description: BananaPi M2 Magic
+ items:
+ - const: sinovoip,bananapi-m2m
+ - const: allwinner,sun8i-a33
+
+ - description: BananaPi M2 Ultra
+ items:
+ - const: sinovoip,bpi-m2-ultra
+ - const: allwinner,sun8i-r40
+
+ - description: BananaPi M2 Zero
+ items:
+ - const: sinovoip,bpi-m2-zero
+ - const: allwinner,sun8i-h2-plus
+
+ - description: BananaPi M3
+ items:
+ - const: sinovoip,bpi-m3
+ - const: allwinner,sun8i-a83t
+
+ - description: BananaPi M64
+ items:
+ - const: sinovoip,bananapi-m64
+ - const: allwinner,sun50i-a64
+
+ - description: BananaPro
+ items:
+ - const: lemaker,bananapro
+ - const: allwinner,sun7i-a20
+
+ - description: Beelink GS1
+ items:
+ - const: azw,beelink-gs1
+ - const: allwinner,sun50i-h6
+
+ - description: Beelink X2
+ items:
+ - const: roofull,beelink-x2
+ - const: allwinner,sun8i-h3
+
+ - description: Chuwi V7 CW0825
+ items:
+ - const: chuwi,v7-cw0825
+ - const: allwinner,sun4i-a10
+
+ - description: Colorfly E708 Q1 Tablet
+ items:
+ - const: colorfly,e708-q1
+ - const: allwinner,sun6i-a31s
+
+ - description: CSQ CS908 Set Top Box
+ items:
+ - const: csq,cs908
+ - const: allwinner,sun6i-a31s
+
+ - description: Cubietech Cubieboard
+ items:
+ - const: cubietech,a10-cubieboard
+ - const: allwinner,sun4i-a10
+
+ - description: Cubietech Cubieboard2
+ items:
+ - const: cubietech,cubieboard2
+ - const: allwinner,sun7i-a20
+
+ - description: Cubietech Cubieboard4
+ items:
+ - const: cubietech,a80-cubieboard4
+ - const: allwinner,sun9i-a80
+
+ - description: Cubietech Cubietruck
+ items:
+ - const: cubietech,cubietruck
+ - const: allwinner,sun7i-a20
+
+ - description: Cubietech Cubietruck Plus
+ items:
+ - const: cubietech,cubietruck-plus
+ - const: allwinner,sun8i-a83t
+
+ - description: Difrnce DIT4350
+ items:
+ - const: difrnce,dit4350
+ - const: allwinner,sun5i-a13
+
+ - description: Dserve DSRV9703C
+ items:
+ - const: dserve,dsrv9703c
+ - const: allwinner,sun4i-a10
+
+ - description: Empire Electronix D709 Tablet
+ items:
+ - const: empire-electronix,d709
+ - const: allwinner,sun5i-a13
+
+ - description: Empire Electronix M712 Tablet
+ items:
+ - const: empire-electronix,m712
+ - const: allwinner,sun5i-a13
+
+ - description: FriendlyARM NanoPi A64
+ items:
+ - const: friendlyarm,nanopi-a64
+ - const: allwinner,sun50i-a64
+
+ - description: FriendlyARM NanoPi M1
+ items:
+ - const: friendlyarm,nanopi-m1
+ - const: allwinner,sun8i-h3
+
+ - description: FriendlyARM NanoPi M1 Plus
+ items:
+ - const: friendlyarm,nanopi-m1-plus
+ - const: allwinner,sun8i-h3
+
+ - description: FriendlyARM NanoPi Neo
+ items:
+ - const: friendlyarm,nanopi-neo
+ - const: allwinner,sun8i-h3
+
+ - description: FriendlyARM NanoPi Neo 2
+ items:
+ - const: friendlyarm,nanopi-neo2
+ - const: allwinner,sun50i-h5
+
+ - description: FriendlyARM NanoPi Neo Air
+ items:
+ - const: friendlyarm,nanopi-neo-air
+ - const: allwinner,sun8i-h3
+
+ - description: FriendlyARM NanoPi Neo Plus2
+ items:
+ - const: friendlyarm,nanopi-neo-plus2
+ - const: allwinner,sun50i-h5
+
+ - description: Gemei G9 Tablet
+ items:
+ - const: gemei,g9
+ - const: allwinner,sun4i-a10
+
+ - description: Hyundai A7HD
+ items:
+ - const: hyundai,a7hd
+ - const: allwinner,sun4i-a10
+
+ - description: HSG H702
+ items:
+ - const: hsg,h702
+ - const: allwinner,sun5i-a13
+
+ - description: I12 TV Box
+ items:
+ - const: allwinner,i12-tvbox
+ - const: allwinner,sun7i-a20
+
+ - description: ICNova A20 SWAC
+ items:
+ - const: swac,icnova-a20-swac
+ - const: incircuit,icnova-a20
+ - const: allwinner,sun7i-a20
+
+ - description: INet-1
+ items:
+ - const: inet-tek,inet1
+ - const: allwinner,sun4i-a10
+
+ - description: iNet-86DZ Rev 01
+ items:
+ - const: primux,inet86dz
+ - const: allwinner,sun8i-a23
+
+ - description: iNet-9F Rev 03
+ items:
+ - const: inet-tek,inet9f-rev03
+ - const: allwinner,sun4i-a10
+
+ - description: iNet-97F Rev 02
+ items:
+ - const: primux,inet97fv2
+ - const: allwinner,sun4i-a10
+
+ - description: iNet-98V Rev 02
+ items:
+ - const: primux,inet98v-rev2
+ - const: allwinner,sun5i-a13
+
+ - description: iNet D978 Rev 02 Tablet
+ items:
+ - const: primux,inet-d978-rev2
+ - const: allwinner,sun8i-a33
+
+ - description: iNet Q972 Tablet
+ items:
+ - const: inet-tek,inet-q972
+ - const: allwinner,sun6i-a31s
+
+ - description: Itead Ibox A20
+ items:
+ - const: itead,itead-ibox-a20
+ - const: allwinner,sun7i-a20
+
+ - description: Itead Iteaduino Plus A10
+ items:
+ - const: itead,iteaduino-plus-a10
+ - const: allwinner,sun4i-a10
+
+ - description: Jesurun Q5
+ items:
+ - const: jesurun,q5
+ - const: allwinner,sun4i-a10
+
+ - description: Lamobo R1
+ items:
+ - const: lamobo,lamobo-r1
+ - const: allwinner,sun7i-a20
+
+ - description: Libre Computer Board ALL-H3-CC H2+
+ items:
+ - const: libretech,all-h3-cc-h2-plus
+ - const: allwinner,sun8i-h2-plus
+
+ - description: Libre Computer Board ALL-H3-CC H3
+ items:
+ - const: libretech,all-h3-cc-h3
+ - const: allwinner,sun8i-h3
+
+ - description: Libre Computer Board ALL-H3-CC H5
+ items:
+ - const: libretech,all-h3-cc-h5
+ - const: allwinner,sun50i-h5
+
+ - description: Lichee Pi One
+ items:
+ - const: licheepi,licheepi-one
+ - const: allwinner,sun5i-a13
+
+ - description: Lichee Pi Zero
+ items:
+ - const: licheepi,licheepi-zero
+ - const: allwinner,sun8i-v3s
+
+ - description: Lichee Pi Zero (with Dock)
+ items:
+ - const: licheepi,licheepi-zero-dock
+ - const: licheepi,licheepi-zero
+ - const: allwinner,sun8i-v3s
+
+ - description: Linksprite PCDuino
+ items:
+ - const: linksprite,a10-pcduino
+ - const: allwinner,sun4i-a10
+
+ - description: Linksprite PCDuino2
+ items:
+ - const: linksprite,a10-pcduino2
+ - const: allwinner,sun4i-a10
+
+ - description: Linksprite PCDuino3
+ items:
+ - const: linksprite,pcduino3
+ - const: allwinner,sun7i-a20
+
+ - description: Linksprite PCDuino3 Nano
+ items:
+ - const: linksprite,pcduino3-nano
+ - const: allwinner,sun7i-a20
+
+ - description: HAOYU Electronics Marsboard A10
+ items:
+ - const: haoyu,a10-marsboard
+ - const: allwinner,sun4i-a10
+
+ - description: MapleBoard MP130
+ items:
+ - const: mapleboard,mp130
+ - const: allwinner,sun8i-h3
+
+ - description: Mele A1000
+ items:
+ - const: mele,a1000
+ - const: allwinner,sun4i-a10
+
+ - description: Mele A1000G Quad Set Top Box
+ items:
+ - const: mele,a1000g-quad
+ - const: allwinner,sun6i-a31
+
+ - description: Mele I7 Quad Set Top Box
+ items:
+ - const: mele,i7
+ - const: allwinner,sun6i-a31
+
+ - description: Mele M3
+ items:
+ - const: mele,m3
+ - const: allwinner,sun7i-a20
+
+ - description: Mele M9 Set Top Box
+ items:
+ - const: mele,m9
+ - const: allwinner,sun6i-a31
+
+ - description: Merrii A20 Hummingbird
+ items:
+ - const: merrii,a20-hummingbird
+ - const: allwinner,sun7i-a20
+
+ - description: Merrii A31 Hummingbird
+ items:
+ - const: merrii,a31-hummingbird
+ - const: allwinner,sun6i-a31
+
+ - description: Merrii A80 Optimus
+ items:
+ - const: merrii,a80-optimus
+ - const: allwinner,sun9i-a80
+
+ - description: Miniand Hackberry
+ items:
+ - const: miniand,hackberry
+ - const: allwinner,sun4i-a10
+
+ - description: MK802
+ items:
+ - const: allwinner,mk802
+ - const: allwinner,sun4i-a10
+
+ - description: MK802-A10s
+ items:
+ - const: allwinner,a10s-mk802
+ - const: allwinner,sun5i-a10s
+
+ - description: MK802-II
+ items:
+ - const: allwinner,mk802ii
+ - const: allwinner,sun4i-a10
+
+ - description: MK808c
+ items:
+ - const: allwinner,mk808c
+ - const: allwinner,sun7i-a20
+
+ - description: MSI Primo81 Tablet
+ items:
+ - const: msi,primo81
+ - const: allwinner,sun6i-a31s
+
+ - description: Emlid Neutis N5 Developer Board
+ items:
+ - const: emlid,neutis-n5-devboard
+ - const: emlid,neutis-n5
+ - const: allwinner,sun50i-h5
+
+ - description: NextThing Co. CHIP
+ items:
+ - const: nextthing,chip
+ - const: allwinner,sun5i-r8
+ - const: allwinner,sun5i-a13
+
+ - description: NextThing Co. CHIP Pro
+ items:
+ - const: nextthing,chip-pro
+ - const: nextthing,gr8
+
+ - description: NextThing Co. GR8 Evaluation Board
+ items:
+ - const: nextthing,gr8-evb
+ - const: nextthing,gr8
+
+ - description: Nintendo NES Classic
+ items:
+ - const: nintendo,nes-classic
+ - const: allwinner,sun8i-r16
+ - const: allwinner,sun8i-a33
+
+ - description: Nintendo Super NES Classic
+ items:
+ - const: nintendo,super-nes-classic
+ - const: nintendo,nes-classic
+ - const: allwinner,sun8i-r16
+ - const: allwinner,sun8i-a33
+
+ - description: Oceanic 5inMFD (5205)
+ items:
+ - const: oceanic,5205-5inmfd
+ - const: allwinner,sun50i-a64
+
+ - description: Olimex A10-OlinuXino LIME
+ items:
+ - const: olimex,a10-olinuxino-lime
+ - const: allwinner,sun4i-a10
+
+ - description: Olimex A10s-OlinuXino Micro
+ items:
+ - const: olimex,a10s-olinuxino-micro
+ - const: allwinner,sun5i-a10s
+
+ - description: Olimex A13-OlinuXino
+ items:
+ - const: olimex,a13-olinuxino
+ - const: allwinner,sun5i-a13
+
+ - description: Olimex A13-OlinuXino Micro
+ items:
+ - const: olimex,a13-olinuxino-micro
+ - const: allwinner,sun5i-a13
+
+ - description: Olimex A20-Olimex SOM Evaluation Board
+ items:
+ - const: olimex,a20-olimex-som-evb
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-Olimex SOM Evaluation Board (with eMMC)
+ items:
+ - const: olimex,a20-olimex-som-evb-emmc
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-OlinuXino LIME
+ items:
+ - const: olimex,a20-olinuxino-lime
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-OlinuXino LIME2
+ items:
+ - const: olimex,a20-olinuxino-lime2
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-OlinuXino LIME2 (with eMMC)
+ items:
+ - const: olimex,a20-olinuxino-lime2-emmc
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-OlinuXino Micro
+ items:
+ - const: olimex,a20-olinuxino-micro
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-OlinuXino Micro (with eMMC)
+ items:
+ - const: olimex,a20-olinuxino-micro-emmc
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-SOM204 Evaluation Board
+ items:
+ - const: olimex,a20-olimex-som204-evb
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A20-SOM204 Evaluation Board (with eMMC)
+ items:
+ - const: olimex,a20-olimex-som204-evb-emmc
+ - const: allwinner,sun7i-a20
+
+ - description: Olimex A33-OlinuXino
+ items:
+ - const: olimex,a33-olinuxino
+ - const: allwinner,sun8i-a33
+
+ - description: Olimex A64-OlinuXino
+ items:
+ - const: olimex,a64-olinuxino
+ - const: allwinner,sun50i-a64
+
+ - description: Olimex A64 Teres-I
+ items:
+ - const: olimex,a64-teres-i
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64
+ items:
+ - const: pine64,pine64
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64+
+ items:
+ - const: pine64,pine64-plus
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64 PineH64
+ items:
+ - const: pine64,pine-h64
+ - const: allwinner,sun50i-h6
+
+ - description: Pine64 LTS
+ items:
+ - const: pine64,pine64-lts
+ - const: allwinner,sun50i-r18
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64 Pinebook
+ items:
+ - const: pine64,pinebook
+ - const: allwinner,sun50i-a64
+
+ - description: Pine64 SoPine Baseboard
+ items:
+ - const: pine64,sopine-baseboard
+ - const: pine64,sopine
+ - const: allwinner,sun50i-a64
+
+ - description: PineRiver Mini X-Plus
+ items:
+ - const: pineriver,mini-xplus
+ - const: allwinner,sun4i-a10
+
+ - description: Point of View Protab2-IPS9
+ items:
+ - const: pov,protab2-ips9
+ - const: allwinner,sun4i-a10
+
+ - description: Polaroid MID2407PXE03 Tablet
+ items:
+ - const: polaroid,mid2407pxe03
+ - const: allwinner,sun8i-a23
+
+ - description: Polaroid MID2809PXE04 Tablet
+ items:
+ - const: polaroid,mid2809pxe04
+ - const: allwinner,sun8i-a23
+
+ - description: Q8 A13 Tablet
+ items:
+ - const: allwinner,q8-a13
+ - const: allwinner,sun5i-a13
+
+ - description: Q8 A23 Tablet
+ items:
+ - const: allwinner,q8-a23
+ - const: allwinner,sun8i-a23
+
+ - description: Q8 A33 Tablet
+ items:
+ - const: allwinner,q8-a33
+ - const: allwinner,sun8i-a33
+
+ - description: Qihua CQA3T BV3
+ items:
+ - const: qihua,t3-cqa3t-bv3
+ - const: allwinner,sun8i-t3
+ - const: allwinner,sun8i-r40
+
+ - description: R7 A10s HDMI TV Stick
+ items:
+ - const: allwinner,r7-tv-dongle
+ - const: allwinner,sun5i-a10s
+
+ - description: RerVision H3-DVK
+ items:
+ - const: rervision,h3-dvk
+ - const: allwinner,sun8i-h3
+
+ - description: Sinlinx SinA31s Core Board
+ items:
+ - const: sinlinx,sina31s
+ - const: allwinner,sun6i-a31s
+
+ - description: Sinlinx SinA31s Development Board
+ items:
+ - const: sinlinx,sina31s-sdk
+ - const: allwinner,sun6i-a31s
+
+ - description: Sinlinx SinA33
+ items:
+ - const: sinlinx,sina33
+ - const: allwinner,sun8i-a33
+
+ - description: TBS A711 Tablet
+ items:
+ - const: tbs-biometrics,a711
+ - const: allwinner,sun8i-a83t
+
+ - description: Utoo P66
+ items:
+ - const: utoo,p66
+ - const: allwinner,sun5i-a13
+
+ - description: Wexler TAB7200
+ items:
+ - const: wexler,tab7200
+ - const: allwinner,sun7i-a20
+
+ - description: WITS A31 Colombus Evaluation Board
+ items:
+ - const: wits,colombus
+ - const: allwinner,sun6i-a31
+
+ - description: WITS Pro A20 DKT
+ items:
+ - const: wits,pro-a20-dkt
+ - const: allwinner,sun7i-a20
+
+ - description: Wobo i5
+ items:
+ - const: wobo,a10s-wobo-i5
+ - const: allwinner,sun5i-a10s
+
+ - description: Yones TopTech BS1078 v2 Tablet
+ items:
+ - const: yones-toptech,bs1078-v2
+ - const: allwinner,sun6i-a31s
+
+ - description: Xunlong OrangePi
+ items:
+ - const: xunlong,orangepi
+ - const: allwinner,sun7i-a20
+
+ - description: Xunlong OrangePi 2
+ items:
+ - const: xunlong,orangepi-2
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi 3
+ items:
+ - const: xunlong,orangepi-3
+ - const: allwinner,sun50i-h6
+
+ - description: Xunlong OrangePi Lite
+ items:
+ - const: xunlong,orangepi-lite
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi Lite2
+ items:
+ - const: xunlong,orangepi-lite2
+ - const: allwinner,sun50i-h6
+
+ - description: Xunlong OrangePi Mini
+ items:
+ - const: xunlong,orangepi-mini
+ - const: allwinner,sun7i-a20
+
+ - description: Xunlong OrangePi One
+ items:
+ - const: xunlong,orangepi-one
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi One Plus
+ items:
+ - const: xunlong,orangepi-one-plus
+ - const: allwinner,sun50i-h6
+
+ - description: Xunlong OrangePi PC
+ items:
+ - const: xunlong,orangepi-pc
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi PC 2
+ items:
+ - const: xunlong,orangepi-pc2
+ - const: allwinner,sun50i-h5
+
+ - description: Xunlong OrangePi PC Plus
+ items:
+ - const: xunlong,orangepi-pc-plus
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi Plus
+ items:
+ - const: xunlong,orangepi-plus
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi Plus 2E
+ items:
+ - const: xunlong,orangepi-plus2e
+ - const: allwinner,sun8i-h3
+
+ - description: Xunlong OrangePi Prime
+ items:
+ - const: xunlong,orangepi-prime
+ - const: allwinner,sun50i-h5
+
+ - description: Xunlong OrangePi R1
+ items:
+ - const: xunlong,orangepi-r1
+ - const: allwinner,sun8i-h2-plus
+
+ - description: Xunlong OrangePi Win
+ items:
+ - const: xunlong,orangepi-win
+ - const: allwinner,sun50i-a64
+
+ - description: Xunlong OrangePi Zero
+ items:
+ - const: xunlong,orangepi-zero
+ - const: allwinner,sun8i-h2-plus
+
+ - description: Xunlong OrangePi Zero Plus
+ items:
+ - const: xunlong,orangepi-zero-plus
+ - const: allwinner,sun50i-h5
+
+ - description: Xunlong OrangePi Zero Plus2
+ items:
+ - const: xunlong,orangepi-zero-plus2
+ - const: allwinner,sun50i-h5
+
+ - description: Xunlong OrangePi Zero Plus2
+ items:
+ - const: xunlong,orangepi-zero-plus2-h3
+ - const: allwinner,sun8i-h3
diff --git a/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt b/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt
new file mode 100644
index 000000000000..1464a4713553
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sunxi/sunxi-mbus.txt
@@ -0,0 +1,36 @@
+Allwinner Memory Bus (MBUS) controller
+
+The MBUS controller drives the MBUS that other devices in the SoC use
+to perform DMA. It also has a register interface that allows the
+bandwidth and priorities of the masters on that bus to be monitored
+and controlled.
+
+Required properties:
+ - compatible: Must be one of:
+ - allwinner,sun5i-a13-mbus
+ - reg: Offset and length of the register set for the controller
+ - clocks: phandle to the clock driving the controller
+ - dma-ranges: See section 2.3.9 of the DeviceTree Specification
+ - #interconnect-cells: Must be one, with the argument being the MBUS
+ port ID
+
+Each device that performs its DMA through the MBUS must have the
+interconnects and interconnect-names properties set, pointing to the
+MBUS controller and using "dma-mem" as the interconnect name.
+
+Example:
+
+mbus: dram-controller@1c01000 {
+ compatible = "allwinner,sun5i-a13-mbus";
+ reg = <0x01c01000 0x1000>;
+ clocks = <&ccu CLK_MBUS>;
+ dma-ranges = <0x00000000 0x40000000 0x20000000>;
+ #interconnect-cells = <1>;
+};
+
+fe0: display-frontend@1e00000 {
+ compatible = "allwinner,sun5i-a13-display-frontend";
+ ...
+ interconnects = <&mbus 19>;
+ interconnect-names = "dma-mem";
+};
diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.txt b/Documentation/devicetree/bindings/bus/ti-sysc.txt
index 85a23f551f02..233eb8294204 100644
--- a/Documentation/devicetree/bindings/bus/ti-sysc.txt
+++ b/Documentation/devicetree/bindings/bus/ti-sysc.txt
@@ -94,6 +94,8 @@ Optional properties:
- ti,no-idle-on-init interconnect target module should not be idled at init
+- ti,no-idle interconnect target module should not be idled
+
Example: Single instance of MUSB controller on omap4 using interconnect ranges
using offsets from l4_cfg second segment (0x4a000000 + 0x80000 = 0x4a0ab000):
@@ -131,6 +133,6 @@ using offsets from l4_cfg second segment (0x4a000000 + 0x80000 = 0x4a0ab000):
};
};
-Note that other SoCs, such as am335x can have multipe child devices. On am335x
+Note that other SoCs, such as am335x can have multiple child devices. On am335x
there are two MUSB instances, two USB PHY instances, and a single CPPI41 DMA
-instance as children of a single interconnet target module.
+instance as children of a single interconnect target module.
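
For illustration, the new ti,no-idle flag is simply set inside an
interconnect target module node, e.g. (sketch only; the unit address,
compatible and reg values are assumptions, not part of this patch):

	target-module@48038000 {
		compatible = "ti,sysc-omap4", "ti,sysc";
		reg = <0x48038000 0x4>;
		reg-names = "rev";
		ti,no-idle;
		...
	};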
diff --git a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
index 61777ad24f61..0f777749f4f1 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,axg-audio-clkc.txt
@@ -6,7 +6,8 @@ devices.
Required Properties:
-- compatible : should be "amlogic,axg-audio-clkc" for the A113X and A113D
+- compatible : should be "amlogic,axg-audio-clkc" for the A113X and A113D,
+ "amlogic,g12a-audio-clkc" for G12A.
- reg : physical base address of the clock controller and length of
memory mapped region.
- clocks : a list of phandle + clock-specifier pairs for the clocks listed
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index e9f70fcdfe80..b520280e33ff 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -8,35 +8,30 @@ Slow Clock controller:
Required properties:
- compatible : shall be one of the following:
- "atmel,at91sam9x5-sckc" or
+ "atmel,at91sam9x5-sckc",
+ "atmel,sama5d3-sckc" or
"atmel,sama5d4-sckc":
at91 SCKC (Slow Clock Controller)
- This node contains the slow clock definitions.
-
- "atmel,at91sam9x5-clk-slow-osc":
- at91 slow oscillator
-
- "atmel,at91sam9x5-clk-slow-rc-osc":
- at91 internal slow RC oscillator
-- reg : defines the IO memory reserved for the SCKC.
-- #size-cells : shall be 0 (reg is used to encode clk id).
-- #address-cells : shall be 1 (reg is used to encode clk id).
+- #clock-cells : shall be 0.
+- clocks : shall be the input parent clock phandle for the clock.
+Optional properties:
+- atmel,osc-bypass : boolean property. Set this when a clock signal is directly
+ provided on XIN.
For example:
- sckc: sckc@fffffe50 {
- compatible = "atmel,sama5d3-pmc";
- reg = <0xfffffe50 0x4>
- #size-cells = <0>;
- #address-cells = <1>;
-
- /* put at91 slow clocks here */
+ sckc@fffffe50 {
+ compatible = "atmel,at91sam9x5-sckc";
+ reg = <0xfffffe50 0x4>;
+ clocks = <&slow_xtal>;
+ #clock-cells = <0>;
};
Power Management Controller (PMC):
Required properties:
-- compatible : shall be "atmel,<chip>-pmc", "syscon":
+- compatible : shall be "atmel,<chip>-pmc", "syscon" or
+ "microchip,sam9x60-pmc"
<chip> can be: at91rm9200, at91sam9260, at91sam9261,
at91sam9263, at91sam9g45, at91sam9n12, at91sam9rl, at91sam9g15,
at91sam9g25, at91sam9g35, at91sam9x25, at91sam9x35, at91sam9x5,
diff --git a/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..b8d8ef3bdc5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
@@ -0,0 +1,93 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the clock portion of
+the driver.
+
+Also see these documents for generic binding information:
+ [1] Clock : ../clock/clock-bindings.txt
+
+And these for relevant defines:
+ [2] include/dt-bindings/clock/lochnagar.h
+
+This binding must be part of the Lochnagar MFD binding:
+ [3] ../mfd/cirrus,lochnagar.txt
+
+Required properties:
+
+ - compatible : One of the following strings:
+ "cirrus,lochnagar1-clk"
+ "cirrus,lochnagar2-clk"
+
+ - #clock-cells : Must be 1. The first cell indicates the clock
+ number, see [2] for available clocks and [1].
+
+Optional properties:
+
+ - clocks : Must contain an entry for each clock in clock-names.
+ - clock-names : May contain entries for each of the following
+ clocks:
+ - ln-cdc-clkout : Output clock from CODEC card.
+ - ln-dsp-clkout : Output clock from DSP card.
+ - ln-gf-mclk1,ln-gf-mclk2,ln-gf-mclk3,ln-gf-mclk4 : Optional
+ input audio clocks from host system.
+ - ln-psia1-mclk, ln-psia2-mclk : Optional input audio clocks from
+ external connector.
+ - ln-spdif-clkout : Optional input audio clock from SPDIF.
+ - ln-adat-mclk : Optional input audio clock from ADAT.
+ - ln-pmic-32k : On board fixed clock.
+ - ln-clk-12m : On board fixed clock.
+ - ln-clk-11m : On board fixed clock.
+ - ln-clk-24m : On board fixed clock.
+ - ln-clk-22m : On board fixed clock.
+ - ln-clk-8m : On board fixed clock.
+ - ln-usb-clk-24m : On board fixed clock.
+ - ln-usb-clk-12m : On board fixed clock.
+
+ - assigned-clocks : A list of Lochnagar clocks to be reparented, see
+ [2] for available clocks.
+ - assigned-clock-parents : Parents to be assigned to the clocks
+ listed in "assigned-clocks".
+
+Optional nodes:
+
+ - fixed-clock nodes may be registered for the following on board clocks:
+ - ln-pmic-32k : 32768 Hz
+ - ln-clk-12m : 12288000 Hz
+ - ln-clk-11m : 11298600 Hz
+ - ln-clk-24m : 24576000 Hz
+ - ln-clk-22m : 22579200 Hz
+ - ln-clk-8m : 8192000 Hz
+ - ln-usb-clk-24m : 24576000 Hz
+ - ln-usb-clk-12m : 12288000 Hz
+
+Example:
+
+lochnagar {
+ lochnagar-clk {
+ compatible = "cirrus,lochnagar2-clk";
+
+ #clock-cells = <1>;
+
+ clocks = <&clk-audio>, <&clk_pmic>;
+ clock-names = "ln-gf-mclk2", "ln-pmic-32k";
+
+ assigned-clocks = <&lochnagar-clk LOCHNAGAR_CDC_MCLK1>,
+ <&lochnagar-clk LOCHNAGAR_CDC_MCLK2>;
+ assigned-clock-parents = <&clk-audio>,
+ <&clk-pmic>;
+ };
+
+ clk-pmic: clk-pmic {
+ compatible = "fixed-clock";
+ clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
new file mode 100644
index 000000000000..5cf0b811821e
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/milbeaut-clock.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bindings/clock/milbeaut-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Milbeaut SoCs Clock Controller Binding
+
+maintainers:
+ - Taichi Sugaya <sugaya.taichi@socionext.com>
+
+description: |
+ Milbeaut SoCs Clock controller is an integrated clock controller, which
+ generates and supplies clocks to all modules.
+
+ This binding uses common clock bindings
+ [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - socionext,milbeaut-m10v-ccu
+ clocks:
+ maxItems: 1
+ description: external clock
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - '#clock-cells'
+
+examples:
+ # Clock controller node:
+ - |
+ m10v-clk-ctrl@1d021000 {
+ compatible = "socionext,milbeaut-m10v-clk-ccu";
+ reg = <0x1d021000 0x4000>;
+ #clock-cells = <1>;
+ clocks = <&clki40mhz>;
+ };
+
+ # Required an external clock for Clock controller node:
+ - |
+ clocks {
+ clki40mhz: clki40mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <40000000>;
+ };
+ /* other clocks */
+ };
+
+ # The clock consumer shall specify the desired clock-output of the clock
+ # controller as below by specifying output-id in its "clk" phandle cell.
+ # 2: uart
+ # 4: 32-bit timer
+ # 7: UHS-I/II
+ - |
+ serial@1e700010 {
+ compatible = "socionext,milbeaut-usio-uart";
+ reg = <0x1e700010 0x10>;
+ interrupts = <0 141 0x4>, <0 149 0x4>;
+ interrupt-names = "rx", "tx";
+ clocks = <&clk 2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/clock/qcom,turingcc.txt b/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
new file mode 100644
index 000000000000..126517de5f9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,turingcc.txt
@@ -0,0 +1,19 @@
+Qualcomm Turing Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible: shall contain "qcom,qcs404-turingcc".
+- reg: shall contain base register location and length.
+- clocks: ahb clock for the TuringCC
+- #clock-cells: from common clock binding, shall contain 1.
+- #reset-cells: from common reset binding, shall contain 1.
+
+Example:
+ turingcc: clock-controller@800000 {
+ compatible = "qcom,qcs404-turingcc";
+ reg = <0x00800000 0x30000>;
+ clocks = <&gcc GCC_CDSP_CFG_AHB_CLK>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index c655f28d5918..f7d48f23da44 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -39,6 +39,7 @@ Required properties:
* "fsl,b4860-clockgen"
* "fsl,ls1012a-clockgen"
* "fsl,ls1021a-clockgen"
+ * "fsl,ls1028a-clockgen"
* "fsl,ls1043a-clockgen"
* "fsl,ls1046a-clockgen"
* "fsl,ls1088a-clockgen"
@@ -83,8 +84,8 @@ second cell is the clock index for the specified type.
1 cmux index (n in CLKCnCSR)
2 hwaccel index (n in CLKCGnHWACSR)
3 fman 0 for fm1, 1 for fm2
- 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4
- 4=pll/5, 5=pll/6, 6=pll/7, 7=pll/8
+ 4 platform pll n=pll/(n+1). For example, when n=1,
+ that means output_freq=PLL_freq/2.
5 coreclk must be 0
3. Example
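
To make the reworded platform-pll specifier concrete, a consumer selecting
the platform PLL divided by two would use type 4 with n = 1 (sketch only;
the clockgen label is an assumption):

	/* platform PLL / 2: output_freq = PLL_freq / (n + 1), here n = 1 */
	clocks = <&clockgen 4 1>;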
diff --git a/Documentation/devicetree/bindings/clock/sifive/fu540-prci.txt b/Documentation/devicetree/bindings/clock/sifive/fu540-prci.txt
new file mode 100644
index 000000000000..349808f4fb8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sifive/fu540-prci.txt
@@ -0,0 +1,46 @@
+SiFive FU540 PRCI bindings
+
+On the FU540 family of SoCs, most system-wide clock and reset integration
+is via the PRCI IP block.
+
+Required properties:
+- compatible: Should be "sifive,<chip>-prci". Only one value is
+ supported: "sifive,fu540-c000-prci"
+- reg: Should describe the PRCI's register target physical address region
+- clocks: Should point to the hfclk device tree node and the rtcclk
+ device tree node. The RTC clock here is not a time-of-day clock,
+ but is instead a high-stability clock source for system timers
+ and cycle counters.
+- #clock-cells: Should be <1>
+
+The clock consumer should specify the desired clock via the clock ID
+macros defined in include/dt-bindings/clock/sifive-fu540-prci.h.
+These macros begin with PRCI_CLK_.
+
+The hfclk and rtcclk nodes are required, and represent physical
+crystals or resonators located on the PCB. These nodes should be present
+underneath /, rather than /soc.
+
+Examples:
+
+/* under /, in PCB-specific DT data */
+hfclk: hfclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ clock-output-names = "hfclk";
+};
+rtcclk: rtcclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <1000000>;
+ clock-output-names = "rtcclk";
+};
+
+/* under /soc, in SoC-specific DT data */
+prci: clock-controller@10000000 {
+ compatible = "sifive,fu540-c000-prci";
+ reg = <0x0 0x10000000 0x0 0x1000>;
+ clocks = <&hfclk>, <&rtcclk>;
+ #clock-cells = <1>;
+};
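
A consumer sketch, for illustration only (the UART node, its compatible
strings and register values are assumptions, not part of this binding),
selecting a PRCI clock via the PRCI_CLK_ macros:

	uart0: serial@10010000 {
		compatible = "sifive,fu540-c000-uart", "sifive,uart0";
		reg = <0x0 0x10010000 0x0 0x1000>;
		clocks = <&prci PRCI_CLK_TLCLK>;
	};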
diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
index b240121d2ac9..cfa04b614d8a 100644
--- a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
+++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt
@@ -11,6 +11,8 @@ Required properties:
"st,stm32f42xx-rcc"
"st,stm32f469-rcc"
"st,stm32f746-rcc"
+ "st,stm32f769-rcc"
+
- reg: should be register base and length as documented in the
datasheet
- #reset-cells: 1, see below
@@ -102,6 +104,10 @@ The secondary index is bound with the following magic numbers:
28 CLK_I2C3
29 CLK_I2C4
30 CLK_LPTIMER (LPTimer1 clock)
+ 31 CLK_PLL_SRC
+ 32 CLK_DFSDM1
+ 33 CLK_ADFSDM1
+ 34 CLK_F769_DSI
)
Example:
diff --git a/Documentation/devicetree/bindings/clock/xlnx,zynqmp-clk.txt b/Documentation/devicetree/bindings/clock/xlnx,zynqmp-clk.txt
new file mode 100644
index 000000000000..391ee1a60bed
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/xlnx,zynqmp-clk.txt
@@ -0,0 +1,63 @@
+--------------------------------------------------------------------------
+Device Tree Clock bindings for the Zynq Ultrascale+ MPSoC controlled using
+Zynq MPSoC firmware interface
+--------------------------------------------------------------------------
+The clock controller is a hardware block of the Zynq Ultrascale+ MPSoC clock
+tree. It reads the required input clock frequencies from the devicetree and
+acts as the clock provider for all clock consumers of PS clocks.
+
+See clock_bindings.txt for more information on the generic clock bindings.
+
+Required properties:
+ - #clock-cells: Must be 1
+ - compatible: Must contain: "xlnx,zynqmp-clk"
+ - clocks: List of clock specifiers which are external input
+ clocks to the given clock controller. Please refer
+ the next section to find the input clocks for a
+ given controller.
+ - clock-names: List of clock names which are external input clocks
+ to the given clock controller. Please refer to the
+ clock bindings for more details.
+
+Input clocks for zynqmp Ultrascale+ clock controller:
+
+The Zynq UltraScale+ MPSoC has one primary and four alternative reference clock
+inputs. These required clock inputs are:
+ - pss_ref_clk (PS reference clock)
+ - video_clk (reference clock for video system)
+ - pss_alt_ref_clk (alternative PS reference clock)
+ - aux_ref_clk
+ - gt_crx_ref_clk (transceiver reference clock)
+
+The following strings are optional parameters to the 'clock-names' property in
+order to provide an optional (E)MIO clock source:
+ - swdt0_ext_clk
+ - swdt1_ext_clk
+ - gem0_emio_clk
+ - gem1_emio_clk
+ - gem2_emio_clk
+ - gem3_emio_clk
+ - mio_clk_XX # with XX = 00..77
+ - mio_clk_50_or_51 #for the mux clock to gem tsu from 50 or 51
+
+
+Output clocks are registered based on clock information received
+from firmware. Output clocks indexes are mentioned in
+include/dt-bindings/clock/xlnx-zynqmp-clk.h.
+
+-------
+Example
+-------
+
+firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ zynqmp_clk: clock-controller {
+ #clock-cells = <1>;
+ compatible = "xlnx,zynqmp-clk";
+ clocks = <&pss_ref_clk>, <&video_clk>, <&pss_alt_ref_clk>, <&aux_ref_clk>, <&gt_crx_ref_clk>;
+ clock-names = "pss_ref_clk", "video_clk", "pss_alt_ref_clk","aux_ref_clk", "gt_crx_ref_clk";
+ };
+ };
+};
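
A consumer sketch, for illustration only (the UART node and the UART0_REF /
LPD_LSBUS macro names are assumptions; see
include/dt-bindings/clock/xlnx-zynqmp-clk.h for the real output clock indexes):

	uart0: serial@ff000000 {
		compatible = "cdns,uart-r1p12";
		reg = <0x0 0xff000000 0x0 0x1000>;
		clocks = <&zynqmp_clk UART0_REF>, <&zynqmp_clk LPD_LSBUS>;
		clock-names = "uart_clk", "pclk";
	};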
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index a9a2f2fc44f2..cef556d4e5ee 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -47,7 +47,7 @@ Required properties for usb-c-connector with power delivery support:
Required nodes:
- any data bus to the connector should be modeled using the OF graph bindings
specified in bindings/graph.txt, unless the bus is between parent node and
- the connector. Since single connector can have multpile data buses every bus
+ the connector. Since a single connector can have multiple data buses, every bus
has assigned OF graph port number as follows:
0: High Speed (HS), present in all connectors,
1: Super Speed (SS), present in SS capable connectors,
diff --git a/Documentation/devicetree/bindings/counter/ftm-quaddec.txt b/Documentation/devicetree/bindings/counter/ftm-quaddec.txt
new file mode 100644
index 000000000000..4d18cd722074
--- /dev/null
+++ b/Documentation/devicetree/bindings/counter/ftm-quaddec.txt
@@ -0,0 +1,18 @@
+FlexTimer Quadrature decoder counter
+
+This driver exposes a simple counter for the quadrature decoder mode.
+
+Required properties:
+- compatible: Must be "fsl,ftm-quaddec".
+- reg: Must be set to the memory region of the flextimer.
+
+Optional property:
+- big-endian: Access the device registers in big-endian mode.
+
+Example:
+ counter0: counter@29d0000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x0 0x29d0000 0x0 0x10000>;
+ big-endian;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt b/Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt
index a04aa5c04103..e90bc47f752a 100644
--- a/Documentation/devicetree/bindings/iio/counter/stm32-lptimer-cnt.txt
+++ b/Documentation/devicetree/bindings/counter/stm32-lptimer-cnt.txt
@@ -10,8 +10,9 @@ See ../mfd/stm32-lptimer.txt for details about the parent node.
Required properties:
- compatible: Must be "st,stm32-lptimer-counter".
-- pinctrl-names: Set to "default".
-- pinctrl-0: List of phandles pointing to pin configuration nodes,
+- pinctrl-names: Set to "default". An additional "sleep" state can be
+ defined to set pins in sleep state.
+- pinctrl-n: List of phandles pointing to pin configuration nodes,
to set IN1/IN2 pins in mode of operation for Low-Power
Timer input on external pin.
@@ -21,7 +22,8 @@ Example:
...
counter {
compatible = "st,stm32-lptimer-counter";
- pinctrl-names = "default";
+ pinctrl-names = "default", "sleep";
pinctrl-0 = <&lptim1_in_pins>;
+ pinctrl-1 = <&lptim1_sleep_in_pins>;
};
};
diff --git a/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt b/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt
new file mode 100644
index 000000000000..c52fcdd4bf6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/counter/stm32-timer-cnt.txt
@@ -0,0 +1,31 @@
+STMicroelectronics STM32 Timer quadrature encoder
+
+STM32 Timer provides quadrature encoder to detect
+angular position and direction of rotary elements,
+from IN1 and IN2 input signals.
+
+Must be a sub-node of an STM32 Timer device tree node.
+See ../mfd/stm32-timers.txt for details about the parent node.
+
+Required properties:
+- compatible: Must be "st,stm32-timer-counter".
+- pinctrl-names: Set to "default".
+- pinctrl-0: List of phandles pointing to pin configuration nodes,
+ to set CH1/CH2 pins in mode of operation for STM32
+ Timer input on external pin.
+
+Example:
+ timers@40010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "st,stm32-timers";
+ reg = <0x40010000 0x400>;
+ clocks = <&rcc 0 160>;
+ clock-names = "int";
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&tim1_in_pins>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
index bf4a18047309..3a50a7862cf3 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.txt
@@ -37,6 +37,7 @@ Required properties:
- GXL (S905X, S905D) : "amlogic,meson-gxl-dw-hdmi"
- GXM (S912) : "amlogic,meson-gxm-dw-hdmi"
followed by the common "amlogic,meson-gx-dw-hdmi"
+ - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-dw-hdmi"
- reg: Physical base address and length of the controller's registers.
- interrupts: The HDMI interrupt number
- clocks, clock-names : must have the phandles to the HDMI iahb and isfr clocks,
@@ -66,6 +67,9 @@ corresponding to each HDMI output and input.
S905X (GXL) VENC Input TMDS Output
S905D (GXL) VENC Input TMDS Output
S912 (GXM) VENC Input TMDS Output
+ S905X2 (G12A) VENC Input TMDS Output
+ S905Y2 (G12A) VENC Input TMDS Output
+ S905D2 (G12A) VENC Input TMDS Output
Example:
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
index c65fd7a7467c..be40a780501c 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -57,18 +57,18 @@ Required properties:
- GXL (S905X, S905D) : "amlogic,meson-gxl-vpu"
- GXM (S912) : "amlogic,meson-gxm-vpu"
followed by the common "amlogic,meson-gx-vpu"
+ - G12A (S905X2, S905Y2, S905D2) : "amlogic,meson-g12a-vpu"
- reg: base address and size of the following memory-mapped regions :
- vpu
- hhi
- - dmc
- reg-names: should contain the names of the previous memory regions
- interrupts: should contain the VENC Vsync interrupt number
+- amlogic,canvas: phandle to canvas provider node as described in the file
+ ../soc/amlogic/amlogic,canvas.txt
Optional properties:
- power-domains: Optional phandle to associated power domain as described in
the file ../power/power_domain.txt
-- amlogic,canvas: phandle to canvas provider node as described in the file
- ../soc/amlogic/amlogic,canvas.txt
Required nodes:
@@ -84,6 +84,9 @@ corresponding to each VPU output.
S905X (GXL) CVBS VDAC HDMI-TX
S905D (GXL) CVBS VDAC HDMI-TX
S912 (GXM) CVBS VDAC HDMI-TX
+ S905X2 (G12A) CVBS VDAC HDMI-TX
+ S905Y2 (G12A) CVBS VDAC HDMI-TX
+ S905D2 (G12A) CVBS VDAC HDMI-TX
Example:
diff --git a/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt b/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt
deleted file mode 100644
index aaa6c24c8e70..000000000000
--- a/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-Meson specific Simple Framebuffer bindings
-
-This binding documents meson specific extensions to the simple-framebuffer
-bindings. The meson simplefb u-boot code relies on the devicetree containing
-pre-populated simplefb nodes.
-
-These extensions are intended so that u-boot can select the right node based
-on which pipeline is being used. As such they are solely intended for
-firmware / bootloader use, and the OS should ignore them.
-
-Required properties:
-- compatible: "amlogic,simple-framebuffer", "simple-framebuffer"
-- amlogic,pipeline, one of:
- "vpu-cvbs"
- "vpu-hdmi"
-
-Example:
-
-chosen {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- simplefb_hdmi: framebuffer-hdmi {
- compatible = "amlogic,simple-framebuffer",
- "simple-framebuffer";
- amlogic,pipeline = "vpu-hdmi";
- clocks = <&clkc CLKID_HDMI_PCLK>,
- <&clkc CLKID_CLK81>,
- <&clkc CLKID_GCLK_VENCI_INT0>;
- power-domains = <&pwrc_vpu>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
index 54d7e31525ec..5ff4f64ef8e8 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
+++ b/Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt
@@ -6,15 +6,32 @@ Required properties:
Optional properties:
- powerdown-gpios: power-down gpio
-- reg: I2C address. If and only if present the device node
- should be placed into the i2c controller node where the
- tfp410 i2c is connected to.
+- reg: I2C address. If and only if present the device node should be placed
+ into the I2C controller node where the TFP410 I2C is connected to.
+- ti,deskew: data de-skew in 350ps increments, from -4 to +3, as configured
+ through the DK[3:1] pins. This property shall be present only if the TFP410
+ is not connected through I2C.
Required nodes:
-- Video port 0 for DPI input [1].
-- Video port 1 for DVI output [1].
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+This device has two video ports. Their connections are modeled using the OF
+graph bindings specified in [1]. Each port node shall have a single endpoint.
+
+- Port 0 is the DPI input port. Its endpoint subnode shall contain a
+ pclk-sample and bus-width property and a remote-endpoint property as specified
+ in [1].
+ - If pclk-sample is not defined, pclk-sample = 0 should be assumed for
+ backward compatibility.
+ - If bus-width is not defined then bus-width = 24 should be assumed for
+ backward compatibility.
+ bus-width = 24: 24 data lines are connected and single-edge mode
+ bus-width = 12: 12 data lines are connected and dual-edge mode
+
+- Port 1 is the DVI output port. Its endpoint subnode shall contain a
+ remote-endpoint property as specified in [1].
+
+[1] Documentation/devicetree/bindings/media/video-interfaces.txt
+
Example
-------
@@ -22,6 +39,7 @@ Example
tfp410: encoder@0 {
compatible = "ti,tfp410";
powerdown-gpios = <&twl_gpio 2 GPIO_ACTIVE_LOW>;
+ ti,deskew = <4>;
ports {
#address-cells = <1>;
@@ -31,6 +49,8 @@ tfp410: encoder@0 {
reg = <0>;
tfp410_in: endpoint@0 {
+ pclk-sample = <1>;
+ bus-width = <24>;
remote-endpoint = <&dpi_out>;
};
};
diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt
index 3439b38e60f2..90af5b0a56a9 100644
--- a/Documentation/devicetree/bindings/display/msm/gmu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gmu.txt
@@ -24,7 +24,10 @@ Required properties:
* "cxo"
* "axi"
* "mnoc"
-- power-domains: should be <&clock_gpucc GPU_CX_GDSC>
+- power-domains: should be:
+ <&clock_gpucc GPU_CX_GDSC>
+ <&clock_gpucc GPU_GX_GDSC>
+- power-domain-names: Matching names for the power domains
- iommus: phandle to the adreno iommu
- operating-points-v2: phandle to the OPP operating points
@@ -51,7 +54,10 @@ Example:
<&gcc GCC_GPU_MEMNOC_GFX_CLK>;
clock-names = "gmu", "cxo", "axi", "memnoc";
- power-domains = <&gpucc GPU_CX_GDSC>;
+ power-domains = <&gpucc GPU_CX_GDSC>,
+ <&gpucc GPU_GX_GDSC>;
+ power-domain-names = "cx", "gx";
+
iommus = <&adreno_smmu 5>;
operating-points-v2 = <&gmu_opp_table>;
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index aad1aef682f7..2b8fd26c43b0 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -22,9 +22,14 @@ Required properties:
- qcom,adreno-630.2
- iommus: optional phandle to an adreno iommu instance
- operating-points-v2: optional phandle to the OPP operating points
+- interconnects: optional phandle to an interconnect provider. See
+ ../interconnect/interconnect.txt for details.
- qcom,gmu: For GMU attached devices a phandle to the GMU device that will
control the power for the GPU. Applicable targets:
- qcom,adreno-630.2
+- zap-shader: For a5xx and a6xx devices this node contains a memory-region that
+ points to reserved memory to store the zap shader that can be used to help
+ bring the GPU out of secure mode.
Example 3xx/4xx/a5xx:
@@ -70,6 +75,12 @@ Example a6xx (with GMU):
operating-points-v2 = <&gpu_opp_table>;
+ interconnects = <&rsc_hlos MASTER_GFX3D &rsc_hlos SLAVE_EBI1>;
+
qcom,gmu = <&gmu>;
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
new file mode 100644
index 000000000000..82caa7b65ae8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
@@ -0,0 +1,20 @@
+Feiyang FY07024DI26A30-D 7" MIPI-DSI LCD Panel
+
+Required properties:
+- compatible: must be "feiyang,fy07024di26a30d"
+- reg: DSI virtual channel used by that screen
+- avdd-supply: analog regulator dc1 switch
+- dvdd-supply: 3v3 digital regulator
+- reset-gpios: a GPIO phandle for the reset pin
+
+Optional properties:
+- backlight: phandle for the backlight control.
+
+panel@0 {
+ compatible = "feiyang,fy07024di26a30d";
+ reg = <0>;
+ avdd-supply = <&reg_dc1sw>;
+ dvdd-supply = <&reg_dldo2>;
+ reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */
+ backlight = <&backlight>;
+};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt b/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt
index d0f55161579a..3ab8c7412cf6 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p079zca.txt
@@ -12,7 +12,7 @@ Optional properties:
Example:
&mipi_dsi {
- panel {
+ panel@0 {
compatible = "innolux,p079zca";
reg = <0>;
power-supply = <...>;
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
index 595d9dfeffd3..d1cab3a8f0fb 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.txt
@@ -13,7 +13,7 @@ Optional properties:
Example:
&mipi_dsi {
- panel {
+ panel@0 {
compatible = "innolux,p079zca";
reg = <0>;
avdd-supply = <...>;
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
index 164a5fa236da..cfefff688614 100644
--- a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd097d04.txt
@@ -12,7 +12,7 @@ Optional properties:
Example:
&mipi_dsi {
- panel {
+ panel@0 {
compatible = "kingdisplay,kd097d04";
reg = <0>;
power-supply = <...>;
diff --git a/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt b/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt
new file mode 100644
index 000000000000..fc1e1b325e49
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lg,acx467akm-7.txt
@@ -0,0 +1,7 @@
+LG ACX467AKM-7 4.95" 1080×1920 LCD Panel
+
+Required properties:
+- compatible: must be "lg,acx467akm-7"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
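For orientation only, a minimal node could look like the sketch below; it is not part of the patch above, and the backlight phandle and any other simple-panel properties are illustrative placeholders.

panel {
	compatible = "lg,acx467akm-7";
	/* optional simple-panel properties, e.g. a backlight phandle */
	backlight = <&backlight>;
};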
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
new file mode 100644
index 000000000000..e57883ccdf2f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
@@ -0,0 +1,12 @@
+OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
+
+Required properties:
+- compatible: shall be "osddisplays,osd070t1718-19ts"
+- power-supply: see simple-panel.txt
+
+Optional properties:
+- backlight: see simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory. No other simple-panel properties than
+the ones specified herein are valid.
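As an illustrative sketch only (the regulator and backlight phandles are placeholders, not taken from the patch above):

panel {
	compatible = "osddisplays,osd070t1718-19ts";
	power-supply = <&vcc_3v3>;
	backlight = <&backlight>;
};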
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
new file mode 100644
index 000000000000..1b5763200cf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
@@ -0,0 +1,18 @@
+Rocktech jh057n00900 5.5" 720x1440 TFT LCD panel
+
+Required properties:
+- compatible: should be "rocktech,jh057n00900"
+- reg: DSI virtual channel of the peripheral
+- reset-gpios: panel reset gpio
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+ &mipi_dsi {
+ panel@0 {
+ compatible = "rocktech,jh057n00900";
+ reg = <0>;
+ backlight = <&backlight>;
+ reset-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
new file mode 100644
index 000000000000..0e7987f1cdb7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/ronbo,rb070d30.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR X11)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/ronbo,rb070d30.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ronbo RB070D30 DSI Display Panel
+
+maintainers:
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ compatible:
+ const: ronbo,rb070d30
+
+ reg:
+ description: MIPI-DSI virtual channel
+
+ power-gpios:
+ description: GPIO used for the power pin
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO used for the reset pin
+ maxItems: 1
+
+ shlr-gpios:
+ description: GPIO used for the shlr pin (horizontal flip)
+ maxItems: 1
+
+ updn-gpios:
+ description: GPIO used for the updn pin (vertical flip)
+ maxItems: 1
+
+ vcc-lcd-supply:
+ description: Power regulator
+
+ backlight:
+ description: Backlight used by the panel
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+
+required:
+ - compatible
+ - power-gpios
+ - reg
+ - reset-gpios
+ - shlr-gpios
+ - updn-gpios
+ - vcc-lcd-supply
+
+additionalProperties: false
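The schema above ships without an example; a node satisfying it could look like the following sketch, where all phandles, GPIO specifiers and the DSI channel number are placeholders:

panel@0 {
	compatible = "ronbo,rb070d30";
	reg = <0>;
	power-gpios = <&pio 3 23 GPIO_ACTIVE_HIGH>;
	reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>;
	shlr-gpios = <&pio 3 25 GPIO_ACTIVE_HIGH>;
	updn-gpios = <&pio 3 26 GPIO_ACTIVE_HIGH>;
	vcc-lcd-supply = <&reg_dldo2>;
	backlight = <&backlight>;
};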
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
index ed34253d9fb1..898e06ecf4ef 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td028ttec1.txt
@@ -6,6 +6,7 @@ Required properties:
Optional properties:
- label: a symbolic name for the panel
+- backlight: phandle of the backlight device
Required nodes:
- Video port for DPI input
@@ -21,6 +22,7 @@ lcd-panel: td028ttec1@0 {
spi-cpha;
label = "lcd";
+ backlight = <&backlight>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt
new file mode 100644
index 000000000000..d1ad31bca8d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.txt
@@ -0,0 +1,72 @@
+Rockchip specific extensions for rk3066 HDMI
+============================================
+
+Required properties:
+- compatible:
+ "rockchip,rk3066-hdmi";
+- reg:
+ Physical base address and length of the controller's registers.
+- clocks, clock-names:
+ Phandle to HDMI controller clock, name should be "hclk".
+- interrupts:
+ HDMI interrupt number.
+- power-domains:
+ Phandle to the RK3066_PD_VIO power domain.
+- rockchip,grf:
+ This SoC uses GRF regs to switch the HDMI TX input between vop0 and vop1.
+- ports:
+ Contains one port node with two endpoints, numbered 0 and 1,
+ connected respectively to vop0 and vop1.
+ Contains one port node with one endpoint
+ connected to a hdmi-connector node.
+- pinctrl-0, pinctrl-names:
+ Switch the iomux for the HPD/I2C pins to HDMI function.
+
+Example:
+ hdmi: hdmi@10116000 {
+ compatible = "rockchip,rk3066-hdmi";
+ reg = <0x10116000 0x2000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_HDMI>;
+ clock-names = "hclk";
+ power-domains = <&power RK3066_PD_VIO>;
+ rockchip,grf = <&grf>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hdmi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ hdmi_in_vop0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vop0_out_hdmi>;
+ };
+ hdmi_in_vop1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vop1_out_hdmi>;
+ };
+ };
+ hdmi_out: port@1 {
+ reg = <1>;
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+ };
+ };
+ };
+
+&pinctrl {
+ hdmi {
+ hdmi_hpd: hdmi-hpd {
+ rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
+ };
+ hdmii2c_xfer: hdmii2c-xfer {
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
+ <0 RK_PA2 1 &pcfg_pull_none>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt b/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt
deleted file mode 100644
index d693b8dc9a62..000000000000
--- a/Documentation/devicetree/bindings/display/simple-framebuffer-sunxi.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Sunxi specific Simple Framebuffer bindings
-
-This binding documents sunxi specific extensions to the simple-framebuffer
-bindings. The sunxi simplefb u-boot code relies on the devicetree containing
-pre-populated simplefb nodes.
-
-These extensions are intended so that u-boot can select the right node based
-on which pipeline is being used. As such they are solely intended for
-firmware / bootloader use, and the OS should ignore them.
-
-Required properties:
-- compatible: "allwinner,simple-framebuffer"
-- allwinner,pipeline, one of:
- "de_be0-lcd0"
- "de_be1-lcd1"
- "de_be0-lcd0-hdmi"
- "de_be1-lcd1-hdmi"
- "mixer0-lcd0"
- "mixer0-lcd0-hdmi"
- "mixer1-lcd1-hdmi"
- "mixer1-lcd1-tve"
-
-Example:
-
-chosen {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- framebuffer@0 {
- compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
- allwinner,pipeline = "de_be0-lcd0-hdmi";
- clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
- <&ahb_gates 44>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.txt b/Documentation/devicetree/bindings/display/simple-framebuffer.txt
deleted file mode 100644
index 5a9ce511be88..000000000000
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-Simple Framebuffer
-
-A simple frame-buffer describes a frame-buffer setup by firmware or
-the bootloader, with the assumption that the display hardware has already
-been set up to scan out from the memory pointed to by the reg property.
-
-Since simplefb nodes represent runtime information they must be sub-nodes of
-the chosen node (*). Simplefb nodes must be named "framebuffer@<address>".
-
-If the devicetree contains nodes for the display hardware used by a simplefb,
-then the simplefb node must contain a property called "display", which
-contains a phandle pointing to the primary display hw node, so that the OS
-knows which simplefb to disable when handing over control to a driver for the
-real hardware. The bindings for the hw nodes must specify which node is
-considered the primary node.
-
-It is advised to add display# aliases to help the OS determine how to number
-things. If display# aliases are used, then if the simplefb node contains a
-"display" property then the /aliases/display# path must point to the display
-hw node the "display" property points to, otherwise it must point directly
-to the simplefb node.
-
-If a simplefb node represents the preferred console for user interaction,
-then the chosen node's stdout-path property should point to it, or to the
-primary display hw node, as with display# aliases. If display aliases are
-used then it should be set to the alias instead.
-
-It is advised that devicetree files contain pre-filled, disabled framebuffer
-nodes, so that the firmware only needs to update the mode information and
-enable them. This way if e.g. later on support for more display clocks get
-added, the simplefb nodes will already contain this info and the firmware
-does not need to be updated.
-
-If pre-filled framebuffer nodes are used, the firmware may need extra
-information to find the right node. In that case an extra platform specific
-compatible and platform specific properties should be used and documented,
-see e.g. simple-framebuffer-sunxi.txt .
-
-Required properties:
-- compatible: "simple-framebuffer"
-- reg: Should contain the location and size of the framebuffer memory.
-- width: The width of the framebuffer in pixels.
-- height: The height of the framebuffer in pixels.
-- stride: The number of bytes in each line of the framebuffer.
-- format: The format of the framebuffer surface. Valid values are:
- - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
- - a8b8g8r8 (32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r).
-
-Optional properties:
-- clocks : List of clocks used by the framebuffer.
-- *-supply : Any number of regulators used by the framebuffer. These should
- be named according to the names in the device's design.
-
- The above resources are expected to already be configured correctly.
- The OS must ensure they are not modified or disabled while the simple
- framebuffer remains active.
-
-- display : phandle pointing to the primary display hardware node
-
-Example:
-
-aliases {
- display0 = &lcdc0;
-}
-
-chosen {
- framebuffer0: framebuffer@1d385000 {
- compatible = "simple-framebuffer";
- reg = <0x1d385000 (1600 * 1200 * 2)>;
- width = <1600>;
- height = <1200>;
- stride = <(1600 * 2)>;
- format = "r5g6b5";
- clocks = <&ahb_gates 36>, <&ahb_gates 43>, <&ahb_gates 44>;
- lcd-supply = <&reg_dc1sw>;
- display = <&lcdc0>;
- };
- stdout-path = "display0";
-};
-
-soc@1c00000 {
- lcdc0: lcdc@1c0c000 {
- compatible = "allwinner,sun4i-a10-lcdc";
- ...
- };
-};
-
-
-*) Older devicetree files may have a compatible = "simple-framebuffer" node
-in a different place, operating systems must first enumerate any compatible
-nodes found under chosen and then check for other compatible nodes.
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
new file mode 100644
index 000000000000..b052d76cf8b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/simple-framebuffer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple Framebuffer Device Tree Bindings
+
+maintainers:
+ - Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ - Hans de Goede <hdegoede@redhat.com>
+
+description: |+
+ A simple frame-buffer describes a frame-buffer setup by firmware or
+ the bootloader, with the assumption that the display hardware has
+ already been set up to scan out from the memory pointed to by the
+ reg property.
+
+ Since simplefb nodes represent runtime information they must be
+ sub-nodes of the chosen node (*). Simplefb nodes must be named
+ framebuffer@<address>.
+
+ If the devicetree contains nodes for the display hardware used by a
+ simplefb, then the simplefb node must contain a property called
+ display, which contains a phandle pointing to the primary display
+ hw node, so that the OS knows which simplefb to disable when handing
+ over control to a driver for the real hardware. The bindings for the
+ hw nodes must specify which node is considered the primary node.
+
+ It is advised to add display# aliases to help the OS determine how
+ to number things. If display# aliases are used, then if the simplefb
+ node contains a display property then the /aliases/display# path
+ must point to the display hw node the display property points to,
+ otherwise it must point directly to the simplefb node.
+
+ If a simplefb node represents the preferred console for user
+ interaction, then the chosen node stdout-path property should point
+ to it, or to the primary display hw node, as with display#
+ aliases. If display aliases are used then it should be set to the
+ alias instead.
+
+ It is advised that devicetree files contain pre-filled, disabled
+ framebuffer nodes, so that the firmware only needs to update the
+ mode information and enable them. This way if e.g. later on support
+ for more display clocks get added, the simplefb nodes will already
+ contain this info and the firmware does not need to be updated.
+
+ If pre-filled framebuffer nodes are used, the firmware may need
+ extra information to find the right node. In that case an extra
+ platform specific compatible and platform specific properties should
+ be used and documented.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - allwinner,simple-framebuffer
+ - amlogic,simple-framebuffer
+ - const: simple-framebuffer
+
+ reg:
+ description: Location and size of the framebuffer memory
+
+ clocks:
+ description: List of clocks used by the framebuffer.
+
+ power-domains:
+ description: List of power domains used by the framebuffer.
+
+ width:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Width of the framebuffer in pixels
+
+ height:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Height of the framebuffer in pixels
+
+ stride:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Number of bytes of a line in the framebuffer
+
+ format:
+ description: >
+ Format of the framebuffer:
+ * `a8b8g8r8` - 32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r
+ * `r5g6b5` - 16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b
+ enum:
+ - a8b8g8r8
+ - r5g6b5
+
+ display:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Primary display hardware node
+
+ allwinner,pipeline:
+ description: Pipeline used by the framebuffer on Allwinner SoCs
+ enum:
+ - de_be0-lcd0
+ - de_be0-lcd0-hdmi
+ - de_be0-lcd0-tve0
+ - de_be1-lcd0
+ - de_be1-lcd1-hdmi
+ - de_fe0-de_be0-lcd0
+ - de_fe0-de_be0-lcd0-hdmi
+ - de_fe0-de_be0-lcd0-tve0
+ - mixer0-lcd0
+ - mixer0-lcd0-hdmi
+ - mixer1-lcd1-hdmi
+ - mixer1-lcd1-tve
+
+ amlogic,pipeline:
+ description: Pipeline used by the framebuffer on Amlogic SoCs
+ enum:
+ - vpu-cvbs
+ - vpu-hdmi
+
+patternProperties:
+ "^[a-zA-Z0-9-]+-supply$":
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Regulators used by the framebuffer. These should be named
+ according to the names in the device design.
+
+required:
+ # The binding requires also reg, width, height, stride and format,
+ # but usually they will be filled by the bootloader.
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ aliases {
+ display0 = &lcdc0;
+ };
+
+ chosen {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ stdout-path = "display0";
+ framebuffer0: framebuffer@1d385000 {
+ compatible = "simple-framebuffer";
+ reg = <0x1d385000 3840000>;
+ width = <1600>;
+ height = <1200>;
+ stride = <3200>;
+ format = "r5g6b5";
+ clocks = <&ahb_gates 36>, <&ahb_gates 43>, <&ahb_gates 44>;
+ lcd-supply = <&reg_dc1sw>;
+ display = <&lcdc0>;
+ };
+ };
+
+ soc@1c00000 {
+ lcdc0: lcdc@1c0c000 {
+ compatible = "allwinner,sun4i-a10-lcdc";
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/ste,mcde.txt b/Documentation/devicetree/bindings/display/ste,mcde.txt
new file mode 100644
index 000000000000..4c33c692bd5f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ste,mcde.txt
@@ -0,0 +1,104 @@
+ST-Ericsson Multi Channel Display Engine MCDE
+
+The ST-Ericsson MCDE is a display controller with support for compositing
+and displaying several channels of memory-resident graphics data on DSI or
+LCD displays or bridges. It is used in the ST-Ericsson U8500 platform.
+
+Required properties:
+
+- compatible: must be:
+ "ste,mcde"
+- reg: register base for the main MCDE control registers, should be
+ 0x1000 in size
+- interrupts: the interrupt line for the MCDE
+- epod-supply: a phandle to the EPOD regulator
+- vana-supply: a phandle to the analog voltage regulator
+- clocks: an array of the MCDE clocks in this strict order:
+ MCDECLK (main MCDE clock), LCDCLK (LCD clock), PLLDSI
+ (HDMI clock), DSI0ESCLK (DSI0 energy save clock),
+ DSI1ESCLK (DSI1 energy save clock), DSI2ESCLK (DSI2 energy
+ save clock)
+- clock-names: must be the following array:
+ "mcde", "lcd", "hdmi"
+ to match the required clock inputs above.
+- #address-cells: should be <1> (for the DSI hosts that will be children)
+- #size-cells: should be <1> (for the DSI hosts that will be children)
+- ranges: this should always be stated
+
+Required subnodes:
+
+The devicetree must specify subnodes for the DSI host adapters.
+These must have the following characteristics:
+
+- compatible: must be:
+ "ste,mcde-dsi"
+- reg: must specify the register range for the DSI host
+- vana-supply: phandle to the VANA voltage regulator
+- clocks: phandles to the high speed and low power (energy save) clocks.
+ The high speed clock is not present on the third (dsi2) block, so that
+ block should only have the "lp" clock
+- clock-names: "hs" for the high speed clock and "lp" for the low power
+ (energy save) clock
+- #address-cells: should be <1>
+- #size-cells: should be <0>
+
+Display panels and bridges will appear as children on the DSI hosts, and
+the displays are connected to the DSI hosts using the common binding
+for video transmitter interfaces; see
+Documentation/devicetree/bindings/media/video-interfaces.txt
+
+If a DSI host is unused (not connected) it will have no children defined.
+
+Example:
+
+mcde@a0350000 {
+ compatible = "ste,mcde";
+ reg = <0xa0350000 0x1000>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ epod-supply = <&db8500_b2r2_mcde_reg>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
+ <&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
+ <&prcmu_clk PRCMU_PLLDSI>; /* HDMI clock */
+ clock-names = "mcde", "lcd", "hdmi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ dsi0: dsi@a0351000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0351000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&ab8500_ldo_aux1_reg>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+
+ };
+ dsi1: dsi@a0352000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0352000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ dsi2: dsi@a0353000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0353000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ /* This DSI port only has the Low Power / Energy Save clock */
+ clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
+ clock-names = "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
index 47cb1d14b690..b38ee732efa9 100644
--- a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
@@ -18,7 +18,6 @@ Required properties for adi,channels sub-node:
Required channel sub-node properties:
- reg: Which channel this node refers to.
- - adi,length-width: Width of the DMA transfer length register.
- adi,source-bus-width,
adi,destination-bus-width: Width of the source or destination bus in bits.
- adi,source-bus-type,
@@ -28,7 +27,8 @@ Required channel sub-node properties:
1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
-Optional channel properties:
+Deprecated optional channel properties:
+ - adi,length-width: Width of the DMA transfer length register.
- adi,cyclic: Must be set if the channel supports hardware cyclic DMA
transfers.
- adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 3c9a57a8443b..9d8bbac27d8b 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -9,6 +9,7 @@ Required properties:
"fsl,imx53-sdma"
"fsl,imx6q-sdma"
"fsl,imx7d-sdma"
+ "fsl,imx8mq-sdma"
The -to variants should be preferred since they allow to determine the
correct ROM script addresses needed for the driver to work without additional
firmware.
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
index 2f35b047f772..245d3063715c 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra210-adma.txt
@@ -4,7 +4,9 @@ The Tegra Audio DMA controller that is used for transferring data
between system memory and the Audio Processing Engine (APE).
Required properties:
-- compatible: Must be "nvidia,tegra210-adma".
+- compatible: Should contain one of the following:
+ - "nvidia,tegra210-adma": for Tegra210
+ - "nvidia,tegra186-adma": for Tegra186 and Tegra194
- reg: Should contain DMA registers location and length. This should be
a single entry that includes all of the per-channel registers in one
contiguous bank.
diff --git a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt b/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
index 5626560a6cfd..8f52206cfd2a 100644
--- a/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
+++ b/Documentation/devicetree/bindings/edac/socfpga-eccmgr.txt
@@ -232,37 +232,152 @@ Example:
};
};
-Stratix10 SoCFPGA ECC Manager
+Stratix10 SoCFPGA ECC Manager (ARM64)
The Stratix10 SoC ECC Manager handles the IRQs for each peripheral
-in a shared register similar to the Arria10. However, ECC requires
-access to registers that can only be read from Secure Monitor with
-SMC calls. Therefore the device tree is slightly different.
+in a shared register similar to the Arria10. However, Stratix10 ECC
+requires access to registers that can only be read from Secure Monitor
+with SMC calls. Therefore the device tree is slightly different. Note
+that only 1 interrupt is sent in Stratix10 because the double bit errors
+are treated as SErrors in ARM64 instead of IRQs in ARM32.
Required Properties:
- compatible : Should be "altr,socfpga-s10-ecc-manager"
-- interrupts : Should be single bit error interrupt, then double bit error
- interrupt.
+- altr,sysmgr-syscon : phandle to Stratix10 System Manager Block
+ containing the ECC manager registers.
+- interrupts : Should be single bit error interrupt.
- interrupt-controller : boolean indicator that ECC Manager is an interrupt controller
- #interrupt-cells : must be set to 2.
+- #address-cells: must be 1
+- #size-cells: must be 1
+- ranges : standard definition, should translate from local addresses
Subcomponents:
SDRAM ECC
Required Properties:
- compatible : Should be "altr,sdram-edac-s10"
-- interrupts : Should be single bit error interrupt, then double bit error
- interrupt, in this order.
+- interrupts : Should be single bit error interrupt.
+
+On-Chip RAM ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-ocram-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent OCRAM node.
+- interrupts : Should be single bit error interrupt.
+
+Ethernet FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-eth-mac-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent Ethernet node.
+- interrupts : Should be single bit error interrupt.
+
+NAND FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-nand-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent NAND node.
+- interrupts : Should be single bit error interrupt.
+
+DMA FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-dma-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent DMA node.
+- interrupts : Should be single bit error interrupt.
+
+USB FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-usb-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent USB node.
+- interrupts : Should be single bit error interrupt.
+
+SDMMC FIFO ECC
+Required Properties:
+- compatible : Should be "altr,socfpga-s10-sdmmc-ecc"
+- reg : Address and size for ECC block registers.
+- altr,ecc-parent : phandle to parent SD/MMC node.
+- interrupts : Should be single bit error interrupt for port A
+ and then single bit error interrupt for port B.
Example:
eccmgr {
compatible = "altr,socfpga-s10-ecc-manager";
- interrupts = <0 15 4>, <0 95 4>;
+ altr,sysmgr-syscon = <&sysmgr>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupts = <0 15 4>;
interrupt-controller;
#interrupt-cells = <2>;
+ ranges;
sdramedac {
compatible = "altr,sdram-edac-s10";
- interrupts = <16 4>, <48 4>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ ocram-ecc@ff8cc000 {
+ compatible = "altr,socfpga-s10-ocram-ecc";
+ reg = <0xff8cc000 0x100>;
+ altr,ecc-parent = <&ocram>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ emac0-rx-ecc@ff8c0000 {
+ compatible = "altr,socfpga-s10-eth-mac-ecc";
+ reg = <0xff8c0000 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ emac0-tx-ecc@ff8c0400 {
+ compatible = "altr,socfpga-s10-eth-mac-ecc";
+ reg = <0xff8c0400 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-buf-ecc@ff8c8000 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8000 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-rd-ecc@ff8c8400 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8400 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ nand-wr-ecc@ff8c8800 {
+ compatible = "altr,socfpga-s10-nand-ecc";
+ reg = <0xff8c8800 0x100>;
+ altr,ecc-parent = <&nand>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dma-ecc@ff8c9000 {
+ compatible = "altr,socfpga-s10-dma-ecc";
+ reg = <0xff8c9000 0x100>;
+ altr,ecc-parent = <&pdma>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ usb0-ecc@ff8c4000 {
+ compatible = "altr,socfpga-s10-usb-ecc";
+ reg = <0xff8c4000 0x100>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sdmmc-ecc@ff8c8c00 {
+ compatible = "altr,socfpga-s10-sdmmc-ecc";
+ reg = <0xff8c8c00 0x100>;
+ altr,ecc-parent = <&mmc>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
index 0e456bbc1213..22aead844d0f 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
@@ -50,6 +50,7 @@ Required properties:
"nxp,se97b" - the fallback is "atmel,24c02",
"renesas,r1ex24002" - the fallback is "atmel,24c02"
+ "renesas,r1ex24016" - the fallback is "atmel,24c16"
"renesas,r1ex24128" - the fallback is "atmel,24c128"
"rohm,br24t01" - the fallback is "atmel,24c01"
diff --git a/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt b/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
new file mode 100644
index 000000000000..b1f9474f36d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/fieldbus/arcx,anybus-controller.txt
@@ -0,0 +1,71 @@
+* Arcx Anybus-S controller
+
+This chip communicates with the SoC over a parallel bus. It is
+expected that its Device Tree node is specified as the child of a node
+corresponding to the parallel bus used for communication.
+
+Required properties:
+--------------------
+
+ - compatible : The following chip-specific string:
+ "arcx,anybus-controller"
+
+ - reg : three areas:
+ index 0: bus memory area where the cpld registers are located.
+ index 1: bus memory area of the first host's dual-port ram.
+ index 2: bus memory area of the second host's dual-port ram.
+
+ - reset-gpios : the GPIO pin connected to the reset line of the controller.
+
+ - interrupts : two interrupts:
+ index 0: interrupt connected to the first host
+ index 1: interrupt connected to the second host
+ Generic interrupt client node bindings are described in
+ interrupt-controller/interrupts.txt
+
+Optional: use of subnodes
+-------------------------
+
+The card connected to a host may need additional properties. These can be
+specified in subnodes to the controller node.
+
+The subnodes are identified by the standard 'reg' property. Which information
+exactly can be specified depends on the bindings for the function driver
+for the subnode.
+
+Required controller node properties when using subnodes:
+- #address-cells: should be one.
+- #size-cells: should be zero.
+
+Required subnode properties:
+- reg: Must contain the host index of the card this subnode describes:
+ <0> for the first host on the controller
+ <1> for the second host on the controller
+ Note that only a single card can be plugged into a host, so the host
+ index uniquely describes the card location.
+
+Example of usage:
+-----------------
+
+This example places the bridge on top of the i.MX WEIM parallel bus, see:
+Documentation/devicetree/bindings/bus/imx-weim.txt
+
+&weim {
+ controller@0,0 {
+ compatible = "arcx,anybus-controller";
+ reg = <0 0 0x100>, <0 0x400000 0x800>, <1 0x400000 0x800>;
+ reset-gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>, <5 IRQ_TYPE_LEVEL_LOW>;
+ /* fsl,weim-cs-timing is a i.MX WEIM bus specific property */
+ fsl,weim-cs-timing = <0x024400b1 0x00001010 0x20081100
+ 0x00000000 0xa0000240 0x00000000>;
+ /* optional subnode for a card plugged into the first host */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ card@0 {
+ reg = <0>;
+ /* card specific properties go here */
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
new file mode 100644
index 000000000000..8cb136c376fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Linaro Ltd.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/firmware/intel-ixp4xx-network-processing-engine.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel IXP4xx Network Processing Engine
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ On the IXP4xx SoCs, the Network Processing Engine (NPE) is a small
+ processor that can load a firmware to perform offloading of networking
+ and crypto tasks. It also manages the MDIO bus to the ethernet PHYs
+ on the IXP4xx platform. All IXP4xx platforms have three NPEs at
+ consecutive memory locations. They are all included in the same
+ device node since they are not independent of each other.
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - const: intel,ixp4xx-network-processing-engine
+
+ reg:
+ minItems: 3
+ maxItems: 3
+ items:
+ - description: NPE0 register range
+ - description: NPE1 register range
+ - description: NPE2 register range
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ npe@c8006000 {
+ compatible = "intel,ixp4xx-network-processing-engine";
+ reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
index 614bac55df86..a4fe136be2ba 100644
--- a/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
+++ b/Documentation/devicetree/bindings/firmware/xilinx/xlnx,zynqmp-firmware.txt
@@ -17,53 +17,6 @@ Required properties:
- "smc" : SMC #0, following the SMCCC
- "hvc" : HVC #0, following the SMCCC
---------------------------------------------------------------------------
-Device Tree Clock bindings for the Zynq Ultrascale+ MPSoC controlled using
-Zynq MPSoC firmware interface
---------------------------------------------------------------------------
-The clock controller is a h/w block of Zynq Ultrascale+ MPSoC clock
-tree. It reads required input clock frequencies from the devicetree and acts
-as clock provider for all clock consumers of PS clocks.
-
-See clock_bindings.txt for more information on the generic clock bindings.
-
-Required properties:
- - #clock-cells: Must be 1
- - compatible: Must contain: "xlnx,zynqmp-clk"
- - clocks: List of clock specifiers which are external input
- clocks to the given clock controller. Please refer
- the next section to find the input clocks for a
- given controller.
- - clock-names: List of clock names which are exteral input clocks
- to the given clock controller. Please refer to the
- clock bindings for more details.
-
-Input clocks for zynqmp Ultrascale+ clock controller:
-
-The Zynq UltraScale+ MPSoC has one primary and four alternative reference clock
-inputs. These required clock inputs are:
- - pss_ref_clk (PS reference clock)
- - video_clk (reference clock for video system )
- - pss_alt_ref_clk (alternative PS reference clock)
- - aux_ref_clk
- - gt_crx_ref_clk (transceiver reference clock)
-
-The following strings are optional parameters to the 'clock-names' property in
-order to provide an optional (E)MIO clock source:
- - swdt0_ext_clk
- - swdt1_ext_clk
- - gem0_emio_clk
- - gem1_emio_clk
- - gem2_emio_clk
- - gem3_emio_clk
- - mio_clk_XX # with XX = 00..77
- - mio_clk_50_or_51 #for the mux clock to gem tsu from 50 or 51
-
-
-Output clocks are registered based on clock information received
-from firmware. Output clocks indexes are mentioned in
-include/dt-bindings/clock/xlnx,zynqmp-clk.h.
-
-------
Example
-------
@@ -72,11 +25,6 @@ firmware {
zynqmp_firmware: zynqmp-firmware {
compatible = "xlnx,zynqmp-firmware";
method = "smc";
- zynqmp_clk: clock-controller {
- #clock-cells = <1>;
- compatible = "xlnx,zynqmp-clk";
- clocks = <&pss_ref_clk>, <&video_clk>, <&pss_alt_ref_clk>, <&aux_ref_clk>, <&gt_crx_ref_clk>;
- clock-names = "pss_ref_clk", "video_clk", "pss_alt_ref_clk","aux_ref_clk", "gt_crx_ref_clk";
- };
+ ...
};
};
diff --git a/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
new file mode 100644
index 000000000000..3052bf619dd5
--- /dev/null
+++ b/Documentation/devicetree/bindings/fpga/xlnx,zynqmp-pcap-fpga.txt
@@ -0,0 +1,25 @@
+Devicetree bindings for Zynq Ultrascale MPSoC FPGA Manager.
+The ZynqMP SoC uses the PCAP (Processor configuration Port) to configure the
+Programmable Logic (PL). The configuration uses the firmware interface.
+
+Required properties:
+- compatible: should contain "xlnx,zynqmp-pcap-fpga"
+
+Example for full FPGA configuration:
+
+ fpga-region0 {
+ compatible = "fpga-region";
+ fpga-mgr = <&zynqmp_pcap>;
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ };
+
+ firmware {
+ zynqmp_firmware: zynqmp-firmware {
+ compatible = "xlnx,zynqmp-firmware";
+ method = "smc";
+ zynqmp_pcap: pcap {
+ compatible = "xlnx,zynqmp-pcap-fpga";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gnss/u-blox.txt b/Documentation/devicetree/bindings/gnss/u-blox.txt
index e475659cb85f..7cdefd058fe0 100644
--- a/Documentation/devicetree/bindings/gnss/u-blox.txt
+++ b/Documentation/devicetree/bindings/gnss/u-blox.txt
@@ -9,6 +9,7 @@ Required properties:
- compatible : Must be one of
+ "u-blox,neo-6m"
"u-blox,neo-8"
"u-blox,neo-m8"
diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
index fb144e2b6522..dab537c20def 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible: Has to contain one of the following:
+ nxp,pca6416
nxp,pca9505
nxp,pca9534
nxp,pca9535
@@ -30,6 +31,7 @@ Required properties:
ti,tca6424
ti,tca9539
ti,tca9554
+ onnn,cat9554
onnn,pca9654
exar,xra1202
- gpio-controller: if used as gpio expander.
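As a sketch of one of the newly added compatibles (the I2C address is a placeholder; gpio-controller and the usual two gpio cells follow the existing pca953x conventions):

gpio-expander@20 {
	compatible = "nxp,pca6416";
	reg = <0x20>;
	gpio-controller;
	#gpio-cells = <2>;
};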
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.txt b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.txt
new file mode 100644
index 000000000000..b8be9dbc68b4
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.txt
@@ -0,0 +1,92 @@
+ARM Mali Bifrost GPU
+====================
+
+Required properties:
+
+- compatible :
+ * Since the Mali Bifrost GPU model/revision is fully discoverable by reading
+ certain registers, the compatible must contain the following:
+ + "arm,mali-bifrost"
+ * which must be preceded by one of the following vendor specifics:
+ + "amlogic,meson-g12a-mali"
+
+- reg : Physical base address of the device and length of the register area.
+
+- interrupts : Contains the three IRQ lines required by Mali Bifrost devices,
+ in the following defined order.
+
+- interrupt-names : Contains the names of IRQ resources in this exact defined
+ order: "job", "mmu", "gpu".
+
+Optional properties:
+
+- clocks : Phandle to clock for the Mali Bifrost device.
+
+- mali-supply : Phandle to regulator for the Mali device. Refer to
+ Documentation/devicetree/bindings/regulator/regulator.txt for details.
+
+- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt
+ for details.
+
+- resets : Phandle of the GPU reset line.
+
+Vendor-specific bindings
+------------------------
+
+The Mali GPU is integrated very differently from one SoC to
+another. In order to accommodate those differences, you have the option
+to specify one more vendor-specific compatible, among:
+
+- "amlogic,meson-g12a-mali"
+ Required properties:
+ - resets : Should contain phandles of :
+ + GPU reset line
+ + GPU APB glue reset line
+
+Example for a Mali-G31:
+
+gpu@ffe40000 {
+ compatible = "amlogic,meson-g12a-mali", "arm,mali-bifrost";
+ reg = <0xffe40000 0x10000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "job", "mmu", "gpu";
+ clocks = <&clk CLKID_MALI>;
+ mali-supply = <&vdd_gpu>;
+ operating-points-v2 = <&gpu_opp_table>;
+ resets = <&reset RESET_DVALIN_CAPB3>, <&reset RESET_DVALIN>;
+};
+
+gpu_opp_table: opp_table0 {
+ compatible = "operating-points-v2";
+
+ opp@533000000 {
+ opp-hz = /bits/ 64 <533000000>;
+ opp-microvolt = <1250000>;
+ };
+ opp@450000000 {
+ opp-hz = /bits/ 64 <450000000>;
+ opp-microvolt = <1150000>;
+ };
+ opp@400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <1125000>;
+ };
+ opp@350000000 {
+ opp-hz = /bits/ 64 <350000000>;
+ opp-microvolt = <1075000>;
+ };
+ opp@266000000 {
+ opp-hz = /bits/ 64 <266000000>;
+ opp-microvolt = <1025000>;
+ };
+ opp@160000000 {
+ opp-hz = /bits/ 64 <160000000>;
+ opp-microvolt = <925000>;
+ };
+ opp@100000000 {
+ opp-hz = /bits/ 64 <100000000>;
+ opp-microvolt = <912500>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
index 18a2cde2e5f3..1b1a74129141 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.txt
@@ -37,6 +37,20 @@ Optional properties:
- operating-points-v2 : Refer to Documentation/devicetree/bindings/opp/opp.txt
for details.
+- resets : Phandle of the GPU reset line.
+
+Vendor-specific bindings
+------------------------
+
+The Mali GPU is integrated very differently from one SoC to
+another. In order to accommodate those differences, you have the option
+to specify one more vendor-specific compatible, among:
+
+- "amlogic,meson-gxm-mali"
+ Required properties:
+ - resets : Should contain phandles of :
+ + GPU reset line
+ + GPU APB glue reset line
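For the "amlogic,meson-gxm-mali" case above, an illustrative fragment could look as follows; the register range, reset indices and the Midgard core model are placeholders, and the usual required properties such as interrupts are omitted for brevity:

gpu: gpu@c0000 {
	compatible = "amlogic,meson-gxm-mali", "arm,mali-t820";
	reg = <0xc0000 0x40000>;
	/* first entry: GPU reset line, second entry: GPU APB glue reset line */
	resets = <&reset 20>, <&reset 21>;
};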
Example for a Mali-T760:
diff --git a/Documentation/devicetree/bindings/gpu/aspeed-gfx.txt b/Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
new file mode 100644
index 000000000000..958bdf962339
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
@@ -0,0 +1,41 @@
+Device tree configuration for the GFX display device on the ASPEED SoCs
+
+Required properties:
+ - compatible
+ * Must be one of the following:
+ + aspeed,ast2500-gfx
+ + aspeed,ast2400-gfx
+ * In addition, the ASPEED pinctrl bindings require the 'syscon' property to
+ be present
+
+ - reg: Physical base address and length of the GFX registers
+
+ - interrupts: interrupt number for the GFX device
+
+ - clocks: clock number used to generate the pixel clock
+
+ - resets: reset line that must be released to use the GFX device
+
+ - memory-region:
+ Phandle to a memory region to allocate from, as defined in
+ Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+
+Example:
+
+gfx: display@1e6e6000 {
+ compatible = "aspeed,ast2500-gfx", "syscon";
+ reg = <0x1e6e6000 0x1000>;
+ reg-io-width = <4>;
+ clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
+ resets = <&syscon ASPEED_RESET_CRT1>;
+ interrupts = <0x19>;
+ memory-region = <&gfx_memory>;
+};
+
+gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+};
diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
index c907aa8dd755..b2df82b44625 100644
--- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
+++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
@@ -6,15 +6,20 @@ For V3D 2.x, see brcm,bcm-vc4.txt.
Required properties:
- compatible: Should be "brcm,7268-v3d" or "brcm,7278-v3d"
- reg: Physical base addresses and lengths of the register areas
-- reg-names: Names for the register areas. The "hub", "bridge", and "core0"
+- reg-names: Names for the register areas. The "hub" and "core0"
register areas are always required. The "gca" register area
- is required if the GCA cache controller is present.
+ is required if the GCA cache controller is present. The
+ "bridge" register area is required if an external reset
+ controller is not present.
- interrupts: The interrupt numbers. The first interrupt is for the hub,
- while the following interrupts are for the cores.
+ while the following interrupts are separate interrupt lines
+ for the cores (if they don't share the hub's interrupt).
See bindings/interrupt-controller/interrupts.txt
Optional properties:
- clocks: The core clock the unit runs on
+- resets: The reset line for v3d, if not using a mapping of the bridge
+ See bindings/reset/reset.txt
v3d {
compatible = "brcm,7268-v3d";
diff --git a/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..ffb79ccf51ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
@@ -0,0 +1,26 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the hardware monitor
+portion of the driver.
+
+This binding must be part of the Lochnagar MFD binding:
+ [4] ../mfd/cirrus,lochnagar.txt
+
+Required properties:
+
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-hwmon"
+
+Example:
+
+lochnagar-hwmon {
+ compatible = "cirrus,lochnagar2-hwmon";
+};
diff --git a/Documentation/devicetree/bindings/hwmon/g762.txt b/Documentation/devicetree/bindings/hwmon/g762.txt
index 25cc6d8ee575..6d154c4923de 100644
--- a/Documentation/devicetree/bindings/hwmon/g762.txt
+++ b/Documentation/devicetree/bindings/hwmon/g762.txt
@@ -21,7 +21,7 @@ If an optional property is not set in .dts file, then current value is kept
unmodified (e.g. u-boot installed value).
Additional information on operational parameters for the device is available
-in Documentation/hwmon/g762. A detailed datasheet for the device is available
+in Documentation/hwmon/g762.rst. A detailed datasheet for the device is available
at http://natisbad.org/NAS/refs/GMT_EDS-762_763-080710-0.2.pdf.
Example g762 node:
diff --git a/Documentation/devicetree/bindings/hwmon/lm75.txt b/Documentation/devicetree/bindings/hwmon/lm75.txt
index 12d8cf7cf592..586b5ed70be7 100644
--- a/Documentation/devicetree/bindings/hwmon/lm75.txt
+++ b/Documentation/devicetree/bindings/hwmon/lm75.txt
@@ -25,6 +25,7 @@ Required properties:
"ti,tmp175",
"ti,tmp275",
"ti,tmp75",
+ "ti,tmp75b",
"ti,tmp75c",
- reg: I2C bus address of the device
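A minimal node using the newly added compatible might look like this sketch (the bus address is a placeholder):

temperature-sensor@48 {
	compatible = "ti,tmp75b";
	reg = <0x48>;
};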
diff --git a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
index 49ca5d83ed13..41b76762953a 100644
--- a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
+++ b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@ -7,13 +7,20 @@ Required properties:
which correspond to thermal cooling states
Optional properties:
-- fan-supply : phandle to the regulator that provides power to the fan
+- fan-supply : phandle to the regulator that provides power to the fan
+- interrupts : This contains a single interrupt specifier which
+ describes the tachometer output of the fan as an
+ interrupt source. The output signal must generate a
+ defined number of interrupts per fan revolution, which
+ requires self-resetting edge interrupts.
+ See interrupt-controller/interrupts.txt for the format.
+- pulses-per-revolution : define the tachometer pulses per fan revolution as
+ an integer (default is 2 interrupts per revolution).
+ The value must be greater than zero.
Example:
fan0: pwm-fan {
compatible = "pwm-fan";
- cooling-min-state = <0>;
- cooling-max-state = <3>;
#cooling-cells = <2>;
pwms = <&pwm 0 10000 0>;
cooling-levels = <0 102 170 230>;
@@ -38,3 +45,13 @@ Example:
};
};
};
+
+Example 2:
+ fan0: pwm-fan {
+ compatible = "pwm-fan";
+ pwms = <&pwm 0 40000 0>;
+ fan-supply = <&reg_fan>;
+ interrupt-parent = <&gpio5>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ pulses-per-revolution = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
index 81f982ccca31..d12cc33cca6c 100644
--- a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
@@ -3,15 +3,12 @@ Broadcom iProc I2C controller
Required properties:
- compatible:
- Must be "brcm,iproc-i2c"
+ Must be "brcm,iproc-i2c" or "brcm,iproc-nic-i2c"
- reg:
Define the base and range of the I/O address space that contain the iProc
I2C controller registers
-- interrupts:
- Should contain the I2C interrupt
-
- clock-frequency:
This is the I2C bus clock. Need to be either 100000 or 400000
@@ -21,6 +18,18 @@ Required properties:
- #size-cells:
Always 0
+Optional properties:
+
+- interrupts:
+ Should contain the I2C interrupt. For certain revisions of the I2C
+ controller, the I2C interrupt is not wired to the interrupt controller. In
+ that case, this property should be left unspecified, and the driver will
+ fall back to polling mode
+
+- brcm,ape-hsls-addr-mask:
+ Required for "brcm,iproc-nic-i2c". Host view of address mask into the
+ 'APE' co-processor. Value must be unsigned, 32-bit
+
Example:
i2c0: i2c@18008000 {
compatible = "brcm,iproc-i2c";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
index 3e4bcc2fb6f7..08be4d3846e5 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-designware.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-designware.txt
@@ -6,12 +6,21 @@ Required properties :
or "mscc,ocelot-i2c" with "snps,designware-i2c" for fallback
- reg : Offset and length of the register set for the device
- interrupts : <IRQ> where IRQ is the interrupt number.
+ - clocks : phandles for the clocks, see the description of clock-names below.
+ The phandle for the "ic_clk" clock is required. The phandle for the "pclk"
+ clock is optional. If a single clock is specified but no clock-name, it is
+ the "ic_clk" clock. If both clocks are listed, the "ic_clk" must be first.
Recommended properties :
- clock-frequency : desired I2C bus clock frequency in Hz.
Optional properties :
+
+ - clock-names : Contains the names of the clocks:
+ "ic_clk", for the core clock used to generate the external I2C clock.
+ "pclk", the interface clock, required for register access.
+
- reg : for "mscc,ocelot-i2c", a second register set to configure the SDA hold
time, named ICPU_CFG:TWI_DELAY in the datasheet.
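For illustration, a node supplying both named clocks could look like the sketch below; the addresses, interrupt number and clock phandles are placeholders:

i2c@f0000000 {
	compatible = "snps,designware-i2c";
	reg = <0xf0000000 0x1000>;
	interrupts = <11>;
	clocks = <&i2c_clk>, <&bus_clk>;
	clock-names = "ic_clk", "pclk";
	clock-frequency = <400000>;
};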
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index ee4c32454198..68f6d73a8b73 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
@@ -12,13 +12,16 @@ Required properties:
"mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623
"mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
"mediatek,mt8173-i2c": for MediaTek MT8173
+ "mediatek,mt8183-i2c": for MediaTek MT8183
+ "mediatek,mt8516-i2c", "mediatek,mt2712-i2c": for MediaTek MT8516
- reg: physical base address of the controller and dma base, length of memory
mapped region.
- interrupts: interrupt number to the cpu.
- clock-div: the fixed value for frequency divider of clock source in i2c
module. Each IC may be different.
- clocks: clock name from clock manager
- - clock-names: Must include "main" and "dma", if enable have-pmic need include
+ - clock-names: Must include "main" and "dma", "arb" is for multi-master that
+ one bus has more than two i2c controllers, if enable have-pmic need include
"pmic" extra.
Optional properties:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-riic.txt b/Documentation/devicetree/bindings/i2c/i2c-riic.txt
index 0bcc4716c319..e26fe3ad86a9 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-riic.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-riic.txt
@@ -1,7 +1,10 @@
Device tree configuration for Renesas RIIC driver
Required properties:
-- compatible : "renesas,riic-<soctype>". "renesas,riic-rz" as fallback
+- compatible :
+ "renesas,riic-r7s72100" if the device is a part of a R7S72100 SoC.
+ "renesas,riic-r7s9210" if the device is a part of a R7S9210 SoC.
+ "renesas,riic-rz" for a generic RZ/A compatible device.
- reg : address start and address range size of device
- interrupts : 8 interrupts (TEI, RI, TI, SPI, STI, NAKI, ALI, TMOI)
- clock-frequency : frequency of bus clock in Hz
diff --git a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt
index 69240e189b01..f334738f7a35 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-stm32.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-stm32.txt
@@ -1,11 +1,11 @@
* I2C controller embedded in STMicroelectronics STM32 I2C platform
-Required properties :
-- compatible : Must be one of the following
+Required properties:
+- compatible: Must be one of the following
- "st,stm32f4-i2c"
- "st,stm32f7-i2c"
-- reg : Offset and length of the register set for the device
-- interrupts : Must contain the interrupt id for I2C event and then the
+- reg: Offset and length of the register set for the device
+- interrupts: Must contain the interrupt id for I2C event and then the
interrupt id for I2C error.
- resets: Must contain the phandle to the reset controller.
- clocks: Must contain the input clock of the I2C instance.
@@ -14,25 +14,26 @@ Required properties :
- #address-cells = <1>;
- #size-cells = <0>;
-Optional properties :
-- clock-frequency : Desired I2C bus clock frequency in Hz. If not specified,
+Optional properties:
+- clock-frequency: Desired I2C bus clock frequency in Hz. If not specified,
the default 100 kHz frequency will be used.
For STM32F4 SoC Standard-mode and Fast-mode are supported, possible values are
100000 and 400000.
- For STM32F7 SoC, Standard-mode, Fast-mode and Fast-mode Plus are supported,
- possible values are 100000, 400000 and 1000000.
-- i2c-scl-rising-time-ns : Only for STM32F7, I2C SCL Rising time for the board
- (default: 25)
-- i2c-scl-falling-time-ns : Only for STM32F7, I2C SCL Falling time for the board
- (default: 10)
+ For STM32F7, STM32H7 and STM32MP1 SoCs, Standard-mode, Fast-mode and Fast-mode
+ Plus are supported, possible values are 100000, 400000 and 1000000.
+- i2c-scl-rising-time-ns: I2C SCL Rising time for the board (default: 25)
+ For STM32F7, STM32H7 and STM32MP1 only.
+- i2c-scl-falling-time-ns: I2C SCL Falling time for the board (default: 10)
+ For STM32F7, STM32H7 and STM32MP1 only.
I2C Timings are derived from these 2 values
-- st,syscfg-fmp: Only for STM32F7, use to set Fast Mode Plus bit within SYSCFG
- whether Fast Mode Plus speed is selected by slave.
- 1st cell : phandle to syscfg
- 2nd cell : register offset within SYSCFG
- 3rd cell : register bitmask for FMP bit
+- st,syscfg-fmp: Use to set Fast Mode Plus bit within SYSCFG when Fast Mode
+ Plus speed is selected by slave.
+ 1st cell: phandle to syscfg
+ 2nd cell: register offset within SYSCFG
+ 3rd cell: register bitmask for FMP bit
+ For STM32F7, STM32H7 and STM32MP1 only.
-Example :
+Example:
i2c@40005400 {
compatible = "st,stm32f4-i2c";
diff --git a/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt
new file mode 100644
index 000000000000..eb76a02e2a82
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/kionix,kxcjk1013.txt
@@ -0,0 +1,17 @@
+Kionix KXCJK-1013 Accelerometer device tree bindings
+
+Required properties:
+
+- compatible: Must be one of:
+ "kionix,kxcjk1013"
+ "kionix,kxcj91008"
+ "kionix,kxtj21009"
+ "kionix,kxtf9"
+- reg: i2c slave address
+
+Example:
+
+kxtf9@f {
+ compatible = "kionix,kxtf9";
+ reg = <0x0F>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
index d7b6241ca881..d8652460198e 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
@@ -7,6 +7,7 @@ Required properties for the AD7606:
* "adi,ad7606-8"
* "adi,ad7606-6"
* "adi,ad7606-4"
+ * "adi,ad7616"
- reg: SPI chip select number for the device
- spi-max-frequency: Max SPI frequency to use
see: Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7780.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.txt
new file mode 100644
index 000000000000..440e52555349
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7780.txt
@@ -0,0 +1,48 @@
+* Analog Devices AD7170/AD7171/AD7780/AD7781
+
+Data sheets:
+
+- AD7170:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/AD7170.pdf
+- AD7171:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/AD7171.pdf
+- AD7780:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/ad7780.pdf
+- AD7781:
+ * https://www.analog.com/media/en/technical-documentation/data-sheets/AD7781.pdf
+
+Required properties:
+
+- compatible: should be one of
+ * "adi,ad7170"
+ * "adi,ad7171"
+ * "adi,ad7780"
+ * "adi,ad7781"
+- reg: spi chip select number for the device
+- vref-supply: the regulator supply for the ADC reference voltage
+
+Optional properties:
+
+- powerdown-gpios: must be the device tree identifier of the PDRST pin. If
+ specified, it will be asserted during driver probe. As the
+ line is active high, it should be marked GPIO_ACTIVE_HIGH.
+- adi,gain-gpios: must be the device tree identifier of the GAIN pin. Only for
+ the ad778x chips. If specified, it will be asserted during
+ driver probe. As the line is active low, it should be marked
+ GPIO_ACTIVE_LOW.
+- adi,filter-gpios: must be the device tree identifier of the FILTER pin. Only
+ for the ad778x chips. If specified, it will be asserted
+ during driver probe. As the line is active low, it should be
+ marked GPIO_ACTIVE_LOW.
+
+Example:
+
+adc@0 {
+ compatible = "adi,ad7780";
+ reg = <0>;
+ vref-supply = <&vdd_supply>;
+
+ powerdown-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>;
+ adi,gain-gpios = <&gpio 5 GPIO_ACTIVE_LOW>;
+ adi,filter-gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
index 75c775954102..d57e9df25f4f 100644
--- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt
@@ -9,6 +9,7 @@ Required properties:
- "amlogic,meson-gxl-saradc" for GXL
- "amlogic,meson-gxm-saradc" for GXM
- "amlogic,meson-axg-saradc" for AXG
+ - "amlogic,meson-g12a-saradc" for AXG
along with the generic "amlogic,meson-saradc"
- reg: the physical base address and length of the registers
- interrupts: the interrupt indicating end of sampling
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
deleted file mode 100644
index 7222328a3d0d..000000000000
--- a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-* AVIA HX711 ADC chip for weight cells
- Bit-banging driver
-
-Required properties:
- - compatible: Should be "avia,hx711"
- - sck-gpios: Definition of the GPIO for the clock
- - dout-gpios: Definition of the GPIO for data-out
- See Documentation/devicetree/bindings/gpio/gpio.txt
- - avdd-supply: Definition of the regulator used as analog supply
-
-Optional properties:
- - clock-frequency: Frequency of PD_SCK in Hz
- Minimum value allowed is 10 kHz because of maximum
- high time of 50 microseconds.
-
-Example:
-weight {
- compatible = "avia,hx711";
- sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
- dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
- avdd-suppy = <&avdd>;
- clock-frequency = <100000>;
-};
-
diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
new file mode 100644
index 000000000000..8a4100ceeaf2
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/iio/adc/avia-hx711.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: AVIA HX711 ADC chip for weight cells
+
+maintainers:
+ - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+ Bit-banging driver using two GPIOs:
+ - sck-gpio gives a clock to the sensor with 24 cycles for data retrieval
+ and up to 3 cycles for selection of the input channel and gain for the
+ next measurement
+ - dout-gpio carries the data the sensor sends in response to the clock
+
+ Specifications about the driver can be found at:
+ http://www.aviaic.com/ENProducts.aspx
+
+properties:
+ compatible:
+ enum:
+ - avia,hx711
+
+ sck-gpios:
+ description:
+ Definition of the GPIO for the clock (output). In the datasheet it is
+ named PD_SCK
+ maxItems: 1
+
+ dout-gpios:
+ description:
+ Definition of the GPIO for the data-out sent by the sensor in
+ response to the clock (input).
+ See Documentation/devicetree/bindings/gpio/gpio.txt for information
+ on how to specify a consumer gpio.
+ maxItems: 1
+
+ avdd-supply:
+ description:
+ Definition of the regulator used as analog supply
+ maxItems: 1
+
+ clock-frequency:
+ minimum: 20000
+ maximum: 2500000
+ default: 400000
+
+required:
+ - compatible
+ - sck-gpios
+ - dout-gpios
+ - avdd-supply
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ weight {
+ compatible = "avia,hx711";
+ sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
+ dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
+ avdd-supply = <&avdd>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/imx7d-adc.txt b/Documentation/devicetree/bindings/iio/adc/imx7d-adc.txt
index 5c184b940669..f1f3a552459b 100644
--- a/Documentation/devicetree/bindings/iio/adc/imx7d-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/imx7d-adc.txt
@@ -10,6 +10,7 @@ Required properties:
- clocks: The root clock of the ADC controller
- clock-names: Must contain "adc", matching entry in the clocks property
- vref-supply: The regulator supply ADC reference voltage
+- #io-channel-cells: Must be 1 as per ../iio-bindings.txt
Example:
adc1: adc@30610000 {
@@ -19,4 +20,5 @@ adc1: adc@30610000 {
clocks = <&clks IMX7D_ADC_ROOT_CLK>;
clock-names = "adc";
vref-supply = <&reg_vcc_3v3_mcu>;
+ #io-channel-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt
index b3629d3a9adf..3a1bc669bd51 100644
--- a/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt
@@ -6,6 +6,10 @@ Required properties:
region.
- interrupts: The ADC interrupt
+Optional:
+ - vref-supply: The regulator supply for the ADC reference voltage. Optional
+   for legacy reasons, but highly encouraged to be used in new device trees.
+
Example:
adc@40048000 {
@@ -13,4 +17,5 @@ Example:
reg = <0x40048000 0x1000>;
interrupt-parent = <&mic>;
interrupts = <39 0>;
+ vref-supply = <&vcc>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt
index c81993f8d8c3..c8787688122a 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.txt
@@ -13,6 +13,7 @@ VADC node:
Definition: Should contain "qcom,spmi-vadc".
Should contain "qcom,spmi-adc5" for PMIC5 ADC driver.
Should contain "qcom,spmi-adc-rev2" for PMIC rev2 ADC driver.
+ Should contain "qcom,pms405-adc" for PMS405 PMIC
- reg:
Usage: required
diff --git a/Documentation/devicetree/bindings/iio/adc/ti-ads8344.txt b/Documentation/devicetree/bindings/iio/adc/ti-ads8344.txt
new file mode 100644
index 000000000000..e47c3759a82b
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/ti-ads8344.txt
@@ -0,0 +1,19 @@
+* Texas Instruments ADS8344 A/DC chip
+
+Required properties:
+ - compatible: Must be "ti,ads8344"
+ - reg: SPI chip select number for the device
+ - vref-supply: phandle to a regulator node that supplies the
+ reference voltage
+
+Recommended properties:
+ - spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+adc@0 {
+ compatible = "ti,ads8344";
+ reg = <0>;
+ vref-supply = <&refin_supply>;
+ spi-max-frequency = <10000000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt
index 7b5f06f324c8..c52ea2126eaa 100644
--- a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt
+++ b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt
@@ -1,7 +1,13 @@
* Plantower PMS7003 particulate matter sensor
Required properties:
-- compatible: must be "plantower,pms7003"
+- compatible: must be one of:
+ "plantower,pms1003"
+ "plantower,pms3003"
+ "plantower,pms5003"
+ "plantower,pms6003"
+ "plantower,pms7003"
+ "plantower,pmsa003"
- vcc-supply: phandle to the regulator that provides power to the sensor
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt b/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt
new file mode 100644
index 000000000000..78e18a1e9c1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/bmg160.txt
@@ -0,0 +1,20 @@
+* Bosch BMG160 triaxial rotation sensor (gyroscope)
+
+Required properties:
+
+ - compatible : should be "bosch,bmg160" or "bosch,bmi055_gyro"
+ - reg : the I2C address of the sensor (0x69)
+
+Optional properties:
+
+ - interrupts : interrupt mapping for GPIO IRQ, it should be configured with
+ flags IRQ_TYPE_EDGE_RISING
+
+Example:
+
+bmg160@69 {
+ compatible = "bosch,bmg160";
+ reg = <0x69>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <18 (IRQ_TYPE_EDGE_RISING)>;
+};
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt b/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt
new file mode 100644
index 000000000000..465e104bbf14
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt
@@ -0,0 +1,31 @@
+* NXP FXAS21002C Gyroscope device tree bindings
+
+http://www.nxp.com/products/sensors/gyroscopes/3-axis-digital-gyroscope:FXAS21002C
+
+Required properties:
+ - compatible : should be "nxp,fxas21002c"
+ - reg : the I2C address of the sensor or SPI chip select number for the
+ device.
+ - vdd-supply: phandle to the regulator that provides power to the sensor.
+ - vddio-supply: phandle to the regulator that provides power to the bus.
+
+Optional properties:
+ - reset-gpios : gpio used to reset the device, see gpio/gpio.txt
+ - interrupts : the device supports 2 interrupts, INT1 and INT2,
+ the interrupts can be triggered on rising or falling edges.
+ See interrupt-controller/interrupts.txt
+ - interrupt-names: should contain "INT1" or "INT2", the gyroscope interrupt
+ line in use.
+ - drive-open-drain: the interrupt/data ready line will be configured
+ as open drain, which is useful if several sensors share
+ the same interrupt line. This is a boolean property.
+ (This binding is taken from pinctrl/pinctrl-bindings.txt)
+
+Example:
+
+gyroscope@20 {
+ compatible = "nxp,fxas21002c";
+ reg = <0x20>;
+ vdd-supply = <&reg_peri_3p15v>;
+ vddio-supply = <&reg_peri_3p15v>;
+};
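+
+For illustration only, a hypothetical variant of the node above that also wires
+up the optional interrupt properties (the GPIO controller phandle and line
+number below are placeholders, not taken from any real board):
+
+gyroscope@20 {
+	compatible = "nxp,fxas21002c";
+	reg = <0x20>;
+	vdd-supply = <&reg_peri_3p15v>;
+	vddio-supply = <&reg_peri_3p15v>;
+	interrupt-parent = <&gpio1>;
+	interrupts = <7 IRQ_TYPE_EDGE_RISING>;
+	interrupt-names = "INT1";
+	/* useful when several sensors share the same interrupt line */
+	drive-open-drain;
+};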
diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt
new file mode 100644
index 000000000000..ed7783f45233
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16480.txt
@@ -0,0 +1,85 @@
+
+Analog Devices ADIS16480 and similar IMUs
+
+Required properties for the ADIS16480:
+
+- compatible: Must be one of
+ * "adi,adis16375"
+ * "adi,adis16480"
+ * "adi,adis16485"
+ * "adi,adis16488"
+ * "adi,adis16495-1"
+ * "adi,adis16495-2"
+ * "adi,adis16495-3"
+ * "adi,adis16497-1"
+ * "adi,adis16497-2"
+ * "adi,adis16497-3"
+- reg: SPI chip select number for the device
+- spi-max-frequency: Max SPI frequency to use
+ see: Documentation/devicetree/bindings/spi/spi-bus.txt
+- spi-cpha: See Documentation/devicetree/bindings/spi/spi-bus.txt
+- spi-cpol: See Documentation/devicetree/bindings/spi/spi-bus.txt
+- interrupts: interrupt mapping for IRQ, accepted values are:
+ * IRQF_TRIGGER_RISING
+ * IRQF_TRIGGER_FALLING
+
+Optional properties:
+
+- interrupt-names: Data ready line selection. Valid values are:
+ * DIO1
+ * DIO2
+ * DIO3
+ * DIO4
+ If this field is left empty, DIO1 is assigned as default data ready
+ signal.
+- reset-gpios: must be the device tree identifier of the RESET pin. As the line
+ is active low, it should be marked GPIO_ACTIVE_LOW.
+- clocks: phandle to the external clock. Should be set according to
+ "clock-names".
+ If this field is left empty together with the "clock-names" field, then
+ the internal clock is used.
+- clock-names: The name of the external clock to be used. Valid values are:
+ * sync: In sync mode, the internal clock is disabled and the frequency
+ of the external clock signal establishes the rate of data
+ collection and processing. See Fig 14 and 15 in the datasheet.
+ The clock-frequency must be:
+ * 3000 to 4500 Hz for adis1649x devices.
+ * 700 to 2400 Hz for adis1648x devices.
+ * pps: In Pulse Per Second (PPS) Mode, the rate of data collection and
+ production is equal to the product of the external clock
+ frequency and the scale factor in the SYNC_SCALE register, see
+ Table 154 in the datasheet.
+ The clock-frequency must be:
+ * 1 to 128 Hz for adis1649x devices.
+ * This mode is not supported by adis1648x devices.
+ If this field is left empty together with the "clocks" field, then the
+ internal clock is used.
+- adi,ext-clk-pin: The DIOx line to be used as an external clock input.
+ Valid values are:
+ * DIO1
+ * DIO2
+ * DIO3
+ * DIO4
+ Each DIOx pin supports only one function at a time (data ready line
+ selection or external clock input). When a single pin has two
+ assignments, the enable bit for the lower priority function
+ automatically resets to zero (disabling the lower priority function).
+ Data ready has highest priority.
+ If this field is left empty, DIO2 is assigned as default external clock
+ input pin.
+
+Example:
+
+ imu@0 {
+ compatible = "adi,adis16495-1";
+ reg = <0>;
+ spi-max-frequency = <3200000>;
+ spi-cpol;
+ spi-cpha;
+ interrupts = <25 IRQF_TRIGGER_FALLING>;
+ interrupt-parent = <&gpio>;
+ interrupt-names = "DIO2";
+ clocks = <&adis16495_sync>;
+ clock-names = "sync";
+ adi,ext-clk-pin = "DIO1";
+ };
diff --git a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
index 69d53d98d0f0..efec9ece034a 100644
--- a/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+++ b/Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
@@ -8,6 +8,9 @@ Required properties:
"st,lsm6dsm"
"st,ism330dlc"
"st,lsm6dso"
+ "st,asm330lhh"
+ "st,lsm6dsox"
+ "st,lsm6dsr"
- reg: i2c address of the sensor / spi cs line
Optional properties:
diff --git a/Documentation/devicetree/bindings/iio/light/vcnl4000.txt b/Documentation/devicetree/bindings/iio/light/vcnl4000.txt
new file mode 100644
index 000000000000..955af4555c90
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/vcnl4000.txt
@@ -0,0 +1,24 @@
+VISHAY VCNL4000 - Ambient Light and proximity sensor
+
+This driver supports the VCNL4000/10/20/40 and VCNL4200 chips
+
+Required properties:
+
+ - compatible: must be one of:
+ vishay,vcnl4000
+ vishay,vcnl4010
+ vishay,vcnl4020
+ vishay,vcnl4040
+ vishay,vcnl4200
+
+ - reg: I2C address of the sensor, one of the following depending on the model:
+ 0x13
+ 0x51
+ 0x60
+
+Example:
+
+light-sensor@51 {
+ compatible = "vishay,vcnl4200";
+ reg = <0x51>;
+};
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt b/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
deleted file mode 100644
index 61c72e63c584..000000000000
--- a/Documentation/devicetree/bindings/iio/pressure/bmp085.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-BMP085/BMP18x/BMP28x digital pressure sensors
-
-Required properties:
-- compatible: must be one of:
- "bosch,bmp085"
- "bosch,bmp180"
- "bosch,bmp280"
- "bosch,bme280"
-
-Optional properties:
-- interrupts: interrupt mapping for IRQ
-- reset-gpios: a GPIO line handling reset of the sensor: as the line is
- active low, it should be marked GPIO_ACTIVE_LOW (see gpio/gpio.txt)
-- vddd-supply: digital voltage regulator (see regulator/regulator.txt)
-- vdda-supply: analog voltage regulator (see regulator/regulator.txt)
-
-Example:
-
-pressure@77 {
- compatible = "bosch,bmp085";
- reg = <0x77>;
- interrupt-parent = <&gpio0>;
- interrupts = <25 IRQ_TYPE_EDGE_RISING>;
- reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
- vddd-supply = <&foo>;
- vdda-supply = <&bar>;
-};
diff --git a/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
new file mode 100644
index 000000000000..c6721a7e8938
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/bmp085.yaml
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/pressure/bmp085.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BMP085/BMP180/BMP280/BME280 pressure iio sensors
+
+maintainers:
+ - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+ Pressure, temperature and humidity iio sensors with i2c and spi interfaces
+
+ Specifications about the sensor can be found at:
+ https://www.bosch-sensortec.com/bst/products/all_products/bmp180
+ https://www.bosch-sensortec.com/bst/products/all_products/bmp280
+ https://www.bosch-sensortec.com/bst/products/all_products/bme280
+
+properties:
+ compatible:
+ enum:
+ - bosch,bmp085
+ - bosch,bmp180
+ - bosch,bmp280
+ - bosch,bme280
+
+ vddd-supply:
+ description:
+ digital voltage regulator (see regulator/regulator.txt)
+ maxItems: 1
+
+ vdda-supply:
+ description:
+ analog voltage regulator (see regulator/regulator.txt)
+ maxItems: 1
+
+ reset-gpios:
+ description:
+ A GPIO line handling reset of the sensor. As the line is active low,
+ it should be marked GPIO_ACTIVE_LOW (see gpio/gpio.txt)
+ maxItems: 1
+
+ interrupts:
+ description:
+ interrupt mapping for IRQ (BMP085 only)
+ maxItems: 1
+
+required:
+ - compatible
+ - vddd-supply
+ - vdda-supply
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pressure@77 {
+ compatible = "bosch,bmp085";
+ reg = <0x77>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+ reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+ vddd-supply = <&foo>;
+ vdda-supply = <&bar>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt
deleted file mode 100644
index d4dc7a227e2e..000000000000
--- a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-* Devantech SRF04 ultrasonic range finder
- Bit-banging driver using two GPIOs
-
-Required properties:
- - compatible: Should be "devantech,srf04"
-
- - trig-gpios: Definition of the GPIO for the triggering (output)
- This GPIO is set for about 10 us by the driver to tell the
- device it should initiate the measurement cycle.
-
- - echo-gpios: Definition of the GPIO for the echo (input)
- This GPIO is set by the device as soon as an ultrasonic
- burst is sent out and reset when the first echo is
- received.
- Thus this GPIO is set while the ultrasonic waves are doing
- one round trip.
- It needs to be an GPIO which is able to deliver an
- interrupt because the time between two interrupts is
- measured in the driver.
- See Documentation/devicetree/bindings/gpio/gpio.txt for
- information on how to specify a consumer gpio.
-
-Example:
-srf04@0 {
- compatible = "devantech,srf04";
- trig-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
- echo-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml
new file mode 100644
index 000000000000..4e80ea7c1475
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/devantech-srf04.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/devantech-srf04.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Devantech SRF04 and Maxbotix mb1000 ultrasonic range finder
+
+maintainers:
+ - Andreas Klinger <ak@it-klinger.de>
+
+description: |
+ Bit-banging driver using two GPIOs:
+ - trigger-gpio is raised by the driver to start sending out an ultrasonic
+ burst
+ - echo-gpio is held high by the sensor after sending ultrasonic burst
+ until it is received once again
+
+ Specifications about the devices can be found at:
+ http://www.robot-electronics.co.uk/htm/srf04tech.htm
+
+ http://www.maxbotix.com/documents/LV-MaxSonar-EZ_Datasheet.pdf
+
+properties:
+ compatible:
+ enum:
+ - devantech,srf04
+ - maxbotix,mb1000
+ - maxbotix,mb1010
+ - maxbotix,mb1020
+ - maxbotix,mb1030
+ - maxbotix,mb1040
+
+ trig-gpios:
+ description:
+ Definition of the GPIO for the triggering (output)
+ This GPIO is set for about 10 us by the driver to tell the device it
+ should initiate the measurement cycle.
+ See Documentation/devicetree/bindings/gpio/gpio.txt for information
+ on how to specify a consumer gpio.
+ maxItems: 1
+
+ echo-gpios:
+ description:
+ Definition of the GPIO for the echo (input)
+ This GPIO is set by the device as soon as an ultrasonic burst is sent
+ out and reset when the first echo is received.
+ Thus this GPIO is set while the ultrasonic waves are doing one round
+ trip.
+ It needs to be a GPIO which is able to deliver an interrupt because
+ the time between two interrupts is measured in the driver.
+ maxItems: 1
+
+required:
+ - compatible
+ - trig-gpios
+ - echo-gpios
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ proximity {
+ compatible = "devantech,srf04";
+ trig-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ echo-gpios = <&gpio2 6 GPIO_ACTIVE_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt b/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
new file mode 100644
index 000000000000..dd1058fbe9c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
@@ -0,0 +1,29 @@
+* MaxBotix I2CXL-MaxSonar ultrasonic distance sensor of type mb1202,
+ mb1212, mb1222, mb1232, mb1242, mb7040 or mb7137 using the i2c interface
+ for ranging
+
+Required properties:
+ - compatible: "maxbotix,mb1202",
+ "maxbotix,mb1212",
+ "maxbotix,mb1222",
+ "maxbotix,mb1232",
+ "maxbotix,mb1242",
+ "maxbotix,mb7040" or
+ "maxbotix,mb7137"
+
+ - reg: i2c address of the device, see also i2c/i2c.txt
+
+Optional properties:
+ - interrupts: Interrupt used to announce that the preceding reading
+ request has finished and that data is available.
+ If no interrupt is specified, the device driver
+ falls back to waiting a fixed amount of time until
+ data can be retrieved.
+
+Example:
+proximity@70 {
+ compatible = "maxbotix,mb1232";
+ reg = <0x70>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index 52ee4baec6f0..0ef64a444479 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -49,6 +49,7 @@ Accelerometers:
- st,lis2dw12
- st,lis3dhh
- st,lis3de
+- st,lis2de12
Gyroscopes:
- st,l3g4200d-gyro
diff --git a/Documentation/devicetree/bindings/iio/temperature/max31856.txt b/Documentation/devicetree/bindings/iio/temperature/max31856.txt
new file mode 100644
index 000000000000..06ab43bb4de8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/max31856.txt
@@ -0,0 +1,24 @@
+Maxim MAX31856 thermocouple support
+
+https://datasheets.maximintegrated.com/en/ds/MAX31856.pdf
+
+Optional property:
+ - thermocouple-type: Type of thermocouple (THERMOCOUPLE_TYPE_K if
+ omitted). Supported types are B, E, J, K, N, R, S, T.
+
+Required properties:
+ - compatible: must be "maxim,max31856"
+ - reg: SPI chip select number for the device
+ - spi-max-frequency: maximum supported frequency is 5000000 Hz per the datasheet
+ - spi-cpha: must be defined for max31856 to enable SPI mode 1
+
+ Refer to spi/spi-bus.txt for generic SPI slave bindings.
+
+ Example:
+ temp-sensor@0 {
+ compatible = "maxim,max31856";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
+ spi-cpha;
+ thermocouple-type = <THERMOCOUPLE_TYPE_K>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/temperature/temperature-bindings.txt b/Documentation/devicetree/bindings/iio/temperature/temperature-bindings.txt
new file mode 100644
index 000000000000..8f339cab74ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/temperature/temperature-bindings.txt
@@ -0,0 +1,7 @@
+If the temperature sensor device can be configured to use some specific
+thermocouple type, you can use the defined types provided in the file
+"include/dt-bindings/iio/temperature/thermocouple.h".
+
+Property:
+thermocouple-type: A single cell representing the type of the thermocouple
+ used by the device.
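+
+For illustration only, a minimal, hypothetical sensor node using this generic
+property (the compatible string and SPI properties are borrowed from the
+max31856 binding and are placeholders, not requirements of this file):
+
+#include <dt-bindings/iio/temperature/thermocouple.h>
+
+temp-sensor@0 {
+	compatible = "maxim,max31856";
+	reg = <0>;
+	spi-max-frequency = <5000000>;
+	spi-cpha;
+	/* one of the THERMOCOUPLE_TYPE_* constants from the header above */
+	thermocouple-type = <THERMOCOUPLE_TYPE_J>;
+};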
diff --git a/Documentation/devicetree/bindings/input/gpio-vibrator.yaml b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
new file mode 100644
index 000000000000..903475f52dbd
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/gpio-vibrator.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bindings/input/gpio-vibrator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO vibrator
+
+maintainers:
+ - Luca Weiss <luca@z3ntu.xyz>
+
+description: |+
+ Registers a GPIO device as vibrator, where the on/off capability is controlled by a GPIO.
+
+properties:
+ compatible:
+ const: gpio-vibrator
+
+ enable-gpios:
+ maxItems: 1
+
+ vcc-supply:
+ description: Regulator that provides power
+
+required:
+ - compatible
+ - enable-gpios
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ vibrator {
+ compatible = "gpio-vibrator";
+ enable-gpios = <&msmgpio 86 GPIO_ACTIVE_HIGH>;
+ vcc-supply = <&pm8941_l18>;
+ };
diff --git a/Documentation/devicetree/bindings/input/lpc32xx-key.txt b/Documentation/devicetree/bindings/input/lpc32xx-key.txt
index bcf62f856358..2b075a080d30 100644
--- a/Documentation/devicetree/bindings/input/lpc32xx-key.txt
+++ b/Documentation/devicetree/bindings/input/lpc32xx-key.txt
@@ -8,6 +8,7 @@ Required Properties:
- reg: Physical base address of the controller and length of memory mapped
region.
- interrupts: The interrupt number to the cpu.
+- clocks: phandle to clock controller plus clock-specifier pair
- nxp,debounce-delay-ms: Debounce delay in ms
- nxp,scan-delay-ms: Repeated scan period in ms
- linux,keymap: the key-code to be reported when the key is pressed
@@ -22,7 +23,9 @@ Example:
key@40050000 {
compatible = "nxp,lpc3220-key";
reg = <0x40050000 0x1000>;
- interrupts = <54 0>;
+ clocks = <&clk LPC32XX_CLK_KEY>;
+ interrupt-parent = <&sic1>;
+ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
keypad,num-rows = <1>;
keypad,num-columns = <1>;
nxp,debounce-delay-ms = <3>;
diff --git a/Documentation/devicetree/bindings/input/max77650-onkey.txt b/Documentation/devicetree/bindings/input/max77650-onkey.txt
new file mode 100644
index 000000000000..477dc74f452a
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/max77650-onkey.txt
@@ -0,0 +1,26 @@
+Onkey driver for MAX77650 PMIC from Maxim Integrated.
+
+This module is part of the MAX77650 MFD device. For more details
+see Documentation/devicetree/bindings/mfd/max77650.txt.
+
+The onkey controller is represented as a sub-node of the PMIC node on
+the device tree.
+
+Required properties:
+--------------------
+- compatible: Must be "maxim,max77650-onkey".
+
+Optional properties:
+- linux,code: The key-code to be reported when the key is pressed.
+ Defaults to KEY_POWER.
+- maxim,onkey-slide: The system's button is a slide switch, not the default
+ push button.
+
+Example:
+--------
+
+ onkey {
+ compatible = "maxim,max77650-onkey";
+ linux,code = <KEY_END>;
+ maxim,onkey-slide;
+ };
diff --git a/Documentation/devicetree/bindings/input/microchip,qt1050.txt b/Documentation/devicetree/bindings/input/microchip,qt1050.txt
new file mode 100644
index 000000000000..80e75f96252b
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/microchip,qt1050.txt
@@ -0,0 +1,78 @@
+Microchip AT42QT1050 Five-channel Touch Sensor IC
+
+The AT42QT1050 (QT1050) is a QTouchADC sensor device. The device can sense from
+one to five keys, dependent on mode. The QT1050 includes all signal processing
+functions necessary to provide stable sensing under a wide variety of changing
+conditions, and the outputs are fully debounced.
+
+The touchkey device node should be placed inside an I2C bus node.
+
+Required properties:
+- compatible: Must be "microchip,qt1050"
+- reg: The I2C address of the device
+- interrupts: The sink for the touchpad's IRQ output,
+ see ../interrupt-controller/interrupts.txt
+
+Optional properties:
+- wakeup-source: touch keys can be used as a wakeup source
+
+Each button (key) is represented as a sub-node:
+
+Any key that is not specified, or whose linux,code is set to KEY_RESERVED, is
+disabled in hardware.
+
+Subnode properties:
+- linux,code: Keycode to emit.
+- reg: The key number. Valid values: 0, 1, 2, 3, 4.
+
+Optional subnode-properties:
+
+If an optional property is missing or has an invalid value, the default value
+is used.
+
+- microchip,pre-charge-time-ns:
+ Each touchpad needs some time to precharge. The value depends on the mechanical
+ layout.
+ Valid value range: 0 - 637500; values must be a multiple of 2500;
+ default is 0.
+- microchip,average-samples:
+ Number of data samples which are averaged for each read.
+ Valid values: 1, 4, 16, 64, 256, 1024, 4096, 16384; default is 1.
+- microchip,average-scaling:
+ The scaling factor which is used to scale the average-samples.
+ Valid values: 1, 2, 4, 8, 16, 32, 64, 128; default is 1.
+- microchip,threshold:
+ Number of counts to register a touch detection.
+ Valid value range: 0 - 255; default is 20.
+
+Example:
+QT1050 with 3 non-contiguous keys; key 2 and key 4 are disabled.
+
+touchkeys@41 {
+ compatible = "microchip,qt1050";
+ reg = <0x41>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+
+ up@0 {
+ reg = <0>;
+ linux,code = <KEY_UP>;
+ microchip,average-samples = <64>;
+ microchip,average-scaling = <16>;
+ microchip,pre-charge-time-ns = <10000>;
+ };
+
+ right@1 {
+ reg = <1>;
+ linux,code = <KEY_RIGHT>;
+ microchip,average-samples = <64>;
+ microchip,average-scaling = <8>;
+ };
+
+ down@3 {
+ reg = <3>;
+ linux,code = <KEY_DOWN>;
+ microchip,average-samples = <256>;
+ microchip,average-scaling = <16>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
index 1458c3179a63..496125c6bfb7 100644
--- a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
+++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt
@@ -2,12 +2,14 @@ Allwinner sun4i low res adc attached tablet keys
------------------------------------------------
Required properties:
- - compatible: "allwinner,sun4i-a10-lradc-keys"
+ - compatible: should be one of the following strings:
+ "allwinner,sun4i-a10-lradc-keys"
+ "allwinner,sun8i-a83t-r-lradc"
- reg: mmio address range of the chip
- interrupts: interrupt to which the chip is connected
- vref-supply: powersupply for the lradc reference voltage
-Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
+Each key is represented as a sub-node of the compatible mentioned above:
Required subnode-properties:
- label: Descriptive name of the key.
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
index 8cf0b4d38a7e..fc03ea4cf5ab 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt
@@ -3,6 +3,7 @@ Device tree bindings for Goodix GT9xx series touchscreen controller
Required properties:
- compatible : Should be "goodix,gt1151"
+ or "goodix,gt5663"
or "goodix,gt5688"
or "goodix,gt911"
or "goodix,gt9110"
@@ -19,6 +20,8 @@ Optional properties:
- irq-gpios : GPIO pin used for IRQ. The driver uses the
interrupt gpio pin as output to reset the device.
- reset-gpios : GPIO pin used for reset
+ - AVDD28-supply : Analog power supply regulator on AVDD28 pin
+ - VDDIO-supply : GPIO power supply regulator on VDDIO pin
- touchscreen-inverted-x
- touchscreen-inverted-y
- touchscreen-size-x
diff --git a/Documentation/devicetree/bindings/input/touchscreen/iqs5xx.txt b/Documentation/devicetree/bindings/input/touchscreen/iqs5xx.txt
new file mode 100644
index 000000000000..efa0820e2469
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/iqs5xx.txt
@@ -0,0 +1,80 @@
+Azoteq IQS550/572/525 Trackpad/Touchscreen Controller
+
+Required properties:
+
+- compatible : Must be equal to one of the following:
+ "azoteq,iqs550"
+ "azoteq,iqs572"
+ "azoteq,iqs525"
+
+- reg : I2C slave address for the device.
+
+- interrupts : GPIO to which the device's active-high RDY
+ output is connected (see [0]).
+
+- reset-gpios : GPIO to which the device's active-low NRST
+ input is connected (see [1]).
+
+Optional properties:
+
+- touchscreen-min-x : See [2].
+
+- touchscreen-min-y : See [2].
+
+- touchscreen-size-x : See [2]. If this property is omitted, the
+ maximum x-coordinate is specified by the
+ device's "X Resolution" register.
+
+- touchscreen-size-y : See [2]. If this property is omitted, the
+ maximum y-coordinate is specified by the
+ device's "Y Resolution" register.
+
+- touchscreen-max-pressure : See [2]. Pressure is expressed as the sum of
+ the deltas across all channels impacted by a
+ touch event. A channel's delta is calculated
+ as its count value minus a reference, where
+ the count value is inversely proportional to
+ the channel's capacitance.
+
+- touchscreen-fuzz-x : See [2].
+
+- touchscreen-fuzz-y : See [2].
+
+- touchscreen-fuzz-pressure : See [2].
+
+- touchscreen-inverted-x : See [2]. Inversion is applied relative to that
+ which may already be specified by the device's
+ FLIP_X and FLIP_Y register fields.
+
+- touchscreen-inverted-y : See [2]. Inversion is applied relative to that
+ which may already be specified by the device's
+ FLIP_X and FLIP_Y register fields.
+
+- touchscreen-swapped-x-y : See [2]. Swapping is applied relative to that
+ which may already be specified by the device's
+ SWITCH_XY_AXIS register field.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/gpio/gpio.txt
+[2]: Documentation/devicetree/bindings/input/touchscreen/touchscreen.txt
+
+Example:
+
+ &i2c1 {
+ /* ... */
+
+ touchscreen@74 {
+ compatible = "azoteq,iqs550";
+ reg = <0x74>;
+ interrupt-parent = <&gpio>;
+ interrupts = <17 4>;
+ reset-gpios = <&gpio 27 1>;
+
+ touchscreen-size-x = <640>;
+ touchscreen-size-y = <480>;
+
+ touchscreen-max-pressure = <16000>;
+ };
+
+ /* ... */
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/interconnect.txt b/Documentation/devicetree/bindings/interconnect/interconnect.txt
index 5a3c575b387a..6f5d23a605b7 100644
--- a/Documentation/devicetree/bindings/interconnect/interconnect.txt
+++ b/Documentation/devicetree/bindings/interconnect/interconnect.txt
@@ -51,6 +51,10 @@ interconnect-names : List of interconnect path name strings sorted in the same
interconnect-names to match interconnect paths with interconnect
specifier pairs.
+ Reserved interconnect names:
+ * dma-mem: Path from the device to the main memory of
+ the system
+
Example:
sdhci@7864000 {
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
index 758fbd7128e7..54838d4ea44c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml
@@ -129,6 +129,7 @@ required:
patternProperties:
"^v2m@[0-9a-f]+$":
+ type: object
description: |
* GICv2m extension for MSI/MSI-x support (Optional)
diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
new file mode 100644
index 000000000000..bae10e261fa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2018 Linaro Ltd.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/interrupt/intel-ixp4xx-interrupt.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel IXP4xx XScale Networking Processors Interrupt Controller
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ This interrupt controller is found in the Intel IXP4xx processors.
+ Some processors have 32 interrupts, some have up to 64 interrupts.
+ The exact number of interrupts is determined from the compatible
+ string.
+
+ The distinct IXP4xx families with different interrupt controller
+ variations are IXP42x, IXP43x, IXP45x and IXP46x. Those four
+ families were the only ones to reach the developer and consumer
+ market.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - intel,ixp42x-interrupt
+ - intel,ixp43x-interrupt
+ - intel,ixp45x-interrupt
+ - intel,ixp46x-interrupt
+
+ reg:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - '#interrupt-cells'
+
+examples:
+ - |
+ intcon: interrupt-controller@c8003000 {
+ compatible = "intel,ixp43x-interrupt";
+ reg = <0xc8003000 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
index c5d589108a94..0e312fea2a5d 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt
@@ -1,15 +1,18 @@
-+Mediatek MT65xx/MT67xx/MT81xx sysirq
+MediaTek sysirq
-Mediatek SOCs sysirq support controllable irq inverter for each GIC SPI
+The sysirq block in MediaTek SoCs provides a controllable irq inverter for each GIC SPI
interrupt.
Required properties:
- compatible: should be
+ "mediatek,mt8516-sysirq", "mediatek,mt6577-sysirq": for MT8516
+ "mediatek,mt8183-sysirq", "mediatek,mt6577-sysirq": for MT8183
"mediatek,mt8173-sysirq", "mediatek,mt6577-sysirq": for MT8173
"mediatek,mt8135-sysirq", "mediatek,mt6577-sysirq": for MT8135
"mediatek,mt8127-sysirq", "mediatek,mt6577-sysirq": for MT8127
"mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622
"mediatek,mt7623-sysirq", "mediatek,mt6577-sysirq": for MT7623
+ "mediatek,mt7629-sysirq", "mediatek,mt6577-sysirq": for MT7629
"mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795
"mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797
"mediatek,mt6765-sysirq", "mediatek,mt6577-sysirq": for MT6765
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
new file mode 100644
index 000000000000..7841cb099e13
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
@@ -0,0 +1,66 @@
+Texas Instruments K3 Interrupt Aggregator
+=========================================
+
+The Interrupt Aggregator (INTA) provides a centralized machine
+which handles the termination of system events so that they can
+be coherently processed by the host(s) in the system. A maximum
+of 64 events can be mapped to a single interrupt.
+
+
+ Interrupt Aggregator
+ +-----------------------------------------+
+ | Intmap VINT |
+ | +--------------+ +------------+ |
+ m ------>| | vint | bit | | 0 |.....|63| vint0 |
+ . | +--------------+ +------------+ | +------+
+ . | . . | | HOST |
+Globalevents ------>| . . |------>| IRQ |
+ . | . . | | CTRL |
+ . | . . | +------+
+ n ------>| +--------------+ +------------+ |
+ | | vint | bit | | 0 |.....|63| vintx |
+ | +--------------+ +------------+ |
+ | |
+ +-----------------------------------------+
+
+Configuration of these Intmap registers that maps global events to vint is done
+by a system controller (like the Device Memory and Security Controller on K3
+AM654 SoC). The driver should request the range of global events and vints
+assigned to the requesting host from the system controller. The driver manages
+these resources and asks the system controller to map a specific global event
+to a vint, bit pair.
+
+Communication between the host processor running an OS and the system
+controller happens through a protocol called TI System Control Interface
+(TISCI protocol). For more details refer:
+Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+TISCI Interrupt Aggregator Node:
+-------------------------------
+- compatible: Must be "ti,sci-inta".
+- reg: Should contain registers location and length.
+- interrupt-controller: Identifies the node as an interrupt controller
+- msi-controller: Identifies the node as an MSI controller.
+- interrupt-parent: phandle of irq parent.
+- ti,sci: Phandle to TI-SCI compatible System controller node.
+- ti,sci-dev-id: TISCI device ID of the Interrupt Aggregator.
+- ti,sci-rm-range-vint: Array of TISCI subtype ids representing vints(inta
+ outputs) range within this INTA, assigned to the
+ requesting host context.
+- ti,sci-rm-range-global-event: Array of TISCI subtype ids representing the
+ global events range reaching this IA and are assigned
+ to the requesting host context.
+
+Example:
+--------
+main_udmass_inta: interrupt-controller@33d00000 {
+ compatible = "ti,sci-inta";
+ reg = <0x0 0x33d00000 0x0 0x100000>;
+ interrupt-controller;
+ msi-controller;
+ interrupt-parent = <&main_navss_intr>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <179>;
+ ti,sci-rm-range-vint = <0x0>;
+ ti,sci-rm-range-global-event = <0x1>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
new file mode 100644
index 000000000000..1a8718f8855d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
@@ -0,0 +1,82 @@
+Texas Instruments K3 Interrupt Router
+=====================================
+
+The Interrupt Router (INTR) module provides a mechanism to mux M
+interrupt inputs to N interrupt outputs, where all M inputs are selectable
+to be driven per N output. An Interrupt Router can either handle edge triggered
+or level triggered interrupts and that is fixed in hardware.
+
+ Interrupt Router
+ +----------------------+
+ | Inputs Outputs |
+ +-------+ | +------+ +-----+ |
+ | GPIO |----------->| | irq0 | | 0 | | Host IRQ
+ +-------+ | +------+ +-----+ | controller
+ | . . | +-------+
+ +-------+ | . . |----->| IRQ |
+ | INTA |----------->| . . | +-------+
+ +-------+ | . +-----+ |
+ | +------+ | N | |
+ | | irqM | +-----+ |
+ | +------+ |
+ | |
+ +----------------------+
+
+There is one register per output (MUXCNTL_N) that controls the selection.
+Configuration of these MUXCNTL_N registers is done by a system controller
+(like the Device Memory and Security Controller on K3 AM654 SoC). System
+controller will keep track of the used and unused registers within the Router.
+The driver should request the range of GIC IRQs assigned to the requesting
+hosts from the system controller. It is the driver's responsibility to keep
+track of Host IRQs.
+
+Communication between the host processor running an OS and the system
+controller happens through a protocol called TI System Control Interface
+(TISCI protocol). For more details refer:
+Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+TISCI Interrupt Router Node:
+----------------------------
+Required Properties:
+- compatible: Must be "ti,sci-intr".
+- ti,intr-trigger-type: Should be one of the following:
+ 1: If intr supports edge triggered interrupts.
+ 4: If intr supports level triggered interrupts.
+- interrupt-controller: Identifies the node as an interrupt controller
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value should be 2.
+ First cell should contain the TISCI device ID of source
+ Second cell should contain the interrupt source offset
+ within the device.
+- ti,sci: Phandle to TI-SCI compatible System controller node.
+- ti,sci-dst-id: TISCI device ID of the destination IRQ controller.
+- ti,sci-rm-range-girq: Array of TISCI subtype ids representing the host irqs
+ assigned to this interrupt router. Each subtype id
+ corresponds to a range of host irqs.
+
+For more details on TISCI IRQ resource management refer:
+http://downloads.ti.com/tisci/esd/latest/2_tisci_msgs/rm/rm_irq.html
+
+Example:
+--------
+The following example demonstrates both interrupt router node and the consumer
+node(main gpio) on the AM654 SoC:
+
+main_intr: interrupt-controller0 {
+ compatible = "ti,sci-intr";
+ ti,intr-trigger-type = <1>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ #interrupt-cells = <2>;
+ ti,sci = <&dmsc>;
+ ti,sci-dst-id = <56>;
+ ti,sci-rm-range-girq = <0x1>;
+};
+
+main_gpio0: gpio@600000 {
+ ...
+ interrupt-parent = <&main_intr>;
+ interrupts = <57 256>, <57 257>, <57 258>,
+ <57 259>, <57 260>, <57 261>;
+ ...
+};
diff --git a/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
new file mode 100644
index 000000000000..4d61fe0a98a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/backlight/lm3630a-backlight.yaml
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/backlight/lm3630a-backlight.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI LM3630A High-Efficiency Dual-String White LED
+
+maintainers:
+ - Lee Jones <lee.jones@linaro.org>
+ - Daniel Thompson <daniel.thompson@linaro.org>
+ - Jingoo Han <jingoohan1@gmail.com>
+
+description: |
+ The LM3630A is a current-mode boost converter which supplies the power and
+ controls the current in up to two strings of 10 LEDs per string.
+ https://www.ti.com/product/LM3630A
+
+properties:
+ compatible:
+ const: ti,lm3630a
+
+ reg:
+ maxItems: 1
+
+ ti,linear-mapping-mode:
+ description: |
+ Enable linear mapping mode. If disabled, then it will use exponential
+ mapping mode in which the ramp up/down appears to have a more uniform
+ transition to the human eye.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+
+patternProperties:
+ "^led@[01]$":
+ type: object
+ description: |
+ Properties for a string of connected LEDs.
+
+ properties:
+ reg:
+ description: |
+ The control bank that is used to program the two current sinks. The
+ LM3630A has two control banks (A and B) and are represented as 0 or 1
+ in this property. The two current sinks can be controlled
+ independently with both banks, or bank A can be configured to control
+ both sinks with the led-sources property.
+ maxItems: 1
+ minimum: 0
+ maximum: 1
+
+ label:
+ maxItems: 1
+
+ led-sources:
+ allOf:
+ - minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+ default-brightness:
+ description: Default brightness level on boot.
+ minimum: 0
+ maximum: 255
+
+ max-brightness:
+ description: Maximum brightness that is allowed during runtime.
+ minimum: 0
+ maximum: 255
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@38 {
+ compatible = "ti,lm3630a";
+ reg = <0x38>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ led-sources = <0 1>;
+ label = "lcd-backlight";
+ default-brightness = <200>;
+ max-brightness = <255>;
+ };
+ };
+ };
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@38 {
+ compatible = "ti,lm3630a";
+ reg = <0x38>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ default-brightness = <150>;
+ ti,linear-mapping-mode;
+ };
+
+ led@1 {
+ reg = <1>;
+ default-brightness = <225>;
+ ti,linear-mapping-mode;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3532.txt b/Documentation/devicetree/bindings/leds/leds-lm3532.txt
new file mode 100644
index 000000000000..c087f85ddddc
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lm3532.txt
@@ -0,0 +1,101 @@
+* Texas Instruments - lm3532 White LED driver with ambient light sensing
+capability.
+
+The LM3532 provides three high-voltage, low-side current sinks. The device is
+programmable over an I2C-compatible interface and has independent
+current control for all three channels. The adaptive current regulation
+method allows for different LED currents in each current sink thus allowing
+for a wide variety of backlight and keypad applications.
+
+The main features of the LM3532 include dual ambient light sensor inputs
+each with 32 internal voltage setting resistors, 8-bit logarithmic and linear
+brightness control, dual external PWM brightness control inputs, and up to
+1000:1 dimming ratio with programmable fade in and fade out settings.
+
+Required properties:
+ - compatible : "ti,lm3532"
+ - reg : I2C slave address
+ - #address-cells : 1
+ - #size-cells : 0
+
+Optional properties:
+ - enable-gpios : gpio pin to enable (active high)/disable the device.
+ - ramp-up-us - The Run time ramp rates/step are from one current
+ set-point to another after the device has reached its
+ initial target set point from turn-on
+ - ramp-down-us - The Run time ramp rates/step are from one current
+ set-point to another after the device has reached its
+ initial target set point from turn-on
+ Range for ramp settings: 8us - 65536us
+
+Optional properties if ALS mode is used:
+ - ti,als-vmin - Minimum ALS voltage defined in Volts
+ - ti,als-vmax - Maximum ALS voltage defined in Volts
+ Per the data sheet the max ALS voltage is 2V and the min is 0V
+
+ - ti,als1-imp-sel - ALS1 impedance resistor selection in Ohms
+ - ti,als2-imp-sel - ALS2 impedance resistor selection in Ohms
+ Range for impedance select: 37000 Ohms - 1190 Ohms
+ Values above 37kohms will be set to the "High Impedance" setting
+
+ - ti,als-avrg-time-us - Determines the length of time the device needs to
+ average the two ALS inputs. This is only used if
+ the input mode is LM3532_ALS_INPUT_AVRG.
+ Range: 17920us - 2293760us
+ - ti,als-input-mode - Determines how the device uses the attached ALS
+ devices.
+ 0x00 - ALS1 and ALS2 input average
+ 0x01 - ALS1 Input
+ 0x02 - ALS2 Input
+ 0x03 - Max of ALS1 and ALS2
+
+Required child properties:
+ - reg : Indicates control bank the LED string is controlled by
+ - led-sources : see Documentation/devicetree/bindings/leds/common.txt
+ - ti,led-mode : Defines if the LED strings are manually controlled or
+ if the LED strings are controlled by the ALS.
+ 0x00 - LED strings are I2C controlled via full scale
+ brightness control register
+ 0x01 - LED strings are ALS controlled
+
+Optional LED child properties:
+ - label : see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+led-controller@38 {
+ compatible = "ti,lm3532";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x38>;
+
+ enable-gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
+ ramp-up-us = <1024>;
+ ramp-down-us = <65536>;
+
+ ti,als-vmin = <0>;
+ ti,als-vmax = <2000>;
+ ti,als1-imp-sel = <4110>;
+ ti,als2-imp-sel = <2180>;
+ ti,als-avrg-time-us = <17920>;
+ ti,als-input-mode = <0x00>;
+
+ led@0 {
+ reg = <0>;
+ led-sources = <2>;
+ ti,led-mode = <1>;
+ label = ":backlight";
+ linux,default-trigger = "backlight";
+ };
+
+ led@1 {
+ reg = <1>;
+ led-sources = <1>;
+ ti,led-mode = <0>;
+ label = ":kbd_backlight";
+ };
+};
+
+For more product information please see the links below:
+http://www.ti.com/product/LM3532
diff --git a/Documentation/devicetree/bindings/leds/leds-max77650.txt b/Documentation/devicetree/bindings/leds/leds-max77650.txt
new file mode 100644
index 000000000000..3a67115cc1da
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-max77650.txt
@@ -0,0 +1,57 @@
+LED driver for MAX77650 PMIC from Maxim Integrated.
+
+This module is part of the MAX77650 MFD device. For more details
+see Documentation/devicetree/bindings/mfd/max77650.txt.
+
+The LED controller is represented as a sub-node of the PMIC node on
+the device tree.
+
+This device has three current sinks.
+
+Required properties:
+--------------------
+- compatible: Must be "maxim,max77650-led"
+- #address-cells: Must be <1>.
+- #size-cells: Must be <0>.
+
+Each LED is represented as a sub-node of the LED-controller node. Up to
+three sub-nodes can be defined.
+
+Required properties of the sub-node:
+------------------------------------
+
+- reg: Must be <0>, <1> or <2>.
+
+Optional properties of the sub-node:
+------------------------------------
+
+- label: See Documentation/devicetree/bindings/leds/common.txt
+- linux,default-trigger: See Documentation/devicetree/bindings/leds/common.txt
+
+For more details, please refer to the generic GPIO DT binding document
+<devicetree/bindings/gpio/gpio.txt>.
+
+Example:
+--------
+
+ leds {
+ compatible = "maxim,max77650-led";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ label = "blue:usr0";
+ };
+
+ led@1 {
+ reg = <1>;
+ label = "red:usr1";
+ linux,default-trigger = "heartbeat";
+ };
+
+ led@2 {
+ reg = <2>;
+ label = "green:usr2";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mailbox/marvell,armada-3700-rwtm-mailbox.txt b/Documentation/devicetree/bindings/mailbox/marvell,armada-3700-rwtm-mailbox.txt
new file mode 100644
index 000000000000..282ab81a4ea6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/marvell,armada-3700-rwtm-mailbox.txt
@@ -0,0 +1,16 @@
+* rWTM BIU Mailbox driver for Armada 37xx
+
+Required properties:
+- compatible: must be "marvell,armada-3700-rwtm-mailbox"
+- reg: physical base address of the mailbox and length of memory mapped
+ region
+- interrupts: the IRQ line for the mailbox
+- #mbox-cells: must be 1
+
+Example:
+ rwtm: mailbox@b0000 {
+ compatible = "marvell,armada-3700-rwtm-mailbox";
+ reg = <0xb0000 0x100>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/media/aspeed-video.txt b/Documentation/devicetree/bindings/media/aspeed-video.txt
index 78b464ae2672..ce2894506e1f 100644
--- a/Documentation/devicetree/bindings/media/aspeed-video.txt
+++ b/Documentation/devicetree/bindings/media/aspeed-video.txt
@@ -14,6 +14,11 @@ Required properties:
the VE
- interrupts: the interrupt associated with the VE on this platform
+Optional properties:
+ - memory-region:
+ phandle to a memory region to allocate from, as defined in
+ Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
Example:
video-engine@1e700000 {
@@ -23,4 +28,5 @@ video-engine@1e700000 {
clock-names = "vclk", "eclk";
resets = <&syscon ASPEED_RESET_VIDEO>;
interrupts = <7>;
+ memory-region = <&video_engine_memory>;
};
diff --git a/Documentation/devicetree/bindings/media/cedrus.txt b/Documentation/devicetree/bindings/media/cedrus.txt
index bce0705df953..20c82fb0c343 100644
--- a/Documentation/devicetree/bindings/media/cedrus.txt
+++ b/Documentation/devicetree/bindings/media/cedrus.txt
@@ -13,6 +13,7 @@ Required properties:
- "allwinner,sun8i-h3-video-engine"
- "allwinner,sun50i-a64-video-engine"
- "allwinner,sun50i-h5-video-engine"
+ - "allwinner,sun50i-h6-video-engine"
- reg : register base and length of VE;
- clocks : list of clock specifiers, corresponding to entries in
the clock-names property;
diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
new file mode 100644
index 000000000000..7976e6c40a80
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
@@ -0,0 +1,82 @@
+STMicroelectronics MIPID02 CSI-2 to PARALLEL bridge
+
+MIPID02 has two CSI-2 input ports; only one of those ports can be active at a
+time. The active port's input stream is de-serialized and its content is
+output on the PARALLEL output port.
+The first CSI-2 input port is dual-lane at 800Mbps per lane whereas the second
+CSI-2 input port is single-lane at 800Mbps. Both ports support clock and data
+lane polarity swap. The first port also supports data lane swap.
+PARALLEL output port has a maximum width of 12 bits.
+Supported formats are RAW6, RAW7, RAW8, RAW10, RAW12, RGB565, RGB888, RGB444,
+YUV420 8-bit, YUV422 8-bit and YUV420 10-bit.
+
+Required Properties:
+- compatible: shall be "st,st-mipid02"
+- clocks: reference to the xclk input clock.
+- clock-names: shall be "xclk".
+- VDDE-supply: sensor digital IO supply. Must be 1.8 volts.
+- VDDIN-supply: sensor internal regulator supply. Must be 1.8 volts.
+
+Optional Properties:
+- reset-gpios: reference to the GPIO connected to the xsdn pin, if any.
+ This is an active low signal to the mipid02.
+
+Required subnodes:
+ - ports: A ports node with one port child node per device input and output
+ port, in accordance with the video interface bindings defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. The
+ port nodes are numbered as follows:
+
+ Port Description
+ -----------------------------
+ 0 CSI-2 first input port
+ 1 CSI-2 second input port
+ 2 PARALLEL output
+
+Endpoint node required property for CSI-2 connection is:
+- data-lanes: shall be <1> for Port 1. For Port 0 dual-lane operation it shall
+be <1 2> or <2 1>; for Port 0 single-lane operation it shall be <1> or <2>.
+Endpoint node optional property for CSI-2 connection is:
+- lane-polarities: polarity of each lane; any lane can be inverted.
+
+Endpoint node required property for PARALLEL connection is:
+- bus-width: shall be set to <6>, <7>, <8>, <10> or <12>.
+Endpoint node optional properties for PARALLEL connection are:
+- hsync-active: active state of the HSYNC signal, 0/1 for LOW/HIGH respectively.
+LOW being the default.
+- vsync-active: active state of the VSYNC signal, 0/1 for LOW/HIGH respectively.
+LOW being the default.
+
+Example:
+
+mipid02: csi2rx@14 {
+ compatible = "st,st-mipid02";
+ reg = <0x14>;
+ status = "okay";
+ clocks = <&clk_ext_camera_12>;
+ clock-names = "xclk";
+ VDDE-supply = <&vdd>;
+ VDDIN-supply = <&vdd>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ ep0: endpoint {
+ data-lanes = <1 2>;
+ remote-endpoint = <&mipi_csi2_in>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+
+ ep2: endpoint {
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ remote-endpoint = <&parallel_out>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/meson-ao-cec.txt b/Documentation/devicetree/bindings/media/meson-ao-cec.txt
index 8671bdb08080..c67fc41d4aa2 100644
--- a/Documentation/devicetree/bindings/media/meson-ao-cec.txt
+++ b/Documentation/devicetree/bindings/media/meson-ao-cec.txt
@@ -4,16 +4,23 @@ The Amlogic Meson AO-CEC module is present in Amlogic SoCs and its purpose is
to handle communication between HDMI connected devices over the CEC bus.
Required properties:
- - compatible : value should be following
+ - compatible : value should be following depending on the SoC :
+ For GXBB, GXL, GXM and G12A (AO_CEC_A module) :
"amlogic,meson-gx-ao-cec"
+ For G12A (AO_CEC_B module) :
+ "amlogic,meson-g12a-ao-cec"
- reg : Physical base address of the IP registers and length of memory
mapped region.
- interrupts : AO-CEC interrupt number to the CPU.
- clocks : from common clock binding: handle to AO-CEC clock.
- - clock-names : from common clock binding: must contain "core",
- corresponding to entry in the clocks property.
+ - clock-names : from common clock binding, must contain :
+ For GXBB, GXL, GXM and G12A (AO_CEC_A module) :
+ - "core"
+ For G12A (AO_CEC_B module) :
+ - "oscin"
+ corresponding to entry in the clocks property.
- hdmi-phandle: phandle to the HDMI controller
Example:
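+
+A possible node for the G12A AO_CEC_B module (the register range, interrupt
+number and clock phandle below are SoC placeholders):
+
+cecb_AO: cec@280 {
+	compatible = "amlogic,meson-g12a-ao-cec";
+	reg = <0x00280 0x1c>;	/* placeholder register range */
+	interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>; /* placeholder */
+	clocks = <&ao_cec_b_clk>;	/* placeholder clock phandle */
+	clock-names = "oscin";
+	hdmi-phandle = <&hdmi_tx>;
+};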
diff --git a/Documentation/devicetree/bindings/media/rcar_imr.txt b/Documentation/devicetree/bindings/media/rcar_imr.txt
new file mode 100644
index 000000000000..b0614153ed36
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/rcar_imr.txt
@@ -0,0 +1,31 @@
+Renesas R-Car Image Renderer (Distortion Correction Engine)
+-----------------------------------------------------------
+
+The image renderer, or the distortion correction engine, is a drawing processor
+with a simple instruction system capable of referencing video capture data or
+data in an external memory as 2D texture data and performing texture mapping
+and drawing with respect to any shape that is split into triangular objects.
+
+Required properties:
+
+- compatible: "renesas,<soctype>-imr-lx4", "renesas,imr-lx4" as a fallback for
+ the image renderer light extended 4 (IMR-LX4) found in the R-Car gen3 SoCs,
+ where the examples with <soctype> are:
+ - "renesas,r8a7795-imr-lx4" for R-Car H3,
+ - "renesas,r8a7796-imr-lx4" for R-Car M3-W.
+- reg: offset and length of the register block;
+- interrupts: single interrupt specifier;
+- clocks: single clock phandle/specifier pair;
+- power-domains: power domain phandle/specifier pair;
+- resets: reset phandle/specifier pair.
+
+Example:
+
+ imr-lx4@fe860000 {
+ compatible = "renesas,r8a7795-imr-lx4", "renesas,imr-lx4";
+ reg = <0 0xfe860000 0 0x2000>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 823>;
+ power-domains = <&sysc R8A7795_PD_A3VC>;
+ resets = <&cpg 823>;
+ };
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index 224a4615b418..aa217b096279 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -13,6 +13,7 @@ on Gen3 and RZ/G2 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7743" for the R8A7743 device
- "renesas,vin-r8a7744" for the R8A7744 device
- "renesas,vin-r8a7745" for the R8A7745 device
+ - "renesas,vin-r8a774a1" for the R8A774A1 device
- "renesas,vin-r8a774c0" for the R8A774C0 device
- "renesas,vin-r8a7778" for the R8A7778 device
- "renesas,vin-r8a7779" for the R8A7779 device
diff --git a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
index d63275e17afd..331409259752 100644
--- a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
+++ b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
@@ -8,6 +8,7 @@ R-Car VIN module, which provides the video capture capabilities.
Mandatory properties
--------------------
- compatible: Must be one or more of the following
+ - "renesas,r8a774a1-csi2" for the R8A774A1 device.
- "renesas,r8a774c0-csi2" for the R8A774C0 device.
- "renesas,r8a7795-csi2" for the R8A7795 device.
- "renesas,r8a7796-csi2" for the R8A7796 device.
@@ -18,7 +19,8 @@ Mandatory properties
- reg: the register base and size for the device registers
- interrupts: the interrupt for the device
- - clocks: reference to the parent clock
+ - clocks: A phandle + clock specifier for the module clock
+ - resets: A phandle + reset specifier for the module reset
The device node shall contain two 'port' child nodes according to the
bindings defined in Documentation/devicetree/bindings/media/
diff --git a/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt b/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt
index 9bb5f57e2066..94bf7896a688 100644
--- a/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/atmel,ebi.txt
@@ -15,6 +15,7 @@ Required properties:
"atmel,at91sam9g45-ebi"
"atmel,at91sam9x5-ebi"
"atmel,sama5d3-ebi"
+ "microchip,sam9x60-ebi"
- reg: Contains offset/length value for EBI memory mapping.
This property might contain several entries if the EBI
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/mmdc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/mmdc.txt
new file mode 100644
index 000000000000..bcc36c5b543c
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/mmdc.txt
@@ -0,0 +1,35 @@
+Freescale Multi Mode DDR controller (MMDC)
+
+Required properties :
+- compatible : should be one of following:
+ for i.MX6Q/i.MX6DL:
+ - "fsl,imx6q-mmdc";
+ for i.MX6QP:
+ - "fsl,imx6qp-mmdc", "fsl,imx6q-mmdc";
+ for i.MX6SL:
+ - "fsl,imx6sl-mmdc", "fsl,imx6q-mmdc";
+ for i.MX6SLL:
+ - "fsl,imx6sll-mmdc", "fsl,imx6q-mmdc";
+ for i.MX6SX:
+ - "fsl,imx6sx-mmdc", "fsl,imx6q-mmdc";
+ for i.MX6UL/i.MX6ULL/i.MX6ULZ:
+ - "fsl,imx6ul-mmdc", "fsl,imx6q-mmdc";
+ for i.MX7ULP:
+ - "fsl,imx7ulp-mmdc", "fsl,imx6q-mmdc";
+- reg : address and size of MMDC DDR controller registers
+
+Optional properties :
+- clocks : the clock provided by the SoC to access the MMDC registers
+
+Example :
+ mmdc0: memory-controller@21b0000 { /* MMDC0 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b0000 0x4000>;
+ clocks = <&clks IMX6QDL_CLK_MMDC_P0_IPG>;
+ };
+
+ mmdc1: memory-controller@21b4000 { /* MMDC1 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b4000 0x4000>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index 3f643ef121ff..5f8880cc757e 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -7,6 +7,7 @@ Required properties:
"atmel,sama5d2-hlcdc"
"atmel,sama5d3-hlcdc"
"atmel,sama5d4-hlcdc"
+ "microchip,sam9x60-hlcdc"
- reg: base address and size of the HLCDC device registers.
- clock-names: the name of the 3 clocks requested by the HLCDC device.
Should contain "periph_clk", "sys_clk" and "slow_clk".
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 2af4ff95d6bc..4991a6415796 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -25,6 +25,7 @@ Required properties:
* "x-powers,axp223"
* "x-powers,axp803"
* "x-powers,axp806"
+ * "x-powers,axp805", "x-powers,axp806"
* "x-powers,axp809"
* "x-powers,axp813"
- reg: The I2C slave address or RSB hardware address for the AXP chip
diff --git a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
index 004b0158cf4d..3bf92ad37fa1 100644
--- a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
+++ b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
@@ -19,6 +19,8 @@ And these documents for the required sub-node binding details:
[4] Clock: ../clock/cirrus,lochnagar.txt
[5] Pinctrl: ../pinctrl/cirrus,lochnagar.txt
[6] Regulator: ../regulator/cirrus,lochnagar.txt
+ [7] Sound: ../sound/cirrus,lochnagar.txt
+ [8] Hardware Monitor: ../hwmon/cirrus,lochnagar.txt
Required properties:
@@ -41,6 +43,11 @@ Optional sub-nodes:
- Bindings for the regulator components, see [6]. Only available on
Lochnagar 2.
+ - lochnagar-sc : Binding for the sound card components, see [7].
+ Only available on Lochnagar 2.
+ - lochnagar-hwmon : Binding for the hardware monitor components, see [8].
+ Only available on Lochnagar 2.
+
Optional properties:
- present-gpios : Host present line, indicating the presence of a
@@ -65,4 +72,14 @@ lochnagar: lochnagar@22 {
compatible = "cirrus,lochnagar-pinctrl";
...
};
+
+ lochnagar-sc {
+ compatible = "cirrus,lochnagar2-soundcard";
+ ...
+ };
+
+ lochnagar-hwmon {
+ compatible = "cirrus,lochnagar2-hwmon";
+ ...
+ };
};
diff --git a/Documentation/devicetree/bindings/mfd/max77620.txt b/Documentation/devicetree/bindings/mfd/max77620.txt
index 9c16d51cc15b..5a642a51d58e 100644
--- a/Documentation/devicetree/bindings/mfd/max77620.txt
+++ b/Documentation/devicetree/bindings/mfd/max77620.txt
@@ -4,7 +4,8 @@ Required properties:
-------------------
- compatible: Must be one of
"maxim,max77620"
- "maxim,max20024".
+ "maxim,max20024"
+ "maxim,max77663"
- reg: I2C device address.
Optional properties:
@@ -17,6 +18,11 @@ Optional properties:
IRQ numbers for different interrupt source of MAX77620
are defined at dt-bindings/mfd/max77620.h.
+- system-power-controller: Indicates that this PMIC is controlling the
+ system power, see [1] for more details.
+
+[1] Documentation/devicetree/bindings/power/power-controller.txt
+
Optional subnodes and their properties:
=======================================
@@ -105,6 +111,7 @@ Optional properties:
Here supported time periods by device in microseconds are as follows:
MAX77620 supports 40, 80, 160, 320, 640, 1280, 2560 and 5120 microseconds.
MAX20024 supports 20, 40, 80, 160, 320, 640, 1280 and 2540 microseconds.
+MAX77663 supports 20, 40, 80, 160, 320, 640, 1280 and 2540 microseconds.
-maxim,power-ok-control: configure map power ok bit
1: Enables POK(Power OK) to control nRST_IO and GPIO1
diff --git a/Documentation/devicetree/bindings/mfd/max77650.txt b/Documentation/devicetree/bindings/mfd/max77650.txt
new file mode 100644
index 000000000000..b529d8d19335
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77650.txt
@@ -0,0 +1,46 @@
+MAX77650 ultra low-power PMIC from Maxim Integrated.
+
+Required properties:
+-------------------
+- compatible: Must be "maxim,max77650"
+- reg: I2C device address.
+- interrupts: The interrupt on the parent the controller is
+ connected to.
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells: Must be <2>.
+
+- gpio-controller: Marks the device node as a gpio controller.
+- #gpio-cells: Must be <2>. The first cell is the pin number and
+ the second cell is used to specify the gpio active
+ state.
+
+Optional properties:
+--------------------
+- gpio-line-names: Single string containing the name of the GPIO line.
+
+The GPIO-controller module is represented as part of the top-level PMIC
+node. The device exposes a single GPIO line.
+
+For device-tree bindings of other sub-modules (regulator, power supply,
+LEDs and onkey) refer to the binding documents under the respective
+sub-system directories.
+
+For more details on GPIO bindings, please refer to the generic GPIO DT
+binding document <devicetree/bindings/gpio/gpio.txt>.
+
+Example:
+--------
+
+ pmic@48 {
+ compatible = "maxim,max77650";
+ reg = <0x48>;
+
+ interrupt-controller;
+ interrupt-parent = <&gpio2>;
+ #interrupt-cells = <2>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "max77650-charger";
+ };
diff --git a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
index 2a9ff29db9c9..fb54e4dad5b3 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-lptimer.txt
@@ -16,7 +16,7 @@ Required properties:
Optional subnodes:
- pwm: See ../pwm/pwm-stm32-lp.txt
-- counter: See ../iio/timer/stm32-lptimer-cnt.txt
+- counter: See ../counter/stm32-lptimer-cnt.txt
- trigger: See ../iio/timer/stm32-lptimer-trigger.txt
Example:
diff --git a/Documentation/devicetree/bindings/mfd/stm32-timers.txt b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
index 0e900b52e895..15c3b87f51d9 100644
--- a/Documentation/devicetree/bindings/mfd/stm32-timers.txt
+++ b/Documentation/devicetree/bindings/mfd/stm32-timers.txt
@@ -28,6 +28,7 @@ Optional parameters:
Optional subnodes:
- pwm: See ../pwm/pwm-stm32.txt
- timer: See ../iio/timer/stm32-timer-trigger.txt
+- counter: See ../counter/stm32-timer-cnt.txt
Example:
timers@40010000 {
@@ -48,6 +49,12 @@ Example:
compatible = "st,stm32-timer-trigger";
reg = <0>;
};
+
+ counter {
+ compatible = "st,stm32-timer-counter";
+ pinctrl-names = "default";
+ pinctrl-0 = <&tim1_in_pins>;
+ };
};
Example with all dmas:
diff --git a/Documentation/devicetree/bindings/mfd/stmfx.txt b/Documentation/devicetree/bindings/mfd/stmfx.txt
new file mode 100644
index 000000000000..f0c2f7fcf5c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/stmfx.txt
@@ -0,0 +1,28 @@
+STMicroelectronics Multi-Function eXpander (STMFX) Core bindings
+
+ST Multi-Function eXpander (STMFX) is a slave controller using I2C for
+communication with the main MCU. Its main features are GPIO expansion, main
+MCU IDD measurement (IDD is the amount of current that flows through VDD) and
+resistive touchscreen controller.
+
+Required properties:
+- compatible: should be "st,stmfx-0300".
+- reg: I2C slave address of the device.
+- interrupts: interrupt specifier triggered by MFX_IRQ_OUT signal.
+ Please refer to ../interrupt-controller/interrupt.txt
+
+Optional properties:
+- drive-open-drain: configure MFX_IRQ_OUT as open drain.
+- vdd-supply: phandle of the regulator supplying STMFX.
+
+Example:
+
+ stmfx: stmfx@42 {
+ compatible = "st,stmfx-0300";
+ reg = <0x42>;
+ interrupts = <8 IRQ_TYPE_EDGE_RISING>;
+ interrupt-parent = <&gpioi>;
+ vdd-supply = <&v3v3>;
+ };
+
+Please refer to ../pinctrl/pinctrl-stmfx.txt for STMFX GPIO expander function bindings.
diff --git a/Documentation/devicetree/bindings/mfd/ti-lmu.txt b/Documentation/devicetree/bindings/mfd/ti-lmu.txt
index c885cf89b8ce..86ca786d54fc 100644
--- a/Documentation/devicetree/bindings/mfd/ti-lmu.txt
+++ b/Documentation/devicetree/bindings/mfd/ti-lmu.txt
@@ -4,7 +4,6 @@ TI LMU driver supports lighting devices below.
Name Child nodes
------ ---------------------------------
- LM3532 Backlight
LM3631 Backlight and regulator
LM3632 Backlight and regulator
LM3633 Backlight, LED and fault monitor
@@ -13,7 +12,6 @@ TI LMU driver supports lighting devices below.
Required properties:
- compatible: Should be one of:
- "ti,lm3532"
"ti,lm3631"
"ti,lm3632"
"ti,lm3633"
@@ -23,7 +21,6 @@ Required properties:
0x11 for LM3632
0x29 for LM3631
0x36 for LM3633, LM3697
- 0x38 for LM3532
0x63 for LM3695
Optional property:
@@ -47,23 +44,6 @@ Optional nodes:
[2] ../leds/leds-lm3633.txt
[3] ../regulator/lm363x-regulator.txt
-lm3532@38 {
- compatible = "ti,lm3532";
- reg = <0x38>;
-
- enable-gpios = <&pioC 2 GPIO_ACTIVE_HIGH>;
-
- backlight {
- compatible = "ti,lm3532-backlight";
-
- lcd {
- led-sources = <0 1 2>;
- ramp-up-msec = <30>;
- ramp-down-msec = <0>;
- };
- };
-};
-
lm3631@29 {
compatible = "ti,lm3631";
reg = <0x29>;
@@ -124,8 +104,8 @@ lm3632@11 {
regulators {
compatible = "ti,lm363x-regulator";
- ti,lcm-en1-gpio = <&pioC 0 GPIO_ACTIVE_HIGH>; /* PC0 */
- ti,lcm-en2-gpio = <&pioC 1 GPIO_ACTIVE_HIGH>; /* PC1 */
+ enable-gpios = <&pioC 0 GPIO_ACTIVE_HIGH>,
+ <&pioC 1 GPIO_ACTIVE_HIGH>;
vboost {
regulator-name = "lcd_boost";
diff --git a/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
new file mode 100644
index 000000000000..854bd67ffec6
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/aspeed-p2a-ctrl.txt
@@ -0,0 +1,47 @@
+======================================================================
+Device tree bindings for Aspeed AST2400/AST2500 PCI-to-AHB Bridge Control Driver
+======================================================================
+
+The bridge is available on platforms with the VGA enabled on the Aspeed device.
+In this case, the host has access to a 64KiB window into all of the BMC's
+memory. The BMC can disable this bridge. If the bridge is enabled, the host
+has read access to all of the memory regions, while read and write access
+depends on a register controlled by the BMC.
+
+Required properties:
+===================
+
+ - compatible: must be one of:
+ - "aspeed,ast2400-p2a-ctrl"
+ - "aspeed,ast2500-p2a-ctrl"
+
+Optional properties:
+===================
+
+- memory-region: A phandle to a reserved_memory region to be used for the PCI
+ to AHB mapping
+
+The p2a-control node should be the child of a syscon node with the required
+property:
+
+- compatible : Should be one of the following:
+ "aspeed,ast2400-scu", "syscon", "simple-mfd"
+ "aspeed,g4-scu", "syscon", "simple-mfd"
+ "aspeed,ast2500-scu", "syscon", "simple-mfd"
+ "aspeed,g5-scu", "syscon", "simple-mfd"
+
+Example
+===================
+
+g4 Example
+----------
+
+syscon: scu@1e6e2000 {
+ compatible = "aspeed,ast2400-scu", "syscon", "simple-mfd";
+ reg = <0x1e6e2000 0x1a8>;
+
+ p2a: p2a-control {
+ compatible = "aspeed,ast2400-p2a-ctrl";
+ memory-region = <&reserved_memory>;
+ };
+};
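+
+g5 Example
+----------
+
+The AST2500 variant follows the same pattern; only the compatible strings
+differ (the register range below is carried over from the g4 example and is
+illustrative only):
+
+syscon: scu@1e6e2000 {
+	compatible = "aspeed,ast2500-scu", "syscon", "simple-mfd";
+	reg = <0x1e6e2000 0x1a8>;
+
+	p2a: p2a-control {
+		compatible = "aspeed,ast2500-p2a-ctrl";
+		memory-region = <&reserved_memory>;
+	};
+};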
diff --git a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml b/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml
new file mode 100644
index 000000000000..d2313b1d9405
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Linaro Ltd.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/misc/intel-ixp4xx-ahb-queue-manager.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel IXP4xx AHB Queue Manager
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+ The IXP4xx AHB Queue Manager maintains queues as circular buffers in
+ an 8KB embedded SRAM along with hardware pointers. It is used by both
+  the XScale processor and the NPEs (Network Processing Engines) in the
+ IXP4xx for accelerating queues, especially for networking. Clients pick
+ queues from the queue manager with foo-queue = <&qmgr N> where the
+ &qmgr is a phandle to the queue manager and N is the queue resource
+ number. The queue resources available and their specific purpose
+ on a certain IXP4xx system will vary.
+
+properties:
+ compatible:
+ items:
+ - const: intel,ixp4xx-ahb-queue-manager
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: Interrupt for queues 0-31
+ - description: Interrupt for queues 32-63
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ qmgr: queue-manager@60000000 {
+ compatible = "intel,ixp4xx-ahb-queue-manager";
+ reg = <0x60000000 0x4000>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 99c5cf8507e8..edb8cadb9541 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -17,6 +17,7 @@ Required properties:
"fsl,t4240-esdhc"
Possible compatibles for ARM:
"fsl,ls1012a-esdhc"
+ "fsl,ls1028a-esdhc"
"fsl,ls1088a-esdhc"
"fsl,ls1043a-esdhc"
"fsl,ls1046a-esdhc"
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 540c65ed9cba..f707b8bee304 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,6 +17,7 @@ Required properties:
"fsl,imx6sx-usdhc"
"fsl,imx6ull-usdhc"
"fsl,imx7d-usdhc"
+ "fsl,imx7ulp-usdhc"
"fsl,imx8qxp-usdhc"
Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
index 07242d141773..36c4bea675d5 100644
--- a/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/k3-dw-mshc.txt
@@ -13,6 +13,8 @@ Required Properties:
* compatible: should be one of the following.
- "hisilicon,hi3660-dw-mshc": for controllers with hi3660 specific extensions.
+ - "hisilicon,hi3670-dw-mshc", "hisilicon,hi3660-dw-mshc": for controllers
+ with hi3670 specific extensions.
- "hisilicon,hi4511-dw-mshc": for controllers with hi4511 specific extensions.
- "hisilicon,hi6220-dw-mshc": for controllers with hi6220 specific extensions.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index cdbcfd3a4ff2..c269dbe384fe 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -64,6 +64,8 @@ Optional properties:
whether pwrseq-simple is used. Default to 10ms if no available.
- supports-cqe : The presence of this property indicates that the corresponding
MMC host controller supports HW command queue feature.
+- disable-cqe-dcmd: This property indicates that the MMC controller's command
+ queue engine (CQE) does not support direct commands (DCMDs).
*NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
polarity properties, we have to fix the meaning of the "normal" and "inverted"
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index f5bcda3980cc..8a532f4453f2 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -11,10 +11,12 @@ Required properties:
"mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
"mediatek,mt8183-mmc": for mmc host ip compatible with mt8183
+ "mediatek,mt8516-mmc": for mmc host ip compatible with mt8516
"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
"mediatek,mt7622-mmc": for MT7622 SoC
"mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC
+	"mediatek,mt7620-mmc": for MT7621 SoC (and others)
- reg: physical base address of the controller and length
- interrupts: Should contain MSDC interrupt number
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index 2cecdc71d94c..2cf3affa1be7 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -14,6 +14,7 @@ Required properties:
- "nvidia,tegra124-sdhci": for Tegra124 and Tegra132
- "nvidia,tegra210-sdhci": for Tegra210
- "nvidia,tegra186-sdhci": for Tegra186
+ - "nvidia,tegra194-sdhci": for Tegra194
- clocks : Must contain one entry, for the module clock.
See ../clocks/clock-bindings.txt for details.
- resets : Must contain an entry for each entry in reset-names.
diff --git a/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
new file mode 100644
index 000000000000..fbd4da3684fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/allwinner,sun4i-a10-nand.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/allwinner,sun4i-a10-nand.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner A10 NAND Controller Device Tree Bindings
+
+allOf:
+ - $ref: "nand-controller.yaml"
+
+maintainers:
+ - Chen-Yu Tsai <wens@csie.org>
+ - Maxime Ripard <maxime.ripard@bootlin.com>
+
+properties:
+ "#address-cells": true
+ "#size-cells": true
+
+ compatible:
+ enum:
+ - allwinner,sun4i-a10-nand
+ - allwinner,sun8i-a23-nand-controller
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Module Clock
+
+ clock-names:
+ items:
+ - const: ahb
+ - const: mod
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: ahb
+
+ dmas:
+ maxItems: 1
+
+ dma-names:
+ const: rxtx
+
+ pinctrl-names: true
+
+patternProperties:
+ "^pinctrl-[0-9]+$": true
+
+ "^nand@[a-f0-9]+$":
+ properties:
+ reg:
+ maxItems: 1
+ minimum: 0
+ maximum: 7
+
+ nand-ecc-mode: true
+
+ nand-ecc-algo:
+ const: bch
+
+ nand-ecc-step-size:
+ enum: [ 512, 1024 ]
+
+ nand-ecc-strength:
+ maximum: 80
+
+ allwinner,rb:
+ description:
+ Contains the native Ready/Busy IDs.
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 1
+ maxItems: 2
+ items:
+ minimum: 0
+ maximum: 1
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
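+# The example below is adapted from the former sunxi-nand.txt binding
+# example; the clock phandles and the interrupt specifier are board-specific
+# placeholders.
+examples:
+  - |
+    nand-controller@1c03000 {
+        compatible = "allwinner,sun4i-a10-nand";
+        reg = <0x01c03000 0x1000>;
+        interrupts = <0 37 1>;
+        clocks = <&ahb_gates 13>, <&nand_clk>;
+        clock-names = "ahb", "mod";
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        nand@0 {
+            reg = <0>;
+            allwinner,rb = <0>;
+            nand-ecc-mode = "soft";
+            nand-ecc-algo = "bch";
+        };
+    };
+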
+...
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index 9bb66e476672..68b51dc58816 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -14,6 +14,7 @@ Required properties:
"atmel,at91sam9261-nand-controller"
"atmel,at91sam9g45-nand-controller"
"atmel,sama5d3-nand-controller"
+ "microchip,sam9x60-nand-controller"
- ranges: empty ranges property to forward EBI ranges definitions.
- #address-cells: should be set to 2.
- #size-cells: should be set to 1.
diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt
index f33da8782741..b14b6751c2f3 100644
--- a/Documentation/devicetree/bindings/mtd/denali-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt
@@ -7,34 +7,48 @@ Required properties:
"socionext,uniphier-denali-nand-v5b" - for Socionext UniPhier (v5b)
- reg : should contain registers location and length for data and reg.
- reg-names: Should contain the reg names "nand_data" and "denali_reg"
+ - #address-cells: should be 1. The cell encodes the chip select connection.
+ - #size-cells : should be 0.
- interrupts : The interrupt number.
- clocks: should contain phandle of the controller core clock, the bus
interface clock, and the ECC circuit clock.
- clock-names: should contain "nand", "nand_x", "ecc"
-Optional properties:
- - nand-ecc-step-size: see nand.txt for details. If present, the value must be
- 512 for "altr,socfpga-denali-nand"
- 1024 for "socionext,uniphier-denali-nand-v5a"
- 1024 for "socionext,uniphier-denali-nand-v5b"
- - nand-ecc-strength: see nand.txt for details. Valid values are:
- 8, 15 for "altr,socfpga-denali-nand"
- 8, 16, 24 for "socionext,uniphier-denali-nand-v5a"
- 8, 16 for "socionext,uniphier-denali-nand-v5b"
- - nand-ecc-maximize: see nand.txt for details
-
-The device tree may optionally contain sub-nodes describing partitions of the
+Sub-nodes:
+ Sub-nodes represent available NAND chips.
+
+ Required properties:
+ - reg: should contain the bank ID of the controller to which each chip
+ select is connected.
+
+ Optional properties:
+ - nand-ecc-step-size: see nand.txt for details.
+ If present, the value must be
+ 512 for "altr,socfpga-denali-nand"
+ 1024 for "socionext,uniphier-denali-nand-v5a"
+ 1024 for "socionext,uniphier-denali-nand-v5b"
+ - nand-ecc-strength: see nand.txt for details. Valid values are:
+ 8, 15 for "altr,socfpga-denali-nand"
+ 8, 16, 24 for "socionext,uniphier-denali-nand-v5a"
+ 8, 16 for "socionext,uniphier-denali-nand-v5b"
+ - nand-ecc-maximize: see nand.txt for details
+
+The chip nodes may optionally contain sub-nodes describing partitions of the
address space. See partition.txt for more detail.
Examples:
nand: nand@ff900000 {
#address-cells = <1>;
- #size-cells = <1>;
+ #size-cells = <0>;
compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>;
clock-names = "nand", "nand_x", "ecc";
interrupts = <0 144 4>;
+
+ nand@0 {
+ reg = <0>;
+	};
};
diff --git a/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt b/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt
index 29ea5853ca91..c02259353327 100644
--- a/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/ingenic,jz4780-nand.txt
@@ -1,4 +1,4 @@
-* Ingenic JZ4780 NAND/BCH
+* Ingenic JZ4780 NAND/ECC
This file documents the device tree bindings for NAND flash devices on the
JZ4780. NAND devices are connected to the NEMC controller (described in
@@ -6,15 +6,18 @@ memory-controllers/ingenic,jz4780-nemc.txt), and thus NAND device nodes must
be children of the NEMC node.
Required NAND controller device properties:
-- compatible: Should be set to "ingenic,jz4780-nand".
+- compatible: Should be one of:
+ * ingenic,jz4740-nand
+ * ingenic,jz4725b-nand
+ * ingenic,jz4780-nand
- reg: For each bank with a NAND chip attached, should specify a bank number,
an offset of 0 and a size of 0x1000000 (i.e. the whole NEMC bank).
Optional NAND controller device properties:
-- ingenic,bch-controller: To make use of the hardware BCH controller, this
- property must contain a phandle for the BCH controller node. The required
+- ecc-engine: To make use of the hardware ECC controller, this
+ property must contain a phandle for the ECC controller node. The required
properties for this node are described below. If this is not specified,
- software BCH will be used instead.
+ software ECC will be used instead.
Optional children nodes:
- Individual NAND chips are children of the NAND controller node.
@@ -45,7 +48,7 @@ nemc: nemc@13410000 {
#address-cells = <1>;
#size-cells = <0>;
- ingenic,bch-controller = <&bch>;
+ ecc-engine = <&bch>;
nand@1 {
reg = <1>;
@@ -67,14 +70,17 @@ nemc: nemc@13410000 {
};
};
-The BCH controller is a separate SoC component used for error correction on
+The ECC controller is a separate SoC component used for error correction on
NAND devices. The following is a description of the device properties for a
-BCH controller.
-
-Required BCH properties:
-- compatible: Should be set to "ingenic,jz4780-bch".
-- reg: Should specify the BCH controller registers location and length.
-- clocks: Clock for the BCH controller.
+ECC controller.
+
+Required ECC properties:
+- compatible: Should be one of:
+ * ingenic,jz4740-ecc
+ * ingenic,jz4725b-bch
+ * ingenic,jz4780-bch
+- reg: Should specify the ECC controller registers location and length.
+- clocks: Clock for the ECC controller.
Example:
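+
+An ECC controller node could look as follows (the register range and the
+clock specifier are illustrative placeholders):
+
+bch: bch@134d0000 {
+	compatible = "ingenic,jz4780-bch";
+	reg = <0x134d0000 0x10000>;
+	clocks = <&cgu JZ4780_CLK_BCH>;
+};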
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 7df0dcaccb7d..c69f4f065d23 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -96,3 +96,19 @@ An example using SRAM:
bank-width = <2>;
};
+An example using gpio-addrs
+
+ flash@20000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash", "jedec-flash";
+ reg = <0x20000000 0x02000000>;
+ ranges = <0 0x00000000 0x02000000
+ 1 0x02000000 0x02000000>;
+ bank-width = <2>;
+ addr-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ partition@0 {
+ label = "test-part1";
+ reg = <0 0x04000000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
new file mode 100644
index 000000000000..199ba5ac2a06
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/nand-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NAND Chip and NAND Controller Generic Binding
+
+maintainers:
+ - Miquel Raynal <miquel.raynal@bootlin.com>
+ - Richard Weinberger <richard@nod.at>
+
+description: |
+ The NAND controller should be represented with its own DT node, and
+ all NAND chips attached to this controller should be defined as
+ children nodes of the NAND controller. This representation should be
+ enforced even for simple controllers supporting only one chip.
+
+ The ECC strength and ECC step size properties define the user
+ desires in terms of correction capability of a controller. Together,
+ they request the ECC engine to correct {strength} bit errors per
+ {size} bytes.
+
+ The interpretation of these parameters is implementation-defined, so
+ not all implementations must support all possible
+ combinations. However, implementations are encouraged to further
+ specify the value(s) they support.
+
+properties:
+ $nodename:
+ pattern: "^nand-controller(@.*)?"
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ ranges: true
+
+patternProperties:
+ "^nand@[a-f0-9]$":
+ properties:
+      reg:
+        description:
+          Contains the chip-select IDs the NAND chip is connected to.
+
+ nand-ecc-mode:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+ - enum: [ none, soft, hw, hw_syndrome, hw_oob_first, on-die ]
+ description:
+ Desired ECC engine, either hardware (most of the time
+ embedded in the NAND controller) or software correction
+ (Linux will handle the calculations). soft_bch is deprecated
+ and should be replaced by soft and nand-ecc-algo.
+
+ nand-ecc-algo:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/string
+ - enum: [ hamming, bch, rs ]
+ description:
+ Desired ECC algorithm.
+
+ nand-bus-width:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - enum: [ 8, 16 ]
+ - default: 8
+ description:
+ Bus width to the NAND chip
+
+ nand-on-flash-bbt:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ With this property, the OS will search the device for a Bad
+ Block Table (BBT). If not found, it will create one, reserve
+ a few blocks at the end of the device to store it and update
+ it as the device ages. Otherwise, the out-of-band area of a
+ few pages of all the blocks will be scanned at boot time to
+ find Bad Block Markers (BBM). These markers will help to
+ build a volatile BBT in RAM.
+
+ nand-ecc-strength:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ description:
+ Maximum number of bits that can be corrected per ECC step.
+
+ nand-ecc-step-size:
+ allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - minimum: 1
+ description:
+ Number of data bytes covered by a single ECC step.
+
+ nand-ecc-maximize:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Whether or not the ECC strength should be maximized. The
+ maximum ECC strength is both controller and chip
+ dependent. The ECC engine has to select the ECC config
+ providing the best strength and taking the OOB area size
+ constraint into account. This is particularly useful when
+ only the in-band area is used by the upper layers, and you
+ want to make your NAND as reliable as possible.
+
+ nand-is-boot-medium:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Whether or not the NAND chip is a boot medium. Drivers might
+ use this information to select ECC algorithms supported by
+ the boot ROM or similar restrictions.
+
+ nand-rb:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ Contains the native Ready/Busy IDs.
+
+ required:
+ - reg
+
+required:
+ - "#address-cells"
+ - "#size-cells"
+
+examples:
+ - |
+ nand-controller {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* controller specific properties */
+
+ nand@0 {
+ reg = <0>;
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "bch";
+
+ /* controller specific properties */
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
deleted file mode 100644
index e949c778e983..000000000000
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-* NAND chip and NAND controller generic binding
-
-NAND controller/NAND chip representation:
-
-The NAND controller should be represented with its own DT node, and all
-NAND chips attached to this controller should be defined as children nodes
-of the NAND controller. This representation should be enforced even for
-simple controllers supporting only one chip.
-
-Mandatory NAND controller properties:
-- #address-cells: depends on your controller. Should at least be 1 to
- encode the CS line id.
-- #size-cells: depends on your controller. Put zero unless you need a
- mapping between CS lines and dedicated memory regions
-
-Optional NAND controller properties
-- ranges: only needed if you need to define a mapping between CS lines and
- memory regions
-
-Optional NAND chip properties:
-
-- nand-ecc-mode : String, operation mode of the NAND ecc mode.
- Supported values are: "none", "soft", "hw", "hw_syndrome",
- "hw_oob_first", "on-die".
- Deprecated values:
- "soft_bch": use "soft" and nand-ecc-algo instead
-- nand-ecc-algo: string, algorithm of NAND ECC.
- Valid values are: "hamming", "bch", "rs".
-- nand-bus-width : 8 or 16 bus width if not present 8
-- nand-on-flash-bbt: boolean to enable on flash bbt option if not present false
-
-- nand-ecc-strength: integer representing the number of bits to correct
- per ECC step.
-
-- nand-ecc-step-size: integer representing the number of data bytes
- that are covered by a single ECC step.
-
-- nand-ecc-maximize: boolean used to specify that you want to maximize ECC
- strength. The maximum ECC strength is both controller and
- chip dependent. The controller side has to select the ECC
- config providing the best strength and taking the OOB area
- size constraint into account.
- This is particularly useful when only the in-band area is
- used by the upper layers, and you want to make your NAND
- as reliable as possible.
-- nand-is-boot-medium: Whether the NAND chip is a boot medium. Drivers might use
- this information to select ECC algorithms supported by
- the boot ROM or similar restrictions.
-
-- nand-rb: shall contain the native Ready/Busy ids.
-
-The ECC strength and ECC step size properties define the correction capability
-of a controller. Together, they say a controller can correct "{strength} bit
-errors per {size} bytes".
-
-The interpretation of these parameters is implementation-defined, so not all
-implementations must support all possible combinations. However, implementations
-are encouraged to further specify the value(s) they support.
-
-Example:
-
- nand-controller {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* controller specific properties */
-
- nand@0 {
- reg = <0>;
- nand-ecc-mode = "soft";
- nand-ecc-algo = "bch";
-
- /* controller specific properties */
- };
- };
diff --git a/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt b/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt
new file mode 100644
index 000000000000..d5c5616f6db5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/arm,arm-firmware-suite.txt
@@ -0,0 +1,17 @@
+ARM AFS - ARM Firmware Suite Partitions
+=======================================
+
+The ARM Firmware Suite is a flash partitioning system found on the
+ARM reference designs: Integrator AP, Integrator CP, Versatile AB,
+Versatile PB, the RealView family, Versatile Express and Juno.
+
+Required properties:
+- compatible : (required) must be "arm,arm-firmware-suite"
+
+Example:
+
+flash@0 {
+ partitions {
+ compatible = "arm,arm-firmware-suite";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-cfe-nor-partitions.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-cfe-nor-partitions.txt
new file mode 100644
index 000000000000..9f630e95f180
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-cfe-nor-partitions.txt
@@ -0,0 +1,24 @@
+Broadcom BCM963XX CFE Loader NOR Flash Partitions
+=================================================
+
+Most Broadcom BCM63XX SoC based devices follow the Broadcom reference layout for
+NOR. The first erase block is used for the CFE bootloader, the last for an
+NVRAM partition, and the remainder in-between for one to two firmware partitions
+at fixed offsets. A valid firmware partition is identified by the ImageTag
+header found at beginning of the second erase block, containing the rootfs and
+kernel offsets and sizes within the firmware partition.
+
+Required properties:
+- compatible : must be "brcm,bcm963xx-cfe-nor-partitions"
+
+Example:
+
+flash@1fc00000 {
+ compatible = "cfi-flash";
+ reg = <0x1fc00000 0x400000>;
+ bank-width = <2>;
+
+ partitions {
+ compatible = "brcm,bcm963xx-cfe-nor-partitions";
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-imagetag.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-imagetag.txt
new file mode 100644
index 000000000000..f8b7418ed817
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,bcm963xx-imagetag.txt
@@ -0,0 +1,45 @@
+Broadcom BCM963XX ImageTag Partition Container
+==============================================
+
+Some Broadcom BCM63XX SoC based devices contain additional, non-discoverable
+partitions or non-standard bootloader partition sizes. For these, a mixed layout
+needs to be used with an explicit firmware partition.
+
+The BCM963XX ImageTag is a simple firmware header describing the offsets and
+sizes of the rootfs and kernel parts contained in the firmware.
+
+Required properties:
+- compatible : must be "brcm,bcm963xx-imagetag"
+
+Example:
+
+flash@1e000000 {
+ compatible = "cfi-flash";
+ reg = <0x1e000000 0x2000000>;
+ bank-width = <2>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cfe@0 {
+ reg = <0x0 0x10000>;
+ read-only;
+ };
+
+ firmware@10000 {
+ reg = <0x10000 0x7d0000>;
+ compatible = "brcm,bcm963xx-imagetag";
+ };
+
+ caldata@7e0000 {
+ reg = <0x7e0000 0x10000>;
+ read-only;
+ };
+
+ nvram@7f0000 {
+ reg = <0x7f0000 0x10000>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
deleted file mode 100644
index dcd5a5d80dc0..000000000000
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Allwinner NAND Flash Controller (NFC)
-
-Required properties:
-- compatible : "allwinner,sun4i-a10-nand".
-- reg : shall contain registers location and length for data and reg.
-- interrupts : shall define the nand controller interrupt.
-- #address-cells: shall be set to 1. Encode the nand CS.
-- #size-cells : shall be set to 0.
-- clocks : shall reference nand controller clocks.
-- clock-names : nand controller internal clock names. Shall contain :
- * "ahb" : AHB gating clock
- * "mod" : nand controller clock
-
-Optional properties:
-- dmas : shall reference DMA channel associated to the NAND controller.
-- dma-names : shall be "rxtx".
-
-Optional children nodes:
-Children nodes represent the available nand chips.
-
-Optional properties:
-- reset : phandle + reset specifier pair
-- reset-names : must contain "ahb"
-- allwinner,rb : shall contain the native Ready/Busy ids.
-- nand-ecc-mode : one of the supported ECC modes ("hw", "soft", "soft_bch" or
- "none")
-
-see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings.
-
-
-Examples:
-nfc: nand@1c03000 {
- compatible = "allwinner,sun4i-a10-nand";
- reg = <0x01c03000 0x1000>;
- interrupts = <0 37 1>;
- clocks = <&ahb_gates 13>, <&nand_clk>;
- clock-names = "ahb", "mod";
- #address-cells = <1>;
- #size-cells = <0>;
- pinctrl-names = "default";
- pinctrl-0 = <&nand_pins_a &nand_cs0_pins_a &nand_rb0_pins_a>;
-
- nand@0 {
- reg = <0>;
- allwinner,rb = <0>;
- nand-ecc-mode = "soft_bch";
- };
-};
diff --git a/Documentation/devicetree/bindings/net/altera_tse.txt b/Documentation/devicetree/bindings/net/altera_tse.txt
index 0e21df94a53f..0b7d4d3758ea 100644
--- a/Documentation/devicetree/bindings/net/altera_tse.txt
+++ b/Documentation/devicetree/bindings/net/altera_tse.txt
@@ -46,9 +46,8 @@ Required properties:
- reg: phy id used to communicate to phy.
- device_type: Must be "ethernet-phy".
-Optional properties:
-- local-mac-address: See ethernet.txt in the same directory.
-- max-frame-size: See ethernet.txt in the same directory.
+The MAC address will be determined using the optional properties defined in
+ethernet.txt.
Example:
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 93dcb79a5f16..9c27dfcd1133 100644
--- a/Documentation/devicetree/bindings/net/amd-xgbe.txt
+++ b/Documentation/devicetree/bindings/net/amd-xgbe.txt
@@ -24,8 +24,6 @@ Required properties:
- phy-mode: See ethernet.txt file in the same directory
Optional properties:
-- mac-address: mac address to be assigned to the device. Can be overridden
- by UEFI.
- dma-coherent: Present if dma operations are coherent
- amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
a unique interrupt for each DMA channel - this requires an additional
@@ -34,6 +32,9 @@ Optional properties:
0 - 1GbE and 10GbE (default)
1 - 2.5GbE and 10GbE
+The MAC address will be determined using the optional properties defined in
+ethernet.txt.
+
The following optional properties are represented by an array with each
value corresponding to a particular speed. The first array value represents
the setting for the 1GbE speed, the second value for the 2.5GbE speed and
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
index 0bfad656a9ff..0120ebe93262 100644
--- a/Documentation/devicetree/bindings/net/brcm,amac.txt
+++ b/Documentation/devicetree/bindings/net/brcm,amac.txt
@@ -16,8 +16,8 @@ Required properties:
registers (required for Northstar2)
- interrupts: Interrupt number
-Optional properties:
-- mac-address: See ethernet.txt file in the same directory
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Examples:
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 3264e1978d25..7c7ac5eb0313 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -49,10 +49,12 @@ Required properties:
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
-- mac-address : See ethernet.txt file in the same directory
- phy_id : Specifies slave phy id (deprecated, use phy-handle)
- phy-handle : See ethernet.txt file in the same directory
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
+
Slave sub-nodes:
- fixed-link : See fixed-link.txt file in the same directory
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index ca83dcc84fb8..5e3579e72e2d 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -20,11 +20,12 @@ Required properties:
Optional properties:
- phy-handle: See ethernet.txt file in the same directory.
If absent, davinci_emac driver defaults to 100/FULL.
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
- ti,davinci-rmii-en: 1 byte, 1 means use RMII
- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
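+
+For example, the MAC address can be taken from an nvmem cell (the nvmem
+provider phandle below is a placeholder):
+
+	nvmem-cells = <&macaddr>;
+	nvmem-cell-names = "mac-address";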
+
Example (enbw_cmc board):
eth0: emac@1e20000 {
compatible = "ti,davinci-dm6467-emac";
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index d66a5292b9d3..f66bb7ecdb82 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -1,12 +1,6 @@
Distributed Switch Architecture Device Tree Bindings
----------------------------------------------------
-Two bindings exist, one of which has been deprecated due to
-limitations.
-
-Current Binding
----------------
-
Switches are true Linux devices and can be probed by any means. Once
probed, they register to the DSA framework, passing a node
pointer. This node is expected to fulfil the following binding, and
@@ -71,9 +65,8 @@ properties, described in binding documents:
Documentation/devicetree/bindings/net/fixed-link.txt
for details.
-- local-mac-address : See
- Documentation/devicetree/bindings/net/ethernet.txt
- for details.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Example
@@ -262,152 +255,3 @@ linked into one DSA cluster.
};
};
};
-
-Deprecated Binding
-------------------
-
-The deprecated binding makes use of a platform device to represent the
-switches. The switches themselves are not Linux devices, and make use
-of an MDIO bus for management.
-
-Required properties:
-- compatible : Should be "marvell,dsa"
-- #address-cells : Must be 2, first cell is the address on the MDIO bus
- and second cell is the address in the switch tree.
- Second cell is used only when cascading/chaining.
-- #size-cells : Must be 0
-- dsa,ethernet : Should be a phandle to a valid Ethernet device node
-- dsa,mii-bus : Should be a phandle to a valid MDIO bus device node
-
-Optional properties:
-- interrupts : property with a value describing the switch
- interrupt number (not supported by the driver)
-
-A DSA node can contain multiple switch chips which are therefore child nodes of
-the parent DSA node. The maximum number of allowed child nodes is 4
-(DSA_MAX_SWITCHES).
-Each of these switch child nodes should have the following required properties:
-
-- reg : Contains two fields. The first one describes the
- address on the MII bus. The second is the switch
- number that must be unique in cascaded configurations
-- #address-cells : Must be 1
-- #size-cells : Must be 0
-
-A switch child node has the following optional property:
-
-- eeprom-length : Set to the length of an EEPROM connected to the
- switch. Must be set if the switch can not detect
- the presence and/or size of a connected EEPROM,
- otherwise optional.
-
-A switch may have multiple "port" children nodes
-
-Each port children node must have the following mandatory properties:
-- reg : Describes the port address in the switch
-- label : Describes the label associated with this port, special
- labels are "cpu" to indicate a CPU port and "dsa" to
- indicate an uplink/downlink port.
-
-Note that a port labelled "dsa" will imply checking for the uplink phandle
-described below.
-
-Optional property:
-- link : Should be a list of phandles to another switch's DSA port.
- This property is only used when switches are being
- chained/cascaded together. This port is used as outgoing port
- towards the phandle port, which can be more than one hop away.
-
-- phy-handle : Phandle to a PHY on an external MDIO bus, not the
- switch internal one. See
- Documentation/devicetree/bindings/net/ethernet.txt
- for details.
-
-- phy-mode : String representing the connection to the designated
- PHY node specified by the 'phy-handle' property. See
- Documentation/devicetree/bindings/net/ethernet.txt
- for details.
-
-- mii-bus : Should be a phandle to a valid MDIO bus device node.
- This mii-bus will be used in preference to the
- global dsa,mii-bus defined above, for this switch.
-
-Optional subnodes:
-- fixed-link : Fixed-link subnode describing a link to a non-MDIO
- managed entity. See
- Documentation/devicetree/bindings/net/fixed-link.txt
- for details.
-
-Example:
-
- dsa@0 {
- compatible = "marvell,dsa";
- #address-cells = <2>;
- #size-cells = <0>;
-
- interrupts = <10>;
- dsa,ethernet = <&ethernet0>;
- dsa,mii-bus = <&mii_bus0>;
-
- switch@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <16 0>; /* MDIO address 16, switch 0 in tree */
-
- port@0 {
- reg = <0>;
- label = "lan1";
- phy-handle = <&phy0>;
- };
-
- port@1 {
- reg = <1>;
- label = "lan2";
- };
-
- port@5 {
- reg = <5>;
- label = "cpu";
- };
-
- switch0port6: port@6 {
- reg = <6>;
- label = "dsa";
- link = <&switch1port0
- &switch2port0>;
- };
- };
-
- switch@1 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <17 1>; /* MDIO address 17, switch 1 in tree */
- mii-bus = <&mii_bus1>;
- reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-
- switch1port0: port@0 {
- reg = <0>;
- label = "dsa";
- link = <&switch0port6>;
- };
- switch1port1: port@1 {
- reg = <1>;
- label = "dsa";
- link = <&switch2port1>;
- };
- };
-
- switch@2 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <18 2>; /* MDIO address 18, switch 2 in tree */
- mii-bus = <&mii_bus1>;
-
- switch2port0: port@0 {
- reg = <0>;
- label = "dsa";
- link = <&switch1port1
- &switch0port6>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/net/dsa/sja1105.txt b/Documentation/devicetree/bindings/net/dsa/sja1105.txt
new file mode 100644
index 000000000000..13fd21074d48
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/sja1105.txt
@@ -0,0 +1,156 @@
+NXP SJA1105 switch driver
+=========================
+
+Required properties:
+
+- compatible:
+ Must be one of:
+ - "nxp,sja1105e"
+ - "nxp,sja1105t"
+ - "nxp,sja1105p"
+ - "nxp,sja1105q"
+ - "nxp,sja1105r"
+ - "nxp,sja1105s"
+
+ Although the device ID could be detected at runtime, explicit bindings
+ are required so that their validity can be checked statically.
+ For example, SGMII can only be specified on port 4 of R and S devices,
+ and the non-SGMII devices, while pin-compatible, are not equal in terms
+ of support for RGMII internal delays (supported on P/Q/R/S, but not on
+ E/T).
+
+Optional properties:
+
+- sja1105,role-mac:
+- sja1105,role-phy:
+ Boolean properties that can be assigned under each port node. By
+ default (unless otherwise specified) a port is configured as MAC if it
+ is driving a PHY (phy-handle is present) or as PHY if it is PHY-less
+ (fixed-link specified, presumably because it is connected to a MAC).
+ The effect of this property (in either its implicit or explicit form)
+ is:
+ - In the case of MII or RMII it specifies whether the SJA1105 port is a
+ clock source or sink for this interface (not applicable for RGMII
+ where there is a Tx and an Rx clock).
+ - In the case of RGMII it affects the behavior regarding internal
+ delays:
+ 1. If sja1105,role-mac is specified, and the phy-mode property is one
+ of "rgmii-id", "rgmii-txid" or "rgmii-rxid", then the entity
+ designated to apply the delay/clock skew necessary for RGMII
+ is the PHY. The SJA1105 MAC does not apply any internal delays.
+ 2. If sja1105,role-phy is specified, and the phy-mode property is one
+ of the above, the designated entity to apply the internal delays
+ is the SJA1105 MAC (if hardware-supported). This is only supported
+ by the second-generation (P/Q/R/S) hardware. On a first-generation
+ E or T device, it is an error to specify an RGMII phy-mode other
+ than "rgmii" for a port that is in fixed-link mode. In that case,
+ the clock skew must either be added by the MAC at the other end of
+ the fixed-link, or by PCB serpentine traces on the board.
+ These properties are required, for example, in the case where SJA1105
+ ports are at both ends of a MII/RMII PHY-less setup. One end would need
+ to have sja1105,role-mac, while the other sja1105,role-phy.
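+ As a minimal, hypothetical sketch of such a back-to-back RMII setup (the
+ node contents below are illustrative, not taken from a real board), the
+ clock-source end could look like:
+
+   port@2 {
+           reg = <2>;
+           label = "swp2";
+           phy-mode = "rmii";
+           sja1105,role-mac;
+           fixed-link {
+                   speed = <100>;
+                   full-duplex;
+           };
+   };
+
+ with the port at the far end carrying sja1105,role-phy instead.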
+
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for the list of standard
+DSA required and optional properties.
+
+Other observations
+------------------
+
+The SJA1105 SPI interface requires a CS-to-CLK time (t2 in UM10944) of at least
+one half of t_CLK. At an SPI frequency of 1MHz, this means a minimum
+cs_sck_delay of 500ns. Ensuring that this SPI timing requirement is observed
+depends on the SPI bus master driver.
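+As a worked example, at the 4 MHz spi-max-frequency used below, half of t_CLK
+is 125 ns, so the 1000 ns fsl,spi-cs-sck-delay shown in the example meets the
+requirement with ample margin.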
+
+Example
+-------
+
+Ethernet switch connected via SPI to the host, CPU port wired to enet2:
+
+arch/arm/boot/dts/ls1021a-tsn.dts:
+
+/* SPI controller of the LS1021 */
+&dspi0 {
+ sja1105@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nxp,sja1105t";
+ spi-max-frequency = <4000000>;
+ fsl,spi-cs-sck-delay = <1000>;
+ fsl,spi-sck-cs-delay = <1000>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ /* ETH5 written on chassis */
+ label = "swp5";
+ phy-handle = <&rgmii_phy6>;
+ phy-mode = "rgmii-id";
+ reg = <0>;
+ /* Implicit "sja1105,role-mac;" */
+ };
+ port@1 {
+ /* ETH2 written on chassis */
+ label = "swp2";
+ phy-handle = <&rgmii_phy3>;
+ phy-mode = "rgmii-id";
+ reg = <1>;
+ /* Implicit "sja1105,role-mac;" */
+ };
+ port@2 {
+ /* ETH3 written on chassis */
+ label = "swp3";
+ phy-handle = <&rgmii_phy4>;
+ phy-mode = "rgmii-id";
+ reg = <2>;
+ /* Implicit "sja1105,role-mac;" */
+ };
+ port@3 {
+ /* ETH4 written on chassis */
+ phy-handle = <&rgmii_phy5>;
+ label = "swp4";
+ phy-mode = "rgmii-id";
+ reg = <3>;
+ /* Implicit "sja1105,role-mac;" */
+ };
+ port@4 {
+ /* Internal port connected to eth2 */
+ ethernet = <&enet2>;
+ phy-mode = "rgmii";
+ reg = <4>;
+ /* Implicit "sja1105,role-phy;" */
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+};
+
+/* MDIO controller of the LS1021 */
+&mdio0 {
+ /* BCM5464 */
+ rgmii_phy3: ethernet-phy@3 {
+ reg = <0x3>;
+ };
+ rgmii_phy4: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+ rgmii_phy5: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+ rgmii_phy6: ethernet-phy@6 {
+ reg = <0x6>;
+ };
+};
+
+/* Ethernet master port of the LS1021 */
+&enet2 {
+ phy-connection-type = "rgmii";
+ status = "ok";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index a68621580584..e88c3641d613 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -4,12 +4,14 @@ NOTE: All 'phy*' properties documented below are Ethernet specific. For the
generic PHY 'phys' property, see
Documentation/devicetree/bindings/phy/phy-bindings.txt.
-- local-mac-address: array of 6 bytes, specifies the MAC address that was
- assigned to the network device;
- mac-address: array of 6 bytes, specifies the MAC address that was last used by
the boot program; should be used in cases where the MAC address assigned to
the device by the boot program is different from the "local-mac-address"
property;
+- local-mac-address: array of 6 bytes, specifies the MAC address that was
+ assigned to the network device;
+- nvmem-cells: phandle, reference to an nvmem node for the MAC address
+- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in the Devicetree
@@ -36,7 +38,7 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
* "smii"
* "xgmii"
* "trgmii"
- * "2000base-x",
+ * "1000base-x",
* "2500base-x",
* "rxaui"
* "xaui"
diff --git a/Documentation/devicetree/bindings/net/hisilicon-femac.txt b/Documentation/devicetree/bindings/net/hisilicon-femac.txt
index d11af5ecace8..5f96976f3cea 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-femac.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-femac.txt
@@ -14,7 +14,6 @@ Required properties:
the PHY reset signal(optional).
- reset-names: should contain the reset signal name "mac"(required)
and "phy"(optional).
-- mac-address: see ethernet.txt [1].
- phy-mode: see ethernet.txt [1].
- phy-handle: see ethernet.txt [1].
- hisilicon,phy-reset-delays-us: triplet of delays if PHY reset signal given.
@@ -22,6 +21,9 @@ Required properties:
The 2nd cell is reset pulse in micro seconds.
The 3rd cell is reset post-delay in micro seconds.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt[1].
+
[1] Documentation/devicetree/bindings/net/ethernet.txt
Example:
diff --git a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
index eea73adc678f..cddf46bf6b63 100644
--- a/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
+++ b/Documentation/devicetree/bindings/net/hisilicon-hix5hd2-gmac.txt
@@ -18,7 +18,6 @@ Required properties:
- #size-cells: must be <0>.
- phy-mode: see ethernet.txt [1].
- phy-handle: see ethernet.txt [1].
-- mac-address: see ethernet.txt [1].
- clocks: clock phandle and specifier pair.
- clock-names: contain the clock name "mac_core"(required) and "mac_ifc"(optional).
- resets: should contain the phandle to the MAC core reset signal(optional),
@@ -31,6 +30,9 @@ Required properties:
The 2nd cell is reset pulse in micro seconds.
The 3rd cell is reset post-delay in micro seconds.
+The MAC address will be determined using the properties defined in
+ethernet.txt[1].
+
- PHY subnode: inherits from phy binding [2]
[1] Documentation/devicetree/bindings/net/ethernet.txt
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index 04ba1dc34fd6..6262c2f293b0 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -135,14 +135,14 @@ Optional properties:
are swapped. The netcp driver will swap the two DWORDs
back to the proper order when this property is set to 2
when it obtains the mac address from efuse.
-- local-mac-address: the driver is designed to use the of_get_mac_address api
- only if efuse-mac is 0. When efuse-mac is 0, the MAC
- address is obtained from local-mac-address. If this
- attribute is not present, then the driver will use a
- random MAC address.
- "netcp-device label": phandle to the device specification for each of NetCP
sub-module attached to this interface.
+The MAC address will be determined using the optional properties defined in
+ethernet.txt, and only if efuse-mac is set to 0. If none of the optional MAC
+address properties are present, then the driver will use a random MAC
+address.
+
Example binding:
netcp: netcp@2000000 {
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 8b80515729d7..9c5e94482b5f 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -26,9 +26,8 @@ Required properties:
Optional elements: 'tsu_clk'
- clocks: Phandles to input clocks.
-Optional properties:
-- nvmem-cells: phandle, reference to an nvmem node for the MAC address
-- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Optional properties for PHY child node:
- reset-gpios : Should specify the gpio for phy reset
diff --git a/Documentation/devicetree/bindings/net/marvell-pxa168.txt b/Documentation/devicetree/bindings/net/marvell-pxa168.txt
index 845a148a346e..5574af3554aa 100644
--- a/Documentation/devicetree/bindings/net/marvell-pxa168.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pxa168.txt
@@ -11,7 +11,9 @@ Optional properties:
- #address-cells: must be 1 when using sub-nodes.
- #size-cells: must be 0 when using sub-nodes.
- phy-handle: see ethernet.txt file in the same directory.
-- local-mac-address: see ethernet.txt file in the same directory.
+
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Sub-nodes:
Each PHY can be represented as a sub-node. This is not mandatory.
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt b/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt
new file mode 100644
index 000000000000..3a96cbed9294
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-meson-g12a.txt
@@ -0,0 +1,48 @@
+Properties for the MDIO bus multiplexer/glue of Amlogic G12a SoC family.
+
+This is a special case of an MDIO bus multiplexer. It allows choosing between
+the internal MDIO bus, which leads to the embedded 10/100 PHY, and the
+external MDIO bus.
+
+Required properties in addition to the generic multiplexer properties:
+- compatible : amlogic,g12a-mdio-mux
+- reg: physical address and length of the multiplexer/glue registers
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "clkin0" : platform crystal
+ * "clkin1" : SoC 50MHz MPLL
+
+Example :
+
+mdio_mux: mdio-multiplexer@4c000 {
+ compatible = "amlogic,g12a-mdio-mux";
+ reg = <0x0 0x4c000 0x0 0xa4>;
+ clocks = <&clkc CLKID_ETH_PHY>,
+ <&xtal>,
+ <&clkc CLKID_MPLL_5OM>;
+ clock-names = "pclk", "clkin0", "clkin1";
+ mdio-parent-bus = <&mdio0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ext_mdio: mdio@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ int_mdio: mdio@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ internal_ephy: ethernet-phy@8 {
+ compatible = "ethernet-phy-id0180.3301",
+ "ethernet-phy-ieee802.3-c22";
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <8>;
+ max-speed = <100>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
index 24626e082b83..a8275921a896 100644
--- a/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
+++ b/Documentation/devicetree/bindings/net/microchip,enc28j60.txt
@@ -21,8 +21,9 @@ Optional properties:
- spi-max-frequency: Maximum frequency of the SPI bus when accessing the ENC28J60.
According to the ENC28J80 datasheet, the chip allows a maximum of 20 MHz, however,
board designs may need to limit this value.
-- local-mac-address: See ethernet.txt in the same directory.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Example (for NXP i.MX28 with pin control stuff for GPIO irq):
diff --git a/Documentation/devicetree/bindings/net/microchip,lan78xx.txt b/Documentation/devicetree/bindings/net/microchip,lan78xx.txt
index 76786a0f6d3d..11a679530ae6 100644
--- a/Documentation/devicetree/bindings/net/microchip,lan78xx.txt
+++ b/Documentation/devicetree/bindings/net/microchip,lan78xx.txt
@@ -7,9 +7,8 @@ The Device Tree properties, if present, override the OTP and EEPROM.
Required properties:
- compatible: Should be one of "usb424,7800", "usb424,7801" or "usb424,7850".
-Optional properties:
-- local-mac-address: see ethernet.txt
-- mac-address: see ethernet.txt
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Optional properties of the embedded PHY:
- microchip,led-modes: a 0..4 element vector, with each element configuring
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 17c1d2bd00f6..9b9e5b1765dd 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -51,6 +51,10 @@ Optional Properties:
to ensure the integrated PHY is used. The absence of this property indicates
the muxers should be configured so that the external PHY is used.
+- resets: The reset-controller phandle and specifier for the PHY reset signal.
+
+- reset-names: Must be "phy" for the PHY reset signal.
+
- reset-gpios: The GPIO phandle and specifier for the PHY reset signal.
- reset-assert-us: Delay after the reset was asserted in microseconds.
@@ -67,6 +71,8 @@ ethernet-phy@0 {
interrupts = <35 IRQ_TYPE_EDGE_RISING>;
reg = <0>;
+ resets = <&rst 8>;
+ reset-names = "phy";
reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
reset-assert-us = <1000>;
reset-deassert-us = <2000>;
diff --git a/Documentation/devicetree/bindings/net/qca,qca7000.txt b/Documentation/devicetree/bindings/net/qca,qca7000.txt
index e4a8a51086df..21c36e524993 100644
--- a/Documentation/devicetree/bindings/net/qca,qca7000.txt
+++ b/Documentation/devicetree/bindings/net/qca,qca7000.txt
@@ -23,7 +23,6 @@ Optional properties:
Numbers smaller than 1000000 or greater than 16000000
are invalid. Missing the property will set the SPI
frequency to 8000000 Hertz.
-- local-mac-address : see ./ethernet.txt
- qca,legacy-mode : Set the SPI data transfer of the QCA7000 to legacy mode.
In this mode the SPI master must toggle the chip select
between each data word. In burst mode these gaps aren't
@@ -31,6 +30,9 @@ Optional properties:
the QCA7000 is setup via GPIO pin strapping. If the
property is missing the driver defaults to burst mode.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
+
SPI Example:
/* Freescale i.MX28 SPI master*/
diff --git a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
index 824c0e23c544..7ef6118abd3d 100644
--- a/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/qualcomm-bluetooth.txt
@@ -11,20 +11,21 @@ Required properties:
- compatible: should contain one of the following:
* "qcom,qca6174-bt"
* "qcom,wcn3990-bt"
+ * "qcom,wcn3998-bt"
Optional properties for compatible string qcom,qca6174-bt:
- enable-gpios: gpio specifier used to enable chip
- clocks: clock provided to the controller (SUSCLK_32KHZ)
-Required properties for compatible string qcom,wcn3990-bt:
+Required properties for compatible string qcom,wcn399x-bt:
- vddio-supply: VDD_IO supply regulator handle.
- vddxo-supply: VDD_XO supply regulator handle.
- vddrf-supply: VDD_RF supply regulator handle.
- vddch0-supply: VDD_CH0 supply regulator handle.
-Optional properties for compatible string qcom,wcn3990-bt:
+Optional properties for compatible string qcom,wcn399x-bt:
- max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
diff --git a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
index 46e591178911..2cff6d8a585a 100644
--- a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
+++ b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
@@ -21,10 +21,12 @@ Required properties:
range.
Optional properties:
-- mac-address: 6 bytes, mac address
- max-frame-size: Maximum Transfer Unit (IEEE defined MTU), rather
than the maximum frame size.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
+
Example:
aliases {
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
index 36f1aef585f0..ad3c6e109ce1 100644
--- a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -103,8 +103,6 @@ Required properties:
Optional properties:
- dma-coherent: Present if dma operations are coherent
-- mac-address: See ethernet.txt in the same directory
-- local-mac-address: See ethernet.txt in the same directory
- phy-reset-gpios: Phandle and specifier for any GPIO used to reset the PHY.
See ../gpio/gpio.txt.
- snps,en-lpi: If present it enables use of the AXI low-power interface
@@ -133,6 +131,9 @@ Optional properties:
- device_type: Must be "ethernet-phy".
- fixed-mode device tree subnode: see fixed-link.txt in the same directory
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
+
Examples:
ethernet2@40010000 {
clock-names = "phy_ref_clk", "apb_pclk";
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
index fc8f01718690..4e85fc495e87 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
@@ -31,8 +31,8 @@ Required properties:
- socionext,syscon-phy-mode: A phandle to syscon with one argument
that configures phy mode. The argument is the ID of MAC instance.
-Optional properties:
- - local-mac-address: See ethernet.txt in the same directory.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
Required subnode:
- mdio: A container for child nodes representing phy nodes.
diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt
index 0cff94fb0433..9d6c9feb12ff 100644
--- a/Documentation/devicetree/bindings/net/socionext-netsec.txt
+++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt
@@ -26,11 +26,12 @@ Required properties:
Optional properties: (See ethernet.txt file in the same directory)
- dma-coherent: Boolean property, must only be present if memory
accesses performed by the device are cache coherent.
-- local-mac-address: See ethernet.txt in the same directory.
-- mac-address: See ethernet.txt in the same directory.
- max-speed: See ethernet.txt in the same directory.
- max-frame-size: See ethernet.txt in the same directory.
+The MAC address will be determined using the optional properties
+defined in ethernet.txt.
+
Example:
eth0: ethernet@522d0000 {
compatible = "socionext,synquacer-netsec";
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
index 7b9a776230c0..7e675dafc256 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
@@ -13,11 +13,12 @@ properties:
Optional properties:
-- mac-address: See ethernet.txt in the parent directory
-- local-mac-address: See ethernet.txt in the parent directory
- ieee80211-freq-limit: See ieee80211.txt
- mediatek,mtd-eeprom: Specify a MTD partition + offset containing EEPROM data
+The MAC address can also be set using the corresponding optional properties
+defined in net/ethernet.txt.
+
Optional nodes:
- led: Properties for a connected LED
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt
index b7396c8c271c..aaaeeb5f935b 100644
--- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.txt
@@ -34,9 +34,9 @@ Optional properties:
ath9k wireless chip (in this case the calibration /
EEPROM data will be loaded from userspace using the
kernel firmware loader).
-- mac-address: See ethernet.txt in the parent directory
-- local-mac-address: See ethernet.txt in the parent directory
+The MAC address will be determined using the optional properties defined in
+net/ethernet.txt.
In this example, the node is defined as child node of the PCI controller:
&pci0 {
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
index 99c4ba6a3f61..cfb18b4ef8f7 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
@@ -8,11 +8,12 @@ Required properties:
"allwinner,sun8i-h3-sid"
"allwinner,sun50i-a64-sid"
"allwinner,sun50i-h5-sid"
+ "allwinner,sun50i-h6-sid"
- reg: Should contain registers location and length
= Data cells =
-Are child nodes of qfprom, bindings of which as described in
+Are child nodes of sunxi-sid, whose bindings are described in
bindings/nvmem/nvmem.txt
Example for sun4i:
diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
index 7a999a135e56..68f7d6fdd140 100644
--- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
+++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt
@@ -1,7 +1,8 @@
Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ, i.MX6SLL,
+i.MX7D/S, i.MX7ULP and i.MX8MQ SoCs.
Required properties:
- compatible: should be one of
@@ -13,6 +14,7 @@ Required properties:
"fsl,imx7d-ocotp" (i.MX7D/S),
"fsl,imx6sll-ocotp" (i.MX6SLL),
"fsl,imx7ulp-ocotp" (i.MX7ULP),
+ "fsl,imx8mq-ocotp" (i.MX8MQ),
followed by "syscon".
- #address-cells : Should be 1
- #size-cells : Should be 1
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
new file mode 100644
index 000000000000..142a51d5a9be
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.txt
@@ -0,0 +1,31 @@
+STMicroelectronics STM32 Factory-programmed data device tree bindings
+
+This represents STM32 Factory-programmed read only non-volatile area: locked
+flash, OTP, read-only HW regs... This contains various information such as:
+analog calibration data for temperature sensor (e.g. TS_CAL1, TS_CAL2),
+internal vref (VREFIN_CAL), unique device ID...
+
+Required properties:
+- compatible: Should be one of:
+ "st,stm32f4-otp"
+ "st,stm32mp15-bsec"
+- reg: Offset and length of factory-programmed area.
+- #address-cells: Should be '<1>'.
+- #size-cells: Should be '<1>'.
+
+Optional Data cells:
+- Must be child nodes as described in nvmem.txt.
+
+Example on stm32f4:
+ romem: nvmem@1fff7800 {
+ compatible = "st,stm32f4-otp";
+ reg = <0x1fff7800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* Data cells: ts_cal1 at 0x1fff7a2c */
+ ts_cal1: calib@22c {
+ reg = <0x22c 0x2>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index c124f9bc11f3..5561a1c060d0 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -4,8 +4,11 @@ Required properties:
- compatible:
"snps,dw-pcie" for RC mode;
"snps,dw-pcie-ep" for EP mode;
-- reg: Should contain the configuration address space.
-- reg-names: Must be "config" for the PCIe configuration space.
+- reg: For DesignWare cores of version < 4.80, contains the configuration
+ address space. For cores of version >= 4.80, contains the configuration
+ and ATU address spaces.
+- reg-names: Must be "config" for the PCIe configuration space and "atu" for
+ the ATU address space (see the sketch below).
(The old way of getting the configuration address space from "ranges"
is deprecated and should be avoided.)
- num-lanes: number of lanes to use
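+
+A minimal sketch of the two reg entries for a core of version >= 4.80 (the
+addresses and sizes here are hypothetical, not taken from any real platform):
+
+	reg = <0x01ff0000 0x1000>, <0x01ff8000 0x1000>;
+	reg-names = "config", "atu";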
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 2030ee0dc4f9..47202a2938f2 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -11,16 +11,24 @@ described here as well as properties that are not applicable.
Required Properties:-
-compatibility: "ti,keystone-pcie"
-reg: index 1 is the base address and length of DW application registers.
- index 2 is the base address and length of PCI device ID register.
+compatibility: Should be "ti,keystone-pcie" for RC on Keystone2 SoC
+ Should be "ti,am654-pcie-rc" for RC on AM654x SoC
+reg: Three register ranges as listed in the reg-names property
+reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
+ TI specific application registers, "config" for the
+ configuration space address
pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+ (required if the compatible is "ti,keystone-pcie")
+msi-map: As specified in Documentation/devicetree/bindings/pci/pci-msi.txt
+	(required if the compatible is "ti,am654-pcie-rc").
ti,syscon-pcie-id : phandle to the device control module required to set device
id and vendor id.
+ti,syscon-pcie-mode : phandle to the device control module required to configure
+ PCI in either RC mode or EP mode.
Example:
pcie_msi_intc: msi-interrupt-controller {
@@ -61,3 +69,47 @@ Optional properties:-
DesignWare DT Properties not applicable for Keystone PCI
1. pcie_bus clock-names not used. Instead, a phandle to phys is used.
+
+AM654 PCIe Endpoint
+===================
+
+Required Properties:-
+
+compatibility: Should be "ti,am654-pcie-ep" for EP on AM654x SoC
+reg: Four register ranges as listed in the reg-names property
+reg-names: "dbics" for the DesignWare PCIe registers, "app" for the
+ TI specific application registers, "atu" for the
+ Address Translation Unit configuration registers and
+ "addr_space" used to map remote RC address space
+num-ib-windows: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+num-ob-windows: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+num-lanes: As specified in
+ Documentation/devicetree/bindings/pci/designware-pcie.txt
+power-domains: As documented by the generic PM domain bindings in
+ Documentation/devicetree/bindings/power/power_domain.txt.
+ti,syscon-pcie-mode: phandle to the device control module required to configure
+ PCI in either RC mode or EP mode.
+
+Optional properties:-
+
+phys: list of PHY specifiers (used by generic PHY framework)
+phy-names: must be "pcie-phy0", "pcie-phy1", ... "pcie-phyN", based on the
+ number of lanes as specified in the *num-lanes* property.
+("phys" and "phy-names" DT bindings are specified in
+Documentation/devicetree/bindings/phy/phy-bindings.txt)
+interrupts: platform interrupt for error interrupts.
+
+Example:
+
+pcie-ep {
+ compatible = "ti,am654-pcie-ep";
+ reg = <0x5500000 0x1000>, <0x5501000 0x1000>,
+ <0x10000000 0x8000000>, <0x5506000 0x1000>;
+ reg-names = "app", "dbics", "addr_space", "atu";
+ power-domains = <&k3_pds 120>;
+ ti,syscon-pcie-mode = <&pcie0_mode>;
+ num-lanes = <1>;
+ num-ib-windows = <16>;
+ num-ob-windows = <16>;
+ interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
+};
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index c77981c5dd18..92c01db610df 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -24,3 +24,53 @@ driver implementation may support the following properties:
unsupported link speed, for instance, trying to do training for
unsupported link speed, etc. Must be '4' for gen4, '3' for gen3, '2'
for gen2, and '1' for gen1. Any other values are invalid.
+
+PCI-PCI Bridge properties
+-------------------------
+
+PCIe root ports and switch ports may be described explicitly in the device
+tree, as children of the host bridge node. Even though those devices are
+discoverable by probing, it might be necessary to describe properties that
+aren't provided by standard PCIe capabilities.
+
+Required properties:
+
+- reg:
+ Identifies the PCI-PCI bridge. As defined in the IEEE Std 1275-1994
+ document, it is a five-cell address encoded as (phys.hi phys.mid
+ phys.lo size.hi size.lo). phys.hi should contain the device's BDF as
+ 0b00000000 bbbbbbbb dddddfff 00000000. The other cells should be zero.
+
+ The bus number is defined by firmware, through the standard bridge
+ configuration mechanism. If this port is a switch port, then firmware
+ allocates the bus number and writes it into the Secondary Bus Number
+ register of the bridge directly above this port. Otherwise, the bus
+ number of a root port is the first number in the bus-range property,
+ defaulting to zero.
+
+ If firmware leaves the ARI Forwarding Enable bit set in the bridge
+ above this port, then phys.hi contains the 8-bit function number as
+ 0b00000000 bbbbbbbb ffffffff 00000000. Note that the PCIe specification
+ recommends that firmware only leaves ARI enabled when it knows that the
+ OS is ARI-aware.
+
+Optional properties:
+
+- external-facing:
+ When present, the port is external-facing. All bridges and endpoints
+ downstream of this port are external to the machine. The OS can, for
+ example, use this information to identify devices that cannot be
+ trusted with relaxed DMA protection, as users could easily attach
+ malicious devices to this port.
+
+Example:
+
+pcie@10000000 {
+ compatible = "pci-host-ecam-generic";
+ ...
+ pcie@0008 {
+ /* Root port 00:01.0 is external-facing */
+ reg = <0x00000800 0 0 0 0>;
+ external-facing;
+ };
+};
diff --git a/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt b/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt
new file mode 100644
index 000000000000..4ba298966af9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,stingray-usb-phy.txt
@@ -0,0 +1,32 @@
+Broadcom Stingray USB PHY
+
+Required properties:
+ - compatible : should be one of the listed compatibles
+ - "brcm,sr-usb-combo-phy" is a combo PHY that has two PHYs, one SS and one HS.
+ - "brcm,sr-usb-hs-phy" is a single HS PHY.
+ - reg: offset and length of the PHY blocks registers
+ - #phy-cells:
+ - Must be 1 for brcm,sr-usb-combo-phy, as it expects one argument selecting
+ one of its two PHYs: 0 for the HS PHY and 1 for the SS PHY (see the
+ consumer sketch after the example below).
+ - Must be 0 for brcm,sr-usb-hs-phy.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties
+
+Example:
+ usbphy0: usb-phy@0 {
+ compatible = "brcm,sr-usb-combo-phy";
+ reg = <0x00000000 0x100>;
+ #phy-cells = <1>;
+ };
+
+ usbphy1: usb-phy@10000 {
+ compatible = "brcm,sr-usb-combo-phy";
+ reg = <0x00010000 0x100>;
+ #phy-cells = <1>;
+ };
+
+ usbphy2: usb-phy@20000 {
+ compatible = "brcm,sr-usb-hs-phy";
+ reg = <0x00020000 0x100>;
+ #phy-cells = <0>;
+ };
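+
+	A consumer node (hypothetical; the node name, address and phy-names
+	values below are only illustrative) would then pick the SS or HS
+	instance of a combo PHY through the single specifier cell:
+
+	usb@68500000 {
+		...
+		phys = <&usbphy0 1>, <&usbphy0 0>;
+		phy-names = "ss-phy", "hs-phy";
+	};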
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt
index a22e853d710c..ed47e5cd067e 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt
@@ -7,6 +7,9 @@ Required properties:
- clocks: phandles to the clocks for each clock listed in clock-names
- clock-names: must contain "phy"
+Optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+
Example:
usb3_phy0: phy@381f0040 {
compatible = "fsl,imx8mq-usb-phy";
diff --git a/Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt b/Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
new file mode 100644
index 000000000000..a6ebc3dea159
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
@@ -0,0 +1,22 @@
+* Amlogic G12A USB2 PHY binding
+
+Required properties:
+- compatible: Should be "amlogic,meson-g12a-usb2-phy"
+- reg: The base address and length of the registers
+- #phy-cells: must be 0 (see phy-bindings.txt in this directory)
+- clocks: a phandle to the clock of this PHY
+- clock-names: must be "xtal"
+- resets: a phandle to the reset line of this PHY
+- reset-names: must be "phy"
+- phy-supply: see phy-bindings.txt in this directory
+
+Example:
+ usb2_phy0: phy@36000 {
+ compatible = "amlogic,g12a-usb2-phy";
+ reg = <0x0 0x36000 0x0 0x2000>;
+ clocks = <&xtal>;
+ clock-names = "xtal";
+ resets = <&reset RESET_USB_PHY21>;
+ reset-names = "phy";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt b/Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
new file mode 100644
index 000000000000..7cfc17e2df31
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
@@ -0,0 +1,22 @@
+* Amlogic G12A USB3 + PCIE Combo PHY binding
+
+Required properties:
+- compatible: Should be "amlogic,meson-g12a-usb3-pcie-phy"
+- #phy-cells: must be 1. The cell number is used to select the phy mode
+  as defined in <dt-bindings/phy/phy.h> between PHY_TYPE_USB3 and PHY_TYPE_PCIE
+  (see the consumer sketch after the example below)
+- reg: The base address and length of the registers
+- clocks: a phandle to the 100MHz reference clock of this PHY
+- clock-names: must be "ref_clk"
+- resets: phandle to the reset lines for the PHY control
+- reset-names: must be "phy"
+
+Example:
+ usb3_pcie_phy: phy@46000 {
+ compatible = "amlogic,g12a-usb3-pcie-phy";
+ reg = <0x0 0x46000 0x0 0x2000>;
+ clocks = <&clkc CLKID_PCIE_PLL>;
+ clock-names = "ref_clk";
+ resets = <&reset RESET_PCIE_PHY>;
+ reset-names = "phy";
+ #phy-cells = <1>;
+ };
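+
+	A consumer node (hypothetical; only the PHY specifier usage is the
+	point here) selects the mode through the single cell, e.g. for USB3:
+
+	usb: usb@ffe09000 {
+		...
+		phys = <&usb3_pcie_phy PHY_TYPE_USB3>;
+		phy-names = "usb3-phy";
+	};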
diff --git a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
index 3742c152c467..daedb15f322e 100644
--- a/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
+++ b/Documentation/devicetree/bindings/phy/nvidia,tegra124-xusb-padctl.txt
@@ -36,11 +36,20 @@ Required properties:
- Tegra124: "nvidia,tegra124-xusb-padctl"
- Tegra132: "nvidia,tegra132-xusb-padctl", "nvidia,tegra124-xusb-padctl"
- Tegra210: "nvidia,tegra210-xusb-padctl"
+ - Tegra186: "nvidia,tegra186-xusb-padctl"
- reg: Physical base address and length of the controller's registers.
- resets: Must contain an entry for each entry in reset-names.
- reset-names: Must include the following entries:
- "padctl"
+For Tegra186:
+- avdd-pll-erefeut-supply: UPHY brick and reference clock as well as UTMI PHY
+ power supply. Must supply 1.8 V.
+- avdd-usb-supply: USB I/Os, VBUS, ID, REXT, D+/D- power supply. Must supply
+ 3.3 V.
+- vclamp-usb-supply: Bias rail for USB pad. Must supply 1.8 V.
+- vddio-hsic-supply: HSIC PHY power supply. Must supply 1.2 V.
+
Pad nodes:
==========
diff --git a/Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt b/Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
new file mode 100644
index 000000000000..e88ba7d92dcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
@@ -0,0 +1,26 @@
+Hisilicon hi3660 USB PHY
+-----------------------
+
+Required properties:
+- compatible: should be "hisilicon,hi3660-usb-phy"
+- #phy-cells: must be 0
+- hisilicon,pericrg-syscon: phandle of syscon used to control phy.
+- hisilicon,pctrl-syscon: phandle of syscon used to control phy.
+- hisilicon,eye-diagram-param: parameter set for phy
+Refer to phy/phy-bindings.txt for the generic PHY binding properties
+
+This is a subnode of usb3_otg_bc register node.
+
+Example:
+ usb3_otg_bc: usb3_otg_bc@ff200000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x0 0xff200000 0x0 0x1000>;
+
+ usb-phy {
+ compatible = "hisilicon,hi3660-usb-phy";
+ #phy-cells = <0>;
+ hisilicon,pericrg-syscon = <&crg_ctrl>;
+ hisilicon,pctrl-syscon = <&pctrl>;
+ hisilicon,eye-diagram-param = <0x22466e4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt b/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt
new file mode 100644
index 000000000000..5789029a1d42
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-mtk-ufs.txt
@@ -0,0 +1,38 @@
+MediaTek Universal Flash Storage (UFS) M-PHY binding
+--------------------------------------------------------
+
+UFS M-PHY nodes are defined to describe on-chip UFS M-PHY hardware macro.
+Each UFS M-PHY node should have its own node.
+
+To bind UFS M-PHY with UFS host controller, the controller node should
+contain a phandle reference to UFS M-PHY node.
+
+Required properties for UFS M-PHY nodes:
+- compatible : Compatible list, contains the following controller:
+ "mediatek,mt8183-ufsphy" for ufs phy
+ present on MT81xx chipsets.
+- reg : Address and length of the UFS M-PHY register set.
+- #phy-cells : This property shall be set to 0.
+- clocks : List of phandle and clock specifier pairs.
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. Following clocks are
+ mandatory.
+ "unipro": Unipro core control clock.
+ "mp": M-PHY core control clock.
+
+Example:
+
+ ufsphy: phy@11fa0000 {
+ compatible = "mediatek,mt8183-ufsphy";
+ reg = <0 0x11fa0000 0 0xc000>;
+ #phy-cells = <0>;
+
+ clocks = <&infracfg_ao INFRACFG_AO_UNIPRO_SCK_CG>,
+ <&infracfg_ao INFRACFG_AO_UFS_MP_SAP_BCLK_CG>;
+ clock-names = "unipro", "mp";
+ };
+
+ ufshci@11270000 {
+ ...
+ phys = <&ufsphy>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 5d181fc3cc18..085fbd676cfc 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -11,6 +11,7 @@ Required properties:
"qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
"qcom,msm8998-qmp-usb3-phy" for USB3 QMP V3 phy on msm8998,
"qcom,msm8998-qmp-ufs-phy" for UFS QMP phy on msm8998,
+ "qcom,msm8998-qmp-pcie-phy" for PCIe QMP phy on msm8998,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
"qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
"qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
@@ -48,6 +49,8 @@ Required properties:
"aux", "cfg_ahb", "ref".
For "qcom,msm8998-qmp-ufs-phy" must contain:
"ref", "ref_aux".
+ For "qcom,msm8998-qmp-pcie-phy" must contain:
+ "aux", "cfg_ahb", "ref".
For "qcom,sdm845-qmp-usb3-phy" must contain:
"aux", "cfg_ahb", "ref", "com_aux".
For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
@@ -59,7 +62,8 @@ Required properties:
one for each entry in reset-names.
- reset-names: "phy" for reset of phy block,
"common" for phy common block reset,
- "cfg" for phy's ahb cfg block reset.
+ "cfg" for phy's ahb cfg block reset,
+ "ufsphy" for the PHY reset in the UFS controller.
For "qcom,ipq8074-qmp-pcie-phy" must contain:
"phy", "common".
@@ -69,12 +73,16 @@ Required properties:
"phy", "common".
For "qcom,msm8998-qmp-usb3-phy" must contain
"phy", "common".
- For "qcom,msm8998-qmp-ufs-phy": no resets are listed.
+ For "qcom,msm8998-qmp-ufs-phy": must contain:
+ "ufsphy".
+ For "qcom,msm8998-qmp-pcie-phy" must contain:
+ "phy", "common".
For "qcom,sdm845-qmp-usb3-phy" must contain:
"phy", "common".
For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
"phy", "common".
- For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
+ For "qcom,sdm845-qmp-ufs-phy": must contain:
+ "ufsphy".
- vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
index 4f0879a0ca12..ac96d6481bb8 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
@@ -7,6 +7,7 @@ Required properties:
- compatible: "renesas,usb-phy-r8a7743" if the device is a part of R8A7743 SoC.
"renesas,usb-phy-r8a7744" if the device is a part of R8A7744 SoC.
"renesas,usb-phy-r8a7745" if the device is a part of R8A7745 SoC.
+ "renesas,usb-phy-r8a77470" if the device is a part of R8A77470 SoC.
"renesas,usb-phy-r8a7790" if the device is a part of R8A7790 SoC.
"renesas,usb-phy-r8a7791" if the device is a part of R8A7791 SoC.
"renesas,usb-phy-r8a7794" if the device is a part of R8A7794 SoC.
@@ -30,7 +31,7 @@ channels. These subnodes must contain the following properties:
- #phy-cells: see phy-bindings.txt in the same directory, must be <1>.
The phandle's argument in the PHY specifier is the USB controller selector for
-the USB channel; see the selector meanings below:
+the USB channel on SoCs other than the r8a77470; see the selector meanings below:
+-----------+---------------+---------------+
|\ Selector | | |
@@ -41,6 +42,16 @@ the USB channel; see the selector meanings below:
| 2 | PCI EHCI/OHCI | xHCI |
+-----------+---------------+---------------+
+For the r8a77470 SoC, see the selector meaning below:
+
++-----------+---------------+---------------+
+|\ Selector | | |
++ --------- + 0 | 1 |
+| Channel \| | |
++-----------+---------------+---------------+
+| 0 | EHCI/OHCI | HS-USB |
++-----------+---------------+---------------+
+
Example (Lager board):
usb-phy@e6590100 {
@@ -48,15 +59,53 @@ Example (Lager board):
reg = <0 0xe6590100 0 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
+ clocks = <&cpg CPG_MOD 704>;
clock-names = "usbhs";
+ power-domains = <&sysc R8A7790_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
- usb-channel@0 {
+ usb0: usb-channel@0 {
reg = <0>;
#phy-cells = <1>;
};
- usb-channel@2 {
+ usb2: usb-channel@2 {
reg = <2>;
#phy-cells = <1>;
};
};
+
+Example (iWave RZ/G1C sbc):
+
+ usbphy0: usb-phy@e6590100 {
+ compatible = "renesas,usb-phy-r8a77470",
+ "renesas,rcar-gen2-usb-phy";
+ reg = <0 0xe6590100 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cpg CPG_MOD 704>;
+ clock-names = "usbhs";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
+
+ usb0: usb-channel@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+ };
+
+ usbphy1: usb-phy@e6598100 {
+ compatible = "renesas,usb-phy-r8a77470",
+ "renesas,rcar-gen2-usb-phy";
+ reg = <0 0xe6598100 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cpg CPG_MOD 706>;
+ clock-names = "usbhs";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 706>;
+
+ usb1: usb-channel@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index ad9c290d8f15..d46188f450bf 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -1,10 +1,12 @@
* Renesas R-Car generation 3 USB 2.0 PHY
This file provides information on what the device node for the R-Car generation
-3 and RZ/G2 USB 2.0 PHY contain.
+3, RZ/G1C and RZ/G2 USB 2.0 PHY contains.
Required properties:
-- compatible: "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
+- compatible: "renesas,usb2-phy-r8a77470" if the device is a part of an R8A77470
+ SoC.
+ "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
SoC.
"renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0
SoC.
@@ -27,7 +29,13 @@ Required properties:
- reg: offset and length of the partial USB 2.0 Host register block.
- clocks: clock phandle and specifier pair(s).
-- #phy-cells: see phy-bindings.txt in the same directory, must be <0>.
+- #phy-cells: see phy-bindings.txt in the same directory, must be <1> (and
+ using <0> is deprecated).
+
+The phandle's argument in the PHY specifier is the INT_STATUS bit of the controller:
+- 1 = USBH_INTA (OHCI)
+- 2 = USBH_INTB (EHCI)
+- 3 = UCOM_INT (OTG and BC)
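+
+For instance, an EHCI controller node (a hypothetical consumer; the phy-names
+value is assumed from the generic USB HCD bindings) would reference the
+USBH_INTB status bit with:
+
+	phys = <&usb2_phy0 2>;
+	phy-names = "usb";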
Optional properties:
To use a USB channel where USB 2.0 Host and HSUSB (USB 2.0 Peripheral) are
diff --git a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
index e3ea55763b0a..e728786f21e0 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
@@ -7,12 +7,15 @@ Required properties:
- reg: PHY register address offset and length in "general
register files"
-Optional clocks using the clock bindings (see ../clock/clock-bindings.txt),
-specified by name:
+Optional properties:
- clock-names: Should contain "emmcclk". Although this is listed as optional
(because most boards can get basic functionality without having
access to it), it is strongly suggested.
+ See ../clock/clock-bindings.txt for details.
- clocks: Should have a phandle to the card clock exported by the SDHCI driver.
+ - drive-impedance-ohm: Specifies the drive impedance in Ohm.
+ Possible values are 33, 40, 50, 66 and 100.
+ If not set, the default value of 50 will be applied.
Example:
@@ -29,6 +32,7 @@ grf: syscon@ff770000 {
reg = <0xf780 0x20>;
clocks = <&sdhci>;
clock-names = "emmcclk";
+ drive-impedance-ohm = <50>;
#phy-cells = <0>;
};
};
diff --git a/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt
new file mode 100644
index 000000000000..64b286d2d398
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/ti,phy-am654-serdes.txt
@@ -0,0 +1,82 @@
+TI AM654 SERDES
+
+Required properties:
+ - compatible: Should be "ti,phy-am654-serdes"
+ - reg : Address and length of the register set for the device.
+ - #phy-cells: determine the number of cells that should be given in the
+ phandle while referencing this phy. Should be "2". The 1st cell
+ corresponds to the phy type (should be one of the types specified in
+ include/dt-bindings/phy/phy.h) and the 2nd cell should be the serdes
+ lane function.
+ If SERDES0 is referenced 2nd cell should be:
+ 0 - USB3
+ 1 - PCIe0 Lane0
+ 2 - ICSS2 SGMII Lane0
+ If SERDES1 is referenced 2nd cell should be:
+ 0 - PCIe1 Lane0
+ 1 - PCIe0 Lane1
+ 2 - ICSS2 SGMII Lane1
+ - power-domains: As documented by the generic PM domain bindings in
+ Documentation/devicetree/bindings/power/power_domain.txt.
+ - clocks: List of clock-specifiers representing the input to the SERDES.
+ Should have 3 items representing the left input clock, external
+ reference clock and right input clock in that order.
+ - clock-output-names: List of clock names for each of the clock outputs of
+ SERDES. Should have 3 items for CMU reference clock,
+ left output clock and right output clock in that order.
+ - assigned-clocks: As defined in
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - assigned-clock-parents: As defined in
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - #clock-cells: Should be <1> to choose between the 3 output clocks.
+ Defined in Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+ The following macros are defined in dt-bindings/phy/phy-am654-serdes.h
+ for selecting the correct reference clock. This can be used while
+ specifying the clocks created by SERDES.
+ => AM654_SERDES_CMU_REFCLK
+ => AM654_SERDES_LO_REFCLK
+ => AM654_SERDES_RO_REFCLK
+
+ - mux-controls: Phandle to the multiplexer that is used to select the lane
+ function. See #phy-cells above for the multiplex values.
+
+Example:
+
+Example for SERDES0 is given below. It has 3 clock inputs:
+left input reference clock as indicated by <&k3_clks 153 4>, external
+reference clock as indicated by <&k3_clks 153 1> and right input
+reference clock as indicated by <&serdes1 AM654_SERDES_LO_REFCLK>. (The
+right input of SERDES0 is connected to the left output of SERDES1).
+
+SERDES0 registers 3 clock outputs as indicated in clock-output-names. The
+first refers to the CMU reference clock, second refers to the left output
+reference clock and the third refers to the right output reference clock.
+
+The assigned-clocks and assigned-clock-parents are used here to set the
+parent of left input reference clock to MAINHSDIV_CLKOUT4 and parent of
+CMU reference clock to left input reference clock.
+
+serdes0: serdes@900000 {
+ compatible = "ti,phy-am654-serdes";
+ reg = <0x0 0x900000 0x0 0x2000>;
+ reg-names = "serdes";
+ #phy-cells = <2>;
+ power-domains = <&k3_pds 153>;
+ clocks = <&k3_clks 153 4>, <&k3_clks 153 1>,
+ <&serdes1 AM654_SERDES_LO_REFCLK>;
+ clock-output-names = "serdes0_cmu_refclk", "serdes0_lo_refclk",
+ "serdes0_ro_refclk";
+ assigned-clocks = <&k3_clks 153 4>, <&serdes0 AM654_SERDES_CMU_REFCLK>;
+ assigned-clock-parents = <&k3_clks 153 8>, <&k3_clks 153 4>;
+ ti,serdes-clk = <&serdes0_clk>;
+ mux-controls = <&serdes_mux 0>;
+ #clock-cells = <1>;
+};
+
+Example for PCIe consumer node using the SERDES PHY specifier is given below.
+&pcie0_rc {
+ num-lanes = <2>;
+ phys = <&serdes0 PHY_TYPE_PCIE 1>, <&serdes1 PHY_TYPE_PCIE 1>;
+ phy-names = "pcie-phy0", "pcie-phy1";
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
new file mode 100644
index 000000000000..ed34bb1ee81c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
@@ -0,0 +1,98 @@
+Bitmain BM1880 Pin Controller
+
+This binding describes the pin controller found in the BM1880 SoC.
+
+Required Properties:
+
+- compatible: Should be "bitmain,bm1880-pinctrl"
+- reg: Offset and length of pinctrl space in SCTRL.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration for the BM1880
+SoC includes only pinmux, as there is no pinconf support available in the SoC.
+
+Each configuration node can consist of multiple nodes describing the pinmux
+options. The name of each subnode is not important; all subnodes should be
+enumerated and processed purely based on their content.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pinmux subnode:
+
+Required Properties:
+
+- pins: An array of strings, each string containing the name of a pin.
+ Valid values for pins are:
+
+ MIO0 - MIO111
+
+- groups: An array of strings, each string containing the name of a pin
+ group. Valid values for groups are:
+
+ nand_grp, spi_grp, emmc_grp, sdio_grp, eth0_grp, pwm0_grp,
+ pwm1_grp, pwm2_grp, pwm3_grp, pwm4_grp, pwm5_grp, pwm6_grp,
+ pwm7_grp, pwm8_grp, pwm9_grp, pwm10_grp, pwm11_grp, pwm12_grp,
+ pwm13_grp, pwm14_grp, pwm15_grp, pwm16_grp, pwm17_grp,
+ pwm18_grp, pwm19_grp, pwm20_grp, pwm21_grp, pwm22_grp,
+ pwm23_grp, pwm24_grp, pwm25_grp, pwm26_grp, pwm27_grp,
+ pwm28_grp, pwm29_grp, pwm30_grp, pwm31_grp, pwm32_grp,
+ pwm33_grp, pwm34_grp, pwm35_grp, pwm36_grp, i2c0_grp,
+ i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp, uart0_grp, uart1_grp,
+ uart2_grp, uart3_grp, uart4_grp, uart5_grp, uart6_grp,
+ uart7_grp, uart8_grp, uart9_grp, uart10_grp, uart11_grp,
+ uart12_grp, uart13_grp, uart14_grp, uart15_grp, gpio0_grp,
+ gpio1_grp, gpio2_grp, gpio3_grp, gpio4_grp, gpio5_grp,
+ gpio6_grp, gpio7_grp, gpio8_grp, gpio9_grp, gpio10_grp,
+ gpio11_grp, gpio12_grp, gpio13_grp, gpio14_grp, gpio15_grp,
+ gpio16_grp, gpio17_grp, gpio18_grp, gpio19_grp, gpio20_grp,
+ gpio21_grp, gpio22_grp, gpio23_grp, gpio24_grp, gpio25_grp,
+ gpio26_grp, gpio27_grp, gpio28_grp, gpio29_grp, gpio30_grp,
+ gpio31_grp, gpio32_grp, gpio33_grp, gpio34_grp, gpio35_grp,
+ gpio36_grp, gpio37_grp, gpio38_grp, gpio39_grp, gpio40_grp,
+ gpio41_grp, gpio42_grp, gpio43_grp, gpio44_grp, gpio45_grp,
+ gpio46_grp, gpio47_grp, gpio48_grp, gpio49_grp, gpio50_grp,
+ gpio51_grp, gpio52_grp, gpio53_grp, gpio54_grp, gpio55_grp,
+ gpio56_grp, gpio57_grp, gpio58_grp, gpio59_grp, gpio60_grp,
+ gpio61_grp, gpio62_grp, gpio63_grp, gpio64_grp, gpio65_grp,
+ gpio66_grp, gpio67_grp, eth1_grp, i2s0_grp, i2s0_mclkin_grp,
+ i2s1_grp, i2s1_mclkin_grp, spi0_grp
+
+- function: An array of strings, each string containing the name of the
+ pinmux functions. The following are the list of pinmux
+ functions available:
+
+ nand, spi, emmc, sdio, eth0, pwm0, pwm1, pwm2, pwm3, pwm4,
+ pwm5, pwm6, pwm7, pwm8, pwm9, pwm10, pwm11, pwm12, pwm13,
+ pwm14, pwm15, pwm16, pwm17, pwm18, pwm19, pwm20, pwm21, pwm22,
+ pwm23, pwm24, pwm25, pwm26, pwm27, pwm28, pwm29, pwm30, pwm31,
+ pwm32, pwm33, pwm34, pwm35, pwm36, i2c0, i2c1, i2c2, i2c3,
+ i2c4, uart0, uart1, uart2, uart3, uart4, uart5, uart6, uart7,
+ uart8, uart9, uart10, uart11, uart12, uart13, uart14, uart15,
+ gpio0, gpio1, gpio2, gpio3, gpio4, gpio5, gpio6, gpio7, gpio8,
+ gpio9, gpio10, gpio11, gpio12, gpio13, gpio14, gpio15, gpio16,
+ gpio17, gpio18, gpio19, gpio20, gpio21, gpio22, gpio23,
+ gpio24, gpio25, gpio26, gpio27, gpio28, gpio29, gpio30,
+ gpio31, gpio32, gpio33, gpio34, gpio35, gpio36, gpio37,
+ gpio38, gpio39, gpio40, gpio41, gpio42, gpio43, gpio44,
+ gpio45, gpio46, gpio47, gpio48, gpio49, gpio50, gpio51,
+ gpio52, gpio53, gpio54, gpio55, gpio56, gpio57, gpio58,
+ gpio59, gpio60, gpio61, gpio62, gpio63, gpio64, gpio65,
+ gpio66, gpio67, eth1, i2s0, i2s0_mclkin, i2s1, i2s1_mclkin,
+ spi0
+
+Example:
+ pinctrl: pinctrl@50 {
+ compatible = "bitmain,bm1880-pinctrl";
+ reg = <0x50 0x4B0>;
+
+ pinctrl_uart0_default: uart0-default {
+ pinmux {
+ groups = "uart0_grp";
+ function = "uart0";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..a87447180e83
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
@@ -0,0 +1,141 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the pinctrl portion
+of the driver.
+
+Also see these documents for generic binding information:
+ [1] GPIO : ../gpio/gpio.txt
+ [2] Pinctrl: ../pinctrl/pinctrl-bindings.txt
+
+And these for relevant defines:
+ [3] include/dt-bindings/pinctrl/lochnagar.h
+
+This binding must be part of the Lochnagar MFD binding:
+ [4] ../mfd/cirrus,lochnagar.txt
+
+Required properties:
+
+ - compatible : One of the following strings:
+ "cirrus,lochnagar-pinctrl"
+
+ - gpio-controller : Indicates this device is a GPIO controller.
+ - #gpio-cells : Must be 2. The first cell is the pin number, see
+ [3] for available pins and the second cell is used to specify
+ optional parameters, see [1].
+ - gpio-ranges : Range of pins managed by the GPIO controller, see
+ [1]. Both the GPIO and Pinctrl base should be set to zero and the
+ count to the appropriate of the LOCHNAGARx_PIN_NUM_GPIOS define,
+ count to the appropriate LOCHNAGARx_PIN_NUM_GPIOS define,
+
+ - pinctrl-names : A pinctrl state named "default" must be defined.
+ - pinctrl-0 : A phandle to the default pinctrl state.
+
+Required sub-nodes:
+
+The pin configurations are defined as a child of the pinctrl states
+node, see [2]. Each sub-node can have the following properties:
+ - groups : A list of groups to select (either this or "pins" must be
+ specified), available groups:
+ codec-aif1, codec-aif2, codec-aif3, dsp-aif1, dsp-aif2, psia1,
+ psia2, gf-aif1, gf-aif2, gf-aif3, gf-aif4, spdif-aif, usb-aif1,
+ usb-aif2, adat-aif, soundcard-aif
+ - pins : A list of pin names to select (either this or "groups" must
+ be specified), available pins:
+ fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4, fpga-gpio5,
+ fpga-gpio6, codec-gpio1, codec-gpio2, codec-gpio3, codec-gpio4,
+ codec-gpio5, codec-gpio6, codec-gpio7, codec-gpio8, dsp-gpio1,
+ dsp-gpio2, dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6, gf-gpio2,
+ gf-gpio3, gf-gpio7, codec-aif1-bclk, codec-aif1-rxdat,
+ codec-aif1-lrclk, codec-aif1-txdat, codec-aif2-bclk,
+ codec-aif2-rxdat, codec-aif2-lrclk, codec-aif2-txdat,
+ codec-aif3-bclk, codec-aif3-rxdat, codec-aif3-lrclk,
+ codec-aif3-txdat, dsp-aif1-bclk, dsp-aif1-rxdat, dsp-aif1-lrclk,
+ dsp-aif1-txdat, dsp-aif2-bclk, dsp-aif2-rxdat,
+ dsp-aif2-lrclk, dsp-aif2-txdat, psia1-bclk, psia1-rxdat,
+ psia1-lrclk, psia1-txdat, psia2-bclk, psia2-rxdat, psia2-lrclk,
+ psia2-txdat, gf-aif3-bclk, gf-aif3-rxdat, gf-aif3-lrclk,
+ gf-aif3-txdat, gf-aif4-bclk, gf-aif4-rxdat, gf-aif4-lrclk,
+ gf-aif4-txdat, gf-aif1-bclk, gf-aif1-rxdat, gf-aif1-lrclk,
+ gf-aif1-txdat, gf-aif2-bclk, gf-aif2-rxdat, gf-aif2-lrclk,
+ gf-aif2-txdat, dsp-uart1-rx, dsp-uart1-tx, dsp-uart2-rx,
+ dsp-uart2-tx, gf-uart2-rx, gf-uart2-tx, usb-uart-rx,
+ codec-pdmclk1, codec-pdmdat1, codec-pdmclk2, codec-pdmdat2,
+ codec-dmicclk1, codec-dmicdat1, codec-dmicclk2, codec-dmicdat2,
+ codec-dmicclk3, codec-dmicdat3, codec-dmicclk4, codec-dmicdat4,
+ dsp-dmicclk1, dsp-dmicdat1, dsp-dmicclk2, dsp-dmicdat2, i2c2-scl,
+ i2c2-sda, i2c3-scl, i2c3-sda, i2c4-scl, i2c4-sda, dsp-standby,
+ codec-mclk1, codec-mclk2, dsp-clkin, psia1-mclk, psia2-mclk,
+ gf-gpio1, gf-gpio5, dsp-gpio20, led1, led2
+ - function : The mux function to select, available functions:
+ aif, fpga-gpio1, fpga-gpio2, fpga-gpio3, fpga-gpio4, fpga-gpio5,
+ fpga-gpio6, codec-gpio1, codec-gpio2, codec-gpio3, codec-gpio4,
+ codec-gpio5, codec-gpio6, codec-gpio7, codec-gpio8, dsp-gpio1,
+ dsp-gpio2, dsp-gpio3, dsp-gpio4, dsp-gpio5, dsp-gpio6, gf-gpio2,
+ gf-gpio3, gf-gpio7, gf-gpio1, gf-gpio5, dsp-gpio20, codec-clkout,
+ dsp-clkout, pmic-32k, spdif-clkout, clk-12m288, clk-11m2986,
+ clk-24m576, clk-22m5792, xmos-mclk, gf-clkout1, gf-mclk1,
+ gf-mclk3, gf-mclk2, gf-clkout2, codec-mclk1, codec-mclk2,
+ dsp-clkin, psia1-mclk, psia2-mclk, spdif-mclk, codec-irq,
+ codec-reset, dsp-reset, dsp-irq, dsp-standby, codec-pdmclk1,
+ codec-pdmdat1, codec-pdmclk2, codec-pdmdat2, codec-dmicclk1,
+ codec-dmicdat1, codec-dmicclk2, codec-dmicdat2, codec-dmicclk3,
+ codec-dmicdat3, codec-dmicclk4, codec-dmicdat4, dsp-dmicclk1,
+ dsp-dmicdat1, dsp-dmicclk2, dsp-dmicdat2, dsp-uart1-rx,
+ dsp-uart1-tx, dsp-uart2-rx, dsp-uart2-tx, gf-uart2-rx,
+ gf-uart2-tx, usb-uart-rx, usb-uart-tx, i2c2-scl, i2c2-sda,
+ i2c3-scl, i2c3-sda, i2c4-scl, i2c4-sda, spdif-aif, psia1,
+ psia1-bclk, psia1-lrclk, psia1-rxdat, psia1-txdat, psia2,
+ psia2-bclk, psia2-lrclk, psia2-rxdat, psia2-txdat, codec-aif1,
+ codec-aif1-bclk, codec-aif1-lrclk, codec-aif1-rxdat,
+ codec-aif1-txdat, codec-aif2, codec-aif2-bclk, codec-aif2-lrclk,
+ codec-aif2-rxdat, codec-aif2-txdat, codec-aif3, codec-aif3-bclk,
+ codec-aif3-lrclk, codec-aif3-rxdat, codec-aif3-txdat, dsp-aif1,
+ dsp-aif1-bclk, dsp-aif1-lrclk, dsp-aif1-rxdat, dsp-aif1-txdat,
+ dsp-aif2, dsp-aif2-bclk, dsp-aif2-lrclk, dsp-aif2-rxdat,
+ dsp-aif2-txdat, gf-aif3, gf-aif3-bclk, gf-aif3-lrclk,
+ gf-aif3-rxdat, gf-aif3-txdat, gf-aif4, gf-aif4-bclk,
+ gf-aif4-lrclk, gf-aif4-rxdat, gf-aif4-txdat, gf-aif1,
+ gf-aif1-bclk, gf-aif1-lrclk, gf-aif1-rxdat, gf-aif1-txdat,
+ gf-aif2, gf-aif2-bclk, gf-aif2-lrclk, gf-aif2-rxdat,
+ gf-aif2-txdat, usb-aif1, usb-aif2, adat-aif, soundcard-aif,
+
+ - output-enable : Specifies that an AIF group will be used as a master
+ interface (either this or input-enable is required if a group is
+ being muxed to an AIF)
+ - input-enable : Specifies that an AIF group will be used as a slave
+ interface (either this or output-enable is required if a group is
+ being muxed to an AIF)
+
+Example:
+
+lochnagar-pinctrl {
+ compatible = "cirrus,lochnagar-pinctrl";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&lochnagar 0 0 LOCHNAGAR2_PIN_NUM_GPIOS>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pin-settings>;
+
+ pin-settings: pin-settings {
+ ap-aif {
+ input-enable;
+ groups = "gf-aif1";
+ function = "codec-aif3";
+ };
+ codec-aif {
+ output-enable;
+ groups = "codec-aif3";
+ function = "gf-aif1";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
index 6666277c3acb..8ac1d0851a0f 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx7d-pinctrl.txt
@@ -48,9 +48,9 @@ PAD_CTL_HYS (1 << 3)
PAD_CTL_SRE_SLOW (1 << 2)
PAD_CTL_SRE_FAST (0 << 2)
PAD_CTL_DSE_X1 (0 << 0)
-PAD_CTL_DSE_X2 (1 << 0)
-PAD_CTL_DSE_X3 (2 << 0)
-PAD_CTL_DSE_X4 (3 << 0)
+PAD_CTL_DSE_X4 (1 << 0)
+PAD_CTL_DSE_X2 (2 << 0)
+PAD_CTL_DSE_X6 (3 << 0)
Examples:
While iomuxc-lpsr is intended to be used by dedicated peripherals to take
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
index e7d6f81c227f..205be98ae078 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
@@ -11,6 +11,7 @@ Required properties:
"mediatek,mt8127-pinctrl", compatible with mt8127 pinctrl.
"mediatek,mt8135-pinctrl", compatible with mt8135 pinctrl.
"mediatek,mt8173-pinctrl", compatible with mt8173 pinctrl.
+ "mediatek,mt8516-pinctrl", compatible with mt8516 pinctrl.
- pins-are-numbered: Specify the subnodes are using numbered pinmux to
specify pins.
- gpio-controller : Marks the device node as a gpio controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8183.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8183.txt
new file mode 100644
index 000000000000..eccbe3f55d3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8183.txt
@@ -0,0 +1,132 @@
+* Mediatek MT8183 Pin Controller
+
+The Mediatek's Pin controller is used to control SoC pins.
+
+Required properties:
+- compatible: value should be one of the following.
+ "mediatek,mt8183-pinctrl", compatible with mt8183 pinctrl.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells: number of cells in GPIO specifier. Since the generic GPIO
+ binding is used, the amount of cells must be specified as 2. See the below
+ mentioned gpio binding representation for description of particular cells.
+- gpio-ranges : gpio valid number range.
+- reg: physical address base for gpio base registers. There are 10 GPIO
+  physical address bases in mt8183.
+
+Optional properties:
+- reg-names: gpio base register names. There are 10 gpio base register
+ names in mt8183. They are "iocfg0", "iocfg1", "iocfg2", "iocfg3", "iocfg4",
+ "iocfg5", "iocfg6", "iocfg7", "iocfg8", "eint".
+- interrupt-controller: Marks the device node as an interrupt controller
+- #interrupt-cells: Should be two.
+- interrupts : The interrupt outputs to sysirq.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices.
+
+Subnode format
+A pinctrl node should contain at least one subnode representing the
+pinctrl groups available on the machine. Each subnode will list the
+pins it needs, and how they should be configured, with regard to muxer
+configuration, pullups, drive strength, input enable/disable and input schmitt.
+
+ node {
+ pinmux = <PIN_NUMBER_PINMUX>;
+ GENERIC_PINCONFIG;
+ };
+
+Required properties:
+- pinmux: integer array, represents gpio pin number and mux setting.
+  Supported pin numbers and mux settings vary between SoCs, and are defined
+ as macros in boot/dts/<soc>-pinfunc.h directly.
+
+Optional properties:
+- GENERIC_PINCONFIG: is the generic pinconfig options to use, bias-disable,
+ bias-pull-down, bias-pull-up, input-enable, input-disable, output-low,
+ output-high, input-schmitt-enable, input-schmitt-disable
+ and drive-strength are valid.
+
+  Some special pins have extra pull-up strength: R0 and R1 pull-up
+  resistors are available, and the user only needs to set R1R0 to 00, 01,
+  10 or 11. For those special pins, the "mediatek,pull-up-adv" or
+  "mediatek,pull-down-adv" property is needed to pass that R1R0 setting.
+ Valid arguments are from 0 to 3.
+
+ mediatek,tdsel: An integer describing the steps for output level shifter
+ duty cycle when asserted (high pulse width adjustment). Valid arguments
+ are from 0 to 15.
+ mediatek,rdsel: An integer describing the steps for input level shifter
+ duty cycle when asserted (high pulse width adjustment). Valid arguments
+ are from 0 to 63.
+
+  When configuring drive-strength, arguments such as MTK_DRIVE_4mA,
+  MTK_DRIVE_6mA, etc. are supported; see dt-bindings/pinctrl/mt65xx.h.
+  Only 2/4/6/8/10/12/14/16mA are supported on mt8183.
+  I2C pins have both a generic driving setup and a specific driving setup.
+  In the generic driving setup, I2C pins only support 2/4/6/8/10/12/14/16mA
+  adjustment, while the specific driving setup allows 0.125/0.25/0.5/1mA
+  adjustment. Enabling the specific driving setup for an I2C pin disables
+  its generic driving setup. Some special features require the I2C-specific
+  driving setup, which is controlled by E1E0EN, so an extra vendor driving
+  property is needed instead of the generic driving property.
+ We can add "mediatek,drive-strength-adv = <XXX>;" to describe the specific
+ driving setup property. "XXX" means the value of E1E0EN. EN is 0 or 1.
+ It is used to enable or disable the specific driving setup.
+ E1E0 is used to describe the detail strength specification of the I2C pin.
+ When E1=0/E0=0, the strength is 0.125mA.
+ When E1=0/E0=1, the strength is 0.25mA.
+ When E1=1/E0=0, the strength is 0.5mA.
+ When E1=1/E0=1, the strength is 1mA.
+ So the valid arguments of "mediatek,drive-strength-adv" are from 0 to 7.
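+
+  For instance, a subnode relying only on the generic pinconfig options
+  listed above (pin macro and values picked purely for illustration) could
+  look like:
+
+	node {
+		pinmux = <PINMUX_GPIO48__FUNC_SCL5>;
+		bias-pull-up;
+		drive-strength = <MTK_DRIVE_4mA>;
+	};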
+
+Examples:
+
+#include "mt8183-pinfunc.h"
+
+...
+{
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt8183-pinctrl";
+ reg = <0 0x10005000 0 0x1000>,
+ <0 0x11f20000 0 0x1000>,
+ <0 0x11e80000 0 0x1000>,
+ <0 0x11e70000 0 0x1000>,
+ <0 0x11e90000 0 0x1000>,
+ <0 0x11d30000 0 0x1000>,
+ <0 0x11d20000 0 0x1000>,
+ <0 0x11c50000 0 0x1000>,
+ <0 0x11f30000 0 0x1000>,
+ <0 0x1000b000 0 0x1000>;
+ reg-names = "iocfg0", "iocfg1", "iocfg2",
+ "iocfg3", "iocfg4", "iocfg5",
+ "iocfg6", "iocfg7", "iocfg8",
+ "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 192>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
+
+ i2c0_pins_a: i2c0 {
+ pins1 {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <7>;
+ };
+ };
+
+ i2c1_pins_a: i2c1 {
+ pins {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-down-adv = <2>;
+ mediatek,drive-strength-adv = <4>;
+ };
+ };
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt
new file mode 100644
index 000000000000..c1b4c1819b84
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-stmfx.txt
@@ -0,0 +1,116 @@
+STMicroelectronics Multi-Function eXpander (STMFX) GPIO expander bindings
+
+ST Multi-Function eXpander (STMFX) offers expansion of up to 24 GPIOs.
+Please refer to ../mfd/stmfx.txt for STMFX Core bindings.
+
+Required properties:
+- compatible: should be "st,stmfx-0300-pinctrl".
+- #gpio-cells: should be <2>, the first cell is the GPIO number and the second
+ cell is the gpio flags in accordance with <dt-bindings/gpio/gpio.h>.
+- gpio-controller: marks the device as a GPIO controller.
+- #interrupt-cells: should be <2>, the first cell is the GPIO number and the
+ second cell is the interrupt flags in accordance with
+ <dt-bindings/interrupt-controller/irq.h>.
+- interrupt-controller: marks the device as an interrupt controller.
+- gpio-ranges: specifies the mapping between gpio controller and pin
+ controller pins. Check "Concerning gpio-ranges property" below.
+Please refer to ../gpio/gpio.txt.
+
+Please refer to pinctrl-bindings.txt for pin configuration.
+
+Required properties for pin configuration sub-nodes:
+- pins: list of pins to which the configuration applies.
+
+Optional properties for pin configuration sub-nodes (pinconf-generic ones):
+- bias-disable: disable any bias on the pin.
+- bias-pull-up: the pin will be pulled up.
+- bias-pull-pin-default: use the pin-default pull state.
+- bias-pull-down: the pin will be pulled down.
+- drive-open-drain: the pin will be driven with open drain.
+- drive-push-pull: the pin will be driven actively high and low.
+- output-high: the pin will be configured as an output driving high level.
+- output-low: the pin will be configured as an output driving low level.
+
+Note that STMFX pins[15:0] are called "gpio[15:0]", and STMFX pins[23:16] are
+called "agpio[7:0]". Example, to refer to pin 18 of STMFX, use "agpio2".
+
+Concerning gpio-ranges property:
+- if all STMFX pins[23:0] are available (no other STMFX function in use), you
+ should use gpio-ranges = <&stmfx_pinctrl 0 0 24>;
+- if agpio[3:0] are not available (STMFX Touchscreen function in use), you
+ should use gpio-ranges = <&stmfx_pinctrl 0 0 16>, <&stmfx_pinctrl 20 20 4>;
+- if agpio[7:4] are not available (STMFX IDD function in use), you
+ should use gpio-ranges = <&stmfx_pinctrl 0 0 20>;
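+
+For instance, in the second case above (Touchscreen function in use), the
+pin controller node would carry the split range; a shortened illustration:
+
+	stmfx_pinctrl: stmfx-pin-controller {
+		compatible = "st,stmfx-0300-pinctrl";
+		...
+		gpio-ranges = <&stmfx_pinctrl 0 0 16>, <&stmfx_pinctrl 20 20 4>;
+	};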
+
+
+Example:
+
+ stmfx: stmfx@42 {
+ ...
+
+ stmfx_pinctrl: stmfx-pin-controller {
+ compatible = "st,stmfx-0300-pinctrl";
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ gpio-ranges = <&stmfx_pinctrl 0 0 24>;
+
+ joystick_pins: joystick {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4";
+ drive-push-pull;
+ bias-pull-up;
+ };
+ };
+ };
+
+Example of STMFX GPIO consumers:
+
+ joystick {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-0 = <&joystick_pins>;
+ pinctrl-names = "default";
+ button-0 {
+ label = "JoySel";
+ linux,code = <KEY_ENTER>;
+ interrupt-parent = <&stmfx_pinctrl>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ };
+ button-1 {
+ label = "JoyDown";
+ linux,code = <KEY_DOWN>;
+ interrupt-parent = <&stmfx_pinctrl>;
+ interrupts = <1 IRQ_TYPE_EDGE_RISING>;
+ };
+ button-2 {
+ label = "JoyLeft";
+ linux,code = <KEY_LEFT>;
+ interrupt-parent = <&stmfx_pinctrl>;
+ interrupts = <2 IRQ_TYPE_EDGE_RISING>;
+ };
+ button-3 {
+ label = "JoyRight";
+ linux,code = <KEY_RIGHT>;
+ interrupt-parent = <&stmfx_pinctrl>;
+ interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+ };
+ button-4 {
+ label = "JoyUp";
+ linux,code = <KEY_UP>;
+ interrupt-parent = <&stmfx_pinctrl>;
+ interrupts = <4 IRQ_TYPE_EDGE_RISING>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ orange {
+ gpios = <&stmfx_pinctrl 17 1>;
+ };
+
+ blue {
+ gpios = <&stmfx_pinctrl 19 1>;
+ };
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index c2dbb3e8d840..4e90ddd77784 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -42,7 +42,7 @@ information about e.g. the mux function.
The following generic properties as defined in pinctrl-bindings.txt are valid
to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull,up, drive-strength,
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
output-low, output-high.
Non-empty subnodes must specify the 'pins' property.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
index 991be0cd0948..84be0f2c6f3b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq4019-pinctrl.txt
@@ -44,7 +44,7 @@ information about e.g. the mux function.
The following generic properties as defined in pinctrl-bindings.txt are valid
to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull,up, drive-strength.
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength.
Non-empty subnodes must specify the 'pins' property.
Note that not all properties are valid for all pins.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
index 7ed56a1b70fc..a7aaaa7db83b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
@@ -42,7 +42,7 @@ information about e.g. the mux function.
The following generic properties as defined in pinctrl-bindings.txt are valid
to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull,up, drive-strength,
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
output-low, output-high.
Non-empty subnodes must specify the 'pins' property.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
index cdc4787e59d2..f095209848c8 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8660-pinctrl.txt
@@ -42,7 +42,7 @@ information about e.g. the mux function.
The following generic properties as defined in pinctrl-bindings.txt are valid
to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull,up, drive-strength,
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength,
output-low, output-high.
Non-empty subnodes must specify the 'pins' property.
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
index c22e6c425d0b..004056506679 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
@@ -41,7 +41,7 @@ information about e.g. the mux function.
The following generic properties as defined in pinctrl-bindings.txt are valid
to specify in a pin configuration subnode:
- pins, function, bias-disable, bias-pull-down, bias-pull,up, drive-strength.
+ pins, function, bias-disable, bias-pull-down, bias-pull-up, drive-strength.
Non-empty subnodes must specify the 'pins' property.
Note that not all properties are valid for all pins.
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
index 48df30a36b01..00169255e48c 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt
@@ -57,6 +57,8 @@ Optional properties:
- st,bank-ioport: should correspond to the EXTI IOport selection (EXTI line
used to select GPIOs as interrupts).
- hwlocks: reference to a phandle of a hardware spinlock provider node.
+ - st,package: Indicates the SOC package used.
+ More details in include/dt-bindings/pinctrl/stm32-pinfunc.h
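+   For instance, assuming STM32MP_PKG_AA is one of the package defines
+   provided by that header, a pin controller node would contain:
+	st,package = <STM32MP_PKG_AA>;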
Example 1:
#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
index 1cd050b4054c..0fdc3dd1125e 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-gx-pwrc.txt
@@ -16,7 +16,9 @@ Device Tree Bindings:
---------------------
Required properties:
-- compatible: should be "amlogic,meson-gx-pwrc-vpu" for the Meson GX SoCs
+- compatible: should be one of the following :
+ - "amlogic,meson-gx-pwrc-vpu" for the Meson GX SoCs
+ - "amlogic,meson-g12a-pwrc-vpu" for the Meson G12A SoCs
- #power-domain-cells: should be 0
- amlogic,hhi-sysctrl: phandle to the HHI sysctrl node
- resets: phandles to the reset lines needed for this power demain sequence
diff --git a/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt b/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt
index 11906316b43d..e23dea8344f8 100644
--- a/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt
+++ b/Documentation/devicetree/bindings/power/reset/syscon-reboot.txt
@@ -3,13 +3,20 @@ Generic SYSCON mapped register reset driver
This is a generic reset driver using syscon to map the reset register.
The reset is generally performed with a write to the reset register
defined by the register map pointed by syscon reference plus the offset
-with the mask defined in the reboot node.
+with the value and mask defined in the reboot node.
Required properties:
- compatible: should contain "syscon-reboot"
- regmap: this is phandle to the register map node
- offset: offset in the register map for the reboot register (in bytes)
-- mask: the reset value written to the reboot register (32 bit access)
+- value: the reset value written to the reboot register (32 bit access)
+
+Optional properties:
+- mask: update only the register bits defined by the mask (32 bit)
+
+Legacy usage:
+If a node doesn't contain a value property but contains a mask property, the
+mask property is used as the value.
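+
+For instance, a node that sets a single bit using both a value and a mask
+(the regmap phandle, offset and values are illustrative) could look like:
+
+	reboot {
+		compatible = "syscon-reboot";
+		regmap = <&sysctrl>;
+		offset = <0x20>;
+		value = <0x1>;
+		mask = <0x1>;
+	};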
Default will be little endian mode, 32 bit access only.
diff --git a/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt b/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
index ba8d35f66cbe..b2d4968fde7d 100644
--- a/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
+++ b/Documentation/devicetree/bindings/power/supply/axp20x_usb_power.txt
@@ -4,6 +4,7 @@ Required Properties:
-compatible: One of: "x-powers,axp202-usb-power-supply"
"x-powers,axp221-usb-power-supply"
"x-powers,axp223-usb-power-supply"
+ "x-powers,axp813-usb-power-supply"
The AXP223 PMIC shares most of its behaviour with the AXP221 but has slight
variations such as the former being able to set the VBUS power supply max
diff --git a/Documentation/devicetree/bindings/power/supply/gpio-charger.txt b/Documentation/devicetree/bindings/power/supply/gpio-charger.txt
index adbb5dc5b6e9..0fb33b2c62a6 100644
--- a/Documentation/devicetree/bindings/power/supply/gpio-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/gpio-charger.txt
@@ -14,13 +14,17 @@ Required properties :
usb-cdp (USB charging downstream port)
usb-aca (USB accessory charger adapter)
+Optional properties:
+ - charge-status-gpios: GPIO indicating whether a battery is charging.
+
Example:
usb_charger: charger {
compatible = "gpio-charger";
charger-type = "usb-sdp";
- gpios = <&gpf0 2 0 0 0>;
- }
+ gpios = <&gpd 28 GPIO_ACTIVE_LOW>;
+ charge-status-gpios = <&gpc 27 GPIO_ACTIVE_LOW>;
+ };
battery {
power-supplies = <&usb_charger>;
diff --git a/Documentation/devicetree/bindings/power/supply/ingenic,battery.txt b/Documentation/devicetree/bindings/power/supply/ingenic,battery.txt
new file mode 100644
index 000000000000..66430bf73815
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/ingenic,battery.txt
@@ -0,0 +1,31 @@
+* Ingenic JZ47xx battery bindings
+
+Required properties:
+
+- compatible: Must be "ingenic,jz4740-battery".
+- io-channels: phandle and IIO specifier pair to the IIO device.
+ Format described in iio-bindings.txt.
+- monitored-battery: phandle to a "simple-battery" compatible node.
+
+The "monitored-battery" property must be a phandle to a node using the format
+described in battery.txt, with the following properties being required:
+
+- voltage-min-design-microvolt: Drained battery voltage.
+- voltage-max-design-microvolt: Fully charged battery voltage.
+
+Example:
+
+#include <dt-bindings/iio/adc/ingenic,adc.h>
+
+simple_battery: battery {
+ compatible = "simple-battery";
+ voltage-min-design-microvolt = <3600000>;
+ voltage-max-design-microvolt = <4200000>;
+};
+
+ingenic_battery {
+ compatible = "ingenic,jz4740-battery";
+ io-channels = <&adc INGENIC_ADC_BATTERY>;
+ io-channel-names = "battery";
+ monitored-battery = <&simple_battery>;
+};
diff --git a/Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt b/Documentation/devicetree/bindings/power/supply/lt3651-charger.txt
index 71f2840e8209..40811ff8de10 100644
--- a/Documentation/devicetree/bindings/power/supply/ltc3651-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/lt3651-charger.txt
@@ -1,14 +1,16 @@
-ltc3651-charger
+Analog Devices LT3651 Charger Power Supply bindings: lt3651-charger
Required properties:
- - compatible: "lltc,ltc3651-charger"
+- compatible: Should contain one of the following:
+ * "lltc,ltc3651-charger", (DEPRECATED: Use "lltc,lt3651-charger")
+ * "lltc,lt3651-charger"
- lltc,acpr-gpios: Connect to ACPR output. See remark below.
Optional properties:
- lltc,fault-gpios: Connect to FAULT output. See remark below.
- lltc,chrg-gpios: Connect to CHRG output. See remark below.
-The ltc3651 outputs are open-drain type and active low. The driver assumes the
+The lt3651 outputs are open-drain type and active low. The driver assumes the
GPIO reports "active" when the output is asserted, so if the pins have been
connected directly, the GPIO flags should be set to active low also.
@@ -20,7 +22,7 @@ attributes to detect changes.
Example:
charger: battery-charger {
- compatible = "lltc,ltc3651-charger";
+ compatible = "lltc,lt3651-charger";
lltc,acpr-gpios = <&gpio0 68 GPIO_ACTIVE_LOW>;
lltc,fault-gpios = <&gpio0 64 GPIO_ACTIVE_LOW>;
lltc,chrg-gpios = <&gpio0 63 GPIO_ACTIVE_LOW>;
diff --git a/Documentation/devicetree/bindings/power/supply/max77650-charger.txt b/Documentation/devicetree/bindings/power/supply/max77650-charger.txt
new file mode 100644
index 000000000000..e6d0fb6ff94e
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/max77650-charger.txt
@@ -0,0 +1,28 @@
+Battery charger driver for MAX77650 PMIC from Maxim Integrated.
+
+This module is part of the MAX77650 MFD device. For more details
+see Documentation/devicetree/bindings/mfd/max77650.txt.
+
+The charger is represented as a sub-node of the PMIC node on the device tree.
+
+Required properties:
+--------------------
+- compatible: Must be "maxim,max77650-charger"
+
+Optional properties:
+--------------------
+- input-voltage-min-microvolt: Minimum CHGIN regulation voltage. Must be one
+ of: 4000000, 4100000, 4200000, 4300000,
+ 4400000, 4500000, 4600000, 4700000.
+- input-current-limit-microamp: CHGIN input current limit (in microamps). Must
+ be one of: 95000, 190000, 285000, 380000,
+ 475000.
+
+Example:
+--------
+
+ charger {
+ compatible = "maxim,max77650-charger";
+ input-voltage-min-microvolt = <4200000>;
+ input-current-limit-microamp = <285000>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/microchip,ucs1002.txt b/Documentation/devicetree/bindings/power/supply/microchip,ucs1002.txt
new file mode 100644
index 000000000000..1d284ad816bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/microchip,ucs1002.txt
@@ -0,0 +1,27 @@
+Microchip UCS1002 USB Port Power Controller
+
+Required properties:
+- compatible : Should be "microchip,ucs1002";
+- reg : I2C slave address
+
+Optional properties:
+- interrupts : A list of interrupt lines present (could correspond to the
+                  A_DET# pin, the ALERT# pin, or both)
+- interrupt-names : A list of interrupt names. Should contain (if
+ present):
+ - "a_det" for line connected to A_DET# pin
+ - "alert" for line connected to ALERT# pin
+ Both are expected to be IRQ_TYPE_EDGE_BOTH
+Example:
+
+&i2c3 {
+ charger@32 {
+ compatible = "microchip,ucs1002";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ucs1002_pins>;
+ reg = <0x32>;
+ interrupts-extended = <&gpio5 2 IRQ_TYPE_EDGE_BOTH>,
+ <&gpio3 21 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "a_det", "alert";
+ };
+};
diff --git a/Documentation/devicetree/bindings/power/supply/olpc_battery.txt b/Documentation/devicetree/bindings/power/supply/olpc_battery.txt
index c8901b3992d9..8d87d6b35a98 100644
--- a/Documentation/devicetree/bindings/power/supply/olpc_battery.txt
+++ b/Documentation/devicetree/bindings/power/supply/olpc_battery.txt
@@ -2,4 +2,4 @@ OLPC battery
~~~~~~~~~~~~
Required properties:
- - compatible : "olpc,xo1-battery"
+ - compatible : "olpc,xo1-battery" or "olpc,xo1.5-battery"
diff --git a/Documentation/devicetree/bindings/pps/pps-gpio.txt b/Documentation/devicetree/bindings/pps/pps-gpio.txt
index 3683874832ae..9012a2a02e14 100644
--- a/Documentation/devicetree/bindings/pps/pps-gpio.txt
+++ b/Documentation/devicetree/bindings/pps/pps-gpio.txt
@@ -7,6 +7,10 @@ Required properties:
- compatible: should be "pps-gpio"
- gpios: one PPS GPIO in the format described by ../gpio/gpio.txt
+Additional required properties for the PPS ECHO functionality:
+- echo-gpios: one PPS ECHO GPIO in the format described by ../gpio/gpio.txt
+- echo-active-ms: duration in ms of the active portion of the echo pulse
+
Optional properties:
- assert-falling-edge: when present, assert is indicated by a falling edge
(instead of by a rising edge)
@@ -19,5 +23,8 @@ Example:
gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
assert-falling-edge;
+ echo-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+ echo-active-ms = <100>;
+
compatible = "pps-gpio";
};
diff --git a/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
new file mode 100644
index 000000000000..3ba958d764ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/imx-tpm-pwm.txt
@@ -0,0 +1,22 @@
+Freescale i.MX TPM PWM controller
+
+Required properties:
+- compatible : Should be "fsl,imx7ulp-pwm".
+- reg: Physical base address and length of the controller's registers.
+- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of the cells format.
+- clocks : The clock provided by the SoC to drive the PWM.
+- interrupts: The interrupt for the PWM controller.
+
+Note: The TPM counter and period counter are shared between multiple channels, so all channels
+should use the same period setting.
+
+Example:
+
+tpm4: pwm@40250000 {
+ compatible = "fsl,imx7ulp-pwm";
+ reg = <0x40250000 0x1000>;
+ assigned-clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>;
+ clocks = <&pcc2 IMX7ULP_CLK_LPTPM4>;
+ #pwm-cells = <3>;
+};
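+
+A PWM consumer then references a TPM channel with three cells: the channel
+index, the period in nanoseconds and optional flags, as described in pwm.txt.
+An illustrative consumer fragment (channel and period values are arbitrary):
+
+consumer {
+	/* channel 1, 50 us period, no flags */
+	pwms = <&tpm4 1 50000 0>;
+};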
diff --git a/Documentation/devicetree/bindings/pwm/pwm-meson.txt b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
index 1fa3f7182133..891632354065 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-meson.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-meson.txt
@@ -7,6 +7,9 @@ Required properties:
or "amlogic,meson-gxbb-ao-pwm"
or "amlogic,meson-axg-ee-pwm"
or "amlogic,meson-axg-ao-pwm"
+ or "amlogic,meson-g12a-ee-pwm"
+ or "amlogic,meson-g12a-ao-pwm-ab"
+ or "amlogic,meson-g12a-ao-pwm-cd"
- #pwm-cells: Should be 3. See pwm.txt in this directory for a description of
the cells format.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
index 944fe356bb45..31c4577157dd 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: Must be "ti,<soc>-ehrpwm".
for am33xx - compatible = "ti,am3352-ehrpwm", "ti,am33xx-ehrpwm";
for am4372 - compatible = "ti,am4372-ehrpwm", "ti-am3352-ehrpwm", "ti,am33xx-ehrpwm";
+ for am654 - compatible = "ti,am654-ehrpwm", "ti-am3352-ehrpwm";
for da850 - compatible = "ti,da850-ehrpwm", "ti-am3352-ehrpwm", "ti,am33xx-ehrpwm";
for dra746 - compatible = "ti,dra746-ehrpwm", "ti-am3352-ehrpwm";
- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
diff --git a/Documentation/devicetree/bindings/regulator/gpio-regulator.txt b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
index 1f496159e2bb..dd25e73b5d79 100644
--- a/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/gpio-regulator.txt
@@ -4,16 +4,30 @@ Required properties:
- compatible : Must be "regulator-gpio".
- regulator-name : Defined in regulator.txt as optional, but required
here.
-- states : Selection of available voltages and GPIO configs.
- if there are no states, then use a fixed regulator
+- gpios : Array of one or more GPIO pins used to select the
+ regulator voltage/current listed in "states".
+- states : Selection of available voltages/currents provided by
+ this regulator and matching GPIO configurations to
+ achieve them. If there are no states in the "states"
+ array, use a fixed regulator instead.
Optional properties:
-- enable-gpio : GPIO to use to enable/disable the regulator.
-- gpios : GPIO group used to control voltage.
-- gpios-states : gpios pin's initial states array. 0: LOW, 1: HIGH.
- defualt is LOW if nothing is specified.
+- enable-gpios : GPIO used to enable/disable the regulator.
+ Warning, the GPIO phandle flags are ignored and the
+ GPIO polarity is controlled solely by the presence
+ of "enable-active-high" DT property. This is due to
+ compatibility with old DTs.
+- enable-active-high : Polarity of "enable-gpio" GPIO is active HIGH.
+ Default is active LOW.
+- gpios-states	: On operating systems that don't support reading back
+		  gpio values in output mode (most notably Linux), this
+		  array provides the state to set on the GPIO pins when
+		  requesting them from the gpio controller. Systems
+		  that are capable of preserving state when requesting
+		  the lines are free to ignore this property.
+		  0: LOW, 1: HIGH. Default is LOW if nothing else
+		  is specified. See the example fragment after this list.
- startup-delay-us : Startup time in microseconds.
-- enable-active-high : Polarity of GPIO is active high (default is low).
- regulator-type : Specifies what is being regulated, must be either
"voltage" or "current", defaults to voltage.
@@ -30,7 +44,7 @@ Example:
regulator-max-microvolt = <2600000>;
regulator-boot-on;
- enable-gpio = <&gpio0 23 0x4>;
+ enable-gpios = <&gpio0 23 0x4>;
gpios = <&gpio0 24 0x4
&gpio0 25 0x4>;
states = <1800000 0x3
diff --git a/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt
new file mode 100644
index 000000000000..e372dd3f0c8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/st,stm32mp1-pwr-reg.txt
@@ -0,0 +1,43 @@
+STM32MP1 PWR Regulators
+-----------------------
+
+Available Regulators in STM32MP1 PWR block are:
+ - reg11 for regulator 1V1
+ - reg18 for regulator 1V8
+  - usb33 for the USB3V3 switch
+
+Required properties:
+- compatible: Must be "st,stm32mp1,pwr-reg"
+- list of child nodes that specify the regulator reg11, reg18 or usb33
+ initialization data for defined regulators. The definition for each of
+ these nodes is defined using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+- vdd-supply: phandle to the parent supply/regulator node for vdd input
+- vdd_3v3_usbfs-supply: phandle to the parent supply/regulator node for usb33
+
+Example:
+
+pwr_regulators: pwr@50001000 {
+ compatible = "st,stm32mp1,pwr-reg";
+ reg = <0x50001000 0x10>;
+ vdd-supply = <&vdd>;
+ vdd_3v3_usbfs-supply = <&vdd_usb>;
+
+ reg11: reg11 {
+ regulator-name = "reg11";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ reg18: reg18 {
+ regulator-name = "reg18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ usb33: usb33 {
+ regulator-name = "usb33";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
index 2bf3344b2a02..2df4bddeb688 100644
--- a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
+++ b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
@@ -5,11 +5,12 @@ Please also refer to reset.txt in this directory for common reset
controller binding usage.
The reset controller registers are part of the system-ctl block on
-hi3660 SoC.
+hi3660 and hi3670 SoCs.
Required properties:
-- compatible: should be
- "hisilicon,hi3660-reset"
+- compatible: should be one of the following:
+ "hisilicon,hi3660-reset" for HI3660
+ "hisilicon,hi3670-reset", "hisilicon,hi3660-reset" for HI3670
- hisi,rst-syscon: phandle of the reset's syscon.
- #reset-cells : Specifies the number of cells needed to encode a
reset source. The type shall be a <u32> and the value shall be 2.
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
new file mode 100644
index 000000000000..73d8f19c3bd9
--- /dev/null
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.txt
@@ -0,0 +1,51 @@
+SiFive L2 Cache Controller
+--------------------------
+The SiFive Level 2 Cache Controller is used to provide access to fast copies
+of memory for masters in a Core Complex. The Level 2 Cache Controller also
+acts as a directory-based coherency manager.
+All the properties in the ePAPR/DeviceTree specification apply to this platform.
+
+Required Properties:
+--------------------
+- compatible: Should be "sifive,fu540-c000-ccache" and "cache"
+
+- cache-block-size: Specifies the block size in bytes of the cache.
+ Should be 64
+
+- cache-level: Should be set to 2 for a level 2 cache
+
+- cache-sets: Specifies the number of associativity sets of the cache.
+ Should be 1024
+
+- cache-size: Specifies the size in bytes of the cache. Should be 2097152
+
+- cache-unified: Specifies the cache is a unified cache
+
+- interrupts: Must contain 3 entries (DirError, DataError and DataFail signals)
+
+- reg: Physical base address and size of L2 cache controller registers map
+
+Optional Properties:
+--------------------
+- next-level-cache: phandle to the next level cache if present.
+
+- memory-region: reference to the reserved-memory for the L2 Loosely Integrated
+ Memory region. The reserved memory node should be defined as per the bindings
+ in reserved-memory.txt
+
+
+Example:
+
+ cache-controller@2010000 {
+ compatible = "sifive,fu540-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic0>;
+ interrupts = <1 2 3>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ next-level-cache = <&L25 &L40 &L36>;
+ memory-region = <&l2_lim>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
index d3e380ad712d..627bb533eff7 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt
@@ -1,7 +1,11 @@
* NXP PCF85063 Real Time Clock
Required properties:
-- compatible: Should contain "nxp,pcf85063".
+- compatible: Should contain one of:
+ "nxp,pcf85063",
+ "nxp,pcf85063a",
+ "nxp,pcf85063tp",
+ "microcrystal,rv8263"
- reg: I2C address for chip.
Optional property:
diff --git a/Documentation/devicetree/bindings/rtc/rtc-aspeed.txt b/Documentation/devicetree/bindings/rtc/rtc-aspeed.txt
new file mode 100644
index 000000000000..2e956b3dc276
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-aspeed.txt
@@ -0,0 +1,22 @@
+ASPEED BMC RTC
+==============
+
+Required properties:
+ - compatible: should be one of the following
+ * aspeed,ast2400-rtc for the ast2400
+ * aspeed,ast2500-rtc for the ast2500
+ * aspeed,ast2600-rtc for the ast2600
+
+ - reg: physical base address of the controller and length of memory mapped
+ region
+
+ - interrupts: The interrupt number
+
+Example:
+
+ rtc@1e781000 {
+ compatible = "aspeed,ast2400-rtc";
+ reg = <0x1e781000 0x18>;
+ interrupts = <22>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc.txt b/Documentation/devicetree/bindings/rtc/rtc.txt
index f4687c68c08c..a97fc6a9a75e 100644
--- a/Documentation/devicetree/bindings/rtc/rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc.txt
@@ -69,3 +69,4 @@ ricoh,rv5c386 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
sii,s35390a 2-wire CMOS real-time clock
whwave,sd3078 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC
+xircom,x1205 Xircom X1205 I2C RTC
diff --git a/Documentation/devicetree/bindings/serial/cdns,uart.txt b/Documentation/devicetree/bindings/serial/cdns,uart.txt
index 227bb770b027..4efc560f90ab 100644
--- a/Documentation/devicetree/bindings/serial/cdns,uart.txt
+++ b/Documentation/devicetree/bindings/serial/cdns,uart.txt
@@ -12,6 +12,11 @@ Required properties:
See ../clocks/clock-bindings.txt for details.
+Optional properties:
+- cts-override : Override the CTS modem status signal. This signal will
+ always be reported as active instead of being obtained from the modem status
+  register. Define this if your serial port does not use this pin.
+
Example:
uart@e0000000 {
compatible = "cdns,uart-r1p8";
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index bcfb13194f16..c6b5262eb352 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -1,4 +1,4 @@
-* Mediatek Universal Asynchronous Receiver/Transmitter (UART)
+* MediaTek Universal Asynchronous Receiver/Transmitter (UART)
Required properties:
- compatible should contain:
@@ -13,10 +13,12 @@ Required properties:
* "mediatek,mt6797-uart" for MT6797 compatible UARTS
* "mediatek,mt7622-uart" for MT7622 compatible UARTS
* "mediatek,mt7623-uart" for MT7623 compatible UARTS
+ * "mediatek,mt7629-uart" for MT7629 compatible UARTS
* "mediatek,mt8127-uart" for MT8127 compatible UARTS
* "mediatek,mt8135-uart" for MT8135 compatible UARTS
* "mediatek,mt8173-uart" for MT8173 compatible UARTS
* "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
+ * "mediatek,mt8516-uart" for MT8516 compatible UARTS
* "mediatek,mt6577-uart" for MT6577 and all of the above
- reg: The base address of the UART register bank.
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
index e7921a8e276b..c1091a923a89 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
@@ -12,6 +12,8 @@ Required properties:
- reg: I2C address of the SC16IS7xx device.
- interrupts: Should contain the UART interrupt
- clocks: Reference to the IC source clock.
+ OR (when there is no clock provider visible to the platform)
+- clock-frequency: The source clock frequency for the IC.
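+
+  For instance, a device clocked by a fixed crystal with no clock provider
+  visible to the platform (I2C address and frequency are illustrative):
+
+	sc16is750: sc16is750@51 {
+		compatible = "nxp,sc16is750";
+		reg = <0x51>;
+		clock-frequency = <14745600>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+	};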
Optional properties:
- gpio-controller: Marks the device node as a GPIO controller.
diff --git a/Documentation/devicetree/bindings/serial/sifive-serial.txt b/Documentation/devicetree/bindings/serial/sifive-serial.txt
new file mode 100644
index 000000000000..c86b1e524159
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/sifive-serial.txt
@@ -0,0 +1,33 @@
+SiFive asynchronous serial interface (UART)
+
+Required properties:
+
+- compatible: should be something similar to
+ "sifive,<chip>-uart" for the UART as integrated
+ on a particular chip, and "sifive,uart<version>" for the
+ general UART IP block programming model. Supported
+ compatible strings as of the date of this writing are:
+ "sifive,fu540-c000-uart" for the SiFive UART v0 as
+ integrated onto the SiFive FU540 chip, or "sifive,uart0"
+ for the SiFive UART v0 IP block with no chip integration
+ tweaks (if any)
+- reg: address and length of the register space
+- interrupts: Should contain the UART interrupt identifier
+- clocks: Should contain a clock identifier for the UART's parent clock
+
+
+UART HDL that corresponds to the IP block version numbers can be found
+here:
+
+https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/uart
+
+
+Example:
+
+uart0: serial@10010000 {
+ compatible = "sifive,fu540-c000-uart", "sifive,uart0";
+ interrupt-parent = <&plic0>;
+ interrupts = <80>;
+ reg = <0x0 0x10010000 0x0 0x1000>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+};
diff --git a/Documentation/devicetree/bindings/serial/sprd-uart.txt b/Documentation/devicetree/bindings/serial/sprd-uart.txt
index cab40f0f6f49..9607dc616205 100644
--- a/Documentation/devicetree/bindings/serial/sprd-uart.txt
+++ b/Documentation/devicetree/bindings/serial/sprd-uart.txt
@@ -7,7 +7,17 @@ Required properties:
- reg: offset and length of the register set for the device
- interrupts: exactly one interrupt specifier
-- clocks: phandles to input clocks.
+- clock-names: Should contain following entries:
+ "enable" for UART module enable clock,
+ "uart" for UART clock,
+ "source" for UART source (parent) clock.
+- clocks: Should contain a clock specifier for each entry in clock-names.
+ UART clock and source clock are optional properties, but enable clock
+ is required.
+
+Optional properties:
+- dma-names: Should contain "rx" for receive and "tx" for transmit channels.
+- dmas: A list of dma specifiers, one for each entry in dma-names.
Example:
uart0: serial@0 {
@@ -15,5 +25,8 @@ Example:
"sprd,sc9836-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ext_26m>;
+ dma-names = "rx", "tx";
+ dmas = <&ap_dma 19>, <&ap_dma 20>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&clk_ap_apb_gates 9>, <&clk_uart0>, <&ext_26m>;
};
diff --git a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
index 5a2ef1726e2a..7a32404c6114 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/pwrap.txt
@@ -25,6 +25,7 @@ Required properties in pwrap device node.
"mediatek,mt8135-pwrap" for MT8135 SoCs
"mediatek,mt8173-pwrap" for MT8173 SoCs
"mediatek,mt8183-pwrap" for MT8183 SoCs
+ "mediatek,mt8516-pwrap" for MT8516 SoCs
- interrupts: IRQ for pwrap in SOC
- reg-names: Must include the following entries:
"pwrap": Main registers base
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
index d6fe16f094af..876693a7ada5 100644
--- a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -23,6 +23,7 @@ Required properties:
- "mediatek,mt7622-scpsys"
- "mediatek,mt7623-scpsys", "mediatek,mt2701-scpsys": For MT7623 SoC
- "mediatek,mt7623a-scpsys": For MT7623A SoC
+ - "mediatek,mt7629-scpsys", "mediatek,mt7622-scpsys": For MT7629 SoC
- "mediatek,mt8173-scpsys"
- #power-domain-cells: Must be 1
- reg: Address range of the SCPSYS unit
@@ -33,8 +34,8 @@ Required properties:
Required clocks for MT2701 or MT7623: "mm", "mfg", "ethif"
Required clocks for MT2712: "mm", "mfg", "venc", "jpgdec", "audio", "vdec"
Required clocks for MT6797: "mm", "mfg", "vdec"
- Required clocks for MT7622: "hif_sel"
- Required clocks for MT7622A: "ethif"
+ Required clocks for MT7622 or MT7629: "hif_sel"
+ Required clocks for MT7623A: "ethif"
Required clocks for MT8173: "mm", "mfg", "venc", "venc_lt"
Optional properties:
diff --git a/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt b/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
index 4248b662deff..229ad1392cdc 100644
--- a/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/adi,axi-i2s.txt
@@ -1,5 +1,8 @@
ADI AXI-I2S controller
+The core can be generated with only transmit (playback), only receive
+(capture), or both directions enabled.
+
Required properties:
- compatible : Must be "adi,axi-i2s-1.00.a"
- reg : Must contain I2S core's registers location and length
@@ -9,8 +12,8 @@ Required properties:
- clock-names : "axi" for the clock to the AXI interface, "ref" for the sample
rate reference clock.
- dmas: Pairs of phandle and specifier for the DMA channels that are used by
- the core. The core expects two dma channels, one for transmit and one for
- receive.
+ the core. The core expects two dma channels if both transmit and receive are
+ enabled, one channel otherwise.
- dma-names : "tx" for the transmit channel, "rx" for the receive channel.
For more details on the 'dma', 'dma-names', 'clock' and 'clock-names' properties
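+
+For a core generated with only the transmit direction enabled, the node then
+carries a single DMA channel, for instance (addresses, phandles and DMA
+specifiers are illustrative):
+
+	i2s: i2s@77600000 {
+		compatible = "adi,axi-i2s-1.00.a";
+		reg = <0x77600000 0x1000>;
+		clocks = <&clkc 15>, <&audio_clock>;
+		clock-names = "axi", "ref";
+		dmas = <&dmac 0>;
+		dma-names = "tx";
+	};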
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
index 3dfc2515e5c6..4330fc9dca6d 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-fifo.txt
@@ -2,7 +2,9 @@
Required properties:
- compatible: 'amlogic,axg-toddr' or
- 'amlogic,axg-frddr'
+	'amlogic,axg-frddr' or
+ 'amlogic,g12a-frddr' or
+ 'amlogic,g12a-toddr'
- reg: physical base address of the controller and length of memory
mapped region.
- interrupts: interrupt specifier for the fifo.
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
index 5672d0bc5b16..73f473a9365f 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
@@ -1,7 +1,8 @@
* Amlogic Audio PDM input
Required properties:
-- compatible: 'amlogic,axg-pdm'
+- compatible: 'amlogic,axg-pdm' or
+ 'amlogic,g12a-pdm'
- reg: physical base address of the controller and length of memory
mapped region.
- clocks: list of clock phandle, one for each entry clock-names.
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt
index 2e6cb7d9b202..0b82504fa419 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt
@@ -1,7 +1,8 @@
* Amlogic Audio SPDIF Input
Required properties:
-- compatible: 'amlogic,axg-spdifin'
+- compatible: 'amlogic,axg-spdifin' or
+ 'amlogic,g12a-spdifin'
- interrupts: interrupt specifier for the spdif input.
- clocks: list of clock phandle, one for each entry clock-names.
- clock-names: should contain the following:
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
index 521c38ad89e7..826152730508 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifout.txt
@@ -1,7 +1,8 @@
* Amlogic Audio SPDIF Output
Required properties:
-- compatible: 'amlogic,axg-spdifout'
+- compatible: 'amlogic,axg-spdifout' or
+ 'amlogic,g12a-spdifout'
- clocks: list of clock phandle, one for each entry clock-names.
- clock-names: should contain the following:
* "pclk" : peripheral clock.
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
index 1c1b7490554e..3b94a715a0b9 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-tdm-formatters.txt
@@ -2,7 +2,9 @@
Required properties:
- compatible: 'amlogic,axg-tdmin' or
- 'amlogic,axg-tdmout'
+ 'amlogic,axg-tdmout' or
+ 'amlogic,g12a-tdmin' or
+ 'amlogic,g12a-tdmout'
- reg: physical base address of the controller and length of memory
mapped region.
- clocks: list of clock phandle, one for each entry clock-names.
diff --git a/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..41ae2699f07a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
@@ -0,0 +1,39 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the audio portion
+of the driver.
+
+This binding must be part of the Lochnagar MFD binding:
+ [4] ../mfd/cirrus,lochnagar.txt
+
+Required properties:
+
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-soundcard"
+
+ - #sound-dai-cells : Must be set to 1.
+
+ - clocks : Contains an entry for each entry in clock-names.
+ - clock-names : Must include the following clocks:
+ "mclk" Master clock source for the sound card, should normally
+ be set to LOCHNAGAR_SOUNDCARD_MCLK provided by the Lochnagar
+ clock driver.
+
+Example:
+
+lochnagar-sc {
+ compatible = "cirrus,lochnagar2-soundcard";
+
+ #sound-dai-cells = <1>;
+
+ clocks = <&lochnagar_clk LOCHNAGAR_SOUNDCARD_MCLK>;
+ clock-names = "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l51.txt b/Documentation/devicetree/bindings/sound/cs42l51.txt
index 4b5de33ce377..acbd68ddd2cb 100644
--- a/Documentation/devicetree/bindings/sound/cs42l51.txt
+++ b/Documentation/devicetree/bindings/sound/cs42l51.txt
@@ -1,6 +1,17 @@
CS42L51 audio CODEC
+Required properties:
+
+ - compatible : "cirrus,cs42l51"
+
+ - reg : the I2C address of the device for I2C.
+
Optional properties:
+ - VL-supply, VD-supply, VA-supply, VAHP-supply: power supplies for the device,
+ as covered in Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ - reset-gpios : GPIO specification for the reset pin. If specified, it will be
+ deasserted before starting the communication with the codec.
- clocks : a list of phandles + clock-specifiers, one for each entry in
clock-names
@@ -14,4 +25,9 @@ cs42l51: cs42l51@4a {
reg = <0x4a>;
clocks = <&mclk_prov>;
clock-names = "MCLK";
+ VL-supply = <&reg_audio>;
+ VD-supply = <&reg_audio>;
+ VA-supply = <&reg_audio>;
+ VAHP-supply = <&reg_audio>;
+ reset-gpios = <&gpiog 9 GPIO_ACTIVE_LOW>;
};
diff --git a/Documentation/devicetree/bindings/sound/da7219.txt b/Documentation/devicetree/bindings/sound/da7219.txt
index e9d0baeb94e2..add1caf26ac2 100644
--- a/Documentation/devicetree/bindings/sound/da7219.txt
+++ b/Documentation/devicetree/bindings/sound/da7219.txt
@@ -23,8 +23,8 @@ Optional properties:
interrupt is to be used to wake system, otherwise "irq" should be used.
- wakeup-source: Flag to indicate this device can wake system (suspend/resume).
-- #clock-cells : Should be set to '<0>', only one clock source provided;
-- clock-output-names : Name given for DAI clocks output;
+- #clock-cells : Should be set to '<1>', two clock sources provided;
+- clock-output-names : Names given for DAI clock outputs (WCLK & BCLK);
- clocks : phandle and clock specifier for codec MCLK.
- clock-names : Clock name string for 'clocks' attribute, should be "mclk".
@@ -84,8 +84,8 @@ Example:
VDDMIC-supply = <&reg_audio>;
VDDIO-supply = <&reg_audio>;
- #clock-cells = <0>;
- clock-output-names = "dai-clks";
+ #clock-cells = <1>;
+ clock-output-names = "dai-wclk", "dai-bclk";
clocks = <&clks 201>;
clock-names = "mclk";
diff --git a/Documentation/devicetree/bindings/sound/fsl,audmix.txt b/Documentation/devicetree/bindings/sound/fsl,audmix.txt
new file mode 100644
index 000000000000..840b7e0d6a63
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,audmix.txt
@@ -0,0 +1,50 @@
+NXP Audio Mixer (AUDMIX).
+
+The Audio Mixer is an on-chip functional module that allows mixing of two
+audio streams into a single audio stream. Audio Mixer has two input serial
+audio interfaces. These are driven by two Synchronous Audio interface
+modules (SAI). Each input serial interface carries 8 audio channels in its
+frame in TDM manner. Mixer mixes audio samples of corresponding channels
+from two interfaces into a single sample. Before mixing, audio samples of
+two inputs can be attenuated based on configuration. The output of the
+Audio Mixer is also a serial audio interface. Like input interfaces it has
+the same TDM frame format. This output is used to drive the serial DAC TDM
+interface of audio codec and also sent to the external pins along with the
+receive path of normal audio SAI module for readback by the CPU.
+
+The output of Audio Mixer can be selected from any of the three streams
+ - serial audio input 1
+ - serial audio input 2
+ - mixed audio
+
+Mixing operation is independent of audio sample rate but the two audio
+input streams must have same audio sample rate with same number of channels
+in TDM frame to be eligible for mixing.
+
+Device driver required properties:
+=================================
+ - compatible : Compatible list, contains "fsl,imx8qm-audmix"
+
+ - reg : Offset and length of the register set for the device.
+
+ - clocks : Must contain an entry for each entry in clock-names.
+
+ - clock-names : Must include the "ipg" for register access.
+
+ - power-domains : Must contain the phandle to AUDMIX power domain node
+
+ - dais : Must contain a list of phandles to AUDMIX connected
+ DAIs. The current implementation requires two phandles
+ to SAI interfaces to be provided, the first SAI in the
+ list being used to route the AUDMIX output.
+
+Device driver configuration example:
+======================================
+ audmix: audmix@59840000 {
+ compatible = "fsl,imx8qm-audmix";
+ reg = <0x0 0x59840000 0x0 0x10000>;
+ clocks = <&clk IMX8QXP_AUD_AUDMIX_IPG>;
+ clock-names = "ipg";
+ power-domains = <&pd_audmix>;
+ dais = <&sai4>, <&sai5>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mchp-i2s-mcc.txt b/Documentation/devicetree/bindings/sound/mchp-i2s-mcc.txt
new file mode 100644
index 000000000000..91ec83a6faed
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mchp-i2s-mcc.txt
@@ -0,0 +1,43 @@
+* Microchip I2S Multi-Channel Controller
+
+Required properties:
+- compatible: Should be "microchip,sam9x60-i2smcc".
+- reg: Should be the physical base address of the controller and the
+ length of memory mapped region.
+- interrupts: Should contain the interrupt for the controller.
+- dmas: Should be one per channel name listed in the dma-names property,
+ as described in atmel-dma.txt and dma.txt files.
+- dma-names: Identifier string for each DMA request line in the dmas property.
+ Two dmas have to be defined, "tx" and "rx".
+- clocks: Must contain an entry for each entry in clock-names.
+ Please refer to clock-bindings.txt.
+- clock-names: Should be one of each entry matching the clocks phandles list:
+ - "pclk" (peripheral clock) Required.
+ - "gclk" (generated clock) Optional (1).
+
+Optional properties:
+- pinctrl-0: Should specify pin control groups used for this controller.
+- pinctrl-names: Should contain only one value - "default".
+
+
+(1) : Only the peripheral clock is required. The generated clock is optional
+ and is typically only needed when Master Mode is required.
+
+Example:
+
+ i2s@f001c000 {
+ compatible = "microchip,sam9x60-i2smcc";
+ reg = <0xf001c000 0x100>;
+ interrupts = <34 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(36))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(37))>;
+ dma-names = "tx", "rx";
+ clocks = <&i2s_clk>, <&i2s_gclk>;
+ clock-names = "pclk", "gclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2s_default>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/mt8183-da7219-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-da7219-max98357.txt
new file mode 100644
index 000000000000..92ac86f83822
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8183-da7219-max98357.txt
@@ -0,0 +1,15 @@
+MT8183 with MT6358, DA7219 and MAX98357 CODECS
+
+Required properties:
+- compatible : "mediatek,mt8183_da7219_max98357"
+- mediatek,headset-codec: the phandles of da7219 codecs
+- mediatek,platform: the phandle of MT8183 ASoC platform
+
+Example:
+
+ sound {
+ compatible = "mediatek,mt8183_da7219_max98357";
+ mediatek,headset-codec = <&da7219>;
+ mediatek,platform = <&afe>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
new file mode 100644
index 000000000000..d6d5207fa996
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
@@ -0,0 +1,15 @@
+MT8183 with MT6358, TS3A227 and MAX98357 CODECS
+
+Required properties:
+- compatible : "mediatek,mt8183_mt6358_ts3a227_max98357"
+- mediatek,headset-codec: the phandles of ts3a227 codecs
+- mediatek,platform: the phandle of MT8183 ASoC platform
+
+Example:
+
+ sound {
+ compatible = "mediatek,mt8183_mt6358_ts3a227_max98357";
+ mediatek,headset-codec = <&ts3a227>;
+ mediatek,platform = <&afe>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 648d43e1b1e9..5c52182f7dcf 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -266,6 +266,7 @@ Required properties:
- "renesas,rcar_sound-r8a7743" (RZ/G1M)
- "renesas,rcar_sound-r8a7744" (RZ/G1N)
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
+ - "renesas,rcar_sound-r8a77470" (RZ/G1C)
- "renesas,rcar_sound-r8a774a1" (RZ/G2M)
- "renesas,rcar_sound-r8a774c0" (RZ/G2E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
@@ -282,7 +283,12 @@ Required properties:
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
- SRU/ADG/SSIU/SSI if generation2
+ SRU/ADG/SSIU/SSI/AUDIO-DMAC-periperi if generation2/generation3
+ Select the extended AUDIO-DMAC-periperi address if the SoC has it,
+ otherwise select the normal AUDIO-DMAC-periperi address (see the
+ fragment below).
+- reg-names : Should contain the register names.
+ scu/adg/ssi if generation1
+ scu/adg/ssiu/ssi/audmapp if generation2/generation3
- rcar_sound,ssi : Should contain SSI feature.
The number of SSI subnode should be same as HW.
see below for detail.
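+
+Example fragment for a generation2/generation3 SoC, showing the reg-names
+order described above (the register addresses are illustrative only; use
+the values for your SoC):
+
+	rcar_sound: sound@ec500000 {
+		...
+		reg = <0 0xec500000 0 0x1000>,	/* SCU */
+		      <0 0xec5a0000 0 0x100>,	/* ADG */
+		      <0 0xec540000 0 0x1000>,	/* SSIU */
+		      <0 0xec541000 0 0x280>,	/* SSI */
+		      <0 0xec740000 0 0x200>;	/* Audio DMAC peri peri */
+		reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+		...
+	};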
diff --git a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
index 47f164fbd1d7..98572a25122f 100644
--- a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
@@ -3,6 +3,9 @@
Required properties:
- compatible: "rockchip,pdm"
+ - "rockchip,px30-pdm"
+ - "rockchip,rk1808-pdm"
+ - "rockchip,rk3308-pdm"
- reg: physical base address of the controller and length of memory mapped
region.
- dmas: DMA specifiers for rx dma. See the DMA client binding,
@@ -12,6 +15,8 @@ Required properties:
- clock-names: should contain following:
- "pdm_hclk": clock for PDM BUS
- "pdm_clk" : clock for PDM controller
+- resets: a list of phandle + reset-specifier pairs, one for each entry in reset-names.
+- reset-names: reset names, should include "pdm-m" (see the fragment below).
- pinctrl-names: Must contain a "default" entry.
- pinctrl-N: One property must exist for each entry in
pinctrl-names. See ../pinctrl/pinctrl-bindings.txt
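+
+A fragment illustrating the reset properties above (the reset controller
+phandle and specifier are illustrative only; they depend on the SoC):
+
+	resets = <&cru SRST_PDM>;
+	reset-names = "pdm-m";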
diff --git a/Documentation/devicetree/bindings/sound/rt5651.txt b/Documentation/devicetree/bindings/sound/rt5651.txt
index a41199a5cd79..56e736a1cba9 100644
--- a/Documentation/devicetree/bindings/sound/rt5651.txt
+++ b/Documentation/devicetree/bindings/sound/rt5651.txt
@@ -22,6 +22,11 @@ Optional properties:
2: Use JD1_2 pin for jack-detect
3: Use JD2 pin for jack-detect
+- realtek,jack-detect-not-inverted
+ bool. Normal jack-detect switches give an inverted (active-low) signal;
+ set this bool in the rare case you have a jack-detect switch which is not
+ inverted (see the fragment below).
+
- realtek,over-current-threshold-microamp
u32, micbias over-current detection threshold in µA, valid values are
600, 1500 and 2000µA.
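+
+Fragment for a board whose jack-detect switch is not inverted (the
+jack-detect source value is just an example):
+
+	realtek,jack-detect-source = <2>;
+	realtek,jack-detect-not-inverted;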
diff --git a/Documentation/devicetree/bindings/sound/simple-amplifier.txt b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
index 7182ac4f1e65..b1b097cc9b68 100644
--- a/Documentation/devicetree/bindings/sound/simple-amplifier.txt
+++ b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
@@ -2,9 +2,9 @@ Simple Amplifier Audio Driver
Required properties:
- compatible : "dioo,dio2125" or "simple-audio-amplifier"
-- enable-gpios : the gpio connected to the enable pin of the simple amplifier
Optional properties:
+- enable-gpios : the gpio connected to the enable pin of the simple amplifier
- VCC-supply : power supply for the device, as covered
in Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index 4629c8f8a6b6..79954cd6e37b 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -24,6 +24,8 @@ Optional properties:
a microphone is attached.
- simple-audio-card,aux-devs : List of phandles pointing to auxiliary devices, such
as amplifiers, to be added to the sound card.
+- simple-audio-card,pin-switches : List of strings containing the widget names for
+ which pin switches must be created.
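+ For example, a board with an external speaker widget might use (the
+ widget name is board specific):
+ simple-audio-card,pin-switches = "Ext Spk";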
Optional subnodes:
diff --git a/Documentation/devicetree/bindings/sound/sprd-mcdt.txt b/Documentation/devicetree/bindings/sound/sprd-mcdt.txt
new file mode 100644
index 000000000000..274ba0acbfd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sprd-mcdt.txt
@@ -0,0 +1,19 @@
+Spreadtrum Multi-Channel Data Transfer Binding
+
+The Multi-channel data transfer controller is used for sound stream
+transmission between the audio subsystem and other AP/CP subsystems. It
+supports 10 DAC channels and 10 ADC channels, and each channel can be
+configured with DMA mode or interrupt mode.
+
+Required properties:
+- compatible: Should be "sprd,sc9860-mcdt".
+- reg: Should contain registers address and length.
+- interrupts: Should contain one interrupt shared by all channels.
+
+Example:
+
+mcdt@41490000 {
+ compatible = "sprd,sc9860-mcdt";
+ reg = <0 0x41490000 0 0x170>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt b/Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt
new file mode 100644
index 000000000000..703979dbd577
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt
@@ -0,0 +1,33 @@
+Amazon's Annapurna Labs Thermal Sensor
+
+Simple thermal device that allows temperature reading by a single MMIO
+transaction.
+
+Required properties:
+- compatible: "amazon,al-thermal".
+- reg: The physical base address and length of the sensor's registers.
+- #thermal-sensor-cells: Must be 1. See ./thermal.txt for a description.
+
+Example:
+ thermal: thermal {
+ compatible = "amazon,al-thermal";
+ reg = <0x0 0x05002860 0x0 0x1>;
+ #thermal-sensor-cells = <0x1>;
+ };
+
+ thermal-zones {
+ thermal-z0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&thermal 0>;
+ trips {
+ critical {
+ temperature = <105000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
index b6c0ae53d4dc..f02f38527a6b 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
@@ -52,13 +52,47 @@ Required properties :
Must set as following values:
TEGRA_SOCTHERM_THROT_LEVEL_LOW, TEGRA_SOCTHERM_THROT_LEVEL_MED
TEGRA_SOCTHERM_THROT_LEVEL_HIGH, TEGRA_SOCTHERM_THROT_LEVEL_NONE
+ - nvidia,gpu-throt-level: This property is for Tegra124 and Tegra210.
+ It is the level of pulse skippers, which used to throttle clock
+ frequencies. It indicates gpu clock throttling depth and can be
+ programmed to any of the following values which represent a throttling
+ percentage:
+ TEGRA_SOCTHERM_THROT_LEVEL_NONE (0%)
+ TEGRA_SOCTHERM_THROT_LEVEL_LOW (50%),
+ TEGRA_SOCTHERM_THROT_LEVEL_MED (75%),
+ TEGRA_SOCTHERM_THROT_LEVEL_HIGH (85%).
- #cooling-cells: Should be 1. This cooling device only support on/off state.
See ./thermal.txt for a description of this property.
+ Optional properties: The following properties are T210 specific and
+ valid only for OCx throttle events.
+ - nvidia,count-threshold: Specifies the number of OC events that are
+ required for triggering an interrupt. Interrupts are not triggered if
+ the property is missing. A value of 0 will interrupt on every OC alarm.
+ - nvidia,polarity-active-low: Configures the polarity of the OC alarm
+ signal. If present, this means assert low, otherwise assert high.
+ - nvidia,alarm-filter: Number of clocks to filter event. When the filter
+ expires (which means the OC event has not occurred for a long time),
+ the counter is cleared and filter is rearmed. Default value is 0.
+ - nvidia,throttle-period-us: Specifies the number of uSec for which
+ throttling is engaged after the OC event is deasserted. Default value
+ is 0.
+
+Optional properties:
+- nvidia,thermtrips : When present, this property specifies the temperature at
+ which the soctherm hardware will assert the thermal trigger signal to the
+ Power Management IC, which can be configured to reset or shutdown the device.
+ It is an array of pairs where each pair represents a tsensor id followed by a
+ temperature in millicelsius. In the absence of this property the critical
+ trip point will be used as the thermtrip temperature.
+
Note:
-- the "critical" type trip points will be set to SOC_THERM hardware as the
-shut down temperature. Once the temperature of this thermal zone is higher
-than it, the system will be shutdown or reset by hardware.
+- the "critical" type trip points will be used to set the temperature at which
+the SOC_THERM hardware will assert a thermal trigger if the "nvidia,thermtrips"
+property is missing. When the thermtrips property is present, the breach of a
+critical trip point is reported back to the thermal framework to implement
+software shutdown.
+
- the "hot" type trip points will be set to SOC_THERM hardware as the throttle
temperature. Once the the temperature of this thermal zone is higher
than it, it will trigger the HW throttle event.
@@ -79,25 +113,32 @@ Example :
#thermal-sensor-cells = <1>;
+ nvidia,thermtrips = <TEGRA124_SOCTHERM_SENSOR_CPU 102500
+ TEGRA124_SOCTHERM_SENSOR_GPU 103000>;
+
throttle-cfgs {
/*
* When the "heavy" cooling device triggered,
- * the HW will skip cpu clock's pulse in 85% depth
+ * the HW will skip cpu clock's pulse in 85% depth,
+ * and skip gpu clock's pulse at the 85% level
*/
throttle_heavy: heavy {
nvidia,priority = <100>;
nvidia,cpu-throt-percent = <85>;
+ nvidia,gpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_HIGH>;
#cooling-cells = <1>;
};
/*
* When the "light" cooling device triggered,
- * the HW will skip cpu clock's pulse in 50% depth
+ * the HW will skip cpu clock's pulse in 50% depth,
+ * and skip gpu clock's pulse at the 50% level
*/
throttle_light: light {
nvidia,priority = <80>;
nvidia,cpu-throt-percent = <50>;
+ nvidia,gpu-throt-level = <TEGRA_SOCTHERM_THROT_LEVEL_LOW>;
#cooling-cells = <1>;
};
@@ -107,6 +148,17 @@ Example :
* arbiter will select the highest priority as the final throttle
* settings to skip cpu pulse.
*/
+
+ throttle_oc1: oc1 {
+ nvidia,priority = <50>;
+ nvidia,polarity-active-low;
+ nvidia,count-threshold = <100>;
+ nvidia,alarm-filter = <5100000>;
+ nvidia,throttle-period-us = <0>;
+ nvidia,cpu-throt-percent = <75>;
+ nvidia,gpu-throt-level =
+ <TEGRA_SOCTHERM_THROT_LEVEL_MED>;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
index 1d9e8cf61018..673cc1831ee9 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
@@ -6,11 +6,14 @@ Required properties:
- "qcom,msm8916-tsens" (MSM8916)
- "qcom,msm8974-tsens" (MSM8974)
- "qcom,msm8996-tsens" (MSM8996)
+ - "qcom,qcs404-tsens", "qcom,tsens-v1" (QCS404)
- "qcom,msm8998-tsens", "qcom,tsens-v2" (MSM8998)
- "qcom,sdm845-tsens", "qcom,tsens-v2" (SDM845)
The generic "qcom,tsens-v2" property must be used as a fallback for any SoC
with version 2 of the TSENS IP. MSM8996 is the only exception because the
generic property did not exist when support was added.
+ Similarly, the generic "qcom,tsens-v1" property must be used as a fallback for
+ any SoC with version 1 of the TSENS IP.
- reg: Address range of the thermal registers.
New platforms containing v2.x.y of the TSENS IP must specify the SROT and TM
@@ -39,3 +42,14 @@ tsens0: thermal-sensor@c263000 {
#qcom,sensors = <13>;
#thermal-sensor-cells = <1>;
};
+
+Example 3 (for any platform containing v1 of the TSENS IP):
+tsens: thermal-sensor@4a9000 {
+ compatible = "qcom,qcs404-tsens", "qcom,tsens-v1";
+ reg = <0x004a9000 0x1000>, /* TM */
+ <0x004a8000 0x1000>; /* SROT */
+ nvmem-cells = <&tsens_caldata>;
+ nvmem-cell-names = "calib";
+ #qcom,sensors = <10>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
index 43d744e5305e..c6aac9bcacf1 100644
--- a/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : should be "rockchip,<name>-tsadc"
+ "rockchip,px30-tsadc": found on PX30 SoCs
"rockchip,rv1108-tsadc": found on RV1108 SoCs
"rockchip,rk3228-tsadc": found on RK3228 SoCs
"rockchip,rk3288-tsadc": found on RK3288 SoCs
diff --git a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
index d72355502b78..691a09db2fef 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
@@ -8,16 +8,22 @@ temperature using voltage-temperature lookup table.
Required properties:
===================
- compatible: Must be "generic-adc-thermal".
+- #thermal-sensor-cells: Should be 1. See ./thermal.txt for a description
+ of this property.
+Optional properties:
+===================
- temperature-lookup-table: Two dimensional array of Integer; lookup table
to map the relation between ADC value and
temperature. When ADC is read, the value is
looked up on the table to get the equivalent
temperature.
+
The first value of the each row of array is the
temperature in milliCelsius and second value of
the each row of array is the ADC read value.
-- #thermal-sensor-cells: Should be 1. See ./thermal.txt for a description
- of this property.
+
+ If not specified, the driver assumes the ADC
+ channel gives milliCelsius directly.
Example :
#include <dt-bindings/thermal/thermal.h>
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt b/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt
index 5c2e23574ca0..3da9d515c03a 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt
@@ -2,7 +2,9 @@ Allwinner A1X SoCs Timer Controller
Required properties:
-- compatible : should be "allwinner,sun4i-a10-timer"
+- compatible : should be one of the following:
+ "allwinner,sun4i-a10-timer"
+ "allwinner,suniv-f1c100s-timer"
- reg : Specifies base physical address and size of the registers.
- interrupts : The interrupt of the first timer
- clocks: phandle to the source clock (usually a 24 MHz fixed clock)
diff --git a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
index c4ab59550fc2..b3f0fe96ff0d 100644
--- a/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
+++ b/Documentation/devicetree/bindings/timer/arm,arch_timer_mmio.yaml
@@ -59,6 +59,7 @@ properties:
patternProperties:
'^frame@[0-9a-z]*$':
+ type: object
description: A timer node has up to 8 frame sub-nodes, each with the following properties.
properties:
frame-number:
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
new file mode 100644
index 000000000000..a36a0746c056
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2018 Linaro Ltd.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/timer/intel-ixp4xx-timer.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Intel IXP4xx XScale Networking Processors Timers
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description: This timer is found in the Intel IXP4xx processors.
+
+properties:
+ compatible:
+ items:
+ - const: intel,ixp4xx-timer
+
+ reg:
+ description: Should contain registers location and length
+
+ interrupts:
+ minItems: 1
+ maxItems: 2
+ items:
+ - description: Timer 1 interrupt
+ - description: Timer 2 interrupt
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ timer@c8005000 {
+ compatible = "intel,ixp4xx-timer";
+ reg = <0xc8005000 0x100>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index ff7c567a7972..74c3eadad844 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -17,6 +17,7 @@ Required properties:
* "mediatek,mt8127-timer" for MT8127 compatible timers (GPT)
* "mediatek,mt8135-timer" for MT8135 compatible timers (GPT)
* "mediatek,mt8173-timer" for MT8173 compatible timers (GPT)
+ * "mediatek,mt8516-timer" for MT8516 compatible timers (GPT)
* "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT)
For those SoCs that use SYST
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index d79fb22bde39..747fd3f689dc 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -92,6 +92,8 @@ properties:
- fsl,sgtl5000
# G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
- gmt,g751
+ # Infineon IR38064 Voltage Regulator
+ - infineon,ir38064
# Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
- infineon,slb9635tt
# Infineon SLB9645 I2C TPM (new protocol, max 400khz)
@@ -102,6 +104,8 @@ properties:
- isil,isl29028
# Intersil ISL29030 Ambient Light and Proximity Sensor
- isil,isl29030
+ # Intersil ISL68137 Digital Output Configurable PWM Controller
+ - isil,isl68137
# 5 Bit Programmable, Pulse-Width Modulator
- maxim,ds1050
# Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
diff --git a/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
index a04a4989ec7f..02347b017abd 100644
--- a/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
+++ b/Documentation/devicetree/bindings/ufs/cdns,ufshc.txt
@@ -5,8 +5,9 @@ Each UFS controller instance should have its own node.
Please see the ufshcd-pltfrm.txt for a list of all available properties.
Required properties:
-- compatible : Compatible list, contains the following controller:
- "cdns,ufshc"
+- compatible : Compatible list, contains one of the following controllers:
+ "cdns,ufshc" - Generic CDNS HCI,
+ "cdns,ufshc-m31-16nm" - CDNS UFS HC + M31 16nm PHY
complemented with the JEDEC version:
"jedec,ufs-2.0"
diff --git a/Documentation/devicetree/bindings/ufs/ufs-mediatek.txt b/Documentation/devicetree/bindings/ufs/ufs-mediatek.txt
new file mode 100644
index 000000000000..72aab8547308
--- /dev/null
+++ b/Documentation/devicetree/bindings/ufs/ufs-mediatek.txt
@@ -0,0 +1,43 @@
+* Mediatek Universal Flash Storage (UFS) Host Controller
+
+UFS nodes are defined to describe on-chip UFS hardware macro.
+Each UFS Host Controller should have its own node.
+
+To bind UFS PHY with UFS host controller, the controller node should
+contain a phandle reference to UFS M-PHY node.
+
+Required properties for UFS nodes:
+- compatible : Compatible list, contains the following controller:
+ "mediatek,mt8183-ufshci" for MediaTek UFS host controller
+ present on MT81xx chipsets.
+- reg : Address and length of the UFS register set.
+- phys : phandle to m-phy.
+- clocks : List of phandle and clock specifier pairs.
+- clock-names : List of clock input name strings sorted in the same
+ order as the clocks property. "ufs" is mandatory.
+ "ufs": ufshci core control clock.
+- freq-table-hz : Array of <min max> operating frequencies stored in the same
+ order as the clocks property. If this property is not
+ defined or a value in the array is "0" then it is assumed
+ that the frequency is set by the parent clock or a
+ fixed rate clock source.
+- vcc-supply : phandle to VCC supply regulator node.
+
+Example:
+
+ ufsphy: phy@11fa0000 {
+ ...
+ };
+
+ ufshci@11270000 {
+ compatible = "mediatek,mt8183-ufshci";
+ reg = <0 0x11270000 0 0x2300>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>;
+ phys = <&ufsphy>;
+
+ clocks = <&infracfg_ao INFRACFG_AO_UFS_CG>;
+ clock-names = "ufs";
+ freq-table-hz = <0 0>;
+
+ vcc-supply = <&mt_pmic_vemc_ldo_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 21d9a93db2e9..fd59f93e9556 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -29,6 +29,7 @@ Optional properties:
- vdda-pll-max-microamp : specifies max. load that can be drawn from pll supply
- vddp-ref-clk-supply : phandle to UFS device ref_clk pad power supply
- vddp-ref-clk-max-microamp : specifies max. load that can be drawn from this supply
+- resets : specifies the PHY reset in the UFS controller
Example:
@@ -51,9 +52,11 @@ Example:
<&clock_gcc clk_ufs_phy_ldo>,
<&clock_gcc clk_gcc_ufs_tx_cfg_clk>,
<&clock_gcc clk_gcc_ufs_rx_cfg_clk>;
+ resets = <&ufshc 0>;
};
- ufshc@fc598000 {
+ ufshc: ufshc@fc598000 {
+ #reset-cells = <1>;
...
phys = <&ufsphy1>;
phy-names = "ufsphy";
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 5111e9130bc3..a74720486ee2 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -11,6 +11,7 @@ Required properties:
the appropriate jedec string:
"qcom,msm8994-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
- interrupts : <interrupt mapping for UFS host controller IRQ>
- reg : <registers mapping>
@@ -31,7 +32,6 @@ Optional properties:
- vcc-max-microamp : specifies max. load that can be drawn from vcc supply
- vccq-max-microamp : specifies max. load that can be drawn from vccq supply
- vccq2-max-microamp : specifies max. load that can be drawn from vccq2 supply
-- <name>-fixed-regulator : boolean property specifying that <name>-supply is a fixed regulator
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
@@ -50,6 +50,8 @@ Optional properties:
-lanes-per-direction : number of lanes available per direction - either 1 or 2.
Note that it is assume same number of lanes is used both
directions at once. If not specified, default is 2 lanes per direction.
+- #reset-cells : Must be <1> for Qualcomm UFS controllers that expose
+ PHY reset from the UFS controller.
- resets : reset node register
- reset-names : describe reset node register, the "rst" corresponds to reset the whole UFS IP.
@@ -63,7 +65,6 @@ Example:
interrupts = <0 28 0>;
vdd-hba-supply = <&xxx_reg0>;
- vdd-hba-fixed-regulator;
vcc-supply = <&xxx_reg1>;
vcc-supply-1p8;
vccq-supply = <&xxx_reg2>;
@@ -79,4 +80,5 @@ Example:
reset-names = "rst";
phys = <&ufsphy1>;
phy-names = "ufsphy";
+ #reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
index 9a8b631904fd..b9f04e617eb7 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
@@ -40,3 +40,91 @@ Example device nodes:
phy-names = "usb2-phy", "usb3-phy";
};
};
+
+Amlogic Meson G12A DWC3 USB SoC Controller Glue
+
+The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
+in host-only mode, and a DWC2 IP Core configured for USB2 peripheral mode
+only.
+
+A glue layer connects the DWC3 core to the USB2 PHYs and optionally to a
+USB3 PHY.
+
+One of the USB2 PHYs can be re-routed in peripheral mode to the DWC2 USB IP.
+
+The DWC3 Glue controls the PHY routing and power; an interrupt line is
+connected to the Glue to serve as OTG ID change detection.
+
+Required properties:
+- compatible: Should be "amlogic,meson-g12a-usb-ctrl"
+- clocks: a handle for the "USB" clock
+- resets: a handle for the shared "USB" reset line
+- reg: The base address and length of the registers
+- interrupts: the interrupt specifier for the OTG detection
+- phys: handle to used PHYs on the system
+ - a <0> phandle can be used if a PHY is not used
+- phy-names: names of the used PHYs on the system :
+ - "usb2-phy0" for USB2 PHY0 if USBHOST_A port is used
+ - "usb2-phy1" for USB2 PHY1 if USBOTG_B port is used
+ - "usb3-phy0" for USB3 PHY if USB3_0 is used
+- dr_mode: should be "host", "peripheral", or "otg" depending on
+ the usage and configuration of the OTG Capable port.
+ - "host" and "peripheral" means a fixed Host or Device only connection
+ - "otg" means the port can be used as both Host or Device and
+ be switched automatically using the OTG ID pin.
+
+Optional properties:
+- vbus-supply: should be a phandle to the regulator controlling the VBUS
+ power supply when used in OTG switchable mode
+
+Required child nodes:
+
+A child node must exist to represent the core DWC3 IP block. The name of
+the node is not important. The content of the node is defined in dwc3.txt.
+
+A child node must exist to represent the core DWC2 IP block. The name of
+the node is not important. The content of the node is defined in dwc2.txt.
+
+PHY documentation is provided in the following places:
+- Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
+- Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
+
+Example device nodes:
+ usb: usb@ffe09000 {
+ compatible = "amlogic,meson-g12a-usb-ctrl";
+ reg = <0x0 0xffe09000 0x0 0xa0>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&clkc CLKID_USB>;
+ resets = <&reset RESET_USB>;
+
+ dr_mode = "otg";
+
+ phys = <&usb2_phy0>, <&usb2_phy1>,
+ <&usb3_pcie_phy PHY_TYPE_USB3>;
+ phy-names = "usb2-phy0", "usb2-phy1", "usb3-phy0";
+
+ dwc2: usb@ff400000 {
+ compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
+ reg = <0x0 0xff400000 0x0 0x40000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "ddr";
+ phys = <&usb2_phy1>;
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 16 16 16>;
+ };
+
+ dwc3: usb@ff500000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xff500000 0x0 0x100000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+ snps,quirk-frame-length-adjustment;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 6dc3c4a34483..49eac0dc86b0 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -14,6 +14,7 @@ Required properties:
- "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
- "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
- "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
+ - "amlogic,meson-g12a-usb": The DWC2 USB controller instance in Amlogic G12A SoCs;
- "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
- snps,dwc2: A generic DWC2 USB controller with default parameters.
- "st,stm32f4x9-fsotg": The DWC2 USB FS/HS controller instance in STM32F4x9 SoCs
@@ -31,12 +32,18 @@ Refer to clk/clock-bindings.txt for generic clock consumer properties
Optional properties:
- phys: phy provider specifier
- phy-names: shall be "usb2-phy"
+- vbus-supply: reference to the VBUS regulator. Depending on the current mode
+ this is enabled (in "host" mode") or disabled (in "peripheral" mode). The
+ regulator is updated if the controller is configured in "otg" mode and the
+ status changes between "host" and "peripheral".
Refer to phy/phy-bindings.txt for generic phy consumer properties
- dr_mode: shall be one of "host", "peripheral" and "otg"
Refer to usb/generic.txt
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
+- snps,reset-phy-on-wake: If present, indicates that the PHY needs to be reset
+ when a wakeup is detected. This is due to a hardware erratum.
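+
+A fragment showing these optional properties (the VBUS regulator phandle is
+board specific and only illustrative):
+
+	vbus-supply = <&usb_vbus>;
+	snps,reset-phy-on-wake;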
Deprecated properties:
- g-use-dma: gadget DMA mode is automatically detected
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
new file mode 100644
index 000000000000..d3b4f6415920
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/generic-ehci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB EHCI Controller Device Tree Bindings
+
+allOf:
+ - $ref: "usb-hcd.yaml"
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+properties:
+ compatible:
+ contains:
+ const: generic-ehci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ minItems: 1
+ maxItems: 4
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+ description: |
+ In case of the Renesas R-Car Gen3 SoCs:
+ - if a host only channel: first clock should be host.
+ - if a USB DRD channel: first clock should be host and second
+ one should be peripheral
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian descriptors and big
+ endian registers.
+
+ big-endian-desc:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian descriptors.
+
+ big-endian-regs:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian registers.
+
+ has-transaction-translator:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag if EHCI has a Transaction Translator built into
+ the root hub.
+
+ needs-reset-on-resume:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag to force EHCI reset after resume.
+
+ phys: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ehci@e0000300 {
+ compatible = "ibm,usb-ehci-440epx", "generic-ehci";
+ interrupt-parent = <&UIC0>;
+ interrupts = <0x1a 4>;
+ reg = <0 0xe0000300 90 0 0xe0000390 70>;
+ big-endian;
+ };
+
+ - |
+ ehci0: usb@1c14000 {
+ compatible = "allwinner,sun4i-a10-ehci", "generic-ehci";
+ reg = <0x01c14000 0x100>;
+ interrupts = <39>;
+ clocks = <&ahb_gates 1>;
+ phys = <&usbphy 1>;
+ phy-names = "usb";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/generic-ohci.yaml b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
new file mode 100644
index 000000000000..da5a14becbe5
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/generic-ohci.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/generic-ohci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB OHCI Controller Device Tree Bindings
+
+allOf:
+ - $ref: "usb-hcd.yaml"
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+properties:
+ compatible:
+ contains:
+ const: generic-ohci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ clocks:
+ minItems: 1
+ maxItems: 3
+ description: |
+ In case of the Renesas R-Car Gen3 SoCs:
+ - if a host only channel: first clock should be host.
+ - if a USB DRD channel: first clock should be host and second
+ one should be peripheral
+
+ big-endian:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian descriptors and big
+ endian registers.
+
+ big-endian-desc:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian descriptors.
+
+ big-endian-regs:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set this flag for HCDs with big endian registers.
+
+ remote-wakeup-connected:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Remote wakeup is wired on the platform.
+
+ no-big-frame-no:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Set if frame_no lives in bits [15:0] of HCCA
+
+ num-ports:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Overrides the detected port count
+
+ phys: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ ohci0: usb@1c14400 {
+ compatible = "allwinner,sun4i-a10-ohci", "generic-ohci";
+ reg = <0x01c14400 0x100>;
+ interrupts = <64>;
+ clocks = <&usb_clk 6>, <&ahb_gates 2>;
+ phys = <&usbphy 1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt
index 620355cee63f..16808721f3ff 100644
--- a/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt
+++ b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt
@@ -8,9 +8,15 @@ Required properties:
- interrupt-names: must be "mc"
- clocks: phandle to the "udc" clock
- clock-names: must be "udc"
+- phys: phandle to the USB PHY
Example:
+usb_phy: usb-phy@0 {
+ compatible = "usb-nop-xceiv";
+ #phy-cells = <0>;
+};
+
udc: usb@13040000 {
compatible = "ingenic,jz4740-musb";
reg = <0x13040000 0x10000>;
@@ -21,4 +27,6 @@ udc: usb@13040000 {
clocks = <&cgu JZ4740_CLK_UDC>;
clock-names = "udc";
+
+ phys = <&usb_phy>;
};
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
index 4156c3e181c5..5bfcc0b4d6b9 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra124-xusb.txt
@@ -10,6 +10,7 @@ Required properties:
- Tegra124: "nvidia,tegra124-xusb"
- Tegra132: "nvidia,tegra132-xusb", "nvidia,tegra124-xusb"
- Tegra210: "nvidia,tegra210-xusb"
+ - Tegra186: "nvidia,tegra186-xusb"
- reg: Must contain the base and length of the xHCI host registers, XUSB FPCI
registers and XUSB IPFS registers.
- reg-names: Must contain the following entries:
@@ -59,6 +60,8 @@ For Tegra210:
- avdd-pll-uerefe-supply: PLLE reference PLL power supply. Must supply 1.05 V.
- dvdd-pex-pll-supply: PCIe/USB3 PLL power supply. Must supply 1.05 V.
- hvdd-pex-pll-e-supply: High-voltage PLLE power supply. Must supply 1.8 V.
+
+For Tegra210 and Tegra186:
- power-domains: A list of PM domain specifiers that reference each power-domain
used by the xHCI controller. This list must comprise of a specifier for the
XUSBA and XUSBC power-domains. See ../power/power_domain.txt and
@@ -78,6 +81,7 @@ Optional properties:
- Tegra132: usb2-0, usb2-1, usb2-2, hsic-0, hsic-1, usb3-0, usb3-1
- Tegra210: usb2-0, usb2-1, usb2-2, usb2-3, hsic-0, usb3-0, usb3-1, usb3-2,
usb3-3
+ - Tegra186: usb2-0, usb2-1, usb2-2, hsic-0, usb3-0, usb3-1, usb3-2
Example:
--------
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index d93b6a1504f2..b8acc2a994a8 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -6,6 +6,7 @@ Required properties:
- "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
- "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
+ - "renesas,usbhs-r8a77470" for r8a77470 (RZ/G1C) compatible device
- "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
- "renesas,usbhs-r8a774c0" for r8a774c0 (RZ/G2E) compatible device
- "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
diff --git a/Documentation/devicetree/bindings/usb/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
deleted file mode 100644
index 406252d14c6b..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-ehci.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-USB EHCI controllers
-
-Required properties:
- - compatible : should be "generic-ehci".
- - reg : should contain at least address and length of the standard EHCI
- register set for the device. Optional platform-dependent registers
- (debug-port or other) can be also specified here, but only after
- definition of standard EHCI registers.
- - interrupts : one EHCI interrupt should be described here.
-
-Optional properties:
- - big-endian-regs : boolean, set this for hcds with big-endian registers
- - big-endian-desc : boolean, set this for hcds with big-endian descriptors
- - big-endian : boolean, for hcds with big-endian-regs + big-endian-desc
- - needs-reset-on-resume : boolean, set this to force EHCI reset after resume
- - has-transaction-translator : boolean, set this if EHCI have a Transaction
- Translator built into the root hub.
- - clocks : a list of phandle + clock specifier pairs. In case of Renesas
- R-Car Gen3 SoCs:
- - if a host only channel: first clock should be host.
- - if a USB DRD channel: first clock should be host and second one
- should be peripheral.
- - phys : see usb-hcd.txt in the current directory
- - resets : phandle + reset specifier pair
-
-additionally the properties from usb-hcd.txt (in the current directory) are
-supported.
-
-Example (Sequoia 440EPx):
- ehci@e0000300 {
- compatible = "ibm,usb-ehci-440epx", "usb-ehci";
- interrupt-parent = <&UIC0>;
- interrupts = <1a 4>;
- reg = <0 e0000300 90 0 e0000390 70>;
- big-endian;
- };
-
-Example (Allwinner sun4i A10 SoC):
- ehci0: usb@1c14000 {
- compatible = "allwinner,sun4i-a10-ehci", "generic-ehci";
- reg = <0x01c14000 0x100>;
- interrupts = <39>;
- clocks = <&ahb_gates 1>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- };
diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.txt b/Documentation/devicetree/bindings/usb/usb-hcd.txt
deleted file mode 100644
index 50529b838c9c..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-hcd.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Generic USB HCD (Host Controller Device) Properties
-
-Optional properties:
-- phys: a list of all USB PHYs on this HCD
-
-Example:
- &usb1 {
- phys = <&usb2_phy1>, <&usb3_phy1>;
- };
diff --git a/Documentation/devicetree/bindings/usb/usb-hcd.yaml b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
new file mode 100644
index 000000000000..9c8c56d3a792
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-hcd.yaml
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-hcd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic USB Host Controller Device Tree Bindings
+
+maintainers:
+ - Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+properties:
+ $nodename:
+ pattern: "^usb(@.*)?"
+
+ phys:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of all the USB PHYs on this HCD
+
+examples:
+ - |
+ usb {
+ phys = <&usb2_phy1>, <&usb3_phy1>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
deleted file mode 100644
index aaaa5255c972..000000000000
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-USB OHCI controllers
-
-Required properties:
-- compatible : "generic-ohci"
-- reg : ohci controller register range (address and length)
-- interrupts : ohci controller interrupt
-
-Optional properties:
-- big-endian-regs : boolean, set this for hcds with big-endian registers
-- big-endian-desc : boolean, set this for hcds with big-endian descriptors
-- big-endian : boolean, for hcds with big-endian-regs + big-endian-desc
-- no-big-frame-no : boolean, set if frame_no lives in bits [15:0] of HCCA
-- remote-wakeup-connected: remote wakeup is wired on the platform
-- num-ports : u32, to override the detected port count
-- clocks : a list of phandle + clock specifier pairs. In case of Renesas
- R-Car Gen3 SoCs:
- - if a host only channel: first clock should be host.
- - if a USB DRD channel: first clock should be host and second one
- should be peripheral.
-- phys : see usb-hcd.txt in the current directory
-- resets : a list of phandle + reset specifier pairs
-
-additionally the properties from usb-hcd.txt (in the current directory) are
-supported.
-
-Example:
-
- ohci0: usb@1c14400 {
- compatible = "allwinner,sun4i-a10-ohci", "generic-ohci";
- reg = <0x01c14400 0x100>;
- interrupts = <64>;
- clocks = <&usb_clk 6>, <&ahb_gates 2>;
- phys = <&usbphy 1>;
- phy-names = "usb";
- };
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index fea8b1545751..97400e8f8605 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -10,6 +10,7 @@ Required properties:
- "renesas,xhci-r8a7743" for r8a7743 SoC
- "renesas,xhci-r8a7744" for r8a7744 SoC
- "renesas,xhci-r8a774a1" for r8a774a1 SoC
+ - "renesas,xhci-r8a774c0" for r8a774c0 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index 17915f64b8ee..bc7945e9dbfe 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -64,8 +64,10 @@ Optional properties :
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).
- - swap-dx-lanes : Specifies the ports which will swap the differential-pair
- (D+/D-), default is not-swapped.
+ - swap-dx-lanes : Specifies the downstream ports which will swap the
+ differential-pair (D+/D-), default is not-swapped.
+ - swap-us-lanes : Selects the upstream port differential-pair (D+/D-)
+ swapping (boolean, default is not-swapped)
Examples:
usb2512b@2c {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
deleted file mode 100644
index 8162b0eb4b50..000000000000
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ /dev/null
@@ -1,462 +0,0 @@
-Device tree binding vendor prefix registry. Keep list in alphabetical order.
-
-This isn't an exhaustive list, but you should add new prefixes to it before
-using them to avoid name-space collisions.
-
-abilis Abilis Systems
-abracon Abracon Corporation
-actions Actions Semiconductor Co., Ltd.
-active-semi Active-Semi International Inc
-ad Avionic Design GmbH
-adafruit Adafruit Industries, LLC
-adapteva Adapteva, Inc.
-adaptrum Adaptrum, Inc.
-adh AD Holdings Plc.
-adi Analog Devices, Inc.
-advantech Advantech Corporation
-aeroflexgaisler Aeroflex Gaisler AB
-al Annapurna Labs
-allo Allo.com
-allwinner Allwinner Technology Co., Ltd.
-alphascale AlphaScale Integrated Circuits Systems, Inc.
-altr Altera Corp.
-amarula Amarula Solutions
-amazon Amazon.com, Inc.
-amcc Applied Micro Circuits Corporation (APM, formally AMCC)
-amd Advanced Micro Devices (AMD), Inc.
-amediatech Shenzhen Amediatech Technology Co., Ltd
-amlogic Amlogic, Inc.
-ampire Ampire Co., Ltd.
-ams AMS AG
-amstaos AMS-Taos Inc.
-analogix Analogix Semiconductor, Inc.
-andestech Andes Technology Corporation
-apm Applied Micro Circuits Corporation (APM)
-aptina Aptina Imaging
-arasan Arasan Chip Systems
-archermind ArcherMind Technology (Nanjing) Co., Ltd.
-arctic Arctic Sand
-aries Aries Embedded GmbH
-arm ARM Ltd.
-armadeus ARMadeus Systems SARL
-arrow Arrow Electronics
-artesyn Artesyn Embedded Technologies Inc.
-asahi-kasei Asahi Kasei Corp.
-aspeed ASPEED Technology Inc.
-asus AsusTek Computer Inc.
-atlas Atlas Scientific LLC
-atmel Atmel Corporation
-auo AU Optronics Corporation
-auvidea Auvidea GmbH
-avago Avago Technologies
-avia avia semiconductor
-avic Shanghai AVIC Optoelectronics Co., Ltd.
-avnet Avnet, Inc.
-axentia Axentia Technologies AB
-axis Axis Communications AB
-bananapi BIPAI KEJI LIMITED
-bhf Beckhoff Automation GmbH & Co. KG
-bitmain Bitmain Technologies
-boe BOE Technology Group Co., Ltd.
-bosch Bosch Sensortec GmbH
-boundary Boundary Devices Inc.
-brcm Broadcom Corporation
-buffalo Buffalo, Inc.
-bticino Bticino International
-calxeda Calxeda
-capella Capella Microsystems, Inc
-cascoda Cascoda, Ltd.
-catalyst Catalyst Semiconductor, Inc.
-cavium Cavium, Inc.
-cdns Cadence Design Systems Inc.
-cdtech CDTech(H.K.) Electronics Limited
-ceva Ceva, Inc.
-chipidea Chipidea, Inc
-chipone ChipOne
-chipspark ChipSPARK
-chrp Common Hardware Reference Platform
-chunghwa Chunghwa Picture Tubes Ltd.
-ciaa Computadora Industrial Abierta Argentina
-cirrus Cirrus Logic, Inc.
-cloudengines Cloud Engines, Inc.
-cnm Chips&Media, Inc.
-cnxt Conexant Systems, Inc.
-compulab CompuLab Ltd.
-cortina Cortina Systems, Inc.
-cosmic Cosmic Circuits
-crane Crane Connectivity Solutions
-creative Creative Technology Ltd
-crystalfontz Crystalfontz America, Inc.
-csky Hangzhou C-SKY Microsystems Co., Ltd
-cubietech Cubietech, Ltd.
-cypress Cypress Semiconductor Corporation
-cznic CZ.NIC, z.s.p.o.
-dallas Maxim Integrated Products (formerly Dallas Semiconductor)
-dataimage DataImage, Inc.
-davicom DAVICOM Semiconductor, Inc.
-delta Delta Electronics, Inc.
-denx Denx Software Engineering
-devantech Devantech, Ltd.
-dh DH electronics GmbH
-digi Digi International Inc.
-digilent Diglent, Inc.
-dioo Dioo Microcircuit Co., Ltd
-dlc DLC Display Co., Ltd.
-dlg Dialog Semiconductor
-dlink D-Link Corporation
-dmo Data Modul AG
-domintech Domintech Co., Ltd.
-dongwoon Dongwoon Anatech
-dptechnics DPTechnics
-dragino Dragino Technology Co., Limited
-ea Embedded Artists AB
-ebs-systart EBS-SYSTART GmbH
-ebv EBV Elektronik
-eckelmann Eckelmann AG
-edt Emerging Display Technologies
-eeti eGalax_eMPIA Technology Inc
-elan Elan Microelectronic Corp.
-elgin Elgin S/A.
-embest Shenzhen Embest Technology Co., Ltd.
-emlid Emlid, Ltd.
-emmicro EM Microelectronic
-emtrion emtrion GmbH
-endless Endless Mobile, Inc.
-energymicro Silicon Laboratories (formerly Energy Micro AS)
-engicam Engicam S.r.l.
-epcos EPCOS AG
-epfl Ecole Polytechnique Fédérale de Lausanne
-epson Seiko Epson Corp.
-est ESTeem Wireless Modems
-ettus NI Ettus Research
-eukrea Eukréa Electromatique
-everest Everest Semiconductor Co. Ltd.
-everspin Everspin Technologies, Inc.
-exar Exar Corporation
-excito Excito
-ezchip EZchip Semiconductor
-facebook Facebook
-fairphone Fairphone B.V.
-faraday Faraday Technology Corporation
-fastrax Fastrax Oy
-fcs Fairchild Semiconductor
-feiyang Shenzhen Fly Young Technology Co.,LTD.
-firefly Firefly
-focaltech FocalTech Systems Co.,Ltd
-friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd
-fsl Freescale Semiconductor
-fujitsu Fujitsu Ltd.
-gateworks Gateworks Corporation
-gcw Game Consoles Worldwide
-ge General Electric Company
-geekbuying GeekBuying
-gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
-GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
-geniatech Geniatech, Inc.
-giantec Giantec Semiconductor, Inc.
-giantplus Giantplus Technology Co., Ltd.
-globalscale Globalscale Technologies, Inc.
-globaltop GlobalTop Technology, Inc.
-gmt Global Mixed-mode Technology, Inc.
-goodix Shenzhen Huiding Technology Co., Ltd.
-google Google, Inc.
-grinn Grinn
-grmn Garmin Limited
-gumstix Gumstix, Inc.
-gw Gateworks Corporation
-hannstar HannStar Display Corporation
-haoyu Haoyu Microelectronic Co. Ltd.
-hardkernel Hardkernel Co., Ltd
-hideep HiDeep Inc.
-himax Himax Technologies, Inc.
-hisilicon Hisilicon Limited.
-hit Hitachi Ltd.
-hitex Hitex Development Tools
-holt Holt Integrated Circuits, Inc.
-honeywell Honeywell
-hp Hewlett Packard
-holtek Holtek Semiconductor, Inc.
-hwacom HwaCom Systems Inc.
-i2se I2SE GmbH
-ibm International Business Machines (IBM)
-icplus IC Plus Corp.
-idt Integrated Device Technologies, Inc.
-ifi Ingenieurburo Fur Ic-Technologie (I/F/I)
-ilitek ILI Technology Corporation (ILITEK)
-img Imagination Technologies Ltd.
-infineon Infineon Technologies
-inforce Inforce Computing
-ingenic Ingenic Semiconductor
-innolux Innolux Corporation
-inside-secure INSIDE Secure
-intel Intel Corporation
-intercontrol Inter Control Group
-invensense InvenSense Inc.
-inversepath Inverse Path
-iom Iomega Corporation
-isee ISEE 2007 S.L.
-isil Intersil
-issi Integrated Silicon Solutions Inc.
-itead ITEAD Intelligent Systems Co.Ltd
-iwave iWave Systems Technologies Pvt. Ltd.
-jdi Japan Display Inc.
-jedec JEDEC Solid State Technology Association
-jianda Jiandangjing Technology Co., Ltd.
-karo Ka-Ro electronics GmbH
-keithkoep Keith & Koep GmbH
-keymile Keymile GmbH
-khadas Khadas
-kiebackpeter Kieback & Peter GmbH
-kinetic Kinetic Technologies
-kingdisplay King & Display Technology Co., Ltd.
-kingnovel Kingnovel Technology Co., Ltd.
-koe Kaohsiung Opto-Electronics Inc.
-kosagi Sutajio Ko-Usagi PTE Ltd.
-kyo Kyocera Corporation
-lacie LaCie
-laird Laird PLC
-lantiq Lantiq Semiconductor
-lattice Lattice Semiconductor
-lego LEGO Systems A/S
-lemaker Shenzhen LeMaker Technology Co., Ltd.
-lenovo Lenovo Group Ltd.
-lg LG Corporation
-libretech Shenzhen Libre Technology Co., Ltd
-licheepi Lichee Pi
-linaro Linaro Limited
-linksys Belkin International, Inc. (Linksys)
-linux Linux-specific binding
-linx Linx Technologies
-lltc Linear Technology Corporation
-logicpd Logic PD, Inc.
-lsi LSI Corp. (LSI Logic)
-lwn Liebherr-Werk Nenzing GmbH
-macnica Macnica Americas
-marvell Marvell Technology Group Ltd.
-maxim Maxim Integrated Products
-mbvl Mobiveil Inc.
-mcube mCube
-meas Measurement Specialties
-mediatek MediaTek Inc.
-megachips MegaChips
-mele Shenzhen MeLE Digital Technology Ltd.
-melexis Melexis N.V.
-melfas MELFAS Inc.
-mellanox Mellanox Technologies
-memsic MEMSIC Inc.
-merrii Merrii Technology Co., Ltd.
-micrel Micrel Inc.
-microchip Microchip Technology Inc.
-microcrystal Micro Crystal AG
-micron Micron Technology Inc.
-mikroe MikroElektronika d.o.o.
-minix MINIX Technology Ltd.
-miramems MiraMEMS Sensing Technology Co., Ltd.
-mitsubishi Mitsubishi Electric Corporation
-mosaixtech Mosaix Technologies, Inc.
-motorola Motorola, Inc.
-moxa Moxa Inc.
-mpl MPL AG
-mqmaker mqmaker Inc.
-mscc Microsemi Corporation
-msi Micro-Star International Co. Ltd.
-mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
-multi-inno Multi-Inno Technology Co.,Ltd
-mundoreader Mundo Reader S.L.
-murata Murata Manufacturing Co., Ltd.
-mxicy Macronix International Co., Ltd.
-myir MYIR Tech Limited
-national National Semiconductor
-nec NEC LCD Technologies, Ltd.
-neonode Neonode Inc.
-netgear NETGEAR
-netlogic Broadcom Corporation (formerly NetLogic Microsystems)
-netron-dy Netron DY
-netxeon Shenzhen Netxeon Technology CO., LTD
-nexbox Nexbox
-nextthing Next Thing Co.
-newhaven Newhaven Display International
-ni National Instruments
-nintendo Nintendo
-nlt NLT Technologies, Ltd.
-nokia Nokia
-nordic Nordic Semiconductor
-novtech NovTech, Inc.
-nutsboard NutsBoard
-nuvoton Nuvoton Technology Corporation
-nvd New Vision Display
-nvidia NVIDIA
-nxp NXP Semiconductors
-okaya Okaya Electric America, Inc.
-oki Oki Electric Industry Co., Ltd.
-olimex OLIMEX Ltd.
-olpc One Laptop Per Child
-onion Onion Corporation
-onnn ON Semiconductor Corp.
-ontat On Tat Industrial Company
-opalkelly Opal Kelly Incorporated
-opencores OpenCores.org
-openrisc OpenRISC.io
-option Option NV
-oranth Shenzhen Oranth Technology Co., Ltd.
-ORCL Oracle Corporation
-orisetech Orise Technology
-ortustech Ortus Technology Co., Ltd.
-ovti OmniVision Technologies
-oxsemi Oxford Semiconductor, Ltd.
-panasonic Panasonic Corporation
-parade Parade Technologies Inc.
-pda Precision Design Associates, Inc.
-pericom Pericom Technology Inc.
-pervasive Pervasive Displays, Inc.
-phicomm PHICOMM Co., Ltd.
-phytec PHYTEC Messtechnik GmbH
-picochip Picochip Ltd
-pine64 Pine64
-pixcir PIXCIR MICROELECTRONICS Co., Ltd
-plantower Plantower Co., Ltd
-plathome Plat'Home Co., Ltd.
-plda PLDA
-plx Broadcom Corporation (formerly PLX Technology)
-pni PNI Sensor Corporation
-portwell Portwell Inc.
-poslab Poslab Technology Co., Ltd.
-powervr PowerVR (deprecated, use img)
-probox2 PROBOX2 (by W2COMP Co., Ltd.)
-pulsedlight PulsedLight, Inc
-qca Qualcomm Atheros, Inc.
-qcom Qualcomm Technologies, Inc
-qemu QEMU, a generic and open source machine emulator and virtualizer
-qi Qi Hardware
-qiaodian QiaoDian XianShi Corporation
-qnap QNAP Systems, Inc.
-radxa Radxa
-raidsonic RaidSonic Technology GmbH
-ralink Mediatek/Ralink Technology Corp.
-ramtron Ramtron International
-raspberrypi Raspberry Pi Foundation
-raydium Raydium Semiconductor Corp.
-rda Unisoc Communications, Inc.
-realtek Realtek Semiconductor Corp.
-renesas Renesas Electronics Corporation
-richtek Richtek Technology Corporation
-ricoh Ricoh Co. Ltd.
-rikomagic Rikomagic Tech Corp. Ltd
-riscv RISC-V Foundation
-rockchip Fuzhou Rockchip Electronics Co., Ltd
-rohm ROHM Semiconductor Co., Ltd
-roofull Shenzhen Roofull Technology Co, Ltd
-samsung Samsung Semiconductor
-samtec Samtec/Softing company
-sancloud Sancloud Ltd
-sandisk Sandisk Corporation
-sbs Smart Battery System
-schindler Schindler
-seagate Seagate Technology PLC
-semtech Semtech Corporation
-sensirion Sensirion AG
-sff Small Form Factor Committee
-sgd Solomon Goldentek Display Corporation
-sgx SGX Sensortech
-sharp Sharp Corporation
-shimafuji Shimafuji Electric, Inc.
-si-en Si-En Technology Ltd.
-sifive SiFive, Inc.
-sigma Sigma Designs, Inc.
-sii Seiko Instruments, Inc.
-sil Silicon Image
-silabs Silicon Laboratories
-silead Silead Inc.
-silergy Silergy Corp.
-siliconmitus Silicon Mitus, Inc.
-simtek
-sirf SiRF Technology, Inc.
-sis Silicon Integrated Systems Corp.
-sitronix Sitronix Technology Corporation
-skyworks Skyworks Solutions, Inc.
-smsc Standard Microsystems Corporation
-snps Synopsys, Inc.
-socionext Socionext Inc.
-solidrun SolidRun
-solomon Solomon Systech Limited
-sony Sony Corporation
-spansion Spansion Inc.
-sprd Spreadtrum Communications Inc.
-sst Silicon Storage Technology, Inc.
-st STMicroelectronics
-starry Starry Electronic Technology (ShenZhen) Co., LTD
-startek Startek
-ste ST-Ericsson
-stericsson ST-Ericsson
-summit Summit microelectronics
-sunchip Shenzhen Sunchip Technology Co., Ltd
-SUNW Sun Microsystems, Inc
-swir Sierra Wireless
-syna Synaptics Inc.
-synology Synology, Inc.
-tbs TBS Technologies
-tbs-biometrics Touchless Biometric Systems AG
-tcg Trusted Computing Group
-tcl Toby Churchill Ltd.
-technexion TechNexion
-technologic Technologic Systems
-tempo Tempo Semiconductor
-techstar Shenzhen Techstar Electronics Co., Ltd.
-terasic Terasic Inc.
-thine THine Electronics, Inc.
-ti Texas Instruments
-tianma Tianma Micro-electronics Co., Ltd.
-tlm Trusted Logic Mobility
-tmt Tecon Microprocessor Technologies, LLC.
-topeet Topeet
-toradex Toradex AG
-toshiba Toshiba Corporation
-toumaz Toumaz
-tpk TPK U.S.A. LLC
-tplink TP-LINK Technologies Co., Ltd.
-tpo TPO
-tronfy Tronfy
-tronsmart Tronsmart
-truly Truly Semiconductors Limited
-tsd Theobroma Systems Design und Consulting GmbH
-tyan Tyan Computer Corporation
-u-blox u-blox
-ucrobotics uCRobotics
-ubnt Ubiquiti Networks
-udoo Udoo
-uniwest United Western Technologies Corp (UniWest)
-upisemi uPI Semiconductor Corp.
-urt United Radiant Technology Corporation
-usi Universal Scientific Industrial Co., Ltd.
-v3 V3 Semiconductor
-vamrs Vamrs Ltd.
-variscite Variscite Ltd.
-via VIA Technologies, Inc.
-virtio Virtual I/O Device Specification, developed by the OASIS consortium
-vishay Vishay Intertechnology, Inc
-vitesse Vitesse Semiconductor Corporation
-vivante Vivante Corporation
-vocore VoCore Studio
-voipac Voipac Technologies s.r.o.
-vot Vision Optical Technology Co., Ltd.
-wd Western Digital Corp.
-wetek WeTek Electronics, limited.
-wexler Wexler
-whwave Shenzhen whwave Electronics, Inc.
-wi2wi Wi2Wi, Inc.
-winbond Winbond Electronics corp.
-winstar Winstar Display Corp.
-wlf Wolfson Microelectronics
-wm Wondermedia Technologies, Inc.
-x-powers X-Powers
-xes Extreme Engineering Solutions (X-ES)
-xillybus Xillybus Ltd.
-xlnx Xilinx
-xunlong Shenzhen Xunlong Software CO.,Limited
-ysoft Y Soft Corporation a.s.
-zarlink Zarlink Semiconductor
-zeitec ZEITEC Semiconductor Co., LTD.
-zidoo Shenzhen Zidoo Technology Co., Ltd.
-zii Zodiac Inflight Innovations
-zte ZTE Corp.
-zyxel ZyXEL Communications Corp.
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
new file mode 100644
index 000000000000..33a65a45e319
--- /dev/null
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -0,0 +1,977 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/vendor-prefixes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Devicetree Vendor Prefix Registry
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+select: true
+
+properties: {}
+
+patternProperties:
+ # Prefixes which are not vendors, but followed the pattern
+ # DO NOT ADD NEW PROPERTIES TO THIS LIST
+ "^(at25|devbus|dmacap|dsa|exynos|gpio-fan|gpio|gpmc|hdmi|i2c-gpio),.*": true
+ "^(keypad|m25p|max8952|max8997|max8998|mpmc),.*": true
+ "^(pinctrl-single|#pinctrl-single|PowerPC),.*": true
+ "^(pl022|pxa-mmc|rcar_sound|rotary-encoder|s5m8767|sdhci),.*": true
+ "^(simple-audio-card|simple-graph-card|st-plgpio|st-spics|ts),.*": true
+
+ # Keep list in alphabetical order.
+ "^abilis,.*":
+ description: Abilis Systems
+ "^abracon,.*":
+ description: Abracon Corporation
+ "^actions,.*":
+ description: Actions Semiconductor Co., Ltd.
+ "^active-semi,.*":
+ description: Active-Semi International Inc
+ "^ad,.*":
+ description: Avionic Design GmbH
+ "^adafruit,.*":
+ description: Adafruit Industries, LLC
+ "^adapteva,.*":
+ description: Adapteva, Inc.
+ "^adaptrum,.*":
+ description: Adaptrum, Inc.
+ "^adh,.*":
+ description: AD Holdings Plc.
+ "^adi,.*":
+ description: Analog Devices, Inc.
+ "^advantech,.*":
+ description: Advantech Corporation
+ "^aeroflexgaisler,.*":
+ description: Aeroflex Gaisler AB
+ "^al,.*":
+ description: Annapurna Labs
+ "^allo,.*":
+ description: Allo.com
+ "^allwinner,.*":
+ description: Allwinner Technology Co., Ltd.
+ "^alphascale,.*":
+ description: AlphaScale Integrated Circuits Systems, Inc.
+ "^altr,.*":
+ description: Altera Corp.
+ "^amarula,.*":
+ description: Amarula Solutions
+ "^amazon,.*":
+ description: Amazon.com, Inc.
+ "^amcc,.*":
+ description: Applied Micro Circuits Corporation (APM, formerly AMCC)
+ "^amd,.*":
+ description: Advanced Micro Devices (AMD), Inc.
+ "^amediatech,.*":
+ description: Shenzhen Amediatech Technology Co., Ltd
+ "^amlogic,.*":
+ description: Amlogic, Inc.
+ "^ampire,.*":
+ description: Ampire Co., Ltd.
+ "^ams,.*":
+ description: AMS AG
+ "^amstaos,.*":
+ description: AMS-Taos Inc.
+ "^analogix,.*":
+ description: Analogix Semiconductor, Inc.
+ "^andestech,.*":
+ description: Andes Technology Corporation
+ "^apm,.*":
+ description: Applied Micro Circuits Corporation (APM)
+ "^aptina,.*":
+ description: Aptina Imaging
+ "^arasan,.*":
+ description: Arasan Chip Systems
+ "^archermind,.*":
+ description: ArcherMind Technology (Nanjing) Co., Ltd.
+ "^arctic,.*":
+ description: Arctic Sand
+ "^arcx,.*":
+ description: arcx Inc. / Archronix Inc.
+ "^aries,.*":
+ description: Aries Embedded GmbH
+ "^arm,.*":
+ description: ARM Ltd.
+ "^armadeus,.*":
+ description: ARMadeus Systems SARL
+ "^arrow,.*":
+ description: Arrow Electronics
+ "^artesyn,.*":
+ description: Artesyn Embedded Technologies Inc.
+ "^asahi-kasei,.*":
+ description: Asahi Kasei Corp.
+ "^aspeed,.*":
+ description: ASPEED Technology Inc.
+ "^asus,.*":
+ description: AsusTek Computer Inc.
+ "^atlas,.*":
+ description: Atlas Scientific LLC
+ "^atmel,.*":
+ description: Atmel Corporation
+ "^auo,.*":
+ description: AU Optronics Corporation
+ "^auvidea,.*":
+ description: Auvidea GmbH
+ "^avago,.*":
+ description: Avago Technologies
+ "^avia,.*":
+ description: avia semiconductor
+ "^avic,.*":
+ description: Shanghai AVIC Optoelectronics Co., Ltd.
+ "^avnet,.*":
+ description: Avnet, Inc.
+ "^axentia,.*":
+ description: Axentia Technologies AB
+ "^axis,.*":
+ description: Axis Communications AB
+ "^azoteq,.*":
+ description: Azoteq (Pty) Ltd
+ "^azw,.*":
+ description: Shenzhen AZW Technology Co., Ltd.
+ "^bananapi,.*":
+ description: BIPAI KEJI LIMITED
+ "^bhf,.*":
+ description: Beckhoff Automation GmbH & Co. KG
+ "^bitmain,.*":
+ description: Bitmain Technologies
+ "^boe,.*":
+ description: BOE Technology Group Co., Ltd.
+ "^bosch,.*":
+ description: Bosch Sensortec GmbH
+ "^boundary,.*":
+ description: Boundary Devices Inc.
+ "^brcm,.*":
+ description: Broadcom Corporation
+ "^buffalo,.*":
+ description: Buffalo, Inc.
+ "^bticino,.*":
+ description: Bticino International
+ "^calxeda,.*":
+ description: Calxeda
+ "^capella,.*":
+ description: Capella Microsystems, Inc
+ "^cascoda,.*":
+ description: Cascoda, Ltd.
+ "^catalyst,.*":
+ description: Catalyst Semiconductor, Inc.
+ "^cavium,.*":
+ description: Cavium, Inc.
+ "^cdns,.*":
+ description: Cadence Design Systems Inc.
+ "^cdtech,.*":
+ description: CDTech(H.K.) Electronics Limited
+ "^ceva,.*":
+ description: Ceva, Inc.
+ "^chipidea,.*":
+ description: Chipidea, Inc
+ "^chipone,.*":
+ description: ChipOne
+ "^chipspark,.*":
+ description: ChipSPARK
+ "^chrp,.*":
+ description: Common Hardware Reference Platform
+ "^chunghwa,.*":
+ description: Chunghwa Picture Tubes Ltd.
+ "^ciaa,.*":
+ description: Computadora Industrial Abierta Argentina
+ "^cirrus,.*":
+ description: Cirrus Logic, Inc.
+ "^cloudengines,.*":
+ description: Cloud Engines, Inc.
+ "^cnm,.*":
+ description: Chips&Media, Inc.
+ "^cnxt,.*":
+ description: Conexant Systems, Inc.
+ "^compulab,.*":
+ description: CompuLab Ltd.
+ "^cortina,.*":
+ description: Cortina Systems, Inc.
+ "^cosmic,.*":
+ description: Cosmic Circuits
+ "^crane,.*":
+ description: Crane Connectivity Solutions
+ "^creative,.*":
+ description: Creative Technology Ltd
+ "^crystalfontz,.*":
+ description: Crystalfontz America, Inc.
+ "^csky,.*":
+ description: Hangzhou C-SKY Microsystems Co., Ltd
+ "^cubietech,.*":
+ description: Cubietech, Ltd.
+ "^cypress,.*":
+ description: Cypress Semiconductor Corporation
+ "^cznic,.*":
+ description: CZ.NIC, z.s.p.o.
+ "^dallas,.*":
+ description: Maxim Integrated Products (formerly Dallas Semiconductor)
+ "^dataimage,.*":
+ description: DataImage, Inc.
+ "^davicom,.*":
+ description: DAVICOM Semiconductor, Inc.
+ "^delta,.*":
+ description: Delta Electronics, Inc.
+ "^denx,.*":
+ description: Denx Software Engineering
+ "^devantech,.*":
+ description: Devantech, Ltd.
+ "^dh,.*":
+ description: DH electronics GmbH
+ "^digi,.*":
+ description: Digi International Inc.
+ "^digilent,.*":
+ description: Digilent, Inc.
+ "^dioo,.*":
+ description: Dioo Microcircuit Co., Ltd
+ "^dlc,.*":
+ description: DLC Display Co., Ltd.
+ "^dlg,.*":
+ description: Dialog Semiconductor
+ "^dlink,.*":
+ description: D-Link Corporation
+ "^dmo,.*":
+ description: Data Modul AG
+ "^domintech,.*":
+ description: Domintech Co., Ltd.
+ "^dongwoon,.*":
+ description: Dongwoon Anatech
+ "^dptechnics,.*":
+ description: DPTechnics
+ "^dragino,.*":
+ description: Dragino Technology Co., Limited
+ "^ea,.*":
+ description: Embedded Artists AB
+ "^ebs-systart,.*":
+ description: EBS-SYSTART GmbH
+ "^ebv,.*":
+ description: EBV Elektronik
+ "^eckelmann,.*":
+ description: Eckelmann AG
+ "^edt,.*":
+ description: Emerging Display Technologies
+ "^eeti,.*":
+ description: eGalax_eMPIA Technology Inc
+ "^elan,.*":
+ description: Elan Microelectronic Corp.
+ "^elgin,.*":
+ description: Elgin S/A.
+ "^embest,.*":
+ description: Shenzhen Embest Technology Co., Ltd.
+ "^emlid,.*":
+ description: Emlid, Ltd.
+ "^emmicro,.*":
+ description: EM Microelectronic
+ "^emtrion,.*":
+ description: emtrion GmbH
+ "^endless,.*":
+ description: Endless Mobile, Inc.
+ "^energymicro,.*":
+ description: Silicon Laboratories (formerly Energy Micro AS)
+ "^engicam,.*":
+ description: Engicam S.r.l.
+ "^epcos,.*":
+ description: EPCOS AG
+ "^epfl,.*":
+ description: Ecole Polytechnique Fédérale de Lausanne
+ "^epson,.*":
+ description: Seiko Epson Corp.
+ "^est,.*":
+ description: ESTeem Wireless Modems
+ "^ettus,.*":
+ description: NI Ettus Research
+ "^eukrea,.*":
+ description: Eukréa Electromatique
+ "^everest,.*":
+ description: Everest Semiconductor Co. Ltd.
+ "^everspin,.*":
+ description: Everspin Technologies, Inc.
+ "^exar,.*":
+ description: Exar Corporation
+ "^excito,.*":
+ description: Excito
+ "^ezchip,.*":
+ description: EZchip Semiconductor
+ "^facebook,.*":
+ description: Facebook
+ "^fairphone,.*":
+ description: Fairphone B.V.
+ "^faraday,.*":
+ description: Faraday Technology Corporation
+ "^fastrax,.*":
+ description: Fastrax Oy
+ "^fcs,.*":
+ description: Fairchild Semiconductor
+ "^feiyang,.*":
+ description: Shenzhen Fly Young Technology Co.,LTD.
+ "^firefly,.*":
+ description: Firefly
+ "^focaltech,.*":
+ description: FocalTech Systems Co.,Ltd
+ "^friendlyarm,.*":
+ description: Guangzhou FriendlyARM Computer Tech Co., Ltd
+ "^fsl,.*":
+ description: Freescale Semiconductor
+ "^fujitsu,.*":
+ description: Fujitsu Ltd.
+ "^gateworks,.*":
+ description: Gateworks Corporation
+ "^gcw,.*":
+ description: Game Consoles Worldwide
+ "^ge,.*":
+ description: General Electric Company
+ "^geekbuying,.*":
+ description: GeekBuying
+ "^gef,.*":
+ description: GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ "^GEFanuc,.*":
+ description: GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ "^geniatech,.*":
+ description: Geniatech, Inc.
+ "^giantec,.*":
+ description: Giantec Semiconductor, Inc.
+ "^giantplus,.*":
+ description: Giantplus Technology Co., Ltd.
+ "^globalscale,.*":
+ description: Globalscale Technologies, Inc.
+ "^globaltop,.*":
+ description: GlobalTop Technology, Inc.
+ "^gmt,.*":
+ description: Global Mixed-mode Technology, Inc.
+ "^goodix,.*":
+ description: Shenzhen Huiding Technology Co., Ltd.
+ "^google,.*":
+ description: Google, Inc.
+ "^grinn,.*":
+ description: Grinn
+ "^grmn,.*":
+ description: Garmin Limited
+ "^gumstix,.*":
+ description: Gumstix, Inc.
+ "^gw,.*":
+ description: Gateworks Corporation
+ "^hannstar,.*":
+ description: HannStar Display Corporation
+ "^haoyu,.*":
+ description: Haoyu Microelectronic Co. Ltd.
+ "^hardkernel,.*":
+ description: Hardkernel Co., Ltd
+ "^hideep,.*":
+ description: HiDeep Inc.
+ "^himax,.*":
+ description: Himax Technologies, Inc.
+ "^hisilicon,.*":
+ description: Hisilicon Limited.
+ "^hit,.*":
+ description: Hitachi Ltd.
+ "^hitex,.*":
+ description: Hitex Development Tools
+ "^holt,.*":
+ description: Holt Integrated Circuits, Inc.
+ "^honeywell,.*":
+ description: Honeywell
+ "^hp,.*":
+ description: Hewlett Packard
+ "^holtek,.*":
+ description: Holtek Semiconductor, Inc.
+ "^hwacom,.*":
+ description: HwaCom Systems Inc.
+ "^i2se,.*":
+ description: I2SE GmbH
+ "^ibm,.*":
+ description: International Business Machines (IBM)
+ "^icplus,.*":
+ description: IC Plus Corp.
+ "^idt,.*":
+ description: Integrated Device Technologies, Inc.
+ "^ifi,.*":
+ description: Ingenieurburo Fur Ic-Technologie (I/F/I)
+ "^ilitek,.*":
+ description: ILI Technology Corporation (ILITEK)
+ "^img,.*":
+ description: Imagination Technologies Ltd.
+ "^infineon,.*":
+ description: Infineon Technologies
+ "^inforce,.*":
+ description: Inforce Computing
+ "^ingenic,.*":
+ description: Ingenic Semiconductor
+ "^innolux,.*":
+ description: Innolux Corporation
+ "^inside-secure,.*":
+ description: INSIDE Secure
+ "^intel,.*":
+ description: Intel Corporation
+ "^intercontrol,.*":
+ description: Inter Control Group
+ "^invensense,.*":
+ description: InvenSense Inc.
+ "^inversepath,.*":
+ description: Inverse Path
+ "^iom,.*":
+ description: Iomega Corporation
+ "^isee,.*":
+ description: ISEE 2007 S.L.
+ "^isil,.*":
+ description: Intersil
+ "^issi,.*":
+ description: Integrated Silicon Solutions Inc.
+ "^itead,.*":
+ description: ITEAD Intelligent Systems Co.Ltd
+ "^iwave,.*":
+ description: iWave Systems Technologies Pvt. Ltd.
+ "^jdi,.*":
+ description: Japan Display Inc.
+ "^jedec,.*":
+ description: JEDEC Solid State Technology Association
+ "^jianda,.*":
+ description: Jiandangjing Technology Co., Ltd.
+ "^karo,.*":
+ description: Ka-Ro electronics GmbH
+ "^keithkoep,.*":
+ description: Keith & Koep GmbH
+ "^keymile,.*":
+ description: Keymile GmbH
+ "^khadas,.*":
+ description: Khadas
+ "^kiebackpeter,.*":
+ description: Kieback & Peter GmbH
+ "^kinetic,.*":
+ description: Kinetic Technologies
+ "^kingdisplay,.*":
+ description: King & Display Technology Co., Ltd.
+ "^kingnovel,.*":
+ description: Kingnovel Technology Co., Ltd.
+ "^kionix,.*":
+ description: Kionix, Inc.
+ "^kobo,.*":
+ description: Rakuten Kobo Inc.
+ "^koe,.*":
+ description: Kaohsiung Opto-Electronics Inc.
+ "^kosagi,.*":
+ description: Sutajio Ko-Usagi PTE Ltd.
+ "^kyo,.*":
+ description: Kyocera Corporation
+ "^lacie,.*":
+ description: LaCie
+ "^laird,.*":
+ description: Laird PLC
+ "^lantiq,.*":
+ description: Lantiq Semiconductor
+ "^lattice,.*":
+ description: Lattice Semiconductor
+ "^lego,.*":
+ description: LEGO Systems A/S
+ "^lemaker,.*":
+ description: Shenzhen LeMaker Technology Co., Ltd.
+ "^lenovo,.*":
+ description: Lenovo Group Ltd.
+ "^lg,.*":
+ description: LG Corporation
+ "^libretech,.*":
+ description: Shenzhen Libre Technology Co., Ltd
+ "^licheepi,.*":
+ description: Lichee Pi
+ "^linaro,.*":
+ description: Linaro Limited
+ "^linksys,.*":
+ description: Belkin International, Inc. (Linksys)
+ "^linux,.*":
+ description: Linux-specific binding
+ "^linx,.*":
+ description: Linx Technologies
+ "^lltc,.*":
+ description: Linear Technology Corporation
+ "^logicpd,.*":
+ description: Logic PD, Inc.
+ "^lsi,.*":
+ description: LSI Corp. (LSI Logic)
+ "^lwn,.*":
+ description: Liebherr-Werk Nenzing GmbH
+ "^macnica,.*":
+ description: Macnica Americas
+ "^marvell,.*":
+ description: Marvell Technology Group Ltd.
+ "^maxbotix,.*":
+ description: MaxBotix Inc.
+ "^maxim,.*":
+ description: Maxim Integrated Products
+ "^mbvl,.*":
+ description: Mobiveil Inc.
+ "^mcube,.*":
+ description: mCube
+ "^meas,.*":
+ description: Measurement Specialties
+ "^mediatek,.*":
+ description: MediaTek Inc.
+ "^megachips,.*":
+ description: MegaChips
+ "^mele,.*":
+ description: Shenzhen MeLE Digital Technology Ltd.
+ "^melexis,.*":
+ description: Melexis N.V.
+ "^melfas,.*":
+ description: MELFAS Inc.
+ "^mellanox,.*":
+ description: Mellanox Technologies
+ "^memsic,.*":
+ description: MEMSIC Inc.
+ "^menlo,.*":
+ description: Menlo Systems GmbH
+ "^merrii,.*":
+ description: Merrii Technology Co., Ltd.
+ "^micrel,.*":
+ description: Micrel Inc.
+ "^microchip,.*":
+ description: Microchip Technology Inc.
+ "^microcrystal,.*":
+ description: Micro Crystal AG
+ "^micron,.*":
+ description: Micron Technology Inc.
+ "^mikroe,.*":
+ description: MikroElektronika d.o.o.
+ "^minix,.*":
+ description: MINIX Technology Ltd.
+ "^miramems,.*":
+ description: MiraMEMS Sensing Technology Co., Ltd.
+ "^mitsubishi,.*":
+ description: Mitsubishi Electric Corporation
+ "^mosaixtech,.*":
+ description: Mosaix Technologies, Inc.
+ "^motorola,.*":
+ description: Motorola, Inc.
+ "^moxa,.*":
+ description: Moxa Inc.
+ "^mpl,.*":
+ description: MPL AG
+ "^mqmaker,.*":
+ description: mqmaker Inc.
+ "^mscc,.*":
+ description: Microsemi Corporation
+ "^msi,.*":
+ description: Micro-Star International Co. Ltd.
+ "^mti,.*":
+ description: Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
+ "^multi-inno,.*":
+ description: Multi-Inno Technology Co.,Ltd
+ "^mundoreader,.*":
+ description: Mundo Reader S.L.
+ "^murata,.*":
+ description: Murata Manufacturing Co., Ltd.
+ "^mxicy,.*":
+ description: Macronix International Co., Ltd.
+ "^myir,.*":
+ description: MYIR Tech Limited
+ "^national,.*":
+ description: National Semiconductor
+ "^nec,.*":
+ description: NEC LCD Technologies, Ltd.
+ "^neonode,.*":
+ description: Neonode Inc.
+ "^netgear,.*":
+ description: NETGEAR
+ "^netlogic,.*":
+ description: Broadcom Corporation (formerly NetLogic Microsystems)
+ "^netron-dy,.*":
+ description: Netron DY
+ "^netxeon,.*":
+ description: Shenzhen Netxeon Technology CO., LTD
+ "^nexbox,.*":
+ description: Nexbox
+ "^nextthing,.*":
+ description: Next Thing Co.
+ "^newhaven,.*":
+ description: Newhaven Display International
+ "^ni,.*":
+ description: National Instruments
+ "^nintendo,.*":
+ description: Nintendo
+ "^nlt,.*":
+ description: NLT Technologies, Ltd.
+ "^nokia,.*":
+ description: Nokia
+ "^nordic,.*":
+ description: Nordic Semiconductor
+ "^novtech,.*":
+ description: NovTech, Inc.
+ "^nutsboard,.*":
+ description: NutsBoard
+ "^nuvoton,.*":
+ description: Nuvoton Technology Corporation
+ "^nvd,.*":
+ description: New Vision Display
+ "^nvidia,.*":
+ description: NVIDIA
+ "^nxp,.*":
+ description: NXP Semiconductors
+ "^oceanic,.*":
+ description: Oceanic Systems (UK) Ltd.
+ "^okaya,.*":
+ description: Okaya Electric America, Inc.
+ "^oki,.*":
+ description: Oki Electric Industry Co., Ltd.
+ "^olimex,.*":
+ description: OLIMEX Ltd.
+ "^olpc,.*":
+ description: One Laptop Per Child
+ "^onion,.*":
+ description: Onion Corporation
+ "^onnn,.*":
+ description: ON Semiconductor Corp.
+ "^ontat,.*":
+ description: On Tat Industrial Company
+ "^opalkelly,.*":
+ description: Opal Kelly Incorporated
+ "^opencores,.*":
+ description: OpenCores.org
+ "^openrisc,.*":
+ description: OpenRISC.io
+ "^option,.*":
+ description: Option NV
+ "^oranth,.*":
+ description: Shenzhen Oranth Technology Co., Ltd.
+ "^ORCL,.*":
+ description: Oracle Corporation
+ "^orisetech,.*":
+ description: Orise Technology
+ "^ortustech,.*":
+ description: Ortus Technology Co., Ltd.
+ "^osddisplays,.*":
+ description: OSD Displays
+ "^ovti,.*":
+ description: OmniVision Technologies
+ "^oxsemi,.*":
+ description: Oxford Semiconductor, Ltd.
+ "^panasonic,.*":
+ description: Panasonic Corporation
+ "^parade,.*":
+ description: Parade Technologies Inc.
+ "^pda,.*":
+ description: Precision Design Associates, Inc.
+ "^pericom,.*":
+ description: Pericom Technology Inc.
+ "^pervasive,.*":
+ description: Pervasive Displays, Inc.
+ "^phicomm,.*":
+ description: PHICOMM Co., Ltd.
+ "^phytec,.*":
+ description: PHYTEC Messtechnik GmbH
+ "^picochip,.*":
+ description: Picochip Ltd
+ "^pine64,.*":
+ description: Pine64
+ "^pixcir,.*":
+ description: PIXCIR MICROELECTRONICS Co., Ltd
+ "^plantower,.*":
+ description: Plantower Co., Ltd
+ "^plathome,.*":
+ description: Plat'Home Co., Ltd.
+ "^plda,.*":
+ description: PLDA
+ "^plx,.*":
+ description: Broadcom Corporation (formerly PLX Technology)
+ "^pni,.*":
+ description: PNI Sensor Corporation
+ "^portwell,.*":
+ description: Portwell Inc.
+ "^poslab,.*":
+ description: Poslab Technology Co., Ltd.
+ "^powervr,.*":
+ description: PowerVR (deprecated, use img)
+ "^probox2,.*":
+ description: PROBOX2 (by W2COMP Co., Ltd.)
+ "^pulsedlight,.*":
+ description: PulsedLight, Inc
+ "^qca,.*":
+ description: Qualcomm Atheros, Inc.
+ "^qcom,.*":
+ description: Qualcomm Technologies, Inc
+ "^qemu,.*":
+ description: QEMU, a generic and open source machine emulator and virtualizer
+ "^qi,.*":
+ description: Qi Hardware
+ "^qiaodian,.*":
+ description: QiaoDian XianShi Corporation
+ "^qnap,.*":
+ description: QNAP Systems, Inc.
+ "^radxa,.*":
+ description: Radxa
+ "^raidsonic,.*":
+ description: RaidSonic Technology GmbH
+ "^ralink,.*":
+ description: Mediatek/Ralink Technology Corp.
+ "^ramtron,.*":
+ description: Ramtron International
+ "^raspberrypi,.*":
+ description: Raspberry Pi Foundation
+ "^raydium,.*":
+ description: Raydium Semiconductor Corp.
+ "^rda,.*":
+ description: Unisoc Communications, Inc.
+ "^realtek,.*":
+ description: Realtek Semiconductor Corp.
+ "^renesas,.*":
+ description: Renesas Electronics Corporation
+ "^richtek,.*":
+ description: Richtek Technology Corporation
+ "^ricoh,.*":
+ description: Ricoh Co. Ltd.
+ "^rikomagic,.*":
+ description: Rikomagic Tech Corp. Ltd
+ "^riscv,.*":
+ description: RISC-V Foundation
+ "^rockchip,.*":
+ description: Fuzhou Rockchip Electronics Co., Ltd
+ "^rocktech,.*":
+ description: ROCKTECH DISPLAYS LIMITED
+ "^rohm,.*":
+ description: ROHM Semiconductor Co., Ltd
+ "^ronbo,.*":
+ description: Ronbo Electronics
+ "^roofull,.*":
+ description: Shenzhen Roofull Technology Co, Ltd
+ "^samsung,.*":
+ description: Samsung Semiconductor
+ "^samtec,.*":
+ description: Samtec/Softing company
+ "^sancloud,.*":
+ description: Sancloud Ltd
+ "^sandisk,.*":
+ description: Sandisk Corporation
+ "^sbs,.*":
+ description: Smart Battery System
+ "^schindler,.*":
+ description: Schindler
+ "^seagate,.*":
+ description: Seagate Technology PLC
+ "^seirobotics,.*":
+ description: Shenzhen SEI Robotics Co., Ltd
+ "^semtech,.*":
+ description: Semtech Corporation
+ "^sensirion,.*":
+ description: Sensirion AG
+ "^sff,.*":
+ description: Small Form Factor Committee
+ "^sgd,.*":
+ description: Solomon Goldentek Display Corporation
+ "^sgx,.*":
+ description: SGX Sensortech
+ "^sharp,.*":
+ description: Sharp Corporation
+ "^shimafuji,.*":
+ description: Shimafuji Electric, Inc.
+ "^si-en,.*":
+ description: Si-En Technology Ltd.
+ "^si-linux,.*":
+ description: Silicon Linux Corporation
+ "^sifive,.*":
+ description: SiFive, Inc.
+ "^sigma,.*":
+ description: Sigma Designs, Inc.
+ "^sii,.*":
+ description: Seiko Instruments, Inc.
+ "^sil,.*":
+ description: Silicon Image
+ "^silabs,.*":
+ description: Silicon Laboratories
+ "^silead,.*":
+ description: Silead Inc.
+ "^silergy,.*":
+ description: Silergy Corp.
+ "^siliconmitus,.*":
+ description: Silicon Mitus, Inc.
+ "^simte,.*":
+ description: k
+ "^sirf,.*":
+ description: SiRF Technology, Inc.
+ "^sis,.*":
+ description: Silicon Integrated Systems Corp.
+ "^sitronix,.*":
+ description: Sitronix Technology Corporation
+ "^skyworks,.*":
+ description: Skyworks Solutions, Inc.
+ "^smsc,.*":
+ description: Standard Microsystems Corporation
+ "^snps,.*":
+ description: Synopsys, Inc.
+ "^socionext,.*":
+ description: Socionext Inc.
+ "^solidrun,.*":
+ description: SolidRun
+ "^solomon,.*":
+ description: Solomon Systech Limited
+ "^sony,.*":
+ description: Sony Corporation
+ "^spansion,.*":
+ description: Spansion Inc.
+ "^sprd,.*":
+ description: Spreadtrum Communications Inc.
+ "^sst,.*":
+ description: Silicon Storage Technology, Inc.
+ "^st,.*":
+ description: STMicroelectronics
+ "^starry,.*":
+ description: Starry Electronic Technology (ShenZhen) Co., LTD
+ "^startek,.*":
+ description: Startek
+ "^ste,.*":
+ description: ST-Ericsson
+ "^stericsson,.*":
+ description: ST-Ericsson
+ "^summit,.*":
+ description: Summit microelectronics
+ "^sunchip,.*":
+ description: Shenzhen Sunchip Technology Co., Ltd
+ "^SUNW,.*":
+ description: Sun Microsystems, Inc
+ "^swir,.*":
+ description: Sierra Wireless
+ "^syna,.*":
+ description: Synaptics Inc.
+ "^synology,.*":
+ description: Synology, Inc.
+ "^tbs,.*":
+ description: TBS Technologies
+ "^tbs-biometrics,.*":
+ description: Touchless Biometric Systems AG
+ "^tcg,.*":
+ description: Trusted Computing Group
+ "^tcl,.*":
+ description: Toby Churchill Ltd.
+ "^technexion,.*":
+ description: TechNexion
+ "^technologic,.*":
+ description: Technologic Systems
+ "^tempo,.*":
+ description: Tempo Semiconductor
+ "^techstar,.*":
+ description: Shenzhen Techstar Electronics Co., Ltd.
+ "^terasic,.*":
+ description: Terasic Inc.
+ "^thine,.*":
+ description: THine Electronics, Inc.
+ "^ti,.*":
+ description: Texas Instruments
+ "^tianma,.*":
+ description: Tianma Micro-electronics Co., Ltd.
+ "^tlm,.*":
+ description: Trusted Logic Mobility
+ "^tmt,.*":
+ description: Tecon Microprocessor Technologies, LLC.
+ "^topeet,.*":
+ description: Topeet
+ "^toradex,.*":
+ description: Toradex AG
+ "^toshiba,.*":
+ description: Toshiba Corporation
+ "^toumaz,.*":
+ description: Toumaz
+ "^tpk,.*":
+ description: TPK U.S.A. LLC
+ "^tplink,.*":
+ description: TP-LINK Technologies Co., Ltd.
+ "^tpo,.*":
+ description: TPO
+ "^tq,.*":
+ description: TQ Systems GmbH
+ "^tronfy,.*":
+ description: Tronfy
+ "^tronsmart,.*":
+ description: Tronsmart
+ "^truly,.*":
+ description: Truly Semiconductors Limited
+ "^tsd,.*":
+ description: Theobroma Systems Design und Consulting GmbH
+ "^tyan,.*":
+ description: Tyan Computer Corporation
+ "^u-blox,.*":
+ description: u-blox
+ "^ucrobotics,.*":
+ description: uCRobotics
+ "^ubnt,.*":
+ description: Ubiquiti Networks
+ "^udoo,.*":
+ description: Udoo
+ "^uniwest,.*":
+ description: United Western Technologies Corp (UniWest)
+ "^upisemi,.*":
+ description: uPI Semiconductor Corp.
+ "^urt,.*":
+ description: United Radiant Technology Corporation
+ "^usi,.*":
+ description: Universal Scientific Industrial Co., Ltd.
+ "^v3,.*":
+ description: V3 Semiconductor
+ "^vamrs,.*":
+ description: Vamrs Ltd.
+ "^variscite,.*":
+ description: Variscite Ltd.
+ "^via,.*":
+ description: VIA Technologies, Inc.
+ "^virtio,.*":
+ description: Virtual I/O Device Specification, developed by the OASIS consortium
+ "^vishay,.*":
+ description: Vishay Intertechnology, Inc
+ "^vitesse,.*":
+ description: Vitesse Semiconductor Corporation
+ "^vivante,.*":
+ description: Vivante Corporation
+ "^vocore,.*":
+ description: VoCore Studio
+ "^voipac,.*":
+ description: Voipac Technologies s.r.o.
+ "^vot,.*":
+ description: Vision Optical Technology Co., Ltd.
+ "^wd,.*":
+ description: Western Digital Corp.
+ "^wetek,.*":
+ description: WeTek Electronics, limited.
+ "^wexler,.*":
+ description: Wexler
+ "^whwave,.*":
+ description: Shenzhen whwave Electronics, Inc.
+ "^wi2wi,.*":
+ description: Wi2Wi, Inc.
+ "^winbond,.*":
+ description: Winbond Electronics corp.
+ "^winstar,.*":
+ description: Winstar Display Corp.
+ "^wlf,.*":
+ description: Wolfson Microelectronics
+ "^wm,.*":
+ description: Wondermedia Technologies, Inc.
+ "^x-powers,.*":
+ description: X-Powers
+ "^xes,.*":
+ description: Extreme Engineering Solutions (X-ES)
+ "^xillybus,.*":
+ description: Xillybus Ltd.
+ "^xlnx,.*":
+ description: Xilinx
+ "^xunlong,.*":
+ description: Shenzhen Xunlong Software CO.,Limited
+ "^ysoft,.*":
+ description: Y Soft Corporation a.s.
+ "^zarlink,.*":
+ description: Zarlink Semiconductor
+ "^zeitec,.*":
+ description: ZEITEC Semiconductor Co., LTD.
+ "^zidoo,.*":
+ description: Shenzhen Zidoo Technology Co., Ltd.
+ "^zii,.*":
+ description: Zodiac Inflight Innovations
+ "^zte,.*":
+ description: ZTE Corp.
+ "^zyxel,.*":
+ description: ZyXEL Communications Corp.
+
+ # Normal property name match without a comma
+ # These should catch all node/property names without a prefix
+ "^[a-zA-Z0-9#][a-zA-Z0-9+\\-._@]{0,63}$": true
+ "^[a-zA-Z0-9+\\-._]*@[0-9a-zA-Z,]*$": true
+ "^#.*": true
+
+additionalProperties: false
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-sc-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-sc-wdt.txt
new file mode 100644
index 000000000000..02b87e92ae68
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-sc-wdt.txt
@@ -0,0 +1,24 @@
+* Freescale i.MX System Controller Watchdog
+
+The i.MX system controller watchdog is used on i.MX SoCs that have a system
+controller inside. The watchdog is managed by the system controller, which can
+ONLY be reached from secure mode for watchdog operations. The Linux i.MX
+system controller watchdog driver therefore calls the ARM SMC API and traps
+into ARM-Trusted-Firmware, which runs at secure EL3 and requests the system
+controller to execute the watchdog operation passed from the Linux kernel.
+
+Required properties:
+- compatible: Should be:
+ "fsl,imx8qxp-sc-wdt"
+ followed by "fsl,imx-sc-wdt";
+
+Optional properties:
+- timeout-sec : Contains the watchdog timeout in seconds.
+
+Examples:
+
+watchdog {
+ compatible = "fsl,imx8qxp-sc-wdt", "fsl,imx-sc-wdt";
+ timeout-sec = <60>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
index 8682d6a93e5b..fd380eb28df5 100644
--- a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt
@@ -9,6 +9,7 @@ Required properties:
"mediatek,mt7622-wdt", "mediatek,mt6589-wdt": for MT7622
"mediatek,mt7623-wdt", "mediatek,mt6589-wdt": for MT7623
"mediatek,mt7629-wdt", "mediatek,mt6589-wdt": for MT7629
+ "mediatek,mt8516-wdt", "mediatek,mt6589-wdt": for MT8516
- reg : Specifies base physical address and size of the registers.
diff --git a/Documentation/devicetree/bindings/writing-bindings.txt b/Documentation/devicetree/bindings/writing-bindings.txt
new file mode 100644
index 000000000000..27dfd2d8016e
--- /dev/null
+++ b/Documentation/devicetree/bindings/writing-bindings.txt
@@ -0,0 +1,60 @@
+DOs and DON'Ts for designing and writing Devicetree bindings
+
+This is a list of common review feedback items focused on binding design. With
+every rule, there are exceptions and bindings have many gray areas.
+
+For guidelines related to patches, see
+Documentation/devicetree/bindings/submitting-patches.txt
+
+
+Overall design
+
+- DO attempt to make bindings complete even if a driver doesn't support some
+ features. For example, if a device has an interrupt, then include the
+ 'interrupts' property even if the driver only operates in polled mode.
+
+- DON'T refer to Linux or "device driver" in bindings. Bindings should be
+ based on what the hardware has, not what an OS and driver currently support.
+
+- DO use node names matching the class of the device. Many standard names are
+ defined in the DT Spec. If there isn't one, consider adding it.
+
+- DO check that the example matches the documentation especially after making
+ review changes.
+
+- DON'T create nodes just for the sake of instantiating drivers. Multi-function
+ devices only need child nodes when the child nodes have their own DT
+ resources. A single node can be multiple providers (e.g. clocks and resets).
+
+- DON'T use 'syscon' alone without a specific compatible string. A 'syscon'
+ hardware block should have a compatible string unique enough to infer the
+ register layout of the entire block (at a minimum).
+
+
+Properties
+
+- DO make 'compatible' properties specific. DON'T use wildcards in compatible
+ strings. DO use fallback compatibles when devices are the same as or a subset
+ of prior implementations. DO add new compatibles in case there are new
+ features or bugs.
+
+- DO use a vendor prefix on device specific property names. Consider if
+ properties could be common among devices of the same class. Check other
+ existing bindings for similar devices.
+
+- DON'T redefine common properties. Just reference the definition and define
+ constraints specific to the device.
+
+- DO use common property unit suffixes for properties with scientific units.
+ See property-units.txt.
+
+- DO define properties in terms of constraints. How many entries? What are
+ possible values? What is the order?
+
+
+Board/SoC .dts Files
+
+- DO put all MMIO devices under a bus node and not at the top-level.
+
+- DO use non-empty 'ranges' to limit the size of child buses/devices. 64-bit
+ platforms don't need all devices to have 64-bit address and size.
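+
+A rough sketch of several of the points above (all node names, compatible
+strings and addresses here are made up for illustration and do not come from
+any real binding): a specific compatible with a fallback, and an MMIO device
+placed under a bus node with a non-empty 'ranges':
+
+  soc {
+          compatible = "simple-bus";
+          #address-cells = <1>;
+          #size-cells = <1>;
+          /* map child address 0x0 to a 256 MiB window at parent 0x40000000 */
+          ranges = <0x0 0x40000000 0x10000000>;
+
+          serial@10000 {
+                  /* most specific compatible first, then the fallback */
+                  compatible = "vendor,soc2-uart", "vendor,soc1-uart";
+                  reg = <0x10000 0x100>;
+                  /* interrupt described even if a driver only polls */
+                  interrupts = <5>;
+          };
+  };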
diff --git a/Documentation/devicetree/writing-schema.md b/Documentation/devicetree/writing-schema.md
index a3652d33a48f..dc032db36262 100644
--- a/Documentation/devicetree/writing-schema.md
+++ b/Documentation/devicetree/writing-schema.md
@@ -97,7 +97,7 @@ The DT schema project must be installed in order to validate the DT schema
binding documents and validate DTS files using the DT schema. The DT schema
project can be installed with pip:
-`pip3 install git+https://github.com/robherring/yaml-bindings.git@master`
+`pip3 install git+https://github.com/devicetree-org/dt-schema.git@master`
dtc must also be built with YAML output support enabled. This requires that
libyaml and its headers be installed on the host system.
diff --git a/Documentation/doc-guide/index.rst b/Documentation/doc-guide/index.rst
index a7f95d7d3a63..603f3ff55d5a 100644
--- a/Documentation/doc-guide/index.rst
+++ b/Documentation/doc-guide/index.rst
@@ -7,9 +7,9 @@ How to write kernel documentation
.. toctree::
:maxdepth: 1
- sphinx.rst
- kernel-doc.rst
- parse-headers.rst
+ sphinx
+ kernel-doc
+ parse-headers
.. only:: subproject and html
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index ef25a066d952..5eba889ea84d 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -127,7 +127,7 @@ flask.h
fore200e_mkfirm
fore200e_pca_fw.c*
gconf
-gconf.glade.h
+gconf-cfg
gen-devlist
gen_crc32table
gen_init_cpio
@@ -148,24 +148,22 @@ int32.c
int4.c
int8.c
kallsyms
-kconfig
keywords.c
ksym.c*
ksym.h*
-kxgettext
*lex.c
*lex.*.c
linux
logo_*.c
logo_*_clut224.c
logo_*_mono.c
-lxdialog
mach-types
mach-types.h
machtypes.h
map
map_hugetlb
mconf
+mconf-cfg
miboot*
mk_elfconfig
mkboot
@@ -176,11 +174,14 @@ mkprep
mkregtable
mktables
mktree
+mkutf8data
modpost
modules.builtin
+modules.builtin.modinfo
modules.order
modversions.h*
nconf
+nconf-cfg
ncscope.*
offset.h
oui.c*
@@ -200,6 +201,7 @@ pnmtologo
ppc_defs.h*
pss_boot.h
qconf
+qconf-cfg
r100_reg_safe.h
r200_reg_safe.h
r300_reg_safe.h
@@ -254,6 +256,7 @@ vsyscall_32.lds
wanxlfw.inc
uImage
unifdef
+utf8data.h
wakeup.bin
wakeup.elf
wakeup.lds
diff --git a/Documentation/driver-api/acpi/index.rst b/Documentation/driver-api/acpi/index.rst
new file mode 100644
index 000000000000..ace0008e54c2
--- /dev/null
+++ b/Documentation/driver-api/acpi/index.rst
@@ -0,0 +1,9 @@
+============
+ACPI Support
+============
+
+.. toctree::
+ :maxdepth: 2
+
+ linuxized-acpica
+ scan_handlers
diff --git a/Documentation/acpi/linuxized-acpica.txt b/Documentation/driver-api/acpi/linuxized-acpica.rst
index 3ad7b0dfb083..0ca8f1538519 100644
--- a/Documentation/acpi/linuxized-acpica.txt
+++ b/Documentation/driver-api/acpi/linuxized-acpica.rst
@@ -1,31 +1,37 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+============================================================
Linuxized ACPICA - Introduction to ACPICA Release Automation
+============================================================
-Copyright (C) 2013-2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+:Copyright: |copy| 2013-2016, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
-Abstract:
+Abstract
+========
This document describes the ACPICA project and the relationship between
ACPICA and Linux. It also describes how ACPICA code in drivers/acpi/acpica,
include/acpi and tools/power/acpi is automatically updated to follow the
upstream.
+ACPICA Project
+==============
-1. ACPICA Project
-
- The ACPI Component Architecture (ACPICA) project provides an operating
- system (OS)-independent reference implementation of the Advanced
- Configuration and Power Interface Specification (ACPI). It has been
- adapted by various host OSes. By directly integrating ACPICA, Linux can
- also benefit from the application experiences of ACPICA from other host
- OSes.
+The ACPI Component Architecture (ACPICA) project provides an operating
+system (OS)-independent reference implementation of the Advanced
+Configuration and Power Interface Specification (ACPI). It has been
+adapted by various host OSes. By directly integrating ACPICA, Linux can
+also benefit from the application experiences of ACPICA from other host
+OSes.
- The homepage of ACPICA project is: www.acpica.org, it is maintained and
- supported by Intel Corporation.
+The homepage of the ACPICA project is www.acpica.org; it is maintained and
+supported by Intel Corporation.
- The following figure depicts the Linux ACPI subsystem where the ACPICA
- adaptation is included:
+The following figure depicts the Linux ACPI subsystem where the ACPICA
+adaptation is included::
+---------------------------------------------------------+
| |
@@ -71,21 +77,27 @@ upstream.
Figure 1. Linux ACPI Software Components
- NOTE:
+.. note::
A. OS Service Layer - Provided by Linux to offer OS dependent
implementation of the predefined ACPICA interfaces (acpi_os_*).
+ ::
+
include/acpi/acpiosxf.h
drivers/acpi/osl.c
include/acpi/platform
include/asm/acenv.h
B. ACPICA Functionality - Released from ACPICA code base to offer
OS independent implementation of the ACPICA interfaces (acpi_*).
+ ::
+
drivers/acpi/acpica
include/acpi/ac*.h
tools/power/acpi
C. Linux/ACPI Functionality - Providing Linux specific ACPI
functionality to the other Linux kernel subsystems and user space
programs.
+ ::
+
drivers/acpi
include/linux/acpi.h
include/linux/acpi*.h
@@ -95,24 +107,27 @@ upstream.
ACPI subsystem to offer architecture specific implementation of the
ACPI interfaces. They are Linux specific components and are out of
the scope of this document.
+ ::
+
include/asm/acpi.h
include/asm/acpi*.h
arch/*/acpi
-2. ACPICA Release
+ACPICA Release
+==============
- The ACPICA project maintains its code base at the following repository URL:
- https://github.com/acpica/acpica.git. As a rule, a release is made every
- month.
+The ACPICA project maintains its code base at the following repository URL:
+https://github.com/acpica/acpica.git. As a rule, a release is made every
+month.
- As the coding style adopted by the ACPICA project is not acceptable by
- Linux, there is a release process to convert the ACPICA git commits into
- Linux patches. The patches generated by this process are referred to as
- "linuxized ACPICA patches". The release process is carried out on a local
- copy the ACPICA git repository. Each commit in the monthly release is
- converted into a linuxized ACPICA patch. Together, they form the monthly
- ACPICA release patchset for the Linux ACPI community. This process is
- illustrated in the following figure:
+As the coding style adopted by the ACPICA project is not acceptable to
+Linux, there is a release process to convert the ACPICA git commits into
+Linux patches. The patches generated by this process are referred to as
+"linuxized ACPICA patches". The release process is carried out on a local
+copy of the ACPICA git repository. Each commit in the monthly release is
+converted into a linuxized ACPICA patch. Together, they form the monthly
+ACPICA release patchset for the Linux ACPI community. This process is
+illustrated in the following figure::
+-----------------------------+
| acpica / master (-) commits |
@@ -153,7 +168,7 @@ upstream.
Figure 2. ACPICA -> Linux Upstream Process
- NOTE:
+.. note::
A. Linuxize Utilities - Provided by the ACPICA repository, including a
utility located in source/tools/acpisrc folder and a number of
scripts located in generate/linux folder.
@@ -170,19 +185,20 @@ upstream.
following kernel configuration options:
CONFIG_ACPI/CONFIG_ACPI_DEBUG/CONFIG_ACPI_DEBUGGER
-3. ACPICA Divergences
+ACPICA Divergences
+==================
- Ideally, all of the ACPICA commits should be converted into Linux patches
- automatically without manual modifications, the "linux / master" tree should
- contain the ACPICA code that exactly corresponds to the ACPICA code
- contained in "new linuxized acpica" tree and it should be possible to run
- the release process fully automatically.
+Ideally, all of the ACPICA commits should be converted into Linux patches
+automatically without manual modifications; the "linux / master" tree should
+contain the ACPICA code that exactly corresponds to the ACPICA code
+contained in "new linuxized acpica" tree and it should be possible to run
+the release process fully automatically.
- As a matter of fact, however, there are source code differences between
- the ACPICA code in Linux and the upstream ACPICA code, referred to as
- "ACPICA Divergences".
+As a matter of fact, however, there are source code differences between
+the ACPICA code in Linux and the upstream ACPICA code, referred to as
+"ACPICA Divergences".
- The various sources of ACPICA divergences include:
+The various sources of ACPICA divergences include:
1. Legacy divergences - Before the current ACPICA release process was
established, there already had been divergences between Linux and
ACPICA. Over the past several years those divergences have been greatly
@@ -213,11 +229,12 @@ upstream.
rebased on the ACPICA side in order to offer better solutions, new ACPICA
divergences are generated.
-4. ACPICA Development
+ACPICA Development
+==================
- This paragraph guides Linux developers to use the ACPICA upstream release
- utilities to obtain Linux patches corresponding to upstream ACPICA commits
- before they become available from the ACPICA release process.
+This section shows Linux developers how to use the ACPICA upstream release
+utilities to obtain Linux patches corresponding to upstream ACPICA commits
+before they become available from the ACPICA release process.
1. Cherry-pick an ACPICA commit
@@ -225,7 +242,7 @@ upstream.
you want to cherry pick must be committed into the local repository.
Then the gen-patch.sh command can help to cherry-pick an ACPICA commit
- from the ACPICA local repository:
+ from the ACPICA local repository::
$ git clone https://github.com/acpica/acpica
$ cd acpica
@@ -240,7 +257,7 @@ upstream.
changes that haven't been applied to Linux yet.
You can generate the ACPICA release series yourself and rebase your code on
- top of the generated ACPICA release patches:
+ top of the generated ACPICA release patches::
$ git clone https://github.com/acpica/acpica
$ cd acpica
@@ -254,7 +271,7 @@ upstream.
3. Inspect the current divergences
If you have local copies of both Linux and upstream ACPICA, you can generate
- a diff file indicating the state of the current divergences:
+ a diff file indicating the state of the current divergences::
# git clone https://github.com/acpica/acpica
# git clone http://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/Documentation/acpi/scan_handlers.txt b/Documentation/driver-api/acpi/scan_handlers.rst
index 3246ccf15992..7a197b3a33fc 100644
--- a/Documentation/acpi/scan_handlers.txt
+++ b/Documentation/driver-api/acpi/scan_handlers.rst
@@ -1,7 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+==================
ACPI Scan Handlers
+==================
+
+:Copyright: |copy| 2012, Intel Corporation
-Copyright (C) 2012, Intel Corporation
-Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
During system initialization and ACPI-based device hot-add, the ACPI namespace
is scanned in search of device objects that generally represent various pieces
@@ -30,14 +36,14 @@ to configure that link so that the kernel can use it.
Those additional configuration tasks usually depend on the type of the hardware
component represented by the given device node which can be determined on the
basis of the device node's hardware ID (HID). They are performed by objects
-called ACPI scan handlers represented by the following structure:
+called ACPI scan handlers represented by the following structure::
-struct acpi_scan_handler {
- const struct acpi_device_id *ids;
- struct list_head list_node;
- int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
- void (*detach)(struct acpi_device *dev);
-};
+ struct acpi_scan_handler {
+ const struct acpi_device_id *ids;
+ struct list_head list_node;
+ int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
+ void (*detach)(struct acpi_device *dev);
+ };
where ids is the list of IDs of device nodes the given handler is supposed to
take care of, list_node is the hook to the global list of ACPI scan handlers
diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst
index 2da4a8f20607..57e37590733f 100644
--- a/Documentation/driver-api/component.rst
+++ b/Documentation/driver-api/component.rst
@@ -1,3 +1,5 @@
+.. _component:
+
======================================
Component Helper for Aggregate Drivers
======================================
diff --git a/Documentation/driver-api/device-io.rst b/Documentation/driver-api/device-io.rst
index b00b23903078..0e389378f71d 100644
--- a/Documentation/driver-api/device-io.rst
+++ b/Documentation/driver-api/device-io.rst
@@ -103,51 +103,6 @@ continuing execution::
ha->flags.ints_enabled = 0;
}
-In addition to write posting, on some large multiprocessing systems
-(e.g. SGI Challenge, Origin and Altix machines) posted writes won't be
-strongly ordered coming from different CPUs. Thus it's important to
-properly protect parts of your driver that do memory-mapped writes with
-locks and use the :c:func:`mmiowb()` to make sure they arrive in the
-order intended. Issuing a regular readX() will also ensure write ordering,
-but should only be used when the
-driver has to be sure that the write has actually arrived at the device
-(not that it's simply ordered with respect to other writes), since a
-full readX() is a relatively expensive operation.
-
-Generally, one should use :c:func:`mmiowb()` prior to releasing a spinlock
-that protects regions using :c:func:`writeb()` or similar functions that
-aren't surrounded by readb() calls, which will ensure ordering
-and flushing. The following pseudocode illustrates what might occur if
-write ordering isn't guaranteed via :c:func:`mmiowb()` or one of the
-readX() functions::
-
- CPU A: spin_lock_irqsave(&dev_lock, flags)
- CPU A: ...
- CPU A: writel(newval, ring_ptr);
- CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
- CPU B: spin_lock_irqsave(&dev_lock, flags)
- CPU B: writel(newval2, ring_ptr);
- CPU B: ...
- CPU B: spin_unlock_irqrestore(&dev_lock, flags)
-
-In the case above, newval2 could be written to ring_ptr before newval.
-Fixing it is easy though::
-
- CPU A: spin_lock_irqsave(&dev_lock, flags)
- CPU A: ...
- CPU A: writel(newval, ring_ptr);
- CPU A: mmiowb(); /* ensure no other writes beat us to the device */
- CPU A: spin_unlock_irqrestore(&dev_lock, flags)
- ...
- CPU B: spin_lock_irqsave(&dev_lock, flags)
- CPU B: writel(newval2, ring_ptr);
- CPU B: ...
- CPU B: mmiowb();
- CPU B: spin_unlock_irqrestore(&dev_lock, flags)
-
-See tg3.c for a real world example of how to use :c:func:`mmiowb()`
-
PCI ordering rules also guarantee that PIO read responses arrive after any
outstanding DMA writes from that bus, since for some devices the result of
a readb() call may signal to the driver that a DMA transaction is
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
new file mode 100644
index 000000000000..f51db893f595
--- /dev/null
+++ b/Documentation/driver-api/generic-counter.rst
@@ -0,0 +1,342 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+Generic Counter Interface
+=========================
+
+Introduction
+============
+
+Counter devices are prevalent within a diverse spectrum of industries.
+The ubiquitous presence of these devices necessitates a common interface
+and standard of interaction and exposure. This driver API attempts to
+resolve the issue of duplicate code found among existing counter device
+drivers by introducing a generic counter interface for consumption. The
+Generic Counter interface enables drivers to support and expose a common
+set of components and functionality present in counter devices.
+
+Theory
+======
+
+Counter devices can vary greatly in design, but regardless of whether
+some devices are quadrature encoder counters or tally counters, all
+counter devices consist of a core set of components. This core set of
+components, shared by all counter devices, is what forms the essence of
+the Generic Counter interface.
+
+There are three core components to a counter:
+
+* Count:
+ Count data for a set of Signals.
+
+* Signal:
+ Input data that is evaluated by the counter to determine the count
+ data.
+
+* Synapse:
+ The association of a Signal with a respective Count.
+
+COUNT
+-----
+A Count represents the count data for a set of Signals. The Generic
+Counter interface provides the following available count data types:
+
+* COUNT_POSITION:
+ Unsigned integer value representing position.
+
+A Count has a count function mode which represents the update behavior
+for the count data. The Generic Counter interface provides the following
+available count function modes:
+
+* Increase:
+ Accumulated count is incremented.
+
+* Decrease:
+ Accumulated count is decremented.
+
+* Pulse-Direction:
+ Rising edges on signal A update the respective count. The input level
+ of signal B determines direction.
+
+* Quadrature:
+ A pair of quadrature encoding signals are evaluated to determine
+ position and direction. The following Quadrature modes are available:
+
+ - x1 A:
+ If direction is forward, rising edges on quadrature pair signal A
+ update the respective count; if the direction is backward, falling
+ edges on quadrature pair signal A update the respective count.
+ Quadrature encoding determines the direction.
+
+ - x1 B:
+ If direction is forward, rising edges on quadrature pair signal B
+ update the respective count; if the direction is backward, falling
+ edges on quadrature pair signal B update the respective count.
+ Quadrature encoding determines the direction.
+
+ - x2 A:
+ Any state transition on quadrature pair signal A updates the
+ respective count. Quadrature encoding determines the direction.
+
+ - x2 B:
+ Any state transition on quadrature pair signal B updates the
+ respective count. Quadrature encoding determines the direction.
+
+ - x4:
+ Any state transition on either quadrature pair signal updates the
+ respective count. Quadrature encoding determines the direction.
+
+A Count has a set of one or more associated Signals.
+
+SIGNAL
+------
+A Signal represents counter input data; this is the input data that is
+evaluated by the counter to determine the count data; e.g. a quadrature
+signal output line of a rotary encoder. Not all counter devices provide
+user access to the Signal data.
+
+The Generic Counter interface provides the following available signal
+data types for when the Signal data is available for user access:
+
+* SIGNAL_LEVEL:
+ Signal line state level. The following states are possible:
+
+ - SIGNAL_LEVEL_LOW:
+ Signal line is in a low state.
+
+ - SIGNAL_LEVEL_HIGH:
+ Signal line is in a high state.
+
+A Signal may be associated with one or more Counts.
+
+SYNAPSE
+-------
+A Synapse represents the association of a Signal with a respective
+Count. Signal data affects respective Count data, and the Synapse
+represents this relationship.
+
+The Synapse action mode specifies the Signal data condition which
+triggers the respective Count's count function evaluation to update the
+count data. The Generic Counter interface provides the following
+available action modes:
+
+* None:
+ Signal does not trigger the count function. In Pulse-Direction count
+ function mode, this Signal is evaluated as Direction.
+
+* Rising Edge:
+ Low state transitions to high state.
+
+* Falling Edge:
+ High state transitions to low state.
+
+* Both Edges:
+ Any state transition.
+
+A counter is defined as a set of input signals associated with count
+data that are generated by the evaluation of the state of the associated
+input signals as defined by the respective count functions. Within the
+context of the Generic Counter interface, a counter consists of Counts
+each associated with a set of Signals, whose respective Synapse
+instances represent the count function update conditions for the
+associated Counts.
+
+Paradigm
+========
+
+The most basic counter device may be expressed as a single Count
+associated with a single Signal via a single Synapse. Take for example
+a counter device which simply accumulates a count of rising edges on a
+source input line::
+
+ Count Synapse Signal
+ ----- ------- ------
+ +---------------------+
+ | Data: Count | Rising Edge ________
+ | Function: Increase | <------------- / Source \
+ | | ____________
+ +---------------------+
+
+In this example, the Signal is a source input line with a pulsing
+voltage, while the Count is a persistent count value which is repeatedly
+incremented. The Signal is associated with the respective Count via a
+Synapse. The increase function is triggered by the Signal data condition
+specified by the Synapse -- in this case a rising edge condition on the
+voltage input line. In summary, the counter device's existence and
+behavior are aptly represented by the respective Count, Signal, and
+Synapse components: a rising edge condition triggers an increase
+function on an accumulating count datum.
+
+A counter device is not limited to a single Signal; in fact, in theory
+many Signals may be associated with even a single Count. For example, a
+quadrature encoder counter device can keep track of position based on
+the states of two input lines::
+
+ Count Synapse Signal
+ ----- ------- ------
+ +-------------------------+
+ | Data: Position | Both Edges ___
+ | Function: Quadrature x4 | <------------ / A \
+ | | _______
+ | |
+ | | Both Edges ___
+ | | <------------ / B \
+ | | _______
+ +-------------------------+
+
+In this example, two Signals (quadrature encoder lines A and B) are
+associated with a single Count: a rising or falling edge on either A or
+B triggers the "Quadrature x4" function which determines the direction
+of movement and updates the respective position data. The "Quadrature
+x4" function is likely implemented in the hardware of the quadrature
+encoder counter device; the Count, Signals, and Synapses simply
+represent this hardware behavior and functionality.
+
+Signals associated with the same Count can have differing Synapse action
+mode conditions. For example, a quadrature encoder counter device
+operating in a non-quadrature Pulse-Direction mode could have one input
+line dedicated for movement and a second input line dedicated for
+direction::
+
+ Count Synapse Signal
+ ----- ------- ------
+ +---------------------------+
+ | Data: Position | Rising Edge ___
+ | Function: Pulse-Direction | <------------- / A \ (Movement)
+ | | _______
+ | |
+ | | None ___
+ | | <------------- / B \ (Direction)
+ | | _______
+ +---------------------------+
+
+Only Signal A triggers the "Pulse-Direction" update function, but the
+instantaneous state of Signal B is still required in order to know the
+direction so that the position data may be properly updated. Ultimately,
+both Signals are associated with the same Count via two respective
+Synapses, but only one Synapse has an active action mode condition which
+triggers the respective count function, while the other is left with a
+"None" action mode condition to indicate that its respective Signal is
+still available for state evaluation despite its non-triggering mode.
+
+Keep in mind that the Signal, Synapse, and Count are abstract
+representations which do not need to be closely married to their
+respective physical sources. This allows the user of a counter to
+divorce themselves from the nuances of physical components (such as
+whether an input line is differential or single-ended) and instead focus
+on the core idea of what the data and process represent (e.g. position
+as interpreted from quadrature encoding data).
+
+Userspace Interface
+===================
+
+Several sysfs attributes are generated by the Generic Counter interface,
+and reside under the /sys/bus/counter/devices/counterX directory, where
+counterX refers to the respective counter device. Please see
+Documentation/ABI/testing/sysfs-bus-counter for detailed
+information on each Generic Counter interface sysfs attribute.
+
+Through these sysfs attributes, programs and scripts may interact with
+the Generic Counter paradigm Counts, Signals, and Synapses of respective
+counter devices.
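+
+For illustration, a user-space program may read back count data through
+these sysfs attributes. The sketch below assumes a counter0 device whose
+Count 0 exposes its count data via a "count" attribute; refer to the ABI
+documentation mentioned above for the authoritative attribute names::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long count;
+          FILE *f;
+
+          f = fopen("/sys/bus/counter/devices/counter0/count0/count", "r");
+          if (!f)
+                  return 1;
+          if (fscanf(f, "%lu", &count) == 1)
+                  printf("count0: %lu\n", count);
+          fclose(f);
+
+          return 0;
+  }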
+
+Driver API
+==========
+
+Driver authors may utilize the Generic Counter interface in their code
+by including the include/linux/counter.h header file. This header file
+provides several core data structures, function prototypes, and macros
+for defining a counter device.
+
+.. kernel-doc:: include/linux/counter.h
+ :internal:
+
+.. kernel-doc:: drivers/counter/generic-counter.c
+ :export:
+
+Implementation
+==============
+
+To support a counter device, a driver must first allocate the available
+Counter Signals via counter_signal structures. These Signals should
+be stored as an array and set to the signals array member of an
+allocated counter_device structure before the Counter is registered to
+the system.
+
+Counter Counts may be allocated via counter_count structures, and
+respective Counter Signal associations (Synapses) made via
+counter_synapse structures. Associated counter_synapse structures are
+stored as an array and set to the synapses array member of the
+respective counter_count structure. These counter_count structures are
+set to the counts array member of an allocated counter_device structure
+before the Counter is registered to the system.
+
+Driver callbacks should be provided to the counter_device structure via
+a constant counter_ops structure in order to communicate with the
+device: to read and write various Signals and Counts, and to set and get
+the "action mode" and "function mode" for various Synapses and Counts
+respectively.
+
+A defined counter_device structure may be registered to the system by
+passing it to the counter_register function, and unregistered by passing
+it to the counter_unregister function. Similarly, the
+devm_counter_register and devm_counter_unregister functions may be used
+if device memory-managed registration is desired.
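+
+For illustration, the registration flow described above might look roughly
+as follows in a driver's probe function. The foo_* names are hypothetical,
+the count function and action mode tables are omitted for brevity, and any
+structure members not mentioned in this document are illustrative; see
+include/linux/counter.h for the authoritative definitions::
+
+  #include <linux/counter.h>
+  #include <linux/device.h>
+  #include <linux/platform_device.h>
+
+  static struct counter_signal foo_signals[] = {
+          { .id = 0, .name = "Channel A" },
+          { .id = 1, .name = "Channel B" },
+  };
+
+  /* Synapses associating the Signals above with the Count below */
+  static struct counter_synapse foo_synapses[] = {
+          { .signal = &foo_signals[0] },
+          { .signal = &foo_signals[1] },
+  };
+
+  static struct counter_count foo_counts[] = {
+          {
+                  .id = 0,
+                  .name = "Position",
+                  .synapses = foo_synapses,
+                  .num_synapses = ARRAY_SIZE(foo_synapses),
+          },
+  };
+
+  /* Driver callbacks (count_read, function_get, action_get, ...) would be
+   * defined by the driver and referenced here. */
+  static const struct counter_ops foo_ops;
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+          struct counter_device *counter;
+
+          counter = devm_kzalloc(&pdev->dev, sizeof(*counter), GFP_KERNEL);
+          if (!counter)
+                  return -ENOMEM;
+
+          counter->name = dev_name(&pdev->dev);
+          counter->parent = &pdev->dev;
+          counter->ops = &foo_ops;
+          counter->signals = foo_signals;
+          counter->num_signals = ARRAY_SIZE(foo_signals);
+          counter->counts = foo_counts;
+          counter->num_counts = ARRAY_SIZE(foo_counts);
+
+          return devm_counter_register(&pdev->dev, counter);
+  }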
+
+Extension sysfs attributes can be created for auxiliary functionality
+and data by passing in defined counter_device_ext, counter_count_ext,
+and counter_signal_ext structures. In these cases, the
+counter_device_ext structure is used for global configuration of the
+respective Counter device, while the counter_count_ext and
+counter_signal_ext structures allow for auxiliary exposure and
+configuration of a specific Count or Signal respectively.
+
+Architecture
+============
+
+When the Generic Counter interface counter module is loaded, the
+counter_init function is called which registers a bus_type named
+"counter" to the system. Subsequently, when the module is unloaded, the
+counter_exit function is called which unregisters the bus_type named
+"counter" from the system.
+
+Counter devices are registered to the system via the counter_register
+function, and later removed via the counter_unregister function. The
+counter_register function establishes a unique ID for the Counter
+device and creates a respective sysfs directory, where X is the
+mentioned unique ID:
+
+ /sys/bus/counter/devices/counterX
+
+Sysfs attributes are created within the counterX directory to expose
+functionality, configurations, and data relating to the Counts, Signals,
+and Synapses of the Counter device, as well as options and information
+for the Counter device itself.
+
+Each Signal has a directory created to house its relevant sysfs
+attributes, where Y is the unique ID of the respective Signal:
+
+ /sys/bus/counter/devices/counterX/signalY
+
+Similarly, each Count has a directory created to house its relevant
+sysfs attributes, where Y is the unique ID of the respective Count:
+
+ /sys/bus/counter/devices/counterX/countY
+
+For a more detailed breakdown of the available Generic Counter interface
+sysfs attributes, please refer to the
+Documentation/ABI/testing/sysfs-bus-counter file.
+
+The Signals and Counts associated with the Counter device are registered
+to the system as well by the counter_register function. The
+signal_read/signal_write driver callbacks are associated with their
+respective Signal attributes, while the count_read/count_write and
+function_get/function_set driver callbacks are associated with their
+respective Count attributes; similarly, the same is true for the
+action_get/action_set driver callbacks and their respective Synapse
+attributes. If a driver callback is left undefined, then the respective
+read/write permission is left disabled for the relevant attributes.
+
+Similarly, extension sysfs attributes are created for the defined
+counter_device_ext, counter_count_ext, and counter_signal_ext
+structures that are passed in.
diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst
index 3043167fc557..1ce7fcd0f989 100644
--- a/Documentation/driver-api/gpio/driver.rst
+++ b/Documentation/driver-api/gpio/driver.rst
@@ -1,10 +1,8 @@
-================================
-GPIO Descriptor Driver Interface
-================================
+=====================
+GPIO Driver Interface
+=====================
-This document serves as a guide for GPIO chip drivers writers. Note that it
-describes the new descriptor-based interface. For a description of the
-deprecated integer-based GPIO interface please refer to gpio-legacy.txt.
+This document serves as a guide for writers of GPIO chip drivers.
Each GPIO controller driver needs to include the following header, which defines
the structures used to define a GPIO driver:
@@ -15,32 +13,49 @@ the structures used to define a GPIO driver:
Internal Representation of GPIOs
================================
-Inside a GPIO driver, individual GPIOs are identified by their hardware number,
-which is a unique number between 0 and n, n being the number of GPIOs managed by
-the chip. This number is purely internal: the hardware number of a particular
-GPIO descriptor is never made visible outside of the driver.
-
-On top of this internal number, each GPIO also need to have a global number in
-the integer GPIO namespace so that it can be used with the legacy GPIO
+A GPIO chip handles one or more GPIO lines. To be considered a GPIO chip, the
+lines must conform to the definition: General Purpose Input/Output. If the
+line is not general purpose, it is not GPIO and should not be handled by a
+GPIO chip. The use case is indicative: certain lines in a system may be
+called GPIO but serve a very particular purpose, thus not meeting the
+criteria of a general purpose I/O. On the other hand, an LED driver line may
+be used as a GPIO and should therefore still be handled by a GPIO chip
+driver.
+
+Inside a GPIO driver, individual GPIO lines are identified by their hardware
+number, sometimes also referred to as ``offset``, which is a unique number
+between 0 and n-1, n being the number of GPIOs managed by the chip.
+
+The hardware GPIO number should be something intuitive to the hardware, for
+example if a system uses a memory-mapped set of I/O-registers where 32 GPIO
+lines are handled by one bit per line in a 32-bit register, it makes sense to
+use hardware offsets 0..31 for these, corresponding to bits 0..31 in the
+register.
+
+This number is purely internal: the hardware number of a particular GPIO
+line is never made visible outside of the driver.
+
+On top of this internal number, each GPIO line also needs to have a global
+number in the integer GPIO namespace so that it can be used with the legacy GPIO
interface. Each chip must thus have a "base" number (which can be automatically
-assigned), and for each GPIO the global number will be (base + hardware number).
-Although the integer representation is considered deprecated, it still has many
-users and thus needs to be maintained.
+assigned), and for each GPIO line the global number will be (base + hardware
+number). Although the integer representation is considered deprecated, it still
+has many users and thus needs to be maintained.
-So for example one platform could use numbers 32-159 for GPIOs, with a
+So for example one platform could use global numbers 32-159 for GPIOs, with a
controller defining 128 GPIOs at a "base" of 32 ; while another platform uses
-numbers 0..63 with one set of GPIO controllers, 64-79 with another type of GPIO
-controller, and on one particular board 80-95 with an FPGA. The numbers need not
-be contiguous; either of those platforms could also use numbers 2000-2063 to
-identify GPIOs in a bank of I2C GPIO expanders.
+global numbers 0..63 with one set of GPIO controllers, 64-79 with another type
+of GPIO controller, and on one particular board 80-95 with an FPGA. The legacy
+numbers need not be contiguous; either of those platforms could also use numbers
+2000-2063 to identify GPIO lines in a bank of I2C GPIO expanders.
Controller Drivers: gpio_chip
=============================
In the gpiolib framework each GPIO controller is packaged as a "struct
-gpio_chip" (see linux/gpio/driver.h for its complete definition) with members
-common to each controller of that type:
+gpio_chip" (see <linux/gpio/driver.h> for its complete definition) with members
+common to each controller of that type; these should be assigned by the
+driver code:
- methods to establish GPIO line direction
- methods used to access GPIO line values
@@ -48,12 +63,12 @@ common to each controller of that type:
- method to return the IRQ number associated to a given GPIO line
- flag saying whether calls to its methods may sleep
- optional line names array to identify lines
- - optional debugfs dump method (showing extra state like pullup config)
+ - optional debugfs dump method (showing extra state information)
- optional base number (will be automatically assigned if omitted)
- optional label for diagnostics and GPIO chip mapping using platform data
The code implementing a gpio_chip should support multiple instances of the
-controller, possibly using the driver model. That code will configure each
+controller, preferably using the driver model. That code will configure each
gpio_chip and issue ``gpiochip_add[_data]()`` or ``devm_gpiochip_add_data()``.
Removing a GPIO controller should be rare; use ``[devm_]gpiochip_remove()``
when it is unavoidable.
@@ -62,24 +77,28 @@ Often a gpio_chip is part of an instance-specific structure with states not
exposed by the GPIO interfaces, such as addressing, power management, and more.
Chips such as audio codecs will have complex non-GPIO states.
-Any debugfs dump method should normally ignore signals which haven't been
-requested as GPIOs. They can use gpiochip_is_requested(), which returns either
-NULL or the label associated with that GPIO when it was requested.
+Any debugfs dump method should normally ignore lines which haven't been
+requested. They can use gpiochip_is_requested(), which returns either
+NULL or the label associated with that GPIO line when it was requested.
-RT_FULL: the GPIO driver should not use spinlock_t or any sleepable APIs
-(like PM runtime) in its gpio_chip implementation (.get/.set and direction
-control callbacks) if it is expected to call GPIO APIs from atomic context
-on -RT (inside hard IRQ handlers and similar contexts). Normally this should
-not be required.
+Realtime considerations: the GPIO driver should not use spinlock_t or any
+sleepable APIs (like PM runtime) in its gpio_chip implementation (.get/.set
+and direction control callbacks) if it is expected to call GPIO APIs from
+atomic context on realtime kernels (inside hard IRQ handlers and similar
+contexts). Normally this should not be required.
GPIO electrical configuration
-----------------------------
-GPIOs can be configured for several electrical modes of operation by using the
-.set_config() callback. Currently this API supports setting debouncing and
-single-ended modes (open drain/open source). These settings are described
-below.
+GPIO lines can be configured for several electrical modes of operation by using
+the .set_config() callback. Currently this API supports setting:
+
+- Debouncing
+- Single-ended modes (open drain/open source)
+- Pull up and pull down resistor enablement
+
+These settings are described below.
The .set_config() callback uses the same enumerators and configuration
semantics as the generic pin control drivers. This is not a coincidence: it is
@@ -94,8 +113,8 @@ description needs to provide "GPIO ranges" mapping the GPIO line offsets to pin
numbers on the pin controller so they can properly cross-reference each other.
-GPIOs with debounce support
----------------------------
+GPIO lines with debounce support
+--------------------------------
Debouncing is a configuration set to a pin indicating that it is connected to
a mechanical switch or button, or similar that may bounce. Bouncing means the
@@ -111,8 +130,8 @@ a certain number of milliseconds for debouncing, or just "on/off" if that time
is not configurable.
-GPIOs with open drain/source support
-------------------------------------
+GPIO lines with open drain/source support
+-----------------------------------------
Open drain (CMOS) or open collector (TTL) means the line is not actively driven
high: instead you provide the drain/collector as output, so when the transistor
@@ -132,13 +151,13 @@ This configuration is normally used as a way to achieve one of two things:
- Level-shifting: to reach a logical level higher than that of the silicon
where the output resides.
-- inverse wire-OR on an I/O line, for example a GPIO line, making it possible
+- Inverse wire-OR on an I/O line, for example a GPIO line, making it possible
for any driving stage on the line to drive it low even if any other output
to the same line is simultaneously driving it high. A special case of this
is driving the SCL and SDA lines of an I2C bus, which is by definition a
wire-OR bus.
-Both usecases require that the line be equipped with a pull-up resistor. This
+Both use cases require that the line be equipped with a pull-up resistor. This
resistor will make the line tend to high level unless one of the transistors on
the rail actively pulls it down.
@@ -208,27 +227,91 @@ For open source configuration the same principle is used, just that instead
of actively driving the line low, it is set to input.
+GPIO lines with pull up/down resistor support
+---------------------------------------------
+
+A GPIO line can support pull-up/down using the .set_config() callback. This
+means that a pull up or pull-down resistor is available on the output of the
+GPIO line, and this resistor is software controlled.
+
+In discrete designs, a pull-up or pull-down resistor is simply soldered on
+the circuit board. This is not something we deal with or model in software.
+The most you will think about these lines is that they will very likely be
+configured as open drain or open source (see the section above).
+
+The .set_config() callback can only turn pull up or down on and off, and will
+not have any semantic knowledge about the resistance used. It will simply
+switch a bit in a register enabling or disabling pull-up or pull-down.
+
+If the GPIO line supports shunting in different resistance values for the
+pull-up or pull-down resistor, the GPIO chip callback .set_config() will not
+suffice. For these complex use cases, a combined GPIO chip and pin controller
+needs to be implemented, as the pin config interface of a pin controller
+supports more versatile control over electrical properties and can handle
+different pull-up or pull-down resistance values.
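+
+For illustration, a .set_config() callback handling only the bias settings
+could be structured roughly as follows. The foo_* names and the register
+manipulation are hypothetical; only the callback prototype and the generic
+pin configuration parameters come from the frameworks::
+
+  #include <linux/gpio/driver.h>
+  #include <linux/pinctrl/pinconf-generic.h>
+
+  static int foo_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                                 unsigned long config)
+  {
+          struct foo_gpio *foo = gpiochip_get_data(gc);
+
+          switch (pinconf_to_config_param(config)) {
+          case PIN_CONFIG_BIAS_PULL_UP:
+                  /* switch the bit enabling the pull-up on this line */
+                  return foo_set_bias(foo, offset, FOO_BIAS_PULL_UP);
+          case PIN_CONFIG_BIAS_PULL_DOWN:
+                  return foo_set_bias(foo, offset, FOO_BIAS_PULL_DOWN);
+          case PIN_CONFIG_BIAS_DISABLE:
+                  return foo_set_bias(foo, offset, FOO_BIAS_NONE);
+          default:
+                  return -ENOTSUPP;
+          }
+  }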
+
+
GPIO drivers providing IRQs
----------------------------
+===========================
+
It is custom that GPIO drivers (GPIO chips) are also providing interrupts,
most often cascaded off a parent interrupt controller, and in some special
cases the GPIO logic is melded with a SoC's primary interrupt controller.
-The IRQ portions of the GPIO block are implemented using an irqchip, using
+The IRQ portions of the GPIO block are implemented using an irq_chip, using
the header <linux/irq.h>. So basically such a driver is utilizing two sub-
systems simultaneously: gpio and irq.
-RT_FULL: a realtime compliant GPIO driver should not use spinlock_t or any
-sleepable APIs (like PM runtime) as part of its irq_chip implementation.
+It is legal for any IRQ consumer to request an IRQ from any irqchip even if it
+is a combined GPIO+IRQ driver. The basic premise is that gpio_chip and
+irq_chip are orthogonal and offer their services independently of each
+other.
-* spinlock_t should be replaced with raw_spinlock_t [1].
-* If sleepable APIs have to be used, these can be done from the .irq_bus_lock()
+gpiod_to_irq() is just a convenience function to figure out the IRQ for a
+certain GPIO line and should not be relied upon to have been called before
+the IRQ is used.
+
+Always prepare the hardware and make it ready for action in the respective
+callbacks from the GPIO and irq_chip APIs. Do not rely on gpiod_to_irq() having
+been called first.
+
+We can divide GPIO irqchips into two broad categories:
+
+- CASCADED INTERRUPT CHIPS: this means that the GPIO chip has one common
+ interrupt output line, which is triggered by any enabled GPIO line on that
+ chip. The interrupt output line will then be routed to a parent interrupt
+ controller one level up, in the simplest case the system's primary
+ interrupt controller. This is modeled by an irqchip that will inspect bits
+ inside the GPIO controller to figure out which line fired it. The irqchip
+ part of the driver needs to inspect registers to figure this out and it
+ will likely also need to acknowledge that it is handling the interrupt
+ by clearing some bit (sometimes implicitly, by just reading a status
+ register) and it will often need to set up the configuration such as
+ edge sensitivity (rising or falling edge, or high/low level interrupt for
+ example).
+
+- HIERARCHICAL INTERRUPT CHIPS: this means that each GPIO line has a dedicated
+ irq line to a parent interrupt controller one level up. There is no need
+ to query the GPIO hardware to figure out which line has fired, but it
+ may still be necessary to acknowledge the interrupt and set up the
+ configuration such as edge sensitivity.
+
+Realtime considerations: a realtime compliant GPIO driver should not use
+spinlock_t or any sleepable APIs (like PM runtime) as part of its irqchip
+implementation.
+
+- spinlock_t should be replaced with raw_spinlock_t [1].
+- If sleepable APIs have to be used, these can be done from the .irq_bus_lock()
and .irq_bus_unlock() callbacks, as these are the only slowpath callbacks
on an irqchip. Create the callbacks if needed [2].
-GPIO irqchips usually fall in one of two categories:
-* CHAINED GPIO irqchips: these are usually the type that is embedded on
+Cascaded GPIO irqchips
+----------------------
+
+Cascaded GPIO irqchips usually fall into one of three categories:
+
+- CHAINED CASCADED GPIO IRQCHIPS: these are usually the type that is embedded on
an SoC. This means that there is a fast IRQ flow handler for the GPIOs that
gets called in a chain from the parent IRQ handler, most typically the
system interrupt controller. This means that the GPIO irqchip handler will
@@ -245,16 +328,19 @@ GPIO irqchips usually fall in one of two categories:
struct gpio_chip, as everything happens directly in the callbacks: no
slow bus traffic like I2C can be used.
- RT_FULL: Note, chained IRQ handlers will not be forced threaded on -RT.
- As result, spinlock_t or any sleepable APIs (like PM runtime) can't be used
- in chained IRQ handler.
- If required (and if it can't be converted to the nested threaded GPIO irqchip)
- a chained IRQ handler can be converted to generic irq handler and this way
- it will be a threaded IRQ handler on -RT and a hard IRQ handler on non-RT
- (for example, see [3]).
- Know W/A: The generic_handle_irq() is expected to be called with IRQ disabled,
+ Realtime considerations: Note that chained IRQ handlers will not be forced
+ threaded on -RT. As a result, spinlock_t or any sleepable APIs (like PM
+ runtime) can't be used in a chained IRQ handler.
+
+ If required (and if it can't be converted to the nested threaded GPIO irqchip,
+ see below) a chained IRQ handler can be converted to generic irq handler and
+ this way it will become a threaded IRQ handler on -RT and a hard IRQ handler
+ on non-RT (for example, see [3]).
+
+ The generic_handle_irq() is expected to be called with IRQ disabled,
so the IRQ core will complain if it is called from an IRQ handler which is
- forced to a thread. The "fake?" raw lock can be used to W/A this problem::
+ forced to a thread. The "fake?" raw lock can be used to work around this
+ problem::
raw_spinlock_t wa_lock;
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
@@ -263,7 +349,7 @@ GPIO irqchips usually fall in one of two categories:
generic_handle_irq(irq_find_mapping(bank->chip.irq.domain, bit));
raw_spin_unlock_irqrestore(&bank->wa_lock, wa_lock_flags);
-* GENERIC CHAINED GPIO irqchips: these are the same as "CHAINED GPIO irqchips",
+- GENERIC CHAINED GPIO IRQCHIPS: these are the same as "CHAINED GPIO irqchips",
but chained IRQ handlers are not used. Instead GPIO IRQs dispatching is
performed by generic IRQ handler which is configured using request_irq().
The GPIO irqchip will then end up calling something like this sequence in
@@ -273,16 +359,19 @@ GPIO irqchips usually fall in one of two categories:
for each detected GPIO IRQ
generic_handle_irq(...);
- RT_FULL: Such kind of handlers will be forced threaded on -RT, as result IRQ
- core will complain that generic_handle_irq() is called with IRQ enabled and
- the same W/A as for "CHAINED GPIO irqchips" can be applied.
+ Realtime considerations: these kinds of handlers will be forced threaded
+ on -RT, and as a result the IRQ core will complain that generic_handle_irq()
+ is called with IRQ enabled. The same workaround as for "CHAINED GPIO
+ irqchips" can be applied.
+
+- NESTED THREADED GPIO IRQCHIPS: these are off-chip GPIO expanders and any
+ other GPIO irqchip residing on the other side of a sleeping bus such as I2C
+ or SPI.
-* NESTED THREADED GPIO irqchips: these are off-chip GPIO expanders and any
- other GPIO irqchip residing on the other side of a sleeping bus. Of course
- such drivers that need slow bus traffic to read out IRQ status and similar,
- traffic which may in turn incur other IRQs to happen, cannot be handled
- in a quick IRQ handler with IRQs disabled. Instead they need to spawn a
- thread and then mask the parent IRQ line until the interrupt is handled
+ Of course such drivers that need slow bus traffic to read out IRQ status and
+ similar, traffic which may in turn cause other IRQs to happen, cannot be
+ handled in a quick IRQ handler with IRQs disabled. Instead they need to spawn
+ a thread and then mask the parent IRQ line until the interrupt is handled
by the driver. The hallmark of this driver is to call something like
this in its interrupt handler::
@@ -294,36 +383,46 @@ GPIO irqchips usually fall in one of two categories:
flag on struct gpio_chip to true, indicating that this chip may sleep
when accessing the GPIOs.
+ These kinds of irqchips are inherently realtime tolerant as they are
+ already set up to handle sleeping contexts.
+
+
+Infrastructure helpers for GPIO irqchips
+----------------------------------------
+
To help out in handling the set-up and management of GPIO irqchips and the
associated irqdomain and resource allocation callbacks, the gpiolib has
some helpers that can be enabled by selecting the GPIOLIB_IRQCHIP Kconfig
symbol:
-* gpiochip_irqchip_add(): adds a chained irqchip to a gpiochip. It will pass
- the struct gpio_chip* for the chip to all IRQ callbacks, so the callbacks
- need to embed the gpio_chip in its state container and obtain a pointer
- to the container using container_of().
+- gpiochip_irqchip_add(): adds a chained cascaded irqchip to a gpiochip. It
+ will pass the struct gpio_chip* for the chip to all IRQ callbacks, so the
+ callbacks need to embed the gpio_chip in its state container and obtain a
+ pointer to the container using container_of().
(See Documentation/driver-model/design-patterns.txt)
-* gpiochip_irqchip_add_nested(): adds a nested irqchip to a gpiochip.
+- gpiochip_irqchip_add_nested(): adds a nested cascaded irqchip to a gpiochip,
+ as discussed above regarding different types of cascaded irqchips. The
+ cascaded irq has to be handled by a threaded interrupt handler.
Apart from that it works exactly like the chained irqchip.
-* gpiochip_set_chained_irqchip(): sets up a chained irq handler for a
+- gpiochip_set_chained_irqchip(): sets up a chained cascaded irq handler for a
gpio_chip from a parent IRQ and passes the struct gpio_chip* as handler
- data. (Notice handler data, since the irqchip data is likely used by the
- parent irqchip!).
+ data. Notice that we pass it as the handler data, since the irqchip data is
+ likely used by the parent irqchip.
-* gpiochip_set_nested_irqchip(): sets up a nested irq handler for a
+- gpiochip_set_nested_irqchip(): sets up a nested cascaded irq handler for a
gpio_chip from a parent IRQ. As the parent IRQ has usually been
explicitly requested by the driver, this does very little more than
mark all the child IRQs as having the other IRQ as parent.
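+
+For illustration, a driver for a chained cascaded GPIO irqchip could wire up
+these helpers roughly as follows (the foo_* names and the source of
+parent_irq are hypothetical)::
+
+  ret = gpiochip_irqchip_add(&foo->gc, &foo_irq_chip, 0,
+                             handle_bad_irq, IRQ_TYPE_NONE);
+  if (ret)
+          return ret;
+
+  /* route the parent (cascade) interrupt to this gpiochip */
+  gpiochip_set_chained_irqchip(&foo->gc, &foo_irq_chip, parent_irq,
+                               foo_irq_handler);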
-If there is a need to exclude certain GPIOs from the IRQ domain, you can
-set .irq.need_valid_mask of the gpiochip before gpiochip_add_data() is
-called. This allocates an .irq.valid_mask with as many bits set as there
-are GPIOs in the chip. Drivers can exclude GPIOs by clearing bits from this
-mask. The mask must be filled in before gpiochip_irqchip_add() or
-gpiochip_irqchip_add_nested() is called.
+If there is a need to exclude certain GPIO lines from the IRQ domain handled by
+these helpers, we can set .irq.need_valid_mask of the gpiochip before
+[devm_]gpiochip_add_data() is called. This allocates an .irq.valid_mask with as
+many bits set as there are GPIO lines in the chip, each bit representing line
+0..n-1. Drivers can exclude GPIO lines by clearing bits from this mask. The mask
+must be filled in before gpiochip_irqchip_add() or gpiochip_irqchip_add_nested()
+is called.
To use the helpers please keep the following in mind:
@@ -333,33 +432,24 @@ To use the helpers please keep the following in mind:
- Nominally set all handlers to handle_bad_irq() in the setup call and pass
handle_bad_irq() as flow handler parameter in gpiochip_irqchip_add() if it is
- expected for GPIO driver that irqchip .set_type() callback have to be called
- before using/enabling GPIO IRQ. Then set the handler to handle_level_irq()
- and/or handle_edge_irq() in the irqchip .set_type() callback depending on
- what your controller supports.
+ expected for the GPIO driver that the irqchip .set_type() callback will be
+ called before using/enabling each GPIO IRQ. Then set the handler to
+ handle_level_irq() and/or handle_edge_irq() in the irqchip .set_type()
+ callback depending on what your controller supports and what is requested
+ by the consumer.
-It is legal for any IRQ consumer to request an IRQ from any irqchip no matter
-if that is a combined GPIO+IRQ driver. The basic premise is that gpio_chip and
-irq_chip are orthogonal, and offering their services independent of each
-other.
-
-gpiod_to_irq() is just a convenience function to figure out the IRQ for a
-certain GPIO line and should not be relied upon to have been called before
-the IRQ is used.
-So always prepare the hardware and make it ready for action in respective
-callbacks from the GPIO and irqchip APIs. Do not rely on gpiod_to_irq() having
-been called first.
+Locking IRQ usage
+-----------------
-This orthogonality leads to ambiguities that we need to solve: if there is
-competition inside the subsystem which side is using the resource (a certain
-GPIO line and register for example) it needs to deny certain operations and
-keep track of usage inside of the gpiolib subsystem. This is why the API
-below exists.
+Since GPIO and irq_chip are orthogonal, we can get conflicts between different
+use cases. For example, a GPIO line used for IRQs should be an input line;
+it does not make sense to fire interrupts on an output GPIO.
+If there is competition inside the subsystem over which side is using a
+resource (a certain GPIO line and register, for example), it needs to deny
+certain operations and keep track of usage inside of the gpiolib subsystem.
-Locking IRQ usage
------------------
Input GPIOs can be used as IRQ signals. When this happens, a driver is requested
to mark the GPIO as being used as an IRQ::
@@ -380,9 +470,15 @@ assigned.
Disabling and enabling IRQs
---------------------------
+
+In some (fringe) use cases, a driver may be using a GPIO line as input for IRQs,
+but occasionally switch that line over to drive output and then back to being
+an input with interrupts again. This happens on things like CEC (Consumer
+Electronics Control).
+
When a GPIO is used as an IRQ signal, then gpiolib also needs to know if
the IRQ is enabled or disabled. In order to inform gpiolib about this,
-a driver should call::
+the irqchip driver should call::
void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset)
@@ -398,40 +494,45 @@ irqchip.
When using the gpiolib irqchip helpers, these callbacks are automatically
assigned.
+
Real-Time compliance for GPIO IRQ chips
---------------------------------------
-Any provider of irqchips needs to be carefully tailored to support Real Time
+Any provider of irqchips needs to be carefully tailored to support Real-Time
preemption. It is desirable that all irqchips in the GPIO subsystem keep this
in mind and do the proper testing to assure they are real time-enabled.
-So, pay attention on above " RT_FULL:" notes, please.
-The following is a checklist to follow when preparing a driver for real
-time-compliance:
-- ensure spinlock_t is not used as part irq_chip implementation;
-- ensure that sleepable APIs are not used as part irq_chip implementation.
+So pay attention to the above realtime considerations in the documentation.
+
+The following is a checklist to follow when preparing a driver for real-time
+compliance:
+
+- ensure spinlock_t is not used as part of the irq_chip implementation
+- ensure that sleepable APIs are not used as part of the irq_chip
+ implementation
If sleepable APIs have to be used, these can be done from the .irq_bus_lock()
- and .irq_bus_unlock() callbacks;
+ and .irq_bus_unlock() callbacks
- Chained GPIO irqchips: ensure spinlock_t or any sleepable APIs are not used
- from chained IRQ handler;
+ from the chained IRQ handler
- Generic chained GPIO irqchips: take care about generic_handle_irq() calls and
- apply corresponding W/A;
-- Chained GPIO irqchips: get rid of chained IRQ handler and use generic irq
- handler if possible :)
-- regmap_mmio: Sry, but you are in trouble :( if MMIO regmap is used as for
- GPIO IRQ chip implementation;
-- Test your driver with the appropriate in-kernel real time test cases for both
- level and edge IRQs.
+ apply corresponding work-around
+- Chained GPIO irqchips: get rid of the chained IRQ handler and use generic irq
+ handler if possible
+- regmap_mmio: it is possible to disable internal locking in regmap by setting
+ .disable_locking and handling the locking in the GPIO driver
+- Test your driver with the appropriate in-kernel real-time test cases for both
+ level and edge IRQs
+
+* [1] http://www.spinics.net/lists/linux-omap/msg120425.html
+* [2] https://lkml.org/lkml/2015/9/25/494
+* [3] https://lkml.org/lkml/2015/9/25/495
Requesting self-owned GPIO pins
--------------------------------
+===============================
Sometimes it is useful to allow a GPIO chip driver to request its own GPIO
-descriptors through the gpiolib API. Using gpio_request() for this purpose
-does not help since it pins the module to the kernel forever (it calls
-try_module_get()). A GPIO driver can use the following functions instead
-to request and free descriptors without being pinned to the kernel forever::
+descriptors through the gpiolib API. A GPIO driver can use the following
+functions to request and free descriptors::
struct gpio_desc *gpiochip_request_own_desc(struct gpio_desc *desc,
u16 hwnum,
@@ -446,7 +547,3 @@ gpiochip_free_own_desc().
These functions must be used with care since they do not affect module use
count. Do not use the functions to request gpio descriptors not owned by the
calling driver.
-
-* [1] http://www.spinics.net/lists/linux-omap/msg120425.html
-* [2] https://lkml.org/lkml/2015/9/25/494
-* [3] https://lkml.org/lkml/2015/9/25/495
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index c0b600ed9961..d26308af6036 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -56,6 +56,8 @@ available subsections can be seen below.
slimbus
soundwire/index
fpga/index
+ acpi/index
+ generic-counter
.. only:: subproject and html
diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
index 6d85b5a2598d..44deb52beeb4 100644
--- a/Documentation/driver-api/pci/p2pdma.rst
+++ b/Documentation/driver-api/pci/p2pdma.rst
@@ -132,10 +132,6 @@ precludes passing these pages to userspace.
P2P memory is also technically IO memory but should never have any side
effects behind it. Thus, the order of loads and stores should not be important
and ioreadX(), iowriteX() and friends should not be necessary.
-However, as the memory is not cache coherent, if access ever needs to
-be protected by a spinlock then :c:func:`mmiowb()` must be used before
-unlocking the lock. (See ACQUIRES VS I/O ACCESSES in
-Documentation/memory-barriers.txt)
P2P DMA Support Library
diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst
index 5842ab621a58..006cf6db40c6 100644
--- a/Documentation/driver-api/pm/cpuidle.rst
+++ b/Documentation/driver-api/pm/cpuidle.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor <cpuidle_governor>`
.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device <cpuidle_device>`
.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver <cpuidle_driver>`
@@ -7,9 +10,9 @@
CPU Idle Time Management
========================
-::
+:Copyright: |copy| 2019 Intel Corporation
- Copyright (c) 2019 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
CPU Idle Time Management Subsystem
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 090c151aa86b..30835683616a 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
.. |struct dev_pm_ops| replace:: :c:type:`struct dev_pm_ops <dev_pm_ops>`
.. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain <dev_pm_domain>`
.. |struct bus_type| replace:: :c:type:`struct bus_type <bus_type>`
@@ -12,11 +15,12 @@
Device Power Management Basics
==============================
-::
+:Copyright: |copy| 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+:Copyright: |copy| 2010 Alan Stern <stern@rowland.harvard.edu>
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2010-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- Copyright (c) 2010 Alan Stern <stern@rowland.harvard.edu>
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Most of the code in Linux is device drivers, so most of the Linux power
management (PM) code is also driver-specific. Most drivers will do very
diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst
index 56975c6bc789..c2a9ef8d115c 100644
--- a/Documentation/driver-api/pm/index.rst
+++ b/Documentation/driver-api/pm/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
===============================
CPU and Device Power Management
===============================
diff --git a/Documentation/driver-api/pm/notifiers.rst b/Documentation/driver-api/pm/notifiers.rst
index 62f860026992..186435c43b77 100644
--- a/Documentation/driver-api/pm/notifiers.rst
+++ b/Documentation/driver-api/pm/notifiers.rst
@@ -1,10 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
=============================
Suspend/Hibernation Notifiers
=============================
-::
+:Copyright: |copy| 2016 Intel Corporation
+
+:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
There are some operations that subsystems or drivers may want to carry out
before hibernation/suspend or after restore/resume, but they require the system
diff --git a/Documentation/driver-api/pm/types.rst b/Documentation/driver-api/pm/types.rst
index 3ebdecc54104..73a231caf764 100644
--- a/Documentation/driver-api/pm/types.rst
+++ b/Documentation/driver-api/pm/types.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
==================================
Device Power Management Data Types
==================================
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 26a6064503fd..5351bd2f34a8 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -201,7 +201,7 @@ Bus implements below API for allocate a stream which needs to be called once
per stream. From ASoC DPCM framework, this stream state maybe linked to
.startup() operation.
- .. code-block:: c
+.. code-block:: c
int sdw_alloc_stream(char * stream_name);
@@ -228,7 +228,7 @@ the respective Master(s) and Slave(s) associated with stream. These APIs can
only be invoked once by respective Master(s) and Slave(s). From ASoC DPCM
framework, this stream state is linked to .hw_params() operation.
- .. code-block:: c
+.. code-block:: c
int sdw_stream_add_master(struct sdw_bus * bus,
struct sdw_stream_config * stream_config,
@@ -274,7 +274,7 @@ Bus implements below API for PREPARE state which needs to be called once per
stream. From ASoC DPCM framework, this stream state is linked to
.prepare() operation.
- .. code-block:: c
+.. code-block:: c
int sdw_prepare_stream(struct sdw_stream_runtime * stream);
@@ -304,7 +304,7 @@ Bus implements below API for ENABLE state which needs to be called once per
stream. From ASoC DPCM framework, this stream state is linked to
.trigger() start operation.
- .. code-block:: c
+.. code-block:: c
int sdw_enable_stream(struct sdw_stream_runtime * stream);
@@ -332,7 +332,7 @@ Bus implements below API for DISABLED state which needs to be called once
per stream. From ASoC DPCM framework, this stream state is linked to
.trigger() stop operation.
- .. code-block:: c
+.. code-block:: c
int sdw_disable_stream(struct sdw_stream_runtime * stream);
@@ -357,7 +357,7 @@ Bus implements below API for DEPREPARED state which needs to be called once
per stream. From ASoC DPCM framework, this stream state is linked to
.trigger() stop operation.
- .. code-block:: c
+.. code-block:: c
int sdw_deprepare_stream(struct sdw_stream_runtime * stream);
@@ -382,7 +382,7 @@ Bus implements below APIs for RELEASE state which needs to be called by
all the Master(s) and Slave(s) associated with stream. From ASoC DPCM
framework, this stream state is linked to .hw_free() operation.
- .. code-block:: c
+.. code-block:: c
int sdw_stream_remove_master(struct sdw_bus * bus,
struct sdw_stream_runtime * stream);
@@ -395,7 +395,7 @@ stream assigned as part of ALLOCATED state.
In .shutdown() the data structure maintaining stream state are freed up.
- .. code-block:: c
+.. code-block:: c
void sdw_release_stream(struct sdw_stream_runtime * stream);
diff --git a/Documentation/driver-api/usb/power-management.rst b/Documentation/driver-api/usb/power-management.rst
index 79beb807996b..4a74cf6f2797 100644
--- a/Documentation/driver-api/usb/power-management.rst
+++ b/Documentation/driver-api/usb/power-management.rst
@@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
then the interface is considered to be idle, and the kernel may
autosuspend the device.
-Drivers need not be concerned about balancing changes to the usage
-counter; the USB core will undo any remaining "get"s when a driver
-is unbound from its interface. As a corollary, drivers must not call
-any of the ``usb_autopm_*`` functions after their ``disconnect``
-routine has returned.
+Drivers must be careful to balance their overall changes to the usage
+counter. Unbalanced "get"s will remain in effect when a driver is
+unbound from its interface, preventing the device from going into
+runtime suspend should the interface be bound to a driver again. On
+the other hand, drivers are allowed to achieve this balance by calling
+the ``usb_autopm_*`` functions even after their ``disconnect`` routine
+has returned -- say from within a work-queue routine -- provided they
+retain an active reference to the interface (via ``usb_get_intf`` and
+``usb_put_intf``).
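+
+For illustration, a driver that still needs to drop an autosuspend
+reference after disconnect could defer the "put" to a work-queue routine
+while holding a reference on the interface. The my_* names below are
+hypothetical, and the work item is assumed to have been initialized with
+INIT_WORK() during probe::
+
+  struct my_data {
+          struct usb_interface *intf;
+          struct work_struct work;
+  };
+
+  static void my_deferred_put(struct work_struct *work)
+  {
+          struct my_data *data = container_of(work, struct my_data, work);
+
+          /* balances an earlier usb_autopm_get_interface(); legal even
+           * after disconnect() because we hold a reference on intf */
+          usb_autopm_put_interface(data->intf);
+          usb_put_intf(data->intf);
+          kfree(data);
+  }
+
+  static void my_disconnect(struct usb_interface *intf)
+  {
+          struct my_data *data = usb_get_intfdata(intf);
+
+          data->intf = usb_get_intf(intf);
+          schedule_work(&data->work);
+  }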
Drivers using the async routines are responsible for their own
synchronization and mutual exclusion.
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index d7d6f01e81ff..69c7fa7f616c 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -256,6 +256,9 @@ DMA
dmam_pool_create()
dmam_pool_destroy()
+DRM
+ devm_drm_dev_init()
+
GPIO
devm_gpiod_get()
devm_gpiod_get_index()
@@ -268,6 +271,9 @@ GPIO
devm_gpio_request_one()
devm_gpio_free()
+I2C
+ devm_i2c_new_dummy_device()
+
IIO
devm_iio_device_alloc()
devm_iio_device_free()
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 3e6b8f07d5d0..38c40cfa0578 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | ok |
| openrisc: | TODO |
- | parisc: | TODO |
+ | parisc: | ok |
| powerpc: | ok |
| riscv: | TODO |
| s390: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index f4e45bd58fea..e68239b5d2f0 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
- | parisc: | TODO |
+ | parisc: | ok |
| powerpc: | ok |
| riscv: | ok |
| s390: | ok |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 1d5651ef11f8..f17131b328e5 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -21,7 +21,7 @@
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
- | parisc: | TODO |
+ | parisc: | ok |
| powerpc: | ok |
| riscv: | TODO |
| s390: | ok |
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index 2855dfe2464d..1d46da165b75 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -15,7 +15,7 @@
| h8300: | ok |
| hexagon: | ok |
| ia64: | ok |
- | m68k: | TODO |
+ | m68k: | ok |
| microblaze: | ok |
| mips: | ok |
| nds32: | ok |
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index efea228ccd8a..dac435575384 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -52,7 +52,7 @@ prototypes:
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
int (*readlink) (struct dentry *, char __user *,int);
- const char *(*get_link) (struct dentry *, struct inode *, void **);
+ const char *(*get_link) (struct dentry *, struct inode *, struct delayed_call *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int, unsigned int);
int (*get_acl)(struct inode *, int);
@@ -118,6 +118,7 @@ set: exclusive
--------------------------- super_operations ---------------------------
prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*free_inode)(struct inode *);
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
@@ -139,6 +140,7 @@ locking rules:
All may block [not true, see below]
s_umount
alloc_inode:
+free_inode: called from RCU callback
destroy_inode:
dirty_inode:
write_inode:
diff --git a/Documentation/filesystems/autofs-mount-control.txt b/Documentation/filesystems/autofs-mount-control.txt
index 45edad6933cc..acc02fc57993 100644
--- a/Documentation/filesystems/autofs-mount-control.txt
+++ b/Documentation/filesystems/autofs-mount-control.txt
@@ -354,8 +354,10 @@ this ioctl is called until no further expire candidates are found.
The call requires an initialized struct autofs_dev_ioctl with the
ioctlfd field set to the descriptor obtained from the open call. In
-addition an immediate expire, independent of the mount timeout, can be
-requested by setting the how field of struct args_expire to 1. If no
+addition an immediate expire that's independent of the mount timeout,
+and a forced expire that's independent of whether the mount is busy,
+can be requested by setting the how field of struct args_expire to
+AUTOFS_EXP_IMMEDIATE or AUTOFS_EXP_FORCED, respectively. If no
expire candidates can be found the ioctl returns -1 with errno set to
EAGAIN.
diff --git a/Documentation/filesystems/autofs.txt b/Documentation/filesystems/autofs.txt
index 373ad25852d3..3af38c7fd26d 100644
--- a/Documentation/filesystems/autofs.txt
+++ b/Documentation/filesystems/autofs.txt
@@ -116,7 +116,7 @@ that purpose there is another flag.
**DCACHE_MANAGE_TRANSIT**
If a dentry has DCACHE_MANAGE_TRANSIT set then two very different but
-related behaviors are invoked, both using the `d_op->d_manage()`
+related behaviours are invoked, both using the `d_op->d_manage()`
dentry operation.
Firstly, before checking to see if any filesystem is mounted on the
@@ -193,8 +193,8 @@ VFS remain in RCU-walk mode, but can only tell it to get out of
RCU-walk mode by returning `-ECHILD`.
So `d_manage()`, when called with `rcu_walk` set, should either return
--ECHILD if there is any reason to believe it is unsafe to end the
-mounted filesystem, and otherwise should return 0.
+-ECHILD if there is any reason to believe it is unsafe to enter the
+mounted filesystem, otherwise it should return 0.
autofs will return `-ECHILD` if an expiry of the filesystem has been
initiated or is being considered, otherwise it returns 0.
@@ -210,7 +210,7 @@ mounts that were created by `d_automount()` returning a filesystem to be
mounted. As autofs doesn't return such a filesystem but leaves the
mounting to the automount daemon, it must involve the automount daemon
in unmounting as well. This also means that autofs has more control
-of expiry.
+over expiry.
The VFS also supports "expiry" of mounts using the MNT_EXPIRE flag to
the `umount` system call. Unmounting with MNT_EXPIRE will fail unless
@@ -225,7 +225,7 @@ unmount any filesystems mounted on the autofs filesystem or remove any
symbolic links or empty directories any time it likes. If the unmount
or removal is successful the filesystem will be returned to the state
it was before the mount or creation, so that any access of the name
-will trigger normal auto-mount processing. In particlar, `rmdir` and
+will trigger normal auto-mount processing. In particular, `rmdir` and
`unlink` do not leave negative entries in the dcache as a normal
filesystem would, so an attempt to access a recently-removed object is
passed to autofs for handling.
@@ -240,11 +240,18 @@ Normally the daemon only wants to remove entries which haven't been
used for a while. For this purpose autofs maintains a "`last_used`"
time stamp on each directory or symlink. For symlinks it genuinely
does record the last time the symlink was "used" or followed to find
-out where it points to. For directories the field is a slight
-misnomer. It actually records the last time that autofs checked if
-the directory or one of its descendents was busy and found that it
-was. This is just as useful and doesn't require updating the field so
-often.
+out where it points to. For directories the field is used slightly
+differently. The field is updated at mount time and during expire
+checks if it is found to be in use (i.e. an open file descriptor or
+process working directory) and during path walks. The update done
+during path walks prevents frequent expire and immediate mount of
+frequently accessed automounts. But in the case where a GUI continually
+accesses or an application frequently scans an autofs directory tree,
+there can be an accumulation of mounts that aren't actually being
+used. To cater for this case the "`strictexpire`" autofs mount option
+can be used to avoid the "`last_used`" update on path walk, thereby
+preventing this apparent inability to expire mounts that aren't
+really in use.
The daemon is able to ask autofs if anything is due to be expired,
using an `ioctl` as discussed later. For a *direct* mount, autofs
@@ -255,8 +262,12 @@ up.
There is an option with indirect mounts to consider each of the leaves
that has been mounted on instead of considering the top-level names.
-This is intended for compatability with version 4 of autofs and should
-be considered as deprecated.
+This was originally intended for compatibility with version 4 of autofs
+and should be considered as deprecated for Sun Format automount maps.
+However, it may be used again for amd format mount maps (which are
+generally indirect maps) because the amd automounter allows for the
+setting of an expire timeout for individual mounts. But there are
+some difficulties in making the needed changes for this.
When autofs considers a directory it checks the `last_used` time and
compares it with the "timeout" value set when the filesystem was
@@ -273,7 +284,7 @@ mounts. If it finds something in the root directory to expire it will
return the name of that thing. Once a name has been returned the
automount daemon needs to unmount any filesystems mounted below the
name normally. As described above, this is unsafe for non-toplevel
-mounts in a version-5 autofs. For this reason the current `automountd`
+mounts in a version-5 autofs. For this reason the current `automount(8)`
does not use this ioctl.
The second mechanism uses either the **AUTOFS_DEV_IOCTL_EXPIRE_CMD** or
@@ -345,7 +356,7 @@ The `wait_queue_token` is a unique number which can identify a
particular request to be acknowledged. When a message is sent over
the pipe the affected dentry is marked as either "active" or
"expiring" and other accesses to it block until the message is
-acknowledged using one of the ioctls below and the relevant
+acknowledged using one of the ioctls below with the relevant
`wait_queue_token`.
Communicating with autofs: root directory ioctls
@@ -367,15 +378,14 @@ The available ioctl commands are:
This mode is also entered if a write to the pipe fails.
- **AUTOFS_IOC_PROTOVER**: This returns the protocol version in use.
- **AUTOFS_IOC_PROTOSUBVER**: Returns the protocol sub-version which
- is really a version number for the implementation. It is
- currently 2.
+ is really a version number for the implementation.
- **AUTOFS_IOC_SETTIMEOUT**: This passes a pointer to an unsigned
long. The value is used to set the timeout for expiry, and
the current timeout value is stored back through the pointer.
- **AUTOFS_IOC_ASKUMOUNT**: Returns, in the pointed-to `int`, 1 if
the filesystem could be unmounted. This is only a hint as
the situation could change at any instant. This call can be
- use to avoid a more expensive full unmount attempt.
+ used to avoid a more expensive full unmount attempt.
- **AUTOFS_IOC_EXPIRE**: as described above, this asks if there is
anything suitable to expire. A pointer to a packet:
@@ -400,6 +410,11 @@ The available ioctl commands are:
**AUTOFS_EXP_IMMEDIATE** causes `last_used` time to be ignored
and objects are expired if they are not in use.
+ **AUTOFS_EXP_FORCED** causes the in use status to be ignored
+ and objects are expired even if they are in use. This assumes
+ that the daemon has requested this because it is capable of
+ performing the umount.
+
**AUTOFS_EXP_LEAVES** will select a leaf rather than a top-level
name to expire. This is only safe when *maxproto* is 4.
@@ -415,7 +430,7 @@ which can be used to communicate directly with the autofs filesystem.
It requires CAP_SYS_ADMIN for access.
The `ioctl`s that can be used on this device are described in a separate
-document `autofs-mount-control.txt`, and are summarized briefly here.
+document `autofs-mount-control.txt`, and are summarised briefly here.
Each ioctl is passed a pointer to an `autofs_dev_ioctl` structure:
struct autofs_dev_ioctl {
@@ -511,6 +526,21 @@ directories.
Catatonic mode can only be left via the
**AUTOFS_DEV_IOCTL_OPENMOUNT_CMD** ioctl on the `/dev/autofs`.
+The "ignore" mount option
+-------------------------
+
+The "ignore" mount option can be used to provide a generic indicator
+to applications that the mount entry should be ignored when displaying
+mount information.
+
+In other OSes that provide autofs and that provide a mount list to user
+space based on the kernel mount list, a no-op mount option ("ignore" is
+the one used on the most common OSes) is allowed so that autofs file
+system users can optionally use it.
+
+This is intended to be used by user space programs to exclude autofs
+mounts from consideration when reading the mounts list.
+
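A mount-list consumer could honour the new option along these lines (a
sketch, not part of the patch; it assumes "ignore" shows up in the options
field that getmntent() reports for the autofs entry)::

    #include <mntent.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            FILE *f = setmntent("/proc/self/mounts", "r");
            struct mntent *m;

            if (!f)
                    return 1;
            while ((m = getmntent(f)) != NULL) {
                    if (!strcmp(m->mnt_type, "autofs") && hasmntopt(m, "ignore"))
                            continue;   /* skip autofs entries marked "ignore" */
                    printf("%s on %s type %s\n",
                           m->mnt_fsname, m->mnt_dir, m->mnt_type);
            }
            endmntent(f);
            return 0;
    }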
autofs, name spaces, and shared mounts
--------------------------------------
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index 4f45f71149cb..4a0a9c3f4af6 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -31,10 +31,10 @@ This call, if successful, will make a directory called name underneath the
indicated parent directory. If parent is NULL, the directory will be
created in the debugfs root. On success, the return value is a struct
dentry pointer which can be used to create files in the directory (and to
-clean it up at the end). A NULL return value indicates that something went
-wrong. If ERR_PTR(-ENODEV) is returned, that is an indication that the
-kernel has been built without debugfs support and none of the functions
-described below will work.
+clean it up at the end). An ERR_PTR(-ERROR) return value indicates that
+something went wrong. If ERR_PTR(-ENODEV) is returned, that is an
+indication that the kernel has been built without debugfs support and none
+of the functions described below will work.
The most general way to create a file within a debugfs directory is with:
@@ -48,8 +48,9 @@ should hold the file, data will be stored in the i_private field of the
resulting inode structure, and fops is a set of file operations which
implement the file's behavior. At a minimum, the read() and/or write()
operations should be provided; others can be included as needed. Again,
-the return value will be a dentry pointer to the created file, NULL for
-error, or ERR_PTR(-ENODEV) if debugfs support is missing.
+the return value will be a dentry pointer to the created file,
+ERR_PTR(-ERROR) on error, or ERR_PTR(-ENODEV) if debugfs support is
+missing.
To create a file with an initial size, the following function can be used
instead:
@@ -214,7 +215,8 @@ can be removed with:
void debugfs_remove(struct dentry *dentry);
-The dentry value can be NULL, in which case nothing will be removed.
+The dentry value can be NULL or an error value, in which case nothing will
+be removed.
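A caller written against these conventions might look roughly like the
sketch below (the directory and file names, the data pointer and the
file_operations are placeholders, not an existing driver)::

    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *mydrv_dir;

    static int mydrv_debugfs_init(void *data, const struct file_operations *fops)
    {
            struct dentry *f;

            mydrv_dir = debugfs_create_dir("mydrv", NULL);
            if (IS_ERR(mydrv_dir))      /* ERR_PTR(-ENODEV): debugfs not built in */
                    return PTR_ERR(mydrv_dir);

            f = debugfs_create_file("state", 0444, mydrv_dir, data, fops);
            if (IS_ERR(f)) {
                    debugfs_remove(mydrv_dir);  /* NULL/error values are ignored */
                    return PTR_ERR(f);
            }
            return 0;
    }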
Once upon a time, debugfs users were required to remember the dentry
pointer for every debugfs file they created so that all files could be
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index cf43bc4dbf31..3bd1148d8bb6 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -638,3 +638,38 @@ in your dentry operations instead.
inode to d_splice_alias() will also do the right thing (equivalent of
d_add(dentry, NULL); return NULL;), so that kind of special case
also doesn't need a separate treatment.
+--
+[strongly recommended]
+ take the RCU-delayed parts of ->destroy_inode() into a new method -
+ ->free_inode(). If ->destroy_inode() becomes empty - all the better,
+ just get rid of it. Synchronous work (e.g. the stuff that can't
+ be done from an RCU callback, or any WARN_ON() where we want the
+ stack trace) *might* be movable to ->evict_inode(); however,
+ that goes only for the things that are not needed to balance something
+ done by ->alloc_inode(). IOW, if it's cleaning up the stuff that
+ might have accumulated over the life of in-core inode, ->evict_inode()
+ might be a fit.
+
+ Rules for inode destruction:
+ * if ->destroy_inode() is non-NULL, it gets called
+ * if ->free_inode() is non-NULL, it gets scheduled by call_rcu()
+ * combination of NULL ->destroy_inode and NULL ->free_inode is
+ treated as NULL/free_inode_nonrcu, to preserve the compatibility.
+
+ Note that the callback (be it via ->free_inode() or explicit call_rcu()
+ in ->destroy_inode()) is *NOT* ordered wrt superblock destruction;
+ as a matter of fact, the superblock and all associated structures
+ might be already gone. The filesystem driver is guaranteed to be still
+ there, but that's it. Freeing memory in the callback is fine; doing
+ more than that is possible, but requires a lot of care and is best
+ avoided.
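A converted filesystem might end up with something like the sketch below
(the myfs_* names, MYFS_I() and the inode cache are placeholders, not code
from this patch)::

    static void myfs_free_inode(struct inode *inode)
    {
            /* RCU-delayed: free only what ->alloc_inode() allocated */
            kmem_cache_free(myfs_inode_cachep, MYFS_I(inode));
    }

    static const struct super_operations myfs_sops = {
            .alloc_inode    = myfs_alloc_inode,
            .free_inode     = myfs_free_inode,
            .evict_inode    = myfs_evict_inode,  /* synchronous teardown, if any */
    };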
+--
+[mandatory]
+ DCACHE_RCUACCESS is gone; having an RCU delay on dentry freeing is the
+ default. DCACHE_NORCU opts out, and only d_alloc_pseudo() has any
+ business doing so.
+--
+[mandatory]
+ d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
+ very suspect (and won't work in modules). Such uses are very likely to
+ be misspelled d_alloc_anon().
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 761c6fd24a53..57fc576b1f3e 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -3,8 +3,6 @@
Original author: Richard Gooch <rgooch@atnf.csiro.au>
- Last updated on June 24, 2007.
-
Copyright (C) 1999 Richard Gooch
Copyright (C) 2005 Pekka Enberg
@@ -465,6 +463,12 @@ otherwise noted.
argument. If request can't be handled without leaving RCU mode,
have it return ERR_PTR(-ECHILD).
+ If the filesystem stores the symlink target in ->i_link, the
+ VFS may use it directly without calling ->get_link(); however,
+ ->get_link() must still be provided. ->i_link must not be
+ freed until after an RCU grace period. Writing to ->i_link
+ post-iget() time requires a 'release' memory barrier.
+
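For instance, a filesystem that keeps the target string alive for the
life of the inode might need no more than the sketch below ('target' is a
placeholder; simple_get_link() from fs/libfs.c just returns ->i_link)::

    /* at symlink-inode creation time */
    inode->i_link = target;   /* must not be freed before an RCU grace period */
    inode->i_op = &simple_symlink_inode_operations; /* .get_link = simple_get_link */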
readlink: this is now just an override for use by readlink(2) for the
cases when ->get_link uses nd_jump_link() or object is not in
fact a symlink. Normally filesystems should only implement
diff --git a/Documentation/acpi/DSD-properties-rules.txt b/Documentation/firmware-guide/acpi/DSD-properties-rules.rst
index 3e4862bdad98..4306f29b6103 100644
--- a/Documentation/acpi/DSD-properties-rules.txt
+++ b/Documentation/firmware-guide/acpi/DSD-properties-rules.rst
@@ -1,8 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================
_DSD Device Properties Usage Rules
-----------------------------------
+==================================
Properties, Property Sets and Property Subsets
-----------------------------------------------
+==============================================
The _DSD (Device Specific Data) configuration object, introduced in ACPI 5.1,
allows any type of device configuration data to be provided via the ACPI
@@ -18,7 +21,7 @@ specific type) associated with it.
In the ACPI _DSD context it is an element of the sub-package following the
generic Device Properties UUID in the _DSD return package as specified in the
-Device Properties UUID definition document [1].
+Device Properties UUID definition document [1]_.
It also may be regarded as the definition of a key and the associated data type
that can be returned by _DSD in the Device Properties UUID sub-package for a
@@ -33,14 +36,14 @@ Property subsets are nested collections of properties. Each of them is
associated with an additional key (name) allowing the subset to be referred
to as a whole (and to be treated as a separate entity). The canonical
representation of property subsets is via the mechanism specified in the
-Hierarchical Properties Extension UUID definition document [2].
+Hierarchical Properties Extension UUID definition document [2]_.
Property sets may be hierarchical. That is, a property set may contain
multiple property subsets that each may contain property subsets of its
own and so on.
General Validity Rule for Property Sets
----------------------------------------
+=======================================
Valid property sets must follow the guidance given by the Device Properties UUID
definition document [1].
@@ -73,7 +76,7 @@ suitable for the ACPI environment and consequently they cannot belong to a valid
property set.
Property Sets and Device Tree Bindings
---------------------------------------
+======================================
It often is useful to make _DSD return property sets that follow Device Tree
bindings.
@@ -91,7 +94,7 @@ expected to automatically work in the ACPI environment regardless of their
contents.
References
-----------
+==========
-[1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
-[2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
+.. [1] http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf
+.. [2] http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf
diff --git a/Documentation/acpi/acpi-lid.txt b/Documentation/firmware-guide/acpi/acpi-lid.rst
index effe7af3a5af..874ce0ed340d 100644
--- a/Documentation/acpi/acpi-lid.txt
+++ b/Documentation/firmware-guide/acpi/acpi-lid.rst
@@ -1,13 +1,18 @@
-Special Usage Model of the ACPI Control Method Lid Device
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
-Copyright (C) 2016, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+=========================================================
+Special Usage Model of the ACPI Control Method Lid Device
+=========================================================
+:Copyright: |copy| 2016, Intel Corporation
-Abstract:
+:Author: Lv Zheng <lv.zheng@intel.com>
-Platforms containing lids convey lid state (open/close) to OSPMs using a
-control method lid device. To implement this, the AML tables issue
+Abstract
+========
+Platforms containing lids convey lid state (open/close) to OSPMs
+using a control method lid device. To implement this, the AML tables issue
Notify(lid_device, 0x80) to notify the OSPMs whenever the lid state has
changed. The _LID control method for the lid device must be implemented to
report the "current" state of the lid as either "opened" or "closed".
@@ -19,7 +24,8 @@ taken into account. This document describes the restrictions and the
expectations of the Linux ACPI lid device driver.
-1. Restrictions of the returning value of the _LID control method
+Restrictions of the returning value of the _LID control method
+==============================================================
The _LID control method is described to return the "current" lid state.
However the word of "current" has ambiguity, some buggy AML tables return
@@ -30,7 +36,8 @@ initial returning value. When the AML tables implement this control method
with a cached value, the initial returning value is likely not reliable.
There are platforms that always return "closed" as the initial lid state.
-2. Restrictions of the lid state change notifications
+Restrictions of the lid state change notifications
+==================================================
There are buggy AML tables never notifying when the lid device state is
changed to "opened". Thus the "opened" notification is not guaranteed. But
@@ -39,18 +46,22 @@ state is changed to "closed". The "closed" notification is normally used to
trigger some system power saving operations on Windows. Since it is fully
tested, it is reliable from all AML tables.
-3. Expections for the userspace users of the ACPI lid device driver
+Expectations for the userspace users of the ACPI lid device driver
+===================================================================
The ACPI button driver exports the lid state to the userspace via the
-following file:
+following file::
+
/proc/acpi/button/lid/LID0/state
+
This file actually calls the _LID control method described above. And given
the previous explanation, it is not reliable enough on some platforms. So
it is advised that the userspace program not rely solely on this file
to determine the actual lid state.
The ACPI button driver emits the following input event to the userspace:
- SW_LID
+ * SW_LID
+
The ACPI lid device driver is implemented to try to deliver the platform
triggered events to the userspace. However, given the fact that the buggy
firmware cannot make sure "opened"/"closed" events are paired, the ACPI
@@ -59,20 +70,25 @@ button driver uses the following 3 modes in order not to trigger issues.
If the userspace hasn't been prepared to ignore the unreliable "opened"
events and the unreliable initial state notification, Linux users can use
the following kernel parameters to handle the possible issues:
+
A. button.lid_init_state=method:
When this option is specified, the ACPI button driver reports the
initial lid state using the returning value of the _LID control method
and whether the "opened"/"closed" events are paired fully relies on the
firmware implementation.
+
This option can be used to fix some platforms where the returning value
of the _LID control method is reliable but the initial lid state
notification is missing.
+
This option is the default behavior during the period the userspace
isn't ready to handle the buggy AML tables.
+
B. button.lid_init_state=open:
When this option is specified, the ACPI button driver always reports the
initial lid state as "opened" and whether the "opened"/"closed" events
are paired fully relies on the firmware implementation.
+
This may fix some platforms where the returning value of the _LID
control method is not reliable and the initial lid state notification is
missing.
@@ -80,6 +96,7 @@ B. button.lid_init_state=open:
If the userspace has been prepared to ignore the unreliable "opened" events
and the unreliable initial state notification, Linux users should always
use the following kernel parameter:
+
C. button.lid_init_state=ignore:
When this option is specified, the ACPI button driver never reports the
initial lid state and there is a compensation mechanism implemented to
@@ -89,6 +106,7 @@ C. button.lid_init_state=ignore:
notifications can be delivered to the userspace when the lid actually
opens, given that some AML tables do not send "opened" notifications
reliably.
+
In this mode, if everything is correctly implemented by the platform
firmware, the old userspace programs should still work. Otherwise, the
new userspace programs are required to work with the ACPI button driver.
diff --git a/Documentation/firmware-guide/acpi/aml-debugger.rst b/Documentation/firmware-guide/acpi/aml-debugger.rst
new file mode 100644
index 000000000000..a889d43bc6c5
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/aml-debugger.rst
@@ -0,0 +1,75 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+================
+The AML Debugger
+================
+
+:Copyright: |copy| 2016, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
+
+
+This document describes the usage of the AML debugger embedded in the Linux
+kernel.
+
+1. Build the debugger
+=====================
+
+The following kernel configuration items are required to enable the AML
+debugger interface from the Linux kernel::
+
+ CONFIG_ACPI_DEBUGGER=y
+ CONFIG_ACPI_DEBUGGER_USER=m
+
+The userspace utilities can be built from the kernel source tree using
+the following commands::
+
+ $ cd tools
+ $ make acpi
+
+The resultant userspace tool binary is then located at::
+
+ tools/power/acpi/acpidbg
+
+It can be installed to system directories by running "make install" (as a
+sufficiently privileged user).
+
+2. Start the userspace debugger interface
+=========================================
+
+After booting the kernel with the debugger built-in, the debugger can be
+started by using the following commands::
+
+ # mount -t debugfs none /sys/kernel/debug
+ # modprobe acpi_dbg
+ # tools/power/acpi/acpidbg
+
+That spawns the interactive AML debugger environment where you can execute
+debugger commands.
+
+The commands are documented in the "ACPICA Overview and Programmer Reference"
+that can be downloaded from
+
+https://acpica.org/documentation
+
+The detailed debugger commands reference is located in Chapter 12 "ACPICA
+Debugger Reference". The "help" command can be used for a quick reference.
+
+3. Stop the userspace debugger interface
+========================================
+
+The interactive debugger interface can be closed by pressing Ctrl+C or using
+the "quit" or "exit" commands. When finished, unload the module with::
+
+ # rmmod acpi_dbg
+
+The module unloading may fail if there is an acpidbg instance running.
+
+4. Run the debugger in a script
+===============================
+
+It may be useful to run the AML debugger in a test script. "acpidbg" supports
+this in a special "batch" mode. For example, the following command outputs
+the entire ACPI namespace::
+
+ # acpidbg -b "namespace"
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/firmware-guide/acpi/apei/einj.rst
index e550c8b98139..e588bccf5158 100644
--- a/Documentation/acpi/apei/einj.txt
+++ b/Documentation/firmware-guide/acpi/apei/einj.rst
@@ -1,13 +1,16 @@
- APEI Error INJection
- ~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+APEI Error INJection
+====================
EINJ provides a hardware error injection mechanism. It is very useful
for debugging and testing APEI and RAS features in general.
You need to check whether your BIOS supports EINJ first. For that, look
-for early boot messages similar to this one:
+for early boot messages similar to this one::
-ACPI: EINJ 0x000000007370A000 000150 (v01 INTEL 00000001 INTL 00000001)
+ ACPI: EINJ 0x000000007370A000 000150 (v01 INTEL 00000001 INTL 00000001)
which shows that the BIOS is exposing an EINJ table - it is the
mechanism through which the injection is done.
@@ -23,11 +26,11 @@ order to see the APEI,EINJ,... functionality supported and exposed by
the BIOS menu.
To use EINJ, make sure the following are options enabled in your kernel
-configuration:
+configuration::
-CONFIG_DEBUG_FS
-CONFIG_ACPI_APEI
-CONFIG_ACPI_APEI_EINJ
+ CONFIG_DEBUG_FS
+ CONFIG_ACPI_APEI
+ CONFIG_ACPI_APEI_EINJ
The EINJ user interface is in <debugfs mount point>/apei/einj.
@@ -37,20 +40,22 @@ The following files belong to it:
This file shows which error types are supported:
+ ================ ===================================
Error Type Value Error Description
- ================ =================
- 0x00000001 Processor Correctable
- 0x00000002 Processor Uncorrectable non-fatal
- 0x00000004 Processor Uncorrectable fatal
- 0x00000008 Memory Correctable
- 0x00000010 Memory Uncorrectable non-fatal
- 0x00000020 Memory Uncorrectable fatal
- 0x00000040 PCI Express Correctable
- 0x00000080 PCI Express Uncorrectable fatal
- 0x00000100 PCI Express Uncorrectable non-fatal
- 0x00000200 Platform Correctable
- 0x00000400 Platform Uncorrectable non-fatal
- 0x00000800 Platform Uncorrectable fatal
+ ================ ===================================
+ 0x00000001 Processor Correctable
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000004 Processor Uncorrectable fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ 0x00000020 Memory Uncorrectable fatal
+ 0x00000040 PCI Express Correctable
+ 0x00000080 PCI Express Uncorrectable fatal
+ 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000200 Platform Correctable
+ 0x00000400 Platform Uncorrectable non-fatal
+ 0x00000800 Platform Uncorrectable fatal
+ ================ ===================================
The format of the file contents is as above, except that only
the available error types are present.
@@ -73,9 +78,12 @@ The following files belong to it:
injection. Value is a bitmask as specified in ACPI5.0 spec for the
SET_ERROR_TYPE_WITH_ADDRESS data structure:
- Bit 0 - Processor APIC field valid (see param3 below).
- Bit 1 - Memory address and mask valid (param1 and param2).
- Bit 2 - PCIe (seg,bus,dev,fn) valid (see param4 below).
+ Bit 0
+ Processor APIC field valid (see param3 below).
+ Bit 1
+ Memory address and mask valid (param1 and param2).
+ Bit 2
+ PCIe (seg,bus,dev,fn) valid (see param4 below).
If set to zero, legacy behavior is mimicked where the type of
injection specifies just one bit set, and param1 is multiplexed.
@@ -121,7 +129,7 @@ BIOS versions based on the ACPI 5.0 specification have more control over
the target of the injection. For processor-related errors (type 0x1, 0x2
and 0x4), you can set flags to 0x3 (param3 for bit 0, and param1 and
param2 for bit 1) so that you have more information added to the error
-signature being injected. The actual data passed is this:
+signature being injected. The actual data passed is this::
memory_address = param1;
memory_address_range = param2;
@@ -131,7 +139,7 @@ signature being injected. The actual data passed is this:
For memory errors (type 0x8, 0x10 and 0x20) the address is set using
param1 with a mask in param2 (0x0 is equivalent to all ones). For PCI
express errors (type 0x40, 0x80 and 0x100) the segment, bus, device and
-function are specified using param1:
+function are specified using param1::
31 24 23 16 15 11 10 8 7 0
+-------------------------------------------------+
documentation for details (and expect changes to this API if vendors'
creativity in using this feature expands beyond our expectations).
-An error injection example:
+An error injection example::
-# cd /sys/kernel/debug/apei/einj
-# cat available_error_type # See which errors can be injected
-0x00000002 Processor Uncorrectable non-fatal
-0x00000008 Memory Correctable
-0x00000010 Memory Uncorrectable non-fatal
-# echo 0x12345000 > param1 # Set memory address for injection
-# echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
-# echo 0x8 > error_type # Choose correctable memory error
-# echo 1 > error_inject # Inject now
+ # cd /sys/kernel/debug/apei/einj
+ # cat available_error_type # See which errors can be injected
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ # echo 0x12345000 > param1 # Set memory address for injection
+ # echo $((-1 << 12)) > param2 # Mask 0xfffffffffffff000 - anywhere in this page
+ # echo 0x8 > error_type # Choose correctable memory error
+ # echo 1 > error_inject # Inject now
-You should see something like this in dmesg:
+You should see something like this in dmesg::
-[22715.830801] EDAC sbridge MC3: HANDLING MCE MEMORY ERROR
-[22715.834759] EDAC sbridge MC3: CPU 0: Machine Check Event: 0 Bank 7: 8c00004000010090
-[22715.834759] EDAC sbridge MC3: TSC 0
-[22715.834759] EDAC sbridge MC3: ADDR 12345000 EDAC sbridge MC3: MISC 144780c86
-[22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0
-[22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0)
+ [22715.830801] EDAC sbridge MC3: HANDLING MCE MEMORY ERROR
+ [22715.834759] EDAC sbridge MC3: CPU 0: Machine Check Event: 0 Bank 7: 8c00004000010090
+ [22715.834759] EDAC sbridge MC3: TSC 0
+ [22715.834759] EDAC sbridge MC3: ADDR 12345000 EDAC sbridge MC3: MISC 144780c86
+ [22715.834759] EDAC sbridge MC3: PROCESSOR 0:306e7 TIME 1422553404 SOCKET 0 APIC 0
+ [22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 page:0x12345 offset:0x0 grain:32 syndrome:0x0 - area:DRAM err_code:0001:0090 socket:0 channel_mask:1 rank:0)
For more information about EINJ, please refer to ACPI specification
version 4.0, section 17.5 and ACPI 5.0, section 18.6.
diff --git a/Documentation/firmware-guide/acpi/apei/output_format.rst b/Documentation/firmware-guide/acpi/apei/output_format.rst
new file mode 100644
index 000000000000..c2e7ebddb529
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/apei/output_format.rst
@@ -0,0 +1,150 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
+APEI output format
+==================
+
+APEI uses printk as the hardware error reporting interface; the output
+format is as follows::
+
+ <error record> :=
+ APEI generic hardware error status
+ severity: <integer>, <severity string>
+ section: <integer>, severity: <integer>, <severity string>
+ flags: <integer>
+ <section flags strings>
+ fru_id: <uuid string>
+ fru_text: <string>
+ section_type: <section type string>
+ <section data>
+
+ <severity string>* := recoverable | fatal | corrected | info
+
+ <section flags strings># :=
+ [primary][, containment warning][, reset][, threshold exceeded]\
+ [, resource not accessible][, latent error]
+
+ <section type string> := generic processor error | memory error | \
+ PCIe error | unknown, <uuid string>
+
+ <section data> :=
+ <generic processor section data> | <memory section data> | \
+ <pcie section data> | <null>
+
+ <generic processor section data> :=
+ [processor_type: <integer>, <proc type string>]
+ [processor_isa: <integer>, <proc isa string>]
+ [error_type: <integer>
+ <proc error type strings>]
+ [operation: <integer>, <proc operation string>]
+ [flags: <integer>
+ <proc flags strings>]
+ [level: <integer>]
+ [version_info: <integer>]
+ [processor_id: <integer>]
+ [target_address: <integer>]
+ [requestor_id: <integer>]
+ [responder_id: <integer>]
+ [IP: <integer>]
+
+ <proc type string>* := IA32/X64 | IA64
+
+ <proc isa string>* := IA32 | IA64 | X64
+
+ <processor error type strings># :=
+ [cache error][, TLB error][, bus error][, micro-architectural error]
+
+ <proc operation string>* := unknown or generic | data read | data write | \
+ instruction execution
+
+ <proc flags strings># :=
+ [restartable][, precise IP][, overflow][, corrected]
+
+ <memory section data> :=
+ [error_status: <integer>]
+ [physical_address: <integer>]
+ [physical_address_mask: <integer>]
+ [node: <integer>]
+ [card: <integer>]
+ [module: <integer>]
+ [bank: <integer>]
+ [device: <integer>]
+ [row: <integer>]
+ [column: <integer>]
+ [bit_position: <integer>]
+ [requestor_id: <integer>]
+ [responder_id: <integer>]
+ [target_id: <integer>]
+ [error_type: <integer>, <mem error type string>]
+
+ <mem error type string>* :=
+ unknown | no error | single-bit ECC | multi-bit ECC | \
+ single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \
+ target abort | parity error | watchdog timeout | invalid address | \
+ mirror Broken | memory sparing | scrub corrected error | \
+ scrub uncorrected error
+
+ <pcie section data> :=
+ [port_type: <integer>, <pcie port type string>]
+ [version: <integer>.<integer>]
+ [command: <integer>, status: <integer>]
+ [device_id: <integer>:<integer>:<integer>.<integer>
+ slot: <integer>
+ secondary_bus: <integer>
+ vendor_id: <integer>, device_id: <integer>
+ class_code: <integer>]
+ [serial number: <integer>, <integer>]
+ [bridge: secondary_status: <integer>, control: <integer>]
+ [aer_status: <integer>, aer_mask: <integer>
+ <aer status string>
+ [aer_uncor_severity: <integer>]
+ aer_layer=<aer layer string>, aer_agent=<aer agent string>
+ aer_tlp_header: <integer> <integer> <integer> <integer>]
+
+ <pcie port type string>* := PCIe end point | legacy PCI end point | \
+ unknown | unknown | root port | upstream switch port | \
+ downstream switch port | PCIe to PCI/PCI-X bridge | \
+ PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \
+ root complex event collector
+
+ if section severity is fatal or recoverable
+ <aer status string># :=
+ unknown | unknown | unknown | unknown | Data Link Protocol | \
+ unknown | unknown | unknown | unknown | unknown | unknown | unknown | \
+ Poisoned TLP | Flow Control Protocol | Completion Timeout | \
+ Completer Abort | Unexpected Completion | Receiver Overflow | \
+ Malformed TLP | ECRC | Unsupported Request
+ else
+ <aer status string># :=
+ Receiver Error | unknown | unknown | unknown | unknown | unknown | \
+ Bad TLP | Bad DLLP | RELAY_NUM Rollover | unknown | unknown | unknown | \
+ Replay Timer Timeout | Advisory Non-Fatal
+ fi
+
+ <aer layer string> :=
+ Physical Layer | Data Link Layer | Transaction Layer
+
+ <aer agent string> :=
+ Receiver ID | Requester ID | Completer ID | Transmitter ID
+
+Where [] designates that the corresponding content is optional.
+
+All <field string> descriptions with * have the following format::
+
+ field: <integer>, <field string>
+
+Where the value of <integer> should be the position of "string" in the
+<field string> description. Otherwise, <field string> will be "unknown".
+
+All <field strings> descriptions with # have the following format::
+
+ field: <integer>
+ <field strings>
+
+Where each string in <field strings> corresponds to one set bit of
+<integer>. The bit position is the position of "string" in the <field
+strings> description.
+
+For a more detailed explanation of every field, please refer to the UEFI
+specification, version 2.3 or later, Appendix N: Common
+Platform Error Record.
diff --git a/Documentation/acpi/debug.txt b/Documentation/firmware-guide/acpi/debug.rst
index 65bf47c46b6d..1a152dd1d765 100644
--- a/Documentation/acpi/debug.txt
+++ b/Documentation/firmware-guide/acpi/debug.rst
@@ -1,18 +1,21 @@
- ACPI Debug Output
+.. SPDX-License-Identifier: GPL-2.0
+=================
+ACPI Debug Output
+=================
The ACPI CA, the Linux ACPI core, and some ACPI drivers can generate debug
output. This document describes how to use this facility.
Compile-time configuration
---------------------------
+==========================
ACPI debug output is globally enabled by CONFIG_ACPI_DEBUG. If this config
option is turned off, the debug messages are not even built into the
kernel.
Boot- and run-time configuration
---------------------------------
+================================
When CONFIG_ACPI_DEBUG=y, you can select the component and level of messages
you're interested in. At boot-time, use the acpi.debug_layer and
@@ -21,7 +24,7 @@ debug_layer and debug_level files in /sys/module/acpi/parameters/ to control
the debug messages.
debug_layer (component)
------------------------
+=======================
The "debug_layer" is a mask that selects components of interest, e.g., a
specific driver or part of the ACPI interpreter. To build the debug_layer
@@ -33,7 +36,7 @@ to /sys/module/acpi/parameters/debug_layer.
The possible components are defined in include/acpi/acoutput.h and
include/acpi/acpi_drivers.h. Reading /sys/module/acpi/parameters/debug_layer
-shows the supported mask values, currently these:
+shows the supported mask values, currently these::
ACPI_UTILITIES 0x00000001
ACPI_HARDWARE 0x00000002
@@ -65,7 +68,7 @@ shows the supported mask values, currently these:
ACPI_PROCESSOR_COMPONENT 0x20000000
debug_level
------------
+===========
The "debug_level" is a mask that selects different types of messages, e.g.,
those related to initialization, method execution, informational messages, etc.
@@ -81,7 +84,7 @@ to /sys/module/acpi/parameters/debug_level.
The possible levels are defined in include/acpi/acoutput.h. Reading
/sys/module/acpi/parameters/debug_level shows the supported mask values,
-currently these:
+currently these::
ACPI_LV_INIT 0x00000001
ACPI_LV_DEBUG_OBJECT 0x00000002
@@ -113,9 +116,9 @@ currently these:
ACPI_LV_EVENTS 0x80000000
Examples
---------
+========
-For example, drivers/acpi/bus.c contains this:
+For example, drivers/acpi/bus.c contains this::
#define _COMPONENT ACPI_BUS_COMPONENT
...
@@ -127,22 +130,22 @@ statement uses ACPI_DB_INFO, which is macro based on the ACPI_LV_INFO
definition.)
Enable all AML "Debug" output (stores to the Debug object while interpreting
-AML) during boot:
+AML) during boot::
acpi.debug_layer=0xffffffff acpi.debug_level=0x2
-Enable PCI and PCI interrupt routing debug messages:
+Enable PCI and PCI interrupt routing debug messages::
acpi.debug_layer=0x400000 acpi.debug_level=0x4
-Enable all ACPI hardware-related messages:
+Enable all ACPI hardware-related messages::
acpi.debug_layer=0x2 acpi.debug_level=0xffffffff
-Enable all ACPI_DB_INFO messages after boot:
+Enable all ACPI_DB_INFO messages after boot::
# echo 0x4 > /sys/module/acpi/parameters/debug_level
-Show all valid component values:
+Show all valid component values::
# cat /sys/module/acpi/parameters/debug_layer
diff --git a/Documentation/acpi/dsd/data-node-references.txt b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
index c3871565c8cf..febccbc5689d 100644
--- a/Documentation/acpi/dsd/data-node-references.txt
+++ b/Documentation/firmware-guide/acpi/dsd/data-node-references.rst
@@ -1,9 +1,12 @@
-Copyright (C) 2018 Intel Corporation
-Author: Sakari Ailus <sakari.ailus@linux.intel.com>
-
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+===================================
Referencing hierarchical data nodes
------------------------------------
+===================================
+
+:Copyright: |copy| 2018 Intel Corporation
+:Author: Sakari Ailus <sakari.ailus@linux.intel.com>
ACPI in general allows referring to device objects in the tree only.
Hierarchical data extension nodes may not be referred to directly, hence this
@@ -28,21 +31,22 @@ extension key.
Example
--------
+=======
- In the ASL snippet below, the "reference" _DSD property [2] contains a
- device object reference to DEV0 and under that device object, a
- hierarchical data extension key "node@1" referring to the NOD1 object
- and lastly, a hierarchical data extension key "anothernode" referring to
- the ANOD object which is also the final target node of the reference.
+In the ASL snippet below, the "reference" _DSD property [2] contains a
+device object reference to DEV0 and under that device object, a
+hierarchical data extension key "node@1" referring to the NOD1 object
+and lastly, a hierarchical data extension key "anothernode" referring to
+the ANOD object which is also the final target node of the reference.
+::
Device (DEV0)
{
Name (_DSD, Package () {
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "node@0", NOD0 },
- Package () { "node@1", NOD1 },
+ Package () { "node@0", "NOD0" },
+ Package () { "node@1", "NOD1" },
}
})
Name (NOD0, Package() {
@@ -54,7 +58,7 @@ Example
Name (NOD1, Package() {
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "anothernode", ANOD },
+ Package () { "anothernode", "ANOD" },
}
})
Name (ANOD, Package() {
@@ -75,15 +79,15 @@ Example
})
}
-Please also see a graph example in graph.txt .
+Please also see a graph example in :doc:`graph`.
References
-----------
+==========
[1] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
- referenced 2018-07-17.
+<http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+referenced 2018-07-17.
[2] Device Properties UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
- referenced 2016-10-04.
+<http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+referenced 2016-10-04.
diff --git a/Documentation/acpi/dsd/graph.txt b/Documentation/firmware-guide/acpi/dsd/graph.rst
index b9ce910781dc..1a6ce7afba5e 100644
--- a/Documentation/acpi/dsd/graph.txt
+++ b/Documentation/firmware-guide/acpi/dsd/graph.rst
@@ -1,8 +1,11 @@
-Graphs
+.. SPDX-License-Identifier: GPL-2.0
+======
+Graphs
+======
_DSD
-----
+====
_DSD (Device Specific Data) [7] is a predefined ACPI device
configuration object that can be used to convey information on
@@ -30,7 +33,7 @@ hierarchical data extension array on each depth.
Ports and endpoints
--------------------
+===================
The port and endpoint concepts are very similar to those in Devicetree
[3]. A port represents an interface in a device, and an endpoint
@@ -38,20 +41,20 @@ represents a connection to that interface.
All port nodes are located under the device's "_DSD" node in the hierarchical
data extension tree. The data extension related to each port node must begin
-with "port" and must be followed by the "@" character and the number of the port
-as its key. The target object it refers to should be called "PRTX", where "X" is
-the number of the port. An example of such a package would be:
+with "port" and must be followed by the "@" character and the number of the
+port as its key. The target object it refers to should be called "PRTX", where
+"X" is the number of the port. An example of such a package would be::
- Package() { "port@4", PRT4 }
+ Package() { "port@4", "PRT4" }
Further on, endpoints are located under the port nodes. The hierarchical
data extension key of the endpoint nodes must begin with
"endpoint" and must be followed by the "@" character and the number of the
endpoint. The object it refers to should be called "EPXY", where "X" is the
number of the port and "Y" is the number of the endpoint. An example of such a
-package would be:
+package would be::
- Package() { "endpoint@0", EP40 }
+ Package() { "endpoint@0", "EP40" }
Each port node contains a property extension key "port", the value of which is
the number of the port. Each endpoint is similarly numbered with a property
@@ -62,20 +65,20 @@ of that port shall be zero. Similarly, if a port may only have a single
endpoint, the number of that endpoint shall be zero.
The endpoint reference uses property extension with "remote-endpoint" property
-name followed by a reference in the same package. Such references consist of the
+name followed by a reference in the same package. Such references consist of
the remote device reference, the first package entry of the port data extension
reference under the device and finally the first package entry of the endpoint
-data extension reference under the port. Individual references thus appear as:
+data extension reference under the port. Individual references thus appear as::
Package() { device, "port@X", "endpoint@Y" }
-In the above example, "X" is the number of the port and "Y" is the number of the
-endpoint.
+In the above example, "X" is the number of the port and "Y" is the number of
+the endpoint.
References to endpoints must always be made both ways, to the
remote endpoint and back from the referred remote endpoint node.
-A simple example of this is show below:
+A simple example of this is shown below::
Scope (\_SB.PCI0.I2C2)
{
@@ -88,7 +91,7 @@ A simple example of this is show below:
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port@0", PRT0 },
+ Package () { "port@0", "PRT0" },
}
})
Name (PRT0, Package() {
@@ -98,7 +101,7 @@ A simple example of this is show below:
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint@0", EP00 },
+ Package () { "endpoint@0", "EP00" },
}
})
Name (EP00, Package() {
@@ -118,7 +121,7 @@ A simple example of this is show below:
Name (_DSD, Package () {
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "port@4", PRT4 },
+ Package () { "port@4", "PRT4" },
}
})
@@ -129,7 +132,7 @@ A simple example of this is show below:
},
ToUUID("dbb8e3e6-5886-4ba6-8795-1319f52a966b"),
Package () {
- Package () { "endpoint@0", EP40 },
+ Package () { "endpoint@0", "EP40" },
}
})
@@ -148,27 +151,27 @@ the "ISP" device and vice versa.
References
-----------
+==========
[1] _DSD (Device Specific Data) Implementation Guide.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-implementation-guide-toplevel-1_1.htm>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-implementation-guide-toplevel-1_1.htm,
referenced 2016-10-03.
-[2] Devicetree. <URL:http://www.devicetree.org>, referenced 2016-10-03.
+[2] Devicetree. http://www.devicetree.org, referenced 2016-10-03.
[3] Documentation/devicetree/bindings/graph.txt
[4] Device Properties UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-device-properties-UUID.pdf,
referenced 2016-10-04.
[5] Hierarchical Data Extension UUID For _DSD.
- <URL:http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf>,
+ http://www.uefi.org/sites/default/files/resources/_DSD-hierarchical-data-extension-UUID-v1.1.pdf,
referenced 2016-10-04.
[6] Advanced Configuration and Power Interface Specification.
- <URL:http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf>,
+ http://www.uefi.org/sites/default/files/resources/ACPI_6_1.pdf,
referenced 2016-10-04.
[7] _DSD Device Properties Usage Rules.
- Documentation/acpi/DSD-properties-rules.txt
+ :doc:`../DSD-properties-rules`
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/firmware-guide/acpi/enumeration.rst
index 7bcf9c3d9fbe..6b32b7be8c85 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/firmware-guide/acpi/enumeration.rst
@@ -1,5 +1,9 @@
-ACPI based device enumeration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+ACPI Based Device Enumeration
+=============================
+
ACPI 5 introduced a set of new resources (UartSerialBus, I2cSerialBus,
SpiSerialBus, GpioIo and GpioInt) which can be used in enumerating slave
devices behind serial bus controllers.
@@ -11,12 +15,12 @@ that are accessed through memory-mapped registers.
In order to support this and re-use the existing drivers as much as
possible we decided to do the following:
- o Devices that have no bus connector resource are represented as
- platform devices.
+ - Devices that have no bus connector resource are represented as
+ platform devices.
- o Devices behind real busses where there is a connector resource
- are represented as struct spi_device or struct i2c_device
- (standard UARTs are not busses so there is no struct uart_device).
+ - Devices behind real busses where there is a connector resource
+ are represented as struct spi_device or struct i2c_device
+ (standard UARTs are not busses so there is no struct uart_device).
As both ACPI and Device Tree represent a tree of devices (and their
resources) this implementation follows the Device Tree way as much as
@@ -31,7 +35,8 @@ enumerated from ACPI namespace. This handle can be used to extract other
device-specific configuration. There is an example of this below.
Platform bus support
-~~~~~~~~~~~~~~~~~~~~
+====================
+
Since we are using platform devices to represent devices that are not
connected to any physical bus we only need to implement a platform driver
for the device and add supported ACPI IDs. If this same IP-block is used on
@@ -39,7 +44,7 @@ some other non-ACPI platform, the driver might work out of the box or needs
some minor changes.
Adding ACPI support for an existing driver should be pretty
-straightforward. Here is the simplest example:
+straightforward. Here is the simplest example::
#ifdef CONFIG_ACPI
static const struct acpi_device_id mydrv_acpi_match[] = {
@@ -61,12 +66,13 @@ configuring GPIOs it can get its ACPI handle and extract this information
from ACPI tables.
DMA support
-~~~~~~~~~~~
+===========
+
DMA controllers enumerated via ACPI should be registered in the system to
provide generic access to their resources. For example, a driver that would
like to be accessible to slave devices via generic API call
dma_request_slave_channel() must register itself at the end of the probe
-function like this:
+function like this::
err = devm_acpi_dma_controller_register(dev, xlate_func, dw);
/* Handle the error if it's not a case of !CONFIG_ACPI */
@@ -74,7 +80,7 @@ function like this:
and implement custom xlate function if needed (usually acpi_dma_simple_xlate()
is enough) which converts the FixedDMA resource provided by struct
acpi_dma_spec into the corresponding DMA channel. A piece of code for that case
-could look like:
+could look like::
#ifdef CONFIG_ACPI
struct filter_args {
@@ -114,7 +120,7 @@ provided by struct acpi_dma.
Clients must call dma_request_slave_channel() with the string parameter that
corresponds to a specific FixedDMA resource. By default "tx" means the first
entry of the FixedDMA resource array, "rx" means the second entry. The table
-below shows a layout:
+below shows a layout::
Device (I2C0)
{
@@ -138,12 +144,13 @@ acpi_dma_request_slave_chan_by_index() directly and therefore choose the
specific FixedDMA resource by its index.
SPI serial bus support
-~~~~~~~~~~~~~~~~~~~~~~
+======================
+
Slave devices behind SPI bus have SpiSerialBus resource attached to them.
This is extracted automatically by the SPI core and the slave devices are
enumerated once spi_register_master() is called by the bus driver.
-Here is what the ACPI namespace for a SPI slave might look like:
+Here is what the ACPI namespace for a SPI slave might look like::
Device (EEP0)
{
@@ -163,7 +170,7 @@ Here is what the ACPI namespace for a SPI slave might look like:
The SPI device drivers only need to add ACPI IDs in a similar way as with
the platform device drivers. Below is an example where we add ACPI support
-to at25 SPI eeprom driver (this is meant for the above ACPI snippet):
+to at25 SPI eeprom driver (this is meant for the above ACPI snippet)::
#ifdef CONFIG_ACPI
static const struct acpi_device_id at25_acpi_match[] = {
@@ -182,7 +189,7 @@ to at25 SPI eeprom driver (this is meant for the above ACPI snippet):
Note that this driver actually needs more information like page size of the
eeprom etc. but at the time of writing there is no standard way of
-passing those. One idea is to return this in _DSM method like:
+passing those. One idea is to return this in _DSM method like::
Device (EEP0)
{
@@ -202,7 +209,7 @@ passing those. One idea is to return this in _DSM method like:
}
Then the at25 SPI driver can get this configuration by calling _DSM on its
-ACPI handle like:
+ACPI handle like::
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -220,14 +227,15 @@ ACPI handle like:
kfree(output.pointer);
I2C serial bus support
-~~~~~~~~~~~~~~~~~~~~~~
+======================
+
The slaves behind I2C bus controller only need to add the ACPI IDs like
with the platform and SPI drivers. The I2C core automatically enumerates
any slave devices behind the controller device once the adapter is
registered.
Below is an example of how to add ACPI support to the existing mpu3050
-input driver:
+input driver::
#ifdef CONFIG_ACPI
static const struct acpi_device_id mpu3050_acpi_match[] = {
@@ -251,56 +259,57 @@ input driver:
};
GPIO support
-~~~~~~~~~~~~
+============
+
ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
and GpioInt. These resources can be used to pass GPIO numbers used by
the device to the driver. ACPI 5.1 extended this with _DSD (Device
Specific Data) which made it possible to name the GPIOs among other things.
-For example:
+For example::
-Device (DEV)
-{
- Method (_CRS, 0, NotSerialized)
+ Device (DEV)
{
- Name (SBUF, ResourceTemplate()
+ Method (_CRS, 0, NotSerialized)
{
- ...
- // Used to power on/off the device
- GpioIo (Exclusive, PullDefault, 0x0000, 0x0000,
- IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0",
- 0x00, ResourceConsumer,,)
+ Name (SBUF, ResourceTemplate()
{
- // Pin List
- 0x0055
- }
+ ...
+ // Used to power on/off the device
+ GpioIo (Exclusive, PullDefault, 0x0000, 0x0000,
+ IoRestrictionOutputOnly, "\\_SB.PCI0.GPI0",
+ 0x00, ResourceConsumer,,)
+ {
+ // Pin List
+ 0x0055
+ }
+
+ // Interrupt for the device
+ GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone,
+ 0x0000, "\\_SB.PCI0.GPI0", 0x00, ResourceConsumer,,)
+ {
+ // Pin list
+ 0x0058
+ }
+
+ ...
- // Interrupt for the device
- GpioInt (Edge, ActiveHigh, ExclusiveAndWake, PullNone,
- 0x0000, "\\_SB.PCI0.GPI0", 0x00, ResourceConsumer,,)
- {
- // Pin list
- 0x0058
}
- ...
-
+ Return (SBUF)
}
- Return (SBUF)
- }
-
- // ACPI 5.1 _DSD used for naming the GPIOs
- Name (_DSD, Package ()
- {
- ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
- Package ()
+ // ACPI 5.1 _DSD used for naming the GPIOs
+ Name (_DSD, Package ()
{
- Package () {"power-gpios", Package() {^DEV, 0, 0, 0 }},
- Package () {"irq-gpios", Package() {^DEV, 1, 0, 0 }},
- }
- })
- ...
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package ()
+ {
+ Package () {"power-gpios", Package() {^DEV, 0, 0, 0 }},
+ Package () {"irq-gpios", Package() {^DEV, 1, 0, 0 }},
+ }
+ })
+ ...
These GPIO numbers are controller relative and path "\\_SB.PCI0.GPI0"
specifies the path to the controller. In order to use these GPIOs in Linux
@@ -310,7 +319,7 @@ There is a standard GPIO API for that and is documented in
Documentation/gpio/.
In the above example we can get the corresponding two GPIO descriptors with
-a code like this:
+a code like this::
#include <linux/gpio/consumer.h>
...
@@ -334,21 +343,22 @@ See Documentation/acpi/gpio-properties.txt for more information about the
_DSD binding related to GPIOs.
MFD devices
-~~~~~~~~~~~
+===========
+
The MFD devices register their children as platform devices. For the child
devices there needs to be an ACPI handle that they can use to reference
parts of the ACPI namespace that relate to them. In the Linux MFD subsystem
we provide two ways:
- o The children share the parent ACPI handle.
- o The MFD cell can specify the ACPI id of the device.
+ - The children share the parent ACPI handle.
+ - The MFD cell can specify the ACPI id of the device.
For the first case, the MFD drivers do not need to do anything. The
resulting child platform device will have its ACPI_COMPANION() set to point
to the parent device.
If the ACPI namespace has a device that we can match using an ACPI id or ACPI
-adr, the cell should be set like:
+adr, the cell should be set like::
static struct mfd_cell_acpi_match my_subdevice_cell_acpi_match = {
.pnpid = "XYZ0001",
@@ -366,7 +376,8 @@ the MFD device and if found, that ACPI companion device is bound to the
resulting child platform device.
Device Tree namespace link device ID
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+====================================
+
The Device Tree protocol uses device identification based on the "compatible"
property whose value is a string or an array of strings recognized as device
identifiers by drivers and the driver core. The set of all those strings may be
@@ -410,6 +421,32 @@ Specifically, the device IDs returned by _HID and preceding PRP0001 in the _CID
return package will be checked first. Also in that case the bus type the device
will be enumerated to depends on the device ID returned by _HID.
+For example, the following ACPI sample might be used to enumerate an lm75-type
+I2C temperature sensor and match it to the driver using the Device Tree
+namespace link::
+
+ Device (TMP0)
+ {
+ Name (_HID, "PRP0001")
+ Name (_DSD, Package() {
+ ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
+ Package () {
+ Package (2) { "compatible", "ti,tmp75" },
+ }
+ })
+ Method (_CRS, 0, Serialized)
+ {
+ Name (SBUF, ResourceTemplate ()
+ {
+ I2cSerialBusV2 (0x48, ControllerInitiated,
+ 400000, AddressingMode7Bit,
+ "\\_SB.PCI0.I2C1", 0x00,
+ ResourceConsumer, , Exclusive,)
+ })
+ Return (SBUF)
+ }
+ }
+
It is valid to define device objects with a _HID returning PRP0001 and without
the "compatible" property in the _DSD or a _CID as long as one of their
ancestors provides a _DSD with a valid "compatible" property. Such device
@@ -423,4 +460,4 @@ the _DSD of the device object itself or the _DSD of its ancestor in the
Otherwise, the _DSD itself is regarded as invalid and therefore the "compatible"
property returned by it is meaningless.
-Refer to DSD-properties-rules.txt for more information.
+Refer to :doc:`DSD-properties-rules` for more information.
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/firmware-guide/acpi/gpio-properties.rst
index 88c65cb5bf0a..bb6d74f23ee0 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/firmware-guide/acpi/gpio-properties.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
_DSD Device Properties Related to GPIO
---------------------------------------
+======================================
With the release of ACPI 5.1, the _DSD configuration object finally
allows names to be given to GPIOs (and other things as well) returned
@@ -8,7 +11,7 @@ the corresponding GPIO, which is pretty error prone (it depends on
the _CRS output ordering, for example).
With _DSD we can now query GPIOs using a name instead of an integer
-index, like the ASL example below shows:
+index, like the ASL example below shows::
// Bluetooth device with reset and shutdown GPIOs
Device (BTH)
@@ -34,15 +37,19 @@ index, like the ASL example below shows:
})
}
-The format of the supported GPIO property is:
+The format of the supported GPIO property is::
Package () { "name", Package () { ref, index, pin, active_low }}
- ref - The device that has _CRS containing GpioIo()/GpioInt() resources,
- typically this is the device itself (BTH in our case).
- index - Index of the GpioIo()/GpioInt() resource in _CRS starting from zero.
- pin - Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
- active_low - If 1 the GPIO is marked as active_low.
+ref
+ The device that has _CRS containing GpioIo()/GpioInt() resources,
+ typically this is the device itself (BTH in our case).
+index
+ Index of the GpioIo()/GpioInt() resource in _CRS starting from zero.
+pin
+ Pin in the GpioIo()/GpioInt() resource. Typically this is zero.
+active_low
+ If 1 the GPIO is marked as active_low.
Since ACPI GpioIo() resource does not have a field saying whether it is
active low or high, the "active_low" argument can be used here. Setting
@@ -55,7 +62,7 @@ It is possible to leave holes in the array of GPIOs. This is useful in
cases like with SPI host controllers where some chip selects may be
implemented as GPIOs and some as native signals. For example a SPI host
controller can have chip selects 0 and 2 implemented as GPIOs and 1 as
-native:
+native::
Package () {
"cs-gpios",
@@ -67,7 +74,7 @@ native:
}
Other supported properties
---------------------------
+==========================
Following Device Tree compatible device properties are also supported by
_DSD device properties for GPIO controllers:
@@ -78,7 +85,7 @@ _DSD device properties for GPIO controllers:
- input
- line-name
-Example:
+Example::
Name (_DSD, Package () {
// _DSD Hierarchical Properties Extension UUID
@@ -100,7 +107,7 @@ Example:
- gpio-line-names
-Example:
+Example::
Package () {
"gpio-line-names",
@@ -114,7 +121,7 @@ See Documentation/devicetree/bindings/gpio/gpio.txt for more information
about these properties.
ACPI GPIO Mappings Provided by Drivers
---------------------------------------
+======================================
There are systems in which the ACPI tables do not contain _DSD but provide _CRS
with GpioIo()/GpioInt() resources and device drivers still need to work with
@@ -139,16 +146,16 @@ line in that resource starting from zero, and the active-low flag for that line,
respectively, in analogy with the _DSD GPIO property format specified above.
For the example Bluetooth device discussed previously the data structures in
-question would look like this:
+question would look like this::
-static const struct acpi_gpio_params reset_gpio = { 1, 1, false };
-static const struct acpi_gpio_params shutdown_gpio = { 0, 0, false };
+ static const struct acpi_gpio_params reset_gpio = { 1, 1, false };
+ static const struct acpi_gpio_params shutdown_gpio = { 0, 0, false };
-static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
- { "reset-gpios", &reset_gpio, 1 },
- { "shutdown-gpios", &shutdown_gpio, 1 },
- { },
-};
+ static const struct acpi_gpio_mapping bluetooth_acpi_gpios[] = {
+ { "reset-gpios", &reset_gpio, 1 },
+ { "shutdown-gpios", &shutdown_gpio, 1 },
+ { },
+ };
Next, the mapping table needs to be passed as the second argument to
acpi_dev_add_driver_gpios() that will register it with the ACPI device object
@@ -158,12 +165,12 @@ calling acpi_dev_remove_driver_gpios() on the ACPI device object where that
table was previously registered.
Using the _CRS fallback
------------------------
+=======================
If a device does not have _DSD or the driver does not create ACPI GPIO
mapping, the Linux GPIO framework refuses to return any GPIOs. This is
because the driver does not know what it actually gets. For example if we
-have a device like below:
+have a device like below::
Device (BTH)
{
@@ -177,7 +184,7 @@ have a device like below:
})
}
-The driver might expect to get the right GPIO when it does:
+The driver might expect to get the right GPIO when it does::
desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -193,22 +200,25 @@ the ACPI GPIO mapping tables are hardly linked to ACPI ID and certain
objects, as listed in the above chapter, of the device in question.
Getting GPIO descriptor
------------------------
+=======================
+
+There are two main approaches to getting a GPIO resource from ACPI::
-There are two main approaches to get GPIO resource from ACPI:
- desc = gpiod_get(dev, connection_id, flags);
- desc = gpiod_get_index(dev, connection_id, index, flags);
+ desc = gpiod_get(dev, connection_id, flags);
+ desc = gpiod_get_index(dev, connection_id, index, flags);
We may consider two different cases here, i.e. when connection ID is
provided and otherwise.
-Case 1:
- desc = gpiod_get(dev, "non-null-connection-id", flags);
- desc = gpiod_get_index(dev, "non-null-connection-id", index, flags);
+Case 1::
+
+ desc = gpiod_get(dev, "non-null-connection-id", flags);
+ desc = gpiod_get_index(dev, "non-null-connection-id", index, flags);
+
+Case 2::
-Case 2:
- desc = gpiod_get(dev, NULL, flags);
- desc = gpiod_get_index(dev, NULL, index, flags);
+ desc = gpiod_get(dev, NULL, flags);
+ desc = gpiod_get_index(dev, NULL, index, flags);
Case 1 assumes that the corresponding ACPI device description must have
defined device properties and will prevent getting any GPIO resources
diff --git a/Documentation/firmware-guide/acpi/i2c-muxes.rst b/Documentation/firmware-guide/acpi/i2c-muxes.rst
new file mode 100644
index 000000000000..3a8997ccd7c4
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/i2c-muxes.rst
@@ -0,0 +1,61 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+ACPI I2C Muxes
+==============
+
+Describing an I2C device hierarchy that includes I2C muxes requires an ACPI
+Device () scope per mux channel.
+
+Consider this topology::
+
+ +------+ +------+
+ | SMB1 |-->| MUX0 |--CH00--> i2c client A (0x50)
+ | | | 0x70 |--CH01--> i2c client B (0x50)
+ +------+ +------+
+
+which corresponds to the following ASL::
+
+ Device (SMB1)
+ {
+ Name (_HID, ...)
+ Device (MUX0)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x70, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^SMB1", 0x00,
+ ResourceConsumer,,)
+            })
+
+ Device (CH00)
+ {
+ Name (_ADR, 0)
+
+ Device (CLIA)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^CH00", 0x00,
+ ResourceConsumer,,)
+                })
+ }
+ }
+
+ Device (CH01)
+ {
+ Name (_ADR, 1)
+
+ Device (CLIB)
+ {
+ Name (_HID, ...)
+ Name (_CRS, ResourceTemplate () {
+ I2cSerialBus (0x50, ControllerInitiated, I2C_SPEED,
+ AddressingMode7Bit, "^CH01", 0x00,
+ ResourceConsumer,,)
+                })
+ }
+ }
+ }
+ }
diff --git a/Documentation/firmware-guide/acpi/index.rst b/Documentation/firmware-guide/acpi/index.rst
new file mode 100644
index 000000000000..ae609eec4679
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+ACPI Support
+============
+
+.. toctree::
+ :maxdepth: 1
+
+ namespace
+ dsd/graph
+ dsd/data-node-references
+ enumeration
+ osi
+ method-customizing
+ method-tracing
+ DSD-properties-rules
+ debug
+ aml-debugger
+ apei/output_format
+ apei/einj
+ gpio-properties
+ i2c-muxes
+ acpi-lid
+ lpit
+ video_extension
diff --git a/Documentation/acpi/lpit.txt b/Documentation/firmware-guide/acpi/lpit.rst
index b426398d2e97..aca928fab027 100644
--- a/Documentation/acpi/lpit.txt
+++ b/Documentation/firmware-guide/acpi/lpit.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Low Power Idle Table (LPIT)
+===========================
+
To enumerate platform Low Power Idle states, Intel platforms are using
“Low Power Idle Table” (LPIT). More details about this table can be
downloaded from:
@@ -8,13 +14,15 @@ Residencies for each low power state can be read via FFH
On platforms supporting S0ix sleep states, there can be two types of
residencies:
-- CPU PKG C10 (Read via FFH interface)
-- Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
+
+ - CPU PKG C10 (Read via FFH interface)
+ - Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
The following attributes are added dynamically to the cpuidle
-sysfs attribute group:
- /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
- /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
+sysfs attribute group::
+
+ /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
+ /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
The "low_power_idle_cpu_residency_us" attribute shows time spent
by the CPU package in PKG C10
diff --git a/Documentation/firmware-guide/acpi/method-customizing.rst b/Documentation/firmware-guide/acpi/method-customizing.rst
new file mode 100644
index 000000000000..de3ebcaed4cf
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/method-customizing.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Linux ACPI Custom Control Method How To
+=======================================
+
+:Author: Zhang Rui <rui.zhang@intel.com>
+
+
+Linux supports customizing ACPI control methods at runtime.
+
+Users can use this to:
+
+1. override an existing method which may not work correctly,
+ or just for debugging purposes.
+2. insert a completely new method in order to create a missing
+ method such as _OFF, _ON, _STA, _INI, etc.
+
+For these cases, it is far simpler to dynamically install a single
+control method rather than override the entire DSDT, because kernel
+rebuild/reboot is not needed and test results can be obtained in minutes.
+
+.. note::
+
+   - Only an ACPI METHOD can be overridden; any other object types, like
+     "Device" or "OperationRegion", are not recognized. Methods
+     declared inside scope operators are also not supported.
+
+   - The same ACPI control method can be overridden many times,
+     and it is always the latest override that is used by the kernel.
+
+ - To get the ACPI debug object output (Store (AAAA, Debug)),
+ please run::
+
+ echo 1 > /sys/module/acpi/parameters/aml_debug_output
+
+
+1. override an existing method
+==============================
+a) get the ACPI table via ACPI sysfs I/F. e.g. to get the DSDT,
+ just run "cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat"
+b) disassemble the table by running "iasl -d dsdt.dat".
+c) rewrite the ASL code of the method and save it in a new file.
+d) package the new file (psr.asl) to an ACPI table format.
+ Here is an example of a customized \_SB._AC._PSR method::
+
+ DefinitionBlock ("", "SSDT", 1, "", "", 0x20080715)
+ {
+ Method (\_SB_.AC._PSR, 0, NotSerialized)
+ {
+ Store ("In AC _PSR", Debug)
+ Return (ACON)
+ }
+ }
+
+ Note that the full pathname of the method in ACPI namespace
+ should be used.
+e) assemble the file to generate the AML code of the method.
+ e.g. "iasl -vw 6084 psr.asl" (psr.aml is generated as a result)
+ If parameter "-vw 6084" is not supported by your iASL compiler,
+ please try a newer version.
+f) mount debugfs by "mount -t debugfs none /sys/kernel/debug"
+g) override the old method via the debugfs by running
+ "cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method"
+
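+Putting steps a) ~ g) together, a hypothetical end-to-end session could
+look like the one below; the file names are only an example, and the
+rewritten method is assumed to have been saved as /tmp/psr.asl with a
+text editor after disassembling the table::
+
+  cat /sys/firmware/acpi/tables/DSDT > /tmp/dsdt.dat
+  iasl -d /tmp/dsdt.dat
+  iasl -vw 6084 /tmp/psr.asl
+  mount -t debugfs none /sys/kernel/debug
+  cat /tmp/psr.aml > /sys/kernel/debug/acpi/custom_method
+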
+2. insert a new method
+======================
+This is easier than overriding an existing method.
+We just need to create the ASL code of the method we want to
+insert and then follow steps c) ~ g) in section 1.
+
+3. undo your changes
+====================
+The "undo" operation is not supported for a newly inserted method
+right now, i.e. a method cannot be removed once it has been inserted.
+For an overridden method, in order to undo your changes, please
+save a copy of the method's original ASL code in step c) of section 1,
+and redo steps c) ~ g) to override the method with the original one.
+
+
+.. note:: A kernel can run with multiple custom ACPI methods installed,
+   but each individual write to debugfs can implement only a SINGLE
+   method override, i.e. if we want to insert/override multiple
+   ACPI methods, we need to redo steps c) ~ g) multiple times.
+
+.. note:: Be aware that root can misuse this driver to modify arbitrary
+   memory and gain additional rights, if root's privileges have been
+   restricted (for example if root is not allowed to load additional
+ modules after boot).
diff --git a/Documentation/firmware-guide/acpi/method-tracing.rst b/Documentation/firmware-guide/acpi/method-tracing.rst
new file mode 100644
index 000000000000..d0b077b73f5f
--- /dev/null
+++ b/Documentation/firmware-guide/acpi/method-tracing.rst
@@ -0,0 +1,238 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+=====================
+ACPICA Trace Facility
+=====================
+
+:Copyright: |copy| 2015, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
+
+
+Abstract
+========
+This document describes the functions and the interfaces of the
+method tracing facility.
+
+Functionalities and usage examples
+==================================
+
+ACPICA provides a method tracing capability, and two functions are
+currently implemented using it.
+
+Log reducer
+-----------
+
+The ACPICA subsystem provides debugging output when CONFIG_ACPI_DEBUG is
+enabled. The debugging messages, which are emitted via the
+ACPI_DEBUG_PRINT() macro, can be reduced at two levels - per-component
+level (known as debug layer, configured via
+/sys/module/acpi/parameters/debug_layer) and per-type level (known as
+debug level, configured via /sys/module/acpi/parameters/debug_level).
+
+But even with a particular layer/level applied, the quantity of debugging
+output produced during control method evaluations may still be too large
+to fit into the kernel log buffer. The idea is therefore to enable the
+particular (normally more detailed) debug layer/level only while a control
+method evaluation is running: the detailed logging is switched on when the
+evaluation starts and switched off again when it stops.
+
+The following command examples illustrate the usage of the "log reducer"
+functionality:
+
+a. Filter out the debug layer/level matched logs when control methods
+ are being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "enable" > trace_state
+
+b. Filter out the debug layer/level matched logs when the specified
+ control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method" > /sys/module/acpi/parameters/trace_state
+
+c. Filter out the debug layer/level matched logs when the specified
+ control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0xXXXXXXXX" > trace_debug_layer
+ # echo "0xYYYYYYYY" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method-once" > /sys/module/acpi/parameters/trace_state
+
+Where:
+ 0xXXXXXXXX/0xYYYYYYYY
+   Refer to Documentation/firmware-guide/acpi/debug.rst for possible debug layer/level
+ masking values.
+ \PPPP.AAAA.TTTT.HHHH
+ Full path of a control method that can be found in the ACPI namespace.
+ It needn't be an entry of a control method evaluation.
+
+AML tracer
+----------
+
+There are special log entries added by the method tracing facility at
+the "trace points" where the AML interpreter starts/stops executing a
+control method or an AML opcode. Note that the format of the log entries
+is subject to change::
+
+ [ 0.186427] exdebug-0398 ex_trace_point : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+ [ 0.186630] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905c88:If] execution.
+ [ 0.186820] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:LEqual] execution.
+ [ 0.187010] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905a20:-NamePath-] execution.
+ [ 0.187214] exdebug-0398 ex_trace_point : Opcode End [0xf5905a20:-NamePath-] execution.
+ [ 0.187407] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
+ [ 0.187594] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
+ [ 0.187789] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:LEqual] execution.
+ [ 0.187980] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905cc0:Return] execution.
+ [ 0.188146] exdebug-0398 ex_trace_point : Opcode Begin [0xf5905f60:One] execution.
+ [ 0.188334] exdebug-0398 ex_trace_point : Opcode End [0xf5905f60:One] execution.
+ [ 0.188524] exdebug-0398 ex_trace_point : Opcode End [0xf5905cc0:Return] execution.
+ [ 0.188712] exdebug-0398 ex_trace_point : Opcode End [0xf5905c88:If] execution.
+ [ 0.188903] exdebug-0398 ex_trace_point : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+
+Developers can utilize these special log entries to track AML
+interpretation, which aids issue debugging and performance tuning. Note
+that, as the "AML tracer" logs are implemented via the ACPI_DEBUG_PRINT()
+macro, CONFIG_ACPI_DEBUG must also be enabled in order to produce
+"AML tracer" logs.
+
+The following command examples illustrate the usage of the "AML tracer"
+functionality:
+
+a. Filter out the method start/stop "AML tracer" logs when control
+ methods are being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "enable" > trace_state
+
+b. Filter out the method start/stop "AML tracer" when the specified
+ control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method" > trace_state
+
+c. Filter out the method start/stop "AML tracer" logs when the specified
+ control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "method-once" > trace_state
+
+d. Filter out the method/opcode start/stop "AML tracer" when the
+ specified control method is being evaluated::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+ # echo "opcode" > trace_state
+
+e. Filter out the method/opcode start/stop "AML tracer" when the
+ specified control method is being evaluated for the first time::
+
+ # cd /sys/module/acpi/parameters
+ # echo "0x80" > trace_debug_layer
+ # echo "0x10" > trace_debug_level
+ # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+   # echo "opcode-once" > trace_state
+
+Note that all of the above method tracing related module parameters can
+also be used as boot parameters, for example::
+
+ acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
+ acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
+
+
+Interface descriptions
+======================
+
+All method tracing functions can be configured via ACPI module
+parameters that are accessible at /sys/module/acpi/parameters/:
+
+trace_method_name
+ The full path of the AML method that the user wants to trace.
+
+ Note that the full path shouldn't contain the trailing "_"s in its
+ name segments but may contain "\" to form an absolute path.
+
+trace_debug_layer
+ The temporary debug_layer used when the tracing feature is enabled.
+
+ Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
+ used to match all "AML tracer" logs.
+
+trace_debug_level
+ The temporary debug_level used when the tracing feature is enabled.
+
+ Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
+ debug_level used to match all "AML tracer" logs.
+
+trace_state
+ The status of the tracing feature.
+
+ Users can enable/disable this debug tracing feature by executing
+ the following command::
+
+ # echo string > /sys/module/acpi/parameters/trace_state
+
+Where "string" should be one of the following:
+
+"disable"
+ Disable the method tracing feature.
+
+"enable"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during any method execution will be logged.
+
+"method"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method execution of "trace_method_name" will be logged.
+
+"method-once"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method execution of "trace_method_name" will be logged only once.
+
+"opcode"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method/opcode execution of "trace_method_name" will be logged.
+
+"opcode-once"
+ Enable the method tracing feature.
+
+ ACPICA debugging messages matching "trace_debug_layer/trace_debug_level"
+ during method/opcode execution of "trace_method_name" will be logged only
+ once.
+
+Note that the difference between "enable" and the other feature
+enabling options is:
+
+1. When "enable" is specified, since
+ "trace_debug_layer/trace_debug_level" shall apply to all control
+ method evaluations, after configuring "trace_state" to "enable",
+ "trace_method_name" will be reset to NULL.
+2. When "method/opcode" is specified, if
+ "trace_method_name" is NULL when "trace_state" is configured to
+ these options, the "trace_debug_layer/trace_debug_level" will
+ apply to all control method evaluations.
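+
+As a quick sanity check, the same parameters can normally be read back as
+well (a hypothetical session; the values reported depend on the current
+configuration)::
+
+  # cd /sys/module/acpi/parameters
+  # cat trace_state
+  # cat trace_method_name trace_debug_layer trace_debug_level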
diff --git a/Documentation/acpi/namespace.txt b/Documentation/firmware-guide/acpi/namespace.rst
index 1860cb3865c6..835521baeb89 100644
--- a/Documentation/acpi/namespace.txt
+++ b/Documentation/firmware-guide/acpi/namespace.rst
@@ -1,85 +1,90 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===================================================
ACPI Device Tree - Representation of ACPI Namespace
+===================================================
-Copyright (C) 2013, Intel Corporation
-Author: Lv Zheng <lv.zheng@intel.com>
+:Copyright: |copy| 2013, Intel Corporation
+:Author: Lv Zheng <lv.zheng@intel.com>
-Abstract:
+:Credit: Thanks for the help from Zhang Rui <rui.zhang@intel.com> and
+    Rafael J. Wysocki <rafael.j.wysocki@intel.com>.
+Abstract
+========
The Linux ACPI subsystem converts ACPI namespace objects into a Linux
device tree under the /sys/devices/LNXSYSTEM:00 and updates it upon
-receiving ACPI hotplug notification events. For each device object in this
-hierarchy there is a corresponding symbolic link in the
+receiving ACPI hotplug notification events. For each device object
+in this hierarchy there is a corresponding symbolic link in the
/sys/bus/acpi/devices.
+
This document illustrates the structure of the ACPI device tree.
+ACPI Definition Blocks
+======================
+
+The ACPI firmware sets up RSDP (Root System Description Pointer) in the
+system memory address space pointing to the XSDT (Extended System
+Description Table). The XSDT always points to the FADT (Fixed ACPI
+Description Table) using its first entry, the data within the FADT
+includes various fixed-length entries that describe fixed ACPI features
+of the hardware. The FADT contains a pointer to the DSDT
+(Differentiated System Description Table). The XSDT also contains
+entries pointing to possibly multiple SSDTs (Secondary System
+Description Table).
+
+The DSDT and SSDT data is organized in data structures called definition
+blocks that contain definitions of various objects, including ACPI
+control methods, encoded in AML (ACPI Machine Language). The data block
+of the DSDT along with the contents of SSDTs represents a hierarchical
+data structure called the ACPI namespace whose topology reflects the
+structure of the underlying hardware platform.
+
+The relationships between ACPI System Definition Tables described above
+are illustrated in the following diagram::
+
+ +---------+ +-------+ +--------+ +------------------------+
+ | RSDP | +->| XSDT | +->| FADT | | +-------------------+ |
+ +---------+ | +-------+ | +--------+ +-|->| DSDT | |
+ | Pointer | | | Entry |-+ | ...... | | | +-------------------+ |
+ +---------+ | +-------+ | X_DSDT |--+ | | Definition Blocks | |
+ | Pointer |-+ | ..... | | ...... | | +-------------------+ |
+ +---------+ +-------+ +--------+ | +-------------------+ |
+ | Entry |------------------|->| SSDT | |
+ +- - - -+ | +-------------------| |
+ | Entry | - - - - - - - -+ | | Definition Blocks | |
+ +- - - -+ | | +-------------------+ |
+ | | +- - - - - - - - - -+ |
+ +-|->| SSDT | |
+ | +-------------------+ |
+ | | Definition Blocks | |
+ | +- - - - - - - - - -+ |
+ +------------------------+
+ |
+ OSPM Loading |
+ \|/
+ +----------------+
+ | ACPI Namespace |
+ +----------------+
+
+ Figure 1. ACPI Definition Blocks
+
+.. note:: RSDP can also contain a pointer to the RSDT (Root System
+ Description Table). Platforms provide RSDT to enable
+ compatibility with ACPI 1.0 operating systems. The OS is expected
+ to use XSDT, if present.
+
+
+Example ACPI Namespace
+======================
+
+All definition blocks are loaded into a single namespace. The namespace
+is a hierarchy of objects identified by names and paths.
+The following naming conventions apply to object names in the ACPI
+namespace:
-Credit:
-
-Thanks for the help from Zhang Rui <rui.zhang@intel.com> and Rafael J.
-Wysocki <rafael.j.wysocki@intel.com>.
-
-
-1. ACPI Definition Blocks
-
- The ACPI firmware sets up RSDP (Root System Description Pointer) in the
- system memory address space pointing to the XSDT (Extended System
- Description Table). The XSDT always points to the FADT (Fixed ACPI
- Description Table) using its first entry, the data within the FADT
- includes various fixed-length entries that describe fixed ACPI features
- of the hardware. The FADT contains a pointer to the DSDT
- (Differentiated System Descripition Table). The XSDT also contains
- entries pointing to possibly multiple SSDTs (Secondary System
- Description Table).
-
- The DSDT and SSDT data is organized in data structures called definition
- blocks that contain definitions of various objects, including ACPI
- control methods, encoded in AML (ACPI Machine Language). The data block
- of the DSDT along with the contents of SSDTs represents a hierarchical
- data structure called the ACPI namespace whose topology reflects the
- structure of the underlying hardware platform.
-
- The relationships between ACPI System Definition Tables described above
- are illustrated in the following diagram.
-
- +---------+ +-------+ +--------+ +------------------------+
- | RSDP | +->| XSDT | +->| FADT | | +-------------------+ |
- +---------+ | +-------+ | +--------+ +-|->| DSDT | |
- | Pointer | | | Entry |-+ | ...... | | | +-------------------+ |
- +---------+ | +-------+ | X_DSDT |--+ | | Definition Blocks | |
- | Pointer |-+ | ..... | | ...... | | +-------------------+ |
- +---------+ +-------+ +--------+ | +-------------------+ |
- | Entry |------------------|->| SSDT | |
- +- - - -+ | +-------------------| |
- | Entry | - - - - - - - -+ | | Definition Blocks | |
- +- - - -+ | | +-------------------+ |
- | | +- - - - - - - - - -+ |
- +-|->| SSDT | |
- | +-------------------+ |
- | | Definition Blocks | |
- | +- - - - - - - - - -+ |
- +------------------------+
- |
- OSPM Loading |
- \|/
- +----------------+
- | ACPI Namespace |
- +----------------+
-
- Figure 1. ACPI Definition Blocks
-
- NOTE: RSDP can also contain a pointer to the RSDT (Root System
- Description Table). Platforms provide RSDT to enable
- compatibility with ACPI 1.0 operating systems. The OS is expected
- to use XSDT, if present.
-
-
-2. Example ACPI Namespace
-
- All definition blocks are loaded into a single namespace. The namespace
- is a hierarchy of objects identified by names and paths.
- The following naming conventions apply to object names in the ACPI
- namespace:
1. All names are 32 bits long.
2. The first byte of a name must be one of 'A' - 'Z', '_'.
3. Each of the remaining bytes of a name must be one of 'A' - 'Z', '0'
@@ -91,7 +96,7 @@ Wysocki <rafael.j.wysocki@intel.com>.
(i.e. names prepended with '^' are relative to the parent of the
current namespace node).
- The figure below shows an example ACPI namespace.
+The figure below shows an example ACPI namespace::
+------+
| \ | Root
@@ -184,19 +189,20 @@ Wysocki <rafael.j.wysocki@intel.com>.
Figure 2. Example ACPI Namespace
-3. Linux ACPI Device Objects
+Linux ACPI Device Objects
+=========================
- The Linux kernel's core ACPI subsystem creates struct acpi_device
- objects for ACPI namespace objects representing devices, power resources
- processors, thermal zones. Those objects are exported to user space via
- sysfs as directories in the subtree under /sys/devices/LNXSYSTM:00. The
- format of their names is <bus_id:instance>, where 'bus_id' refers to the
- ACPI namespace representation of the given object and 'instance' is used
- for distinguishing different object of the same 'bus_id' (it is
- two-digit decimal representation of an unsigned integer).
+The Linux kernel's core ACPI subsystem creates struct acpi_device
+objects for ACPI namespace objects representing devices, power resources,
+processors and thermal zones. Those objects are exported to user space via
+sysfs as directories in the subtree under /sys/devices/LNXSYSTM:00. The
+format of their names is <bus_id:instance>, where 'bus_id' refers to the
+ACPI namespace representation of the given object and 'instance' is used
+for distinguishing different objects of the same 'bus_id' (it is
+a two-digit decimal representation of an unsigned integer).
- The value of 'bus_id' depends on the type of the object whose name it is
- part of as listed in the table below.
+The value of 'bus_id' depends on the type of the object whose name it is
+part of as listed in the table below::
+---+-----------------+-------+----------+
| | Object/Feature | Table | bus_id |
@@ -226,10 +232,11 @@ Wysocki <rafael.j.wysocki@intel.com>.
Table 1. ACPI Namespace Objects Mapping
- The following rules apply when creating struct acpi_device objects on
- the basis of the contents of ACPI System Description Tables (as
- indicated by the letter in the first column and the notation in the
- second column of the table above):
+The following rules apply when creating struct acpi_device objects on
+the basis of the contents of ACPI System Description Tables (as
+indicated by the letter in the first column and the notation in the
+second column of the table above):
+
N:
The object's source is an ACPI namespace node (as indicated by the
named object's type in the second column). In that case the object's
@@ -249,13 +256,14 @@ Wysocki <rafael.j.wysocki@intel.com>.
struct acpi_device object with LNXVIDEO 'bus_id' will be created for
it.
- The third column of the above table indicates which ACPI System
- Description Tables contain information used for the creation of the
- struct acpi_device objects represented by the given row (xSDT means DSDT
- or SSDT).
+The third column of the above table indicates which ACPI System
+Description Tables contain information used for the creation of the
+struct acpi_device objects represented by the given row (xSDT means DSDT
+or SSDT).
+
+The fourth column of the above table indicates the 'bus_id' generation
+rule of the struct acpi_device object:
- The forth column of the above table indicates the 'bus_id' generation
- rule of the struct acpi_device object:
_HID:
_HID in the last column of the table means that the object's bus_id
is derived from the _HID/_CID identification objects present under
@@ -275,45 +283,47 @@ Wysocki <rafael.j.wysocki@intel.com>.
object's bus_id.
-4. Linux ACPI Physical Device Glue
-
- ACPI device (i.e. struct acpi_device) objects may be linked to other
- objects in the Linux' device hierarchy that represent "physical" devices
- (for example, devices on the PCI bus). If that happens, it means that
- the ACPI device object is a "companion" of a device otherwise
- represented in a different way and is used (1) to provide configuration
- information on that device which cannot be obtained by other means and
- (2) to do specific things to the device with the help of its ACPI
- control methods. One ACPI device object may be linked this way to
- multiple "physical" devices.
-
- If an ACPI device object is linked to a "physical" device, its sysfs
- directory contains the "physical_node" symbolic link to the sysfs
- directory of the target device object. In turn, the target device's
- sysfs directory will then contain the "firmware_node" symbolic link to
- the sysfs directory of the companion ACPI device object.
- The linking mechanism relies on device identification provided by the
- ACPI namespace. For example, if there's an ACPI namespace object
- representing a PCI device (i.e. a device object under an ACPI namespace
- object representing a PCI bridge) whose _ADR returns 0x00020000 and the
- bus number of the parent PCI bridge is 0, the sysfs directory
- representing the struct acpi_device object created for that ACPI
- namespace object will contain the 'physical_node' symbolic link to the
- /sys/devices/pci0000:00/0000:00:02:0/ sysfs directory of the
- corresponding PCI device.
-
- The linking mechanism is generally bus-specific. The core of its
- implementation is located in the drivers/acpi/glue.c file, but there are
- complementary parts depending on the bus types in question located
- elsewhere. For example, the PCI-specific part of it is located in
- drivers/pci/pci-acpi.c.
-
-
-5. Example Linux ACPI Device Tree
-
- The sysfs hierarchy of struct acpi_device objects corresponding to the
- example ACPI namespace illustrated in Figure 2 with the addition of
- fixed PWR_BUTTON/SLP_BUTTON devices is shown below.
+Linux ACPI Physical Device Glue
+===============================
+
+ACPI device (i.e. struct acpi_device) objects may be linked to other
+objects in the Linux device hierarchy that represent "physical" devices
+(for example, devices on the PCI bus). If that happens, it means that
+the ACPI device object is a "companion" of a device otherwise
+represented in a different way and is used (1) to provide configuration
+information on that device which cannot be obtained by other means and
+(2) to do specific things to the device with the help of its ACPI
+control methods. One ACPI device object may be linked this way to
+multiple "physical" devices.
+
+If an ACPI device object is linked to a "physical" device, its sysfs
+directory contains the "physical_node" symbolic link to the sysfs
+directory of the target device object. In turn, the target device's
+sysfs directory will then contain the "firmware_node" symbolic link to
+the sysfs directory of the companion ACPI device object.
+The linking mechanism relies on device identification provided by the
+ACPI namespace. For example, if there's an ACPI namespace object
+representing a PCI device (i.e. a device object under an ACPI namespace
+object representing a PCI bridge) whose _ADR returns 0x00020000 and the
+bus number of the parent PCI bridge is 0, the sysfs directory
+representing the struct acpi_device object created for that ACPI
+namespace object will contain the 'physical_node' symbolic link to the
+/sys/devices/pci0000:00/0000:00:02.0/ sysfs directory of the
+corresponding PCI device.
+
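+For example, the link pair described above can be inspected from user
+space; this is only a hypothetical session, and "device:XX" stands for
+whatever name the ACPI core assigned to the companion object::
+
+  # ls -l /sys/devices/pci0000:00/0000:00:02.0/firmware_node
+  # ls -l /sys/devices/LNXSYSTM:00/.../device:XX/physical_node
+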
+The linking mechanism is generally bus-specific. The core of its
+implementation is located in the drivers/acpi/glue.c file, but there are
+complementary parts depending on the bus types in question located
+elsewhere. For example, the PCI-specific part of it is located in
+drivers/pci/pci-acpi.c.
+
+
+Example Linux ACPI Device Tree
+=================================
+
+The sysfs hierarchy of struct acpi_device objects corresponding to the
+example ACPI namespace illustrated in Figure 2 with the addition of
+fixed PWR_BUTTON/SLP_BUTTON devices is shown below::
+--------------+---+-----------------+
| LNXSYSTEM:00 | \ | acpi:LNXSYSTEM: |
@@ -377,12 +387,14 @@ Wysocki <rafael.j.wysocki@intel.com>.
Figure 3. Example Linux ACPI Device Tree
- NOTE: Each node is represented as "object/path/modalias", where:
- 1. 'object' is the name of the object's directory in sysfs.
- 2. 'path' is the ACPI namespace path of the corresponding
- ACPI namespace object, as returned by the object's 'path'
- sysfs attribute.
- 3. 'modalias' is the value of the object's 'modalias' sysfs
- attribute (as described earlier in this document).
- NOTE: N/A indicates the device object does not have the 'path' or the
- 'modalias' attribute.
+.. note:: Each node is represented as "object/path/modalias", where:
+
+ 1. 'object' is the name of the object's directory in sysfs.
+ 2. 'path' is the ACPI namespace path of the corresponding
+ ACPI namespace object, as returned by the object's 'path'
+ sysfs attribute.
+ 3. 'modalias' is the value of the object's 'modalias' sysfs
+ attribute (as described earlier in this document).
+
+.. note:: N/A indicates the device object does not have the 'path' or the
+ 'modalias' attribute.
diff --git a/Documentation/acpi/osi.txt b/Documentation/firmware-guide/acpi/osi.rst
index 50cde0ceb9b0..29e9ef79ebc0 100644
--- a/Documentation/acpi/osi.txt
+++ b/Documentation/firmware-guide/acpi/osi.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
ACPI _OSI and _REV methods
---------------------------
+==========================
An ACPI BIOS can use the "Operating System Interfaces" method (_OSI)
to find out what the operating system supports. Eg. If BIOS
@@ -14,7 +17,7 @@ This document explains how and why the BIOS and Linux should use these methods.
It also explains how and why they are widely misused.
How to use _OSI
----------------
+===============
Linux runs on two groups of machines -- those that are tested by the OEM
to be compatible with Linux, and those that were never tested with Linux,
@@ -62,7 +65,7 @@ the string when that support is added to the kernel.
That was easy. Read on, to find out how to do it wrong.
Before _OSI, there was _OS
---------------------------
+==========================
ACPI 1.0 specified "_OS" as an
"object that evaluates to a string that identifies the operating system."
@@ -96,7 +99,7 @@ That is the *only* viable strategy, as that is what modern Windows does,
and so doing otherwise could steer the BIOS down an untested path.
_OSI is born, and immediately misused
---------------------------------------
+=====================================
With _OSI, the *BIOS* provides the string describing an interface,
and asks the OS: "YES/NO, are you compatible with this interface?"
@@ -144,7 +147,7 @@ catastrophic failure resulting from the BIOS taking paths that
were never validated under *any* OS.
Do not use _REV
----------------
+===============
Since _OSI("Linux") went away, some BIOS writers used _REV
to support Linux and Windows differences in the same BIOS.
@@ -164,7 +167,7 @@ from mid-2015 onward. The ACPI specification will also be updated
to reflect that _REV is deprecated, and always returns 2.
Apple Mac and _OSI("Darwin")
-----------------------------
+============================
On Apple's Mac platforms, the ACPI BIOS invokes _OSI("Darwin")
to determine if the machine is running Apple OSX.
diff --git a/Documentation/acpi/video_extension.txt b/Documentation/firmware-guide/acpi/video_extension.rst
index 79bf6a4921be..099b8607e07b 100644
--- a/Documentation/acpi/video_extension.txt
+++ b/Documentation/firmware-guide/acpi/video_extension.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
ACPI video extensions
-~~~~~~~~~~~~~~~~~~~~~
+=====================
This driver implement the ACPI Extensions For Display Adapters for
integrated graphics devices on motherboard, as specified in ACPI 2.0
@@ -8,9 +11,10 @@ defining the video POST device, retrieving EDID information or to
setup a video output, etc. Note that this is an ref. implementation
only. It may or may not work for your integrated video device.
-The ACPI video driver does 3 things regarding backlight control:
+The ACPI video driver does 3 things regarding backlight control.
-1 Export a sysfs interface for user space to control backlight level
+Export a sysfs interface for user space to control backlight level
+==================================================================
If the ACPI table has a video device, and acpi_backlight=vendor kernel
command line is not present, the driver will register a backlight device
@@ -22,36 +26,41 @@ The backlight sysfs interface has a standard definition here:
Documentation/ABI/stable/sysfs-class-backlight.
And what ACPI video driver does is:
-actual_brightness: on read, control method _BQC will be evaluated to
-get the brightness level the firmware thinks it is at;
-bl_power: not implemented, will set the current brightness instead;
-brightness: on write, control method _BCM will run to set the requested
-brightness level;
-max_brightness: Derived from the _BCL package(see below);
-type: firmware
+
+actual_brightness:
+ on read, control method _BQC will be evaluated to
+ get the brightness level the firmware thinks it is at;
+bl_power:
+ not implemented, will set the current brightness instead;
+brightness:
+ on write, control method _BCM will run to set the requested brightness level;
+max_brightness:
+  Derived from the _BCL package (see below);
+type:
+ firmware
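+
+For example, assuming the backlight device registered by this driver shows
+up as "acpi_video0" (the name here is an example, not guaranteed), the
+interface could be exercised from user space like this::
+
+  # cat /sys/class/backlight/acpi_video0/max_brightness
+  # echo 3 > /sys/class/backlight/acpi_video0/brightness
+  # cat /sys/class/backlight/acpi_video0/actual_brightness
+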
Note that ACPI video backlight driver will always use index for
brightness, actual_brightness and max_brightness. So if we have
-the following _BCL package:
+the following _BCL package::
-Method (_BCL, 0, NotSerialized)
-{
- Return (Package (0x0C)
+ Method (_BCL, 0, NotSerialized)
{
- 0x64,
- 0x32,
- 0x0A,
- 0x14,
- 0x1E,
- 0x28,
- 0x32,
- 0x3C,
- 0x46,
- 0x50,
- 0x5A,
- 0x64
- })
-}
+ Return (Package (0x0C)
+ {
+ 0x64,
+ 0x32,
+ 0x0A,
+ 0x14,
+ 0x1E,
+ 0x28,
+ 0x32,
+ 0x3C,
+ 0x46,
+ 0x50,
+ 0x5A,
+ 0x64
+ })
+ }
The first two levels are for when laptop are on AC or on battery and are
not used by Linux currently. The remaining 10 levels are supported levels
@@ -62,13 +71,15 @@ as a "brightness level" indicator. Thus from the user space perspective
the range of available brightness levels is from 0 to 9 (max_brightness)
inclusive.
-2 Notify user space about hotkey event
+Notify user space about hotkey event
+====================================
There are generally two cases for hotkey event reporting:
+
i) For some laptops, when user presses the hotkey, a scancode will be
generated and sent to user space through the input device created by
the keyboard driver as a key type input event, with proper remap, the
- following key code will appear to user space:
+ following key code will appear to user space::
EV_KEY, KEY_BRIGHTNESSUP
EV_KEY, KEY_BRIGHTNESSDOWN
@@ -84,23 +95,27 @@ ii) For some laptops, the press of the hotkey will not generate the
notify value it received and send the event to user space through the
input device it created:
+ ===== ==================
event keycode
+ ===== ==================
0x86 KEY_BRIGHTNESSUP
0x87 KEY_BRIGHTNESSDOWN
etc.
+ ===== ==================
so this would lead to the same effect as case i) now.
Once user space tool receives this event, it can modify the backlight
level through the sysfs interface.
-3 Change backlight level in the kernel
+Change backlight level in the kernel
+====================================
This works for machines covered by case ii) in Section 2. Once the driver
received a notification, it will set the backlight level accordingly. This does
not affect the sending of event to user space, they are always sent to user
space regardless of whether or not the video module controls the backlight level
directly. This behaviour can be controlled through the brightness_switch_enabled
-module parameter as documented in admin-guide/kernel-parameters.rst. It is recommended to
-disable this behaviour once a GUI environment starts up and wants to have full
-control of the backlight level.
+module parameter as documented in admin-guide/kernel-parameters.rst. It is
+recommended to disable this behaviour once a GUI environment starts up and
+wants to have full control of the backlight level.
diff --git a/Documentation/firmware-guide/index.rst b/Documentation/firmware-guide/index.rst
new file mode 100644
index 000000000000..5355784ca0a2
--- /dev/null
+++ b/Documentation/firmware-guide/index.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================
+The Linux kernel firmware guide
+===============================
+
+This section describes the ACPI subsystem in Linux from the firmware perspective.
+
+.. toctree::
+ :maxdepth: 1
+
+ acpi/index
+
diff --git a/Documentation/gpio/index.rst b/Documentation/gpio/index.rst
new file mode 100644
index 000000000000..09a4a553f434
--- /dev/null
+++ b/Documentation/gpio/index.rst
@@ -0,0 +1,17 @@
+:orphan:
+
+====
+gpio
+====
+
+.. toctree::
+ :maxdepth: 1
+
+ sysfs
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.rst
index 58eeab81f349..ec09ffd983e7 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.rst
@@ -1,10 +1,12 @@
GPIO Sysfs Interface for Userspace
==================================
-THIS ABI IS DEPRECATED, THE ABI DOCUMENTATION HAS BEEN MOVED TO
-Documentation/ABI/obsolete/sysfs-gpio AND NEW USERSPACE CONSUMERS
-ARE SUPPOSED TO USE THE CHARACTER DEVICE ABI. THIS OLD SYSFS ABI WILL
-NOT BE DEVELOPED (NO NEW FEATURES), IT WILL JUST BE MAINTAINED.
+.. warning::
+
+ THIS ABI IS DEPRECATED, THE ABI DOCUMENTATION HAS BEEN MOVED TO
+ Documentation/ABI/obsolete/sysfs-gpio AND NEW USERSPACE CONSUMERS
+ ARE SUPPOSED TO USE THE CHARACTER DEVICE ABI. THIS OLD SYSFS ABI WILL
+ NOT BE DEVELOPED (NO NEW FEATURES), IT WILL JUST BE MAINTAINED.
Refer to the examples in tools/gpio/* for an introduction to the new
character device ABI. Also see the userspace header in
@@ -51,13 +53,15 @@ The control interfaces are write-only:
/sys/class/gpio/
- "export" ... Userspace may ask the kernel to export control of
+ "export" ...
+ Userspace may ask the kernel to export control of
a GPIO to userspace by writing its number to this file.
Example: "echo 19 > export" will create a "gpio19" node
for GPIO #19, if that's not requested by kernel code.
- "unexport" ... Reverses the effect of exporting to userspace.
+ "unexport" ...
+ Reverses the effect of exporting to userspace.
Example: "echo 19 > unexport" will remove a "gpio19"
node exported using the "export" file.
@@ -67,7 +71,8 @@ and have the following read/write attributes:
/sys/class/gpio/gpioN/
- "direction" ... reads as either "in" or "out". This value may
+ "direction" ...
+ reads as either "in" or "out". This value may
normally be written. Writing as "out" defaults to
initializing the value as low. To ensure glitch free
operation, values "low" and "high" may be written to
@@ -78,7 +83,8 @@ and have the following read/write attributes:
it was exported by kernel code that didn't explicitly
allow userspace to reconfigure this GPIO's direction.
- "value" ... reads as either 0 (low) or 1 (high). If the GPIO
+ "value" ...
+ reads as either 0 (low) or 1 (high). If the GPIO
is configured as an output, this value may be written;
any nonzero value is treated as high.
@@ -92,14 +98,16 @@ and have the following read/write attributes:
file and read the new value or close the file and re-open it
to read the value.
- "edge" ... reads as either "none", "rising", "falling", or
+ "edge" ...
+ reads as either "none", "rising", "falling", or
"both". Write these strings to select the signal edge(s)
that will make poll(2) on the "value" file return.
This file exists only if the pin can be configured as an
interrupt generating input pin.
- "active_low" ... reads as either 0 (false) or 1 (true). Write
+ "active_low" ...
+ reads as either 0 (false) or 1 (true). Write
any nonzero value to invert the value attribute both
for reading and writing. Existing and subsequent
poll(2) support configuration via the edge attribute
@@ -112,11 +120,14 @@ read-only attributes:
/sys/class/gpio/gpiochipN/
- "base" ... same as N, the first GPIO managed by this chip
+ "base" ...
+ same as N, the first GPIO managed by this chip
- "label" ... provided for diagnostics (not always unique)
+ "label" ...
+ provided for diagnostics (not always unique)
- "ngpio" ... how many GPIOs this manages (N to N + ngpio - 1)
+ "ngpio" ...
+ how many GPIOs this manages (N to N + ngpio - 1)
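+
+Putting the interfaces described above together, a hypothetical session
+driving GPIO #19 from user space could look like::
+
+  # echo 19 > /sys/class/gpio/export
+  # echo out > /sys/class/gpio/gpio19/direction
+  # echo 1 > /sys/class/gpio/gpio19/value
+  # echo 19 > /sys/class/gpio/unexport
+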
Board documentation should in most cases cover what GPIOs are used for
what purposes. However, those numbers are not always stable; GPIOs on
@@ -129,7 +140,7 @@ the correct GPIO number to use for a given signal.
Exporting from Kernel code
--------------------------
Kernel code can explicitly manage exports of GPIOs which have already been
-requested using gpio_request():
+requested using gpio_request()::
/* export the GPIO to userspace */
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 3ae23a5454ac..966bd2d9f0cc 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -93,6 +93,11 @@ Device Instance and Driver Handling
Driver Load
-----------
+Component Helper Usage
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_drv.c
+ :doc: component helper usage recommendations
IRQ Helper Library
~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 17ca7f8bf3d3..14102ae035dc 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -107,6 +107,12 @@ fbdev Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:export:
+format Helper Functions Reference
+=================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_format_helper.c
+ :export:
+
Framebuffer CMA Helper Functions Reference
==========================================
@@ -369,3 +375,15 @@ Legacy CRTC/Modeset Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
:export:
+
+SHMEM GEM Helper Reference
+==========================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_shmem_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+ :export:
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
index bfde04eddd14..07ed22ea3bd6 100644
--- a/Documentation/gpu/kms-properties.csv
+++ b/Documentation/gpu/kms-properties.csv
@@ -17,7 +17,6 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
-,Optional,"""content type""",ENUM,"{ ""No Data"", ""Graphics"", ""Photo"", ""Cinema"", ""Game"" }",Connector,TBD
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normaly in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
diff --git a/Documentation/gpu/meson.rst b/Documentation/gpu/meson.rst
index 479f6f51a13b..b9e2f9aa3bd8 100644
--- a/Documentation/gpu/meson.rst
+++ b/Documentation/gpu/meson.rst
@@ -42,12 +42,6 @@ Video Encoder
.. kernel-doc:: drivers/gpu/drm/meson/meson_venc.c
:doc: Video Encoder
-Video Canvas Management
-=======================
-
-.. kernel-doc:: drivers/gpu/drm/meson/meson_canvas.c
- :doc: Canvas
-
Video Clocks
============
diff --git a/Documentation/gpu/tinydrm.rst b/Documentation/gpu/tinydrm.rst
index a913644bfc19..33a41544f659 100644
--- a/Documentation/gpu/tinydrm.rst
+++ b/Documentation/gpu/tinydrm.rst
@@ -1,34 +1,22 @@
-==========================
-drm/tinydrm Driver library
-==========================
+============================
+drm/tinydrm Tiny DRM drivers
+============================
-.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
- :doc: overview
-
-Core functionality
-==================
+tinydrm is a collection of DRM drivers that are so small they can fit in a
+single source file.
-.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
- :doc: core
+Helpers
+=======
-.. kernel-doc:: include/drm/tinydrm/tinydrm.h
+.. kernel-doc:: include/drm/tinydrm/tinydrm-helpers.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
:export:
.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
:export:
-Additional helpers
-==================
-
-.. kernel-doc:: include/drm/tinydrm/tinydrm-helpers.h
- :internal:
-
-.. kernel-doc:: drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
- :export:
-
MIPI DBI Compatible Controllers
===============================
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 159a4aba49e6..1528ad2d598b 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -215,12 +215,12 @@ Might be good to also have some igt testcases for this.
Contact: Daniel Vetter, Noralf Tronnes
-Put a reservation_object into drm_gem_object
+Remove the ->gem_prime_res_obj callback
--------------------------------------------
-This would remove the need for the ->gem_prime_res_obj callback. It would also
-allow us to implement generic helpers for waiting for a bo, allowing for quite a
-bit of refactoring in the various wait ioctl implementations.
+The ->gem_prime_res_obj callback can be removed from drivers by using the
+reservation_object in the drm_gem_object. It may also be possible to use the
+generic drm_gem_reservation_object_wait helper for waiting for a bo.
Contact: Daniel Vetter
@@ -469,10 +469,6 @@ those drivers as simple as possible, so lots of room for refactoring:
one of the ideas for having a shared dsi/dbi helper, abstracting away the
transport details more.
-- Quick aside: The unregister devm stuff is kinda getting the lifetimes of
- a drm_device wrong. Doesn't matter, since everyone else gets it wrong
- too :-)
-
Contact: Noralf Trønnes, Daniel Vetter
AMD DC Display Driver
diff --git a/Documentation/hwmon/ab8500 b/Documentation/hwmon/ab8500.rst
index cf169c8ef4e3..33f93a9cec04 100644
--- a/Documentation/hwmon/ab8500
+++ b/Documentation/hwmon/ab8500.rst
@@ -2,19 +2,23 @@ Kernel driver ab8500
====================
Supported chips:
+
* ST-Ericsson AB8500
+
Prefix: 'ab8500'
+
Addresses scanned: -
+
Datasheet: http://www.stericsson.com/developers/documentation.jsp
Authors:
- Martin Persson <martin.persson@stericsson.com>
- Hongbo Zhang <hongbo.zhang@linaro.org>
+ - Martin Persson <martin.persson@stericsson.com>
+ - Hongbo Zhang <hongbo.zhang@linaro.org>
Description
-----------
-See also Documentation/hwmon/abx500. This is the ST-Ericsson AB8500 specific
+See also Documentation/hwmon/abx500.rst. This is the ST-Ericsson AB8500 specific
driver.
Currently only the AB8500 internal sensor and one external sensor for battery
diff --git a/Documentation/hwmon/abituguru b/Documentation/hwmon/abituguru
deleted file mode 100644
index 44013d23b3f0..000000000000
--- a/Documentation/hwmon/abituguru
+++ /dev/null
@@ -1,92 +0,0 @@
-Kernel driver abituguru
-=======================
-
-Supported chips:
- * Abit uGuru revision 1 & 2 (Hardware Monitor part only)
- Prefix: 'abituguru'
- Addresses scanned: ISA 0x0E0
- Datasheet: Not available, this driver is based on reverse engineering.
- A "Datasheet" has been written based on the reverse engineering it
- should be available in the same dir as this file under the name
- abituguru-datasheet.
- Note:
- The uGuru is a microcontroller with onboard firmware which programs
- it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
- Below is an incomplete list with which revisions are used for which
- Motherboards:
- uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7) (1)
- uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
- uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
- uGuru 2.2.0.0 ~ 2.2.0.6 (AA8 Fatal1ty)
- uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
- AW9D-MAX) (2)
- 1) For revisions 2 and 3 uGuru's the driver can autodetect the
- sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
- this does not always work. For these uGuru's the autodetection can
- be overridden with the bank1_types module param. For all 3 known
- revison 1 motherboards the correct use of this param is:
- bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
- You may also need to specify the fan_sensors option for these boards
- fan_sensors=5
- 2) There is a separate abituguru3 driver for these motherboards,
- the abituguru (without the 3 !) driver will not work on these
- motherboards (and visa versa)!
-
-Authors:
- Hans de Goede <j.w.r.degoede@hhs.nl>,
- (Initial reverse engineering done by Olle Sandberg
- <ollebull@gmail.com>)
-
-
-Module Parameters
------------------
-
-* force: bool Force detection. Note this parameter only causes the
- detection to be skipped, and thus the insmod to
- succeed. If the uGuru can't be read the actual hwmon
- driver will not load and thus no hwmon device will get
- registered.
-* bank1_types: int[] Bank1 sensortype autodetection override:
- -1 autodetect (default)
- 0 volt sensor
- 1 temp sensor
- 2 not connected
-* fan_sensors: int Tell the driver how many fan speed sensors there are
- on your motherboard. Default: 0 (autodetect).
-* pwms: int Tell the driver how many fan speed controls (fan
- pwms) your motherboard has. Default: 0 (autodetect).
-* verbose: int How verbose should the driver be? (0-3):
- 0 normal output
- 1 + verbose error reporting
- 2 + sensors type probing info (default)
- 3 + retryable error reporting
- Default: 2 (the driver is still in the testing phase)
-
-Notice if you need any of the first three options above please insmod the
-driver with verbose set to 3 and mail me <j.w.r.degoede@hhs.nl> the output of:
-dmesg | grep abituguru
-
-
-Description
------------
-
-This driver supports the hardware monitoring features of the first and
-second revision of the Abit uGuru chip found on Abit uGuru featuring
-motherboards (most modern Abit motherboards).
-
-The first and second revision of the uGuru chip in reality is a Winbond
-W83L950D in disguise (despite Abit claiming it is "a new microprocessor
-designed by the ABIT Engineers"). Unfortunately this doesn't help since the
-W83L950D is a generic microcontroller with a custom Abit application running
-on it.
-
-Despite Abit not releasing any information regarding the uGuru, Olle
-Sandberg <ollebull@gmail.com> has managed to reverse engineer the sensor part
-of the uGuru. Without his work this driver would not have been possible.
-
-Known Issues
-------------
-
-The voltage and frequency control parts of the Abit uGuru are not supported.
diff --git a/Documentation/hwmon/abituguru-datasheet b/Documentation/hwmon/abituguru-datasheet.rst
index 86c0b1251c81..6d5253e2223b 100644
--- a/Documentation/hwmon/abituguru-datasheet
+++ b/Documentation/hwmon/abituguru-datasheet.rst
@@ -1,3 +1,4 @@
+===============
uGuru datasheet
===============
@@ -168,34 +169,35 @@ This bank contains 0 sensors, iow the sensor address is ignored (but must be
written) just use 0. Bank 0x20 contains 3 bytes:
Byte 0:
-This byte holds the alarm flags for sensor 0-7 of Sensor Bank1, with bit 0
-corresponding to sensor 0, 1 to 1, etc.
+ This byte holds the alarm flags for sensor 0-7 of Sensor Bank1, with bit 0
+ corresponding to sensor 0, 1 to 1, etc.
Byte 1:
-This byte holds the alarm flags for sensor 8-15 of Sensor Bank1, with bit 0
-corresponding to sensor 8, 1 to 9, etc.
+ This byte holds the alarm flags for sensor 8-15 of Sensor Bank1, with bit 0
+ corresponding to sensor 8, 1 to 9, etc.
Byte 2:
-This byte holds the alarm flags for sensor 0-5 of Sensor Bank2, with bit 0
-corresponding to sensor 0, 1 to 1, etc.
+ This byte holds the alarm flags for sensor 0-5 of Sensor Bank2, with bit 0
+ corresponding to sensor 0, 1 to 1, etc.
Bank 0x21 Sensor Bank1 Values / Readings (R)
--------------------------------------------
This bank contains 16 sensors, for each sensor it contains 1 byte.
So far the following sensors are known to be available on all motherboards:
-Sensor 0 CPU temp
-Sensor 1 SYS temp
-Sensor 3 CPU core volt
-Sensor 4 DDR volt
-Sensor 10 DDR Vtt volt
-Sensor 15 PWM temp
+
+- Sensor 0 CPU temp
+- Sensor 1 SYS temp
+- Sensor 3 CPU core volt
+- Sensor 4 DDR volt
+- Sensor 10 DDR Vtt volt
+- Sensor 15 PWM temp
Byte 0:
-This byte holds the reading from the sensor. Sensors in Bank1 can be both
-volt and temp sensors, this is motherboard specific. The uGuru however does
-seem to know (be programmed with) what kindoff sensor is attached see Sensor
-Bank1 Settings description.
+ This byte holds the reading from the sensor. Sensors in Bank1 can be either
+ volt or temp sensors; this is motherboard specific. The uGuru however does
+ seem to know (be programmed with) what kind of sensor is attached; see the
+ Sensor Bank1 Settings description.
Volt sensors use a linear scale, a reading 0 corresponds with 0 volt and a
reading of 255 with 3494 mV. The sensors for higher voltages however are
@@ -207,96 +209,118 @@ Temp sensors also use a linear scale, a reading of 0 corresponds with 0 degree
Celsius and a reading of 255 with a reading of 255 degrees Celsius.
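
As a hedged illustration of the linear scales above (a sketch only, not taken
from the datasheet or the driver; the extra scaling applied to the
higher-voltage inputs is left out)::

  static inline int uguru_raw_to_millivolt(unsigned char raw)
  {
          return raw * 3494 / 255;    /* 0 -> 0 mV, 255 -> 3494 mV */
  }

  static inline int uguru_raw_to_celsius(unsigned char raw)
  {
          return raw;                 /* 0 -> 0 degrees C, 255 -> 255 degrees C */
  }
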
-Bank 0x22 Sensor Bank1 Settings (R)
-Bank 0x23 Sensor Bank1 Settings (W)
------------------------------------
+Bank 0x22 Sensor Bank1 Settings (R) and Bank 0x23 Sensor Bank1 Settings (W)
+---------------------------------------------------------------------------
-This bank contains 16 sensors, for each sensor it contains 3 bytes. Each
+Those banks contain 16 sensors, with 3 bytes for each sensor. Each
set of 3 bytes contains the settings for the sensor with the same sensor
address in Bank 0x21 .
Byte 0:
-Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
-Bit 0: Give an alarm if measured temp is over the warning threshold (RW) *
-Bit 1: Give an alarm if measured volt is over the max threshold (RW) **
-Bit 2: Give an alarm if measured volt is under the min threshold (RW) **
-Bit 3: Beep if alarm (RW)
-Bit 4: 1 if alarm cause measured temp is over the warning threshold (R)
-Bit 5: 1 if alarm cause measured volt is over the max threshold (R)
-Bit 6: 1 if alarm cause measured volt is under the min threshold (R)
-Bit 7: Volt sensor: Shutdown if alarm persist for more than 4 seconds (RW)
- Temp sensor: Shutdown if temp is over the shutdown threshold (RW)
-
-* This bit is only honored/used by the uGuru if a temp sensor is connected
-** This bit is only honored/used by the uGuru if a volt sensor is connected
-Note with some trickery this can be used to find out what kinda sensor is
-detected see the Linux kernel driver for an example with many comments on
-how todo this.
+ Alarm behaviour for the selected sensor. A 1 enables the described
+ behaviour.
+
+Bit 0:
+ Give an alarm if measured temp is over the warning threshold (RW) [1]_
+
+Bit 1:
+ Give an alarm if measured volt is over the max threshold (RW) [2]_
+
+Bit 2:
+ Give an alarm if measured volt is under the min threshold (RW) [2]_
+
+Bit 3:
+ Beep if alarm (RW)
+
+Bit 4:
+ 1 if alarm cause measured temp is over the warning threshold (R)
+
+Bit 5:
+ 1 if alarm cause measured volt is over the max threshold (R)
+
+Bit 6:
+ 1 if alarm cause measured volt is under the min threshold (R)
+
+Bit 7:
+ - Volt sensor: Shutdown if alarm persists for more than 4 seconds (RW)
+ - Temp sensor: Shutdown if temp is over the shutdown threshold (RW)
+
+.. [1] This bit is only honored/used by the uGuru if a temp sensor is connected
+
+.. [2] This bit is only honored/used by the uGuru if a volt sensor is connected
+ Note that with some trickery this can be used to find out what kind of
+ sensor is detected; see the Linux kernel driver for an example with many
+ comments on how to do this.
Byte 1:
-Temp sensor: warning threshold (scale as bank 0x21)
-Volt sensor: min threshold (scale as bank 0x21)
+ - Temp sensor: warning threshold (scale as bank 0x21)
+ - Volt sensor: min threshold (scale as bank 0x21)
Byte 2:
-Temp sensor: shutdown threshold (scale as bank 0x21)
-Volt sensor: max threshold (scale as bank 0x21)
+ - Temp sensor: shutdown threshold (scale as bank 0x21)
+ - Volt sensor: max threshold (scale as bank 0x21)
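
Read as bit masks, byte 0 can be sketched like this (the macro names are
invented here for illustration and are not the driver's own)::

  #define UGURU_ALARM_TEMP_HIGH_ENABLE  0x01  /* bit 0 (RW) */
  #define UGURU_ALARM_VOLT_MAX_ENABLE   0x02  /* bit 1 (RW) */
  #define UGURU_ALARM_VOLT_MIN_ENABLE   0x04  /* bit 2 (RW) */
  #define UGURU_ALARM_BEEP              0x08  /* bit 3 (RW) */
  #define UGURU_ALARM_CAUSE_TEMP_HIGH   0x10  /* bit 4 (R) */
  #define UGURU_ALARM_CAUSE_VOLT_MAX    0x20  /* bit 5 (R) */
  #define UGURU_ALARM_CAUSE_VOLT_MIN    0x40  /* bit 6 (R) */
  #define UGURU_ALARM_SHUTDOWN          0x80  /* bit 7 (RW), meaning depends on sensor type */
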
-Bank 0x24 PWM outputs for FAN's (R)
-Bank 0x25 PWM outputs for FAN's (W)
------------------------------------
+Bank 0x24 PWM outputs for FAN's (R) and Bank 0x25 PWM outputs for FAN's (W)
+---------------------------------------------------------------------------
-This bank contains 3 "sensors", for each sensor it contains 5 bytes.
-Sensor 0 usually controls the CPU fan
-Sensor 1 usually controls the NB (or chipset for single chip) fan
-Sensor 2 usually controls the System fan
+Those banks contain 3 "sensors", for each sensor it contains 5 bytes.
+ - Sensor 0 usually controls the CPU fan
+ - Sensor 1 usually controls the NB (or chipset for single chip) fan
+ - Sensor 2 usually controls the System fan
Byte 0:
-Flag 0x80 to enable control, Fan runs at 100% when disabled.
-low nibble (temp)sensor address at bank 0x21 used for control.
+ Flag 0x80 to enable control; the fan runs at 100% when disabled. The low
+ nibble holds the (temp) sensor address at bank 0x21 used for control.
Byte 1:
-0-255 = 0-12v (linear), specify voltage at which fan will rotate when under
-low threshold temp (specified in byte 3)
+ 0-255 = 0-12v (linear); specifies the voltage at which the fan will rotate
+ when under the low threshold temp (specified in byte 3)
Byte 2:
-0-255 = 0-12v (linear), specify voltage at which fan will rotate when above
-high threshold temp (specified in byte 4)
+ 0-255 = 0-12v (linear); specifies the voltage at which the fan will rotate
+ when above the high threshold temp (specified in byte 4)
Byte 3:
-Low threshold temp (scale as bank 0x21)
+ Low threshold temp (scale as bank 0x21)
byte 4:
-High threshold temp (scale as bank 0x21)
+ High threshold temp (scale as bank 0x21)
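
Put together, one Bank 0x24/0x25 entry can be sketched as a struct (the
struct and its field names are illustrative only, not how the driver stores
these bytes)::

  struct uguru_pwm_entry {
          unsigned char ctrl;      /* bit 7: enable; low nibble: bank 0x21 temp sensor address */
          unsigned char volt_low;  /* 0-255 = 0-12 V, used below the low threshold */
          unsigned char volt_high; /* 0-255 = 0-12 V, used above the high threshold */
          unsigned char temp_low;  /* low threshold temp, bank 0x21 scale */
          unsigned char temp_high; /* high threshold temp, bank 0x21 scale */
  };
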
Bank 0x26 Sensors Bank2 Values / Readings (R)
---------------------------------------------
This bank contains 6 sensors (AFAIK), for each sensor it contains 1 byte.
+
So far the following sensors are known to be available on all motherboards:
-Sensor 0: CPU fan speed
-Sensor 1: NB (or chipset for single chip) fan speed
-Sensor 2: SYS fan speed
+ - Sensor 0: CPU fan speed
+ - Sensor 1: NB (or chipset for single chip) fan speed
+ - Sensor 2: SYS fan speed
Byte 0:
-This byte holds the reading from the sensor. 0-255 = 0-15300 (linear)
+ This byte holds the reading from the sensor. 0-255 = 0-15300 (linear)
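
Since 0-255 maps linearly onto 0-15300 RPM, each count is worth 60 RPM; a
minimal sketch of the conversion (not driver code)::

  static inline int uguru_raw_to_rpm(unsigned char raw)
  {
          return raw * 15300 / 255;   /* equivalent to raw * 60 */
  }
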
-Bank 0x27 Sensors Bank2 Settings (R)
-Bank 0x28 Sensors Bank2 Settings (W)
-------------------------------------
+Bank 0x27 Sensors Bank2 Settings (R) and Bank 0x28 Sensors Bank2 Settings (W)
+-----------------------------------------------------------------------------
-This bank contains 6 sensors (AFAIK), for each sensor it contains 2 bytes.
+Those banks contain 6 sensors (AFAIK), with 2 bytes for each sensor.
Byte 0:
-Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
-Bit 0: Give an alarm if measured rpm is under the min threshold (RW)
-Bit 3: Beep if alarm (RW)
-Bit 7: Shutdown if alarm persist for more than 4 seconds (RW)
+ Alarm behaviour for the selected sensor. A 1 enables the described behaviour.
+
+Bit 0:
+ Give an alarm if measured rpm is under the min threshold (RW)
+
+Bit 3:
+ Beep if alarm (RW)
+
+Bit 7:
+ Shutdown if alarm persists for more than 4 seconds (RW)
Byte 1:
-min threshold (scale as bank 0x26)
+ min threshold (scale as bank 0x26)
Warning for the adventurous
diff --git a/Documentation/hwmon/abituguru.rst b/Documentation/hwmon/abituguru.rst
new file mode 100644
index 000000000000..d8243c827de9
--- /dev/null
+++ b/Documentation/hwmon/abituguru.rst
@@ -0,0 +1,113 @@
+Kernel driver abituguru
+=======================
+
+Supported chips:
+
+ * Abit uGuru revision 1 & 2 (Hardware Monitor part only)
+
+ Prefix: 'abituguru'
+
+ Addresses scanned: ISA 0x0E0
+
+ Datasheet: Not available, this driver is based on reverse engineering.
+ A "Datasheet" has been written based on the reverse engineering it
+ should be available in the same dir as this file under the name
+ abituguru-datasheet.
+
+ Note:
+ The uGuru is a microcontroller with onboard firmware which programs
+ it to behave as a hwmon IC. There are many different revisions of the
+ firmware and thus effectively many different revisions of the uGuru.
+ Below is an incomplete list of which revisions are used for which
+ motherboards:
+
+ - uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7) [1]_
+ - uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
+ - uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
+ - uGuru 2.2.0.0 ~ 2.2.0.6 (AA8 Fatal1ty)
+ - uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
+ - uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+ AW9D-MAX) [2]_
+
+.. [1] For revisions 2 and 3 uGuru's the driver can autodetect the
+ sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
+ this does not always work. For these uGuru's the autodetection can
+ be overridden with the bank1_types module param. For all 3 known
+ revison 1 motherboards the correct use of this param is:
+ bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
+ You may also need to specify the fan_sensors option for these boards
+ fan_sensors=5
+
+.. [2] There is a separate abituguru3 driver for these motherboards,
+ the abituguru (without the 3 !) driver will not work on these
+ motherboards (and vice versa)!
+
+Authors:
+ - Hans de Goede <j.w.r.degoede@hhs.nl>,
+ - (Initial reverse engineering done by Olle Sandberg
+ <ollebull@gmail.com>)
+
+
+Module Parameters
+-----------------
+
+* force: bool
+ Force detection. Note this parameter only causes the
+ detection to be skipped, and thus the insmod to
+ succeed. If the uGuru can't be read the actual hwmon
+ driver will not load and thus no hwmon device will get
+ registered.
+* bank1_types: int[]
+ Bank1 sensortype autodetection override:
+
+ * -1 autodetect (default)
+ * 0 volt sensor
+ * 1 temp sensor
+ * 2 not connected
+* fan_sensors: int
+ Tell the driver how many fan speed sensors there are
+ on your motherboard. Default: 0 (autodetect).
+* pwms: int
+ Tell the driver how many fan speed controls (fan
+ pwms) your motherboard has. Default: 0 (autodetect).
+* verbose: int
+ How verbose should the driver be? (0-3):
+
+ * 0 normal output
+ * 1 + verbose error reporting
+ * 2 + sensors type probing info (default)
+ * 3 + retryable error reporting
+
+ Default: 2 (the driver is still in the testing phase)
+
+Notice: if you need any of the first three options above, please insmod the
+driver with verbose set to 3 and mail me <j.w.r.degoede@hhs.nl> the output of:
+dmesg | grep abituguru
+
+
+Description
+-----------
+
+This driver supports the hardware monitoring features of the first and
+second revision of the Abit uGuru chip found on Abit uGuru featuring
+motherboards (most modern Abit motherboards).
+
+The first and second revision of the uGuru chip in reality is a Winbond
+W83L950D in disguise (despite Abit claiming it is "a new microprocessor
+designed by the ABIT Engineers"). Unfortunately this doesn't help since the
+W83L950D is a generic microcontroller with a custom Abit application running
+on it.
+
+Despite Abit not releasing any information regarding the uGuru, Olle
+Sandberg <ollebull@gmail.com> has managed to reverse engineer the sensor part
+of the uGuru. Without his work this driver would not have been possible.
+
+Known Issues
+------------
+
+The voltage and frequency control parts of the Abit uGuru are not supported.
+
+.. toctree::
+ :maxdepth: 1
+
+ abituguru-datasheet.rst
diff --git a/Documentation/hwmon/abituguru3 b/Documentation/hwmon/abituguru3.rst
index a6ccfe4bb6aa..514f11f41e8b 100644
--- a/Documentation/hwmon/abituguru3
+++ b/Documentation/hwmon/abituguru3.rst
@@ -3,41 +3,51 @@ Kernel driver abituguru3
Supported chips:
* Abit uGuru revision 3 (Hardware Monitor part, reading only)
+
Prefix: 'abituguru3'
+
Addresses scanned: ISA 0x0E0
+
Datasheet: Not available, this driver is based on reverse engineering.
+
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
firmware and thus effectivly many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
- uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7)
- uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
- uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
- uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
- AW9D-MAX)
+
+ - uGuru 1.00 ~ 1.24 (AI7, KV8-MAX3, AN7)
+ - uGuru 2.0.0.0 ~ 2.0.4.2 (KV8-PRO)
+ - uGuru 2.1.0.0 ~ 2.1.2.8 (AS8, AV8, AA8, AG8, AA8XE, AX8)
+ - uGuru 2.3.0.0 ~ 2.3.0.9 (AN8)
+ - uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
+ AW9D-MAX)
+
The abituguru3 driver is only for revison 3.0.x.x motherboards,
this driver will not work on older motherboards. For older
motherboards use the abituguru (without the 3 !) driver.
Authors:
- Hans de Goede <j.w.r.degoede@hhs.nl>,
- (Initial reverse engineering done by Louis Kruger)
+ - Hans de Goede <j.w.r.degoede@hhs.nl>,
+ - (Initial reverse engineering done by Louis Kruger)
Module Parameters
-----------------
-* force: bool Force detection. Note this parameter only causes the
+* force: bool
+ Force detection. Note this parameter only causes the
detection to be skipped, and thus the insmod to
succeed. If the uGuru can't be read the actual hwmon
driver will not load and thus no hwmon device will get
registered.
-* verbose: bool Should the driver be verbose?
- 0/off/false normal output
- 1/on/true + verbose error reporting (default)
+* verbose: bool
+ Should the driver be verbose?
+
+ * 0/off/false normal output
+ * 1/on/true + verbose error reporting (default)
+
Default: 1 (the driver is still in the testing phase)
Description
@@ -62,4 +72,4 @@ neither is writing any of the sensor settings and writing / reading the
fanspeed control registers (FanEQ)
If you encounter any problems please mail me <j.w.r.degoede@hhs.nl> and
-include the output of: "dmesg | grep abituguru"
+include the output of: `dmesg | grep abituguru`
diff --git a/Documentation/hwmon/abx500 b/Documentation/hwmon/abx500.rst
index 319a058cec7c..3d88b2ce0f00 100644
--- a/Documentation/hwmon/abx500
+++ b/Documentation/hwmon/abx500.rst
@@ -2,14 +2,18 @@ Kernel driver abx500
====================
Supported chips:
+
* ST-Ericsson ABx500 series
+
Prefix: 'abx500'
+
Addresses scanned: -
+
Datasheet: http://www.stericsson.com/developers/documentation.jsp
Authors:
- Martin Persson <martin.persson@stericsson.com>
- Hongbo Zhang <hongbo.zhang@linaro.org>
+ Martin Persson <martin.persson@stericsson.com>
+ Hongbo Zhang <hongbo.zhang@linaro.org>
Description
-----------
diff --git a/Documentation/hwmon/acpi_power_meter b/Documentation/hwmon/acpi_power_meter.rst
index c80399a00c50..4a0941ade0ca 100644
--- a/Documentation/hwmon/acpi_power_meter
+++ b/Documentation/hwmon/acpi_power_meter.rst
@@ -4,8 +4,11 @@ Kernel driver power_meter
This driver talks to ACPI 4.0 power meters.
Supported systems:
+
* Any recent system with ACPI 4.0.
+
Prefix: 'power_meter'
+
Datasheet: http://acpi.info/, section 10.4.
Author: Darrick J. Wong
@@ -18,26 +21,26 @@ the ACPI 4.0 spec (Chapter 10.4). These devices have a simple set of
features--a power meter that returns average power use over a configurable
interval, an optional capping mechanism, and a couple of trip points. The
sysfs interface conforms with the specification outlined in the "Power" section
-of Documentation/hwmon/sysfs-interface.
+of Documentation/hwmon/sysfs-interface.rst.
Special Features
----------------
-The power[1-*]_is_battery knob indicates if the power supply is a battery.
-Both power[1-*]_average_{min,max} must be set before the trip points will work.
+The `power[1-*]_is_battery` knob indicates if the power supply is a battery.
+Both `power[1-*]_average_{min,max}` must be set before the trip points will work.
When both of them are set, an ACPI event will be broadcast on the ACPI netlink
socket and a poll notification will be sent to the appropriate
-power[1-*]_average sysfs file.
+`power[1-*]_average` sysfs file.
-The power[1-*]_{model_number, serial_number, oem_info} fields display arbitrary
-strings that ACPI provides with the meter. The measures/ directory contains
-symlinks to the devices that this meter measures.
+The `power[1-*]_{model_number, serial_number, oem_info}` fields display
+arbitrary strings that ACPI provides with the meter. The measures/ directory
+contains symlinks to the devices that this meter measures.
Some computers have the ability to enforce a power cap in hardware. If this is
-the case, the power[1-*]_cap and related sysfs files will appear. When the
+the case, the `power[1-*]_cap` and related sysfs files will appear. When the
average power consumption exceeds the cap, an ACPI event will be broadcast on
the netlink event socket and a poll notification will be sent to the
-appropriate power[1-*]_alarm file to indicate that capping has begun, and the
+appropriate `power[1-*]_alarm` file to indicate that capping has begun, and the
hardware has taken action to reduce power consumption. Most likely this will
result in reduced performance.
@@ -46,6 +49,6 @@ all cases the ACPI event will be broadcast on the ACPI netlink event socket as
well as sent as a poll notification to a sysfs file. The events are as
follows:
-power[1-*]_cap will be notified if the firmware changes the power cap.
-power[1-*]_interval will be notified if the firmware changes the averaging
+`power[1-*]_cap` will be notified if the firmware changes the power cap.
+`power[1-*]_interval` will be notified if the firmware changes the averaging
interval.
diff --git a/Documentation/hwmon/ad7314 b/Documentation/hwmon/ad7314.rst
index 1912549c7467..bf389736bcd1 100644
--- a/Documentation/hwmon/ad7314
+++ b/Documentation/hwmon/ad7314.rst
@@ -2,14 +2,23 @@ Kernel driver ad7314
====================
Supported chips:
+
* Analog Devices AD7314
+
Prefix: 'ad7314'
+
Datasheet: Publicly available at Analog Devices website.
+
* Analog Devices ADT7301
+
Prefix: 'adt7301'
+
Datasheet: Publicly available at Analog Devices website.
+
* Analog Devices ADT7302
+
Prefix: 'adt7302'
+
Datasheet: Publicly available at Analog Devices website.
Description
diff --git a/Documentation/hwmon/adc128d818 b/Documentation/hwmon/adc128d818.rst
index 39c95004dabc..6753468932ab 100644
--- a/Documentation/hwmon/adc128d818
+++ b/Documentation/hwmon/adc128d818.rst
@@ -2,11 +2,14 @@ Kernel driver adc128d818
========================
Supported chips:
+
* Texas Instruments ADC818D818
+
Prefix: 'adc818d818'
+
Addresses scanned: I2C 0x1d, 0x1e, 0x1f, 0x2d, 0x2e, 0x2f
- Datasheet: Publicly available at the TI website
- http://www.ti.com/
+
+ Datasheet: Publicly available at the TI website http://www.ti.com/
Author: Guenter Roeck
diff --git a/Documentation/hwmon/adm1021 b/Documentation/hwmon/adm1021.rst
index 02ad96cf9b2b..6cbb0f75fe00 100644
--- a/Documentation/hwmon/adm1021
+++ b/Documentation/hwmon/adm1021.rst
@@ -2,51 +2,91 @@ Kernel driver adm1021
=====================
Supported chips:
+
* Analog Devices ADM1021
+
Prefix: 'adm1021'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Analog Devices website
+
* Analog Devices ADM1021A/ADM1023
+
Prefix: 'adm1023'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Analog Devices website
+
* Genesys Logic GL523SM
+
Prefix: 'gl523sm'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet:
+
* Maxim MAX1617
+
Prefix: 'max1617'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Maxim website
+
* Maxim MAX1617A
+
Prefix: 'max1617a'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Maxim website
+
* National Semiconductor LM84
+
Prefix: 'lm84'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
+
* Philips NE1617
+
Prefix: 'max1617' (probably detected as a max1617)
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Philips website
+
* Philips NE1617A
+
Prefix: 'max1617' (probably detected as a max1617)
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Philips website
+
* TI THMC10
+
Prefix: 'thmc10'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the TI website
+
* Onsemi MC1066
+
Prefix: 'mc1066'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the Onsemi website
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>
Module Parameters
-----------------
diff --git a/Documentation/hwmon/adm1025 b/Documentation/hwmon/adm1025.rst
index 99f05049c68a..283e65e348a5 100644
--- a/Documentation/hwmon/adm1025
+++ b/Documentation/hwmon/adm1025.rst
@@ -2,23 +2,32 @@ Kernel driver adm1025
=====================
Supported chips:
+
* Analog Devices ADM1025, ADM1025A
+
Prefix: 'adm1025'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: Publicly available at the Analog Devices website
+
* Philips NE1619
+
Prefix: 'ne1619'
+
Addresses scanned: I2C 0x2c - 0x2d
+
Datasheet: Publicly available at the Philips website
The NE1619 presents some differences with the original ADM1025:
+
* Only two possible addresses (0x2c - 0x2d).
* No temperature offset register, but we don't use it anyway.
* No INT mode for pin 16. We don't play with it anyway.
Authors:
- Chen-Yuan Wu <gwu@esoft.com>,
- Jean Delvare <jdelvare@suse.de>
+ - Chen-Yuan Wu <gwu@esoft.com>,
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/adm1026 b/Documentation/hwmon/adm1026.rst
index d8fabe0c23ac..35d63e6498a3 100644
--- a/Documentation/hwmon/adm1026
+++ b/Documentation/hwmon/adm1026.rst
@@ -3,28 +3,36 @@ Kernel driver adm1026
Supported chips:
* Analog Devices ADM1026
+
Prefix: 'adm1026'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADM1026
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADM1026
Authors:
- Philip Pokorny <ppokorny@penguincomputing.com> for Penguin Computing
- Justin Thiessen <jthiessen@penguincomputing.com>
+ - Philip Pokorny <ppokorny@penguincomputing.com> for Penguin Computing
+ - Justin Thiessen <jthiessen@penguincomputing.com>
Module Parameters
-----------------
* gpio_input: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as inputs
+ List of GPIO pins (0-16) to program as inputs
+
* gpio_output: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as outputs
+ List of GPIO pins (0-16) to program as outputs
+
* gpio_inverted: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as inverted
+ List of GPIO pins (0-16) to program as inverted
+
* gpio_normal: int array (min = 1, max = 17)
- List of GPIO pins (0-16) to program as normal/non-inverted
+ List of GPIO pins (0-16) to program as normal/non-inverted
+
* gpio_fan: int array (min = 1, max = 8)
- List of GPIO pins (0-7) to program as fan tachs
+ List of GPIO pins (0-7) to program as fan tachs
Description
diff --git a/Documentation/hwmon/adm1031 b/Documentation/hwmon/adm1031.rst
index a143117c99cb..a677c3ab5574 100644
--- a/Documentation/hwmon/adm1031
+++ b/Documentation/hwmon/adm1031.rst
@@ -3,20 +3,28 @@ Kernel driver adm1031
Supported chips:
* Analog Devices ADM1030
+
Prefix: 'adm1030'
+
Addresses scanned: I2C 0x2c to 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0%2C2877%2CADM1030%2C00.html
+
+ http://www.analog.com/en/prod/0%2C2877%2CADM1030%2C00.html
* Analog Devices ADM1031
+
Prefix: 'adm1031'
+
Addresses scanned: I2C 0x2c to 0x2e
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/en/prod/0%2C2877%2CADM1031%2C00.html
+
+ http://www.analog.com/en/prod/0%2C2877%2CADM1031%2C00.html
Authors:
- Alexandre d'Alton <alex@alexdalton.org>
- Jean Delvare <jdelvare@suse.de>
+ - Alexandre d'Alton <alex@alexdalton.org>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275.rst
index 5e277b0d91ce..9a1913e5b4d9 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275.rst
@@ -2,29 +2,53 @@ Kernel driver adm1275
=====================
Supported chips:
+
* Analog Devices ADM1075
+
Prefix: 'adm1075'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1075.pdf
+
* Analog Devices ADM1272
+
Prefix: 'adm1272'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1272.pdf
+
* Analog Devices ADM1275
+
Prefix: 'adm1275'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1275.pdf
+
* Analog Devices ADM1276
+
Prefix: 'adm1276'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
+
* Analog Devices ADM1278
+
Prefix: 'adm1278'
+
Addresses scanned: -
+
Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1278.pdf
+
* Analog Devices ADM1293/ADM1294
+
Prefix: 'adm1293', 'adm1294'
+
Addresses scanned: -
+
Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/ADM1293_1294.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -44,7 +68,7 @@ integrated 12 bit analog-to-digital converter (ADC), accessed using a
PMBus interface.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -66,7 +90,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -75,6 +99,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write, history reset
attributes are write-only, all other attributes are read-only.
+======================= =======================================================
inX_label "vin1" or "vout1" depending on chip variant and
configuration. On ADM1075, ADM1293, and ADM1294,
vout1 reports the voltage on the VAUX pin.
@@ -120,3 +145,4 @@ temp1_reset_history Write any value to reset history.
Temperature attributes are supported on ADM1272 and
ADM1278.
+======================= =======================================================
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240.rst
index 9b174fc700cc..91063b0f4c6f 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240.rst
@@ -2,30 +2,43 @@ Kernel driver adm9240
=====================
Supported chips:
+
* Analog Devices ADM9240
+
Prefix: 'adm9240'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/UploadedFiles/Data_Sheets/79857778ADM9240_0.pdf
+
+ http://www.analog.com/UploadedFiles/Data_Sheets/79857778ADM9240_0.pdf
* Dallas Semiconductor DS1780
+
Prefix: 'ds1780'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the Dallas Semiconductor (Maxim) website
- http://pdfserv.maxim-ic.com/en/ds/DS1780.pdf
+
+ http://pdfserv.maxim-ic.com/en/ds/DS1780.pdf
* National Semiconductor LM81
+
Prefix: 'lm81'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/ds.cgi/LM/LM81.pdf
+
+ http://www.national.com/ds.cgi/LM/LM81.pdf
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Michiel Rook <michiel@grendelproject.nl>,
- Grant Coady <gcoady.lk@gmail.com> with guidance
- from Jean Delvare <jdelvare@suse.de>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Michiel Rook <michiel@grendelproject.nl>,
+ - Grant Coady <gcoady.lk@gmail.com> with guidance
+ from Jean Delvare <jdelvare@suse.de>
Interface
---------
@@ -87,11 +100,13 @@ rpm = (22500 * 60) / (count * divider)
Automatic fan clock divider
* User sets 0 to fan_min limit
+
- low speed alarm is disabled
- fan clock divider not changed
- auto fan clock adjuster enabled for valid fan speed reading
* User sets fan_min limit too low
+
- low speed alarm is enabled
- fan clock divider set to max
- fan_min set to register value 254 which corresponds
@@ -101,18 +116,20 @@ Automatic fan clock divider
- auto fan clock adjuster disabled
* User sets reasonable fan speed
+
- low speed alarm is enabled
- fan clock divider set to suit fan_min
- auto fan clock adjuster enabled: adjusts fan_min
* User sets unreasonably high low fan speed limit
+
- resolution of the low speed limit may be reduced
- alarm will be asserted
- auto fan clock adjuster enabled: adjusts fan_min
- * fan speed may be displayed as zero until the auto fan clock divider
- adjuster brings fan speed clock divider back into chip measurement
- range, this will occur within a few measurement cycles.
+ * fan speed may be displayed as zero until the auto fan clock divider
+ adjuster brings fan speed clock divider back into chip measurement
+ range, this will occur within a few measurement cycles.
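
For reference, a minimal sketch of the rpm formula quoted above
(rpm = (22500 * 60) / (count * divider)); the helper is illustrative and not
part of the driver::

  static inline int adm9240_count_to_rpm(int count, int divider)
  {
          if (count == 0 || divider == 0)
                  return 0;                   /* avoid division by zero */
          return (22500 * 60) / (count * divider);
  }
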
Analog Output
-------------
@@ -122,16 +139,21 @@ power up or reset. This doesn't do much on the test Intel SE440BX-2.
Voltage Monitor
+^^^^^^^^^^^^^^^
+
Voltage (IN) measurement is internally scaled:
+ === =========== =========== ========= ==========
nr label nominal maximum resolution
- mV mV mV
+ mV mV mV
+ === =========== =========== ========= ==========
0 +2.5V 2500 3320 13.0
1 Vccp1 2700 3600 14.1
2 +3.3V 3300 4380 17.2
3 +5V 5000 6640 26.0
4 +12V 12000 15940 62.5
5 Vccp2 2700 3600 14.1
+ === =========== =========== ========= ==========
The reading is an unsigned 8-bit value, nominal voltage measurement is
represented by a reading of 192, being 3/4 of the measurement range.
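
As a hedged illustration of this scaling (a reading of 192 equals the nominal
voltage for the channel), the raw value can be converted per channel roughly
like this; the nominal values come from the table above, the code itself is a
sketch and not the driver's::

  static const int adm9240_nominal_mv[6] = {
          2500, 2700, 3300, 5000, 12000, 2700,    /* in0 .. in5 */
  };

  static inline long adm9240_raw_to_mv(int channel, unsigned char raw)
  {
          return (long)raw * adm9240_nominal_mv[channel] / 192;
  }
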
@@ -159,8 +181,9 @@ Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file.
Alarm flags reported as 16-bit word
+ === ============= ==========================
bit label comment
- --- ------------- --------------------------
+ === ============= ==========================
0 +2.5 V_Error high or low limit exceeded
1 VCCP_Error high or low limit exceeded
2 +3.3 V_Error high or low limit exceeded
@@ -171,6 +194,7 @@ Alarm flags reported as 16-bit word
8 +12 V_Error high or low limit exceeded
9 VCCP2_Error high or low limit exceeded
12 Chassis_Error CI pin went high
+ === ============= ==========================
Remaining bits are reserved and thus undefined. It is important to note
that alarm bits may be cleared on read, user-space may latch alarms and
diff --git a/Documentation/hwmon/ads1015 b/Documentation/hwmon/ads1015.rst
index 02d2a459385f..e0951c4e57bb 100644
--- a/Documentation/hwmon/ads1015
+++ b/Documentation/hwmon/ads1015.rst
@@ -2,17 +2,25 @@ Kernel driver ads1015
=====================
Supported chips:
+
* Texas Instruments ADS1015
+
Prefix: 'ads1015'
- Datasheet: Publicly available at the Texas Instruments website :
- http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+
+ Datasheet: Publicly available at the Texas Instruments website:
+
+ http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+
* Texas Instruments ADS1115
+
Prefix: 'ads1115'
- Datasheet: Publicly available at the Texas Instruments website :
- http://focus.ti.com/lit/ds/symlink/ads1115.pdf
+
+ Datasheet: Publicly available at the Texas Instruments website:
+
+ http://focus.ti.com/lit/ds/symlink/ads1115.pdf
Authors:
- Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
Description
-----------
@@ -24,14 +32,15 @@ This device is a 12/16-bit A-D converter with 4 inputs.
The inputs can be used single ended or in certain differential combinations.
The inputs can be made available by 8 sysfs input files in0_input - in7_input:
-in0: Voltage over AIN0 and AIN1.
-in1: Voltage over AIN0 and AIN3.
-in2: Voltage over AIN1 and AIN3.
-in3: Voltage over AIN2 and AIN3.
-in4: Voltage over AIN0 and GND.
-in5: Voltage over AIN1 and GND.
-in6: Voltage over AIN2 and GND.
-in7: Voltage over AIN3 and GND.
+
+ - in0: Voltage over AIN0 and AIN1.
+ - in1: Voltage over AIN0 and AIN3.
+ - in2: Voltage over AIN1 and AIN3.
+ - in3: Voltage over AIN2 and AIN3.
+ - in4: Voltage over AIN0 and GND.
+ - in5: Voltage over AIN1 and GND.
+ - in6: Voltage over AIN2 and GND.
+ - in7: Voltage over AIN3 and GND.
Which inputs are available can be configured using platform data or devicetree.
@@ -42,29 +51,34 @@ Platform Data
In linux/platform_data/ads1015.h platform data is defined, channel_data contains
configuration data for the used input combinations:
+
- pga is the programmable gain amplifier (values are full scale)
- 0: +/- 6.144 V
- 1: +/- 4.096 V
- 2: +/- 2.048 V
- 3: +/- 1.024 V
- 4: +/- 0.512 V
- 5: +/- 0.256 V
+
+ - 0: +/- 6.144 V
+ - 1: +/- 4.096 V
+ - 2: +/- 2.048 V
+ - 3: +/- 1.024 V
+ - 4: +/- 0.512 V
+ - 5: +/- 0.256 V
+
- data_rate in samples per second
- 0: 128
- 1: 250
- 2: 490
- 3: 920
- 4: 1600
- 5: 2400
- 6: 3300
-
-Example:
-struct ads1015_platform_data data = {
+
+ - 0: 128
+ - 1: 250
+ - 2: 490
+ - 3: 920
+ - 4: 1600
+ - 5: 2400
+ - 6: 3300
+
+Example::
+
+ struct ads1015_platform_data data = {
.channel_data = {
[2] = { .enabled = true, .pga = 1, .data_rate = 0 },
[4] = { .enabled = true, .pga = 4, .data_rate = 5 },
}
-};
+ };
In this case only in2_input (FS +/- 4.096 V, 128 SPS) and in4_input
(FS +/- 0.512 V, 2400 SPS) would be created.
diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828.rst
index f6e263e0f607..b830b490cfe4 100644
--- a/Documentation/hwmon/ads7828
+++ b/Documentation/hwmon/ads7828.rst
@@ -2,20 +2,27 @@ Kernel driver ads7828
=====================
Supported chips:
+
* Texas Instruments/Burr-Brown ADS7828
+
Prefix: 'ads7828'
+
Datasheet: Publicly available at the Texas Instruments website:
- http://focus.ti.com/lit/ds/symlink/ads7828.pdf
+
+ http://focus.ti.com/lit/ds/symlink/ads7828.pdf
* Texas Instruments ADS7830
+
Prefix: 'ads7830'
+
Datasheet: Publicly available at the Texas Instruments website:
- http://focus.ti.com/lit/ds/symlink/ads7830.pdf
+
+ http://focus.ti.com/lit/ds/symlink/ads7830.pdf
Authors:
- Steve Hardy <shardy@redhat.com>
- Vivien Didelot <vivien.didelot@savoirfairelinux.com>
- Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
+ - Steve Hardy <shardy@redhat.com>
+ - Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ - Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
Platform data
-------------
@@ -24,16 +31,16 @@ The ads7828 driver accepts an optional ads7828_platform_data structure (defined
in include/linux/platform_data/ads7828.h). The structure fields are:
* diff_input: (bool) Differential operation
- set to true for differential mode, false for default single ended mode.
+ set to true for differential mode, false for default single ended mode.
* ext_vref: (bool) External reference
- set to true if it operates with an external reference, false for default
- internal reference.
+ set to true if it operates with an external reference, false for default
+ internal reference.
* vref_mv: (unsigned int) Voltage reference
- if using an external reference, set this to the reference voltage in mV,
- otherwise it will default to the internal value (2500mV). This value will be
- bounded with limits accepted by the chip, described in the datasheet.
+ if using an external reference, set this to the reference voltage in mV,
+ otherwise it will default to the internal value (2500mV). This value will be
+ bounded with limits accepted by the chip, described in the datasheet.
If no structure is provided, the configuration defaults to single ended
operation and internal voltage reference (2.5V).
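
For example, a board might fill in the structure like this (a sketch only;
the external 3300 mV reference is an assumed board-specific value, not
something taken from the datasheet)::

  #include <linux/platform_data/ads7828.h>

  static struct ads7828_platform_data board_ads7828_pdata = {
          .diff_input = true,   /* differential mode */
          .ext_vref   = true,   /* use an external reference */
          .vref_mv    = 3300,   /* assumed external reference, in mV */
  };
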
diff --git a/Documentation/hwmon/adt7410 b/Documentation/hwmon/adt7410.rst
index 9817941e5f19..24caaa83c8ec 100644
--- a/Documentation/hwmon/adt7410
+++ b/Documentation/hwmon/adt7410.rst
@@ -2,26 +2,45 @@ Kernel driver adt7410
=====================
Supported chips:
+
* Analog Devices ADT7410
+
Prefix: 'adt7410'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7410.pdf
* Analog Devices ADT7420
+
Prefix: 'adt7420'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7420.pdf
+
* Analog Devices ADT7310
+
Prefix: 'adt7310'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7310.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7310.pdf
+
* Analog Devices ADT7320
+
Prefix: 'adt7320'
+
Addresses scanned: None
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/static/imported-files/data_sheets/ADT7320.pdf
+
+ http://www.analog.com/static/imported-files/data_sheets/ADT7320.pdf
Author: Hartmut Knaack <knaack.h@gmx.de>
@@ -61,13 +80,15 @@ The device is set to 16 bit resolution and comparator mode.
sysfs-Interface
---------------
-temp#_input - temperature input
-temp#_min - temperature minimum setpoint
-temp#_max - temperature maximum setpoint
-temp#_crit - critical temperature setpoint
-temp#_min_hyst - hysteresis for temperature minimum (read-only)
-temp#_max_hyst - hysteresis for temperature maximum (read/write)
-temp#_crit_hyst - hysteresis for critical temperature (read-only)
-temp#_min_alarm - temperature minimum alarm flag
-temp#_max_alarm - temperature maximum alarm flag
-temp#_crit_alarm - critical temperature alarm flag
+======================== ====================================================
+temp#_input temperature input
+temp#_min temperature minimum setpoint
+temp#_max temperature maximum setpoint
+temp#_crit critical temperature setpoint
+temp#_min_hyst hysteresis for temperature minimum (read-only)
+temp#_max_hyst hysteresis for temperature maximum (read/write)
+temp#_crit_hyst hysteresis for critical temperature (read-only)
+temp#_min_alarm temperature minimum alarm flag
+temp#_max_alarm temperature maximum alarm flag
+temp#_crit_alarm critical temperature alarm flag
+======================== ====================================================
diff --git a/Documentation/hwmon/adt7411 b/Documentation/hwmon/adt7411.rst
index 1632960f9745..57ad16fb216a 100644
--- a/Documentation/hwmon/adt7411
+++ b/Documentation/hwmon/adt7411.rst
@@ -2,9 +2,13 @@ Kernel driver adt7411
=====================
Supported chips:
+
* Analog Devices ADT7411
+
Prefix: 'adt7411'
+
Addresses scanned: 0x48, 0x4a, 0x4b
+
Datasheet: Publicly available at the Analog Devices website
Author: Wolfram Sang (based on adt7470 by Darrick J. Wong)
@@ -26,15 +30,19 @@ Check the datasheet for details.
sysfs-Interface
---------------
-in0_input - vdd voltage input
-in[1-8]_input - analog 1-8 input
-temp1_input - temperature input
+================ =================
+in0_input vdd voltage input
+in[1-8]_input analog 1-8 input
+temp1_input temperature input
+================ =================
Besides standard interfaces, this driver adds (0 = off, 1 = on):
- adc_ref_vdd - Use vdd as reference instead of 2.25 V
- fast_sampling - Sample at 22.5 kHz instead of 1.4 kHz, but drop filters
- no_average - Turn off averaging over 16 samples
+ ============== =======================================================
+ adc_ref_vdd Use vdd as reference instead of 2.25 V
+ fast_sampling Sample at 22.5 kHz instead of 1.4 kHz, but drop filters
+ no_average Turn off averaging over 16 samples
+ ============== =======================================================
Notes
-----
diff --git a/Documentation/hwmon/adt7462 b/Documentation/hwmon/adt7462.rst
index ec660b328275..139e19696188 100644
--- a/Documentation/hwmon/adt7462
+++ b/Documentation/hwmon/adt7462.rst
@@ -1,10 +1,14 @@
Kernel driver adt7462
-======================
+=====================
Supported chips:
+
* Analog Devices ADT7462
+
Prefix: 'adt7462'
+
Addresses scanned: I2C 0x58, 0x5C
+
Datasheet: Publicly available at the Analog Devices website
Author: Darrick J. Wong
@@ -57,11 +61,10 @@ Besides standard interfaces driver adds the following:
* pwm#_auto_point1_pwm and temp#_auto_point1_temp and
* pwm#_auto_point2_pwm and temp#_auto_point2_temp -
-point1: Set the pwm speed at a lower temperature bound.
-point2: Set the pwm speed at a higher temperature bound.
+ - point1: Set the pwm speed at a lower temperature bound.
+ - point2: Set the pwm speed at a higher temperature bound.
The ADT7462 will scale the pwm between the lower and higher pwm speed when
the temperature is between the two temperature boundaries. PWM values range
from 0 (off) to 255 (full speed). Fan speed will be set to maximum when the
temperature sensor associated with the PWM control exceeds temp#_max.
-
diff --git a/Documentation/hwmon/adt7470 b/Documentation/hwmon/adt7470.rst
index fe68e18a0c8d..d225f816e992 100644
--- a/Documentation/hwmon/adt7470
+++ b/Documentation/hwmon/adt7470.rst
@@ -2,9 +2,13 @@ Kernel driver adt7470
=====================
Supported chips:
+
* Analog Devices ADT7470
+
Prefix: 'adt7470'
+
Addresses scanned: I2C 0x2C, 0x2E, 0x2F
+
Datasheet: Publicly available at the Analog Devices website
Author: Darrick J. Wong
@@ -56,8 +60,8 @@ Besides standard interfaces driver adds the following:
* pwm#_auto_point1_pwm and pwm#_auto_point1_temp and
* pwm#_auto_point2_pwm and pwm#_auto_point2_temp -
-point1: Set the pwm speed at a lower temperature bound.
-point2: Set the pwm speed at a higher temperature bound.
+ - point1: Set the pwm speed at a lower temperature bound.
+ - point2: Set the pwm speed at a higher temperature bound.
The ADT7470 will scale the pwm between the lower and higher pwm speed when
the temperature is between the two temperature boundaries. PWM values range
diff --git a/Documentation/hwmon/adt7475 b/Documentation/hwmon/adt7475.rst
index 01b46b290532..ef3ea1ea9bc1 100644
--- a/Documentation/hwmon/adt7475
+++ b/Documentation/hwmon/adt7475.rst
@@ -2,28 +2,44 @@ Kernel driver adt7475
=====================
Supported chips:
+
* Analog Devices ADT7473
+
Prefix: 'adt7473'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7475
+
Prefix: 'adt7475'
+
Addresses scanned: I2C 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7476
+
Prefix: 'adt7476'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
+
* Analog Devices ADT7490
+
Prefix: 'adt7490'
+
Addresses scanned: I2C 0x2C, 0x2D, 0x2E
+
Datasheet: Publicly available at the On Semiconductors website
Authors:
- Jordan Crouse
- Hans de Goede
- Darrick J. Wong (documentation)
- Jean Delvare
+ - Jordan Crouse
+ - Hans de Goede
+ - Darrick J. Wong (documentation)
+ - Jean Delvare
Description
@@ -82,14 +98,16 @@ ADT7490:
Sysfs Mapping
-------------
- ADT7490 ADT7476 ADT7475 ADT7473
- ------- ------- ------- -------
+==== =========== =========== ========= ==========
+in ADT7490 ADT7476 ADT7475 ADT7473
+==== =========== =========== ========= ==========
in0 2.5VIN (22) 2.5VIN (22) - -
in1 VCCP (23) VCCP (23) VCCP (14) VCCP (14)
in2 VCC (4) VCC (4) VCC (4) VCC (3)
in3 5VIN (20) 5VIN (20)
in4 12VIN (21) 12VIN (21)
in5 VTT (8)
+==== =========== =========== ========= ==========
Special Features
----------------
@@ -107,8 +125,8 @@ Fan Speed Control
The driver exposes two trip points per PWM channel.
-point1: Set the PWM speed at the lower temperature bound
-point2: Set the PWM speed at the higher temperature bound
+- point1: Set the PWM speed at the lower temperature bound
+- point2: Set the PWM speed at the higher temperature bound
The ADT747x will scale the PWM linearly between the lower and higher PWM
speed when the temperature is between the two temperature boundaries.
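
A rough sketch of this interpolation (illustrative only; it ignores the
hysteresis and stall behaviour described below)::

  static int adt747x_auto_pwm(int temp, int temp_lo, int pwm_lo,
                              int temp_hi, int pwm_hi)
  {
          if (temp <= temp_lo)
                  return pwm_lo;
          if (temp >= temp_hi)
                  return pwm_hi;
          return pwm_lo + (pwm_hi - pwm_lo) * (temp - temp_lo) /
                          (temp_hi - temp_lo);
  }
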
@@ -123,12 +141,12 @@ the PWM control exceeds temp#_max.
At Tmin - hysteresis the PWM output can either be off (0% duty cycle) or at the
minimum (i.e. auto_point1_pwm). This behaviour can be configured using the
-pwm[1-*]_stall_disable sysfs attribute. A value of 0 means the fans will shut
+`pwm[1-*]_stall_disable` sysfs attribute. A value of 0 means the fans will shut
off. A value of 1 means the fans will run at auto_point1_pwm.
The responsiveness of the ADT747x to temperature changes can be configured.
This allows smoothing of the fan speed transition. To set the transition time
-set the value in ms in the temp[1-*]_smoothing sysfs attribute.
+set the value in ms in the `temp[1-*]_smoothing` sysfs attribute.
Notes
-----
diff --git a/Documentation/hwmon/amc6821 b/Documentation/hwmon/amc6821.rst
index ced8359c50f8..5ddb2849da90 100644
--- a/Documentation/hwmon/amc6821
+++ b/Documentation/hwmon/amc6821.rst
@@ -2,9 +2,13 @@ Kernel driver amc6821
=====================
Supported chips:
+
Texas Instruments AMC6821
+
Prefix: 'amc6821'
+
Addresses scanned: 0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e, 0x4c, 0x4d, 0x4e
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/amc6821.html
Authors:
@@ -21,10 +25,11 @@ The pwm can be controlled either from software or automatically.
The driver provides the following sensor accesses in sysfs:
+======================= == ===============================================
temp1_input ro on-chip temperature
temp1_min rw "
temp1_max rw "
-temp1_crit rw "
+temp1_crit rw "
temp1_min_alarm ro "
temp1_max_alarm ro "
temp1_crit_alarm ro "
@@ -32,16 +37,16 @@ temp1_crit_alarm ro "
temp2_input ro remote temperature
temp2_min rw "
temp2_max rw "
-temp2_crit rw "
+temp2_crit rw "
temp2_min_alarm ro "
temp2_max_alarm ro "
temp2_crit_alarm ro "
temp2_fault ro "
-fan1_input ro tachometer speed
+fan1_input ro tachometer speed
fan1_min rw "
fan1_max rw "
-fan1_fault ro "
+fan1_fault ro "
fan1_div rw Fan divisor can be either 2 or 4.
pwm1 rw pwm1
@@ -87,6 +92,7 @@ temp2_auto_point3_temp rw Above this temperature fan runs at maximum
values which depend on temp2_auto_point2_temp
and pwm1_auto_point2_pwm. Read it out after
writing to get actual value.
+======================= == ===============================================
Module parameters
@@ -97,6 +103,6 @@ load the module with: init=0.
If your board BIOS doesn't initialize the chip, or you want
different settings, you can set the following parameters:
-init=1,
-pwminv: 0 default pwm output, 1 inverts pwm output.
+- init=1,
+- pwminv: 0 default pwm output, 1 inverts pwm output.
diff --git a/Documentation/hwmon/asb100 b/Documentation/hwmon/asb100.rst
index ab7365e139be..c2d5f97085fe 100644
--- a/Documentation/hwmon/asb100
+++ b/Documentation/hwmon/asb100.rst
@@ -2,9 +2,13 @@ Kernel driver asb100
====================
Supported Chips:
+
* Asus ASB100 and ASB100-A "Bach"
+
Prefix: 'asb100'
+
Addresses scanned: I2C 0x2d
+
Datasheet: none released
Author: Mark M. Hoffman <mhoffman@lightlink.com>
@@ -41,32 +45,29 @@ processor itself. It is a value in volts.
Alarms: (TODO question marks indicate may or may not work)
-0x0001 => in0 (?)
-0x0002 => in1 (?)
-0x0004 => in2
-0x0008 => in3
-0x0010 => temp1 (1)
-0x0020 => temp2
-0x0040 => fan1
-0x0080 => fan2
-0x0100 => in4
-0x0200 => in5 (?) (2)
-0x0400 => in6 (?) (2)
-0x0800 => fan3
-0x1000 => chassis switch
-0x2000 => temp3
-
-Alarm Notes:
-
-(1) This alarm will only trigger if the hysteresis value is 127C.
-I.e. it behaves the same as w83781d.
-
-(2) The min and max registers for these values appear to
-be read-only or otherwise stuck at 0x00.
+- 0x0001 => in0 (?)
+- 0x0002 => in1 (?)
+- 0x0004 => in2
+- 0x0008 => in3
+- 0x0010 => temp1 [1]_
+- 0x0020 => temp2
+- 0x0040 => fan1
+- 0x0080 => fan2
+- 0x0100 => in4
+- 0x0200 => in5 (?) [2]_
+- 0x0400 => in6 (?) [2]_
+- 0x0800 => fan3
+- 0x1000 => chassis switch
+- 0x2000 => temp3
+
+.. [1] This alarm will only trigger if the hysteresis value is 127C.
+ I.e. it behaves the same as w83781d.
+
+.. [2] The min and max registers for these values appear to
+ be read-only or otherwise stuck at 0x00.
TODO:
-* Experiment with fan divisors > 8.
-* Experiment with temp. sensor types.
-* Are there really 13 voltage inputs? Probably not...
-* Cleanups, no doubt...
-
+ * Experiment with fan divisors > 8.
+ * Experiment with temp. sensor types.
+ * Are there really 13 voltage inputs? Probably not...
+ * Cleanups, no doubt...
diff --git a/Documentation/hwmon/asc7621 b/Documentation/hwmon/asc7621.rst
index 7287be7e1f21..b5a9fad0f172 100644
--- a/Documentation/hwmon/asc7621
+++ b/Documentation/hwmon/asc7621.rst
@@ -1,10 +1,15 @@
+=====================
Kernel driver asc7621
-==================
+=====================
Supported chips:
+
Andigilog aSC7621 and aSC7621a
+
Prefix: 'asc7621'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.fairview5.com/linux/asc7621/asc7621.pdf
Author:
@@ -73,8 +78,10 @@ Finally, we have added a tach disable function that turns off the tach
measurement system for individual tachs in order to save power. That is
in register 75h.
---
+--------------------------------------------------------------------------
+
aSC7621 Product Description
+===========================
The aSC7621 has a two wire digital interface compatible with SMBus 2.0.
Using a 10-bit ADC, the aSC7621 measures the temperature of two remote diode
@@ -102,6 +109,8 @@ System voltages of VCCP, 2.5V, 3.3V, 5.0V, and 12V motherboard power are
monitored efficiently with internal scaling resistors.
Features
+--------
+
- Supports PECI interface and monitors internal and remote thermal diodes
- 2-wire, SMBus 2.0 compliant, serial interface
- 10-bit ADC
@@ -110,7 +119,7 @@ Features
- Noise filtering of temperature reading for fan speed control
- 0.25C digital temperature sensor resolution
- 3 PWM fan speed control outputs for 2-, 3- or 4-wire fans and up to 4 fan
- tachometer inputs
+ tachometer inputs
- Enhanced measured temperature to Temperature Zone assignment.
- Provides high and low PWM frequency ranges
- 3 GPIO pins for custom use
@@ -123,17 +132,20 @@ Except where noted below, the sysfs entries created by this driver follow
the standards defined in "sysfs-interface".
temp1_source
+ = ===============================================
0 (default) peci_legacy = 0, Remote 1 Temperature
- peci_legacy = 1, PECI Processor Temperature 0
+ peci_legacy = 1, PECI Processor Temperature 0
1 Remote 1 Temperature
2 Remote 2 Temperature
3 Internal Temperature
4 PECI Processor Temperature 0
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
- 7 PECI Processor Temperature 3
+ 7 PECI Processor Temperature 3
+ = ===============================================
temp2_source
+ = ===============================================
0 (default) Internal Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
@@ -142,8 +154,10 @@ temp2_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
temp3_source
+ = ===============================================
0 (default) Remote 2 Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
@@ -152,10 +166,12 @@ temp3_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
temp4_source
+ = ===============================================
0 (default) peci_legacy = 0, PECI Processor Temperature 0
- peci_legacy = 1, Remote 1 Temperature
+ peci_legacy = 1, Remote 1 Temperature
1 Remote 1 Temperature
2 Remote 2 Temperature
3 Internal Temperature
@@ -163,58 +179,65 @@ temp4_source
5 PECI Processor Temperature 1
6 PECI Processor Temperature 2
7 PECI Processor Temperature 3
+ = ===============================================
-temp[1-4]_smoothing_enable
-temp[1-4]_smoothing_time
+temp[1-4]_smoothing_enable / temp[1-4]_smoothing_time
Smooths spikes in temp readings caused by noise.
Valid values in milliseconds are:
- 35000
- 17600
- 11800
- 7000
- 4400
- 3000
- 1600
- 800
+
+ * 35000
+ * 17600
+ * 11800
+ * 7000
+ * 4400
+ * 3000
+ * 1600
+ * 800
temp[1-4]_crit
When the corresponding zone temperature reaches this value,
ALL pwm outputs will got to 100%.
-temp[5-8]_input
-temp[5-8]_enable
+temp[5-8]_input / temp[5-8]_enable
The aSC7621 can also read temperatures provided by the processor
via the PECI bus. Usually these are "core" temps and are relative
to the point where the automatic thermal control circuit starts
throttling. This means that these are usually negative numbers.
pwm[1-3]_enable
+ =============== ========================================================
0 Fan off.
1 Fan on manual control.
2 Fan on automatic control and will run at the minimum pwm
- if the temperature for the zone is below the minimum.
- 3 Fan on automatic control but will be off if the temperature
- for the zone is below the minimum.
- 4-254 Ignored.
+ if the temperature for the zone is below the minimum.
+ 3 Fan on automatic control but will be off if the
+ temperature for the zone is below the minimum.
+ 4-254 Ignored.
255 Fan on full.
+ =============== ========================================================
pwm[1-3]_auto_channels
Bitmap as described in sysctl-interface with the following
exceptions...
+
Only the following combination of zones (and their corresponding masks)
are valid:
- 1
- 2
- 3
- 2,3
- 1,2,3
- 4
- 1,2,3,4
- Special values:
- 0 Disabled.
- 16 Fan on manual control.
- 31 Fan on full.
+ * 1
+ * 2
+ * 3
+ * 2,3
+ * 1,2,3
+ * 4
+ * 1,2,3,4
+
+ * Special values:
+
+ == ======================
+ 0 Disabled.
+ 16 Fan on manual control.
+ 31 Fan on full.
+ == ======================
pwm[1-3]_invert
@@ -226,22 +249,22 @@ pwm[1-3]_freq
PWM frequency in Hz
Valid values in Hz are:
- 10
- 15
- 23
- 30 (default)
- 38
- 47
- 62
- 94
- 23000
- 24000
- 25000
- 26000
- 27000
- 28000
- 29000
- 30000
+ * 10
+ * 15
+ * 23
+ * 30 (default)
+ * 38
+ * 47
+ * 62
+ * 94
+ * 23000
+ * 24000
+ * 25000
+ * 26000
+ * 27000
+ * 28000
+ * 29000
+ * 30000
Setting any other value will be ignored.
@@ -251,17 +274,17 @@ peci_enable
peci_avg
Input filter average time.
- 0 0 Sec. (no Smoothing) (default)
- 1 0.25 Sec.
- 2 0.5 Sec.
- 3 1.0 Sec.
- 4 2.0 Sec.
- 5 4.0 Sec.
- 6 8.0 Sec.
- 7 0.0 Sec.
+ * 0 0 Sec. (no Smoothing) (default)
+ * 1 0.25 Sec.
+ * 2 0.5 Sec.
+ * 3 1.0 Sec.
+ * 4 2.0 Sec.
+ * 5 4.0 Sec.
+ * 6 8.0 Sec.
+ * 7 0.0 Sec.
peci_legacy
-
+ = ============================================
0 Standard Mode (default)
Remote Diode 1 reading is associated with
Temperature Zone 1, PECI is associated with
@@ -270,10 +293,12 @@ peci_legacy
1 Legacy Mode
PECI is associated with Temperature Zone 1,
Remote Diode 1 is associated with Zone 4
+ = ============================================
peci_diode
Diode filter
+ = ====================
0 0.25 Sec.
1 1.1 Sec.
2 2.4 Sec. (default)
@@ -282,15 +307,20 @@ peci_diode
5 6.8 Sec.
6 10.2 Sec.
7 16.4 Sec.
+ = ====================
peci_4domain
Four domain enable
+ = ===============================================
0 1 or 2 Domains for enabled processors (default)
1 3 or 4 Domains for enabled processors
+ = ===============================================
peci_domain
Domain
+ = ==================================================
0 Processor contains a single domain (0) (default)
1 Processor contains two domains (0,1)
+ = ==================================================
diff --git a/Documentation/hwmon/aspeed-pwm-tacho b/Documentation/hwmon/aspeed-pwm-tacho.rst
index 7cfb34977460..6dcec845fbc7 100644
--- a/Documentation/hwmon/aspeed-pwm-tacho
+++ b/Documentation/hwmon/aspeed-pwm-tacho.rst
@@ -15,8 +15,10 @@ controller supports up to 16 tachometer inputs.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =====================================================
fanX_input ro provide current fan rotation value in RPM as reported
by the fan to the device.
pwmX rw get or set PWM fan control value. This is an integer
value between 0(off) and 255(full speed).
+=============== ======= =====================================================
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp.rst
index fec5a9bf755f..c609329e3bc4 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp.rst
@@ -3,20 +3,29 @@ Kernel driver coretemp
Supported chips:
* All Intel Core family
+
Prefix: 'coretemp'
- CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
- 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
- 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
- 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
- 0x36 (Cedar Trail Atom)
- Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
- Volume 3A: System Programming Guide
- http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
+
+ CPUID: family 0x6, models
+
+ - 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
+ - 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
+ - 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
+ - 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
+ - 0x36 (Cedar Trail Atom)
+
+ Datasheet:
+
+ Intel 64 and IA-32 Architectures Software Developer's Manual
+ Volume 3A: System Programming Guide
+
+ http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
Author: Rudolf Marek
Description
-----------
+
This driver permits reading the DTS (Digital Temperature Sensor) embedded
inside Intel CPUs. This driver can read both the per-core and per-package
temperature using the appropriate sensors. The per-package sensor is new;
@@ -35,14 +44,17 @@ may be raised, if the temperature grows enough (more than TjMax) to trigger
the Out-Of-Spec bit. Following table summarizes the exported sysfs files:
All Sysfs entries are named with their core_id (represented here by 'X').
-tempX_input - Core temperature (in millidegrees Celsius).
-tempX_max - All cooling devices should be turned on (on Core2).
-tempX_crit - Maximum junction temperature (in millidegrees Celsius).
-tempX_crit_alarm - Set when Out-of-spec bit is set, never clears.
- Correct CPU operation is no longer guaranteed.
-tempX_label - Contains string "Core X", where X is processor
- number. For Package temp, this will be "Physical id Y",
- where Y is the package number.
+
+================= ========================================================
+tempX_input Core temperature (in millidegrees Celsius).
+tempX_max All cooling devices should be turned on (on Core2).
+tempX_crit Maximum junction temperature (in millidegrees Celsius).
+tempX_crit_alarm Set when Out-of-spec bit is set, never clears.
+ Correct CPU operation is no longer guaranteed.
+tempX_label Contains string "Core X", where X is processor
+ number. For Package temp, this will be "Physical id Y",
+ where Y is the package number.
+================= ========================================================
On CPU models which support it, TjMax is read from a model-specific register.
On other models, it is set to an arbitrary value based on weak heuristics.
@@ -52,6 +64,7 @@ as a module parameter (tjmax).
Appendix A. Known TjMax lists (TBD):
Some information comes from ark.intel.com
+=============== =============================================== ================
Process Processor TjMax(C)
22nm Core i5/i7 Processors
@@ -179,3 +192,4 @@ Process Processor TjMax(C)
65nm Celeron Processors
T1700/1600 100
560/550/540/530 100
+=============== =============================================== ================
diff --git a/Documentation/hwmon/da9052 b/Documentation/hwmon/da9052.rst
index 5bc51346b689..c1c0f1f08904 100644
--- a/Documentation/hwmon/da9052
+++ b/Documentation/hwmon/da9052.rst
@@ -1,6 +1,12 @@
+Kernel driver da9052
+====================
+
Supported chips:
+
* Dialog Semiconductors DA9052-BC and DA9053-AA/Bx PMICs
+
Prefix: 'da9052'
+
Datasheet: Datasheet is not publicly available.
Authors: David Dajun Chen <dchen@diasemi.com>
@@ -15,17 +21,20 @@ different inputs. The track and hold circuit ensures stable input voltages at
the input of the ADC during the conversion.
The ADC is used to measure the following inputs:
-Channel 0: VDDOUT - measurement of the system voltage
-Channel 1: ICH - internal battery charger current measurement
-Channel 2: TBAT - output from the battery NTC
-Channel 3: VBAT - measurement of the battery voltage
-Channel 4: ADC_IN4 - high impedance input (0 - 2.5V)
-Channel 5: ADC_IN5 - high impedance input (0 - 2.5V)
-Channel 6: ADC_IN6 - high impedance input (0 - 2.5V)
-Channel 7: XY - TSI interface to measure the X and Y voltage of the touch
- screen resistive potentiometers
-Channel 8: Internal Tjunc. - sense (internal temp. sensor)
-Channel 9: VBBAT - measurement of the backup battery voltage
+
+========= ===================================================================
+Channel 0 VDDOUT - measurement of the system voltage
+Channel 1 ICH - internal battery charger current measurement
+Channel 2 TBAT - output from the battery NTC
+Channel 3 VBAT - measurement of the battery voltage
+Channel 4 ADC_IN4 - high impedance input (0 - 2.5V)
+Channel 5 ADC_IN5 - high impedance input (0 - 2.5V)
+Channel 6 ADC_IN6 - high impedance input (0 - 2.5V)
+Channel 7 XY - TSI interface to measure the X and Y voltage of the touch
+ screen resistive potentiometers
+Channel 8 Internal Tjunc. - sense (internal temp. sensor)
+Channel 9 VBBAT - measurement of the backup battery voltage
+========= ===================================================================
By using sysfs attributes we can measure the system voltage VDDOUT, the battery
charging current ICH, battery temperature TBAT, battery junction temperature
@@ -37,12 +46,15 @@ Voltage Monitoring
Voltages are sampled by a 10 bit ADC.
The battery voltage is calculated as:
+
Milli volt = ((ADC value * 1000) / 512) + 2500
The backup battery voltage is calculated as:
+
Milli volt = (ADC value * 2500) / 512;
The voltages on ADC channels 4, 5 and 6 are calculated as:
+
Milli volt = (ADC value * 2500) / 1023
Temperature Monitoring
@@ -52,10 +64,15 @@ Temperatures are sampled by a 10 bit ADC. Junction and battery temperatures
are monitored by the ADC channels.
The junction temperature is calculated:
+
Degrees celsius = 1.708 * (TJUNC_RES - T_OFFSET) - 108.8
+
The junction temperature attribute is supported by the driver.
The battery temperature is calculated:
- Degree Celsius = 1 / (t1 + 1/298)- 273
+
+ Degree Celsius = 1 / (t1 + 1/298) - 273
+
where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
+
Default values of R25, B, ITBAT are 10e3, 3380 and 50e-6 respectively.
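
A minimal sketch of the voltage conversions above, assuming the raw 10-bit
ADC readings are already available (the helper names are illustrative, not
the driver's actual functions)::

	/* Battery voltage: ((ADC value * 1000) / 512) + 2500 */
	static inline int da9052_vbat_mv(int adc)
	{
		return (adc * 1000) / 512 + 2500;
	}

	/* Backup battery voltage: (ADC value * 2500) / 512 */
	static inline int da9052_vbbat_mv(int adc)
	{
		return (adc * 2500) / 512;
	}

	/* ADC_IN4/5/6: (ADC value * 2500) / 1023 */
	static inline int da9052_adc_in_mv(int adc)
	{
		return (adc * 2500) / 1023;
	}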
diff --git a/Documentation/hwmon/da9055 b/Documentation/hwmon/da9055.rst
index 855c3f536e00..beae271a3312 100644
--- a/Documentation/hwmon/da9055
+++ b/Documentation/hwmon/da9055.rst
@@ -1,6 +1,11 @@
+Kernel driver da9055
+====================
+
Supported chips:
* Dialog Semiconductors DA9055 PMIC
+
Prefix: 'da9055'
+
Datasheet: Datasheet is not publicly available.
Authors: David Dajun Chen <dchen@diasemi.com>
@@ -15,11 +20,12 @@ different inputs. The track and hold circuit ensures stable input voltages at
the input of the ADC during the conversion.
The ADC is used to measure the following inputs:
-Channel 0: VDDOUT - measurement of the system voltage
-Channel 1: ADC_IN1 - high impedance input (0 - 2.5V)
-Channel 2: ADC_IN2 - high impedance input (0 - 2.5V)
-Channel 3: ADC_IN3 - high impedance input (0 - 2.5V)
-Channel 4: Internal Tjunc. - sense (internal temp. sensor)
+
+- Channel 0: VDDOUT - measurement of the system voltage
+- Channel 1: ADC_IN1 - high impedance input (0 - 2.5V)
+- Channel 2: ADC_IN2 - high impedance input (0 - 2.5V)
+- Channel 3: ADC_IN3 - high impedance input (0 - 2.5V)
+- Channel 4: Internal Tjunc. - sense (internal temp. sensor)
By using sysfs attributes we can measure the system voltage VDDOUT,
chip junction temperature and auxiliary channels voltages.
@@ -31,9 +37,11 @@ Voltages are sampled in a AUTO mode it can be manually sampled too and results
are stored in a 10 bit ADC.
The system voltage is calculated as:
+
Milli volt = ((ADC value * 1000) / 85) + 2500
The voltages on ADC channels 1, 2 and 3 are calculated as:
+
Milli volt = (ADC value * 1000) / 102
Temperature Monitoring
@@ -43,5 +51,7 @@ Temperatures are sampled by a 10 bit ADC. Junction temperatures
are monitored by the ADC channels.
The junction temperature is calculated:
+
Degrees celsius = -0.4084 * (ADC_RES - T_OFFSET) + 307.6332
+
The junction temperature attribute is supported by the driver.
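
Expressed in the millidegree units used by hwmon, the junction temperature
formula above corresponds roughly to the following (T_OFFSET is assumed to
have been read from the chip; the integer scaling is an illustration, not
the driver's exact code)::

	/* -0.4084 degC per LSB, offset 307.6332 degC, result in millidegrees C */
	static inline int da9055_tjunc_mc(int adc_res, int t_offset)
	{
		return 307633 - (4084 * (adc_res - t_offset)) / 10;
	}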
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737.rst
index 4d2935145a1c..82fcbc6b2b43 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737.rst
@@ -2,21 +2,37 @@ Kernel driver dme1737
=====================
Supported chips:
+
* SMSC DME1737 and compatibles (like Asus A8000)
+
Prefix: 'dme1737'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Provided by SMSC upon request and under NDA
+
* SMSC SCH3112, SCH3114, SCH3116
+
Prefix: 'sch311x'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Available on the Internet
+
* SMSC SCH5027
+
Prefix: 'sch5027'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Provided by SMSC upon request and under NDA
+
* SMSC SCH5127
+
Prefix: 'sch5127'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Provided by SMSC upon request and under NDA
Authors:
@@ -26,11 +42,14 @@ Authors:
Module Parameters
-----------------
-* force_start: bool Enables the monitoring of voltage, fan and temp inputs
+* force_start: bool
+ Enables the monitoring of voltage, fan and temp inputs
and PWM output control functions. Using this parameter
shouldn't be required since the BIOS usually takes care
of this.
-* probe_all_addr: bool Include non-standard LPC addresses 0x162e and 0x164e
+
+* probe_all_addr: bool
+ Include non-standard LPC addresses 0x162e and 0x164e
when probing for ISA devices. This is required for the
following boards:
- VIA EPIA SN18000
@@ -70,7 +89,8 @@ scaling resistors. The values returned by the driver therefore reflect true
millivolts and don't need scaling. The voltage inputs are mapped as follows
(the last column indicates the input ranges):
-DME1737, A8000:
+DME1737, A8000::
+
in0: +5VTR (+5V standby) 0V - 6.64V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -79,7 +99,8 @@ DME1737, A8000:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH311x:
+SCH311x::
+
in0: +2.5V 0V - 3.32V
in1: Vccp (processor core) 0V - 2V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -88,7 +109,8 @@ SCH311x:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH5027:
+SCH5027::
+
in0: +5VTR (+5V standby) 0V - 6.64V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -97,7 +119,8 @@ SCH5027:
in5: VTR (+3.3V standby) 0V - 4.38V
in6: Vbat (+3.0V) 0V - 4.38V
-SCH5127:
+SCH5127::
+
in0: +2.5 0V - 3.32V
in1: Vccp (processor core) 0V - 3V
in2: VCC (internal +3.3V) 0V - 4.38V
@@ -119,7 +142,7 @@ Celsius. The chip also features offsets for all 3 temperature inputs which -
when programmed - get added to the input readings. The chip does all the
scaling by itself and the driver therefore reports true temperatures that don't
need any user-space adjustments. The temperature inputs are mapped as follows
-(the last column indicates the input ranges):
+(the last column indicates the input ranges)::
temp1: Remote diode 1 (3904 type) temperature -127C - +127C
temp2: DME1737 internal temperature -127C - +127C
@@ -171,6 +194,7 @@ pwm[1-3]_auto_pwm_min, respectively. The thermal thresholds of the zones are
programmed via zone[1-3]_auto_point[1-3]_temp and
zone[1-3]_auto_point1_temp_hyst:
+ =============================== =======================================
pwm[1-3]_auto_point2_pwm full-speed duty-cycle (255, i.e., 100%)
pwm[1-3]_auto_point1_pwm low-speed duty-cycle
pwm[1-3]_auto_pwm_min min-speed duty-cycle
@@ -179,6 +203,7 @@ zone[1-3]_auto_point1_temp_hyst:
zone[1-3]_auto_point2_temp full-speed temp
zone[1-3]_auto_point1_temp low-speed temp
zone[1-3]_auto_point1_temp_hyst min-speed temp
+ =============================== =======================================
The chip adjusts the output duty-cycle linearly in the range of auto_point1_pwm
to auto_point2_pwm if the temperature of the associated zone is between
@@ -192,17 +217,21 @@ all PWM outputs are set to 100% duty-cycle.
Following is another representation of how the chip sets the output duty-cycle
based on the temperature of the associated thermal zone:
- Duty-Cycle Duty-Cycle
- Temperature Rising Temp Falling Temp
- ----------- ----------- ------------
+ =============== =============== =================
+ Temperature Duty-Cycle Duty-Cycle
+ Rising Temp Falling Temp
+ =============== =============== =================
full-speed full-speed full-speed
- < linearly adjusted duty-cycle >
+ - < linearly -
+ adjusted
+ duty-cycle >
low-speed low-speed low-speed
- min-speed low-speed
+ - min-speed low-speed
min-speed min-speed min-speed
- min-speed min-speed
+ - min-speed min-speed
+ =============== =============== =================
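
Within the linear region of the table the duty-cycle is a straight
interpolation between the two auto points; a simplified sketch (the
hysteresis and min-speed behaviour below the low-speed point are ignored,
and the names are illustrative only)::

	/* Linear ramp between auto_point1 and auto_point2 as described above */
	static int auto_duty_cycle(int temp, int t_low, int t_full,
				   int pwm_low, int pwm_full)
	{
		if (temp <= t_low)
			return pwm_low;		/* low-speed duty-cycle */
		if (temp >= t_full)
			return pwm_full;	/* full-speed duty-cycle (255) */
		return pwm_low + (temp - t_low) * (pwm_full - pwm_low) /
		       (t_full - t_low);
	}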
Sysfs Attributes
@@ -211,8 +240,9 @@ Sysfs Attributes
Following is a list of all sysfs attributes that the driver provides, their
permissions and a short description:
+=============================== ======= =======================================
Name Perm Description
----- ---- -----------
+=============================== ======= =======================================
cpu0_vid RO CPU core reference voltage in
millivolts.
vrm RW Voltage regulator module version
@@ -242,9 +272,10 @@ temp[1-3]_fault RO Temp input fault. Returns 1 if the chip
zone[1-3]_auto_channels_temp RO Temperature zone to temperature input
mapping. This attribute is a bitfield
and supports the following values:
- 1: temp1
- 2: temp2
- 4: temp3
+
+ - 1: temp1
+ - 2: temp2
+ - 4: temp3
zone[1-3]_auto_point1_temp_hyst RW Auto PWM temp point1 hysteresis. The
output of the corresponding PWM is set
to the pwm_auto_min value if the temp
@@ -275,9 +306,10 @@ pmw[1-3,5-6] RO/RW Duty-cycle of PWM output. Supported
manual mode.
pwm[1-3]_enable RW Enable of PWM outputs 1-3. Supported
values are:
- 0: turned off (output @ 100%)
- 1: manual mode
- 2: automatic mode
+
+ - 0: turned off (output @ 100%)
+ - 1: manual mode
+ - 2: automatic mode
pwm[5-6]_enable RO Enable of PWM outputs 5-6. Always
returns 1 since these 2 outputs are
hard-wired to manual mode.
@@ -294,11 +326,12 @@ pmw[1-3]_ramp_rate RW Ramp rate of PWM output. Determines how
pwm[1-3]_auto_channels_zone RW PWM output to temperature zone mapping.
This attribute is a bitfield and
supports the following values:
- 1: zone1
- 2: zone2
- 4: zone3
- 6: highest of zone[2-3]
- 7: highest of zone[1-3]
+
+ - 1: zone1
+ - 2: zone2
+ - 4: zone3
+ - 6: highest of zone[2-3]
+ - 7: highest of zone[1-3]
pwm[1-3]_auto_pwm_min RW Auto PWM min pwm. Minimum PWM duty-
cycle. Supported values are 0 or
auto_point1_pwm.
@@ -307,12 +340,14 @@ pwm[1-3]_auto_point1_pwm RW Auto PWM pwm point. Auto_point1 is the
pwm[1-3]_auto_point2_pwm RO Auto PWM pwm point. Auto_point2 is the
full-speed duty-cycle which is hard-
wired to 255 (100% duty-cycle).
+=============================== ======= =======================================
Chip Differences
----------------
+======================= ======= ======= ======= =======
Feature dme1737 sch311x sch5027 sch5127
--------------------------------------------------------
+======================= ======= ======= ======= =======
temp[1-3]_offset yes yes
vid yes
zone3 yes yes yes
@@ -326,3 +361,4 @@ pwm5 opt opt
fan6 opt opt
pwm6 opt opt
in7 yes
+======================= ======= ======= ======= =======
diff --git a/Documentation/hwmon/ds1621 b/Documentation/hwmon/ds1621.rst
index fa3407997795..552b37e9dd34 100644
--- a/Documentation/hwmon/ds1621
+++ b/Documentation/hwmon/ds1621.rst
@@ -2,42 +2,61 @@ Kernel driver ds1621
====================
Supported chips:
+
* Dallas Semiconductor / Maxim Integrated DS1621
+
Prefix: 'ds1621'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Dallas Semiconductor DS1625
+
Prefix: 'ds1625'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.datasheetarchive.com
* Maxim Integrated DS1631
+
Prefix: 'ds1631'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Maxim Integrated DS1721
+
Prefix: 'ds1721'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
* Maxim Integrated DS1731
+
Prefix: 'ds1731'
+
Addresses scanned: none
+
Datasheet: Publicly available from www.maximintegrated.com
Authors:
- Christian W. Zuckschwerdt <zany@triq.net>
- valuable contributions by Jan M. Sendler <sendler@sendler.de>
- ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
- with the help of Jean Delvare <jdelvare@suse.de>
+ - Christian W. Zuckschwerdt <zany@triq.net>
+ - valuable contributions by Jan M. Sendler <sendler@sendler.de>
+ - ported to 2.6 by Aurelien Jarno <aurelien@aurel32.net>
+ with the help of Jean Delvare <jdelvare@suse.de>
Module Parameters
------------------
* polarity int
- Output's polarity: 0 = active high, 1 = active low
+ Output's polarity:
+
+ * 0 = active high,
+ * 1 = active low
Description
-----------
@@ -87,28 +106,31 @@ are used internally, however, these flags do get set and cleared as the actual
temperature crosses the min or max settings (which by default are set to 75
and 80 degrees respectively).
-Temperature Conversion:
------------------------
-DS1621 - 750ms (older devices may take up to 1000ms)
-DS1625 - 500ms
-DS1631 - 93ms..750ms for 9..12 bits resolution, respectively.
-DS1721 - 93ms..750ms for 9..12 bits resolution, respectively.
-DS1731 - 93ms..750ms for 9..12 bits resolution, respectively.
+Temperature Conversion
+----------------------
+
+- DS1621 - 750ms (older devices may take up to 1000ms)
+- DS1625 - 500ms
+- DS1631 - 93ms..750ms for 9..12 bits resolution, respectively.
+- DS1721 - 93ms..750ms for 9..12 bits resolution, respectively.
+- DS1731 - 93ms..750ms for 9..12 bits resolution, respectively.
Note:
On the DS1621, internal access to non-volatile registers may last for 10ms
or less (unverified on the other devices).
-Temperature Accuracy:
----------------------
-DS1621: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1625: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1631: +/- 0.5 degree Celsius (from 0 to +70 degrees)
-DS1721: +/- 1.0 degree Celsius (from -10 to +85 degrees)
-DS1731: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+Temperature Accuracy
+--------------------
-Note:
-Please refer to the device datasheets for accuracy at other temperatures.
+- DS1621: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1625: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1631: +/- 0.5 degree Celsius (from 0 to +70 degrees)
+- DS1721: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+- DS1731: +/- 1.0 degree Celsius (from -10 to +85 degrees)
+
+.. Note::
+
+ Please refer to the device datasheets for accuracy at other temperatures.
Temperature Resolution:
-----------------------
@@ -117,60 +139,67 @@ support, which is achieved via the R0 and R1 config register bits, where:
R0..R1
------
- 0 0 => 9 bits, 0.5 degrees Celsius
- 1 0 => 10 bits, 0.25 degrees Celsius
- 0 1 => 11 bits, 0.125 degrees Celsius
- 1 1 => 12 bits, 0.0625 degrees Celsius
-Note:
-At initial device power-on, the default resolution is set to 12-bits.
+== == ===============================
+R0 R1
+== == ===============================
+ 0 0 9 bits, 0.5 degrees Celsius
+ 1 0 10 bits, 0.25 degrees Celsius
+ 0 1 11 bits, 0.125 degrees Celsius
+ 1 1 12 bits, 0.0625 degrees Celsius
+== == ===============================
+
+.. Note::
+
+ At initial device power-on, the default resolution is set to 12-bits.
The resolution mode for the DS1631, DS1721, or DS1731 can be changed from
userspace, via the device 'update_interval' sysfs attribute. This attribute
will normalize the range of input values to the device maximum resolution
values defined in the datasheet as follows:
+============= ================== ===============
Resolution Conversion Time Input Range
(C/LSB) (msec) (msec)
-------------------------------------------------
+============= ================== ===============
0.5 93.75 0....94
0.25 187.5 95...187
0.125 375 188..375
0.0625 750 376..infinity
-------------------------------------------------
+============= ================== ===============
The following examples show how the 'update_interval' attribute can be
-used to change the conversion time:
-
-$ cat update_interval
-750
-$ cat temp1_input
-22062
-$
-$ echo 300 > update_interval
-$ cat update_interval
-375
-$ cat temp1_input
-22125
-$
-$ echo 150 > update_interval
-$ cat update_interval
-188
-$ cat temp1_input
-22250
-$
-$ echo 1 > update_interval
-$ cat update_interval
-94
-$ cat temp1_input
-22000
-$
-$ echo 1000 > update_interval
-$ cat update_interval
-750
-$ cat temp1_input
-22062
-$
+used to change the conversion time::
+
+ $ cat update_interval
+ 750
+ $ cat temp1_input
+ 22062
+ $
+ $ echo 300 > update_interval
+ $ cat update_interval
+ 375
+ $ cat temp1_input
+ 22125
+ $
+ $ echo 150 > update_interval
+ $ cat update_interval
+ 188
+ $ cat temp1_input
+ 22250
+ $
+ $ echo 1 > update_interval
+ $ cat update_interval
+ 94
+ $ cat temp1_input
+ 22000
+ $
+ $ echo 1000 > update_interval
+ $ cat update_interval
+ 750
+ $ cat temp1_input
+ 22062
+ $
As shown, the ds1621 driver automatically adjusts the 'update_interval'
user input, via a step function. Reading back the 'update_interval' value
@@ -182,6 +211,7 @@ via the following function:
g(x) = 0.5 * [minimum_conversion_time/x]
where:
- -> 'x' = the output from 'update_interval'
- -> 'g(x)' = the resolution in degrees C per LSB.
- -> 93.75ms = minimum conversion time
+
+ - 'x' = the output from 'update_interval'
+ - 'g(x)' = the resolution in degrees C per LSB.
+ - 93.75ms = minimum conversion time
diff --git a/Documentation/hwmon/ds620 b/Documentation/hwmon/ds620.rst
index 1fbe3cd916cc..2d686b17b547 100644
--- a/Documentation/hwmon/ds620
+++ b/Documentation/hwmon/ds620.rst
@@ -2,15 +2,19 @@ Kernel driver ds620
===================
Supported chips:
+
* Dallas Semiconductor DS620
+
Prefix: 'ds620'
+
Datasheet: Publicly available at the Dallas Semiconductor website
- http://www.dalsemi.com/
+
+ http://www.dalsemi.com/
Authors:
- Roland Stigge <stigge@antcom.de>
- based on ds1621.c by
- Christian W. Zuckschwerdt <zany@triq.net>
+ Roland Stigge <stigge@antcom.de>
+ based on ds1621.c by
+ Christian W. Zuckschwerdt <zany@triq.net>
Description
-----------
diff --git a/Documentation/hwmon/emc1403 b/Documentation/hwmon/emc1403.rst
index a869b0ef6a9d..3a4913b63ef3 100644
--- a/Documentation/hwmon/emc1403
+++ b/Documentation/hwmon/emc1403.rst
@@ -2,28 +2,48 @@ Kernel driver emc1403
=====================
Supported chips:
+
* SMSC / Microchip EMC1402, EMC1412
+
Addresses scanned: I2C 0x18, 0x1c, 0x29, 0x4c, 0x4d, 0x5c
+
Prefix: 'emc1402'
+
Datasheets:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1412.pdf
- http://ww1.microchip.com/downloads/en/DeviceDoc/1402.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1412.pdf
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1402.pdf
+
* SMSC / Microchip EMC1403, EMC1404, EMC1413, EMC1414
+
Addresses scanned: I2C 0x18, 0x29, 0x4c, 0x4d
+
Prefix: 'emc1403', 'emc1404'
+
Datasheets:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1403_1404.pdf
- http://ww1.microchip.com/downloads/en/DeviceDoc/1413_1414.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1403_1404.pdf
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1413_1414.pdf
+
* SMSC / Microchip EMC1422
+
Addresses scanned: I2C 0x4c
+
Prefix: 'emc1422'
+
Datasheet:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1422.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1422.pdf
+
* SMSC / Microchip EMC1423, EMC1424
+
Addresses scanned: I2C 0x4c
+
Prefix: 'emc1423', 'emc1424'
+
Datasheet:
- http://ww1.microchip.com/downloads/en/DeviceDoc/1423_1424.pdf
+
+ - http://ww1.microchip.com/downloads/en/DeviceDoc/1423_1424.pdf
Author:
Kalhan Trisal <kalhan.trisal@intel.com>
@@ -46,6 +66,7 @@ difference between the limit and its hysteresis is always the same for
all three limits.
This implementation detail implies the following:
+
* When setting a limit, its hysteresis will automatically follow, the
difference staying unchanged. For example, if the old critical limit
was 80 degrees C, and the hysteresis was 75 degrees C, and you change
diff --git a/Documentation/hwmon/emc2103 b/Documentation/hwmon/emc2103.rst
index a12b2c127140..6a6ca6d1b34e 100644
--- a/Documentation/hwmon/emc2103
+++ b/Documentation/hwmon/emc2103.rst
@@ -2,13 +2,17 @@ Kernel driver emc2103
======================
Supported chips:
+
* SMSC EMC2103
+
Addresses scanned: I2C 0x2e
+
Prefix: 'emc2103'
+
Datasheet: Not public
Authors:
- Steve Glendinning <steve.glendinning@smsc.com>
+ Steve Glendinning <steve.glendinning@smsc.com>
Description
-----------
diff --git a/Documentation/hwmon/emc6w201 b/Documentation/hwmon/emc6w201.rst
index 757629b12897..a8e1185b9bb6 100644
--- a/Documentation/hwmon/emc6w201
+++ b/Documentation/hwmon/emc6w201.rst
@@ -2,9 +2,13 @@ Kernel driver emc6w201
======================
Supported chips:
+
* SMSC EMC6W201
+
Prefix: 'emc6w201'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: Not public
Author: Jean Delvare <jdelvare@suse.de>
@@ -38,5 +42,6 @@ Known Systems With EMC6W201
The EMC6W201 is a rare device, only found on a few systems, made in
2005 and 2006. Known systems with this device:
+
* Dell Precision 670 workstation
* Gigabyte 2CEWH mainboard
diff --git a/Documentation/hwmon/f71805f b/Documentation/hwmon/f71805f.rst
index 48a356084bc6..1efe5e5d337c 100644
--- a/Documentation/hwmon/f71805f
+++ b/Documentation/hwmon/f71805f.rst
@@ -2,17 +2,29 @@ Kernel driver f71805f
=====================
Supported chips:
+
* Fintek F71805F/FG
+
Prefix: 'f71805f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71806F/FG
+
Prefix: 'f71872f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71872F/FG
+
Prefix: 'f71872f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
Author: Jean Delvare <jdelvare@suse.de>
@@ -64,24 +76,26 @@ you can only set the limits in steps of 32 mV (before scaling).
The wirings and resistor values suggested by Fintek are as follow:
- pin expected
- name use R1 R2 divider raw val.
-
+======= ======= =========== ==== ======= ============ ==============
+in pin expected
+ name use R1 R2 divider raw val.
+======= ======= =========== ==== ======= ============ ==============
in0 VCC VCC3.3V int. int. 2.00 1.65 V
in1 VIN1 VTT1.2V 10K - 1.00 1.20 V
-in2 VIN2 VRAM 100K 100K 2.00 ~1.25 V (1)
-in3 VIN3 VCHIPSET 47K 100K 1.47 2.24 V (2)
+in2 VIN2 VRAM 100K 100K 2.00 ~1.25 V [1]_
+in3 VIN3 VCHIPSET 47K 100K 1.47 2.24 V [2]_
in4 VIN4 VCC5V 200K 47K 5.25 0.95 V
in5 VIN5 +12V 200K 20K 11.00 1.05 V
in6 VIN6 VCC1.5V 10K - 1.00 1.50 V
-in7 VIN7 VCORE 10K - 1.00 ~1.40 V (1)
+in7 VIN7 VCORE 10K - 1.00 ~1.40 V [1]_
in8 VIN8 VSB5V 200K 47K 1.00 0.95 V
-in10 VSB VSB3.3V int. int. 2.00 1.65 V (3)
-in9 VBAT VBATTERY int. int. 2.00 1.50 V (3)
+in10 VSB VSB3.3V int. int. 2.00 1.65 V [3]_
+in9 VBAT VBATTERY int. int. 2.00 1.50 V [3]_
+======= ======= =========== ==== ======= ============ ==============
-(1) Depends on your hardware setup.
-(2) Obviously not correct, swapping R1 and R2 would make more sense.
-(3) F71872F/FG only.
+.. [1] Depends on your hardware setup.
+.. [2] Obviously not correct, swapping R1 and R2 would make more sense.
+.. [3] F71872F/FG only.
These values can be used as hints at best, as motherboard manufacturers
are free to use a completely different setup. As a matter of fact, the
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg.rst
index 4c3cb8377d74..5c0b7b0db150 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg.rst
@@ -2,60 +2,114 @@ Kernel driver f71882fg
======================
Supported chips:
+
* Fintek F71808E
+
Prefix: 'f71808e'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71808A
+
Prefix: 'f71808a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71858FG
+
Prefix: 'f71858fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71862FG and F71863FG
+
Prefix: 'f71862fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71869F and F71869E
+
Prefix: 'f71869'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71869A
+
Prefix: 'f71869a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F71882FG and F71883FG
+
Prefix: 'f71882fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71889FG
+
Prefix: 'f71889fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
+
* Fintek F71889ED
+
Prefix: 'f71889ed'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Should become available on the Fintek website soon
+
* Fintek F71889A
+
Prefix: 'f71889a'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Should become available on the Fintek website soon
+
* Fintek F8000
+
Prefix: 'f8000'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
+
* Fintek F81801U
+
Prefix: 'f71889fg'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Not public
- Note: This is the 64-pin variant of the F71889FG, they have the
+
+ Note:
+ This is the 64-pin variant of the F71889FG; they have the
same device ID and are fully compatible as far as hardware
monitoring is concerned.
+
* Fintek F81865F
+
Prefix: 'f81865f'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Available from the Fintek website
Author: Hans de Goede <hdegoede@redhat.com>
diff --git a/Documentation/hwmon/fam15h_power b/Documentation/hwmon/fam15h_power.rst
index fb594c281c46..fdde632c93a3 100644
--- a/Documentation/hwmon/fam15h_power
+++ b/Documentation/hwmon/fam15h_power.rst
@@ -2,15 +2,20 @@ Kernel driver fam15h_power
==========================
Supported chips:
+
* AMD Family 15h Processors
+
* AMD Family 16h Processors
Prefix: 'fam15h_power'
+
Addresses scanned: PCI space
+
Datasheets:
- BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
- BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
- AMD64 Architecture Programmer's Manual Volume 2: System Programming
+
+ - BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
+ - BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
+ - AMD64 Architecture Programmer's Manual Volume 2: System Programming
Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
@@ -31,14 +36,19 @@ For AMD Family 15h and 16h processors the following power values can
be calculated using different processor northbridge function
registers:
-* BasePwrWatts: Specifies in watts the maximum amount of power
- consumed by the processor for NB and logic external to the core.
-* ProcessorPwrWatts: Specifies in watts the maximum amount of power
- the processor can support.
-* CurrPwrWatts: Specifies in watts the current amount of power being
- consumed by the processor.
+* BasePwrWatts:
+ Specifies in watts the maximum amount of power
+ consumed by the processor for NB and logic external to the core.
+
+* ProcessorPwrWatts:
+ Specifies in watts the maximum amount of power
+ the processor can support.
+* CurrPwrWatts:
+ Specifies in watts the current amount of power being
+ consumed by the processor.
This driver provides ProcessorPwrWatts and CurrPwrWatts:
+
* power1_crit (ProcessorPwrWatts)
* power1_input (CurrPwrWatts)
@@ -53,35 +63,53 @@ calculate the average power consumed by a processor during a
measurement interval Tm. The feature of accumulated power mechanism is
indicated by CPUID Fn8000_0007_EDX[12].
-* Tsample: compute unit power accumulator sample period
-* Tref: the PTSC counter period
-* PTSC: performance timestamp counter
-* N: the ratio of compute unit power accumulator sample period to the
- PTSC period
-* Jmax: max compute unit accumulated power which is indicated by
- MaxCpuSwPwrAcc MSR C001007b
-* Jx/Jy: compute unit accumulated power which is indicated by
- CpuSwPwrAcc MSR C001007a
-* Tx/Ty: the value of performance timestamp counter which is indicated
- by CU_PTSC MSR C0010280
-* PwrCPUave: CPU average power
+* Tsample:
+ compute unit power accumulator sample period
+
+* Tref:
+ the PTSC counter period
+
+* PTSC:
+ performance timestamp counter
+
+* N:
+ the ratio of compute unit power accumulator sample period to the
+ PTSC period
+
+* Jmax:
+ max compute unit accumulated power which is indicated by
+ MaxCpuSwPwrAcc MSR C001007b
+
+* Jx/Jy:
+ compute unit accumulated power which is indicated by
+ CpuSwPwrAcc MSR C001007a
+* Tx/Ty:
+ the value of performance timestamp counter which is indicated
+ by CU_PTSC MSR C0010280
+
+* PwrCPUave:
+ CPU average power
i. Determine the ratio of Tsample to Tref by executing CPUID Fn8000_0007.
+
N = value of CPUID Fn8000_0007_ECX[CpuPwrSampleTimeRatio[15:0]].
ii. Read the full range of the cumulative energy value from the new
-MSR MaxCpuSwPwrAcc.
+ MSR MaxCpuSwPwrAcc.
+
Jmax = value returned.
+
iii. At time x, SW reads CpuSwPwrAcc MSR and samples the PTSC.
- Jx = value read from CpuSwPwrAcc and Tx = value read from
-PTSC.
+
+ Jx = value read from CpuSwPwrAcc and Tx = value read from PTSC.
iv. At time y, SW reads CpuSwPwrAcc MSR and samples the PTSC.
- Jy = value read from CpuSwPwrAcc and Ty = value read from
-PTSC.
+
+ Jy = value read from CpuSwPwrAcc and Ty = value read from PTSC.
v. Calculate the average power consumption for a compute unit over
-time period (y-x). Unit of result is uWatt.
+ time period (y-x). Unit of result is uWatt::
+
if (Jy < Jx) // Rollover has occurred
Jdelta = (Jy + Jmax) - Jx
else
@@ -90,13 +118,14 @@ time period (y-x). Unit of result is uWatt.
This driver provides PwrCPUave and interval(default is 10 millisecond
and maximum is 1 second):
+
* power1_average (PwrCPUave)
* power1_average_interval (Interval)
The power1_average_interval can be updated at /etc/sensors3.conf file
as below:
-chip "fam15h_power-*"
+chip `fam15h_power-*`
set power1_average_interval 0.01
Then save it with "sensors -s".
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates.rst
index af54db92391b..58a2483d8d0d 100644
--- a/Documentation/hwmon/ftsteutates
+++ b/Documentation/hwmon/ftsteutates.rst
@@ -1,9 +1,12 @@
Kernel driver ftsteutates
-=====================
+=========================
Supported chips:
+
* FTS Teutates
+
Prefix: 'ftsteutates'
+
Addresses scanned: I2C 0x73 (7-Bit)
Author: Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
@@ -11,6 +14,7 @@ Author: Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
Description
-----------
+
The BMC Teutates is the Eleventh generation of Superior System
monitoring and thermal management solution. It builds on the basic
functionality of the BMC Theseus and contains several new features and
@@ -19,9 +23,11 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
implemented in this driver.
To clear a temperature or fan alarm, execute the following command with the
-correct path to the alarm file:
+correct path to the alarm file::
+
echo 0 >XXXX_alarm
Specification of the chip can be found here:
-ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+
+- ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+- ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
diff --git a/Documentation/hwmon/g760a b/Documentation/hwmon/g760a.rst
index cfc894537061..d82952cc8319 100644
--- a/Documentation/hwmon/g760a
+++ b/Documentation/hwmon/g760a.rst
@@ -2,9 +2,13 @@ Kernel driver g760a
===================
Supported chips:
+
* Global Mixed-mode Technology Inc. G760A
+
Prefix: 'g760a'
+
Datasheet: Publicly available at the GMT website
+
http://www.gmt.com.tw/product/datasheet/EDS-760A.pdf
Author: Herbert Valerio Riedel <hvr@gnu.org>
diff --git a/Documentation/hwmon/g762 b/Documentation/hwmon/g762.rst
index 923db9c5b5bc..0371b3365c48 100644
--- a/Documentation/hwmon/g762
+++ b/Documentation/hwmon/g762.rst
@@ -7,7 +7,7 @@ modes - PWM or DC - are supported by the device.
For additional information, a detailed datasheet is available at
http://natisbad.org/NAS/ref/GMT_EDS-762_763-080710-0.2.pdf. sysfs
-bindings are described in Documentation/hwmon/sysfs-interface.
+bindings are described in Documentation/hwmon/sysfs-interface.rst.
The following entries are available to the user in a subdirectory of
/sys/bus/i2c/drivers/g762/ to control the operation of the device.
@@ -21,34 +21,43 @@ documented in Documentation/devicetree/bindings/hwmon/g762.txt or
using a specific platform_data structure in board initialization
file (see include/linux/platform_data/g762.h).
- fan1_target: set desired fan speed. This only makes sense in closed-loop
- fan speed control (i.e. when pwm1_enable is set to 2).
+ fan1_target:
+ set desired fan speed. This only makes sense in closed-loop
+ fan speed control (i.e. when pwm1_enable is set to 2).
- fan1_input: provide current fan rotation value in RPM as reported by
- the fan to the device.
+ fan1_input:
+ provide current fan rotation value in RPM as reported by
+ the fan to the device.
- fan1_div: fan clock divisor. Supported value are 1, 2, 4 and 8.
+ fan1_div:
+ fan clock divisor. Supported values are 1, 2, 4 and 8.
- fan1_pulses: number of pulses per fan revolution. Supported values
- are 2 and 4.
+ fan1_pulses:
+ number of pulses per fan revolution. Supported values
+ are 2 and 4.
- fan1_fault: reports fan failure, i.e. no transition on fan gear pin for
- about 0.7s (if the fan is not voluntarily set off).
+ fan1_fault:
+ reports fan failure, i.e. no transition on fan gear pin for
+ about 0.7s (if the fan is not voluntarily set off).
- fan1_alarm: in closed-loop control mode, if fan RPM value is 25% out
- of the programmed value for over 6 seconds 'fan1_alarm' is
- set to 1.
+ fan1_alarm:
+ in closed-loop control mode, if fan RPM value is 25% out
+ of the programmed value for over 6 seconds 'fan1_alarm' is
+ set to 1.
- pwm1_enable: set current fan speed control mode i.e. 1 for manual fan
- speed control (open-loop) via pwm1 described below, 2 for
- automatic fan speed control (closed-loop) via fan1_target
- above.
+ pwm1_enable:
+ set current fan speed control mode i.e. 1 for manual fan
+ speed control (open-loop) via pwm1 described below, 2 for
+ automatic fan speed control (closed-loop) via fan1_target
+ above.
- pwm1_mode: set or get fan driving mode: 1 for PWM mode, 0 for DC mode.
+ pwm1_mode:
+ set or get fan driving mode: 1 for PWM mode, 0 for DC mode.
- pwm1: get or set PWM fan control value in open-loop mode. This is an
- integer value between 0 and 255. 0 stops the fan, 255 makes
- it run at full speed.
+ pwm1:
+ get or set PWM fan control value in open-loop mode. This is an
+ integer value between 0 and 255. 0 stops the fan, 255 makes
+ it run at full speed.
Both in PWM mode ('pwm1_mode' set to 1) and DC mode ('pwm1_mode' set to 0),
when current fan speed control mode is open-loop ('pwm1_enable' set to 1),
diff --git a/Documentation/hwmon/gl518sm b/Documentation/hwmon/gl518sm.rst
index 494bb55b6e72..bf1e0b5e824b 100644
--- a/Documentation/hwmon/gl518sm
+++ b/Documentation/hwmon/gl518sm.rst
@@ -2,27 +2,34 @@ Kernel driver gl518sm
=====================
Supported chips:
+
* Genesys Logic GL518SM release 0x00
+
Prefix: 'gl518sm'
+
Addresses scanned: I2C 0x2c and 0x2d
+
* Genesys Logic GL518SM release 0x80
+
Prefix: 'gl518sm'
+
Addresses scanned: I2C 0x2c and 0x2d
+
Datasheet: http://www.genesyslogic.com/
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Kyösti Mälkki <kmalkki@cc.hut.fi>
- Hong-Gunn Chew <hglinux@gunnet.org>
- Jean Delvare <jdelvare@suse.de>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>
+ - Hong-Gunn Chew <hglinux@gunnet.org>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
-IMPORTANT:
+.. important::
-For the revision 0x00 chip, the in0, in1, and in2 values (+5V, +3V,
-and +12V) CANNOT be read. This is a limitation of the chip, not the driver.
+ For the revision 0x00 chip, the in0, in1, and in2 values (+5V, +3V,
+ and +12V) CANNOT be read. This is a limitation of the chip, not the driver.
This driver supports the Genesys Logic GL518SM chip. There are at least
two revision of this chip, which we call revision 0x00 and 0x80. Revision
diff --git a/Documentation/hwmon/hih6130 b/Documentation/hwmon/hih6130.rst
index 73dae918ea7b..649bd4be4fc2 100644
--- a/Documentation/hwmon/hih6130
+++ b/Documentation/hwmon/hih6130.rst
@@ -2,11 +2,16 @@ Kernel driver hih6130
=====================
Supported chips:
+
* Honeywell HIH-6130 / HIH-6131
+
Prefix: 'hih6130'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Honeywell website
- http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
+
+ http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
Author:
Iain Paton <ipaton0@gmail.com>
@@ -28,8 +33,11 @@ instantiate I2C devices.
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
+temp1_input
+ temperature input
+
+humidity1_input
+ humidity input
Notes
-----
diff --git a/Documentation/hwmon/hwmon-kernel-api.txt b/Documentation/hwmon/hwmon-kernel-api.rst
index 8bdefb41be30..c41eb6108103 100644
--- a/Documentation/hwmon/hwmon-kernel-api.txt
+++ b/Documentation/hwmon/hwmon-kernel-api.rst
@@ -1,5 +1,5 @@
-The Linux Hardware Monitoring kernel API.
-=========================================
+The Linux Hardware Monitoring kernel API
+========================================
Guenter Roeck
@@ -12,42 +12,43 @@ drivers that want to use the hardware monitoring framework.
This document does not describe what a hardware monitoring (hwmon) Driver or
Device is. It also does not describe the API which can be used by user space
to communicate with a hardware monitoring device. If you want to know this
-then please read the following file: Documentation/hwmon/sysfs-interface.
+then please read the following file: Documentation/hwmon/sysfs-interface.rst.
For additional guidelines on how to write and improve hwmon drivers, please
-also read Documentation/hwmon/submitting-patches.
+also read Documentation/hwmon/submitting-patches.rst.
The API
-------
Each hardware monitoring driver must #include <linux/hwmon.h> and, in most
cases, <linux/hwmon-sysfs.h>. linux/hwmon.h declares the following
-register/unregister functions:
-
-struct device *
-hwmon_device_register_with_groups(struct device *dev, const char *name,
- void *drvdata,
- const struct attribute_group **groups);
-
-struct device *
-devm_hwmon_device_register_with_groups(struct device *dev,
- const char *name, void *drvdata,
- const struct attribute_group **groups);
-
-struct device *
-hwmon_device_register_with_info(struct device *dev,
- const char *name, void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **extra_groups);
-
-struct device *
-devm_hwmon_device_register_with_info(struct device *dev,
- const char *name,
- void *drvdata,
- const struct hwmon_chip_info *info,
- const struct attribute_group **extra_groups);
-
-void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
+register/unregister functions::
+
+ struct device *
+ hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+
+ struct device *
+ devm_hwmon_device_register_with_groups(struct device *dev,
+ const char *name, void *drvdata,
+ const struct attribute_group **groups);
+
+ struct device *
+ hwmon_device_register_with_info(struct device *dev,
+ const char *name, void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
+
+ struct device *
+ devm_hwmon_device_register_with_info(struct device *dev,
+ const char *name,
+ void *drvdata,
+ const struct hwmon_chip_info *info,
+ const struct attribute_group **extra_groups);
+
+ void hwmon_device_unregister(struct device *dev);
+
+ void devm_hwmon_device_unregister(struct device *dev);
hwmon_device_register_with_groups registers a hardware monitoring device.
The first parameter of this function is a pointer to the parent device.
@@ -100,78 +101,89 @@ Using devm_hwmon_device_register_with_info()
hwmon_device_register_with_info() registers a hardware monitoring device.
The parameters to this function are
-struct device *dev Pointer to parent device
-const char *name Device name
-void *drvdata Driver private data
-const struct hwmon_chip_info *info
- Pointer to chip description.
-const struct attribute_group **extra_groups
- Null-terminated list of additional non-standard
- sysfs attribute groups.
+=============================================== ===============================================
+`struct device *dev` Pointer to parent device
+`const char *name` Device name
+`void *drvdata` Driver private data
+`const struct hwmon_chip_info *info` Pointer to chip description.
+`const struct attribute_group **extra_groups` Null-terminated list of additional non-standard
+ sysfs attribute groups.
+=============================================== ===============================================
This function returns a pointer to the created hardware monitoring device
on success and a negative error code for failure.
-The hwmon_chip_info structure looks as follows.
+The hwmon_chip_info structure looks as follows::
-struct hwmon_chip_info {
- const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
-};
+ struct hwmon_chip_info {
+ const struct hwmon_ops *ops;
+ const struct hwmon_channel_info **info;
+ };
It contains the following fields:
-* ops: Pointer to device operations.
-* info: NULL-terminated list of device channel descriptors.
+* ops:
+ Pointer to device operations.
+* info:
+ NULL-terminated list of device channel descriptors.
-The list of hwmon operations is defined as:
+The list of hwmon operations is defined as::
-struct hwmon_ops {
+ struct hwmon_ops {
umode_t (*is_visible)(const void *, enum hwmon_sensor_types type,
u32 attr, int);
int (*read)(struct device *, enum hwmon_sensor_types type,
u32 attr, int, long *);
int (*write)(struct device *, enum hwmon_sensor_types type,
u32 attr, int, long);
-};
+ };
It defines the following operations.
-* is_visible: Pointer to a function to return the file mode for each supported
- attribute. This function is mandatory.
+* is_visible:
+ Pointer to a function to return the file mode for each supported
+ attribute. This function is mandatory.
-* read: Pointer to a function for reading a value from the chip. This function
- is optional, but must be provided if any readable attributes exist.
+* read:
+ Pointer to a function for reading a value from the chip. This function
+ is optional, but must be provided if any readable attributes exist.
-* write: Pointer to a function for writing a value to the chip. This function is
- optional, but must be provided if any writeable attributes exist.
+* write:
+ Pointer to a function for writing a value to the chip. This function is
+ optional, but must be provided if any writeable attributes exist.
Each sensor channel is described with struct hwmon_channel_info, which is
-defined as follows.
+defined as follows::
-struct hwmon_channel_info {
- enum hwmon_sensor_types type;
- u32 *config;
-};
+ struct hwmon_channel_info {
+ enum hwmon_sensor_types type;
+ u32 *config;
+ };
It contains following fields:
-* type: The hardware monitoring sensor type.
- Supported sensor types are
- * hwmon_chip A virtual sensor type, used to describe attributes
- * which are not bound to a specific input or output
- * hwmon_temp Temperature sensor
- * hwmon_in Voltage sensor
- * hwmon_curr Current sensor
- * hwmon_power Power sensor
- * hwmon_energy Energy sensor
- * hwmon_humidity Humidity sensor
- * hwmon_fan Fan speed sensor
- * hwmon_pwm PWM control
-
-* config: Pointer to a 0-terminated list of configuration values for each
- sensor of the given type. Each value is a combination of bit values
- describing the attributes supposed by a single sensor.
+* type:
+ The hardware monitoring sensor type.
+
+ Supported sensor types are
+
+ ================== ==================================================
+ hwmon_chip A virtual sensor type, used to describe attributes
+ which are not bound to a specific input or output
+ hwmon_temp Temperature sensor
+ hwmon_in Voltage sensor
+ hwmon_curr Current sensor
+ hwmon_power Power sensor
+ hwmon_energy Energy sensor
+ hwmon_humidity Humidity sensor
+ hwmon_fan Fan speed sensor
+ hwmon_pwm PWM control
+ ================== ==================================================
+
+* config:
+ Pointer to a 0-terminated list of configuration values for each
+ sensor of the given type. Each value is a combination of bit values
+ describing the attributes supported by a single sensor.
As an example, here is the complete description file for a LM75 compatible
sensor chip. The chip has a single temperature sensor. The driver wants to
@@ -179,48 +191,62 @@ register with the thermal subsystem (HWMON_C_REGISTER_TZ), and it supports
the update_interval attribute (HWMON_C_UPDATE_INTERVAL). The chip supports
reading the temperature (HWMON_T_INPUT), it has a maximum temperature
register (HWMON_T_MAX) as well as a maximum temperature hysteresis register
-(HWMON_T_MAX_HYST).
-
-static const u32 lm75_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm75_chip = {
- .type = hwmon_chip,
- .config = lm75_chip_config,
-};
-
-static const u32 lm75_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info lm75_temp = {
- .type = hwmon_temp,
- .config = lm75_temp_config,
-};
-
-static const struct hwmon_channel_info *lm75_info[] = {
- &lm75_chip,
- &lm75_temp,
- NULL
-};
-
-static const struct hwmon_ops lm75_hwmon_ops = {
- .is_visible = lm75_is_visible,
- .read = lm75_read,
- .write = lm75_write,
-};
-
-static const struct hwmon_chip_info lm75_chip_info = {
- .ops = &lm75_hwmon_ops,
- .info = lm75_info,
-};
+(HWMON_T_MAX_HYST)::
+
+ static const u32 lm75_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+ };
+
+ static const struct hwmon_channel_info lm75_chip = {
+ .type = hwmon_chip,
+ .config = lm75_chip_config,
+ };
+
+ static const u32 lm75_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
+ 0
+ };
+
+ static const struct hwmon_channel_info lm75_temp = {
+ .type = hwmon_temp,
+ .config = lm75_temp_config,
+ };
+
+ static const struct hwmon_channel_info *lm75_info[] = {
+ &lm75_chip,
+ &lm75_temp,
+ NULL
+ };
+
+ The HWMON_CHANNEL_INFO() macro can and should be used when possible.
+ With this macro, the above example can be simplified to
+
+ static const struct hwmon_channel_info *lm75_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ NULL
+ };
+
+ The remaining declarations are as follows.
+
+ static const struct hwmon_ops lm75_hwmon_ops = {
+ .is_visible = lm75_is_visible,
+ .read = lm75_read,
+ .write = lm75_write,
+ };
+
+ static const struct hwmon_chip_info lm75_chip_info = {
+ .ops = &lm75_hwmon_ops,
+ .info = lm75_info,
+ };
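
With the structures above in place, the probe function would typically pass
them to the registration call documented earlier; a minimal sketch, where
dev and data are the parent device and driver private data from the probe
context::

	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "lm75", data,
							 &lm75_chip_info, NULL);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);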
A complete list of bit values indicating individual attribute support
is defined in include/linux/hwmon.h. Definition prefixes are as follows.
+=============== =================================================
HWMON_C_xxxx Chip attributes, for use with hwmon_chip.
HWMON_T_xxxx Temperature attributes, for use with hwmon_temp.
HWMON_I_xxxx Voltage attributes, for use with hwmon_in.
@@ -231,57 +257,76 @@ HWMON_E_xxxx Energy attributes, for use with hwmon_energy.
HWMON_H_xxxx Humidity attributes, for use with hwmon_humidity.
HWMON_F_xxxx Fan speed attributes, for use with hwmon_fan.
HWMON_PWM_xxxx PWM control attributes, for use with hwmon_pwm.
+=============== =================================================
Driver callback functions
-------------------------
Each driver provides is_visible, read, and write functions. Parameters
-and return values for those functions are as follows.
+and return values for those functions are as follows::
-umode_t is_visible_func(const void *data, enum hwmon_sensor_types type,
- u32 attr, int channel)
+ umode_t is_visible_func(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
Parameters:
- data: Pointer to device private data structure.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ data:
+ Pointer to device private data structure.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings of bit fields to
attribute values please see include/linux/hwmon.h.
- channel:The sensor channel number.
+ channel:
+ The sensor channel number.
Return value:
The file mode for this attribute. Typically, this will be 0 (the
attribute will not be created), S_IRUGO, or 'S_IRUGO | S_IWUSR'.
-int read_func(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long *val)
+::
+
+ int read_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
Parameters:
- dev: Pointer to the hardware monitoring device.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ dev:
+ Pointer to the hardware monitoring device.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings please see
include/linux/hwmon.h.
- channel:The sensor channel number.
- val: Pointer to attribute value.
+ channel:
+ The sensor channel number.
+ val:
+ Pointer to attribute value.
Return value:
0 on success, a negative error number otherwise.
-int write_func(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, long val)
+::
+
+ int write_func(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
Parameters:
- dev: Pointer to the hardware monitoring device.
- type: The sensor type.
- attr: Attribute identifier associated with a specific attribute.
+ dev:
+ Pointer to the hardware monitoring device.
+ type:
+ The sensor type.
+ attr:
+ Attribute identifier associated with a specific attribute.
For example, the attribute value for HWMON_T_INPUT would be
hwmon_temp_input. For complete mappings please see
include/linux/hwmon.h.
- channel:The sensor channel number.
- val: The value to write to the chip.
+ channel:
+ The sensor channel number.
+ val:
+ The value to write to the chip.
Return value:
0 on success, a negative error number otherwise.
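
Tying this back to the LM75 example, a minimal read callback that handles
only the temperature input could look as follows (the private data
structure and its cached temperature field are assumptions made purely for
illustration)::

	static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
			     u32 attr, int channel, long *val)
	{
		struct lm75_data *data = dev_get_drvdata(dev);	/* hypothetical */

		if (type != hwmon_temp || attr != hwmon_temp_input)
			return -EOPNOTSUPP;

		*val = data->temp_mc;	/* cached temperature in millidegrees C */
		return 0;
	}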
@@ -317,25 +362,25 @@ Standard functions, similar to DEVICE_ATTR_{RW,RO,WO}, have _show and _store
appended to the provided function name.
SENSOR_DEVICE_ATTR and its variants define a struct sensor_device_attribute
-variable. This structure has the following fields.
+variable. This structure has the following fields::
-struct sensor_device_attribute {
- struct device_attribute dev_attr;
- int index;
-};
+ struct sensor_device_attribute {
+ struct device_attribute dev_attr;
+ int index;
+ };
You can use to_sensor_dev_attr to get the pointer to this structure from the
attribute read or write function. Its parameter is the device to which the
attribute is attached.
SENSOR_DEVICE_ATTR_2 and its variants define a struct sensor_device_attribute_2
-variable, which is defined as follows.
+variable, which is defined as follows::
-struct sensor_device_attribute_2 {
- struct device_attribute dev_attr;
- u8 index;
- u8 nr;
-};
+ struct sensor_device_attribute_2 {
+ struct device_attribute dev_attr;
+ u8 index;
+ u8 nr;
+ };
Use to_sensor_dev_attr_2 to get the pointer to this structure. Its parameter
is the device to which the attribute is attached.
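
A brief illustration of how these macros and helpers are typically combined
(the attribute, the show routine and the get_temp_mc() helper are
hypothetical)::

	static ssize_t temp_show(struct device *dev,
				 struct device_attribute *devattr, char *buf)
	{
		struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

		/* attr->index selects the channel this attribute reports */
		return sprintf(buf, "%d\n", get_temp_mc(attr->index));
	}
	static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);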
diff --git a/Documentation/hwmon/ibm-cffps b/Documentation/hwmon/ibm-cffps.rst
index e05ecd8ecfcf..52e74e39463a 100644
--- a/Documentation/hwmon/ibm-cffps
+++ b/Documentation/hwmon/ibm-cffps.rst
@@ -2,6 +2,7 @@ Kernel driver ibm-cffps
=======================
Supported chips:
+
* IBM Common Form Factor power supply
Author: Eddie James <eajames@us.ibm.com>
@@ -24,6 +25,7 @@ Sysfs entries
The following attributes are supported:
+======================= ======================================================
curr1_alarm Output current over-current alarm.
curr1_input Measured output current in mA.
curr1_label "iout1"
@@ -52,3 +54,4 @@ temp2_alarm Secondary rectifier temp over-temperature alarm.
temp2_input Measured secondary rectifier temp in millidegrees C.
temp3_alarm ORing FET temperature over-temperature alarm.
temp3_input Measured ORing FET temperature in millidegrees C.
+======================= ======================================================
diff --git a/Documentation/hwmon/ibmaem b/Documentation/hwmon/ibmaem.rst
index 1e0d59e000b4..f07a14a1c2f5 100644
--- a/Documentation/hwmon/ibmaem
+++ b/Documentation/hwmon/ibmaem.rst
@@ -1,15 +1,21 @@
Kernel driver ibmaem
-======================
+====================
This driver talks to the IBM Systems Director Active Energy Manager, known
henceforth as AEM.
Supported systems:
+
* Any recent IBM System X server with AEM support.
+
This includes the x3350, x3550, x3650, x3655, x3755, x3850 M2,
- x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades. The IPMI host interface
+ x3950 M2, and certain HC10/HS2x/LS2x/QS2x blades.
+
+ The IPMI host interface
driver ("ipmi-si") needs to be loaded for this driver to do anything.
+
Prefix: 'ibmaem'
+
Datasheet: Not available
Author: Darrick J. Wong
diff --git a/Documentation/hwmon/ibmpowernv b/Documentation/hwmon/ibmpowernv.rst
index 56468258711f..5d642bc3dec0 100644
--- a/Documentation/hwmon/ibmpowernv
+++ b/Documentation/hwmon/ibmpowernv.rst
@@ -2,6 +2,7 @@ Kernel Driver IBMPOWERNV
========================
Supported systems:
+
* Any recent IBM P servers based on POWERNV platform
Author: Neelesh Gupta
@@ -29,10 +30,11 @@ CONFIG_SENSORS_IBMPOWERNV. It can also be built as module 'ibmpowernv'.
Sysfs attributes
----------------
+======================= =======================================================
fanX_input Measured RPM value.
fanX_min Threshold RPM for alert generation.
-fanX_fault 0: No fail condition
- 1: Failing fan
+fanX_fault - 0: No fail condition
+ - 1: Failing fan
tempX_input Measured ambient temperature.
tempX_max Threshold ambient temperature for alert generation.
@@ -42,20 +44,22 @@ tempX_enable Enable/disable all temperature sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its temperature sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
inX_input Measured power supply voltage (millivolt)
-inX_fault 0: No fail condition.
- 1: Failing power supply.
+inX_fault - 0: No fail condition.
+ - 1: Failing power supply.
inX_highest Historical maximum voltage
inX_lowest Historical minimum voltage
inX_enable Enable/disable all voltage sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its voltage sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
powerX_input Power consumption (microWatt)
powerX_input_highest Historical maximum power
@@ -64,8 +68,9 @@ powerX_enable Enable/disable all power sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its power sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
currX_input Measured current (milliampere)
currX_highest Historical maximum current
@@ -74,7 +79,9 @@ currX_enable Enable/disable all current sensors belonging to the
sub-group. In POWER9, this attribute corresponds to
each OCC. Using this attribute each OCC can be asked to
disable/enable all of its current sensors.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
energyX_input Cumulative energy (microJoule)
+======================= =======================================================
diff --git a/Documentation/hwmon/ina209 b/Documentation/hwmon/ina209.rst
index 672501de4509..64322075a145 100644
--- a/Documentation/hwmon/ina209
+++ b/Documentation/hwmon/ina209.rst
@@ -1,16 +1,21 @@
Kernel driver ina209
-=====================
+====================
Supported chips:
+
* Burr-Brown / Texas Instruments INA209
+
Prefix: 'ina209'
+
Addresses scanned: -
+
Datasheet:
- http://www.ti.com/lit/gpn/ina209
+ http://www.ti.com/lit/gpn/ina209
-Author: Paul Hays <Paul.Hays@cattail.ca>
-Author: Ira W. Snyder <iws@ovro.caltech.edu>
-Author: Guenter Roeck <linux@roeck-us.net>
+Author:
+ - Paul Hays <Paul.Hays@cattail.ca>
+ - Ira W. Snyder <iws@ovro.caltech.edu>
+ - Guenter Roeck <linux@roeck-us.net>
Description
@@ -31,7 +36,7 @@ the I2C bus. See the datasheet for details.
This tries to expose most monitoring features of the hardware via
sysfs. It does not support every feature of this chip.
-
+======================= =======================================================
in0_input shunt voltage (mV)
in0_input_highest shunt voltage historical maximum reading (mV)
in0_input_lowest shunt voltage historical minimum reading (mV)
@@ -70,6 +75,7 @@ curr1_input current measurement (mA)
update_interval data conversion time; affects number of samples used
to average results for shunt and bus voltages.
+======================= =======================================================
General Remarks
---------------
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx.rst
index 0f36c021192d..94b9a260c518 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx.rst
@@ -2,35 +2,56 @@ Kernel driver ina2xx
====================
Supported chips:
+
* Texas Instruments INA219
+
+
Prefix: 'ina219'
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA220
+
Prefix: 'ina220'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA226
+
Prefix: 'ina226'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA230
+
Prefix: 'ina230'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
* Texas Instruments INA231
+
Prefix: 'ina231'
+
Addresses: I2C 0x40 - 0x4f
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
Author: Lothar Felten <lothar.felten@gmail.com>
@@ -57,23 +78,27 @@ refer to the Documentation/devicetree/bindings/hwmon/ina2xx.txt for bindings
if the device tree is used.
Additionally ina226 supports update_interval attribute as described in
-Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+Documentation/hwmon/sysfs-interface.rst. Internally the interval is the sum of
bus and shunt voltage conversion times multiplied by the averaging rate. We
don't touch the conversion times and only modify the number of averages. The
lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
The actual programmed interval may vary from the desired value.
General sysfs entries
--------------
+---------------------
+======================= ===============================
in0_input Shunt voltage(mV) channel
in1_input Bus voltage(mV) channel
curr1_input Current(mA) measurement channel
power1_input Power(uW) measurement channel
shunt_resistor Shunt resistance(uOhm) channel
+======================= ===============================
Sysfs entries for ina226, ina230 and ina231 only
--------------
+------------------------------------------------
+======================= ====================================================
update_interval data conversion time; affects number of samples used
to average results for shunt and bus voltages.
+======================= ====================================================
diff --git a/Documentation/hwmon/ina3221 b/Documentation/hwmon/ina3221.rst
index 4b82cbfb551c..f6007ae8f4e2 100644
--- a/Documentation/hwmon/ina3221
+++ b/Documentation/hwmon/ina3221.rst
@@ -2,11 +2,16 @@ Kernel driver ina3221
=====================
Supported chips:
+
* Texas Instruments INA3221
+
Prefix: 'ina3221'
+
Addresses: I2C 0x40 - 0x43
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/
+
+ http://www.ti.com/
Author: Andrew F. Davis <afd@ti.com>
@@ -21,17 +26,37 @@ and power are calculated host-side from these.
Sysfs entries
-------------
+======================= =======================================================
in[123]_label Voltage channel labels
in[123]_enable Voltage channel enable controls
in[123]_input Bus voltage(mV) channels
curr[123]_input Current(mA) measurement channels
shunt[123]_resistor Shunt resistance(uOhm) channels
curr[123]_crit Critical alert current(mA) setting, activates the
- corresponding alarm when the respective current
- is above this value
+ corresponding alarm when the respective current
+ is above this value
curr[123]_crit_alarm Critical alert current limit exceeded
curr[123]_max Warning alert current(mA) setting, activates the
- corresponding alarm when the respective current
- average is above this value.
+ corresponding alarm when the respective current
+ average is above this value.
curr[123]_max_alarm Warning alert current limit exceeded
in[456]_input Shunt voltage(uV) for channels 1, 2, and 3 respectively
+samples Number of samples used in the averaging mode.
+
+ Supported numbers of samples:
+
+ 1, 4, 16, 64, 128, 256, 512, 1024
+
+update_interval Data conversion time in milliseconds, computed as follows:
+
+ update_interval = C x S x (BC + SC)
+
+ * C: number of enabled channels
+ * S: number of samples
+ * BC: bus-voltage conversion time in milliseconds
+ * SC: shunt-voltage conversion time in milliseconds
+
+ Affects both Bus- and Shunt-voltage conversion time.
+ Note that setting update_interval to 0ms sets both BC
+ and SC to 140 us (minimum conversion time).
+======================= =======================================================
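+
+As a worked example (the conversion times here are purely illustrative): with
+all three channels enabled, 64 samples, and 1.1 ms bus- and shunt-voltage
+conversion times, update_interval = 3 x 64 x (1.1 + 1.1) ms, i.e. roughly
+422 ms.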
diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst
new file mode 100644
index 000000000000..ee090e51653a
--- /dev/null
+++ b/Documentation/hwmon/index.rst
@@ -0,0 +1,182 @@
+=========================
+Linux Hardware Monitoring
+=========================
+
+.. toctree::
+ :maxdepth: 1
+
+ hwmon-kernel-api
+ pmbus-core
+ submitting-patches
+ sysfs-interface
+ userspace-tools
+
+Hardware Monitoring Kernel Drivers
+==================================
+
+.. toctree::
+ :maxdepth: 1
+
+ ab8500
+ abituguru
+ abituguru3
+ abx500
+ acpi_power_meter
+ ad7314
+ adc128d818
+ adm1021
+ adm1025
+ adm1026
+ adm1031
+ adm1275
+ adm9240
+ ads1015
+ ads7828
+ adt7410
+ adt7411
+ adt7462
+ adt7470
+ adt7475
+ amc6821
+ asb100
+ asc7621
+ aspeed-pwm-tacho
+ coretemp
+ da9052
+ da9055
+ dme1737
+ ds1621
+ ds620
+ emc1403
+ emc2103
+ emc6w201
+ f71805f
+ f71882fg
+ fam15h_power
+ ftsteutates
+ g760a
+ g762
+ gl518sm
+ hih6130
+ ibmaem
+ ibm-cffps
+ ibmpowernv
+ ina209
+ ina2xx
+ ina3221
+ ir35221
+ ir38064
+ isl68137
+ it87
+ jc42
+ k10temp
+ k8temp
+ lineage-pem
+ lm25066
+ lm63
+ lm70
+ lm73
+ lm75
+ lm77
+ lm78
+ lm80
+ lm83
+ lm85
+ lm87
+ lm90
+ lm92
+ lm93
+ lm95234
+ lm95245
+ lochnagar
+ ltc2945
+ ltc2978
+ ltc2990
+ ltc3815
+ ltc4151
+ ltc4215
+ ltc4245
+ ltc4260
+ ltc4261
+ max16064
+ max16065
+ max1619
+ max1668
+ max197
+ max20751
+ max31722
+ max31785
+ max31790
+ max34440
+ max6639
+ max6642
+ max6650
+ max6697
+ max8688
+ mc13783-adc
+ mcp3021
+ menf21bmc
+ mlxreg-fan
+ nct6683
+ nct6775
+ nct7802
+ nct7904
+ npcm750-pwm-fan
+ nsa320
+ ntc_thermistor
+ occ
+ pc87360
+ pc87427
+ pcf8591
+ pmbus
+ powr1220
+ pwm-fan
+ raspberrypi-hwmon
+ sch5627
+ sch5636
+ scpi-hwmon
+ sht15
+ sht21
+ sht3x
+ shtc1
+ sis5595
+ smm665
+ smsc47b397
+ smsc47m192
+ smsc47m1
+ tc654
+ tc74
+ thmc50
+ tmp102
+ tmp103
+ tmp108
+ tmp401
+ tmp421
+ tps40422
+ twl4030-madc-hwmon
+ ucd9000
+ ucd9200
+ vexpress
+ via686a
+ vt1211
+ w83627ehf
+ w83627hf
+ w83773g
+ w83781d
+ w83791d
+ w83792d
+ w83793
+ w83795
+ w83l785ts
+ w83l786ng
+ wm831x
+ wm8350
+ xgene-hwmon
+ zl6100
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/hwmon/ir35221 b/Documentation/hwmon/ir35221.rst
index f7e112752c04..a83922e5ccb5 100644
--- a/Documentation/hwmon/ir35221
+++ b/Documentation/hwmon/ir35221.rst
@@ -2,9 +2,12 @@ Kernel driver ir35221
=====================
Supported chips:
- * Infinion IR35221
+ * Infineon IR35221
+
Prefix: 'ir35221'
+
Addresses scanned: -
+
Datasheet: Not publicly available
Author: Samuel Mendoza-Jonas <sam@mendozajonas.com>
@@ -23,15 +26,16 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an IR35221
-at address 0x70 on I2C bus #4:
+at address 0x70 on I2C bus #4::
-# modprobe ir35221
-# echo ir35221 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
+ # modprobe ir35221
+ # echo ir35221 0x70 > /sys/bus/i2c/devices/i2c-4/new_device
Sysfs attributes
----------------
+======================= =======================================================
curr1_label "iin"
curr1_input Measured input current
curr1_max Maximum current
@@ -85,3 +89,4 @@ temp[1-2]_highest Highest temperature
temp[1-2]_lowest Lowest temperature
temp[1-2]_max Maximum temperature
temp[1-2]_max_alarm Chip temperature high alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/ir38064.rst b/Documentation/hwmon/ir38064.rst
new file mode 100644
index 000000000000..c455d755a267
--- /dev/null
+++ b/Documentation/hwmon/ir38064.rst
@@ -0,0 +1,66 @@
+Kernel driver ir38064
+=====================
+
+Supported chips:
+
+ * Infineon IR38064
+
+ Prefix: 'ir38064'
+ Addresses scanned: -
+
+ Datasheet: Publicly available at the Infineon website
+ https://www.infineon.com/dgdl/Infineon-IR38064MTRPBF-DS-v03_07-EN.pdf?fileId=5546d462584d1d4a0158db0d9efb67ca
+
+Authors:
+ - Maxim Sloyko <maxims@google.com>
+ - Patrick Venture <venture@google.com>
+
+Description
+-----------
+
+The IR38064 is a single-input voltage, synchronous buck regulator (DC-DC converter).
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+Sysfs attributes
+----------------
+
+======================= ===========================
+curr1_label "iout1"
+curr1_input Measured output current
+curr1_crit Critical maximum current
+curr1_crit_alarm Current critical high alarm
+curr1_max Maximum current
+curr1_max_alarm Current high alarm
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage critical high alarm
+in1_min Minimum input voltage
+in1_min_alarm Input voltage low alarm
+
+in2_label "vout1"
+in2_input Measured output voltage
+in2_lcrit Critical minimum output voltage
+in2_lcrit_alarm Output voltage critical low alarm
+in2_crit Critical maximum output voltage
+in2_crit_alarm Output voltage critical high alarm
+in2_max Maximum output voltage
+in2_max_alarm Output voltage high alarm
+in2_min Minimum output voltage
+in2_min_alarm Output voltage low alarm
+
+power1_label "pout1"
+power1_input Measured output power
+
+temp1_input Measured temperature
+temp1_crit Critical high temperature
+temp1_crit_alarm Chip temperature critical high alarm
+temp1_max Maximum temperature
+temp1_max_alarm Chip temperature high alarm
+======================= ===========================
diff --git a/Documentation/hwmon/isl68137.rst b/Documentation/hwmon/isl68137.rst
new file mode 100644
index 000000000000..a5a7c8545c9e
--- /dev/null
+++ b/Documentation/hwmon/isl68137.rst
@@ -0,0 +1,80 @@
+Kernel driver isl68137
+======================
+
+Supported chips:
+
+ * Intersil ISL68137
+
+ Prefix: 'isl68137'
+
+ Addresses scanned: -
+
+ Datasheet:
+
+ Publicly available at the Intersil website
+ https://www.intersil.com/content/dam/Intersil/documents/isl6/isl68137.pdf
+
+Authors:
+ - Maxim Sloyko <maxims@google.com>
+ - Robert Lippert <rlippert@google.com>
+ - Patrick Venture <venture@google.com>
+
+Description
+-----------
+
+Intersil ISL68137 is a digital output 7-phase configurable PWM
+controller with an AVSBus interface.
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+The ISL68137 AVS operation mode must be enabled/disabled at runtime.
+
+Beyond the normal sysfs pmbus attributes, the driver exposes a control attribute.
+
+Additional Sysfs attributes
+---------------------------
+
+======================= ====================================
+avs(0|1)_enable Controls the AVS state of each rail.
+
+curr1_label "iin"
+curr1_input Measured input current
+curr1_crit Critical maximum current
+curr1_crit_alarm Current critical high alarm
+
+curr[2-3]_label "iout[1-2]"
+curr[2-3]_input Measured output current
+curr[2-3]_crit Critical maximum current
+curr[2-3]_crit_alarm Current critical high alarm
+
+in1_label "vin"
+in1_input Measured input voltage
+in1_lcrit Critical minimum input voltage
+in1_lcrit_alarm Input voltage critical low alarm
+in1_crit Critical maximum input voltage
+in1_crit_alarm Input voltage critical high alarm
+
+in[2-3]_label "vout[1-2]"
+in[2-3]_input Measured output voltage
+in[2-3]_lcrit Critical minimum output voltage
+in[2-3]_lcrit_alarm Output voltage critical low alarm
+in[2-3]_crit Critical maximum output voltage
+in[2-3]_crit_alarm Output voltage critical high alarm
+
+power1_label "pin"
+power1_input Measured input power
+power1_alarm Input power high alarm
+
+power[2-3]_label "pout[1-2]"
+power[2-3]_input Measured output power
+
+temp[1-3]_input Measured temperature
+temp[1-3]_crit Critical high temperature
+temp[1-3]_crit_alarm Chip temperature critical high alarm
+temp[1-3]_max Maximum temperature
+temp[1-3]_max_alarm Chip temperature high alarm
+======================= ====================================
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87.rst
index fff6f6bf55bc..2d83f23bee93 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87.rst
@@ -2,105 +2,179 @@ Kernel driver it87
==================
Supported chips:
+
* IT8603E/IT8623E
+
Prefix: 'it8603'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8620E
+
Prefix: 'it8620'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
* IT8628E
+
Prefix: 'it8628'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8705F
+
Prefix: 'it87'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8712F
+
Prefix: 'it8712'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8716F/IT8726F
+
Prefix: 'it8716'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8718F
+
Prefix: 'it8718'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Once publicly available at the ITE website, but no longer
+
* IT8720F
+
Prefix: 'it8720'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8721F/IT8758E
+
Prefix: 'it8721'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8728F
+
Prefix: 'it8728'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8732F
+
Prefix: 'it8732'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8771E
+
Prefix: 'it8771'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8772E
+
Prefix: 'it8772'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8781F
+
Prefix: 'it8781'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8782F
+
Prefix: 'it8782'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8783E/F
+
Prefix: 'it8783'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8786E
+
Prefix: 'it8786'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* IT8790E
+
Prefix: 'it8790'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: Not publicly available
+
* SiS950 [clone of IT8705F]
+
Prefix: 'it87'
+
Addresses scanned: from Super I/O config space (8 I/O ports)
+
Datasheet: No longer available
+
Authors:
- Christophe Gauthron
- Jean Delvare <jdelvare@suse.de>
+ - Christophe Gauthron
+ - Jean Delvare <jdelvare@suse.de>
Module Parameters
-----------------
* update_vbat: int
-
- 0 if vbat should report power on value, 1 if vbat should be updated after
- each read. Default is 0. On some boards the battery voltage is provided
- by either the battery or the onboard power supply. Only the first reading
- at power on will be the actual battery voltage (which the chip does
- automatically). On other boards the battery voltage is always fed to
- the chip so can be read at any time. Excessive reading may decrease
- battery life but no information is given in the datasheet.
+ 0 if vbat should report power on value, 1 if vbat should be updated after
+ each read. Default is 0. On some boards the battery voltage is provided
+ by either the battery or the onboard power supply. Only the first reading
+ at power on will be the actual battery voltage (which the chip does
+ automatically). On other boards the battery voltage is always fed to
+ the chip so can be read at any time. Excessive reading may decrease
+ battery life but no information is given in the datasheet.
* fix_pwm_polarity int
-
- Force PWM polarity to active high (DANGEROUS). Some chips are
- misconfigured by BIOS - PWM values would be inverted. This option tries
- to fix this. Please contact your BIOS manufacturer and ask him for fix.
+ Force PWM polarity to active high (DANGEROUS). Some chips are
+ misconfigured by BIOS - PWM values would be inverted. This option tries
+ to fix this. Please contact your BIOS manufacturer and ask for a fix.
Hardware Interfaces
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42.rst
index b4b671f22453..5b14b49bb6f7 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42.rst
@@ -2,53 +2,100 @@ Kernel driver jc42
==================
Supported chips:
+
* Analog Devices ADT7408
+
Datasheets:
+
http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
+
* Atmel AT30TS00, AT30TS002A/B, AT30TSE004A
+
Datasheets:
+
http://www.atmel.com/Images/doc8585.pdf
+
http://www.atmel.com/Images/doc8711.pdf
+
http://www.atmel.com/Images/Atmel-8852-SEEPROM-AT30TSE002A-Datasheet.pdf
+
http://www.atmel.com/Images/Atmel-8868-DTS-AT30TSE004A-Datasheet.pdf
+
* IDT TSE2002B3, TSE2002GB2, TSE2004GB2, TS3000B3, TS3000GB0, TS3000GB2,
+
TS3001GB2
+
Datasheets:
+
Available from IDT web site
+
* Maxim MAX6604
+
Datasheets:
+
http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
+
* Microchip MCP9804, MCP9805, MCP9808, MCP98242, MCP98243, MCP98244, MCP9843
+
Datasheets:
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/25095A.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
+
http://ww1.microchip.com/downloads/en/DeviceDoc/22327A.pdf
+
* NXP Semiconductors SE97, SE97B, SE98, SE98A
+
Datasheets:
+
http://www.nxp.com/documents/data_sheet/SE97.pdf
+
http://www.nxp.com/documents/data_sheet/SE97B.pdf
+
http://www.nxp.com/documents/data_sheet/SE98.pdf
+
http://www.nxp.com/documents/data_sheet/SE98A.pdf
+
* ON Semiconductor CAT34TS02, CAT6095
+
Datasheet:
+
http://www.onsemi.com/pub_link/Collateral/CAT34TS02-D.PDF
+
http://www.onsemi.com/pub/Collateral/CAT6095-D.PDF
+
* ST Microelectronics STTS424, STTS424E02, STTS2002, STTS2004, STTS3000
+
Datasheets:
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00157556.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00157558.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00266638.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/CD00225278.pdf
+
http://www.st.com/web/en/resource/technical/document/datasheet/DM00076709.pdf
+
* JEDEC JC 42.4 compliant temperature sensor chips
+
Datasheet:
+
http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
+
Common for all chips:
+
Prefix: 'jc42'
+
Addresses scanned: I2C 0x18 - 0x1f
Author:
@@ -67,10 +114,10 @@ The driver auto-detects the chips listed above, but can be manually instantiated
to support other JC 42.4 compliant chips.
Example: the following will load the driver for a generic JC 42.4 compliant
-temperature sensor at address 0x18 on I2C bus #1:
+temperature sensor at address 0x18 on I2C bus #1::
-# modprobe jc42
-# echo jc42 0x18 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe jc42
+ # echo jc42 0x18 > /sys/bus/i2c/devices/i2c-1/new_device
A JC 42.4 compliant chip supports a single temperature sensor. Minimum, maximum,
and critical temperature can be configured. There are alarms for high, low,
@@ -90,6 +137,7 @@ cannot be changed.
Sysfs entries
-------------
+======================= ===========================================
temp1_input Temperature (RO)
temp1_min Minimum temperature (RO or RW)
temp1_max Maximum temperature (RO or RW)
@@ -101,3 +149,4 @@ temp1_max_hyst Maximum hysteresis temperature (RO)
temp1_min_alarm Temperature low alarm
temp1_max_alarm Temperature high alarm
temp1_crit_alarm Temperature critical alarm
+======================= ===========================================
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp.rst
index 254d2f55345a..12a86ba17de9 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp.rst
@@ -2,42 +2,77 @@ Kernel driver k10temp
=====================
Supported chips:
+
* AMD Family 10h processors:
+
Socket F: Quad-Core/Six-Core/Embedded Opteron (but see below)
+
Socket AM2+: Quad-Core Opteron, Phenom (II) X3/X4, Athlon X2 (but see below)
+
Socket AM3: Quad-Core Opteron, Athlon/Phenom II X2/X3/X4, Sempron II
+
Socket S1G3: Athlon II, Sempron, Turion II
+
* AMD Family 11h processors:
+
Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+
* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
+
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
+
* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
+
* AMD Family 16h processors: "Kabini", "Mullins"
Prefix: 'k10temp'
+
Addresses scanned: PCI space
+
Datasheets:
+
BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h Processors:
+
http://support.amd.com/us/Processor_TechDocs/31116.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41256.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 12h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41131.pdf
+
BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+
http://support.amd.com/us/Processor_TechDocs/43170.pdf
+
Revision Guide for AMD Family 10h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41322.pdf
+
Revision Guide for AMD Family 11h Processors:
+
http://support.amd.com/us/Processor_TechDocs/41788.pdf
+
Revision Guide for AMD Family 12h Processors:
+
http://support.amd.com/us/Processor_TechDocs/44739.pdf
+
Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+
http://support.amd.com/us/Processor_TechDocs/47534.pdf
+
AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
+
http://support.amd.com/us/Processor_TechDocs/43373.pdf
+
AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
+
http://support.amd.com/us/Processor_TechDocs/43374.pdf
+
AMD Family 10h Desktop Processor Power and Thermal Data Sheet:
+
http://support.amd.com/us/Processor_TechDocs/43375.pdf
Author: Clemens Ladisch <clemens@ladisch.de>
@@ -60,7 +95,7 @@ are using an AM3 processor on an AM2+ mainboard, you can safely use the
There is one temperature measurement value, available as temp1_input in
sysfs. It is measured in degrees Celsius with a resolution of 1/8th degree.
-Please note that it is defined as a relative value; to quote the AMD manual:
+Please note that it is defined as a relative value; to quote the AMD manual::
Tctl is the processor temperature control value, used by the platform to
control cooling systems. Tctl is a non-physical temperature on an
diff --git a/Documentation/hwmon/k8temp b/Documentation/hwmon/k8temp.rst
index 716dc24c7237..72da12aa17e5 100644
--- a/Documentation/hwmon/k8temp
+++ b/Documentation/hwmon/k8temp.rst
@@ -2,12 +2,17 @@ Kernel driver k8temp
====================
Supported chips:
+
* AMD Athlon64/FX or Opteron CPUs
+
Prefix: 'k8temp'
+
Addresses scanned: PCI space
+
Datasheet: http://support.amd.com/us/Processor_TechDocs/32559.pdf
Author: Rudolf Marek
+
Contact: Rudolf Marek <r.marek@assembler.cz>
Description
@@ -27,10 +32,12 @@ implemented sensors.
Mapping of /sys files is as follows:
-temp1_input - temperature of Core 0 and "place" 0
-temp2_input - temperature of Core 0 and "place" 1
-temp3_input - temperature of Core 1 and "place" 0
-temp4_input - temperature of Core 1 and "place" 1
+============= ===================================
+temp1_input temperature of Core 0 and "place" 0
+temp2_input temperature of Core 0 and "place" 1
+temp3_input temperature of Core 1 and "place" 0
+temp4_input temperature of Core 1 and "place" 1
+============= ===================================
Temperatures are measured in degrees Celsius and measurement resolution is
1 degree C. It is expected that future CPU will have better resolution. The
@@ -48,7 +55,7 @@ computed temperature called TControl, which must be lower than TControlMax.
The relationship is as follows:
-temp1_input - TjOffset*2 < TControlMax,
+ temp1_input - TjOffset*2 < TControlMax,
TjOffset is not yet exported by the driver, TControlMax is usually
70 degrees C. The rule of the thumb -> CPU temperature should not cross
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem.rst
index 83b2ddc160c8..10c271dc20e8 100644
--- a/Documentation/hwmon/lineage-pem
+++ b/Documentation/hwmon/lineage-pem.rst
@@ -2,11 +2,16 @@ Kernel driver lineage-pem
=========================
Supported devices:
+
* Lineage Compact Power Line Power Entry Modules
+
Prefix: 'lineage-pem'
+
Addresses scanned: -
+
Documentation:
- http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+
+ http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -31,9 +36,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for a Lineage PEM at address 0x40
-on I2C bus #1:
-$ modprobe lineage-pem
-$ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe lineage-pem
+ $ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
All Lineage CPL power entry modules have a built-in I2C bus master selector
(PCA9541). To ensure device access, this driver should only be used as client
@@ -51,6 +57,7 @@ Input voltage, input current, input power, and fan speed measurement is only
supported on newer devices. The driver detects if those attributes are supported,
and only creates respective sysfs entries if they are.
+======================= ===============================
in1_input Output voltage (mV)
in1_min_alarm Output undervoltage alarm
in1_max_alarm Output overvoltage alarm
@@ -75,3 +82,4 @@ temp1_crit
temp1_alarm
temp1_crit_alarm
temp1_fault
+======================= ===============================
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066.rst
index 51b32aa203a8..da15e3094c8c 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066.rst
@@ -2,34 +2,62 @@ Kernel driver lm25066
=====================
Supported chips:
+
* TI LM25056
+
Prefix: 'lm25056'
+
Addresses scanned: -
+
Datasheets:
+
http://www.ti.com/lit/gpn/lm25056
+
http://www.ti.com/lit/gpn/lm25056a
+
* National Semiconductor LM25066
+
Prefix: 'lm25066'
+
Addresses scanned: -
+
Datasheets:
+
http://www.national.com/pf/LM/LM25066.html
+
http://www.national.com/pf/LM/LM25066A.html
+
* National Semiconductor LM5064
+
Prefix: 'lm5064'
+
Addresses scanned: -
+
Datasheet:
+
http://www.national.com/pf/LM/LM5064.html
+
* National Semiconductor LM5066
+
Prefix: 'lm5066'
+
Addresses scanned: -
+
Datasheet:
+
http://www.national.com/pf/LM/LM5066.html
+
* Texas Instruments LM5066I
+
Prefix: 'lm5066i'
+
Addresses scanned: -
+
Datasheet:
+
http://www.ti.com/product/LM5066I
+
Author: Guenter Roeck <linux@roeck-us.net>
@@ -41,7 +69,7 @@ LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
Control, and Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -64,6 +92,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= =======================================================
in1_label "vin"
in1_input Measured input voltage.
in1_average Average measured input voltage.
@@ -105,3 +134,4 @@ temp1_max Maximum temperature.
temp1_crit Critical high temperature.
temp1_max_alarm Chip temperature high alarm.
temp1_crit_alarm Chip temperature critical high alarm.
+======================= =======================================================
diff --git a/Documentation/hwmon/lm63 b/Documentation/hwmon/lm63.rst
index 4a00461512a6..f478132b0408 100644
--- a/Documentation/hwmon/lm63
+++ b/Documentation/hwmon/lm63.rst
@@ -2,26 +2,43 @@ Kernel driver lm63
==================
Supported chips:
+
* National Semiconductor LM63
+
Prefix: 'lm63'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM63.html
+
+ http://www.national.com/pf/LM/LM63.html
+
* National Semiconductor LM64
+
Prefix: 'lm64'
+
Addresses scanned: I2C 0x18 and 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM64.html
+
+ http://www.national.com/pf/LM/LM64.html
+
* National Semiconductor LM96163
+
Prefix: 'lm96163'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM96163.html
+
+ http://www.national.com/pf/LM/LM96163.html
+
Author: Jean Delvare <jdelvare@suse.de>
Thanks go to Tyan and especially Alex Buckingham for setting up a remote
access to their S4882 test platform for this driver.
+
http://www.tyan.com/
Description
@@ -32,6 +49,7 @@ and control.
The LM63 is basically an LM86 with fan speed monitoring and control
capabilities added. It misses some of the LM86 features though:
+
- No low limit for local temperature.
- No critical limit for local temperature.
- Critical limit for remote temperature can be changed only once. We
diff --git a/Documentation/hwmon/lm70 b/Documentation/hwmon/lm70.rst
index c3a1f2ea017d..f259bc1fcd91 100644
--- a/Documentation/hwmon/lm70
+++ b/Documentation/hwmon/lm70.rst
@@ -2,19 +2,30 @@ Kernel driver lm70
==================
Supported chips:
+
* National Semiconductor LM70
+
Datasheet: http://www.national.com/pf/LM/LM70.html
+
* Texas Instruments TMP121/TMP123
+
Information: http://focus.ti.com/docs/prod/folders/print/tmp121.html
+
* Texas Instruments TMP122/TMP124
+
Information: http://www.ti.com/product/tmp122
+
* National Semiconductor LM71
+
Datasheet: http://www.ti.com/product/LM71
+
* National Semiconductor LM74
+
Datasheet: http://www.ti.com/product/LM74
+
Author:
- Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ Kaiwan N Billimoria <kaiwan@designergraphix.com>
Description
-----------
diff --git a/Documentation/hwmon/lm73 b/Documentation/hwmon/lm73.rst
index 8af059dcb642..1d6a46844e85 100644
--- a/Documentation/hwmon/lm73
+++ b/Documentation/hwmon/lm73.rst
@@ -2,13 +2,20 @@ Kernel driver lm73
==================
Supported chips:
+
* Texas Instruments LM73
+
Prefix: 'lm73'
+
Addresses scanned: I2C 0x48, 0x49, 0x4a, 0x4c, 0x4d, and 0x4e
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm73
+
+ http://www.ti.com/product/lm73
+
Author: Guillaume Ligneul <guillaume.ligneul@gmail.com>
+
Documentation: Chris Verges <kg4ysn@gmail.com>
@@ -29,17 +36,18 @@ conversion time via the 'update_interval' sysfs attribute for the
device. This attribute will normalize ranges of input values to the
maximum times defined for the resolution in the datasheet.
+ ============= ============= ============
Resolution Conv. Time Input Range
(C/LSB) (msec) (msec)
- --------------------------------------
+ ============= ============= ============
0.25 14 0..14
0.125 28 15..28
0.0625 56 29..56
0.03125 112 57..infinity
- --------------------------------------
+ ============= ============= ============
The following examples show how the 'update_interval' attribute can be
-used to change the conversion time:
+used to change the conversion time::
$ echo 0 > update_interval
$ cat update_interval
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75.rst
index 010583608f12..ba8acbd2a6cb 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75.rst
@@ -2,68 +2,132 @@ Kernel driver lm75
==================
Supported chips:
+
* National Semiconductor LM75
+
Prefix: 'lm75'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM75A
+
Prefix: 'lm75a'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* Dallas Semiconductor (now Maxim) DS75, DS1775, DS7505
+
Prefixes: 'ds75', 'ds1775', 'ds7505'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Maxim website
- http://www.maximintegrated.com/
+
+ http://www.maximintegrated.com/
+
* Maxim MAX6625, MAX6626, MAX31725, MAX31726
+
Prefixes: 'max6625', 'max6626', 'max31725', 'max31726'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/
+
+ http://www.maxim-ic.com/
+
* Microchip (TelCom) TCN75
+
Prefix: 'tcn75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Microchip website
- http://www.microchip.com/
+
+ http://www.microchip.com/
+
* Microchip MCP9800, MCP9801, MCP9802, MCP9803
+
Prefix: 'mcp980x'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Microchip website
- http://www.microchip.com/
+
+ http://www.microchip.com/
+
* Analog Devices ADT75
+
Prefix: 'adt75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Analog Devices website
- http://www.analog.com/adt75
+
+ http://www.analog.com/adt75
+
* ST Microelectronics STDS75
+
Prefix: 'stds75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the ST website
- http://www.st.com/internet/analog/product/121769.jsp
+
+ http://www.st.com/internet/analog/product/121769.jsp
+
* ST Microelectronics STLM75
+
Prefix: 'stlm75'
+
Addresses scanned: none
+
Datasheet: Publicly available at the ST website
+
https://www.st.com/resource/en/datasheet/stlm75.pdf
- * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75C, TMP175, TMP275
- Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75c', 'tmp275'
+
+ * Texas Instruments TMP100, TMP101, TMP105, TMP112, TMP75, TMP75B, TMP75C, TMP175, TMP275
+
+ Prefixes: 'tmp100', 'tmp101', 'tmp105', 'tmp112', 'tmp175', 'tmp75', 'tmp75b', 'tmp75c', 'tmp275'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/tmp100
- http://www.ti.com/product/tmp101
- http://www.ti.com/product/tmp105
- http://www.ti.com/product/tmp112
- http://www.ti.com/product/tmp75
- http://www.ti.com/product/tmp75c
- http://www.ti.com/product/tmp175
- http://www.ti.com/product/tmp275
+
+ http://www.ti.com/product/tmp100
+
+ http://www.ti.com/product/tmp101
+
+ http://www.ti.com/product/tmp105
+
+ http://www.ti.com/product/tmp112
+
+ http://www.ti.com/product/tmp75
+
+ http://www.ti.com/product/tmp75b
+
+ http://www.ti.com/product/tmp75c
+
+ http://www.ti.com/product/tmp175
+
+ http://www.ti.com/product/tmp275
+
* NXP LM75B
+
Prefix: 'lm75b'
+
Addresses scanned: none
+
Datasheet: Publicly available at the NXP website
- http://www.nxp.com/documents/data_sheet/LM75B.pdf
+
+ http://www.nxp.com/documents/data_sheet/LM75B.pdf
Author: Frodo Looijaard <frodol@dds.nl>
diff --git a/Documentation/hwmon/lm77 b/Documentation/hwmon/lm77.rst
index bfc915fe3639..4ed3fe6b999a 100644
--- a/Documentation/hwmon/lm77
+++ b/Documentation/hwmon/lm77.rst
@@ -2,11 +2,17 @@ Kernel driver lm77
==================
Supported chips:
+
* National Semiconductor LM77
+
Prefix: 'lm77'
+
Addresses scanned: I2C 0x48 - 0x4b
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
Author: Andras BALI <drewie@freemail.hu>
@@ -25,6 +31,7 @@ register on the chip, which means that the relative difference between
the limit and its hysteresis is always the same for all 3 limits.
This implementation detail implies the following:
+
* When setting a limit, its hysteresis will automatically follow, the
difference staying unchanged. For example, if the old critical limit
was 80 degrees C, and the hysteresis was 75 degrees C, and you change
diff --git a/Documentation/hwmon/lm78 b/Documentation/hwmon/lm78.rst
index 4dd47731789f..cb7a4832f35e 100644
--- a/Documentation/hwmon/lm78
+++ b/Documentation/hwmon/lm78.rst
@@ -2,19 +2,31 @@ Kernel driver lm78
==================
Supported chips:
+
* National Semiconductor LM78 / LM78-J
+
Prefix: 'lm78'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM79
+
Prefix: 'lm79'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
-Authors: Frodo Looijaard <frodol@dds.nl>
- Jean Delvare <jdelvare@suse.de>
+ http://www.national.com/
+
+
+Authors:
+ - Frodo Looijaard <frodol@dds.nl>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/lm80 b/Documentation/hwmon/lm80.rst
index a60b43efc32b..c53186abd82e 100644
--- a/Documentation/hwmon/lm80
+++ b/Documentation/hwmon/lm80.rst
@@ -2,20 +2,31 @@ Kernel driver lm80
==================
Supported chips:
+
* National Semiconductor LM80
+
Prefix: 'lm80'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
* National Semiconductor LM96080
+
Prefix: 'lm96080'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/
+
+ http://www.national.com/
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>
Description
-----------
diff --git a/Documentation/hwmon/lm83 b/Documentation/hwmon/lm83.rst
index 50be5cb26de9..ecf83819960e 100644
--- a/Documentation/hwmon/lm83
+++ b/Documentation/hwmon/lm83.rst
@@ -2,16 +2,24 @@ Kernel driver lm83
==================
Supported chips:
+
* National Semiconductor LM83
+
Prefix: 'lm83'
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM83.html
+
+ http://www.national.com/pf/LM/LM83.html
+
* National Semiconductor LM82
+
Addresses scanned: I2C 0x18 - 0x1a, 0x29 - 0x2b, 0x4c - 0x4e
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM82.html
+ http://www.national.com/pf/LM/LM82.html
Author: Jean Delvare <jdelvare@suse.de>
@@ -34,13 +42,17 @@ fact that any of these motherboards do actually have an LM83, please
contact us. Note that the LM90 can easily be misdetected as a LM83.
Confirmed motherboards:
+ === =====
SBS P014
SBS PSL09
+ === =====
Unconfirmed motherboards:
+ =========== ==========
Gigabyte GA-8IK1100
Iwill MPX2
Soltek SL-75DRV5
+ =========== ==========
The LM82 is confirmed to have been found on most AMD Geode reference
designs and test platforms.
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85.rst
index 2329c383efe4..faa92f54431c 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85.rst
@@ -2,49 +2,85 @@ Kernel driver lm85
==================
Supported chips:
+
* National Semiconductor LM85 (B and C versions)
+
Prefix: 'lm85b' or 'lm85c'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.national.com/pf/LM/LM85.html
+
* Texas Instruments LM96000
+
Prefix: 'lm9600'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.ti.com/lit/ds/symlink/lm96000.pdf
+
* Analog Devices ADM1027
+
Prefix: 'adm1027'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADM1027
+
* Analog Devices ADT7463
+
Prefix: 'adt7463'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7463
+
* Analog Devices ADT7468
+
Prefix: 'adt7468'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.onsemi.com/PowerSolutions/product.do?id=ADT7468
+
* SMSC EMC6D100, SMSC EMC6D101
+
Prefix: 'emc6d100'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
- Datasheet: http://www.smsc.com/media/Downloads_Public/discontinued/6d100.pdf
+
+ Datasheet: http://www.smsc.com/media/Downloads_Public/discontinued/6d100.pdf
+
* SMSC EMC6D102
+
Prefix: 'emc6d102'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d102.html
+
* SMSC EMC6D103
+
Prefix: 'emc6d103'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d103.html
+
* SMSC EMC6D103S
+
Prefix: 'emc6d103s'
+
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+
Datasheet: http://www.smsc.com/main/catalog/emc6d103s.html
Authors:
- Philip Pokorny <ppokorny@penguincomputing.com>,
- Frodo Looijaard <frodol@dds.nl>,
- Richard Barrington <rich_b_nz@clear.net.nz>,
- Margit Schubert-While <margitsw@t-online.de>,
- Justin Thiessen <jthiessen@penguincomputing.com>
+ - Philip Pokorny <ppokorny@penguincomputing.com>,
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Richard Barrington <rich_b_nz@clear.net.nz>,
+ - Margit Schubert-While <margitsw@t-online.de>,
+ - Justin Thiessen <jthiessen@penguincomputing.com>
Description
-----------
@@ -177,38 +213,50 @@ Each temperature sensor is associated with a Zone. There are three
sensors and therefore three zones (# 1, 2 and 3). Each zone has the following
temperature configuration points:
-* temp#_auto_temp_off - temperature below which fans should be off or spinning very low.
-* temp#_auto_temp_min - temperature over which fans start to spin.
-* temp#_auto_temp_max - temperature when fans spin at full speed.
-* temp#_auto_temp_crit - temperature when all fans will run full speed.
+* temp#_auto_temp_off
+ - temperature below which fans should be off or spinning very low.
+* temp#_auto_temp_min
+ - temperature over which fans start to spin.
+* temp#_auto_temp_max
+ - temperature when fans spin at full speed.
+* temp#_auto_temp_crit
+ - temperature when all fans will run full speed.
-* PWM Control
+PWM Control
+^^^^^^^^^^^
There are three PWM outputs. The LM85 datasheet suggests that the
pwm3 output control both fan3 and fan4. Each PWM can be individually
configured and assigned to a zone for its control value. Each PWM can be
configured individually according to the following options.
-* pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
- temperature. (PWM value from 0 to 255)
+* pwm#_auto_pwm_min
+ - this specifies the PWM value used at the temp#_auto_temp_off
+ temperature (PWM value from 0 to 255)
+
+* pwm#_auto_pwm_minctl
+ - this flag selects the fan behaviour at the temp#_auto_temp_off
+ temperature. Write 1 to keep fans spinning at
+ pwm#_auto_pwm_min or write 0 to turn them off.
-* pwm#_auto_pwm_minctl - this flags selects for temp#_auto_temp_off temperature
- the behaviour of fans. Write 1 to let fans spinning at
- pwm#_auto_pwm_min or write 0 to let them off.
+.. note::
-NOTE: It has been reported that there is a bug in the LM85 that causes the flag
-to be associated with the zones not the PWMs. This contradicts all the
-published documentation. Setting pwm#_min_ctl in this case actually affects all
-PWMs controlled by zone '#'.
+ It has been reported that there is a bug in the LM85 that causes
+ the flag to be associated with the zones not the PWMs. This
+ contradicts all the published documentation. Setting pwm#_min_ctl
+ in this case actually affects all PWMs controlled by zone '#'.
-* PWM Controlling Zone selection
+PWM Controlling Zone selection
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-* pwm#_auto_channels - controls zone that is associated with PWM
+* pwm#_auto_channels
+ - controls the zone that is associated with this PWM
Configuration choices:
- Value Meaning
- ------ ------------------------------------------------
+========== =============================================
+Value Meaning
+========== =============================================
1 Controlled by Zone 1
2 Controlled by Zone 2
3 Controlled by Zone 3
@@ -217,6 +265,7 @@ Configuration choices:
0 PWM always 0% (off)
-1 PWM always 100% (full on)
-2 Manual control (write to 'pwm#' to set)
+========== =============================================
The National LM85's have two vendor specific configuration
features. Tach. mode and Spinup Control. For more details on these,
diff --git a/Documentation/hwmon/lm87 b/Documentation/hwmon/lm87.rst
index a2339fd9acb9..72fcb577ef2a 100644
--- a/Documentation/hwmon/lm87
+++ b/Documentation/hwmon/lm87.rst
@@ -2,23 +2,32 @@ Kernel driver lm87
==================
Supported chips:
+
* National Semiconductor LM87
+
Prefix: 'lm87'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.national.com/pf/LM/LM87.html
+
* Analog Devices ADM1024
+
Prefix: 'adm1024'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.analog.com/en/prod/0,2877,ADM1024,00.html
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>,
- Stephen Rousset <stephen.rousset@rocketlogix.com>,
- Dan Eaton <dan.eaton@rocketlogix.com>,
- Jean Delvare <jdelvare@suse.de>,
- Original 2.6 port Jeff Oliver
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Mark Studebaker <mdsxyz123@yahoo.com>,
+ - Stephen Rousset <stephen.rousset@rocketlogix.com>,
+ - Dan Eaton <dan.eaton@rocketlogix.com>,
+ - Jean Delvare <jdelvare@suse.de>,
+ - Original 2.6 port Jeff Oliver
Description
-----------
diff --git a/Documentation/hwmon/lm90 b/Documentation/hwmon/lm90.rst
index 8122675d30f6..953315987c06 100644
--- a/Documentation/hwmon/lm90
+++ b/Documentation/hwmon/lm90.rst
@@ -2,132 +2,256 @@ Kernel driver lm90
==================
Supported chips:
+
* National Semiconductor LM90
+
Prefix: 'lm90'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM90.html
+
+ http://www.national.com/pf/LM/LM90.html
+
* National Semiconductor LM89
+
Prefix: 'lm89' (no auto-detection)
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/mpf/LM/LM89.html
+
+ http://www.national.com/mpf/LM/LM89.html
+
* National Semiconductor LM99
+
Prefix: 'lm99'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/pf/LM/LM99.html
+
+ http://www.national.com/pf/LM/LM99.html
+
* National Semiconductor LM86
+
Prefix: 'lm86'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the National Semiconductor website
- http://www.national.com/mpf/LM/LM86.html
+
+ http://www.national.com/mpf/LM/LM86.html
+
* Analog Devices ADM1032
+
Prefix: 'adm1032'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
+
* Analog Devices ADT7461
+
Prefix: 'adt7461'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+
* Analog Devices ADT7461A
+
Prefix: 'adt7461a'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461A
+
* ON Semiconductor NCT1008
+
Prefix: 'nct1008'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: Publicly available at the ON Semiconductor website
- http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008
+
+ http://www.onsemi.com/PowerSolutions/product.do?id=NCT1008
+
* Maxim MAX6646
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4d
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6647
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6648
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
* Maxim MAX6649
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+
* Maxim MAX6657
+
Prefix: 'max6657'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6658
+
Prefix: 'max6657'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6659
+
Prefix: 'max6659'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/2578
+
* Maxim MAX6680
+
Prefix: 'max6680'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
* Maxim MAX6681
+
Prefix: 'max6680'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+
* Maxim MAX6692
+
Prefix: 'max6646'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
+ http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
+
* Maxim MAX6695
+
Prefix: 'max6695'
+
Addresses scanned: I2C 0x18
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
* Maxim MAX6696
+
Prefix: 'max6695'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b,
- 0x4c, 0x4d and 0x4e
+
+ 0x4c, 0x4d and 0x4e
+
Datasheet: Publicly available at the Maxim website
- http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
+ http://www.maxim-ic.com/datasheet/index.mvp/id/4199
+
* Winbond/Nuvoton W83L771W/G
+
Prefix: 'w83l771'
+
Addresses scanned: I2C 0x4c
+
Datasheet: No longer available
+
* Winbond/Nuvoton W83L771AWG/ASG
+
Prefix: 'w83l771'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Not publicly available, can be requested from Nuvoton
+
* Philips/NXP SA56004X
+
Prefix: 'sa56004'
+
Addresses scanned: I2C 0x48 through 0x4F
+
Datasheet: Publicly available at NXP website
- http://ics.nxp.com/products/interface/datasheet/sa56004x.pdf
+
+ http://ics.nxp.com/products/interface/datasheet/sa56004x.pdf
+
* GMT G781
+
Prefix: 'g781'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: Not publicly available from GMT
+
* Texas Instruments TMP451
+
Prefix: 'tmp451'
+
Addresses scanned: I2C 0x4c
+
Datasheet: Publicly available at TI website
- http://www.ti.com/litv/pdf/sbos686
+ http://www.ti.com/litv/pdf/sbos686
Author: Jean Delvare <jdelvare@suse.de>
diff --git a/Documentation/hwmon/lm92 b/Documentation/hwmon/lm92.rst
index cfa99a353b8c..c131b923ed36 100644
--- a/Documentation/hwmon/lm92
+++ b/Documentation/hwmon/lm92.rst
@@ -2,22 +2,35 @@ Kernel driver lm92
==================
Supported chips:
+
* National Semiconductor LM92
+
Prefix: 'lm92'
+
Addresses scanned: I2C 0x48 - 0x4b
+
Datasheet: http://www.national.com/pf/LM/LM92.html
+
* National Semiconductor LM76
+
Prefix: 'lm92'
+
Addresses scanned: none, force parameter needed
+
Datasheet: http://www.national.com/pf/LM/LM76.html
+
* Maxim MAX6633/MAX6634/MAX6635
+
Prefix: 'max6635'
+
Addresses scanned: none, force parameter needed
+
Datasheet: http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3074
+
Authors:
- Abraham van der Merwe <abraham@2d3d.co.za>
- Jean Delvare <jdelvare@suse.de>
+ - Abraham van der Merwe <abraham@2d3d.co.za>
+ - Jean Delvare <jdelvare@suse.de>
Description
diff --git a/Documentation/hwmon/lm93 b/Documentation/hwmon/lm93.rst
index f3b2ad2ceb01..49d199b45b67 100644
--- a/Documentation/hwmon/lm93
+++ b/Documentation/hwmon/lm93.rst
@@ -2,20 +2,29 @@ Kernel driver lm93
==================
Supported chips:
+
* National Semiconductor LM93
+
Prefix 'lm93'
+
Addresses scanned: I2C 0x2c-0x2e
+
Datasheet: http://www.national.com/ds.cgi/LM/LM93.pdf
+
* National Semiconductor LM94
+
Prefix 'lm94'
+
Addresses scanned: I2C 0x2c-0x2e
+
Datasheet: http://www.national.com/ds.cgi/LM/LM94.pdf
+
Authors:
- Mark M. Hoffman <mhoffman@lightlink.com>
- Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
- Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
- Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
+ - Mark M. Hoffman <mhoffman@lightlink.com>
+ - Ported to 2.6 by Eric J. Bowersox <ericb@aspsys.com>
+ - Adapted to 2.6.20 by Carsten Emde <ce@osadl.org>
+ - Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
Module Parameters
-----------------
@@ -67,7 +76,8 @@ LM94 are not supported.
User Interface
--------------
-#PROCHOT:
+#PROCHOT
+^^^^^^^^
The LM93 can monitor two #PROCHOT signals. The results are found in the
sysfs files prochot1, prochot2, prochot1_avg, prochot2_avg, prochot1_max,
@@ -86,7 +96,8 @@ prochot2_interval. The values in these files specify the intervals for
list will cause the driver to use the next largest interval. The available
intervals are (in seconds):
-#PROCHOT intervals: 0.73, 1.46, 2.9, 5.8, 11.7, 23.3, 46.6, 93.2, 186, 372
+#PROCHOT intervals:
+ 0.73, 1.46, 2.9, 5.8, 11.7, 23.3, 46.6, 93.2, 186, 372
It is possible to configure the LM93 to logically short the two #PROCHOT
signals. I.e. when #P1_PROCHOT is asserted, the LM93 will automatically
@@ -105,16 +116,15 @@ contains a value controlling the duty cycle for the PWM signal used when
the override function is enabled. This value ranges from 0 to 15, with 0
indicating minimum duty cycle and 15 indicating maximum.
-#VRD_HOT:
+#VRD_HOT
+^^^^^^^^
The LM93 can monitor two #VRD_HOT signals. The results are found in the
sysfs files vrdhot1 and vrdhot2. There is one value per file: a boolean for
which 1 indicates #VRD_HOT is asserted and 0 indicates it is negated. These
files are read-only.
-Smart Tach Mode:
-
-(from the datasheet)
+Smart Tach Mode (from the datasheet)::
If a fan is driven using a low-side drive PWM, the tachometer
output of the fan is corrupted. The LM93 includes smart tachometer
@@ -127,7 +137,8 @@ the fan tachometer with a pwm) to the sysfs file fan<n>_smart_tach. A zero
will disable the function for that fan. Note that Smart tach mode cannot be
enabled if the PWM output frequency is 22500 Hz (see below).
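+
+As an illustration only (the hwmon sysfs directory is assumed and omitted
+here), monitoring fan 3's tachometer with PWM output 1, and disabling the
+function again, could look like::
+
+  $ echo 1 > fan3_smart_tach   # fan 3 tach is sampled against pwm1
+  $ echo 0 > fan3_smart_tach   # disable smart tach mode for fan 3
+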
-Manual PWM:
+Manual PWM
+^^^^^^^^^^
The LM93 has a fixed or override mode for the two PWM outputs (although, there
are still some conditions that will override even this mode - see section
@@ -141,7 +152,8 @@ will cause the driver to use the next largest value. Also note: when manual
PWM mode is disabled, the value of pwm1 and pwm2 indicates the current duty
cycle chosen by the h/w.
-PWM Output Frequency:
+PWM Output Frequency
+^^^^^^^^^^^^^^^^^^^^
The LM93 supports several different frequencies for the PWM output channels.
The sysfs files pwm1_freq and pwm2_freq are used to select the frequency. The
@@ -149,9 +161,11 @@ frequency values are constrained by the hardware. Selecting a value which is
not available will cause the driver to use the next largest value. Also note
that this parameter has implications for the Smart Tach Mode (see above).
-PWM Output Frequencies (in Hz): 12, 36, 48, 60, 72, 84, 96, 22500 (default)
+PWM Output Frequencies (in Hz):
+ 12, 36, 48, 60, 72, 84, 96, 22500 (default)
-Automatic PWM:
+Automatic PWM
+^^^^^^^^^^^^^
The LM93 is capable of complex automatic fan control, with many different
points of configuration. To start, each PWM output can be bound to any
@@ -163,14 +177,16 @@ The eight control sources are: temp1-temp4 (aka "zones" in the datasheet),
in the sysfs files pwm<n>_auto_channels, where a "1" enables the binding, and
a "0" disables it. The h/w default is 0x0f (all temperatures bound).
- 0x01 - Temp 1
- 0x02 - Temp 2
- 0x04 - Temp 3
- 0x08 - Temp 4
- 0x10 - #PROCHOT 1
- 0x20 - #PROCHOT 2
- 0x40 - #VRDHOT 1
- 0x80 - #VRDHOT 2
+ ====== ===========
+ 0x01 Temp 1
+ 0x02 Temp 2
+ 0x04 Temp 3
+ 0x08 Temp 4
+ 0x10 #PROCHOT 1
+ 0x20 #PROCHOT 2
+ 0x40 #VRDHOT 1
+ 0x80 #VRDHOT 2
+ ====== ===========
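+
+For example, to bind PWM output 1 to Temp 1 and Temp 2 only, write the
+combined mask 0x01 | 0x02 = 0x03 to the attribute. A rough sketch (hwmon
+sysfs directory assumed, value written in decimal)::
+
+  $ echo 3 > pwm1_auto_channels   # Temp 1 + Temp 2 bound, others unbound
+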
The function y = f(x) takes a source temperature x to a PWM output y. This
function of the LM93 is derived from a base temperature and a table of 12
@@ -180,7 +196,9 @@ degrees C, with the value of offset <i> for temperature value <n> being
contained in the file temp<n>_auto_offset<i>. E.g. if the base temperature
is 40C:
+ ========== ======================= =============== =======
offset # temp<n>_auto_offset<i> range pwm
+ ========== ======================= =============== =======
1 0 - 25.00%
2 0 - 28.57%
3 1 40C - 41C 32.14%
@@ -193,7 +211,8 @@ is 40C:
10 2 54C - 56C 57.14%
11 2 56C - 58C 71.43%
12 2 58C - 60C 85.71%
- > 60C 100.00%
+ - - > 60C 100.00%
+ ========== ======================= =============== =======
Valid offsets are in the range 0C <= x <= 7.5C in 0.5C increments.
@@ -213,7 +232,8 @@ temp<n>_auto_pwm_min. Note, there are only two minimums: one each for temp[12]
and temp[34]. Therefore, any change to e.g. temp1_auto_pwm_min will also
affect temp2_auto_pwm_min.
-PWM Spin-Up Cycle:
+PWM Spin-Up Cycle
+^^^^^^^^^^^^^^^^^
A spin-up cycle occurs when a PWM output is commanded from 0% duty cycle to
some value > 0%. The LM93 supports a minimum duty cycle during spin-up. These
@@ -225,10 +245,11 @@ the spin-up time in seconds. The available spin-up times are constrained by
the hardware. Selecting a value which is not available will cause the driver
to use the next largest value.
-Spin-up Durations: 0 (disabled, h/w default), 0.1, 0.25, 0.4, 0.7, 1.0,
- 2.0, 4.0
+Spin-up Durations:
+ 0 (disabled, h/w default), 0.1, 0.25, 0.4, 0.7, 1.0, 2.0, 4.0
-#PROCHOT and #VRDHOT PWM Ramping:
+#PROCHOT and #VRDHOT PWM Ramping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If the #PROCHOT or #VRDHOT signals are asserted while bound to a PWM output
channel, the LM93 will ramp the PWM output up to 100% duty cycle in discrete
@@ -237,9 +258,11 @@ one value each in seconds: pwm_auto_prochot_ramp and pwm_auto_vrdhot_ramp.
The available ramp times are constrained by the hardware. Selecting a value
which is not available will cause the driver to use the next largest value.
-Ramp Times: 0 (disabled, h/w default) to 0.75 in 0.05 second intervals
+Ramp Times:
+ 0 (disabled, h/w default) to 0.75 in 0.05 second intervals
-Fan Boost:
+Fan Boost
+^^^^^^^^^
For each temperature channel, there is a boost temperature: if the channel
exceeds this limit, the LM93 will immediately drive both PWM outputs to 100%.
@@ -249,7 +272,8 @@ limit is reached, the temperature channel must drop below this value before
the boost function is disabled. This temperature is also expressed in degrees
C in the sysfs files temp<n>_auto_boost_hyst.
-GPIO Pins:
+GPIO Pins
+^^^^^^^^^
The LM93 can monitor the logic level of four dedicated GPIO pins as well as the
four tach input pins. GPIO0-GPIO3 correspond to (fan) tach 1-4, respectively.
@@ -260,50 +284,29 @@ LSB is GPIO0, and the MSB is GPIO7.
LM93 Unique sysfs Files
-----------------------
- file description
- -------------------------------------------------------------
-
- prochot<n> current #PROCHOT %
-
- prochot<n>_avg moving average #PROCHOT %
-
- prochot<n>_max limit #PROCHOT %
-
- prochot_short enable or disable logical #PROCHOT pin short
-
- prochot<n>_override force #PROCHOT assertion as PWM
-
- prochot_override_duty_cycle
- duty cycle for the PWM signal used when
- #PROCHOT is overridden
-
- prochot<n>_interval #PROCHOT PWM sampling interval
-
- vrdhot<n> 0 means negated, 1 means asserted
-
- fan<n>_smart_tach enable or disable smart tach mode
-
- pwm<n>_auto_channels select control sources for PWM outputs
-
- pwm<n>_auto_spinup_min minimum duty cycle during spin-up
-
- pwm<n>_auto_spinup_time duration of spin-up
-
- pwm_auto_prochot_ramp ramp time per step when #PROCHOT asserted
-
- pwm_auto_vrdhot_ramp ramp time per step when #VRDHOT asserted
-
- temp<n>_auto_base temperature channel base
-
- temp<n>_auto_offset[1-12]
- temperature channel offsets
-
- temp<n>_auto_offset_hyst
- temperature channel offset hysteresis
-
- temp<n>_auto_boost temperature channel boost (PWMs to 100%) limit
-
- temp<n>_auto_boost_hyst temperature channel boost hysteresis
-
- gpio input state of 8 GPIO pins; read-only
-
+=========================== ===============================================
+file description
+=========================== ===============================================
+prochot<n> current #PROCHOT %
+prochot<n>_avg moving average #PROCHOT %
+prochot<n>_max limit #PROCHOT %
+prochot_short enable or disable logical #PROCHOT pin short
+prochot<n>_override force #PROCHOT assertion as PWM
+prochot_override_duty_cycle duty cycle for the PWM signal used when
+ #PROCHOT is overridden
+prochot<n>_interval #PROCHOT PWM sampling interval
+vrdhot<n> 0 means negated, 1 means asserted
+fan<n>_smart_tach enable or disable smart tach mode
+pwm<n>_auto_channels select control sources for PWM outputs
+pwm<n>_auto_spinup_min minimum duty cycle during spin-up
+pwm<n>_auto_spinup_time duration of spin-up
+pwm_auto_prochot_ramp ramp time per step when #PROCHOT asserted
+pwm_auto_vrdhot_ramp ramp time per step when #VRDHOT asserted
+temp<n>_auto_base temperature channel base
+temp<n>_auto_offset[1-12] temperature channel offsets
+temp<n>_auto_offset_hyst temperature channel offset hysteresis
+temp<n>_auto_boost temperature channel boost (PWMs to 100%)
+ limit
+temp<n>_auto_boost_hyst temperature channel boost hysteresis
+gpio input state of 8 GPIO pins; read-only
+=========================== ===============================================
diff --git a/Documentation/hwmon/lm95234 b/Documentation/hwmon/lm95234.rst
index 32b777ef224c..e4c14bea5efd 100644
--- a/Documentation/hwmon/lm95234
+++ b/Documentation/hwmon/lm95234.rst
@@ -2,15 +2,22 @@ Kernel driver lm95234
=====================
Supported chips:
+
* National Semiconductor / Texas Instruments LM95233
+
Addresses scanned: I2C 0x18, 0x2a, 0x2b
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm95233
+
+ http://www.ti.com/product/lm95233
+
* National Semiconductor / Texas Instruments LM95234
+
Addresses scanned: I2C 0x18, 0x4d, 0x4e
+
Datasheet: Publicly available at the Texas Instruments website
- http://www.ti.com/product/lm95234
+ http://www.ti.com/product/lm95234
Author: Guenter Roeck <linux@roeck-us.net>
diff --git a/Documentation/hwmon/lm95245 b/Documentation/hwmon/lm95245.rst
index d755901f58c4..566d1dc8c5a6 100644
--- a/Documentation/hwmon/lm95245
+++ b/Documentation/hwmon/lm95245.rst
@@ -1,16 +1,23 @@
Kernel driver lm95245
-==================
+=====================
Supported chips:
+
* TI LM95235
+
Addresses scanned: I2C 0x18, 0x29, 0x4c
+
Datasheet: Publicly available at the TI website
- http://www.ti.com/lit/ds/symlink/lm95235.pdf
+
+ http://www.ti.com/lit/ds/symlink/lm95235.pdf
+
* TI / National Semiconductor LM95245
+
Addresses scanned: I2C 0x18, 0x19, 0x29, 0x4c, 0x4d
+
Datasheet: Publicly available at the TI website
- http://www.ti.com/lit/ds/symlink/lm95245.pdf
+ http://www.ti.com/lit/ds/symlink/lm95245.pdf
Author: Alexander Stein <alexander.stein@systec-electronic.com>
diff --git a/Documentation/hwmon/lochnagar.rst b/Documentation/hwmon/lochnagar.rst
new file mode 100644
index 000000000000..1d609c4d18c3
--- /dev/null
+++ b/Documentation/hwmon/lochnagar.rst
@@ -0,0 +1,83 @@
+Kernel Driver Lochnagar
+=======================
+
+Supported systems:
+ * Cirrus Logic : Lochnagar 2
+
+Author: Lucas A. Tanure Alves
+
+Description
+-----------
+
+Lochnagar 2 features built-in Current Monitor circuitry that allows for the
+measurement of both voltage and current on up to eight of the supply voltage
+rails provided to the minicards. The Current Monitor does not require any
+hardware modifications or external circuitry to operate.
+
+The current and voltage measurements are obtained through the standard register
+map interface to the Lochnagar board controller, and can therefore be monitored
+by software.
+
+Sysfs attributes
+----------------
+
+======================= =======================================================
+temp1_input The Lochnagar board temperature (milliCelsius)
+in0_input Measured voltage for DBVDD1 (milliVolts)
+in0_label "DBVDD1"
+curr1_input Measured current for DBVDD1 (milliAmps)
+curr1_label "DBVDD1"
+power1_average Measured average power for DBVDD1 (microWatts)
+power1_average_interval Power averaging time input valid from 1 to 1708 ms
+power1_label "DBVDD1"
+in1_input Measured voltage for 1V8 DSP (milliVolts)
+in1_label "1V8 DSP"
+curr2_input Measured current for 1V8 DSP (milliAmps)
+curr2_label "1V8 DSP"
+power2_average Measured average power for 1V8 DSP (microWatts)
+power2_average_interval Power averaging time input valid from 1 to 1708 ms
+power2_label "1V8 DSP"
+in2_input Measured voltage for 1V8 CDC (milliVolts)
+in2_label "1V8 CDC"
+curr3_input Measured current for 1V8 CDC (milliAmps)
+curr3_label "1V8 CDC"
+power3_average Measured average power for 1V8 CDC (microWatts)
+power3_average_interval Power averaging time input valid from 1 to 1708 ms
+power3_label "1V8 CDC"
+in3_input Measured voltage for VDDCORE DSP (milliVolts)
+in3_label "VDDCORE DSP"
+curr4_input Measured current for VDDCORE DSP (milliAmps)
+curr4_label "VDDCORE DSP"
+power4_average Measured average power for VDDCORE DSP (microWatts)
+power4_average_interval Power averaging time input valid from 1 to 1708 ms
+power4_label "VDDCORE DSP"
+in4_input Measured voltage for AVDD 1V8 (milliVolts)
+in4_label "AVDD 1V8"
+curr5_input Measured current for AVDD 1V8 (milliAmps)
+curr5_label "AVDD 1V8"
+power5_average Measured average power for AVDD 1V8 (microWatts)
+power5_average_interval Power averaging time input valid from 1 to 1708 ms
+power5_label "AVDD 1V8"
+curr6_input Measured current for SYSVDD (milliAmps)
+curr6_label "SYSVDD"
+power6_average Measured average power for SYSVDD (microWatts)
+power6_average_interval Power averaging time input valid from 1 to 1708 ms
+power6_label "SYSVDD"
+in6_input Measured voltage for VDDCORE CDC (milliVolts)
+in6_label "VDDCORE CDC"
+curr7_input Measured current for VDDCORE CDC (milliAmps)
+curr7_label "VDDCORE CDC"
+power7_average Measured average power for VDDCORE CDC (microWatts)
+power7_average_interval Power averaging time input valid from 1 to 1708 ms
+power7_label "VDDCORE CDC"
+in7_input Measured voltage for MICVDD (milliVolts)
+in7_label "MICVDD"
+curr8_input Measured current for MICVDD (milliAmps)
+curr8_label "MICVDD"
+power8_average Measured average power for MICVDD (microWatts)
+power8_average_interval Power averaging time input valid from 1 to 1708 ms
+power8_label "MICVDD"
+======================= =======================================================
+
+Note:
+ It is not possible to measure voltage on the SYSVDD rail.
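+
+A brief usage sketch from the shell (the hwmon index, hwmonX below, is system
+dependent and assumed for illustration): read the DBVDD1 rail and set its
+power averaging time to 1000 ms, within the 1 to 1708 ms range listed above::
+
+  $ cd /sys/class/hwmon/hwmonX
+  $ cat in0_label in0_input curr1_input power1_average
+  $ echo 1000 > power1_average_interval
+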
diff --git a/Documentation/hwmon/ltc2945 b/Documentation/hwmon/ltc2945.rst
index f8d0f7f19adb..20c884985367 100644
--- a/Documentation/hwmon/ltc2945
+++ b/Documentation/hwmon/ltc2945.rst
@@ -2,11 +2,16 @@ Kernel driver ltc2945
=====================
Supported chips:
+
* Linear Technology LTC2945
+
Prefix: 'ltc2945'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/en/datasheet/2945fa.pdf
+
+ http://cds.linear.com/docs/en/datasheet/2945fa.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC2945 at address 0x10
-on I2C bus #1:
-$ modprobe ltc2945
-$ echo ltc2945 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc2945
+ $ echo ltc2945 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -45,6 +51,7 @@ Current Sense register. The reported value assumes that a 1 mOhm sense resistor
is installed. If a different sense resistor is installed, calculate the real
current by dividing the reported value by the sense resistor value in mOhm.
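+
+For example, with a 5 mOhm sense resistor (both the resistor value and the
+hwmon sysfs path below are assumptions for illustration), the real current in
+mA can be derived as::
+
+  $ raw=$(cat /sys/class/hwmon/hwmonX/curr1_input)   # scaled for 1 mOhm
+  $ echo $((raw / 5))                                # actual current in mA
+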
+======================= ========================================================
in1_input VIN voltage (mV). Voltage is measured either at
SENSE+ or VDD pin depending on chip configuration.
in1_min Undervoltage threshold
@@ -82,3 +89,4 @@ power1_input_highest Historical maximum power use
power1_reset_history Write 1 to reset power1 history
power1_min_alarm Low power alarm
power1_max_alarm High power alarm
+======================= ========================================================
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978.rst
index dfb2caa401d9..01a24fd6d5fe 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978.rst
@@ -2,85 +2,143 @@ Kernel driver ltc2978
=====================
Supported chips:
+
* Linear Technology LTC2974
+
Prefix: 'ltc2974'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2974
+
* Linear Technology LTC2975
+
Prefix: 'ltc2975'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2975
+
* Linear Technology LTC2977
+
Prefix: 'ltc2977'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2977
+
* Linear Technology LTC2978, LTC2978A
+
Prefix: 'ltc2978'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2978
- http://www.linear.com/product/ltc2978a
+
+ http://www.linear.com/product/ltc2978a
+
* Linear Technology LTC2980
+
Prefix: 'ltc2980'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2980
+
* Linear Technology LTC3880
+
Prefix: 'ltc3880'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3880
+
* Linear Technology LTC3882
+
Prefix: 'ltc3882'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3882
+
* Linear Technology LTC3883
+
Prefix: 'ltc3883'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3883
+
* Linear Technology LTC3886
+
Prefix: 'ltc3886'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3886
+
* Linear Technology LTC3887
+
Prefix: 'ltc3887'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3887
+
* Linear Technology LTM2987
+
Prefix: 'ltm2987'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm2987
+
* Linear Technology LTM4675
+
Prefix: 'ltm4675'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm4675
+
* Linear Technology LTM4676
+
Prefix: 'ltm4676'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltm4676
+
* Analog Devices LTM4686
+
Prefix: 'ltm4686'
+
Addresses scanned: -
+
Datasheet: http://www.analog.com/ltm4686
+
Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------
-LTC2974 and LTC2975 are quad digital power supply managers.
-LTC2978 is an octal power supply monitor.
-LTC2977 is a pin compatible replacement for LTC2978.
-LTC2980 is a 16-channel Power System Manager, consisting of two LTC2977
-in a single die. The chip is instantiated and reported as two separate chips
-on two different I2C bus addresses.
-LTC3880, LTC3882, LTC3886, and LTC3887 are dual output poly-phase step-down
-DC/DC controllers.
-LTC3883 is a single phase step-down DC/DC controller.
-LTM2987 is a 16-channel Power System Manager with two LTC2977 plus
-additional components on a single die. The chip is instantiated and reported
-as two separate chips on two different I2C bus addresses.
-LTM4675 is a dual 9A or single 18A μModule regulator
-LTM4676 is a dual 13A or single 26A uModule regulator.
-LTM4686 is a dual 10A or single 20A uModule regulator.
+- LTC2974 and LTC2975 are quad digital power supply managers.
+- LTC2978 is an octal power supply monitor.
+- LTC2977 is a pin compatible replacement for LTC2978.
+- LTC2980 is a 16-channel Power System Manager, consisting of two LTC2977
+  in a single die. The chip is instantiated and reported as two separate chips
+  on two different I2C bus addresses.
+- LTC3880, LTC3882, LTC3886, and LTC3887 are dual output poly-phase step-down
+  DC/DC controllers.
+- LTC3883 is a single phase step-down DC/DC controller.
+- LTM2987 is a 16-channel Power System Manager with two LTC2977 plus
+  additional components on a single die. The chip is instantiated and reported
+  as two separate chips on two different I2C bus addresses.
+- LTM4675 is a dual 9A or single 18A μModule regulator.
+- LTM4676 is a dual 13A or single 26A uModule regulator.
+- LTM4686 is a dual 10A or single 20A uModule regulator.
Usage Notes
@@ -90,127 +148,208 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an LTC2978 at address
-0x60 on I2C bus #1:
+0x60 on I2C bus #1::
-# modprobe ltc2978
-# echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe ltc2978
+ # echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs attributes
----------------
+======================= ========================================================
in1_label "vin"
+
in1_input Measured input voltage.
+
in1_min Minimum input voltage.
+
in1_max Maximum input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
+
in1_lcrit Critical minimum input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
+
in1_crit Critical maximum input voltage.
+
in1_min_alarm Input voltage low alarm.
+
in1_max_alarm Input voltage high alarm.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_lcrit_alarm Input voltage critical low alarm.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_crit_alarm Input voltage critical high alarm.
+
in1_lowest Lowest input voltage.
+
LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
LTM2987 only.
in1_highest Highest input voltage.
+
in1_reset_history Reset input voltage history.
in[N]_label "vout[1-8]".
- LTC2974, LTC2975: N=2-5
- LTC2977, LTC2980, LTM2987: N=2-9
- LTC2978: N=2-9
- LTC3880, LTC3882, LTC23886 LTC3887, LTM4675, LTM4676:
- N=2-3
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=2-5
+ - LTC2977, LTC2980, LTM2987: N=2-9
+ - LTC2978: N=2-9
+ - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=2-3
+ - LTC3883: N=2
+
in[N]_input Measured output voltage.
+
in[N]_min Minimum output voltage.
+
in[N]_max Maximum output voltage.
+
in[N]_lcrit Critical minimum output voltage.
+
in[N]_crit Critical maximum output voltage.
+
in[N]_min_alarm Output voltage low alarm.
+
in[N]_max_alarm Output voltage high alarm.
+
in[N]_lcrit_alarm Output voltage critical low alarm.
+
in[N]_crit_alarm Output voltage critical high alarm.
-in[N]_lowest Lowest output voltage. LTC2974, LTC2975,
- and LTC2978 only.
+
+in[N]_lowest Lowest output voltage.
+
+ LTC2974, LTC2975, and LTC2978 only.
+
in[N]_highest Highest output voltage.
+
in[N]_reset_history Reset output voltage history.
temp[N]_input Measured temperature.
- On LTC2974 and LTC2975, temp[1-4] report external
- temperatures, and temp5 reports the chip temperature.
- On LTC2977, LTC2980, LTC2978, and LTM2987, only one
- temperature measurement is supported and reports
- the chip temperature.
- On LTC3880, LTC3882, LTC3887, LTM4675, and LTM4676,
- temp1 and temp2 report external temperatures, and temp3
- reports the chip temperature.
- On LTC3883, temp1 reports an external temperature,
- and temp2 reports the chip temperature.
-temp[N]_min Mimimum temperature. LTC2974, LCT2977, LTM2980, LTC2978,
- and LTM2987 only.
+
+ - On LTC2974 and LTC2975, temp[1-4] report external
+ temperatures, and temp5 reports the chip temperature.
+ - On LTC2977, LTC2980, LTC2978, and LTM2987, only one
+ temperature measurement is supported and reports
+ the chip temperature.
+ - On LTC3880, LTC3882, LTC3887, LTM4675, and LTM4676,
+ temp1 and temp2 report external temperatures, and
+ temp3 reports the chip temperature.
+ - On LTC3883, temp1 reports an external temperature,
+ and temp2 reports the chip temperature.
+
+temp[N]_min Minimum temperature.
+
+ LTC2974, LTC2977, LTM2980, LTC2978, and LTM2987 only.
+
temp[N]_max Maximum temperature.
+
temp[N]_lcrit Critical low temperature.
+
temp[N]_crit Critical high temperature.
+
temp[N]_min_alarm Temperature low alarm.
+
LTC2974, LTC2975, LTC2977, LTM2980, LTC2978, and
LTM2987 only.
+
temp[N]_max_alarm Temperature high alarm.
+
temp[N]_lcrit_alarm Temperature critical low alarm.
+
temp[N]_crit_alarm Temperature critical high alarm.
+
temp[N]_lowest Lowest measured temperature.
- LTC2974, LTC2975, LTC2977, LTM2980, LTC2978, and
- LTM2987 only.
- Not supported for chip temperature sensor on LTC2974 and
- LTC2975.
-temp[N]_highest Highest measured temperature. Not supported for chip
- temperature sensor on LTC2974 and LTC2975.
-temp[N]_reset_history Reset temperature history. Not supported for chip
- temperature sensor on LTC2974 and LTC2975.
+
+ - LTC2974, LTC2975, LTC2977, LTM2980, LTC2978, and
+ LTM2987 only.
+ - Not supported for chip temperature sensor on LTC2974
+ and LTC2975.
+
+temp[N]_highest Highest measured temperature.
+
+ Not supported for chip temperature sensor on
+ LTC2974 and LTC2975.
+
+temp[N]_reset_history Reset temperature history.
+
+ Not supported for chip temperature sensor on
+ LTC2974 and LTC2975.
power1_label "pin". LTC3883 and LTC3886 only.
+
power1_input Measured input power.
power[N]_label "pout[1-4]".
- LTC2974, LTC2975: N=1-4
- LTC2977, LTC2980, LTM2987: Not supported
- LTC2978: Not supported
- LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
- N=1-2
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=1-4
+ - LTC2977, LTC2980, LTM2987: Not supported
+ - LTC2978: Not supported
+ - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=1-2
+ - LTC3883: N=2
+
power[N]_input Measured output power.
-curr1_label "iin". LTC3880, LTC3883, LTC3886, LTC3887, LTM4675,
+curr1_label "iin".
+
+ LTC3880, LTC3883, LTC3886, LTC3887, LTM4675,
and LTM4676 only.
+
curr1_input Measured input current.
+
curr1_max Maximum input current.
+
curr1_max_alarm Input current high alarm.
-curr1_highest Highest input current. LTC3883 and LTC3886 only.
-curr1_reset_history Reset input current history. LTC3883 and LTC3886 only.
+
+curr1_highest Highest input current.
+
+ LTC3883 and LTC3886 only.
+
+curr1_reset_history Reset input current history.
+
+ LTC3883 and LTC3886 only.
curr[N]_label "iout[1-4]".
- LTC2974, LTC2975: N=1-4
- LTC2977, LTC2980, LTM2987: not supported
- LTC2978: not supported
- LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
- N=2-3
- LTC3883: N=2
+
+ - LTC2974, LTC2975: N=1-4
+ - LTC2977, LTC2980, LTM2987: not supported
+ - LTC2978: not supported
+ - LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+ N=2-3
+ - LTC3883: N=2
+
curr[N]_input Measured output current.
+
curr[N]_max Maximum output current.
+
curr[N]_crit Critical high output current.
-curr[N]_lcrit Critical low output current. LTC2974 and LTC2975 only.
+
+curr[N]_lcrit Critical low output current.
+
+ LTC2974 and LTC2975 only.
+
curr[N]_max_alarm Output current high alarm.
+
curr[N]_crit_alarm Output current critical high alarm.
+
curr[N]_lcrit_alarm Output current critical low alarm.
+
+ LTC2974 and LTC2975 only.
+
+curr[N]_lowest Lowest output current.
+
LTC2974 and LTC2975 only.
-curr[N]_lowest Lowest output current. LTC2974 and LTC2975 only.
+
curr[N]_highest Highest output current.
+
curr[N]_reset_history Reset output current history.
+======================= ========================================================
diff --git a/Documentation/hwmon/ltc2990 b/Documentation/hwmon/ltc2990.rst
index 3ed68f676c0f..e0a369e679d3 100644
--- a/Documentation/hwmon/ltc2990
+++ b/Documentation/hwmon/ltc2990.rst
@@ -1,14 +1,23 @@
Kernel driver ltc2990
=====================
+
Supported chips:
+
* Linear Technology LTC2990
+
Prefix: 'ltc2990'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc2990
-Author: Mike Looijmans <mike.looijmans@topic.nl>
- Tom Levens <tom.levens@cern.ch>
+
+Authors:
+
+ - Mike Looijmans <mike.looijmans@topic.nl>
+ - Tom Levens <tom.levens@cern.ch>
Description
@@ -31,17 +40,21 @@ devices explicitly.
Sysfs attributes
----------------
+============= ==================================================
in0_input Voltage at Vcc pin in millivolt (range 2.5V to 5V)
-temp1_input Internal chip temperature in millidegrees Celcius
+temp1_input Internal chip temperature in millidegrees Celsius
+============= ==================================================
A subset of the following attributes are visible, depending on the measurement
mode of the chip.
+============= ==========================================================
in[1-4]_input Voltage at V[1-4] pin in millivolt
-temp2_input External temperature sensor TR1 in millidegrees Celcius
-temp3_input External temperature sensor TR2 in millidegrees Celcius
+temp2_input External temperature sensor TR1 in millidegrees Celsius
+temp3_input External temperature sensor TR2 in millidegrees Celsius
curr1_input Current in mA across V1-V2 assuming a 1mOhm sense resistor
curr2_input Current in mA across V3-V4 assuming a 1mOhm sense resistor
+============= ==========================================================
The "curr*_input" measurements actually report the voltage drop across the
input pins in microvolts. This is equivalent to the current through a 1mOhm
diff --git a/Documentation/hwmon/ltc3815 b/Documentation/hwmon/ltc3815.rst
index eb7db2d13587..fb0135fc1925 100644
--- a/Documentation/hwmon/ltc3815
+++ b/Documentation/hwmon/ltc3815.rst
@@ -2,9 +2,13 @@ Kernel driver ltc3815
=====================
Supported chips:
+
* Linear Technology LTC3815
+
Prefix: 'ltc3815'
+
Addresses scanned: -
+
Datasheet: http://www.linear.com/product/ltc3815
Author: Guenter Roeck <linux@roeck-us.net>
@@ -23,15 +27,16 @@ This driver does not probe for PMBus devices. You will have to instantiate
devices explicitly.
Example: the following commands will load the driver for an LTC3815
-at address 0x20 on I2C bus #1:
+at address 0x20 on I2C bus #1::
-# modprobe ltc3815
-# echo ltc3815 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
+ # modprobe ltc3815
+ # echo ltc3815 0x20 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs attributes
----------------
+======================= =======================================================
in1_label "vin"
in1_input Measured input voltage.
in1_alarm Input voltage alarm.
@@ -59,3 +64,4 @@ curr2_input Measured output current.
curr2_alarm Output current alarm.
curr2_highest Highest output current.
curr2_reset_history Reset output current history.
+======================= =======================================================
diff --git a/Documentation/hwmon/ltc4151 b/Documentation/hwmon/ltc4151.rst
index 43c667e6677a..c39229b19624 100644
--- a/Documentation/hwmon/ltc4151
+++ b/Documentation/hwmon/ltc4151.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4151
=====================
Supported chips:
+
* Linear Technology LTC4151
+
Prefix: 'ltc4151'
+
Addresses scanned: -
+
Datasheet:
- http://www.linear.com/docs/Datasheet/4151fc.pdf
+
+ http://www.linear.com/docs/Datasheet/4151fc.pdf
Author: Per Dalen <per.dalen@appeartv.com>
@@ -25,9 +30,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4151 at address 0x6f
-on I2C bus #0:
-# modprobe ltc4151
-# echo ltc4151 0x6f > /sys/bus/i2c/devices/i2c-0/new_device
+on I2C bus #0::
+
+ # modprobe ltc4151
+ # echo ltc4151 0x6f > /sys/bus/i2c/devices/i2c-0/new_device
Sysfs entries
@@ -40,8 +46,10 @@ Current reading provided by this driver is reported as obtained from the Current
Sense register. The reported value assumes that a 1 mOhm sense resistor is
installed.
+======================= ==================
in1_input VDIN voltage (mV)
in2_input ADIN voltage (mV)
curr1_input SENSE current (mA)
+======================= ==================
diff --git a/Documentation/hwmon/ltc4215 b/Documentation/hwmon/ltc4215.rst
index c196a1846259..8d5044d99bab 100644
--- a/Documentation/hwmon/ltc4215
+++ b/Documentation/hwmon/ltc4215.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4215
=====================
Supported chips:
+
* Linear Technology LTC4215
+
Prefix: 'ltc4215'
+
Addresses scanned: 0x44
+
Datasheet:
- http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1163,P17572,D12697
+
+ http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1163,P17572,D12697
Author: Ira W. Snyder <iws@ovro.caltech.edu>
@@ -26,9 +31,10 @@ of the possible addresses are unfriendly to probing. You will have to
instantiate the devices explicitly.
Example: the following will load the driver for an LTC4215 at address 0x44
-on I2C bus #0:
-$ modprobe ltc4215
-$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
+on I2C bus #0::
+
+ $ modprobe ltc4215
+ $ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device
Sysfs entries
@@ -38,6 +44,7 @@ The LTC4215 has built-in limits for overvoltage, undervoltage, and
undercurrent warnings. This makes it very likely that the reference
circuit will be used.
+======================= =========================
in1_input input voltage
in2_input output voltage
@@ -49,3 +56,4 @@ curr1_max_alarm overcurrent alarm
power1_input power usage
power1_alarm power bad alarm
+======================= =========================
diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245.rst
index 4ca7a9da09f9..3dafd08a4e87 100644
--- a/Documentation/hwmon/ltc4245
+++ b/Documentation/hwmon/ltc4245.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4245
=====================
Supported chips:
+
* Linear Technology LTC4245
+
Prefix: 'ltc4245'
+
Addresses scanned: 0x20-0x3f
+
Datasheet:
- http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
+
+ http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1140,P19392,D13517
Author: Ira W. Snyder <iws@ovro.caltech.edu>
@@ -27,9 +32,10 @@ of the possible addresses are unfriendly to probing. You will have to
instantiate the devices explicitly.
Example: the following will load the driver for an LTC4245 at address 0x23
-on I2C bus #1:
-$ modprobe ltc4245
-$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4245
+ $ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -42,6 +48,7 @@ This driver uses the values in the datasheet to change the register values
into the values specified in the sysfs-interface document. The current readings
rely on the sense resistors listed in Table 2: "Sense Resistor Values".
+======================= =======================================================
in1_input 12v input voltage (mV)
in2_input 5v input voltage (mV)
in3_input 3v input voltage (mV)
@@ -80,6 +87,7 @@ power1_input 12v power usage (mW)
power2_input 5v power usage (mW)
power3_input 3v power usage (mW)
power4_input Vee (-12v) power usage (mW)
+======================= =======================================================
Note 1
@@ -96,6 +104,7 @@ slowly, -EAGAIN will be returned when you read the sysfs attribute containing
the sensor reading.
The LTC4245 chip can be configured to sample all GPIO pins with two methods:
+
1) platform data -- see include/linux/platform_data/ltc4245.h
2) OF device tree -- add the "ltc4245,use-extra-gpios" property to each chip
diff --git a/Documentation/hwmon/ltc4260 b/Documentation/hwmon/ltc4260.rst
index c4ff4ad998b2..4c335b6a51d1 100644
--- a/Documentation/hwmon/ltc4260
+++ b/Documentation/hwmon/ltc4260.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4260
=====================
Supported chips:
+
* Linear Technology LTC4260
+
Prefix: 'ltc4260'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/en/datasheet/4260fc.pdf
+
+ http://cds.linear.com/docs/en/datasheet/4260fc.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4260 at address 0x10
-on I2C bus #1:
-$ modprobe ltc4260
-$ echo ltc4260 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4260
+ $ echo ltc4260 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -45,6 +51,7 @@ Current Sense register. The reported value assumes that a 1 mOhm sense resistor
is installed. If a different sense resistor is installed, calculate the real
current by dividing the reported value by the sense resistor value in mOhm.
+======================= =======================
in1_input SOURCE voltage (mV)
in1_min_alarm Undervoltage alarm
in1_max_alarm Overvoltage alarm
@@ -54,3 +61,4 @@ in2_alarm Power bad alarm
curr1_input SENSE current (mA)
curr1_alarm SENSE overcurrent alarm
+======================= =======================
diff --git a/Documentation/hwmon/ltc4261 b/Documentation/hwmon/ltc4261.rst
index 9378a75c6134..c80233f8082e 100644
--- a/Documentation/hwmon/ltc4261
+++ b/Documentation/hwmon/ltc4261.rst
@@ -2,11 +2,16 @@ Kernel driver ltc4261
=====================
Supported chips:
+
* Linear Technology LTC4261
+
Prefix: 'ltc4261'
+
Addresses scanned: -
+
Datasheet:
- http://cds.linear.com/docs/Datasheet/42612fb.pdf
+
+ http://cds.linear.com/docs/Datasheet/42612fb.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -26,9 +31,10 @@ which can be safely used to identify the chip. You will have to instantiate
the devices explicitly.
Example: the following will load the driver for an LTC4261 at address 0x10
-on I2C bus #1:
-$ modprobe ltc4261
-$ echo ltc4261 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe ltc4261
+ $ echo ltc4261 0x10 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -51,6 +57,7 @@ the proximity of the ADIN2 pin to the OV pin. ADIN2 is, however, not available
on all chip variants. To ensure that the alarm condition is reported to the user,
report it with both voltage sensors.
+======================= =============================
in1_input ADIN2 voltage (mV)
in1_min_alarm ADIN/ADIN2 Undervoltage alarm
in1_max_alarm ADIN/ADIN2 Overvoltage alarm
@@ -61,3 +68,4 @@ in2_max_alarm ADIN/ADIN2 Overvoltage alarm
curr1_input SENSE current (mA)
curr1_alarm SENSE overcurrent alarm
+======================= =============================
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064.rst
index 265370f5cb82..6d5e9538991f 100644
--- a/Documentation/hwmon/max16064
+++ b/Documentation/hwmon/max16064.rst
@@ -2,9 +2,13 @@ Kernel driver max16064
======================
Supported chips:
+
* Maxim MAX16064
+
Prefix: 'max16064'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -17,7 +21,7 @@ This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
Controller with Active-Voltage Output Control and PMBus Interface.
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,16 +44,20 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in[1-4]_label "vout[1-4]"
in[1-4]_input Measured voltage. From READ_VOUT register.
in[1-4]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-4]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in[1-4]_highest Historical maximum voltage.
in[1-4]_reset_history Write any value to reset history.
@@ -64,3 +72,4 @@ temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
status is set.
temp1_highest Historical maximum temperature.
temp1_reset_history Write any value to reset history.
+======================= ========================================================
diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065.rst
index 208a29e43010..fa5c852a178c 100644
--- a/Documentation/hwmon/max16065
+++ b/Documentation/hwmon/max16065.rst
@@ -1,28 +1,48 @@
Kernel driver max16065
======================
+
Supported chips:
+
* Maxim MAX16065, MAX16066
+
Prefixes: 'max16065', 'max16066'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16065-MAX16066.pdf
+
* Maxim MAX16067
+
Prefix: 'max16067'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16067.pdf
+
* Maxim MAX16068
+
Prefix: 'max16068'
+
Addresses scanned: -
+
Datasheet:
+
http://datasheets.maxim-ic.com/en/ds/MAX16068.pdf
+
* Maxim MAX16070/MAX16071
+
Prefixes: 'max16070', 'max16071'
+
Addresses scanned: -
+
Datasheet:
- http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
+ http://datasheets.maxim-ic.com/en/ds/MAX16070-MAX16071.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -73,6 +93,7 @@ turn into a brick.
Sysfs entries
-------------
+======================= ========================================================
in[0-11]_input Input voltage measurements.
in12_input Voltage on CSP (Current Sense Positive) pin.
@@ -103,3 +124,4 @@ curr1_input Current sense input; only if the chip supports current
curr1_alarm Overcurrent alarm; only if the chip supports current
sensing and if current sensing is enabled.
+======================= ========================================================
diff --git a/Documentation/hwmon/max1619 b/Documentation/hwmon/max1619.rst
index 518bae3a80c4..e25956e70f73 100644
--- a/Documentation/hwmon/max1619
+++ b/Documentation/hwmon/max1619.rst
@@ -2,15 +2,20 @@ Kernel driver max1619
=====================
Supported chips:
+
* Maxim MAX1619
+
Prefix: 'max1619'
+
Addresses scanned: I2C 0x18-0x1a, 0x29-0x2b, 0x4c-0x4e
+
Datasheet: Publicly available at the Maxim website
- http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
+
+ http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
Authors:
- Oleksij Rempel <bug-track@fisher-privat.net>,
- Jean Delvare <jdelvare@suse.de>
+ - Oleksij Rempel <bug-track@fisher-privat.net>
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
@@ -26,4 +31,3 @@ Only the external sensor has high and low limits.
The max1619 driver will not update its values more frequently than every
other second; reading them more often will do no harm, but will return
'old' values.
-
diff --git a/Documentation/hwmon/max1668 b/Documentation/hwmon/max1668.rst
index 8f9d570dbfec..417f17d750e6 100644
--- a/Documentation/hwmon/max1668
+++ b/Documentation/hwmon/max1668.rst
@@ -2,12 +2,17 @@ Kernel driver max1668
=====================
Supported chips:
+
* Maxim MAX1668, MAX1805 and MAX1989
+
Prefix: 'max1668'
+
Addresses scanned: I2C 0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX1668-MAX1989.pdf
Author:
+
David George <david.george@ska.ac.za>
Description
@@ -23,8 +28,9 @@ two ICs.
The driver is able to distinguish between the devices and creates sysfs
entries as follows:
-MAX1805, MAX1668 and MAX1989:
+- MAX1805, MAX1668 and MAX1989:
+=============== == ============================================================
temp1_input ro local (ambient) temperature
temp1_max rw local temperature maximum threshold for alarm
temp1_max_alarm ro local temperature maximum threshold alarm
@@ -40,8 +46,11 @@ temp3_max rw remote temperature 2 maximum threshold for alarm
temp3_max_alarm ro remote temperature 2 maximum threshold alarm
temp3_min rw remote temperature 2 minimum threshold for alarm
temp3_min_alarm ro remote temperature 2 minimum threshold alarm
+=============== == ============================================================
+
+- MAX1668 and MAX1989 only:
-MAX1668 and MAX1989 only:
+=============== == ============================================================
temp4_input ro remote temperature 3
temp4_max rw remote temperature 3 maximum threshold for alarm
temp4_max_alarm ro remote temperature 3 maximum threshold alarm
@@ -52,6 +61,7 @@ temp5_max rw remote temperature 4 maximum threshold for alarm
temp5_max_alarm ro remote temperature 4 maximum threshold alarm
temp5_min rw remote temperature 4 minimum threshold for alarm
temp5_min_alarm ro remote temperature 4 minimum threshold alarm
+=============== == ============================================================
Module Parameters
-----------------
diff --git a/Documentation/hwmon/max197 b/Documentation/hwmon/max197.rst
index 8d89b9009df8..02fe19bc3428 100644
--- a/Documentation/hwmon/max197
+++ b/Documentation/hwmon/max197.rst
@@ -1,16 +1,22 @@
-Maxim MAX197 driver
-===================
+Kernel driver max197
+====================
Author:
+
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
Supported chips:
+
* Maxim MAX197
+
Prefix: 'max197'
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX197.pdf
* Maxim MAX199
+
Prefix: 'max199'
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX199.pdf
Description
@@ -26,7 +32,7 @@ Platform data
-------------
The MAX197 platform data (defined in linux/platform_data/max197.h) should be
-filled with a pointer to a conversion function, defined like:
+filled with a pointer to a conversion function, defined like::
int convert(u8 ctrl);
@@ -36,25 +42,29 @@ or a negative error code otherwise.
Control byte format:
+======= ========== ============================================
Bit Name Description
7,6 PD1,PD0 Clock and Power-Down modes
5 ACQMOD Internal or External Controlled Acquisition
4 RNG Full-scale voltage magnitude at the input
3 BIP Unipolar or Bipolar conversion mode
2,1,0 A2,A1,A0 Channel
+======= ========== ============================================
Sysfs interface
---------------
-* in[0-7]_input: The conversion value for the corresponding channel.
- RO
+ ============== ==============================================================
+ in[0-7]_input The conversion value for the corresponding channel.
+ RO
-* in[0-7]_min: The lower limit (in mV) for the corresponding channel.
- For the MAX197, it will be adjusted to -10000, -5000, or 0.
- For the MAX199, it will be adjusted to -4000, -2000, or 0.
- RW
+ in[0-7]_min The lower limit (in mV) for the corresponding channel.
+ For the MAX197, it will be adjusted to -10000, -5000, or 0.
+ For the MAX199, it will be adjusted to -4000, -2000, or 0.
+ RW
-* in[0-7]_max: The higher limit (in mV) for the corresponding channel.
- For the MAX197, it will be adjusted to 0, 5000, or 10000.
- For the MAX199, it will be adjusted to 0, 2000, or 4000.
- RW
+ in[0-7]_max The higher limit (in mV) for the corresponding channel.
+ For the MAX197, it will be adjusted to 0, 5000, or 10000.
+ For the MAX199, it will be adjusted to 0, 2000, or 4000.
+ RW
+ ============== ==============================================================
diff --git a/Documentation/hwmon/max20751 b/Documentation/hwmon/max20751.rst
index f9fa25ebb521..aa4469be6674 100644
--- a/Documentation/hwmon/max20751
+++ b/Documentation/hwmon/max20751.rst
@@ -2,10 +2,15 @@ Kernel driver max20751
======================
Supported chips:
+
* maxim MAX20751
+
Prefix: 'max20751'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX20751.pdf
+
Application note: http://pdfserv.maximintegrated.com/en/an/AN5941.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -18,7 +23,7 @@ This driver supports MAX20751 Multiphase Master with PMBus Interface
and Internal Buck Converter.
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,6 +45,7 @@ Sysfs entries
The following attributes are supported.
+======================= =======================================================
in1_label "vin1"
in1_input Measured voltage.
in1_min Minimum input voltage.
@@ -75,3 +81,4 @@ temp1_crit_alarm Chip temperature critical high alarm.
power1_input Output power.
power1_label "pout1"
+======================= =======================================================
diff --git a/Documentation/hwmon/max31722 b/Documentation/hwmon/max31722.rst
index 090da84538c8..0ab15c00b226 100644
--- a/Documentation/hwmon/max31722
+++ b/Documentation/hwmon/max31722.rst
@@ -2,15 +2,25 @@ Kernel driver max31722
======================
Supported chips:
+
* Maxim Integrated MAX31722
+
Prefix: 'max31722'
+
ACPI ID: MAX31722
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
+
* Maxim Integrated MAX31723
+
Prefix: 'max31723'
+
ACPI ID: MAX31723
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31722-MAX31723.pdf
Author: Tiberiu Breana <tiberiu.a.breana@intel.com>
@@ -31,4 +41,6 @@ Sysfs entries
The following attribute is supported:
+======================= =======================================================
temp1_input Measured temperature. Read-only.
+======================= =======================================================
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785.rst
index 270c5f865261..c8c6756d0ee1 100644
--- a/Documentation/hwmon/max31785
+++ b/Documentation/hwmon/max31785.rst
@@ -2,9 +2,13 @@ Kernel driver max31785
======================
Supported chips:
+
* Maxim MAX31785, MAX31785A
+
Prefix: 'max31785' or 'max31785a'
+
Addresses scanned: -
+
Datasheet: https://datasheets.maximintegrated.com/en/ds/MAX31785.pdf
Author: Andrew Jeffery <andrew@aj.id.au>
@@ -30,6 +34,7 @@ devices explicitly.
Sysfs attributes
----------------
+======================= =======================================================
fan[1-4]_alarm Fan alarm.
fan[1-4]_fault Fan fault.
fan[1-8]_input Fan RPM. On the MAX31785A, inputs 5-8 correspond to the
@@ -58,3 +63,4 @@ temp[1-11]_crit_alarm Chip temperature critical high alarm
temp[1-11]_input Measured temperature
temp[1-11]_max Maximum temperature
temp[1-11]_max_alarm Chip temperature high alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/max31790 b/Documentation/hwmon/max31790.rst
index 855e62430da9..84c62a12ef3a 100644
--- a/Documentation/hwmon/max31790
+++ b/Documentation/hwmon/max31790.rst
@@ -2,9 +2,13 @@ Kernel driver max31790
======================
Supported chips:
+
* Maxim MAX31790
+
Prefix: 'max31790'
+
Addresses scanned: -
+
Datasheet: http://pdfserv.maximintegrated.com/en/ds/MAX31790.pdf
Author: Il Han <corone.il.han@gmail.com>
@@ -30,8 +34,10 @@ also be configured to serve as tachometer inputs.
Sysfs entries
-------------
+================== === =======================================================
fan[1-12]_input RO fan tachometer speed in RPM
fan[1-12]_fault RO fan experienced fault
fan[1-6]_target RW desired fan speed in RPM
pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
pwm[1-6] RW fan target duty cycle (0-255)
+================== === =======================================================
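+
+A short usage sketch (hwmon sysfs path assumed): switch channel 1 to rpm mode
+and request a target speed::
+
+  $ echo 2 > pwm1_enable      # 2 = rpm mode
+  $ echo 3000 > fan1_target   # desired fan speed in RPM
+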
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440.rst
index b2de8fa49273..939138e12b02 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440.rst
@@ -2,34 +2,63 @@ Kernel driver max34440
======================
Supported chips:
+
* Maxim MAX34440
+
Prefixes: 'max34440'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
+
* Maxim MAX34441
+
PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+
Prefixes: 'max34441'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
+
* Maxim MAX34446
+
PMBus Power-Supply Data Logger
+
Prefixes: 'max34446'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
+
* Maxim MAX34451
+
PMBus 16-Channel V/I Monitor and 12-Channel Sequencer/Marginer
+
Prefixes: 'max34451'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34451.pdf
+
* Maxim MAX34460
+
PMBus 12-Channel Voltage Monitor & Sequencer
+
Prefix: 'max34460'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34460.pdf
+
* Maxim MAX34461
+
PMBus 16-Channel Voltage Monitor & Sequencer
+
Prefix: 'max34461'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34461.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -47,7 +76,7 @@ based on GIN pins. The MAX34460 supports 12 voltage channels, and the MAX34461
supports 16 voltage channels.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -77,42 +106,67 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+In
+~~
+
+======================= =======================================================
in[1-6]_label "vout[1-6]".
in[1-6]_input Measured voltage. From READ_VOUT register.
in[1-6]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-6]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in[1-6]_lowest Historical minimum voltage.
in[1-6]_highest Historical maximum voltage.
in[1-6]_reset_history Write any value to reset history.
+======================= =======================================================
+
+.. note:: MAX34446 only supports in[1-4].
- MAX34446 only supports in[1-4].
+Curr
+~~~~
+======================= ========================================================
curr[1-6]_label "iout[1-6]".
curr[1-6]_input Measured current. From READ_IOUT register.
curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr[1-4]_average Historical average current (MAX34446/34451 only).
curr[1-6]_highest Historical maximum current.
curr[1-6]_reset_history Write any value to reset history.
+======================= ========================================================
+
+.. note::
+
+ - in6 and curr6 attributes only exist for MAX34440.
+ - MAX34446 only supports curr[1-4].
- in6 and curr6 attributes only exist for MAX34440.
- MAX34446 only supports curr[1-4].
+Power
+~~~~~
+======================= ========================================================
power[1,3]_label "pout[1,3]"
power[1,3]_input Measured power.
power[1,3]_average Historical average power.
power[1,3]_highest Historical maximum power.
+======================= ========================================================
- Power attributes only exist for MAX34446.
+.. note:: Power attributes only exist for MAX34446.
+Temp
+~~~~
+
+======================= ========================================================
temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
temp1 is the chip's internal temperature. temp2..temp5
are remote I2C temperature sensors. For MAX34441, temp6
@@ -125,11 +179,17 @@ temp[1-8]_crit_alarm Temperature critical high alarm.
temp[1-8]_average Historical average temperature (MAX34446 only).
temp[1-8]_highest Historical maximum temperature.
temp[1-8]_reset_history Write any value to reset history.
+======================= ========================================================
+
+
+.. note::
+ - temp7 and temp8 attributes only exist for MAX34440.
+ - MAX34446 only supports temp[1-3].
+
- temp7 and temp8 attributes only exist for MAX34440.
- MAX34446 only supports temp[1-3].
+.. note::
-MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on input pins)
-and temp[1-5].
-MAX34460 supports attribute groups in[1-12] and temp[1-5].
-MAX34461 supports attribute groups in[1-16] and temp[1-5].
+ - MAX34451 supports attribute groups in[1-16] (or curr[1-16] based on
+ input pins) and temp[1-5].
+ - MAX34460 supports attribute groups in[1-12] and temp[1-5].
+ - MAX34461 supports attribute groups in[1-16] and temp[1-5].
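+
+These are standard hwmon sysfs attributes; voltages are reported in
+millivolts. As an illustration only (not part of the driver, and with a
+hypothetical hwmon index), an attribute can be read from user space like
+this::
+
+  /* cc -o rdvout rdvout.c */
+  #include <stdio.h>
+
+  int main(void)
+  {
+          FILE *f = fopen("/sys/class/hwmon/hwmon2/in1_input", "r");
+          long mv;
+
+          if (!f || fscanf(f, "%ld", &mv) != 1)
+                  return 1;
+          fclose(f);
+          printf("vout1 = %ld.%03ld V\n", mv / 1000, mv % 1000);
+          return 0;
+  }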
diff --git a/Documentation/hwmon/max6639 b/Documentation/hwmon/max6639.rst
index dc49f8be7167..3da54225f83c 100644
--- a/Documentation/hwmon/max6639
+++ b/Documentation/hwmon/max6639.rst
@@ -2,14 +2,18 @@ Kernel driver max6639
=====================
Supported chips:
+
* Maxim MAX6639
+
Prefix: 'max6639'
+
Addresses scanned: I2C 0x2c, 0x2e, 0x2f
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6639.pdf
Authors:
- He Changqing <hechangqing@semptian.com>
- Roland Stigge <stigge@antcom.de>
+ - He Changqing <hechangqing@semptian.com>
+ - Roland Stigge <stigge@antcom.de>
Description
-----------
@@ -21,19 +25,20 @@ diode-connected transistors.
The following device attributes are implemented via sysfs:
+====================== ==== ===================================================
Attribute R/W Contents
-----------------------------------------------------------------------------
+====================== ==== ===================================================
temp1_input R Temperature channel 1 input (0..150 C)
temp2_input R Temperature channel 2 input (0..150 C)
temp1_fault R Temperature channel 1 diode fault
temp2_fault R Temperature channel 2 diode fault
temp1_max RW Set THERM temperature for input 1
- (in C, see datasheet)
+ (in C, see datasheet)
temp2_max RW Set THERM temperature for input 2
temp1_crit RW Set ALERT temperature for input 1
temp2_crit RW Set ALERT temperature for input 2
temp1_emergency RW Set OT temperature for input 1
- (in C, see datasheet)
+ (in C, see datasheet)
temp2_emergency RW Set OT temperature for input 2
pwm1 RW Fan 1 target duty cycle (0..255)
pwm2 RW Fan 2 target duty cycle (0..255)
@@ -47,3 +52,4 @@ temp1_crit_alarm R Alarm on ALERT temperature on channel 1
temp2_crit_alarm R Alarm on ALERT temperature on channel 2
temp1_emergency_alarm R Alarm on OT temperature on channel 1
temp2_emergency_alarm R Alarm on OT temperature on channel 2
+====================== ==== ===================================================
diff --git a/Documentation/hwmon/max6642 b/Documentation/hwmon/max6642.rst
index afbd3e4942e2..7e5b7d4f9492 100644
--- a/Documentation/hwmon/max6642
+++ b/Documentation/hwmon/max6642.rst
@@ -2,14 +2,20 @@ Kernel driver max6642
=====================
Supported chips:
+
* Maxim MAX6642
+
Prefix: 'max6642'
+
Addresses scanned: I2C 0x48-0x4f
+
Datasheet: Publicly available at the Maxim website
- http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
+
+ http://datasheets.maxim-ic.com/en/ds/MAX6642.pdf
Authors:
- Per Dalen <per.dalen@appeartv.com>
+
+ Per Dalen <per.dalen@appeartv.com>
Description
-----------
diff --git a/Documentation/hwmon/max6650 b/Documentation/hwmon/max6650.rst
index dff1d296a48b..253482add082 100644
--- a/Documentation/hwmon/max6650
+++ b/Documentation/hwmon/max6650.rst
@@ -2,19 +2,27 @@ Kernel driver max6650
=====================
Supported chips:
+
* Maxim MAX6650
+
Prefix: 'max6650'
+
Addresses scanned: none
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
+
* Maxim MAX6651
+
Prefix: 'max6651'
+
Addresses scanned: none
+
Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
Authors:
- Hans J. Koch <hjk@hansjkoch.de>
- John Morris <john.morris@spirentcom.com>
- Claus Gindhart <claus.gindhart@kontron.com>
+ - Hans J. Koch <hjk@hansjkoch.de>
+ - John Morris <john.morris@spirentcom.com>
+ - Claus Gindhart <claus.gindhart@kontron.com>
Description
-----------
@@ -28,6 +36,7 @@ The driver is not able to distinguish between the 2 devices.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =======================================================
fan1_input ro fan tachometer speed in RPM
fan2_input ro "
fan3_input ro "
@@ -40,6 +49,7 @@ pwm1 rw relative speed (0-255), 255=max. speed.
fan1_div rw sets the speed range the inputs can handle. Legal
values are 1, 2, 4, and 8. Use lower values for
faster fans.
+=============== ======= =======================================================
Usage notes
-----------
@@ -62,4 +72,3 @@ clock: The clock frequency in Hz of the chip the driver should assume [254000]
Please have a look at the MAX6650/6651 data sheet and make sure that you fully
understand the meaning of these parameters before you attempt to change them.
-
diff --git a/Documentation/hwmon/max6697 b/Documentation/hwmon/max6697.rst
index 6594177ededa..ffc5a7d8d33b 100644
--- a/Documentation/hwmon/max6697
+++ b/Documentation/hwmon/max6697.rst
@@ -2,38 +2,69 @@ Kernel driver max6697
=====================
Supported chips:
+
* Maxim MAX6581
+
Prefix: 'max6581'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6581.pdf
+
* Maxim MAX6602
+
Prefix: 'max6602'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6602.pdf
+
* Maxim MAX6622
+
Prefix: 'max6622'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6622.pdf
+
* Maxim MAX6636
+
Prefix: 'max6636'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6636.pdf
+
* Maxim MAX6689
+
Prefix: 'max6689'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6689.pdf
+
* Maxim MAX6693
+
Prefix: 'max6693'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6693.pdf
+
* Maxim MAX6694
+
Prefix: 'max6694'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6694.pdf
+
* Maxim MAX6697
+
Prefix: 'max6697'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6697.pdf
+
* Maxim MAX6698
+
Prefix: 'max6698'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6698.pdf
+
* Maxim MAX6699
+
Prefix: 'max6699'
+
Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX6699.pdf
Author:
+
Guenter Roeck <linux@roeck-us.net>
Description
@@ -50,9 +81,11 @@ The driver provides the following sysfs attributes. temp1 is the local (chip)
temperature, temp[2..n] are remote temperatures. The actually supported
per-channel attributes are chip type and channel dependent.
+================ == ==========================================================
tempX_input RO temperature
tempX_max RW temperature maximum threshold
tempX_max_alarm RO temperature maximum threshold alarm
tempX_crit RW temperature critical threshold
tempX_crit_alarm RO temperature critical threshold alarm
tempX_fault RO temperature diode fault (remote sensors only)
+================ == ==========================================================
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688.rst
index ca233bec7a8a..009487759c61 100644
--- a/Documentation/hwmon/max8688
+++ b/Documentation/hwmon/max8688.rst
@@ -2,9 +2,13 @@ Kernel driver max8688
=====================
Supported chips:
+
* Maxim MAX8688
+
Prefix: 'max8688'
+
Addresses scanned: -
+
Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -17,7 +21,7 @@ This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
Controller/Monitor with PMBus Interface.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -40,23 +44,28 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vout1"
in1_input Measured voltage. From READ_VOUT register.
in1_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in1_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
in1_highest Historical maximum voltage.
in1_reset_history Write any value to reset history.
curr1_label "iout1"
curr1_input Measured current. From READ_IOUT register.
curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register.
curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr1_highest Historical maximum current.
@@ -73,3 +82,4 @@ temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
status is set.
temp1_highest Historical maximum temperature.
temp1_reset_history Write any value to reset history.
+======================= ========================================================
diff --git a/Documentation/hwmon/mc13783-adc b/Documentation/hwmon/mc13783-adc.rst
index 05ccc9f159f1..cae70350ba2f 100644
--- a/Documentation/hwmon/mc13783-adc
+++ b/Documentation/hwmon/mc13783-adc.rst
@@ -2,16 +2,25 @@ Kernel driver mc13783-adc
=========================
Supported chips:
+
* Freescale MC13783
+
Prefix: 'mc13783'
+
Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13783.pdf
+
* Freescale MC13892
+
Prefix: 'mc13892'
+
Datasheet: https://www.nxp.com/docs/en/data-sheet/MC13892.pdf
+
+
Authors:
- Sascha Hauer <s.hauer@pengutronix.de>
- Luotao Fu <l.fu@pengutronix.de>
+
+ - Sascha Hauer <s.hauer@pengutronix.de>
+ - Luotao Fu <l.fu@pengutronix.de>
Description
-----------
@@ -30,9 +39,11 @@ the General Purpose inputs and touchscreen.
See the following tables for the meaning of the different channels and their
chip internal scaling:
-MC13783:
+- MC13783:
+
+======= =============================================== =============== =======
Channel Signal Input Range Scaling
--------------------------------------------------------------------------------
+======= =============================================== =============== =======
0 Battery Voltage (BATT) 2.50 - 4.65V -2.40V
1 Battery Current (BATT - BATTISNS) -50 - 50 mV x20
2 Application Supply (BP) 2.50 - 4.65V -2.40V
@@ -52,10 +63,13 @@ Channel Signal Input Range Scaling
13 General Purpose TSX2 / Touchscreen X-plate 2 0 - 2.30V No
14 General Purpose TSY1 / Touchscreen Y-plate 1 0 - 2.30V No
15 General Purpose TSY2 / Touchscreen Y-plate 2 0 - 2.30V No
+======= =============================================== =============== =======
+
+- MC13892:
-MC13892:
+======= =============================================== =============== =======
Channel Signal Input Range Scaling
--------------------------------------------------------------------------------
+======= =============================================== =============== =======
0 Battery Voltage (BATT) 0 - 4.8V /2
1 Battery Current (BATT - BATTISNSCC) -60 - 60 mV x20
2 Application Supply (BPSNS) 0 - 4.8V /2
@@ -72,3 +86,4 @@ Channel Signal Input Range Scaling
13 General Purpose TSX2 / Touchscreen X-plate 2 0 - 2.4V No
14 General Purpose TSY1 / Touchscreen Y-plate 1 0 - 2.4V No
15 General Purpose TSY2 / Touchscreen Y-plate 2 0 - 2.4V No
+======= =============================================== =============== =======
diff --git a/Documentation/hwmon/mcp3021 b/Documentation/hwmon/mcp3021.rst
index 74a6b72adf5f..83f4bda2f269 100644
--- a/Documentation/hwmon/mcp3021
+++ b/Documentation/hwmon/mcp3021.rst
@@ -1,17 +1,26 @@
Kernel driver MCP3021
-======================
+=====================
Supported chips:
+
* Microchip Technology MCP3021
+
Prefix: 'mcp3021'
+
Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/21805a.pdf
+
* Microchip Technology MCP3221
+
Prefix: 'mcp3221'
+
Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/21732c.pdf
+
+
Authors:
- Mingkai Hu
- Sven Schuchmann <schuchmann@schleissheimer.de>
+
+ - Mingkai Hu
+ - Sven Schuchmann <schuchmann@schleissheimer.de>
Description
-----------
diff --git a/Documentation/hwmon/menf21bmc b/Documentation/hwmon/menf21bmc.rst
index 2a273a065c5e..1f0c6b2235ab 100644
--- a/Documentation/hwmon/menf21bmc
+++ b/Documentation/hwmon/menf21bmc.rst
@@ -2,8 +2,11 @@ Kernel driver menf21bmc_hwmon
=============================
Supported chips:
+
* MEN 14F021P00
+
Prefix: 'menf21bmc_hwmon'
+
Addresses scanned: -
Author: Andreas Werner <andreas.werner@men.de>
@@ -34,6 +37,7 @@ Sysfs entries
The following attributes are supported. All attributes are read-only.
The limits are read once by the driver.
+=============== ==========================
in0_input +3.3V input voltage
in1_input +5.0V input voltage
in2_input +12.0V input voltage
@@ -48,3 +52,4 @@ in1_label "MON_5V"
in2_label "MON_12V"
in3_label "5V_STANDBY"
in4_label "VBAT"
+=============== ==========================
diff --git a/Documentation/hwmon/mlxreg-fan b/Documentation/hwmon/mlxreg-fan.rst
index fc531c6978d4..c92b8e885f7e 100644
--- a/Documentation/hwmon/mlxreg-fan
+++ b/Documentation/hwmon/mlxreg-fan.rst
@@ -2,32 +2,38 @@ Kernel driver mlxreg-fan
========================
Provides FAN control for the following Mellanox systems:
-QMB700, equipped with 40x200GbE InfiniBand ports;
-MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
-MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
-MSN3800, equipped with 64x1000GbE Ethernet ports;
+
+- QMB700, equipped with 40x200GbE InfiniBand ports;
+- MSN3700, equipped with 32x200GbE or 16x400GbE Ethernet ports;
+- MSN3410, equipped with 6x400GbE plus 48x50GbE Ethernet ports;
+- MSN3800, equipped with 64x1000GbE Ethernet ports;
+
+Author: Vadim Pasternak <vadimp@mellanox.com>
+
These are Top of the Rack systems, equipped with a Mellanox switch
board with Mellanox Quantum or Spectrum-2 devices.
FAN controller is implemented by the programmable device logic.
The default register offsets within the programmable device are as
follows:
-- pwm1 0xe3
-- fan1 (tacho1) 0xe4
-- fan2 (tacho2) 0xe5
-- fan3 (tacho3) 0xe6
-- fan4 (tacho4) 0xe7
-- fan5 (tacho5) 0xe8
-- fan6 (tacho6) 0xe9
-- fan7 (tacho7) 0xea
-- fan8 (tacho8) 0xeb
-- fan9 (tacho9) 0xec
-- fan10 (tacho10) 0xed
-- fan11 (tacho11) 0xee
-- fan12 (tacho12) 0xef
-This setup can be re-programmed with other registers.
-Author: Vadim Pasternak <vadimp@mellanox.com>
+======================= ====
+pwm1 0xe3
+fan1 (tacho1) 0xe4
+fan2 (tacho2) 0xe5
+fan3 (tacho3) 0xe6
+fan4 (tacho4) 0xe7
+fan5 (tacho5) 0xe8
+fan6 (tacho6) 0xe9
+fan7 (tacho7) 0xea
+fan8 (tacho8) 0xeb
+fan9 (tacho9) 0xec
+fan10 (tacho10) 0xed
+fan11 (tacho11) 0xee
+fan12 (tacho12) 0xef
+======================= ====
+
+This setup can be re-programmed with other registers.
Description
-----------
@@ -48,13 +54,17 @@ thermal's sysfs interfaces.
/sys files in hwmon subsystem
-----------------------------
-fan[1-12]_fault - RO files for tachometers TACH1-TACH12 fault indication
-fan[1-12]_input - RO files for tachometers TACH1-TACH12 input (in RPM)
-pwm1 - RW file for fan[1-12] target duty cycle (0..255)
+================= == ===================================================
+fan[1-12]_fault RO files for tachometers TACH1-TACH12 fault indication
+fan[1-12]_input RO files for tachometers TACH1-TACH12 input (in RPM)
+pwm1 RW file for fan[1-12] target duty cycle (0..255)
+================= == ===================================================
/sys files in thermal subsystem
-------------------------------
-cur_state - RW file for current cooling state of the cooling device
- (0..max_state)
-max_state - RO file for maximum cooling state of the cooling device
+================= == ====================================================
+cur_state RW file for current cooling state of the cooling device
+ (0..max_state)
+max_state RO file for maximum cooling state of the cooling device
+================= == ====================================================
diff --git a/Documentation/hwmon/nct6683 b/Documentation/hwmon/nct6683.rst
index c1301d4300cd..efbf7e9703ec 100644
--- a/Documentation/hwmon/nct6683
+++ b/Documentation/hwmon/nct6683.rst
@@ -2,13 +2,18 @@ Kernel driver nct6683
=====================
Supported chips:
+
* Nuvoton NCT6683D
+
Prefix: 'nct6683'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -50,8 +55,10 @@ Tested Boards and Firmware Versions
The driver has been reported to work with the following boards and
firmware versions.
+=============== ===============================================
Board Firmware version
----------------------------------------------------------------
+=============== ===============================================
Intel DH87RL NCT6683D EC firmware version 1.0 build 04/03/13
Intel DH87MC NCT6683D EC firmware version 1.0 build 04/03/13
Intel DB85FL NCT6683D EC firmware version 1.0 build 04/03/13
+=============== ===============================================
diff --git a/Documentation/hwmon/nct6775 b/Documentation/hwmon/nct6775.rst
index bd59834d310f..1d0315c40952 100644
--- a/Documentation/hwmon/nct6775
+++ b/Documentation/hwmon/nct6775.rst
@@ -1,52 +1,90 @@
-Note
-====
-
-This driver supersedes the NCT6775F and NCT6776F support in the W83627EHF
-driver.
-
Kernel driver NCT6775
=====================
+.. note::
+
+ This driver supersedes the NCT6775F and NCT6776F support in the W83627EHF
+ driver.
+
Supported chips:
+
* Nuvoton NCT6102D/NCT6104D/NCT6106D
+
Prefix: 'nct6106'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from the Nuvoton web site
+
* Nuvoton NCT5572D/NCT6771F/NCT6772F/NCT6775F/W83677HG-I
+
Prefix: 'nct6775'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT5573D/NCT5577D/NCT6776D/NCT6776F
+
Prefix: 'nct6776'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT5532D/NCT6779D
+
Prefix: 'nct6779'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6791D
+
Prefix: 'nct6791'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6792D
+
Prefix: 'nct6792'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6793D
+
Prefix: 'nct6793'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6795D
+
Prefix: 'nct6795'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6796D
+
Prefix: 'nct6796'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
+
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -96,10 +134,14 @@ The mode works for fan1-fan5.
sysfs attributes
----------------
-pwm[1-7] - this file stores PWM duty cycle or DC value (fan speed) in range:
+pwm[1-7]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
0 (lowest speed) to 255 (full)
-pwm[1-7]_enable - this file controls mode of fan/temperature control:
+pwm[1-7]_enable
+ - this file controls mode of fan/temperature control:
+
* 0 Fan control disabled (fans set to maximum speed)
* 1 Manual mode, write to pwm[0-5] any value 0-255
* 2 "Thermal Cruise" mode
@@ -107,15 +149,19 @@ pwm[1-7]_enable - this file controls mode of fan/temperature control:
* 4 "Smart Fan III" mode (NCT6775F only)
* 5 "Smart Fan IV" mode
-pwm[1-7]_mode - controls if output is PWM or DC level
- * 0 DC output
- * 1 PWM output
+pwm[1-7]_mode
+ - controls if output is PWM or DC level
+
+ * 0 DC output
+ * 1 PWM output
Common fan control attributes
-----------------------------
-pwm[1-7]_temp_sel Temperature source. Value is temperature sensor index.
+pwm[1-7]_temp_sel
+ Temperature source. Value is temperature sensor index.
For example, select '1' for temp1_input.
+
pwm[1-7]_weight_temp_sel
Secondary temperature source. Value is temperature
sensor index. For example, select '1' for temp1_input.
@@ -126,13 +172,16 @@ following attributes.
pwm[1-7]_weight_duty_step
Duty step size.
+
pwm[1-7]_weight_temp_step
Temperature step size. With each step over
temp_step_base, the value of weight_duty_step is added
to the current pwm value.
+
pwm[1-7]_weight_temp_step_base
Temperature at which secondary temperature control kicks
in.
+
pwm[1-7]_weight_temp_step_tol
Temperature step tolerance.
@@ -141,24 +190,35 @@ Thermal Cruise mode (2)
If the temperature is in the range defined by:
-pwm[1-7]_target_temp Target temperature, unit millidegree Celsius
+pwm[1-7]_target_temp
+ Target temperature, unit millidegree Celsius
(range 0 - 127000)
+
pwm[1-7]_temp_tolerance
Target temperature tolerance, unit millidegree Celsius
-there are no changes to fan speed. Once the temperature leaves the interval, fan
+There are no changes to fan speed. Once the temperature leaves the interval, fan
speed increases (if temperature is higher than desired) or decreases (if
temperature is lower than desired), using the following limits and time
intervals.
-pwm[1-7]_start fan pwm start value (range 1 - 255), to start fan
+pwm[1-7]_start
+ fan pwm start value (range 1 - 255), to start fan
when the temperature is above defined range.
-pwm[1-7]_floor lowest fan pwm (range 0 - 255) if temperature is below
+
+pwm[1-7]_floor
+ lowest fan pwm (range 0 - 255) if temperature is below
the defined range. If set to 0, the fan is expected to
stop if the temperature is below the defined range.
-pwm[1-7]_step_up_time milliseconds before fan speed is increased
-pwm[1-7]_step_down_time milliseconds before fan speed is decreased
-pwm[1-7]_stop_time how many milliseconds must elapse to switch
+
+pwm[1-7]_step_up_time
+ milliseconds before fan speed is increased
+
+pwm[1-7]_step_down_time
+ milliseconds before fan speed is decreased
+
+pwm[1-7]_stop_time
+ how many milliseconds must elapse to switch
corresponding fan off (when the temperature was below
defined range).
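+
+For illustration only (the hwmon index and values below are made up, not
+taken from the driver), Thermal Cruise mode could be configured from user
+space with writes such as::
+
+  /* aim pwm1 at 50 degrees Celsius, controlled by temp1 */
+  #include <stdio.h>
+
+  static void put(const char *attr, const char *val)
+  {
+          char path[128];
+          FILE *f;
+
+          snprintf(path, sizeof(path), "/sys/class/hwmon/hwmon3/%s", attr);
+          f = fopen(path, "w");
+          if (f) {
+                  fputs(val, f);
+                  fclose(f);
+          }
+  }
+
+  int main(void)
+  {
+          put("pwm1_temp_sel", "1");         /* follow temp1_input */
+          put("pwm1_target_temp", "50000");  /* millidegrees Celsius */
+          put("pwm1_temp_tolerance", "2000");
+          put("pwm1_enable", "2");           /* "Thermal Cruise" mode */
+          return 0;
+  }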
@@ -167,7 +227,9 @@ Speed Cruise mode (3)
This mode tries to keep the fan speed constant.
-fan[1-7]_target Target fan speed
+fan[1-7]_target
+ Target fan speed
+
fan[1-7]_tolerance
Target speed tolerance
@@ -188,16 +250,22 @@ critical temperature mode, in which the fans should run at full speed.
pwm[1-7]_auto_point[1-7]_pwm
pwm value to be set if temperature reaches matching
temperature range.
+
pwm[1-7]_auto_point[1-7]_temp
Temperature over which the matching pwm is enabled.
+
pwm[1-7]_temp_tolerance
Temperature tolerance, unit millidegree Celsius
+
pwm[1-7]_crit_temp_tolerance
Temperature tolerance for critical temperature,
unit millidegree Celsius
-pwm[1-7]_step_up_time milliseconds before fan speed is increased
-pwm[1-7]_step_down_time milliseconds before fan speed is decreased
+pwm[1-7]_step_up_time
+ milliseconds before fan speed is increased
+
+pwm[1-7]_step_down_time
+ milliseconds before fan speed is decreased
Usage Notes
-----------
diff --git a/Documentation/hwmon/nct7802 b/Documentation/hwmon/nct7802.rst
index 5438deb6be02..8b7365a7cb32 100644
--- a/Documentation/hwmon/nct7802
+++ b/Documentation/hwmon/nct7802.rst
@@ -2,13 +2,18 @@ Kernel driver nct7802
=====================
Supported chips:
+
* Nuvoton NCT7802Y
+
Prefix: 'nct7802'
+
Addresses scanned: I2C 0x28..0x2f
+
Datasheet: Available from Nuvoton web site
Authors:
- Guenter Roeck <linux@roeck-us.net>
+
+ Guenter Roeck <linux@roeck-us.net>
Description
-----------
@@ -25,7 +30,9 @@ Tested Boards and BIOS Versions
The driver has been reported to work with the following boards and
BIOS versions.
+======================= ===============================================
Board BIOS version
----------------------------------------------------------------
+======================= ===============================================
Kontron COMe-bSC2 CHR2E934.001.GGO
Kontron COMe-bIP2 CCR2E212
+======================= ===============================================
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904.rst
index 57fffe33ebfc..5b2f111582ff 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904.rst
@@ -1,11 +1,16 @@
Kernel driver nct7904
-====================
+=====================
Supported chip:
+
* Nuvoton NCT7904D
+
Prefix: nct7904
+
Addresses: I2C 0x2d, 0x2e
+
Datasheet: Publicly available at Nuvoton website
+
http://www.nuvoton.com/
Author: Vadim V. Vlasov <vvlasov@dev.rtsoft.ru>
@@ -25,6 +30,7 @@ Sysfs entries
Currently, the driver supports only the following features:
+======================= =======================================================
in[1-20]_input Input voltage measurements (mV)
fan[1-12]_input Fan tachometer measurements (rpm)
@@ -40,6 +46,7 @@ pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
previously configured by BIOS (or configuration EEPROM)
pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
+======================= =======================================================
The driver checks sensor control registers and does not export the sensors
that are not enabled. However, a sensor that is enabled may actually be not
diff --git a/Documentation/hwmon/npcm750-pwm-fan b/Documentation/hwmon/npcm750-pwm-fan.rst
index 6156ef7398e6..c67af08b6773 100644
--- a/Documentation/hwmon/npcm750-pwm-fan
+++ b/Documentation/hwmon/npcm750-pwm-fan.rst
@@ -2,9 +2,11 @@ Kernel driver npcm750-pwm-fan
=============================
Supported chips:
+
NUVOTON NPCM750/730/715/705
Authors:
+
<tomer.maimon@nuvoton.com>
Description:
@@ -15,8 +17,10 @@ controller supports up to 16 tachometer inputs.
The driver provides the following sensor accesses in sysfs:
+=============== ======= =====================================================
fanX_input ro provide current fan rotation value in RPM as reported
by the fan to the device.
pwmX rw get or set PWM fan control value. This is an integer
value between 0(off) and 255(full speed).
+=============== ======= =====================================================
diff --git a/Documentation/hwmon/nsa320 b/Documentation/hwmon/nsa320.rst
index fdbd6947799b..4fe75fd2f937 100644
--- a/Documentation/hwmon/nsa320
+++ b/Documentation/hwmon/nsa320.rst
@@ -2,14 +2,23 @@ Kernel driver nsa320_hwmon
==========================
Supported chips:
+
* Holtek HT46R065 microcontroller with onboard firmware that configures
+
it to act as a hardware monitor.
+
Prefix: 'nsa320'
+
Addresses scanned: none
+
Datasheet: Not available, driver was reverse engineered based upon the
+
Zyxel kernel source
+
+
Author:
+
Adam Baker <linux@baker-net.org.uk>
Description
@@ -31,8 +40,10 @@ tenths of a degree.
sysfs-Interface
---------------
-temp1_input - temperature input
-fan1_input - fan speed
+============= =================
+temp1_input temperature input
+fan1_input fan speed
+============= =================
Notes
-----
diff --git a/Documentation/hwmon/ntc_thermistor b/Documentation/hwmon/ntc_thermistor.rst
index 8b9ff23edc32..d0e7f91726b9 100644
--- a/Documentation/hwmon/ntc_thermistor
+++ b/Documentation/hwmon/ntc_thermistor.rst
@@ -1,22 +1,29 @@
Kernel driver ntc_thermistor
-=================
+============================
Supported thermistors from Murata:
+
* Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473,
NCP15WL333, NCP03WF104, NCP15XH103
+
Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473',
'ncp15wl333', 'ncp03wf104', 'ncp15xh103'
+
Datasheet: Publicly available at Murata
Supported thermistors from EPCOS:
+
* EPCOS NTC Thermistors B57330V2103
+
Prefixes: b57330v2103
+
Datasheet: Publicly available at EPCOS
Other NTC thermistors can be supported simply by adding compensation
tables; e.g., NCP15WL333 support is added by the table ncpXXwl333.
Authors:
+
MyungJoo Ham <myungjoo.ham@samsung.com>
Description
@@ -29,57 +36,60 @@ compensation table to get the temperature input.
The NTC driver provides lookup tables with a linear approximation function
and four circuit models with an option not to use any of the four models.
+Using the following convention::
+
+ $ resistor
+ [TH] the thermistor
+
The four circuit models provided are:
- $: resister, [TH]: the thermistor
-
- 1. connect = NTC_CONNECTED_POSITIVE, pullup_ohm > 0
-
- [pullup_uV]
- | |
- [TH] $ (pullup_ohm)
- | |
- +----+-----------------------[read_uV]
- |
- $ (pulldown_ohm)
- |
- --- (ground)
-
- 2. connect = NTC_CONNECTED_POSITIVE, pullup_ohm = 0 (not-connected)
-
- [pullup_uV]
- |
- [TH]
- |
- +----------------------------[read_uV]
- |
- $ (pulldown_ohm)
- |
- --- (ground)
-
- 3. connect = NTC_CONNECTED_GROUND, pulldown_ohm > 0
-
- [pullup_uV]
- |
- $ (pullup_ohm)
- |
- +----+-----------------------[read_uV]
- | |
- [TH] $ (pulldown_ohm)
- | |
- -------- (ground)
-
- 4. connect = NTC_CONNECTED_GROUND, pulldown_ohm = 0 (not-connected)
-
- [pullup_uV]
- |
- $ (pullup_ohm)
- |
- +----------------------------[read_uV]
- |
- [TH]
- |
- --- (ground)
+1. connect = NTC_CONNECTED_POSITIVE, pullup_ohm > 0::
+
+ [pullup_uV]
+ | |
+ [TH] $ (pullup_ohm)
+ | |
+ +----+-----------------------[read_uV]
+ |
+ $ (pulldown_ohm)
+ |
+ -+- (ground)
+
+2. connect = NTC_CONNECTED_POSITIVE, pullup_ohm = 0 (not-connected)::
+
+ [pullup_uV]
+ |
+ [TH]
+ |
+ +----------------------------[read_uV]
+ |
+ $ (pulldown_ohm)
+ |
+ -+- (ground)
+
+3. connect = NTC_CONNECTED_GROUND, pulldown_ohm > 0::
+
+ [pullup_uV]
+ |
+ $ (pullup_ohm)
+ |
+ +----+-----------------------[read_uV]
+ | |
+ [TH] $ (pulldown_ohm)
+ | |
+ -+----+- (ground)
+
+4. connect = NTC_CONNECTED_GROUND, pulldown_ohm = 0 (not-connected)::
+
+ [pullup_uV]
+ |
+ $ (pullup_ohm)
+ |
+ +----------------------------[read_uV]
+ |
+ [TH]
+ |
+ -+- (ground)
When one of the four circuit models is used, read_uV, pullup_uV, pullup_ohm,
pulldown_ohm, and connect should be provided. When none of the four models
@@ -88,13 +98,14 @@ provide read_ohm and _not_ provide the others.
Sysfs Interface
---------------
-name the mandatory global attribute, the thermistor name.
-temp1_type always 4 (thermistor)
- RO
+=============== == =============================================================
+name the mandatory global attribute, the thermistor name.
+=============== == =============================================================
+temp1_type RO always 4 (thermistor)
-temp1_input measure the temperature and provide the measured value.
- (reading this file initiates the reading procedure.)
- RO
+temp1_input RO measure the temperature and provide the measured value.
+ (reading this file initiates the reading procedure.)
+=============== == =============================================================
Note that each NTC thermistor device has only _one_ thermistor; thus, only temp1 exists.
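+
+For circuit model 1 above, the thermistor resistance follows from the three
+known values by ordinary voltage-divider arithmetic. The helper below is a
+sketch of that arithmetic only, not the driver's code::
+
+  /*
+   * Model 1: [TH] in parallel with pullup_ohm forms the upper leg of a
+   * divider, pulldown_ohm the lower leg; read_uV is measured across the
+   * lower leg. Callers must ensure read_uv is non-zero and below pullup_uv.
+   */
+  static long long ntc_model1_ohm(long long read_uv, long long pullup_uv,
+                                  long long pullup_ohm, long long pulldown_ohm)
+  {
+          /* upper-leg resistance from the divider ratio */
+          long long upper = pulldown_ohm * (pullup_uv - read_uv) / read_uv;
+
+          /* remove the parallel pull-up to leave the thermistor alone */
+          return upper * pullup_ohm / (pullup_ohm - upper);
+  }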
diff --git a/Documentation/hwmon/occ b/Documentation/hwmon/occ.rst
index e787596e03fe..bf41c162d70e 100644
--- a/Documentation/hwmon/occ
+++ b/Documentation/hwmon/occ.rst
@@ -2,6 +2,7 @@ Kernel driver occ-hwmon
=======================
Supported chips:
+
* POWER8
* POWER9
@@ -37,53 +38,87 @@ Some entries are only present with certain OCC sensor versions or only on
certain OCCs in the system. The version number is not exported to the user
but can be inferred.
-temp[1-n]_label OCC sensor ID.
+temp[1-n]_label
+ OCC sensor ID.
+
[with temperature sensor version 1]
- temp[1-n]_input Measured temperature of the component in millidegrees
+
+ temp[1-n]_input
+ Measured temperature of the component in millidegrees
Celsius.
+
[with temperature sensor version >= 2]
- temp[1-n]_type The FRU (Field Replaceable Unit) type
+
+ temp[1-n]_type
+ The FRU (Field Replaceable Unit) type
(represented by an integer) for the component
that this sensor measures.
- temp[1-n]_fault Temperature sensor fault boolean; 1 to indicate
+ temp[1-n]_fault
+ Temperature sensor fault boolean; 1 to indicate
that a fault is present or 0 to indicate that
no fault is present.
+
[with type == 3 (FRU type is VRM)]
- temp[1-n]_alarm VRM temperature alarm boolean; 1 to indicate
+
+ temp[1-n]_alarm
+ VRM temperature alarm boolean; 1 to indicate
alarm, 0 to indicate no alarm
+
[else]
- temp[1-n]_input Measured temperature of the component in
- millidegrees Celsius.
-freq[1-n]_label OCC sensor ID.
-freq[1-n]_input Measured frequency of the component in MHz.
+ temp[1-n]_input
+ Measured temperature of the component in
+ millidegrees Celsius.
-power[1-n]_input Latest measured power reading of the component in
+freq[1-n]_label
+ OCC sensor ID.
+freq[1-n]_input
+ Measured frequency of the component in MHz.
+power[1-n]_input
+ Latest measured power reading of the component in
microwatts.
-power[1-n]_average Average power of the component in microwatts.
-power[1-n]_average_interval The amount of time over which the power average
+power[1-n]_average
+ Average power of the component in microwatts.
+power[1-n]_average_interval
+ The amount of time over which the power average
was taken in microseconds.
+
[with power sensor version < 2]
- power[1-n]_label OCC sensor ID.
+
+ power[1-n]_label
+ OCC sensor ID.
+
[with power sensor version >= 2]
- power[1-n]_label OCC sensor ID + function ID + channel in the form
+
+ power[1-n]_label
+ OCC sensor ID + function ID + channel in the form
of a string, delimited by underscores, i.e. "0_15_1".
Both the function ID and channel are integers that
further identify the power sensor.
+
[with power sensor version 0xa0]
- power[1-n]_label OCC sensor ID + sensor type in the form of a string,
+
+ power[1-n]_label
+ OCC sensor ID + sensor type in the form of a string,
delimited by an underscore, i.e. "0_system". Sensor
type will be one of "system", "proc", "vdd" or "vdn".
For this sensor version, OCC sensor ID will be the same
for all power sensors.
+
[present only on "master" OCC; represents the whole system power; only one of
- this type of power sensor will be present]
- power[1-n]_label "system"
- power[1-n]_input Latest system output power in microwatts.
- power[1-n]_cap Current system power cap in microwatts.
- power[1-n]_cap_not_redundant System power cap in microwatts when
- there is not redundant power.
- power[1-n]_cap_max Maximum power cap that the OCC can enforce in
+this type of power sensor will be present]
+
+ power[1-n]_label
+ "system"
+ power[1-n]_input
+ Latest system output power in microwatts.
+ power[1-n]_cap
+ Current system power cap in microwatts.
+ power[1-n]_cap_not_redundant
+ System power cap in microwatts when
+ there is not redundant power.
+ power[1-n]_cap_max
+ Maximum power cap that the OCC can enforce in
microwatts.
power[1-n]_cap_min Minimum power cap that the OCC can enforce in
microwatts.
@@ -94,8 +129,11 @@ power[1-n]_average_interval The amount of time over which the power average
ignored, i.e. requesting a power cap of
500900000 microwatts will result in a power cap
request of 500 watts.
+
[with caps sensor version > 1]
- power[1-n]_cap_user_source Indicates how the user power cap was
+
+ power[1-n]_cap_user_source
+ Indicates how the user power cap was
set. This is an integer that maps to
system or firmware components that can
set the user power cap.
@@ -104,9 +142,12 @@ The following "extn" sensors are exported as a way for the OCC to provide data
that doesn't fit anywhere else. The meaning of these sensors is entirely
dependent on their data, and cannot be statically defined.
-extn[1-n]_label ASCII ID or OCC sensor ID.
-extn[1-n]_flags This is one byte hexadecimal value. Bit 7 indicates the
+extn[1-n]_label
+ ASCII ID or OCC sensor ID.
+extn[1-n]_flags
+ This is a one-byte hexadecimal value. Bit 7 indicates the
type of the label attribute; 1 for sensor ID, 0 for
ASCII ID. Other bits are reserved.
-extn[1-n]_input 6 bytes of hexadecimal data, with a meaning defined by
+extn[1-n]_input
+ 6 bytes of hexadecimal data, with a meaning defined by
the sensor ID.
diff --git a/Documentation/hwmon/pc87360 b/Documentation/hwmon/pc87360.rst
index d5f5cf16ce59..4bad07bce54b 100644
--- a/Documentation/hwmon/pc87360
+++ b/Documentation/hwmon/pc87360.rst
@@ -2,14 +2,19 @@ Kernel driver pc87360
=====================
Supported chips:
+
* National Semiconductor PC87360, PC87363, PC87364, PC87365 and PC87366
+
Prefixes: 'pc87360', 'pc87363', 'pc87364', 'pc87365', 'pc87366'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheets: No longer available
Authors: Jean Delvare <jdelvare@suse.de>
Thanks to Sandeep Mehta, Tonko de Rooy and Daniel Ceregatti for testing.
+
Thanks to Rudolf Marek for helping me investigate conversion issues.
@@ -17,11 +22,13 @@ Module Parameters
-----------------
* init int
- Chip initialization level:
- 0: None
- *1: Forcibly enable internal voltage and temperature channels, except in9
- 2: Forcibly enable all voltage and temperature channels, except in9
- 3: Forcibly enable all voltage and temperature channels, including in9
+ Chip initialization level:
+
+ - 0: None
+ - **1**: Forcibly enable internal voltage and temperature channels,
+ except in9
+ - 2: Forcibly enable all voltage and temperature channels, except in9
+ - 3: Forcibly enable all voltage and temperature channels, including in9
Note that this parameter has no effect for the PC87360, PC87363 and PC87364
chips.
@@ -43,13 +50,15 @@ hardware monitoring chipsets, not only controlling and monitoring three fans,
but also monitoring eleven voltage inputs and two (PC87365) or up to four
(PC87366) temperatures.
+ =========== ======= ======= ======= ======= =====
Chip #vin #fan #pwm #temp devid
-
+ =========== ======= ======= ======= ======= =====
PC87360 - 2 2 - 0xE1
PC87363 - 2 2 - 0xE8
PC87364 - 3 3 - 0xE4
PC87365 11 3 3 2 0xE5
PC87366 11 3 3 3-4 0xE9
+ =========== ======= ======= ======= ======= =====
The driver assumes that no more than one chip is present, and one of the
standard Super I/O addresses is used (0x2E/0x2F or 0x4E/0x4F)
@@ -68,18 +77,23 @@ have to care no more.
For reference, here are a few values about clock dividers:
- slowest accuracy highest
- measurable around 3000 accurate
+ =========== =============== =============== ===========
+ slowest accuracy highest
+ measurable around 3000 accurate
divider speed (RPM) RPM (RPM) speed (RPM)
- 1 1882 18 6928
- 2 941 37 4898
- 4 470 74 3464
- 8 235 150 2449
+ =========== =============== =============== ===========
+ 1 1882 18 6928
+ 2 941 37 4898
+ 4 470 74 3464
+ 8 235 150 2449
+ =========== =============== =============== ===========
For the curious, here is how the values above were computed:
+
* slowest measurable speed: clock/(255*divider)
* accuracy around 3000 RPM: 3000^2/clock
* highest accurate speed: sqrt(clock*100)
+
The clock speed for the PC87360 family is 480 kHz. I arbitrarily chose 100
RPM as the lowest acceptable accuracy.
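+
+As a quick sanity check of the divider 1 row in the table above (a user-space
+sketch only)::
+
+  /* cc -o fandiv fandiv.c -lm */
+  #include <math.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          long clock = 480000, divider = 1;
+
+          printf("slowest measurable: %ld RPM\n", clock / (255 * divider)); /* 1882 */
+          printf("accuracy at 3000 RPM: %ld RPM\n", 3000L * 3000 / clock);  /* 18 */
+          printf("highest accurate: %.0f RPM\n", sqrt(clock * 100.0));      /* 6928 */
+          return 0;
+  }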
diff --git a/Documentation/hwmon/pc87427 b/Documentation/hwmon/pc87427.rst
index c313eb66e08a..22d8f62d851f 100644
--- a/Documentation/hwmon/pc87427
+++ b/Documentation/hwmon/pc87427.rst
@@ -2,9 +2,13 @@ Kernel driver pc87427
=====================
Supported chips:
+
* National Semiconductor PC87427
+
Prefix: 'pc87427'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: No longer available
Author: Jean Delvare <jdelvare@suse.de>
diff --git a/Documentation/hwmon/pcf8591 b/Documentation/hwmon/pcf8591.rst
index 447c0702c0ec..e98bd542a441 100644
--- a/Documentation/hwmon/pcf8591
+++ b/Documentation/hwmon/pcf8591.rst
@@ -2,16 +2,21 @@ Kernel driver pcf8591
=====================
Supported chips:
+
* Philips/NXP PCF8591
+
Prefix: 'pcf8591'
+
Addresses scanned: none
+
Datasheet: Publicly available at the NXP website
- http://www.nxp.com/pip/PCF8591_6.html
+
+ http://www.nxp.com/pip/PCF8591_6.html
Authors:
- Aurelien Jarno <aurelien@aurel32.net>
- valuable contributions by Jan M. Sendler <sendler@sendler.de>,
- Jean Delvare <jdelvare@suse.de>
+ - Aurelien Jarno <aurelien@aurel32.net>
+ - valuable contributions by Jan M. Sendler <sendler@sendler.de>,
+ - Jean Delvare <jdelvare@suse.de>
Description
@@ -22,24 +27,25 @@ analog output) for the I2C bus produced by Philips Semiconductors (now NXP).
It is designed to provide a byte I2C interface to up to 4 separate devices.
The PCF8591 has 4 analog inputs programmable as single-ended or
-differential inputs :
+differential inputs:
+
- mode 0 : four single ended inputs
- Pins AIN0 to AIN3 are single ended inputs for channels 0 to 3
+ Pins AIN0 to AIN3 are single ended inputs for channels 0 to 3
- mode 1 : three differential inputs
- Pins AIN3 is the common negative differential input
- Pins AIN0 to AIN2 are positive differential inputs for channels 0 to 2
+ Pin AIN3 is the common negative differential input
+ Pins AIN0 to AIN2 are positive differential inputs for channels 0 to 2
- mode 2 : single ended and differential mixed
- Pins AIN0 and AIN1 are single ended inputs for channels 0 and 1
- Pins AIN2 is the positive differential input for channel 3
- Pins AIN3 is the negative differential input for channel 3
+ Pins AIN0 and AIN1 are single ended inputs for channels 0 and 1
+ Pin AIN2 is the positive differential input for channel 3
+ Pin AIN3 is the negative differential input for channel 3
- mode 3 : two differential inputs
- Pins AIN0 is the positive differential input for channel 0
- Pins AIN1 is the negative differential input for channel 0
- Pins AIN2 is the positive differential input for channel 1
- Pins AIN3 is the negative differential input for channel 1
+ Pin AIN0 is the positive differential input for channel 0
+ Pin AIN1 is the negative differential input for channel 0
+ Pin AIN2 is the positive differential input for channel 1
+ Pin AIN3 is the negative differential input for channel 1
See the datasheet for details.
@@ -49,10 +55,11 @@ Module parameters
* input_mode int
Analog input mode:
- 0 = four single ended inputs
- 1 = three differential inputs
- 2 = single ended and differential mixed
- 3 = two differential inputs
+
+ - 0 = four single ended inputs
+ - 1 = three differential inputs
+ - 2 = single ended and differential mixed
+ - 3 = two differential inputs
Accessing PCF8591 via /sys interface
@@ -67,11 +74,12 @@ for details.
Directories are being created for each instantiated PCF8591:
/sys/bus/i2c/devices/<0>-<1>/
-where <0> is the bus the chip is connected to (e. g. i2c-0)
-and <1> the chip address ([48..4f])
+ where <0> is the bus the chip is connected to (e. g. i2c-0)
+ and <1> the chip address ([48..4f])
Inside these directories, there are such files:
-in0_input, in1_input, in2_input, in3_input, out0_enable, out0_output, name
+
+ in0_input, in1_input, in2_input, in3_input, out0_enable, out0_output, name
Name contains chip name.
diff --git a/Documentation/hwmon/pmbus-core b/Documentation/hwmon/pmbus-core.rst
index 8ed10e9ddfb5..92515c446fe3 100644
--- a/Documentation/hwmon/pmbus-core
+++ b/Documentation/hwmon/pmbus-core.rst
@@ -1,3 +1,4 @@
+==================================
PMBus core driver and internal API
==================================
@@ -120,24 +121,24 @@ Specifically, it provides the following information.
non-standard PMBus commands to standard commands, or to augment standard
command return values with device specific information.
- API functions
- -------------
+API functions
+=============
- Functions provided by chip driver
- ---------------------------------
+Functions provided by chip driver
+---------------------------------
- All functions return the command return value (read) or zero (write) if
- successful. A return value of -ENODATA indicates that there is no manufacturer
- specific command, but that a standard PMBus command may exist. Any other
- negative return value indicates that the commands does not exist for this
- chip, and that no attempt should be made to read or write the standard
- command.
+All functions return the command return value (read) or zero (write) if
+successful. A return value of -ENODATA indicates that there is no manufacturer
+specific command, but that a standard PMBus command may exist. Any other
+negative return value indicates that the command does not exist for this
+chip, and that no attempt should be made to read or write the standard
+command.
- As mentioned above, an exception to this rule applies to virtual commands,
- which _must_ be handled in driver specific code. See "Virtual PMBus Commands"
- above for more details.
+As mentioned above, an exception to this rule applies to virtual commands,
+which *must* be handled in driver specific code. See "Virtual PMBus Commands"
+above for more details.
- Command execution in the core PMBus driver code is as follows.
+Command execution in the core PMBus driver code is as follows::
if (chip_access_function) {
status = chip_access_function();
@@ -148,128 +149,160 @@ Specifically, it provides the following information.
return -EINVAL;
return generic_access();
- Chip drivers may provide pointers to the following functions in struct
- pmbus_driver_info. All functions are optional.
+Chip drivers may provide pointers to the following functions in struct
+pmbus_driver_info. All functions are optional.
+
+::
int (*read_byte_data)(struct i2c_client *client, int page, int reg);
- Read byte from page <page>, register <reg>.
- <page> may be -1, which means "current page".
+Read byte from page <page>, register <reg>.
+<page> may be -1, which means "current page".
+
+
+::
int (*read_word_data)(struct i2c_client *client, int page, int reg);
- Read word from page <page>, register <reg>.
+Read word from page <page>, register <reg>.
+
+::
int (*write_word_data)(struct i2c_client *client, int page, int reg,
- u16 word);
+ u16 word);
- Write word to page <page>, register <reg>.
+Write word to page <page>, register <reg>.
+
+::
int (*write_byte)(struct i2c_client *client, int page, u8 value);
- Write byte to page <page>, register <reg>.
- <page> may be -1, which means "current page".
+Write byte to page <page>, register <reg>.
+<page> may be -1, which means "current page".
+
+::
int (*identify)(struct i2c_client *client, struct pmbus_driver_info *info);
- Determine supported PMBus functionality. This function is only necessary
- if a chip driver supports multiple chips, and the chip functionality is not
- pre-determined. It is currently only used by the generic pmbus driver
- (pmbus.c).
+Determine supported PMBus functionality. This function is only necessary
+if a chip driver supports multiple chips, and the chip functionality is not
+pre-determined. It is currently only used by the generic pmbus driver
+(pmbus.c).
+
+Functions exported by core driver
+---------------------------------
- Functions exported by core driver
- ---------------------------------
+Chip drivers are expected to use the following functions to read or write
+PMBus registers. Chip drivers may also use direct I2C commands. If direct I2C
+commands are used, the chip driver code must not directly modify the current
+page, since the selected page is cached in the core driver and the core driver
+will assume that it is selected. Using pmbus_set_page() to select a new page
+is mandatory.
- Chip drivers are expected to use the following functions to read or write
- PMBus registers. Chip drivers may also use direct I2C commands. If direct I2C
- commands are used, the chip driver code must not directly modify the current
- page, since the selected page is cached in the core driver and the core driver
- will assume that it is selected. Using pmbus_set_page() to select a new page
- is mandatory.
+::
int pmbus_set_page(struct i2c_client *client, u8 page);
- Set PMBus page register to <page> for subsequent commands.
+Set PMBus page register to <page> for subsequent commands.
+
+::
int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
- Read word data from <page>, <reg>. Similar to i2c_smbus_read_word_data(), but
- selects page first.
+Read word data from <page>, <reg>. Similar to i2c_smbus_read_word_data(), but
+selects page first.
+
+::
int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
u16 word);
- Write word data to <page>, <reg>. Similar to i2c_smbus_write_word_data(), but
- selects page first.
+Write word data to <page>, <reg>. Similar to i2c_smbus_write_word_data(), but
+selects page first.
+
+::
int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg);
- Read byte data from <page>, <reg>. Similar to i2c_smbus_read_byte_data(), but
- selects page first. <page> may be -1, which means "current page".
+Read byte data from <page>, <reg>. Similar to i2c_smbus_read_byte_data(), but
+selects page first. <page> may be -1, which means "current page".
+
+::
int pmbus_write_byte(struct i2c_client *client, int page, u8 value);
- Write byte data to <page>, <reg>. Similar to i2c_smbus_write_byte(), but
- selects page first. <page> may be -1, which means "current page".
+Write byte data to <page>, <reg>. Similar to i2c_smbus_write_byte(), but
+selects page first. <page> may be -1, which means "current page".
+
+::
void pmbus_clear_faults(struct i2c_client *client);
- Execute PMBus "Clear Fault" command on all chip pages.
- This function calls the device specific write_byte function if defined.
- Therefore, it must _not_ be called from that function.
+Execute PMBus "Clear Fault" command on all chip pages.
+This function calls the device specific write_byte function if defined.
+Therefore, it must _not_ be called from that function.
+
+::
bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
- Check if byte register exists. Return true if the register exists, false
- otherwise.
- This function calls the device specific write_byte function if defined to
- obtain the chip status. Therefore, it must _not_ be called from that function.
+Check if byte register exists. Return true if the register exists, false
+otherwise.
+This function calls the device specific write_byte function if defined to
+obtain the chip status. Therefore, it must _not_ be called from that function.
+
+::
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
- Check if word register exists. Return true if the register exists, false
- otherwise.
- This function calls the device specific write_byte function if defined to
- obtain the chip status. Therefore, it must _not_ be called from that function.
+Check if word register exists. Return true if the register exists, false
+otherwise.
+This function calls the device specific write_byte function if defined to
+obtain the chip status. Therefore, it must _not_ be called from that function.
+
+::
int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
- struct pmbus_driver_info *info);
+ struct pmbus_driver_info *info);
+
+Execute probe function. Similar to standard probe function for other drivers,
+with the pointer to struct pmbus_driver_info as additional argument. Calls
+identify function if supported. Must only be called from device probe
+function.
- Execute probe function. Similar to standard probe function for other drivers,
- with the pointer to struct pmbus_driver_info as additional argument. Calls
- identify function if supported. Must only be called from device probe
- function.
+::
void pmbus_do_remove(struct i2c_client *client);
- Execute driver remove function. Similar to standard driver remove function.
+Execute driver remove function. Similar to standard driver remove function.
+
+::
const struct pmbus_driver_info
*pmbus_get_driver_info(struct i2c_client *client);
- Return pointer to struct pmbus_driver_info as passed to pmbus_do_probe().
+Return pointer to struct pmbus_driver_info as passed to pmbus_do_probe().
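+
+Putting this together, a chip driver hook that maps one of the core's virtual
+commands to a manufacturer specific register could look like the sketch
+below. The vendor register name and value are hypothetical;
+PMBUS_VIRT_READ_TEMP_MAX stands for one of the core's virtual commands (see
+"Virtual PMBus Commands" above), and errors follow the -ENODATA convention
+described earlier::
+
+  #include <linux/i2c.h>
+
+  #include "pmbus.h"
+
+  #define MYCHIP_MFR_TEMPERATURE_PEAK  0xd6  /* hypothetical vendor register */
+
+  static int mychip_read_word_data(struct i2c_client *client, int page, int reg)
+  {
+          switch (reg) {
+          case PMBUS_VIRT_READ_TEMP_MAX:
+                  /* redirect the virtual command to the vendor register */
+                  return pmbus_read_word_data(client, page,
+                                              MYCHIP_MFR_TEMPERATURE_PEAK);
+          default:
+                  /* no manufacturer command; let the core try the standard one */
+                  return -ENODATA;
+          }
+  }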
PMBus driver platform data
==========================
PMBus platform data is defined in include/linux/pmbus.h. Platform data
-currently only provides a flag field with a single bit used.
+currently only provides a flag field with a single bit used::
-#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+ #define PMBUS_SKIP_STATUS_CHECK (1 << 0)
-struct pmbus_platform_data {
- u32 flags; /* Device specific flags */
-};
+ struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+ };
Flags
-----
PMBUS_SKIP_STATUS_CHECK
-
-During register detection, skip checking the status register for
-communication or command errors.
+ During register detection, skip checking the status register for
+ communication or command errors.
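+
+For illustration, a board file could attach this flag to a PMBus device it
+instantiates; the I2C address below is made up, and 'tps40400' is simply one
+of the prefixes the generic pmbus driver accepts::
+
+  #include <linux/i2c.h>
+  #include <linux/pmbus.h>
+
+  static struct pmbus_platform_data myboard_pmbus_pdata = {
+          .flags = PMBUS_SKIP_STATUS_CHECK,
+  };
+
+  static struct i2c_board_info myboard_i2c_devices[] __initdata = {
+          {
+                  I2C_BOARD_INFO("tps40400", 0x38),
+                  .platform_data = &myboard_pmbus_pdata,
+          },
+  };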
Some PMBus chips respond with valid data when trying to read an unsupported
register. For such chips, checking the status register is mandatory when
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus.rst
index dfd9c65996c0..abfb9dd4857d 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus.rst
@@ -1,42 +1,77 @@
Kernel driver pmbus
-====================
+===================
Supported chips:
+
* Ericsson BMR453, BMR454
+
Prefixes: 'bmr453', 'bmr454'
+
Addresses scanned: -
+
Datasheet:
+
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
+
* ON Semiconductor ADP4000, NCP4200, NCP4208
+
Prefixes: 'adp4000', 'ncp4200', 'ncp4208'
+
Addresses scanned: -
+
Datasheets:
+
http://www.onsemi.com/pub_link/Collateral/ADP4000-D.PDF
+
http://www.onsemi.com/pub_link/Collateral/NCP4200-D.PDF
+
http://www.onsemi.com/pub_link/Collateral/JUNE%202009-%20REV.%200.PDF
+
* Lineage Power
+
Prefixes: 'mdt040', 'pdt003', 'pdt006', 'pdt012', 'udt020'
+
Addresses scanned: -
+
Datasheets:
+
http://www.lineagepower.com/oem/pdf/PDT003A0X.pdf
+
http://www.lineagepower.com/oem/pdf/PDT006A0X.pdf
+
http://www.lineagepower.com/oem/pdf/PDT012A0X.pdf
+
http://www.lineagepower.com/oem/pdf/UDT020A0X.pdf
+
http://www.lineagepower.com/oem/pdf/MDT040A0X.pdf
+
* Texas Instruments TPS40400, TPS544B20, TPS544B25, TPS544C20, TPS544C25
+
Prefixes: 'tps40400', 'tps544b20', 'tps544b25', 'tps544c20', 'tps544c25'
+
Addresses scanned: -
+
Datasheets:
+
http://www.ti.com/lit/gpn/tps40400
+
http://www.ti.com/lit/gpn/tps544b20
+
http://www.ti.com/lit/gpn/tps544b25
+
http://www.ti.com/lit/gpn/tps544c20
+
http://www.ti.com/lit/gpn/tps544c25
+
* Generic PMBus devices
+
Prefix: 'pmbus'
+
Addresses scanned: -
+
Datasheet: n.a.
+
Author: Guenter Roeck <linux@roeck-us.net>
@@ -62,9 +97,10 @@ supported by all chips), and since there is no well defined address range for
PMBus devices. You will have to instantiate the devices explicitly.
Example: the following will load the driver for an LTC2978 at address 0x60
-on I2C bus #1:
-$ modprobe pmbus
-$ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe pmbus
+ $ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
Platform data support
@@ -72,9 +108,9 @@ Platform data support
Support for additional PMBus chips can be added by defining chip parameters in
a new chip specific driver file. For example, (untested) code to add support for
-Emerson DS1200 power modules might look as follows.
+Emerson DS1200 power modules might look as follows::
-static struct pmbus_driver_info ds1200_info = {
+ static struct pmbus_driver_info ds1200_info = {
.pages = 1,
/* Note: All other sensors are in linear mode */
.direct[PSC_VOLTAGE_OUT] = true,
@@ -95,45 +131,45 @@ static struct pmbus_driver_info ds1200_info = {
| PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
| PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
| PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
-};
+ };
-static int ds1200_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
+ static int ds1200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
return pmbus_do_probe(client, id, &ds1200_info);
-}
+ }
-static int ds1200_remove(struct i2c_client *client)
-{
+ static int ds1200_remove(struct i2c_client *client)
+ {
return pmbus_do_remove(client);
-}
+ }
-static const struct i2c_device_id ds1200_id[] = {
+ static const struct i2c_device_id ds1200_id[] = {
{"ds1200", 0},
{}
-};
+ };
-MODULE_DEVICE_TABLE(i2c, ds1200_id);
+ MODULE_DEVICE_TABLE(i2c, ds1200_id);
-/* This is the driver that will be inserted */
-static struct i2c_driver ds1200_driver = {
+ /* This is the driver that will be inserted */
+ static struct i2c_driver ds1200_driver = {
.driver = {
.name = "ds1200",
},
.probe = ds1200_probe,
.remove = ds1200_remove,
.id_table = ds1200_id,
-};
+ };
-static int __init ds1200_init(void)
-{
+ static int __init ds1200_init(void)
+ {
return i2c_add_driver(&ds1200_driver);
-}
+ }
-static void __exit ds1200_exit(void)
-{
+ static void __exit ds1200_exit(void)
+ {
i2c_del_driver(&ds1200_driver);
-}
+ }
Sysfs entries
@@ -148,6 +184,7 @@ a given sysfs entry.
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
inX_input Measured voltage. From READ_VIN or READ_VOUT register.
inX_min Minimum Voltage.
From VIN_UV_WARN_LIMIT or VOUT_UV_WARN_LIMIT register.
@@ -214,3 +251,4 @@ tempX_lcrit_alarm Chip temperature critical low alarm. Set by comparing
tempX_crit_alarm Chip temperature critical high alarm. Set by comparing
READ_TEMPERATURE_X with OT_FAULT_LIMIT if
TEMP_OT_FAULT status is set.
+======================= ========================================================
diff --git a/Documentation/hwmon/powr1220 b/Documentation/hwmon/powr1220.rst
index 21e44f71ae6e..a7fc258da0a8 100644
--- a/Documentation/hwmon/powr1220
+++ b/Documentation/hwmon/powr1220.rst
@@ -1,12 +1,17 @@
Kernel driver powr1220
-==================
+======================
Supported chips:
+
* Lattice POWR1220AT8
+
Prefix: 'powr1220'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Lattice website
- http://www.latticesemi.com/
+
+ http://www.latticesemi.com/
Author: Scott Kanowitz <scott.kanowitz@gmail.com>
@@ -26,7 +31,9 @@ value over the low measurement range maximum of 2 V.
The input naming convention is as follows:
+============== ========
driver name pin name
+============== ========
in0 VMON1
in1 VMON2
in2 VMON3
@@ -41,5 +48,6 @@ in10 VMON11
in11 VMON12
in12 VCCA
in13 VCCINP
+============== ========
The ADC readings are updated on request with a minimum period of 1s.
diff --git a/Documentation/hwmon/pwm-fan b/Documentation/hwmon/pwm-fan.rst
index 18529d2e3bcf..82fe96742fee 100644
--- a/Documentation/hwmon/pwm-fan
+++ b/Documentation/hwmon/pwm-fan.rst
@@ -15,3 +15,6 @@ The driver implements a simple interface for driving a fan connected to
a PWM output. It uses the generic PWM interface, thus it can be used with
a range of SoCs. The driver exposes the fan to the user space through
the hwmon's sysfs interface.
+
+The fan rotation speed returned via the optional 'fan1_input' is extrapolated
+from the tachometer signal interrupts sampled over a 1 second interval.
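
A rough sketch of that calculation, assuming the common case of two
tachometer pulses per fan revolution (the driver itself may differ)::

    /*
     * pulses:    tachometer interrupts counted during the sample window
     * sample_ms: length of the sample window in milliseconds
     */
    static unsigned int pulses_to_rpm(unsigned long pulses, unsigned int sample_ms)
    {
            unsigned int pulses_per_rev = 2;

            return pulses * 60000 / (sample_ms * pulses_per_rev);
    }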
diff --git a/Documentation/hwmon/raspberrypi-hwmon b/Documentation/hwmon/raspberrypi-hwmon.rst
index 3c92e2cb52d6..8038ade36490 100644
--- a/Documentation/hwmon/raspberrypi-hwmon
+++ b/Documentation/hwmon/raspberrypi-hwmon.rst
@@ -2,6 +2,7 @@ Kernel driver raspberrypi-hwmon
===============================
Supported boards:
+
* Raspberry Pi A+ (via GPIO on SoC)
* Raspberry Pi B+ (via GPIO on SoC)
* Raspberry Pi 2 B (via GPIO on SoC)
@@ -19,4 +20,6 @@ undervoltage conditions.
Sysfs entries
-------------
+======================= ==================
in0_lcrit_alarm Undervoltage alarm
+======================= ==================
diff --git a/Documentation/hwmon/sch5627 b/Documentation/hwmon/sch5627.rst
index 0551d266c51c..187682e99114 100644
--- a/Documentation/hwmon/sch5627
+++ b/Documentation/hwmon/sch5627.rst
@@ -2,9 +2,13 @@ Kernel driver sch5627
=====================
Supported chips:
+
* SMSC SCH5627
+
Prefix: 'sch5627'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: Application Note available upon request
Author: Hans de Goede <hdegoede@redhat.com>
diff --git a/Documentation/hwmon/sch5636 b/Documentation/hwmon/sch5636.rst
index 7b0a01da0717..4aaee3672f13 100644
--- a/Documentation/hwmon/sch5636
+++ b/Documentation/hwmon/sch5636.rst
@@ -2,8 +2,11 @@ Kernel driver sch5636
=====================
Supported chips:
+
* SMSC SCH5636
+
Prefix: 'sch5636'
+
Addresses scanned: none, address read from Super I/O config space
Author: Hans de Goede <hdegoede@redhat.com>
diff --git a/Documentation/hwmon/scpi-hwmon b/Documentation/hwmon/scpi-hwmon.rst
index 4cfcdf2d5eab..eee7022b44db 100644
--- a/Documentation/hwmon/scpi-hwmon
+++ b/Documentation/hwmon/scpi-hwmon.rst
@@ -2,8 +2,11 @@ Kernel driver scpi-hwmon
========================
Supported chips:
+
* Chips based on ARM System Control Processor Interface
+
Addresses scanned: -
+
Datasheet: http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0922b/index.html
Author: Punit Agrawal <punit.agrawal@arm.com>
@@ -14,7 +17,7 @@ Description
This driver supports hardware monitoring for SoCs based on the ARM
System Control Processor (SCP) implementing the System Control
Processor Interface (SCPI). The following sensor types are supported
-by the SCP -
+by the SCP:
* temperature
* voltage
@@ -30,4 +33,4 @@ Usage Notes
The driver relies on device tree node to indicate the presence of SCPI
support in the kernel. See
Documentation/devicetree/bindings/arm/arm,scpi.txt for details of the
-devicetree node. \ No newline at end of file
+devicetree node.
diff --git a/Documentation/hwmon/sht15 b/Documentation/hwmon/sht15.rst
index 5e3207c3b177..485abe037f6c 100644
--- a/Documentation/hwmon/sht15
+++ b/Documentation/hwmon/sht15.rst
@@ -2,29 +2,37 @@ Kernel driver sht15
===================
Authors:
+
* Wouter Horre
* Jonathan Cameron
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
* Jerome Oufella <jerome.oufella@savoirfairelinux.com>
Supported chips:
+
* Sensirion SHT10
+
Prefix: 'sht10'
* Sensirion SHT11
+
Prefix: 'sht11'
* Sensirion SHT15
+
Prefix: 'sht15'
* Sensirion SHT71
+
Prefix: 'sht71'
* Sensirion SHT75
+
Prefix: 'sht75'
Datasheet: Publicly available at the Sensirion website
-http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
+
+ http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
Description
-----------
@@ -63,11 +71,13 @@ Platform data
Sysfs interface
---------------
-* temp1_input: temperature input
-* humidity1_input: humidity input
-* heater_enable: write 1 in this attribute to enable the on-chip heater,
- 0 to disable it. Be careful not to enable the heater
- for too long.
-* temp1_fault: if 1, this means that the voltage is low (below 2.47V) and
- measurement may be invalid.
-* humidity1_fault: same as temp1_fault.
+================== ==========================================================
+temp1_input temperature input
+humidity1_input humidity input
+heater_enable write 1 in this attribute to enable the on-chip heater,
+ 0 to disable it. Be careful not to enable the heater
+ for too long.
+temp1_fault if 1, this means that the voltage is low (below 2.47V) and
+ measurement may be invalid.
+humidity1_fault same as temp1_fault.
+================== ==========================================================
diff --git a/Documentation/hwmon/sht21 b/Documentation/hwmon/sht21.rst
index 8b3cdda541c1..f1f5da030108 100644
--- a/Documentation/hwmon/sht21
+++ b/Documentation/hwmon/sht21.rst
@@ -2,19 +2,33 @@ Kernel driver sht21
===================
Supported chips:
+
* Sensirion SHT21
+
Prefix: 'sht21'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Sensirion website
+
http://www.sensirion.com/file/datasheet_sht21
+
+
* Sensirion SHT25
+
Prefix: 'sht25'
+
Addresses scanned: none
+
Datasheet: Publicly available at the Sensirion website
+
http://www.sensirion.com/file/datasheet_sht25
+
+
Author:
+
Urs Fleisch <urs.fleisch@sensirion.com>
Description
@@ -33,9 +47,13 @@ in the board setup code.
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
-eic - Electronic Identification Code
+temp1_input
+ - temperature input
+
+humidity1_input
+ - humidity input
+eic
+ - Electronic Identification Code
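
These attributes can be read from user space like any other hwmon file. A
small sketch (the hwmon index 0 is only an example and depends on the
system)::

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
            long millideg;

            if (!f)
                    return 1;
            if (fscanf(f, "%ld", &millideg) == 1)
                    printf("%.3f degrees C\n", millideg / 1000.0);
            fclose(f);
            return 0;
    }
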
Notes
-----
diff --git a/Documentation/hwmon/sht3x b/Documentation/hwmon/sht3x.rst
index d9daa6ab1e8e..978a7117e4b2 100644
--- a/Documentation/hwmon/sht3x
+++ b/Documentation/hwmon/sht3x.rst
@@ -2,14 +2,19 @@ Kernel driver sht3x
===================
Supported chips:
+
* Sensirion SHT3x-DIS
+
Prefix: 'sht3x'
+
Addresses scanned: none
+
Datasheet: https://www.sensirion.com/file/datasheet_sht3x_digital
Author:
- David Frey <david.frey@sensirion.com>
- Pascal Sachs <pascal.sachs@sensirion.com>
+
+ - David Frey <david.frey@sensirion.com>
+ - Pascal Sachs <pascal.sachs@sensirion.com>
Description
-----------
@@ -24,6 +29,7 @@ addresses 0x44 or 0x45, depending on the wiring. See
Documentation/i2c/instantiating-devices for methods to instantiate the device.
There are two options configurable by means of sht3x_platform_data:
+
1. blocking (pull the I2C clock line down while performing the measurement) or
non-blocking mode. Blocking mode will guarantee the fastest result but
the I2C bus will be busy during that time. By default, non-blocking mode
@@ -35,12 +41,15 @@ There are two options configurable by means of sht3x_platform_data:
The sht3x sensor supports a single shot mode as well as 5 periodic measure
modes, which can be controlled with the update_interval sysfs interface.
The allowed update_interval in milliseconds are as follows:
- * 0 single shot mode
- * 2000 0.5 Hz periodic measurement
- * 1000 1 Hz periodic measurement
- * 500 2 Hz periodic measurement
- * 250 4 Hz periodic measurement
- * 100 10 Hz periodic measurement
+
+ ===== ======= ====================
+ 0 single shot mode
+ 2000 0.5 Hz periodic measurement
+ 1000 1 Hz periodic measurement
+ 500 2 Hz periodic measurement
+ 250 4 Hz periodic measurement
+ 100 10 Hz periodic measurement
+ ===== ======= ====================
In the periodic measure mode, the sensor automatically triggers a measurement
with the configured update interval on the chip. When a temperature or humidity
@@ -53,6 +62,7 @@ low.
sysfs-Interface
---------------
+=================== ============================================================
temp1_input: temperature input
humidity1_input: humidity input
temp1_max: temperature max value
@@ -64,13 +74,15 @@ temp1_min_hyst: temperature hysteresis value for min limit
humidity1_min: humidity min value
humidity1_min_hyst: humidity hysteresis value for min limit
temp1_alarm: alarm flag is set to 1 if the temperature is outside the
- configured limits. Alarm only works in periodic measure mode
+ configured limits. Alarm only works in periodic measure mode
humidity1_alarm: alarm flag is set to 1 if the humidity is outside the
- configured limits. Alarm only works in periodic measure mode
+ configured limits. Alarm only works in periodic measure mode
heater_enable: heater enable, heating element removes excess humidity from
- sensor
- 0: turned off
- 1: turned on
+ sensor:
+
+ - 0: turned off
+ - 1: turned on
update_interval: update interval, 0 for single shot, interval in msec
- for periodic measurement. If the interval is not supported
- by the sensor, the next faster interval is chosen
+ for periodic measurement. If the interval is not supported
+ by the sensor, the next faster interval is chosen
+=================== ============================================================
diff --git a/Documentation/hwmon/shtc1 b/Documentation/hwmon/shtc1.rst
index 6b1e05458f0f..aa116332ba26 100644
--- a/Documentation/hwmon/shtc1
+++ b/Documentation/hwmon/shtc1.rst
@@ -2,17 +2,29 @@ Kernel driver shtc1
===================
Supported chips:
+
* Sensirion SHTC1
+
Prefix: 'shtc1'
+
Addresses scanned: none
+
Datasheet: http://www.sensirion.com/file/datasheet_shtc1
+
+
* Sensirion SHTW1
+
Prefix: 'shtw1'
+
Addresses scanned: none
+
Datasheet: Not publicly available
+
+
Author:
+
Johannes Winkelmann <johannes.winkelmann@sensirion.com>
Description
@@ -28,6 +40,7 @@ address 0x70. See Documentation/i2c/instantiating-devices for methods to
instantiate the device.
There are two options configurable by means of shtc1_platform_data:
+
1. blocking (pull the I2C clock line down while performing the measurement) or
non-blocking mode. Blocking mode will guarantee the fastest result but
the I2C bus will be busy during that time. By default, non-blocking mode
@@ -39,5 +52,7 @@ There are two options configurable by means of shtc1_platform_data:
sysfs-Interface
---------------
-temp1_input - temperature input
-humidity1_input - humidity input
+temp1_input
+ - temperature input
+humidity1_input
+ - humidity input
diff --git a/Documentation/hwmon/sis5595 b/Documentation/hwmon/sis5595.rst
index 4f8877a34f37..16123b3bfff9 100644
--- a/Documentation/hwmon/sis5595
+++ b/Documentation/hwmon/sis5595.rst
@@ -2,49 +2,67 @@ Kernel driver sis5595
=====================
Supported chips:
+
* Silicon Integrated Systems Corp. SiS5595 Southbridge Hardware Monitor
+
Prefix: 'sis5595'
+
Addresses scanned: ISA in PCI-space encoded address
+
Datasheet: Publicly available at the Silicon Integrated Systems Corp. site.
+
+
Authors:
- Kyösti Mälkki <kmalkki@cc.hut.fi>,
- Mark D. Studebaker <mdsxyz123@yahoo.com>,
- Aurelien Jarno <aurelien@aurel32.net> 2.6 port
+
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>,
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>,
+ - Aurelien Jarno <aurelien@aurel32.net> 2.6 port
SiS southbridge has a LM78-like chip integrated on the same IC.
This driver is a customized copy of lm78.c
Supports following revisions:
+
+ =============== =============== ==============
Version PCI ID PCI Revision
+ =============== =============== ==============
1 1039/0008 AF or less
2 1039/0008 B0 or greater
+ =============== =============== ==============
Note: these chips contain a 0008 device which is incompatible with the
- 5595. We recognize these by the presence of the listed
- "blacklist" PCI ID and refuse to load.
+ 5595. We recognize these by the presence of the listed
+ "blacklist" PCI ID and refuse to load.
+ =================== =============== ================
NOT SUPPORTED PCI ID BLACKLIST PCI ID
- 540 0008 0540
- 550 0008 0550
+ =================== =============== ================
+ 540 0008 0540
+ 550 0008 0550
5513 0008 5511
5581 0008 5597
5582 0008 5597
5597 0008 5597
- 630 0008 0630
- 645 0008 0645
- 730 0008 0730
- 735 0008 0735
+ 630 0008 0630
+ 645 0008 0645
+ 730 0008 0730
+ 735 0008 0735
+ =================== =============== ================
Module Parameters
-----------------
+
+======================= =====================================================
force_addr=0xaddr Set the I/O base address. Useful for boards
that don't set the address in the BIOS. Does not do a
PCI force; the device must still be present in lspci.
Don't use this unless the driver complains that the
base address is not set.
+
Example: 'modprobe sis5595 force_addr=0x290'
+======================= =====================================================
Description
@@ -103,4 +121,3 @@ Problems
--------
Some chips refuse to be enabled. We don't know why.
The driver will recognize this and print a message in dmesg.
-
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665.rst
index a341eeedab75..a0e27f62b57b 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665.rst
@@ -2,31 +2,57 @@ Kernel driver smm665
====================
Supported chips:
+
* Summit Microelectronics SMM465
+
Prefix: 'smm465'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM465/SMM465DS.pdf
+
* Summit Microelectronics SMM665, SMM665B
+
Prefix: 'smm665'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
+
* Summit Microelectronics SMM665C
+
Prefix: 'smm665c'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM665C/SMM665C_2125.pdf
+
* Summit Microelectronics SMM764
+
Prefix: 'smm764'
+
Addresses scanned: -
+
Datasheet:
+
http://www.summitmicro.com/prod_select/summary/SMM764/SMM764_2098.pdf
+
* Summit Microelectronics SMM766, SMM766B
+
Prefix: 'smm766'
+
Addresses scanned: -
+
Datasheets:
+
http://www.summitmicro.com/prod_select/summary/SMM766/SMM766_2086.pdf
+
http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -36,9 +62,10 @@ Module Parameters
-----------------
* vref: int
- Default: 1250 (mV)
- Reference voltage on VREF_ADC pin in mV. It should not be necessary to set
- this parameter unless a non-default reference voltage is used.
+ Default: 1250 (mV)
+
+ Reference voltage on VREF_ADC pin in mV. It should not be necessary to set
+ this parameter unless a non-default reference voltage is used.
Description
@@ -64,9 +91,10 @@ the devices explicitly. When instantiating the device, you have to specify
its configuration register address.
Example: the following will load the driver for an SMM665 at address 0x57
-on I2C bus #1:
-$ modprobe smm665
-$ echo smm665 0x57 > /sys/bus/i2c/devices/i2c-1/new_device
+on I2C bus #1::
+
+ $ modprobe smm665
+ $ echo smm665 0x57 > /sys/bus/i2c/devices/i2c-1/new_device
Sysfs entries
@@ -84,6 +112,7 @@ max otherwise. For details please see the SMM665 datasheet.
For SMM465 and SMM764, values for Channel E and F are reported but undefined.
+======================= =======================================================
in1_input 12V input voltage (mV)
in2_input 3.3V (VDD) input voltage (mV)
in3_input Channel A voltage (mV)
@@ -155,3 +184,4 @@ temp1_min		Minimum chip temperature
temp1_max Maximum chip temperature
temp1_crit Critical chip temperature
temp1_crit_alarm Temperature critical alarm
+======================= =======================================================
diff --git a/Documentation/hwmon/smsc47b397 b/Documentation/hwmon/smsc47b397.rst
index 3a43b6948924..600194cf1804 100644
--- a/Documentation/hwmon/smsc47b397
+++ b/Documentation/hwmon/smsc47b397.rst
@@ -2,29 +2,38 @@ Kernel driver smsc47b397
========================
Supported chips:
+
* SMSC LPC47B397-NC
+
* SMSC SCH5307-NS
+
* SMSC SCH5317
+
Prefix: 'smsc47b397'
+
Addresses scanned: none, address read from Super I/O config space
+
Datasheet: In this file
-Authors: Mark M. Hoffman <mhoffman@lightlink.com>
- Utilitek Systems, Inc.
+Authors:
+
+ - Mark M. Hoffman <mhoffman@lightlink.com>
+ - Utilitek Systems, Inc.
November 23, 2004
-The following specification describes the SMSC LPC47B397-NC[1] sensor chip
+The following specification describes the SMSC LPC47B397-NC [1]_ sensor chip
(for which there is no public datasheet available). This document was
provided by Craig Kelly (In-Store Broadcast Network) and edited/corrected
by Mark M. Hoffman <mhoffman@lightlink.com>.
-[1] And SMSC SCH5307-NS and SCH5317, which have different device IDs but are
-otherwise compatible.
+.. [1] And SMSC SCH5307-NS and SCH5317, which have different device IDs but are
+ otherwise compatible.
-* * * * *
+-------------------------------------------------------------------------
-Methods for detecting the HP SIO and reading the thermal data on a dc7100.
+Methods for detecting the HP SIO and reading the thermal data on a dc7100
+-------------------------------------------------------------------------
The thermal information on the dc7100 is contained in the SIO Hardware Monitor
(HWM). The information is accessed through an index/data pair. The index/data
@@ -35,18 +44,22 @@ and 0x61 (LSB). Currently we are using 0x480 for the HWM Base Address and
Reading temperature information.
The temperature information is located in the following registers:
+
+=============== ======= =======================================================
Temp1 0x25 (Currently, this reflects the CPU temp on all systems).
Temp2 0x26
Temp3 0x27
Temp4 0x80
+=============== ======= =======================================================
Programming Example
-The following is an example of how to read the HWM temperature registers:
-MOV DX,480H
-MOV AX,25H
-OUT DX,AL
-MOV DX,481H
-IN AL,DX
+The following is an example of how to read the HWM temperature registers::
+
+ MOV DX,480H
+ MOV AX,25H
+ OUT DX,AL
+ MOV DX,481H
+ IN AL,DX
AL contains the data in hex, the temperature in Celsius is the decimal
equivalent.
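
The same index/data access can be expressed in C. A sketch, assuming the HWM
base address of 0x480 used above and I/O port permissions obtained via
ioperm() or iopl()::

    #include <sys/io.h>

    static unsigned char hwm_read(unsigned short base, unsigned char index)
    {
            outb(index, base);        /* select the register, e.g. 0x25 for Temp1 */
            return inb(base + 1);     /* read the value from the data port */
    }
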
@@ -55,25 +68,32 @@ Ex: If AL contains 0x2A, the temperature is 42 degrees C.
Reading tach information.
The fan speed information is located in the following registers:
+
+=============== ======= ======= =================================
LSB MSB
Tach1 0x28 0x29 (Currently, this reflects the CPU
fan speed on all systems).
Tach2 0x2A 0x2B
Tach3 0x2C 0x2D
Tach4 0x2E 0x2F
+=============== ======= ======= =================================
-Important!!!
-Reading the tach LSB locks the tach MSB.
-The LSB Must be read first.
+.. Important::
+
+ Reading the tach LSB locks the tach MSB.
+ The LSB Must be read first.
+
+How to convert the tach reading to RPM
+--------------------------------------
-How to convert the tach reading to RPM.
The tach reading (TCount) is given by: (Tach MSB * 256) + (Tach LSB)
The SIO counts the number of 90kHz (11.111us) pulses per revolution.
RPM = 60/(TCount * 11.111us)
-Example:
-Reg 0x28 = 0x9B
-Reg 0x29 = 0x08
+Example::
+
+ Reg 0x28 = 0x9B
+ Reg 0x29 = 0x08
TCount = 0x89B = 2203
@@ -81,21 +101,28 @@ RPM = 60 / (2203 * 11.11111 E-6) = 2451 RPM
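
Putting the two tach registers and the formula together, a C helper might
look like this sketch (one count is 11.111 us, i.e. a 90 kHz clock)::

    static unsigned int tach_to_rpm(unsigned char lsb, unsigned char msb)
    {
            unsigned int tcount = (msb << 8) | lsb;    /* read the LSB first */

            /* 60 s / 11.111 us = 5400000 counts per minute */
            return tcount ? 5400000 / tcount : 0;
    }

For the example above, tach_to_rpm(0x9B, 0x08) returns 2451.
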
Obtaining the SIO version.
-CONFIGURATION SEQUENCE
+Configuration Sequence
+----------------------
+
To program the configuration registers, the following sequence must be followed:
1. Enter Configuration Mode
2. Configure the Configuration Registers
3. Exit Configuration Mode.
Enter Configuration Mode
+^^^^^^^^^^^^^^^^^^^^^^^^
+
To place the chip into the Configuration State, the config key (0x55) is written
to the CONFIG PORT (0x2E).
Configuration Mode
+^^^^^^^^^^^^^^^^^^
+
In configuration mode, the INDEX PORT is located at the CONFIG PORT address and
the DATA PORT is at INDEX PORT address + 1.
The desired configuration registers are accessed in two steps:
+
a. Write the index of the Logical Device Number Configuration Register
(i.e., 0x07) to the INDEX PORT and then write the number of the
desired logical device to the DATA PORT.
@@ -104,30 +131,35 @@ b. Write the address of the desired configuration register within the
logical device to the INDEX PORT and then write or read the config-
uration register through the DATA PORT.
-Note: If accessing the Global Configuration Registers, step (a) is not required.
+Note:
+ If accessing the Global Configuration Registers, step (a) is not required.
Exit Configuration Mode
+^^^^^^^^^^^^^^^^^^^^^^^
+
To exit the Configuration State, write 0xAA to the CONFIG PORT (0x2E).
The chip returns to the RUN State. (This is important).
Programming Example
-The following is an example of how to read the SIO Device ID located at 0x20
-
-; ENTER CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,055H
-OUT DX,AL
-; GLOBAL CONFIGURATION REGISTER
-MOV DX,02EH
-MOV AL,20H
-OUT DX,AL
-; READ THE DATA
-MOV DX,02FH
-IN AL,DX
-; EXIT CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,0AAH
-OUT DX,AL
+^^^^^^^^^^^^^^^^^^^
+
+The following is an example of how to read the SIO Device ID located at 0x20::
+
+ ; ENTER CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,055H
+ OUT DX,AL
+ ; GLOBAL CONFIGURATION REGISTER
+ MOV DX,02EH
+ MOV AL,20H
+ OUT DX,AL
+ ; READ THE DATA
+ MOV DX,02FH
+ IN AL,DX
+ ; EXIT CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,0AAH
+ OUT DX,AL
The registers of interest for identifying the SIO on the dc7100 are Device ID
(0x20) and Device Rev (0x21).
@@ -135,29 +167,31 @@ The registers of interest for identifying the SIO on the dc7100 are Device ID
The Device ID will read 0x6F (0x81 for SCH5307-NS, and 0x85 for SCH5317)
The Device Rev currently reads 0x01
-Obtaining the HWM Base Address.
+Obtaining the HWM Base Address
+------------------------------
+
The following is an example of how to read the HWM Base Address located in
-Logical Device 8.
-
-; ENTER CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,055H
-OUT DX,AL
-; CONFIGURE REGISTER CRE0,
-; LOGICAL DEVICE 8
-MOV DX,02EH
-MOV AL,07H
-OUT DX,AL ;Point to LD# Config Reg
-MOV DX,02FH
-MOV AL, 08H
-OUT DX,AL;Point to Logical Device 8
-;
-MOV DX,02EH
-MOV AL,60H
-OUT DX,AL ; Point to HWM Base Addr MSB
-MOV DX,02FH
-IN AL,DX ; Get MSB of HWM Base Addr
-; EXIT CONFIGURATION MODE
-MOV DX,02EH
-MOV AX,0AAH
-OUT DX,AL
+Logical Device 8::
+
+ ; ENTER CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,055H
+ OUT DX,AL
+ ; CONFIGURE REGISTER CRE0,
+ ; LOGICAL DEVICE 8
+ MOV DX,02EH
+ MOV AL,07H
+ OUT DX,AL ;Point to LD# Config Reg
+ MOV DX,02FH
+ MOV AL, 08H
+ OUT DX,AL;Point to Logical Device 8
+ ;
+ MOV DX,02EH
+ MOV AL,60H
+ OUT DX,AL ; Point to HWM Base Addr MSB
+ MOV DX,02FH
+ IN AL,DX ; Get MSB of HWM Base Addr
+ ; EXIT CONFIGURATION MODE
+ MOV DX,02EH
+ MOV AX,0AAH
+ OUT DX,AL
diff --git a/Documentation/hwmon/smsc47m1 b/Documentation/hwmon/smsc47m1.rst
index 10a24b420686..c54eabd5eb57 100644
--- a/Documentation/hwmon/smsc47m1
+++ b/Documentation/hwmon/smsc47m1.rst
@@ -2,30 +2,53 @@ Kernel driver smsc47m1
======================
Supported chips:
+
* SMSC LPC47B27x, LPC47M112, LPC47M10x, LPC47M13x, LPC47M14x,
+
LPC47M15x and LPC47M192
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m1'
+
Datasheets:
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47b272.pdf
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m10x.pdf
- http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m112.pdf
- http://www.smsc.com/
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47b272.pdf
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m10x.pdf
+
+ http://www.smsc.com/media/Downloads_Public/Data_Sheets/47m112.pdf
+
+ http://www.smsc.com/
+
* SMSC LPC47M292
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m2'
+
Datasheet: Not public
+
* SMSC LPC47M997
+
Addresses scanned: none, address read from Super I/O config space
+
Prefix: 'smsc47m1'
+
Datasheet: none
+
+
Authors:
- Mark D. Studebaker <mdsxyz123@yahoo.com>,
- With assistance from Bruce Allen <ballen@uwm.edu>, and his
- fan.c program: http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
- Gabriele Gorla <gorlik@yahoo.com>,
- Jean Delvare <jdelvare@suse.de>
+
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>,
+ - With assistance from Bruce Allen <ballen@uwm.edu>, and his
+ fan.c program:
+
+ - http://www.lsc-group.phys.uwm.edu/%7Eballen/driver/
+
+ - Gabriele Gorla <gorlik@yahoo.com>,
+ - Jean Delvare <jdelvare@suse.de>
Description
-----------
@@ -57,7 +80,7 @@ hardware registers are read whenever any data is read (unless it is less
than 1.5 seconds since the last update). This means that you can easily
miss once-only alarms.
+------------------------------------------------------------------
-**********************
The lm_sensors project gratefully acknowledges the support of
Intel in the development of this driver.
diff --git a/Documentation/hwmon/smsc47m192 b/Documentation/hwmon/smsc47m192
deleted file mode 100644
index 6d54ecb7b3f8..000000000000
--- a/Documentation/hwmon/smsc47m192
+++ /dev/null
@@ -1,103 +0,0 @@
-Kernel driver smsc47m192
-========================
-
-Supported chips:
- * SMSC LPC47M192, LPC47M15x, LPC47M292 and LPC47M997
- Prefix: 'smsc47m192'
- Addresses scanned: I2C 0x2c - 0x2d
- Datasheet: The datasheet for LPC47M192 is publicly available from
- http://www.smsc.com/
- The LPC47M15x, LPC47M292 and LPC47M997 are compatible for
- hardware monitoring.
-
-Author: Hartmut Rick <linux@rick.claranet.de>
- Special thanks to Jean Delvare for careful checking
- of the code and many helpful comments and suggestions.
-
-
-Description
------------
-
-This driver implements support for the hardware sensor capabilities
-of the SMSC LPC47M192 and compatible Super-I/O chips.
-
-These chips support 3 temperature channels and 8 voltage inputs
-as well as CPU voltage VID input.
-
-They do also have fan monitoring and control capabilities, but the
-these features are accessed via ISA bus and are not supported by this
-driver. Use the 'smsc47m1' driver for fan monitoring and control.
-
-Voltages and temperatures are measured by an 8-bit ADC, the resolution
-of the temperatures is 1 bit per degree C.
-Voltages are scaled such that the nominal voltage corresponds to
-192 counts, i.e. 3/4 of the full range. Thus the available range for
-each voltage channel is 0V ... 255/192*(nominal voltage), the resolution
-is 1 bit per (nominal voltage)/192.
-Both voltage and temperature values are scaled by 1000, the sys files
-show voltages in mV and temperatures in units of 0.001 degC.
-
-The +12V analog voltage input channel (in4_input) is multiplexed with
-bit 4 of the encoded CPU voltage. This means that you either get
-a +12V voltage measurement or a 5 bit CPU VID, but not both.
-The default setting is to use the pin as 12V input, and use only 4 bit VID.
-This driver assumes that the information in the configuration register
-is correct, i.e. that the BIOS has updated the configuration if
-the motherboard has this input wired to VID4.
-
-The temperature and voltage readings are updated once every 1.5 seconds.
-Reading them more often repeats the same values.
-
-
-sysfs interface
----------------
-
-in0_input - +2.5V voltage input
-in1_input - CPU voltage input (nominal 2.25V)
-in2_input - +3.3V voltage input
-in3_input - +5V voltage input
-in4_input - +12V voltage input (may be missing if used as VID4)
-in5_input - Vcc voltage input (nominal 3.3V)
- This is the supply voltage of the sensor chip itself.
-in6_input - +1.5V voltage input
-in7_input - +1.8V voltage input
-
-in[0-7]_min,
-in[0-7]_max - lower and upper alarm thresholds for in[0-7]_input reading
-
- All voltages are read and written in mV.
-
-in[0-7]_alarm - alarm flags for voltage inputs
- These files read '1' in case of alarm, '0' otherwise.
-
-temp1_input - chip temperature measured by on-chip diode
-temp[2-3]_input - temperature measured by external diodes (one of these would
- typically be wired to the diode inside the CPU)
-
-temp[1-3]_min,
-temp[1-3]_max - lower and upper alarm thresholds for temperatures
-
-temp[1-3]_offset - temperature offset registers
- The chip adds the offsets stored in these registers to
- the corresponding temperature readings.
- Note that temp1 and temp2 offsets share the same register,
- they cannot both be different from zero at the same time.
- Writing a non-zero number to one of them will reset the other
- offset to zero.
-
- All temperatures and offsets are read and written in
- units of 0.001 degC.
-
-temp[1-3]_alarm - alarm flags for temperature inputs, '1' in case of alarm,
- '0' otherwise.
-temp[2-3]_input_fault - diode fault flags for temperature inputs 2 and 3.
- A fault is detected if the two pins for the corresponding
- sensor are open or shorted, or any of the two is shorted
- to ground or Vcc. '1' indicates a diode fault.
-
-cpu0_vid - CPU voltage as received from the CPU
-
-vrm - CPU VID standard used for decoding CPU voltage
-
- The *_min, *_max, *_offset and vrm files can be read and
- written, all others are read-only.
diff --git a/Documentation/hwmon/smsc47m192.rst b/Documentation/hwmon/smsc47m192.rst
new file mode 100644
index 000000000000..a2e86ab67918
--- /dev/null
+++ b/Documentation/hwmon/smsc47m192.rst
@@ -0,0 +1,116 @@
+Kernel driver smsc47m192
+========================
+
+Supported chips:
+
+ * SMSC LPC47M192, LPC47M15x, LPC47M292 and LPC47M997
+
+ Prefix: 'smsc47m192'
+
+ Addresses scanned: I2C 0x2c - 0x2d
+
+ Datasheet: The datasheet for LPC47M192 is publicly available from
+
+ http://www.smsc.com/
+
+ The LPC47M15x, LPC47M292 and LPC47M997 are compatible for
+ hardware monitoring.
+
+
+
+Author:
+ - Hartmut Rick <linux@rick.claranet.de>
+
+ - Special thanks to Jean Delvare for careful checking
+ of the code and many helpful comments and suggestions.
+
+
+Description
+-----------
+
+This driver implements support for the hardware sensor capabilities
+of the SMSC LPC47M192 and compatible Super-I/O chips.
+
+These chips support 3 temperature channels and 8 voltage inputs
+as well as CPU voltage VID input.
+
+They also have fan monitoring and control capabilities, but these
+features are accessed via the ISA bus and are not supported by this
+driver. Use the 'smsc47m1' driver for fan monitoring and control.
+
+Voltages and temperatures are measured by an 8-bit ADC, the resolution
+of the temperatures is 1 bit per degree C.
+Voltages are scaled such that the nominal voltage corresponds to
+192 counts, i.e. 3/4 of the full range. Thus the available range for
+each voltage channel is 0V ... 255/192*(nominal voltage), the resolution
+is 1 bit per (nominal voltage)/192.
+Both voltage and temperature values are scaled by 1000, the sys files
+show voltages in mV and temperatures in units of 0.001 degC.
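
As an illustrative sketch of that scaling (the function name is made up; it
mirrors the conversion described above)::

    /*
     * Convert a raw 8-bit ADC reading to millivolts for a channel whose
     * nominal voltage is nominal_mv; 192 counts correspond to the nominal
     * voltage.
     */
    static unsigned int reading_to_mv(unsigned int raw, unsigned int nominal_mv)
    {
            return raw * nominal_mv / 192;
    }
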
+
+The +12V analog voltage input channel (in4_input) is multiplexed with
+bit 4 of the encoded CPU voltage. This means that you either get
+a +12V voltage measurement or a 5 bit CPU VID, but not both.
+The default setting is to use the pin as 12V input, and use only 4 bit VID.
+This driver assumes that the information in the configuration register
+is correct, i.e. that the BIOS has updated the configuration if
+the motherboard has this input wired to VID4.
+
+The temperature and voltage readings are updated once every 1.5 seconds.
+Reading them more often repeats the same values.
+
+
+sysfs interface
+---------------
+
+===================== ==========================================================
+in0_input +2.5V voltage input
+in1_input CPU voltage input (nominal 2.25V)
+in2_input +3.3V voltage input
+in3_input +5V voltage input
+in4_input +12V voltage input (may be missing if used as VID4)
+in5_input Vcc voltage input (nominal 3.3V)
+ This is the supply voltage of the sensor chip itself.
+in6_input +1.5V voltage input
+in7_input +1.8V voltage input
+
+in[0-7]_min,
+in[0-7]_max lower and upper alarm thresholds for in[0-7]_input reading
+
+ All voltages are read and written in mV.
+
+in[0-7]_alarm alarm flags for voltage inputs
+ These files read '1' in case of alarm, '0' otherwise.
+
+temp1_input chip temperature measured by on-chip diode
+temp[2-3]_input temperature measured by external diodes (one of these
+ would typically be wired to the diode inside the CPU)
+
+temp[1-3]_min,
+temp[1-3]_max lower and upper alarm thresholds for temperatures
+
+temp[1-3]_offset temperature offset registers
+ The chip adds the offsets stored in these registers to
+ the corresponding temperature readings.
+ Note that temp1 and temp2 offsets share the same register,
+ they cannot both be different from zero at the same time.
+ Writing a non-zero number to one of them will reset the other
+ offset to zero.
+
+ All temperatures and offsets are read and written in
+ units of 0.001 degC.
+
+temp[1-3]_alarm alarm flags for temperature inputs, '1' in case of alarm,
+ '0' otherwise.
+temp[2-3]_input_fault diode fault flags for temperature inputs 2 and 3.
+ A fault is detected if the two pins for the corresponding
+ sensor are open or shorted, or any of the two is shorted
+ to ground or Vcc. '1' indicates a diode fault.
+
+cpu0_vid CPU voltage as received from the CPU
+
+vrm CPU VID standard used for decoding CPU voltage
+===================== ==========================================================
+
+The `*_min`, `*_max`, `*_offset` and `vrm` files can be read and written,
+all others are read-only.
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches.rst
index f88221b46153..f9796b9d9db6 100644
--- a/Documentation/hwmon/submitting-patches
+++ b/Documentation/hwmon/submitting-patches.rst
@@ -1,5 +1,5 @@
- How to Get Your Patch Accepted Into the Hwmon Subsystem
- -------------------------------------------------------
+How to Get Your Patch Accepted Into the Hwmon Subsystem
+=======================================================
This text is a collection of suggestions for people writing patches or
drivers for the hwmon subsystem. Following these suggestions will greatly
@@ -9,11 +9,12 @@ increase the chances of your change being accepted.
1. General
----------
-* It should be unnecessary to mention, but please read and follow
- Documentation/process/submit-checklist.rst
- Documentation/process/submitting-drivers.rst
- Documentation/process/submitting-patches.rst
- Documentation/process/coding-style.rst
+* It should be unnecessary to mention, but please read and follow:
+
+ - Documentation/process/submit-checklist.rst
+ - Documentation/process/submitting-drivers.rst
+ - Documentation/process/submitting-patches.rst
+ - Documentation/process/coding-style.rst
* Please run your patch through 'checkpatch --strict'. There should be no
errors, no warnings, and few if any check messages. If there are any
@@ -38,7 +39,7 @@ increase the chances of your change being accepted.
2. Adding functionality to existing drivers
-------------------------------------------
-* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+* Make sure the documentation in Documentation/hwmon/<driver_name>.rst is up to
date.
* Make sure the information in Kconfig is up to date.
@@ -60,7 +61,7 @@ increase the chances of your change being accepted.
* Consider adding yourself to MAINTAINERS.
-* Document the driver in Documentation/hwmon/<driver_name>.
+* Document the driver in Documentation/hwmon/<driver_name>.rst.
* Add the driver to Kconfig and Makefile in alphabetical order.
@@ -133,7 +134,7 @@ increase the chances of your change being accepted.
non-standard attributes, or you believe you do, discuss it on the mailing list
first. Either case, provide a detailed explanation why you need the
non-standard attribute(s).
- Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+ Standard attributes are specified in Documentation/hwmon/sysfs-interface.rst.
* When deciding which sysfs attributes to support, look at the chip's
capabilities. While we do not expect your driver to support everything the
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface.rst
index 2b9e1005d88b..fd590633bb14 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface.rst
@@ -1,5 +1,5 @@
Naming and data format standards for sysfs files
-------------------------------------------------
+================================================
The libsensors library offers an interface to the raw sensors data
through the sysfs interface. Since lm-sensors 3.0.0, libsensors is
@@ -32,7 +32,7 @@ this reason, it is still not recommended to bypass the library.
Each chip gets its own directory in the sysfs /sys/devices tree. To
find all sensor chips, it is easier to follow the device symlinks from
-/sys/class/hwmon/hwmon*.
+`/sys/class/hwmon/hwmon*`.
Up to lm-sensors 3.0.0, libsensors looks for hardware monitoring attributes
in the "physical" device directory. Since lm-sensors 3.0.1, attributes found
@@ -67,11 +67,13 @@ are interpreted as 0! For more on how written strings are interpreted see the
-------------------------------------------------------------------------
-[0-*] denotes any positive number starting from 0
-[1-*] denotes any positive number starting from 1
+======= ===========================================
+`[0-*]` denotes any positive number starting from 0
+`[1-*]` denotes any positive number starting from 1
RO read only value
WO write only value
RW read/write value
+======= ===========================================
Read/write values may be read-only for some chips, depending on the
hardware implementation.
@@ -80,57 +82,82 @@ All entries (except name) are optional, and should only be created in a
given driver if the chip has the feature.
-*********************
-* Global attributes *
-*********************
+*****************
+Global attributes
+*****************
-name The chip name.
+`name`
+ The chip name.
This should be a short, lowercase string, not containing
whitespace, dashes, or the wildcard character '*'.
This attribute represents the chip name. It is the only
mandatory attribute.
I2C devices get this attribute created automatically.
+
RO
-update_interval The interval at which the chip will update readings.
+`update_interval`
+ The interval at which the chip will update readings.
Unit: millisecond
+
RW
+
Some devices have a variable update rate or interval.
This attribute can be used to change it to the desired value.
-************
-* Voltages *
-************
+********
+Voltages
+********
+
+`in[0-*]_min`
+ Voltage min value.
-in[0-*]_min Voltage min value.
Unit: millivolt
+
RW
-
-in[0-*]_lcrit Voltage critical min value.
+
+`in[0-*]_lcrit`
+ Voltage critical min value.
+
Unit: millivolt
+
RW
+
If voltage drops to or below this limit, the system may
take drastic action such as power down or reset. At the very
least, it should report a fault.
-in[0-*]_max Voltage max value.
+`in[0-*]_max`
+ Voltage max value.
+
Unit: millivolt
+
RW
-
-in[0-*]_crit Voltage critical max value.
+
+`in[0-*]_crit`
+ Voltage critical max value.
+
Unit: millivolt
+
RW
+
If voltage reaches or exceeds this limit, the system may
take drastic action such as power down or reset. At the very
least, it should report a fault.
-in[0-*]_input Voltage input value.
+`in[0-*]_input`
+ Voltage input value.
+
Unit: millivolt
+
RO
+
Voltage measured on the chip pin.
+
Actual voltage depends on the scaling resistors on the
motherboard, as recommended in the chip datasheet.
+
This varies by chip and by motherboard.
Because of this variation, values are generally NOT scaled
by the chip driver, and must be done by the application.
@@ -140,166 +167,232 @@ in[0-*]_input Voltage input value.
thumb: drivers should report the voltage values at the
"pins" of the chip.
-in[0-*]_average
+`in[0-*]_average`
Average voltage
+
Unit: millivolt
+
RO
-in[0-*]_lowest
+`in[0-*]_lowest`
Historical minimum voltage
+
Unit: millivolt
+
RO
-in[0-*]_highest
+`in[0-*]_highest`
Historical maximum voltage
+
Unit: millivolt
+
RO
-in[0-*]_reset_history
+`in[0-*]_reset_history`
Reset inX_lowest and inX_highest
+
WO
-in_reset_history
+`in_reset_history`
Reset inX_lowest and inX_highest for all sensors
+
WO
-in[0-*]_label Suggested voltage channel label.
+`in[0-*]_label`
+ Suggested voltage channel label.
+
Text string
+
Should only be created if the driver has hints about what
this voltage channel is being used for, and user-space
doesn't. In all other cases, the label is provided by
user-space.
+
RO
-in[0-*]_enable
+`in[0-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
-cpu[0-*]_vid CPU core reference voltage.
+`cpu[0-*]_vid`
+ CPU core reference voltage.
+
Unit: millivolt
+
RO
+
Not always correct.
-vrm Voltage Regulator Module version number.
+`vrm`
+ Voltage Regulator Module version number.
+
RW (but changing it should no more be necessary)
+
Originally the VRM standard version multiplied by 10, but now
an arbitrary number, as not all standards have a version
number.
+
Affects the way the driver calculates the CPU core reference
voltage from the vid pins.
Also see the Alarms section for status flags associated with voltages.
-********
-* Fans *
-********
+****
+Fans
+****
+
+`fan[1-*]_min`
+ Fan minimum value
-fan[1-*]_min Fan minimum value
Unit: revolution/min (RPM)
+
RW
-fan[1-*]_max Fan maximum value
+`fan[1-*]_max`
+ Fan maximum value
+
Unit: revolution/min (RPM)
+
Only rarely supported by the hardware.
RW
-fan[1-*]_input Fan input value.
+`fan[1-*]_input`
+ Fan input value.
+
Unit: revolution/min (RPM)
+
RO
-fan[1-*]_div Fan divisor.
+`fan[1-*]_div`
+ Fan divisor.
+
Integer value in powers of two (1, 2, 4, 8, 16, 32, 64, 128).
+
RW
+
Some chips only support values 1, 2, 4 and 8.
Note that this is actually an internal clock divisor, which
affects the measurable speed range, not the read value.
-fan[1-*]_pulses Number of tachometer pulses per fan revolution.
+`fan[1-*]_pulses`
+ Number of tachometer pulses per fan revolution.
+
Integer value, typically between 1 and 4.
+
RW
+
This value is a characteristic of the fan connected to the
device's input, so it has to be set in accordance with the fan
model.
+
Should only be created if the chip has a register to configure
the number of pulses. In the absence of such a register (and
thus attribute) the value assumed by all devices is 2 pulses
per fan revolution.
-fan[1-*]_target
+`fan[1-*]_target`
Desired fan speed
+
Unit: revolution/min (RPM)
+
RW
+
Only makes sense if the chip supports closed-loop fan speed
control based on the measured fan speed.
-fan[1-*]_label Suggested fan channel label.
+`fan[1-*]_label`
+ Suggested fan channel label.
+
Text string
+
Should only be created if the driver has hints about what
this fan channel is being used for, and user-space doesn't.
In all other cases, the label is provided by user-space.
+
RO
-fan[1-*]_enable
+`fan[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with fans.
-*******
-* PWM *
-*******
+***
+PWM
+***
+
+`pwm[1-*]`
+ Pulse width modulation fan control.
-pwm[1-*] Pulse width modulation fan control.
Integer value in the range 0 to 255
+
RW
+
255 is max or 100%.
-pwm[1-*]_enable
+`pwm[1-*]_enable`
Fan speed control method:
- 0: no fan speed control (i.e. fan at full speed)
- 1: manual fan speed control enabled (using pwm[1-*])
- 2+: automatic fan speed control enabled
+
+ - 0: no fan speed control (i.e. fan at full speed)
+ - 1: manual fan speed control enabled (using `pwm[1-*]`)
+ - 2+: automatic fan speed control enabled
+
Check individual chip documentation files for automatic mode
details.
+
RW
-pwm[1-*]_mode 0: DC mode (direct current)
- 1: PWM mode (pulse-width modulation)
+`pwm[1-*]_mode`
+ - 0: DC mode (direct current)
+ - 1: PWM mode (pulse-width modulation)
+
RW
-pwm[1-*]_freq Base PWM frequency in Hz.
+`pwm[1-*]_freq`
+ Base PWM frequency in Hz.
+
Only possibly available when pwmN_mode is PWM, but not always
present even then.
+
RW
-pwm[1-*]_auto_channels_temp
+`pwm[1-*]_auto_channels_temp`
Select which temperature channels affect this PWM output in
- auto mode. Bitfield, 1 is temp1, 2 is temp2, 4 is temp3 etc...
+ auto mode.
+
+ Bitfield, 1 is temp1, 2 is temp2, 4 is temp3 etc...
Which values are possible depend on the chip used.
+
RW
-pwm[1-*]_auto_point[1-*]_pwm
-pwm[1-*]_auto_point[1-*]_temp
-pwm[1-*]_auto_point[1-*]_temp_hyst
- Define the PWM vs temperature curve. Number of trip points is
- chip-dependent. Use this for chips which associate trip points
- to PWM output channels.
+`pwm[1-*]_auto_point[1-*]_pwm` / `pwm[1-*]_auto_point[1-*]_temp` / `pwm[1-*]_auto_point[1-*]_temp_hyst`
+ Define the PWM vs temperature curve.
+
+ Number of trip points is chip-dependent. Use this for chips
+ which associate trip points to PWM output channels.
+
RW
-temp[1-*]_auto_point[1-*]_pwm
-temp[1-*]_auto_point[1-*]_temp
-temp[1-*]_auto_point[1-*]_temp_hyst
- Define the PWM vs temperature curve. Number of trip points is
- chip-dependent. Use this for chips which associate trip points
- to temperature channels.
+`temp[1-*]_auto_point[1-*]_pwm` / `temp[1-*]_auto_point[1-*]_temp` / `temp[1-*]_auto_point[1-*]_temp_hyst`
+ Define the PWM vs temperature curve.
+
+ Number of trip points is chip-dependent. Use this for chips
+ which associate trip points to temperature channels.
+
RW
There is a third case where trip points are associated to both PWM output
@@ -312,122 +405,173 @@ The actual result is up to the chip, but in general the highest candidate
value (fastest fan speed) wins.
-****************
-* Temperatures *
-****************
+************
+Temperatures
+************
+
+`temp[1-*]_type`
+ Sensor type selection.
-temp[1-*]_type Sensor type selection.
Integers 1 to 6
+
RW
- 1: CPU embedded diode
- 2: 3904 transistor
- 3: thermal diode
- 4: thermistor
- 5: AMD AMDSI
- 6: Intel PECI
+
+ - 1: CPU embedded diode
+ - 2: 3904 transistor
+ - 3: thermal diode
+ - 4: thermistor
+ - 5: AMD AMDSI
+ - 6: Intel PECI
+
Not all types are supported by all chips
-temp[1-*]_max Temperature max value.
+`temp[1-*]_max`
+ Temperature max value.
+
Unit: millidegree Celsius (or millivolt, see below)
+
RW
-temp[1-*]_min Temperature min value.
+`temp[1-*]_min`
+ Temperature min value.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_max_hyst
+`temp[1-*]_max_hyst`
Temperature hysteresis value for max limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the max value.
+
RW
-temp[1-*]_min_hyst
+`temp[1-*]_min_hyst`
Temperature hysteresis value for min limit.
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the min value.
+
RW
-temp[1-*]_input Temperature input value.
+`temp[1-*]_input`
+ Temperature input value.
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_crit Temperature critical max value, typically greater than
+`temp[1-*]_crit`
+ Temperature critical max value, typically greater than
corresponding temp_max values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_crit_hyst
+`temp[1-*]_crit_hyst`
Temperature hysteresis value for critical limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the critical value.
+
RW
-temp[1-*]_emergency
+`temp[1-*]_emergency`
Temperature emergency max value, for chips supporting more than
two upper temperature limits. Must be equal or greater than
corresponding temp_crit values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_emergency_hyst
+`temp[1-*]_emergency_hyst`
Temperature hysteresis value for emergency limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the emergency value.
+
RW
-temp[1-*]_lcrit Temperature critical min value, typically lower than
+`temp[1-*]_lcrit`
+ Temperature critical min value, typically lower than
corresponding temp_min values.
+
Unit: millidegree Celsius
+
RW
-temp[1-*]_lcrit_hyst
+`temp[1-*]_lcrit_hyst`
Temperature hysteresis value for critical min limit.
+
Unit: millidegree Celsius
+
Must be reported as an absolute temperature, NOT a delta
from the critical min value.
+
RW
-temp[1-*]_offset
+`temp[1-*]_offset`
Temperature offset which is added to the temperature reading
by the chip.
+
Unit: millidegree Celsius
+
Read/Write value.
-temp[1-*]_label Suggested temperature channel label.
+`temp[1-*]_label`
+ Suggested temperature channel label.
+
Text string
+
Should only be created if the driver has hints about what
this temperature channel is being used for, and user-space
doesn't. In all other cases, the label is provided by
user-space.
+
RO
-temp[1-*]_lowest
+`temp[1-*]_lowest`
Historical minimum temperature
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_highest
+`temp[1-*]_highest`
Historical maximum temperature
+
Unit: millidegree Celsius
+
RO
-temp[1-*]_reset_history
+`temp[1-*]_reset_history`
Reset temp_lowest and temp_highest
+
WO
-temp_reset_history
+`temp_reset_history`
Reset temp_lowest and temp_highest for all sensors
+
WO
-temp[1-*]_enable
+`temp[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Some chips measure temperature using external thermistors and an ADC, and
@@ -442,201 +586,300 @@ channels by the driver.
Also see the Alarms section for status flags associated with temperatures.
-************
-* Currents *
-************
+********
+Currents
+********
+
+`curr[1-*]_max`
+ Current max value
-curr[1-*]_max Current max value
Unit: milliampere
+
RW
-curr[1-*]_min Current min value.
+`curr[1-*]_min`
+ Current min value.
+
Unit: milliampere
+
RW
-curr[1-*]_lcrit Current critical low value
+`curr[1-*]_lcrit`
+ Current critical low value
+
Unit: milliampere
+
RW
-curr[1-*]_crit Current critical high value.
+`curr[1-*]_crit`
+ Current critical high value.
+
Unit: milliampere
+
RW
-curr[1-*]_input Current input value
+`curr[1-*]_input`
+ Current input value
+
Unit: milliampere
+
RO
-curr[1-*]_average
+`curr[1-*]_average`
Average current use
+
Unit: milliampere
+
RO
-curr[1-*]_lowest
+`curr[1-*]_lowest`
Historical minimum current
+
Unit: milliampere
+
RO
-curr[1-*]_highest
+`curr[1-*]_highest`
Historical maximum current
Unit: milliampere
RO
-curr[1-*]_reset_history
+`curr[1-*]_reset_history`
Reset currX_lowest and currX_highest
+
WO
-curr_reset_history
+`curr_reset_history`
Reset currX_lowest and currX_highest for all sensors
+
WO
-curr[1-*]_enable
+`curr[1-*]_enable`
Enable or disable the sensors.
+
When disabled the sensor read will return -ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with currents.
-*********
-* Power *
-*********
+*****
+Power
+*****
+
+`power[1-*]_average`
+ Average power use
-power[1-*]_average Average power use
Unit: microWatt
+
RO
-power[1-*]_average_interval Power use averaging interval. A poll
+`power[1-*]_average_interval`
+ Power use averaging interval. A poll
notification is sent to this file if the
hardware changes the averaging interval.
+
Unit: milliseconds
+
RW
-power[1-*]_average_interval_max Maximum power use averaging interval
+`power[1-*]_average_interval_max`
+ Maximum power use averaging interval
+
Unit: milliseconds
+
RO
-power[1-*]_average_interval_min Minimum power use averaging interval
+`power[1-*]_average_interval_min`
+ Minimum power use averaging interval
+
Unit: milliseconds
+
RO
-power[1-*]_average_highest Historical average maximum power use
+`power[1-*]_average_highest`
+ Historical average maximum power use
+
Unit: microWatt
+
RO
-power[1-*]_average_lowest Historical average minimum power use
+`power[1-*]_average_lowest`
+ Historical average minimum power use
+
Unit: microWatt
+
RO
-power[1-*]_average_max A poll notification is sent to
- power[1-*]_average when power use
+`power[1-*]_average_max`
+ A poll notification is sent to
+ `power[1-*]_average` when power use
rises above this value.
+
Unit: microWatt
+
RW
-power[1-*]_average_min A poll notification is sent to
- power[1-*]_average when power use
+`power[1-*]_average_min`
+ A poll notification is sent to
+ `power[1-*]_average` when power use
sinks below this value.
+
Unit: microWatt
+
RW
-power[1-*]_input Instantaneous power use
+`power[1-*]_input`
+ Instantaneous power use
+
Unit: microWatt
+
RO
-power[1-*]_input_highest Historical maximum power use
+`power[1-*]_input_highest`
+ Historical maximum power use
+
Unit: microWatt
+
RO
-power[1-*]_input_lowest Historical minimum power use
+`power[1-*]_input_lowest`
+ Historical minimum power use
+
Unit: microWatt
+
RO
-power[1-*]_reset_history Reset input_highest, input_lowest,
+`power[1-*]_reset_history`
+ Reset input_highest, input_lowest,
average_highest and average_lowest.
+
WO
-power[1-*]_accuracy Accuracy of the power meter.
+`power[1-*]_accuracy`
+ Accuracy of the power meter.
+
Unit: Percent
+
RO
-power[1-*]_cap If power use rises above this limit, the
+`power[1-*]_cap`
+ If power use rises above this limit, the
system should take action to reduce power use.
A poll notification is sent to this file if the
- cap is changed by the hardware. The *_cap
+ cap is changed by the hardware. The `*_cap`
files only appear if the cap is known to be
enforced by hardware.
+
Unit: microWatt
+
RW
-power[1-*]_cap_hyst Margin of hysteresis built around capping and
+`power[1-*]_cap_hyst`
+ Margin of hysteresis built around capping and
notification.
+
Unit: microWatt
+
RW
-power[1-*]_cap_max Maximum cap that can be set.
+`power[1-*]_cap_max`
+ Maximum cap that can be set.
+
Unit: microWatt
+
RO
-power[1-*]_cap_min Minimum cap that can be set.
+`power[1-*]_cap_min`
+ Minimum cap that can be set.
+
Unit: microWatt
+
RO
-power[1-*]_max Maximum power.
+`power[1-*]_max`
+ Maximum power.
+
Unit: microWatt
+
RW
-power[1-*]_crit Critical maximum power.
+`power[1-*]_crit`
+ Critical maximum power.
+
If power rises to or above this limit, the
		system is expected to take drastic action to reduce
power consumption, such as a system shutdown or
a forced powerdown of some devices.
+
Unit: microWatt
+
RW
-power[1-*]_enable Enable or disable the sensors.
+`power[1-*]_enable`
+ Enable or disable the sensors.
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
Also see the Alarms section for status flags associated with power readings.
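+
+Several of the attributes above generate poll notifications (for example when
+power use crosses `power[1-*]_average_max`). Where a driver implements this,
+userspace can wait for the event with the usual sysfs pattern; a minimal
+sketch, with the device path chosen only for illustration::
+
+	#include <stdio.h>
+	#include <fcntl.h>
+	#include <poll.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		char buf[32];
+		ssize_t n;
+		struct pollfd pfd = { .events = POLLPRI | POLLERR };
+
+		pfd.fd = open("/sys/class/hwmon/hwmon0/power1_average", O_RDONLY);
+		if (pfd.fd < 0)
+			return 1;
+
+		/* Read once first; the next notification refers to new data. */
+		n = read(pfd.fd, buf, sizeof(buf));
+
+		if (poll(&pfd, 1, -1) > 0) {
+			lseek(pfd.fd, 0, SEEK_SET);
+			n = read(pfd.fd, buf, sizeof(buf));
+			if (n > 0)
+				printf("power1_average: %.*s", (int)n, buf);
+		}
+		close(pfd.fd);
+		return 0;
+	}
+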
-**********
-* Energy *
-**********
+******
+Energy
+******
+
+`energy[1-*]_input`
+ Cumulative energy use
-energy[1-*]_input Cumulative energy use
Unit: microJoule
+
RO
-energy[1-*]_enable Enable or disable the sensors.
+`energy[1-*]_enable`
+ Enable or disable the sensors.
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
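+
+Because `energy[1-*]_input` is cumulative and reported in microJoule, the
+average power over an interval follows from delta-energy / delta-time
+(1 uJ/s = 1 uW). A small sketch of that arithmetic; the path and the 10 s
+sampling interval are arbitrary examples::
+
+	#include <stdio.h>
+	#include <unistd.h>
+
+	static long long read_uj(const char *path)
+	{
+		long long uj = -1;
+		FILE *f = fopen(path, "r");
+
+		if (f) {
+			if (fscanf(f, "%lld", &uj) != 1)
+				uj = -1;
+			fclose(f);
+		}
+		return uj;
+	}
+
+	int main(void)
+	{
+		const char *p = "/sys/class/hwmon/hwmon0/energy1_input";
+		long long e1 = read_uj(p), e2;
+
+		sleep(10);
+		e2 = read_uj(p);
+		if (e1 >= 0 && e2 >= 0)
+			/* microJoule per second equals microWatt */
+			printf("average power: %lld uW\n", (e2 - e1) / 10);
+		return 0;
+	}
+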
-************
-* Humidity *
-************
+********
+Humidity
+********
+
+`humidity[1-*]_input`
+ Humidity
-humidity[1-*]_input Humidity
Unit: milli-percent (per cent mille, pcm)
+
RO
-humidity[1-*]_enable Enable or disable the sensors
+`humidity[1-*]_enable`
+ Enable or disable the sensors
+
When disabled the sensor read will return
-ENODATA.
- 1: Enable
- 0: Disable
+
+ - 1: Enable
+ - 0: Disable
+
RW
-**********
-* Alarms *
-**********
+******
+Alarms
+******
Each channel or limit may have an associated alarm file, containing a
boolean value. 1 means that an alarm condition exists, 0 means no alarm.
@@ -645,67 +888,67 @@ Usually a given chip will either use channel-related alarms, or
limit-related alarms, not both. The driver should just reflect the hardware
implementation.
-in[0-*]_alarm
-curr[1-*]_alarm
-power[1-*]_alarm
-fan[1-*]_alarm
-temp[1-*]_alarm
- Channel alarm
- 0: no alarm
- 1: alarm
- RO
-
-OR
-
-in[0-*]_min_alarm
-in[0-*]_max_alarm
-in[0-*]_lcrit_alarm
-in[0-*]_crit_alarm
-curr[1-*]_min_alarm
-curr[1-*]_max_alarm
-curr[1-*]_lcrit_alarm
-curr[1-*]_crit_alarm
-power[1-*]_cap_alarm
-power[1-*]_max_alarm
-power[1-*]_crit_alarm
-fan[1-*]_min_alarm
-fan[1-*]_max_alarm
-temp[1-*]_min_alarm
-temp[1-*]_max_alarm
-temp[1-*]_lcrit_alarm
-temp[1-*]_crit_alarm
-temp[1-*]_emergency_alarm
- Limit alarm
- 0: no alarm
- 1: alarm
- RO
++-------------------------------+-----------------------+
+| **`in[0-*]_alarm`, | Channel alarm |
+| `curr[1-*]_alarm`, | |
+| `power[1-*]_alarm`, | - 0: no alarm |
+| `fan[1-*]_alarm`, | - 1: alarm |
+| `temp[1-*]_alarm`** | |
+| | RO |
++-------------------------------+-----------------------+
+
+**OR**
+
++-------------------------------+-----------------------+
+| **`in[0-*]_min_alarm`, | Limit alarm |
+| `in[0-*]_max_alarm`, | |
+| `in[0-*]_lcrit_alarm`, | - 0: no alarm |
+| `in[0-*]_crit_alarm`, | - 1: alarm |
+| `curr[1-*]_min_alarm`, | |
+| `curr[1-*]_max_alarm`, | RO |
+| `curr[1-*]_lcrit_alarm`, | |
+| `curr[1-*]_crit_alarm`, | |
+| `power[1-*]_cap_alarm`, | |
+| `power[1-*]_max_alarm`, | |
+| `power[1-*]_crit_alarm`, | |
+| `fan[1-*]_min_alarm`, | |
+| `fan[1-*]_max_alarm`, | |
+| `temp[1-*]_min_alarm`, | |
+| `temp[1-*]_max_alarm`, | |
+| `temp[1-*]_lcrit_alarm`, | |
+| `temp[1-*]_crit_alarm`, | |
+| `temp[1-*]_emergency_alarm`** | |
++-------------------------------+-----------------------+
Each input channel may have an associated fault file. This can be used
to notify open diodes, unconnected fans etc. where the hardware
supports it. When this boolean has value 1, the measurement for that
channel should not be trusted.
-fan[1-*]_fault
-temp[1-*]_fault
+`fan[1-*]_fault` / `temp[1-*]_fault`
Input fault condition
- 0: no fault occurred
- 1: fault condition
+
+ - 0: no fault occurred
+ - 1: fault condition
+
RO
Some chips also offer the possibility to get beeped when an alarm occurs:
-beep_enable Master beep enable
- 0: no beeps
- 1: beeps
+`beep_enable`
+ Master beep enable
+
+ - 0: no beeps
+ - 1: beeps
+
RW
-in[0-*]_beep
-curr[1-*]_beep
-fan[1-*]_beep
-temp[1-*]_beep
+`in[0-*]_beep`, `curr[1-*]_beep`, `fan[1-*]_beep`, `temp[1-*]_beep`
Channel beep
- 0: disable
- 1: enable
+
+ - 0: disable
+ - 1: enable
+
RW
In theory, a chip could provide per-limit beep masking, but no such chip
@@ -715,56 +958,90 @@ Old drivers provided a different, non-standard interface to alarms and
beeps. These interface files are deprecated, but will be kept around
for compatibility reasons:
-alarms Alarm bitmask.
+`alarms`
+ Alarm bitmask.
+
RO
+
Integer representation of one to four bytes.
+
A '1' bit means an alarm.
+
Chips should be programmed for 'comparator' mode so that
the alarm will 'come back' after you read the register
if it is still valid.
+
Generally a direct representation of a chip's internal
alarm registers; there is no standard for the position
of individual bits. For this reason, the use of this
interface file for new drivers is discouraged. Use
- individual *_alarm and *_fault files instead.
+		individual `*_alarm` and `*_fault` files instead.
Bits are defined in kernel/include/sensors.h.
-beep_mask Bitmask for beep.
+`beep_mask`
+ Bitmask for beep.
Same format as 'alarms' with the same bit locations,
use discouraged for the same reason. Use individual
- *_beep files instead.
+ `*_beep` files instead.
RW
-***********************
-* Intrusion detection *
-***********************
+*******************
+Intrusion detection
+*******************
-intrusion[0-*]_alarm
+`intrusion[0-*]_alarm`
Chassis intrusion detection
- 0: OK
- 1: intrusion detected
+
+ - 0: OK
+ - 1: intrusion detected
+
RW
+
Contrary to regular alarm flags which clear themselves
automatically when read, this one sticks until cleared by
the user. This is done by writing 0 to the file. Writing
other values is unsupported.
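+
+	A minimal fragment of that clearing step (the device path is an
+	assumption, and the usual fcntl.h/unistd.h includes are omitted)::
+
+		int fd = open("/sys/class/hwmon/hwmon0/intrusion0_alarm", O_WRONLY);
+
+		if (fd >= 0) {
+			write(fd, "0", 1);	/* only writing 0 is supported */
+			close(fd);
+		}
+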
-intrusion[0-*]_beep
+`intrusion[0-*]_beep`
Chassis intrusion beep
+
0: disable
1: enable
+
RW
+****************************
+Average sample configuration
+****************************
+
+Devices allowing for reading {in,power,curr,temp}_average values may export
+attributes for controlling the number of samples used to compute the average.
+
++--------------+---------------------------------------------------------------+
+| samples | Sets number of average samples for all types of measurements. |
+| | |
+| | RW |
++--------------+---------------------------------------------------------------+
+| in_samples | Sets number of average samples for specific type of |
+| power_samples| measurements. |
+| curr_samples | |
+| temp_samples | Note that on some devices it won't be possible to set all of |
+| | them to different values so changing one might also change |
+| | some others. |
+| | |
+| | RW |
++--------------+---------------------------------------------------------------+
sysfs attribute writes interpretation
-------------------------------------
hwmon sysfs attributes always contain numbers, so the first thing to do is to
convert the input to a number; there are two ways to do this, depending on whether
-the number can be negative or not:
-unsigned long u = simple_strtoul(buf, NULL, 10);
-long s = simple_strtol(buf, NULL, 10);
+the number can be negative or not::
+
+ unsigned long u = simple_strtoul(buf, NULL, 10);
+ long s = simple_strtol(buf, NULL, 10);
Here buf is the buffer holding the user input, as passed in by the kernel.
Notice that we do not use the second argument of strto[u]l, and thus cannot
@@ -789,13 +1066,13 @@ limits using clamp_val(value, min_limit, max_limit). If it is not continuous
like for example a tempX_type, then when an invalid value is written,
-EINVAL should be returned.
-Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees):
+Example1, temp1_max, register is a signed 8 bit value (-128 - 127 degrees)::
long v = simple_strtol(buf, NULL, 10) / 1000;
v = clamp_val(v, -128, 127);
/* write v to register */
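+
+A fuller variant of Example1, as it might appear in a driver's store()
+callback, can use kstrtol() so that malformed input is rejected; this is
+only a sketch and the chip_* names are illustrative::
+
+	static ssize_t temp1_max_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+	{
+		struct chip_data *data = dev_get_drvdata(dev);
+		long v;
+		int err;
+
+		err = kstrtol(buf, 10, &v);
+		if (err)
+			return err;
+
+		v = clamp_val(v / 1000, -128, 127);	/* millidegrees -> degrees, clamp to register range */
+		err = chip_write_temp_max(data, v);	/* illustrative helper */
+		return err ? err : count;
+	}
+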
-Example2, fan divider setting, valid values 2, 4 and 8:
+Example2, fan divider setting, valid values 2, 4 and 8::
unsigned long v = simple_strtoul(buf, NULL, 10);
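+
+	/*
+	 * The valid-value check might continue as below; this is a sketch
+	 * added for illustration, not necessarily the original text.
+	 */
+	switch (v) {
+	case 2:
+	case 4:
+	case 8:
+		/* write v to register */
+		break;
+	default:
+		return -EINVAL;
+	}
+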
diff --git a/Documentation/hwmon/tc654 b/Documentation/hwmon/tc654.rst
index 47636a8077b4..ce546ee6dfed 100644
--- a/Documentation/hwmon/tc654
+++ b/Documentation/hwmon/tc654.rst
@@ -2,13 +2,16 @@ Kernel driver tc654
===================
Supported chips:
+
* Microchip TC654 and TC655
+
Prefix: 'tc654'
- Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
+    Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20001734C.pdf
Authors:
- Chris Packham <chris.packham@alliedtelesis.co.nz>
- Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
+ - Chris Packham <chris.packham@alliedtelesis.co.nz>
+ - Masahiko Iwamoto <iwamoto@allied-telesis.co.jp>
Description
-----------
diff --git a/Documentation/hwmon/tc74 b/Documentation/hwmon/tc74.rst
index 43027aad5f8e..f1764211c129 100644
--- a/Documentation/hwmon/tc74
+++ b/Documentation/hwmon/tc74.rst
@@ -2,8 +2,11 @@ Kernel driver tc74
====================
Supported chips:
+
* Microchip TC74
+
Prefix: 'tc74'
+
Datasheet: Publicly available at Microchip website.
Description
diff --git a/Documentation/hwmon/thmc50 b/Documentation/hwmon/thmc50.rst
index 8a7772ade8d0..cfff3885287d 100644
--- a/Documentation/hwmon/thmc50
+++ b/Documentation/hwmon/thmc50.rst
@@ -2,30 +2,41 @@ Kernel driver thmc50
=====================
Supported chips:
+
* Analog Devices ADM1022
+
Prefix: 'adm1022'
+
Addresses scanned: I2C 0x2c - 0x2e
+
Datasheet: http://www.analog.com/en/prod/0,2877,ADM1022,00.html
+
* Texas Instruments THMC50
+
Prefix: 'thmc50'
+
Addresses scanned: I2C 0x2c - 0x2e
- Datasheet: http://www.ti.com/
+
+ Datasheet: http://www.ti.com/
+
Author: Krzysztof Helt <krzysztof.h1@wp.pl>
This driver was derived from the 2.4 kernel thmc50.c source file.
Credits:
+
thmc50.c (2.4 kernel):
- Frodo Looijaard <frodol@dds.nl>
- Philip Edelbrock <phil@netroedge.com>
+
+ - Frodo Looijaard <frodol@dds.nl>
+ - Philip Edelbrock <phil@netroedge.com>
Module Parameters
-----------------
* adm1022_temp3: short array
- List of adapter,address pairs to force chips into ADM1022 mode with
- second remote temperature. This does not work for original THMC50 chips.
+ List of adapter,address pairs to force chips into ADM1022 mode with
+ second remote temperature. This does not work for original THMC50 chips.
Description
-----------
@@ -59,16 +70,20 @@ Driver Features
The driver provides up to three temperatures:
-temp1 -- internal
-temp2 -- remote
-temp3 -- 2nd remote only for ADM1022
+temp1
+ - internal
+temp2
+ - remote
+temp3
+ - 2nd remote only for ADM1022
-pwm1 -- fan speed (0 = stop, 255 = full)
-pwm1_mode -- always 0 (DC mode)
+pwm1
+ - fan speed (0 = stop, 255 = full)
+pwm1_mode
+ - always 0 (DC mode)
The value of 0 for pwm1 also forces the FAN_OFF signal from the chip,
so it stops the fans even when writing 0 to the ANALOG_OUT register does not.
The driver was tested on Compaq AP550 with two ADM1022 chips (one works
in the temp3 mode), five temperature readings and two fans.
-
diff --git a/Documentation/hwmon/tmp102 b/Documentation/hwmon/tmp102.rst
index 8454a7763122..b1f585531a88 100644
--- a/Documentation/hwmon/tmp102
+++ b/Documentation/hwmon/tmp102.rst
@@ -2,12 +2,17 @@ Kernel driver tmp102
====================
Supported chips:
+
* Texas Instruments TMP102
+
Prefix: 'tmp102'
+
Addresses scanned: none
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp102.html
Author:
+
Steven King <sfking@fdwdc.com>
Description
@@ -23,4 +28,4 @@ The TMP102 has a programmable update rate that can select between 8, 4, 1, and
0.5 Hz. (Currently the driver only supports the default of 4 Hz).
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
diff --git a/Documentation/hwmon/tmp103 b/Documentation/hwmon/tmp103.rst
index ec00a15645ba..15d25806d585 100644
--- a/Documentation/hwmon/tmp103
+++ b/Documentation/hwmon/tmp103.rst
@@ -2,12 +2,17 @@ Kernel driver tmp103
====================
Supported chips:
+
* Texas Instruments TMP103
+
Prefix: 'tmp103'
+
Addresses scanned: none
+
Product info and datasheet: http://www.ti.com/product/tmp103
Author:
+
Heiko Schocher <hs@denx.de>
Description
@@ -22,7 +27,7 @@ Resolution: 8 Bits
Accuracy: ±1°C Typ (–10°C to +100°C)
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
Please refer how to instantiate this driver:
Documentation/i2c/instantiating-devices
diff --git a/Documentation/hwmon/tmp108 b/Documentation/hwmon/tmp108.rst
index 25802df23010..5f4266a16cb2 100644
--- a/Documentation/hwmon/tmp108
+++ b/Documentation/hwmon/tmp108.rst
@@ -2,12 +2,17 @@ Kernel driver tmp108
====================
Supported chips:
+
* Texas Instruments TMP108
+
Prefix: 'tmp108'
+
Addresses scanned: none
+
Datasheet: http://www.ti.com/product/tmp108
Author:
+
John Muir <john@jmuir.com>
Description
@@ -33,4 +38,4 @@ and then the device is shut down automatically. (This driver only supports
continuous mode.)
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401.rst
index 2d9ca42213cf..6a05a0719bc7 100644
--- a/Documentation/hwmon/tmp401
+++ b/Documentation/hwmon/tmp401.rst
@@ -2,33 +2,59 @@ Kernel driver tmp401
====================
Supported chips:
+
* Texas Instruments TMP401
+
Prefix: 'tmp401'
+
Addresses scanned: I2C 0x4c
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp401.html
+
* Texas Instruments TMP411
+
Prefix: 'tmp411'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp411.html
+
* Texas Instruments TMP431
+
Prefix: 'tmp431'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp431.html
+
* Texas Instruments TMP432
+
Prefix: 'tmp432'
+
Addresses scanned: I2C 0x4c, 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
+
* Texas Instruments TMP435
+
Prefix: 'tmp435'
+
Addresses scanned: I2C 0x48 - 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
+
* Texas Instruments TMP461
+
Prefix: 'tmp461'
+
Datasheet: http://www.ti.com/product/tmp461
+
+
Authors:
- Hans de Goede <hdegoede@redhat.com>
- Andre Prendel <andre.prendel@gmx.de>
+
+ - Hans de Goede <hdegoede@redhat.com>
+ - Andre Prendel <andre.prendel@gmx.de>
Description
-----------
@@ -42,7 +68,7 @@ supported by the driver so far, so using the default resolution of 0.5
degree).
The driver provides the common sysfs-interface for temperatures (see
-Documentation/hwmon/sysfs-interface under Temperatures).
+Documentation/hwmon/sysfs-interface.rst under Temperatures).
The TMP411 and TMP431 chips are compatible with TMP401. TMP411 provides
some additional features.
diff --git a/Documentation/hwmon/tmp421 b/Documentation/hwmon/tmp421.rst
index 9e6fe5549ca1..1ba926a3605c 100644
--- a/Documentation/hwmon/tmp421
+++ b/Documentation/hwmon/tmp421.rst
@@ -2,28 +2,49 @@ Kernel driver tmp421
====================
Supported chips:
+
* Texas Instruments TMP421
+
Prefix: 'tmp421'
+
Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP422
+
Prefix: 'tmp422'
+
Addresses scanned: I2C 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP423
+
Prefix: 'tmp423'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp421.html
+
* Texas Instruments TMP441
+
Prefix: 'tmp441'
+
Addresses scanned: I2C 0x2a, 0x4c, 0x4d, 0x4e and 0x4f
+
Datasheet: http://www.ti.com/product/tmp441
+
* Texas Instruments TMP442
+
Prefix: 'tmp442'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: http://www.ti.com/product/tmp442
Authors:
+
Andre Prendel <andre.prendel@gmx.de>
Description
@@ -40,5 +61,6 @@ for both the local and remote channels is 0.0625 degree C.
The chips support only temperature measurement. The driver exports
the temperature values via the following sysfs files:
-temp[1-4]_input
-temp[2-4]_fault
+**temp[1-4]_input**
+
+**temp[2-4]_fault**
diff --git a/Documentation/hwmon/tps40422 b/Documentation/hwmon/tps40422.rst
index 24bb0688d515..b691e30479dd 100644
--- a/Documentation/hwmon/tps40422
+++ b/Documentation/hwmon/tps40422.rst
@@ -2,9 +2,13 @@ Kernel driver tps40422
======================
Supported chips:
+
* TI TPS40422
+
Prefix: 'tps40422'
+
Addresses scanned: -
+
Datasheet: http://www.ti.com/lit/gpn/tps40422
Author: Zhu Laiwen <richard.zhu@nsn.com>
@@ -17,7 +21,7 @@ This driver supports TI TPS40422 Dual-Output or Two-Phase Synchronous Buck
Controller with PMBus
The driver is a client driver to the core PMBus driver.
-Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+Please see Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -39,6 +43,7 @@ Sysfs entries
The following attributes are supported.
+======================= =======================================================
in[1-2]_label "vout[1-2]"
in[1-2]_input Measured voltage. From READ_VOUT register.
in[1-2]_alarm voltage alarm.
@@ -46,19 +51,23 @@ in[1-2]_alarm voltage alarm.
curr[1-2]_input Measured current. From READ_IOUT register.
curr[1-2]_label "iout[1-2]"
curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
+ register.
curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT status.
curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
curr2_alarm Current high alarm. From IOUT_OC_WARNING status.
-temp1_input Measured temperature. From READ_TEMPERATURE_2 register on page 0.
+temp1_input Measured temperature. From READ_TEMPERATURE_2 register
+ on page 0.
temp1_max Maximum temperature. From OT_WARN_LIMIT register.
temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
temp1_max_alarm Chip temperature high alarm. Set by comparing
- READ_TEMPERATURE_2 on page 0 with OT_WARN_LIMIT if TEMP_OT_WARNING
- status is set.
+ READ_TEMPERATURE_2 on page 0 with OT_WARN_LIMIT if
+ TEMP_OT_WARNING status is set.
temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
- READ_TEMPERATURE_2 on page 0 with OT_FAULT_LIMIT if TEMP_OT_FAULT
- status is set.
-temp2_input Measured temperature. From READ_TEMPERATURE_2 register on page 1.
+ READ_TEMPERATURE_2 on page 0 with OT_FAULT_LIMIT if
+ TEMP_OT_FAULT status is set.
+temp2_input Measured temperature. From READ_TEMPERATURE_2 register
+ on page 1.
temp2_alarm Chip temperature alarm on page 1.
+======================= =======================================================
diff --git a/Documentation/hwmon/twl4030-madc-hwmon b/Documentation/hwmon/twl4030-madc-hwmon.rst
index c3a3a5be10ad..22c885383b11 100644
--- a/Documentation/hwmon/twl4030-madc-hwmon
+++ b/Documentation/hwmon/twl4030-madc-hwmon.rst
@@ -1,8 +1,10 @@
Kernel driver twl4030-madc
-=========================
+==========================
Supported chips:
+
* Texas Instruments TWL4030
+
Prefix: 'twl4030-madc'
@@ -19,8 +21,9 @@ channels which can be used in different modes.
See this table for the meaning of the different channels
+======= ==========================================================
Channel Signal
-------------------------------------------
+======= ==========================================================
0 Battery type(BTYPE)
1 BCI: Battery temperature (BTEMP)
2 GP analog input
@@ -37,6 +40,7 @@ Channel Signal
13 Reserved
14 Reserved
15 VRUSB Supply/Speaker left/Speaker right polarization level
+======= ==========================================================
The Sysfs nodes will represent the voltage in the units of mV,
diff --git a/Documentation/hwmon/ucd9000 b/Documentation/hwmon/ucd9000.rst
index 262e713e60ff..ebc4f2b3bfea 100644
--- a/Documentation/hwmon/ucd9000
+++ b/Documentation/hwmon/ucd9000.rst
@@ -2,15 +2,20 @@ Kernel driver ucd9000
=====================
Supported chips:
+
* TI UCD90120, UCD90124, UCD90160, UCD9090, and UCD90910
+
Prefixes: 'ucd90120', 'ucd90124', 'ucd90160', 'ucd9090', 'ucd90910'
+
Addresses scanned: -
+
Datasheets:
- http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
- http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
+
+ - http://focus.ti.com/lit/ds/symlink/ucd90120.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90124.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90160.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9090.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd90910.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -52,7 +57,7 @@ system-health monitor. The device integrates a 12-bit ADC for monitoring up to
13 power-supply voltage, current, or temperature inputs.
This driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -67,7 +72,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -76,23 +81,28 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in[1-12]_label "vout[1-12]".
in[1-12]_input Measured voltage. From READ_VOUT register.
in[1-12]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[1-12]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[1-12]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[1-12]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-12]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[1-12]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[1-12]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[1-12]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[1-12]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[1-12]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[1-12]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
curr[1-12]_label "iout[1-12]".
curr[1-12]_input Measured current. From READ_IOUT register.
curr[1-12]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[1-12]_lcrit Critical minimum output current. From IOUT_UC_FAULT_LIMIT
+curr[1-12]_lcrit Critical minimum output current. From
+ IOUT_UC_FAULT_LIMIT register.
+curr[1-12]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
register.
-curr[1-12]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[1-12]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[1-12]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
@@ -116,3 +126,4 @@ fan[1-4]_fault Fan fault.
created only for enabled fans.
Note that even though UCD90910 supports up to 10 fans,
only up to four fans are currently supported.
+======================= ========================================================
diff --git a/Documentation/hwmon/ucd9200 b/Documentation/hwmon/ucd9200.rst
index 1e8060e631bd..b819dfd75f71 100644
--- a/Documentation/hwmon/ucd9200
+++ b/Documentation/hwmon/ucd9200.rst
@@ -2,18 +2,23 @@ Kernel driver ucd9200
=====================
Supported chips:
+
* TI UCD9220, UCD9222, UCD9224, UCD9240, UCD9244, UCD9246, and UCD9248
+
Prefixes: 'ucd9220', 'ucd9222', 'ucd9224', 'ucd9240', 'ucd9244', 'ucd9246',
- 'ucd9248'
+ 'ucd9248'
+
Addresses scanned: -
+
Datasheets:
- http://focus.ti.com/lit/ds/symlink/ucd9220.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9222.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9224.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9240.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9244.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
- http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
+
+ - http://focus.ti.com/lit/ds/symlink/ucd9220.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9222.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9224.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9240.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9244.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9246.pdf
+ - http://focus.ti.com/lit/ds/symlink/ucd9248.pdf
Author: Guenter Roeck <linux@roeck-us.net>
@@ -28,7 +33,7 @@ dedicated circuitry for DC/DC loop management with flash memory and a serial
interface to support configuration, monitoring and management.
This driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus for details on PMBus client drivers.
+Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Usage Notes
@@ -43,7 +48,7 @@ Platform data support
---------------------
The driver supports standard PMBus driver platform data. Please see
-Documentation/hwmon/pmbus for details.
+Documentation/hwmon/pmbus.rst for details.
Sysfs entries
@@ -52,12 +57,14 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vin".
in1_input Measured voltage. From READ_VIN register.
in1_min Minimum Voltage. From VIN_UV_WARN_LIMIT register.
in1_max Maximum voltage. From VIN_OV_WARN_LIMIT register.
in1_lcrit Critical minimum Voltage. VIN_UV_FAULT_LIMIT register.
-in1_crit Critical maximum voltage. From VIN_OV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VIN_OV_FAULT_LIMIT
+ register.
in1_min_alarm Voltage low alarm. From VIN_UV_WARNING status.
in1_max_alarm Voltage high alarm. From VIN_OV_WARNING status.
in1_lcrit_alarm Voltage critical low alarm. From VIN_UV_FAULT status.
@@ -68,11 +75,14 @@ in[2-5]_input Measured voltage. From READ_VOUT register.
in[2-5]_min Minimum Voltage. From VOUT_UV_WARN_LIMIT register.
in[2-5]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
in[2-5]_lcrit Critical minimum Voltage. VOUT_UV_FAULT_LIMIT register.
-in[2-5]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[2-5]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT
+ register.
in[2-5]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
in[2-5]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
-in[2-5]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
-in[2-5]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+in[2-5]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT
+ status.
+in[2-5]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT
+ status.
curr1_label "iin".
curr1_input Measured current. From READ_IIN register.
@@ -80,9 +90,10 @@ curr1_input Measured current. From READ_IIN register.
curr[2-5]_label "iout[1-4]".
curr[2-5]_input Measured current. From READ_IOUT register.
curr[2-5]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
-curr[2-5]_lcrit Critical minimum output current. From IOUT_UC_FAULT_LIMIT
+curr[2-5]_lcrit Critical minimum output current. From
+ IOUT_UC_FAULT_LIMIT register.
+curr[2-5]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT
register.
-curr[2-5]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
curr[2-5]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
curr[2-5]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
@@ -97,7 +108,7 @@ power[2-5]_label "pout[1-4]"
rails. See chip datasheets for details.
temp[1-5]_input Measured temperatures. From READ_TEMPERATURE_1 and
- READ_TEMPERATURE_2 registers.
+ READ_TEMPERATURE_2 registers.
temp1 is the chip internal temperature. temp[2-5] are
rail temperatures. temp[2-5] attributes are only
created for enabled rails. See chip datasheets for
@@ -110,3 +121,4 @@ temp[1-5]_crit_alarm Temperature critical high alarm.
fan1_input Fan RPM. ucd9240 only.
fan1_alarm Fan alarm. ucd9240 only.
fan1_fault Fan fault. ucd9240 only.
+======================= ========================================================
diff --git a/Documentation/hwmon/userspace-tools b/Documentation/hwmon/userspace-tools.rst
index 9865aeedc58f..bf3797c8e734 100644
--- a/Documentation/hwmon/userspace-tools
+++ b/Documentation/hwmon/userspace-tools.rst
@@ -1,3 +1,6 @@
+Userspace tools
+===============
+
Introduction
------------
diff --git a/Documentation/hwmon/vexpress b/Documentation/hwmon/vexpress.rst
index 557d6d5ad90d..8c861c8151ac 100644
--- a/Documentation/hwmon/vexpress
+++ b/Documentation/hwmon/vexpress.rst
@@ -2,14 +2,21 @@ Kernel driver vexpress
======================
Supported systems:
+
* ARM Ltd. Versatile Express platform
+
Prefix: 'vexpress'
+
Datasheets:
+
* "Hardware Description" sections of the Technical Reference Manuals
- for the Versatile Express boards:
- http://infocenter.arm.com/help/topic/com.arm.doc.subset.boards.express/index.html
+ for the Versatile Express boards:
+
+ - http://infocenter.arm.com/help/topic/com.arm.doc.subset.boards.express/index.html
+
* Section "4.4.14. System Configuration registers" of the V2M-P1 TRM:
- http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0447-/index.html
+
+ - http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0447-/index.html
Author: Pawel Moll
diff --git a/Documentation/hwmon/via686a b/Documentation/hwmon/via686a.rst
index e5f90ab5c48d..a343c35df740 100644
--- a/Documentation/hwmon/via686a
+++ b/Documentation/hwmon/via686a.rst
@@ -2,29 +2,35 @@ Kernel driver via686a
=====================
Supported chips:
+
* Via VT82C686A, VT82C686B Southbridge Integrated Hardware Monitor
+
Prefix: 'via686a'
+
Addresses scanned: ISA in PCI-space encoded address
+
Datasheet: On request through web form (http://www.via.com.tw/en/resources/download-center/)
Authors:
- Kyösti Mälkki <kmalkki@cc.hut.fi>,
- Mark D. Studebaker <mdsxyz123@yahoo.com>
- Bob Dougherty <bobd@stanford.edu>
- (Some conversion-factor data were contributed by
- Jonathan Teh Soon Yew <j.teh@iname.com>
- and Alex van Kaam <darkside@chello.nl>.)
+ - Kyösti Mälkki <kmalkki@cc.hut.fi>,
+ - Mark D. Studebaker <mdsxyz123@yahoo.com>
+ - Bob Dougherty <bobd@stanford.edu>
+    - (Some conversion-factor data were contributed by
+      Jonathan Teh Soon Yew <j.teh@iname.com>
+      and Alex van Kaam <darkside@chello.nl>.)
Module Parameters
-----------------
+======================= =======================================================
force_addr=0xaddr Set the I/O base address. Useful for boards that
- don't set the address in the BIOS. Look for a BIOS
- upgrade before resorting to this. Does not do a
- PCI force; the via686a must still be present in lspci.
- Don't use this unless the driver complains that the
- base address is not set.
- Example: 'modprobe via686a force_addr=0x6000'
+ don't set the address in the BIOS. Look for a BIOS
+ upgrade before resorting to this. Does not do a
+ PCI force; the via686a must still be present in lspci.
+ Don't use this unless the driver complains that the
+ base address is not set.
+ Example: 'modprobe via686a force_addr=0x6000'
+======================= =======================================================
Description
-----------
diff --git a/Documentation/hwmon/vt1211 b/Documentation/hwmon/vt1211.rst
index 77fa633b97a8..ddbcde7dd642 100644
--- a/Documentation/hwmon/vt1211
+++ b/Documentation/hwmon/vt1211.rst
@@ -2,9 +2,13 @@ Kernel driver vt1211
====================
Supported chips:
+
* VIA VT1211
+
Prefix: 'vt1211'
+
Addresses scanned: none, address read from Super-I/O config space
+
Datasheet: Provided by VIA upon request and under NDA
Authors: Juerg Haefliger <juergh@gmail.com>
@@ -19,14 +23,17 @@ technical support.
Module Parameters
-----------------
-* uch_config: int Override the BIOS default universal channel (UCH)
+
+* uch_config: int
+ Override the BIOS default universal channel (UCH)
configuration for channels 1-5.
Legal values are in the range of 0-31. Bit 0 maps to
UCH1, bit 1 maps to UCH2 and so on. Setting a bit to 1
enables the thermal input of that particular UCH and
setting a bit to 0 enables the voltage input.
-* int_mode: int Override the BIOS default temperature interrupt mode.
+* int_mode: int
+ Override the BIOS default temperature interrupt mode.
The only possible value is 0 which forces interrupt
mode 0. In this mode, any pending interrupt is cleared
when the status register is read but is regenerated as
@@ -55,8 +62,9 @@ connected to the PWM outputs of the VT1211 :-().
The following table shows the relationship between the vt1211 inputs and the
sysfs nodes.
+=============== ============== =========== ================================
Sensor Voltage Mode Temp Mode Default Use (from the datasheet)
------- ------------ --------- --------------------------------
+=============== ============== =========== ================================
Reading 1 temp1 Intel thermal diode
Reading 3 temp2 Internal thermal diode
UCH1/Reading2 in0 temp3 NTC type thermistor
@@ -65,6 +73,7 @@ UCH3 in2 temp5 VccP (processor core)
UCH4 in3 temp6 +5V
UCH5 in4 temp7 +12V
+3.3V in5 Internal VCC (+3.3V)
+=============== ============== =========== ================================
Voltage Monitoring
@@ -82,19 +91,22 @@ follows. And this is of course totally dependent on the actual board
implementation :-) You will have to find documentation for your own
motherboard and edit sensors.conf accordingly.
- Expected
+============= ====== ====== ========= ============
+ Expected
Voltage R1 R2 Divider Raw Value
------------------------------------------------
+============= ====== ====== ========= ============
+2.5V 2K 10K 1.2 2083 mV
-VccP --- --- 1.0 1400 mV (1)
+VccP --- --- 1.0 1400 mV [1]_
+5V 14K 10K 2.4 2083 mV
+12V 47K 10K 5.7 2105 mV
-+3.3V (int) 2K 3.4K 1.588 3300 mV (2)
++3.3V (int) 2K 3.4K 1.588 3300 mV [2]_
+3.3V (ext) 6.8K 10K 1.68 1964 mV
+============= ====== ====== ========= ============
+
+.. [1] Depending on the CPU (1.4V is for a VIA C3 Nehemiah).
-(1) Depending on the CPU (1.4V is for a VIA C3 Nehemiah).
-(2) R1 and R2 for 3.3V (int) are internal to the VT1211 chip and the driver
- performs the scaling and returns the properly scaled voltage value.
+.. [2] R1 and R2 for 3.3V (int) are internal to the VT1211 chip and the driver
+ performs the scaling and returns the properly scaled voltage value.
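+
+As a worked check of one row (assuming the usual divider arithmetic,
+divider = (R1 + R2) / R2 and raw = voltage / divider), the +12V line gives::
+
+	divider = (47K + 10K) / 10K = 5.7
+	raw     = 12000 mV / 5.7   ≈ 2105 mV
+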
Each measured voltage has an associated low and high limit which triggers an
alarm when crossed.
@@ -124,35 +136,37 @@ compute temp1 (@-Offset)/Gain, (@*Gain)+Offset
According to the VIA VT1211 BIOS porting guide, the following gain and offset
values should be used:
+=============== ======== ===========
Diode Type Offset Gain
----------- ------ ----
+=============== ======== ===========
Intel CPU 88.638 0.9528
- 65.000 0.9686 *)
+ 65.000 0.9686 [3]_
VIA C3 Ezra 83.869 0.9528
VIA C3 Ezra-T 73.869 0.9528
+=============== ======== ===========
-*) This is the formula from the lm_sensors 2.10.0 sensors.conf file. I don't
-know where it comes from or how it was derived, it's just listed here for
-completeness.
+.. [3] This is the formula from the lm_sensors 2.10.0 sensors.conf file. I don't
+ know where it comes from or how it was derived, it's just listed here for
+ completeness.
Temp3-temp7 support NTC thermistors. For these channels, the driver returns
the voltages as seen at the individual pins of UCH1-UCH5. The voltage at the
pin (Vpin) is formed by a voltage divider made of the thermistor (Rth) and a
-scaling resistor (Rs):
+scaling resistor (Rs)::
-Vpin = 2200 * Rth / (Rs + Rth) (2200 is the ADC max limit of 2200 mV)
+ Vpin = 2200 * Rth / (Rs + Rth) (2200 is the ADC max limit of 2200 mV)
The equation for the thermistor is as follows (google it if you want to know
-more about it):
+more about it)::
-Rth = Ro * exp(B * (1 / T - 1 / To)) (To is 298.15K (25C) and Ro is the
- nominal resistance at 25C)
+ Rth = Ro * exp(B * (1 / T - 1 / To)) (To is 298.15K (25C) and Ro is the
+ nominal resistance at 25C)
Mingling the above two equations and assuming Rs = Ro and B = 3435 yields the
-following formula for sensors.conf:
+following formula for sensors.conf::
-compute tempx 1 / (1 / 298.15 - (` (2200 / @ - 1)) / 3435) - 273.15,
- 2200 / (1 + (^ (3435 / 298.15 - 3435 / (273.15 + @))))
+ compute tempx 1 / (1 / 298.15 - (` (2200 / @ - 1)) / 3435) - 273.15,
+ 2200 / (1 + (^ (3435 / 298.15 - 3435 / (273.15 + @))))
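+
+Expressed in C instead of sensors.conf syntax, the same conversion (with the
+same assumptions Rs = Ro and B = 3435; an illustration only, not driver
+code)::
+
+	#include <math.h>
+
+	/* vpin_mv: thermistor channel reading in mV, 0 < vpin_mv < 2200 */
+	static double vt1211_ntc_temp_c(double vpin_mv)
+	{
+		double inv_t = 1.0 / 298.15 - log(2200.0 / vpin_mv - 1.0) / 3435.0;
+
+		return 1.0 / inv_t - 273.15;	/* Kelvin -> degrees Celsius */
+	}
+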
Fan Speed Control
@@ -176,31 +190,37 @@ registers in the VT1211 and programming one set is sufficient (actually only
the first set pwm1_auto_point[1-4]_temp is writable, the second set is
read-only).
+========================== =========================================
PWM Auto Point PWM Output Duty-Cycle
-------------------------------------------------
+========================== =========================================
pwm[1-2]_auto_point4_pwm full speed duty-cycle (hard-wired to 255)
pwm[1-2]_auto_point3_pwm high speed duty-cycle
pwm[1-2]_auto_point2_pwm low speed duty-cycle
pwm[1-2]_auto_point1_pwm off duty-cycle (hard-wired to 0)
+========================== =========================================
+========================== =================
Temp Auto Point Thermal Threshold
----------------------------------------------
+========================== =================
pwm[1-2]_auto_point4_temp full speed temp
pwm[1-2]_auto_point3_temp high speed temp
pwm[1-2]_auto_point2_temp low speed temp
pwm[1-2]_auto_point1_temp off temp
+========================== =================
Long story short, the controller implements the following algorithm to set the
PWM output duty-cycle based on the input temperature:
-Thermal Threshold Output Duty-Cycle
- (Rising Temp) (Falling Temp)
-----------------------------------------------------------
- full speed duty-cycle full speed duty-cycle
+=================== ======================= ========================
+Thermal Threshold Output Duty-Cycle Output Duty-Cycle
+ (Rising Temp) (Falling Temp)
+=================== ======================= ========================
+- full speed duty-cycle full speed duty-cycle
full speed temp
- high speed duty-cycle full speed duty-cycle
+- high speed duty-cycle full speed duty-cycle
high speed temp
- low speed duty-cycle high speed duty-cycle
+- low speed duty-cycle high speed duty-cycle
low speed temp
- off duty-cycle low speed duty-cycle
+- off duty-cycle low speed duty-cycle
off temp
+=================== ======================= ========================
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf.rst
index 735c42a85ead..74d19ef11e1f 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf.rst
@@ -2,45 +2,79 @@ Kernel driver w83627ehf
=======================
Supported chips:
+
* Winbond W83627EHF/EHG (ISA access ONLY)
+
Prefix: 'w83627ehf'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627DHG
+
Prefix: 'w83627dhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627DHG-P
+
Prefix: 'w83627dhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83627UHG
+
Prefix: 'w83627uhg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: available from www.nuvoton.com
+
* Winbond W83667HG
+
Prefix: 'w83667hg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: not available
+
* Winbond W83667HG-B
+
Prefix: 'w83667hg'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6775F/W83667HG-I
+
Prefix: 'nct6775'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
* Nuvoton NCT6776F
+
Prefix: 'nct6776'
+
Addresses scanned: ISA address retrieved from Super I/O registers
+
Datasheet: Available from Nuvoton upon request
+
Authors:
- Jean Delvare <jdelvare@suse.de>
- Yuan Mu (Winbond)
- Rudolf Marek <r.marek@assembler.cz>
- David Hubbard <david.c.hubbard@gmail.com>
- Gong Jun <JGong@nuvoton.com>
+
+ - Jean Delvare <jdelvare@suse.de>
+ - Yuan Mu (Winbond)
+ - Rudolf Marek <r.marek@assembler.cz>
+ - David Hubbard <david.c.hubbard@gmail.com>
+ - Gong Jun <JGong@nuvoton.com>
Description
-----------
@@ -85,25 +119,30 @@ predefined temperature range. If the temperature goes out of range, fan
is driven slower/faster to reach the predefined range again.
The mode works for fan1-fan4. Mapping of temperatures to pwm outputs is as
-follows:
+follows::
-temp1 -> pwm1
-temp2 -> pwm2
-temp3 -> pwm3 (not on 627UHG)
-prog -> pwm4 (not on 667HG and 667HG-B; the programmable setting is not
- supported by the driver)
+ temp1 -> pwm1
+ temp2 -> pwm2
+ temp3 -> pwm3 (not on 627UHG)
+ prog -> pwm4 (not on 667HG and 667HG-B; the programmable setting is not
+ supported by the driver)
/sys files
----------
-name - this is a standard hwmon device entry, it contains the name of
- the device (see the prefix in the list of supported devices at
- the top of this file)
+name
+ this is a standard hwmon device entry, it contains the name of
+ the device (see the prefix in the list of supported devices at
+ the top of this file)
+
+pwm[1-4]
+ this file stores PWM duty cycle or DC value (fan speed) in range:
-pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
0 (stop) to 255 (full)
-pwm[1-4]_enable - this file controls mode of fan/temperature control:
+pwm[1-4]_enable
+ this file controls mode of fan/temperature control:
+
* 1 Manual mode, write to pwm file any value 0-255 (full speed)
* 2 "Thermal Cruise" mode
* 3 "Fan Speed Cruise" mode
@@ -121,33 +160,43 @@ pwm[1-4]_enable - this file controls mode of fan/temperature control:
returned when reading pwm attributes is unrelated to SmartFan IV
operation.
-pwm[1-4]_mode - controls if output is PWM or DC level
- * 0 DC output (0 - 12v)
- * 1 PWM output
+pwm[1-4]_mode
+ controls if output is PWM or DC level
+
+ * 0 DC output (0 - 12v)
+ * 1 PWM output
Thermal Cruise mode
-------------------
If the temperature is in the range defined by:
-pwm[1-4]_target - set target temperature, unit millidegree Celsius
- (range 0 - 127000)
-pwm[1-4]_tolerance - tolerance, unit millidegree Celsius (range 0 - 15000)
+pwm[1-4]_target
+ set target temperature, unit millidegree Celsius
+ (range 0 - 127000)
+pwm[1-4]_tolerance
+ tolerance, unit millidegree Celsius (range 0 - 15000)
there are no changes to fan speed. Once the temperature leaves the interval,
fan speed increases (temp is higher) or decreases if lower than desired.
There are defined steps and times, but not exported by the driver yet.
-pwm[1-4]_min_output - minimum fan speed (range 1 - 255), when the temperature
- is below defined range.
-pwm[1-4]_stop_time - how many milliseconds [ms] must elapse to switch
- corresponding fan off. (when the temperature was below
- defined range).
-pwm[1-4]_start_output-minimum fan speed (range 1 - 255) when spinning up
-pwm[1-4]_step_output- rate of fan speed change (1 - 255)
-pwm[1-4]_stop_output- minimum fan speed (range 1 - 255) when spinning down
-pwm[1-4]_max_output - maximum fan speed (range 1 - 255), when the temperature
- is above defined range.
+pwm[1-4]_min_output
+ minimum fan speed (range 1 - 255), when the temperature
+ is below defined range.
+pwm[1-4]_stop_time
+ how many milliseconds [ms] must elapse to switch
+ corresponding fan off. (when the temperature was below
+ defined range).
+pwm[1-4]_start_output
+ minimum fan speed (range 1 - 255) when spinning up
+pwm[1-4]_step_output
+ rate of fan speed change (1 - 255)
+pwm[1-4]_stop_output
+ minimum fan speed (range 1 - 255) when spinning down
+pwm[1-4]_max_output
+ maximum fan speed (range 1 - 255), when the temperature
+ is above defined range.
Note: last six functions are influenced by other control bits, not yet exported
by the driver, so a change might not have any effect.
@@ -161,26 +210,35 @@ different power-on default values, but BIOS should already be loading
appropriate defaults. Note that bank selection must be performed as is currently
done in the driver for all register addresses.
-0x49: only on DHG, selects temperature source for AUX fan, CPU fan0
-0x4a: not completely documented for the EHF and the DHG documentation assigns
- different behavior to bits 7 and 6, including extending the temperature
- input selection to SmartFan I, not just SmartFan III. Testing on the EHF
- will reveal whether they are compatible or not.
-
-0x58: Chip ID: 0xa1=EHF 0xc1=DHG
-0x5e: only on DHG, has bits to enable "current mode" temperature detection and
- critical temperature protection
-0x45b: only on EHF, bit 3, vin4 alarm (EHF supports 10 inputs, only 9 on DHG)
-0x552: only on EHF, vin4
-0x558: only on EHF, vin4 high limit
-0x559: only on EHF, vin4 low limit
-0x6b: only on DHG, SYS fan critical temperature
-0x6c: only on DHG, CPU fan0 critical temperature
-0x6d: only on DHG, AUX fan critical temperature
-0x6e: only on DHG, CPU fan1 critical temperature
-
-0x50-0x55 and 0x650-0x657 are marked "Test Register" for the EHF, but "Reserved
- Register" for the DHG
+========================= =====================================================
+Register(s) Meaning
+========================= =====================================================
+0x49 only on DHG, selects temperature source for AUX fan,
+ CPU fan0
+0x4a not completely documented for the EHF and the DHG
+ documentation assigns different behavior to bits 7
+ and 6, including extending the temperature input
+ selection to SmartFan I, not just SmartFan III.
+ Testing on the EHF will reveal whether they are
+ compatible or not.
+0x58 Chip ID: 0xa1=EHF 0xc1=DHG
+0x5e only on DHG, has bits to enable "current mode"
+ temperature detection and critical temperature
+ protection
+0x45b only on EHF, bit 3, vin4 alarm (EHF supports 10
+ inputs, only 9 on DHG)
+0x552 only on EHF, vin4
+0x558 only on EHF, vin4 high limit
+0x559 only on EHF, vin4 low limit
+0x6b only on DHG, SYS fan critical temperature
+0x6c only on DHG, CPU fan0 critical temperature
+0x6d only on DHG, AUX fan critical temperature
+0x6e only on DHG, CPU fan1 critical temperature
+0x50-0x55 and 0x650-0x657 marked as:
+
+ - "Test Register" for the EHF
+ - "Reserved Register" for the DHG
+========================= =====================================================
The DHG also supports PECI, where the DHG queries Intel CPU temperatures, and
the ICH8 southbridge gets that data via PECI from the DHG, so that the
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf.rst
index 8432e1118173..d1406c28dee7 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf.rst
@@ -20,10 +20,10 @@ Supported chips:
Datasheet: Provided by Winbond on request(http://www.winbond.com/hq/enu)
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>,
- Bernhard C. Schrenk <clemy@clemy.org>
+ Frodo Looijaard <frodol@dds.nl>,
+ Philip Edelbrock <phil@netroedge.com>,
+ Mark Studebaker <mdsxyz123@yahoo.com>,
+ Bernhard C. Schrenk <clemy@clemy.org>
Module Parameters
-----------------
@@ -52,8 +52,8 @@ If you really want i2c accesses for these Super I/O chips,
use the w83781d driver. However this is not the preferred method
now that this ISA driver has been developed.
-The w83627_HF_ uses pins 110-106 as VID0-VID4. The w83627_THF_ uses the
-same pins as GPIO[0:4]. Technically, the w83627_THF_ does not support a
+The `w83627_HF_` uses pins 110-106 as VID0-VID4. The `w83627_THF_` uses the
+same pins as GPIO[0:4]. Technically, the `w83627_THF_` does not support a
VID reading. However the two chips have the identical 128 pin package. So,
it is possible or even likely for a w83627thf to have the VID signals routed
to these pins despite their not being labeled for that purpose. Therefore,
@@ -75,19 +75,23 @@ module parameter is gone for technical reasons. If you need this feature,
you can obtain the same result by using the isaset tool (part of
lm-sensors) before loading the driver:
-# Enter the Super I/O config space
-isaset -y -f 0x2e 0x87
-isaset -y -f 0x2e 0x87
+# Enter the Super I/O config space::
-# Select the hwmon logical device
-isaset -y 0x2e 0x2f 0x07 0x0b
+ isaset -y -f 0x2e 0x87
+ isaset -y -f 0x2e 0x87
-# Set the base I/O address (to 0x290 in this example)
-isaset -y 0x2e 0x2f 0x60 0x02
-isaset -y 0x2e 0x2f 0x61 0x90
+# Select the hwmon logical device::
-# Exit the Super-I/O config space
-isaset -y -f 0x2e 0xaa
+ isaset -y 0x2e 0x2f 0x07 0x0b
+
+# Set the base I/O address (to 0x290 in this example)::
+
+ isaset -y 0x2e 0x2f 0x60 0x02
+ isaset -y 0x2e 0x2f 0x61 0x90
+
+# Exit the Super-I/O config space::
+
+ isaset -y -f 0x2e 0xaa
The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but
0x4e/0x4f is also possible.
@@ -97,18 +101,23 @@ Voltage pin mapping
Here is a summary of the voltage pin mapping for the W83627THF. This
can be useful to convert data provided by board manufacturers into
-working libsensors configuration statements.
-
- W83627THF |
- Pin | Name | Register | Sysfs attribute
------------------------------------------------------
- 100 | CPUVCORE | 20h | in0
- 99 | VIN0 | 21h | in1
- 98 | VIN1 | 22h | in2
- 97 | VIN2 | 24h | in4
- 114 | AVCC | 23h | in3
- 61 | 5VSB | 50h (bank 5) | in7
- 74 | VBAT | 51h (bank 5) | in8
+working libsensors configuration statements:
+
+
+- W83627THF
+
+
+ ======== =============== =============== ===============
+ Pin Name Register Sysfs attribute
+ ======== =============== =============== ===============
+ 100 CPUVCORE 20h in0
+ 99 VIN0 21h in1
+ 98 VIN1 22h in2
+ 97 VIN2 24h in4
+ 114 AVCC 23h in3
+ 61 5VSB 50h (bank 5) in7
+ 74 VBAT 51h (bank 5) in8
+ ======== =============== =============== ===============
For other supported devices, you'll have to take the hard path and
look up the information in the datasheet yourself (and then add it
diff --git a/Documentation/hwmon/w83773g b/Documentation/hwmon/w83773g.rst
index 4cc6c0b8257f..cabaed391414 100644
--- a/Documentation/hwmon/w83773g
+++ b/Documentation/hwmon/w83773g.rst
@@ -1,13 +1,18 @@
Kernel driver w83773g
-====================
+=====================
Supported chips:
+
* Nuvoton W83773G
+
Prefix: 'w83773g'
+
Addresses scanned: I2C 0x4c and 0x4d
+
Datasheet: https://www.nuvoton.com/resource-files/W83773G_SG_DatasheetV1_2.pdf
Authors:
+
Lei YU <mine260309@gmail.com>
Description
@@ -27,7 +32,4 @@ Resolution for both the local and remote channels is 0.125 degree C.
The chip supports only temperature measurement. The driver exports
the temperature values via the following sysfs files:
-temp[1-3]_input
-temp[2-3]_fault
-temp[2-3]_offset
-update_interval
+**temp[1-3]_input, temp[2-3]_fault, temp[2-3]_offset, update_interval**
diff --git a/Documentation/hwmon/w83781d b/Documentation/hwmon/w83781d.rst
index 129b0a3b555b..f36d33dfb704 100644
--- a/Documentation/hwmon/w83781d
+++ b/Documentation/hwmon/w83781d.rst
@@ -2,44 +2,64 @@ Kernel driver w83781d
=====================
Supported chips:
+
* Winbond W83781D
+
Prefix: 'w83781d'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/w83781d.pdf
+
* Winbond W83782D
+
Prefix: 'w83782d'
+
Addresses scanned: I2C 0x28 - 0x2f, ISA 0x290 (8 I/O ports)
+
Datasheet: http://www.winbond.com
+
* Winbond W83783S
+
Prefix: 'w83783s'
+
Addresses scanned: I2C 0x2d
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/w83783s.pdf
+
* Asus AS99127F
+
Prefix: 'as99127f'
+
Addresses scanned: I2C 0x28 - 0x2f
+
Datasheet: Unavailable from Asus
+
+
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- Mark Studebaker <mdsxyz123@yahoo.com>
+
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Mark Studebaker <mdsxyz123@yahoo.com>
Module parameters
-----------------
* init int
- (default 1)
- Use 'init=0' to bypass initializing the chip.
- Try this if your computer crashes when you load the module.
+ (default 1)
+
+ Use 'init=0' to bypass initializing the chip.
+ Try this if your computer crashes when you load the module.
* reset int
- (default 0)
- The driver used to reset the chip on load, but does no more. Use
- 'reset=1' to restore the old behavior. Report if you need to do this.
+ (default 0)
+ The driver used to reset the chip on load, but does no more. Use
+ 'reset=1' to restore the old behavior. Report if you need to do this.
force_subclients=bus,caddr,saddr,saddr
This is used to force the i2c addresses for subclients of
- a certain chip. Typical usage is `force_subclients=0,0x2d,0x4a,0x4b'
+ a certain chip. Typical usage is `force_subclients=0,0x2d,0x4a,0x4b`
to force the subclients of chip 0x2d on bus 0 to i2c addresses
0x4a and 0x4b. This parameter is useful for certain Tyan boards.
@@ -54,12 +74,19 @@ There is quite some difference between these chips, but they are similar
enough that it was sensible to put them together in one driver.
The Asus chips are similar to an I2C-only W83782D.
-Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
-as99127f 7 3 0 3 0x31 0x12c3 yes no
-as99127f rev.2 (type_name = as99127f) 0x31 0x5ca3 yes no
-w83781d 7 3 0 3 0x10-1 0x5ca3 yes yes
-w83782d 9 3 2-4 3 0x30 0x5ca3 yes yes
-w83783s 5-6 3 2 1-2 0x40 0x5ca3 yes no
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| Chip | #vin | #fanin | #pwm | #temp | wchipid | vendid | i2c | ISA |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| as99127f | 7 | 3 | 0 | 3 | 0x31 | 0x12c3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| as99127f rev.2 (type_name = as99127f) | 0x31 | 0x5ca3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83781d | 7 | 3 | 0 | 3 | 0x10-1 | 0x5ca3 | yes | yes |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83782d | 9 | 3 | 2-4 | 3 | 0x30 | 0x5ca3 | yes | yes |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
+| w83783s | 5-6 | 3 | 2 | 1-2 | 0x40 | 0x5ca3 | yes | no |
++----------+---------+--------+-------+-------+---------+--------+------+-----+
Detection of these chips can sometimes be foiled because they can be in
an internal state that allows no clean access. If you know the address
@@ -124,22 +151,24 @@ or only the beeping for some alarms.
Individual alarm and beep bits:
-0x000001: in0
-0x000002: in1
-0x000004: in2
-0x000008: in3
-0x000010: temp1
-0x000020: temp2 (+temp3 on W83781D)
-0x000040: fan1
-0x000080: fan2
-0x000100: in4
-0x000200: in5
-0x000400: in6
-0x000800: fan3
-0x001000: chassis
-0x002000: temp3 (W83782D only)
-0x010000: in7 (W83782D only)
-0x020000: in8 (W83782D only)
+======== ==========================
+0x000001 in0
+0x000002 in1
+0x000004 in2
+0x000008 in3
+0x000010 temp1
+0x000020 temp2 (+temp3 on W83781D)
+0x000040 fan1
+0x000080 fan2
+0x000100 in4
+0x000200 in5
+0x000400 in6
+0x000800 fan3
+0x001000 chassis
+0x002000 temp3 (W83782D only)
+0x010000 in7 (W83782D only)
+0x020000 in8 (W83782D only)
+======== ==========================
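+
+For instance, a userspace fragment checking this bitmask through the
+deprecated `alarms` file (device path is an example; stdio.h assumed) could
+be::
+
+	unsigned long alarms;
+	FILE *f = fopen("/sys/class/hwmon/hwmon0/alarms", "r");
+
+	if (f && fscanf(f, "%lu", &alarms) == 1) {
+		if (alarms & 0x000020)
+			printf("temp2 alarm\n");
+		if (alarms & 0x001000)
+			printf("chassis intrusion\n");
+	}
+	if (f)
+		fclose(f);
+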
If an alarm triggers, it will remain triggered until the hardware register
is read at least once. This means that the cause for the alarm may
@@ -179,68 +208,74 @@ Please do not send mail to the author or the sensors group asking for
a datasheet or ideas on how to convince Asus. We can't help.
-NOTES:
+NOTES
-----
783s has no in1 so that in[2-6] are compatible with the 781d/782d.
783s pin is programmable for -5V or temp1; defaults to -5V,
- no control in driver so temp1 doesn't work.
+ no control in driver so temp1 doesn't work.
782d and 783s datasheets differ on which is pwm1 and which is pwm2.
- We chose to follow 782d.
+ We chose to follow 782d.
782d and 783s pin is programmable for fan3 input or pwm2 output;
- defaults to fan3 input.
- If pwm2 is enabled (with echo 255 1 > pwm2), then
- fan3 will report 0.
+ defaults to fan3 input.
+ If pwm2 is enabled (with echo 255 1 > pwm2), then
+ fan3 will report 0.
782d has pwm1-2 for ISA, pwm1-4 for i2c. (pwm3-4 share pins with
- the ISA pins)
+ the ISA pins)
-Data sheet updates:
+Data sheet updates
------------------
- PWM clock registers:
-
- 000: master / 512
- 001: master / 1024
- 010: master / 2048
- 011: master / 4096
- 100: master / 8192
+ PWM clock registers:
+
+ * 000: master / 512
+ * 001: master / 1024
+ * 010: master / 2048
+ * 011: master / 4096
+ * 100: master / 8192
Answers from Winbond tech support
---------------------------------
->
-> 1) In the W83781D data sheet section 7.2 last paragraph, it talks about
-> reprogramming the R-T table if the Beta of the thermistor is not
-> 3435K. The R-T table is described briefly in section 8.20.
-> What formulas do I use to program a new R-T table for a given Beta?
->
- We are sorry that the calculation for R-T table value is
-confidential. If you have another Beta value of thermistor, we can help
-to calculate the R-T table for you. But you should give us real R-T
-Table which can be gotten by thermistor vendor. Therefore we will calculate
-them and obtain 32-byte data, and you can fill the 32-byte data to the
-register in Bank0.CR51 of W83781D.
+::
+
+ >
+ > 1) In the W83781D data sheet section 7.2 last paragraph, it talks about
+ > reprogramming the R-T table if the Beta of the thermistor is not
+ > 3435K. The R-T table is described briefly in section 8.20.
+ > What formulas do I use to program a new R-T table for a given Beta?
+ >
+
+ We are sorry that the calculation for R-T table value is
+ confidential. If you have another Beta value of thermistor, we can help
+ to calculate the R-T table for you. But you should give us real R-T
+ Table which can be gotten by thermistor vendor. Therefore we will calculate
+ them and obtain 32-byte data, and you can fill the 32-byte data to the
+ register in Bank0.CR51 of W83781D.
-> 2) In the W83782D data sheet, it mentions that pins 38, 39, and 40 are
-> programmable to be either thermistor or Pentium II diode inputs.
-> How do I program them for diode inputs? I can't find any register
-> to program these to be diode inputs.
- --> You may program Bank0 CR[5Dh] and CR[59h] registers.
- CR[5Dh] bit 1(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
+ > 2) In the W83782D data sheet, it mentions that pins 38, 39, and 40 are
+ > programmable to be either thermistor or Pentium II diode inputs.
+ > How do I program them for diode inputs? I can't find any register
+ > to program these to be diode inputs.
- thermistor 0 0 0
- diode 1 1 1
+ You may program Bank0 CR[5Dh] and CR[59h] registers.
+ =============================== =============== ============== ============
+ CR[5Dh] bit 1(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
-(error) CR[59h] bit 4(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
-(right) CR[59h] bit 4(VTIN1) bit 5(VTIN2) bit 6(VTIN3)
+ thermistor 0 0 0
+ diode 1 1 1
- PII thermal diode 1 1 1
- 2N3904 diode 0 0 0
+
+ (error) CR[59h] bit 4(VTIN1) bit 2(VTIN2) bit 3(VTIN3)
+ (right) CR[59h] bit 4(VTIN1) bit 5(VTIN2) bit 6(VTIN3)
+
+ PII thermal diode 1 1 1
+ 2N3904 diode 0 0 0
+ =============================== =============== ============== ============
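Putting the corrected register description into code: selecting the Pentium II thermal
diode on all three VTIN inputs means setting bits 1-3 of Bank0 CR[5Dh] and bits 4-6 of
Bank0 CR[59h]. This is only a bit-manipulation sketch; reading and writing the registers
over I2C (and the bank selection) is not shown::

  #include <stdio.h>

  /* Diode mode: CR[5Dh] bits 1-3 = 1; PII diode type: CR[59h] bits 4-6 = 1. */
  static void vtin_pii_diode(unsigned char *cr5d, unsigned char *cr59)
  {
      *cr5d |= (1 << 1) | (1 << 2) | (1 << 3);
      *cr59 |= (1 << 4) | (1 << 5) | (1 << 6);
  }

  int main(void)
  {
      unsigned char cr5d = 0x00, cr59 = 0x00;  /* pretend current values */

      vtin_pii_diode(&cr5d, &cr59);
      printf("CR[5Dh]=0x%02x CR[59h]=0x%02x\n", cr5d, cr59);
      return 0;
  }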
Asus Clones
@@ -251,18 +286,21 @@ Here are some very useful information that were given to us by Alex Van
Kaam about how to detect these chips, and how to read their values. He
also gives advice for another Asus chipset, the Mozart-2 (which we
don't support yet). Thanks Alex!
+
I reworded some parts and added personal comments.
-# Detection:
+Detection
+^^^^^^^^^
AS99127F rev.1, AS99127F rev.2 and ASB100:
- I2C address range: 0x29 - 0x2F
-- If register 0x58 holds 0x31 then we have an Asus (either ASB100 or
- AS99127F)
+- If register 0x58 holds 0x31 then we have an Asus (either ASB100 or AS99127F)
- Which one depends on register 0x4F (manufacturer ID):
- 0x06 or 0x94: ASB100
- 0x12 or 0xC3: AS99127F rev.1
- 0x5C or 0xA3: AS99127F rev.2
+
+ - 0x06 or 0x94: ASB100
+ - 0x12 or 0xC3: AS99127F rev.1
+ - 0x5C or 0xA3: AS99127F rev.2
+
Note that 0x5CA3 is Winbond's ID (WEC), which let us think Asus get their
AS99127F rev.2 direct from Winbond. The other codes mean ATT and DVC,
respectively. ATT could stand for Asustek something (although it would be
@@ -273,88 +311,103 @@ Mozart-2:
- I2C address: 0x77
- If register 0x58 holds 0x56 or 0x10 then we have a Mozart-2
- Of the Mozart there are 3 types:
- 0x58=0x56, 0x4E=0x94, 0x4F=0x36: Asus ASM58 Mozart-2
- 0x58=0x56, 0x4E=0x94, 0x4F=0x06: Asus AS2K129R Mozart-2
- 0x58=0x10, 0x4E=0x5C, 0x4F=0xA3: Asus ??? Mozart-2
+
+ - 0x58=0x56, 0x4E=0x94, 0x4F=0x36: Asus ASM58 Mozart-2
+ - 0x58=0x56, 0x4E=0x94, 0x4F=0x06: Asus AS2K129R Mozart-2
+ - 0x58=0x10, 0x4E=0x5C, 0x4F=0xA3: Asus ??? Mozart-2
+
You can handle all 3 the exact same way :)
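The detection rules above boil down to a couple of register compares. The helper below
is purely illustrative (it assumes the bytes at 0x58 and 0x4F have already been read
from the device) and is not the driver's actual detection code::

  #include <stdio.h>

  static const char *classify_asus(unsigned char reg58, unsigned char reg4f)
  {
      if (reg58 != 0x31)
          return "not an ASB100/AS99127F";

      switch (reg4f) {          /* manufacturer ID */
      case 0x06: case 0x94:
          return "ASB100";
      case 0x12: case 0xC3:
          return "AS99127F rev.1";
      case 0x5C: case 0xA3:
          return "AS99127F rev.2";
      default:
          return "unknown Asus variant";
      }
  }

  int main(void)
  {
      printf("%s\n", classify_asus(0x31, 0xC3));  /* -> AS99127F rev.1 */
      return 0;
  }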
-# Temperature sensors:
+Temperature sensors
+^^^^^^^^^^^^^^^^^^^
ASB100:
-- sensor 1: register 0x27
-- sensor 2 & 3 are the 2 LM75's on the SMBus
-- sensor 4: register 0x17
-Remark: I noticed that on Intel boards sensor 2 is used for the CPU
+ - sensor 1: register 0x27
+ - sensor 2 & 3 are the 2 LM75's on the SMBus
+ - sensor 4: register 0x17
+
+Remark:
+
+ I noticed that on Intel boards sensor 2 is used for the CPU
and 4 is ignored/stuck, on AMD boards sensor 4 is the CPU and sensor 2 is
either ignored or a socket temperature.
AS99127F (rev.1 and 2 alike):
-- sensor 1: register 0x27
-- sensor 2 & 3 are the 2 LM75's on the SMBus
-Remark: Register 0x5b is suspected to be temperature type selector. Bit 1
+ - sensor 1: register 0x27
+ - sensor 2 & 3 are the 2 LM75's on the SMBus
+
+Remark:
+
+ Register 0x5b is suspected to be temperature type selector. Bit 1
would control temp1, bit 3 temp2 and bit 5 temp3.
Mozart-2:
-- sensor 1: register 0x27
-- sensor 2: register 0x13
+ - sensor 1: register 0x27
+ - sensor 2: register 0x13
-# Fan sensors:
+Fan sensors
+^^^^^^^^^^^
ASB100, AS99127F (rev.1 and 2 alike):
-- 3 fans, identical to the W83781D
+ - 3 fans, identical to the W83781D
Mozart-2:
-- 2 fans only, 1350000/RPM/div
-- fan 1: register 0x28, divisor on register 0xA1 (bits 4-5)
-- fan 2: register 0x29, divisor on register 0xA1 (bits 6-7)
+ - 2 fans only, 1350000/RPM/div
+ - fan 1: register 0x28, divisor on register 0xA1 (bits 4-5)
+ - fan 2: register 0x29, divisor on register 0xA1 (bits 6-7)
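The Mozart-2 fan readings convert to RPM the usual Winbond way, RPM = 1350000 /
(count * divisor). A minimal sketch with made-up register values::

  #include <stdio.h>

  /* count is the raw fan register value, divisor comes from register 0xA1. */
  static long fan_rpm(unsigned char count, unsigned int divisor)
  {
      if (count == 0 || count == 0xFF)  /* treat a saturated counter as stopped */
          return 0;
      return 1350000L / ((long)count * divisor);
  }

  int main(void)
  {
      printf("fan1: %ld RPM\n", fan_rpm(0x4E, 2));  /* 78 * 2 -> about 8653 RPM */
      return 0;
  }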
-# Voltages:
+Voltages
+^^^^^^^^
This is where there is a difference between AS99127F rev.1 and 2.
-Remark: The difference is similar to the difference between
+
+Remark:
+
+ The difference is similar to the difference between
W83781D and W83782D.
ASB100:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=r(0x25)*(-0.016)*3.97
-in6=r(0x26)*(-0.016)*1.666
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=r(0x25)*(-0.016)*3.97
+ - in6=r(0x26)*(-0.016)*1.666
AS99127F rev.1:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=r(0x25)*(-0.016)*3.97
-in6=r(0x26)*(-0.016)*1.503
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=r(0x25)*(-0.016)*3.97
+ - in6=r(0x26)*(-0.016)*1.503
AS99127F rev.2:
-in0=r(0x20)*0.016
-in1=r(0x21)*0.016
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*3.8
-in5=(r(0x25)*0.016-3.6)*5.14+3.6
-in6=(r(0x26)*0.016-3.6)*3.14+3.6
+ - in0=r(0x20)*0.016
+ - in1=r(0x21)*0.016
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*3.8
+ - in5=(r(0x25)*0.016-3.6)*5.14+3.6
+ - in6=(r(0x26)*0.016-3.6)*3.14+3.6
Mozart-2:
-in0=r(0x20)*0.016
-in1=255
-in2=r(0x22)*0.016
-in3=r(0x23)*0.016*1.68
-in4=r(0x24)*0.016*4
-in5=255
-in6=255
+ - in0=r(0x20)*0.016
+ - in1=255
+ - in2=r(0x22)*0.016
+ - in3=r(0x23)*0.016*1.68
+ - in4=r(0x24)*0.016*4
+ - in5=255
+ - in6=255
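All of the formulas above are simple scalings of the raw 8-bit register value ``r``.
For instance, the ASB100 set could be coded like this (an illustrative helper with a
made-up sample reading, not driver code)::

  #include <stdio.h>

  /* ASB100 scaling factors from the list above; r is the raw register value. */
  static double asb100_in(int channel, unsigned char r)
  {
      switch (channel) {
      case 0: case 1: case 2:
          return r * 0.016;
      case 3:
          return r * 0.016 * 1.68;
      case 4:
          return r * 0.016 * 3.8;
      case 5:
          return r * -0.016 * 3.97;   /* negative rail */
      case 6:
          return r * -0.016 * 1.666;  /* negative rail */
      default:
          return 0.0;
      }
  }

  int main(void)
  {
      printf("in0 = %.3f V\n", asb100_in(0, 0xD0));  /* 208 * 0.016 = 3.328 V */
      return 0;
  }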
-# PWM
+PWM
+^^^
* Additional info about PWM on the AS99127F (may apply to other Asus
-chips as well) by Jean Delvare as of 2004-04-09:
+ chips as well) by Jean Delvare as of 2004-04-09:
AS99127F revision 2 seems to have two PWM registers at 0x59 and 0x5A,
and a temperature sensor type selector at 0x5B (which basically means
@@ -401,15 +454,20 @@ AS99127F chips at all.
I've been fiddling around with the (in)famous 0x59 register and
found out the following values do work as a form of coarse pwm:
-0x80 - seems to turn fans off after some time(1-2 minutes)... might be
-some form of auto-fan-control based on temp? hmm (Qfan? this mobo is an
-old ASUS, it isn't marketed as Qfan. Maybe some beta pre-attempt at Qfan
-that was dropped at the BIOS)
-0x81 - off
-0x82 - slightly "on-ner" than off, but my fans do not get to move. I can
-hear the high-pitched PWM sound that motors give off at too-low-pwm.
-0x83 - now they do move. Estimate about 70% speed or so.
-0x84-0x8f - full on
+0x80
+ - seems to turn fans off after some time (1-2 minutes)... might be
+ some form of auto-fan-control based on temp? hmm (Qfan? this mobo is an
+ old ASUS, it isn't marketed as Qfan. Maybe some beta pre-attempt at Qfan
+ that was dropped at the BIOS)
+0x81
+ - off
+0x82
+ - slightly "on-ner" than off, but my fans do not get to move. I can
+ hear the high-pitched PWM sound that motors give off at too-low-pwm.
+0x83
+ - now they do move. Estimate about 70% speed or so.
+0x84-0x8f
+ - full on
Changing the high nibble doesn't seem to do much except the high bit
(0x80) must be set for PWM to work, else the current pwm doesn't seem to
@@ -435,6 +493,7 @@ looks like PWM is filtered on this motherboard.
Here are some of measurements:
+==== =========
0x80 20 mV
0x81 20 mV
0x82 232 mV
@@ -451,3 +510,4 @@ Here are some of measurements:
0x8d 12.4 V
0x8e 12.4 V
0x8f 12.4 V
+==== =========
diff --git a/Documentation/hwmon/w83791d b/Documentation/hwmon/w83791d.rst
index f4021a285460..3adaed39b157 100644
--- a/Documentation/hwmon/w83791d
+++ b/Documentation/hwmon/w83791d.rst
@@ -2,9 +2,13 @@ Kernel driver w83791d
=====================
Supported chips:
+
* Winbond W83791D
+
Prefix: 'w83791d'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83791D_W83791Gb.pdf
Author: Charles Spirakis <bezaur@gmail.com>
@@ -12,39 +16,46 @@ Author: Charles Spirakis <bezaur@gmail.com>
This driver was derived from the w83781d.c and w83792d.c source files.
Credits:
+
w83781d.c:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
- and Mark Studebaker <mdsxyz123@yahoo.com>
+
+ - Frodo Looijaard <frodol@dds.nl>,
+ - Philip Edelbrock <phil@netroedge.com>,
+ - Mark Studebaker <mdsxyz123@yahoo.com>
+
w83792d.c:
- Shane Huang (Winbond),
- Rudolf Marek <r.marek@assembler.cz>
+
+ - Shane Huang (Winbond),
+ - Rudolf Marek <r.marek@assembler.cz>
Additional contributors:
- Sven Anders <anders@anduras.de>
- Marc Hulsman <m.hulsman@tudelft.nl>
+
+ - Sven Anders <anders@anduras.de>
+ - Marc Hulsman <m.hulsman@tudelft.nl>
Module Parameters
-----------------
* init boolean
- (default 0)
- Use 'init=1' to have the driver do extra software initializations.
- The default behavior is to do the minimum initialization possible
- and depend on the BIOS to properly setup the chip. If you know you
- have a w83791d and you're having problems, try init=1 before trying
- reset=1.
+ (default 0)
+
+ Use 'init=1' to have the driver do extra software initializations.
+ The default behavior is to do the minimum initialization possible
+ and depend on the BIOS to properly setup the chip. If you know you
+ have a w83791d and you're having problems, try init=1 before trying
+ reset=1.
* reset boolean
- (default 0)
- Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
- behavior is no chip reset to preserve BIOS settings.
+ (default 0)
+
+ Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
+ behavior is no chip reset to preserve BIOS settings.
* force_subclients=bus,caddr,saddr,saddr
- This is used to force the i2c addresses for subclients of
- a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
Description
@@ -91,11 +102,11 @@ This file is used for both legacy and new code.
The sysfs interface to the beep bitmask has migrated from the original legacy
method of a single sysfs beep_mask file to a newer method using multiple
-*_beep files as described in .../Documentation/hwmon/sysfs-interface.
+`*_beep` files as described in `Documentation/hwmon/sysfs-interface.rst`.
A similar change has occurred for the bitmap corresponding to the alarms. The
original legacy method used a single sysfs alarms file containing a bitmap
-of triggered alarms. The newer method uses multiple sysfs *_alarm files
+of triggered alarms. The newer method uses multiple sysfs `*_alarm` files
(again following the pattern described in sysfs-interface).
Since both methods read and write the underlying hardware, they can be used
@@ -116,46 +127,54 @@ User mode code requesting values more often will receive cached values.
The sysfs-interface is documented in the 'sysfs-interface' file. Only
chip-specific options are documented here.
-pwm[1-3]_enable - this file controls mode of fan/temperature control for
+======================= =======================================================
+pwm[1-3]_enable this file controls mode of fan/temperature control for
fan 1-3. Fan/PWM 4-5 only support manual mode.
- * 1 Manual mode
- * 2 Thermal Cruise mode
- * 3 Fan Speed Cruise mode (no further support)
-temp[1-3]_target - defines the target temperature for Thermal Cruise mode.
+ * 1 Manual mode
+ * 2 Thermal Cruise mode
+ * 3 Fan Speed Cruise mode (no further support)
+
+temp[1-3]_target defines the target temperature for Thermal Cruise mode.
Unit: millidegree Celsius
RW
-temp[1-3]_tolerance - temperature tolerance for Thermal Cruise mode.
+temp[1-3]_tolerance temperature tolerance for Thermal Cruise mode.
Specifies an interval around the target temperature
in which the fan speed is not changed.
Unit: millidegree Celsius
RW
+======================= =======================================================
Alarms bitmap vs. beep_mask bitmask
-------------------------------------
+-----------------------------------
+
For legacy code using the alarms and beep_mask files:
-in0 (VCORE) : alarms: 0x000001 beep_mask: 0x000001
-in1 (VINR0) : alarms: 0x000002 beep_mask: 0x002000 <== mismatch
-in2 (+3.3VIN): alarms: 0x000004 beep_mask: 0x000004
-in3 (5VDD) : alarms: 0x000008 beep_mask: 0x000008
-in4 (+12VIN) : alarms: 0x000100 beep_mask: 0x000100
-in5 (-12VIN) : alarms: 0x000200 beep_mask: 0x000200
-in6 (-5VIN) : alarms: 0x000400 beep_mask: 0x000400
-in7 (VSB) : alarms: 0x080000 beep_mask: 0x010000 <== mismatch
-in8 (VBAT) : alarms: 0x100000 beep_mask: 0x020000 <== mismatch
-in9 (VINR1) : alarms: 0x004000 beep_mask: 0x004000
-temp1 : alarms: 0x000010 beep_mask: 0x000010
-temp2 : alarms: 0x000020 beep_mask: 0x000020
-temp3 : alarms: 0x002000 beep_mask: 0x000002 <== mismatch
-fan1 : alarms: 0x000040 beep_mask: 0x000040
-fan2 : alarms: 0x000080 beep_mask: 0x000080
-fan3 : alarms: 0x000800 beep_mask: 0x000800
-fan4 : alarms: 0x200000 beep_mask: 0x200000
-fan5 : alarms: 0x400000 beep_mask: 0x400000
-tart1 : alarms: 0x010000 beep_mask: 0x040000 <== mismatch
-tart2 : alarms: 0x020000 beep_mask: 0x080000 <== mismatch
-tart3 : alarms: 0x040000 beep_mask: 0x100000 <== mismatch
-case_open : alarms: 0x001000 beep_mask: 0x001000
-global_enable: alarms: -------- beep_mask: 0x800000 (modified via beep_enable)
+============= ======== ========= ==========================
+Signal Alarms beep_mask Obs
+============= ======== ========= ==========================
+in0 (VCORE) 0x000001 0x000001
+in1 (VINR0) 0x000002 0x002000 <== mismatch
+in2 (+3.3VIN) 0x000004 0x000004
+in3 (5VDD) 0x000008 0x000008
+in4 (+12VIN) 0x000100 0x000100
+in5 (-12VIN) 0x000200 0x000200
+in6 (-5VIN) 0x000400 0x000400
+in7 (VSB) 0x080000 0x010000 <== mismatch
+in8 (VBAT) 0x100000 0x020000 <== mismatch
+in9 (VINR1) 0x004000 0x004000
+temp1 0x000010 0x000010
+temp2 0x000020 0x000020
+temp3 0x002000 0x000002 <== mismatch
+fan1 0x000040 0x000040
+fan2 0x000080 0x000080
+fan3 0x000800 0x000800
+fan4 0x200000 0x200000
+fan5 0x400000 0x400000
+tart1 0x010000 0x040000 <== mismatch
+tart2 0x020000 0x080000 <== mismatch
+tart3 0x040000 0x100000 <== mismatch
+case_open 0x001000 0x001000
+global_enable - 0x800000 (modified via beep_enable)
+============= ======== ========= ==========================
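Legacy code that wants to know whether a triggered alarm will also beep has to translate
between the two bit layouts; a partial lookup table of the mismatched signals from the
table above makes the point (illustrative only, not part of the driver)::

  #include <stdio.h>

  static const struct {
      const char *signal;
      unsigned long alarm_bit;
      unsigned long beep_bit;
  } w83791d_mismatches[] = {
      { "in1 (VINR0)", 0x000002, 0x002000 },
      { "in7 (VSB)",   0x080000, 0x010000 },
      { "in8 (VBAT)",  0x100000, 0x020000 },
      { "temp3",       0x002000, 0x000002 },
      { "tart1",       0x010000, 0x040000 },
      { "tart2",       0x020000, 0x080000 },
      { "tart3",       0x040000, 0x100000 },
  };

  static void report(unsigned long alarms, unsigned long beep_mask)
  {
      size_t i;

      for (i = 0; i < sizeof(w83791d_mismatches) / sizeof(w83791d_mismatches[0]); i++)
          if (alarms & w83791d_mismatches[i].alarm_bit)
              printf("%s: alarm set, beep %s\n", w83791d_mismatches[i].signal,
                     (beep_mask & w83791d_mismatches[i].beep_bit) ? "on" : "off");
  }

  int main(void)
  {
      report(0x002000, 0x000002);  /* made-up values: temp3 alarm, temp3 beep enabled */
      return 0;
  }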
diff --git a/Documentation/hwmon/w83792d b/Documentation/hwmon/w83792d.rst
index f2ffc402ea45..92c4bfe4968c 100644
--- a/Documentation/hwmon/w83792d
+++ b/Documentation/hwmon/w83792d.rst
@@ -2,9 +2,13 @@ Kernel driver w83792d
=====================
Supported chips:
+
* Winbond W83792D
+
Prefix: 'w83792d'
+
Addresses scanned: I2C 0x2c - 0x2f
+
Datasheet: http://www.winbond.com.tw
Author: Shane Huang (Winbond)
@@ -15,15 +19,16 @@ Module Parameters
-----------------
* init int
- (default 1)
- Use 'init=0' to bypass initializing the chip.
- Try this if your computer crashes when you load the module.
+ (default 1)
+
+ Use 'init=0' to bypass initializing the chip.
+ Try this if your computer crashes when you load the module.
* force_subclients=bus,caddr,saddr,saddr
- This is used to force the i2c addresses for subclients of
- a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Example usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
Description
@@ -67,31 +72,34 @@ or maximum limit.
Alarms are provided as output from "realtime status register". Following bits
are defined:
-bit - alarm on:
-0 - in0
-1 - in1
-2 - temp1
-3 - temp2
-4 - temp3
-5 - fan1
-6 - fan2
-7 - fan3
-8 - in2
-9 - in3
-10 - in4
-11 - in5
-12 - in6
-13 - VID change
-14 - chassis
-15 - fan7
-16 - tart1
-17 - tart2
-18 - tart3
-19 - in7
-20 - in8
-21 - fan4
-22 - fan5
-23 - fan6
+==== ==========
+bit alarm on
+==== ==========
+0 in0
+1 in1
+2 temp1
+3 temp2
+4 temp3
+5 fan1
+6 fan2
+7 fan3
+8 in2
+9 in3
+10 in4
+11 in5
+12 in6
+13 VID change
+14 chassis
+15 fan7
+16 tart1
+17 tart2
+18 tart3
+19 in7
+20 in8
+21 fan4
+22 fan5
+23 fan6
+==== ==========
Tart will be asserted while target temperature cannot be achieved after 3 minutes
of full speed rotation of corresponding fan.
@@ -114,7 +122,7 @@ Known problems:
by CR[0x49h].
- The function of vid and vrm has not been finished, because I'm NOT
very familiar with them. Adding support is welcome.
-  - The function of chassis open detection needs more tests.
+ - The function of chassis open detection needs more tests.
- If you have ASUS server board and chip was not found: Then you will
need to upgrade to latest (or beta) BIOS. If it does not help please
contact us.
@@ -165,17 +173,27 @@ for each fan.
/sys files
----------
-pwm[1-7] - this file stores PWM duty cycle or DC value (fan speed) in range:
- 0 (stop) to 255 (full)
-pwm[1-3]_enable - this file controls mode of fan/temperature control:
- * 0 Disabled
- * 1 Manual mode
- * 2 Smart Fan II
- * 3 Thermal Cruise
-pwm[1-7]_mode - Select PWM or DC mode
- * 0 DC
- * 1 PWM
-thermal_cruise[1-3] - Selects the desired temperature for cruise (degC)
-tolerance[1-3] - Value in degrees of Celsius (degC) for +- T
-sf2_point[1-4]_fan[1-3] - four temperature points for each fan for Smart Fan II
-sf2_level[1-3]_fan[1-3] - three PWM/DC levels for each fan for Smart Fan II
+pwm[1-7]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
+ 0 (stop) to 255 (full)
+pwm[1-3]_enable
+ - this file controls mode of fan/temperature control:
+
+ * 0 Disabled
+ * 1 Manual mode
+ * 2 Smart Fan II
+ * 3 Thermal Cruise
+pwm[1-7]_mode
+ - Select PWM or DC mode
+
+ * 0 DC
+ * 1 PWM
+thermal_cruise[1-3]
+ - Selects the desired temperature for cruise (degC)
+tolerance[1-3]
+ - Value in degrees Celsius (degC) for +- T
+sf2_point[1-4]_fan[1-3]
+ - four temperature points for each fan for Smart Fan II
+sf2_level[1-3]_fan[1-3]
+ - three PWM/DC levels for each fan for Smart Fan II
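As a usage example, switching fan 1 to Thermal Cruise from userspace is just a few
sysfs writes using the files listed above. The hwmon path and the target/tolerance
values below are hypothetical; adjust them for your system::

  #include <stdio.h>

  #define HWMON "/sys/class/hwmon/hwmon2/"  /* hypothetical w83792d instance */

  static int write_attr(const char *name, const char *value)
  {
      char path[256];
      FILE *f;

      snprintf(path, sizeof(path), HWMON "%s", name);
      f = fopen(path, "w");
      if (!f) {
          perror(path);
          return -1;
      }
      fprintf(f, "%s\n", value);
      return fclose(f);
  }

  int main(void)
  {
      write_attr("thermal_cruise1", "55");  /* desired temperature, degC */
      write_attr("tolerance1", "2");        /* +/- 2 degC */
      write_attr("pwm1_enable", "3");       /* 3 = Thermal Cruise */
      return 0;
  }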
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
deleted file mode 100644
index 6cc5f639b721..000000000000
--- a/Documentation/hwmon/w83793
+++ /dev/null
@@ -1,106 +0,0 @@
-Kernel driver w83793
-====================
-
-Supported chips:
- * Winbond W83793G/W83793R
- Prefix: 'w83793'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Still not published
-
-Authors:
- Yuan Mu (Winbond Electronics)
- Rudolf Marek <r.marek@assembler.cz>
-
-
-Module parameters
------------------
-
-* reset int
- (default 0)
- This parameter is not recommended, it will lose motherboard specific
- settings. Use 'reset=1' to reset the chip when loading this module.
-
-* force_subclients=bus,caddr,saddr1,saddr2
- This is used to force the i2c addresses for subclients of
- a certain chip. Typical usage is `force_subclients=0,0x2f,0x4a,0x4b'
- to force the subclients of chip 0x2f on bus 0 to i2c addresses
- 0x4a and 0x4b.
-
-
-Description
------------
-
-This driver implements support for Winbond W83793G/W83793R chips.
-
-* Exported features
- This driver exports 10 voltage sensors, up to 12 fan tachometer inputs,
- 6 remote temperatures, up to 8 sets of PWM fan controls, SmartFan
- (automatic fan speed control) on all temperature/PWM combinations, 2
- sets of 6-pin CPU VID input.
-
-* Sensor resolutions
- If your motherboard maker used the reference design, the resolution of
- voltage0-2 is 2mV, resolution of voltage3/4/5 is 16mV, 8mV for voltage6,
- 24mV for voltage7/8. Temp1-4 have a 0.25 degree Celsius resolution,
- temp5-6 have a 1 degree Celsiis resolution.
-
-* Temperature sensor types
- Temp1-4 have 2 possible types. It can be read from (and written to)
- temp[1-4]_type.
- - If the value is 3, it starts monitoring using a remote termal diode
- (default).
- - If the value is 6, it starts monitoring using the temperature sensor
- in Intel CPU and get result by PECI.
- Temp5-6 can be connected to external thermistors (value of
- temp[5-6]_type is 4).
-
-* Alarm mechanism
- For voltage sensors, an alarm triggers if the measured value is below
- the low voltage limit or over the high voltage limit.
- For temperature sensors, an alarm triggers if the measured value goes
- above the high temperature limit, and wears off only after the measured
- value drops below the hysteresis value.
- For fan sensors, an alarm triggers if the measured value is below the
- low speed limit.
-
-* SmartFan/PWM control
- If you want to set a pwm fan to manual mode, you just need to make sure it
- is not controlled by any temp channel, for example, you want to set fan1
- to manual mode, you need to check the value of temp[1-6]_fan_map, make
- sure bit 0 is cleared in the 6 values. And then set the pwm1 value to
- control the fan.
-
- Each temperature channel can control all the 8 PWM outputs (by setting the
- corresponding bit in tempX_fan_map), you can set the temperature channel
- mode using temp[1-6]_pwm_enable, 2 is Thermal Cruise mode and 3
- is the SmartFanII mode. Temperature channels will try to speed up or
- slow down all controlled fans, this means one fan can receive different
- PWM value requests from different temperature channels, but the chip
- will always pick the safest (max) PWM value for each fan.
-
- In Thermal Cruise mode, the chip attempts to keep the temperature at a
- predefined value, within a tolerance margin. So if tempX_input >
- thermal_cruiseX + toleranceX, the chip will increase the PWM value,
- if tempX_input < thermal_cruiseX - toleranceX, the chip will decrease
- the PWM value. If the temperature is within the tolerance range, the PWM
- value is left unchanged.
-
- SmartFanII works differently, you have to define up to 7 PWM, temperature
- trip points, defining a PWM/temperature curve which the chip will follow.
- While not fundamentally different from the Thermal Cruise mode, the
- implementation is quite different, giving you a finer-grained control.
-
-* Chassis
- If the case open alarm triggers, it will stay in this state unless cleared
- by writing 0 to the sysfs file "intrusion0_alarm".
-
-* VID and VRM
- The VRM version is detected automatically, don't modify the it unless you
- *do* know the cpu VRM version and it's not properly detected.
-
-
-Notes
------
-
- Only Fan1-5 and PWM1-3 are guaranteed to always exist, other fan inputs and
- PWM outputs may or may not exist depending on the chip pin configuration.
diff --git a/Documentation/hwmon/w83793.rst b/Documentation/hwmon/w83793.rst
new file mode 100644
index 000000000000..83bb40c48645
--- /dev/null
+++ b/Documentation/hwmon/w83793.rst
@@ -0,0 +1,113 @@
+Kernel driver w83793
+====================
+
+Supported chips:
+
+ * Winbond W83793G/W83793R
+
+ Prefix: 'w83793'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Still not published
+
+Authors:
+ - Yuan Mu (Winbond Electronics)
+ - Rudolf Marek <r.marek@assembler.cz>
+
+
+Module parameters
+-----------------
+
+* reset int
+ (default 0)
+
+ This parameter is not recommended, it will lose motherboard specific
+ settings. Use 'reset=1' to reset the chip when loading this module.
+
+* force_subclients=bus,caddr,saddr1,saddr2
+ This is used to force the i2c addresses for subclients of
+ a certain chip. Typical usage is `force_subclients=0,0x2f,0x4a,0x4b`
+ to force the subclients of chip 0x2f on bus 0 to i2c addresses
+ 0x4a and 0x4b.
+
+
+Description
+-----------
+
+This driver implements support for Winbond W83793G/W83793R chips.
+
+* Exported features
+ This driver exports 10 voltage sensors, up to 12 fan tachometer inputs,
+ 6 remote temperatures, up to 8 sets of PWM fan controls, SmartFan
+ (automatic fan speed control) on all temperature/PWM combinations, 2
+ sets of 6-pin CPU VID input.
+
+* Sensor resolutions
+ If your motherboard maker used the reference design, the resolution of
+ voltage0-2 is 2mV, resolution of voltage3/4/5 is 16mV, 8mV for voltage6,
+ 24mV for voltage7/8. Temp1-4 have a 0.25 degree Celsius resolution,
+ temp5-6 have a 1 degree Celsius resolution.
+
+* Temperature sensor types
+ Temp1-4 have 2 possible types. It can be read from (and written to)
+ temp[1-4]_type.
+
+ - If the value is 3, it starts monitoring using a remote thermal diode
+ (default).
+ - If the value is 6, it starts monitoring using the temperature sensor
+ in the Intel CPU and gets the result via PECI.
+
+ Temp5-6 can be connected to external thermistors (value of
+ temp[5-6]_type is 4).
+
+* Alarm mechanism
+ For voltage sensors, an alarm triggers if the measured value is below
+ the low voltage limit or over the high voltage limit.
+ For temperature sensors, an alarm triggers if the measured value goes
+ above the high temperature limit, and wears off only after the measured
+ value drops below the hysteresis value.
+ For fan sensors, an alarm triggers if the measured value is below the
+ low speed limit.
+
+* SmartFan/PWM control
+ If you want to set a pwm fan to manual mode, you just need to make sure it
+ is not controlled by any temp channel. For example, if you want to set fan1
+ to manual mode, check the value of temp[1-6]_fan_map and make sure bit 0 is
+ cleared in all 6 values, then set the pwm1 value to control the fan (a
+ userspace sketch of this procedure follows this list).
+
+ Each temperature channel can control all the 8 PWM outputs (by setting the
+ corresponding bit in tempX_fan_map), you can set the temperature channel
+ mode using temp[1-6]_pwm_enable, 2 is Thermal Cruise mode and 3
+ is the SmartFanII mode. Temperature channels will try to speed up or
+ slow down all controlled fans, this means one fan can receive different
+ PWM value requests from different temperature channels, but the chip
+ will always pick the safest (max) PWM value for each fan.
+
+ In Thermal Cruise mode, the chip attempts to keep the temperature at a
+ predefined value, within a tolerance margin. So if tempX_input >
+ thermal_cruiseX + toleranceX, the chip will increase the PWM value,
+ if tempX_input < thermal_cruiseX - toleranceX, the chip will decrease
+ the PWM value. If the temperature is within the tolerance range, the PWM
+ value is left unchanged.
+
+ SmartFanII works differently, you have to define up to 7 PWM, temperature
+ trip points, defining a PWM/temperature curve which the chip will follow.
+ While not fundamentally different from the Thermal Cruise mode, the
+ implementation is quite different, giving you a finer-grained control.
+
+* Chassis
+ If the case open alarm triggers, it will stay in this state unless cleared
+ by writing 0 to the sysfs file "intrusion0_alarm".
+
+* VID and VRM
+ The VRM version is detected automatically, don't modify it unless you
+ *do* know the cpu VRM version and it's not properly detected.
+
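As mentioned under "SmartFan/PWM control" above, here is a userspace sketch of the
manual-mode check: it verifies that bit 0 is clear in all six ``temp[1-6]_fan_map``
values before fan1 is driven via ``pwm1``. The device directory is a placeholder;
only the attribute names come from the description above::

  #include <stdio.h>

  #define DEV "/sys/bus/i2c/devices/0-002f/"  /* placeholder device directory */

  static long read_attr(const char *name)
  {
      char path[256];
      long val = -1;
      FILE *f;

      snprintf(path, sizeof(path), DEV "%s", name);
      f = fopen(path, "r");
      if (!f)
          return -1;
      if (fscanf(f, "%ld", &val) != 1)
          val = -1;
      fclose(f);
      return val;
  }

  int main(void)
  {
      char name[32];
      long map;
      int i;

      for (i = 1; i <= 6; i++) {
          snprintf(name, sizeof(name), "temp%d_fan_map", i);
          map = read_attr(name);
          if (map < 0 || (map & 0x1)) {  /* bit 0 maps this channel to fan1 */
              fprintf(stderr, "temp%d still controls fan1 (or read failed)\n", i);
              return 1;
          }
      }
      printf("fan1 is in manual mode; write pwm1 to set its speed\n");
      return 0;
  }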
+
+Notes
+-----
+
+ Only Fan1-5 and PWM1-3 are guaranteed to always exist, other fan inputs and
+ PWM outputs may or may not exist depending on the chip pin configuration.
diff --git a/Documentation/hwmon/w83795 b/Documentation/hwmon/w83795
deleted file mode 100644
index d3e678216b9a..000000000000
--- a/Documentation/hwmon/w83795
+++ /dev/null
@@ -1,127 +0,0 @@
-Kernel driver w83795
-====================
-
-Supported chips:
- * Winbond/Nuvoton W83795G
- Prefix: 'w83795g'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Available for download on nuvoton.com
- * Winbond/Nuvoton W83795ADG
- Prefix: 'w83795adg'
- Addresses scanned: I2C 0x2c - 0x2f
- Datasheet: Available for download on nuvoton.com
-
-Authors:
- Wei Song (Nuvoton)
- Jean Delvare <jdelvare@suse.de>
-
-
-Pin mapping
------------
-
-Here is a summary of the pin mapping for the W83795G and W83795ADG.
-This can be useful to convert data provided by board manufacturers
-into working libsensors configuration statements.
-
- W83795G |
- Pin | Name | Register | Sysfs attribute
-------------------------------------------------------------------
- 13 | VSEN1 (VCORE1) | 10h | in0
- 14 | VSEN2 (VCORE2) | 11h | in1
- 15 | VSEN3 (VCORE3) | 12h | in2
- 16 | VSEN4 | 13h | in3
- 17 | VSEN5 | 14h | in4
- 18 | VSEN6 | 15h | in5
- 19 | VSEN7 | 16h | in6
- 20 | VSEN8 | 17h | in7
- 21 | VSEN9 | 18h | in8
- 22 | VSEN10 | 19h | in9
- 23 | VSEN11 | 1Ah | in10
- 28 | VTT | 1Bh | in11
- 24 | 3VDD | 1Ch | in12
- 25 | 3VSB | 1Dh | in13
- 26 | VBAT | 1Eh | in14
- 3 | VSEN12/TR5 | 1Fh | in15/temp5
- 4 | VSEN13/TR5 | 20h | in16/temp6
- 5/ 6 | VDSEN14/TR1/TD1 | 21h | in17/temp1
- 7/ 8 | VDSEN15/TR2/TD2 | 22h | in18/temp2
- 9/ 10 | VDSEN16/TR3/TD3 | 23h | in19/temp3
- 11/ 12 | VDSEN17/TR4/TD4 | 24h | in20/temp4
- 40 | FANIN1 | 2Eh | fan1
- 42 | FANIN2 | 2Fh | fan2
- 44 | FANIN3 | 30h | fan3
- 46 | FANIN4 | 31h | fan4
- 48 | FANIN5 | 32h | fan5
- 50 | FANIN6 | 33h | fan6
- 52 | FANIN7 | 34h | fan7
- 54 | FANIN8 | 35h | fan8
- 57 | FANIN9 | 36h | fan9
- 58 | FANIN10 | 37h | fan10
- 59 | FANIN11 | 38h | fan11
- 60 | FANIN12 | 39h | fan12
- 31 | FANIN13 | 3Ah | fan13
- 35 | FANIN14 | 3Bh | fan14
- 41 | FANCTL1 | 10h (bank 2) | pwm1
- 43 | FANCTL2 | 11h (bank 2) | pwm2
- 45 | FANCTL3 | 12h (bank 2) | pwm3
- 47 | FANCTL4 | 13h (bank 2) | pwm4
- 49 | FANCTL5 | 14h (bank 2) | pwm5
- 51 | FANCTL6 | 15h (bank 2) | pwm6
- 53 | FANCTL7 | 16h (bank 2) | pwm7
- 55 | FANCTL8 | 17h (bank 2) | pwm8
- 29/ 30 | PECI/TSI (DTS1) | 26h | temp7
- 29/ 30 | PECI/TSI (DTS2) | 27h | temp8
- 29/ 30 | PECI/TSI (DTS3) | 28h | temp9
- 29/ 30 | PECI/TSI (DTS4) | 29h | temp10
- 29/ 30 | PECI/TSI (DTS5) | 2Ah | temp11
- 29/ 30 | PECI/TSI (DTS6) | 2Bh | temp12
- 29/ 30 | PECI/TSI (DTS7) | 2Ch | temp13
- 29/ 30 | PECI/TSI (DTS8) | 2Dh | temp14
- 27 | CASEOPEN# | 46h | intrusion0
-
- W83795ADG |
- Pin | Name | Register | Sysfs attribute
-------------------------------------------------------------------
- 10 | VSEN1 (VCORE1) | 10h | in0
- 11 | VSEN2 (VCORE2) | 11h | in1
- 12 | VSEN3 (VCORE3) | 12h | in2
- 13 | VSEN4 | 13h | in3
- 14 | VSEN5 | 14h | in4
- 15 | VSEN6 | 15h | in5
- 16 | VSEN7 | 16h | in6
- 17 | VSEN8 | 17h | in7
- 22 | VTT | 1Bh | in11
- 18 | 3VDD | 1Ch | in12
- 19 | 3VSB | 1Dh | in13
- 20 | VBAT | 1Eh | in14
- 48 | VSEN12/TR5 | 1Fh | in15/temp5
- 1 | VSEN13/TR5 | 20h | in16/temp6
- 2/ 3 | VDSEN14/TR1/TD1 | 21h | in17/temp1
- 4/ 5 | VDSEN15/TR2/TD2 | 22h | in18/temp2
- 6/ 7 | VDSEN16/TR3/TD3 | 23h | in19/temp3
- 8/ 9 | VDSEN17/TR4/TD4 | 24h | in20/temp4
- 32 | FANIN1 | 2Eh | fan1
- 34 | FANIN2 | 2Fh | fan2
- 36 | FANIN3 | 30h | fan3
- 37 | FANIN4 | 31h | fan4
- 38 | FANIN5 | 32h | fan5
- 39 | FANIN6 | 33h | fan6
- 40 | FANIN7 | 34h | fan7
- 41 | FANIN8 | 35h | fan8
- 43 | FANIN9 | 36h | fan9
- 44 | FANIN10 | 37h | fan10
- 45 | FANIN11 | 38h | fan11
- 46 | FANIN12 | 39h | fan12
- 24 | FANIN13 | 3Ah | fan13
- 28 | FANIN14 | 3Bh | fan14
- 33 | FANCTL1 | 10h (bank 2) | pwm1
- 35 | FANCTL2 | 11h (bank 2) | pwm2
- 23 | PECI (DTS1) | 26h | temp7
- 23 | PECI (DTS2) | 27h | temp8
- 23 | PECI (DTS3) | 28h | temp9
- 23 | PECI (DTS4) | 29h | temp10
- 23 | PECI (DTS5) | 2Ah | temp11
- 23 | PECI (DTS6) | 2Bh | temp12
- 23 | PECI (DTS7) | 2Ch | temp13
- 23 | PECI (DTS8) | 2Dh | temp14
- 21 | CASEOPEN# | 46h | intrusion0
diff --git a/Documentation/hwmon/w83795.rst b/Documentation/hwmon/w83795.rst
new file mode 100644
index 000000000000..d0615e2fabb9
--- /dev/null
+++ b/Documentation/hwmon/w83795.rst
@@ -0,0 +1,142 @@
+Kernel driver w83795
+====================
+
+Supported chips:
+
+ * Winbond/Nuvoton W83795G
+
+ Prefix: 'w83795g'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Available for download on nuvoton.com
+
+ * Winbond/Nuvoton W83795ADG
+
+ Prefix: 'w83795adg'
+
+ Addresses scanned: I2C 0x2c - 0x2f
+
+ Datasheet: Available for download on nuvoton.com
+
+Authors:
+ - Wei Song (Nuvoton)
+ - Jean Delvare <jdelvare@suse.de>
+
+
+Pin mapping
+-----------
+
+Here is a summary of the pin mapping for the W83795G and W83795ADG.
+This can be useful to convert data provided by board manufacturers
+into working libsensors configuration statements.
+
+
+- W83795G
+
+========= ======================= =============== ================
+Pin Name Register Sysfs attribute
+========= ======================= =============== ================
+ 13 VSEN1 (VCORE1) 10h in0
+ 14 VSEN2 (VCORE2) 11h in1
+ 15 VSEN3 (VCORE3) 12h in2
+ 16 VSEN4 13h in3
+ 17 VSEN5 14h in4
+ 18 VSEN6 15h in5
+ 19 VSEN7 16h in6
+ 20 VSEN8 17h in7
+ 21 VSEN9 18h in8
+ 22 VSEN10 19h in9
+ 23 VSEN11 1Ah in10
+ 28 VTT 1Bh in11
+ 24 3VDD 1Ch in12
+ 25 3VSB 1Dh in13
+ 26 VBAT 1Eh in14
+ 3 VSEN12/TR5 1Fh in15/temp5
+ 4 VSEN13/TR5 20h in16/temp6
+ 5/ 6 VDSEN14/TR1/TD1 21h in17/temp1
+ 7/ 8 VDSEN15/TR2/TD2 22h in18/temp2
+ 9/ 10 VDSEN16/TR3/TD3 23h in19/temp3
+ 11/ 12 VDSEN17/TR4/TD4 24h in20/temp4
+ 40 FANIN1 2Eh fan1
+ 42 FANIN2 2Fh fan2
+ 44 FANIN3 30h fan3
+ 46 FANIN4 31h fan4
+ 48 FANIN5 32h fan5
+ 50 FANIN6 33h fan6
+ 52 FANIN7 34h fan7
+ 54 FANIN8 35h fan8
+ 57 FANIN9 36h fan9
+ 58 FANIN10 37h fan10
+ 59 FANIN11 38h fan11
+ 60 FANIN12 39h fan12
+ 31 FANIN13 3Ah fan13
+ 35 FANIN14 3Bh fan14
+ 41 FANCTL1 10h (bank 2) pwm1
+ 43 FANCTL2 11h (bank 2) pwm2
+ 45 FANCTL3 12h (bank 2) pwm3
+ 47 FANCTL4 13h (bank 2) pwm4
+ 49 FANCTL5 14h (bank 2) pwm5
+ 51 FANCTL6 15h (bank 2) pwm6
+ 53 FANCTL7 16h (bank 2) pwm7
+ 55 FANCTL8 17h (bank 2) pwm8
+ 29/ 30 PECI/TSI (DTS1) 26h temp7
+ 29/ 30 PECI/TSI (DTS2) 27h temp8
+ 29/ 30 PECI/TSI (DTS3) 28h temp9
+ 29/ 30 PECI/TSI (DTS4) 29h temp10
+ 29/ 30 PECI/TSI (DTS5) 2Ah temp11
+ 29/ 30 PECI/TSI (DTS6) 2Bh temp12
+ 29/ 30 PECI/TSI (DTS7) 2Ch temp13
+ 29/ 30 PECI/TSI (DTS8) 2Dh temp14
+ 27 CASEOPEN# 46h intrusion0
+========= ======================= =============== ================
+
+- W83795ADG
+
+========= ======================= =============== ================
+Pin Name Register Sysfs attribute
+========= ======================= =============== ================
+ 10 VSEN1 (VCORE1) 10h in0
+ 11 VSEN2 (VCORE2) 11h in1
+ 12 VSEN3 (VCORE3) 12h in2
+ 13 VSEN4 13h in3
+ 14 VSEN5 14h in4
+ 15 VSEN6 15h in5
+ 16 VSEN7 16h in6
+ 17 VSEN8 17h in7
+ 22 VTT 1Bh in11
+ 18 3VDD 1Ch in12
+ 19 3VSB 1Dh in13
+ 20 VBAT 1Eh in14
+ 48 VSEN12/TR5 1Fh in15/temp5
+ 1 VSEN13/TR5 20h in16/temp6
+ 2/ 3 VDSEN14/TR1/TD1 21h in17/temp1
+ 4/ 5 VDSEN15/TR2/TD2 22h in18/temp2
+ 6/ 7 VDSEN16/TR3/TD3 23h in19/temp3
+ 8/ 9 VDSEN17/TR4/TD4 24h in20/temp4
+ 32 FANIN1 2Eh fan1
+ 34 FANIN2 2Fh fan2
+ 36 FANIN3 30h fan3
+ 37 FANIN4 31h fan4
+ 38 FANIN5 32h fan5
+ 39 FANIN6 33h fan6
+ 40 FANIN7 34h fan7
+ 41 FANIN8 35h fan8
+ 43 FANIN9 36h fan9
+ 44 FANIN10 37h fan10
+ 45 FANIN11 38h fan11
+ 46 FANIN12 39h fan12
+ 24 FANIN13 3Ah fan13
+ 28 FANIN14 3Bh fan14
+ 33 FANCTL1 10h (bank 2) pwm1
+ 35 FANCTL2 11h (bank 2) pwm2
+ 23 PECI (DTS1) 26h temp7
+ 23 PECI (DTS2) 27h temp8
+ 23 PECI (DTS3) 28h temp9
+ 23 PECI (DTS4) 29h temp10
+ 23 PECI (DTS5) 2Ah temp11
+ 23 PECI (DTS6) 2Bh temp12
+ 23 PECI (DTS7) 2Ch temp13
+ 23 PECI (DTS8) 2Dh temp14
+ 21 CASEOPEN# 46h intrusion0
+========= ======================= =============== ================
diff --git a/Documentation/hwmon/w83l785ts b/Documentation/hwmon/w83l785ts.rst
index c8978478871f..7fa5418fed11 100644
--- a/Documentation/hwmon/w83l785ts
+++ b/Documentation/hwmon/w83l785ts.rst
@@ -2,14 +2,19 @@ Kernel driver w83l785ts
=======================
Supported chips:
+
* Winbond W83L785TS-S
+
Prefix: 'w83l785ts'
+
Addresses scanned: I2C 0x2e
+
Datasheet: Publicly available at the Winbond USA website
- http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
+
+ http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L785TS-S.pdf
Authors:
- Jean Delvare <jdelvare@suse.de>
+ Jean Delvare <jdelvare@suse.de>
Description
-----------
diff --git a/Documentation/hwmon/w83l786ng b/Documentation/hwmon/w83l786ng.rst
index d8f55d7fff10..2b7776190de3 100644
--- a/Documentation/hwmon/w83l786ng
+++ b/Documentation/hwmon/w83l786ng.rst
@@ -1,10 +1,14 @@
Kernel driver w83l786ng
-=====================
+=======================
Supported chips:
+
* Winbond W83L786NG/W83L786NR
+
Prefix: 'w83l786ng'
+
Addresses scanned: I2C 0x2e - 0x2f
+
Datasheet: http://www.winbond-usa.com/products/winbond_products/pdfs/PCIC/W83L786NRNG09.pdf
Author: Kevin Lo <kevlo@kevlo.org>
@@ -14,9 +18,10 @@ Module Parameters
-----------------
* reset boolean
- (default 0)
- Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
- behavior is no chip reset to preserve BIOS settings
+ (default 0)
+
+ Use 'reset=1' to reset the chip (via index 0x40, bit 7). The default
+ behavior is no chip reset to preserve BIOS settings
Description
@@ -41,14 +46,21 @@ or maximum limit.
/sys files
----------
-pwm[1-2] - this file stores PWM duty cycle or DC value (fan speed) in range:
- 0 (stop) to 255 (full)
-pwm[1-2]_enable - this file controls mode of fan/temperature control:
- * 0 Manual Mode
- * 1 Thermal Cruise
- * 2 Smart Fan II
- * 4 FAN_SET
-pwm[1-2]_mode - Select PWM of DC mode
- * 0 DC
- * 1 PWM
-tolerance[1-2] - Value in degrees of Celsius (degC) for +- T
+pwm[1-2]
+ - this file stores PWM duty cycle or DC value (fan speed) in range:
+
+ 0 (stop) to 255 (full)
+pwm[1-2]_enable
+ - this file controls mode of fan/temperature control:
+
+ * 0 Manual Mode
+ * 1 Thermal Cruise
+ * 2 Smart Fan II
+ * 4 FAN_SET
+pwm[1-2]_mode
+ - Select PWM or DC mode
+
+ * 0 DC
+ * 1 PWM
+tolerance[1-2]
+ - Value in degrees Celsius (degC) for +- T
diff --git a/Documentation/hwmon/wm831x b/Documentation/hwmon/wm831x.rst
index 11446757c8c8..c56fb35a2fb3 100644
--- a/Documentation/hwmon/wm831x
+++ b/Documentation/hwmon/wm831x.rst
@@ -3,11 +3,14 @@ Kernel driver wm831x-hwmon
Supported chips:
* Wolfson Microelectronics WM831x PMICs
+
Prefix: 'wm831x'
+
Datasheet:
- http://www.wolfsonmicro.com/products/WM8310
- http://www.wolfsonmicro.com/products/WM8311
- http://www.wolfsonmicro.com/products/WM8312
+
+ - http://www.wolfsonmicro.com/products/WM8310
+ - http://www.wolfsonmicro.com/products/WM8311
+ - http://www.wolfsonmicro.com/products/WM8312
Authors: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/Documentation/hwmon/wm8350 b/Documentation/hwmon/wm8350.rst
index 98f923bd2e92..cec044ca5900 100644
--- a/Documentation/hwmon/wm8350
+++ b/Documentation/hwmon/wm8350.rst
@@ -2,12 +2,16 @@ Kernel driver wm8350-hwmon
==========================
Supported chips:
+
* Wolfson Microelectronics WM835x PMICs
+
Prefix: 'wm8350'
+
Datasheet:
- http://www.wolfsonmicro.com/products/WM8350
- http://www.wolfsonmicro.com/products/WM8351
- http://www.wolfsonmicro.com/products/WM8352
+
+ - http://www.wolfsonmicro.com/products/WM8350
+ - http://www.wolfsonmicro.com/products/WM8351
+ - http://www.wolfsonmicro.com/products/WM8352
Authors: Mark Brown <broonie@opensource.wolfsonmicro.com>
diff --git a/Documentation/hwmon/xgene-hwmon b/Documentation/hwmon/xgene-hwmon.rst
index 6ec50ed7cc8f..439b30b881b6 100644
--- a/Documentation/hwmon/xgene-hwmon
+++ b/Documentation/hwmon/xgene-hwmon.rst
@@ -1,7 +1,8 @@
Kernel driver xgene-hwmon
-========================
+=========================
Supported chips:
+
* APM X-Gene SoC
Description
@@ -15,16 +16,21 @@ For ACPI, it is the PCC mailbox.
The following sensors are supported
* Temperature
- - SoC on-die temperature in milli-degree C
- - Alarm when high/over temperature occurs
+ - SoC on-die temperature in milli-degree C
+ - Alarm when high/over temperature occurs
+
* Power
- - CPU power in uW
- - IO power in uW
+ - CPU power in uW
+ - IO power in uW
sysfs-Interface
---------------
-temp0_input - SoC on-die temperature (milli-degree C)
-temp0_critical_alarm - An 1 would indicates on-die temperature exceeded threshold
-power0_input - CPU power in (uW)
-power1_input - IO power in (uW)
+temp0_input
+ - SoC on-die temperature (milli-degree C)
+temp0_critical_alarm
+ - A 1 indicates that the on-die temperature exceeded the threshold
+power0_input
+ - CPU power in (uW)
+power1_input
+ - IO power in (uW)
diff --git a/Documentation/hwmon/zl6100 b/Documentation/hwmon/zl6100.rst
index 477a94b131ae..41513bb7fe51 100644
--- a/Documentation/hwmon/zl6100
+++ b/Documentation/hwmon/zl6100.rst
@@ -2,57 +2,106 @@ Kernel driver zl6100
====================
Supported chips:
+
* Intersil / Zilker Labs ZL2004
+
Prefix: 'zl2004'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6847.pdf
+
* Intersil / Zilker Labs ZL2005
+
Prefix: 'zl2005'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6848.pdf
+
* Intersil / Zilker Labs ZL2006
+
Prefix: 'zl2006'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6850.pdf
+
* Intersil / Zilker Labs ZL2008
+
Prefix: 'zl2008'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6859.pdf
+
* Intersil / Zilker Labs ZL2105
+
Prefix: 'zl2105'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6851.pdf
+
* Intersil / Zilker Labs ZL2106
+
Prefix: 'zl2106'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6852.pdf
+
* Intersil / Zilker Labs ZL6100
+
Prefix: 'zl6100'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6876.pdf
+
* Intersil / Zilker Labs ZL6105
+
Prefix: 'zl6105'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn6906.pdf
+
* Intersil / Zilker Labs ZL9101M
+
Prefix: 'zl9101'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn7669.pdf
+
* Intersil / Zilker Labs ZL9117M
+
Prefix: 'zl9117'
+
Addresses scanned: -
+
Datasheet: http://www.intersil.com/data/fn/fn7914.pdf
+
* Ericsson BMR450, BMR451
+
Prefix: 'bmr450', 'bmr451'
+
Addresses scanned: -
+
Datasheet:
+
http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146401
+
* Ericsson BMR462, BMR463, BMR464
+
Prefixes: 'bmr462', 'bmr463', 'bmr464'
+
Addresses scanned: -
+
Datasheet:
-http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
+ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146256
Author: Guenter Roeck <linux@roeck-us.net>
@@ -64,7 +113,7 @@ This driver supports hardware monitoring for Intersil / Zilker Labs ZL6100 and
compatible digital DC-DC controllers.
The driver is a client driver to the core PMBus driver. Please see
-Documentation/hwmon/pmbus and Documentation.hwmon/pmbus-core for details
+Documentation/hwmon/pmbus.rst and Documentation/hwmon/pmbus-core.rst for details
on PMBus client drivers.
@@ -75,13 +124,15 @@ This driver does not auto-detect devices. You will have to instantiate the
devices explicitly. Please see Documentation/i2c/instantiating-devices for
details.
-WARNING: Do not access chip registers using the i2cdump command, and do not use
-any of the i2ctools commands on a command register used to save and restore
-configuration data (0x11, 0x12, 0x15, 0x16, and 0xf4). The chips supported by
-this driver interpret any access to those command registers (including read
-commands) as request to execute the command in question. Unless write accesses
-to those registers are protected, this may result in power loss, board resets,
-and/or Flash corruption. Worst case, your board may turn into a brick.
+.. warning::
+
+ Do not access chip registers using the i2cdump command, and do not use
+ any of the i2ctools commands on a command register used to save and restore
+ configuration data (0x11, 0x12, 0x15, 0x16, and 0xf4). The chips supported by
+ this driver interpret any access to those command registers (including read
+ commands) as a request to execute the command in question. Unless write accesses
+ to those registers are protected, this may result in power loss, board resets,
+ and/or Flash corruption. Worst case, your board may turn into a brick.
Platform data support
@@ -110,6 +161,7 @@ Sysfs entries
The following attributes are supported. Limits are read-write; all other
attributes are read-only.
+======================= ========================================================
in1_label "vin"
in1_input Measured input voltage.
in1_min Minimum input voltage.
@@ -158,3 +210,4 @@ temp[12]_min_alarm Chip temperature low alarm.
temp[12]_max_alarm Chip temperature high alarm.
temp[12]_lcrit_alarm Chip temperature critical low alarm.
temp[12]_crit_alarm Chip temperature critical high alarm.
+======================= ========================================================
diff --git a/Documentation/i2c/busses/i2c-amd-mp2 b/Documentation/i2c/busses/i2c-amd-mp2
new file mode 100644
index 000000000000..6571487171f4
--- /dev/null
+++ b/Documentation/i2c/busses/i2c-amd-mp2
@@ -0,0 +1,23 @@
+Kernel driver i2c-amd-mp2
+
+Supported adapters:
+ * AMD MP2 PCIe interface
+
+Datasheet: not publicly available.
+
+Authors:
+ Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ Nehal Shah <nehal-bakulchandra.shah@amd.com>
+ Elie Morisse <syniurge@gmail.com>
+
+Description
+-----------
+
+The MP2 is an ARM processor programmed as an I2C controller and communicating
+with the x86 host through PCI.
+
+If you see something like this:
+
+03:00.7 MP2 I2C controller: Advanced Micro Devices, Inc. [AMD] Device 15e6
+
+in your 'lspci -v', then this driver is for your device.
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index aa959fd22450..2703bc3acad0 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -15,6 +15,8 @@ Supported adapters:
http://support.amd.com/us/Embedded_TechDocs/44413.pdf
* AMD Hudson-2, ML, CZ
Datasheet: Not publicly available
+ * Hygon CZ
+ Datasheet: Not publicly available
* Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
Datasheet: Publicly available at the SMSC website http://www.smsc.com
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 80a421cb935e..a7566ef62411 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -35,6 +35,16 @@ trying to get it to work optimally on a given system.
admin-guide/index
+Firmware-related documentation
+------------------------------
+The following holds information on the kernel's expectations regarding the
+platform firmware.
+
+.. toctree::
+ :maxdepth: 2
+
+ firmware-guide/index
+
Application-developer documentation
-----------------------------------
@@ -83,6 +93,7 @@ needed).
media/index
networking/index
input/index
+ hwmon/index
gpu/index
security/index
sound/index
@@ -101,7 +112,9 @@ implementation.
.. toctree::
:maxdepth: 2
sh/index
+ x86/index
Filesystem Documentation
------------------------
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 8a3830b39c7d..9c230ea71963 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -11,6 +11,11 @@ modules.builtin
This file lists all modules that are built into the kernel. This is used
by modprobe to not fail when trying to load something builtin.
+modules.builtin.modinfo
+--------------------------------------------------
+This file contains modinfo from all modules that are built into the kernel.
+Unlike modinfo of a separate module, all fields are prefixed with module name.
+
Environment variables
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 10f4499e677c..8baab8832c5b 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -243,10 +243,10 @@ Optimization
^^^^^^^^^^^^
The Kprobe-optimizer doesn't insert the jump instruction immediately;
-rather, it calls synchronize_sched() for safety first, because it's
+rather, it calls synchronize_rcu() for safety first, because it's
possible for a CPU to be interrupted in the middle of executing the
-optimized region [3]_. As you know, synchronize_sched() can ensure
-that all interruptions that were active when synchronize_sched()
+optimized region [3]_. As you know, synchronize_rcu() can ensure
+that all interruptions that were active when synchronize_rcu()
was called are done, but only if CONFIG_PREEMPT=n. So, this version
of kprobe optimization supports only kernels with CONFIG_PREEMPT=n [4]_.
@@ -321,6 +321,7 @@ architectures:
- ppc
- mips
- s390
+- parisc
Configuring Kprobes
===================
diff --git a/Documentation/livepatch/callbacks.txt b/Documentation/livepatch/callbacks.rst
index 182e31d4abce..470944aa8658 100644
--- a/Documentation/livepatch/callbacks.txt
+++ b/Documentation/livepatch/callbacks.rst
@@ -4,7 +4,7 @@
Livepatch (un)patch-callbacks provide a mechanism for livepatch modules
to execute callback functions when a kernel object is (un)patched. They
-can be considered a "power feature" that extends livepatching abilities
+can be considered a **power feature** that **extends livepatching abilities**
to include:
- Safe updates to global data
@@ -17,6 +17,9 @@ In most cases, (un)patch callbacks will need to be used in conjunction
with memory barriers and kernel synchronization primitives, like
mutexes/spinlocks, or even stop_machine(), to avoid concurrency issues.
+1. Motivation
+=============
+
Callbacks differ from existing kernel facilities:
- Module init/exit code doesn't run when disabling and re-enabling a
@@ -28,21 +31,31 @@ Callbacks are part of the klp_object structure and their implementation
is specific to that klp_object. Other livepatch objects may or may not
be patched, irrespective of the target klp_object's current state.
+2. Callback types
+=================
+
Callbacks can be registered for the following livepatch actions:
- * Pre-patch - before a klp_object is patched
+ * Pre-patch
+ - before a klp_object is patched
- * Post-patch - after a klp_object has been patched and is active
+ * Post-patch
+ - after a klp_object has been patched and is active
across all tasks
- * Pre-unpatch - before a klp_object is unpatched (ie, patched code is
+ * Pre-unpatch
+ - before a klp_object is unpatched (ie, patched code is
active), used to clean up post-patch callback
resources
- * Post-unpatch - after a klp_object has been patched, all code has
+ * Post-unpatch
+ - after a klp_object has been patched, all code has
been restored and no tasks are running patched code,
used to cleanup pre-patch callback resources
+3. How it works
+===============
+
Each callback is optional, omitting one does not preclude specifying any
other. However, the livepatching core executes the handlers in
symmetry: pre-patch callbacks have a post-unpatch counterpart and
@@ -86,11 +99,14 @@ If the object did successfully patch, but the patch transition never
started for some reason (e.g., if another object failed to patch),
only the post-unpatch callback will be called.
+4. Use cases
+============
-Example Use-cases
-=================
+Sample livepatch modules demonstrating the callback API can be found in
+samples/livepatch/ directory. These samples were modified for use in
+kselftests and can be found in the lib/livepatch directory.
-Update global data
+Global data update
------------------
A pre-patch callback can be useful to update a global variable. For
@@ -103,24 +119,15 @@ patch the data *after* patching is complete with a post-patch callback,
so that tcp_send_challenge_ack() could first be changed to read
sysctl_tcp_challenge_ack_limit with READ_ONCE.
-
-Support __init and probe function patches
+__init and probe function patches support
-----------------------------------------
Although __init and probe functions are not directly livepatch-able, it
may be possible to implement similar updates via pre/post-patch
callbacks.
-48900cb6af42 ("virtio-net: drop NETIF_F_FRAGLIST") change the way that
+The commit ``48900cb6af42 ("virtio-net: drop NETIF_F_FRAGLIST")`` changed the way that
virtnet_probe() initialized its driver's net_device features. A
pre/post-patch callback could iterate over all such devices, making a
similar change to their hw_features value. (Client functions of the
value may need to be updated accordingly.)
-
-
-Other Examples
-==============
-
-Sample livepatch modules demonstrating the callback API can be found in
-samples/livepatch/ directory. These samples were modified for use in
-kselftests and can be found in the lib/livepatch directory.
diff --git a/Documentation/livepatch/cumulative-patches.txt b/Documentation/livepatch/cumulative-patches.rst
index 0012808e8d44..1931f318976a 100644
--- a/Documentation/livepatch/cumulative-patches.txt
+++ b/Documentation/livepatch/cumulative-patches.rst
@@ -18,7 +18,7 @@ Usage
-----
The atomic replace can be enabled by setting "replace" flag in struct klp_patch,
-for example:
+for example::
static struct klp_patch patch = {
.mod = THIS_MODULE,
@@ -49,19 +49,19 @@ Features
The atomic replace allows:
- + Atomically revert some functions in a previous patch while
+ - Atomically revert some functions in a previous patch while
upgrading other functions.
- + Remove eventual performance impact caused by core redirection
+ - Remove eventual performance impact caused by core redirection
for functions that are no longer patched.
- + Decrease user confusion about dependencies between livepatches.
+ - Decrease user confusion about dependencies between livepatches.
Limitations:
------------
- + Once the operation finishes, there is no straightforward way
+ - Once the operation finishes, there is no straightforward way
to reverse it and restore the replaced patches atomically.
A good practice is to set .replace flag in any released livepatch.
@@ -74,7 +74,7 @@ Limitations:
only when the transition was not forced.
- + Only the (un)patching callbacks from the _new_ cumulative livepatch are
+ - Only the (un)patching callbacks from the _new_ cumulative livepatch are
executed. Any callbacks from the replaced patches are ignored.
In other words, the cumulative patch is responsible for doing any actions
@@ -93,7 +93,7 @@ Limitations:
enabled patches were called.
- + There is no special handling of shadow variables. Livepatch authors
+ - There is no special handling of shadow variables. Livepatch authors
must create their own rules how to pass them from one cumulative
patch to the other. Especially that they should not blindly remove
them in module_exit() functions.
diff --git a/Documentation/livepatch/index.rst b/Documentation/livepatch/index.rst
new file mode 100644
index 000000000000..edd291d51847
--- /dev/null
+++ b/Documentation/livepatch/index.rst
@@ -0,0 +1,21 @@
+:orphan:
+
+===================
+Kernel Livepatching
+===================
+
+.. toctree::
+ :maxdepth: 1
+
+ livepatch
+ callbacks
+ cumulative-patches
+ module-elf-format
+ shadow-vars
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.rst
index 4627b41ff02e..c2c598c4ead8 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.rst
@@ -4,22 +4,22 @@ Livepatch
This document outlines basic information about kernel livepatching.
-Table of Contents:
-
-1. Motivation
-2. Kprobes, Ftrace, Livepatching
-3. Consistency model
-4. Livepatch module
- 4.1. New functions
- 4.2. Metadata
-5. Livepatch life-cycle
- 5.1. Loading
- 5.2. Enabling
- 5.3. Replacing
- 5.4. Disabling
- 5.5. Removing
-6. Sysfs
-7. Limitations
+.. Table of Contents:
+
+ 1. Motivation
+ 2. Kprobes, Ftrace, Livepatching
+ 3. Consistency model
+ 4. Livepatch module
+ 4.1. New functions
+ 4.2. Metadata
+ 5. Livepatch life-cycle
+ 5.1. Loading
+ 5.2. Enabling
+ 5.3. Replacing
+ 5.4. Disabling
+ 5.5. Removing
+ 6. Sysfs
+ 7. Limitations
1. Motivation
@@ -40,14 +40,14 @@ There are multiple mechanisms in the Linux kernel that are directly related
to redirection of code execution; namely: kernel probes, function tracing,
and livepatching:
- + The kernel probes are the most generic. The code can be redirected by
+ - The kernel probes are the most generic. The code can be redirected by
putting a breakpoint instruction instead of any instruction.
- + The function tracer calls the code from a predefined location that is
+ - The function tracer calls the code from a predefined location that is
close to the function entry point. This location is generated by the
compiler using the '-pg' gcc option.
- + Livepatching typically needs to redirect the code at the very beginning
+ - Livepatching typically needs to redirect the code at the very beginning
of the function entry before the function parameters or the stack
are in any way modified.
@@ -249,7 +249,7 @@ The patch contains only functions that are really modified. But they
might want to access functions or data from the original source file
that may only be locally accessible. This can be solved by a special
relocation section in the generated livepatch module, see
-Documentation/livepatch/module-elf-format.txt for more details.
+Documentation/livepatch/module-elf-format.rst for more details.
4.2. Metadata
@@ -258,7 +258,7 @@ Documentation/livepatch/module-elf-format.txt for more details.
The patch is described by several structures that split the information
into three levels:
- + struct klp_func is defined for each patched function. It describes
+ - struct klp_func is defined for each patched function. It describes
the relation between the original and the new implementation of a
particular function.
@@ -275,7 +275,7 @@ into three levels:
only for a particular object ( vmlinux or a kernel module ). Note that
kallsyms allows for searching symbols according to the object name.
- + struct klp_object defines an array of patched functions (struct
+ - struct klp_object defines an array of patched functions (struct
klp_func) in the same object. Where the object is either vmlinux
(NULL) or a module name.
@@ -285,7 +285,7 @@ into three levels:
only when they are available.
- + struct klp_patch defines an array of patched objects (struct
+ - struct klp_patch defines an array of patched objects (struct
klp_object).
This structure handles all patched functions consistently and eventually,
@@ -337,14 +337,16 @@ operation fails.
Second, livepatch enters into a transition state where tasks are converging
to the patched state. If an original function is patched for the first
time, a function-specific struct klp_ops is created and a universal
-ftrace handler is registered[*]. This stage is indicated by a value of '1'
+ftrace handler is registered\ [#]_. This stage is indicated by a value of '1'
in /sys/kernel/livepatch/<name>/transition. For more information about
this process, see the "Consistency model" section.
Finally, once all tasks have been patched, the 'transition' value changes
to '0'.
-[*] Note that functions might be patched multiple times. The ftrace handler
+.. [#]
+
+ Note that functions might be patched multiple times. The ftrace handler
is registered only once for a given function. Further patches just add
an entry to the list (see field `func_stack`) of the struct klp_ops.
The right implementation is selected by the ftrace handler, see
@@ -368,7 +370,7 @@ the ftrace handler is unregistered and the struct klp_ops is
freed when the related function is not modified by the new patch
and func_stack list becomes empty.
-See Documentation/livepatch/cumulative-patches.txt for more details.
+See Documentation/livepatch/cumulative-patches.rst for more details.
5.4. Disabling
@@ -421,7 +423,7 @@ See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
The current Livepatch implementation has several limitations:
- + Only functions that can be traced could be patched.
+ - Only functions that can be traced could be patched.
Livepatch is based on the dynamic ftrace. In particular, functions
implementing ftrace or the livepatch ftrace handler could not be
@@ -431,7 +433,7 @@ The current Livepatch implementation has several limitations:
- + Livepatch works reliably only when the dynamic ftrace is located at
+ - Livepatch works reliably only when the dynamic ftrace is located at
the very beginning of the function.
The function need to be redirected before the stack or the function
@@ -445,7 +447,7 @@ The current Livepatch implementation has several limitations:
this is handled on the ftrace level.
- + Kretprobes using the ftrace framework conflict with the patched
+ - Kretprobes using the ftrace framework conflict with the patched
functions.
Both kretprobes and livepatches use a ftrace handler that modifies
@@ -453,7 +455,7 @@ The current Livepatch implementation has several limitations:
is rejected when the handler is already in use by the other.
- + Kprobes in the original function are ignored when the code is
+ - Kprobes in the original function are ignored when the code is
redirected to the new implementation.
There is a work in progress to add warnings about this situation.
diff --git a/Documentation/livepatch/module-elf-format.txt b/Documentation/livepatch/module-elf-format.rst
index f21a5289a09c..2a591e6f8e6c 100644
--- a/Documentation/livepatch/module-elf-format.txt
+++ b/Documentation/livepatch/module-elf-format.rst
@@ -4,33 +4,21 @@ Livepatch module Elf format
This document outlines the Elf format requirements that livepatch modules must follow.
------------------
-Table of Contents
------------------
-0. Background and motivation
-1. Livepatch modinfo field
-2. Livepatch relocation sections
- 2.1 What are livepatch relocation sections?
- 2.2 Livepatch relocation section format
- 2.2.1 Required flags
- 2.2.2 Required name format
- 2.2.3 Example livepatch relocation section names
- 2.2.4 Example `readelf --sections` output
- 2.2.5 Example `readelf --relocs` output
-3. Livepatch symbols
- 3.1 What are livepatch symbols?
- 3.2 A livepatch module's symbol table
- 3.3 Livepatch symbol format
- 3.3.1 Required flags
- 3.3.2 Required name format
- 3.3.3 Example livepatch symbol names
- 3.3.4 Example `readelf --symbols` output
-4. Architecture-specific sections
-5. Symbol table and Elf section access
-
-----------------------------
-0. Background and motivation
-----------------------------
+
+.. Table of Contents
+
+ 1. Background and motivation
+ 2. Livepatch modinfo field
+ 3. Livepatch relocation sections
+ 3.1 Livepatch relocation section format
+ 4. Livepatch symbols
+ 4.1 A livepatch module's symbol table
+ 4.2 Livepatch symbol format
+ 5. Architecture-specific sections
+ 6. Symbol table and Elf section access
+
+1. Background and motivation
+============================
Formerly, livepatch required separate architecture-specific code to write
relocations. However, arch-specific code to write relocations already
@@ -52,8 +40,8 @@ relocation sections and symbols, which are described in this document. The
Elf constants used to mark livepatch symbols and relocation sections were
selected from OS-specific ranges according to the definitions from glibc.
-0.1 Why does livepatch need to write its own relocations?
----------------------------------------------------------
+Why does livepatch need to write its own relocations?
+-----------------------------------------------------
A typical livepatch module contains patched versions of functions that can
reference non-exported global symbols and non-included local symbols.
Relocations referencing these types of symbols cannot be left in as-is
relas reference are special livepatch symbols (see sections 3 and 4). The
arch-specific livepatch relocation code is replaced by a call to
apply_relocate_add().
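+
+As a purely hypothetical illustration (the function names below are made up),
+consider a patched function that calls a symbol which is local to the original
+object and therefore cannot be resolved by the module loader::
+
+  /* Lives in the original object (e.g. vmlinux) but is not exported. */
+  extern int do_something_internal(int arg);
+
+  /* New implementation shipped in the livepatch module. */
+  int livepatch_some_function(int arg)
+  {
+      /*
+       * This call compiles into a relocation against do_something_internal.
+       * The patch module carries that reference as a livepatch symbol
+       * (e.g. .klp.sym.vmlinux.do_something_internal,0) in a livepatch
+       * relocation section, and livepatch resolves it at patch time via
+       * apply_relocate_add().
+       */
+      return do_something_internal(arg) + 1;
+  }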
-================================
-PATCH MODULE FORMAT REQUIREMENTS
-================================
-
---------------------------
-1. Livepatch modinfo field
---------------------------
+2. Livepatch modinfo field
+==========================
Livepatch modules are required to have the "livepatch" modinfo attribute.
See the sample livepatch module in samples/livepatch/ for how this is done.
@@ -87,22 +70,23 @@ Livepatch modules can be identified by users by using the 'modinfo' command
and looking for the presence of the "livepatch" field. This field is also
used by the kernel module loader to identify livepatch modules.
-Example modinfo output:
------------------------
-% modinfo livepatch-meminfo.ko
-filename: livepatch-meminfo.ko
-livepatch: Y
-license: GPL
-depends:
-vermagic: 4.3.0+ SMP mod_unload
-
---------------------------------
-2. Livepatch relocation sections
---------------------------------
-
--------------------------------------------
-2.1 What are livepatch relocation sections?
--------------------------------------------
+Example:
+--------
+
+**Modinfo output:**
+
+::
+
+ % modinfo livepatch-meminfo.ko
+ filename: livepatch-meminfo.ko
+ livepatch: Y
+ license: GPL
+ depends:
+ vermagic: 4.3.0+ SMP mod_unload
+
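+In the patch module source this is normally just a macro invocation; a minimal
+sketch of the relevant lines::
+
+  #include <linux/module.h>
+
+  MODULE_DESCRIPTION("minimal livepatch module skeleton");
+  MODULE_LICENSE("GPL");
+  MODULE_INFO(livepatch, "Y");  /* emits the "livepatch: Y" modinfo field */
+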
+3. Livepatch relocation sections
+================================
+
A livepatch module manages its own Elf relocation sections to apply
relocations to modules as well as to the kernel (vmlinux) at the
appropriate time. For example, if a patch module patches a driver that is
@@ -127,12 +111,9 @@ Every symbol referenced by a rela in a livepatch relocation section is a
livepatch symbol. These must be resolved before livepatch can call
apply_relocate_add(). See Section 4 for more information.
----------------------------------------
-2.2 Livepatch relocation section format
----------------------------------------
+3.1 Livepatch relocation section format
+---------------------------------------
-2.2.1 Required flags
---------------------
Livepatch relocation sections must be marked with the SHF_RELA_LIVEPATCH
section flag. See include/uapi/linux/elf.h for the definition. The module
loader recognizes this flag and will avoid applying those relocation sections
@@ -140,28 +121,39 @@ at patch module load time. These sections must also be marked with SHF_ALLOC,
so that the module loader doesn't discard them on module load (i.e. they will
be copied into memory along with the other SHF_ALLOC sections).
-2.2.2 Required name format
---------------------------
-The name of a livepatch relocation section must conform to the following format:
+The name of a livepatch relocation section must conform to the following
+format::
-.klp.rela.objname.section_name
-^ ^^ ^ ^ ^
-|________||_____| |__________|
- [A] [B] [C]
+ .klp.rela.objname.section_name
+ ^ ^^ ^ ^ ^
+ |________||_____| |__________|
+ [A] [B] [C]
-[A] The relocation section name is prefixed with the string ".klp.rela."
-[B] The name of the object (i.e. "vmlinux" or name of module) to
- which the relocation section belongs follows immediately after the prefix.
-[C] The actual name of the section to which this relocation section applies.
+[A]
+ The relocation section name is prefixed with the string ".klp.rela."
-2.2.3 Example livepatch relocation section names:
--------------------------------------------------
-.klp.rela.ext4.text.ext4_attr_store
-.klp.rela.vmlinux.text.cmdline_proc_show
+[B]
+ The name of the object (i.e. "vmlinux" or name of module) to
+ which the relocation section belongs follows immediately after the prefix.
+
+[C]
+ The actual name of the section to which this relocation section applies.
+
+Examples:
+---------
+
+**Livepatch relocation section names:**
+
+::
+
+ .klp.rela.ext4.text.ext4_attr_store
+ .klp.rela.vmlinux.text.cmdline_proc_show
+
+**readelf --sections output for a patch
+module that patches vmlinux and modules 9p, btrfs, ext4:**
+
+::
-2.2.4 Example `readelf --sections` output for a patch
-module that patches vmlinux and modules 9p, btrfs, ext4:
---------------------------------------------------------
Section Headers:
[Nr] Name Type Address Off Size ES Flg Lk Inf Al
[ snip ]
@@ -175,31 +167,33 @@ module that patches vmlinux and modules 9p, btrfs, ext4:
[ snip ] ^ ^
| |
[*] [*]
-[*] Livepatch relocation sections are SHT_RELA sections but with a few special
-characteristics. Notice that they are marked SHF_ALLOC ("A") so that they will
-not be discarded when the module is loaded into memory, as well as with the
-SHF_RELA_LIVEPATCH flag ("o" - for OS-specific).
-
-2.2.5 Example `readelf --relocs` output for a patch module:
------------------------------------------------------------
-Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
- Offset Info Type Symbol's Value Symbol's Name + Addend
-000000000000001f 0000005e00000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.printk,0 - 4
-0000000000000028 0000003d0000000b R_X86_64_32S 0000000000000000 .klp.sym.btrfs.btrfs_ktype,0 + 0
-0000000000000036 0000003b00000002 R_X86_64_PC32 0000000000000000 .klp.sym.btrfs.can_modify_feature.isra.3,0 - 4
-000000000000004c 0000004900000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.snprintf,0 - 4
-[ snip ] ^
- |
- [*]
-[*] Every symbol referenced by a relocation is a livepatch symbol.
-
---------------------
-3. Livepatch symbols
---------------------
-
--------------------------------
-3.1 What are livepatch symbols?
--------------------------------
+
+[*]
+ Livepatch relocation sections are SHT_RELA sections but with a few special
+ characteristics. Notice that they are marked SHF_ALLOC ("A") so that they will
+ not be discarded when the module is loaded into memory, as well as with the
+ SHF_RELA_LIVEPATCH flag ("o" - for OS-specific).
+
+**readelf --relocs output for a patch module:**
+
+::
+
+ Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+ 000000000000001f 0000005e00000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.printk,0 - 4
+ 0000000000000028 0000003d0000000b R_X86_64_32S 0000000000000000 .klp.sym.btrfs.btrfs_ktype,0 + 0
+ 0000000000000036 0000003b00000002 R_X86_64_PC32 0000000000000000 .klp.sym.btrfs.can_modify_feature.isra.3,0 - 4
+ 000000000000004c 0000004900000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.snprintf,0 - 4
+ [ snip ] ^
+ |
+ [*]
+
+[*]
+ Every symbol referenced by a relocation is a livepatch symbol.
+
+4. Livepatch symbols
+====================
+
Livepatch symbols are symbols referred to by livepatch relocation sections.
These are symbols accessed from new versions of functions for patched
objects, whose addresses cannot be resolved by the module loader (because
@@ -219,9 +213,8 @@ loader can identify and ignore them. Livepatch modules keep these symbols
in their symbol tables, and the symbol table is made accessible through
module->symtab.
--------------------------------------
-3.2 A livepatch module's symbol table
--------------------------------------
+4.1 A livepatch module's symbol table
+-------------------------------------
Normally, a stripped down copy of a module's symbol table (containing only
"core" symbols) is made available through module->symtab (See layout_symtab()
in kernel/module.c). For livepatch modules, the symbol table copied into memory
@@ -231,71 +224,82 @@ relocation section refer to their respective symbols with their symbol indices,
and the original symbol indices (and thus the symtab ordering) must be
preserved in order for apply_relocate_add() to find the right symbol.
-For example, take this particular rela from a livepatch module:
-Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
- Offset Info Type Symbol's Value Symbol's Name + Addend
-000000000000001f 0000005e00000002 R_X86_64_PC32 0000000000000000 .klp.sym.vmlinux.printk,0 - 4
-
-This rela refers to the symbol '.klp.sym.vmlinux.printk,0', and the symbol index is encoded
-in 'Info'. Here its symbol index is 0x5e, which is 94 in decimal, which refers to the
-symbol index 94.
-And in this patch module's corresponding symbol table, symbol index 94 refers to that very symbol:
-[ snip ]
-94: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.printk,0
-[ snip ]
-
----------------------------
-3.3 Livepatch symbol format
----------------------------
-
-3.3.1 Required flags
---------------------
+For example, take this particular rela from a livepatch module::
+
+  Relocation section '.klp.rela.btrfs.text.btrfs_feature_attr_show' at offset 0x2ba0 contains 4 entries:
+      Offset             Info             Type               Symbol's Value  Symbol's Name + Addend
+  000000000000001f  0000005e00000002 R_X86_64_PC32          0000000000000000 .klp.sym.vmlinux.printk,0 - 4
+
+This rela refers to the symbol '.klp.sym.vmlinux.printk,0', and the symbol
+index is encoded in 'Info'. Here the symbol index is 0x5e, which is 94 in
+decimal, i.e. entry 94 in the patch module's symbol table. That entry refers
+to the very same symbol::
+
+  [ snip ]
+  94: 0000000000000000     0 NOTYPE  GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.printk,0
+  [ snip ]
+
+4.2 Livepatch symbol format
+---------------------------
+
Livepatch symbols must have their section index marked as SHN_LIVEPATCH, so
that the module loader can identify them and not attempt to resolve them.
See include/uapi/linux/elf.h for the actual definitions.
-3.3.2 Required name format
---------------------------
-Livepatch symbol names must conform to the following format:
-
-.klp.sym.objname.symbol_name,sympos
-^ ^^ ^ ^ ^ ^
-|_______||_____| |_________| |
- [A] [B] [C] [D]
-
-[A] The symbol name is prefixed with the string ".klp.sym."
-[B] The name of the object (i.e. "vmlinux" or name of module) to
- which the symbol belongs follows immediately after the prefix.
-[C] The actual name of the symbol.
-[D] The position of the symbol in the object (as according to kallsyms)
- This is used to differentiate duplicate symbols within the same
- object. The symbol position is expressed numerically (0, 1, 2...).
- The symbol position of a unique symbol is 0.
-
-3.3.3 Example livepatch symbol names:
--------------------------------------
-.klp.sym.vmlinux.snprintf,0
-.klp.sym.vmlinux.printk,0
-.klp.sym.btrfs.btrfs_ktype,0
-
-3.3.4 Example `readelf --symbols` output for a patch module:
-------------------------------------------------------------
-Symbol table '.symtab' contains 127 entries:
- Num: Value Size Type Bind Vis Ndx Name
- [ snip ]
- 73: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.snprintf,0
- 74: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.capable,0
- 75: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.find_next_bit,0
- 76: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.si_swapinfo,0
- [ snip ] ^
- |
- [*]
-[*] Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
- "OS" means OS-specific.
-
----------------------------------
-4. Architecture-specific sections
----------------------------------
+Livepatch symbol names must conform to the following format::
+
+ .klp.sym.objname.symbol_name,sympos
+ ^ ^^ ^ ^ ^ ^
+ |_______||_____| |_________| |
+ [A] [B] [C] [D]
+
+[A]
+ The symbol name is prefixed with the string ".klp.sym."
+
+[B]
+ The name of the object (i.e. "vmlinux" or name of module) to
+ which the symbol belongs follows immediately after the prefix.
+
+[C]
+ The actual name of the symbol.
+
+[D]
+ The position of the symbol in the object (as according to kallsyms)
+ This is used to differentiate duplicate symbols within the same
+ object. The symbol position is expressed numerically (0, 1, 2...).
+ The symbol position of a unique symbol is 0.
+
+Examples:
+---------
+
+**Livepatch symbol names:**
+
+::
+
+ .klp.sym.vmlinux.snprintf,0
+ .klp.sym.vmlinux.printk,0
+ .klp.sym.btrfs.btrfs_ktype,0
+
+**readelf --symbols output for a patch module:**
+
+::
+
+ Symbol table '.symtab' contains 127 entries:
+ Num: Value Size Type Bind Vis Ndx Name
+ [ snip ]
+ 73: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.snprintf,0
+ 74: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.capable,0
+ 75: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.find_next_bit,0
+ 76: 0000000000000000 0 NOTYPE GLOBAL DEFAULT OS [0xff20] .klp.sym.vmlinux.si_swapinfo,0
+ [ snip ] ^
+ |
+ [*]
+
+[*]
+ Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
+ "OS" means OS-specific.
+
+5. Architecture-specific sections
+=================================
Architectures may override arch_klp_init_object_loaded() to perform
additional arch-specific tasks when a target module loads, such as applying
arch-specific sections. On x86 for example, we must apply per-object
@@ -304,20 +308,19 @@ These sections must be prefixed with ".klp.arch.$objname." so that they can
be easily identified when iterating through a patch module's Elf sections
(See arch/x86/kernel/livepatch.c for a complete example).
---------------------------------------
-5. Symbol table and Elf section access
---------------------------------------
+6. Symbol table and Elf section access
+======================================
A livepatch module's symbol table is accessible through module->symtab.
Since apply_relocate_add() requires access to a module's section headers,
symbol table, and relocation section indices, Elf information is preserved for
livepatch modules and is made accessible by the module loader through
module->klp_info, which is a klp_modinfo struct. When a livepatch module loads,
-this struct is filled in by the module loader. Its fields are documented below:
-
-struct klp_modinfo {
- Elf_Ehdr hdr; /* Elf header */
- Elf_Shdr *sechdrs; /* Section header table */
- char *secstrings; /* String table for the section headers */
- unsigned int symndx; /* The symbol table section index */
-};
+this struct is filled in by the module loader. Its fields are documented below::
+
+ struct klp_modinfo {
+ Elf_Ehdr hdr; /* Elf header */
+ Elf_Shdr *sechdrs; /* Section header table */
+ char *secstrings; /* String table for the section headers */
+ unsigned int symndx; /* The symbol table section index */
+ };
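+
+For illustration only, the sketch below (not actual kernel code; the function
+name is made up) walks this preserved Elf information to find the livepatch
+relocation sections that target a given object before they would be handed to
+apply_relocate_add()::
+
+  #include <linux/module.h>
+  #include <linux/string.h>
+
+  static void find_klp_rela_sections(struct module *pmod, const char *objname)
+  {
+      struct klp_modinfo *info = pmod->klp_info;
+      unsigned int i;
+
+      for (i = 1; i < info->hdr.e_shnum; i++) {
+          Elf_Shdr *shdr = &info->sechdrs[i];
+          const char *secname = info->secstrings + shdr->sh_name;
+
+          /* Only sections flagged by the patch module are of interest. */
+          if (!(shdr->sh_flags & SHF_RELA_LIVEPATCH))
+              continue;
+
+          /* ".klp.rela.<objname>." selects the sections for this object. */
+          if (strncmp(secname, ".klp.rela.", 10) ||
+              strncmp(secname + 10, objname, strlen(objname)))
+              continue;
+
+          pr_info("%s: %s applies to %s\n", pmod->name, secname, objname);
+      }
+  }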
diff --git a/Documentation/livepatch/shadow-vars.txt b/Documentation/livepatch/shadow-vars.rst
index ecc09a7be5dd..c05715aeafa4 100644
--- a/Documentation/livepatch/shadow-vars.txt
+++ b/Documentation/livepatch/shadow-vars.rst
@@ -27,10 +27,13 @@ A hashtable references all shadow variables. These references are
stored and retrieved through a <obj, id> pair.
* The klp_shadow variable data structure encapsulates both tracking
-meta-data and shadow-data:
+ meta-data and shadow-data:
+
- meta-data
+
- obj - pointer to parent object
- id - data identifier
+
- data[] - storage for shadow data
It is important to note that the klp_shadow_alloc() and
@@ -47,31 +50,43 @@ to do actions that can be done only once when a new variable is allocated.
* klp_shadow_alloc() - allocate and add a new shadow variable
- search hashtable for <obj, id> pair
+
- if exists
+
- WARN and return NULL
+
- if <obj, id> doesn't already exist
+
- allocate a new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> to the global hashtable
* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
- search hashtable for <obj, id> pair
+
- if exists
+
- return existing shadow variable
+
- if <obj, id> doesn't already exist
+
- allocate a new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> pair to the global hashtable
* klp_shadow_free() - detach and free a <obj, id> shadow variable
- find and remove a <obj, id> reference from global hashtable
+
- if found
+
- call destructor function if defined
- free shadow variable
* klp_shadow_free_all() - detach and free all <*, id> shadow variables
- find and remove any <*, id> references from global hashtable
+
- if found
+
- call destructor function if defined
- free shadow variable
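+
+A compact sketch of the typical call pattern follows (assuming the prototypes
+from include/linux/livepatch.h; ``PATCH_ID`` and the attached counter are made
+up for illustration)::
+
+  #define PATCH_ID 42
+
+  /* somewhere after the parent object "obj" has been created */
+  int *cnt = klp_shadow_alloc(obj, PATCH_ID, sizeof(*cnt),
+                              GFP_KERNEL, NULL, NULL);
+
+  /* later, e.g. from a patched function */
+  cnt = klp_shadow_get(obj, PATCH_ID);
+  if (cnt)
+      (*cnt)++;
+
+  /* and when the parent object is about to be freed */
+  klp_shadow_free(obj, PATCH_ID, NULL);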
@@ -102,12 +117,12 @@ parent "goes live" (ie, any shadow variable get-API requests are made
for this <obj, id> pair.)
For commit 1d147bfa6429, when a parent sta_info structure is allocated,
-allocate a shadow copy of the ps_lock pointer, then initialize it:
+allocate a shadow copy of the ps_lock pointer, then initialize it::
-#define PS_LOCK 1
-struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
- const u8 *addr, gfp_t gfp)
-{
+ #define PS_LOCK 1
+ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr, gfp_t gfp)
+ {
struct sta_info *sta;
spinlock_t *ps_lock;
@@ -123,10 +138,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
...
When requiring a ps_lock, query the shadow variable API to retrieve one
-for a specific struct sta_info:
+for a specific struct sta_info::
-void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
-{
+ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ {
spinlock_t *ps_lock;
/* sync with ieee80211_tx_h_unicast_ps_buf */
@@ -136,10 +151,10 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
...
When the parent sta_info structure is freed, first free the shadow
-variable:
+variable::
-void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
-{
+ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ {
klp_shadow_free(sta, PS_LOCK, NULL);
kfree(sta);
...
@@ -155,19 +170,19 @@ these cases, the klp_shadow_get_or_alloc() call can be used to attach
shadow variables to parents already in-flight.
For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
-inside ieee80211_sta_ps_deliver_wakeup():
+inside ieee80211_sta_ps_deliver_wakeup()::
-int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
-{
+ int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
+ {
spinlock_t *lock = shadow_data;
spin_lock_init(lock);
return 0;
-}
+ }
-#define PS_LOCK 1
-void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
-{
+ #define PS_LOCK 1
+ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
+ {
spinlock_t *ps_lock;
/* sync with ieee80211_tx_h_unicast_ps_buf */
@@ -200,10 +215,12 @@ suggests how to handle the parent object.
=============
* https://github.com/dynup/kpatch
-The livepatch implementation is based on the kpatch version of shadow
-variables.
+
+ The livepatch implementation is based on the kpatch version of shadow
+ variables.
* http://files.mkgnu.net/files/dynamos/doc/papers/dynamos_eurosys_07.pdf
-Dynamic and Adaptive Updates of Non-Quiescent Subsystems in Commodity
-Operating System Kernels (Kritis Makris, Kyung Dong Ryu 2007) presented
-a datatype update technique called "shadow data structures".
+
+ Dynamic and Adaptive Updates of Non-Quiescent Subsystems in Commodity
+ Operating System Kernels (Kritis Makris, Kyung Dong Ryu 2007) presented
+ a datatype update technique called "shadow data structures".
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
index 0a222fc1d7ca..0301c25ff887 100644
--- a/Documentation/media/index.rst
+++ b/Documentation/media/index.rst
@@ -18,7 +18,7 @@ Linux Media Subsystem Documentation
v4l-drivers/index
cec-drivers/index
-.. only:: subproject
+.. only:: html and subproject
Indices
=======
diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst
index f930725e0d6b..05bba0b61748 100644
--- a/Documentation/media/kapi/mc-core.rst
+++ b/Documentation/media/kapi/mc-core.rst
@@ -259,6 +259,45 @@ Subsystems should facilitate link validation by providing subsystem specific
helper functions to provide easy access for commonly needed information, and
in the end provide a way to use driver-specific callbacks.
+Media Controller Device Allocator API
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When the media device belongs to more than one driver, the shared media
+device is allocated with the shared struct device as the key for look ups.
+
+The shared media device should stay in registered state until the last
+driver unregisters it. In addition, the media device should be released when
+all the references are released. Each driver gets a reference to the media
+device during probe, when it allocates the media device. If the media device is
+already allocated, the allocate API bumps up the refcount and returns the
+existing media device. The driver puts the reference back in its disconnect
+routine when it calls :c:func:`media_device_delete()`.
+
+The media device is unregistered and cleaned up from the kref put handler to
+ensure that the media device stays in registered state until the last driver
+unregisters the media device.
+
+**Driver Usage**
+
+Drivers should use the appropriate media-core routines to manage the shared
+media device lifetime, handling the two states:
+
+1. allocate -> register -> delete
+2. get reference to already registered device -> delete
+
+Call the :c:func:`media_device_delete()` routine to make sure the shared media
+device deletion is handled correctly.
+
+**driver probe:**
+
+- Call :c:func:`media_device_usb_allocate()` to allocate or get a reference.
+- Call :c:func:`media_device_register()`, if the media devnode isn't registered.
+
+**driver disconnect:**
+
+Call :c:func:`media_device_delete()` to free the media_device. Freeing is
+handled by the kref put handler.
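+
+A rough sketch of this flow for a hypothetical USB driver is shown below. The
+argument lists of the allocator helpers are simplified assumptions here; see
+include/media/media-dev-allocator.h for the real prototypes::
+
+  /* probe: allocate, or get a reference to, the shared media device */
+  mdev = media_device_usb_allocate(udev, KBUILD_MODNAME); /* args simplified */
+  if (!mdev)
+      return -ENOMEM;
+
+  if (!media_devnode_is_registered(mdev->devnode))
+      media_device_register(mdev);
+
+  /* disconnect: drop the reference; the kref put handler cleans up */
+  media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE); /* args simplified */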
+
+API Definitions
+^^^^^^^^^^^^^^^
+
.. kernel-doc:: include/media/media-device.h
.. kernel-doc:: include/media/media-devnode.h
@@ -266,3 +305,5 @@ in the end provide a way to use driver-specific callbacks.
.. kernel-doc:: include/media/media-entity.h
.. kernel-doc:: include/media/media-request.h
+
+.. kernel-doc:: include/media/media-dev-allocator.h
diff --git a/Documentation/media/lirc.h.rst.exceptions b/Documentation/media/lirc.h.rst.exceptions
index 7a8b8ff4f076..ac768d769113 100644
--- a/Documentation/media/lirc.h.rst.exceptions
+++ b/Documentation/media/lirc.h.rst.exceptions
@@ -63,6 +63,7 @@ ignore symbol RC_PROTO_IMON
ignore symbol RC_PROTO_RCMM12
ignore symbol RC_PROTO_RCMM24
ignore symbol RC_PROTO_RCMM32
+ignore symbol RC_PROTO_XBOX_DVD
# Undocumented macros
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index 1ad631e549fe..a74c82d95609 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -93,7 +93,7 @@ A queued request cannot be modified anymore.
.. caution::
For :ref:`memory-to-memory devices <mem2mem>` you can use requests only for
output buffers, not for capture buffers. Attempting to add a capture buffer
- to a request will result in an ``EACCES`` error.
+ to a request will result in an ``EBADR`` error.
If the request contains configurations for multiple entities, individual drivers
may synchronize so the requested pipeline's topology is applied before the
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 81ffdcb89057..1cbd9cde57f3 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -165,7 +165,7 @@ of appropriately sized buffers for each use case).
struct v4l2_buffer
==================
-.. tabularcolumns:: |p{2.8cm}|p{2.5cm}|p{1.3cm}|p{10.5cm}|
+.. tabularcolumns:: |p{2.8cm}|p{2.5cm}|p{1.6cm}|p{10.2cm}|
.. cssclass:: longtable
@@ -326,7 +326,7 @@ struct v4l2_buffer
Applications should not set ``V4L2_BUF_FLAG_REQUEST_FD`` for any ioctls
other than :ref:`VIDIOC_QBUF <VIDIOC_QBUF>`.
- If the device does not support requests, then ``EACCES`` will be returned.
+ If the device does not support requests, then ``EBADR`` will be returned.
If requests are supported but an invalid request file descriptor is
given, then ``EINVAL`` will be returned.
@@ -420,7 +420,7 @@ enum v4l2_buf_type
.. cssclass:: longtable
-.. tabularcolumns:: |p{7.2cm}|p{0.6cm}|p{9.7cm}|
+.. tabularcolumns:: |p{7.8cm}|p{0.6cm}|p{9.1cm}|
.. flat-table::
:header-rows: 0
@@ -482,7 +482,11 @@ enum v4l2_buf_type
Buffer Flags
============
-.. tabularcolumns:: |p{7.0cm}|p{2.2cm}|p{8.3cm}|
+.. raw:: latex
+
+ \small
+
+.. tabularcolumns:: |p{7.0cm}|p{2.1cm}|p{8.4cm}|
.. cssclass:: longtable
@@ -681,6 +685,9 @@ Buffer Flags
exposure of the frame has begun. This is only valid for the
``V4L2_BUF_TYPE_VIDEO_CAPTURE`` buffer type.
+.. raw:: latex
+
+ \normalsize
.. c:type:: v4l2_memory
@@ -688,7 +695,7 @@ Buffer Flags
enum v4l2_memory
================
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{5.0cm}|p{0.8cm}|p{11.7cm}|
.. flat-table::
:header-rows: 0
@@ -724,7 +731,7 @@ The :c:type:`v4l2_buffer_timecode` structure is designed to hold a
struct v4l2_timecode
--------------------
-.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
+.. tabularcolumns:: |p{1.4cm}|p{2.8cm}|p{12.3cm}|
.. flat-table::
:header-rows: 0
@@ -761,7 +768,7 @@ struct v4l2_timecode
Timecode Types
--------------
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{5.6cm}|p{0.8cm}|p{11.1cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
index c4e8fc620379..e122bbe3d799 100644
--- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
@@ -39,7 +39,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
colorspaces except for BT.2020 which uses limited range R'G'B'
quantization.
-.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
+.. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
.. c:type:: v4l2_colorspace
@@ -112,7 +112,7 @@ whole range, 0-255, dividing the angular value by 1.41. The enum
.. c:type:: v4l2_ycbcr_encoding
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+.. tabularcolumns:: |p{7.2cm}|p{10.3cm}|
.. flat-table:: V4L2 Y'CbCr Encodings
:header-rows: 1
diff --git a/Documentation/media/uapi/v4l/colorspaces.rst b/Documentation/media/uapi/v4l/colorspaces.rst
index c5a560f0c13d..4f6c82fa057f 100644
--- a/Documentation/media/uapi/v4l/colorspaces.rst
+++ b/Documentation/media/uapi/v4l/colorspaces.rst
@@ -56,9 +56,9 @@ The Y value in the CIE XYZ colorspace corresponds to luminance. Often
the CIE XYZ colorspace is transformed to the normalized CIE xyY
colorspace:
-x = X / (X + Y + Z)
+ x = X / (X + Y + Z)
-y = Y / (X + Y + Z)
+ y = Y / (X + Y + Z)
The x and y values are the chromaticity coordinates and can be used to
define a color without the luminance component Y. It is very confusing
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi.rst b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
index d6a707f0b24f..e06b03ca2ab2 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
@@ -106,7 +106,7 @@ VBI devices must implement both the :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` and
and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does.
:ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` is optional.
-.. tabularcolumns:: |p{2.4cm}|p{4.4cm}|p{10.7cm}|
+.. tabularcolumns:: |p{1.6cm}|p{4.2cm}|p{11.7cm}|
.. c:type:: v4l2_vbi_format
@@ -190,7 +190,7 @@ and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_FMT>` does
applications must set it to zero.
-.. tabularcolumns:: |p{4.0cm}|p{1.5cm}|p{12.0cm}|
+.. tabularcolumns:: |p{4.4cm}|p{1.5cm}|p{11.6cm}|
.. _vbifmt-flags:
diff --git a/Documentation/media/uapi/v4l/dev-rds.rst b/Documentation/media/uapi/v4l/dev-rds.rst
index 624d6f95b842..64a724ef58f5 100644
--- a/Documentation/media/uapi/v4l/dev-rds.rst
+++ b/Documentation/media/uapi/v4l/dev-rds.rst
@@ -146,7 +146,7 @@ RDS datastructures
.. _v4l2-rds-block-codes:
-.. tabularcolumns:: |p{5.6cm}|p{2.0cm}|p{1.5cm}|p{7.0cm}|
+.. tabularcolumns:: |p{6.4cm}|p{2.0cm}|p{1.2cm}|p{7.9cm}|
.. flat-table:: Block defines
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index 0aa6cb8a272b..e86346f66017 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -118,7 +118,7 @@ struct v4l2_sliced_vbi_format
\scriptsize
\setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{.75cm}|p{3.3cm}|p{3.4cm}|p{3.4cm}|p{3.4cm}|
+.. tabularcolumns:: |p{.85cm}|p{3.3cm}|p{4.4cm}|p{4.4cm}|p{4.4cm}|
.. cssclass:: longtable
@@ -223,7 +223,7 @@ Sliced VBI services
.. raw:: latex
- \footnotesize
+ \scriptsize
.. tabularcolumns:: |p{4.1cm}|p{1.1cm}|p{2.4cm}|p{2.0cm}|p{7.3cm}|
@@ -541,7 +541,7 @@ Magic Constants for struct v4l2_mpeg_vbi_fmt_ivtv magic field
structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
-------------------------------------------------
-.. tabularcolumns:: |p{4.9cm}|p{2.4cm}|p{10.2cm}|
+.. tabularcolumns:: |p{5.2cm}|p{2.4cm}|p{9.9cm}|
.. flat-table::
:header-rows: 0
@@ -561,12 +561,12 @@ structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
::
- linemask[0] b0: line 6 first field
- linemask[0] b17: line 23 first field
- linemask[0] b18: line 6 second field
- linemask[0] b31: line 19 second field
- linemask[1] b0: line 20 second field
- linemask[1] b3: line 23 second field
+ linemask[0] b0: line 6 first field
+ linemask[0] b17: line 23 first field
+ linemask[0] b18: line 6 second field
+ linemask[0] b31: line 19 second field
+ linemask[1] b0: line 20 second field
+ linemask[1] b3: line 23 second field
linemask[1] b4-b31: unused and set to 0
* - struct
:c:type:`v4l2_mpeg_vbi_itv0_line`
@@ -590,7 +590,7 @@ structs v4l2_mpeg_vbi_itv0 and v4l2_mpeg_vbi_ITV0
struct v4l2_mpeg_vbi_ITV0
-------------------------
-.. tabularcolumns:: |p{4.9cm}|p{4.4cm}|p{8.2cm}|
+.. tabularcolumns:: |p{5.2cm}|p{2.4cm}|p{9.9cm}|
.. flat-table::
:header-rows: 0
@@ -635,7 +635,7 @@ struct v4l2_mpeg_vbi_itv0_line
Line Identifiers for struct v4l2_mpeg_vbi_itv0_line id field
------------------------------------------------------------
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{7.0cm}|p{1.8cm}|p{8.7cm}|
.. flat-table::
:header-rows: 1
diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst
index 2c2768c7343b..029bb2d9928a 100644
--- a/Documentation/media/uapi/v4l/dev-subdev.rst
+++ b/Documentation/media/uapi/v4l/dev-subdev.rst
@@ -211,7 +211,7 @@ list entity names and pad numbers).
.. raw:: latex
- \tiny
+ \scriptsize
.. tabularcolumns:: |p{2.0cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|p{2.3cm}|
@@ -223,40 +223,80 @@ list entity names and pad numbers).
:widths: 5 5 5 5 5 5 5
* -
- - Sensor/0 format
- - Frontend/0 format
- - Frontend/1 format
- - Scaler/0 format
- - Scaler/0 compose selection rectangle
- - Scaler/1 format
+ - Sensor/0
+
+ format
+ - Frontend/0
+
+ format
+ - Frontend/1
+
+ format
+ - Scaler/0
+
+ format
+ - Scaler/0
+
+ compose selection rectangle
+ - Scaler/1
+
+ format
* - Initial state
- - 2048x1536/SGRBG8_1X8
+ - 2048x1536
+
+ SGRBG8_1X8
- (default)
- (default)
- (default)
- (default)
- (default)
* - Configure frontend sink format
- - 2048x1536/SGRBG8_1X8
- - *2048x1536/SGRBG8_1X8*
- - *2046x1534/SGRBG8_1X8*
+ - 2048x1536
+
+ SGRBG8_1X8
+ - *2048x1536*
+
+ *SGRBG8_1X8*
+ - *2046x1534*
+
+ *SGRBG8_1X8*
- (default)
- (default)
- (default)
* - Configure scaler sink format
- - 2048x1536/SGRBG8_1X8
- - 2048x1536/SGRBG8_1X8
- - 2046x1534/SGRBG8_1X8
- - *2046x1534/SGRBG8_1X8*
+ - 2048x1536
+
+ SGRBG8_1X8
+ - 2048x1536
+
+ SGRBG8_1X8
+ - 2046x1534
+
+ SGRBG8_1X8
+ - *2046x1534*
+
+ *SGRBG8_1X8*
- *0,0/2046x1534*
- - *2046x1534/SGRBG8_1X8*
+ - *2046x1534*
+
+ *SGRBG8_1X8*
* - Configure scaler sink compose selection
- - 2048x1536/SGRBG8_1X8
- - 2048x1536/SGRBG8_1X8
- - 2046x1534/SGRBG8_1X8
- - 2046x1534/SGRBG8_1X8
+ - 2048x1536
+
+ SGRBG8_1X8
+ - 2048x1536
+
+ SGRBG8_1X8
+ - 2046x1534
+
+ SGRBG8_1X8
+ - 2046x1534
+
+ SGRBG8_1X8
- *0,0/1280x960*
- - *1280x960/SGRBG8_1X8*
+ - *1280x960*
+
+ *SGRBG8_1X8*
.. raw:: latex
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-camera.rst b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst
index d3a553cd86c9..51c1d5c9eb00 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-camera.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst
@@ -88,7 +88,7 @@ enum v4l2_exposure_metering -
Determines how the camera measures the amount of light available for
the frame exposure. Possible values are:
-.. tabularcolumns:: |p{8.5cm}|p{9.0cm}|
+.. tabularcolumns:: |p{8.7cm}|p{8.8cm}|
.. flat-table::
:header-rows: 0
@@ -180,7 +180,7 @@ enum v4l2_exposure_metering -
control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS``
control value.
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+.. tabularcolumns:: |p{6.7cm}|p{10.8cm}|
.. flat-table::
:header-rows: 0
@@ -206,7 +206,7 @@ enum v4l2_exposure_metering -
enum v4l2_auto_focus_range -
Determines auto focus distance range for which lens may be adjusted.
-.. tabularcolumns:: |p{6.5cm}|p{11.0cm}|
+.. tabularcolumns:: |p{6.8cm}|p{10.7cm}|
.. flat-table::
:header-rows: 0
@@ -281,7 +281,7 @@ enum v4l2_auto_n_preset_white_balance -
representation. The following white balance presets are listed in
order of increasing color temperature.
-.. tabularcolumns:: |p{7.0 cm}|p{10.5cm}|
+.. tabularcolumns:: |p{7.2 cm}|p{10.3cm}|
.. flat-table::
:header-rows: 0
@@ -387,7 +387,11 @@ enum v4l2_scene_mode -
to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related
controls are accessible. The following scene programs are defined:
-.. tabularcolumns:: |p{6.0cm}|p{11.5cm}|
+.. raw:: latex
+
+ \small
+
+.. tabularcolumns:: |p{5.9cm}|p{11.5cm}|
.. flat-table::
:header-rows: 0
@@ -459,6 +463,9 @@ enum v4l2_scene_mode -
may be switched to close-up mode and this setting may also involve
some lens-distortion correction.
+.. raw:: latex
+
+ \normalsize
``V4L2_CID_3A_LOCK (bitmask)``
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
index c97fb7923be5..4a8446203085 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst
@@ -105,7 +105,7 @@ enum v4l2_mpeg_stream_vbi_fmt -
-.. tabularcolumns:: |p{6 cm}|p{11.5cm}|
+.. tabularcolumns:: |p{6.6 cm}|p{10.9cm}|
.. flat-table::
:header-rows: 0
@@ -477,7 +477,7 @@ enum v4l2_mpeg_audio_dec_playback -
-.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+.. tabularcolumns:: |p{9.8cm}|p{7.7cm}|
.. flat-table::
:header-rows: 0
@@ -888,7 +888,7 @@ enum v4l2_mpeg_video_multi_slice_mode -
-.. tabularcolumns:: |p{8.7cm}|p{8.8cm}|
+.. tabularcolumns:: |p{9.6cm}|p{7.9cm}|
.. flat-table::
:header-rows: 0
@@ -923,9 +923,11 @@ enum v4l2_mpeg_video_multi_slice_mode -
enum v4l2_mpeg_video_h264_loop_filter_mode -
Loop filter mode for H264 encoder. Possible values are:
+.. raw:: latex
+
+    \small
+
-.. tabularcolumns:: |p{14.0cm}|p{3.5cm}|
+.. tabularcolumns:: |p{13.6cm}|p{3.9cm}|
.. flat-table::
:header-rows: 0
@@ -938,6 +940,9 @@ enum v4l2_mpeg_video_h264_loop_filter_mode -
* - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY``
- Loop filter is disabled at the slice boundary.
+.. raw:: latex
+
+ \normalsize
``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)``
@@ -964,6 +969,8 @@ enum v4l2_mpeg_video_h264_entropy_mode -
encoder. Possible values are:
+.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+
.. flat-table::
:header-rows: 0
@@ -1048,6 +1055,30 @@ enum v4l2_mpeg_video_h264_entropy_mode -
Quantization parameter for an B frame for H264. Valid range: from 0
to 51.
+``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the H264 I frame to limit I frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the H264 I frame to limit I frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (integer)``
+ Minimum quantization parameter for the H264 P frame to limit P frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
+``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (integer)``
+ Maximum quantization parameter for the H264 P frame to limit P frame
+ quality to a range. Valid range: from 0 to 51. If
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
+ should be chosen to meet both requirements.
+
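+For instance, an application could clamp I-frame quality through the regular
+control interface with these new controls (a minimal sketch: ``fd`` is an
+already opened video device and ``<stdio.h>``, ``<sys/ioctl.h>`` and
+``<linux/videodev2.h>`` are assumed to be included)::
+
+  struct v4l2_control ctrl = {
+      .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP,
+      .value = 18,
+  };
+
+  if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
+      perror("VIDIOC_S_CTRL");
+
+  ctrl.id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP;
+  ctrl.value = 38;
+  if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
+      perror("VIDIOC_S_CTRL");
+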
``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)``
Quantization parameter for an I frame for MPEG4. Valid range: from 1
to 31.
@@ -1129,7 +1160,9 @@ enum v4l2_mpeg_video_header_mode -
it returned together with the first frame. Applicable to encoders.
Possible values are:
+.. raw:: latex
+
+    \small
+
.. tabularcolumns:: |p{10.3cm}|p{7.2cm}|
@@ -1143,6 +1176,9 @@ enum v4l2_mpeg_video_header_mode -
- The stream header is returned together with the first encoded
frame.
+.. raw:: latex
+
+ \normalsize
``V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (boolean)``
@@ -1181,6 +1217,10 @@ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type -
Frame packing arrangement type for H264 SEI. Applicable to the H264
encoder. Possible values are:
+.. raw:: latex
+
+ \small
+
.. tabularcolumns:: |p{12cm}|p{5.5cm}|
.. flat-table::
@@ -1200,6 +1240,10 @@ enum v4l2_mpeg_video_h264_sei_fp_arrangement_type -
* - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL``
- One view per frame.
+.. raw:: latex
+
+ \normalsize
+
``V4L2_CID_MPEG_VIDEO_H264_FMO (boolean)``
@@ -1217,6 +1261,10 @@ enum v4l2_mpeg_video_h264_fmo_map_type -
patterns of macroblocks. Applicable to the H264 encoder. Possible
values are:
+.. raw:: latex
+
+ \small
+
.. tabularcolumns:: |p{12.5cm}|p{5.0cm}|
.. flat-table::
@@ -1240,6 +1288,10 @@ enum v4l2_mpeg_video_h264_fmo_map_type -
* - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT``
- User defined map type.
+.. raw:: latex
+
+ \normalsize
+
``V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (integer)``
@@ -1361,6 +1413,8 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
.. cssclass:: longtable
+.. tabularcolumns:: |p{5.8cm}|p{4.8cm}|p{6.6cm}|
+
.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params
:header-rows: 0
:stub-columns: 0
@@ -1402,6 +1456,8 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
.. cssclass:: longtable
+.. tabularcolumns:: |p{1.5cm}|p{6.3cm}|p{9.4cm}|
+
.. flat-table:: struct v4l2_mpeg2_sequence
:header-rows: 0
:stub-columns: 0
@@ -1433,6 +1489,8 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
.. cssclass:: longtable
+.. tabularcolumns:: |p{1.5cm}|p{6.3cm}|p{9.4cm}|
+
.. flat-table:: struct v4l2_mpeg2_picture
:header-rows: 0
:stub-columns: 0
@@ -1492,6 +1550,12 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
.. cssclass:: longtable
+.. tabularcolumns:: |p{1.2cm}|p{8.0cm}|p{7.4cm}|
+
+.. raw:: latex
+
+ \small
+
.. flat-table:: struct v4l2_ctrl_mpeg2_quantization
:header-rows: 0
:stub-columns: 0
@@ -1537,6 +1601,19 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
non-intra-coded frames, in zigzag scanning order. Only relevant for
non-4:2:0 YUV formats.
+``V4L2_CID_FWHT_I_FRAME_QP (integer)``
+ Quantization parameter for an I frame for FWHT. Valid range: from 1
+ to 31.
+
+``V4L2_CID_FWHT_P_FRAME_QP (integer)``
+ Quantization parameter for a P frame for FWHT. Valid range: from 1
+ to 31.
+
+.. raw:: latex
+
+ \normalsize
+
+
MFC 5.1 MPEG Controls
=====================
@@ -1644,7 +1721,11 @@ enum v4l2_mpeg_mfc51_video_frame_skip_mode -
are:
-.. tabularcolumns:: |p{9.0cm}|p{8.5cm}|
+.. tabularcolumns:: |p{9.2cm}|p{8.3cm}|
+
+.. raw:: latex
+
+ \small
.. flat-table::
:header-rows: 0
@@ -1659,7 +1740,9 @@ enum v4l2_mpeg_mfc51_video_frame_skip_mode -
- Frame skip mode enabled and buffer limit is set by the VBV
(MPEG1/2/4) or CPB (H264) buffer size control.
+.. raw:: latex
+
+    \normalsize
+
``V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (integer)``
Enable rate-control with fixed target bit. If this setting is
@@ -1682,7 +1765,7 @@ enum v4l2_mpeg_mfc51_video_force_frame_type -
Force a frame type for the next queued buffer. Applicable to
encoders. Possible values are:
-
+.. tabularcolumns:: |p{9.5cm}|p{8.0cm}|
.. flat-table::
:header-rows: 0
@@ -1696,6 +1779,125 @@ enum v4l2_mpeg_mfc51_video_force_frame_type -
- Force a non-coded frame.
+.. _v4l2-mpeg-fwht:
+
+``V4L2_CID_MPEG_VIDEO_FWHT_PARAMS (struct)``
+ Specifies the fwht parameters (as extracted from the bitstream) for the
+ associated FWHT data. This includes the necessary parameters for
+ configuring a stateless hardware decoding pipeline for FWHT.
+
+ .. note::
+
+ This compound control is not yet part of the public kernel API and
+ it is expected to change.
+
+.. c:type:: v4l2_ctrl_fwht_params
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{1.4cm}|p{4.3cm}|p{11.8cm}|
+
+.. flat-table:: struct v4l2_ctrl_fwht_params
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 1 2
+
+ * - __u64
+ - ``backward_ref_ts``
+ - Timestamp of the V4L2 capture buffer to use as backward reference, used
+ with P-coded frames. The timestamp refers to the
+ ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the
+ :c:func:`v4l2_timeval_to_ns()` function to convert the struct
+ :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64.
+ * - __u32
+ - ``version``
+ - The version of the codec
+ * - __u32
+ - ``width``
+ - The width of the frame
+ * - __u32
+ - ``height``
+ - The height of the frame
+ * - __u32
+ - ``flags``
+ - The flags of the frame, see :ref:`fwht-flags`.
+ * - __u32
+ - ``colorspace``
+ - The colorspace of the frame, from enum :c:type:`v4l2_colorspace`.
+ * - __u32
+ - ``xfer_func``
+ - The transfer function, from enum :c:type:`v4l2_xfer_func`.
+ * - __u32
+ - ``ycbcr_enc``
+ - The Y'CbCr encoding, from enum :c:type:`v4l2_ycbcr_encoding`.
+ * - __u32
+ - ``quantization``
+ - The quantization range, from enum :c:type:`v4l2_quantization`.
+
+
+
+.. _fwht-flags:
+
+FWHT Flags
+============
+
+.. cssclass:: longtable
+
+.. tabularcolumns:: |p{6.8cm}|p{2.4cm}|p{8.3cm}|
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 4
+
+ * - ``FWHT_FL_IS_INTERLACED``
+ - 0x00000001
+ - Set if this is an interlaced format
+ * - ``FWHT_FL_IS_BOTTOM_FIRST``
+ - 0x00000002
+ - Set if this is a bottom-first (NTSC) interlaced format
+ * - ``FWHT_FL_IS_ALTERNATE``
+ - 0x00000004
+ - Set if each 'frame' contains just one field
+ * - ``FWHT_FL_IS_BOTTOM_FIELD``
+ - 0x00000008
+ - If FWHT_FL_IS_ALTERNATE was set, then this is set if this 'frame' is the
+ bottom field, else it is the top field.
+ * - ``FWHT_FL_LUMA_IS_UNCOMPRESSED``
+ - 0x00000010
+ - Set if the luma plane is uncompressed
+ * - ``FWHT_FL_CB_IS_UNCOMPRESSED``
+ - 0x00000020
+ - Set if the cb plane is uncompressed
+ * - ``FWHT_FL_CR_IS_UNCOMPRESSED``
+ - 0x00000040
+ - Set if the cr plane is uncompressed
+ * - ``FWHT_FL_CHROMA_FULL_HEIGHT``
+ - 0x00000080
+ - Set if the chroma plane has the same height as the luma plane,
+ else the chroma plane is half the height of the luma plane
+ * - ``FWHT_FL_CHROMA_FULL_WIDTH``
+ - 0x00000100
+ - Set if the chroma plane has the same width as the luma plane,
+ else the chroma plane is half the width of the luma plane
+ * - ``FWHT_FL_ALPHA_IS_UNCOMPRESSED``
+ - 0x00000200
+ - Set if the alpha plane is uncompressed
+ * - ``FWHT_FL_I_FRAME``
+ - 0x00000400
+ - Set if this is an I-frame
+ * - ``FWHT_FL_COMPONENTS_NUM_MSK``
+ - 0x00070000
+ - A 4-values flag - the number of components - 1
+ * - ``FWHT_FL_PIXENC_YUV``
+ - 0x00080000
+ - Set if the pixel encoding is YUV
+ * - ``FWHT_FL_PIXENC_RGB``
+ - 0x00100000
+ - Set if the pixel encoding is RGB
+ * - ``FWHT_FL_PIXENC_HSV``
+ - 0x00180000
+ - Set if the pixel encoding is HSV
CX2341x MPEG Controls
@@ -1745,9 +1947,11 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type -
Select the algorithm to use for the Luma Spatial Filter (default
``1D_HOR``). Possible values:
+.. tabularcolumns:: |p{14.5cm}|p{3.0cm}|
+.. raw:: latex
-.. tabularcolumns:: |p{14.5cm}|p{3.0cm}|
+ \small
.. flat-table::
:header-rows: 0
@@ -1764,6 +1968,10 @@ enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type -
* - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE``
- Two-dimensional symmetrical non-separable
+.. raw:: latex
+
+ \normalsize
+
.. _chroma-spatial-filter-type:
@@ -1776,6 +1984,7 @@ enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type -
``1D_HOR``). Possible values are:
+.. tabularcolumns:: |p{14.0cm}|p{3.5cm}|
.. flat-table::
:header-rows: 0
@@ -1918,6 +2127,10 @@ enum v4l2_vp8_num_ref_frames -
.. tabularcolumns:: |p{7.9cm}|p{9.6cm}|
+.. raw:: latex
+
+ \small
+
.. flat-table::
:header-rows: 0
:stub-columns: 0
@@ -1932,6 +2145,10 @@ enum v4l2_vp8_num_ref_frames -
- The last encoded frame, the golden frame and the altref frame will
be searched.
+.. raw:: latex
+
+ \normalsize
+
``V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (integer)``
@@ -1961,7 +2178,7 @@ enum v4l2_vp8_golden_frame_sel -
.. raw:: latex
- \footnotesize
+ \scriptsize
.. tabularcolumns:: |p{9.0cm}|p{8.0cm}|
@@ -2283,7 +2500,7 @@ enum v4l2_mpeg_video_hevc_loop_filter_mode -
\footnotesize
-.. tabularcolumns:: |p{10.7cm}|p{6.3cm}|
+.. tabularcolumns:: |p{12.1cm}|p{5.4cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-detect.rst b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst
index 8a45ce642829..80981d0cff42 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-detect.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst
@@ -30,7 +30,7 @@ Detect Control IDs
``V4L2_CID_DETECT_MD_MODE (menu)``
Sets the motion detection mode.
-.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
+.. tabularcolumns:: |p{7.7cm}|p{9.8cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-dv.rst b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst
index 57edf211875c..5c70ac98f710 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-dv.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst
@@ -106,7 +106,7 @@ enum v4l2_dv_it_content_type -
or an analog source. The enum v4l2_dv_it_content_type defines
the possible content types:
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+.. tabularcolumns:: |p{7.3cm}|p{10.4cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
index 5f30791c35b5..eff056b17167 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst
@@ -87,7 +87,7 @@ Flash Control IDs
``V4L2_CID_FLASH_STROBE_SOURCE (menu)``
Defines the source of the flash LED strobe.
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
.. flat-table::
:header-rows: 0
@@ -146,7 +146,7 @@ Flash Control IDs
an effect is chip dependent. Reading the faults resets the control
and returns the chip to a usable state if possible.
-.. tabularcolumns:: |p{8.0cm}|p{9.5cm}|
+.. tabularcolumns:: |p{8.4cm}|p{9.1cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst
index cf9cd8a9f9b4..60ce3f949319 100644
--- a/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst
+++ b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst
@@ -37,7 +37,7 @@ JPEG Control IDs
how Cb and Cr components are downsampled after converting an input
image from RGB to Y'CbCr color space.
-.. tabularcolumns:: |p{7.0cm}|p{10.5cm}|
+.. tabularcolumns:: |p{7.5cm}|p{10.0cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/field-order.rst b/Documentation/media/uapi/v4l/field-order.rst
index 8415268d439c..d640e922a974 100644
--- a/Documentation/media/uapi/v4l/field-order.rst
+++ b/Documentation/media/uapi/v4l/field-order.rst
@@ -64,7 +64,9 @@ enum v4l2_field
.. c:type:: v4l2_field
-.. tabularcolumns:: |p{6.6cm}|p{2.2cm}|p{8.7cm}|
+.. tabularcolumns:: |p{5.8cm}|p{0.6cm}|p{11.1cm}|
+
+.. cssclass:: longtable
.. flat-table::
:header-rows: 0
@@ -73,12 +75,11 @@ enum v4l2_field
* - ``V4L2_FIELD_ANY``
- 0
- - Applications request this field order when any one of the
- ``V4L2_FIELD_NONE``, ``V4L2_FIELD_TOP``, ``V4L2_FIELD_BOTTOM``, or
- ``V4L2_FIELD_INTERLACED`` formats is acceptable. Drivers choose
- depending on hardware capabilities or e. g. the requested image
- size, and return the actual field order. Drivers must never return
- ``V4L2_FIELD_ANY``. If multiple field orders are possible the
+ - Applications request this field order when any field format
+ is acceptable. Drivers choose depending on hardware capabilities or
+ e.g. the requested image size, and return the actual field order.
+ Drivers must never return ``V4L2_FIELD_ANY``.
+ If multiple field orders are possible the
driver must choose one of the possible field orders during
:ref:`VIDIOC_S_FMT <VIDIOC_G_FMT>` or
:ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>`. struct
@@ -86,9 +87,8 @@ enum v4l2_field
``V4L2_FIELD_ANY``.
* - ``V4L2_FIELD_NONE``
- 1
- - Images are in progressive format, not interlaced. The driver may
- also indicate this order when it cannot distinguish between
- ``V4L2_FIELD_TOP`` and ``V4L2_FIELD_BOTTOM``.
+ - Images are in progressive (frame-based) format, not interlaced
+ (field-based).
* - ``V4L2_FIELD_TOP``
- 2
- Images consist of the top (aka odd) field only.
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index 2675bef3eefe..6c961cfb74da 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -125,3 +125,9 @@ Compressed Formats
- Video elementary stream using a codec based on the Fast Walsh Hadamard
Transform. This codec is implemented by the vicodec ('Virtual Codec')
driver. See the codec-fwht.h header for more details.
+ * .. _V4L2-PIX-FMT-FWHT-STATELESS:
+
+ - ``V4L2_PIX_FMT_FWHT_STATELESS``
+ - 'SFWH'
+ - Same format as V4L2_PIX_FMT_FWHT but requires stateless codec implementation.
+ See the :ref:`associated Codec Control IDs <v4l2-mpeg-fwht>`.
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst b/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
index 862e1f327150..87e8fd7d5d02 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
@@ -36,13 +36,16 @@ per frame, therefore their headers cannot be larger than 255 bytes.
Below are proprietary Microsoft style metadata types, used by D4xx cameras,
where all fields are in little endian order:
+.. tabularcolumns:: |p{5.0cm}|p{12.5cm}|
+
+
.. flat-table:: D4xx metadata
- :widths: 1 4
+ :widths: 1 2
:header-rows: 1
:stub-columns: 0
- * - Field
- - Description
+ * - **Field**
+ - **Description**
* - :cspan:`1` *Depth Control*
* - __u32 ID
- 0x80000000
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
index 2ebccdcca95d..d1a341af9c48 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
@@ -41,6 +41,10 @@ The Hue position **m** (0 - 5) of the bucket in the matrix depends on
how the HGT Hue areas are configured. There are 6 user configurable Hue
Areas which can be configured to cover overlapping Hue values:
+.. raw:: latex
+
+ \small
+
::
Area 0 Area 1 Area 2 Area 3 Area 4 Area 5
@@ -53,6 +57,11 @@ Areas which can be configured to cover overlapping Hue values:
5U 0L 0U 1L 1U 2L 2U 3L 3U 4L 4U 5L 5U 0L
<0..............................Hue Value............................255>
+
+.. raw:: latex
+
+ \normalsize
+
When two consecutive areas don't overlap (n+1L is equal to nU) the boundary
value is considered as part of the lower area.
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
index 38b1895a509f..dfc4a8367b3d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
@@ -31,7 +31,7 @@ The values are packed in 24 or 32 bit formats.
\tiny
\setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{2.0cm}|p{0.54cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+.. tabularcolumns:: |p{2.6cm}|p{0.8cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _packed-hsv-formats:
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
index 6b3781c04dd5..738bb14c0ee2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
@@ -27,7 +27,7 @@ next to each other in memory.
\tiny
\setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{2.3cm}|p{1.6cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+.. tabularcolumns:: |p{2.8cm}|p{2.0cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _rgb-formats:
@@ -139,6 +139,144 @@ next to each other in memory.
- r\ :sub:`1`
- r\ :sub:`0`
-
+ * .. _V4L2-PIX-FMT-RGBA444:
+
+ - ``V4L2_PIX_FMT_RGBA444``
+ - 'RA12'
+
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-RGBX444:
+
+ - ``V4L2_PIX_FMT_RGBX444``
+ - 'RX12'
+
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ -
+ -
+ -
+ -
+
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-ABGR444:
+
+ - ``V4L2_PIX_FMT_ABGR444``
+ - 'AB12'
+
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-XBGR444:
+
+ - ``V4L2_PIX_FMT_XBGR444``
+ - 'XB12'
+
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ -
+ -
+ -
+ -
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-BGRA444:
+
+ - ``V4L2_PIX_FMT_BGRA444``
+ - 'BA12'
+
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ -
+ * .. _V4L2-PIX-FMT-BGRX444:
+
+ - ``V4L2_PIX_FMT_BGRX444``
+ - 'BX12'
+
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ -
+ -
+ -
+ -
+
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ -
* .. _V4L2-PIX-FMT-ARGB555:
- ``V4L2_PIX_FMT_ARGB555``
@@ -185,6 +323,144 @@ next to each other in memory.
- g\ :sub:`4`
- g\ :sub:`3`
-
+ * .. _V4L2-PIX-FMT-RGBA555:
+
+ - ``V4L2_PIX_FMT_RGBA555``
+ - 'RA15'
+
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - a
+
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ -
+ * .. _V4L2-PIX-FMT-RGBX555:
+
+ - ``V4L2_PIX_FMT_RGBX555``
+ - 'RX15'
+
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ -
+
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ -
+ * .. _V4L2-PIX-FMT-ABGR555:
+
+ - ``V4L2_PIX_FMT_ABGR555``
+ - 'AB15'
+
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ - a
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ -
+ * .. _V4L2-PIX-FMT-XBGR555:
+
+ - ``V4L2_PIX_FMT_XBGR555``
+ - 'XB15'
+
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ -
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ -
+ * .. _V4L2-PIX-FMT-BGRA555:
+
+ - ``V4L2_PIX_FMT_BGRA555``
+ - 'BA15'
+
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ - a
+
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ -
+ * .. _V4L2-PIX-FMT-BGRX555:
+
+ - ``V4L2_PIX_FMT_BGRX555``
+ - 'BX15'
+
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ -
+
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ -
* .. _V4L2-PIX-FMT-RGB565:
- ``V4L2_PIX_FMT_RGB565``
@@ -461,6 +737,166 @@ next to each other in memory.
-
-
-
+ * .. _V4L2-PIX-FMT-BGRA32:
+
+ - ``V4L2_PIX_FMT_BGRA32``
+ - 'RA24'
+
+ - a\ :sub:`7`
+ - a\ :sub:`6`
+ - a\ :sub:`5`
+ - a\ :sub:`4`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * .. _V4L2-PIX-FMT-BGRX32:
+
+ - ``V4L2_PIX_FMT_BGRX32``
+ - 'RX24'
+
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+ * .. _V4L2-PIX-FMT-RGBA32:
+
+ - ``V4L2_PIX_FMT_RGBA32``
+ - 'AB24'
+
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+ - a\ :sub:`7`
+ - a\ :sub:`6`
+ - a\ :sub:`5`
+ - a\ :sub:`4`
+ - a\ :sub:`3`
+ - a\ :sub:`2`
+ - a\ :sub:`1`
+ - a\ :sub:`0`
+ * .. _V4L2-PIX-FMT-RGBX32:
+
+ - ``V4L2_PIX_FMT_RGBX32``
+ - 'XB24'
+
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
+
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
* .. _V4L2-PIX-FMT-ARGB32:
- ``V4L2_PIX_FMT_ARGB32``
@@ -656,7 +1092,7 @@ either the corresponding ARGB or XRGB format, depending on the driver.
\tiny
\setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{2.2cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+.. tabularcolumns:: |p{2.6cm}|p{0.70cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _rgb-formats-deprecated:
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
index 7fcee1c11ac4..41b60fae703a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
@@ -28,7 +28,7 @@ component of each pixel in one 16 or 32 bit word.
.. _packed-yuv-formats:
-.. tabularcolumns:: |p{2.0cm}|p{0.67cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|p{0.29cm}|
+.. tabularcolumns:: |p{2.5cm}|p{0.69cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|p{0.31cm}|
.. flat-table:: Packed YUV Image Formats
:header-rows: 2
@@ -44,6 +44,7 @@ component of each pixel in one 16 or 32 bit word.
- :cspan:`7` Byte 2
- :cspan:`7` Byte 3
+
* -
-
- 7
@@ -81,6 +82,7 @@ component of each pixel in one 16 or 32 bit word.
- 2
- 1
- 0
+
* .. _V4L2-PIX-FMT-YUV444:
- ``V4L2_PIX_FMT_YUV444``
@@ -103,7 +105,9 @@ component of each pixel in one 16 or 32 bit word.
- Y'\ :sub:`2`
- Y'\ :sub:`1`
- Y'\ :sub:`0`
- -
+
+ - :cspan:`15`
+
* .. _V4L2-PIX-FMT-YUV555:
- ``V4L2_PIX_FMT_YUV555``
@@ -126,7 +130,8 @@ component of each pixel in one 16 or 32 bit word.
- Y'\ :sub:`0`
- Cb\ :sub:`4`
- Cb\ :sub:`3`
- -
+
+ - :cspan:`15`
* .. _V4L2-PIX-FMT-YUV565:
- ``V4L2_PIX_FMT_YUV565``
@@ -149,7 +154,9 @@ component of each pixel in one 16 or 32 bit word.
- Cb\ :sub:`5`
- Cb\ :sub:`4`
- Cb\ :sub:`3`
- -
+
+ - :cspan:`15`
+
* .. _V4L2-PIX-FMT-YUV32:
- ``V4L2_PIX_FMT_YUV32``
@@ -190,7 +197,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
* .. _V4L2-PIX-FMT-AYUV32:
- ``V4L2_PIX_FMT_AYUV32``
@@ -231,7 +238,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
* .. _V4L2-PIX-FMT-XYUV32:
- ``V4L2_PIX_FMT_XYUV32``
@@ -272,7 +279,7 @@ component of each pixel in one 16 or 32 bit word.
- Cr\ :sub:`2`
- Cr\ :sub:`1`
- Cr\ :sub:`0`
- -
+
* .. _V4L2-PIX-FMT-VUYA32:
- ``V4L2_PIX_FMT_VUYA32``
@@ -313,7 +320,7 @@ component of each pixel in one 16 or 32 bit word.
- a\ :sub:`2`
- a\ :sub:`1`
- a\ :sub:`0`
- -
+
* .. _V4L2-PIX-FMT-VUYX32:
- ``V4L2_PIX_FMT_VUYX32``
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
index cdb70ac26126..fd32660a3766 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
@@ -40,7 +40,7 @@ of a small V4L2_PIX_FMT_SBGGR10P image:
**Byte Order.**
Each cell is one byte.
-.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{5.4cm}|
+.. tabularcolumns:: |p{2.4cm}|p{1.4cm}|p{1.2cm}|p{1.2cm}|p{1.2cm}|p{6.4cm}|
.. flat-table::
:header-rows: 0
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
index 01413be12916..960851275f23 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
@@ -18,6 +18,7 @@ V4L2_PIX_FMT_SRGGB12P ('pRAA'), V4L2_PIX_FMT_SGRBG12P ('pgAA'), V4L2_PIX_FMT_SGB
12-bit packed Bayer formats
+---------------------------
Description
@@ -37,7 +38,7 @@ Below is an example of a small V4L2_PIX_FMT_SBGGR12P image:
**Byte Order.**
Each cell is one byte.
-.. tabularcolumns:: |p{2.0cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}|p{1.0cm}|p{1.0cm}|p{2.7cm}|
+.. tabularcolumns:: |p{2.2cm}|p{1.2cm}|p{1.2cm}|p{3.1cm}|p{1.2cm}|p{1.2cm}|p{3.1cm}|
.. flat-table::
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
index b583531c2853..1a988d7e7ff8 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
@@ -41,17 +41,21 @@ of one of these formats:
**Byte Order.**
Each cell is one byte.
+.. raw:: latex
+
+    \footnotesize
+
+.. tabularcolumns:: |p{1.8cm}|p{1.0cm}|p{1.0cm}|p{1.0cm}|p{1.1cm}|p{3.3cm}|p{3.3cm}|p{3.3cm}|
.. flat-table::
:header-rows: 0
:stub-columns: 0
- :widths: 2 1 1 1 1 1 1 1
+ :widths: 2 1 1 1 1 3 3 3
- .. row 1
- - start + 0:
+ - start + 0
- B\ :sub:`00high`
@@ -62,17 +66,20 @@ Each cell is one byte.
- G\ :sub:`03high`
- G\ :sub:`01low bits 1--0`\ (bits 7--6)
+
B\ :sub:`00low bits 5--0`\ (bits 5--0)
- R\ :sub:`02low bits 3--0`\ (bits 7--4)
+
G\ :sub:`01low bits 5--2`\ (bits 3--0)
- G\ :sub:`03low bits 5--0`\ (bits 7--2)
+
R\ :sub:`02low bits 5--4`\ (bits 1--0)
- .. row 2
- - start + 7:
+ - start + 7
- G\ :sub:`00high`
@@ -83,12 +90,15 @@ Each cell is one byte.
- R\ :sub:`03high`
- R\ :sub:`01low bits 1--0`\ (bits 7--6)
+
G\ :sub:`00low bits 5--0`\ (bits 5--0)
- G\ :sub:`02low bits 3--0`\ (bits 7--4)
+
R\ :sub:`01low bits 5--2`\ (bits 3--0)
- R\ :sub:`03low bits 5--0`\ (bits 7--2)
+
G\ :sub:`02low bits 5--4`\ (bits 1--0)
- .. row 3
@@ -104,12 +114,15 @@ Each cell is one byte.
- G\ :sub:`23high`
- G\ :sub:`21low bits 1--0`\ (bits 7--6)
+
B\ :sub:`20low bits 5--0`\ (bits 5--0)
- R\ :sub:`22low bits 3--0`\ (bits 7--4)
+
G\ :sub:`21low bits 5--2`\ (bits 3--0)
- G\ :sub:`23low bits 5--0`\ (bits 7--2)
+
R\ :sub:`22low bits 5--4`\ (bits 1--0)
- .. row 4
@@ -132,3 +145,8 @@ Each cell is one byte.
- R\ :sub:`33low bits 5--0`\ (bits 7--2)
G\ :sub:`32low bits 5--4`\ (bits 1--0)
+
+.. raw:: latex
+
+ \normalsize
+
diff --git a/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
index 7f82dad9013a..5688c816e334 100644
--- a/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
@@ -19,6 +19,7 @@ array of struct :c:type:`v4l2_plane_pix_format` structures,
describing all planes of that format.
+
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
.. c:type:: v4l2_plane_pix_format
@@ -41,6 +42,10 @@ describing all planes of that format.
applications.
+.. raw:: latex
+
+ \small
+
.. tabularcolumns:: |p{4.4cm}|p{5.6cm}|p{7.5cm}|
.. c:type:: v4l2_pix_format_mplane
@@ -82,9 +87,7 @@ describing all planes of that format.
* - __u8
- ``flags``
- Flags set by the application or driver, see :ref:`format-flags`.
- * - union {
- - (anonymous)
- -
+ * - :cspan:`2` union { (anonymous)
* - __u8
- ``ycbcr_enc``
- Y'CbCr encoding, from enum :c:type:`v4l2_ycbcr_encoding`.
@@ -97,9 +100,7 @@ describing all planes of that format.
This information supplements the ``colorspace`` and must be set by
the driver for capture streams and by the application for output
streams, see :ref:`colorspaces`.
- * - }
- -
- -
+ * - :cspan:`2` }
* - __u8
- ``quantization``
- Quantization range, from enum :c:type:`v4l2_quantization`.
@@ -116,3 +117,7 @@ describing all planes of that format.
- ``reserved[7]``
- Reserved for future extensions. Should be zeroed by drivers and
applications.
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10p.rst b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
index 7893642faee3..39cd789dcb59 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
@@ -27,6 +27,12 @@ in the same order.
**Bit-packed representation.**
+.. raw:: latex
+
+ \small
+
+.. tabularcolumns:: |p{1.2cm}||p{1.2cm}||p{1.2cm}||p{1.2cm}|p{3.2cm}|p{3.2cm}|
+
.. flat-table::
:header-rows: 0
:stub-columns: 0
@@ -38,3 +44,7 @@ in the same order.
- Y'\ :sub:`03[9:2]`
- Y'\ :sub:`03[1:0]`\ (bits 7--6) Y'\ :sub:`02[1:0]`\ (bits 5--4)
Y'\ :sub:`01[1:0]`\ (bits 3--2) Y'\ :sub:`00[1:0]`\ (bits 1--0)
+
+.. raw:: latex
+
+ \normalsize
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index f5440d55d510..ab1a48a5ae80 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -980,6 +980,113 @@ The following tables list existing packed RGB formats.
- r\ :sub:`2`
- r\ :sub:`1`
- r\ :sub:`0`
+ * .. _MEDIA-BUS-FMT-BGR888-3X8:
+
+ - MEDIA_BUS_FMT_BGR888_3X8
+ - 0x101b
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - b\ :sub:`7`
+ - b\ :sub:`6`
+ - b\ :sub:`5`
+ - b\ :sub:`4`
+ - b\ :sub:`3`
+ - b\ :sub:`2`
+ - b\ :sub:`1`
+ - b\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - g\ :sub:`7`
+ - g\ :sub:`6`
+ - g\ :sub:`5`
+ - g\ :sub:`4`
+ - g\ :sub:`3`
+ - g\ :sub:`2`
+ - g\ :sub:`1`
+ - g\ :sub:`0`
+ * -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ -
+ - r\ :sub:`7`
+ - r\ :sub:`6`
+ - r\ :sub:`5`
+ - r\ :sub:`4`
+ - r\ :sub:`3`
+ - r\ :sub:`2`
+ - r\ :sub:`1`
+ - r\ :sub:`0`
* .. _MEDIA-BUS-FMT-GBR888-1X24:
- MEDIA_BUS_FMT_GBR888_1X24
@@ -7414,7 +7521,7 @@ The following table lists existing HSV/HSL formats.
\tiny
\setlength{\tabcolsep}{2pt}
-.. tabularcolumns:: |p{3.0cm}|p{0.60cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
+.. tabularcolumns:: |p{3.9cm}|p{0.73cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|p{0.22cm}|
.. _v4l2-mbus-pixelcode-hsv:
@@ -7524,7 +7631,7 @@ The following table lists existing JPEG compressed formats.
.. _v4l2-mbus-pixelcode-jpeg:
-.. tabularcolumns:: |p{5.4cm}|p{1.4cm}|p{10.7cm}|
+.. tabularcolumns:: |p{6.0cm}|p{1.4cm}|p{10.1cm}|
.. flat-table:: JPEG Formats
:header-rows: 1
@@ -7557,7 +7664,7 @@ formats.
.. _v4l2-mbus-pixelcode-vendor-specific:
-.. tabularcolumns:: |p{6.8cm}|p{1.4cm}|p{9.3cm}|
+.. tabularcolumns:: |p{8.0cm}|p{1.4cm}|p{7.7cm}|
.. flat-table:: Vendor and device specific formats
:header-rows: 1
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index c138d149faea..dbf7b445a27b 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -111,7 +111,7 @@ in use. Setting it means that the buffer will not be passed to the driver
until the request itself is queued. Also, the driver will apply any
settings associated with the request for this buffer. This field will
be ignored unless the ``V4L2_BUF_FLAG_REQUEST_FD`` flag is set.
-If the device does not support requests, then ``EACCES`` will be returned.
+If the device does not support requests, then ``EBADR`` will be returned.
If requests are supported but an invalid request file descriptor is given,
then ``EINVAL`` will be returned.
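
To make the ``request_fd`` semantics described above concrete, here is a minimal
sketch (editorial, not part of the patch) of queueing an output buffer as part of
a media request; the helper name and the assumption that the buffer was already
set up via VIDIOC_REQBUFS/VIDIOC_QUERYBUF are illustrative only:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Queue buffer 'index' on 'video_fd' as part of the request 'req_fd'
     * (obtained via MEDIA_IOC_REQUEST_ALLOC). On drivers without request
     * support this now fails with EBADR rather than EACCES. */
    static int qbuf_in_request(int video_fd, int req_fd, unsigned int index)
    {
            struct v4l2_buffer buf;

            memset(&buf, 0, sizeof(buf));
            buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            buf.memory = V4L2_MEMORY_MMAP;
            buf.index = index;
            buf.flags = V4L2_BUF_FLAG_REQUEST_FD;   /* buffer belongs to a request */
            buf.request_fd = req_fd;

            return ioctl(video_fd, VIDIOC_QBUF, &buf);
    }
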
@@ -125,7 +125,7 @@ then ``EINVAL`` will be returned.
For :ref:`memory-to-memory devices <mem2mem>` you can specify the
``request_fd`` only for output buffers, not for capture buffers. Attempting
- to specify this for a capture buffer will result in an ``EACCES`` error.
+ to specify this for a capture buffer will result in an ``EBADR`` error.
Applications call the ``VIDIOC_DQBUF`` ioctl to dequeue a filled
(capturing) or displayed (output) buffer from the driver's outgoing
@@ -185,9 +185,11 @@ EPIPE
codecs if a buffer with the ``V4L2_BUF_FLAG_LAST`` was already
dequeued and no new buffers are expected to become available.
-EACCES
+EBADR
The ``V4L2_BUF_FLAG_REQUEST_FD`` flag was set but the device does not
- support requests for the given buffer type.
+ support requests for the given buffer type, or
+ the ``V4L2_BUF_FLAG_REQUEST_FD`` flag was not set but the device requires
+ that the buffer is part of a request.
EBUSY
The first buffer was queued via a request, but the application now tries
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index dfd4b205937c..33a055907258 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -65,5 +65,4 @@ For more details see the file COPYING in the source distribution of Linux.
soc-camera
uvcvideo
vivid
- zoran
zr364xx
diff --git a/Documentation/media/v4l-drivers/zoran.rst b/Documentation/media/v4l-drivers/zoran.rst
deleted file mode 100644
index d2724a863d1d..000000000000
--- a/Documentation/media/v4l-drivers/zoran.rst
+++ /dev/null
@@ -1,583 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-The Zoran driver
-================
-
-unified zoran driver (zr360x7, zoran, buz, dc10(+), dc30(+), lml33)
-
-website: http://mjpeg.sourceforge.net/driver-zoran/
-
-
-Frequently Asked Questions
---------------------------
-
-What cards are supported
-------------------------
-
-Iomega Buz, Linux Media Labs LML33/LML33R10, Pinnacle/Miro
-DC10/DC10+/DC30/DC30+ and related boards (available under various names).
-
-Iomega Buz
-~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36060 MJPEG codec
-* Philips saa7111 TV decoder
-* Philips saa7185 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, saa7111, saa7185, zr36060, zr36067
-
-Inputs/outputs: Composite and S-video
-
-Norms: PAL, SECAM (720x576 @ 25 fps), NTSC (720x480 @ 29.97 fps)
-
-Card number: 7
-
-AverMedia 6 Eyes AVS6EYES
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36060 MJPEG codec
-* Samsung ks0127 TV decoder
-* Conexant bt866 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, ks0127, bt866, zr36060, zr36067
-
-Inputs/outputs:
- Six physical inputs. 1-6 are composite,
- 1-2, 3-4, 5-6 doubles as S-video,
- 1-3 triples as component.
- One composite output.
-
-Norms: PAL, SECAM (720x576 @ 25 fps), NTSC (720x480 @ 29.97 fps)
-
-Card number: 8
-
-.. note::
-
- Not autodetected, card=8 is necessary.
-
-Linux Media Labs LML33
-~~~~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36060 MJPEG codec
-* Brooktree bt819 TV decoder
-* Brooktree bt856 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, bt819, bt856, zr36060, zr36067
-
-Inputs/outputs: Composite and S-video
-
-Norms: PAL (720x576 @ 25 fps), NTSC (720x480 @ 29.97 fps)
-
-Card number: 5
-
-Linux Media Labs LML33R10
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36060 MJPEG codec
-* Philips saa7114 TV decoder
-* Analog Devices adv7170 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, saa7114, adv7170, zr36060, zr36067
-
-Inputs/outputs: Composite and S-video
-
-Norms: PAL (720x576 @ 25 fps), NTSC (720x480 @ 29.97 fps)
-
-Card number: 6
-
-Pinnacle/Miro DC10(new)
-~~~~~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36057 PCI controller
-* Zoran zr36060 MJPEG codec
-* Philips saa7110a TV decoder
-* Analog Devices adv7176 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, saa7110, adv7175, zr36060, zr36067
-
-Inputs/outputs: Composite, S-video and Internal
-
-Norms: PAL, SECAM (768x576 @ 25 fps), NTSC (640x480 @ 29.97 fps)
-
-Card number: 1
-
-Pinnacle/Miro DC10+
-~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36060 MJPEG codec
-* Philips saa7110a TV decoder
-* Analog Devices adv7176 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, saa7110, adv7175, zr36060, zr36067
-
-Inputs/outputs: Composite, S-video and Internal
-
-Norms: PAL, SECAM (768x576 @ 25 fps), NTSC (640x480 @ 29.97 fps)
-
-Card number: 2
-
-Pinnacle/Miro DC10(old)
-~~~~~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36057 PCI controller
-* Zoran zr36050 MJPEG codec
-* Zoran zr36016 Video Front End or Fuji md0211 Video Front End (clone?)
-* Micronas vpx3220a TV decoder
-* mse3000 TV encoder or Analog Devices adv7176 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, vpx3220, mse3000/adv7175, zr36050, zr36016, zr36067
-
-Inputs/outputs: Composite, S-video and Internal
-
-Norms: PAL, SECAM (768x576 @ 25 fps), NTSC (640x480 @ 29.97 fps)
-
-Card number: 0
-
-Pinnacle/Miro DC30
-~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36057 PCI controller
-* Zoran zr36050 MJPEG codec
-* Zoran zr36016 Video Front End
-* Micronas vpx3225d/vpx3220a/vpx3216b TV decoder
-* Analog Devices adv7176 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, vpx3220/vpx3224, adv7175, zr36050, zr36016, zr36067
-
-Inputs/outputs: Composite, S-video and Internal
-
-Norms: PAL, SECAM (768x576 @ 25 fps), NTSC (640x480 @ 29.97 fps)
-
-Card number: 3
-
-Pinnacle/Miro DC30+
-~~~~~~~~~~~~~~~~~~~
-
-* Zoran zr36067 PCI controller
-* Zoran zr36050 MJPEG codec
-* Zoran zr36016 Video Front End
-* Micronas vpx3225d/vpx3220a/vpx3216b TV decoder
-* Analog Devices adv7176 TV encoder
-
-Drivers to use: videodev, i2c-core, i2c-algo-bit,
-videocodec, vpx3220/vpx3224, adv7175, zr36050, zr36016, zr36067
-
-Inputs/outputs: Composite, S-video and Internal
-
-Norms: PAL, SECAM (768x576 @ 25 fps), NTSC (640x480 @ 29.97 fps)
-
-Card number: 4
-
-.. note::
-
- #) No module for the mse3000 is available yet
- #) No module for the vpx3224 is available yet
-
-1.1 What the TV decoder can do and what not
---------------------------------------------
-
-The best known TV standards are NTSC/PAL/SECAM, but for decoding a frame that
-information is not enough. There are several formats of the TV standards,
-and not every TV decoder is able to handle every format. Also, not every
-combination is supported by the driver. There are currently 11 different
-TV broadcast formats all over the world.
-
-The CCIR defines parameters needed for broadcasting the signal.
-The CCIR has defined different standards: A,B,D,E,F,G,H,I,K,K1,L,M,N,...
-The CCIR does not say much about the colorsystem used!
-And talking about a colorsystem does not say much about how it is broadcast.
-
-The CCIR standards A,E,F are not used any more.
-
-When you speak about NTSC, you usually mean the standard: CCIR - M using
-the NTSC colorsystem which is used in the USA, Japan, Mexico, Canada
-and a few others.
-
-When you talk about PAL, you usually mean: CCIR - B/G using the PAL
-colorsystem which is used in many countries.
-
-When you talk about SECAM, you mean: CCIR - L using the SECAM Colorsystem
-which is used in France, and a few others.
-
-The other version of SECAM, CCIR - D/K, is used in Bulgaria, China,
-Slovakia, Hungary, Korea (Rep.), Poland, Romania and a few others.
-
-The CCIR - H uses the PAL colorsystem (sometimes SECAM) and is used in
-Egypt, Libya, Sri Lanka, Syrian Arab Rep.
-
-The CCIR - I uses the PAL colorsystem, and is used in Great Britain, Hong Kong,
-Ireland, Nigeria, South Africa.
-
-The CCIR - N uses the PAL colorsystem and PAL frame size but the NTSC framerate,
-and is used in Argentina, Uruguay, and a few others.
-
-We do not talk about how the audio is broadcast!
-
-Some rather good sites about the TV standards are:
-http://www.sony.jp/support/
-http://info.electronicwerkstatt.de/bereiche/fernsehtechnik/frequenzen_und_normen/Fernsehnormen/
-and http://www.cabl.com/restaurant/channel.html
-
-Other weird things around: NTSC 4.43 is a modified NTSC, which is mainly
-used in PAL VCRs that are able to play back NTSC. PAL 60 seems to be the same
-as NTSC 4.43. The datasheets also talk about NTSC 44; it seems as if it would
-be the same as NTSC 4.43.
-NTSC Comb seems to be a decoder mode where the decoder uses a comb filter
-to split chroma and luma instead of a delay line.
-
-But I did not definitely find out what NTSC Comb is.
-
-Philips saa7111 TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1997, is used in the BUZ and
-- can handle: PAL B/G/H/I, PAL N, PAL M, NTSC M, NTSC N, NTSC 4.43 and SECAM
-
-Philips saa7110a TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1995, is used in the Pinnacle/Miro DC10(new), DC10+ and
-- can handle: PAL B/G, NTSC M and SECAM
-
-Philips saa7114 TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 2000, is used in the LML33R10 and
-- can handle: PAL B/G/D/H/I/N, PAL N, PAL M, NTSC M, NTSC 4.43 and SECAM
-
-Brooktree bt819 TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1996, and is used in the LML33 and
-- can handle: PAL B/D/G/H/I, NTSC M
-
-Micronas vpx3220a TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1996, is used in the DC30 and DC30+ and
-- can handle: PAL B/G/H/I, PAL N, PAL M, NTSC M, NTSC 44, PAL 60, SECAM,NTSC Comb
-
-Samsung ks0127 TV decoder
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- is used in the AVS6EYES card and
-- can handle: NTSC-M/N/44, PAL-M/N/B/G/H/I/D/K/L and SECAM
-
-
-What the TV encoder can do and what not
----------------------------------------
-
-The TV encoders do the "same" as the decoders, but in the other direction.
-You feed them digital data and they generate a Composite or SVHS signal.
-For information about the colorsystems and TV norms take a look at the
-TV decoder section.
-
-Philips saa7185 TV Encoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1996, is used in the BUZ
-- can generate: PAL B/G, NTSC M
-
-Brooktree bt856 TV Encoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1994, is used in the LML33
-- can generate: PAL B/D/G/H/I/N, PAL M, NTSC M, PAL-N (Argentina)
-
-Analog Devices adv7170 TV Encoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 2000, is used in the LML300R10
-- can generate: PAL B/D/G/H/I/N, PAL M, NTSC M, PAL 60
-
-Analog Devices adv7175 TV Encoder
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1996, is used in the DC10, DC10+, DC10 old, DC30, DC30+
-- can generate: PAL B/D/G/H/I/N, PAL M, NTSC M
-
-ITT mse3000 TV encoder
-~~~~~~~~~~~~~~~~~~~~~~
-
-- was introduced in 1991, is used in the DC10 old
-- can generate: PAL , NTSC , SECAM
-
-Conexant bt866 TV encoder
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- is used in AVS6EYES, and
-- can generate: NTSC/PAL, PAL-M, PAL-N
-
-The adv717x should be able to produce PAL N, but you find nothing PAL N
-specific in the registers. It seems that you have to reuse another standard
-to generate PAL N; maybe it would work if you use the PAL M settings.
-
-How do I get this damn thing to work
-------------------------------------
-
-Load zr36067.o. If it can't autodetect your card, use the card=X insmod
-option with X being the card number as given in the previous section.
-To have more than one card, use card=X1[,X2[,X3,[X4[..]]]]
-
-To automate this, add the following to your /etc/modprobe.d/zoran.conf:
-
-options zr36067 card=X1[,X2[,X3[,X4[..]]]]
-alias char-major-81-0 zr36067
-
-One thing to keep in mind is that this doesn't load zr36067.o itself yet. It
-just automates loading. If you start using xawtv, the device won't load on
-some systems, since you're trying to load modules as a user, which is not
-allowed ("permission denied"). A quick workaround is to add 'Load "v4l"' to
-XF86Config-4 when you use X by default, or to run 'v4l-conf -c <device>' in
-one of your startup scripts (normally rc.local) if you don't use X. Both
-make sure that the modules are loaded on startup, under the root account.
-
-What mainboard should I use (or why doesn't my card work)
----------------------------------------------------------
-
-
-<insert lousy disclaimer here>. In short: good=SiS/Intel, bad=VIA.
-
-Experience tells us that people with a Buz, on average, have more problems
-than users with a DC10+/LML33. Also, it tells us that people owning a VIA-
-based mainboard (ktXXX, MVP3) have more problems than users with a mainboard
-based on a different chipset. Here are some notes from Andrew Stevens:
-
-Here's my experience of using LML33 and Buz on various motherboards:
-
-- VIA MVP3
- - Forget it. Pointless. Doesn't work.
-- Intel 430FX (Pentium 200)
- - LML33 perfect, Buz tolerable (3 or 4 frames dropped per movie)
-- Intel 440BX (early stepping)
- - LML33 tolerable. Buz starting to get annoying (6-10 frames/hour)
-- Intel 440BX (late stepping)
-  - Buz tolerable, LML33 almost perfect (occasional single frame drops)
-- SiS735
- - LML33 perfect, Buz tolerable.
-- VIA KT133(*)
-  - LML33 starting to get annoying, Buz poor enough that I gave up.
-
-- Both 440BX boards were dual CPU versions.
-
-Bernhard Praschinger later added:
-
-- AMD 751
- - Buz perfect-tolerable
-- AMD 760
- - Buz perfect-tolerable
-
-In general, people on the user mailinglist won't give you much of a chance
-if you have a VIA-based motherboard. They may be cheap, but sometimes, you'd
-rather want to spend some more money on better boards. In general, VIA
-mainboards' IDE/PCI performance will also suck badly compared to others.
-You'll notice the DC10+/DC30+ aren't mentioned anywhere in the overview.
-Basically, you can assume that if the Buz works, the LML33 will work too. If
-the LML33 works, the DC10+/DC30+ will work too. They're the most tolerant to
-different mainboard chipsets of all the supported cards.
-
-If you experience timeouts during capture, buy a better mainboard or lower
-the quality/buffersize during capture (see 'Concerning buffer sizes, quality,
-output size etc.'). If it hangs, there's little we can do as of now. Check
-your IRQs and make sure the card has its own interrupts.
-
-Programming interface
----------------------
-
-This driver conforms to video4linux2. Support for V4L1 and for the custom
-zoran ioctls has been removed in kernel 2.6.38.
-
-For programming examples, please look at the lavrec.c and lavplay.c code in
-the MJPEG-tools (http://mjpeg.sf.net/).
-
-Additional notes for software developers:
-
- The driver returns maxwidth and maxheight parameters according to
- the current TV standard (norm). Therefore, the software which
- communicates with the driver and "asks" for these parameters should
- first set the correct norm. Well, it seems logically correct: TV
- standard is "more constant" for current country than geometry
- settings of a variety of TV capture cards which may work in ITU or
- square pixel format.
-
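
As a rough illustration of the note above (a sketch only; the helper name and the
choice of PAL are assumptions, not code from the driver), an application would
select the norm before querying the resulting geometry:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Illustrative only: set the norm first, then ask the driver for the
     * capture format, so maxwidth/maxheight reflect the chosen standard. */
    static int setup_pal_capture(int fd, struct v4l2_format *fmt)
    {
            v4l2_std_id std = V4L2_STD_PAL;

            if (ioctl(fd, VIDIOC_S_STD, &std) < 0)
                    return -1;

            memset(fmt, 0, sizeof(*fmt));
            fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            return ioctl(fd, VIDIOC_G_FMT, fmt);
    }
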
-Applications
-------------
-
-Applications known to work with this driver:
-
-TV viewing:
-
-* xawtv
-* kwintv
-* probably any TV application that supports video4linux or video4linux2.
-
-MJPEG capture/playback:
-
-* mjpegtools/lavtools (or Linux Video Studio)
-* gstreamer
-* mplayer
-
-General raw capture:
-
-* xawtv
-* gstreamer
-* probably any application that supports video4linux or video4linux2
-
-Video editing:
-
-* Cinelerra
-* MainActor
-* mjpegtools (or Linux Video Studio)
-
-
-Concerning buffer sizes, quality, output size etc.
---------------------------------------------------
-
-
-The zr36060 can do 1:2 JPEG compression. This is really the theoretical
-maximum that the chipset can reach. The driver can, however, limit compression
-to a maximum (size) of 1:4. The reason for this is that some cards (e.g. Buz)
-can't handle 1:2 compression without stopping capture after only a few minutes.
-With 1:4, it'll mostly work. If you have a Buz, use 'low_bitrate=1' to go into
-1:4 max. compression mode.
-
-100% JPEG quality is thus 1:2 compression in practice. Take a full PAL frame
-(size 720x576): the JPEG fields are stored in YUY2 format, so the size of the
-fields is 720x288x16/2 bits/field (2 fields/frame) = 207360 bytes/field x 2 =
-414720 bytes/frame. Add some more bytes for headers and DHT (huffman)/DQT
-(quantization) tables, and you'll get to something like 512kB per frame for
-1:2 compression. For 1:4 compression, you'd have frames of half this size.
-
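
The arithmetic in the paragraph above can be checked with a few lines of C
(illustrative only; the constants simply restate the PAL/YUY2 numbers used in
the text):

    #include <stdio.h>

    int main(void)
    {
            const unsigned int width = 720, field_height = 288;
            const unsigned int bpp = 16;             /* YUY2: 16 bits per pixel */
            const unsigned int fields_per_frame = 2;

            /* 1:2 compression halves the raw field size. */
            unsigned int field_bytes = width * field_height * bpp / 8 / 2;
            unsigned int frame_bytes = field_bytes * fields_per_frame;

            printf("bytes/field: %u\n", field_bytes);   /* 207360 */
            printf("bytes/frame: %u\n", frame_bytes);   /* 414720 */
            return 0;
    }
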
-Some additional explanation by Martin Samuelsson, which also explains the
-importance of buffer sizes:
---
-> Hmm, I do not think it is really that way. With the current (downloaded
-> at 18:00 Monday) driver I get that output sizes for 10 sec:
-> -q 50 -b 128 : 24.283.332 Bytes
-> -q 50 -b 256 : 48.442.368
-> -q 25 -b 128 : 24.655.992
-> -q 25 -b 256 : 25.859.820
-
-I woke up, and can't go to sleep again. I'll kill some time explaining why
-this doesn't look strange to me.
-
-Let's do some math using a width of 704 pixels. I'm not sure whether the Buz
-actually use that number or not, but that's not too important right now.
-
-704x288 pixels, one field, is 202752 pixels. Divided by 64 pixels per block;
-3168 blocks per field. Each pixel consists of two bytes; 128 bytes per block;
-1024 bits per block. 100% in the new driver mean 1:2 compression; the maximum
-output becomes 512 bits per block. Actually 510, but 512 is simpler to use
-for calculations.
-
-Let's say that we specify d1q50. We thus want 256 bits per block; times 3168
-becomes 811008 bits; 101376 bytes per field. We're talking raw bits and bytes
-here, so we don't need to do any fancy corrections for bits-per-pixel or such
-things. 101376 bytes per field.
-
-d1 video contains two fields per frame. Those sum up to 202752 bytes per
-frame, and one of those frames goes into each buffer.
-
-But wait a second! -b128 gives 128kB buffers! It's not possible to cram
-202752 bytes of JPEG data into 128kB!
-
-This is what the driver notices and automatically compensates for in your
-examples. Let's do some math using this information:
-
-128kB is 131072 bytes. In this buffer, we want to store two fields, which
-leaves 65536 bytes for each field. Using 3168 blocks per field, we get
-20.68686868... available bytes per block; 165 bits. We can't allow the
-request for 256 bits per block when there's only 165 bits available! The -q50
-option is silently overridden, and the -b128 option takes precedence, leaving
-us with the equivalence of -q32.
-
-This gives us a data rate of 165 bits per block, which, times 3168, sums up
-to 65340 bytes per field, out of the allowed 65536. The current driver has
-another level of rate limiting; it won't accept -q values that fill more than
-6/8 of the specified buffers. (I'm not sure why. "Playing it safe" seems to be
-a safe bet. Personally, I think I would have lowered requested-bits-per-block
-by one, or something like that.) We can't use 165 bits per block, but have to
-lower it again, to 6/8 of the available buffer space: We end up with 124 bits
-per block, the equivalence of -q24. With 128kB buffers, you can't use greater
-than -q24 at -d1. (And PAL, and 704 pixels width...)
-
-The third example is limited to -q24 through the same process. The second
-example, using very similar calculations, is limited to -q48. The only
-example that actually grabs at the specified -q value is the last one, which
-is clearly visible, looking at the file size.
---
-
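
A compact sketch of the clamping behaviour described in the quoted explanation
(the function and constants are illustrative, assuming a 704x288 PAL field and
the 6/8 safety margin mentioned above; this is not the driver's actual code):

    /* How many bits per 8x8 block actually fit when a requested quality is
     * limited by the capture buffer size, as explained above. */
    static unsigned int effective_bits_per_block(unsigned int buffer_bytes,
                                                 unsigned int requested_bits)
    {
            const unsigned int blocks_per_field = (704 * 288) / 64;     /* 3168 */
            unsigned int per_field = buffer_bytes / 2;  /* two fields per buffer */
            unsigned int available = per_field * 8 / blocks_per_field;  /* bits */
            unsigned int limit = per_field * 6 / blocks_per_field;      /* 6/8 of that */

            return requested_bits < limit ? requested_bits : limit;
    }

    /* With -b128 (131072-byte buffers) and -q50 (256 bits requested) this
     * yields 124 bits per block, i.e. roughly the -q24 equivalence above. */
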
-Conclusion: the quality of the resulting movie depends on buffer size, quality,
-whether or not you use 'low_bitrate=1' as an insmod option for the zr36060.c
-module to do 1:4 instead of 1:2 compression, etc.
-
-If you experience timeouts, lowering the quality/buffersize or using
-'low_bitrate=1' as an insmod option for zr36060.o might actually help, as is
-proven by the Buz.
-
-It hangs/crashes/fails/whatevers! Help!
----------------------------------------
-
-Make sure that the card has its own interrupts (see /proc/interrupts), check
-the output of dmesg at high verbosity (load zr36067.o with debug=2,
-load all other modules with debug=1). Check that your mainboard is favorable
-(see question 2) and if not, test the card in another computer. Also see the
-notes given in question 3 and try lowering quality/buffersize/capturesize
-if recording fails after a period of time.
-
-If all this doesn't help, give a clear description of the problem including
-detailed hardware information (memory+brand, mainboard+chipset+brand, which
-MJPEG card, processor, other PCI cards that might be of interest), give the
-system PnP information (/proc/interrupts, /proc/dma, /proc/devices), and give
-the kernel version, driver version, glibc version, gcc version and any other
-information that might possibly be of interest. Also provide the dmesg output
-at high verbosity. See 'Contacting' on how to contact the developers.
-
-Maintainers/Contacting
-----------------------
-
-The driver is currently maintained by Laurent Pinchart and Ronald Bultje
-(<laurent.pinchart@skynet.be> and <rbultje@ronald.bitfreak.net>). For bug
-reports or questions, please contact the mailinglist instead of the developers
-individually. For user questions (i.e. bug reports or how-to questions), send
-an email to <mjpeg-users@lists.sf.net>, for developers (i.e. if you want to
-help programming), send an email to <mjpeg-developer@lists.sf.net>. See
-http://www.sf.net/projects/mjpeg/ for subscription information.
-
-For bug reports, be sure to include all the information as described in
-the section 'It hangs/crashes/fails/whatevers! Help!'. Please make sure
-you're using the latest version (http://mjpeg.sf.net/driver-zoran/).
-
-Previous maintainers/developers of this driver include Serguei Miridonov
-<mirsev@cicese.mx>, Wolfgang Scherr <scherr@net4you.net>, Dave Perks
-<dperks@ibm.net> and Rainer Johanni <Rainer@Johanni.de>.
-
-Driver's License
-----------------
-
- This driver is distributed under the terms of the General Public License.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
-See http://www.gnu.org/ for more information.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 1c22b21ae922..f70ebcdfe592 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1937,21 +1937,6 @@ There are some more advanced barrier functions:
information on consistent memory.
-MMIO WRITE BARRIER
-------------------
-
-The Linux kernel also has a special barrier for use with memory-mapped I/O
-writes:
-
- mmiowb();
-
-This is a variation on the mandatory write barrier that causes writes to weakly
-ordered I/O regions to be partially ordered. Its effects may go beyond the
-CPU->Hardware interface and actually affect the hardware at some level.
-
-See the subsection "Acquires vs I/O accesses" for more information.
-
-
===============================
IMPLICIT KERNEL MEMORY BARRIERS
===============================
@@ -2317,75 +2302,6 @@ But it won't see any of:
*E, *F or *G following RELEASE Q
-
-ACQUIRES VS I/O ACCESSES
-------------------------
-
-Under certain circumstances (especially involving NUMA), I/O accesses within
-two spinlocked sections on two different CPUs may be seen as interleaved by the
-PCI bridge, because the PCI bridge does not necessarily participate in the
-cache-coherence protocol, and is therefore incapable of issuing the required
-read memory barriers.
-
-For example:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- writel(1, DATA);
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- writel(5, DATA);
- spin_unlock(Q);
-
-may be seen by the PCI bridge as follows:
-
- STORE *ADDR = 0, STORE *ADDR = 4, STORE *DATA = 1, STORE *DATA = 5
-
-which would probably cause the hardware to malfunction.
-
-
-What is necessary here is to intervene with an mmiowb() before dropping the
-spinlock, for example:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- writel(1, DATA);
- mmiowb();
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- writel(5, DATA);
- mmiowb();
- spin_unlock(Q);
-
-this will ensure that the two stores issued on CPU 1 appear at the PCI bridge
-before either of the stores issued on CPU 2.
-
-
-Furthermore, following a store by a load from the same device obviates the need
-for the mmiowb(), because the load forces the store to complete before the load
-is performed:
-
- CPU 1 CPU 2
- =============================== ===============================
- spin_lock(Q)
- writel(0, ADDR)
- a = readl(DATA);
- spin_unlock(Q);
- spin_lock(Q);
- writel(4, ADDR);
- b = readl(DATA);
- spin_unlock(Q);
-
-
-See Documentation/driver-api/device-io.rst for more information.
-
-
=================================
WHERE ARE MEMORY BARRIERS NEEDED?
=================================
@@ -2532,16 +2448,9 @@ the device to malfunction.
Inside of the Linux kernel, I/O should be done through the appropriate accessor
routines - such as inb() or writel() - which know how to make such accesses
appropriately sequential. While this, for the most part, renders the explicit
-use of memory barriers unnecessary, there are a couple of situations where they
-might be needed:
-
- (1) On some systems, I/O stores are not strongly ordered across all CPUs, and
- so for _all_ general drivers locks should be used and mmiowb() must be
- issued prior to unlocking the critical section.
-
- (2) If the accessor functions are used to refer to an I/O memory window with
- relaxed memory access properties, then _mandatory_ memory barriers are
- required to enforce ordering.
+use of memory barriers unnecessary; if the accessor functions are used to refer
+to an I/O memory window with relaxed memory access properties, then _mandatory_
+memory barriers are required to enforce ordering.
See Documentation/driver-api/device-io.rst for more information.
@@ -2586,8 +2495,7 @@ explicit barriers are used.
Normally this won't be a problem because the I/O accesses done inside such
sections will include synchronous load operations on strictly ordered I/O
-registers that form implicit I/O barriers. If this isn't sufficient then an
-mmiowb() may need to be used explicitly.
+registers that form implicit I/O barriers.
A similar situation may occur between an interrupt routine and two routines
@@ -2599,71 +2507,114 @@ likely, then interrupt-disabling locks should be used to guarantee ordering.
KERNEL I/O BARRIER EFFECTS
==========================
-When accessing I/O memory, drivers should use the appropriate accessor
-functions:
-
- (*) inX(), outX():
-
- These are intended to talk to I/O space rather than memory space, but
- that's primarily a CPU-specific concept. The i386 and x86_64 processors
- do indeed have special I/O space access cycles and instructions, but many
- CPUs don't have such a concept.
-
- The PCI bus, amongst others, defines an I/O space concept which - on such
- CPUs as i386 and x86_64 - readily maps to the CPU's concept of I/O
- space. However, it may also be mapped as a virtual I/O space in the CPU's
- memory map, particularly on those CPUs that don't support alternate I/O
- spaces.
-
- Accesses to this space may be fully synchronous (as on i386), but
- intermediary bridges (such as the PCI host bridge) may not fully honour
- that.
-
- They are guaranteed to be fully ordered with respect to each other.
-
- They are not guaranteed to be fully ordered with respect to other types of
- memory and I/O operation.
+Interfacing with peripherals via I/O accesses is deeply architecture and device
+specific. Therefore, drivers which are inherently non-portable may rely on
+specific behaviours of their target systems in order to achieve synchronization
+in the most lightweight manner possible. For drivers intending to be portable
+between multiple architectures and bus implementations, the kernel offers a
+series of accessor functions that provide various degrees of ordering
+guarantees:
(*) readX(), writeX():
- Whether these are guaranteed to be fully ordered and uncombined with
- respect to each other on the issuing CPU depends on the characteristics
- defined for the memory window through which they're accessing. On later
- i386 architecture machines, for example, this is controlled by way of the
- MTRR registers.
+ The readX() and writeX() MMIO accessors take a pointer to the
+ peripheral being accessed as an __iomem * parameter. For pointers
+ mapped with the default I/O attributes (e.g. those returned by
+ ioremap()), the ordering guarantees are as follows:
+
+ 1. All readX() and writeX() accesses to the same peripheral are ordered
+ with respect to each other. This ensures that MMIO register accesses
+ by the same CPU thread to a particular device will arrive in program
+ order.
+
+ 2. A writeX() issued by a CPU thread holding a spinlock is ordered
+ before a writeX() to the same peripheral from another CPU thread
+ issued after a later acquisition of the same spinlock. This ensures
+ that MMIO register writes to a particular device issued while holding
+ a spinlock will arrive in an order consistent with acquisitions of
+ the lock.
+
+ 3. A writeX() by a CPU thread to the peripheral will first wait for the
+ completion of all prior writes to memory either issued by, or
+ propagated to, the same thread. This ensures that writes by the CPU
+ to an outbound DMA buffer allocated by dma_alloc_coherent() will be
+ visible to a DMA engine when the CPU writes to its MMIO control
+ register to trigger the transfer.
+
+ 4. A readX() by a CPU thread from the peripheral will complete before
+ any subsequent reads from memory by the same thread can begin. This
+ ensures that reads by the CPU from an incoming DMA buffer allocated
+ by dma_alloc_coherent() will not see stale data after reading from
+ the DMA engine's MMIO status register to establish that the DMA
+ transfer has completed.
+
+ 5. A readX() by a CPU thread from the peripheral will complete before
+ any subsequent delay() loop can begin execution on the same thread.
+ This ensures that two MMIO register writes by the CPU to a peripheral
+ will arrive at least 1us apart if the first write is immediately read
+ back with readX() and udelay(1) is called prior to the second
+ writeX():
+
+ writel(42, DEVICE_REGISTER_0); // Arrives at the device...
+ readl(DEVICE_REGISTER_0);
+ udelay(1);
+ writel(42, DEVICE_REGISTER_1); // ...at least 1us before this.
+
+ The ordering properties of __iomem pointers obtained with non-default
+ attributes (e.g. those returned by ioremap_wc()) are specific to the
+ underlying architecture and therefore the guarantees listed above cannot
+ generally be relied upon for accesses to these types of mappings.
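
To illustrate guarantees 3 and 4 in driver terms, a hedged sketch of the usual
coherent-DMA "doorbell" pattern might look as follows; the register offsets,
structure layout and helper names are invented for the example and are not taken
from a real driver:

    #include <linux/kernel.h>
    #include <linux/io.h>
    #include <linux/dma-mapping.h>

    struct my_desc {                    /* lives in dma_alloc_coherent() memory */
            u32 addr;
            u32 len;
    };

    static void my_submit(void __iomem *regs, struct my_desc *ring, int idx,
                          dma_addr_t buf, u32 len)
    {
            /* Plain CPU stores to the coherent descriptor ring... */
            ring[idx].addr = lower_32_bits(buf);
            ring[idx].len  = len;

            /* ...are visible to the device before this MMIO doorbell write
             * reaches it (guarantee 3), so no extra barrier is needed when
             * writel() is used. */
            writel(idx, regs + 0x10);           /* hypothetical DOORBELL */
    }

    static u32 my_poll_completion(void __iomem *regs, struct my_desc *ring, int idx)
    {
            /* The readl() completes before the dependent read of the ring
             * entry can begin (guarantee 4), so the data is not stale. */
            u32 done = readl(regs + 0x14);      /* hypothetical STATUS */

            return done ? ring[idx].len : 0;
    }
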
+
+ (*) readX_relaxed(), writeX_relaxed():
+
+ These are similar to readX() and writeX(), but provide weaker memory
+ ordering guarantees. Specifically, they do not guarantee ordering with
+ respect to locking, normal memory accesses or delay() loops (i.e.
+ bullets 2-5 above) but they are still guaranteed to be ordered with
+ respect to other accesses from the same CPU thread to the same
+ peripheral when operating on __iomem pointers mapped with the default
+ I/O attributes.
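
For instance, a driver draining a device FIFO whose contents never interact with
DMA memory or locks only needs guarantee 1 above, so the relaxed accessors
suffice; the register offsets here are made up for the sketch:

    #include <linux/io.h>
    #include <linux/types.h>

    static void drain_fifo(void __iomem *regs, u32 *out, unsigned int words)
    {
            unsigned int i;

            /* Same-peripheral ordering is all that is required here. */
            for (i = 0; i < words; i++)
                    out[i] = readl_relaxed(regs + 0x0c);  /* FIFO_DATA */

            /* Finish with a fully ordered write so that a subsequent lock
             * release or DMA access observes the acknowledgement. */
            writel(0x1, regs + 0x10);                     /* IRQ_ACK */
    }
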
+
+ (*) readsX(), writesX():
+
+ The readsX() and writesX() MMIO accessors are designed for accessing
+ register-based, memory-mapped FIFOs residing on peripherals that are not
+ capable of performing DMA. Consequently, they provide only the ordering
+ guarantees of readX_relaxed() and writeX_relaxed(), as documented above.
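+
+     For instance, a hypothetical FIFO might be drained into a local buffer
+     with readsl(), where the level and data register offsets are
+     illustrative only:
+
+	count = readl(foo->regs + FOO_FIFO_LEVEL);	/* words currently queued */
+	readsl(foo->regs + FOO_FIFO_DATA, buf, count);	/* repeated reads of the data register */
+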
- Ordinarily, these will be guaranteed to be fully ordered and uncombined,
- provided they're not accessing a prefetchable device.
+ (*) inX(), outX():
- However, intermediary hardware (such as a PCI bridge) may indulge in
- deferral if it so wishes; to flush a store, a load from the same location
- is preferred[*], but a load from the same device or from configuration
- space should suffice for PCI.
+ The inX() and outX() accessors are intended to access legacy port-mapped
+ I/O peripherals, which may require special instructions on some
+ architectures (notably x86). The port number of the peripheral being
+ accessed is passed as an argument.
- [*] NOTE! attempting to load from the same location as was written to may
- cause a malfunction - consider the 16550 Rx/Tx serial registers for
- example.
+ Since many CPU architectures ultimately access these peripherals via an
+ internal virtual memory mapping, the portable ordering guarantees
+ provided by inX() and outX() are the same as those provided by readX()
+ and writeX() respectively when accessing a mapping with the default I/O
+ attributes.
- Used with prefetchable I/O memory, an mmiowb() barrier may be required to
- force stores to be ordered.
+ Device drivers may expect outX() to emit a non-posted write transaction
+ that waits for a completion response from the I/O peripheral before
+ returning. This is not guaranteed by all architectures and is therefore
+ not part of the portable ordering semantics.
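+
+     As an example, a legacy 16550-style UART at the traditional I/O port
+     base 0x3f8 might be poked like this (the port base and the register
+     offsets are used purely as an illustration):
+
+	outb(ctrl_byte, 0x3f8 + FOO_CTRL_OFF);		/* write a control byte */
+	status = inb(0x3f8 + FOO_STATUS_OFF);		/* read back a status byte */
+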
- Please refer to the PCI specification for more information on interactions
- between PCI transactions.
+ (*) insX(), outsX():
- (*) readX_relaxed(), writeX_relaxed()
+ As above, the insX() and outsX() accessors provide the same ordering
+ guarantees as readsX() and writesX() respectively when accessing a
+ mapping with the default I/O attributes.
- These are similar to readX() and writeX(), but provide weaker memory
- ordering guarantees. Specifically, they do not guarantee ordering with
- respect to normal memory accesses (e.g. DMA buffers) nor do they guarantee
- ordering with respect to LOCK or UNLOCK operations. If the latter is
- required, an mmiowb() barrier can be used. Note that relaxed accesses to
- the same peripheral are guaranteed to be ordered with respect to each
- other.
+ (*) ioreadX(), iowriteX():
- (*) ioreadX(), iowriteX()
+ These will perform appropriately for the type of access they're actually
+ doing, be it inX()/outX() or readX()/writeX().
- These will perform appropriately for the type of access they're actually
- doing, be it inX()/outX() or readX()/writeX().
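+
+     For example, a PCI driver can map a BAR with pci_iomap() and then use
+     ioread32()/iowrite32() without caring whether that BAR is memory-mapped
+     or port-mapped (the register offsets are hypothetical):
+
+	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* BAR 0, full length */
+
+	iowrite32(val, regs + FOO_CTRL);
+	val = ioread32(regs + FOO_STATUS);
+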
+With the exception of the string accessors (insX(), outsX(), readsX() and
+writesX()), all of the above assume that the underlying peripheral is
+little-endian and will therefore perform byte-swapping operations on big-endian
+architectures.
========================================
diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst
index 245fb6c0ab6f..18020943ba25 100644
--- a/Documentation/networking/batman-adv.rst
+++ b/Documentation/networking/batman-adv.rst
@@ -27,24 +27,8 @@ Load the batman-adv module into your kernel::
$ insmod batman-adv.ko
The module is now waiting for activation. You must add some interfaces on which
-batman can operate. After loading the module batman advanced will scan your
-systems interfaces to search for compatible interfaces. Once found, it will
-create subfolders in the ``/sys`` directories of each supported interface,
-e.g.::
-
- $ ls /sys/class/net/eth0/batman_adv/
- elp_interval iface_status mesh_iface throughput_override
-
-If an interface does not have the ``batman_adv`` subfolder, it probably is not
-supported. Not supported interfaces are: loopback, non-ethernet and batman's
-own interfaces.
-
-Note: After the module was loaded it will continuously watch for new
-interfaces to verify the compatibility. There is no need to reload the module
-if you plug your USB wifi adapter into your machine after batman advanced was
-initially loaded.
-
-The batman-adv soft-interface can be created using the iproute2 tool ``ip``::
+batman-adv can operate. The batman-adv soft-interface can be created using the
+iproute2 tool ``ip``::
$ ip link add name bat0 type batadv
@@ -52,57 +36,46 @@ To activate a given interface simply attach it to the ``bat0`` interface::
$ ip link set dev eth0 master bat0
-Repeat this step for all interfaces you wish to add. Now batman starts
+Repeat this step for all interfaces you wish to add. Now batman-adv starts
using/broadcasting on this/these interface(s).
-By reading the "iface_status" file you can check its status::
-
- $ cat /sys/class/net/eth0/batman_adv/iface_status
- active
-
To deactivate an interface you have to detach it from the "bat0" interface::
$ ip link set dev eth0 nomaster
+The same can also be done using the batctl interface subcommand::
-All mesh wide settings can be found in batman's own interface folder::
+ batctl -m bat0 interface create
+ batctl -m bat0 interface add -M eth0
- $ ls /sys/class/net/bat0/mesh/
- aggregated_ogms fragmentation isolation_mark routing_algo
- ap_isolation gw_bandwidth log_level vlan0
- bonding gw_mode multicast_mode
- bridge_loop_avoidance gw_sel_class network_coding
- distributed_arp_table hop_penalty orig_interval
+To detach eth0 and destroy bat0::
-There is a special folder for debugging information::
+ batctl -m bat0 interface del -M eth0
+ batctl -m bat0 interface destroy
- $ ls /sys/kernel/debug/batman_adv/bat0/
- bla_backbone_table log neighbors transtable_local
- bla_claim_table mcast_flags originators
- dat_cache nc socket
- gateways nc_nodes transtable_global
+There are additional settings for each batadv mesh interface, vlan and hardif
+which can be modified using batctl. Detailed information about this can be found
+in its manual.
-Some of the files contain all sort of status information regarding the mesh
-network. For example, you can view the table of originators (mesh
-participants) with::
+For instance, you can check the current originator interval (value
+in milliseconds which determines how often batman-adv sends its broadcast
+packets)::
- $ cat /sys/kernel/debug/batman_adv/bat0/originators
-
-Other files allow to change batman's behaviour to better fit your requirements.
-For instance, you can check the current originator interval (value in
-milliseconds which determines how often batman sends its broadcast packets)::
-
- $ cat /sys/class/net/bat0/mesh/orig_interval
+ $ batctl -M bat0 orig_interval
1000
and also change its value::
- $ echo 3000 > /sys/class/net/bat0/mesh/orig_interval
+ $ batctl -M bat0 orig_interval 3000
In very mobile scenarios, you might want to adjust the originator interval to a
lower value. This will make the mesh more responsive to topology changes, but
will also increase the overhead.
+Information about the current state can be accessed via the batadv generic
+netlink family. batctl provides a human readable version via its debug tables
+subcommands.
+
Usage
=====
@@ -147,43 +120,16 @@ batman-adv module. When building batman-adv as part of kernel, use "make
menuconfig" and enable the option ``B.A.T.M.A.N. debugging``
(``CONFIG_BATMAN_ADV_DEBUG=y``).
-Those additional debug messages can be accessed using a special file in
-debugfs::
+Those additional debug messages can be accessed using the tracing infrastructure::
- $ cat /sys/kernel/debug/batman_adv/bat0/log
+ $ trace-cmd stream -e batadv:batadv_dbg
The additional debug output is by default disabled. It can be enabled during
-run time. Following log_levels are defined:
-
-.. flat-table::
-
- * - 0
- - All debug output disabled
- * - 1
- - Enable messages related to routing / flooding / broadcasting
- * - 2
- - Enable messages related to route added / changed / deleted
- * - 4
- - Enable messages related to translation table operations
- * - 8
- - Enable messages related to bridge loop avoidance
- * - 16
- - Enable messages related to DAT, ARP snooping and parsing
- * - 32
- - Enable messages related to network coding
- * - 64
- - Enable messages related to multicast
- * - 128
- - Enable messages related to throughput meter
- * - 255
- - Enable all messages
-
-The debug output can be changed at runtime using the file
-``/sys/class/net/bat0/mesh/log_level``. e.g.::
-
- $ echo 6 > /sys/class/net/bat0/mesh/log_level
-
-will enable debug messages for when routes change.
+run time::
+
+ $ batctl -m bat0 loglevel routes tt
+
+will enable debug messages for when routes and translation table entries change.
Counters for different types of packets entering and leaving the batman-adv
module are available through ethtool::
diff --git a/Documentation/networking/devlink-info-versions.rst b/Documentation/networking/devlink-info-versions.rst
index c79ad8593383..4316342b7746 100644
--- a/Documentation/networking/devlink-info-versions.rst
+++ b/Documentation/networking/devlink-info-versions.rst
@@ -41,3 +41,8 @@ fw.ncsi
Version of the software responsible for supporting/handling the
Network Controller Sideband Interface.
+
+fw.psid
+=======
+
+Unique identifier of the firmware parameter set.
diff --git a/Documentation/networking/dsa/bcm_sf2.txt b/Documentation/networking/dsa/bcm_sf2.rst
index eba3a2431e91..dee234039e1e 100644
--- a/Documentation/networking/dsa/bcm_sf2.txt
+++ b/Documentation/networking/dsa/bcm_sf2.rst
@@ -1,3 +1,4 @@
+=============================================
Broadcom Starfighter 2 Ethernet switch driver
=============================================
@@ -25,27 +26,27 @@ are connected at a lower speed.
The switch hardware block is typically interfaced using MMIO accesses and
contains a bunch of sub-blocks/registers:
-* SWITCH_CORE: common switch registers
-* SWITCH_REG: external interfaces switch register
-* SWITCH_MDIO: external MDIO bus controller (there is another one in SWITCH_CORE,
+- ``SWITCH_CORE``: common switch registers
+- ``SWITCH_REG``: external interfaces switch register
+- ``SWITCH_MDIO``: external MDIO bus controller (there is another one in SWITCH_CORE,
which is used for indirect PHY accesses)
-* SWITCH_INDIR_RW: 64-bits wide register helper block
-* SWITCH_INTRL2_0/1: Level-2 interrupt controllers
-* SWITCH_ACB: Admission control block
-* SWITCH_FCB: Fail-over control block
+- ``SWITCH_INDIR_RW``: 64-bits wide register helper block
+- ``SWITCH_INTRL2_0/1``: Level-2 interrupt controllers
+- ``SWITCH_ACB``: Admission control block
+- ``SWITCH_FCB``: Fail-over control block
Implementation details
======================
-The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
-driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
+The driver is located in ``drivers/net/dsa/bcm_sf2.c`` and is implemented as a DSA
+driver; see ``Documentation/networking/dsa/dsa.rst`` for details on the subsystem
and what it provides.
The SF2 switch is configured to enable a Broadcom specific 4-bytes switch tag
which gets inserted by the switch for every packet forwarded to the CPU
interface, conversely, the CPU network interface should insert a similar tag for
packets entering the CPU port. The tag format is described in
-net/dsa/tag_brcm.c.
+``net/dsa/tag_brcm.c``.
Overall, the SF2 driver is a fairly regular DSA driver; there are a few
specifics covered below.
@@ -54,7 +55,7 @@ Device Tree probing
-------------------
The DSA platform device driver is probed using a specific compatible string
-provided in net/dsa/dsa.c. The reason for that is because the DSA subsystem gets
+provided in ``net/dsa/dsa.c``. The reason for that is because the DSA subsystem gets
registered as a platform device driver currently. DSA will provide the needed
device_node pointers which are then accessible by the switch driver setup
function to setup resources such as register ranges and interrupts. This
@@ -70,7 +71,7 @@ Broadcom switches connected to a SF2 require the use of the DSA slave MDIO bus
in order to properly configure them. By default, the SF2 pseudo-PHY address, and
an external switch pseudo-PHY address will both be snooping for incoming MDIO
transactions, since they are at the same address (30), resulting in some kind of
-"double" programming. Using DSA, and setting ds->phys_mii_mask accordingly, we
+"double" programming. Using DSA, and setting ``ds->phys_mii_mask`` accordingly, we
selectively divert reads and writes towards external Broadcom switches
pseudo-PHY addresses. Newer revisions of the SF2 hardware have introduced a
configurable pseudo-PHY address which circumvents the initial design limitation.
@@ -86,7 +87,7 @@ firmware gets reloaded. The SF2 driver relies on such events to properly set its
MoCA interface carrier state and properly report this to the networking stack.
The MoCA interfaces are supported using the PHY library's fixed PHY/emulated PHY
-device and the switch driver registers a fixed_link_update callback for such
+device and the switch driver registers a ``fixed_link_update`` callback for such
PHYs which reflects the link state obtained from the interrupt handler.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.rst
index 43ef767bc440..ca87068b9ab9 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.rst
@@ -1,10 +1,8 @@
-Distributed Switch Architecture
-===============================
-
-Introduction
+============
+Architecture
============
-This document describes the Distributed Switch Architecture (DSA) subsystem
+This document describes the **Distributed Switch Architecture (DSA)** subsystem
design principles, limitations, interactions with other subsystems, and how to
develop drivers for this subsystem as well as a TODO for developers interested
in joining the effort.
@@ -70,11 +68,11 @@ Switch tagging protocols
DSA currently supports 5 different tagging protocols, and a tag-less mode as
well. The different protocols are implemented in:
-net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
-net/dsa/tag_dsa.c: Marvell's original DSA tag
-net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
-net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
-net/dsa/tag_qca.c: Qualcomm's 2 bytes tag
+- ``net/dsa/tag_trailer.c``: Marvell's 4 trailer tag mode (legacy)
+- ``net/dsa/tag_dsa.c``: Marvell's original DSA tag
+- ``net/dsa/tag_edsa.c``: Marvell's enhanced DSA tag
+- ``net/dsa/tag_brcm.c``: Broadcom's 4 bytes tag
+- ``net/dsa/tag_qca.c``: Qualcomm's 2 bytes tag
The exact format of the tag protocol is vendor specific, but in general, they
all contain something which:
@@ -89,7 +87,7 @@ Master network devices are regular, unmodified Linux network device drivers for
the CPU/management Ethernet interface. Such a driver might occasionally need to
know whether DSA is enabled (e.g.: to enable/disable specific offload features),
but the DSA subsystem has been proven to work with industry standard drivers:
-e1000e, mv643xx_eth etc. without having to introduce modifications to these
+``e1000e``, ``mv643xx_eth``, etc. without having to introduce modifications to these
drivers. Such network devices are also often referred to as conduit network
devices since they act as a pipe between the host processor and the hardware
Ethernet switch.
@@ -100,40 +98,42 @@ Networking stack hooks
When a master netdev is used with DSA, a small hook is placed in the
networking stack in order to have the DSA subsystem process the Ethernet
switch specific tagging protocol. DSA accomplishes this by registering a
-specific (and fake) Ethernet type (later becoming skb->protocol) with the
-networking stack, this is also known as a ptype or packet_type. A typical
+specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
+networking stack, this is also known as a ``ptype`` or ``packet_type``. A typical
Ethernet Frame receive sequence looks like this:
Master network device (e.g.: e1000e):
-Receive interrupt fires:
-- receive function is invoked
-- basic packet processing is done: getting length, status etc.
-- packet is prepared to be processed by the Ethernet layer by calling
- eth_type_trans
+1. Receive interrupt fires:
+
+ - receive function is invoked
+ - basic packet processing is done: getting length, status etc.
+ - packet is prepared to be processed by the Ethernet layer by calling
+ ``eth_type_trans``
+
+2. net/ethernet/eth.c::
+
+ eth_type_trans(skb, dev)
+ if (dev->dsa_ptr != NULL)
+ -> skb->protocol = ETH_P_XDSA
-net/ethernet/eth.c:
+3. drivers/net/ethernet/\*::
-eth_type_trans(skb, dev)
- if (dev->dsa_ptr != NULL)
- -> skb->protocol = ETH_P_XDSA
+ netif_receive_skb(skb)
+ -> iterate over registered packet_type
+ -> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
-drivers/net/ethernet/*:
+4. net/dsa/dsa.c::
-netif_receive_skb(skb)
- -> iterate over registered packet_type
- -> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
+ -> dsa_switch_rcv()
+ -> invoke switch tag specific protocol handler in 'net/dsa/tag_*.c'
-net/dsa/dsa.c:
- -> dsa_switch_rcv()
- -> invoke switch tag specific protocol handler in
- net/dsa/tag_*.c
+5. net/dsa/tag_*.c:
-net/dsa/tag_*.c:
- -> inspect and strip switch tag protocol to determine originating port
- -> locate per-port network device
- -> invoke eth_type_trans() with the DSA slave network device
- -> invoked netif_receive_skb()
+ - inspect and strip switch tag protocol to determine originating port
+ - locate per-port network device
+ - invoke ``eth_type_trans()`` with the DSA slave network device
+ - invoke ``netif_receive_skb()``
Past this point, the DSA slave network devices get delivered regular Ethernet
frames that can be processed by the networking stack.
@@ -162,7 +162,7 @@ invoke a specific transmit routine which takes care of adding the relevant
switch tag in the Ethernet frames.
These frames are then queued for transmission using the master network device
-ndo_start_xmit() function, since they contain the appropriate switch tag, the
+``ndo_start_xmit()`` function, since they contain the appropriate switch tag, the
Ethernet switch will be able to process these incoming frames from the
management interface and delivers these frames to the physical switch port.
@@ -170,23 +170,25 @@ Graphical representation
------------------------
Summarized, this is basically what DSA looks like from a network device
-perspective:
-
-
- |---------------------------
- | CPU network device (eth0)|
- ----------------------------
- | <tag added by switch |
- | |
- | |
- | tag added by CPU> |
- |--------------------------------------------|
- | Switch driver |
- |--------------------------------------------|
- || || ||
- |-------| |-------| |-------|
- | sw0p0 | | sw0p1 | | sw0p2 |
- |-------| |-------| |-------|
+perspective::
+
+
+ |---------------------------
+ | CPU network device (eth0)|
+ ----------------------------
+ | <tag added by switch |
+ | |
+ | |
+ | tag added by CPU> |
+ |--------------------------------------------|
+ | Switch driver |
+ |--------------------------------------------|
+ || || ||
+ |-------| |-------| |-------|
+ | sw0p0 | | sw0p1 | | sw0p2 |
+ |-------| |-------| |-------|
+
+
Slave MDIO bus
--------------
@@ -207,31 +209,32 @@ PHYs, external PHYs, or even external switches.
Data structures
---------------
-DSA data structures are defined in include/net/dsa.h as well as
-net/dsa/dsa_priv.h.
+DSA data structures are defined in ``include/net/dsa.h`` as well as
+``net/dsa/dsa_priv.h``:
-dsa_chip_data: platform data configuration for a given switch device, this
-structure describes a switch device's parent device, its address, as well as
-various properties of its ports: names/labels, and finally a routing table
-indication (when cascading switches)
+- ``dsa_chip_data``: platform data configuration for a given switch device,
+ this structure describes a switch device's parent device, its address, as
+ well as various properties of its ports: names/labels, and finally a routing
+ table indication (when cascading switches)
-dsa_platform_data: platform device configuration data which can reference a
-collection of dsa_chip_data structure if multiples switches are cascaded, the
-master network device this switch tree is attached to needs to be referenced
+- ``dsa_platform_data``: platform device configuration data which can reference
+ a collection of dsa_chip_data structures if multiple switches are cascaded,
+ the master network device this switch tree is attached to needs to be
+ referenced
-dsa_switch_tree: structure assigned to the master network device under
-"dsa_ptr", this structure references a dsa_platform_data structure as well as
-the tagging protocol supported by the switch tree, and which receive/transmit
-function hooks should be invoked, information about the directly attached switch
-is also provided: CPU port. Finally, a collection of dsa_switch are referenced
-to address individual switches in the tree.
+- ``dsa_switch_tree``: structure assigned to the master network device under
+ ``dsa_ptr``, this structure references a dsa_platform_data structure as well as
+ the tagging protocol supported by the switch tree, and which receive/transmit
+ function hooks should be invoked, information about the directly attached
+ switch is also provided: CPU port. Finally, a collection of dsa_switch
+ structures is referenced to address individual switches in the tree.
-dsa_switch: structure describing a switch device in the tree, referencing a
-dsa_switch_tree as a backpointer, slave network devices, master network device,
-and a reference to the backing dsa_switch_ops
+- ``dsa_switch``: structure describing a switch device in the tree, referencing
+ a ``dsa_switch_tree`` as a backpointer, slave network devices, master network
+ device, and a reference to the backing ``dsa_switch_ops``
-dsa_switch_ops: structure referencing function pointers, see below for a full
-description.
+- ``dsa_switch_ops``: structure referencing function pointers, see below for a
+ full description.
Design limitations
==================
@@ -240,7 +243,7 @@ Limits on the number of devices and ports
-----------------------------------------
DSA currently limits the number of maximum switches within a tree to 4
-(DSA_MAX_SWITCHES), and the number of ports per switch to 12 (DSA_MAX_PORTS).
+(``DSA_MAX_SWITCHES``), and the number of ports per switch to 12 (``DSA_MAX_PORTS``).
These limits could be extended to support larger configurations should this need
arise.
@@ -279,15 +282,15 @@ Interactions with other subsystems
DSA currently leverages the following subsystems:
-- MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
-- Switchdev: net/switchdev/*
+- MDIO/PHY library: ``drivers/net/phy/phy.c``, ``mdio_bus.c``
+- Switchdev: ``net/switchdev/*``
- Device Tree for various of_* functions
MDIO/PHY library
----------------
Slave network devices exposed by DSA may or may not be interfacing with PHY
-devices (struct phy_device as defined in include/linux/phy.h), but the DSA
+devices (``struct phy_device`` as defined in ``include/linux/phy.h``), but the DSA
subsystem deals with all possible combinations:
- internal PHY devices, built into the Ethernet switch hardware
@@ -296,16 +299,16 @@ subsystem deals with all possible combinations:
- special, non-autonegotiated or non MDIO-managed PHY devices: SFPs, MoCA; a.k.a
fixed PHYs
-The PHY configuration is done by the dsa_slave_phy_setup() function and the
+The PHY configuration is done by the ``dsa_slave_phy_setup()`` function and the
logic basically looks like this:
- if Device Tree is used, the PHY device is looked up using the standard
"phy-handle" property, if found, this PHY device is created and registered
- using of_phy_connect()
+ using ``of_phy_connect()``
- if Device Tree is used, and the PHY device is "fixed", that is, conforms to
the definition of a non-MDIO managed PHY as defined in
- Documentation/devicetree/bindings/net/fixed-link.txt, the PHY is registered
+ ``Documentation/devicetree/bindings/net/fixed-link.txt``, the PHY is registered
and connected transparently using the special fixed MDIO bus driver
- finally, if the PHY is built into the switch, as is very common with
@@ -331,8 +334,8 @@ Device Tree
-----------
DSA features a standardized binding which is documented in
-Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
-functions such as of_get_phy_mode(), of_phy_connect() are also used to query
+``Documentation/devicetree/bindings/net/dsa/dsa.txt``. PHY/MDIO library helper
+functions such as ``of_get_phy_mode()``, ``of_phy_connect()`` are also used to query
per-port PHY specific details: interface connection, MDIO bus location etc..
Driver development
@@ -341,8 +344,8 @@ Driver development
DSA switch drivers need to implement a dsa_switch_ops structure which will
contain the various members described below.
-register_switch_driver() registers this dsa_switch_ops in its internal list
-of drivers to probe for. unregister_switch_driver() does the exact opposite.
+``register_switch_driver()`` registers this dsa_switch_ops in its internal list
+of drivers to probe for. ``unregister_switch_driver()`` does the exact opposite.
Unless requested differently by setting the priv_size member accordingly, DSA
does not allocate any driver private context space.
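+
+As a purely illustrative sketch (the ``foo_*`` handlers are hypothetical
+placeholders, not an existing driver), such a structure might look like::
+
+	static const struct dsa_switch_ops foo_switch_ops = {
+		.setup		= foo_setup,
+		.phy_read	= foo_phy_read,
+		.phy_write	= foo_phy_write,
+		.adjust_link	= foo_adjust_link,
+		.port_enable	= foo_port_enable,
+		.port_disable	= foo_port_disable,
+	};
+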
@@ -350,17 +353,17 @@ does not allocate any driver private context space.
Switch configuration
--------------------
-- tag_protocol: this is to indicate what kind of tagging protocol is supported,
- should be a valid value from the dsa_tag_protocol enum
+- ``tag_protocol``: this is to indicate what kind of tagging protocol is supported,
+ should be a valid value from the ``dsa_tag_protocol`` enum
-- probe: probe routine which will be invoked by the DSA platform device upon
+- ``probe``: probe routine which will be invoked by the DSA platform device upon
registration to test for the presence/absence of a switch device. For MDIO
devices, it is recommended to issue a read towards internal registers using
the switch pseudo-PHY and return whether this is a supported device. For other
buses, return a non-NULL string
-- setup: setup function for the switch, this function is responsible for setting
- up the dsa_switch_ops private structure with all it needs: register maps,
+- ``setup``: setup function for the switch, this function is responsible for setting
+ up the ``dsa_switch_ops`` private structure with all it needs: register maps,
interrupts, mutexes, locks etc.. This function is also expected to properly
configure the switch to separate all network interfaces from each other, that
is, they should be isolated by the switch hardware itself, typically by creating
@@ -375,27 +378,27 @@ Switch configuration
PHY devices and link management
-------------------------------
-- get_phy_flags: Some switches are interfaced to various kinds of Ethernet PHYs,
+- ``get_phy_flags``: Some switches are interfaced to various kinds of Ethernet PHYs,
if the PHY library PHY driver needs to know about information it cannot obtain
on its own (e.g.: coming from switch memory mapped registers), this function
should return a 32-bits bitmask of "flags", that is private between the switch
- driver and the Ethernet PHY driver in drivers/net/phy/*.
+ driver and the Ethernet PHY driver in ``drivers/net/phy/*``.
-- phy_read: Function invoked by the DSA slave MDIO bus when attempting to read
+- ``phy_read``: Function invoked by the DSA slave MDIO bus when attempting to read
the switch port MDIO registers. If unavailable, return 0xffff for each read.
For builtin switch Ethernet PHYs, this function should allow reading the link
status, auto-negotiation results, link partner pages etc..
-- phy_write: Function invoked by the DSA slave MDIO bus when attempting to write
+- ``phy_write``: Function invoked by the DSA slave MDIO bus when attempting to write
to the switch port MDIO registers. If unavailable return a negative error
code.
-- adjust_link: Function invoked by the PHY library when a slave network device
+- ``adjust_link``: Function invoked by the PHY library when a slave network device
is attached to a PHY device. This function is responsible for appropriately
configuring the switch port link parameters: speed, duplex, pause based on
- what the phy_device is providing.
+ what the ``phy_device`` is providing.
-- fixed_link_update: Function invoked by the PHY library, and specifically by
+- ``fixed_link_update``: Function invoked by the PHY library, and specifically by
the fixed PHY driver asking the switch driver for link parameters that could
not be auto-negotiated, or obtained by reading the PHY registers through MDIO.
This is particularly useful for specific kinds of hardware such as QSGMII,
@@ -405,87 +408,87 @@ PHY devices and link management
Ethtool operations
------------------
-- get_strings: ethtool function used to query the driver's strings, will
+- ``get_strings``: ethtool function used to query the driver's strings, will
typically return statistics strings, private flags strings etc.
-- get_ethtool_stats: ethtool function used to query per-port statistics and
+- ``get_ethtool_stats``: ethtool function used to query per-port statistics and
return their values. DSA overlays slave network devices general statistics:
RX/TX counters from the network device, with switch driver specific statistics
per port
-- get_sset_count: ethtool function used to query the number of statistics items
+- ``get_sset_count``: ethtool function used to query the number of statistics items
-- get_wol: ethtool function used to obtain Wake-on-LAN settings per-port, this
+- ``get_wol``: ethtool function used to obtain Wake-on-LAN settings per-port, this
function may, for certain implementations, also query the master network device
Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
-- set_wol: ethtool function used to configure Wake-on-LAN settings per-port,
+- ``set_wol``: ethtool function used to configure Wake-on-LAN settings per-port,
direct counterpart to get_wol with similar restrictions
-- set_eee: ethtool function which is used to configure a switch port EEE (Green
+- ``set_eee``: ethtool function which is used to configure a switch port EEE (Green
Ethernet) settings, can optionally invoke the PHY library to enable EEE at the
PHY level if relevant. This function should enable EEE at the switch port MAC
controller and data-processing logic
-- get_eee: ethtool function which is used to query a switch port EEE settings,
+- ``get_eee``: ethtool function which is used to query a switch port EEE settings,
this function should return the EEE state of the switch port MAC controller
and data-processing logic as well as query the PHY for its currently configured
EEE settings
-- get_eeprom_len: ethtool function returning for a given switch the EEPROM
+- ``get_eeprom_len``: ethtool function returning for a given switch the EEPROM
length/size in bytes
-- get_eeprom: ethtool function returning for a given switch the EEPROM contents
+- ``get_eeprom``: ethtool function returning for a given switch the EEPROM contents
-- set_eeprom: ethtool function writing specified data to a given switch EEPROM
+- ``set_eeprom``: ethtool function writing specified data to a given switch EEPROM
-- get_regs_len: ethtool function returning the register length for a given
+- ``get_regs_len``: ethtool function returning the register length for a given
switch
-- get_regs: ethtool function returning the Ethernet switch internal register
+- ``get_regs``: ethtool function returning the Ethernet switch internal register
contents. This function might require user-land code in ethtool to
pretty-print register values and registers
Power management
----------------
-- suspend: function invoked by the DSA platform device when the system goes to
+- ``suspend``: function invoked by the DSA platform device when the system goes to
suspend, should quiesce all Ethernet switch activities, but keep ports
participating in Wake-on-LAN active as well as additional wake-up logic if
supported
-- resume: function invoked by the DSA platform device when the system resumes,
+- ``resume``: function invoked by the DSA platform device when the system resumes,
should resume all Ethernet switch activities and re-configure the switch to be
in a fully active state
-- port_enable: function invoked by the DSA slave network device ndo_open
+- ``port_enable``: function invoked by the DSA slave network device ndo_open
function when a port is administratively brought up, this function should be
fully enabling a given switch port. DSA takes care of marking the port with
- BR_STATE_BLOCKING if the port is a bridge member, or BR_STATE_FORWARDING if it
+ ``BR_STATE_BLOCKING`` if the port is a bridge member, or ``BR_STATE_FORWARDING`` if it
was not, and propagating these changes down to the hardware
-- port_disable: function invoked by the DSA slave network device ndo_close
+- ``port_disable``: function invoked by the DSA slave network device ndo_close
function when a port is administratively brought down, this function should be
fully disabling a given switch port. DSA takes care of marking the port with
- BR_STATE_DISABLED and propagating changes to the hardware if this port is
+ ``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
disabled while being a bridge member
Bridge layer
------------
-- port_bridge_join: bridge layer function invoked when a given switch port is
+- ``port_bridge_join``: bridge layer function invoked when a given switch port is
added to a bridge, this function should be doing the necessary at the switch
level to permit the joining port from being added to the relevant logical
domain for it to ingress/egress traffic with other members of the bridge.
-- port_bridge_leave: bridge layer function invoked when a given switch port is
+- ``port_bridge_leave``: bridge layer function invoked when a given switch port is
removed from a bridge, this function should be doing the necessary at the
switch level to deny the leaving port from ingress/egress traffic from the
remaining bridge members. When the port leaves the bridge, it should be aged
out at the switch hardware for the switch to (re) learn MAC addresses behind
this port.
-- port_stp_state_set: bridge layer function invoked when a given switch port STP
+- ``port_stp_state_set``: bridge layer function invoked when a given switch port STP
state is computed by the bridge layer and should be propagated to switch
hardware to forward/block/learn traffic. The switch driver is responsible for
computing a STP state change based on current and asked parameters and perform
@@ -494,7 +497,7 @@ Bridge layer
Bridge VLAN filtering
---------------------
-- port_vlan_filtering: bridge layer function invoked when the bridge gets
+- ``port_vlan_filtering``: bridge layer function invoked when the bridge gets
configured for turning on or off VLAN filtering. If nothing specific needs to
be done at the hardware level, this callback does not need to be implemented.
When VLAN filtering is turned on, the hardware must be programmed with
@@ -504,61 +507,61 @@ Bridge VLAN filtering
accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are
allowed.
-- port_vlan_prepare: bridge layer function invoked when the bridge prepares the
+- ``port_vlan_prepare``: bridge layer function invoked when the bridge prepares the
configuration of a VLAN on the given port. If the operation is not supported
- by the hardware, this function should return -EOPNOTSUPP to inform the bridge
+ by the hardware, this function should return ``-EOPNOTSUPP`` to inform the bridge
code to fall back to a software implementation. No hardware setup must be done
in this function. See port_vlan_add for details.
-- port_vlan_add: bridge layer function invoked when a VLAN is configured
+- ``port_vlan_add``: bridge layer function invoked when a VLAN is configured
(tagged or untagged) for the given switch port
-- port_vlan_del: bridge layer function invoked when a VLAN is removed from the
+- ``port_vlan_del``: bridge layer function invoked when a VLAN is removed from the
given switch port
-- port_vlan_dump: bridge layer function invoked with a switchdev callback
+- ``port_vlan_dump``: bridge layer function invoked with a switchdev callback
function that the driver has to call for each VLAN the given port is a member
of. A switchdev object is used to carry the VID and bridge flags.
-- port_fdb_add: bridge layer function invoked when the bridge wants to install a
+- ``port_fdb_add``: bridge layer function invoked when the bridge wants to install a
Forwarding Database entry, the switch hardware should be programmed with the
specified address in the specified VLAN Id in the forwarding database
associated with this VLAN ID. If the operation is not supported, this
- function should return -EOPNOTSUPP to inform the bridge code to fallback to
+ function should return ``-EOPNOTSUPP`` to inform the bridge code to fall back to
a software implementation.
-Note: VLAN ID 0 corresponds to the port private database, which, in the context
-of DSA, would be the its port-based VLAN, used by the associated bridge device.
+.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
+ of DSA, would be its port-based VLAN, used by the associated bridge device.
-- port_fdb_del: bridge layer function invoked when the bridge wants to remove a
+- ``port_fdb_del``: bridge layer function invoked when the bridge wants to remove a
Forwarding Database entry, the switch hardware should be programmed to delete
the specified MAC address from the specified VLAN ID if it was mapped into
this port forwarding database
-- port_fdb_dump: bridge layer function invoked with a switchdev callback
+- ``port_fdb_dump``: bridge layer function invoked with a switchdev callback
function that the driver has to call for each MAC address known to be behind
the given port. A switchdev object is used to carry the VID and FDB info.
-- port_mdb_prepare: bridge layer function invoked when the bridge prepares the
+- ``port_mdb_prepare``: bridge layer function invoked when the bridge prepares the
installation of a multicast database entry. If the operation is not supported,
- this function should return -EOPNOTSUPP to inform the bridge code to fallback
+ this function should return ``-EOPNOTSUPP`` to inform the bridge code to fall back
to a software implementation. No hardware setup must be done in this function.
- See port_fdb_add for this and details.
+ See ``port_fdb_add`` for details.
-- port_mdb_add: bridge layer function invoked when the bridge wants to install
+- ``port_mdb_add``: bridge layer function invoked when the bridge wants to install
a multicast database entry, the switch hardware should be programmed with the
specified address in the specified VLAN ID in the forwarding database
associated with this VLAN ID.
-Note: VLAN ID 0 corresponds to the port private database, which, in the context
-of DSA, would be the its port-based VLAN, used by the associated bridge device.
+.. note:: VLAN ID 0 corresponds to the port private database, which, in the context
+ of DSA, would be its port-based VLAN, used by the associated bridge device.
-- port_mdb_del: bridge layer function invoked when the bridge wants to remove a
+- ``port_mdb_del``: bridge layer function invoked when the bridge wants to remove a
multicast database entry, the switch hardware should be programmed to delete
the specified MAC address from the specified VLAN ID if it was mapped into
this port forwarding database.
-- port_mdb_dump: bridge layer function invoked with a switchdev callback
+- ``port_mdb_dump``: bridge layer function invoked with a switchdev callback
function that the driver has to call for each MAC address known to be behind
the given port. A switchdev object is used to carry the VID and MDB info.
@@ -577,7 +580,7 @@ two subsystems and get the best of both worlds.
Other hanging fruits
--------------------
-- making the number of ports fully dynamic and not dependent on DSA_MAX_PORTS
+- making the number of ports fully dynamic and not dependent on ``DSA_MAX_PORTS``
- allowing more than one CPU/management interface:
http://comments.gmane.org/gmane.linux.network/365657
- porting more drivers from other vendors:
diff --git a/Documentation/networking/dsa/index.rst b/Documentation/networking/dsa/index.rst
new file mode 100644
index 000000000000..0e5b7a9be406
--- /dev/null
+++ b/Documentation/networking/dsa/index.rst
@@ -0,0 +1,11 @@
+===============================
+Distributed Switch Architecture
+===============================
+
+.. toctree::
+ :maxdepth: 1
+
+ dsa
+ bcm_sf2
+ lan9303
+ sja1105
diff --git a/Documentation/networking/dsa/lan9303.txt b/Documentation/networking/dsa/lan9303.rst
index 144b02b95207..e3c820db28ad 100644
--- a/Documentation/networking/dsa/lan9303.txt
+++ b/Documentation/networking/dsa/lan9303.rst
@@ -1,3 +1,4 @@
+==============================
LAN9303 Ethernet switch driver
==============================
@@ -9,10 +10,9 @@ host master network interface (e.g. fixed link).
Driver details
==============
-The driver is implemented as a DSA driver, see
-Documentation/networking/dsa/dsa.txt.
+The driver is implemented as a DSA driver, see ``Documentation/networking/dsa/dsa.rst``.
-See Documentation/devicetree/bindings/net/dsa/lan9303.txt for device tree
+See ``Documentation/devicetree/bindings/net/dsa/lan9303.txt`` for device tree
binding.
The LAN9303 can be managed both via MDIO and I2C, both supported by this driver.
diff --git a/Documentation/networking/dsa/sja1105.rst b/Documentation/networking/dsa/sja1105.rst
new file mode 100644
index 000000000000..ea7bac438cfd
--- /dev/null
+++ b/Documentation/networking/dsa/sja1105.rst
@@ -0,0 +1,220 @@
+=========================
+NXP SJA1105 switch driver
+=========================
+
+Overview
+========
+
+The NXP SJA1105 is a family of 6 devices:
+
+- SJA1105E: First generation, no TTEthernet
+- SJA1105T: First generation, TTEthernet
+- SJA1105P: Second generation, no TTEthernet, no SGMII
+- SJA1105Q: Second generation, TTEthernet, no SGMII
+- SJA1105R: Second generation, no TTEthernet, SGMII
+- SJA1105S: Second generation, TTEthernet, SGMII
+
+These are SPI-managed automotive switches, with all ports being gigabit
+capable, and supporting MII/RMII/RGMII and optionally SGMII on one port.
+
+Being automotive parts, their configuration interface is geared towards
+set-and-forget use, with minimal dynamic interaction at runtime. They
+require a static configuration to be composed by software and packed
+with CRC and table headers, and sent over SPI.
+
+The static configuration is composed of several configuration tables. Each
+table takes a number of entries. Some configuration tables can be (partially)
+reconfigured at runtime, some not. Some tables are mandatory, some not:
+
+============================= ================== =============================
+Table Mandatory Reconfigurable
+============================= ================== =============================
+Schedule no no
+Schedule entry points if Scheduling no
+VL Lookup no no
+VL Policing if VL Lookup no
+VL Forwarding if VL Lookup no
+L2 Lookup no no
+L2 Policing yes no
+VLAN Lookup yes yes
+L2 Forwarding yes partially (fully on P/Q/R/S)
+MAC Config yes partially (fully on P/Q/R/S)
+Schedule Params if Scheduling no
+Schedule Entry Points Params if Scheduling no
+VL Forwarding Params if VL Forwarding no
+L2 Lookup Params no partially (fully on P/Q/R/S)
+L2 Forwarding Params yes no
+Clock Sync Params no no
+AVB Params no no
+General Params yes partially
+Retagging no yes
+xMII Params yes no
+SGMII no yes
+============================= ================== =============================
+
+
+Also the configuration is write-only (with very few exceptions, software
+cannot read it back from the switch).
+
+The driver creates a static configuration at probe time, and keeps it at
+all times in memory, as a shadow for the hardware state. When required to
+change a hardware setting, the static configuration is also updated.
+If that changed setting can be transmitted to the switch through the dynamic
+reconfiguration interface, it is; otherwise the switch is reset and
+reprogrammed with the updated static configuration.
+
+Traffic support
+===============
+
+The switches do not support switch tagging in hardware. But they do support
+customizing the TPID by which VLAN traffic is identified as such. The switch
+driver is leveraging ``CONFIG_NET_DSA_TAG_8021Q`` by requesting that special
+VLANs (with a custom TPID of ``ETH_P_EDSA`` instead of ``ETH_P_8021Q``) are
+installed on its ports when not in ``vlan_filtering`` mode. This does not
+interfere with the reception and transmission of real 802.1Q-tagged traffic,
+because the switch no longer parses those packets as VLAN after the TPID
+change.
+The TPID is restored when ``vlan_filtering`` is requested by the user through
+the bridge layer, and general IP termination is no longer possible through
+the switch netdevices in this mode.
+
+The switches have two programmable filters for link-local destination MACs.
+These are used to trap BPDUs and PTP traffic to the master netdevice, and are
+further used to support STP and 1588 ordinary clock/boundary clock
+functionality.
+
+The following traffic modes are supported over the switch netdevices:
+
++--------------------+------------+------------------+------------------+
+| | Standalone | Bridged with | Bridged with |
+| | ports | vlan_filtering 0 | vlan_filtering 1 |
++====================+============+==================+==================+
+| Regular traffic | Yes | Yes | No (use master) |
++--------------------+------------+------------------+------------------+
+| Management traffic | Yes | Yes | Yes |
+| (BPDU, PTP) | | | |
++--------------------+------------+------------------+------------------+
+
+Switching features
+==================
+
+The driver supports the configuration of L2 forwarding rules in hardware for
+port bridging. The forwarding, broadcast and flooding domain between ports can
+be restricted through two methods: either at the L2 forwarding level (isolate
+one bridge's ports from another's) or at the VLAN port membership level
+(isolate ports within the same bridge). The final forwarding decision taken by
+the hardware is a logical AND of these two sets of rules.
+
+The hardware tags all traffic internally with a port-based VLAN (pvid), or it
+decodes the VLAN information from the 802.1Q tag. Advanced VLAN classification
+is not possible. Once attributed a VLAN tag, frames are checked against the
+port's membership rules and dropped at ingress if they don't match any VLAN.
+This behavior is available when switch ports are enslaved to a bridge with
+``vlan_filtering 1``.
+
+Normally the hardware is not configurable with respect to VLAN awareness, but
+by changing what TPID the switch searches 802.1Q tags for, the semantics of a
+bridge with ``vlan_filtering 0`` can be kept (accept all traffic, tagged or
+untagged), and therefore this mode is also supported.
+
+Segregating the switch ports in multiple bridges is supported (e.g. 2 + 2), but
+all bridges should have the same level of VLAN awareness (either both have
+``vlan_filtering`` 0, or both 1). Also an inevitable limitation of the fact
+that VLAN awareness is global at the switch level is that once a bridge with
+``vlan_filtering`` enslaves at least one switch port, the other un-bridged
+ports are no longer available for standalone traffic termination.
+
+Topology and loop detection through STP is supported.
+
+L2 FDB manipulation (add/delete/dump) is currently possible for the first
+generation devices. Aging time of FDB entries, as well as enabling fully static
+management (no address learning and no flooding of unknown traffic) is not yet
+configurable in the driver.
+
+A special comment about bridging with other netdevices (illustrated with an
+example):
+
+A board has eth0, eth1, swp0@eth1, swp1@eth1, swp2@eth1, swp3@eth1.
+The switch ports (swp0-3) are under br0.
+It is desired that eth0 is turned into another switched port that communicates
+with swp0-3.
+
+If br0 has vlan_filtering 0, then eth0 can simply be added to br0 with the
+intended results.
+If br0 has vlan_filtering 1, then a new br1 interface needs to be created that
+enslaves eth0 and eth1 (the DSA master of the switch ports). This is because in
+this mode, the switch ports beneath br0 are not capable of regular traffic, and
+are only used as a conduit for switchdev operations.
+
+Device Tree bindings and board design
+=====================================
+
+This section references ``Documentation/devicetree/bindings/net/dsa/sja1105.txt``
+and aims to showcase some potential switch caveats.
+
+RMII PHY role and out-of-band signaling
+---------------------------------------
+
+In the RMII spec, the 50 MHz clock signals are either driven by the MAC or by
+an external oscillator (but not by the PHY).
+But the spec is rather loose and devices go outside it in several ways.
+Some PHYs go against the spec and may provide an output pin where they source
+the 50 MHz clock themselves, in an attempt to be helpful.
+On the other hand, the SJA1105 is only binary configurable - when in the RMII
+MAC role it will also attempt to drive the clock signal. To prevent this from
+happening it must be put in RMII PHY role.
+But doing so has some unintended consequences.
+In the RMII spec, the PHY can transmit extra out-of-band signals via RXD[1:0].
+These are practically some extra code words (/J/ and /K/) sent prior to the
+preamble of each frame. The MAC does not have this out-of-band signaling
+mechanism defined by the RMII spec.
+So when the SJA1105 port is put in PHY role to avoid having 2 drivers on the
+clock signal, inevitably an RMII PHY-to-PHY connection is created. The SJA1105
+emulates a PHY interface fully and generates the /J/ and /K/ symbols prior to
+frame preambles, which the real PHY is not expected to understand. So the PHY
+simply encodes the extra symbols received from the SJA1105-as-PHY onto the
+100Base-Tx wire.
+On the other side of the wire, some link partners might discard these extra
+symbols, while others might choke on them and discard the entire Ethernet
+frames that follow along. This looks like packet loss with some link partners
+but not with others.
+The take-away is that in RMII mode, the SJA1105 must be allowed to drive the
+reference clock if connected to a PHY.
+
+RGMII fixed-link and internal delays
+------------------------------------
+
+As mentioned in the bindings document, the second generation of devices has
+tunable delay lines as part of the MAC, which can be used to establish the
+correct RGMII timing budget.
+When powered up, these can shift the Rx and Tx clocks with a phase difference
+between 73.8 and 101.7 degrees.
+The catch is that the delay lines need to lock onto a clock signal with a
+stable frequency. This means that there must be at least 2 microseconds of
+silence between the clock at the old vs at the new frequency. Otherwise the
+lock is lost and the delay lines must be reset (powered down and back up).
+In RGMII the clock frequency changes with link speed (125 MHz at 1000 Mbps, 25
+MHz at 100 Mbps and 2.5 MHz at 10 Mbps), and link speed might change during the
+AN process.
+In the situation where the switch port is connected through an RGMII fixed-link
+to a link partner whose link state life cycle is outside the control of Linux
+(such as a different SoC), then the delay lines would remain unlocked (and
+inactive) until there is manual intervention (ifdown/ifup on the switch port).
+The take-away is that in RGMII mode, the switch's internal delays are only
+reliable if the link partner never changes link speeds, or if it does, it does
+so in a way that is coordinated with the switch port (practically, both ends of
+the fixed-link are under control of the same Linux system).
+As to why would a fixed-link interface ever change link speeds: there are
+Ethernet controllers out there which come out of reset in 100 Mbps mode, and
+their driver inevitably needs to change the speed and clock frequency if it's
+required to work at gigabit.
+
+MDIO bus and PHY management
+---------------------------
+
+The SJA1105 does not have an MDIO bus and does not perform in-band AN either.
+Therefore there is no link state notification coming from the switch device.
+A board would need to hook up the PHYs connected to the switch to any other
+MDIO bus available to Linux within the system (e.g. to the DSA master's MDIO
+bus). Link state management then works by the driver manually keeping the MAC
+link speed in sync (over SPI commands) with the settings negotiated by the PHY.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 984e68f9e026..f390fe3cfdfb 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,7 +9,6 @@ Contents:
netdev-FAQ
af_xdp
batman-adv
- bpf_flow_dissector
can
can_ucan_protocol
device_drivers/freescale/dpaa2/index
@@ -25,6 +24,7 @@ Contents:
device_drivers/intel/i40e
device_drivers/intel/iavf
device_drivers/intel/ice
+ dsa/index
devlink-info-versions
ieee802154
kapi
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index e2142fe40cda..725b8bea58a7 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -81,6 +81,11 @@ fib_multipath_hash_policy - INTEGER
0 - Layer 3
1 - Layer 4
+fib_sync_mem - UNSIGNED INTEGER
+ Amount of dirty memory from fib entries that can be backlogged before
+ synchronize_rcu is forced.
+ Default: 512kB Minimum: 64kB Maximum: 64MB
+
ip_forward_update_priority - INTEGER
Whether to update SKB priority from "TOS" field in IPv4 header after it
is forwarded. The new SKB priority is mapped from TOS field value
@@ -1337,6 +1342,7 @@ tag - INTEGER
Default value is 0.
xfrm4_gc_thresh - INTEGER
+ (Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv4
destination cache entries. At twice this value the system will
refuse new allocations.
@@ -1909,17 +1915,43 @@ enhanced_dad - BOOLEAN
icmp/*:
ratelimit - INTEGER
- Limit the maximal rates for sending ICMPv6 packets.
+ Limit the maximal rates for sending ICMPv6 messages.
0 to disable any limiting,
otherwise the minimal space between responses in milliseconds.
Default: 1000
+ratemask - list of comma separated ranges
+ For ICMPv6 message types matching the ranges in the ratemask, limit
+ the sending of the message according to the ratelimit parameter.
+
+ The format used for both input and output is a comma separated
+ list of ranges (e.g. "0-127,129" for ICMPv6 message types 0 to 127 and
+ 129). Writing to the file will clear all previous ranges of ICMPv6
+ message types and update the current list with the input.
+
+ Refer to: https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xhtml
+ for numerical values of ICMPv6 message types, e.g. echo request is 128
+ and echo reply is 129.
+
+ Default: 0-1,3-127 (rate limit ICMPv6 errors except Packet Too Big)
+
echo_ignore_all - BOOLEAN
If set non-zero, then the kernel will ignore all ICMP ECHO
requests sent to it over the IPv6 protocol.
Default: 0
+echo_ignore_multicast - BOOLEAN
+ If set non-zero, then the kernel will ignore all ICMP ECHO
+ requests sent to it over the IPv6 protocol via multicast.
+ Default: 0
+
+echo_ignore_anycast - BOOLEAN
+ If set non-zero, then the kernel will ignore all ICMP ECHO
+ requests sent to it over the IPv6 protocol destined to an anycast address.
+ Default: 0
+
xfrm6_gc_thresh - INTEGER
+ (Obsolete since linux-4.14)
The threshold at which we will start garbage collecting for IPv6
destination cache entries. At twice this value the system will
refuse new allocations.
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 8c7a713cf657..642fa963be3c 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -132,7 +132,7 @@ version that should be applied. If there is any doubt, the maintainer
will reply and ask what should be done.
Q: I made changes to only a few patches in a patch series should I resend only those changed?
---------------------------------------------------------------------------------------------
+---------------------------------------------------------------------------------------------
A: No, please resend the entire patch series and make sure you do number your
patches such that it is clear this is the latest and greatest set of patches
that can be applied.
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index cd7303d7fa25..180e07d956a7 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -796,7 +796,9 @@ The kernel interface functions are as follows:
s64 tx_total_len,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
- bool upgrade);
+ bool upgrade,
+ bool intr,
+ unsigned int debug_id);
This allocates the infrastructure to make a new RxRPC call and assigns
call and connection numbers. The call will be made on the UDP port that
@@ -824,6 +826,13 @@ The kernel interface functions are as follows:
the server upgrade the service to a better one. The resultant service ID
is returned by rxrpc_kernel_recv_data().
+ intr should be set to true if the call should be interruptible. If this
+ is not set, this function may not return until a channel has been
+ allocated; if it is set, the function may return -ERESTARTSYS.
+
+ debug_id is the call debugging ID to be used for tracing. This can be
+ obtained by atomically incrementing rxrpc_debug_id.
+
If this function is successful, an opaque reference to the RxRPC call is
returned. The caller now holds a reference on this and it must be
properly ended.
@@ -1056,6 +1065,16 @@ The kernel interface functions are as follows:
This value can be used to determine if the remote client has been
restarted as it shouldn't change otherwise.
+ (*) Set the maximum lifespan on a call.
+
+ void rxrpc_kernel_set_max_life(struct socket *sock,
+ struct rxrpc_call *call,
+ unsigned long hard_timeout)
+
+ This sets the maximum lifespan on a call to hard_timeout (which is in
+ jiffies). In the event of the timeout occurring, the call will be
+ aborted and -ETIME or -ETIMEDOUT will be returned.
+
=======================
CONFIGURABLE PARAMETERS
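A hedged sketch of how the two rxrpc additions above fit together, inside a
caller that already has a bound AF_RXRPC socket. The leading parameters of
rxrpc_kernel_begin_call() and the rxrpc_debug_id atomic are assumed from the
AF_RXRPC kernel API; sock, srx, key, user_call_ID, tx_total_len and
my_notify_rx are hypothetical caller-side names::

	struct rxrpc_call *call;

	call = rxrpc_kernel_begin_call(sock, &srx, key, user_call_ID,
				       tx_total_len, GFP_KERNEL,
				       my_notify_rx,
				       false,	/* upgrade */
				       true,	/* intr: may fail with -ERESTARTSYS */
				       atomic_inc_return(&rxrpc_debug_id));
	if (IS_ERR(call))
		return PTR_ERR(call);

	/* Abort the call with -ETIME/-ETIMEDOUT if it runs longer than 30s. */
	rxrpc_kernel_set_max_life(sock, call, 30 * HZ);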
diff --git a/Documentation/ntb.txt b/Documentation/ntb.txt
index a043854d28df..074a423c853c 100644
--- a/Documentation/ntb.txt
+++ b/Documentation/ntb.txt
@@ -41,9 +41,10 @@ mainly used to perform the proper memory window initialization. Typically
there are two types of memory window interfaces supported by the NTB API:
inbound translation configured on the local ntb port and outbound translation
configured by the peer, on the peer ntb port. The first type is
-depicted on the next figure
+depicted on the next figure::
+
+ Inbound translation:
-Inbound translation:
Memory: Local NTB Port: Peer NTB Port: Peer MMIO:
____________
| dma-mapped |-ntb_mw_set_trans(addr) |
@@ -58,9 +59,10 @@ maps corresponding outbound memory window so to have access to the shared
memory region.
The second type of interface, that implies the shared windows being
-initialized by a peer device, is depicted on the figure:
+initialized by a peer device, is depicted on the figure::
+
+ Outbound translation:
-Outbound translation:
Memory: Local NTB Port: Peer NTB Port: Peer MMIO:
____________ ______________
| dma-mapped | | | MW base addr |<== memory-mapped IO
@@ -75,11 +77,13 @@ outbound memory window so to have access to the shared memory region.
As one can see the described scenarios can be combined in one portable
algorithm.
+
Local device:
1) Allocate memory for a shared window
2) Initialize memory window by translated address of the allocated region
(it may fail if local memory window initialization is unsupported)
3) Send the translated address and memory window index to a peer device
+
Peer device:
1) Initialize memory window with retrieved address of the allocated
by another device memory region (it may fail if peer memory window
@@ -88,6 +92,7 @@ algorithm.
In accordance with this scenario, the NTB Memory Window API can be used as
follows:
+
Local device:
1) ntb_mw_count(pidx) - retrieve number of memory ranges, which can
be allocated for memory windows between local device and peer device
@@ -103,6 +108,7 @@ follows:
5) Send translated base address (usually together with memory window
number) to the peer device using, for instance, scratchpad or message
registers.
+
Peer device:
1) ntb_peer_mw_set_trans(pidx, midx) - try to set received from other
device (related to pidx) translated address for specified memory
diff --git a/Documentation/packing.txt b/Documentation/packing.txt
new file mode 100644
index 000000000000..f830c98645f1
--- /dev/null
+++ b/Documentation/packing.txt
@@ -0,0 +1,149 @@
+================================================
+Generic bitfield packing and unpacking functions
+================================================
+
+Problem statement
+-----------------
+
+When working with hardware, one has to choose between several approaches of
+interfacing with it.
+One can memory-map a pointer to a carefully crafted struct over the hardware
+device's memory region, and access its fields as struct members (potentially
+declared as bitfields). But writing code this way would make it less portable,
+due to potential endianness mismatches between the CPU and the hardware device.
+Additionally, one has to pay close attention when translating register
+definitions from the hardware documentation into bit field indices for the
+structs. Also, some hardware (typically networking equipment) tends to group
+its register fields in ways that violate any reasonable word boundaries
+(sometimes even 64 bit ones). This creates the inconvenience of having to
+define "high" and "low" portions of register fields within the struct.
+A more robust alternative to struct field definitions would be to extract the
+required fields by shifting the appropriate number of bits. But this would
+still not protect from endianness mismatches, except if all memory accesses
+were performed byte-by-byte. Also the code can easily get cluttered, and the
+high-level idea might get lost among the many bit shifts required.
+Many drivers take the bit-shifting approach and then attempt to reduce the
+clutter with tailored macros, but more often than not these macros take
+shortcuts that still prevent the code from being truly portable.
+
+The solution
+------------
+
+This API deals with 2 basic operations:
+ - Packing a CPU-usable number into a memory buffer (with hardware
+ constraints/quirks)
+ - Unpacking a memory buffer (which has hardware constraints/quirks)
+ into a CPU-usable number.
+
+The API offers an abstraction over said hardware constraints and quirks,
+as well as over CPU endianness, and therefore hides any mismatch between
+the two.
+
+The basic unit of these API functions is the u64. From the CPU's
+perspective, bit 63 always means bit offset 7 of byte 7, albeit only
+logically. The question is: where do we lay this bit out in memory?
+
+The following examples cover the memory layout of a packed u64 field.
+The byte offsets in the packed buffer are always implicitly 0, 1, ... 7.
+What the examples show is where the logical bytes and bits sit.
+
+1. Normally (no quirks), we would do it like this:
+
+63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
+7 6 5 4
+31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+3 2 1 0
+
+That is, the MSByte (7) of the CPU-usable u64 sits at memory offset 0, and the
+LSByte (0) of the u64 sits at memory offset 7.
+This corresponds to what most folks would regard as "big endian", where
+bit i corresponds to the number 2^i. This is also referred to in the code
+comments as "logical" notation.
+
+
+2. If QUIRK_MSB_ON_THE_RIGHT is set, we do it like this:
+
+56 57 58 59 60 61 62 63 48 49 50 51 52 53 54 55 40 41 42 43 44 45 46 47 32 33 34 35 36 37 38 39
+7 6 5 4
+24 25 26 27 28 29 30 31 16 17 18 19 20 21 22 23 8 9 10 11 12 13 14 15 0 1 2 3 4 5 6 7
+3 2 1 0
+
+That is, QUIRK_MSB_ON_THE_RIGHT does not affect byte positioning, but
+inverts bit offsets inside a byte.
+
+
+3. If QUIRK_LITTLE_ENDIAN is set, we do it like this:
+
+39 38 37 36 35 34 33 32 47 46 45 44 43 42 41 40 55 54 53 52 51 50 49 48 63 62 61 60 59 58 57 56
+4 5 6 7
+7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+0 1 2 3
+
+Therefore, QUIRK_LITTLE_ENDIAN means that inside the memory region, every
+byte from each 4-byte word is placed at its mirrored position compared to
+the boundary of that word.
+
+4. If QUIRK_MSB_ON_THE_RIGHT and QUIRK_LITTLE_ENDIAN are both set, we do it
+ like this:
+
+32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+4 5 6 7
+0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+0 1 2 3
+
+
+5. If just QUIRK_LSW32_IS_FIRST is set, we do it like this:
+
+31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+3 2 1 0
+63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
+7 6 5 4
+
+In this case the 8 byte memory region is interpreted as follows: first
+4 bytes correspond to the least significant 4-byte word, next 4 bytes to
+the more significant 4-byte word.
+
+
+6. If QUIRK_LSW32_IS_FIRST and QUIRK_MSB_ON_THE_RIGHT are set, we do it like
+ this:
+
+24 25 26 27 28 29 30 31 16 17 18 19 20 21 22 23 8 9 10 11 12 13 14 15 0 1 2 3 4 5 6 7
+3 2 1 0
+56 57 58 59 60 61 62 63 48 49 50 51 52 53 54 55 40 41 42 43 44 45 46 47 32 33 34 35 36 37 38 39
+7 6 5 4
+
+
+7. If QUIRK_LSW32_IS_FIRST and QUIRK_LITTLE_ENDIAN are set, it looks like
+ this:
+
+7 6 5 4 3 2 1 0 15 14 13 12 11 10 9 8 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
+0 1 2 3
+39 38 37 36 35 34 33 32 47 46 45 44 43 42 41 40 55 54 53 52 51 50 49 48 63 62 61 60 59 58 57 56
+4 5 6 7
+
+
+8. If QUIRK_LSW32_IS_FIRST, QUIRK_LITTLE_ENDIAN and QUIRK_MSB_ON_THE_RIGHT
+ are set, it looks like this:
+
+0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+0 1 2 3
+32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+4 5 6 7
+
+
+We always think of our offsets as if there were no quirk, and we translate
+them afterwards, before accessing the memory region.
+
+Intended use
+------------
+
+Drivers that opt to use this API first need to identify which of the above 3
+quirk combinations (for a total of 8) matches what the hardware documentation
+describes. Then they should wrap the packing() function, creating a new
+xxx_packing() that calls it with the proper QUIRK_* one-hot bits set.
+
+The packing() function returns an int-encoded error code, which protects the
+programmer against incorrect API use. The errors are not expected to occur
+during runtime, so it is reasonable for xxx_packing() to return void and
+simply swallow those errors. Optionally, it can dump the stack or print the
+error description.
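As a hedged sketch of the wrapper suggested in "Intended use" above (the
packing() prototype and QUIRK_* flags are assumed to match
include/linux/packing.h; the buffer length and the quirk combination are
invented for the example)::

	#include <linux/types.h>
	#include <linux/packing.h>
	#include <linux/printk.h>

	#define XXX_PACKED_LEN	8	/* hypothetical size of the packed buffer */

	static void xxx_packing(void *buf, u64 *val, int startbit, int endbit,
				enum packing_op op)
	{
		int err = packing(buf, val, startbit, endbit, XXX_PACKED_LEN,
				  op, QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);

		/* An error here means incorrect API use, not a runtime fault. */
		if (err)
			dump_stack();
	}

Call sites can then say, for example, xxx_packing(buf, &speed, 31, 28, UNPACK)
to extract a 4-bit field without repeating the quirk bits or the buffer length.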
diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/DAWR-POWER9.txt
index 2feaa6619658..ecdbb076438c 100644
--- a/Documentation/powerpc/DAWR-POWER9.txt
+++ b/Documentation/powerpc/DAWR-POWER9.txt
@@ -1,10 +1,10 @@
DAWR issues on POWER9
============================
-On POWER9 the DAWR can cause a checkstop if it points to cache
-inhibited (CI) memory. Currently Linux has no way to disinguish CI
-memory when configuring the DAWR, so (for now) the DAWR is disabled by
-this commit:
+On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop
+if it points to cache inhibited (CI) memory. Currently Linux has no way to
+distinguish CI memory when configuring the DAWR, so (for now) the DAWR is
+disabled by this commit:
commit 9654153158d3e0684a1bdb76dbababdb7111d5a0
Author: Michael Neuling <mikey@neuling.org>
@@ -56,3 +56,35 @@ POWER9. Loads and stores to the watchpoint locations will not be
trapped in GDB. The watchpoint is remembered, so if the guest is
migrated back to the POWER8 host, it will start working again.
+Force enabling the DAWR
+=============================
+Kernels (since ~v5.2) have an option to force enable the DAWR via:
+
+ echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
+
+This enables the DAWR even on POWER9.
+
+This is a dangerous setting, USE AT YOUR OWN RISK.
+
+Some users may not care about a bad user crashing their box
+(ie. single user/desktop systems) and really want the DAWR. This
+allows them to force enable DAWR.
+
+This flag can also be used to disable DAWR access. Once this is
+cleared, all DAWR access should be cleared immediately and your
+machine is once again safe from crashing.
+
+Userspace may get confused by toggling this. If DAWR is force
+enabled/disabled between getting the number of breakpoints (via
+PTRACE_GETHWDBGINFO) and setting the breakpoint, userspace will get an
+inconsistent view of what's available. Similarly for guests.
+
+For the DAWR to be enabled in a KVM guest, the DAWR needs to be force
+enabled in the host AND the guest. For this reason, this won't work on
+POWERVM as it doesn't allow the HCALL to work. Writes of 'Y' to the
+dawr_enable_dangerous file will fail if the hypervisor doesn't support
+writing the DAWR.
+
+To double check the DAWR is working, run this kernel selftest:
+ tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+Any errors/failures/skips mean something is wrong.
diff --git a/Documentation/preempt-locking.txt b/Documentation/preempt-locking.txt
index 509f5a422d57..dce336134e54 100644
--- a/Documentation/preempt-locking.txt
+++ b/Documentation/preempt-locking.txt
@@ -52,7 +52,6 @@ preemption must be disabled around such regions.
Note, some FPU functions are already explicitly preempt safe. For example,
kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, fpu__restore() must be called with preemption disabled.
RULE #3: Lock acquire and release must be performed by same task
diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst
index 4213e580f273..855a70b80269 100644
--- a/Documentation/process/5.Posting.rst
+++ b/Documentation/process/5.Posting.rst
@@ -216,10 +216,12 @@ The tags in common use are:
which can be found in :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
Code without a proper signoff cannot be merged into the mainline.
- - Co-developed-by: states that the patch was also created by another developer
- along with the original author. This is useful at times when multiple
- people work on a single patch. Note, this person also needs to have a
- Signed-off-by: line in the patch as well.
+ - Co-developed-by: states that the patch was co-created by several developers;
+ it is used to give attribution to co-authors (in addition to the author
+ attributed by the From: tag) when multiple people work on a single patch.
+ Every Co-developed-by: must be immediately followed by a Signed-off-by: of
+ the associated co-author. Details and examples can be found in
+ :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`.
- Acked-by: indicates an agreement by another developer (often a
maintainer of the relevant code) that the patch is appropriate for
diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst
index 8ea913e99fa1..fa864a51e6ea 100644
--- a/Documentation/process/coding-style.rst
+++ b/Documentation/process/coding-style.rst
@@ -843,7 +843,8 @@ used.
The kernel provides the following general purpose memory allocators:
kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
vzalloc(). Please refer to the API documentation for further information
-about them.
+about them. :ref:`Documentation/core-api/memory-allocation.rst
+<memory_allocation>`
The preferred form for passing a size of a struct is the following:
@@ -874,6 +875,9 @@ The preferred form for allocating a zeroed array is the following:
Both forms check for overflow on the allocation size n * sizeof(...),
and return NULL if that occurred.
+These generic allocation functions all emit a stack dump on failure when used
+without __GFP_NOWARN so there is no use in emitting an additional failure
+message when NULL is returned.
15) The inline disease
----------------------
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index 0ef5a63c06ba..49e0f64a3427 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
+.. _deprecated:
+
=====================================================================
Deprecated Interfaces, Language Features, Attributes, and Conventions
=====================================================================
diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst
index ad2b6c852b95..6ab75c11d2c3 100644
--- a/Documentation/process/howto.rst
+++ b/Documentation/process/howto.rst
@@ -62,7 +62,7 @@ Legal Issues
The Linux kernel source code is released under the GPL. Please see the file
COPYING in the main directory of the source tree. The Linux kernel licensing
rules and how to use `SPDX <https://spdx.org/>`_ identifiers in source code are
-descibed in :ref:`Documentation/process/license-rules.rst <kernel_licensing>`.
+described in :ref:`Documentation/process/license-rules.rst <kernel_licensing>`.
If you have further questions about the license, please contact a lawyer, and do
not ask on the Linux kernel mailing list. The people on the mailing lists are
not lawyers, and you should not rely on their statements on legal matters.
diff --git a/Documentation/process/kernel-docs.rst b/Documentation/process/kernel-docs.rst
index ab12dddc773e..7a45a8e36ea7 100644
--- a/Documentation/process/kernel-docs.rst
+++ b/Documentation/process/kernel-docs.rst
@@ -95,18 +95,6 @@ On-line docs
[...]. This paper examines some common problems for
submitting larger changes and some strategies to avoid problems.
- * Title: **Overview of the Virtual File System**
-
- :Author: Richard Gooch.
- :URL: http://www.mjmwired.net/kernel/Documentation/filesystems/vfs.txt
- :Date: 2007
- :Keywords: VFS, File System, mounting filesystems, opening files,
- dentries, dcache.
- :Description: Brief introduction to the Linux Virtual File System.
- What is it, how it works, operations taken when opening a file or
- mounting a file system and description of important data
- structures explaining the purpose of each of their entries.
-
* Title: **Linux Device Drivers, Third Edition**
:Author: Jonathan Corbet, Alessandro Rubini, Greg Kroah-Hartman
diff --git a/Documentation/process/license-rules.rst b/Documentation/process/license-rules.rst
index 6b09033a8e9e..2ef44ada3f11 100644
--- a/Documentation/process/license-rules.rst
+++ b/Documentation/process/license-rules.rst
@@ -234,13 +234,13 @@ kernel, can be broken down into:
|
-2. Not recommended licenses:
+2. Deprecated licenses:
These licenses should only be used for existing code or for importing
code from a different project. These licenses are available from the
directory::
- LICENSES/other/
+ LICENSES/deprecated/
in the kernel source tree.
@@ -250,14 +250,14 @@ kernel, can be broken down into:
Examples::
- LICENSES/other/ISC
+ LICENSES/deprecated/ISC
Contains the Internet Systems Consortium license text and the required
metatags::
- LICENSES/other/ZLib
+ LICENSES/deprecated/GPL-1.0
- Contains the ZLIB license text and the required metatags.
+ Contains the GPL version 1 license text and the required metatags.
Metatags:
@@ -281,7 +281,56 @@ kernel, can be broken down into:
|
-3. _`Exceptions`:
+3. Dual Licensing Only
+
+ These licenses should only be used to dual license code with another
+ license in addition to a preferred license. These licenses are available
+ from the directory::
+
+ LICENSES/dual/
+
+ in the kernel source tree.
+
+ The files in this directory contain the full license text and
+ `Metatags`_. The file names are identical to the SPDX license
+ identifier which shall be used for the license in source files.
+
+ Examples::
+
+ LICENSES/dual/MPL-1.1
+
+ Contains the Mozilla Public License version 1.1 license text and the
+ required metatags::
+
+ LICENSES/dual/Apache-2.0
+
+ Contains the Apache License version 2.0 license text and the required
+ metatags.
+
+ Metatags:
+
+ The metatag requirements for 'other' licenses are identical to the
+ requirements of the `Preferred licenses`_.
+
+ File format example::
+
+ Valid-License-Identifier: MPL-1.1
+ SPDX-URL: https://spdx.org/licenses/MPL-1.1.html
+ Usage-Guide:
+ Do NOT use. The MPL-1.1 is not GPL2 compatible. It may only be used for
+ dual-licensed files where the other license is GPL2 compatible.
+ If you end up using this it MUST be used together with a GPL2 compatible
+ license using "OR".
+ To use the Mozilla Public License version 1.1 put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: MPL-1.1
+ License-Text:
+ Full license text
+
+|
+
+4. _`Exceptions`:
Some licenses can be amended with exceptions which grant certain rights
which the original license does not. These exceptions are available
diff --git a/Documentation/process/maintainer-pgp-guide.rst b/Documentation/process/maintainer-pgp-guide.rst
index aff9b1a4d77b..4bab7464ff8c 100644
--- a/Documentation/process/maintainer-pgp-guide.rst
+++ b/Documentation/process/maintainer-pgp-guide.rst
@@ -943,7 +943,7 @@ have on your keyring::
Next, open the `PGP pathfinder`_. In the "From" field, paste the key
fingerprint of Linus Torvalds from the output above. In the "To" field,
-paste they key-id you found via ``gpg --search`` of the unknown key, and
+paste the key-id you found via ``gpg --search`` of the unknown key, and
check the results:
- `Finding paths to Linus`_
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index 367353c54949..c88867b173d9 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -72,47 +72,44 @@ and elsewhere regarding submitting Linux kernel patches.
13) Has been build- and runtime tested with and without ``CONFIG_SMP`` and
``CONFIG_PREEMPT.``
-14) If the patch affects IO/Disk, etc: has been tested with and without
- ``CONFIG_LBDAF.``
+14) All codepaths have been exercised with all lockdep features enabled.
-15) All codepaths have been exercised with all lockdep features enabled.
+15) All new ``/proc`` entries are documented under ``Documentation/``
-16) All new ``/proc`` entries are documented under ``Documentation/``
-
-17) All new kernel boot parameters are documented in
+16) All new kernel boot parameters are documented in
``Documentation/admin-guide/kernel-parameters.rst``.
-18) All new module parameters are documented with ``MODULE_PARM_DESC()``
+17) All new module parameters are documented with ``MODULE_PARM_DESC()``
-19) All new userspace interfaces are documented in ``Documentation/ABI/``.
+18) All new userspace interfaces are documented in ``Documentation/ABI/``.
See ``Documentation/ABI/README`` for more information.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
-20) Check that it all passes ``make headers_check``.
+19) Check that it all passes ``make headers_check``.
-21) Has been checked with injection of at least slab and page-allocation
+20) Has been checked with injection of at least slab and page-allocation
failures. See ``Documentation/fault-injection/``.
If the new code is substantial, addition of subsystem-specific fault
injection might be appropriate.
-22) Newly-added code has been compiled with ``gcc -W`` (use
+21) Newly-added code has been compiled with ``gcc -W`` (use
``make EXTRA_CFLAGS=-W``). This will generate lots of noise, but is good
for finding bugs like "warning: comparison between signed and unsigned".
-23) Tested after it has been merged into the -mm patchset to make sure
+22) Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
-24) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
+23) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
comment in the source code that explains the logic of what they are doing
and why.
-25) If any ioctl's are added by the patch, then also update
+24) If any ioctl's are added by the patch, then also update
``Documentation/ioctl/ioctl-number.txt``.
-26) If your modified source code depends on or uses any of the kernel
+25) If your modified source code depends on or uses any of the kernel
APIs or features that are related to the following ``Kconfig`` symbols,
then test multiple builds with the related ``Kconfig`` symbols disabled
and/or ``=m`` (if that option is available) [not all of these at the
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index be7d1829c3af..9c4299293c72 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -60,8 +60,8 @@ not in any lower subdirectory.
To create a patch for a single file, it is often sufficient to do::
- SRCTREE= linux
- MYFILE= drivers/net/mydriver.c
+ SRCTREE=linux
+ MYFILE=drivers/net/mydriver.c
cd $SRCTREE
cp $MYFILE $MYFILE.orig
@@ -73,7 +73,7 @@ To create a patch for multiple files, you should unpack a "vanilla",
or unmodified kernel source tree, and generate a ``diff`` against your
own source tree. For example::
- MYSRC= /devel/linux
+ MYSRC=/devel/linux
tar xvfz linux-3.19.tar.gz
mv linux-3.19 linux-3.19-vanilla
@@ -545,10 +545,40 @@ person it names - but it should indicate that this person was copied on the
patch. This tag documents that potentially interested parties
have been included in the discussion.
-A Co-developed-by: states that the patch was also created by another developer
-along with the original author. This is useful at times when multiple people
-work on a single patch. Note, this person also needs to have a Signed-off-by:
-line in the patch as well.
+Co-developed-by: states that the patch was co-created by multiple developers;
+it is used to give attribution to co-authors (in addition to the author
+attributed by the From: tag) when several people work on a single patch. Since
+Co-developed-by: denotes authorship, every Co-developed-by: must be immediately
+followed by a Signed-off-by: of the associated co-author. Standard sign-off
+procedure applies, i.e. the ordering of Signed-off-by: tags should reflect the
+chronological history of the patch insofar as possible, regardless of whether
+the author is attributed via From: or Co-developed-by:. Notably, the last
+Signed-off-by: must always be that of the developer submitting the patch.
+
+Note, the From: tag is optional when the From: author is also the person (and
+email) listed in the From: line of the email header.
+
+Example of a patch submitted by the From: author::
+
+ <changelog>
+
+ Co-developed-by: First Co-Author <first@coauthor.example.org>
+ Signed-off-by: First Co-Author <first@coauthor.example.org>
+ Co-developed-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+
+Example of a patch submitted by a Co-developed-by: author::
+
+ From: From Author <from@author.example.org>
+
+ <changelog>
+
+ Co-developed-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+ Co-developed-by: Submitting Co-Author <sub@coauthor.example.org>
+ Signed-off-by: Submitting Co-Author <sub@coauthor.example.org>
13) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
@@ -696,7 +726,7 @@ A couple of example Subjects::
The ``from`` line must be the very first line in the message body,
and has the form:
- From: Original Author <author@example.com>
+ From: Patch Author <author@example.com>
The ``from`` line specifies who will be credited as the author of the
patch in the permanent changelog. If the ``from`` line is missing,
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index 6c42c75103eb..6361fb01c9c1 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
the new syscalls yet.
Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
-inline function before writing up the syscalls (that function returns
--ENOSYS right now).
+inline function before writing up the syscalls.
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index a129acf38537..688c95b11919 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -136,5 +136,5 @@ a high functionality RTC is integrated into the SOC. That system might read
the system clock from the discrete RTC, but use the integrated one for all
other tasks, because of its greater functionality.
-Check out tools/testing/selftests/timers/rtctest.c for an example usage of the
+Check out tools/testing/selftests/rtc/rtctest.c for an example usage of the
ioctl interface.
diff --git a/Documentation/serial/README.cycladesZ b/Documentation/serial/cyclades_z.rst
index 024a69443cc2..532ff67e2f1c 100644
--- a/Documentation/serial/README.cycladesZ
+++ b/Documentation/serial/cyclades_z.rst
@@ -1,8 +1,11 @@
+================
+Cyclades-Z notes
+================
The Cyclades-Z must have firmware loaded onto the card before it will
operate. This operation should be performed during system startup,
The firmware, loader program and the latest device driver code are
available from Cyclades at
- ftp://ftp.cyclades.com/pub/cyclades/cyclades-z/linux/
+ ftp://ftp.cyclades.com/pub/cyclades/cyclades-z/linux/
diff --git a/Documentation/serial/driver b/Documentation/serial/driver.rst
index 86e47c19a924..4537119bf624 100644
--- a/Documentation/serial/driver
+++ b/Documentation/serial/driver.rst
@@ -1,6 +1,6 @@
-
- Low Level Serial API
- --------------------
+====================
+Low Level Serial API
+====================
This document is meant as a brief overview of some aspects of the new serial
@@ -44,7 +44,7 @@ are described in the uart_ops listing below.)
There are two locks. A per-port spinlock, and an overall semaphore.
From the core driver perspective, the port->lock locks the following
-data:
+data::
port->mctrl
port->icount
@@ -75,41 +75,51 @@ hardware.
return TIOCSER_TEMT.
Locking: none.
+
Interrupts: caller dependent.
+
This call must not sleep
set_mctrl(port, mctrl)
This function sets the modem control lines for port described
by 'port' to the state described by mctrl. The relevant bits
of mctrl are:
+
- TIOCM_RTS RTS signal.
- TIOCM_DTR DTR signal.
- TIOCM_OUT1 OUT1 signal.
- TIOCM_OUT2 OUT2 signal.
- TIOCM_LOOP Set the port into loopback mode.
+
If the appropriate bit is set, the signal should be driven
active. If the bit is clear, the signal should be driven
inactive.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
get_mctrl(port)
Returns the current state of modem control inputs. The state
of the outputs should not be returned, since the core keeps
track of their state. The state information should include:
+
- TIOCM_CAR state of DCD signal
- TIOCM_CTS state of CTS signal
- TIOCM_DSR state of DSR signal
- TIOCM_RI state of RI signal
+
The bit is set if the signal is currently driven active. If
the port does not support CTS, DCD or DSR, the driver should
indicate that the signal is permanently active. If RI is
not available, the signal should not be indicated as active.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
stop_tx(port)
@@ -121,14 +131,18 @@ hardware.
possible.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
start_tx(port)
Start transmitting characters.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
throttle(port)
@@ -138,16 +152,17 @@ hardware.
This will be called only if hardware assisted flow control is enabled.
Locking: serialized with .unthrottle() and termios modification by the
- tty layer.
+ tty layer.
unthrottle(port)
Notify the serial driver that characters can now be sent to the serial
port without fear of overrunning the input buffers of the line
disciplines.
+
This will be called only if hardware assisted flow control is enabled.
Locking: serialized with .throttle() and termios modification by the
- tty layer.
+ tty layer.
send_xchar(port,ch)
Transmit a high priority character, even if the port is stopped.
@@ -159,6 +174,7 @@ hardware.
Do not transmit if ch == '\0' (__DISABLED_CHAR).
Locking: none.
+
Interrupts: caller dependent.
stop_rx(port)
@@ -166,7 +182,9 @@ hardware.
being closed.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
enable_ms(port)
@@ -177,7 +195,9 @@ hardware.
called.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
break_ctl(port,ctl)
@@ -196,6 +216,7 @@ hardware.
This method will only be called when the port is initially opened.
Locking: port_sem taken.
+
Interrupts: globally disabled.
shutdown(port)
@@ -210,6 +231,7 @@ hardware.
this port.
Locking: port_sem taken.
+
Interrupts: caller dependent.
flush_buffer(port)
@@ -220,7 +242,9 @@ hardware.
buffer is cleared.
Locking: port->lock taken.
+
Interrupts: locally disabled.
+
This call must not sleep
set_termios(port,termios,oldtermios)
@@ -228,29 +252,46 @@ hardware.
bits. Update read_status_mask and ignore_status_mask to indicate
the types of events we are interested in receiving. Relevant
termios->c_cflag bits are:
- CSIZE - word size
- CSTOPB - 2 stop bits
- PARENB - parity enable
- PARODD - odd parity (when PARENB is in force)
- CREAD - enable reception of characters (if not set,
+
+ CSIZE
+ - word size
+ CSTOPB
+ - 2 stop bits
+ PARENB
+ - parity enable
+ PARODD
+ - odd parity (when PARENB is in force)
+ CREAD
+ - enable reception of characters (if not set,
still receive characters from the port, but
throw them away.
- CRTSCTS - if set, enable CTS status change reporting
- CLOCAL - if not set, enable modem status change
+ CRTSCTS
+ - if set, enable CTS status change reporting
+ CLOCAL
+ - if not set, enable modem status change
reporting.
+
Relevant termios->c_iflag bits are:
- INPCK - enable frame and parity error events to be
+
+ INPCK
+ - enable frame and parity error events to be
passed to the TTY layer.
- BRKINT
- PARMRK - both of these enable break events to be
+ BRKINT / PARMRK
+ - both of these enable break events to be
passed to the TTY layer.
- IGNPAR - ignore parity and framing errors
- IGNBRK - ignore break errors, If IGNPAR is also
+ IGNPAR
+ - ignore parity and framing errors
+ IGNBRK
+ - ignore break errors. If IGNPAR is also
set, ignore overrun errors as well.
+
The interaction of the iflag bits is as follows (parity error
given as an example):
+
+ =============== ======= ====== =============================
Parity error INPCK IGNPAR
+ =============== ======= ====== =============================
n/a 0 n/a character received, marked as
TTY_NORMAL
None 1 n/a character received, marked as
@@ -258,16 +299,19 @@ hardware.
Yes 1 0 character received, marked as
TTY_PARITY
Yes 1 1 character discarded
+ =============== ======= ====== =============================
Other flags may be used (eg, xon/xoff characters) if your
hardware supports hardware "soft" flow control.
Locking: caller holds tty_port->mutex
+
Interrupts: caller dependent.
+
This call must not sleep
set_ldisc(port,termios)
- Notifier for discipline change. See Documentation/serial/tty.txt.
+ Notifier for discipline change. See Documentation/serial/tty.rst.
Locking: caller holds tty_port->mutex
@@ -283,6 +327,7 @@ hardware.
will occur even if CONFIG_PM is not set.
Locking: none.
+
Interrupts: caller dependent.
type(port)
@@ -291,6 +336,7 @@ hardware.
substituted.
Locking: none.
+
Interrupts: caller dependent.
release_port(port)
@@ -298,6 +344,7 @@ hardware.
the port.
Locking: none.
+
Interrupts: caller dependent.
request_port(port)
@@ -306,6 +353,7 @@ hardware.
returns, and it should return -EBUSY on failure.
Locking: none.
+
Interrupts: caller dependent.
config_port(port,type)
@@ -321,6 +369,7 @@ hardware.
internally hard wired (eg, system on a chip implementations).
Locking: none.
+
Interrupts: caller dependent.
verify_port(port,serinfo)
@@ -328,6 +377,7 @@ hardware.
suitable for this port type.
Locking: none.
+
Interrupts: caller dependent.
ioctl(port,cmd,arg)
@@ -335,6 +385,7 @@ hardware.
using the standard numbering system found in <asm/ioctl.h>
Locking: none.
+
Interrupts: caller dependent.
poll_init(port)
@@ -343,6 +394,7 @@ hardware.
this should not request interrupts.
Locking: tty_mutex and tty_port->mutex taken.
+
Interrupts: n/a.
poll_put_char(port,ch)
@@ -350,7 +402,9 @@ hardware.
port. It can and should block until there is space in the TX FIFO.
Locking: none.
+
Interrupts: caller dependent.
+
This call must not sleep
poll_get_char(port)
@@ -359,7 +413,9 @@ hardware.
the function should return NO_POLL_CHAR immediately.
Locking: none.
+
Interrupts: caller dependent.
+
This call must not sleep
Other functions
@@ -370,6 +426,7 @@ uart_update_timeout(port,cflag,baud)
number of bits, parity, stop bits and baud rate.
Locking: caller is expected to take port->lock
+
Interrupts: n/a
uart_get_baud_rate(port,termios,old,min,max)
@@ -385,6 +442,7 @@ uart_get_baud_rate(port,termios,old,min,max)
Note: min..max must always allow 9600 baud to be selected.
Locking: caller dependent.
+
Interrupts: n/a
uart_get_divisor(port,baud)
@@ -395,6 +453,7 @@ uart_get_divisor(port,baud)
custom divisor instead.
Locking: caller dependent.
+
Interrupts: n/a
uart_match_port(port1,port2)
@@ -402,6 +461,7 @@ uart_match_port(port1,port2)
uart_port structures describe the same port.
Locking: n/a
+
Interrupts: n/a
uart_write_wakeup(port)
@@ -409,6 +469,7 @@ uart_write_wakeup(port)
characters in the transmit buffer have dropped below a threshold.
Locking: port->lock should be held.
+
Interrupts: n/a
uart_register_driver(drv)
@@ -419,6 +480,7 @@ uart_register_driver(drv)
registered using uart_add_one_port after this call has succeeded.
Locking: none
+
Interrupts: enabled
uart_unregister_driver()
@@ -427,15 +489,16 @@ uart_unregister_driver()
uart_remove_one_port() if it registered them with uart_add_one_port().
Locking: none
+
Interrupts: enabled
-uart_suspend_port()
+**uart_suspend_port()**
-uart_resume_port()
+**uart_resume_port()**
-uart_add_one_port()
+**uart_add_one_port()**
-uart_remove_one_port()
+**uart_remove_one_port()**
Other notes
-----------
@@ -444,7 +507,7 @@ It is intended some day to drop the 'unused' entries from uart_port, and
allow low level drivers to register their own individual uart_port's with
the core. This will allow drivers to use uart_port as a pointer to a
structure containing both the uart_port entry with their own extensions,
-thus:
+thus::
struct my_port {
struct uart_port port;
@@ -459,14 +522,14 @@ Some helpers are provided in order to set/get modem control lines via GPIO.
mctrl_gpio_init(port, idx):
This will get the {cts,rts,...}-gpios from device tree if they are
present and request them, set direction etc, and return an
- allocated structure. devm_* functions are used, so there's no need
+ allocated structure. `devm_*` functions are used, so there's no need
to call mctrl_gpio_free().
As this sets up the irq handling make sure to not handle changes to the
gpio input lines in your driver, too.
mctrl_gpio_free(dev, gpios):
This will free the requested gpios in mctrl_gpio_init().
- As devm_* functions are used, there's generally no need to call
+ As `devm_*` functions are used, there's generally no need to call
this function.
mctrl_gpio_to_gpiod(gpios, gidx)
diff --git a/Documentation/serial/index.rst b/Documentation/serial/index.rst
new file mode 100644
index 000000000000..d0ba22ea23bf
--- /dev/null
+++ b/Documentation/serial/index.rst
@@ -0,0 +1,32 @@
+:orphan:
+
+==========================
+Support for Serial devices
+==========================
+
+.. toctree::
+ :maxdepth: 1
+
+
+ driver
+ tty
+
+Serial drivers
+==============
+
+.. toctree::
+ :maxdepth: 1
+
+ cyclades_z
+ moxa-smartio
+ n_gsm
+ rocket
+ serial-iso7816
+ serial-rs485
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/serial/moxa-smartio b/Documentation/serial/moxa-smartio
deleted file mode 100644
index 5d2a33be0bd8..000000000000
--- a/Documentation/serial/moxa-smartio
+++ /dev/null
@@ -1,523 +0,0 @@
-=============================================================================
- MOXA Smartio/Industio Family Device Driver Installation Guide
- for Linux Kernel 2.4.x, 2.6.x
- Copyright (C) 2008, Moxa Inc.
-=============================================================================
-Date: 01/21/2008
-
-Content
-
-1. Introduction
-2. System Requirement
-3. Installation
- 3.1 Hardware installation
- 3.2 Driver files
- 3.3 Device naming convention
- 3.4 Module driver configuration
- 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x.
- 3.6 Custom configuration
- 3.7 Verify driver installation
-4. Utilities
-5. Setserial
-6. Troubleshooting
-
------------------------------------------------------------------------------
-1. Introduction
-
- The Smartio/Industio/UPCI family Linux driver supports following multiport
- boards.
-
- - 2 ports multiport board
- CP-102U, CP-102UL, CP-102UF
- CP-132U-I, CP-132UL,
- CP-132, CP-132I, CP132S, CP-132IS,
- CI-132, CI-132I, CI-132IS,
- (C102H, C102HI, C102HIS, C102P, CP-102, CP-102S)
-
- - 4 ports multiport board
- CP-104EL,
- CP-104UL, CP-104JU,
- CP-134U, CP-134U-I,
- C104H/PCI, C104HS/PCI,
- CP-114, CP-114I, CP-114S, CP-114IS, CP-114UL,
- C104H, C104HS,
- CI-104J, CI-104JS,
- CI-134, CI-134I, CI-134IS,
- (C114HI, CT-114I, C104P)
- POS-104UL,
- CB-114,
- CB-134I
-
- - 8 ports multiport board
- CP-118EL, CP-168EL,
- CP-118U, CP-168U,
- C168H/PCI,
- C168H, C168HS,
- (C168P),
- CB-108
-
- This driver and installation procedure have been developed upon Linux Kernel
- 2.4.x and 2.6.x. This driver supports Intel x86 hardware platform. In order
- to maintain compatibility, this version has also been properly tested with
- RedHat, Mandrake, Fedora and S.u.S.E Linux. However, if compatibility problem
- occurs, please contact Moxa at support@moxa.com.tw.
-
- In addition to device driver, useful utilities are also provided in this
- version. They are
- - msdiag Diagnostic program for displaying installed Moxa
- Smartio/Industio boards.
- - msmon Monitor program to observe data count and line status signals.
- - msterm A simple terminal program which is useful in testing serial
- ports.
- - io-irq.exe Configuration program to setup ISA boards. Please note that
- this program can only be executed under DOS.
-
- All the drivers and utilities are published in form of source code under
- GNU General Public License in this version. Please refer to GNU General
- Public License announcement in each source code file for more detail.
-
- In Moxa's Web sites, you may always find latest driver at http://www.moxa.com/.
-
- This version of driver can be installed as Loadable Module (Module driver)
- or built-in into kernel (Static driver). You may refer to following
- installation procedure for suitable one. Before you install the driver,
- please refer to hardware installation procedure in the User's Manual.
-
- We assume the user should be familiar with following documents.
- - Serial-HOWTO
- - Kernel-HOWTO
-
------------------------------------------------------------------------------
-2. System Requirement
- - Hardware platform: Intel x86 machine
- - Kernel version: 2.4.x or 2.6.x
- - gcc version 2.72 or later
- - Maximum 4 boards can be installed in combination
-
------------------------------------------------------------------------------
-3. Installation
-
- 3.1 Hardware installation
- 3.2 Driver files
- 3.3 Device naming convention
- 3.4 Module driver configuration
- 3.5 Static driver configuration for Linux kernel 2.4.x, 2.6.x.
- 3.6 Custom configuration
- 3.7 Verify driver installation
-
-
- 3.1 Hardware installation
-
- There are two types of buses, ISA and PCI, for Smartio/Industio
- family multiport board.
-
- ISA board
- ---------
- You'll have to configure CAP address, I/O address, Interrupt Vector
- as well as IRQ before installing this driver. Please refer to hardware
- installation procedure in User's Manual before proceed any further.
- Please make sure the JP1 is open after the ISA board is set properly.
-
- PCI/UPCI board
- --------------
- You may need to adjust IRQ usage in BIOS to avoid from IRQ conflict
- with other ISA devices. Please refer to hardware installation
- procedure in User's Manual in advance.
-
- PCI IRQ Sharing
- -----------
- Each port within the same multiport board shares the same IRQ. Up to
- 4 Moxa Smartio/Industio PCI Family multiport boards can be installed
- together on one system and they can share the same IRQ.
-
-
- 3.2 Driver files
-
- The driver file may be obtained from ftp, CD-ROM or floppy disk. The
- first step, anyway, is to copy driver file "mxser.tgz" into specified
- directory. e.g. /moxa. The execute commands as below.
-
- # cd /
- # mkdir moxa
- # cd /moxa
- # tar xvf /dev/fd0
-
- or
-
- # cd /
- # mkdir moxa
- # cd /moxa
- # cp /mnt/cdrom/<driver directory>/mxser.tgz .
- # tar xvfz mxser.tgz
-
-
- 3.3 Device naming convention
-
- You may find all the driver and utilities files in /moxa/mxser.
- Following installation procedure depends on the model you'd like to
- run the driver. If you prefer module driver, please refer to 3.4.
- If static driver is required, please refer to 3.5.
-
- Dialin and callout port
- -----------------------
- This driver remains traditional serial device properties. There are
- two special file name for each serial port. One is dial-in port
- which is named "ttyMxx". For callout port, the naming convention
- is "cumxx".
-
- Device naming when more than 2 boards installed
- -----------------------------------------------
- Naming convention for each Smartio/Industio multiport board is
- pre-defined as below.
-
- Board Num. Dial-in Port Callout port
- 1st board ttyM0 - ttyM7 cum0 - cum7
- 2nd board ttyM8 - ttyM15 cum8 - cum15
- 3rd board ttyM16 - ttyM23 cum16 - cum23
- 4th board ttyM24 - ttym31 cum24 - cum31
-
-
- !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
- Under Kernel 2.6 the cum Device is Obsolete. So use ttyM*
- device instead.
- !!!!!!!!!!!!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
- Board sequence
- --------------
- This driver will activate ISA boards according to the parameter set
- in the driver. After all specified ISA board activated, PCI board
- will be installed in the system automatically driven.
- Therefore the board number is sorted by the CAP address of ISA boards.
- For PCI boards, their sequence will be after ISA boards and C168H/PCI
- has higher priority than C104H/PCI boards.
-
- 3.4 Module driver configuration
- Module driver is easiest way to install. If you prefer static driver
- installation, please skip this paragraph.
-
-
- ------------- Prepare to use the MOXA driver--------------------
- 3.4.1 Create tty device with correct major number
- Before using MOXA driver, your system must have the tty devices
- which are created with driver's major number. We offer one shell
- script "msmknod" to simplify the procedure.
- This step is only needed to be executed once. But you still
- need to do this procedure when:
- a. You change the driver's major number. Please refer the "3.7"
- section.
- b. Your total installed MOXA boards number is changed. Maybe you
- add/delete one MOXA board.
- c. You want to change the tty name. This needs to modify the
- shell script "msmknod"
-
- The procedure is:
- # cd /moxa/mxser/driver
- # ./msmknod
-
- This shell script will require the major number for dial-in
- device and callout device to create tty device. You also need
- to specify the total installed MOXA board number. Default major
- numbers for dial-in device and callout device are 30, 35. If
- you need to change to other number, please refer section "3.7"
- for more detailed procedure.
- Msmknod will delete any special files occupying the same device
- naming.
-
- 3.4.2 Build the MOXA driver and utilities
- Before using the MOXA driver and utilities, you need compile the
- all the source code. This step is only need to be executed once.
- But you still re-compile the source code if you modify the source
- code. For example, if you change the driver's major number (see
- "3.7" section), then you need to do this step again.
-
- Find "Makefile" in /moxa/mxser, then run
-
- # make clean; make install
-
- !!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!
- For Red Hat 9, Red Hat Enterprise Linux AS3/ES3/WS3 & Fedora Core1:
- # make clean; make installsp1
-
- For Red Hat Enterprise Linux AS4/ES4/WS4:
- # make clean; make installsp2
- !!!!!!!!!! NOTE !!!!!!!!!!!!!!!!!
-
- The driver files "mxser.o" and utilities will be properly compiled
- and copied to system directories respectively.
-
- ------------- Load MOXA driver--------------------
- 3.4.3 Load the MOXA driver
-
- # modprobe mxser <argument>
-
- will activate the module driver. You may run "lsmod" to check
- if "mxser" is activated. If the MOXA board is ISA board, the
- <argument> is needed. Please refer to section "3.4.5" for more
- information.
-
-
- ------------- Load MOXA driver on boot --------------------
- 3.4.4 For the above description, you may manually execute
- "modprobe mxser" to activate this driver and run
- "rmmod mxser" to remove it.
- However, it's better to have a boot time configuration to
- eliminate manual operation. Boot time configuration can be
- achieved by rc file. We offer one "rc.mxser" file to simplify
- the procedure under "moxa/mxser/driver".
-
- But if you use ISA board, please modify the "modprobe ..." command
- to add the argument (see "3.4.5" section). After modifying the
- rc.mxser, please try to execute "/moxa/mxser/driver/rc.mxser"
- manually to make sure the modification is ok. If any error
- encountered, please try to modify again. If the modification is
- completed, follow the below step.
-
- Run following command for setting rc files.
-
- # cd /moxa/mxser/driver
- # cp ./rc.mxser /etc/rc.d
- # cd /etc/rc.d
-
- Check "rc.serial" is existed or not. If "rc.serial" doesn't exist,
- create it by vi, run "chmod 755 rc.serial" to change the permission.
- Add "/etc/rc.d/rc.mxser" in last line,
-
- Reboot and check if moxa.o activated by "lsmod" command.
-
- 3.4.5. If you'd like to drive Smartio/Industio ISA boards in the system,
- you'll have to add parameter to specify CAP address of given
- board while activating "mxser.o". The format for parameters are
- as follows.
-
- modprobe mxser ioaddr=0x???,0x???,0x???,0x???
- | | | |
- | | | +- 4th ISA board
- | | +------ 3rd ISA board
- | +------------ 2nd ISA board
- +------------------- 1st ISA board
-
- 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x
-
- Note: To use static driver, you must install the linux kernel
- source package.
-
- 3.5.1 Backup the built-in driver in the kernel.
- # cd /usr/src/linux/drivers/char
- # mv mxser.c mxser.c.old
-
- For Red Hat 7.x user, you need to create link:
- # cd /usr/src
- # ln -s linux-2.4 linux
-
- 3.5.2 Create link
- # cd /usr/src/linux/drivers/char
- # ln -s /moxa/mxser/driver/mxser.c mxser.c
-
- 3.5.3 Add CAP address list for ISA boards. For PCI boards user,
- please skip this step.
-
- In module mode, the CAP address for ISA board is given by
- parameter. In static driver configuration, you'll have to
- assign it within driver's source code. If you will not
- install any ISA boards, you may skip to next portion.
- The instructions to modify driver source code are as
- below.
- a. # cd /moxa/mxser/driver
- # vi mxser.c
- b. Find the array mxserBoardCAP[] as below.
-
- static int mxserBoardCAP[]
- = {0x00, 0x00, 0x00, 0x00};
-
- c. Change the address within this array using vi. For
- example, to driver 2 ISA boards with CAP address
- 0x280 and 0x180 as 1st and 2nd board. Just to change
- the source code as follows.
-
- static int mxserBoardCAP[]
- = {0x280, 0x180, 0x00, 0x00};
-
- 3.5.4 Setup kernel configuration
-
- Configure the kernel:
-
- # cd /usr/src/linux
- # make menuconfig
-
- You will go into a menu-driven system. Please select [Character
- devices][Non-standard serial port support], enable the [Moxa
- SmartIO support] driver with "[*]" for built-in (not "[M]"), then
- select [Exit] to exit this program.
-
- 3.5.5 Rebuild kernel
- The following are for Linux kernel rebuilding, for your
- reference only.
- For appropriate details, please refer to the Linux document.
-
- a. cd /usr/src/linux
- b. make clean /* take a few minutes */
- c. make dep /* take a few minutes */
- d. make bzImage /* take probably 10-20 minutes */
- e. make install /* copy boot image to correct position */
- f. Please make sure the boot kernel (vmlinuz) is in the
- correct position.
- g. If you use 'lilo' utility, you should check /etc/lilo.conf
- 'image' item specified the path which is the 'vmlinuz' path,
- or you will load wrong (or old) boot kernel image (vmlinuz).
- After checking /etc/lilo.conf, please run "lilo".
-
- Note that if the result of "make bzImage" is ERROR, then you have to
- go back to Linux configuration Setup. Type "make menuconfig" in
- directory /usr/src/linux.
-
-
- 3.5.6 Make tty device and special file
- # cd /moxa/mxser/driver
- # ./msmknod
-
- 3.5.7 Make utility
- # cd /moxa/mxser/utility
- # make clean; make install
-
- 3.5.8 Reboot
-
-
-
- 3.6 Custom configuration
- Although this driver already provides you default configuration, you
- still can change the device name and major number. The instruction to
- change these parameters are shown as below.
-
- Change Device name
- ------------------
- If you'd like to use other device names instead of default naming
- convention, all you have to do is to modify the internal code
- within the shell script "msmknod". First, you have to open "msmknod"
- by vi. Locate each line contains "ttyM" and "cum" and change them
- to the device name you desired. "msmknod" creates the device names
- you need next time executed.
-
- Change Major number
- -------------------
- If major number 30 and 35 had been occupied, you may have to select
- 2 free major numbers for this driver. There are 3 steps to change
- major numbers.
-
- 3.6.1 Find free major numbers
- In /proc/devices, you may find all the major numbers occupied
- in the system. Please select 2 major numbers that are available.
- e.g. 40, 45.
- 3.6.2 Create special files
- Run /moxa/mxser/driver/msmknod to create special files with
- specified major numbers.
- 3.6.3 Modify driver with new major number
- Run vi to open /moxa/mxser/driver/mxser.c. Locate the line
- contains "MXSERMAJOR". Change the content as below.
- #define MXSERMAJOR 40
- #define MXSERCUMAJOR 45
- 3.6.4 Run "make clean; make install" in /moxa/mxser/driver.
-
- 3.7 Verify driver installation
- You may refer to /var/log/messages to check the latest status
- log reported by this driver whenever it's activated.
-
------------------------------------------------------------------------------
-4. Utilities
- There are 3 utilities contained in this driver. They are msdiag, msmon and
- msterm. These 3 utilities are released in form of source code. They should
- be compiled into executable file and copied into /usr/bin.
-
- Before using these utilities, please load driver (refer 3.4 & 3.5) and
- make sure you had run the "msmknod" utility.
-
- msdiag - Diagnostic
- --------------------
- This utility provides the function to display what Moxa Smartio/Industio
- board found by driver in the system.
-
- msmon - Port Monitoring
- -----------------------
- This utility gives the user a quick view about all the MOXA ports'
- activities. One can easily learn each port's total received/transmitted
- (Rx/Tx) character count since the time when the monitoring is started.
- Rx/Tx throughputs per second are also reported in interval basis (e.g.
- the last 5 seconds) and in average basis (since the time the monitoring
- is started). You can reset all ports' count by <HOME> key. <+> <->
- (plus/minus) keys to change the displaying time interval. Press <ENTER>
- on the port, that cursor stay, to view the port's communication
- parameters, signal status, and input/output queue.
-
- msterm - Terminal Emulation
- ---------------------------
- This utility provides data sending and receiving ability of all tty ports,
- especially for MOXA ports. It is quite useful for testing simple
- application, for example, sending AT command to a modem connected to the
- port or used as a terminal for login purpose. Note that this is only a
- dumb terminal emulation without handling full screen operation.
-
------------------------------------------------------------------------------
-5. Setserial
-
- Supported Setserial parameters are listed as below.
-
- uart set UART type(16450-->disable FIFO, 16550A-->enable FIFO)
- close_delay set the amount of time(in 1/100 of a second) that DTR
- should be kept low while being closed.
- closing_wait set the amount of time(in 1/100 of a second) that the
- serial port should wait for data to be drained while
- being closed, before the receiver is disable.
- spd_hi Use 57.6kb when the application requests 38.4kb.
- spd_vhi Use 115.2kb when the application requests 38.4kb.
- spd_shi Use 230.4kb when the application requests 38.4kb.
- spd_warp Use 460.8kb when the application requests 38.4kb.
- spd_normal Use 38.4kb when the application requests 38.4kb.
- spd_cust Use the custom divisor to set the speed when the
- application requests 38.4kb.
- divisor This option set the custom division.
- baud_base This option set the base baud rate.
-
------------------------------------------------------------------------------
-6. Troubleshooting
-
- The boot time error messages and solutions are stated as clearly as
- possible. If all the possible solutions fail, please contact our technical
- support team to get more help.
-
-
- Error msg: More than 4 Moxa Smartio/Industio family boards found. Fifth board
- and after are ignored.
- Solution:
- To avoid this problem, please unplug fifth and after board, because Moxa
- driver supports up to 4 boards.
-
- Error msg: Request_irq fail, IRQ(?) may be conflict with another device.
- Solution:
- Other PCI or ISA devices occupy the assigned IRQ. If you are not sure
- which device causes the situation, please check /proc/interrupts to find
- free IRQ and simply change another free IRQ for Moxa board.
-
- Error msg: Board #: C1xx Series(CAP=xxx) interrupt number invalid.
- Solution:
- Each port within the same multiport board shares the same IRQ. Please set
- one IRQ (IRQ doesn't equal to zero) for one Moxa board.
-
- Error msg: No interrupt vector be set for Moxa ISA board(CAP=xxx).
- Solution:
- Moxa ISA board needs an interrupt vector.Please refer to user's manual
- "Hardware Installation" chapter to set interrupt vector.
-
- Error msg: Couldn't install MOXA Smartio/Industio family driver!
- Solution:
- Load Moxa driver fail, the major number may conflict with other devices.
- Please refer to previous section 3.7 to change a free major number for
- Moxa driver.
-
- Error msg: Couldn't install MOXA Smartio/Industio family callout driver!
- Solution:
- Load Moxa callout driver fail, the callout device major number may
- conflict with other devices. Please refer to previous section 3.7 to
- change a free callout device major number for Moxa driver.
-
-
------------------------------------------------------------------------------
-
diff --git a/Documentation/serial/moxa-smartio.rst b/Documentation/serial/moxa-smartio.rst
new file mode 100644
index 000000000000..156100f17c3f
--- /dev/null
+++ b/Documentation/serial/moxa-smartio.rst
@@ -0,0 +1,615 @@
+=============================================================
+MOXA Smartio/Industio Family Device Driver Installation Guide
+=============================================================
+
+.. note::
+
+   This file is outdated. It needs some care in order to bring it
+   up to date with kernel 5.0 and later.
+
+Copyright (C) 2008, Moxa Inc.
+
+Date: 01/21/2008
+
+.. Content
+
+ 1. Introduction
+ 2. System Requirement
+ 3. Installation
+ 3.1 Hardware installation
+ 3.2 Driver files
+ 3.3 Device naming convention
+ 3.4 Module driver configuration
+ 3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x.
+ 3.6 Custom configuration
+ 3.7 Verify driver installation
+ 4. Utilities
+ 5. Setserial
+ 6. Troubleshooting
+
+1. Introduction
+^^^^^^^^^^^^^^^
+
+  The Smartio/Industio/UPCI family Linux driver supports the following
+  multiport boards.
+
+ - 2 ports multiport board
+ CP-102U, CP-102UL, CP-102UF
+ CP-132U-I, CP-132UL,
+ CP-132, CP-132I, CP132S, CP-132IS,
+ CI-132, CI-132I, CI-132IS,
+ (C102H, C102HI, C102HIS, C102P, CP-102, CP-102S)
+
+ - 4 ports multiport board
+ CP-104EL,
+ CP-104UL, CP-104JU,
+ CP-134U, CP-134U-I,
+ C104H/PCI, C104HS/PCI,
+ CP-114, CP-114I, CP-114S, CP-114IS, CP-114UL,
+ C104H, C104HS,
+ CI-104J, CI-104JS,
+ CI-134, CI-134I, CI-134IS,
+ (C114HI, CT-114I, C104P),
+ POS-104UL,
+ CB-114,
+ CB-134I
+
+ - 8 ports multiport board
+ CP-118EL, CP-168EL,
+ CP-118U, CP-168U,
+ C168H/PCI,
+ C168H, C168HS,
+ (C168P),
+ CB-108
+
+  This driver and installation procedure were developed for Linux kernel
+  2.4.x and 2.6.x on the Intel x86 hardware platform. To maintain
+  compatibility, this version has also been tested with RedHat, Mandrake,
+  Fedora and S.u.S.E Linux. If a compatibility problem occurs, please
+  contact Moxa at support@moxa.com.tw.
+
+  In addition to the device driver, this version also provides the
+  following utilities:
+
+ - msdiag
+ Diagnostic program for displaying installed Moxa
+ Smartio/Industio boards.
+ - msmon
+ Monitor program to observe data count and line status signals.
+ - msterm A simple terminal program which is useful in testing serial
+ ports.
+ - io-irq.exe
+ Configuration program to setup ISA boards. Please note that
+ this program can only be executed under DOS.
+
+  All the drivers and utilities in this version are published as source code
+  under the GNU General Public License. Please refer to the GNU General
+  Public License announcement in each source code file for more detail.
+
+  You can always find the latest driver on Moxa's web site at
+  http://www.moxa.com/.
+
+  This version of the driver can be installed as a loadable module (module
+  driver) or built into the kernel (static driver). Refer to the following
+  installation procedure for the variant that suits you. Before you install
+  the driver, please follow the hardware installation procedure in the
+  User's Manual.
+
+  We assume the user is familiar with the following documents:
+
+ - Serial-HOWTO
+ - Kernel-HOWTO
+
+2. System Requirement
+^^^^^^^^^^^^^^^^^^^^^
+
+ - Hardware platform: Intel x86 machine
+ - Kernel version: 2.4.x or 2.6.x
+ - gcc version 2.72 or later
+ - Maximum 4 boards can be installed in combination
+
+3. Installation
+^^^^^^^^^^^^^^^
+
+3.1 Hardware installation
+=========================
+
+ There are two types of buses, ISA and PCI, for Smartio/Industio
+ family multiport board.
+
+ISA board
+---------
+
+  You'll have to configure the CAP address, I/O address, interrupt vector
+  and IRQ before installing this driver. Please refer to the hardware
+  installation procedure in the User's Manual before proceeding any further,
+  and make sure JP1 is open after the ISA board is set up properly.
+
+PCI/UPCI board
+--------------
+
+  You may need to adjust IRQ usage in the BIOS to avoid IRQ conflicts with
+  other ISA devices. Please refer to the hardware installation procedure
+  in the User's Manual in advance.
+
+PCI IRQ Sharing
+---------------
+
+ Each port within the same multiport board shares the same IRQ. Up to
+ 4 Moxa Smartio/Industio PCI Family multiport boards can be installed
+ together on one system and they can share the same IRQ.
+
+
+3.2 Driver files
+================
+
+  The driver file may be obtained from FTP, CD-ROM or floppy disk. The
+  first step, in any case, is to copy the driver file "mxser.tgz" into a
+  directory of your choice, e.g. /moxa. Then execute the commands below::
+
+ # cd /
+ # mkdir moxa
+ # cd /moxa
+ # tar xvf /dev/fd0
+
+or::
+
+ # cd /
+ # mkdir moxa
+ # cd /moxa
+ # cp /mnt/cdrom/<driver directory>/mxser.tgz .
+ # tar xvfz mxser.tgz
+
+
+3.3 Device naming convention
+============================
+
+  You may find all the driver and utility files in /moxa/mxser.
+  The following installation procedure depends on how you'd like to
+  run the driver. If you prefer the module driver, please refer to 3.4.
+  If the static driver is required, please refer to 3.5.
+
+Dialin and callout port
+-----------------------
+
+  This driver retains the traditional serial device properties. There are
+  two special file names for each serial port. One is the dial-in port,
+  which is named "ttyMxx". For the callout port, the naming convention
+  is "cumxx".
+
+Device naming when more than 2 boards installed
+-----------------------------------------------
+
+ Naming convention for each Smartio/Industio multiport board is
+ pre-defined as below.
+
+  ============ =============== ==============
+  Board Num.   Dial-in Port    Callout port
+  ============ =============== ==============
+  1st board    ttyM0 - ttyM7   cum0 - cum7
+  2nd board    ttyM8 - ttyM15  cum8 - cum15
+  3rd board    ttyM16 - ttyM23 cum16 - cum23
+  4th board    ttyM24 - ttyM31 cum24 - cum31
+  ============ =============== ==============
+
+.. note::
+
+   Under kernel 2.6 and later, the cum device is obsolete, so use the
+   ttyM* device instead.
+
+Board sequence
+--------------
+
+  This driver activates ISA boards according to the parameters set in the
+  driver. After all specified ISA boards are activated, PCI boards are
+  detected and installed in the system automatically.
+  Therefore the board numbers are sorted by the CAP addresses of the ISA
+  boards. PCI boards are numbered after the ISA boards, and C168H/PCI
+  boards have higher priority than C104H/PCI boards.
+
+3.4 Module driver configuration
+===============================
+
+  The module driver is the easiest way to install. If you prefer static
+  driver installation, please skip this section.
+
+
+ ------------- Prepare to use the MOXA driver --------------------
+
+3.4.1 Create tty device with correct major number
+-------------------------------------------------
+
+  Before using the MOXA driver, your system must have the tty devices
+  that are created with the driver's major number. We offer a shell
+  script "msmknod" to simplify the procedure.
+  This step only needs to be executed once, but you still
+  need to repeat it when:
+
+  a. You change the driver's major number. Please refer to section
+     "3.6".
+  b. The total number of installed MOXA boards changes, e.g. you
+     add or delete a MOXA board.
+  c. You want to change the tty names. This requires modifying the
+     shell script "msmknod".
+
+ The procedure is::
+
+ # cd /moxa/mxser/driver
+ # ./msmknod
+
+  This shell script asks for the major numbers of the dial-in
+  device and the callout device in order to create the tty devices. You
+  also need to specify the total number of installed MOXA boards. The
+  default major numbers for the dial-in device and the callout device
+  are 30 and 35. If you need to change them, please refer to section
+  "3.6" for the detailed procedure.
+  Msmknod will delete any special files already occupying the same
+  device names.
+
+3.4.2 Build the MOXA driver and utilities
+-----------------------------------------
+
+  Before using the MOXA driver and utilities, you need to compile all
+  the source code. This step only needs to be executed once, but you
+  must re-compile the source code whenever you modify it. For example,
+  if you change the driver's major number (see section "3.6"), you need
+  to do this step again.
+
+  Find "Makefile" in /moxa/mxser, then run::
+
+      # make clean; make install
+
+  .. note::
+
+     For Red Hat 9, Red Hat Enterprise Linux AS3/ES3/WS3 & Fedora Core1::
+
+        # make clean; make installsp1
+
+     For Red Hat Enterprise Linux AS4/ES4/WS4::
+
+        # make clean; make installsp2
+
+  The driver file "mxser.o" and the utilities will be compiled and
+  copied to the appropriate system directories.
+
+------------- Load MOXA driver --------------------
+
+3.4.3 Load the MOXA driver
+--------------------------
+
+ ::
+
+ # modprobe mxser <argument>
+
+  will activate the module driver. You may run "lsmod" to check
+  whether "mxser" is loaded. If the MOXA board is an ISA board, the
+  <argument> is needed. Please refer to section "3.4.5" for more
+  information.
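+
+  As a quick sketch, loading the driver for PCI boards and verifying it
+  could look like this (for an ISA board the modprobe line would instead
+  carry the CAP address argument described in section "3.4.5"; the value
+  0x280 below is only an illustration)::
+
+      # modprobe mxser
+      # lsmod | grep mxser
+
+  or, for one ISA board::
+
+      # modprobe mxser ioaddr=0x280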
+
+------------- Load MOXA driver on boot --------------------
+
+3.4.4 Load the mxser driver
+---------------------------
+
+
+  As described above, you may manually execute
+  "modprobe mxser" to activate this driver and run
+  "rmmod mxser" to remove it.
+
+  However, it's better to have a boot time configuration so that no
+  manual operation is needed. Boot time configuration can be
+  achieved with an rc file. We offer one "rc.mxser" file under
+  "/moxa/mxser/driver" to simplify the procedure.
+
+  If you use an ISA board, please modify the "modprobe ..." command in
+  rc.mxser to add the argument (see section "3.4.5"). After modifying
+  rc.mxser, please execute "/moxa/mxser/driver/rc.mxser"
+  manually to make sure the modification is OK. If any error is
+  encountered, please modify it again. Once the modification is
+  complete, follow the steps below.
+
+ Run following command for setting rc files::
+
+ # cd /moxa/mxser/driver
+ # cp ./rc.mxser /etc/rc.d
+ # cd /etc/rc.d
+
+ Check "rc.serial" is existed or not. If "rc.serial" doesn't exist,
+ create it by vi, run "chmod 755 rc.serial" to change the permission.
+
+ Add "/etc/rc.d/rc.mxser" in last line.
+
+  Reboot and check that the mxser module has been loaded, using the
+  "lsmod" command.
+
+3.4.5 Specify CAP address
+-------------------------
+
+  If you'd like to drive Smartio/Industio ISA boards in the system,
+  you'll have to add a parameter specifying the CAP address of each
+  board when activating "mxser.o". The format of the parameter is
+  as follows::
+
+ modprobe mxser ioaddr=0x???,0x???,0x???,0x???
+ | | | |
+ | | | +- 4th ISA board
+ | | +------ 3rd ISA board
+ | +------------ 2nd ISA board
+ +-------------------1st ISA board
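+
+  For example, for two ISA boards whose CAP addresses were set to 0x280 and
+  0x180 (addresses chosen only for illustration), the command would be::
+
+      # modprobe mxser ioaddr=0x280,0x180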
+
+3.5 Static driver configuration for Linux kernel 2.4.x and 2.6.x
+================================================================
+
+  .. note::
+
+     To use the static driver, you must install the Linux kernel
+     source package.
+
+3.5.1 Backup the built-in driver in the kernel
+----------------------------------------------
+
+ ::
+
+ # cd /usr/src/linux/drivers/char
+ # mv mxser.c mxser.c.old
+
+  For Red Hat 7.x users, you need to create a link::
+
+      # cd /usr/src
+      # ln -s linux-2.4 linux
+
+3.5.2 Create link
+-----------------
+ ::
+
+ # cd /usr/src/linux/drivers/char
+ # ln -s /moxa/mxser/driver/mxser.c mxser.c
+
+3.5.3 Add CAP address list for ISA boards.
+------------------------------------------
+
+ For PCI boards user, please skip this step.
+
+  In module mode, the CAP address for an ISA board is given as a
+  parameter. In the static driver configuration, you'll have to
+  assign it within the driver's source code. If you will not
+  install any ISA boards, you may skip to the next section.
+  The instructions to modify the driver source code are as
+  below.
+
+ a. run::
+
+ # cd /moxa/mxser/driver
+ # vi mxser.c
+
+ b. Find the array mxserBoardCAP[] as below::
+
+ static int mxserBoardCAP[] = {0x00, 0x00, 0x00, 0x00};
+
+  c. Change the addresses within this array using vi. For
+     example, to drive 2 ISA boards with CAP addresses
+     0x280 and 0x180 as the 1st and 2nd board, change
+     the source code as follows::
+
+ static int mxserBoardCAP[] = {0x280, 0x180, 0x00, 0x00};
+
+3.5.4 Setup kernel configuration
+--------------------------------
+
+ Configure the kernel::
+
+ # cd /usr/src/linux
+ # make menuconfig
+
+ You will go into a menu-driven system. Please select [Character
+ devices][Non-standard serial port support], enable the [Moxa
+ SmartIO support] driver with "[*]" for built-in (not "[M]"), then
+ select [Exit] to exit this program.
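+
+  If you want to double-check the result without re-entering the menus, you
+  can grep the generated configuration; the option name below is an
+  assumption based on the menu entry and may differ between kernel trees::
+
+      # grep -i moxa /usr/src/linux/.config
+      CONFIG_MOXA_SMARTIO=y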
+
+3.5.5 Rebuild kernel
+--------------------
+
+  The following steps are for rebuilding the Linux kernel and are
+  provided for your reference only.
+
+  For full details, please refer to the Linux kernel documentation:
+
+ a. Run the following commands::
+
+ cd /usr/src/linux
+ make clean # take a few minutes
+ make dep # take a few minutes
+ make bzImage # take probably 10-20 minutes
+ make install # copy boot image to correct position
+
+  b. Please make sure the boot kernel (vmlinuz) is in the
+     correct position.
+  c. If you use the 'lilo' utility, you should check that the 'image'
+     item in /etc/lilo.conf specifies the path of the 'vmlinuz' you just
+     built, or you will load a wrong (or old) boot kernel image
+     (vmlinuz). After checking /etc/lilo.conf, please run "lilo"
+     (see the sketch after this list).
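+
+  For reference, the relevant fragment of /etc/lilo.conf typically looks
+  like the sketch below (the paths and label are examples only); the
+  'image' line must point at the vmlinuz you just installed, and "lilo"
+  must be re-run afterwards::
+
+      image=/boot/vmlinuz
+          label=linux
+          root=/dev/hda1
+          read-only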
+
+ Note that if the result of "make bzImage" is ERROR, then you have to
+ go back to Linux configuration Setup. Type "make menuconfig" in
+ directory /usr/src/linux.
+
+
+3.5.6 Make tty device and special file
+--------------------------------------
+
+  ::
+
+      # cd /moxa/mxser/driver
+      # ./msmknod
+
+3.5.7 Make utility
+------------------
+
+ ::
+
+ # cd /moxa/mxser/utility
+ # make clean; make install
+
+3.5.8 Reboot
+------------
+
+
+
+3.6 Custom configuration
+========================
+
+  Although this driver already provides a default configuration, you
+  can still change the device names and major numbers. The instructions
+  to change these parameters are shown below.
+
+a. Change Device name
+
+   If you'd like to use device names other than the default naming
+   convention, all you have to do is modify the internal code
+   within the shell script "msmknod". First, open "msmknod"
+   with vi. Locate each line that contains "ttyM" and "cum" and change
+   them to the device names you desire. "msmknod" creates the device
+   names you need the next time it is executed.
+
+b. Change Major number
+
+   If major numbers 30 and 35 are already occupied, you have to select
+   2 free major numbers for this driver. The steps to change the major
+   numbers are as follows.
+
+3.6.1 Find free major numbers
+-----------------------------
+
+  In /proc/devices, you can find all the major numbers occupied
+  in the system. Please select 2 major numbers that are still available,
+  e.g. 40 and 45.
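+
+  A quick way to check is to look for the candidate numbers in
+  /proc/devices; if the second command below prints nothing, both numbers
+  are free (40 and 45 are only examples)::
+
+      # cat /proc/devices
+      # egrep '^ *(40|45) ' /proc/devices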
+
+3.6.2 Create special files
+--------------------------
+
+ Run /moxa/mxser/driver/msmknod to create special files with
+ specified major numbers.
+
+3.6.3 Modify driver with new major number
+-----------------------------------------
+
+  Run vi to open /moxa/mxser/driver/mxser.c. Locate the lines
+  containing "MXSERMAJOR" and change their content as below::
+
+ #define MXSERMAJOR 40
+ #define MXSERCUMAJOR 45
+
+3.6.4 Rebuild the driver
+------------------------
+
+  Run "make clean; make install" in /moxa/mxser/driver.
+
+3.7 Verify driver installation
+==============================
+
+ You may refer to /var/log/messages to check the latest status
+ log reported by this driver whenever it's activated.
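+
+  For example, the following commands show the most recent messages from
+  the driver (the exact wording of the messages depends on the driver
+  version)::
+
+      # dmesg | grep -i mxser
+      # grep -i mxser /var/log/messages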
+
+4. Utilities
+^^^^^^^^^^^^
+
+  There are 3 utilities contained in this driver: msdiag, msmon and
+  msterm. These 3 utilities are released in the form of source code. They
+  should be compiled into executable files and copied into /usr/bin.
+
+  Before using these utilities, please load the driver (refer to 3.4 & 3.5)
+  and make sure you have run the "msmknod" utility.
+
+msdiag - Diagnostic
+===================
+
+  This utility displays which Moxa Smartio/Industio boards have been
+  found by the driver in the system.
+
+msmon - Port Monitoring
+=======================
+
+  This utility gives the user a quick view of all the MOXA ports'
+  activities. One can easily learn each port's total received/transmitted
+  (Rx/Tx) character count since the time the monitoring was started.
+
+  Rx/Tx throughput per second is also reported on an interval basis (e.g.
+  the last 5 seconds) and on an average basis (since the time the
+  monitoring was started). You can reset all ports' counts with the <HOME>
+  key and use the <+> <-> (plus/minus) keys to change the displayed time
+  interval. Press <ENTER> on the port where the cursor is to view that
+  port's communication parameters, signal status, and input/output queues.
+
+msterm - Terminal Emulation
+===========================
+
+  This utility provides the ability to send and receive data on all tty
+  ports, especially MOXA ports. It is quite useful for testing simple
+  applications, for example, sending AT commands to a modem connected to
+  the port, or as a terminal for login purposes. Note that this is only a
+  dumb terminal emulation without full screen handling.
+
+5. Setserial
+^^^^^^^^^^^^
+
+ Supported Setserial parameters are listed as below.
+
+   ============== =========================================================
+   uart           set UART type (16450 --> disable FIFO, 16550A --> enable FIFO)
+   close_delay    set the amount of time (in 1/100 of a second) that DTR
+                  should be kept low while being closed.
+   closing_wait   set the amount of time (in 1/100 of a second) that the
+                  serial port should wait for data to be drained while
+                  being closed, before the receiver is disabled.
+   spd_hi         Use 57.6kb when the application requests 38.4kb.
+   spd_vhi        Use 115.2kb when the application requests 38.4kb.
+   spd_shi        Use 230.4kb when the application requests 38.4kb.
+   spd_warp       Use 460.8kb when the application requests 38.4kb.
+   spd_normal     Use 38.4kb when the application requests 38.4kb.
+   spd_cust       Use the custom divisor to set the speed when the
+                  application requests 38.4kb.
+   divisor        This option sets the custom divisor.
+   baud_base      This option sets the base baud rate.
+   ============== =========================================================
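+
+  For instance, to read back the current settings of the first port and to
+  map a 38.4kb request to 115.2kb (device name as per the convention in
+  section 3.3)::
+
+      # setserial -a /dev/ttyM0
+      # setserial /dev/ttyM0 spd_vhi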
+
+6. Troubleshooting
+^^^^^^^^^^^^^^^^^^
+
+ The boot time error messages and solutions are stated as clearly as
+ possible. If all the possible solutions fail, please contact our technical
+ support team to get more help.
+
+
+ Error msg:
+ More than 4 Moxa Smartio/Industio family boards found. Fifth board
+ and after are ignored.
+
+ Solution:
+  To avoid this problem, please unplug the fifth and subsequent boards,
+  because the Moxa driver supports up to 4 boards.
+
+ Error msg:
+ Request_irq fail, IRQ(?) may be conflict with another device.
+
+ Solution:
+  Another PCI or ISA device occupies the assigned IRQ. If you are not sure
+  which device causes the conflict, please check /proc/interrupts to find
+  a free IRQ and simply assign that free IRQ to the Moxa board.
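+
+  For illustration, the occupied IRQs can be listed with::
+
+      # cat /proc/interrupts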
+
+ Error msg:
+ Board #: C1xx Series(CAP=xxx) interrupt number invalid.
+
+ Solution:
+  Each port within the same multiport board shares the same IRQ. Please set
+  one non-zero IRQ for each Moxa board.
+
+ Error msg:
+ No interrupt vector be set for Moxa ISA board(CAP=xxx).
+
+ Solution:
+  The Moxa ISA board needs an interrupt vector. Please refer to the
+  "Hardware Installation" chapter of the user's manual to set the
+  interrupt vector.
+
+ Error msg:
+ Couldn't install MOXA Smartio/Industio family driver!
+
+ Solution:
+  Loading the Moxa driver failed; the major number may conflict with other
+  devices. Please refer to section 3.6 above to change to a free major
+  number for the Moxa driver.
+
+ Error msg:
+ Couldn't install MOXA Smartio/Industio family callout driver!
+
+ Solution:
+  Loading the Moxa callout driver failed; the callout device major number
+  may conflict with other devices. Please refer to section 3.6 above to
+  change to a free callout device major number for the Moxa driver.
diff --git a/Documentation/serial/n_gsm.rst b/Documentation/serial/n_gsm.rst
new file mode 100644
index 000000000000..f3ad9fd26408
--- /dev/null
+++ b/Documentation/serial/n_gsm.rst
@@ -0,0 +1,103 @@
+==============================
+GSM 0710 tty multiplexor HOWTO
+==============================
+
+This line discipline implements the GSM 07.10 multiplexing protocol
+detailed in the following 3GPP document:
+
+ http://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
+
+This document gives some hints on how to use this driver with GPRS and 3G
+modems connected to a physical serial port.
+
+How to use it
+-------------
+1. initialize the modem in 0710 mux mode (usually AT+CMUX= command) through
+ its serial port. Depending on the modem used, you can pass more or less
+ parameters to this command,
+2. switch the serial line to using the n_gsm line discipline by using
+ TIOCSETD ioctl,
+3. configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl,
+
+Major parts of the initialization program :
+(a good starting point is util-linux-ng/sys-utils/ldattach.c)::
+
+ #include <linux/gsmmux.h>
+ #define N_GSM0710 21 /* GSM 0710 Mux */
+ #define DEFAULT_SPEED B115200
+ #define SERIAL_PORT /dev/ttyS0
+
+ int ldisc = N_GSM0710;
+ struct gsm_config c;
+ struct termios configuration;
+
+ /* open the serial port connected to the modem */
+ fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
+
+ /* configure the serial port : speed, flow control ... */
+
+ /* send the AT commands to switch the modem to CMUX mode
+ and check that it's successful (should return OK) */
+ write(fd, "AT+CMUX=0\r", 10);
+
+ /* experience showed that some modems need some time before
+ being able to answer to the first MUX packet so a delay
+ may be needed here in some case */
+ sleep(3);
+
+ /* use n_gsm line discipline */
+ ioctl(fd, TIOCSETD, &ldisc);
+
+ /* get n_gsm configuration */
+ ioctl(fd, GSMIOC_GETCONF, &c);
+ /* we are initiator and need encoding 0 (basic) */
+ c.initiator = 1;
+ c.encapsulation = 0;
+ /* our modem defaults to a maximum size of 127 bytes */
+ c.mru = 127;
+ c.mtu = 127;
+ /* set the new configuration */
+ ioctl(fd, GSMIOC_SETCONF, &c);
+
+ /* and wait for ever to keep the line discipline enabled */
+ daemon(0,0);
+ pause();
+
+4. create the devices corresponding to the "virtual" serial ports (take care,
+ each modem has its configuration and some DLC have dedicated functions,
+ for example GPS), starting with minor 1 (DLC0 is reserved for the management
+ of the mux)::
+
+  MAJOR=`cat /proc/devices |grep gsmtty | awk '{print $1}'`
+ for i in `seq 1 4`; do
+ mknod /dev/ttygsm$i c $MAJOR $i
+ done
+
+5. use these devices as plain serial ports.
+
+   For example, it's possible:
+
+   - to use gnokii to send / receive SMS on ttygsm1
+   - to use ppp to establish a datalink on ttygsm2
+
+6. first close all virtual ports before closing the physical port.
+
+ Note that after closing the physical port the modem is still in multiplexing
+ mode. This may prevent a successful re-opening of the port later. To avoid
+ this situation either reset the modem if your hardware allows that or send
+ a disconnect command frame manually before initializing the multiplexing mode
+ for the second time. The byte sequence for the disconnect command frame is::
+
+ 0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9.
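+
+   One way to send that frame from a shell, assuming the physical port is
+   /dev/ttyS0 as in the example above, that it is no longer attached to the
+   n_gsm line discipline, and that GNU stty/printf are available (the
+   escapes below are the same bytes written in octal), is::
+
+      stty -F /dev/ttyS0 raw
+      printf '\371\003\357\003\303\026\371' > /dev/ttyS0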
+
+Additional Documentation
+------------------------
+More practical details on the protocol and how it's supported by industrial
+modems can be found in the following documents :
+
+- http://www.telit.com/module/infopool/download.php?id=616
+- http://www.u-blox.com/images/downloads/Product_Docs/LEON-G100-G200-MuxImplementation_ApplicationNote_%28GSM%20G1-CS-10002%29.pdf
+- http://www.sierrawireless.com/Support/Downloads/AirPrime/WMP_Series/~/media/Support_Downloads/AirPrime/Application_notes/CMUX_Feature_Application_Note-Rev004.ashx
+- http://wm.sim.com/sim/News/photo/2010721161442.pdf
+
+11-03-08 - Eric Bénard - <eric@eukrea.com>
diff --git a/Documentation/serial/n_gsm.txt b/Documentation/serial/n_gsm.txt
deleted file mode 100644
index 875361bb7cb4..000000000000
--- a/Documentation/serial/n_gsm.txt
+++ /dev/null
@@ -1,96 +0,0 @@
-n_gsm.c GSM 0710 tty multiplexor HOWTO
-===================================================
-
-This line discipline implements the GSM 07.10 multiplexing protocol
-detailed in the following 3GPP document :
-http://www.3gpp.org/ftp/Specs/archive/07_series/07.10/0710-720.zip
-
-This document give some hints on how to use this driver with GPRS and 3G
-modems connected to a physical serial port.
-
-How to use it
--------------
-1- initialize the modem in 0710 mux mode (usually AT+CMUX= command) through
-its serial port. Depending on the modem used, you can pass more or less
-parameters to this command,
-2- switch the serial line to using the n_gsm line discipline by using
-TIOCSETD ioctl,
-3- configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl,
-
-Major parts of the initialization program :
-(a good starting point is util-linux-ng/sys-utils/ldattach.c)
-#include <linux/gsmmux.h>
-#define N_GSM0710 21 /* GSM 0710 Mux */
-#define DEFAULT_SPEED B115200
-#define SERIAL_PORT /dev/ttyS0
-
- int ldisc = N_GSM0710;
- struct gsm_config c;
- struct termios configuration;
-
- /* open the serial port connected to the modem */
- fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
-
- /* configure the serial port : speed, flow control ... */
-
- /* send the AT commands to switch the modem to CMUX mode
- and check that it's successful (should return OK) */
- write(fd, "AT+CMUX=0\r", 10);
-
- /* experience showed that some modems need some time before
- being able to answer to the first MUX packet so a delay
- may be needed here in some case */
- sleep(3);
-
- /* use n_gsm line discipline */
- ioctl(fd, TIOCSETD, &ldisc);
-
- /* get n_gsm configuration */
- ioctl(fd, GSMIOC_GETCONF, &c);
- /* we are initiator and need encoding 0 (basic) */
- c.initiator = 1;
- c.encapsulation = 0;
- /* our modem defaults to a maximum size of 127 bytes */
- c.mru = 127;
- c.mtu = 127;
- /* set the new configuration */
- ioctl(fd, GSMIOC_SETCONF, &c);
-
- /* and wait for ever to keep the line discipline enabled */
- daemon(0,0);
- pause();
-
-4- create the devices corresponding to the "virtual" serial ports (take care,
-each modem has its configuration and some DLC have dedicated functions,
-for example GPS), starting with minor 1 (DLC0 is reserved for the management
-of the mux)
-
-MAJOR=`cat /proc/devices |grep gsmtty | awk '{print $1}`
-for i in `seq 1 4`; do
- mknod /dev/ttygsm$i c $MAJOR $i
-done
-
-5- use these devices as plain serial ports.
-for example, it's possible :
-- and to use gnokii to send / receive SMS on ttygsm1
-- to use ppp to establish a datalink on ttygsm2
-
-6- first close all virtual ports before closing the physical port.
-
-Note that after closing the physical port the modem is still in multiplexing
-mode. This may prevent a successful re-opening of the port later. To avoid
-this situation either reset the modem if your hardware allows that or send
-a disconnect command frame manually before initializing the multiplexing mode
-for the second time. The byte sequence for the disconnect command frame is:
-0xf9, 0x03, 0xef, 0x03, 0xc3, 0x16, 0xf9.
-
-Additional Documentation
-------------------------
-More practical details on the protocol and how it's supported by industrial
-modems can be found in the following documents :
-http://www.telit.com/module/infopool/download.php?id=616
-http://www.u-blox.com/images/downloads/Product_Docs/LEON-G100-G200-MuxImplementation_ApplicationNote_%28GSM%20G1-CS-10002%29.pdf
-http://www.sierrawireless.com/Support/Downloads/AirPrime/WMP_Series/~/media/Support_Downloads/AirPrime/Application_notes/CMUX_Feature_Application_Note-Rev004.ashx
-http://wm.sim.com/sim/News/photo/2010721161442.pdf
-
-11-03-08 - Eric Bénard - <eric@eukrea.com>
diff --git a/Documentation/serial/rocket.txt b/Documentation/serial/rocket.rst
index 60b039891057..23761eae4282 100644
--- a/Documentation/serial/rocket.txt
+++ b/Documentation/serial/rocket.rst
@@ -1,20 +1,22 @@
-Comtrol(tm) RocketPort(R)/RocketModem(TM) Series
-Device Driver for the Linux Operating System
+================================================
+Comtrol(tm) RocketPort(R)/RocketModem(TM) Series
+================================================
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+Device Driver for the Linux Operating System
+============================================
-PRODUCT OVERVIEW
+Product overview
----------------
This driver provides a loadable kernel driver for the Comtrol RocketPort
-and RocketModem PCI boards. These boards provide, 2, 4, 8, 16, or 32
+and RocketModem PCI boards. These boards provide, 2, 4, 8, 16, or 32
high-speed serial ports or modems. This driver supports up to a combination
of four RocketPort or RocketModems boards in one machine simultaneously.
This file assumes that you are using the RocketPort driver which is
-integrated into the kernel sources.
+integrated into the kernel sources.
-The driver can also be installed as an external module using the usual
-"make;make install" routine. This external module driver, obtainable
+The driver can also be installed as an external module using the usual
+"make;make install" routine. This external module driver, obtainable
from the Comtrol website listed below, is useful for updating the driver
or installing it into kernels which do not have the driver configured
into them. Installations instructions for the external module
@@ -29,57 +31,59 @@ information on how to set the DIP switches.
You pass the I/O port to the driver using the following module parameters:
-board1 : I/O port for the first ISA board
-board2 : I/O port for the second ISA board
-board3 : I/O port for the third ISA board
-board4 : I/O port for the fourth ISA board
+board1:
+ I/O port for the first ISA board
+board2:
+ I/O port for the second ISA board
+board3:
+ I/O port for the third ISA board
+board4:
+ I/O port for the fourth ISA board
There is a set of utilities and scripts provided with the external driver
-( downloadable from http://www.comtrol.com ) that ease the configuration and
+(downloadable from http://www.comtrol.com) that ease the configuration and
setup of the ISA cards.
The RocketModem II PCI boards require firmware to be loaded into the card
before it will function. The driver has only been tested as a module for this
board.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-INSTALLATION PROCEDURES
+Installation Procedures
-----------------------
-RocketPort/RocketModem PCI cards require no driver configuration, they are
+RocketPort/RocketModem PCI cards require no driver configuration, they are
automatically detected and configured.
-The RocketPort driver can be installed as a module (recommended) or built
+The RocketPort driver can be installed as a module (recommended) or built
into the kernel. This is selected, as for other drivers, through the `make config`
-command from the root of the Linux source tree during the kernel build process.
+command from the root of the Linux source tree during the kernel build process.
The RocketPort/RocketModem serial ports installed by this driver are assigned
-device major number 46, and will be named /dev/ttyRx, where x is the port number
+device major number 46, and will be named /dev/ttyRx, where x is the port number
starting at zero (ex. /dev/ttyR0, /devttyR1, ...). If you have multiple cards
installed in the system, the mapping of port names to serial ports is displayed
in the system log at /var/log/messages.
If installed as a module, the module must be loaded. This can be done
manually by entering "modprobe rocket". To have the module loaded automatically
-upon system boot, edit a /etc/modprobe.d/*.conf file and add the line
+upon system boot, edit a `/etc/modprobe.d/*.conf` file and add the line
"alias char-major-46 rocket".
In order to use the ports, their device names (nodes) must be created with mknod.
-This is only required once, the system will retain the names once created. To
-create the RocketPort/RocketModem device names, use the command
-"mknod /dev/ttyRx c 46 x" where x is the port number starting at zero. For example:
+This is only required once, the system will retain the names once created. To
+create the RocketPort/RocketModem device names, use the command
+"mknod /dev/ttyRx c 46 x" where x is the port number starting at zero.
->mknod /dev/ttyR0 c 46 0
->mknod /dev/ttyR1 c 46 1
->mknod /dev/ttyR2 c 46 2
+For example::
-The Linux script MAKEDEV will create the first 16 ttyRx device names (nodes)
-for you:
+ > mknod /dev/ttyR0 c 46 0
+ > mknod /dev/ttyR1 c 46 1
+ > mknod /dev/ttyR2 c 46 2
->/dev/MAKEDEV ttyR
+The Linux script MAKEDEV will create the first 16 ttyRx device names (nodes)
+for you::
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ >/dev/MAKEDEV ttyR
ISA Rocketport Boards
---------------------
@@ -89,7 +93,7 @@ card before installing and using it. This is done by setting a set of DIP
switches on the Rocketport board.
-SETTING THE I/O ADDRESS
+Setting the I/O address
-----------------------
Before installing RocketPort(R) or RocketPort RA boards, you must find
@@ -130,40 +134,36 @@ the first 4 bytes of that range are used by the first board. You would
need to set the second, third, or fourth board to one of the next available
blocks such as 0x180.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-RocketPort and RocketPort RA SW1 Settings:
-
- +-------------------------------+
- | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
- +-------+-------+---------------+
- | Unused| Card | I/O Port Block|
- +-------------------------------+
-
-DIP Switches DIP Switches
-7 8 6 5
-=================== ===================
-On On UNUSED, MUST BE ON. On On First Card <==== Default
- On Off Second Card
- Off On Third Card
- Off Off Fourth Card
-
-DIP Switches I/O Address Range
-4 3 2 1 Used by the First Card
-=====================================
-On Off On Off 100-143
-On Off Off On 140-183
-On Off Off Off 180-1C3 <==== Default
-Off On On Off 200-243
-Off On Off On 240-283
-Off On Off Off 280-2C3
-Off Off On Off 300-343
-Off Off Off On 340-383
-Off Off Off Off 380-3C3
-
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-REPORTING BUGS
+RocketPort and RocketPort RA SW1 Settings::
+
+ +-------------------------------+
+ | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ +-------+-------+---------------+
+ | Unused| Card | I/O Port Block|
+ +-------------------------------+
+
+ DIP Switches DIP Switches
+ 7 8 6 5
+ =================== ===================
+ On On UNUSED, MUST BE ON. On On First Card <==== Default
+ On Off Second Card
+ Off On Third Card
+ Off Off Fourth Card
+
+ DIP Switches I/O Address Range
+ 4 3 2 1 Used by the First Card
+ =====================================
+ On Off On Off 100-143
+ On Off Off On 140-183
+ On Off Off Off 180-1C3 <==== Default
+ Off On On Off 200-243
+ Off On Off On 240-283
+ Off On Off Off 280-2C3
+ Off Off On Off 300-343
+ Off Off Off On 340-383
+ Off Off Off Off 380-3C3
+
+Reporting Bugs
--------------
For technical support, please provide the following
@@ -171,19 +171,15 @@ information: Driver version, kernel release, distribution of
kernel, and type of board you are using. Error messages and log
printouts port configuration details are especially helpful.
-USA
- Phone: (612) 494-4100
- FAX: (612) 494-4199
- email: support@comtrol.com
+USA:
+ :Phone: (612) 494-4100
+ :FAX: (612) 494-4199
+ :email: support@comtrol.com
-Comtrol Europe
- Phone: +44 (0) 1 869 323-220
- FAX: +44 (0) 1 869 323-211
- email: support@comtrol.co.uk
+Comtrol Europe:
+ :Phone: +44 (0) 1 869 323-220
+ :FAX: +44 (0) 1 869 323-211
+ :email: support@comtrol.co.uk
Web: http://www.comtrol.com
FTP: ftp.comtrol.com
-
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
-
-
diff --git a/Documentation/serial/serial-iso7816.txt b/Documentation/serial/serial-iso7816.rst
index 3193d24a2b0f..d990143de0c6 100644
--- a/Documentation/serial/serial-iso7816.txt
+++ b/Documentation/serial/serial-iso7816.rst
@@ -1,11 +1,15 @@
- ISO7816 SERIAL COMMUNICATIONS
+=============================
+ISO7816 Serial Communications
+=============================
-1. INTRODUCTION
+1. Introduction
+===============
ISO/IEC7816 is a series of standards specifying integrated circuit cards (ICC)
also known as smart cards.
-2. HARDWARE-RELATED CONSIDERATIONS
+2. Hardware-related considerations
+==================================
Some CPUs/UARTs (e.g., Microchip AT91) contain a built-in mode capable of
handling communication with a smart card.
@@ -15,7 +19,8 @@
available at user-level to allow switching from one mode to the other, and
vice versa.
-3. DATA STRUCTURES ALREADY AVAILABLE IN THE KERNEL
+3. Data Structures Already Available in the Kernel
+==================================================
The Linux kernel provides the serial_iso7816 structure (see [1]) to handle
ISO7816 communications. This data structure is used to set and configure
@@ -27,10 +32,11 @@
to TIOCGISO7816 and TIOCSISO7816 ioctls (see below). The iso7816_config
callback receives a pointer to struct serial_iso7816.
-4. USAGE FROM USER-LEVEL
+4. Usage from user-level
+========================
From user-level, ISO7816 configuration can be get/set using the previous
- ioctls. For instance, to set ISO7816 you can use the following code:
+ ioctls. For instance, to set ISO7816 you can use the following code::
#include <linux/serial.h>
@@ -78,6 +84,7 @@
/* Error handling. See errno. */
}
-5. REFERENCES
+5. References
+=============
[1] include/uapi/linux/serial.h
diff --git a/Documentation/serial/serial-rs485.txt b/Documentation/serial/serial-rs485.rst
index ce0c1a9b8aab..6bc824f948f9 100644
--- a/Documentation/serial/serial-rs485.txt
+++ b/Documentation/serial/serial-rs485.rst
@@ -1,6 +1,9 @@
- RS485 SERIAL COMMUNICATIONS
+===========================
+RS485 Serial Communications
+===========================
-1. INTRODUCTION
+1. Introduction
+===============
EIA-485, also known as TIA/EIA-485 or RS-485, is a standard defining the
electrical characteristics of drivers and receivers for use in balanced
@@ -9,7 +12,8 @@
because it can be used effectively over long distances and in electrically
noisy environments.
-2. HARDWARE-RELATED CONSIDERATIONS
+2. Hardware-related Considerations
+==================================
Some CPUs/UARTs (e.g., Atmel AT91 or 16C950 UART) contain a built-in
half-duplex mode capable of automatically controlling line direction by
@@ -22,7 +26,8 @@
available at user-level to allow switching from one mode to the other, and
vice versa.
-3. DATA STRUCTURES ALREADY AVAILABLE IN THE KERNEL
+3. Data Structures Already Available in the Kernel
+==================================================
The Linux kernel provides the serial_rs485 structure (see [1]) to handle
RS485 communications. This data structure is used to set and configure RS485
@@ -38,10 +43,11 @@
to TIOCSRS485 and TIOCGRS485 ioctls (see below). The rs485_config callback
receives a pointer to struct serial_rs485.
-4. USAGE FROM USER-LEVEL
+4. Usage from user-level
+========================
From user-level, RS485 configuration can be get/set using the previous
- ioctls. For instance, to set RS485 you can use the following code:
+ ioctls. For instance, to set RS485 you can use the following code::
#include <linux/serial.h>
@@ -89,7 +95,9 @@
/* Error handling. See errno. */
}
-5. REFERENCES
+5. References
+=============
[1] include/uapi/linux/serial.h
+
[2] Documentation/devicetree/bindings/serial/rs485.txt
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.rst
index b48780977a68..dd972caacf3e 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.rst
@@ -1,5 +1,6 @@
-
- The Lockronomicon
+=================
+The Lockronomicon
+=================
Your guide to the ancient and twisted locking policies of the tty layer and
the warped logic behind them. Beware all ye who read on.
@@ -9,12 +10,12 @@ Line Discipline
---------------
Line disciplines are registered with tty_register_ldisc() passing the
-discipline number and the ldisc structure. At the point of registration the
+discipline number and the ldisc structure. At the point of registration the
discipline must be ready to use and it is possible it will get used before
the call returns success. If the call returns an error then it won't get
called. Do not re-use ldisc numbers as they are part of the userspace ABI
and writing over an existing ldisc will cause demons to eat your computer.
-After the return the ldisc data has been copied so you may free your own
+After the return the ldisc data has been copied so you may free your own
copy of the structure. You must not re-register over the top of the line
discipline even with the same data or your computer again will be eaten by
demons.
@@ -26,7 +27,7 @@ code manages the module counts this should not usually be a concern.
Heed this warning: the reference count field of the registered copies of the
tty_ldisc structure in the ldisc table counts the number of lines using this
-discipline. The reference count of the tty_ldisc structure within a tty
+discipline. The reference count of the tty_ldisc structure within a tty
counts the number of active users of the ldisc at this instant. In effect it
counts the number of threads of execution within an ldisc method (plus those
about to enter and exit although this detail matters not).
@@ -34,9 +35,11 @@ about to enter and exit although this detail matters not).
Line Discipline Methods
-----------------------
-TTY side interfaces:
+TTY side interfaces
+^^^^^^^^^^^^^^^^^^^
-open() - Called when the line discipline is attached to
+======================= =======================================================
+open() Called when the line discipline is attached to
the terminal. No other call into the line
discipline for this tty will occur until it
completes successfully. Should initialize any
@@ -47,66 +50,69 @@ open() - Called when the line discipline is attached to
Returning an error will prevent the ldisc from
being attached. Can sleep.
-close() - This is called on a terminal when the line
+close() This is called on a terminal when the line
discipline is being unplugged. At the point of
execution no further users will enter the
ldisc code for this tty. Can sleep.
-hangup() - Called when the tty line is hung up.
+hangup() Called when the tty line is hung up.
The line discipline should cease I/O to the tty.
No further calls into the ldisc code will occur.
The return value is ignored. Can sleep.
-read() - (optional) A process requests reading data from
+read() (optional) A process requests reading data from
the line. Multiple read calls may occur in parallel
and the ldisc must deal with serialization issues.
If not defined, the process will receive an EIO
error. May sleep.
-write() - (optional) A process requests writing data to the
+write() (optional) A process requests writing data to the
line. Multiple write calls are serialized by the
tty layer for the ldisc. If not defined, the
process will receive an EIO error. May sleep.
-flush_buffer() - (optional) May be called at any point between
+flush_buffer() (optional) May be called at any point between
open and close, and instructs the line discipline
to empty its input buffer.
-set_termios() - (optional) Called on termios structure changes.
+set_termios() (optional) Called on termios structure changes.
The caller passes the old termios data and the
current data is in the tty. Called under the
termios semaphore so allowed to sleep. Serialized
against itself only.
-poll() - (optional) Check the status for the poll/select
+poll() (optional) Check the status for the poll/select
calls. Multiple poll calls may occur in parallel.
May sleep.
-ioctl() - (optional) Called when an ioctl is handed to the
+ioctl() (optional) Called when an ioctl is handed to the
tty layer that might be for the ldisc. Multiple
ioctl calls may occur in parallel. May sleep.
-compat_ioctl() - (optional) Called when a 32 bit ioctl is handed
+compat_ioctl() (optional) Called when a 32 bit ioctl is handed
to the tty layer that might be for the ldisc.
Multiple ioctl calls may occur in parallel.
May sleep.
+======================= =======================================================
-Driver Side Interfaces:
+Driver Side Interfaces
+^^^^^^^^^^^^^^^^^^^^^^
-receive_buf() - (optional) Called by the low-level driver to hand
+======================= =======================================================
+receive_buf() (optional) Called by the low-level driver to hand
a buffer of received bytes to the ldisc for
processing. The number of bytes is guaranteed not
to exceed the current value of tty->receive_room.
All bytes must be processed.
-receive_buf2() - (optional) Called by the low-level driver to hand
+receive_buf2() (optional) Called by the low-level driver to hand
a buffer of received bytes to the ldisc for
processing. Returns the number of bytes processed.
If both receive_buf() and receive_buf2() are
defined, receive_buf2() should be preferred.
-write_wakeup() - May be called at any point between open and close.
+write_wakeup() May be called at any point between open and close.
The TTY_DO_WRITE_WAKEUP flag indicates if a call
is needed but always races versus calls. Thus the
ldisc must be careful about setting order and to
@@ -117,17 +123,20 @@ write_wakeup() - May be called at any point between open and close.
is permitted to call the driver write method from
this function. In such a situation defer it.
-dcd_change() - Report to the tty line the current DCD pin status
+dcd_change() Report to the tty line the current DCD pin status
changes and the relative timestamp. The timestamp
cannot be NULL.
+======================= =======================================================
Driver Access
+^^^^^^^^^^^^^
Line discipline methods can call the following methods of the underlying
hardware driver through the function pointers within the tty->driver
structure:
+======================= =======================================================
write() Write a block of characters to the tty device.
Returns the number of characters accepted. The
character buffer passed to this method is already
@@ -189,13 +198,16 @@ wait_until_sent() Waits until the device has written out all of the
characters in its transmitter FIFO.
send_xchar() Send a high-priority XON/XOFF character to the device.
+======================= =======================================================
Flags
+^^^^^
Line discipline methods have access to tty->flags field containing the
following interesting flags:
+======================= =======================================================
TTY_THROTTLED Driver input is throttled. The ldisc should call
tty->driver->unthrottle() in order to resume
reception when it is ready to process more data.
@@ -212,102 +224,105 @@ TTY_OTHER_CLOSED Device is a pty and the other side has closed.
TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
smaller chunks.
+======================= =======================================================
Locking
+^^^^^^^
Callers to the line discipline functions from the tty layer are required to
take line discipline locks. The same is true of calls from the driver side
but not yet enforced.
-Three calls are now provided
+Three calls are now provided::
ldisc = tty_ldisc_ref(tty);
takes a handle to the line discipline in the tty and returns it. If no ldisc
is currently attached or the ldisc is being closed and re-opened at this
point then NULL is returned. While this handle is held the ldisc will not
-change or go away.
+change or go away::
tty_ldisc_deref(ldisc)
Returns the ldisc reference and allows the ldisc to be closed. Returning the
reference takes away your right to call the ldisc functions until you take
-a new reference.
+a new reference::
ldisc = tty_ldisc_ref_wait(tty);
Performs the same function as tty_ldisc_ref except that it will wait for an
-ldisc change to complete and then return a reference to the new ldisc.
+ldisc change to complete and then return a reference to the new ldisc.
While these functions are slightly slower than the old code they should have
minimal impact as most receive logic uses the flip buffers and they only
need to take a reference when they push bits up through the driver.
-A caution: The ldisc->open(), ldisc->close() and driver->set_ldisc
+A caution: The ldisc->open(), ldisc->close() and driver->set_ldisc
functions are called with the ldisc unavailable. Thus tty_ldisc_ref will
fail in this situation if used within these functions. Ldisc and driver
-code calling its own functions must be careful in this case.
+code calling its own functions must be careful in this case.
Driver Interface
----------------
-open() - Called when a device is opened. May sleep
+======================= =======================================================
+open() Called when a device is opened. May sleep
-close() - Called when a device is closed. At the point of
- return from this call the driver must make no
+close() Called when a device is closed. At the point of
+ return from this call the driver must make no
further ldisc calls of any kind. May sleep
-write() - Called to write bytes to the device. May not
- sleep. May occur in parallel in special cases.
+write() Called to write bytes to the device. May not
+ sleep. May occur in parallel in special cases.
Because this includes panic paths drivers generally
shouldn't try and do clever locking here.
-put_char() - Stuff a single character onto the queue. The
+put_char() Stuff a single character onto the queue. The
driver is guaranteed following up calls to
flush_chars.
-flush_chars() - Ask the kernel to write put_char queue
+flush_chars() Ask the kernel to write put_char queue
-write_room() - Return the number of characters that can be stuffed
+write_room() Return the number of characters that can be stuffed
into the port buffers without overflow (or less).
The ldisc is responsible for being intelligent
- about multi-threading of write_room/write calls
+ about multi-threading of write_room/write calls
-ioctl() - Called when an ioctl may be for the driver
+ioctl() Called when an ioctl may be for the driver
-set_termios() - Called on termios change, serialized against
+set_termios() Called on termios change, serialized against
itself by a semaphore. May sleep.
-set_ldisc() - Notifier for discipline change. At the point this
+set_ldisc() Notifier for discipline change. At the point this
is done the discipline is not yet usable. Can now
sleep (I think)
-throttle() - Called by the ldisc to ask the driver to do flow
+throttle() Called by the ldisc to ask the driver to do flow
control. Serialization including with unthrottle
is the job of the ldisc layer.
-unthrottle() - Called by the ldisc to ask the driver to stop flow
+unthrottle() Called by the ldisc to ask the driver to stop flow
control.
-stop() - Ldisc notifier to the driver to stop output. As with
+stop() Ldisc notifier to the driver to stop output. As with
throttle the serializations with start() are down
to the ldisc layer.
-start() - Ldisc notifier to the driver to start output.
+start() Ldisc notifier to the driver to start output.
-hangup() - Ask the tty driver to cause a hangup initiated
+hangup() Ask the tty driver to cause a hangup initiated
from the host side. [Can sleep ??]
-break_ctl() - Send RS232 break. Can sleep. Can get called in
+break_ctl() Send RS232 break. Can sleep. Can get called in
parallel, driver must serialize (for now), and
with write calls.
-wait_until_sent() - Wait for characters to exit the hardware queue
+wait_until_sent() Wait for characters to exit the hardware queue
of the driver. Can sleep
-send_xchar() - Send XON/XOFF and if possible jump the queue with
+send_xchar() Send XON/XOFF and if possible jump the queue with
it in order to get fast flow control responses.
Cannot sleep ??
-
+======================= =======================================================
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 6b154dbb02cc..132f5eb9b530 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -324,7 +324,7 @@ to details explained in the following section.
strcpy(card->driver, "My Chip");
strcpy(card->shortname, "My Own Chip 123");
sprintf(card->longname, "%s at 0x%lx irq %i",
- card->shortname, chip->ioport, chip->irq);
+ card->shortname, chip->port, chip->irq);
/* (5) */
.... /* implemented later */
@@ -437,7 +437,7 @@ Since each component can be properly freed, the single
strcpy(card->driver, "My Chip");
strcpy(card->shortname, "My Own Chip 123");
sprintf(card->longname, "%s at 0x%lx irq %i",
- card->shortname, chip->ioport, chip->irq);
+ card->shortname, chip->port, chip->irq);
The driver field holds the minimal ID string of the chip. This is used
by alsa-lib's configurator, so keep it simple but unique. Even the
diff --git a/Documentation/sparc/adi.txt b/Documentation/sparc/adi.rst
index e1aed155fb89..857ad30f9569 100644
--- a/Documentation/sparc/adi.txt
+++ b/Documentation/sparc/adi.rst
@@ -1,3 +1,4 @@
+================================
Application Data Integrity (ADI)
================================
@@ -44,12 +45,15 @@ provided by the hypervisor to the kernel. Kernel returns the value of
ADI block size to userspace using auxiliary vector along with other ADI
info. Following auxiliary vectors are provided by the kernel:
+ ============ ===========================================
AT_ADI_BLKSZ ADI block size. This is the granularity and
alignment, in bytes, of ADI versioning.
AT_ADI_NBITS Number of ADI version bits in the VA
+ ============ ===========================================
-IMPORTANT NOTES:
+IMPORTANT NOTES
+===============
- Version tag values of 0x0 and 0xf are reserved. These values match any
tag in virtual address and never generate a mismatch exception.
@@ -86,11 +90,12 @@ IMPORTANT NOTES:
ADI related traps
------------------
+=================
With ADI enabled, following new traps may occur:
Disrupting memory corruption
+----------------------------
When a store accesses a memory localtion that has TTE.mcd=1,
the task is running with ADI enabled (PSTATE.mcde=1), and the ADI
@@ -100,7 +105,7 @@ Disrupting memory corruption
first. Hypervisor creates a sun4v error report and sends a
resumable error (TT=0x7e) trap to the kernel. The kernel sends
a SIGSEGV to the task that resulted in this trap with the following
- info:
+ info::
siginfo.si_signo = SIGSEGV;
siginfo.errno = 0;
@@ -110,6 +115,7 @@ Disrupting memory corruption
Precise memory corruption
+-------------------------
When a store accesses a memory location that has TTE.mcd=1,
the task is running with ADI enabled (PSTATE.mcde=1), and the ADI
@@ -118,7 +124,7 @@ Precise memory corruption
MCD precise exception is enabled (MCDPERR=1), a precise
exception is sent to the kernel with TT=0x1a. The kernel sends
a SIGSEGV to the task that resulted in this trap with the following
- info:
+ info::
siginfo.si_signo = SIGSEGV;
siginfo.errno = 0;
@@ -126,17 +132,19 @@ Precise memory corruption
siginfo.si_addr = addr; /* address that caused trap */
siginfo.si_trapno = 0;
- NOTE: ADI tag mismatch on a load always results in precise trap.
+ NOTE:
+ ADI tag mismatch on a load always results in precise trap.
MCD disabled
+------------
When a task has not enabled ADI and attempts to set ADI version
on a memory address, processor sends an MCD disabled trap. This
trap is handled by hypervisor first and the hypervisor vectors this
trap through to the kernel as Data Access Exception trap with
fault type set to 0xa (invalid ASI). When this occurs, the kernel
- sends the task SIGSEGV signal with following info:
+ sends the task SIGSEGV signal with following info::
siginfo.si_signo = SIGSEGV;
siginfo.errno = 0;
@@ -149,35 +157,35 @@ Sample program to use ADI
-------------------------
Following sample program is meant to illustrate how to use the ADI
-functionality.
-
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <elf.h>
-#include <sys/ipc.h>
-#include <sys/shm.h>
-#include <sys/mman.h>
-#include <asm/asi.h>
-
-#ifndef AT_ADI_BLKSZ
-#define AT_ADI_BLKSZ 48
-#endif
-#ifndef AT_ADI_NBITS
-#define AT_ADI_NBITS 49
-#endif
-
-#ifndef PROT_ADI
-#define PROT_ADI 0x10
-#endif
-
-#define BUFFER_SIZE 32*1024*1024UL
-
-main(int argc, char* argv[], char* envp[])
-{
- unsigned long i, mcde, adi_blksz, adi_nbits;
- char *shmaddr, *tmp_addr, *end, *veraddr, *clraddr;
- int shmid, version;
+functionality::
+
+ #include <unistd.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <elf.h>
+ #include <sys/ipc.h>
+ #include <sys/shm.h>
+ #include <sys/mman.h>
+ #include <asm/asi.h>
+
+ #ifndef AT_ADI_BLKSZ
+ #define AT_ADI_BLKSZ 48
+ #endif
+ #ifndef AT_ADI_NBITS
+ #define AT_ADI_NBITS 49
+ #endif
+
+ #ifndef PROT_ADI
+ #define PROT_ADI 0x10
+ #endif
+
+ #define BUFFER_SIZE 32*1024*1024UL
+
+ main(int argc, char* argv[], char* envp[])
+ {
+ unsigned long i, mcde, adi_blksz, adi_nbits;
+ char *shmaddr, *tmp_addr, *end, *veraddr, *clraddr;
+ int shmid, version;
Elf64_auxv_t *auxv;
adi_blksz = 0;
@@ -202,77 +210,77 @@ main(int argc, char* argv[], char* envp[])
printf("\tBlock size = %ld\n", adi_blksz);
printf("\tNumber of bits = %ld\n", adi_nbits);
- if ((shmid = shmget(2, BUFFER_SIZE,
- IPC_CREAT | SHM_R | SHM_W)) < 0) {
- perror("shmget failed");
- exit(1);
- }
+ if ((shmid = shmget(2, BUFFER_SIZE,
+ IPC_CREAT | SHM_R | SHM_W)) < 0) {
+ perror("shmget failed");
+ exit(1);
+ }
- shmaddr = shmat(shmid, NULL, 0);
- if (shmaddr == (char *)-1) {
- perror("shm attach failed");
- shmctl(shmid, IPC_RMID, NULL);
- exit(1);
- }
+ shmaddr = shmat(shmid, NULL, 0);
+ if (shmaddr == (char *)-1) {
+ perror("shm attach failed");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(1);
+ }
if (mprotect(shmaddr, BUFFER_SIZE, PROT_READ|PROT_WRITE|PROT_ADI)) {
perror("mprotect failed");
goto err_out;
}
- /* Set the ADI version tag on the shm segment
- */
- version = 10;
- tmp_addr = shmaddr;
- end = shmaddr + BUFFER_SIZE;
- while (tmp_addr < end) {
- asm volatile(
- "stxa %1, [%0]0x90\n\t"
- :
- : "r" (tmp_addr), "r" (version));
- tmp_addr += adi_blksz;
- }
+ /* Set the ADI version tag on the shm segment
+ */
+ version = 10;
+ tmp_addr = shmaddr;
+ end = shmaddr + BUFFER_SIZE;
+ while (tmp_addr < end) {
+ asm volatile(
+ "stxa %1, [%0]0x90\n\t"
+ :
+ : "r" (tmp_addr), "r" (version));
+ tmp_addr += adi_blksz;
+ }
asm volatile("membar #Sync\n\t");
- /* Create a versioned address from the normal address by placing
+ /* Create a versioned address from the normal address by placing
* version tag in the upper adi_nbits bits
- */
- tmp_addr = (void *) ((unsigned long)shmaddr << adi_nbits);
- tmp_addr = (void *) ((unsigned long)tmp_addr >> adi_nbits);
- veraddr = (void *) (((unsigned long)version << (64-adi_nbits))
- | (unsigned long)tmp_addr);
-
- printf("Starting the writes:\n");
- for (i = 0; i < BUFFER_SIZE; i++) {
- veraddr[i] = (char)(i);
- if (!(i % (1024 * 1024)))
- printf(".");
- }
- printf("\n");
-
- printf("Verifying data...");
+ */
+ tmp_addr = (void *) ((unsigned long)shmaddr << adi_nbits);
+ tmp_addr = (void *) ((unsigned long)tmp_addr >> adi_nbits);
+ veraddr = (void *) (((unsigned long)version << (64-adi_nbits))
+ | (unsigned long)tmp_addr);
+
+ printf("Starting the writes:\n");
+ for (i = 0; i < BUFFER_SIZE; i++) {
+ veraddr[i] = (char)(i);
+ if (!(i % (1024 * 1024)))
+ printf(".");
+ }
+ printf("\n");
+
+ printf("Verifying data...");
fflush(stdout);
- for (i = 0; i < BUFFER_SIZE; i++)
- if (veraddr[i] != (char)i)
- printf("\nIndex %lu mismatched\n", i);
- printf("Done.\n");
+ for (i = 0; i < BUFFER_SIZE; i++)
+ if (veraddr[i] != (char)i)
+ printf("\nIndex %lu mismatched\n", i);
+ printf("Done.\n");
- /* Disable ADI and clean up
- */
+ /* Disable ADI and clean up
+ */
if (mprotect(shmaddr, BUFFER_SIZE, PROT_READ|PROT_WRITE)) {
perror("mprotect failed");
goto err_out;
}
- if (shmdt((const void *)shmaddr) != 0)
- perror("Detach failure");
- shmctl(shmid, IPC_RMID, NULL);
+ if (shmdt((const void *)shmaddr) != 0)
+ perror("Detach failure");
+ shmctl(shmid, IPC_RMID, NULL);
- exit(0);
+ exit(0);
-err_out:
- if (shmdt((const void *)shmaddr) != 0)
- perror("Detach failure");
- shmctl(shmid, IPC_RMID, NULL);
- exit(1);
-}
+ err_out:
+ if (shmdt((const void *)shmaddr) != 0)
+ perror("Detach failure");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(1);
+ }
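
The sample above reads the ADI parameters by walking envp[] directly. Assuming
glibc, a more compact sketch queries the same AT_ADI_BLKSZ and AT_ADI_NBITS
values with getauxval(); the fallback constants (48, 49) are the ones the
sample already uses, and a mapping is then opted in to ADI with
mprotect(addr, len, PROT_READ|PROT_WRITE|PROT_ADI) exactly as shown above::

  /* Minimal sketch, glibc assumed: query ADI parameters via getauxval()
   * instead of parsing envp[] by hand. */
  #include <stdio.h>
  #include <sys/auxv.h>

  #ifndef AT_ADI_BLKSZ
  #define AT_ADI_BLKSZ 48
  #endif
  #ifndef AT_ADI_NBITS
  #define AT_ADI_NBITS 49
  #endif

  int main(void)
  {
          unsigned long adi_blksz = getauxval(AT_ADI_BLKSZ);
          unsigned long adi_nbits = getauxval(AT_ADI_NBITS);

          if (!adi_blksz) {
                  fprintf(stderr, "ADI not available on this system\n");
                  return 1;
          }

          /* Versioning granularity and number of version-tag bits; a
           * mapping is enabled for ADI with mprotect(..., PROT_ADI). */
          printf("ADI block size = %lu bytes, version bits = %lu\n",
                 adi_blksz, adi_nbits);
          return 0;
  }
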
diff --git a/Documentation/sparc/console.txt b/Documentation/sparc/console.rst
index 5aa735a44e02..73132db83ece 100644
--- a/Documentation/sparc/console.txt
+++ b/Documentation/sparc/console.rst
@@ -1,5 +1,5 @@
-Steps for sending 'break' on sunhv console:
-===========================================
+Steps for sending 'break' on sunhv console
+==========================================
On Baremetal:
1. press Esc + 'B'
diff --git a/Documentation/sparc/index.rst b/Documentation/sparc/index.rst
new file mode 100644
index 000000000000..91f7d6643dd5
--- /dev/null
+++ b/Documentation/sparc/index.rst
@@ -0,0 +1,13 @@
+:orphan:
+
+==================
+Sparc Architecture
+==================
+
+.. toctree::
+ :maxdepth: 1
+
+ console
+ adi
+
+ oradax/oracle-dax
diff --git a/Documentation/sparc/oradax/oracle-dax.txt b/Documentation/sparc/oradax/oracle-dax.rst
index 9d53ac93286f..d1e14d572918 100644
--- a/Documentation/sparc/oradax/oracle-dax.txt
+++ b/Documentation/sparc/oradax/oracle-dax.rst
@@ -1,5 +1,6 @@
+=======================================
Oracle Data Analytics Accelerator (DAX)
----------------------------------------
+=======================================
DAX is a coprocessor which resides on the SPARC M7 (DAX1) and M8
(DAX2) processor chips, and has direct access to the CPU's L3 caches
@@ -17,6 +18,7 @@ code sufficient to write user or kernel applications that use DAX
functionality.
The user library is open source and available at:
+
https://oss.oracle.com/git/gitweb.cgi?p=libdax.git
The Hypervisor interface to the coprocessor is described in detail in
@@ -26,7 +28,7 @@ Specification" version 3.0.20+15, dated 2017-09-25.
High Level Overview
--------------------
+===================
A coprocessor request is described by a Command Control Block
(CCB). The CCB contains an opcode and various parameters. The opcode
@@ -52,7 +54,7 @@ thread.
Addressing Memory
------------------
+=================
The kernel does not have access to physical memory in the Sun4v
architecture, as there is an additional level of memory virtualization
@@ -77,7 +79,7 @@ the request.
The Driver API
---------------
+==============
An application makes requests to the driver via the write() system
call, and gets results (if any) via read(). The completion areas are
@@ -108,6 +110,7 @@ equal to the number of bytes given in the call. Otherwise -1 is
returned and errno is set.
CCB_DEQUEUE
+-----------
Tells the driver to clean up resources associated with past
requests. Since no interrupt is generated upon the completion of a
@@ -116,12 +119,14 @@ further status information is returned, so the user should not
subsequently call read().
CCB_KILL
+--------
Kills a CCB during execution. The CCB is guaranteed to not continue
executing once this call returns successfully. On success, read() must
be called to retrieve the result of the action.
CCB_INFO
+--------
Retrieves information about a currently executing CCB. Note that some
Hypervisors might return 'notfound' when the CCB is in 'inprogress'
@@ -130,6 +135,7 @@ CCB_KILL must be invoked on that CCB. Upon success, read() must be
called to retrieve the details of the action.
Submission of an array of CCBs for execution
+---------------------------------------------
A write() whose length is a multiple of the CCB size is treated as a
submit operation. The file offset is treated as the index of the
@@ -146,6 +152,7 @@ status will reflect the error caused by the first CCB that was not
accepted, and status_data will provide additional data in some cases.
MMAP
+----
The mmap() function provides access to the completion area allocated
in the driver. Note that the completion area is not writeable by the
@@ -153,7 +160,7 @@ user process, and the mmap call must not specify PROT_WRITE.
Completion of a Request
------------------------
+=======================
The first byte in each completion area is the command status which is
updated by the coprocessor hardware. Software may take advantage of
@@ -172,7 +179,7 @@ and resumption of execution may be just a few nanoseconds.
Application Life Cycle of a DAX Submission
-------------------------------------------
+==========================================
- open dax device
- call mmap() to get the completion area address
@@ -187,7 +194,7 @@ Application Life Cycle of a DAX Submission
Memory Constraints
-------------------
+==================
The DAX hardware operates only on physical addresses. Therefore, it is
not aware of virtual memory mappings and the discontiguities that may
@@ -226,7 +233,7 @@ CCB Structure
-------------
A CCB is an array of 8 64-bit words. Several of these words provide
command opcodes, parameters, flags, etc., and the rest are addresses
-for the completion area, output buffer, and various inputs:
+for the completion area, output buffer, and various inputs::
struct ccb {
u64 control;
@@ -252,7 +259,7 @@ The first word (control) is examined by the driver for the following:
Example Code
-------------
+============
The DAX is accessible to both user and kernel code. The kernel code
can make hypercalls directly while the user code must use wrappers
@@ -265,7 +272,7 @@ arch/sparc/include/uapi/asm/oradax.h must be included.
First, the proper device must be opened. For M7 it will be
/dev/oradax1 and for M8 it will be /dev/oradax2. The simplest
-procedure is to attempt to open both, as only one will succeed:
+procedure is to attempt to open both, as only one will succeed::
fd = open("/dev/oradax1", O_RDWR);
if (fd < 0)
@@ -273,7 +280,7 @@ procedure is to attempt to open both, as only one will succeed:
if (fd < 0)
/* No DAX found */
-Next, the completion area must be mapped:
+Next, the completion area must be mapped::
completion_area = mmap(NULL, DAX_MMAP_LEN, PROT_READ, MAP_SHARED, fd, 0);
@@ -295,7 +302,7 @@ is the input bitmap inverted.
For details of all the parameters and bits used in this CCB, please
refer to section 36.2.1.3 of the DAX Hypervisor API document, which
-describes the Scan command in detail.
+describes the Scan command in detail::
ccb->control = /* Table 36.1, CCB Header Format */
(2L << 48) /* command = Scan Value */
@@ -326,7 +333,7 @@ describes the Scan command in detail.
The CCB submission is a write() or pwrite() system call to the
driver. If the call fails, then a read() must be used to retrieve the
-status:
+status::
if (pwrite(fd, ccb, 64, 0) != 64) {
struct ccb_exec_result status;
@@ -337,7 +344,7 @@ status:
After a successful submission of the CCB, the completion area may be
polled to determine when the DAX is finished. Detailed information on
the contents of the completion area can be found in section 36.2.2 of
-the DAX HV API document.
+the DAX HV API document::
while (1) {
/* Monitored Load */
@@ -355,7 +362,7 @@ the DAX HV API document.
A completion area status of 1 indicates successful completion of the
CCB and validity of the output bitmap, which may be used immediately.
All other non-zero values indicate error conditions which are
-described in section 36.2.2.
+described in section 36.2.2::
if (completion_area[0] != 1) { /* section 36.2.2, 1 = command ran and succeeded */
/* completion_area[0] contains the completion status */
@@ -364,7 +371,7 @@ described in section 36.2.2.
After the completion area has been processed, the driver must be
notified that it can release any resources associated with the
-request. This is done via the dequeue operation:
+request. This is done via the dequeue operation::
struct dax_command cmd;
cmd.command = CCB_DEQUEUE;
@@ -375,13 +382,14 @@ request. This is done via the dequeue operation:
Finally, normal program cleanup should be done, i.e., unmapping
completion area, closing the dax device, freeing memory etc.
-[Kernel example]
+Kernel example
+--------------
The only difference in using the DAX in kernel code is the treatment
of the completion area. Unlike user applications which mmap the
completion area allocated by the driver, kernel code must allocate its
own memory to use for the completion area, and this address and its
-type must be given in the CCB:
+type must be given in the CCB::
ccb->control |= /* Table 36.1, CCB Header Format */
(3L << 32); /* completion area address type = primary virtual */
@@ -389,9 +397,11 @@ type must be given in the CCB:
ccb->completion = (unsigned long) completion_area; /* Completion area address */
The dax submit hypercall is made directly. The flags used in the
-ccb_submit call are documented in the DAX HV API in section 36.3.1.
+ccb_submit call are documented in the DAX HV API in section 36.3.1.
-#include <asm/hypervisor.h>
+::
+
+ #include <asm/hypervisor.h>
hv_rv = sun4v_ccb_submit((unsigned long)ccb, 64,
HV_CCB_QUERY_CMD |
@@ -405,7 +415,7 @@ ccb_submit call are documented in the DAX HV API in section 36.3.1.
}
After the submission, the completion area polling code is identical to
-that in user land:
+that in user land::
while (1) {
/* Monitored Load */
@@ -427,3 +437,9 @@ that in user land:
The output bitmap is ready for consumption immediately after the
completion status indicates success.
+
+Excerpt from UltraSPARC Virtual Machine Specification
+=====================================================
+
+ .. include:: dax-hv-api.txt
+ :literal:
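
Stitching the user-space snippets above together gives roughly the skeleton
below. This is only a sketch: the CCB field setup is elided (see the Scan
example), the busy-wait stands in for the monitored-load polling loop, and the
DAX_MMAP_LEN/CCB_DEQUEUE constants and struct dax_command layout should be
checked against arch/sparc/include/uapi/asm/oradax.h::

  /* Sketch only: CCB contents omitted; names taken from the snippets
   * above and from <asm/oradax.h> (verify against your headers). */
  #include <fcntl.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/mman.h>
  #include <asm/oradax.h>

  int main(void)
  {
          unsigned long ccb[8] = { 0 };   /* fill in as in the Scan example */
          struct dax_command cmd;
          volatile char *completion_area;
          int fd;

          /* only one of the two device nodes exists on a given machine */
          fd = open("/dev/oradax1", O_RDWR);
          if (fd < 0)
                  fd = open("/dev/oradax2", O_RDWR);
          if (fd < 0)
                  return 1;               /* no DAX found */

          completion_area = mmap(NULL, DAX_MMAP_LEN, PROT_READ,
                                 MAP_SHARED, fd, 0);
          if (completion_area == MAP_FAILED)
                  return 1;

          /* ... build the CCB, then submit it; on failure read() would
           * return a struct ccb_exec_result with the reason ... */
          if (pwrite(fd, ccb, 64, 0) != 64)
                  return 1;

          while (completion_area[0] == 0)
                  ;                       /* real code uses monitored loads */

          /* let the driver release resources tied to this request */
          memset(&cmd, 0, sizeof(cmd));
          cmd.command = CCB_DEQUEUE;
          write(fd, &cmd, sizeof(cmd));

          close(fd);
          return 0;
  }
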
diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt
index e9e6cbae2841..50d7ea857cff 100644
--- a/Documentation/speculation.txt
+++ b/Documentation/speculation.txt
@@ -17,7 +17,7 @@ observed to extract secret information.
For example, in the presence of branch prediction, it is possible for bounds
checks to be ignored by code which is speculatively executed. Consider the
-following code:
+following code::
int load_array(int *array, unsigned int index)
{
@@ -27,7 +27,7 @@ following code:
return array[index];
}
-Which, on arm64, may be compiled to an assembly sequence such as:
+Which, on arm64, may be compiled to an assembly sequence such as::
CMP <index>, #MAX_ARRAY_ELEMS
B.LT less
@@ -44,7 +44,7 @@ microarchitectural state which can be subsequently measured.
More complex sequences involving multiple dependent memory accesses may
result in sensitive information being leaked. Consider the following
-code, building on the prior example:
+code, building on the prior example::
int load_dependent_arrays(int *arr1, int *arr2, int index)
{
@@ -77,7 +77,7 @@ A call to array_index_nospec(index, size) returns a sanitized index
value that is bounded to [0, size) even under cpu speculation
conditions.
-This can be used to protect the earlier load_array() example:
+This can be used to protect the earlier load_array() example::
int load_array(int *array, unsigned int index)
{
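
Since the hunk above is cut off mid-function, the protected version of
load_array() reads essentially as follows, assuming array_index_nospec() from
linux/nospec.h and the MAX_ARRAY_ELEMS bound used in the earlier example::

  #include <linux/nospec.h>

  int load_array(int *array, unsigned int index)
  {
          if (index >= MAX_ARRAY_ELEMS)
                  return 0;
          else {
                  /* sanitize index so that, even under speculation, the
                   * access stays within [0, MAX_ARRAY_ELEMS) */
                  index = array_index_nospec(index, MAX_ARRAY_ELEMS);
                  return array[index];
          }
  }
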
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index aa058aa7bf28..f0c86fbb3b48 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -196,7 +196,7 @@ CAP_LAST_CAP from the kernel.
core_pattern:
core_pattern is used to specify a core dumpfile pattern name.
-. max length 128 characters; default value is "core"
+. max length 127 characters; default value is "core"
. core_pattern is used as a pattern template for the output filename;
certain string patterns (beginning with '%') are substituted with
their actual values.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 3f13d8599337..749322060f10 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -61,6 +61,7 @@ Currently, these files are in /proc/sys/vm:
- stat_refresh
- numa_stat
- swappiness
+- unprivileged_userfaultfd
- user_reserve_kbytes
- vfs_cache_pressure
- watermark_boost_factor
@@ -818,6 +819,17 @@ The default value is 60.
==============================================================
+unprivileged_userfaultfd
+
+This flag controls whether unprivileged users can use the userfaultfd
+system calls. Set this to 1 to allow unprivileged users to use the
+userfaultfd system calls, or set this to 0 to restrict userfaultfd to only
+privileged users (with CAP_SYS_PTRACE capability).
+
+The default value is 1.
+
+==============================================================
+
- user_reserve_kbytes
When overcommit_memory is set to 2, "never overcommit" mode, reserve
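
To illustrate the unprivileged_userfaultfd knob added above: restricting
userfaultfd() to privileged tasks amounts to writing '0' to the corresponding
file under /proc/sys/vm. A minimal sketch, with error handling kept terse::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* 1 (default): any user may call userfaultfd();
           * 0: only tasks with CAP_SYS_PTRACE may. */
          int fd = open("/proc/sys/vm/unprivileged_userfaultfd", O_WRONLY);

          if (fd < 0 || write(fd, "0", 1) != 1) {
                  perror("unprivileged_userfaultfd");
                  return 1;
          }
          close(fd);
          return 0;
  }

From a shell, sysctl vm.unprivileged_userfaultfd=0 achieves the same thing.
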
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index 911399730c1c..c3fa500df92c 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -316,7 +316,7 @@ ACPI thermal zones.
|---temp[1-*]_input: The current temperature of thermal zone [1-*]
|---temp[1-*]_critical: The critical trip point of thermal zone [1-*]
-Please read Documentation/hwmon/sysfs-interface for additional information.
+Please read Documentation/hwmon/sysfs-interface.rst for additional information.
***************************
* Thermal zone attributes *
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index 7c5e6d6ab5d1..f60079259669 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -765,6 +765,37 @@ Here is the list of current tracers that may be configured.
tracers from tracing simply echo "nop" into
current_tracer.
+Error conditions
+----------------
+
+ For most ftrace commands, failure modes are obvious and communicated
+ using standard return codes.
+
+ For other more involved commands, extended error information may be
+ available via the tracing/error_log file. For the commands that
+ support it, reading the tracing/error_log file after an error will
+ display more detailed information about what went wrong, if
+ information is available. The tracing/error_log file is a circular
+  error log displaying a small number (currently, 8) of ftrace errors
+  for the last 8 failed commands.
+
+ The extended error information and usage takes the form shown in
+ this example::
+
+ # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
+ echo: write error: Invalid argument
+
+ # cat /sys/kernel/debug/tracing/error_log
+ [ 5348.887237] location: error: Couldn't yyy: zzz
+ Command: xxx
+ ^
+ [ 7517.023364] location: error: Bad rrr: sss
+ Command: ppp qqq
+ ^
+
+ To clear the error log, echo the empty string into it::
+
+ # echo > /sys/kernel/debug/tracing/error_log
Examples of using the tracer
----------------------------
@@ -1404,6 +1435,7 @@ trace has provided some very helpful debugging information.
If we prefer function graph output instead of function, we can set
display-graph option::
+
with echo 1 > options/display-graph
# tracer: irqsoff
diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst
index 0ea59d45aef1..fb621a1c2638 100644
--- a/Documentation/trace/histogram.rst
+++ b/Documentation/trace/histogram.rst
@@ -199,20 +199,8 @@ Extended error information
For some error conditions encountered when invoking a hist trigger
command, extended error information is available via the
- corresponding event's 'hist' file. Reading the hist file after an
- error will display more detailed information about what went wrong,
- if information is available. This extended error information will
- be available until the next hist trigger command for that event.
-
- If available for a given error condition, the extended error
- information and usage takes the following form::
-
- # echo xxx > /sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
- echo: write error: Invalid argument
-
- # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/hist
- ERROR: Couldn't yyy: zzz
- Last command: xxx
+ tracing/error_log file. See Error Conditions in
+ :file:`Documentation/trace/ftrace.rst` for details.
6.2 'hist' trigger examples
---------------------------
@@ -1915,7 +1903,10 @@ The following commonly-used handler.action pairs are available:
The 'matching.event' specification is simply the fully qualified
event name of the event that matches the target event for the
- onmatch() functionality, in the form 'system.event_name'.
+ onmatch() functionality, in the form 'system.event_name'. Histogram
+ keys of both events are compared to find if events match. In case
+ multiple histogram keys are used, they all must match in the specified
+ order.
Finally, the number and type of variables/fields in the 'param
list' must match the number and types of the fields in the
@@ -1978,9 +1969,9 @@ The following commonly-used handler.action pairs are available:
/sys/kernel/debug/tracing/events/sched/sched_waking/trigger
Then, when the corresponding thread is actually scheduled onto the
- CPU by a sched_switch event, calculate the latency and use that
- along with another variable and an event field to generate a
- wakeup_latency synthetic event::
+ CPU by a sched_switch event (saved_pid matches next_pid), calculate
+ the latency and use that along with another variable and an event field
+ to generate a wakeup_latency synthetic event::
# echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0:\
onmatch(sched.sched_waking).wakeup_latency($wakeup_lat,\
@@ -2133,33 +2124,33 @@ The following commonly-used handler.action pairs are available:
the end the event that triggered the snapshot (in this case you
can verify the timestamps between the sched_waking and
sched_switch events, which should match the time displayed in the
- global maximum):
-
- # cat /sys/kernel/debug/tracing/snapshot
-
- <...>-2103 [005] d..3 309.873125: sched_switch: prev_comm=cyclictest prev_pid=2103 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
- <idle>-0 [005] d.h3 309.873611: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
- <idle>-0 [005] dNh4 309.873613: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
- <idle>-0 [005] d..3 309.873616: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
- <...>-2102 [005] d..3 309.873625: sched_switch: prev_comm=cyclictest prev_pid=2102 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
- <idle>-0 [005] d.h3 309.874624: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
- <idle>-0 [005] dNh4 309.874626: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
- <idle>-0 [005] dNh3 309.874628: sched_waking: comm=cyclictest pid=2103 prio=19 target_cpu=005
- <idle>-0 [005] dNh4 309.874630: sched_wakeup: comm=cyclictest pid=2103 prio=19 target_cpu=005
- <idle>-0 [005] d..3 309.874633: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
- <idle>-0 [004] d.h3 309.874757: sched_waking: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
- <idle>-0 [004] dNh4 309.874762: sched_wakeup: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
- <idle>-0 [004] d..3 309.874766: sched_switch: prev_comm=swapper/4 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=gnome-terminal- next_pid=1699 next_prio=120
- gnome-terminal--1699 [004] d.h2 309.874941: sched_stat_runtime: comm=gnome-terminal- pid=1699 runtime=180706 [ns] vruntime=1126870572 [ns]
- <idle>-0 [003] d.s4 309.874956: sched_waking: comm=rcu_sched pid=9 prio=120 target_cpu=007
- <idle>-0 [003] d.s5 309.874960: sched_wake_idle_without_ipi: cpu=7
- <idle>-0 [003] d.s5 309.874961: sched_wakeup: comm=rcu_sched pid=9 prio=120 target_cpu=007
- <idle>-0 [007] d..3 309.874963: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=rcu_sched next_pid=9 next_prio=120
- rcu_sched-9 [007] d..3 309.874973: sched_stat_runtime: comm=rcu_sched pid=9 runtime=13646 [ns] vruntime=22531430286 [ns]
- rcu_sched-9 [007] d..3 309.874978: sched_switch: prev_comm=rcu_sched prev_pid=9 prev_prio=120 prev_state=R+ ==> next_comm=swapper/7 next_pid=0 next_prio=120
- <...>-2102 [005] d..4 309.874994: sched_migrate_task: comm=cyclictest pid=2103 prio=19 orig_cpu=5 dest_cpu=1
- <...>-2102 [005] d..4 309.875185: sched_wake_idle_without_ipi: cpu=1
- <idle>-0 [001] d..3 309.875200: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2103 next_prio=19
+ global maximum)::
+
+ # cat /sys/kernel/debug/tracing/snapshot
+
+ <...>-2103 [005] d..3 309.873125: sched_switch: prev_comm=cyclictest prev_pid=2103 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
+ <idle>-0 [005] d.h3 309.873611: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.873613: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] d..3 309.873616: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
+ <...>-2102 [005] d..3 309.873625: sched_switch: prev_comm=cyclictest prev_pid=2102 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120
+ <idle>-0 [005] d.h3 309.874624: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.874626: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005
+ <idle>-0 [005] dNh3 309.874628: sched_waking: comm=cyclictest pid=2103 prio=19 target_cpu=005
+ <idle>-0 [005] dNh4 309.874630: sched_wakeup: comm=cyclictest pid=2103 prio=19 target_cpu=005
+ <idle>-0 [005] d..3 309.874633: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19
+ <idle>-0 [004] d.h3 309.874757: sched_waking: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
+ <idle>-0 [004] dNh4 309.874762: sched_wakeup: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004
+ <idle>-0 [004] d..3 309.874766: sched_switch: prev_comm=swapper/4 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=gnome-terminal- next_pid=1699 next_prio=120
+ gnome-terminal--1699 [004] d.h2 309.874941: sched_stat_runtime: comm=gnome-terminal- pid=1699 runtime=180706 [ns] vruntime=1126870572 [ns]
+ <idle>-0 [003] d.s4 309.874956: sched_waking: comm=rcu_sched pid=9 prio=120 target_cpu=007
+ <idle>-0 [003] d.s5 309.874960: sched_wake_idle_without_ipi: cpu=7
+ <idle>-0 [003] d.s5 309.874961: sched_wakeup: comm=rcu_sched pid=9 prio=120 target_cpu=007
+ <idle>-0 [007] d..3 309.874963: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=rcu_sched next_pid=9 next_prio=120
+ rcu_sched-9 [007] d..3 309.874973: sched_stat_runtime: comm=rcu_sched pid=9 runtime=13646 [ns] vruntime=22531430286 [ns]
+ rcu_sched-9 [007] d..3 309.874978: sched_switch: prev_comm=rcu_sched prev_pid=9 prev_prio=120 prev_state=R+ ==> next_comm=swapper/7 next_pid=0 next_prio=120
+ <...>-2102 [005] d..4 309.874994: sched_migrate_task: comm=cyclictest pid=2103 prio=19 orig_cpu=5 dest_cpu=1
+ <...>-2102 [005] d..4 309.875185: sched_wake_idle_without_ipi: cpu=1
+ <idle>-0 [001] d..3 309.875200: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2103 next_prio=19
- onchange(var).save(field,.. .)
@@ -2213,9 +2204,10 @@ The following commonly-used handler.action pairs are available:
following the rest of the fields.
If a snaphot was taken, there is also a message indicating that,
- along with the value and event that triggered the snapshot:
+ along with the value and event that triggered the snapshot::
+
+ # cat /sys/kernel/debug/tracing/events/tcp/tcp_probe/hist
- # cat /sys/kernel/debug/tracing/events/tcp/tcp_probe/hist
{ dport: 1521 } hitcount: 8
changed: 10 snd_wnd: 35456 srtt: 154262 rcv_wnd: 42112
@@ -2228,14 +2220,15 @@ The following commonly-used handler.action pairs are available:
{ dport: 443 } hitcount: 211
changed: 10 snd_wnd: 26960 srtt: 17379 rcv_wnd: 28800
- Snapshot taken (see tracing/snapshot). Details:
+ Snapshot taken (see tracing/snapshot). Details::
+
triggering value { onchange($cwnd) }: 10
triggered by event with key: { dport: 80 }
- Totals:
- Hits: 414
- Entries: 4
- Dropped: 0
+ Totals:
+ Hits: 414
+ Entries: 4
+ Dropped: 0
In the above case, the event that triggered the snapshot has the
key with dport == 80. If you look at the bucket that has 80 as
@@ -2245,18 +2238,18 @@ The following commonly-used handler.action pairs are available:
the global snapshot).
And finally, looking at the snapshot data should show at or near
- the end the event that triggered the snapshot:
-
- # cat /sys/kernel/debug/tracing/snapshot
-
- gnome-shell-1261 [006] dN.3 49.823113: sched_stat_runtime: comm=gnome-shell pid=1261 runtime=49347 [ns] vruntime=1835730389 [ns]
- kworker/u16:4-773 [003] d..3 49.823114: sched_switch: prev_comm=kworker/u16:4 prev_pid=773 prev_prio=120 prev_state=R+ ==> next_comm=kworker/3:2 next_pid=135 next_prio=120
- gnome-shell-1261 [006] d..3 49.823114: sched_switch: prev_comm=gnome-shell prev_pid=1261 prev_prio=120 prev_state=R+ ==> next_comm=kworker/6:2 next_pid=387 next_prio=120
- kworker/3:2-135 [003] d..3 49.823118: sched_stat_runtime: comm=kworker/3:2 pid=135 runtime=5339 [ns] vruntime=17815800388 [ns]
- kworker/6:2-387 [006] d..3 49.823120: sched_stat_runtime: comm=kworker/6:2 pid=387 runtime=9594 [ns] vruntime=14589605367 [ns]
- kworker/6:2-387 [006] d..3 49.823122: sched_switch: prev_comm=kworker/6:2 prev_pid=387 prev_prio=120 prev_state=R+ ==> next_comm=gnome-shell next_pid=1261 next_prio=120
- kworker/3:2-135 [003] d..3 49.823123: sched_switch: prev_comm=kworker/3:2 prev_pid=135 prev_prio=120 prev_state=T ==> next_comm=swapper/3 next_pid=0 next_prio=120
- <idle>-0 [004] ..s7 49.823798: tcp_probe: src=10.0.0.10:54326 dest=23.215.104.193:80 mark=0x0 length=32 snd_nxt=0xe3ae2ff5 snd_una=0xe3ae2ecd snd_cwnd=10 ssthresh=2147483647 snd_wnd=28960 srtt=19604 rcv_wnd=29312
+ the end the event that triggered the snapshot::
+
+ # cat /sys/kernel/debug/tracing/snapshot
+
+ gnome-shell-1261 [006] dN.3 49.823113: sched_stat_runtime: comm=gnome-shell pid=1261 runtime=49347 [ns] vruntime=1835730389 [ns]
+ kworker/u16:4-773 [003] d..3 49.823114: sched_switch: prev_comm=kworker/u16:4 prev_pid=773 prev_prio=120 prev_state=R+ ==> next_comm=kworker/3:2 next_pid=135 next_prio=120
+ gnome-shell-1261 [006] d..3 49.823114: sched_switch: prev_comm=gnome-shell prev_pid=1261 prev_prio=120 prev_state=R+ ==> next_comm=kworker/6:2 next_pid=387 next_prio=120
+ kworker/3:2-135 [003] d..3 49.823118: sched_stat_runtime: comm=kworker/3:2 pid=135 runtime=5339 [ns] vruntime=17815800388 [ns]
+ kworker/6:2-387 [006] d..3 49.823120: sched_stat_runtime: comm=kworker/6:2 pid=387 runtime=9594 [ns] vruntime=14589605367 [ns]
+ kworker/6:2-387 [006] d..3 49.823122: sched_switch: prev_comm=kworker/6:2 prev_pid=387 prev_prio=120 prev_state=R+ ==> next_comm=gnome-shell next_pid=1261 next_prio=120
+ kworker/3:2-135 [003] d..3 49.823123: sched_switch: prev_comm=kworker/3:2 prev_pid=135 prev_prio=120 prev_state=T ==> next_comm=swapper/3 next_pid=0 next_prio=120
+ <idle>-0 [004] ..s7 49.823798: tcp_probe: src=10.0.0.10:54326 dest=23.215.104.193:80 mark=0x0 length=32 snd_nxt=0xe3ae2ff5 snd_una=0xe3ae2ecd snd_cwnd=10 ssthresh=2147483647 snd_wnd=28960 srtt=19604 rcv_wnd=29312
3. User space creating a trigger
--------------------------------
diff --git a/Documentation/trace/intel_th.rst b/Documentation/trace/intel_th.rst
index 19e2d633f3c7..baa12eb09ef4 100644
--- a/Documentation/trace/intel_th.rst
+++ b/Documentation/trace/intel_th.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
=======================
Intel(R) Trace Hub (TH)
=======================
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 66bfd8396877..995da15b16ca 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -113,7 +113,7 @@ my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
-my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
+my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate_anon=([0-9]*) nr_activate_file=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -212,7 +212,8 @@ $regex_lru_shrink_inactive = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_inactive",
$regex_lru_shrink_inactive_default,
"nid", "nr_scanned", "nr_reclaimed", "nr_dirty", "nr_writeback",
- "nr_congested", "nr_immediate", "nr_activate", "nr_ref_keep",
+ "nr_congested", "nr_immediate", "nr_activate_anon",
+ "nr_activate_file", "nr_ref_keep",
"nr_unmap_fail", "priority", "flags");
$regex_lru_shrink_active = generate_traceevent_regex(
"vmscan/mm_vmscan_lru_shrink_active",
@@ -407,7 +408,7 @@ EVENT_PROCESS:
}
my $nr_reclaimed = $3;
- my $flags = $12;
+ my $flags = $13;
my $file = 0;
if ($flags =~ /RECLAIM_WB_FILE/) {
$file = 1;
diff --git a/Documentation/translations/index.rst b/Documentation/translations/index.rst
index 7f77c52d33aa..e446e5ed00a6 100644
--- a/Documentation/translations/index.rst
+++ b/Documentation/translations/index.rst
@@ -11,3 +11,43 @@ Translations
it_IT/index
ko_KR/index
ja_JP/index
+
+
+.. _translations_disclaimer:
+
+Disclaimer
+----------
+
+The purpose of a translation is to ease reading and understanding in languages
+other than English. Its aim is to help people who do not understand English or
+who have doubts about its interpretation. Additionally, some people prefer to
+read documentation in their native language, but please bear in mind that the
+*only* official documentation is the English one: :ref:`linux_doc`.
+
+It is very unlikely that an update to :ref:`linux_doc` will be propagated
+immediately to all translations. Translation maintainers - and
+contributors - follow the evolution of the official documentation and keep
+the translations aligned as closely as they can. For this reason there is
+no guarantee that a translation is up to date. If what you read in a
+translation does not sound right compared to what you read in the code, please
+inform the translation maintainer and - if you can - also check the English
+documentation.
+
+A translation is not a fork of the official documentation, therefore
+translations' users should not find information that differs from the official
+English documentation. Any content addition, removal or update must be
+applied to the English documents first. Afterwards and when possible, the
+same change should be applied to translations. Translations' maintainers
+accept only contributions that are merely translation related (e.g. new
+translations, updates, fixes).
+
+Translations try to be as accurate as possible but it is not possible to map
+one language directly to all other languages. Each language has its own
+grammar and culture, so the translation of an English statement may need to be
+adapted to fit a different language. For this reason, when viewing
+translations, you may find slight differences that carry the same message but
+in a different form.
+
+If you need to communicate with the Linux community but you do not feel
+comfortable writing in English, you can ask the translation's maintainers
+for help.
diff --git a/Documentation/translations/it_IT/core-api/memory-allocation.rst b/Documentation/translations/it_IT/core-api/memory-allocation.rst
new file mode 100644
index 000000000000..11d5148f8d6b
--- /dev/null
+++ b/Documentation/translations/it_IT/core-api/memory-allocation.rst
@@ -0,0 +1,13 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
+
+.. _it_memory_allocation:
+
+================================
+Guida all'allocazione di memoria
+================================
+
+.. warning::
+
+ TODO ancora da tradurre
diff --git a/Documentation/translations/it_IT/disclaimer-ita.rst b/Documentation/translations/it_IT/disclaimer-ita.rst
index d68e52de6a5d..bfe8a384baed 100644
--- a/Documentation/translations/it_IT/disclaimer-ita.rst
+++ b/Documentation/translations/it_IT/disclaimer-ita.rst
@@ -1,13 +1,6 @@
:orphan:
-.. note::
- This document is maintained by Federico Vaga <federico.vaga@vaga.pv.it>.
- If you find any difference between this document and the original file or a
- problem with the translation, please contact the maintainer of this file.
- Following people helped to translate or review:
- Alessia Mantegazza <amantegazza@vaga.pv.it>
-
.. warning::
- The purpose of this file is to be easier to read and understand for Italian
- speakers and is not intended as a fork. So, if you have any comments or
- updates for this file please try to update the original English file first.
+ In caso di dubbi sulla correttezza del contenuto di questa traduzione,
+ l'unico riferimento valido è la documentazione ufficiale in inglese.
+ Per maggiori informazioni consultate le :ref:`avvertenze <it_disclaimer>`.
diff --git a/Documentation/translations/it_IT/doc-guide/index.rst b/Documentation/translations/it_IT/doc-guide/index.rst
index 7a6562b547ee..9fffff626711 100644
--- a/Documentation/translations/it_IT/doc-guide/index.rst
+++ b/Documentation/translations/it_IT/doc-guide/index.rst
@@ -12,9 +12,9 @@ Come scrivere la documentazione del kernel
.. toctree::
:maxdepth: 1
- sphinx.rst
- kernel-doc.rst
- parse-headers.rst
+ sphinx
+ kernel-doc
+ parse-headers
.. only:: subproject and html
diff --git a/Documentation/translations/it_IT/index.rst b/Documentation/translations/it_IT/index.rst
index ea9b2916b3e4..409eaac03e9f 100644
--- a/Documentation/translations/it_IT/index.rst
+++ b/Documentation/translations/it_IT/index.rst
@@ -4,26 +4,49 @@
Traduzione italiana
===================
-L'obiettivo di questa traduzione è di rendere più facile la lettura e
-la comprensione per chi preferisce leggere in lingua italiana.
-Tenete presente che la documentazione di riferimento rimane comunque
-quella in lingua inglese: :ref:`linux_doc`
-
-Questa traduzione cerca di essere il più fedele possibile all'originale ma
-è ovvio che alcune frasi vadano trasformate: non aspettatevi una traduzione
-letterale. Quando possibile, si eviteranno gli inglesismi ed al loro posto
-verranno utilizzate le corrispettive parole italiane.
+:manutentore: Federico Vaga <federico.vaga@vaga.pv.it>
-Se notate che la traduzione non è più aggiornata potete contattare
-direttamente il manutentore della traduzione italiana.
+.. _it_disclaimer:
-Se notate che la documentazione contiene errori o dimenticanze, allora
-verificate la documentazione di riferimento in lingua inglese. Se il problema
-è presente anche nella documentazione di riferimento, contattate il suo
-manutentore. Se avete problemi a scrivere in inglese, potete comunque
-riportare il problema al manutentore della traduzione italiana.
+Avvertenze
+==========
-Manutentore della traduzione italiana: Federico Vaga <federico.vaga@vaga.pv.it>
+L'obiettivo di questa traduzione è di rendere più facile la lettura e
+la comprensione per chi non capisce l'inglese o ha dubbi sulla sua
+interpretazione, oppure semplicemente per chi preferisce leggere in lingua
+italiana. Tuttavia, tenete ben presente che l'*unica* documentazione
+ufficiale è quella in lingua inglese: :ref:`linux_doc`
+
+La propagazione simultanea a tutte le traduzioni di una modifica in
+:ref:`linux_doc` è altamente improbabile. I manutentori delle traduzioni -
+e i contributori - seguono l'evolversi della documentazione ufficiale e
+cercano di mantenere le rispettive traduzioni allineate nel limite del
+possibile. Per questo motivo non c'è garanzia che una traduzione sia
+aggiornata all'ultima modifica. Se quello che leggete in una traduzione
+non corrisponde a quello che leggete nel codice, informate il manutentore
+della traduzione e - se potete - verificate anche la documentazione in
+inglese.
+
+Una traduzione non è un *fork* della documentazione ufficiale, perciò gli
+utenti non vi troveranno alcuna informazione diversa rispetto alla versione
+ufficiale. Ogni aggiunta, rimozione o modifica dei contenuti deve essere
+fatta prima nei documenti in inglese. In seguito, e quando è possibile, la
+stessa modifica dovrebbe essere applicata anche alle traduzioni. I manutentori
+delle traduzioni accettano contributi che interessano prettamente l'attività
+di traduzione (per esempio, nuove traduzioni, aggiornamenti, correzioni).
+
+Le traduzioni cercano di essere il più possibile accurate ma non è possibile
+mappare direttamente una lingua in un'altra. Ogni lingua ha la sua grammatica
+e una sua cultura alle spalle, quindi la traduzione di una frase in inglese
+potrebbe essere modificata per adattarla all'italiano. Per questo motivo,
+quando leggete questa traduzione, potreste trovare alcune differenze di forma
+ma che trasmettono comunque il messaggio originale. Nonostante la grande
+diffusione di inglesismi nella lingua parlata, quando possibile, questi
+verranno sostituiti dalle corrispettive parole italiane.
+
+Se avete bisogno d'aiuto per comunicare con la comunità Linux ma non vi sentite
+a vostro agio nello scrivere in inglese, potete chiedere aiuto al manutentore
+della traduzione.
La documentazione del kernel Linux
==================================
@@ -47,9 +70,7 @@ I seguenti documenti descrivono la licenza usata nei sorgenti del kernel Linux
(GPLv2), come licenziare i singoli file; inoltre troverete i riferimenti al
testo integrale della licenza.
-.. warning::
-
- TODO ancora da tradurre
+* :ref:`it_kernel_licensing`
Documentazione per gli utenti
-----------------------------
@@ -90,10 +111,6 @@ vostre modifiche molto più semplice
doc-guide/index
kernel-hacking/index
-.. warning::
-
- TODO ancora da tradurre
-
Documentazione della API del kernel
-----------------------------------
diff --git a/Documentation/translations/it_IT/networking/netdev-FAQ.rst b/Documentation/translations/it_IT/networking/netdev-FAQ.rst
new file mode 100644
index 000000000000..8489ead7cff1
--- /dev/null
+++ b/Documentation/translations/it_IT/networking/netdev-FAQ.rst
@@ -0,0 +1,13 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
+
+.. _it_netdev-FAQ:
+
+==========
+netdev FAQ
+==========
+
+.. warning::
+
+ TODO ancora da tradurre
diff --git a/Documentation/translations/it_IT/process/5.Posting.rst b/Documentation/translations/it_IT/process/5.Posting.rst
index b979266aa884..1476d51eb5e5 100644
--- a/Documentation/translations/it_IT/process/5.Posting.rst
+++ b/Documentation/translations/it_IT/process/5.Posting.rst
@@ -233,10 +233,12 @@ Le etichette in uso più comuni sono:
:ref:`Documentation/translations/it_IT/process/submitting-patches.rst <it_submittingpatches>`.
Codice che non presenta una firma appropriata non potrà essere integrato.
- - Co-developed-by: indica che la patch è stata sviluppata anche da un altro
- sviluppatore assieme all'autore originale. Questo è utile quando più
- persone lavorano sulla stessa patch. Da notare che questa persona deve
- avere anche una riga "Signed-off-by:" nella patch.
+ - Co-developed-by: indica che la patch è stata cosviluppata da diversi
+ sviluppatori; viene usato per assegnare più autori (in aggiunta a quello
+ associato all'etichetta From:) quando più persone lavorano ad una patch.
+ Ogni Co-developed-by: dev'essere seguito immediatamente da un Signed-off-by:
+ del corrispondente coautore. Maggiori dettagli ed esempi sono disponibili
+ in :ref:`Documentation/translations/it_IT/process/submitting-patches.rst <it_submittingpatches>`.
- Acked-by: indica il consenso di un altro sviluppatore (spesso il manutentore
del codice in oggetto) all'integrazione della patch nel kernel.
diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst
index 2fd0e7f79d55..5ef534c95e69 100644
--- a/Documentation/translations/it_IT/process/coding-style.rst
+++ b/Documentation/translations/it_IT/process/coding-style.rst
@@ -859,7 +859,8 @@ racchiusa in #ifdef, potete usare printk(KERN_DEBUG ...).
Il kernel fornisce i seguenti assegnatori ad uso generico:
kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), e vzalloc().
-Per maggiori informazioni, consultate la documentazione dell'API.
+Per maggiori informazioni, consultate la documentazione dell'API:
+:ref:`Documentation/translations/it_IT/core-api/memory-allocation.rst <it_memory_allocation>`
Il modo preferito per passare la dimensione di una struttura è il seguente:
@@ -890,6 +891,11 @@ Il modo preferito per assegnare un vettore a zero è il seguente:
Entrambe verificano la condizione di overflow per la dimensione
d'assegnamento n * sizeof(...), se accade ritorneranno NULL.
+Questi allocatori generici producono uno *stack dump* in caso di fallimento
+a meno che non venga esplicitamente specificato __GFP_NOWARN. Quindi, nella
+maggior parte dei casi, è inutile stampare messaggi aggiuntivi quando uno di
+questi allocatori ritorna un puntatore NULL.
+
15) Il morbo inline
-------------------
diff --git a/Documentation/translations/it_IT/process/deprecated.rst b/Documentation/translations/it_IT/process/deprecated.rst
new file mode 100644
index 000000000000..776f26732a94
--- /dev/null
+++ b/Documentation/translations/it_IT/process/deprecated.rst
@@ -0,0 +1,129 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/process/deprecated.rst <deprecated>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_deprecated:
+
+==============================================================================
+Interfacce deprecate, caratteristiche del linguaggio, attributi, e convenzioni
+==============================================================================
+
+In un mondo perfetto, sarebbe possibile prendere tutti gli usi di
+un'interfaccia deprecata e convertirli in quella nuova, e così sarebbe
+possibile rimuovere la vecchia interfaccia in un singolo ciclo di sviluppo.
+Tuttavia, per via delle dimensioni del kernel, la gerarchia dei manutentori e
+le tempistiche, non è sempre possibile fare questo tipo di conversione tutta
+in una volta. Questo significa che nuove istanze di una vecchia interfaccia
+potrebbero aggiungersi al kernel proprio quando si sta cercando di rimuoverle,
+aumentando così il carico di lavoro. Al fine di istruire gli sviluppatori su
+cosa è considerato deprecato (e perché), è stata creata la seguente lista a cui
+fare riferimento quando qualcuno propone modifiche che usano cose deprecate.
+
+__deprecated
+------------
+Nonostante questo attributo marchi visibilmente un'interfaccia come deprecata,
+`non produce più alcun avviso durante la compilazione
+<https://git.kernel.org/linus/771c035372a036f83353eef46dbb829780330234>`_
+perché uno degli obiettivi del kernel è quello di compilare senza avvisi;
+inoltre, nessuno stava agendo per rimuovere queste interfacce. Nonostante l'uso
+di `__deprecated` in un file d'intestazione sia opportuno per segnare una
+interfaccia come 'vecchia', questa non è una soluzione completa. L'interfaccia
+deve essere rimossa dal kernel, o aggiunta a questo documento per scoraggiarne
+l'uso.
+
+Calcoli codificati negli argomenti di un allocatore
+----------------------------------------------------
+Il calcolo dinamico delle dimensioni (specialmente le moltiplicazioni) non
+dovrebbe essere fatto negli argomenti di funzioni di allocazione di memoria
+(o simili) per via del rischio di overflow. Questo può portare a valori più
+piccoli di quelli che il chiamante si aspettava. L'uso di questo modo di
+allocare può portare ad un overflow della memoria di heap e altri
+malfunzionamenti. (Si fa eccezione per valori numerici per i quali il
+compilatore può generare avvisi circa un potenziale overflow. Tuttavia usare
+i valori numerici come suggerito di seguito è innocuo).
+
+Per esempio, non usate ``count * size`` come argomento::
+
+ foo = kmalloc(count * size, GFP_KERNEL);
+
+Al suo posto, si dovrebbe usare l'allocatore a due argomenti::
+
+ foo = kmalloc_array(count, size, GFP_KERNEL);
+
+Se questo tipo di allocatore non è disponibile, allora dovrebbero essere usate
+le funzioni del tipo *saturate-on-overflow*::
+
+ bar = vmalloc(array_size(count, size));
+
+Un altro tipico caso da evitare è quello di calcolare la dimensione di una
+struttura seguita da un vettore di altre strutture, come nel seguente caso::
+
+ header = kzalloc(sizeof(*header) + count * sizeof(*header->item),
+ GFP_KERNEL);
+
+Invece, usate la seguente funzione::
+
+ header = kzalloc(struct_size(header, item, count), GFP_KERNEL);
+
+Per maggiori dettagli fate riferimento a :c:func:`array_size`,
+:c:func:`array3_size`, e :c:func:`struct_size`, così come la famiglia di
+funzioni :c:func:`check_add_overflow` e :c:func:`check_mul_overflow`.
+
+simple_strtol(), simple_strtoll(), simple_strtoul(), simple_strtoull()
+----------------------------------------------------------------------
+Le funzioni :c:func:`simple_strtol`, :c:func:`simple_strtoll`,
+:c:func:`simple_strtoul`, e :c:func:`simple_strtoull` ignorano volutamente
+i possibili overflow, e questo può portare il chiamante a generare risultati
+inaspettati. Le rispettive funzioni :c:func:`kstrtol`, :c:func:`kstrtoll`,
+:c:func:`kstrtoul`, e :c:func:`kstrtoull` sono da considerarsi le corrette
+sostitute; tuttavia va notato che queste richiedono che la stringa sia
+terminata con il carattere NUL o quello di nuova riga.
+
+strcpy()
+--------
+La funzione :c:func:`strcpy` non fa controlli agli estremi del buffer
+di destinazione. Questo può portare ad un overflow oltre i limiti del
+buffer e generare svariati tipi di malfunzionamenti. Nonostante l'opzione
+`CONFIG_FORTIFY_SOURCE=y` e svariate opzioni del compilatore aiutano
+a ridurne il rischio, non c'è alcuna buona ragione per continuare ad usare
+questa funzione. La versione sicura da usare è :c:func:`strscpy`.
+
+strncpy() su stringhe terminate con NUL
+---------------------------------------
+L'utilizzo di :c:func:`strncpy` non fornisce alcuna garanzia sul fatto che
+il buffer di destinazione verrà terminato con il carattere NUL. Questo
+potrebbe portare a diversi overflow di lettura o altri malfunzionamenti
+causati, appunto, dalla mancanza del terminatore. Questa estende la
+terminazione nel buffer di destinazione quando la stringa d'origine è più
+corta; questo potrebbe portare ad una penalizzazione delle prestazioni per
+chi usa solo stringhe terminate. La versione sicura da usare è
+:c:func:`strscpy`. (chi usa :c:func:`strscpy` e necessita di estendere la
+terminazione con NUL deve aggiungere una chiamata a :c:func:`memset`)
+
+Se il chiamante non usa stringhe terminate con NUL, allora :c:func:`strncpy()`
+può continuare ad essere usata, ma i buffer di destinazione devono essere
+marchiati con l'attributo `__nonstring <https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
+per evitare avvisi durante la compilazione.
+
+strlcpy()
+---------
+La funzione :c:func:`strlcpy`, per prima cosa, legge interamente il buffer di
+origine, magari leggendo più di quanto verrà effettivamente copiato. Questo
+è inefficiente e può portare a overflow di lettura quando la stringa non è
+terminata con NUL. La versione sicura da usare è :c:func:`strscpy`.
+
+Vettori a dimensione variabile (VLA)
+------------------------------------
+
+Usare VLA sullo stack produce codice molto peggiore rispetto a quando si usano
+vettori a dimensione fissa. Questi `problemi di prestazioni <https://git.kernel.org/linus/02361bc77888>`_,
+tutt'altro che banali, sono già un motivo valido per eliminare i VLA; in
+aggiunta sono anche un problema per la sicurezza. La crescita dinamica di un
+vettore nello stack potrebbe eccedere la memoria rimanente in tale segmento.
+Questo può portare a dei malfunzionamenti, potrebbe sovrascrivere
+dati importanti alla fine dello stack (quando il kernel è compilato senza
+`CONFIG_THREAD_INFO_IN_TASK=y`), o sovrascrivere un pezzo di memoria adiacente
+allo stack (quando il kernel è compilato senza `CONFIG_VMAP_STACK=y`).
diff --git a/Documentation/translations/it_IT/process/kernel-enforcement-statement.rst b/Documentation/translations/it_IT/process/kernel-enforcement-statement.rst
index 4ddf5a35b270..1f62da622e26 100644
--- a/Documentation/translations/it_IT/process/kernel-enforcement-statement.rst
+++ b/Documentation/translations/it_IT/process/kernel-enforcement-statement.rst
@@ -1,13 +1,175 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/kernel-enforcement-statement.rst <process_statement_kernel>`
-
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_process_statement_kernel:
Applicazione della licenza sul kernel Linux
===========================================
-.. warning::
+Come sviluppatori del kernel Linux, abbiamo un certo interesse su come il
+nostro software viene usato e su come la sua licenza viene fatta rispettare.
+Il rispetto reciproco degli obblighi di condivisione della GPL-2.0 è
+fondamentale per la sostenibilità di lungo periodo del nostro software e
+della nostra comunità.
+
+Benché ognuno abbia il diritto a far rispettare il diritto d'autore per i
+propri contributi alla nostra comunità, condividiamo l'interesse a far sì che
+ogni azione individuale nel far rispettare i propri diritti sia condotta in
+modo da portare beneficio alla comunità e che non abbia, involontariamente,
+impatti negativi sulla salute e la crescita del nostro ecosistema software.
+Al fine di scoraggiare l'esecuzione di azioni inutili, concordiamo che è nel
+migliore interesse della nostra comunità di sviluppo di impegnarci nel
+rispettare i seguenti obblighi nei confronti degli utenti del kernel Linux
+per conto nostro e di qualsiasi successore ai nostri interessi sul diritto
+d'autore:
+
+ Malgrado le clausole di risoluzione della licenza GPL-2.0, abbiamo
+ concordato che è nel migliore interesse della nostra comunità di sviluppo
+ adottare le seguenti disposizioni della GPL-3.0 come permessi aggiuntivi
+ alla nostra licenza nei confronti di qualsiasi affermazione non difensiva
+ di diritti sulla licenza.
+
+ In ogni caso, se cessano tutte le violazioni di questa Licenza, allora
+ la tua licenza da parte di un dato detentore del copyright viene
+ ripristinata (a) in via cautelativa, a meno che e fino a quando il
+ detentore del copyright non cessa esplicitamente e definitivamente
+ la tua licenza, e (b) in via permanente se il detentore del copyright
+ non ti notifica in alcun modo la violazione entro 60 giorni dalla
+ cessazione della licenza.
+
+ Inoltre, la tua licenza da parte di un dato detentore del copyright
+ viene ripristinata in maniera permanente se il detentore del copyright
+ ti notifica la violazione in maniera adeguata, se questa è la prima
+ volta che ricevi una notifica di violazione di questa Licenza (per
+ qualunque Programma) dallo stesso detentore di copyright, e se rimedi
+ alla violazione entro 30 giorni dalla data di ricezione della notifica
+ di violazione.
+
+Fornendo queste garanzie, abbiamo l'intenzione di incoraggiare l'uso del
+software. Vogliamo che le aziende e le persone usino, modifichino e
+distribuiscano questo software. Vogliamo lavorare con gli utenti in modo
+aperto e trasparente per eliminare ogni incertezza circa le nostre aspettative
+sul rispetto o l'ottemperanza alla licenza che possa limitare l'uso del nostro
+software. Vediamo l'azione legale come ultima spiaggia, da avviare solo quando
+gli altri sforzi della comunità hanno fallito nel risolvere il problema.
+
+Per finire, una volta che un problema di non rispetto della licenza viene
+risolto, speriamo che gli utenti si sentano benvenuti ad aggregarsi a noi
+nello sviluppo di questo progetto. Lavorando assieme, saremo più forti.
+
+Ad eccezione di dove specificato, parliamo per noi stessi, e non per una qualsiasi
+azienda per la quale lavoriamo oggi, o per cui abbiamo lavorato in passato, o
+lavoreremo in futuro.
+
- TODO ancora da tradurre
+ - Laura Abbott
+ - Bjorn Andersson (Linaro)
+ - Andrea Arcangeli
+ - Neil Armstrong
+ - Jens Axboe
+ - Pablo Neira Ayuso
+ - Khalid Aziz
+ - Ralf Baechle
+ - Felipe Balbi
+ - Arnd Bergmann
+ - Ard Biesheuvel
+ - Tim Bird
+ - Paolo Bonzini
+ - Christian Borntraeger
+ - Mark Brown (Linaro)
+ - Paul Burton
+ - Javier Martinez Canillas
+ - Rob Clark
+ - Kees Cook (Google)
+ - Jonathan Corbet
+ - Dennis Dalessandro
+ - Vivien Didelot (Savoir-faire Linux)
+ - Hans de Goede
+ - Mel Gorman (SUSE)
+ - Sven Eckelmann
+ - Alex Elder (Linaro)
+ - Fabio Estevam
+ - Larry Finger
+ - Bhumika Goyal
+ - Andy Gross
+ - Juergen Gross
+ - Shawn Guo
+ - Ulf Hansson
+ - Stephen Hemminger (Microsoft)
+ - Tejun Heo
+ - Rob Herring
+ - Masami Hiramatsu
+ - Michal Hocko
+ - Simon Horman
+ - Johan Hovold (Hovold Consulting AB)
+ - Christophe JAILLET
+ - Olof Johansson
+ - Lee Jones (Linaro)
+ - Heiner Kallweit
+ - Srinivas Kandagatla
+ - Jan Kara
+ - Shuah Khan (Samsung)
+ - David Kershner
+ - Jaegeuk Kim
+ - Namhyung Kim
+ - Colin Ian King
+ - Jeff Kirsher
+ - Greg Kroah-Hartman (Linux Foundation)
+ - Christian König
+ - Vinod Koul
+ - Krzysztof Kozlowski
+ - Viresh Kumar
+ - Aneesh Kumar K.V
+ - Julia Lawall
+ - Doug Ledford
+ - Chuck Lever (Oracle)
+ - Daniel Lezcano
+ - Shaohua Li
+ - Xin Long
+ - Tony Luck
+ - Catalin Marinas (Arm Ltd)
+ - Mike Marshall
+ - Chris Mason
+ - Paul E. McKenney
+ - Arnaldo Carvalho de Melo
+ - David S. Miller
+ - Ingo Molnar
+ - Kuninori Morimoto
+ - Trond Myklebust
+ - Martin K. Petersen (Oracle)
+ - Borislav Petkov
+ - Jiri Pirko
+ - Josh Poimboeuf
+ - Sebastian Reichel (Collabora)
+ - Guenter Roeck
+ - Joerg Roedel
+ - Leon Romanovsky
+ - Steven Rostedt (VMware)
+ - Frank Rowand
+ - Ivan Safonov
+ - Anna Schumaker
+ - Jes Sorensen
+ - K.Y. Srinivasan
+ - David Sterba (SUSE)
+ - Heiko Stuebner
+ - Jiri Kosina (SUSE)
+ - Willy Tarreau
+ - Dmitry Torokhov
+ - Linus Torvalds
+ - Thierry Reding
+ - Rik van Riel
+ - Luis R. Rodriguez
+ - Geert Uytterhoeven (Glider bvba)
+ - Eduardo Valentin (Amazon.com)
+ - Daniel Vetter
+ - Linus Walleij
+ - Richard Weinberger
+ - Dan Williams
+ - Rafael J. Wysocki
+ - Arvind Yadav
+ - Masahiro Yamada
+ - Wei Yongjun
+ - Lv Zheng
+ - Marc Zyngier (Arm Ltd)
diff --git a/Documentation/translations/it_IT/process/license-rules.rst b/Documentation/translations/it_IT/process/license-rules.rst
new file mode 100644
index 000000000000..f058e06996dc
--- /dev/null
+++ b/Documentation/translations/it_IT/process/license-rules.rst
@@ -0,0 +1,500 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/process/license-rules.rst <kernel_licensing>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_kernel_licensing:
+
+Regole per licenziare il kernel Linux
+=====================================
+
+Il kernel Linux viene rilasciato sotto i termini definiti dalla seconda
+versione della licenza *GNU General Public License* (GPL-2.0), di cui una
+copia è disponibile nel file LICENSES/preferred/GPL-2.0; a questo si
+aggiunge un'eccezione per le chiamate di sistema come descritto in
+LICENSES/exceptions/Linux-syscall-note; tutto ciò è descritto nel file COPYING.
+
+Questo documento fornisce una descrizione su come ogni singolo file sorgente
+debba essere licenziato per far sì che sia chiaro e non ambiguo. Questo non
+sostituisce la licenza del kernel.
+
+La licenza descritta nel file COPYING si applica ai sorgenti del kernel nella
+loro interezza, tuttavia i singoli file sorgenti possono avere licenze diverse,
+purché compatibili con la GPL-2.0::
+
+ GPL-1.0+ : GNU General Public License v1.0 o successiva
+ GPL-2.0+ : GNU General Public License v2.0 o successiva
+ LGPL-2.0 : GNU Library General Public License v2
+ LGPL-2.0+ : GNU Library General Public License v2 o successiva
+ LGPL-2.1 : GNU Lesser General Public License v2.1
+ LGPL-2.1+ : GNU Lesser General Public License v2.1 o successiva
+
+A parte questo, i singoli file possono essere forniti con una doppia licenza,
+per esempio con una delle varianti compatibili della GPL e alternativamente con
+una licenza permissiva come BSD, MIT eccetera.
+
+I file d'intestazione per l'API verso lo spazio utente (UAPI) descrivono
+le interfacce usate dai programmi, e per questo sono un caso speciale.
+Secondo le note nel file COPYING, le chiamate di sistema sono un chiaro
+confine oltre il quale non si estendono i requisiti della GPL per quei
+programmi che le usano per comunicare con il kernel. Dato che i file
+d'intestazione UAPI devono poter essere inclusi nei sorgenti di un
+qualsiasi programma eseguibile sul kernel Linux, questi meritano
+un'eccezione documentata da una clausola speciale.
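+
+Per esempio, la prima riga di un ipotetico file d'intestazione UAPI potrebbe
+essere::
+
+    /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */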
+
+Il modo più comune per indicare la licenza dei file sorgenti è quello di
+aggiungere il corrispondente blocco di testo come commento in testa a detto
+file. Per via della formattazione, dei refusi, eccetera, questi blocchi di
+testo sono difficili da identificare dagli strumenti usati per verificare il
+rispetto delle licenze.
+
+Un'alternativa ai blocchi di testo è data dall'uso degli identificatori
+*Software Package Data Exchange* (SPDX) in ogni file sorgente. Gli
+identificatori di licenza SPDX sono analizzabili dalle macchine e sono precisi
+simboli stenografici che identificano la licenza sotto la quale viene
+licenziato il file che lo include. Gli identificatori di licenza SPDX sono
+gestiti dal gruppo di lavoro SPDX presso la Linux Foundation e sono stati
+concordati fra i soci nell'industria, gli sviluppatori di strumenti, e i
+rispettivi gruppi legali. Per maggiori informazioni, consultate
+https://spdx.org/
+
+Il kernel Linux richiede un preciso identificatore SPDX in tutti i file
+sorgenti. Gli identificatori validi verranno spiegati nella sezione
+`Identificatori di licenza`_ e sono stati copiati dalla lista ufficiale di
+licenze SPDX assieme al rispettivo testo come mostrato in
+https://spdx.org/licenses/.
+
+Sintassi degli identificatori di licenza
+----------------------------------------
+
+1. Posizionamento:
+
+ L'identificativo di licenza SPDX dev'essere posizionato come prima riga
+ possibile di un file che possa contenere commenti. Per la maggior parte
+ dei file questa è la prima riga, fanno eccezione gli script che richiedono
+ come prima riga '#!PATH_TO_INTERPRETER'. Per questi script l'identificativo
+ SPDX finisce nella seconda riga.
+
+|
+
+2. Stile:
+
+ L'identificativo di licenza SPDX viene aggiunto sotto forma di commento.
+ Lo stile del commento dipende dal tipo di file::
+
+ sorgenti C: // SPDX-License-Identifier: <SPDX License Expression>
+ intestazioni C: /* SPDX-License-Identifier: <SPDX License Expression> */
+ ASM: /* SPDX-License-Identifier: <SPDX License Expression> */
+ scripts: # SPDX-License-Identifier: <SPDX License Expression>
+ .rst: .. SPDX-License-Identifier: <SPDX License Expression>
+ .dts{i}: // SPDX-License-Identifier: <SPDX License Expression>
+
+ Se un particolare programma non dovesse riuscire a gestire lo stile
+ principale per i commenti, allora dev'essere usato il meccanismo accettato
+ dal programma. Questo è il motivo per cui si ha "/\* \*/" nei file
+ d'intestazione C. Notammo che 'ld' falliva nell'analizzare i commenti del
+ C++ nei file .lds che venivano prodotti. Oggi questo è stato corretto,
+ ma ci sono in giro ancora vecchi programmi che non sono in grado di
+ gestire lo stile dei commenti del C++.
+
+|
+
+3. Sintassi:
+
+ Una <espressione di licenza SPDX> può essere scritta usando l'identificatore
+ SPDX della licenza come indicato nella lista di licenze SPDX, oppure la
+ combinazione di due identificatori SPDX separati da "WITH" per i casi
+ eccezionali. Quando si usano più licenze l'espressione viene formata da
+ sottoespressioni separate dalle parole chiave "AND", "OR" e racchiuse fra
+ parentesi tonde "(", ")".
+
+ Gli identificativi di licenza per licenze come la [L]GPL che si avvalgono
+ dell'opzione 'o successive' si formano aggiungendo alla fine il simbolo "+"
+ per indicare l'opzione 'o successive'::
+
+ // SPDX-License-Identifier: GPL-2.0+
+ // SPDX-License-Identifier: LGPL-2.1+
+
+ WITH dovrebbe essere usato quando sono necessarie delle modifiche alla
+ licenza. Per esempio, la UAPI del kernel linux usa l'espressione::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ // SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note
+
+ Altri esempi di usi di WITH all'interno del kernel sono::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH mif-exception
+ // SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
+
+ Le eccezioni si possono usare solo in combinazione con identificatori di
+ licenza. Gli identificatori di licenza riconosciuti sono elencati nei
+ corrispondenti file d'eccezione. Per maggiori dettagli consultate
+ `Eccezioni`_ nel capitolo `Identificatori di licenza`_
+
+ La parola chiave OR dovrebbe essere usata solo quando si usa una doppia
+ licenza e solo una dev'essere scelta. Per esempio, alcuni file dtsi sono
+ disponibili con doppia licenza::
+
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+ Esempi dal kernel di espressioni per file licenziati con doppia licenza
+ sono::
+
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ // SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ // SPDX-License-Identifier: GPL-2.0 OR MPL-1.1
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT
+ // SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause OR OpenSSL
+
+ La parola chiave AND dovrebbe essere usata quando i termini di più licenze
+ si applicano ad un file. Per esempio, quando il codice viene preso da
+ un altro progetto il quale dà il permesso di aggiungerlo nel kernel ma
+ richiede che i termini originali della licenza rimangano intatti::
+
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
+
+ Di seguito, un altro esempio dove entrambi i termini di licenza devono
+ essere rispettati::
+
+ // SPDX-License-Identifier: GPL-1.0+ AND LGPL-2.1+
+
+Identificatori di licenza
+-------------------------
+
+Le licenze attualmente in uso, così come le licenze aggiunte al kernel, possono
+essere categorizzate in:
+
+1. _`Licenze raccomandate`:
+
+ Ovunque possibile le licenze qui indicate dovrebbero essere usate perché
+ pienamente compatibili e molto usate. Queste licenze sono disponibili nei
+ sorgenti del kernel, nella cartella::
+
+ LICENSES/preferred/
+
+ I file in questa cartella contengono il testo completo della licenza e i
+ `Metatag`_. Il nome di questi file è lo stesso usato come identificatore
+ di licenza SPDX e che deve essere usato nei file sorgenti.
+
+ Esempi::
+
+ LICENSES/preferred/GPL-2.0
+
+ Contiene il testo della seconda versione della licenza GPL e i metatag
+ necessari::
+
+ LICENSES/preferred/MIT
+
+ Contiene il testo della licenza MIT e i metatag necessari.
+
+ _`Metatag`:
+
+ I seguenti metatag devono essere presenti in un file di licenza:
+
+ - Valid-License-Identifier:
+
+ Una o più righe che dichiarano quali identificatori di licenza sono validi
+ all'interno del progetto per far riferimento alla licenza in questione.
+ Solitamente, questo è un unico identificatore valido, ma per esempio le
+ licenze che permettono l'opzione 'o successive' hanno due identificatori
+ validi.
+
+ - SPDX-URL:
+
+ L'URL della pagina SPDX che contiene informazioni aggiuntive riguardanti
+ la licenza.
+
+ - Usage-Guidance:
+
+ Testo in formato libero per dare suggerimenti agli utenti. Il testo deve
+ includere degli esempi su come usare gli identificatori di licenza SPDX
+ in un file sorgente in conformità con le linee guida in
+ `Sintassi degli identificatori di licenza`_.
+
+ - License-Text:
+
+ Tutto il testo che compare dopo questa etichetta viene trattato
+ come se fosse parte del testo originale della licenza.
+
+ Esempi::
+
+ Valid-License-Identifier: GPL-2.0
+ Valid-License-Identifier: GPL-2.0+
+ SPDX-URL: https://spdx.org/licenses/GPL-2.0.html
+ Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU General Public License (GPL) version 2 only' use:
+ SPDX-License-Identifier: GPL-2.0
+ For 'GNU General Public License (GPL) version 2 or any later version' use:
+ SPDX-License-Identifier: GPL-2.0+
+ License-Text:
+ Full license text
+
+ ::
+
+ SPDX-License-Identifier: MIT
+ SPDX-URL: https://spdx.org/licenses/MIT.html
+ Usage-Guide:
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: MIT
+ License-Text:
+ Full license text
+
+|
+
+2. Licenze deprecate:
+
+ Queste licenze dovrebbero essere usate solo per codice già esistente
+ o quando si prende codice da altri progetti. Le licenze sono disponibili
+ nei sorgenti del kernel nella cartella::
+
+ LICENSES/deprecated/
+
+ I file in questa cartella contengono il testo completo della licenza e i
+ `Metatag`_. Il nome di questi file è lo stesso usato come identificatore
+ di licenza SPDX e che deve essere usato nei file sorgenti.
+
+ Esempi::
+
+ LICENSES/deprecated/ISC
+
+ Contiene il testo della licenza Internet System Consortium e i suoi
+ metatag::
+
+ LICENSES/deprecated/GPL-1.0
+
+ Contiene il testo della versione 1 della licenza GPL e i suoi metatag.
+
+ Metatag:
+
+ I metatag necessari per le 'altre' ('other') licenze sono gli stessi
+ usati per le `Licenze raccomandate`_.
+
+ Esempio del formato del file::
+
+ Valid-License-Identifier: ISC
+ SPDX-URL: https://spdx.org/licenses/ISC.html
+ Usage-Guide:
+ Usage of this license in the kernel for new code is discouraged
+ and it should solely be used for importing code from an already
+ existing project.
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: ISC
+ License-Text:
+ Full license text
+
+|
+
+3. Solo per doppie licenze
+
+ Queste licenze dovrebbero essere usate solamente per codice licenziato in
+ combinazione con un'altra licenza che solitamente è quella preferita.
+ Queste licenze sono disponibili nei sorgenti del kernel nella cartella::
+
+ LICENSES/dual
+
+ I file in questa cartella contengono il testo completo della rispettiva
+ licenza e i suoi `Metatag`_. I nomi dei file sono identici agli
+ identificatori di licenza SPDX che dovrebbero essere usati nei file
+ sorgenti.
+
+ Esempi::
+
+ LICENSES/dual/MPL-1.1
+
+ Questo file contiene il testo della versione 1.1 della licenza *Mozilla
+ Public License* e i metatag necessari::
+
+ LICENSES/dual/Apache-2.0
+
+ Questo file contiene il testo della versione 2.0 della licenza Apache e i
+ metatag necessari.
+
+ Metatag:
+
+ I requisiti per le 'altre' ('*other*') licenze sono identici a quelli per le
+ `Licenze raccomandate`_.
+
+ Esempio del formato del file::
+
+ Valid-License-Identifier: MPL-1.1
+ SPDX-URL: https://spdx.org/licenses/MPL-1.1.html
+ Usage-Guide:
+ Do NOT use. The MPL-1.1 is not GPL2 compatible. It may only be used for
+ dual-licensed files where the other license is GPL2 compatible.
+ If you end up using this it MUST be used together with a GPL2 compatible
+ license using "OR".
+ To use the Mozilla Public License version 1.1 put the following SPDX
+ tag/value pair into a comment according to the placement guidelines in
+ the licensing rules documentation:
+ SPDX-License-Identifier: MPL-1.1
+ License-Text:
+ Full license text
+
+|
+
+4. _`Eccezioni`:
+
+ Alcune licenze possono essere corrette con delle eccezioni che forniscono
+ diritti aggiuntivi. Queste eccezioni sono disponibili nei sorgenti del
+ kernel nella cartella::
+
+ LICENSES/exceptions/
+
+ I file in questa cartella contengono il testo completo dell'eccezione e i
+ `Metatag per le eccezioni`_.
+
+ Esempi::
+
+ LICENSES/exceptions/Linux-syscall-note
+
+ Contiene la descrizione dell'eccezione per le chiamate di sistema Linux
+ così come documentato nel file COPYING del kernel Linux; questo viene usato
+ per i file d'intestazione per la UAPI. Per esempio
+ /\* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note \*/::
+
+ LICENSES/exceptions/GCC-exception-2.0
+
+ Contiene la 'eccezione di linking' che permette di collegare qualsiasi
+ binario, indipendentemente dalla sua licenza, con un compilato il cui file
+ sorgente è marchiato con questa eccezione. Questo è necessario per creare
+ eseguibili dai sorgenti che non sono compatibili con la GPL.
+
+ _`Metatag per le eccezioni`:
+
+ Un file contenente un'eccezione deve avere i seguenti metatag:
+
+ - SPDX-Exception-Identifier:
+
+ Un identificatore d'eccezione che possa essere usato in combinazione con
+ un identificatore di licenza SPDX.
+
+ - SPDX-URL:
+
+ L'URL della pagina SPDX che contiene informazioni aggiuntive riguardanti
+ l'eccezione.
+
+ - SPDX-Licenses:
+
+ Una lista di licenze SPDX separate da virgola, che possono essere usate
+ con l'eccezione.
+
+ - Usage-Guidance:
+
+ Testo in formato libero per dare suggerimenti agli utenti. Il testo deve
+ includere degli esempi su come usare gli identificatori di licenza SPDX
+ in un file sorgente in conformità con le linee guida in
+ `Sintassi degli identificatori di licenza`_.
+
+ - Exception-Text:
+
+ Tutto il testo che compare dopo questa etichetta viene trattato
+ come se fosse parte del testo originale della licenza.
+
+ Esempi::
+
+ SPDX-Exception-Identifier: Linux-syscall-note
+ SPDX-URL: https://spdx.org/licenses/Linux-syscall-note.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+
+ Usage-Guidance:
+ This exception is used together with one of the above SPDX-Licenses
+ to mark user-space API (uapi) header files so they can be included
+ into non GPL compliant user-space application code.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH Linux-syscall-note
+ Exception-Text:
+ Full exception text
+
+ ::
+
+ SPDX-Exception-Identifier: GCC-exception-2.0
+ SPDX-URL: https://spdx.org/licenses/GCC-exception-2.0.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+
+ Usage-Guidance:
+ The "GCC Runtime Library exception 2.0" is used together with one
+ of the above SPDX-Licenses for code imported from the GCC runtime
+ library.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH GCC-exception-2.0
+ Exception-Text:
+ Full exception text
+
+Per ogni identificatore di licenza SPDX e per le eccezioni dev'esserci un file
+nella sotto-cartella LICENSES. Questo è necessario per permettere agli
+strumenti di effettuare verifiche (come checkpatch.pl), per avere le licenze
+disponibili per la lettura e per estrarre i diritti dai sorgenti, così come
+raccomandato da diverse organizzazioni FOSS, per esempio l'`iniziativa FSFE
+REUSE <https://reuse.software/>`_.
+
+_`MODULE_LICENSE`
+-----------------
+
+ I moduli del kernel necessitano di un'etichetta MODULE_LICENSE(). Questa
+ etichetta non sostituisce le informazioni sulla licenza del codice sorgente
+ (SPDX-License-Identifier) né fornisce informazioni che esprimono o
+ determinano l'esatta licenza sotto la quale viene rilasciato.
+
+ Il solo scopo di questa etichetta è quello di fornire sufficienti
+ informazioni al caricatore di moduli del kernel, o agli strumenti in spazio
+ utente, per capire se il modulo è libero o proprietario.
+
+ Le stringhe di licenza valide per MODULE_LICENSE() sono elencate qui sotto
+ (più avanti, dopo la tabella, trovate anche uno scheletro d'esempio):
+
+ ============================= =============================================
+ "GPL" Il modulo è licenziato con la GPL versione 2.
+ Questo non fa distinzione fra GPL-2.0-only o
+ GPL-2.0-or-later. L'esatta licenza può essere
+ determinata solo leggendo i corrispondenti
+ file sorgenti.
+
+ "GPL v2" Stesso significato di "GPL". Esiste per
+ motivi storici.
+
+ "GPL and additional rights" Questa è una variante che esiste per motivi
+ storici che indica che i sorgenti di un
+ modulo sono rilasciati sotto una variante
+ della licenza GPL v2 e quella MIT. Per favore
+ non utilizzatela per codice nuovo.
+
+ "Dual MIT/GPL" Questo è il modo corretto per esprimere il
+ fatto che il modulo è rilasciato con
+ doppia licenza a scelta fra: una variante
+ della GPL v2 o la licenza MIT.
+
+ "Dual BSD/GPL" Questo modulo è rilasciato con doppia licenza
+ a scelta fra: una variante della GPL v2 o la
+ licenza BSD. La variante esatta della licenza
+ BSD può essere determinata solo attraverso i
+ corrispondenti file sorgenti.
+
+ "Dual MPL/GPL" Questo modulo è rilasciato con doppia licenza
+ a scelta fra: una variante della GPL v2 o la
+ Mozilla Public License (MPL). La variante
+ esatta della licenza MPL può essere
+ determinata solo attraverso i corrispondenti
+ file sorgenti.
+
+ "Proprietary" Questo modulo è rilasciato con licenza
+ proprietaria. Questa stringa è solo per i
+ moduli proprietari di terze parti e non può
+ essere usata per quelli che risiedono nei
+ sorgenti del kernel. I moduli etichettati in
+ questo modo stanno contaminando il kernel e
+ gli viene assegnato un flag 'P'; quando
+ vengono caricati, il caricatore di moduli del
+ kernel si rifiuterà di collegare questi
+ moduli ai simboli che sono stati esportati
+ con EXPORT_SYMBOL_GPL().
+
+ ============================= =============================================
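+
+ A puro titolo illustrativo, uno scheletro minimale di modulo (con nomi di
+ fantasia) che dichiara sia l'identificatore SPDX nel sorgente sia la
+ corrispondente etichetta MODULE_LICENSE()::
+
+     // SPDX-License-Identifier: GPL-2.0
+     #include <linux/module.h>
+
+     static int __init esempio_init(void)
+     {
+             return 0;
+     }
+
+     static void __exit esempio_exit(void)
+     {
+     }
+
+     module_init(esempio_init);
+     module_exit(esempio_exit);
+
+     MODULE_LICENSE("GPL");
+     MODULE_DESCRIPTION("Modulo d'esempio a solo scopo illustrativo");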
diff --git a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
index 24a133f0a51d..276db0e37f43 100644
--- a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
+++ b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
@@ -1,13 +1,946 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/maintainer-pgp-guide.rst <pgpguide>`
+:Translator: Alessia Mantegazza <amantegazza@vaga.pv.it>
.. _it_pgpguide:
+=========================================
+La guida a PGP per manutentori del kernel
+=========================================
+
+:Author: Konstantin Ryabitsev <konstantin@linuxfoundation.org>
+
+Questo documento è destinato agli sviluppatori del kernel Linux, in particolar
+modo ai manutentori. Contiene degli approfondimenti riguardo informazioni che
+sono state affrontate in maniera più generale nella sezione
+"`Protecting Code Integrity`_" pubblicata dalla Linux Foundation.
+Per approfondire alcuni argomenti trattati in questo documento è consigliato
+leggere il documento sopraindicato.
+
+.. _`Protecting Code Integrity`: https://github.com/lfit/itpol/blob/master/protecting-code-integrity.md
+
+Il ruolo di PGP nello sviluppo del kernel Linux
+===============================================
+
+PGP aiuta ad assicurare l'integrità del codice prodotto dalla comunità
+di sviluppo del kernel e, in secondo luogo, stabilisce canali di comunicazione
+affidabili tra sviluppatori attraverso lo scambio di email firmate con PGP.
+
+Il codice sorgente del kernel Linux è disponibile principalmente in due
+formati:
+
+- repositori distribuiti di sorgenti (git)
+- rilasci periodici di istantanee (archivi tar)
+
+Sia i repositori git che gli archivi tar portano le firme PGP degli
+sviluppatori che hanno creato i rilasci ufficiali del kernel. Queste firme
+offrono una garanzia crittografica che le versioni scaricabili rese disponibili
+via kernel.org, o altri portali, siano identiche a quelle che gli sviluppatori
+hanno sul loro posto di lavoro. A tal scopo:
+
+- i repositori git forniscono firme PGP per ogni tag
+- gli archivi tar hanno firme separate per ogni archivio
+
+.. _it_devs_not_infra:
+
+Fidatevi degli sviluppatori e non dell'infrastruttura
+-----------------------------------------------------
+
+Fin dal 2011, quando i sistemi di kernel.org furono compromessi, il principio
+generale del progetto Kernel Archives è stato quello di assumere che qualsiasi
+parte dell'infrastruttura possa essere compromessa in ogni momento. Per questa
+ragione, gli amministratori hanno intrapreso deliberatamente dei passi per
+enfatizzare che la fiducia debba risiedere sempre negli sviluppatori e mai nel
+codice che gestisce l'infrastruttura, indipendentemente da quali che siano le
+pratiche di sicurezza messe in atto.
+
+Il principio sopra indicato è la ragione per la quale è necessaria questa
+guida. Vogliamo essere sicuri che il riporre la fiducia negli sviluppatori
+non sia fatto semplicemente per incolpare qualcun altro per future falle di
+sicurezza. L'obiettivo è quello di fornire una serie di linee guida che gli
+sviluppatori possano seguire per creare un ambiente di lavoro sicuro e
+salvaguardare le chiavi PGP usate nello stabilire l'integrità del kernel Linux
+stesso.
+
+.. _it_pgp_tools:
+
+Strumenti PGP
+=============
+
+Usare GnuPG v2
+--------------
+
+La vostra distribuzione potrebbe avere già installato GnuPG, dovete solo
+verificare che stia utilizzando la versione 2.x e non la serie 1.4 --
+molte distribuzioni forniscono entrambe, di base il comando ``gpg``
+invoca GnuPG v.1. Per controllare usate::
+
+ $ gpg --version | head -n1
+
+Se visualizzate ``gpg (GnuPG) 1.4.x``, allora state usando GnuPG v.1.
+Provate il comando ``gpg2`` (se non lo avete, potreste aver bisogno
+di installare il pacchetto gnupg2)::
+
+ $ gpg2 --version | head -n1
+
+Se visualizzate ``gpg (GnuPG) 2.x.x``, allora siete pronti a partire.
+Questa guida assume che abbiate la versione 2.2 (o successiva) di GnuPG.
+Se state usando la versione 2.0, alcuni dei comandi indicati qui non
+funzioneranno, in questo caso considerate un aggiornamento all'ultima versione,
+la 2.2. Versioni di gnupg-2.1.11 e successive dovrebbero essere compatibili
+per gli obiettivi di questa guida.
+
+Se avete entrambi i comandi: ``gpg`` e ``gpg2``, assicuratevi di utilizzare
+sempre la versione V2, e non quella vecchia. Per evitare errori potreste creare
+un alias::
+
+ $ alias gpg=gpg2
+
+Potete mettere questa opzione nel vostro ``.bashrc`` in modo da essere sicuri.
+
+Configurare le opzioni di gpg-agent
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+L'agente GnuPG è uno strumento di aiuto che partirà automaticamente ogni volta
+che userete il comando ``gpg`` e funzionerà in background con l'obiettivo di
+memorizzare nella cache la passphrase. Ci sono due opzioni che dovreste conoscere
+per personalizzare la scadenza della passphrase nella cache:
+
+- ``default-cache-ttl`` (secondi): Se usate ancora la stessa chiave prima
+ che il time-to-live termini, il conto alla rovescia si resetterà per un
+ altro periodo. Di base è di 600 (10 minuti).
+
+- ``max-cache-ttl`` (secondi): indipendentemente da quanto sia recente l'ultimo
+ uso della chiave da quando avete inserito la passphrase, se il massimo
+ time-to-live è scaduto, dovrete reinserire nuovamente la passphrase.
+ Di base è di 30 minuti.
+
+Se ritenete entrambi questi valori di base troppo corti (o troppo lunghi),
+potete creare il vostro file ``~/.gnupg/gpg-agent.conf`` ed impostare i vostri
+valori::
+
+ # set to 30 minutes for regular ttl, and 2 hours for max ttl
+ default-cache-ttl 1800
+ max-cache-ttl 7200
+
+.. note::
+
+ Non è più necessario far partire l'agente gpg manualmente all'inizio della
+ vostra sessione. Dovreste controllare i file rc per rimuovere tutto ciò che
+ riguarda le vecchie versioni di GnuPG, poiché potrebbero non svolgere più
+ bene il loro compito.
+
+Impostare un *refresh* con cronjob
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Potreste aver bisogno di rinfrescare regolarmente il vostro portachiavi in
+modo da aggiornare le chiavi pubbliche di altre persone, lavoro che è svolto
+al meglio con un cronjob giornaliero::
+
+ @daily /usr/bin/gpg2 --refresh >/dev/null 2>&1
+
+Controllate il percorso assoluto del vostro comando ``gpg`` o ``gpg2`` e usate
+il comando ``gpg2`` se per voi ``gpg`` corrisponde alla versione GnuPG v.1.
+
+.. _it_master_key:
+
+Proteggere la vostra chiave PGP primaria
========================================
-Guida a PGP per i manutentori del kernel
-========================================
+
+Questa guida parte dal presupposto che abbiate già una chiave PGP che usate
+per lo sviluppo del kernel Linux. Se non ne avete ancora una, date uno sguardo
+al documento "`Protecting Code Integrity`_" che abbiamo menzionato prima.
+
+Dovreste inoltre creare una nuova chiave se quella attuale è inferiore a 2048
+bit (RSA).
+
+Chiave principale o sottochiavi
+-------------------------------
+
+Le sottochiavi sono chiavi PGP totalmente indipendenti, e sono collegate alla
+chiave principale attraverso firme certificate. È quindi importante
+comprendere i seguenti punti:
+
+1. Non ci sono differenze tecniche tra la chiave principale e la sottochiave.
+2. In fase di creazione, assegniamo limitazioni funzionali ad ogni chiave
+ assegnando capacità specifiche.
+3. Una chiave PGP può avere 4 capacità:
+
+ - **[S]** può essere usata per firmare
+ - **[E]** può essere usata per criptare
+ - **[A]** può essere usata per autenticare
+ - **[C]** può essere usata per certificare altre chiavi
+
+4. Una singola chiave può avere più capacità
+5. Una sottochiave è completamente indipendente dalla chiave principale.
+ Un messaggio criptato con la sottochiave non può essere decrittato con
+ quella principale. Se perdete la vostra sottochiave privata, non può
+ essere rigenerata in nessun modo da quella principale.
+
+La chiave con capacità **[C]** (certify) è identificata come la chiave
+principale perché è l'unica che può essere usata per indicare la relazione
+con altre chiavi. Solo la chiave **[C]** può essere usata per:
+
+- Aggiungere o revocare altre chiavi (sottochiavi) che hanno capacità S/E/A
+- Aggiungere, modificare o eliminare le identità (uid) associate alla chiave
+- Aggiungere o modificare la data di termine di sé stessa o di ogni sottochiave
+- Firmare le chiavi di altre persone a scopo di creare una rete di fiducia
+
+Di base, alla creazione di nuove chiavi, GnuPG genera quanto segue:
+
+- Una chiave madre che porta sia la capacità di certificazione che quella
+ di firma (**[SC]**)
+- Una sottochiave separata con capacità di criptaggio (**[E]**)
+
+Se avete usato i parametri di base per generare la vostra chiave, quello
+sarà il risultato. Potete verificarlo utilizzando ``gpg --list-secret-keys``,
+per esempio::
+
+ sec rsa2048 2018-01-23 [SC] [expires: 2020-01-23]
+ 000000000000000000000000AAAABBBBCCCCDDDD
+ uid [ultimate] Alice Dev <adev@kernel.org>
+ ssb rsa2048 2018-01-23 [E] [expires: 2020-01-23]
+
+Qualsiasi chiave che abbia la capacità **[C]** è la vostra chiave madre,
+indipendentemente da quali altre capacità potreste averle assegnato.
+
+La lunga riga sotto la voce ``sec`` è la vostra impronta digitale --
+negli esempi che seguono, quando vedete ``[fpr]`` ci si riferisce a questa
+stringa di 40 caratteri.
+
+Assicuratevi che la vostra passphrase sia forte
+-----------------------------------------------
+
+GnuPG utilizza le passphrase per criptare la vostra chiave privata prima
+di salvarla sul disco. In questo modo, anche se il contenuto della vostra
+cartella ``.gnupg`` venisse letto o trafugato nella sua interezza, gli
+attaccanti non potrebbero comunque utilizzare le vostre chiavi private senza
+aver prima ottenuto la passphrase per decriptarle.
+
+È assolutamente essenziale che le vostre chiavi private siano protette da
+una passphrase forte. Per impostarla o cambiarla, usate::
+
+ $ gpg --change-passphrase [fpr]
+
+Create una sottochiave di firma separata
+----------------------------------------
+
+Il nostro obiettivo è di proteggere la chiave primaria spostandola su un
+dispositivo sconnesso dalla rete, dunque se avete solo una chiave combinata
+**[SC]** allora dovreste creare una sottochiave di firma separata::
+
+ $ gpg --quick-add-key [fpr] ed25519 sign
+
+Ricordate di informare il keyserver del vostro cambiamento, cosicché altri
+possano ricevere la vostra nuova sottochiave::
+
+ $ gpg --send-key [fpr]
+
+.. note:: Supporto ECC in GnuPG
+ GnuPG 2.1 e successivi supportano pienamente *Elliptic Curve Cryptography*,
+ con la possibilità di combinare sottochiavi ECC con le tradizionali chiavi
+ primarie RSA. Il principale vantaggio della crittografia ECC è che è molto
+ più veloce da calcolare e crea firme più piccole se confrontate byte per
+ byte con le chiavi RSA a più di 2048 bit. A meno che non pensiate di
+ utilizzare un dispositivo smartcard che non supporta le operazioni ECC, vi
+ raccomandiamo di creare sottochiavi di firma ECC per il vostro lavoro col
+ kernel.
+
+ Se per qualche ragione preferite rimanere con sottochiavi RSA, nel comando
+ precedente, sostituite "ed25519" con "rsa2048".
+
+Copia di riserva della chiave primaria per gestire il recupero da disastro
+--------------------------------------------------------------------------
+
+Maggiori sono le firme di altri sviluppatori che vengono applicate alla vostra,
+maggiori saranno i motivi per avere una copia di riserva che non sia digitale,
+al fine di effettuare un recupero da disastro.
+
+Il modo migliore per creare una copia fisica della vostra chiave privata è
+l'uso del programma ``paperkey``. Consultate ``man paperkey`` per maggiori
+dettagli sul formato dell'output ed i suoi punti di forza rispetto ad altre
+soluzioni. Paperkey dovrebbe essere già pacchettizzato per la maggior parte
+delle distribuzioni.
+
+Eseguite il seguente comando per creare una copia fisica di riserva della
+vostra chiave privata::
+
+ $ gpg --export-secret-key [fpr] | paperkey -o /tmp/key-backup.txt
+
+Stampate il file (o fate un pipe direttamente verso lpr), poi prendete
+una penna e scrivete la passphrase sul margine del foglio. **Questo è
+caldamente consigliato** perché la copia cartacea è comunque criptata con
+la passphrase, e se mai doveste cambiarla non vi ricorderete qual era al
+momento della creazione di quella copia -- *garantito*.
+
+Mettete la copia cartacea e la passphrase scritta a mano in una busta e
+mettetela in un posto sicuro e ben protetto, preferibilmente fuori casa,
+magari in una cassetta di sicurezza in banca.
+
+.. note::
+
+ Probabilmente la vostra stampante non è più quello stupido dispositivo
+ connesso alla porta parallela, ma dato che il suo output è comunque
+ criptato con la passphrase, eseguire la stampa in un sistema "cloud"
+ moderno dovrebbe essere comunque relativamente sicuro. Un'opzione potrebbe
+ essere quella di cambiare la passphrase della vostra chiave primaria
+ subito dopo aver finito con paperkey.
+
+Copia di riserva di tutta la cartella GnuPG
+-------------------------------------------
.. warning::
- TODO ancora da tradurre
+ **!!!Non saltate questo passo!!!**
+
+Quando avete bisogno di recuperare le vostre chiavi PGP è importante avere
+una copia di riserva pronta all'uso. Questo sta su un diverso piano di
+prontezza rispetto al recupero da disastro che abbiamo risolto con
+``paperkey``. Vi affiderete a queste copie esterne quando dovrete usare la
+vostra chiave Certify -- ovvero quando fate modifiche alle vostre chiavi o
+firmate le chiavi di altre persone ad una conferenza o ad un gruppo d'incontro.
+
+Incominciate con una piccola chiavetta di memoria USB (preferibilmente due)
+che userete per le copie di riserva. Dovrete criptarle usando LUKS -- fate
+riferimento alla documentazione della vostra distribuzione per capire come
+fare.
+
+Per la passphrase di criptazione, potete usare la stessa della vostra chiave
+primaria.
+
+Una volta che il processo di criptazione è finito, reinserite il disco USB ed
+assicuratevi che venga montato correttamente. Copiate interamente la cartella
+``.gnupg`` nel disco criptato::
+
+ $ cp -a ~/.gnupg /media/disk/foo/gnupg-backup
+
+Ora dovreste verificare che tutto continui a funzionare::
+
+ $ gpg --homedir=/media/disk/foo/gnupg-backup --list-key [fpr]
+
+Se non vedete errori, allora dovreste avere fatto tutto con successo.
+Smontate il disco USB, etichettatelo per bene di modo da evitare di
+distruggerne il contenuto non appena vi serve una chiavetta USB a caso, ed
+infine mettetelo in un posto sicuro -- ma non troppo lontano, perché vi servirà
+di tanto in tanto per modificare le identità, aggiungere o revocare
+sottochiavi, o firmare le chiavi di altre persone.
+
+Togliete la chiave primaria dalla vostra home
+---------------------------------------------
+
+I file che si trovano nella vostra cartella home non sono poi così ben protetti
+come potreste pensare. Potrebbero essere letti o trafugati in diversi modi:
+
+- accidentalmente quando fate una rapida copia della cartella home per
+ configurare una nuova postazione
+- da un amministratore di sistema negligente o malintenzionato
+- attraverso copie di riserva insicure
+- attraverso malware installato in alcune applicazioni (browser, lettori PDF,
+ eccetera)
+- attraverso coercizione quando attraversate confini internazionali
+
+Proteggere la vostra chiave con una buona passphrase aiuta notevolmente a
+ridurre i rischi elencati qui sopra, ma le passphrase possono essere scoperte
+attraverso i keylogger, lo shoulder-surfing, o altri modi. Per questi motivi,
+la configurazione raccomandata prevede di rimuovere la chiave primaria dalla
+vostra cartella home e di archiviarla su un dispositivo disconnesso.
+
+.. warning::
+
+ Per favore, fate riferimento alla sezione precedente e assicuratevi
+ di aver fatto una copia di riserva totale della cartella GnuPG. Quello
+ che stiamo per fare renderà la vostra chiave inutile se non avete delle
+ copie di riserva utilizzabili!
+
+Per prima cosa, identificate il keygrip della vostra chiave primaria::
+
+ $ gpg --with-keygrip --list-key [fpr]
+
+L'output assomiglierà a questo::
+
+ pub rsa2048 2018-01-24 [SC] [expires: 2020-01-24]
+ 000000000000000000000000AAAABBBBCCCCDDDD
+ Keygrip = 1111000000000000000000000000000000000000
+ uid [ultimate] Alice Dev <adev@kernel.org>
+ sub rsa2048 2018-01-24 [E] [expires: 2020-01-24]
+ Keygrip = 2222000000000000000000000000000000000000
+ sub ed25519 2018-01-24 [S]
+ Keygrip = 3333000000000000000000000000000000000000
+
+Trovate la voce keygrip che si trova sotto alla riga ``pub`` (appena sotto
+all'impronta digitale della chiave primaria). Questo corrisponderà direttamente
+ad un file nella cartella ``~/.gnupg``::
+
+ $ cd ~/.gnupg/private-keys-v1.d
+ $ ls
+ 1111000000000000000000000000000000000000.key
+ 2222000000000000000000000000000000000000.key
+ 3333000000000000000000000000000000000000.key
+
+Quello che dovrete fare è rimuovere il file .key che corrisponde al keygrip
+della chiave primaria::
+
+ $ cd ~/.gnupg/private-keys-v1.d
+ $ rm 1111000000000000000000000000000000000000.key
+
+Ora, se eseguite il comando ``--list-secret-keys``, vedrete che la chiave
+primaria non compare più (il simbolo ``#`` indica che non è disponibile)::
+
+ $ gpg --list-secret-keys
+ sec# rsa2048 2018-01-24 [SC] [expires: 2020-01-24]
+ 000000000000000000000000AAAABBBBCCCCDDDD
+ uid [ultimate] Alice Dev <adev@kernel.org>
+ ssb rsa2048 2018-01-24 [E] [expires: 2020-01-24]
+ ssb ed25519 2018-01-24 [S]
+
+Dovreste rimuovere anche i file ``secring.gpg`` che si trovano nella cartella
+``~/.gnupg``, in quanto rimasugli delle versioni precedenti di GnuPG.
+
+Se non avete la cartella "private-keys-v1.d"
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Se non avete la cartella ``~/.gnupg/private-keys-v1.d``, allora le vostre
+chiavi segrete sono ancora salvate nel vecchio file ``secring.gpg`` usato
+da GnuPG v1. Effettuare una qualsiasi modifica alla vostra chiave, come
+cambiare la passphrase o aggiungere una sottochiave, dovrebbe convertire
+automaticamente il vecchio formato ``secring.gpg`` nel nuovo
+``private-keys-v1.d``.
+
+Una volta che l'avete fatto, assicuratevi di rimuovere il file ``secring.gpg``,
+che continua a contenere la vostra chiave privata.
+
+.. _it_smartcards:
+
+Spostare le sottochiavi in un apposito dispositivo criptato
+===========================================================
+
+Nonostante la chiave primaria sia ora al riparo da occhi e mani indiscrete,
+le sottochiavi si trovano ancora nella vostra cartella home. Chiunque riesca
+a mettere le sue mani su quelle chiavi riuscirà a decriptare le vostre
+comunicazioni o a falsificare le vostre firme (se conosce la passphrase).
+Inoltre, ogni volta che viene fatta un'operazione con GnuPG, le chiavi vengono
+caricate nella memoria di sistema e potrebbero essere rubate con l'uso di
+malware sofisticati (pensate a Meltdown e a Spectre).
+
+Il miglior modo per proteggere le proprie chiavi è di spostarle su un
+dispositivo specializzato in grado di effettuare operazioni smartcard.
+
+I benefici di una smartcard
+---------------------------
+
+Una smartcard contiene un chip crittografico che è capace di immagazzinare
+le chiavi private ed effettuare operazioni crittografiche direttamente sulla
+carta stessa. Dato che la chiave non lascia mai la smartcard, il sistema
+operativo usato sul computer non sarà in grado di accedere alle chiavi.
+Questo è molto diverso dai dischi USB criptati che abbiamo usato allo scopo di
+avere una copia di riserva sicura -- quando il dispositivo USB è connesso e
+montato, il sistema operativo potrà accedere al contenuto delle chiavi private.
+
+L'uso di un disco USB criptato non può sostituire le funzioni di un dispositivo
+capace di operazioni di tipo smartcard.
+
+Dispositivi smartcard disponibili
+---------------------------------
+
+A meno che tutti i vostri computer dispongano di lettori smartcard, il modo
+più semplice è equipaggiarsi di un dispositivo USB specializzato che
+implementi le funzionalità delle smartcard. Sul mercato ci sono diverse
+soluzioni disponibili:
+
+- `Nitrokey Start`_: è Open hardware e Free Software, è basata sul progetto
+ `GnuK`_ della FSIJ. Ha il supporto per chiavi ECC, ma meno funzionalità di
+ sicurezza (come la resistenza alla manomissione o ad alcuni attacchi
+ attraverso canali laterali).
+- `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla
+ manomissione e offre più funzionalità di sicurezza, ma non l'ECC.
+- `Yubikey 4`_: l'hardware e il software sono proprietari, ma è più economica
+ della Nitrokey Pro ed è venduta anche con porta USB-C il che è utile con i
+ computer portatili più recenti. In aggiunta, offre altre funzionalità di
+ sicurezza come FIDO, U2F, ma non l'ECC.
+
+`Su LWN c'è una buona recensione`_ dei modelli elencati qui sopra e altri.
+Se volete usare chiavi ECC, la vostra migliore scelta sul mercato è la
+Nitrokey Start.
+
+.. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6
+.. _`Nitrokey Pro`: https://shop.nitrokey.com/shop/product/nitrokey-pro-3
+.. _`Yubikey 4`: https://www.yubico.com/product/yubikey-4-series/
+.. _Gnuk: http://www.fsij.org/doc-gnuk/
+.. _`Su LWN c'è una buona recensione`: https://lwn.net/Articles/736231/
+
+Configurare il vostro dispositivo smartcard
+-------------------------------------------
+
+Il vostro dispositivo smartcard dovrebbe iniziare a funzionare non appena
+lo collegate ad un qualsiasi computer Linux moderno. Potete verificarlo
+eseguendo::
+
+ $ gpg --card-status
+
+Se vedete tutti i dettagli della smartcard, allora ci siamo. Sfortunatamente,
+affrontare tutti i possibili motivi per cui le cose potrebbero non funzionare
+non è lo scopo di questa guida. Se avete problemi nel far funzionare la carta
+con GnuPG, cercate aiuto attraverso i soliti canali di supporto.
+
+Per configurare la vostra smartcard, dato che non c'è una via facile dalla
+riga di comando, dovrete usare il menu di GnuPG::
+
+ $ gpg --card-edit
+ [...omitted...]
+ gpg/card> admin
+ Admin commands are allowed
+ gpg/card> passwd
+
+Dovreste impostare il PIN dell'utente (1), quello dell'amministratore (3) e il
+codice di reset (4). Assicuratevi di annotare e salvare questi codici in un
+posto sicuro -- specialmente il PIN dell'amministratore e il codice di reset
+(che vi permetterà di azzerare completamente la smartcard). Il PIN
+dell'amministratore viene usato così raramente che è inevitabile dimenticarselo
+se non lo si annota.
+
+Tornando al nostro menu, potete impostare anche altri valori (come il nome,
+il sesso, informazioni d'accesso, eccetera), ma non sono necessari e aggiungono
+altre informazioni sulla carta che potrebbero trapelare in caso di smarrimento.
+
+.. note::
+
+ A dispetto del nome "PIN", né il PIN utente né quello dell'amministratore
+ devono essere esclusivamente numerici.
+
+Spostare le sottochiavi sulla smartcard
+---------------------------------------
+
+Uscite dal menu (usando "q") e salvate tutte le modifiche. Poi, spostiamo
+tutte le sottochiavi sulla smartcard. Per la maggior parte delle operazioni
+vi serviranno sia la passphrase della chiave PGP che il PIN
+dell'amministratore::
+
+ $ gpg --edit-key [fpr]
+
+ Secret subkeys are available.
+
+ pub rsa2048/AAAABBBBCCCCDDDD
+ created: 2018-01-23 expires: 2020-01-23 usage: SC
+ trust: ultimate validity: ultimate
+ ssb rsa2048/1111222233334444
+ created: 2018-01-23 expires: never usage: E
+ ssb ed25519/5555666677778888
+ created: 2017-12-07 expires: never usage: S
+ [ultimate] (1). Alice Dev <adev@kernel.org>
+
+ gpg>
+
+Usando ``--edit-key`` si tornerà alla modalità menu e noterete che
+la lista delle chiavi è leggermente diversa. Da questo momento in poi,
+tutti i comandi saranno eseguiti nella modalità menu, come indicato
+da ``gpg>``.
+
+Per prima cosa, selezioniamo la chiave che verrà messa sulla carta --
+potete farlo digitando ``key 1`` (è la prima della lista, la sottochiave
+**[E]**)::
+
+ gpg> key 1
+
+Nell'output dovreste vedere ``ssb*`` associato alla chiave **[E]**. Il simbolo
+``*`` indica che la chiave è stata "selezionata". Funziona come un
+interruttore, ovvero se scrivete nuovamente ``key 1``, il simbolo ``*`` sparirà
+e la chiave non sarà più selezionata.
+
+Ora, spostiamo la chiave sulla smartcard::
+
+ gpg> keytocard
+ Please select where to store the key:
+ (2) Encryption key
+ Your selection? 2
+
+Dato che è la nostra chiave **[E]**, ha senso metterla nella sezione criptata.
+Quando confermerete la selezione, vi verrà chiesta la passphrase della vostra
+chiave PGP, e poi il PIN dell'amministratore. Se il comando ritorna senza
+errori, allora la vostra chiave è stata spostata con successo.
+
+**Importante**: digitate nuovamente ``key 1`` per deselezionare la prima chiave
+e selezionate la seconda chiave **[S]** con ``key 2``::
+
+ gpg> key 1
+ gpg> key 2
+ gpg> keytocard
+ Please select where to store the key:
+ (1) Signature key
+ (3) Authentication key
+ Your selection? 1
+
+Potete usare la chiave **[S]** sia per firmare che per autenticare, ma vogliamo
+che sia nella sezione di firma, quindi scegliete (1). Ancora una volta, se il
+comando ritorna senza errori, allora l'operazione è avvenuta con successo::
+
+ gpg> q
+ Save changes? (y/N) y
+
+Salvando le modifiche cancellerete dalla vostra cartella home tutte le chiavi
+che avete spostato sulla carta (ma questo non è un problema, perché abbiamo
+fatto delle copie di sicurezza nel caso in cui dovessimo configurare una
+nuova smartcard).
+
+Verificare che le chiavi siano state spostate
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ora, se doveste usare l'opzione ``--list-secret-keys``, vedrete una
+sottile differenza nell'output::
+
+ $ gpg --list-secret-keys
+ sec# rsa2048 2018-01-24 [SC] [expires: 2020-01-24]
+ 000000000000000000000000AAAABBBBCCCCDDDD
+ uid [ultimate] Alice Dev <adev@kernel.org>
+ ssb> rsa2048 2018-01-24 [E] [expires: 2020-01-24]
+ ssb> ed25519 2018-01-24 [S]
+
+Il simbolo ``>`` in ``ssb>`` indica che la sottochiave è disponibile solo
+nella smartcard. Se tornate nella vostra cartella delle chiavi segrete e
+guardate al suo contenuto, noterete che i file ``.key`` sono stati sostituiti
+con degli stub::
+
+ $ cd ~/.gnupg/private-keys-v1.d
+ $ strings *.key | grep 'private-key'
+
+Per indicare che i file sono solo degli stub e che in realtà il contenuto è
+sulla smartcard, l'output dovrebbe mostrarvi ``shadowed-private-key``.
+
+Verificare che la smartcard funzioni
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Per verificare che la smartcard funzioni come dovuto, potete creare
+una firma::
+
+ $ echo "Hello world" | gpg --clearsign > /tmp/test.asc
+ $ gpg --verify /tmp/test.asc
+
+Col primo comando dovrebbe chiedervi il PIN della smartcard, e poi dovrebbe
+mostrare "Good signature" dopo l'esecuzione di ``gpg --verify``.
+
+Complimenti, siete riusciti a rendere estremamente difficile il furto della
+vostra identità digitale di sviluppatore.
+
+Altre operazioni possibili con GnuPG
+------------------------------------
+
+Segue un breve accenno ad alcune delle operazioni più comuni che dovrete
+fare con le vostre chiavi PGP.
+
+Montare il disco con la chiave primaria
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Vi servirà la vostra chiave principale per tutte le operazioni che seguiranno,
+per cui per prima cosa dovrete accedere ai vostri backup e dire a GnuPG di
+usarli::
+
+ $ export GNUPGHOME=/media/disk/foo/gnupg-backup
+ $ gpg --list-secret-keys
+
+Dovete assicurarvi di vedere ``sec`` e non ``sec#`` nell'output del programma
+(il simbolo ``#`` significa che la chiave non è disponibile e che state ancora
+utilizzando la vostra solita cartella di lavoro).
+
+Estendere la data di scadenza di una chiave
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+La chiave principale ha una data di scadenza di 2 anni dal momento della sua
+creazione. Questo per motivi di sicurezza e per far sì che le chiavi ormai
+obsolete, prima o poi, spariscano dai keyserver.
+
+Per estendere di un anno, dalla data odierna, la scadenza di una vostra chiave,
+eseguite::
+
+ $ gpg --quick-set-expire [fpr] 1y
+
+Se per voi è più facile da memorizzare, potete anche utilizzare una data
+specifica (per esempio, il vostro compleanno o capodanno)::
+
+ $ gpg --quick-set-expire [fpr] 2020-07-01
+
+Ricordatevi di inviare l'aggiornamento ai keyserver::
+
+ $ gpg --send-key [fpr]
+
+Aggiornare la vostra cartella di lavoro dopo ogni modifica
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Dopo aver fatto delle modifiche alle vostre chiavi usando uno spazio a parte,
+dovreste importarle nella vostra cartella di lavoro abituale::
+
+ $ gpg --export | gpg --homedir ~/.gnupg --import
+ $ unset GNUPGHOME
+
+
+Usare PGP con Git
+=================
+
+Una delle caratteristiche fondanti di Git è la sua natura decentralizzata --
+una volta che il repositorio è stato clonato sul vostro sistema, avete la
+storia completa del progetto, inclusi i suoi tag, i commit ed i rami. Tuttavia,
+con le centinaia di repositori clonati che ci sono in giro, come si fa a
+verificare che la loro copia di linux.git non è stata manomessa da qualcuno?
+
+Oppure, cosa succede se viene scoperta una backdoor nel codice e la riga
+"Autore" dice che sei stato tu, mentre tu sei abbastanza sicuro di
+`non averci niente a che fare`_?
+
+Per risolvere entrambi i problemi, Git ha introdotto l'integrazione con PGP.
+I tag firmati dimostrano che il repositorio è integro assicurando che il suo
+contenuto è lo stesso che si trova sulle macchine degli sviluppatori che hanno
+creato il tag; mentre i commit firmati rendono praticamente impossibile
+ad un malintenzionato di impersonarvi senza avere accesso alle vostre chiavi
+PGP.
+
+.. _`non averci niente a che fare`: https://github.com/jayphelps/git-blame-someone-else
+
+Configurare git per usare la vostra chiave PGP
+----------------------------------------------
+
+Se avete solo una chiave segreta nel vostro portachiavi, allora non avete nulla
+da fare in più dato che sarà la vostra chiave di base. Tuttavia, se doveste
+avere più chiavi segrete, potete dire a git quale dovrebbe usare (``[fpr]``
+è la vostra impronta digitale)::
+
+ $ git config --global user.signingKey [fpr]
+
+**IMPORTANTE**: se avete un comando dedicato per ``gpg2``, allora dovreste
+dire a git di usare sempre quello piuttosto che il vecchio comando ``gpg``::
+
+ $ git config --global gpg.program gpg2
+
+Come firmare i tag
+------------------
+
+Per creare un tag firmato, passate l'opzione ``-s`` al comando tag::
+
+ $ git tag -s [tagname]
+
+La nostra raccomandazione è quella di firmare sempre i tag git, perché
+questo permette agli altri sviluppatori di verificare che il repositorio
+git dal quale stanno prendendo il codice non è stato alterato intenzionalmente.
+
+Come verificare i tag firmati
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Per verificare un tag firmato, potete usare il comando ``verify-tag``::
+
+ $ git verify-tag [tagname]
+
+Se state prendendo un tag da un fork del repositorio del progetto, git
+dovrebbe verificare automaticamente la firma di quello che state prendendo
+e vi mostrerà il risultato durante l'operazione di merge::
+
+ $ git pull [url] tags/sometag
+
+Il merge conterrà qualcosa di simile::
+
+ Merge tag 'sometag' of [url]
+
+ [Tag message]
+
+ # gpg: Signature made [...]
+ # gpg: Good signature from [...]
+
+Se state verificando il tag di qualcun altro, allora dovrete importare
+la loro chiave PGP. Fate riferimento alla sezione ":ref:`it_verify_identities`"
+che troverete più avanti.
+
+Configurare git per firmare sempre i tag con annotazione
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Se state creando un tag con annotazione è molto probabile che vogliate
+firmarlo. Per imporre a git di firmare sempre un tag con annotazione,
+dovete impostare la seguente opzione globale::
+
+ $ git config --global tag.forceSignAnnotated true
+
+Come usare commit firmati
+-------------------------
+
+Creare dei commit firmati è facile, ma è molto più difficile utilizzarli
+nello sviluppo del kernel linux per via del fatto che ci si affida alle
+liste di discussione e questo modo di procedere non mantiene le firme PGP
+nei commit. In aggiunta, quando si usa *rebase* nel proprio repositorio
+locale per allinearsi al kernel anche le proprie firme PGP verranno scartate.
+Per questo motivo, la maggior parte degli sviluppatori del kernel non si
+preoccupano troppo di firmare i propri commit ed ignoreranno quelli firmati
+che si trovano in altri repositori usati per il proprio lavoro.
+
+Tuttavia, se avete il vostro repositorio di lavoro disponibile al pubblico
+su un qualche servizio di hosting git (kernel.org, infradead.org, ozlabs.org,
+o altri), allora la raccomandazione è di firmare tutti i vostri commit
+anche se gli sviluppatori non ne beneficeranno direttamente.
+
+Vi raccomandiamo di farlo per i seguenti motivi:
+
+1. Se dovesse mai esserci la necessità di fare delle analisi forensi o
+ tracciare la provenienza di un codice, anche sorgenti mantenuti
+ esternamente che hanno firme PGP sui commit avranno un certo valore a
+ questo scopo.
+2. Se dovesse mai capitarvi di clonare il vostro repositorio locale (per
+ esempio dopo un danneggiamento del disco), la firma vi permetterà di
+ verificare l'integrità del repositorio prima di riprendere il lavoro.
+3. Se qualcuno volesse usare *cherry-pick* sui vostri commit, allora la firma
+ permetterà di verificare l'integrità dei commit prima di applicarli (si
+ veda l'esempio qui sotto).
+
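+A titolo d'esempio (i comandi esatti potrebbero variare a seconda della
+versione di git in uso), l'integrità di un commit firmato può essere
+verificata così::
+
+ $ git verify-commit HEAD
+ $ git log --show-signature -1
+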
+Creare commit firmati
+~~~~~~~~~~~~~~~~~~~~~
+
+Per creare un commit firmato, dovete solamente aggiungere l'opzione ``-S``
+al comando ``git commit`` (si usa la lettera maiuscola per evitare
+conflitti con un'altra opzione)::
+
+ $ git commit -S
+
+Configurare git per firmare sempre i commit
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Potete dire a git di firmare sempre i commit::
+
+ git config --global commit.gpgSign true
+
+.. note::
+
+ Assicuratevi di aver configurato ``gpg-agent`` prima di abilitare
+ questa opzione.
+
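+A titolo puramente indicativo (i valori sono ipotetici e vanno adattati alle
+vostre esigenze), una configurazione minima in ``~/.gnupg/gpg-agent.conf``
+potrebbe essere::
+
+ # durata massima, in secondi, della passphrase nella cache dell'agente
+ default-cache-ttl 600
+ max-cache-ttl 7200
+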
+.. _it_verify_identities:
+
+Come verificare l'identità degli sviluppatori del kernel
+========================================================
+
+Firmare i tag e i commit è facile, ma come si fa a verificare che la chiave
+usata per firmare qualcosa appartenga davvero allo sviluppatore e non ad un
+impostore?
+
+Configurare l'auto-key-retrieval usando WKD e DANE
+--------------------------------------------------
+
+Se non siete ancora in possesso di una vasta collezione di chiavi pubbliche
+di altri sviluppatori, allora potreste iniziare il vostro portachiavi
+affidandovi ai servizi di auto-scoperta e auto-recupero. GnuPG può affidarsi
+ad altre tecnologie di delega della fiducia, come DNSSEC e TLS, per sostenervi
+nel caso in cui iniziare una propria rete di fiducia da zero sia troppo
+scoraggiante.
+
+Aggiungete il seguente testo al vostro file ``~/.gnupg/gpg.conf``::
+
+ auto-key-locate wkd,dane,local
+ auto-key-retrieve
+
+La *DNS-Based Authentication of Named Entities* ("DANE") è un metodo
+per la pubblicazione di chiavi pubbliche su DNS e per renderle sicure usando
+zone firmate con DNSSEC. Il *Web Key Directory* ("WKD") è un metodo
+alternativo che usa https a scopo di ricerca. Quando si usano DANE o WKD
+per la ricerca di chiavi pubbliche, GnuPG validerà i certificati DNSSEC o TLS
+prima di aggiungere al vostro portachiavi locale le eventuali chiavi trovate.
+
+Kernel.org pubblica la WKD per tutti gli sviluppatori che hanno un account
+kernel.org. Una volta che avete applicato le modifiche al file ``gpg.conf``,
+potrete auto-recuperare le chiavi di Linus Torvalds e Greg Kroah-Hartman
+(se non le avete già)::
+
+ $ gpg --locate-keys torvalds@kernel.org gregkh@kernel.org
+
+Se avete un account kernel.org, al fine di rendere più utile l'uso di WKD
+da parte di altri sviluppatori del kernel, dovreste `aggiungere alla vostra
+chiave lo UID di kernel.org`_.
+
+.. _`aggiungere alla vostra chiave lo UID di kernel.org`: https://korg.wiki.kernel.org/userdoc/mail#adding_a_kernelorg_uid_to_your_pgp_key
+
+Web of Trust (WOT) o Trust on First Use (TOFU)
+----------------------------------------------
+
+PGP incorpora un meccanismo di delega della fiducia conosciuto come
+"Web of Trust". Di base, questo è un tentativo di sostituire la necessità
+di un'autorità certificativa centralizzata tipica del mondo HTTPS/TLS.
+Invece di avere svariati produttori software che decidono chi dovrebbero
+essere le entità di certificazione di cui dovreste fidarvi, PGP lascia
+la responsabilità ad ogni singolo utente.
+
+Sfortunatamente, solo poche persone capiscono come funziona la rete di fiducia.
+Nonostante sia un importante aspetto della specifica OpenPGP, le versioni
+recenti di GnuPG (2.2 e successive) hanno implementato un meccanismo
+alternativo chiamato "Trust on First Use" (TOFU). Potete pensare a TOFU come
+"un approccio alla fiducia simile a quello di SSH". In SSH, la prima volta che vi
+connettete ad un sistema remoto, l'impronta digitale della chiave viene
+registrata e ricordata. Se la chiave dovesse cambiare in futuro, il programma
+SSH vi avviserà e si rifiuterà di connettersi, obbligandovi a prendere una
+decisione circa la fiducia che riponete nella nuova chiave. In modo simile,
+la prima volta che importate la chiave PGP di qualcuno, si assume sia valida.
+Se ad un certo punto GnuPG trova un'altra chiave con la stessa identità,
+entrambe, la vecchia e la nuova, verranno segnate come invalide e dovrete
+verificare manualmente quale tenere.
+
+Vi raccomandiamo di usare il meccanismo TOFU+PGP (che è la nuova configurazione
+di base di GnuPG v2). Per farlo, aggiungete (o modificate) l'impostazione
+``trust-model`` in ``~/.gnupg/gpg.conf``::
+
+ trust-model tofu+pgp
+
+Come usare i keyserver in sicurezza
+-----------------------------------
+Se ottenete l'errore "No public key" quando cercate di validare il tag di
+qualcuno, allora dovreste cercare quella chiave usando un keyserver. È
+importante tenere bene a mente che non c'è alcuna garanzia che la chiave
+che avete recuperato da un keyserver PGP appartenga davvero alla persona
+reale -- è progettato così. Dovreste usare il Web of Trust per assicurarvi
+che la chiave sia valida.
+
+Come mantenere il Web of Trust va oltre gli scopi di questo documento,
+semplicemente perché farlo come si deve richiede sia sforzi che perseveranza
+che tendono ad andare oltre al livello di interesse della maggior parte degli
+esseri umani. Qui di seguito alcuni rapidi suggerimenti per aiutarvi a ridurre
+il rischio di importare chiavi maligne.
+
+Primo, diciamo che avete provato ad eseguire ``git verify-tag`` ma restituisce
+un errore dicendo che la chiave non è stata trovata::
+
+ $ git verify-tag sunxi-fixes-for-4.15-2
+ gpg: Signature made Sun 07 Jan 2018 10:51:55 PM EST
+ gpg: using RSA key DA73759BF8619E484E5A3B47389A54219C0F2430
+ gpg: issuer "wens@...org"
+ gpg: Can't check signature: No public key
+
+Cerchiamo nel keyserver per maggiori informazioni sull'impronta digitale
+della chiave (l'impronta digitale, probabilmente, appartiene ad una
+sottochiave, dunque non possiamo usarla direttamente senza trovare prima
+l'ID della chiave primaria associata ad essa)::
+
+ $ gpg --search DA73759BF8619E484E5A3B47389A54219C0F2430
+ gpg: data source: hkp://keys.gnupg.net
+ (1) Chen-Yu Tsai <wens@...org>
+ 4096 bit RSA key C94035C21B4F2AEB, created: 2017-03-14, expires: 2019-03-15
+ Keys 1-1 of 1 for "DA73759BF8619E484E5A3B47389A54219C0F2430". Enter number(s), N)ext, or Q)uit > q
+
+Localizzate l'ID della chiave primaria, nel nostro esempio
+``C94035C21B4F2AEB``. Ora visualizzate le chiavi di Linus Torvalds
+che avete nel vostro portachiavi::
+
+ $ gpg --list-key torvalds@kernel.org
+ pub rsa2048 2011-09-20 [SC]
+ ABAF11C65A2970B130ABE3C479BE3E4300411886
+ uid [ unknown] Linus Torvalds <torvalds@kernel.org>
+ sub rsa2048 2011-09-20 [E]
+
+Poi, aprite il `PGP pathfinder`_. Nel campo "From", incollate l'impronta
+digitale della chiave di Linus Torvalds che si vede nell'output qui sopra.
+Nel campo "to", incollate il key-id della chiave sconosciuta che avete
+trovato con ``gpg --search``, e poi verificare il risultato:
+
+- `Finding paths to Linus`_
+
+Se trovate un paio di percorsi affidabili è un buon segno circa la validità
+della chiave. Ora, potete aggiungerla al vostro portachiavi dal keyserver::
+
+ $ gpg --recv-key C94035C21B4F2AEB
+
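+A questo punto potete, per esempio, rieseguire la verifica per controllare
+che la firma venga ora riconosciuta::
+
+ $ git verify-tag sunxi-fixes-for-4.15-2
+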
+Questa procedura non è perfetta, e ovviamente state riponendo la vostra
+fiducia nell'amministratore del servizio *PGP Pathfinder* sperando che non
+sia malintenzionato (infatti, questo va contro :ref:`it_devs_not_infra`).
+Tuttavia, se mantenete con cura la vostra rete di fiducia sarà un deciso
+miglioramento rispetto alla cieca fiducia nei keyserver.
+
+.. _`PGP pathfinder`: https://pgp.cs.uu.nl/
+.. _`Finding paths to Linus`: https://pgp.cs.uu.nl/paths/79BE3E4300411886/to/C94035C21B4F2AEB.html
diff --git a/Documentation/translations/it_IT/process/stable-kernel-rules.rst b/Documentation/translations/it_IT/process/stable-kernel-rules.rst
index 6fa5ce9c3572..48e88e5ad2c5 100644
--- a/Documentation/translations/it_IT/process/stable-kernel-rules.rst
+++ b/Documentation/translations/it_IT/process/stable-kernel-rules.rst
@@ -1,12 +1,202 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_stable_kernel_rules:
Tutto quello che volevate sapere sui rilasci -stable di Linux
==============================================================
-.. warning::
+Regole sul tipo di patch che vengono o non vengono accettate nei sorgenti
+"-stable":
- TODO ancora da tradurre
+ - Ovviamente dev'essere corretta e verificata.
+ - Non dev'essere più grande di 100 righe, incluso il contesto.
+ - Deve correggere una cosa sola.
+ - Deve correggere un baco vero che sta disturbando gli utenti (non cose del
+ tipo "Questo potrebbe essere un problema ...").
+ - Deve correggere un problema di compilazione (ma non per cose già segnate
+ con CONFIG_BROKEN), un kernel oops, un blocco, una corruzione di dati,
+ un vero problema di sicurezza, o problemi del tipo "oh, questo non va bene".
+ In pratica, qualcosa di critico.
+ - Problemi importanti riportati dagli utenti di una distribuzione potrebbero
+ essere considerati se correggono importanti problemi di prestazioni o di
+ interattività. Dato che questi problemi non sono così ovvi e la loro
+ correzione ha un'alta probabilità d'introdurre una regressione, dovrebbero
+ essere sottomessi solo dal manutentore della distribuzione includendo un
+ link, se esiste, ad un rapporto su bugzilla, e informazioni aggiuntive
+ sull'impatto che ha sugli utenti.
+ - Non deve correggere problemi relativi a una "corsa critica" teorica
+ (*race condition*), a meno che non venga fornita anche una spiegazione su
+ come questa si possa verificare.
+ - Non deve includere alcuna correzione "banale" (correzioni grammaticali,
+ pulizia dagli spazi bianchi, eccetera).
+ - Deve rispettare le regole scritte in
+ :ref:`Documentation/translations/it_IT/process/submitting-patches.rst <it_submittingpatches>`
+ - Questa patch o una equivalente deve esistere già nei sorgenti principali di
+ Linux
+
+
+Procedura per sottomettere patch per i sorgenti -stable
+-------------------------------------------------------
+
+ - Se la patch contiene modifiche a dei file nelle cartelle net/ o drivers/net,
+ allora seguite le linee guida descritte in
+ :ref:`Documentation/translations/it_IT/networking/netdev-FAQ.rst <it_netdev-FAQ>`;
+ ma solo dopo aver verificato al seguente indirizzo che la patch non sia
+ già in coda:
+ https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive=
+ - Una patch di sicurezza non dovrebbe essere gestita (solamente) dal processo
+ di revisione -stable, ma dovrebbe seguire le procedure descritte in
+ :ref:`Documentation/translations/it_IT/admin-guide/security-bugs.rst <it_securitybugs>`.
+
+
+Per tutte le altre sottomissioni, scegliere una delle seguenti procedure
+------------------------------------------------------------------------
+
+.. _it_option_1:
+
+Opzione 1
+*********
+
+Per far sì che una patch venga automaticamente inclusa nei sorgenti stabili,
+aggiungete l'etichetta
+
+.. code-block:: none
+
+ Cc: stable@vger.kernel.org
+
+nell'area dedicata alle firme. Una volta che la patch è stata inclusa, verrà
+applicata anche sui sorgenti stabili senza che l'autore o il manutentore
+del sottosistema debba fare qualcosa.
+
+.. _it_option_2:
+
+Opzione 2
+*********
+
+Dopo che la patch è stata inclusa nei sorgenti Linux, inviate una mail a
+stable@vger.kernel.org includendo: il titolo della patch, l'identificativo
+del commit, il perché pensate che debba essere applicata, e in quale versione
+del kernel la vorreste vedere.
+
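+Un esempio puramente ipotetico (titolo, identificativo e versione sono
+inventati) potrebbe essere:
+
+.. code-block:: none
+
+ Oggetto: richiesta d'inclusione in -stable di "foo: fix use-after-free"
+
+ Commit abc123def456 nei sorgenti principali. Corregge un oops segnalato
+ dagli utenti; per favore applicatelo ai kernel 4.14.y e successivi.
+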
+.. _it_option_3:
+
+Opzione 3
+*********
+
+Inviate la patch, dopo aver verificato che rispetti le regole descritte in
+precedenza, a stable@vger.kernel.org. Dovete annotare nel changelog
+l'identificativo del commit nei sorgenti principali, così come la versione
+del kernel nel quale vorreste vedere la patch.
+
+L':ref:`it_option_1` è fortemente raccomandata; è il modo più facile e usato.
+L':ref:`it_option_2` e l':ref:`it_option_3` sono più utili quando, al momento
+dell'inclusione nei sorgenti principali, si ritiene che non debbano essere
+incluse anche in quelli stabili (per esempio, perché si crede che si dovrebbero
+fare più verifiche per eventuali regressioni). L':ref:`it_option_3` è
+particolarmente utile se la patch ha bisogno di qualche modifica per essere
+applicata ad un kernel più vecchio (per esempio, perché nel frattempo l'API è
+cambiata).
+
+Notate che per l':ref:`it_option_3`, se la patch è diversa da quella nei
+sorgenti principali (per esempio perché è stato necessario un lavoro di
+adattamento) allora dev'essere ben documentata e giustificata nella descrizione
+della patch.
+
+L'identificativo del commit nei sorgenti principali dev'essere indicato sopra
+al messaggio della patch, così:
+
+.. code-block:: none
+
+ commit <sha1> upstream.
+
+In aggiunta, alcune patch inviate attraverso l':ref:`it_option_1` potrebbero
+dipendere da altre che devono essere incluse. Questa situazione può essere
+indicata nel seguente modo nell'area dedicata alle firme:
+
+.. code-block:: none
+
+ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
+ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+La sequenza di etichette ha il seguente significato:
+
+.. code-block:: none
+
+ git cherry-pick a1f84a3
+ git cherry-pick 1b9508f
+ git cherry-pick fd21073
+ git cherry-pick <this commit>
+
+Inoltre, alcune patch potrebbero avere dei requisiti circa la versione del
+kernel. Questo può essere indicato usando il seguente formato nell'area
+dedicata alle firme:
+
+.. code-block:: none
+
+ Cc: <stable@vger.kernel.org> # 3.3.x
+
+L'etichetta ha il seguente significato:
+
+.. code-block:: none
+
+ git cherry-pick <this commit>
+
+per ogni sorgente "-stable" che inizia con la versione indicata.
+
+Dopo la sottomissione:
+
+ - Il mittente riceverà un ACK quando la patch è stata accettata e messa in
+ coda, oppure un NAK se la patch è stata rigettata. A seconda degli impegni
+ degli sviluppatori, questa risposta potrebbe richiedere alcuni giorni.
+ - Se accettata, la patch verrà aggiunta alla coda -stable per essere
+ revisionata da altri sviluppatori e dal principale manutentore del
+ sottosistema.
+
+
+Ciclo di una revisione
+----------------------
+
+ - Quando i manutentori -stable decidono di fare un ciclo di revisione, le
+ patch vengono mandate al comitato per la revisione, ai manutentori delle
+ aree del kernel toccate dalle patch (a meno che il mittente non sia anche
+ il manutentore di quell'area) e in CC: alla lista di discussione
+ linux-kernel.
+ - La commissione per la revisione ha 48 ore per dare il proprio ACK o NACK
+ alle patch.
+ - Se una patch viene rigettata da un membro della commissione, o un membro
+ della lista linux-kernel contesta la bontà della patch, sollevando problemi
+ che i manutentori ed i membri non avevano compreso, allora la patch verrà
+ rimossa dalla coda.
+ - Alla fine del ciclo di revisione tutte le patch che hanno ricevuto l'ACK
+ verranno aggiunte per il prossimo rilascio -stable, e successivamente
+ questo nuovo rilascio verrà fatto.
+ - Le patch di sicurezza verranno accettate nei sorgenti -stable direttamente
+ dalla squadra per la sicurezza del kernel, e non passeranno per il normale
+ ciclo di revisione. Contattate la suddetta squadra per maggiori dettagli
+ su questa procedura.
+
+Sorgenti
+--------
+
+ - La coda delle patch, sia di quelle già applicate che di quelle in fase di
+ revisione, può essere trovata al seguente indirizzo:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
+
+ - Il rilascio definitivo, e marchiato, di tutti i kernel stabili può essere
+ trovato in rami distinti per versione al seguente indirizzo:
+
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
+
+
+Comitato per la revisione
+-------------------------
+
+ - Questo comitato è fatto di sviluppatori del kernel che si sono offerti
+ volontari per questo lavoro, e pochi altri che non sono proprio volontari.
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst
index 2ab9c1401aa1..7d7ea92c5c5a 100644
--- a/Documentation/translations/it_IT/process/submitting-patches.rst
+++ b/Documentation/translations/it_IT/process/submitting-patches.rst
@@ -67,8 +67,8 @@ sulla radice dei sorgenti del kernel, e non sulle sue sottocartelle.
Per creare una patch per un singolo file, spesso è sufficiente fare::
- SRCTREE= linux
- MYFILE= drivers/net/mydriver.c
+ SRCTREE=linux
+ MYFILE=drivers/net/mydriver.c
cd $SRCTREE
cp $MYFILE $MYFILE.orig
@@ -80,7 +80,7 @@ Per creare una patch per molteplici file, dovreste spacchettare i sorgenti
"vergini", o comunque non modificati, e fare un ``diff`` coi vostri.
Per esempio::
- MYSRC= /devel/linux
+ MYSRC=/devel/linux
tar xvfz linux-3.19.tar.gz
mv linux-3.19 linux-3.19-vanilla
@@ -567,11 +567,42 @@ alcunché - ma dovrebbe indicare che la persona ha ricevuto una copia della
patch. Questa etichetta documenta che terzi potenzialmente interessati sono
stati inclusi nella discussione.
-L'etichetta Co-developed-by: indica che la patch è stata scritta dall'autore in
-collaborazione con un altro sviluppatore. Qualche volta questo è utile quando
-più persone lavorano sulla stessa patch. Notate, questa persona deve avere
-nella patch anche una riga Signed-off-by:.
+Co-developed-by: indica che la patch è stata cosviluppata da diversi
+sviluppatori; viene usato per assegnare più autori (in aggiunta a quello
+associato all'etichetta From:) quando più persone lavorano ad una patch. Dato
+che Co-developed-by: implica la paternità della patch, ogni Co-developed-by:
+dev'essere seguito immediatamente dal Signed-off-by: del corrispondente
+coautore. Qui si applica la procedura di base per sign-off, in pratica
+l'ordine delle etichette Signed-off-by: dovrebbe riflettere il più possibile
+l'ordine cronologico della storia della patch, indipendentemente dal fatto che
+la paternità venga assegnata via From: o Co-developed-by:. Da notare che
+l'ultimo Signed-off-by: dev'essere quello di colui che ha sottomesso la patch.
+Notate anche che l'etichetta From: è opzionale quando l'autore in From: è
+anche la persona (e indirizzo email) indicato nel From: dell'intestazione
+dell'email.
+
+Esempio di una patch sottomessa dall'autore in From:::
+
+ <changelog>
+
+ Co-developed-by: First Co-Author <first@coauthor.example.org>
+ Signed-off-by: First Co-Author <first@coauthor.example.org>
+ Co-developed-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+
+Esempio di una patch sottomessa dall'autore Co-developed-by:::
+
+ From: From Author <from@author.example.org>
+
+ <changelog>
+
+ Co-developed-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+ Co-developed-by: Submitting Co-Author <sub@coauthor.example.org>
+ Signed-off-by: Submitting Co-Author <sub@coauthor.example.org>
13) Utilizzare Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: e Fixes:
-----------------------------------------------------------------------------
@@ -719,7 +750,7 @@ Un paio di esempi di oggetti::
La riga ``from`` dev'essere la prima nel corpo del messaggio ed è nel
formato:
- From: Original Author <author@example.com>
+ From: Patch Author <author@example.com>
La riga ``from`` indica chi verrà accreditato nel changelog permanente come
l'autore della patch. Se la riga ``from`` è mancante, allora per determinare
diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
index 60c7c35ac517..b42220d3d46c 100644
--- a/Documentation/translations/ja_JP/SubmitChecklist
+++ b/Documentation/translations/ja_JP/SubmitChecklist
@@ -74,38 +74,34 @@ Linux カーネルパッチ投稿者向けチェックリスト
13: CONFIG_SMP, CONFIG_PREEMPT を有効にした場合と無効にした場合の両方で
ビルドした上、動作確認を行ってください。
-14: もしパッチがディスクのI/O性能などに影響を与えるようであれば、
- 'CONFIG_LBDAF'オプションを有効にした場合と無効にした場合の両方で
- テストを実施してみてください。
+14: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。
-15: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。
-
-16: /proc に新しいエントリを追加した場合には、Documentation/ 配下に
+15: /proc に新しいエントリを追加した場合には、Documentation/ 配下に
必ずドキュメントを追加してください。
-17: 新しいブートパラメータを追加した場合には、
+16: 新しいブートパラメータを追加した場合には、
必ずDocumentation/admin-guide/kernel-parameters.rst に説明を追加してください。
-18: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を
+17: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を
利用して必ずその説明を記述してください。
-19: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に
+18: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に
Documentation/ABI/README を参考にして必ずドキュメントを追加してください。
-20: 'make headers_check'を実行して全く問題がないことを確認してください。
+19: 'make headers_check'を実行して全く問題がないことを確認してください。
-21: 少なくともslabアロケーションとpageアロケーションに失敗した場合の
+20: 少なくともslabアロケーションとpageアロケーションに失敗した場合の
挙動について、fault-injectionを利用して確認してください。
Documentation/fault-injection/ を参照してください。
追加したコードがかなりの量であったならば、サブシステム特有の
fault-injectionを追加したほうが良いかもしれません。
-22: 新たに追加したコードは、`gcc -W'でコンパイルしてください。
+21: 新たに追加したコードは、`gcc -W'でコンパイルしてください。
このオプションは大量の不要なメッセージを出力しますが、
"warning: comparison between signed and unsigned" のようなメッセージは、
バグを見つけるのに役に立ちます。
-23: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや
+22: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや
VM, VFS およびその他のサブシステムに関する様々な変更と、現時点でも共存
できることを確認するテストを行ってください。
diff --git a/Documentation/translations/ja_JP/SubmittingPatches b/Documentation/translations/ja_JP/SubmittingPatches
index 02139656463e..ad979c3c06a6 100644
--- a/Documentation/translations/ja_JP/SubmittingPatches
+++ b/Documentation/translations/ja_JP/SubmittingPatches
@@ -58,8 +58,8 @@ Linux カーネルに対する全ての変更は diff(1) コマンドによる
1個のファイルについてのパッチを作成するためには、ほとんどの場合、
以下の作業を行えば十分です。
- SRCTREE= linux-2.6
- MYFILE= drivers/net/mydriver.c
+ SRCTREE=linux-2.6
+ MYFILE=drivers/net/mydriver.c
cd $SRCTREE
cp $MYFILE $MYFILE.orig
@@ -71,7 +71,7 @@ Linux カーネルに対する全ての変更は diff(1) コマンドによる
なわち変更を加えてない Linux カーネルを展開し、自分の Linux カーネル
ソースとの差分を生成しないといけません。例えば、
- MYSRC= /devel/linux-2.6
+ MYSRC=/devel/linux-2.6
tar xvfz linux-2.6.12.tar.gz
mv linux-2.6.12 linux-2.6.12-vanilla
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index 7f01fb1c1084..db0b9d8619f1 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -493,10 +493,8 @@ CPU 에게 기대할 수 있는 최소한의 보장사항 몇가지가 있습니
이 타입의 오퍼레이션은 단방향의 투과성 배리어처럼 동작합니다. ACQUIRE
오퍼레이션 뒤의 모든 메모리 오퍼레이션들이 ACQUIRE 오퍼레이션 후에
일어난 것으로 시스템의 나머지 컴포넌트들에 보이게 될 것이 보장됩니다.
- LOCK 오퍼레이션과 smp_load_acquire(), smp_cond_acquire() 오퍼레이션도
- ACQUIRE 오퍼레이션에 포함됩니다. smp_cond_acquire() 오퍼레이션은 컨트롤
- 의존성과 smp_rmb() 를 사용해서 ACQUIRE 의 의미적 요구사항(semantic)을
- 충족시킵니다.
+ LOCK 오퍼레이션과 smp_load_acquire(), smp_cond_load_acquire() 오퍼레이션도
+ ACQUIRE 오퍼레이션에 포함됩니다.
ACQUIRE 오퍼레이션 앞의 메모리 오퍼레이션들은 ACQUIRE 오퍼레이션 완료 후에
수행된 것처럼 보일 수 있습니다.
@@ -2146,33 +2144,40 @@ set_current_state() 는 다음의 것들로 감싸질 수도 있습니다:
event_indicated = 1;
wake_up_process(event_daemon);
-wake_up() 류에 의해 쓰기 메모리 배리어가 내포됩니다. 만약 그것들이 뭔가를
-깨운다면요. 이 배리어는 태스크 상태가 지워지기 전에 수행되므로, 이벤트를
-알리기 위한 STORE 와 태스크 상태를 TASK_RUNNING 으로 설정하는 STORE 사이에
-위치하게 됩니다.
+wake_up() 이 무언가를 깨우게 되면, 이 함수는 범용 메모리 배리어를 수행합니다.
+이 함수가 아무것도 깨우지 않는다면 메모리 배리어는 수행될 수도, 수행되지 않을
+수도 있습니다; 이 경우에 메모리 배리어를 수행할 거라 오해해선 안됩니다. 이
+배리어는 태스크 상태가 접근되기 전에 수행되는데, 자세히 말하면 이 이벤트를
+알리기 위한 STORE 와 TASK_RUNNING 으로 상태를 쓰는 STORE 사이에 수행됩니다:
- CPU 1 CPU 2
+ CPU 1 (Sleeper) CPU 2 (Waker)
=============================== ===============================
set_current_state(); STORE event_indicated
smp_store_mb(); wake_up();
- STORE current->state <쓰기 배리어>
- <범용 배리어> STORE current->state
- LOAD event_indicated
+ STORE current->state ...
+ <범용 배리어> <범용 배리어>
+ LOAD event_indicated if ((LOAD task->state) & TASK_NORMAL)
+ STORE task->state
-한번더 말합니다만, 이 쓰기 메모리 배리어는 이 코드가 정말로 뭔가를 깨울 때에만
-실행됩니다. 이걸 설명하기 위해, X 와 Y 는 모두 0 으로 초기화 되어 있다는 가정
-하에 아래의 이벤트 시퀀스를 생각해 봅시다:
+여기서 "task" 는 깨어나지는 쓰레드이고 CPU 1 의 "current" 와 같습니다.
+
+반복하지만, wake_up() 이 무언가를 정말 깨운다면 범용 메모리 배리어가 수행될
+것이 보장되지만, 그렇지 않다면 그런 보장이 없습니다. 이걸 이해하기 위해, X 와
+Y 는 모두 0 으로 초기화 되어 있다는 가정 하에 아래의 이벤트 시퀀스를 생각해
+봅시다:
CPU 1 CPU 2
=============================== ===============================
- X = 1; STORE event_indicated
+ X = 1; Y = 1;
smp_mb(); wake_up();
- Y = 1; wait_event(wq, Y == 1);
- wake_up(); load from Y sees 1, no memory barrier
- load from X might see 0
+ LOAD Y LOAD X
+
+정말로 깨우기가 행해졌다면, 두 로드 중 (최소한) 하나는 1 을 보게 됩니다.
+반면에, 실제 깨우기가 행해지지 않았다면, 두 로드 모두 0을 볼 수도 있습니다.
-위 예제에서의 경우와 달리 깨우기가 정말로 행해졌다면, CPU 2 의 X 로드는 1 을
-본다고 보장될 수 있을 겁니다.
+wake_up_process() 는 항상 범용 메모리 배리어를 수행합니다. 이 배리어 역시
+태스크 상태가 접근되기 전에 수행됩니다. 특히, 앞의 예제 코드에서 wake_up() 이
+wake_up_process() 로 대체된다면 두 로드 중 하나는 1을 볼 것이 보장됩니다.
사용 가능한 깨우기류 함수들로 다음과 같은 것들이 있습니다:
@@ -2192,6 +2197,8 @@ wake_up() 류에 의해 쓰기 메모리 배리어가 내포됩니다. 만약
wake_up_poll();
wake_up_process();
+메모리 순서규칙 관점에서, 이 함수들은 모두 wake_up() 과 같거나 보다 강한 순서
+보장을 제공합니다.
[!] 잠재우는 코드와 깨우는 코드에 내포되는 메모리 배리어들은 깨우기 전에
이루어진 스토어를 잠재우는 코드가 set_current_state() 를 호출한 후에 행하는
diff --git a/Documentation/translations/zh_CN/SubmittingPatches b/Documentation/translations/zh_CN/SubmittingPatches
deleted file mode 100644
index e9098da8f1a4..000000000000
--- a/Documentation/translations/zh_CN/SubmittingPatches
+++ /dev/null
@@ -1,412 +0,0 @@
-Chinese translated version of Documentation/process/submitting-patches.rst
-
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
-
-Chinese maintainer: TripleX Chung <triplex@zh-kernel.org>
----------------------------------------------------------------------
-Documentation/process/submitting-patches.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
-
-中文版维护者: 钟宇 TripleX Chung <triplex@zh-kernel.org>
-中文版翻译者: 钟宇 TripleX Chung <triplex@zh-kernel.org>
-中文版校译者: 李阳 Li Yang <leo@zh-kernel.org>
- 王聪 Wang Cong <xiyou.wangcong@gmail.com>
-
-以下为正文
----------------------------------------------------------------------
-
- 如何让你的改动进入内核
- 或者
- 获得亲爱的 Linus Torvalds 的关注和处理
-----------------------------------
-
-对于想要将改动提交到 Linux 内核的个人或者公司来说,如果不熟悉“规矩”,
-提交的流程会让人畏惧。本文档收集了一系列建议,这些建议可以大大的提高你
-的改动被接受的机会。
-阅读 Documentation/process/submit-checklist.rst 来获得在提交代码前需要检查的项目的列
-表。如果你在提交一个驱动程序,那么同时阅读一下
-Documentation/process/submitting-drivers.rst 。
-
-
---------------------------
-第一节 - 创建并发送你的改动
---------------------------
-
-1) "diff -up"
------------
-
-使用 "diff -up" 或者 "diff -uprN" 来创建补丁。
-
-所有内核的改动,都是以补丁的形式呈现的,补丁由 diff(1) 生成。创建补丁的
-时候,要确认它是以 "unified diff" 格式创建的,这种格式由 diff(1) 的 '-u'
-参数生成。而且,请使用 '-p' 参数,那样会显示每个改动所在的C函数,使得
-产生的补丁容易读得多。补丁应该基于内核源代码树的根目录,而不是里边的任
-何子目录。
-为一个单独的文件创建补丁,一般来说这样做就够了:
-
- SRCTREE= linux-2.6
- MYFILE= drivers/net/mydriver.c
-
- cd $SRCTREE
- cp $MYFILE $MYFILE.orig
- vi $MYFILE # make your change
- cd ..
- diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
-
-为多个文件创建补丁,你可以解开一个没有修改过的内核源代码树,然后和你自
-己的代码树之间做 diff 。例如:
-
- MYSRC= /devel/linux-2.6
-
- tar xvfz linux-2.6.12.tar.gz
- mv linux-2.6.12 linux-2.6.12-vanilla
- diff -uprN -X linux-2.6.12-vanilla/Documentation/dontdiff \
- linux-2.6.12-vanilla $MYSRC > /tmp/patch
-
-"dontdiff" 是内核在编译的时候产生的文件的列表,列表中的文件在 diff(1)
-产生的补丁里会被跳过。"dontdiff" 文件被包含在2.6.12和之后版本的内核源代
-码树中。对于更早的内核版本,你可以从
-<http://www.xenotime.net/linux/doc/dontdiff> 获取它。
-确定你的补丁里没有包含任何不属于这次补丁提交的额外文件。记得在用diff(1)
-生成补丁之后,审阅一次补丁,以确保准确。
-如果你的改动很散乱,你应该研究一下如何将补丁分割成独立的部分,将改动分
-割成一系列合乎逻辑的步骤。这样更容易让其他内核开发者审核,如果你想你的
-补丁被接受,这是很重要的。下面这些脚本能够帮助你做这件事情:
-Quilt:
-http://savannah.nongnu.org/projects/quilt
-
-2)描述你的改动。
-描述你的改动包含的技术细节。
-
-要多具体就写多具体。最糟糕的描述可能是像下面这些语句:“更新了某驱动程
-序”,“修正了某驱动程序的bug”,或者“这个补丁包含了某子系统的修改,请
-使用。”
-
-如果你的描述开始变长,这表示你也许需要拆分你的补丁了,请看第3小节,
-继续。
-
-3)拆分你的改动
-
-将改动拆分,逻辑类似的放到同一个补丁文件里。
-
-例如,如果你的改动里同时有bug修正和性能优化,那么把这些改动拆分到两个或
-者更多的补丁文件中。如果你的改动包含对API的修改,并且修改了驱动程序来适
-应这些新的API,那么把这些修改分成两个补丁。
-
-另一方面,如果你将一个单独的改动做成多个补丁文件,那么将它们合并成一个
-单独的补丁文件。这样一个逻辑上单独的改动只被包含在一个补丁文件里。
-
-如果有一个补丁依赖另外一个补丁来完成它的改动,那没问题。简单的在你的补
-丁描述里指出“这个补丁依赖某补丁”就好了。
-
-如果你不能将补丁浓缩成更少的文件,那么每次大约发送出15个,然后等待审查
-和整合。
-
-4)选择 e-mail 的收件人
-
-看一遍 MAINTAINERS 文件和源代码,看看你所的改动所在的内核子系统有没有指
-定的维护者。如果有,给他们发e-mail。
-
-如果没有找到维护者,或者维护者没有反馈,将你的补丁发送到内核开发者主邮
-件列表 linux-kernel@vger.kernel.org。大部分的内核开发者都跟踪这个邮件列
-表,可以评价你的改动。
-
-每次不要发送超过15个补丁到 vger 邮件列表!!!
-
-Linus Torvalds 是决定改动能否进入 Linux 内核的最终裁决者。他的 e-mail
-地址是 <torvalds@linux-foundation.org> 。他收到的 e-mail 很多,所以一般
-的说,最好别给他发 e-mail。
-
-那些修正bug,“显而易见”的修改或者是类似的只需要很少讨论的补丁可以直接
-发送或者CC给Linus。那些需要讨论或者没有很清楚的好处的补丁,一般先发送到
-linux-kernel邮件列表。只有当补丁被讨论得差不多了,才提交给Linus。
-
-5)选择CC( e-mail 抄送)列表
-
-除非你有理由不这样做,否则CC linux-kernel@vger.kernel.org。
-
-除了 Linus 之外,其他内核开发者也需要注意到你的改动,这样他们才能评论你
-的改动并提供代码审查和建议。linux-kernel 是 Linux 内核开发者主邮件列表
-。其它的邮件列表为特定的子系统提供服务,比如 USB,framebuffer 设备,虚
-拟文件系统,SCSI 子系统,等等。查看 MAINTAINERS 文件来获得和你的改动有
-关的邮件列表。
-
-Majordomo lists of VGER.KERNEL.ORG at:
- <http://vger.kernel.org/vger-lists.html>
-
-如果改动影响了用户空间和内核之间的接口,请给 MAN-PAGES 的维护者(列在
-MAINTAINERS 文件里的)发送一个手册页(man-pages)补丁,或者至少通知一下改
-变,让一些信息有途径进入手册页。
-
-即使在第四步的时候,维护者没有作出回应,也要确认在修改他们的代码的时候
-,一直将维护者拷贝到CC列表中。
-
-对于小的补丁,你也许会CC到 Adrian Bunk 管理的搜集琐碎补丁的邮件列表
-(Trivial Patch Monkey)trivial@kernel.org,那里专门收集琐碎的补丁。下面这样
-的补丁会被看作“琐碎的”补丁:
- 文档的拼写修正。
- 修正会影响到 grep(1) 的拼写。
- 警告信息修正(频繁的打印无用的警告是不好的。)
- 编译错误修正(代码逻辑的确是对的,只是编译有问题。)
- 运行时修正(只要真的修正了错误。)
- 移除使用了被废弃的函数/宏的代码(例如 check_region。)
- 联系方式和文档修正。
- 用可移植的代码替换不可移植的代码(即使在体系结构相关的代码中,既然有
- 人拷贝,只要它是琐碎的)
- 任何文件的作者/维护者对该文件的改动(例如 patch monkey 在重传模式下)
-
-EMAIL: trivial@kernel.org
-
-(译注,关于“琐碎补丁”的一些说明:因为原文的这一部分写得比较简单,所以不得不
-违例写一下译注。"trivial"这个英文单词的本意是“琐碎的,不重要的。”但是在这里
-有稍微有一些变化,例如对一些明显的NULL指针的修正,属于运行时修正,会被归类
-到琐碎补丁里。虽然NULL指针的修正很重要,但是这样的修正往往很小而且很容易得到
-检验,所以也被归入琐碎补丁。琐碎补丁更精确的归类应该是
-“simple, localized & easy to verify”,也就是说简单的,局部的和易于检验的。
-trivial@kernel.org邮件列表的目的是针对这样的补丁,为提交者提供一个中心,来
-降低提交的门槛。)
-
-6)没有 MIME 编码,没有链接,没有压缩,没有附件,只有纯文本。
-
-Linus 和其他的内核开发者需要阅读和评论你提交的改动。对于内核开发者来说
-,可以“引用”你的改动很重要,使用一般的 e-mail 工具,他们就可以在你的
-代码的任何位置添加评论。
-
-因为这个原因,所有的提交的补丁都是 e-mail 中“内嵌”的。
-警告:如果你使用剪切-粘贴你的补丁,小心你的编辑器的自动换行功能破坏你的
-补丁。
-
-不要将补丁作为 MIME 编码的附件,不管是否压缩。很多流行的 e-mail 软件不
-是任何时候都将 MIME 编码的附件当作纯文本发送的,这会使得别人无法在你的
-代码中加评论。另外,MIME 编码的附件会让 Linus 多花一点时间来处理,这就
-降低了你的改动被接受的可能性。
-
-警告:一些邮件软件,比如 Mozilla 会将你的信息以如下格式发送:
----- 邮件头 ----
-Content-Type: text/plain; charset=us-ascii; format=flowed
----- 邮件头 ----
-问题在于 “format=flowed” 会让接收端的某些邮件软件将邮件中的制表符替换
-成空格以及做一些类似的替换。这样,你发送的时候看起来没问题的补丁就被破
-坏了。
-
-要修正这个问题,只需要将你的 mozilla 的 defaults/pref/mailnews.js 文件
-里的
-pref("mailnews.send_plaintext_flowed", false); // RFC 2646=======
-修改成
-pref("mailnews.display.disable_format_flowed_support", true);
-就可以了。
-
-7) e-mail 的大小
-
-给 Linus 发送补丁的时候,永远按照第6小节说的做。
-
-大的改动对邮件列表不合适,对某些维护者也不合适。如果你的补丁,在不压缩
-的情况下,超过了40kB,那么你最好将补丁放在一个能通过 internet 访问的服
-务器上,然后用指向你的补丁的 URL 替代。
-
-8) 指出你的内核版本
-
-在标题和在补丁的描述中,指出补丁对应的内核的版本,是很重要的。
-
-如果补丁不能干净的在最新版本的内核上打上,Linus 是不会接受它的。
-
-9) 不要气馁,继续提交。
-
-当你提交了改动以后,耐心地等待。如果 Linus 喜欢你的改动并且同意它,那么
-它将在下一个内核发布版本中出现。
-
-然而,如果你的改动没有出现在下一个版本的内核中,可能有若干原因。减少那
-些原因,修正错误,重新提交更新后的改动,是你自己的工作。
-
-Linus不给出任何评论就“丢弃”你的补丁是常见的事情。在系统中这样的事情很
-平常。如果他没有接受你的补丁,也许是由于以下原因:
-* 你的补丁不能在最新版本的内核上干净的打上。
-* 你的补丁在 linux-kernel 邮件列表中没有得到充分的讨论。
-* 风格问题(参照第2小节)
-* 邮件格式问题(重读本节)
-* 你的改动有技术问题。
-* 他收到了成吨的 e-mail,而你的在混乱中丢失了。
-* 你让人为难。
-
-有疑问的时候,在 linux-kernel 邮件列表上请求评论。
-
-10) 在标题上加上 PATCH 的字样
-
-Linus 和 linux-kernel 邮件列表的 e-mail 流量都很高,一个通常的约定是标
-题行以 [PATCH] 开头。这样可以让 Linus 和其他内核开发人员可以从 e-mail
-的讨论中很轻易的将补丁分辨出来。
-
-11)为你的工作签名
-
-为了加强对谁做了何事的追踪,尤其是对那些透过好几层的维护者的补丁,我们
-建议在发送出去的补丁上加一个 “sign-off” 的过程。
-
-"sign-off" 是在补丁的注释的最后的简单的一行文字,认证你编写了它或者其他
-人有权力将它作为开放源代码的补丁传递。规则很简单:如果你能认证如下信息
-:
- 开发者来源证书 1.1
- 对于本项目的贡献,我认证如下信息:
- (a)这些贡献是完全或者部分的由我创建,我有权利以文件中指出
- 的开放源代码许可证提交它;或者
- (b)这些贡献基于以前的工作,据我所知,这些以前的工作受恰当的开放
- 源代码许可证保护,而且,根据许可证,我有权提交修改后的贡献,
- 无论是完全还是部分由我创造,这些贡献都使用同一个开放源代码许可证
- (除非我被允许用其它的许可证),正如文件中指出的;或者
- (c)这些贡献由认证(a),(b)或者(c)的人直接提供给我,而
- 且我没有修改它。
- (d)我理解并同意这个项目和贡献是公开的,贡献的记录(包括我
- 一起提交的个人记录,包括 sign-off )被永久维护并且可以和这个项目
- 或者开放源代码的许可证同步地再发行。
- 那么加入这样一行:
- Signed-off-by: Random J Developer <random@developer.example.org>
-
-使用你的真名(抱歉,不能使用假名或者匿名。)
-
-有人在最后加上标签。现在这些东西会被忽略,但是你可以这样做,来标记公司
-内部的过程,或者只是指出关于 sign-off 的一些特殊细节。
-
-12)标准补丁格式
-
-标准的补丁,标题行是:
- Subject: [PATCH 001/123] 子系统:一句话概述
-
-标准补丁的信体存在如下部分:
-
- - 一个 "from" 行指出补丁作者。
-
- - 一个空行
-
- - 说明的主体,这些说明文字会被拷贝到描述该补丁的永久改动记录里。
-
- - 一个由"---"构成的标记行
-
- - 不合适放到改动记录里的额外的注解。
-
- - 补丁本身(diff 输出)
-
-标题行的格式,使得对标题行按字母序排序非常的容易 - 很多 e-mail 客户端都
-可以支持 - 因为序列号是用零填充的,所以按数字排序和按字母排序是一样的。
-
-e-mail 标题中的“子系统”标识哪个内核子系统将被打补丁。
-
-e-mail 标题中的“一句话概述”扼要的描述 e-mail 中的补丁。“一句话概述”
-不应该是一个文件名。对于一个补丁系列(“补丁系列”指一系列的多个相关补
-丁),不要对每个补丁都使用同样的“一句话概述”。
-
-记住 e-mail 的“一句话概述”会成为该补丁的全局唯一标识。它会蔓延到 git
-的改动记录里。然后“一句话概述”会被用在开发者的讨论里,用来指代这个补
-丁。用户将希望通过 google 来搜索"一句话概述"来找到那些讨论这个补丁的文
-章。
-
-一些标题的例子:
-
- Subject: [patch 2/5] ext2: improve scalability of bitmap searching
- Subject: [PATCHv2 001/207] x86: fix eflags tracking
-
-"from" 行是信体里的最上面一行,具有如下格式:
- From: Original Author <author@example.com>
-
-"from" 行指明在永久改动日志里,谁会被确认为作者。如果没有 "from" 行,那
-么邮件头里的 "From: " 行会被用来决定改动日志中的作者。
-
-说明的主题将会被提交到永久的源代码改动日志里,因此对那些早已经不记得和
-这个补丁相关的讨论细节的有能力的读者来说,是有意义的。
-
-"---" 标记行对于补丁处理工具要找到哪里是改动日志信息的结束,是不可缺少
-的。
-
-对于 "---" 标记之后的额外注解,一个好的用途就是用来写 diffstat,用来显
-示修改了什么文件和每个文件都增加和删除了多少行。diffstat 对于比较大的补
-丁特别有用。其余那些只是和时刻或者开发者相关的注解,不合适放到永久的改
-动日志里的,也应该放这里。
-使用 diffstat的选项 "-p 1 -w 70" 这样文件名就会从内核源代码树的目录开始
-,不会占用太宽的空间(很容易适合80列的宽度,也许会有一些缩进。)
-
-在后面的参考资料中能看到适当的补丁格式的更多细节。
-
--------------------------------
-第二节 提示,建议和诀窍
--------------------------------
-
-本节包含很多和提交到内核的代码有关的通常的"规则"。事情永远有例外...但是
-你必须真的有好的理由这样做。你可以把本节叫做Linus的计算机科学入门课。
-
-1) 读 Document/process/coding-style.rst
-
-Nuff 说过,如果你的代码和这个偏离太多,那么它有可能会被拒绝,没有更多的
-审查,没有更多的评价。
-
-2) #ifdef 是丑陋的
-混杂了 ifdef 的代码难以阅读和维护。别这样做。作为替代,将你的 ifdef 放
-在头文件里,有条件地定义 "static inline" 函数,或者宏,在代码里用这些东
-西。让编译器把那些"空操作"优化掉。
-
-一个简单的例子,不好的代码:
-
- dev = alloc_etherdev (sizeof(struct funky_private));
- if (!dev)
- return -ENODEV;
- #ifdef CONFIG_NET_FUNKINESS
- init_funky_net(dev);
- #endif
-
-清理后的例子:
-
-(头文件里)
- #ifndef CONFIG_NET_FUNKINESS
- static inline void init_funky_net (struct net_device *d) {}
- #endif
-
-(代码文件里)
- dev = alloc_etherdev (sizeof(struct funky_private));
- if (!dev)
- return -ENODEV;
- init_funky_net(dev);
-
-3) 'static inline' 比宏好
-
-Static inline 函数相比宏来说,是好得多的选择。Static inline 函数提供了
-类型安全,没有长度限制,没有格式限制,在 gcc 下开销和宏一样小。
-
-宏只在 static inline 函数不是最优的时候[在 fast paths 里有很少的独立的
-案例],或者不可能用 static inline 函数的时候[例如字符串分配]。
-应该用 'static inline' 而不是 'static __inline__', 'extern inline' 和
-'extern __inline__' 。
-
-4) 不要过度设计
-
-不要试图预计模糊的未来事情,这些事情也许有用也许没有用:"让事情尽可能的
-简单,而不是更简单"。
-
-----------------
-第三节 参考文献
-----------------
-
-Andrew Morton, "The perfect patch" (tpp).
- <http://www.ozlabs.org/~akpm/stuff/tpp.txt>
-
-Jeff Garzik, "Linux kernel patch submission format".
- <http://linux.yyz.us/patch-format.html>
-
-Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
- <http://www.kroah.com/log/2005/03/31/>
- <http://www.kroah.com/log/2005/07/08/>
- <http://www.kroah.com/log/2005/10/19/>
- <http://www.kroah.com/log/2006/01/11/>
-
-NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
- <https://lkml.org/lkml/2005/7/11/336>
-
-Kernel Documentation/process/coding-style.rst:
- <http://sosdg.org/~coywolf/lxr/source/Documentation/process/coding-style.rst>
-
-Linus Torvalds's mail on the canonical patch format:
- <http://lkml.org/lkml/2005/4/7/183>
---
diff --git a/Documentation/translations/zh_CN/disclaimer-zh_CN.rst b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
new file mode 100644
index 000000000000..dcf803ede85a
--- /dev/null
+++ b/Documentation/translations/zh_CN/disclaimer-zh_CN.rst
@@ -0,0 +1,9 @@
+:orphan:
+
+.. warning::
+ 此文件的目的是为让中文读者更容易阅读和理解,而不是作为一个分支。 因此,
+ 如果您对此文件有任何意见或更新,请先尝试更新原始英文文件。
+
+.. note::
+ 如果您发现本文档与原始文件有任何不同或者有翻译问题,请联系该文件的译者,
+ 或者请求时奎亮的帮助:<alex.shi@linux.alibaba.com>。
diff --git a/Documentation/translations/zh_CN/index.rst b/Documentation/translations/zh_CN/index.rst
index 75956d669962..d3165535ec9e 100644
--- a/Documentation/translations/zh_CN/index.rst
+++ b/Documentation/translations/zh_CN/index.rst
@@ -3,10 +3,19 @@
\renewcommand\thesection*
\renewcommand\thesubsection*
-Chinese translations
-====================
+中文翻译
+========
+
+这些手册包含有关如何开发内核的整体信息。内核社区非常庞大,一年下来有数千名开发
+人员做出贡献。 与任何大型社区一样,知道如何完成任务将使得更改合并的过程变得更
+加容易。
.. toctree::
- :maxdepth: 1
+ :maxdepth: 2
+
+ process/index
+
+目录和表格
+----------
- coding-style
+* :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/magic-number.txt b/Documentation/translations/zh_CN/magic-number.txt
deleted file mode 100644
index 7159cec04090..000000000000
--- a/Documentation/translations/zh_CN/magic-number.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-Chinese translated version of Documentation/process/magic-number.rst
-
-If you have any comment or update to the content, please post to LKML directly.
-However, if you have problem communicating in English you can also ask the
-Chinese maintainer for help. Contact the Chinese maintainer, if this
-translation is outdated or there is problem with translation.
-
-Chinese maintainer: Jia Wei Wei <harryxiyou@gmail.com>
----------------------------------------------------------------------
-Documentation/process/magic-number.rst的中文翻译
-
-如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,也可
-以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版维护者。
-
-中文版维护者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
-中文版翻译者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
-中文版校译者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
-
-以下为正文
----------------------------------------------------------------------
-这个文件是有关当前使用的魔术值注册表。当你给一个结构添加了一个魔术值,你也应该把这个魔术值添加到这个文件,因为我们最好把用于各种结构的魔术值统一起来。
-
-使用魔术值来保护内核数据结构是一个非常好的主意。这就允许你在运行期检查(a)一个结构是否已经被攻击,或者(b)你已经给一个例行程序通过了一个错误的结构。后一种情况特别地有用---特别是当你通过一个空指针指向结构体的时候。tty源码,例如,经常通过特定驱动使用这种方法并且反复地排列特定方面的结构。
-
-使用魔术值的方法是在结构的开始处声明的,如下:
-
-struct tty_ldisc {
- int magic;
- ...
-};
-
-当你以后给内核添加增强功能的时候,请遵守这条规则!这样就会节省数不清的调试时间,特别是一些古怪的情况,例如,数组超出范围并且重新写了超出部分。遵守这个规则,‪这些情况可以被快速地,安全地避免。
-
- Theodore Ts'o
- 31 Mar 94
-
-给当前的Linux 2.1.55添加魔术表。
-
- Michael Chastain
- <mailto:mec@shout.net>
- 22 Sep 1997
-
-现在应该最新的Linux 2.1.112.因为在特性冻结期间,不能在2.2.x前改变任何东西。这些条目被数域所排序。
-
- Krzysztof G.Baranowski
- <mailto: kgb@knm.org.pl>
- 29 Jul 1998
-
-更新魔术表到Linux 2.5.45。刚好越过特性冻结,但是有可能还会有一些新的魔术值在2.6.x之前融入到内核中。
-
- Petr Baudis
- <pasky@ucw.cz>
- 03 Nov 2002
-
-更新魔术表到Linux 2.5.74。
-
- Fabian Frederick
- <ffrederick@users.sourceforge.net>
- 09 Jul 2003
-
-魔术名 地址 结构 所在文件
-===========================================================================
-PG_MAGIC 'P' pg_{read,write}_hdr include/linux/pg.h
-CMAGIC 0x0111 user include/linux/a.out.h
-MKISS_DRIVER_MAGIC 0x04bf mkiss_channel drivers/net/mkiss.h
-HDLC_MAGIC 0x239e n_hdlc drivers/char/n_hdlc.c
-APM_BIOS_MAGIC 0x4101 apm_user arch/x86/kernel/apm_32.c
-CYCLADES_MAGIC 0x4359 cyclades_port include/linux/cyclades.h
-DB_MAGIC 0x4442 fc_info drivers/net/iph5526_novram.c
-DL_MAGIC 0x444d fc_info drivers/net/iph5526_novram.c
-FASYNC_MAGIC 0x4601 fasync_struct include/linux/fs.h
-FF_MAGIC 0x4646 fc_info drivers/net/iph5526_novram.c
-ISICOM_MAGIC 0x4d54 isi_port include/linux/isicom.h
-PTY_MAGIC 0x5001 drivers/char/pty.c
-PPP_MAGIC 0x5002 ppp include/linux/if_pppvar.h
-SERIAL_MAGIC 0x5301 async_struct include/linux/serial.h
-SSTATE_MAGIC 0x5302 serial_state include/linux/serial.h
-SLIP_MAGIC 0x5302 slip drivers/net/slip.h
-STRIP_MAGIC 0x5303 strip drivers/net/strip.c
-X25_ASY_MAGIC 0x5303 x25_asy drivers/net/x25_asy.h
-SIXPACK_MAGIC 0x5304 sixpack drivers/net/hamradio/6pack.h
-AX25_MAGIC 0x5316 ax_disp drivers/net/mkiss.h
-TTY_MAGIC 0x5401 tty_struct include/linux/tty.h
-MGSL_MAGIC 0x5401 mgsl_info drivers/char/synclink.c
-TTY_DRIVER_MAGIC 0x5402 tty_driver include/linux/tty_driver.h
-MGSLPC_MAGIC 0x5402 mgslpc_info drivers/char/pcmcia/synclink_cs.c
-TTY_LDISC_MAGIC 0x5403 tty_ldisc include/linux/tty_ldisc.h
-USB_SERIAL_MAGIC 0x6702 usb_serial drivers/usb/serial/usb-serial.h
-FULL_DUPLEX_MAGIC 0x6969 drivers/net/ethernet/dec/tulip/de2104x.c
-USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth drivers/usb/class/bluetty.c
-RFCOMM_TTY_MAGIC 0x6d02 net/bluetooth/rfcomm/tty.c
-USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port drivers/usb/serial/usb-serial.h
-CG_MAGIC 0x00090255 ufs_cylinder_group include/linux/ufs_fs.h
-RPORT_MAGIC 0x00525001 r_port drivers/char/rocket_int.h
-LSEMAGIC 0x05091998 lse drivers/fc4/fc.c
-GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str drivers/scsi/gdth_ioctl.h
-RIEBL_MAGIC 0x09051990 drivers/net/atarilance.c
-NBD_REQUEST_MAGIC 0x12560953 nbd_request include/linux/nbd.h
-RED_MAGIC2 0x170fc2a5 (any) mm/slab.c
-BAYCOM_MAGIC 0x19730510 baycom_state drivers/net/baycom_epp.c
-ISDN_X25IFACE_MAGIC 0x1e75a2b9 isdn_x25iface_proto_data
- drivers/isdn/isdn_x25iface.h
-ECP_MAGIC 0x21504345 cdkecpsig include/linux/cdk.h
-LSOMAGIC 0x27091997 lso drivers/fc4/fc.c
-LSMAGIC 0x2a3b4d2a ls drivers/fc4/fc.c
-WANPIPE_MAGIC 0x414C4453 sdla_{dump,exec} include/linux/wanpipe.h
-CS_CARD_MAGIC 0x43525553 cs_card sound/oss/cs46xx.c
-LABELCL_MAGIC 0x4857434c labelcl_info_s include/asm/ia64/sn/labelcl.h
-ISDN_ASYNC_MAGIC 0x49344C01 modem_info include/linux/isdn.h
-CTC_ASYNC_MAGIC 0x49344C01 ctc_tty_info drivers/s390/net/ctctty.c
-ISDN_NET_MAGIC 0x49344C02 isdn_net_local_s drivers/isdn/i4l/isdn_net_lib.h
-SAVEKMSG_MAGIC2 0x4B4D5347 savekmsg arch/*/amiga/config.c
-CS_STATE_MAGIC 0x4c4f4749 cs_state sound/oss/cs46xx.c
-SLAB_C_MAGIC 0x4f17a36d kmem_cache mm/slab.c
-COW_MAGIC 0x4f4f4f4d cow_header_v1 arch/um/drivers/ubd_user.c
-I810_CARD_MAGIC 0x5072696E i810_card sound/oss/i810_audio.c
-TRIDENT_CARD_MAGIC 0x5072696E trident_card sound/oss/trident.c
-ROUTER_MAGIC 0x524d4157 wan_device [in wanrouter.h pre 3.9]
-SAVEKMSG_MAGIC1 0x53415645 savekmsg arch/*/amiga/config.c
-GDA_MAGIC 0x58464552 gda arch/mips/include/asm/sn/gda.h
-RED_MAGIC1 0x5a2cf071 (any) mm/slab.c
-EEPROM_MAGIC_VALUE 0x5ab478d2 lanai_dev drivers/atm/lanai.c
-HDLCDRV_MAGIC 0x5ac6e778 hdlcdrv_state include/linux/hdlcdrv.h
-PCXX_MAGIC 0x5c6df104 channel drivers/char/pcxx.h
-KV_MAGIC 0x5f4b565f kernel_vars_s arch/mips/include/asm/sn/klkernvars.h
-I810_STATE_MAGIC 0x63657373 i810_state sound/oss/i810_audio.c
-TRIDENT_STATE_MAGIC 0x63657373 trient_state sound/oss/trident.c
-M3_CARD_MAGIC 0x646e6f50 m3_card sound/oss/maestro3.c
-FW_HEADER_MAGIC 0x65726F66 fw_header drivers/atm/fore200e.h
-SLOT_MAGIC 0x67267321 slot drivers/hotplug/cpqphp.h
-SLOT_MAGIC 0x67267322 slot drivers/hotplug/acpiphp.h
-LO_MAGIC 0x68797548 nbd_device include/linux/nbd.h
-OPROFILE_MAGIC 0x6f70726f super_block drivers/oprofile/oprofilefs.h
-M3_STATE_MAGIC 0x734d724d m3_state sound/oss/maestro3.c
-VMALLOC_MAGIC 0x87654320 snd_alloc_track sound/core/memory.c
-KMALLOC_MAGIC 0x87654321 snd_alloc_track sound/core/memory.c
-PWC_MAGIC 0x89DC10AB pwc_device drivers/usb/media/pwc.h
-NBD_REPLY_MAGIC 0x96744668 nbd_reply include/linux/nbd.h
-ENI155_MAGIC 0xa54b872d midway_eprom drivers/atm/eni.h
-CODA_MAGIC 0xC0DAC0DA coda_file_info include/linux/coda_fs_i.h
-DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram drivers/scsi/gdth.h
-YAM_MAGIC 0xF10A7654 yam_port drivers/net/hamradio/yam.c
-CCB_MAGIC 0xf2691ad2 ccb drivers/scsi/ncr53c8xx.c
-QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry drivers/scsi/arm/queue.c
-QUEUE_MAGIC_USED 0xf7e1cc33 queue_entry drivers/scsi/arm/queue.c
-HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
-NMI_MAGIC 0x48414d4d455201 nmi_s arch/mips/include/asm/sn/nmi.h
-
-请注意,在声音记忆管理中仍然有一些特殊的为每个驱动定义的魔术值。查看include/sound/sndmagic.h来获取他们完整的列表信息。很多OSS声音驱动拥有自己从声卡PCI ID构建的魔术值-他们也没有被列在这里。
-
-IrDA子系统也使用了大量的自己的魔术值,查看include/net/irda/irda.h来获取他们完整的信息。
-
-HFS是另外一个比较大的使用魔术值的文件系统-你可以在fs/hfs/hfs.h中找到他们。
diff --git a/Documentation/translations/zh_CN/oops-tracing.txt b/Documentation/translations/zh_CN/oops-tracing.txt
index a893f04dfd5d..93fa061cf9e4 100644
--- a/Documentation/translations/zh_CN/oops-tracing.txt
+++ b/Documentation/translations/zh_CN/oops-tracing.txt
@@ -16,7 +16,7 @@ Documentation/admin-guide/bug-hunting.rst 的中文翻译
中文版维护者: 杨瑞 Dave Young <hidave.darkstar@gmail.com>
中文版翻译者: 杨瑞 Dave Young <hidave.darkstar@gmail.com>
-中文版校译者: 李阳 Li Yang <leo@zh-kernel.org>
+中文版校译者: 李阳 Li Yang <leoyang.li@nxp.com>
王聪 Wang Cong <xiyou.wangcong@gmail.com>
以下为正文
diff --git a/Documentation/translations/zh_CN/process/1.Intro.rst b/Documentation/translations/zh_CN/process/1.Intro.rst
new file mode 100644
index 000000000000..10a15f3dc282
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/1.Intro.rst
@@ -0,0 +1,186 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/1.Intro.rst <development_process_intro>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_process_intro:
+
+介绍
+====
+
+执行摘要
+--------
+
+本节的其余部分涵盖了内核开发过程的范围,以及开发人员及其雇主在这方面可能遇
+到的各种挫折。内核代码应该合并到正式的(“主线”)内核中有很多原因,包括对用
+户的自动可用性、多种形式的社区支持以及影响内核开发方向的能力。提供给Linux
+内核的代码必须在与GPL兼容的许可证下可用。
+
+:ref:`cn_development_process` 介绍了开发过程、内核发布周期和合并窗口的机制。
+涵盖了补丁开发、审查和合并周期中的各个阶段。有一些关于工具和邮件列表的讨论。
+鼓励希望开始内核开发的开发人员作为初始练习跟踪并修复bug。
+
+
+:ref:`cn_development_early_stage` 包括早期项目规划,重点是尽快让开发社区参与
+
+:ref:`cn_development_coding` 是关于编码过程的;讨论了其他开发人员遇到的几个
+陷阱。对补丁的一些要求已经涵盖,并且介绍了一些工具,这些工具有助于确保内核
+补丁是正确的。
+
+:ref:`cn_development_posting` 讨论发布补丁以供评审的过程。为了让开发社区
+认真对待,补丁必须正确格式化和描述,并且必须发送到正确的地方。遵循本节中的
+建议有助于确保为您的工作提供最好的接纳。
+
+:ref:`cn_development_followthrough` 介绍了发布补丁之后发生的事情;该工作
+在这一点上还远远没有完成。与审阅者一起工作是开发过程中的一个重要部分;本节
+提供了一些关于如何在这个重要阶段避免问题的提示。当补丁被合并到主线中时,
+开发人员要注意不要假定任务已经完成。
+
+:ref:`cn_development_advancedtopics` 介绍了两个“高级”主题:
+使用Git管理补丁和查看其他人发布的补丁。
+
+:ref:`cn_development_conclusion` 总结了有关内核开发的更多信息,并附有
+指向相关资源的链接。
+
+这个文件是关于什么的
+--------------------
+
+Linux内核有超过800万行代码,每个版本的贡献者超过1000人,是现存最大、最活跃
+的免费软件项目之一。从1991年开始,这个内核已经发展成为一个最好的操作系统
+组件,运行在袖珍数字音乐播放器、台式PC、现存最大的超级计算机以及所有类型的
+系统上。它是一种适用于几乎任何情况的健壮、高效和可扩展的解决方案。
+
+随着Linux的发展,希望参与其开发的开发人员(和公司)的数量也在增加。硬件供应商
+希望确保Linux能够很好地支持他们的产品,使这些产品对Linux用户具有吸引力。嵌入
+式系统供应商使用Linux作为集成产品的组件,希望Linux能够尽可能地胜任手头的任务。
+分销商和其他基于Linux的软件供应商对Linux内核的功能、性能和可靠性有着明确的
+兴趣。最终用户也常常希望修改Linux,使之更好地满足他们的需求。
+
+Linux最引人注目的特性之一是这些开发人员可以访问它;任何具备必要技能的人都可以
+改进Linux并影响其开发方向。专有产品不能提供这种开放性,这是自由软件的一个特点。
+但是,如果有什么不同的话,内核比大多数其他自由软件项目更开放。一个典型的三个月
+内核开发周期可以涉及1000多个开发人员,他们为100多个不同的公司
+(或者根本没有公司)工作。
+
+与内核开发社区合作并不是特别困难。但是,尽管如此,许多潜在的贡献者在尝试做
+内核工作时遇到了困难。内核社区已经发展了自己独特的操作方式,使其能够在每天
+都要更改数千行代码的环境中顺利运行(并生成高质量的产品)。因此,Linux内核开发
+过程与专有的开发方法有很大的不同也就不足为奇了。
+
+对于新开发人员来说,内核的开发过程可能会让人感到奇怪和恐惧,但这个背后有充分的
+理由和坚实的经验。一个不了解内核社区的方式的开发人员(或者更糟的是,他们试图
+抛弃或规避内核社区的方式)会有一个令人沮丧的体验。开发社区, 在帮助那些试图学习
+的人的同时,没有时间帮助那些不愿意倾听或不关心开发过程的人。
+
+希望阅读本文的人能够避免这种令人沮丧的经历。这里有很多材料,但阅读时所做的
+努力会在短时间内得到回报。开发社区总是需要能让内核变更好的开发人员;下面的
+文本应该帮助您或为您工作的人员加入我们的社区。
+
+致谢
+----
+
+本文件由Jonathan Corbet撰写,corbet@lwn.net。以下人员的建议使之更为完善:
+Johannes Berg, James Berry, Alex Chiang, Roland Dreier, Randy Dunlap,
+Jake Edge, Jiri Kosina, Matt Mackall, Arthur Marsh, Amanda McPherson,
+Andrew Morton, Andrew Price, Tsugikazu Shibata, 和 Jochen Voß.
+
+这项工作得到了Linux基金会的支持,特别感谢Amanda McPherson,他看到了这项工作
+的价值并把它变成现实。
+
+代码进入主线的重要性
+--------------------
+
+有些公司和开发人员偶尔会想,为什么他们要费心学习如何与内核社区合作,并将代码
+放入主线内核(“主线”是由Linus Torvalds维护的内核,Linux发行商将其用作基础)。
+在短期内,贡献代码看起来像是一种可以避免的开销;仅仅将代码分开并直接支持用户
+似乎更容易。事实上,保持代码独立(“树外”)在经济上是错误的。
+
+作为说明树外代码成本的一种方法,下面是内核开发过程的一些相关方面;本文稍后将
+更详细地讨论其中的大部分内容。考虑:
+
+- 所有Linux用户都可以使用合并到主线内核中的代码。它将自动出现在所有启用它的
+ 发行版上。不需要驱动程序磁盘、下载,也不需要为多个发行版的多个版本提供支持;
+ 对于开发人员和用户来说,这一切都是可行的。并入主线解决了大量的分布和支持问题
+
+- 当内核开发人员努力维护一个稳定的用户空间接口时,内部内核API处于不断变化之中.
+ 缺乏一个稳定的内部接口是一个深思熟虑的设计决策;它允许在任何时候进行基本的改
+ 进,并产生更高质量的代码。但该策略的一个结果是,如果要使用新的内核,任何树外
+ 代码都需要持续的维护。维护树外代码需要大量的工作才能使代码保持工作状态。
+
+ 相反,位于主线中的代码不需要这样做,因为一个简单的规则要求进行API更改的任何
+ 开发人员也必须修复由于该更改而破坏的任何代码。因此,合并到主线中的代码大大
+ 降低了维护成本。
+
+- 除此之外,内核中的代码通常会被其他开发人员改进。令人惊讶的结果可能来自授权
+ 您的用户社区和客户改进您的产品。
+
+- 内核代码在合并到主线之前和之后都要经过审查。不管原始开发人员的技能有多强,
+ 这个审查过程总是能找到改进代码的方法。审查经常发现严重的错误和安全问题。
+ 这对于在封闭环境中开发的代码尤其如此;这种代码从外部开发人员的审查中获益
+ 匪浅。树外代码是低质量代码。
+
+- 参与开发过程是您影响内核开发方向的方式。旁观者的抱怨会被听到,但是活跃的
+ 开发人员有更强的声音——并且能够实现使内核更好地满足其需求的更改。
+
+- 当单独维护代码时,总是存在第三方为类似功能提供不同实现的可能性。如果发生
+ 这种情况,合并代码将变得更加困难——甚至到了不可能的地步。然后,您将面临以下
+ 令人不快的选择:(1)无限期地维护树外的非标准特性,或(2)放弃代码并将用户
+ 迁移到树内版本。
+
+- 代码的贡献是使整个过程工作的根本。通过贡献代码,您可以向内核添加新功能,并
+ 提供其他内核开发人员使用的功能和示例。如果您已经为Linux开发了代码(或者
+ 正在考虑这样做),那么您显然对这个平台的持续成功感兴趣;贡献代码是确保成功
+ 的最好方法之一。
+
+上述所有理由都适用于任何树外内核代码,包括以专有的、仅二进制形式分发的代码。
+然而,在考虑任何类型的纯二进制内核代码分布之前,还需要考虑其他因素。这些包括:
+
+- 围绕专有内核模块分发的法律问题充其量是模糊的;相当多的内核版权所有者认为,
+ 大多数仅限二进制的模块是内核的派生产品,因此,它们的分发违反了GNU通用公共
+ 许可证(下面将详细介绍)。您的作者不是律师,本文档中的任何内容都不可能被
+ 视为法律建议。封闭源代码模块的真实法律地位只能由法院决定。但不管怎样,困扰
+ 这些模块的不确定性仍然存在。
+
+- 二进制模块大大增加了调试内核问题的难度,以至于大多数内核开发人员甚至都不会
+ 尝试。因此,只分发二进制模块将使您的用户更难从社区获得支持。
+
+- 对于只支持二进制的模块的发行者来说,支持也更加困难,他们必须为他们希望支持
+ 的每个发行版和每个内核版本提供一个版本的模块。为了提供相当全面的覆盖范围,
+ 可能需要一个模块的几十个构建,并且每次升级内核时,您的用户都必须单独升级
+ 您的模块。
+
+- 上面提到的关于代码评审的所有问题,对于封闭源代码而言更加严重。由于该代码根本不可
+ 用,因此社区无法对其进行审查,毫无疑问,它将存在严重问题。
+
+尤其是嵌入式系统的制造商,可能会倾向于忽视本节中所说的大部分内容,因为他们
+相信自己正在商用一种使用冻结内核版本的独立产品,在发布后不需要再进行开发。
+这个论点忽略了广泛的代码审查的价值以及允许用户向产品添加功能的价值。但这些
+产品也有有限的商业寿命,之后必须发布新版本的产品。在这一点上,代码在主线上
+并得到良好维护的供应商将能够更好地占位,以使新产品快速上市。
+
+许可
+----
+
+代码是根据一些许可证提供给Linux内核的,但是所有代码都必须与GNU通用公共许可
+证(GPLV2)的版本2兼容,该版本是覆盖整个内核分发的许可证。在实践中,这意味
+着所有代码贡献都由GPLv2(可选地,语言允许在更高版本的GPL下分发)或3子句BSD
+许可(New BSD License, 译者注)覆盖。任何不包含在兼容许可证中的贡献都不会
+被接受到内核中。
+
+贡献给内核的代码不需要(或请求)版权分配。合并到主线内核中的所有代码都保留
+其原始所有权;因此,内核现在拥有数千个所有者。
+
+这种所有权结构的一个暗示是,任何改变内核许可的尝试都注定会失败。很少有实际
+的场景可以获得所有版权所有者的同意(或者从内核中删除他们的代码)。因此,特
+别是,在可预见的将来,不可能迁移到GPL的版本3。
+
+所有贡献给内核的代码都必须是合法的免费软件。因此,不接受匿名(或化名)贡献
+者的代码。所有贡献者都需要在他们的代码上“sign off”,声明代码可以在GPL下与内
+核一起分发。未被其所有者许可为免费软件的代码,或可能给内核造成版权相关问题
+的代码(例如,由缺乏适当保护的反向工程工作派生的代码),都不能被接受。
+
+有关版权相关问题的问题在Linux开发邮件列表中很常见。这样的问题通常会得到不少
+答案,但要记住,回答这些问题的人不是律师,不能提供法律咨询。如果您有关于
+Linux源代码的法律问题,那么与了解该领域的律师交流是无法替代的。依靠从技术
+邮件列表中获得的答案是一件冒险的事情。
+
diff --git a/Documentation/translations/zh_CN/process/2.Process.rst b/Documentation/translations/zh_CN/process/2.Process.rst
new file mode 100644
index 000000000000..ceb733bb0294
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/2.Process.rst
@@ -0,0 +1,360 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/2.Process.rst <development_process>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_process:
+
+开发流程如何工作
+================
+
+90年代早期的Linux内核开发是一件相当松散的事情,涉及的用户和开发人员相对较
+少。由于拥有数以百万计的用户群,并且在一年的时间里有大约2000名开发人员参与
+进来,内核因此必须发展许多流程来保持开发的顺利进行。要成为流程的有效组成
+部分,需要对流程的工作方式有一个扎实的理解。
+
+总览
+----
+
+内核开发人员使用一个松散的基于时间的发布过程,每两到三个月发布一次新的主要
+内核版本。最近的发布历史记录如下:
+
+ ====== =================
+ 4.11 四月 30, 2017
+ 4.12 七月 2, 2017
+ 4.13 九月 3, 2017
+ 4.14 十一月 12, 2017
+ 4.15 一月 28, 2018
+ 4.16 四月 1, 2018
+ ====== =================
+
+每4.x版本都是一个主要的内核版本,具有新特性、内部API更改等等。一个典型的4.x
+版本包含大约13000个变更集,变更了几十万行代码。因此,4.x是Linux内核开发的前
+沿;内核使用滚动开发模型,不断集成重大变化。
+
+对于每个版本的补丁合并,遵循一个相对简单的规则。在每个开发周期的开始,“合并
+窗口”被打开。当时,被认为足够稳定(并且被开发社区接受)的代码被合并到主线内
+核中。在这段时间内,新开发周期的大部分变更(以及所有主要变更)将以接近每天
+1000次变更(“补丁”或“变更集”)的速度合并。
+
+(顺便说一句,值得注意的是,合并窗口期间集成的更改并不是凭空产生的;它们是
+提前收集、测试和分级的。稍后将详细描述该过程的工作方式)。
+
+合并窗口持续大约两周。在这段时间结束时,Linus Torvalds将声明窗口已关闭,并
+发布第一个“rc”内核。例如,对于目标为4.14的内核,在合并窗口结束时的这次发布
+将被称为4.14-rc1。RC1版本是一个信号,表示合并新特性的时间已经过去,稳定下一
+个内核的时间已经开始。
+
+在接下来的6到10周内,只有修复问题的补丁才应该提交给主线。有时会允许更大的
+更改,但这种情况很少发生;试图在合并窗口外合并新功能的开发人员往往会受到不
+友好的接待。一般来说,如果您错过了给定特性的合并窗口,最好的做法是等待下一
+个开发周期。(对于以前不支持的硬件,偶尔会对驱动程序进行例外;如果它们不
+改变已有代码,则不会导致回归,并且应该可以随时安全地添加)。
+
+随着修复程序进入主线,补丁速度将随着时间的推移而变慢。Linus大约每周发布一次
+新的-rc内核;一个正常的系列将在-rc6和-rc9之间,内核被认为足够稳定并最终发布。
+然后,整个过程又重新开始了。
+
+例如,这里是4.16的开发周期进行情况(2018年的所有日期):
+
+ ============== ==============================
+ 一月 28 4.15 稳定版发布
+ 二月 11 4.16-rc1, 合并窗口关闭
+ 二月 18 4.16-rc2
+ 二月 25 4.16-rc3
+ 三月 4 4.16-rc4
+ 三月 11 4.16-rc5
+ 三月 18 4.16-rc6
+ 三月 25 4.16-rc7
+ 四月 1 4.16 稳定版发布
+ ============== ==============================
+
+开发人员如何决定何时结束开发周期并创建稳定的版本?使用的最重要的指标是以前
+版本的回归列表。不欢迎出现任何错误,但是那些破坏了以前能工作的系统的错误被
+认为是特别严重的。因此,导致回归的补丁是不受欢迎的,很可能在稳定期内删除。
+
+开发人员的目标是在稳定发布之前修复所有已知的回归。在现实世界中,这种完美是
+很难实现的;在这种规模的项目中,变量太多了。有一点,延迟最终版本只会使问题
+变得更糟;等待下一个合并窗口的一堆更改将变大,从而在下次创建更多的回归错误。
+因此,大多数4.x内核都有一些已知的回归错误,不过,希望没有一个是严重的。
+
+一旦一个稳定的版本发布,它正在进行的维护工作就被移交给“稳定团队”,目前由
+Greg Kroah-Hartman组成。稳定团队将使用4.x.y编号方案不定期的发布稳定版本的更
+新。要加入更新版本,补丁程序必须(1)修复一个重要的bug,(2)已经合并到
+下一个开发主线中。内核通常会在超过其初始版本的一个以上的开发周期内接收稳定
+的更新。例如,4.13内核的历史如下
+
+ ============== ===============================
+ 九月 3 4.13 稳定版发布
+ 九月 13 4.13.1
+ 九月 20 4.13.2
+ 九月 27 4.13.3
+ 十月 5 4.13.4
+ 十月 12 4.13.5
+ ... ...
+ 十一月 24 4.13.16
+ ============== ===============================
+
+4.13.16是4.13版本的最终稳定更新。
+
+有些内核被指定为“长期”内核;它们将得到更长时间的支持。在本文中,当前的长期
+内核及其维护者是:
+
+ ====== ====================== ==============================
+ 3.16 Ben Hutchings (长期稳定内核)
+ 4.1 Sasha Levin
+ 4.4 Greg Kroah-Hartman (长期稳定内核)
+ 4.9 Greg Kroah-Hartman
+ 4.14 Greg Kroah-Hartman
+ ====== ====================== ==============================
+
+为长期支持选择内核纯粹是维护人员有必要和时间来维护该版本的问题。目前还没有
+为即将发布的任何特定版本提供长期支持的已知计划。
+
+补丁的生命周期
+--------------
+
+补丁不会直接从开发人员的键盘进入主线内核。相反,有一个稍微复杂(如果有些非
+正式)的过程,旨在确保对每个补丁进行质量审查,并确保每个补丁实现了一个在主线
+中需要的更改。对于小的修复,这个过程可能会很快发生,或者,在大的和有争议的
+变更的情况下,会持续数年。许多开发人员的挫折来自于对这个过程缺乏理解或者
+试图绕过它。
+
+为了减少这种挫折感,本文将描述补丁如何进入内核。下面是一个介绍,它以某种
+理想化的方式描述了这个过程。更详细的过程将在后面的章节中介绍。
+
+补丁程序经历的阶段通常是:
+
+- 设计。这就是补丁的真正需求——以及满足这些需求的方式——的所在。设计工作通常
+ 是在不涉及社区的情况下完成的,但是如果可能的话,最好是在公开的情况下完成
+ 这项工作;这样可以节省很多稍后再重新设计的时间。
+
+- 早期评审。补丁被发布到相关的邮件列表中,列表中的开发人员会回复他们可能有
+ 的任何评论。如果一切顺利的话,这个过程应该会发现补丁的任何主要问题。
+
+- 更广泛的评审。当补丁接近准备好纳入主线时,它应该被相关的子系统维护人员
+ 接受——尽管这种接受并不能保证补丁会一直延伸到主线。补丁将出现在维护人员的
+ 子系统树中,并进入 -next 树(如下所述)。当流程工作时,此步骤将导致对补丁
+ 进行更广泛的审查,并发现由于将此补丁与其他人所做的工作集成而导致的任何
+ 问题。
+
+- 请注意,大多数维护人员也有日常工作,因此合并补丁可能不是他们的最高优先级。
+ 如果您的补丁程序得到了关于所需更改的反馈,那么您应该进行这些更改,或者为
+ 不应该进行这些更改的原因辩护。如果您的补丁没有评审意见,但没有被其相应的
+ 子系统或驱动程序维护者接受,那么您应该坚持不懈地将补丁更新到当前内核,使
+ 其干净地应用,并不断地将其发送以供审查和合并。
+
+- 合并到主线。最终,一个成功的补丁将被合并到由Linus Torvalds管理的主线存储库
+ 中。此时可能会出现更多的评论和/或问题;开发人员应对这些问题并解决出现的
+ 任何问题很重要。
+
+- 稳定版发布。可能受补丁影响的用户数量现在很大,因此可能再次出现新的问题。
+
+- 长期维护。虽然开发人员在合并代码后可能会忘记代码,但这种行为往往会给开发
+ 社区留下不良印象。合并代码消除了一些维护负担,因为其他代码将修复由API
+ 更改引起的问题。但是,如果代码要长期保持有用,原始开发人员应该继续为
+ 代码负责。
+
+内核开发人员(或他们的雇主)犯的最大错误之一是试图将流程简化为一个
+“合并到主线”步骤。这种方法总是会让所有相关人员感到沮丧。
+
+补丁如何进入内核
+----------------
+
+只有一个人可以将补丁合并到主线内核存储库中:Linus Torvalds。但是,在进入
+2.6.38内核的9500多个补丁中,只有112个(大约1.3%)是由Linus自己直接选择的。
+内核项目已经发展到一个规模,没有一个开发人员可以在没有支持的情况下检查和
+选择每个补丁。内核开发人员处理这种增长的方式是通过使用围绕信任链构建的
+助理系统。
+
+内核代码库在逻辑上被分解为一组子系统:网络、特定的体系结构支持、内存管理、
+视频设备等。大多数子系统都有一个指定的维护人员,开发人员对该子系统中的代码
+负有全部责任。这些子系统维护者(松散地)是他们所管理的内核部分的守护者;
+他们(通常)会接受一个补丁以包含到主线内核中。
+
+子系统维护人员每个人都使用git源代码管理工具管理自己版本的内核源代码树。Git
+等工具(以及Quilt或Mercurial等相关工具)允许维护人员跟踪补丁列表,包括作者
+信息和其他元数据。在任何给定的时间,维护人员都可以确定他或她的存储库中的哪
+些补丁在主线中找不到。
+
+当合并窗口打开时,顶级维护人员将要求Linus从其存储库中“拉出”他们为合并选择
+的补丁。如果Linus同意,补丁流将流向他的存储库,成为主线内核的一部分。
+Linus对拉操作中接收到的特定补丁的关注程度各不相同。很明显,有时他看起来很
+关注。但是,作为一般规则,Linus相信子系统维护人员不会向上游发送坏补丁。
+
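+(仅作示意,其中的版本号、URL 和分支名都是假设的)这样的“拉取请求”通常使用
+``git request-pull`` 生成,例如::
+
+ git request-pull v4.15 git://example.org/pub/scm/my-subsystem.git for-linus
+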
+子系统维护人员反过来也可以从其他维护人员那里获取补丁。例如,网络树是由首先
+在专用于网络设备驱动程序、无线网络等的树中积累的补丁构建的。此存储链可以
+任意长,但很少超过两个或三个链接。由于链中的每个维护者都信任那些管理较低
+级别树的维护者,所以这个过程称为“信任链”。
+
+显然,在这样的系统中,获取内核补丁取决于找到正确的维护者。直接向Linus发送
+补丁通常不是正确的方法。
+
+Next 树
+-------
+
+子系统树链引导补丁流到内核,但它也提出了一个有趣的问题:如果有人想查看为
+下一个合并窗口准备的所有补丁怎么办?开发人员将感兴趣的是,还有什么其他的
+更改有待解决,以查看是否存在需要担心的冲突;例如,更改核心内核函数原型的
+修补程序将与使用该函数旧形式的任何其他修补程序冲突。审查人员和测试人员希望
+在所有这些变更到达主线内核之前,能够访问它们的集成形式中的变更。您可以从所有
+有趣的子系统树中提取更改,但这将是一项大型且容易出错的工作。
+
+答案以-next树的形式出现,在这里子系统树被收集以供测试和审查。Andrew Morton
+维护的这些旧树被称为“-mm”(用于内存管理,这就是它的启动名字)。-mm 树集成了
+一长串子系统树中的补丁;它还包含一些旨在帮助调试的补丁。
+
+除此之外,-mm 还包含大量由Andrew直接选择的补丁。这些补丁可能已经发布在邮件
+列表上,或者它们可能应用于内核中没有指定子系统树的部分。结果,-mm 作为一种
+最后手段的子系统树运行;如果没有其他明显的路径可以让补丁进入主线,那么它很
+可能以-mm 结束。累积在-mm 中的各种补丁最终将被转发到适当的子系统树,或者直接
+发送到Linus。在典型的开发周期中,大约5-10%的补丁通过-mm 进入主线。
+
+当前-mm 补丁可在“mmotm”(-mm of the moment)目录中找到,地址:
+
+ http://www.ozlabs.org/~akpm/mmotm/
+
+然而,使用mmotm树可能是一种令人沮丧的体验;它甚至可能无法编译。
+
+下一个周期补丁合并的主要树是linux-next,由Stephen Rothwell 维护。根据设计
+linux-next 是下一个合并窗口关闭后主线的快照。linux-next树在Linux-kernel 和
+Linux-next 邮件列表中发布,可从以下位置下载:
+
+ http://www.kernel.org/pub/linux/kernel/next/
+
+Linux-next 已经成为内核开发过程中不可或缺的一部分;在一个给定的合并窗口中合并
+的所有补丁都应该在合并窗口打开之前的一段时间内找到进入Linux-next 的方法。
+
+Staging 树
+----------
+
+内核源代码树包含 drivers/staging/ 目录,其中有许多驱动程序或文件系统的
+子目录正在被添加到内核树中。它们在仍需要更多工作的时候可以保留在
+drivers/staging/ 目录中;一旦完成,就可以将它们移到内核中。这是一种跟踪不符合
+Linux内核编码或质量标准的驱动程序的方法,但人们可能希望使用它们并跟踪开发。
+
+Greg Kroah Hartman 目前负责维护staging 树。仍需要工作的驱动程序将发送给他,
+每个驱动程序在drivers/staging/中都有自己的子目录。除了驱动程序源文件之外,
+目录中还应该有一个TODO文件。todo文件列出了驱动程序需要接受的挂起的工作,
+以及驱动程序的任何补丁都应该抄送的人员列表。当前的规则要求,staging的驱动
+程序必须至少正确编译。
+
+Staging 是一种相对容易的方法,可以让新的驱动程序进入主线,幸运的是,他们会
+引起其他开发人员的注意,并迅速改进。然而,进入staging并不是故事的结尾;
+staging中没有看到常规进展的代码最终将被删除。经销商也倾向于相对不愿意使用
+staging驱动程序。因此,在成为一名合适的主线驱动的路上,staging 充其量只是
+一个停留。
+
+工具
+----
+
+从上面的文本可以看出,内核开发过程在很大程度上依赖于在不同方向上聚集补丁的
+能力。如果没有适当强大的工具,整个系统将无法在任何地方正常工作。关于如何使用
+这些工具的教程远远超出了本文档的范围,但是还是有一些指南的空间。
+
+到目前为止,内核社区使用的主要源代码管理系统是git。Git是在自由软件社区中开发
+的许多分布式版本控制系统之一。它非常适合内核开发,因为它在处理大型存储库和
+大量补丁时性能非常好。它还有一个难以学习和使用的名声,尽管随着时间的推移它
+变得更好了。对于内核开发人员来说,对Git的某种熟悉几乎是一种要求;即使他们不
+将它用于自己的工作,他们也需要Git来跟上其他开发人员(以及主线)正在做的事情。
+
+现在几乎所有的Linux发行版都打包了Git。主页位于:
+
+ http://git-scm.com/
+
+那个页面有指向文档和教程的指针。
+
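+例如(仅作示意),可以用下面的命令克隆主线内核仓库,以便跟踪主线和其他
+开发者的工作::
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+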
+在不使用git的内核开发人员中,最流行的选择几乎肯定是mercurial:
+
+ http://www.selenic.com/mercurial/
+
+Mercurial与Git共享许多特性,但它提供了一个界面,许多人觉得它更易于使用。
+
+另一个值得了解的工具是quilt:
+
+ http://savannah.nongnu.org/projects/quilt
+
+Quilt 是一个补丁管理系统,而不是源代码管理系统。它不会随着时间的推移跟踪历史;
+相反,它面向根据不断发展的代码库跟踪一组特定的更改。一些主要的子系统维护人员
+使用Quilt来管理打算向上游移动的补丁。对于某些树的管理(例如-mm),quilt 是
+最好的工具。
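+
+作为参考,下面是一个极简的quilt工作流程示意(补丁名和文件路径均为假设的示例,
+并非真实存在)::
+
+    $ quilt new my-fix.patch          # 新建一个补丁并放到补丁栈顶
+    $ quilt edit drivers/foo/bar.c    # 把文件加入补丁并打开编辑器修改
+    $ quilt refresh                   # 把当前改动写回补丁文件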
+
+邮件列表
+--------
+
+大量的Linux内核开发工作是通过邮件列表完成的。如果不在某个地方加入至少一个列表,
+就很难成为社区中一个功能完备的成员。但是,Linux邮件列表对开发人员来说也是一个
+潜在的危险,他们可能会被一堆电子邮件淹没,违反Linux列表上使用的约定,或者
+两者兼而有之。
+
+大多数内核邮件列表都在vger.kernel.org上运行;主列表位于:
+
+ http://vger.kernel.org/vger-lists.html
+
+不过,也有一些列表托管在别处;其中一些列表位于lists.redhat.com。
+
+当然,内核开发的核心邮件列表是linux-kernel。这个名单是一个令人生畏的地方;
+每天的信息量可以达到500条,噪音很高,谈话技术性很强,参与者并不总是表现出
+高度的礼貌。但是,没有其他地方可以让内核开发社区作为一个整体聚集在一起;
+避免使用此列表的开发人员将错过重要信息。
+
+有一些提示可以帮助在linux-kernel生存:
+
+- 将邮件转移到单独的文件夹,而不是主邮箱。我们必须能够持续地忽略洪流。
+
+- 不要试图跟踪每一次谈话-其他人都不会。重要的是要对感兴趣的主题(尽管请
+ 注意,长时间的对话可以在不更改电子邮件主题行的情况下偏离原始主题)和参与
+ 的人进行筛选。
+
+- 不要挑事。如果有人试图激起愤怒的反应,忽略他们。
+
+- 当响应Linux内核电子邮件(或其他列表上的电子邮件)时,请为所有相关人员保留
+ cc:header。如果没有强有力的理由(如明确的请求),则不应删除收件人。一定要
+ 确保你要回复的人在cc:list中。这个惯例也使你不必在回复邮件时明确要求被抄送。
+
+- 在提出问题之前,搜索列表档案(和整个网络)。有些开发人员可能会对那些显然
+ 没有完成家庭作业的人感到不耐烦。
+
+- 避免贴顶帖(把你的答案放在你要回复的引文上面的做法)。这会让你的回答更难
+ 理解,印象也很差。
+
+- 询问正确的邮件列表。linux-kernel 可能是通用的讨论点,但它不是从所有子系统
+ 中寻找开发人员的最佳场所。
+
+最后一点——找到正确的邮件列表——是开发人员出错的常见地方。在Linux内核上提出与
+网络相关的问题的人几乎肯定会收到一个礼貌的建议,转而在netdev列表上提出,
+因为这是大多数网络开发人员经常出现的列表。还有其他列表可用于scsi、
+video4linux、ide、filesystem等子系统。查找邮件列表的最佳位置是与内核源代码
+一起打包的MAINTAINERS文件。
+
+开始内核开发
+------------
+
+关于如何开始内核开发过程的问题很常见——来自个人和公司。同样常见的是错误,这
+使得关系的开始比必须的更困难。
+
+公司通常希望聘请知名的开发人员来启动开发团队。实际上,这是一种有效的技术。
+但它也往往是昂贵的,而且没有增长经验丰富的内核开发人员储备。考虑到时间的
+投入,可以让内部开发人员加快Linux内核的开发速度。花这个时间可以让雇主拥有
+一批了解内核和公司的开发人员,他们也可以帮助培训其他人。从中期来看,这往往
+是更有利可图的方法。
+
+可以理解的是,单个开发人员往往对起步感到茫然。从一个大型项目开始可能会很
+吓人;人们往往想先用一些较小的东西来测试水域。这是一些开发人员开始创建修补
+拼写错误或轻微编码风格问题的补丁的地方。不幸的是,这样的补丁会产生一定程度
+的噪音,这会分散整个开发社区的注意力,因此,越来越多的人看不起它们。希望向
+社区介绍自己的新开发人员将无法通过这些方式获得他们想要的那种接待。
+
+Andrew Morton 为有抱负的内核开发人员提供了这个建议
+
+::
+
+  所有内核初学者的首要任务当然是“确保内核在你能接触到的所有机器上始终
+  完美运行”。通常做到这一点的方法是和其他人一起解决问题(这可能需要
+  坚持不懈!),不过没关系——这本身就是内核开发的一部分。
+
+(http://lwn.net/articles/283982/)
+
+在没有明显问题需要解决的情况下,建议开发人员查看当前的回归和未解决的bug列表。
+解决需要修复的问题没有任何缺点;通过解决这些问题,开发人员将获得处理过程的
+经验,同时与开发社区的其他人建立尊重。
diff --git a/Documentation/translations/zh_CN/process/3.Early-stage.rst b/Documentation/translations/zh_CN/process/3.Early-stage.rst
new file mode 100644
index 000000000000..b8676aec6005
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/3.Early-stage.rst
@@ -0,0 +1,161 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/3.Early-stage.rst <development_early_stage>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_early_stage:
+
+早期规划
+========
+
+当考虑一个Linux内核开发项目时,很可能会直接跳进去开始编码。然而,与任何重要
+的项目一样,成功的许多基础最好是在第一行代码编写之前就做好了。在早期计划和
+沟通中花费一些时间可以节省更多的时间。
+
+详述问题
+--------
+
+与任何工程项目一样,成功的内核增强从要解决的问题的清晰描述开始。在某些情况
+下,这个步骤很容易:例如,当某个特定硬件需要驱动程序时。不过,在其他方面,
+将实际问题与建议的解决方案混淆是很有诱惑力的,这可能会导致困难。
+
+举个例子:几年前,使用Linux音频的开发人员寻求一种方法来运行应用程序,而不因
+系统延迟过大而导致退出或其他工件。他们得到的解决方案是一个内核模块,旨在连
+接到Linux安全模块(LSM)框架中;这个模块可以配置为允许特定的应用程序访问
+实时调度程序。这个模块被实现并发送到Linux内核邮件列表,在那里它立即遇到问题。
+
+对于音频开发人员来说,这个安全模块足以解决他们当前的问题。但是,对于更广泛的
+内核社区来说,这被视为对LSM框架的滥用(LSM框架并不打算授予他们原本不具备的
+进程特权),并对系统稳定性造成风险。他们首选的解决方案包括短期的通过rlimit
+机制进行实时调度访问,以及长期的减少延迟的工作。
+
+然而,音频社区看不到他们实施的特定解决方案的过去;他们不愿意接受替代方案。
+由此产生的分歧使这些开发人员对整个内核开发过程感到失望;其中一个开发人员返回
+到音频列表并发布了以下内容:
+
+ 有很多非常好的Linux内核开发人员,但他们往往会被一群傲慢的傻瓜所压倒。
+ 试图向这些人传达用户需求是浪费时间。他们太“聪明”了,根本听不到少数人
+ 的话。
+
+(http://lwn.net/articles/131776/)
+
+实际情况不同;与特定模块相比,内核开发人员更关心系统稳定性、长期维护以及找到
+正确的问题解决方案。这个故事的寓意是把重点放在问题上——而不是具体的解决方案
+上——并在投入创建代码之前与开发社区讨论这个问题。
+
+因此,在考虑一个内核开发项目时,我们应该得到一组简短问题的答案:
+
+ - 究竟需要解决的问题是什么?
+
+ - 受此问题影响的用户是谁?解决方案应该解决哪些用例?
+
+ - 内核现在为何没能解决这个问题?
+
+只有这样,才能开始考虑可能的解决方案。
+
+
+早期讨论
+--------
+
+在计划内核开发项目时,在开始实施之前与社区进行讨论是很有意义的。早期沟通可以
+通过多种方式节省时间和麻烦:
+
+ - 很可能问题是由内核以您不理解的方式解决的。Linux内核很大,具有许多不明显
+ 的特性和功能。并不是所有的内核功能都像人们所希望的那样有文档记录,而且很
+   容易遗漏一些东西。本文作者就曾见过有人发布了一个完整的驱动程序,而它重复
+   实现了一个该作者并不知道的现有驱动程序。重新发明现有轮子的代码不仅是浪费,
+   而且不会被接受进主线内核。
+
+ - 建议的解决方案中可能有一些元素不适用于主线合并。在编写代码之前,最好先
+ 了解这样的问题。
+
+ - 其他开发人员完全有可能考虑过这个问题;他们可能有更好的解决方案的想法,并且
+ 可能愿意帮助创建这个解决方案。
+
+在内核开发社区的多年经验给了我们一个明确的教训:闭门设计和开发的内核代码总是
+有一些问题,这些问题只有在代码发布到社区中时才会被发现。有时这些问题很严重,
+需要数月或数年的努力才能使代码达到内核社区的标准。一些例子包括:
+
+ - 设计并实现了单处理器系统的DeviceScape网络栈。只有使其适合于多处理器系统,
+ 才能将其合并到主线中。在代码中改装锁等等是一项困难的任务;因此,这段代码
+ (现在称为mac80211)的合并被推迟了一年多。
+
+ - Reiser4文件系统包含许多功能,核心内核开发人员认为这些功能应该在虚拟文件
+ 系统层中实现。它还包括一些特性,这些特性在不将系统暴露于用户引起的死锁的
+ 情况下是不容易实现的。这些问题的最新发现——以及对其中一些问题的拒绝——已经
+ 导致Reiser4远离了主线内核。
+
+ - Apparmor安全模块以被认为不安全和不可靠的方式使用内部虚拟文件系统数据结构。
+ 这种担心(包括其他)使Apparmor多年不在主线上。
+
+在每一种情况下,通过与内核开发人员的早期讨论,可以避免大量的痛苦和额外的工作。
+
+找谁交流
+--------
+
+当开发人员决定公开他们的计划时,下一个问题是:我们从哪里开始?答案是找到正确
+的邮件列表和正确的维护者。对于邮件列表,最好的方法是在维护者(MAINTAINERS)文件
+中查找要发布的相关位置。如果有一个合适的子系统邮件列表,那么发到那里通常比发到
+linux-kernel更可取;您更有可能接触到在相关子系统中具有专业知识的开发人员,并且
+那里的环境也可能更友好。
+
+找到维护人员可能会有点困难。同样,维护者文件是开始的地方。但是,该文件往往不总
+是最新的,并且并非所有子系统都在那里表示。实际上,维护者文件中列出的人员可能
+不是当前实际担任该角色的人员。因此,当不确定该联系谁时,一个有用的技巧是使用
+git(尤其是“git log”)查看感兴趣的子系统中当前活跃的人。看看谁在编写补丁,
+以及谁(如果有的话)在这些补丁上添加Signed-off-by签名。这些人将是帮助新开发项目的最佳
+人选。
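+
+例如,可以用类似下面的命令查看某个子系统最近的活跃情况(目录路径仅为示意)::
+
+    $ git log --oneline --since="1 year ago" -- drivers/net/
+    $ git shortlog -ns --since="1 year ago" -- drivers/net/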
+
+找到合适的维护者的任务有时是非常具有挑战性的,以至于内核开发人员添加了一个
+脚本来简化过程:
+
+::
+
+ .../scripts/get_maintainer.pl
+
+当给定“-f”选项时,此脚本将返回给定文件或目录的当前维护者。如果在命令行上传入
+一个补丁,它将列出应该收到补丁副本的维护人员。有许多选项可以控制
+get_maintainer.pl搜索维护者时的深入程度;使用较激进的选项时请小心,因为最终
+可能会列入对您正在修改的代码并没有真正兴趣的开发人员。
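+
+举例来说(目录和补丁文件名仅为示意)::
+
+    $ ./scripts/get_maintainer.pl -f drivers/net/ethernet/
+    $ ./scripts/get_maintainer.pl 0001-my-fix.patch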
+
+如果所有其他方法都失败了,那么与Andrew Morton交谈可以成为一种有效的方法来跟踪
+特定代码段的维护人员。
+
+何时邮寄?
+----------
+
+如果可能的话,在早期阶段发布你的计划只会有帮助。描述正在解决的问题以及已经
+制定的关于如何实施的任何计划。您可以提供的任何信息都可以帮助开发社区为项目
+提供有用的输入。
+
+在这个阶段可能发生的一件令人沮丧的事情不是敌对的反应,而是很少或根本没有
+反应。可悲的事实是:(1)内核开发人员往往很忙;(2)不缺少有宏伟计划和很少
+代码(甚至代码前景)支持他们的人;(3)没有人有义务审查或评论别人发表的
+想法。除此之外,高级设计常常隐藏一些问题,这些问题只有在有人真正尝试实现
+这些设计时才会被发现;因此,内核开发人员宁愿看到代码。
+
+如果征求评论的请求没有带来多少评论,不要以为这意味着没有人对这个项目感
+兴趣。不幸的是,你也不能假设你的想法没有问题。在这种情况下,最好的做法是
+继续推进,并随时把你的进展通知社区。
+
+获得官方认可
+-----------------------
+
+如果您的工作是在公司环境中完成的,就像大多数Linux内核工作一样,显然,在您将
+公司的计划或代码发布到公共邮件列表之前,必须获得适当授权的经理的许可。发布
+不确定是否兼容GPL的代码可能是有特别问题的;公司的管理层和法律人员越早能够就
+发布内核开发项目达成一致,对参与的每个人都越好。
+
+一些读者可能会认为他们的核心工作是为了支持还没有正式承认存在的产品。将雇主
+的计划公布在公共邮件列表上可能不是一个可行的选择。在这种情况下,有必要考虑
+保密是否真的是必要的;通常不需要把开发计划关在门内。
+
+也就是说,有些情况下,一家公司在开发过程的早期就不能合法地披露其计划。拥有
+经验丰富的内核开发人员的公司可以选择以开环的方式进行,前提是他们以后能够避免
+严重的集成问题。对于没有这种内部专业知识的公司,最好的选择往往是聘请外部
+开发商根据保密协议审查计划。Linux基金会运行了一个NDA程序,旨在帮助解决这种
+情况;
+
+ http://www.linuxfoundation.org/en/NDA_program
+
+这种审查通常足以避免以后出现严重问题,而无需公开披露项目。
diff --git a/Documentation/translations/zh_CN/process/4.Coding.rst b/Documentation/translations/zh_CN/process/4.Coding.rst
new file mode 100644
index 000000000000..5301e9d55255
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/4.Coding.rst
@@ -0,0 +1,290 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/4.Coding.rst <development_coding>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_coding:
+
+使代码正确
+======================
+
+虽然对于一个坚实的、面向社区的设计过程有很多话要说,但是任何内核开发项目的
+证明都在生成的代码中。它是将由其他开发人员检查并合并(或不合并)到主线树中
+的代码。所以这段代码的质量决定了项目的最终成功。
+
+本节将检查编码过程。我们将从内核开发人员出错的几种方式开始。然后重点将转移
+到正确的事情和可以帮助这个任务的工具上。
+
+陷阱
+----
+
+编码风格
+********
+
+内核长期以来都有一种标准的编码风格,如
+:ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
+中所述。在大部分时间里,该文件中描述的政策被认为至多是建议性的。因此,内核
+中存在大量不符合编码风格准则的代码。代码的存在会给内核开发人员带来两个独立
+的危害。
+
+第一个危害是让人以为内核编码标准无关紧要、不会被强制执行。事实上,如果代码不
+符合标准,向内核添加新代码是非常困难的;许多开发人员甚至会在审查代码之前就要求
+对其重新格式化。像内核这么大的代码库需要一定程度的代码统一性,以使开发人员能够
+快速理解其中的任何部分,所以这里已经容不下格式古怪的代码了。
+
+偶尔,内核的编码风格会与雇主的强制风格发生冲突。在这种情况下,内核的风格必须
+在代码合并之前获胜。将代码放入内核意味着以多种方式放弃一定程度的控制权——包括
+控制代码的格式化方式。
+
+另一个陷阱是假定已经在内核中的代码迫切需要编码样式的修复。开发人员可能会开始
+生成重新格式化补丁,作为熟悉过程的一种方式,或者作为将其名称写入内核变更日志
+的一种方式,或者两者兼而有之。但是纯编码风格的修复被开发社区视为噪音;它们往
+往受到冷遇。因此,最好避免使用这种类型的补丁。由于其他原因,在处理一段代码的
+同时修复它的样式是很自然的,但是编码样式的更改不应该仅为了更改而进行。
+
+编码风格的文档也不应该被视为绝对的法律,这是永远不会被违反的。如果有一个很好
+的理由反对这种样式(例如,如果拆分为适合80列限制的行,那么它的可读性就会大大
+降低),那么就这样做。
+
+请注意,您还可以使用 ``clang-format`` 工具来帮助您处理这些规则,自动重新格式
+化部分代码,并查看完整的文件,以发现编码样式错误、拼写错误和可能的改进。它还
+可以方便地进行排序,包括对齐变量/宏、回流文本和其他类似任务。有关详细信息,请
+参阅文件 :ref:`Documentation/process/clang-format.rst <clangformat>`
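+
+例如,可以这样对单个文件做就地重新格式化(文件路径仅为示意)::
+
+    $ clang-format -i -style=file drivers/foo/bar.c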
+
+抽象层
+******
+
+计算机科学教授教学生以灵活性和信息隐藏的名义广泛使用抽象层。当然,内核广泛
+地使用了抽象;任何涉及数百万行代码的项目都不能做到这一点并存活下来。但经验
+表明,过度或过早的抽象可能和过早的优化一样有害。抽象应用于所需的级别,
+不要过度。
+
+举一个简单的例子:考虑一个所有调用者都总是传零的函数参数。我们可以保留这个
+参数,以防有人最终需要用到它所提供的额外灵活性。不过,到那时,
+实现这个额外参数的代码很有可能已经以某种从未被注意到的微妙方式损坏——因为它从
+未被使用过。或者,当需要额外的灵活性时,它不会以符合程序员早期期望的方式来
+这样做。内核开发人员通常会提交补丁来删除未使用的参数;一般来说,首先不应该
+添加这些参数。
+
+隐藏硬件访问的抽象层——通常允许大量的驱动程序在多个操作系统中使用——尤其不受
+欢迎。这样的层使代码变得模糊,可能会造成性能损失;它们不属于Linux内核。
+
+另一方面,如果您发现自己从另一个内核子系统复制了大量的代码,那么现在是时候
+问一下,事实上,将这些代码中的一些提取到单独的库中,或者在更高的层次上实现
+这些功能是否有意义。在整个内核中复制相同的代码没有价值。
+
+#ifdef 和预处理
+***************
+
+C预处理器似乎给一些C程序员带来了强大的诱惑,他们认为它是一种有效地将大量灵
+活性编码到源文件中的方法。但是预处理器不是C,大量使用它会导致代码对其他人来
+说更难读取,对编译器来说更难检查正确性。大量的预处理器几乎总是代码需要一些
+清理工作的标志。
+
+使用ifdef的条件编译实际上是一个强大的功能,它在内核中使用。但是很少有人希望
+看到代码被大量地撒上ifdef块。作为一般规则,ifdef的使用应尽可能限制在头文件
+中。条件编译的代码可以被限制在一些函数里:如果相应代码不存在,这些函数就变成
+空函数,编译器会悄悄地把对空函数的调用优化掉。结果是代码更加清晰,更容易理解。
+
+C预处理器宏存在许多危险,包括可能对具有副作用且没有类型安全性的表达式进行多
+重评估。如果您试图定义宏,请考虑创建一个内联函数。结果相同的代码,但是内联
+函数更容易读取,不会多次计算其参数,并且允许编译器对参数和返回值执行类型检查。
+
+内联函数
+********
+
+不过,内联函数本身也存在风险。程序员可以倾心于避免函数调用和用内联函数填充源
+文件所固有的效率。然而,这些功能实际上会降低性能。因为它们的代码在每个调用站
+点都被复制,所以它们最终会增加编译内核的大小。反过来,这会对处理器的内存缓存
+造成压力,从而大大降低执行速度。通常,内联函数应该非常小,而且相对较少。毕竟,
+函数调用的成本并不高;大量内联函数的创建是过早优化的典型例子。
+
+一般来说,内核程序员会忽略缓存效果,这会带来危险。在开始的数据结构课程中,经
+典的时间/空间权衡通常不适用于当代硬件。空间就是时间,因为一个大的程序比一个
+更紧凑的程序运行得慢。
+
+最近的编译器在决定一个给定函数是否应该被内联方面扮演着越来越积极的角色。
+因此,“inline”关键字的自由放置可能不仅仅是过度的,它也可能是无关的。
+
+锁
+**
+
+2006年5月,“Devicescape”网络栈在GPL下发布,供主线内核采用。这是一个受
+欢迎的消息;当时Linux对无线网络的支持充其量只能算不合格,而Devicescape
+网络栈带来了修复这种情况的希望。然而,直到2007年6月(2.6.22),这段代码才真
+正进入主线。发生了什么?
+
+这段代码显示了许多闭门造车的迹象。但一个特别大的问题是,它并不是设计用于多
+处理器系统。在合并这个网络堆栈(现在称为mac80211)之前,需要对其进行一个锁
+方案的改造。
+
+曾经,Linux内核代码可以在不考虑多处理器系统所带来的并发性问题的情况下进行
+开发。然而,现在,这个文件是写在双核笔记本电脑上的。即使在单处理器系统上,
+为提高响应能力所做的工作也会提高内核内的并发性水平。编写内核代码而不考虑锁
+的日子已经过去很长了。
+
+可以由多个线程并发访问的任何资源(数据结构、硬件寄存器等)必须由锁保护。新
+的代码应该记住这一要求;事后改装锁是一项相当困难的任务。内核开发人员应该花
+时间充分了解可用的锁原语,以便为作业选择正确的工具。显示对并发性缺乏关注的
+代码进入主线将很困难。
+
+回归
+****
+
+最后一个值得一提的危险是:你可能会忍不住做出某种改变(它也许能带来很大的改进),
+结果却破坏了现有用户本来正常使用的某些东西。这种变化被称为“回归”,回归在主线
+内核中是极不受欢迎的。除少数例外情况,如果回归不能及时修复,引起回归的修改将会
+被撤销。最好的办法是从一开始就避免回归。
+
+人们常常争辩说,如果一个回归让正常工作的人比遇到问题的人多得多,那么它就是
+合理的。一个改动每破坏一个系统就能给十个系统带来新功能,为什么不做呢?2007年
+7月,Linus对这个问题给出了最佳答案:
+
+::
+
+  所以我们不会通过引入新问题的方式来修复bug。那样做是疯狂的,而且没有人
+  知道你是否真的取得了进展。是前进两步、后退一步,还是前进一步、后退两步?
+
+(http://lwn.net/articles/243460/)
+
+一种特别不受欢迎的回归是对用户空间ABI的任何改变。一旦接口被导出到用户空间,
+就必须无限期地支持它。这一事实使得用户空间接口的创建特别具有挑战性:因为它们
+不能以不兼容的方式更改,所以必须一次就做对。因此,用户空间接口
+总是需要大量的思考、清晰的文档和广泛的审查。
+
+
+代码检查工具
+------------
+
+至少目前,编写无错误代码仍然是我们中很少人能达到的理想状态。不过,我们希望做
+的是,在代码进入主线内核之前,尽可能多地捕获并修复这些错误。为此,内核开发人
+员已经组装了一系列令人印象深刻的工具,可以自动捕获各种各样的模糊问题。计算机
+发现的任何问题都是一个以后不会困扰用户的问题,因此,只要有可能,就应该使用
+自动化工具。
+
+第一步只是注意编译器产生的警告。当代版本的GCC可以检测(并警告)大量潜在错误。
+通常,这些警告都指向真正的问题。提交以供审阅的代码通常不会产生任何编译器警告。
+在消除警告时,注意了解真正的原因,并尽量避免“修复”,使警告消失而不解决其原因。
+
+请注意,并非所有编译器警告都默认启用。使用“make EXTRA_CFLAGS=-W”构建内核以
+获得完整集合。
+
+内核提供了几个配置选项,可以打开调试功能;大多数配置选项位于“kernel hacking”
+子菜单中。对于任何用于开发或测试目的的内核,都应该启用其中几个选项。特别是,
+您应该打开:
+
+ - 启用 ENABLE_MUST_CHECK and FRAME_WARN 以获得一组额外的警告,以解决使用不
+ 推荐使用的接口或忽略函数的重要返回值等问题。这些警告生成的输出可能是冗长
+ 的,但您不必担心来自内核其他部分的警告。
+
+ - DEBUG_OBJECTS 将添加代码,以跟踪内核创建的各种对象的生存期,并在出现问题时
+ 发出警告。如果要添加创建(和导出)自己的复杂对象的子系统,请考虑添加对对象
+ 调试基础结构的支持。
+
+ - DEBUG_SLAB 可以发现各种内存分配和使用错误;它应该用于大多数开发内核。
+
+ - DEBUG_SPINLOCK, DEBUG_ATOMIC_SLEEP and DEBUG_MUTEXES 会发现许多常见的
+ 锁定错误.
+
+还有很多其他调试选项,其中一些将在下面讨论。其中一些具有显著的性能影响,不应
+一直使用。但是,在学习可用选项上花费的一些时间可能会在短期内得到多次回报。
+
+其中一个较重的调试工具是锁定检查器或“lockdep”。该工具将跟踪系统中每个锁
+(spinlock或mutex)的获取和释放、获取锁的相对顺序、当前中断环境等等。然后,
+它可以确保总是以相同的顺序获取锁,相同的中断假设适用于所有情况,等等。换句话
+说,lockdep可以找到许多场景,在这些场景中,系统很少会死锁。在部署的系统中,
+这种问题可能会很痛苦(对于开发人员和用户而言);LockDep允许提前以自动方式
+发现问题。任何使用了非平凡锁的代码,在提交合并之前,都应该先在启用lockdep的
+情况下运行测试。
+
+作为一个勤奋的内核程序员,毫无疑问,您将检查任何可能失败的操作(如内存分配)
+的返回状态。然而,事实上,最终的故障恢复路径可能完全没有经过测试。未测试的
+代码往往会被破坏;如果所有这些错误处理路径都被执行了几次,那么您可能对代码
+更有信心。
+
+内核提供了一个可以做到这一点的错误注入框架,特别是在涉及内存分配的情况下。
+启用故障注入后,内存分配的可配置百分比将失败;这些失败可以限制在特定的代码
+范围内。在启用了故障注入的情况下运行,程序员可以看到当情况恶化时代码如何响
+应。有关如何使用此工具的详细信息,请参阅
+Documentation/fault-injection/fault-injection.txt。
+
+使用“sparse”静态分析工具可以发现其他类型的错误。sparse可以警告程序员
+用户空间地址和内核空间地址的混淆、大端(big-endian)与小端(little-endian)数值的
+混用、在需要一组位标志的地方传递整数值等问题。sparse必须单独安装(如果您的发行
+版没有打包它,可以在 https://sparse.wiki.kernel.org/index.php/Main_page 找到),
+然后通过在make命令中添加“C=1”即可在代码上运行它。
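+
+例如::
+
+    $ make C=1      # 只对将要重新编译的文件运行sparse
+    $ make C=2      # 对所有源文件运行sparse,无论是否需要重新编译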
+
+“Coccinelle”工具(http://coccinelle.lip6.fr/)
+能够发现各种潜在的编码问题;它还可以为这些问题提出修复方案。在
+scripts/coccinelle目录下已经打包了相当多的内核“语义补丁”;运行
+“make coccicheck”将运行这些语义补丁并报告发现的任何问题。有关详细信息,请参阅
+:ref:`Documentation/dev-tools/coccinelle.rst <devtools_coccinelle>`
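+
+例如(MODE=report 是 coccicheck 支持的模式之一,以报告形式输出发现的问题)::
+
+    $ make coccicheck MODE=report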
+
+
+其他类型的可移植性错误最好通过为其他体系结构编译代码来发现。如果没有S/390系统
+或Blackfin开发板,您仍然可以执行编译步骤。可以在以下位置找到一组用于x86系统的
+大型交叉编译器:
+
+ http://www.kernel.org/pub/tools/crosstool/
+
+花一些时间安装和使用这些编译器将有助于避免以后的尴尬。
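+
+以arm64为例,一次假设的交叉编译大致如下(交叉编译器前缀取决于您安装的工具链)::
+
+    $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- defconfig
+    $ make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j8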
+
+文档
+----
+
+文档通常比内核开发规则更为例外。即便如此,足够的文档将有助于简化将新代码合并
+到内核中的过程,使其他开发人员的生活更轻松,并对您的用户有所帮助。在许多情况
+下,文件的添加已基本上成为强制性的。
+
+任何补丁的第一个文档是其关联的变更日志。日志条目应该描述正在解决的问题、解决
+方案的形式、处理补丁的人员、对性能的任何相关影响,以及理解补丁可能需要的任何
+其他内容。确保changelog说明了为什么补丁值得应用;大量开发人员未能提供这些信息。
+
+任何添加新用户空间接口的代码(包括新的sysfs或/proc文件)都应该包含该接口的
+文档,使用户空间开发人员能够知道他们在使用什么。请参阅
+Documentation/ABI/README,了解如何格式化此文档以及需要提供哪些信息。
+
+文件 :ref:`Documentation/admin-guide/kernel-parameters.rst <kernelparameters>`
+描述了内核的所有引导时间参数。任何添加新参数的补丁都应该向该文件添加适当的
+条目。
+
+任何新的配置选项都必须附有帮助文本,帮助文本清楚地解释了这些选项以及用户可能
+希望何时选择它们。
+
+许多子系统的内部API信息通过专门格式化的注释进行记录;这些注释可以通过
+“kernel-doc”脚本以多种方式提取和格式化。如果您在具有kerneldoc注释的子系统中
+工作,则应该维护它们,并根据需要为外部可用的功能添加它们。即使在没有如此记录
+的领域中,为将来添加kerneldoc注释也没有坏处;实际上,这对于刚开始开发内核的人
+来说是一个有用的活动。这些注释的格式以及如何创建kerneldoc模板的一些信息可以在
+:ref:`Documentation/doc-guide/ <doc_guide>` 上找到。
+
+任何读过大量现有内核代码的人都会注意到,注释往往因其缺失而显眼。再一次,
+对新代码的期望比过去更高;合并没有注释的代码将更加困难。话虽如此,也没有人想要
+冗长啰嗦的注释。代码本身应该是可读的,注释只用来解释更微妙的方面。
+
+某些事情应该总是被注释。使用内存屏障时,应附上一行文字,解释为什么需要设置内存
+屏障。数据结构的锁定规则通常需要在某个地方解释。一般来说,主要数据结构需要全面
+的文档。应该指出单独代码位之间不明显的依赖性。任何可能诱使代码看门人进行错误的
+“清理”的事情都需要一个注释来说明为什么要这样做。等等。
+
+
+内部API更改
+-----------
+
+内核提供给用户空间的二进制接口不能被破坏,除非在最严重的情况下。相反,内核的
+内部编程接口是高度流动的,在需要时可以更改。如果你发现自己不得不绕开某个内核
+API,或者仅仅因为某个特定功能不满足你的需求而不去使用它,这可能是API需要改变的
+一个标志。作为内核开发人员,您有权进行此类更改。
+
+当然, 可以进行API更改,但它们必须是合理的。因此,任何进行内部API更改的补丁都
+应该附带一个关于更改内容和必要原因的描述。这种变化也应该分解成一个单独的补丁,
+而不是埋在一个更大的补丁中。
+
+另一个要点是,更改内部API的开发人员通常要负责修复内核树中被更改破坏的任何代码。
+对于一个广泛使用的函数,这个职责可以导致成百上千的变化,其中许多变化可能与其他
+开发人员正在做的工作相冲突。不用说,这可能是一项大工作,所以最好确保理由是
+可靠的。请注意,coccinelle工具可以帮助进行广泛的API更改。
+
+在进行不兼容的API更改时,应尽可能确保未更新的代码能被编译器捕获。这将帮助您
+确认已经找到了该接口在树内的所有使用者,也会提醒树外代码的开发人员:有一个他们
+需要响应的更改。支持树外代码不是内核开发人员需要担心的事情,但是我们也不必让
+树外开发人员的日子过得比必要的更难。
diff --git a/Documentation/translations/zh_CN/process/5.Posting.rst b/Documentation/translations/zh_CN/process/5.Posting.rst
new file mode 100644
index 000000000000..41aba21ff050
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/5.Posting.rst
@@ -0,0 +1,240 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/5.Posting.rst <development_posting>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_posting:
+
+发送补丁
+========
+
+您的工作迟早会准备好提交给社区审查,并最终被包含进主线内核。不出所料,
+内核开发社区已经发展出一套用于发布补丁的约定和流程;遵循这些约定和流程将使
+参与其中的每个人都更轻松。本文件将尽量详细地介绍这些期望;更多信息
+也可在以下文件中找到
+:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`,
+:ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
+和 :ref:`Documentation/translations/zh_CN/process/submit-checklist.rst <cn_submitchecklist>`.
+
+何时邮寄
+--------
+
+在补丁完全“准备好”之前,有一个不断的诱惑来避免发布补丁。对于简单的补丁,
+这不是问题。但是,如果正在完成的工作很复杂,那么在工作完成之前从社区获得
+反馈就可以获得很多好处。因此,您应该考虑发布正在进行的工作,甚至使Git树
+可用,以便感兴趣的开发人员可以随时赶上您的工作。
+
+当发布还没有准备好包含的代码时,最好在发布本身中这样说。还应提及任何有待完成
+的主要工作和任何已知问题。很少有人会看到那些被认为是半生不熟的补丁,但是那些
+人会想到他们可以帮助你把工作推向正确的方向。
+
+创建补丁之前
+------------
+
+在考虑将补丁发送到开发社区之前,有许多事情应该做。这些包括:
+
+ - 尽可能地测试代码。利用内核的调试工具,确保内核使用所有合理的配置选项组合
+ 进行构建,使用跨编译器为不同的体系结构进行构建等。
+
+ - 确保您的代码符合内核编码风格指南。
+
+ - 您的更改是否具有性能影响?如果是这样,您应该运行基准测试来显示您的变更的
+ 影响(或好处);结果的摘要应该包含在补丁中。
+
+ - 确保您有权发布代码。如果这项工作是为雇主完成的,雇主对这项工作具有所有权,
+ 并且必须同意根据GPL对其进行放行。
+
+一般来说,在发布代码之前进行一些额外的思考,几乎总是能在短时间内得到回报。
+
+补丁准备
+--------
+
+准备发布补丁可能是一个惊人的工作量,但再次尝试节省时间在这里通常是不明智的,
+即使在短期内。
+
+必须针对内核的特定版本准备补丁。作为一般规则,补丁程序应该基于Linus的Git树中
+的当前主线。当以主线为基础时,从一个众所周知的发布点开始——一个稳定的或RC的
+发布——而不是在一个主线分支任意点。
+
+但是,可能需要针对-mm、linux-next或子系统树生成版本,以便于更广泛的测试和审查。
+根据补丁的区域以及其他地方的情况,针对这些其他树建立补丁可能需要大量的工作来
+解决冲突和处理API更改。
+
+只有最简单的更改才应格式化为单个补丁;其他所有更改都应作为一系列逻辑更改进行。
+分割补丁是一门艺术;一些开发人员花了很长时间来弄清楚如何按照社区期望的方式来
+做。然而,有一些经验法则可以大大帮助:
+
+ - 您发布的补丁程序系列几乎肯定不会是工作系统中的一系列更改。相反,您所做的
+ 更改需要在最终形式中加以考虑,然后以有意义的方式进行拆分。开发人员对离散的、
+ 自包含的更改感兴趣,而不是您获取这些更改的路径。
+
+ - 每个逻辑上独立的变更都应该格式化为单独的补丁。这些更改可以是小的(“向此
+ 结构添加字段”)或大的(例如,添加一个重要的新驱动程序),但它们在概念上
+ 应该是小的,并且可以接受一行描述。每个补丁都应该做一个特定的更改,可以单独
+ 检查并验证它所做的事情。
+
+ - 作为重申上述准则的一种方法:不要在同一补丁中混合不同类型的更改。如果一个
+ 补丁修复了一个关键的安全漏洞,重新排列了一些结构,并重新格式化了代码,那么
+ 很有可能它会被忽略,而重要的修复将丢失。
+
+ - 每个补丁都应该产生一个内核,它可以正确地构建和运行;如果补丁系列在中间被
+ 中断,那么结果应该仍然是一个工作的内核。补丁系列的部分应用是使用
+   “git bisect”工具查找回归的一个常见场景;如果结果是一个损坏的内核,那么对于
+ 那些从事追踪问题的高尚工作的开发人员和用户来说,将使他们的生活更加艰难。
+
+ - 不过,不要过分。一位开发人员曾经将一组编辑内容作为500个单独的补丁发布到一个
+ 文件中,这并没有使他成为内核邮件列表中最受欢迎的人。一个补丁可以相当大,
+ 只要它仍然包含一个单一的逻辑变更。
+
+ - 用一系列补丁添加一个全新的基础设施是很有诱惑力的,但是在系列中的最后一个
+ 补丁启用整个补丁之前,该基础设施是不使用的。如果可能的话,应该避免这种
+ 诱惑;如果这个系列增加了回归,那么二分法将指出最后一个补丁是导致问题的
+ 补丁,即使真正的bug在其他地方。只要有可能,添加新代码的补丁程序应该立即
+ 激活该代码。
+
+创建完美补丁系列的工作可能是一个令人沮丧的过程,在完成“真正的工作”之后需要花费
+大量的时间和思考。但是,如果做得好,这是一段很好的时间。
+
+补丁格式和更改日志
+------------------
+
+所以现在你有了一系列完美的补丁可以发布,但是这项工作还没有完成。每个补丁都
+需要被格式化成一条消息,它可以快速而清晰地将其目的传达给世界其他地方。为此,
+每个补丁将由以下部分组成:
+
+ - 命名补丁作者的可选“from”行。只有当你通过电子邮件传递别人的补丁时,这一行
+ 才是必要的,但是如果有疑问,添加它不会有任何伤害。
+
+ - 一行描述补丁的作用。对于没有其他上下文的读者来说,此消息应该足够了解补丁
+ 的范围;这是将在“短格式”变更日志中显示的行。此消息通常首先用相关的子系统
+ 名称格式化,然后是补丁的目的。例如:
+
+ ::
+
+ gpio: fix build on CONFIG_GPIO_SYSFS=n
+
+ - 一个空白行,后面是补丁内容的详细描述。这段描述可以写到所需要的任意长度;
+   它应该说明补丁的作用以及为什么它应该被应用于内核。
+
+ - 一个或多个标签行,其中至少要有一个补丁作者的 Signed-off-by: 签名。标签将在
+   下面更详细地描述。
+
+上面的项目一起构成补丁的变更日志。写一篇好的变更日志是一门至关重要但常常被
+忽视的艺术;值得花一点时间来讨论这个问题。当你写一个变更日志时,你应该记住
+有很多不同的人会读你的话。其中包括子系统维护人员和审查人员,他们需要决定是否
+应该包括补丁,分销商和其他维护人员试图决定是否应该将补丁反向移植到其他内核,
+bug搜寻人员想知道补丁是否负责他们正在追查的问题,想知道内核如何变化的用户。
+等等。一个好的变更日志以最直接和最简洁的方式向所有这些人传达所需的信息。
+
+为此,总结行应该在一行的限制内尽可能好地描述变更的影响和动机。
+然后,详细的描述可以详述这些主题,并提供任何需要的附加信息。如果补丁修复了
+一个bug,请引用引入该bug的commit(如果可能,请在引用commits时同时提供commit id
+和标题)。如果某个问题与特定的日志或编译器输出相关联,请包含该输出以帮助其他
+人搜索同一问题的解决方案。如果更改是为了支持以后补丁中的其他更改,那么就这么
+说。如果更改了内部API,请详细说明这些更改以及其他开发人员应该如何响应。一般
+来说,你越能把自己放在每个阅读你的changelog的人的位置上,changelog(和内核
+作为一个整体)就越好。
+
+不用说,变更日志应该是将变更提交到修订控制系统时使用的文本。接下来是:
+
+ - 补丁本身,采用统一的(“-u”)补丁格式。将“-p”选项用于diff将使函数名与更改
+ 相关联,从而使结果补丁更容易被其他人读取。
+
+您应该避免在补丁中包括对不相关文件(例如,由构建过程生成的文件或编辑器
+备份文件)的更改。文档目录中的文件“dontdiff”在这方面有帮助;使用“-X”选项将
+其传递给diff。
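+
+例如,手工生成补丁时可以这样排除无关文件(目录名仅为示意)::
+
+    $ diff -uprN -X linux-old/Documentation/dontdiff linux-old/ linux-new/ > my.patch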
+
+上面提到的标签用于描述各种开发人员如何与这个补丁的开发相关联。
+:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+文档中对它们进行了详细描述;下面是一个简短的总结。每一行的格式如下:
+
+::
+
+ tag: Full Name <email address> optional-other-stuff
+
+常用的标签有:
+
+ - Signed-off-by: 这是一个开发人员的证明,他或她有权提交补丁以包含到内核中。
+ 这是开发来源认证协议,其全文可在
+ :ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+ 中找到,如果没有适当的签字,则不能合并到主线中。
+
+ - Co-developed-by: 声明补丁是由多个开发人员共同创建的;当几个人在一个补丁上
+   工作时,它用于给共同作者(除 From: 所指明的作者之外)署名。由于
+   Co-developed-by: 表示作者身份,所以每个 Co-developed-by: 之后必须紧跟相应
+   合作作者的 Signed-off-by: 签名。具体内容和示例可以在以下文件中找到
+   :ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+
+ - Acked-by: 表示另一个开发人员(通常是相关代码的维护人员)同意补丁适合包含
+ 在内核中。
+
+ - Tested-by: 声明指定的人已经测试了补丁并发现它可以工作。
+
+ - Reviewed-by: 指定的开发人员已经审查了补丁的正确性;有关详细信息,请参阅
+ :ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+
+ - Reported-by: 指定报告此补丁修复的问题的用户;此标记用于提供感谢。
+
+ - Cc:指定的人收到了补丁的副本,并有机会对此发表评论。
+
+在补丁中添加标签时要小心:只有cc:才适合在没有指定人员明确许可的情况下添加。
+
+发送补丁
+--------
+
+在邮寄补丁之前,您还需要注意以下几点:
+
+ - 您确定您的邮件发送程序不会损坏补丁吗?被邮件客户端擅自改动空白字符或自动
+   换行的补丁在另一端将无法应用,而且通常也不会有人去仔细查看。如果有任何
+   疑问,先把补丁寄给你自己,确认它送达时完好无损。
+
+ :ref:`Documentation/translations/zh_CN/process/email-clients.rst <cn_email_clients>`
+ 提供了一些有用的提示,可以让特定的邮件客户机工作以发送补丁。
+
+ - 你确定你的补丁没有愚蠢的错误吗?您应该始终通过scripts/checkpatch.pl运行
+ 补丁程序,并解决它提出的投诉。请记住,checkpatch.pl虽然是大量思考内核
+ 补丁应该是什么样子的体现,但它并不比您聪明。如果修复checkpatch.pl投诉会
+ 使代码变得更糟,请不要这样做。
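+
+   例如(补丁文件名仅为示意)::
+
+     $ ./scripts/checkpatch.pl 0001-my-fix.patch
+     $ ./scripts/checkpatch.pl --strict 0001-my-fix.patch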
+
+补丁应始终以纯文本形式发送。请不要将它们作为附件发送;这使得审阅者在答复中更难
+引用补丁的部分。相反,只需将补丁直接放到您的消息中。
+
+邮寄补丁时,重要的是将副本发送给任何可能感兴趣的人。与其他一些项目不同,内核
+鼓励人们宁可多发副本也不要少发;不要假定相关人员会看到您在邮件列表上的发帖。
+尤其是,副本应发送至:
+
+ - 受影响子系统的维护人员。如前所述,维护人员文件是查找这些人员的第一个地方。
+
+ - 其他在同一领域工作的开发人员,尤其是那些现在可能在那里工作的开发人员。使用
+ git查看还有谁修改了您正在处理的文件,这很有帮助。
+
+ - 如果您对错误报告或功能请求做出响应,也可以抄送原始发送人。
+
+ - 将副本发送到相关邮件列表,或者,如果没有其他应用,则发送到Linux内核列表。
+
+ - 如果您正在修复一个bug,请考虑该修复是否应进入下一个稳定更新。如果是这样,
+ stable@vger.kernel.org 应该得到补丁的副本。另外,在补丁本身的标签中添加
+ 一个“cc:stable@vger.kernel.org”;这将使稳定团队在修复进入主线时收到通知。
+
+当为一个补丁选择接收者时,最好知道你认为谁最终会接受这个补丁并将其合并。虽然
+可以将补丁直接发送给Linus Torvalds并让他合并,但通常情况下不会这样做。Linus
+很忙,并且有子系统维护人员负责监视内核的特定部分。通常您会希望维护人员合并您
+的补丁。如果没有明显的维护人员,Andrew Morton通常是最后的补丁目标。
+
+补丁需要好的主题行。补丁程序行的规范格式如下:
+
+::
+
+ [PATCH nn/mm] subsys: one-line description of the patch
+
+其中“nn”是补丁的序号,“mm”是系列中补丁的总数,“subsys”是受影响子系统的名称。
+显然,一个单独的补丁可以省略nn/mm。
+
+如果您要发送一系列重要的补丁,通常会把介绍性描述作为第0封(第0部分)邮件发送。
+不过,这个约定并没有被普遍遵循;如果您使用它,请记住引言中的信息不会进入内核
+变更日志。因此,请确保补丁本身带有完整的变更日志信息。
+
+一般来说,多部分补丁的第二部分和后续部分应作为对第一部分的回复发送,以便它们
+在接收端都连成一串。像git和quilt这样的工具都有相应的命令,可以按正确的线程方式
+发送一组补丁。但是,如果您的系列很长,并且正在使用git,请不要使用 --chain-reply-to
+选项,以避免形成异常深的嵌套。
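+
+作为参考,下面是一个假设的发送流程(分支名、收件人地址和输出目录均为示意)::
+
+    $ git format-patch -o outgoing/ --cover-letter -v2 master..my-feature
+    $ git send-email --to=maintainer@example.org \
+          --cc=linux-kernel@vger.kernel.org outgoing/*.patch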
diff --git a/Documentation/translations/zh_CN/process/6.Followthrough.rst b/Documentation/translations/zh_CN/process/6.Followthrough.rst
new file mode 100644
index 000000000000..f509e077e1cb
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/6.Followthrough.rst
@@ -0,0 +1,145 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/6.Followthrough.rst <development_followthrough>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_followthrough:
+
+跟进
+====
+
+在这一点上,您已经遵循了到目前为止给出的指导方针,并且,随着您自己的工程技能
+的增加,已经发布了一系列完美的补丁。即使是经验丰富的内核开发人员也能犯的最大
+错误之一是,认为他们的工作现在已经完成了。事实上,发布补丁意味着进入流程的下
+一个阶段,可能还需要做很多工作。
+
+一个补丁在第一次发布时就非常出色,没有改进的余地,这是很罕见的。内核开发流程
+认识到这一事实,因此,它非常注重对已发布代码的改进。作为代码的作者,您应该与
+内核社区合作,以确保您的代码符合内核的质量标准。如果不参与这个过程,很可能会
+阻止将补丁包含到主线中。
+
+与审阅者合作
+------------
+
+任何有一定分量的补丁都会在其他开发人员审查代码时收到不少评论。对于许多开发
+人员来说,与审查人员合作可能是内核开发过程中最令人生畏的部分。但是,如果你
+记住一些事情,生活会变得容易得多:
+
+ - 如果你已经很好地解释了你的补丁,评论人员会理解它的价值,以及为什么你会
+ 费尽心思去写它。但是这个并不能阻止他们提出一个基本的问题:五年或十年后
+ 用这个代码维护一个内核会是什么感觉?你可能被要求做出的许多改变——从编码风格
+ 的调整到大量的重写——都来自于对Linux的理解,即从现在起十年后,Linux仍将在
+ 开发中。
+
+ - 代码审查是一项艰苦的工作,这是一项相对吃力不讨好的工作;人们记得谁编写了
+ 内核代码,但对于那些审查它的人来说,几乎没有什么持久的名声。因此,评论
+ 人员可能会变得暴躁,尤其是当他们看到同样的错误被一遍又一遍地犯下时。如果
+ 你得到了一个看起来愤怒、侮辱或完全冒犯你的评论,抵制以同样方式回应的冲动。
+ 代码审查是关于代码的,而不是关于人的,代码审查人员不会亲自攻击您。
+
+ - 同样,代码审查人员也不想以牺牲你雇主的利益为代价来宣传他们雇主的议程。
+ 内核开发人员通常希望今后几年能在内核上工作,但他们明白他们的雇主可能会改
+ 变。他们真的,几乎毫无例外地,致力于创造他们所能做到的最好的内核;他们并
+ 没有试图给雇主的竞争对手造成不适。
+
+所有这些归根结底都是,当审阅者向您发送评论时,您需要注意他们正在进行的技术
+观察。不要让他们的表达方式或你自己的骄傲阻止这种事情的发生。当你在一个补丁
+上得到评论时,花点时间去理解评论人想说什么。如果可能的话,请修复审阅者要求
+您修复的内容。然后回复审稿人:谢谢他们,并描述你将如何回答他们的问题。
+
+请注意,您不必同意审阅者建议的每个更改。如果您认为审阅者误解了您的代码,请
+解释到底发生了什么。如果您对建议的更改有技术上的异议,请描述它并证明您对该
+问题的解决方案是正确的。如果你的解释有道理,审稿人会接受的。不过,如果你的
+解释不能证明是有说服力的,尤其是当其他人开始同意审稿人的观点时,请花些时间
+重新考虑一下。你很容易对自己解决问题的方法视而不见,以至于你没有意识到某个
+问题根本是错误的,或者你甚至没有解决正确的问题。
+
+Andrew Morton建议,每一条不会导致代码更改的评论都应该导致额外的代码注释;
+这可以帮助未来的评论人员避免出现第一次出现的问题。
+
+一个致命的错误是忽视评论,希望它们会消失。他们不会走的。如果您在没有对之前
+收到的注释做出响应的情况下重新发布代码,那么很可能会发现补丁毫无用处。
+
+说到重新发布代码:请记住,审阅者不会记住您上次发布的代码的所有细节。因此,
+提醒审查人员以前提出的问题以及您如何处理这些问题总是一个好主意;补丁变更
+日志是提供此类信息的好地方。审阅者不必搜索列表档案来熟悉上次所说的内容;
+如果您帮助他们开始运行,当他们重新访问您的代码时,他们的心情会更好。
+
+如果你已经试着做正确的事情,但事情仍然没有进展呢?大多数技术上的分歧都可以
+通过讨论来解决,但有时人们只需要做出决定。如果你真的认为这个决定对你不利,
+你可以试着向更高的权威申诉。在写作本文时,这个更高的权威通常是Andrew Morton。
+Andrew在内核开发社区中非常受尊重;他经常能为看起来已被绝望地阻塞的事情解开僵局。
+尽管如此,对Andrew的呼吁不应轻而易举,也不应在所有其他替代方案都被探索之前
+使用。当然,记住,他也可能不同意你的意见。
+
+接下来会发生什么
+----------------
+
+如果一个补丁被认为是添加到内核中的一件好事,并且一旦大多数审查问题得到解决,
+下一步通常是进入子系统维护人员的树中。工作方式因子系统而异;每个维护人员都
+有自己的工作方式。特别是,可能有不止一棵树——一棵树,也许,专门用于计划下一
+个合并窗口的补丁,另一棵树用于长期工作。
+
+对于应用于没有明显子系统树(例如内存管理修补程序)的区域的修补程序,默认树
+通常以-mm结尾。影响多个子系统的补丁也可以最终通过-mm树。
+
+包含在子系统树中可以提高补丁的可见性。现在,使用该树的其他开发人员将默认获
+得补丁。子系统树通常也会把内容汇入linux-next,使其对整个开发社区可见。在这一点
+上,您很可能会从一组新的审阅者那里得到更多的评论;这些评论需要像上一轮那样
+得到回答。
+
+在这一阶段还会发生什么,取决于你的补丁的性质,以及它是否与其他人正在做的工作
+相冲突。在最坏的情况下,严重的补丁冲突可能会导致一些工作被搁置,以便剩余的补丁
+可以成形并合并。另一些时候,解决冲突需要与其他开发人员合作,可能还要
+在树之间移动一些补丁,以确保所有补丁都能干净地应用。这项工作可能是一件痛苦的
+事情,但值得庆幸的是:在linux-next树出现之前,这些冲突通常只在合并窗口期间才会
+暴露出来,必须匆忙解决;现在则可以在合并窗口打开之前从容地解决它们。
+
+有朝一日,如果一切顺利,您将登录并看到您的补丁已经合并到主线内核中。祝贺你!
+然而,一旦庆祝活动完成(并且您已经将自己添加到维护人员文件中),就值得记住
+一个重要的小事实:工作仍然没有完成。并入主线带来了自身的挑战。
+
+首先,补丁的可见性再次提高。可能会有新一轮的开发者评论,他们以前不知道这
+个补丁。忽略它们可能很有诱惑力,因为您的代码不再存在任何被合并的问题。但是,
+要抵制这种诱惑,您仍然需要对有问题或建议的开发人员作出响应。
+
+不过,更重要的是:将代码包含在主线中会将代码交给更大的一组测试人员。即使您
+为尚未提供的硬件提供了驱动程序,您也会惊讶于有多少人会将您的代码构建到内核
+中。当然,如果有测试人员,也会有错误报告。
+
+最糟糕的错误报告是回归。如果你的补丁导致回归,你会发现很多不舒服的眼睛盯着
+你;回归需要尽快修复。如果您不愿意或无法修复回归(其他人都不会为您修复),
+那么在稳定期内,您的补丁几乎肯定会被移除。除了否定您为使补丁进入主线所做的
+所有工作之外,如果由于未能修复回归而取消补丁,很可能会使将来的工作更难合并。
+
+在处理完任何回归之后,可能还有其他普通的bug需要处理。稳定期是修复这些错误并
+确保代码在主线内核版本中的首次发布尽可能可靠的最好机会。所以,请回答错误
+报告,并尽可能解决问题。这就是稳定期的目的;一旦解决了旧补丁的任何问题,就
+可以开始创建酷的新补丁。
+
+别忘了,还有其他里程碑也可能会创建bug报告:下一个主线稳定版本,当著名的发行
+商选择了包含您补丁的内核版本时,等等。继续响应这些报告是对自己工作最基本的负责。但是,
+如果这不是足够的动机,那么也值得考虑的是,开发社区会记住那些在合并后对代码
+失去兴趣的开发人员。下一次你发布补丁时,他们会以你以后不会在身边维护它为假
+设来评估它。
+
+其他可能发生的事情
+------------------
+
+有一天,你可以打开你的邮件客户端,看到有人给你寄了一个代码补丁。毕竟,这是
+让您的代码公开存在的好处之一。如果您同意这个补丁,您可以将它转发给子系统
+维护人员(确保带上正确的From:行,以便署名正确,并加上您自己的
+Signed-off-by签名),或者回复一个Acked-by:,让原发送者自己往上游发送它。
+
+如果您不同意补丁,请发送一个礼貌的回复,解释原因。如果可能的话,告诉作者需要
+做哪些更改才能让您接受补丁。对于代码的编写者和维护者所反对的合并补丁,存在着
+一定的阻力,但仅此而已。如果你被认为不必要的阻碍了好的工作,那么这些补丁最
+终会经过你身边并进入主线。在Linux内核中,没有人对任何代码拥有绝对的否决权。
+除了Linus。
+
+在非常罕见的情况下,您可能会看到完全不同的东西:另一个开发人员发布了针对您
+的问题的不同解决方案。在这一点上,两个补丁中的一个可能不会合并,“我的在这里
+是第一个”不被认为是一个令人信服的技术论据。如果有人的补丁取代了你的补丁而进
+入了主线,那么只有一种正确的回应:为你的问题得到了解决而高兴,然后继续你的
+工作。自己的工作被这样推到一边可能令人伤心和气馁,但是在人们早已忘记究竟是谁的
+补丁被合并之后,社区仍会记住你当时的反应。
diff --git a/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
new file mode 100644
index 000000000000..956815edbd18
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/7.AdvancedTopics.rst
@@ -0,0 +1,124 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/7.AdvancedTopics.rst <development_advancedtopics>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_advancedtopics:
+
+高级主题
+========
+
+现在,希望您能够掌握开发流程的工作方式。然而,还有更多的东西要学!本节将介绍
+一些主题,这些主题对希望成为Linux内核开发过程常规部分的开发人员有帮助。
+
+使用Git管理补丁
+---------------
+
+内核使用分布式版本控制始于2002年初,当时Linus首次开始使用专有的BitKeeper应用
+程序。虽然BitKeeper本身存在争议,但它所体现的版本管理方法却毫无争议。分布式
+版本控制使内核开发项目立即得到了加速。如今,BitKeeper已经有了多种自由软件
+替代品。无论好坏,内核项目已经选择了Git作为它的工具。
+
+使用Git管理补丁可以使开发人员的生活更加轻松,尤其是随着补丁数量的增加。Git
+也有其粗糙的边缘和一定的危险,它是一个年轻和强大的工具,仍然在其开发人员完善
+中。本文档不会试图教会读者如何使用git;这会是个巨长的文档。相反,这里的重点
+将是Git如何特别适合内核开发过程。想要加快Git的开发人员可以在以下网站上找到
+更多信息:
+
+ http://git-scm.com/
+
+ http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+
+在尝试使用它使补丁可供其他人使用之前,第一要务是阅读上述站点,对Git的工作
+方式有一个扎实的了解。使用Git的开发人员应该能够获得主线存储库的副本,探索
+修订历史,提交对树的更改,使用分支等。了解Git用于重写历史的工具(如Rebase)
+也很有用。Git有自己的术语和概念;Git的新用户应该了解refs、远程分支、索引、
+快进合并、推拉、分离头等。一开始可能有点吓人,但这些概念不难通过一点学习来
+理解。
+
+使用git生成通过电子邮件提交的补丁是提高速度的一个很好的练习。
+
+当您准备好开始安装Git树供其他人查看时,您当然需要一个可以从中提取的服务器。
+如果您有一个可以访问Internet的系统,那么使用git守护进程设置这样的服务器相
+对简单。否则,免费的公共托管网站(例如github)开始出现在网络上。成熟的开发
+人员可以在kernel.org上获得一个帐户,但这些帐户并不容易找到;有关更多信息,
+请参阅 http://kernel.org/faq/
+
+正常的Git工作流程涉及到许多分支的使用。每一条开发线都可以分为单独的“主题
+分支”,并独立维护。Git的分支机构很便宜,没有理由不免费使用它们。而且,在
+任何情况下,您都不应该在任何您打算让其他人从中受益的分支中进行开发。应该
+小心地创建公开可用的分支;当它们处于完整的形式并准备好运行时(而不是之前),
+合并开发分支的补丁。
+
+Git提供了一些强大的工具,可以让您重写开发历史。一个不方便的补丁(比如说,
+一个打破二分法的补丁,或者有其他一些明显的缺陷)可以在适当的位置修复,或者
+完全从历史中消失。一个补丁系列可以被重写,就好像它是在今天的主线之上写的
+一样,即使你已经花了几个月的时间在写它。可以透明地将更改从一个分支转移到另
+一个分支。等等。明智地使用git修改历史的能力可以帮助创建问题更少的干净补丁集。
+
+然而,过度使用这种能力可能会导致其他问题,而不仅仅是对创建完美项目历史的
+简单痴迷。重写历史将重写该历史中包含的更改,将经过测试(希望)的内核树变
+为未经测试的内核树。但是,除此之外,如果开发人员没有对项目历史的共享视图,
+他们就无法轻松地协作;如果您重写了其他开发人员拉入他们存储库的历史,您将
+使这些开发人员的生活更加困难。因此,这里有一个简单的经验法则:被导出到其他
+人的历史在此后通常被认为是不可变的。
+
+因此,一旦将一组更改推送到公开可用的服务器上,就不应该重写这些更改。如果您
+尝试强制进行不会导致快进合并(即不共享同一历史记录的更改)的更改,Git将尝
+试强制执行此规则。可以重写此检查,有时可能需要重写导出的树。在树之间移动变
+更集以避免Linux-next中的冲突就是一个例子。但这种行为应该是罕见的。这就是为
+什么开发应该在私有分支中进行(必要时可以重写)并且只有在公共分支处于合理的
+高级状态时才转移到公共分支中的原因之一。
+
+当主线(或其他一组变更所基于的树)前进时,很容易与该树合并以保持领先地位。
+对于一个私有的分支,rebasing 可能是一个很容易跟上另一棵树的方法,但是一旦
+一棵树被导出到全世界,rebasing就不是一个选项。一旦发生这种情况,就必须进行
+完全合并(merge)。合并有时是很有意义的,但是过于频繁的合并会不必要地扰乱
+历史。在这种情况下,建议的技术是不经常合并,通常只在特定的发布点(如主线-rc
+发布)合并。如果您对特定的更改感到紧张,则可以始终在私有分支中执行测试合并。
+在这种情况下,git rerere 工具很有用;它记住合并冲突是如何解决的,这样您就
+不必重复相同的工作。
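+
+rerere功能需要显式启用,例如::
+
+    $ git config rerere.enabled true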
+
+关于Git这样的工具的一个最大的反复抱怨是:补丁从一个存储库到另一个存储库的
+大量移动使得很容易陷入错误建议的变更中,这些变更避开审查雷达进入主线。当内
+核开发人员看到这种情况发生时,他们往往会感到不高兴;在Git树上放置未查看或
+主题外的补丁可能会影响您将来获取树的能力。引用Linus:
+
+::
+
+  你可以给我发补丁,但要让我从你那里拉取一个Git补丁,我需要知道你清楚
+  自己在做什么,我需要能够信任这些东西,而不必去逐个手动检查每一个改动。
+
+(http://lwn.net/articles/224135/)。
+
+为了避免这种情况,请确保给定分支中的所有补丁都与相关主题紧密相关;“驱动程序
+修复”分支不应更改核心内存管理代码。而且,最重要的是,不要使用Git树来绕过
+审查过程。不时的将树的摘要发布到相关的列表中,当时间合适时,请求
+Linux-next 中包含该树。
+
+如果其他人开始发送补丁以包含到您的树中,不要忘记查看它们。还要确保您维护正确
+的作者信息; ``git am`` 工具在这方面做得最好,但是如果它通过第三方转发给您,
+您可能需要在补丁中添加“From:”行。
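+
+也就是说,如果补丁是经第三方转交的,可以在补丁正文的第一行注明原作者,例如
+(姓名和地址仅为示意)::
+
+    From: Original Author <author@example.com>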
+
+请求pull操作时,请务必提供所有相关信息:树的位置、要拉的分支以及拉操作将导致
+的更改。在这方面,git request pull 命令非常有用;它将按照其他开发人员的预期
+格式化请求,并检查以确保您记住了将这些更改推送到公共服务器。
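+
+一个假设的调用示例(起始标签、仓库地址和分支名均为示意)::
+
+    $ git request-pull v5.2-rc1 git://git.example.org/linux.git my-branch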
+
+审查补丁
+--------
+
+一些读者当然会反对将本节与“高级主题”放在一起,因为即使是刚开始的内核开发人员
+也应该检查补丁。当然,学习如何在内核环境中编程没有比查看其他人发布的代码更好
+的方法了。此外,审阅者永远供不应求;通过查看代码,您可以对整个流程做出重大贡献。
+
+审查代码可能是一个令人生畏的前景,特别是对于一个新的内核开发人员来说,他们
+可能会对公开询问代码感到紧张,而这些代码是由那些有更多经验的人发布的。不过,
+即使是最有经验的开发人员编写的代码也可以得到改进。也许对评审员(所有评审员)
+最好的建议是:把评审评论当成问题而不是批评。询问“在这条路径中如何释放锁?”
+总是比说“这里的锁是错误的”更好。
+
+不同的开发人员将从不同的角度审查代码。一些主要关注的是编码样式以及代码行是
+否有尾随空格。其他人将主要关注补丁作为一个整体实现的变更是否对内核有好处。
+然而,其他人会检查是否存在锁定问题、堆栈使用过度、可能的安全问题、在其他
+地方发现的代码重复、足够的文档、对性能的不利影响、用户空间ABI更改等。所有
+类型的检查,如果它们导致更好的代码进入内核,都是受欢迎和值得的。
diff --git a/Documentation/translations/zh_CN/process/8.Conclusion.rst b/Documentation/translations/zh_CN/process/8.Conclusion.rst
new file mode 100644
index 000000000000..2bbd76161e10
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/8.Conclusion.rst
@@ -0,0 +1,64 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/8.Conclusion.rst <development_conclusion>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_conclusion:
+
+更多信息
+========
+
+关于Linux内核开发和相关主题的信息来源很多。首先是在内核源代码分发中找到的
+文档目录。顶级 :ref:`Documentation/translations/zh_CN/process/howto.rst <cn_process_howto>`
+文件是一个重要的起点
+:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+和 :ref:`process/submitting-drivers.rst <submittingdrivers>`
+也是所有内核开发人员都应该阅读的内容。许多内部内核API都是使用kerneldoc机制
+记录的;“make htmldocs”或“make pdfdocs”可用于以HTML或PDF格式生成这些文档(
+尽管某些发行版提供的tex版本会遇到内部限制,无法正确处理文档)。
+
+不同的网站在各个细节层次上讨论内核开发。您的作者想谦虚地建议用 http://lwn.net/
+作为来源;有关许多特定内核主题的信息可以通过以下网址的lwn内核索引找到:
+
+ http://lwn.net/kernel/index/
+
+除此之外,内核开发人员的一个宝贵资源是:
+
+ http://kernelnewbies.org/
+
+当然,我们不应该忘记 http://kernel.org/ 这是内核发布信息的最终位置。
+
+关于内核开发有很多书:
+
+ Linux设备驱动程序,第三版(Jonathan Corbet、Alessandro Rubini和Greg Kroah Hartman)。
+ 在线:http://lwn.net/kernel/ldd3/
+
+ Linux内核开发(Robert Love)。
+
+ 了解Linux内核(Daniel Bovet和Marco Cesati)。
+
+然而,所有这些书都有一个共同的缺点:当它们上架时,它们往往有些过时,而且它们
+已经上架一段时间了。不过,在那里还可以找到相当多的好信息。
+
+有关git的文档,请访问:
+
+ http://www.kernel.org/pub/software/scm/git/docs/
+
+ http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+
+结论
+====
+
+祝贺所有通过这篇冗长的文件的人。希望它能够帮助您理解Linux内核是如何开发的,
+以及您如何参与这个过程。
+
+最后,重要的是参与。任何开源软件项目都不超过其贡献者投入其中的总和。Linux内核
+的发展速度和以前一样快,因为它得到了大量开发人员的帮助,他们都在努力使它变得
+更好。内核是一个主要的例子,说明当成千上万的人为了一个共同的目标一起工作时,
+可以做些什么。
+
+不过,内核总是可以从更大的开发人员基础中获益。总有更多的工作要做。但是,同样
+重要的是,Linux生态系统中的大多数其他参与者可以通过为内核做出贡献而受益。使
+代码进入主线是提高代码质量、降低维护和分发成本、提高对内核开发方向的影响程度
+等的关键。这是一种人人都赢的局面。启动你的编辑器,来加入我们吧,你会非常受
+欢迎的。
diff --git a/Documentation/translations/zh_CN/process/code-of-conduct-interpretation.rst b/Documentation/translations/zh_CN/process/code-of-conduct-interpretation.rst
new file mode 100644
index 000000000000..c323ce76e0cb
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/code-of-conduct-interpretation.rst
@@ -0,0 +1,108 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/code-of-conduct-interpretation.rst <code_of_conduct_interpretation>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_code_of_conduct_interpretation:
+
+Linux内核贡献者契约行为准则解释
+===============================
+
+:ref:`cn_code_of_conduct` 准则是一个通用文档,旨在为几乎所有开源社区提供一套规则。
+每个开源社区都是独一无二的,Linux内核也不例外。因此,本文描述了Linux内核社区中
+如何解释它。我们也不希望这种解释随着时间的推移是静态的,并将根据需要进行调整。
+
+与开发软件的“传统”方法相比,Linux内核开发工作是一个非常个人化的过程。你的贡献
+和背后的想法将被仔细审查,往往导致批判和批评。审查将几乎总是需要改进,材料才
+能包括在内核中。要知道这是因为所有相关人员都希望看到Linux整体成功的最佳解决方
+案。这个开发过程已经被证明可以创建有史以来最健壮的操作系统内核,我们不想做任何
+事情来导致提交质量和最终结果的下降。
+
+维护者
+------
+
+行为准则多次使用“维护者”一词。在内核社区中,“维护者”是负责子系统、驱动程序或
+文件的任何人,并在内核源代码树的维护者文件中列出。
+
+责任
+----
+
+《行为准则》提到了维护人员的权利和责任,这需要进一步澄清。
+
+首先,最重要的是,人们有理由期望维护人员以身作则。
+
+也就是说,我们的社区非常广阔,并没有对维护者提出新的要求,要求他们单方面处理
+其他人在其活跃的社区范围内的行为。这一责任由我们所有人共同承担,《行为准则》
+最终记录了申诉的途径,以防有关行为问题的疑问悬而未决。
+
+维护人员应该愿意在出现问题时提供帮助,并在需要时与社区中的其他人合作。如果您
+不确定如何处理出现的情况,请不要害怕联系技术咨询委员会(TAB)或其他维护人员。
+除非您愿意,否则不会将其视为违规报告。如果您不确定是否该联系TAB 或任何其他维
+护人员,请联系我们的冲突调解人 Mishi Choudhary <mishi@linux.com>。
+
+最后,“善待对方”才是每个人的最终目标。我们知道每个人都是人,有时我们都会失败,
+但我们所有人的首要目标应该是努力友好地解决问题。执行行为准则将是最后的选择。
+
+我们的目标是创建一个强大的、技术先进的操作系统,以及所涉及的技术复杂性,这自
+然需要专业知识和决策。
+
+所需的专业知识因贡献领域而异。它主要由上下文和技术复杂性决定,其次由贡献者和
+维护者的期望决定。
+
+专家的期望和决策都要经过讨论,但在最后,为了取得进展,必须能够做出决策。这一
+特权掌握在维护人员和项目领导的手中,预计将善意使用。
+
+因此,设定专业知识期望、作出决定和拒绝不适当的贡献不被视为违反行为准则。
+
+虽然维护人员一般都欢迎新来者,但他们帮助(新)贡献者克服障碍的能力有限,因此
+他们必须确定优先事项。这也不应被视为违反了行为准则。内核社区意识到这一点,并
+以各种形式提供入门级项目,例如 kernelnewbies.org 。
+
+范围
+----
+
+Linux内核社区主要在一组公共电子邮件列表上进行交互,这些列表分布在由多个不同
+公司或个人控制的多个不同服务器上。所有这些列表都在内核源代码树中的
+MAINTAINERS 文件中定义。发送到这些邮件列表的任何电子邮件都被视为包含在行为
+准则中。
+
+使用 kernel.org bugzilla和其他子系统bugzilla 或bug跟踪工具的开发人员应该遵循
+行为准则的指导原则。Linux内核社区没有“官方”项目电子邮件地址或“官方”社交媒体
+地址。使用kernel.org电子邮件帐户执行的任何活动必须遵循为kernel.org发布的行为
+准则,就像任何使用公司电子邮件帐户的个人必须遵循该公司的特定规则一样。
+
+行为准则并不禁止在邮件列表消息、内核更改日志消息或代码注释中继续包含名称、
+电子邮件地址和相关注释。
+
+其他论坛中的互动包括在适用于上述论坛的任何规则中,通常不包括在行为准则中。
+除了在极端情况下可考虑的例外情况。
+
+提交给内核的贡献应该使用适当的语言。在行为准则之前已经存在的内容现在不会被
+视为违反。然而,不适当的语言可以被视为一个bug;如果任何相关方提交补丁,
+这样的bug将被更快地修复。当前属于用户/内核API的一部分的表达式,或者反映已
+发布标准或规范中使用的术语的表达式,不被视为bug。
+
+执行
+----
+
+行为准则中列出的地址属于行为准则委员会。https://kernel.org/code-of-conduct.html
+列出了在任何给定时间接收这些电子邮件的确切成员。成员不能访问在加入委员会之前
+或离开委员会之后所做的报告。
+
+最初的行为准则委员会由TAB的志愿者以及作为中立第三方的专业调解人组成。委员会
+的首要任务是建立文件化的流程,并将其公开。
+
+如果报告人不希望将整个委员会纳入投诉或关切,可直接联系委员会的任何成员,包括
+调解人。
+
+行为准则委员会根据流程审查案例(见上文),并根据需要和适当与TAB协商,例如请求
+和接收有关内核社区的信息。
+
+委员会做出的任何决定都将提交给TAB,以便在必要时与相关维护人员一起执行。行为
+准则委员会的决定可以由TAB以三分之二多数的投票推翻。
+
+每季度,行为准则委员会和TAB将提供一份报告,概述行为准则委员会收到的匿名报告
+及其状态,以及任何被推翻的决定的细节,包括完整且可识别的投票细节。
+
+我们希望在启动期之后为行为准则委员会人员配备建立一个不同的流程。发生此情况时,
+将使用该信息更新此文档。
diff --git a/Documentation/translations/zh_CN/process/code-of-conduct.rst b/Documentation/translations/zh_CN/process/code-of-conduct.rst
new file mode 100644
index 000000000000..99024df058e9
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/code-of-conduct.rst
@@ -0,0 +1,72 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/code-of-conduct.rst <code_of_conduct>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_code_of_conduct:
+
+贡献者契约行为准则
+++++++++++++++++++
+
+我们的誓言
+==========
+
+为了营造一个开放、友好的环境,我们作为贡献者和维护人承诺,让我们的社区和参
+与者,拥有一个无骚扰的体验,无论年龄、体型、残疾、种族、性别特征、性别认同
+和表达、经验水平、教育程度、社会状况,经济地位、国籍、个人外貌、种族、宗教
+或性身份和取向。
+
+我们的标准
+==========
+
+有助于创造积极环境的行为包括:
+
+* 使用欢迎和包容的语言
+* 尊重不同的观点和经验
+* 优雅地接受建设性的批评
+* 关注什么对社区最有利
+* 对其他社区成员表示同情
+
+参与者的不可接受行为包括:
+
+* 使用性意味的语言或意象以及不受欢迎的性注意或者更过分的行为
+* 煽动、侮辱/贬损评论以及个人或政治攻击
+* 公开或私下骚扰
+* 未经明确许可,发布他人的私人信息,如物理或电子地址。
+* 在专业场合被合理认为不适当的其他行为
+
+我们的责任
+==========
+
+维护人员负责澄清可接受行为的标准,并应针对任何不可接受行为采取适当和公平的
+纠正措施。
+
+维护人员有权利和责任删除、编辑或拒绝与本行为准则不一致的评论、提交、代码、
+wiki编辑、问题(issue)和其他贡献,并可以暂时或永久地封禁任何他们认为有不当、
+威胁、冒犯或有害行为的贡献者。
+
+范围
+====
+
+当个人代表项目或其社区时,本行为准则既适用于项目空间,也适用于公共空间。
+代表一个项目或社区的例子包括使用一个正式的项目电子邮件地址,通过一个正式
+的社交媒体帐户发布,或者在在线或离线事件中担任指定的代表。项目维护人员可以
+进一步定义和澄清项目的表示。
+
+执行
+====
+
+如有滥用、骚扰或其他不可接受的行为,可联系行为准则委员会<conduct@kernel.org>。
+所有投诉都将接受审查和调查,并将得到必要和适当的答复。行为准则委员会有义务
+对事件报告人保密。具体执行政策的进一步细节可单独公布。
+
+归属
+====
+
+本行为准则改编自《贡献者契约》,版本1.4,可从
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 获取。
+
+解释
+====
+
+有关Linux内核社区如何解释此文档,请参阅 :ref:`cn_code_of_conduct_interpretation`
diff --git a/Documentation/translations/zh_CN/coding-style.rst b/Documentation/translations/zh_CN/process/coding-style.rst
index 3cb09803e084..5479c591c2f7 100644
--- a/Documentation/translations/zh_CN/coding-style.rst
+++ b/Documentation/translations/zh_CN/process/coding-style.rst
@@ -1,19 +1,10 @@
-Chinese translated version of Documentation/process/coding-style.rst
+.. include:: ../disclaimer-zh_CN.rst
-If you have any comment or update to the content, please post to LKML directly.
-However, if you have problem communicating in English you can also ask the
-Chinese maintainer for help. Contact the Chinese maintainer, if this
-translation is outdated or there is problem with translation.
+:Original: :ref:`Documentation/process/coding-style.rst <codingstyle>`
-Chinese maintainer: Zhang Le <r0bertz@gentoo.org>
+.. _cn_codingstyle:
----------------------------------------------------------------------
-
-Documentation/process/coding-style.rst 的中文翻译
-
-如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,
-也可以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版
-维护者::
+译者::
中文版维护者: 张乐 Zhang Le <r0bertz@gentoo.org>
中文版翻译者: 张乐 Zhang Le <r0bertz@gentoo.org>
@@ -23,10 +14,6 @@ Documentation/process/coding-style.rst 的中文翻译
Li Zefan <lizf@cn.fujitsu.com>
Wang Chen <wangchen@cn.fujitsu.com>
-以下为正文
-
----------------------------------------------------------------------
-
Linux 内核代码风格
=========================
diff --git a/Documentation/translations/zh_CN/process/development-process.rst b/Documentation/translations/zh_CN/process/development-process.rst
new file mode 100644
index 000000000000..30cffe66c075
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/development-process.rst
@@ -0,0 +1,26 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/development-process.rst <development_process_main>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_development_process_main:
+
+内核开发过程指南
+================
+
+内容:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ 1.Intro
+ 2.Process
+ 3.Early-stage
+ 4.Coding
+ 5.Posting
+ 6.Followthrough
+ 7.AdvancedTopics
+ 8.Conclusion
+
+本文档的目的是帮助开发人员(及其经理)以最小的挫折感与开发社区合作。它试图记录
+这个社区的工作方式,使不熟悉Linux内核开发(或者实际上是自由软件开发)的人也能
+理解。虽然这里有一些技术性内容,但这是一个面向流程的讨论,不需要深入了解内核
+编程就可以理解。
diff --git a/Documentation/translations/zh_CN/email-clients.txt b/Documentation/translations/zh_CN/process/email-clients.rst
index ec31d97e8d0e..102023651118 100644
--- a/Documentation/translations/zh_CN/email-clients.txt
+++ b/Documentation/translations/zh_CN/process/email-clients.rst
@@ -1,33 +1,34 @@
-Chinese translated version of Documentation/process/email-clients.rst
+.. _cn_email_clients:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../disclaimer-zh_CN.rst
-Chinese maintainer: Harry Wei <harryxiyou@gmail.com>
----------------------------------------------------------------------
-Documentation/process/email-clients.rst 的中文翻译
+:Original: :ref:`Documentation/process/email-clients.rst <email_clients>`
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译者::
-中文版维护者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版翻译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
-中文版校译者: Yinglin Luan <synmyth@gmail.com>
- Xiaochen Wang <wangxiaochen0@gmail.com>
- yaxinsn <yaxinsn@163.com>
-
-以下为正文
----------------------------------------------------------------------
+ 中文版维护者: 贾威威 Harry Wei <harryxiyou@gmail.com>
+ 中文版翻译者: 贾威威 Harry Wei <harryxiyou@gmail.com>
+ 时奎亮 Alex Shi <alex.shi@linux.alibaba.com>
+ 中文版校译者: Yinglin Luan <synmyth@gmail.com>
+ Xiaochen Wang <wangxiaochen0@gmail.com>
+ yaxinsn <yaxinsn@163.com>
Linux邮件客户端配置信息
-======================================================================
+=======================
+
+Git
+---
+
+现在大多数开发人员使用 ``git send-email`` 而不是常规的电子邮件客户端。这方面
+的手册非常好。在接收端,维护人员使用 ``git am`` 加载补丁。
+
+如果你是 ``git`` 新手,那么把你的第一个补丁发送给你自己。将其保存为包含所有
+标题的原始文本。运行 ``git am raw_email.txt`` ,然后使用 ``git log`` 查看更
+改日志。如果工作正常,再将补丁发送到相应的邮件列表。
+
普通配置
-----------------------------------------------------------------------
+--------
Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的内嵌文本。有些维护者
接收附件,但是附件的内容格式应该是"text/plain"。然而,附件一般是不赞成的,
因为这会使补丁的引用部分在评论过程中变的很困难。
@@ -56,7 +57,7 @@ Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的
一些邮件客户端提示
-----------------------------------------------------------------------
+------------------
这里给出一些详细的MUA配置提示,可以用于给Linux内核发送补丁。这些并不意味是
所有的软件包配置总结。
@@ -64,8 +65,8 @@ Linux内核补丁是通过邮件被提交的,最好把补丁作为邮件体的
TUI = 以文本为基础的用户接口
GUI = 图形界面用户接口
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Alpine (TUI)
+~~~~~~~~~~~~
配置选项:
在"Sending Preferences"部分:
@@ -76,8 +77,8 @@ Alpine (TUI)
当写邮件时,光标应该放在补丁会出现的地方,然后按下CTRL-R组合键,使指定的
补丁文件嵌入到邮件中。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Evolution (GUI)
+~~~~~~~~~~~~~~~
一些开发者成功的使用它发送补丁
@@ -89,8 +90,8 @@ Evolution (GUI)
你还可以"diff -Nru old.c new.c | xclip",选择Preformat,然后使用中间键进行粘帖。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Kmail (GUI)
+~~~~~~~~~~~
一些开发者成功的使用它发送补丁。
@@ -118,13 +119,13 @@ display",这样内嵌附件更容易让读者看到。
并且希望这将会被处理。邮件是以只针对某个用户可读写的权限被保存的,所以如果你想把邮件复制到其他地方,
你不得不把他们的权限改为组或者整体可读。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lotus Notes (GUI)
+~~~~~~~~~~~~~~~~~
不要使用它。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Mutt (TUI)
+~~~~~~~~~~
很多Linux开发人员使用mutt客户端,所以证明它肯定工作的非常漂亮。
@@ -142,12 +143,49 @@ Mutt不自带编辑器,所以不管你使用什么编辑器都不应该带有
如果想要把补丁作为内嵌文本。
(a)ttach工作的很好,不带有"set paste"。
+你可以通过 ``git format-patch`` 生成补丁,然后用 Mutt发送它们::
+
+ $ mutt -H 0001-some-bug-fix.patch
+
配置选项:
它应该以默认设置的形式工作。
然而,把"send_charset"设置为"us-ascii::utf-8"也是一个不错的主意。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Mutt 是高度可配置的。 这里是个使用mutt通过 Gmail 发送的补丁的最小配置::
+
+ # .muttrc
+ # ================ IMAP ====================
+ set imap_user = 'yourusername@gmail.com'
+ set imap_pass = 'yourpassword'
+ set spoolfile = imaps://imap.gmail.com/INBOX
+ set folder = imaps://imap.gmail.com/
+ set record="imaps://imap.gmail.com/[Gmail]/Sent Mail"
+ set postponed="imaps://imap.gmail.com/[Gmail]/Drafts"
+ set mbox="imaps://imap.gmail.com/[Gmail]/All Mail"
+
+ # ================ SMTP ====================
+ set smtp_url = "smtp://username@smtp.gmail.com:587/"
+ set smtp_pass = $imap_pass
+ set ssl_force_tls = yes # Require encrypted connection
+
+ # ================ Composition ====================
+ set editor = `echo \$EDITOR`
+ set edit_headers = yes # See the headers when editing
+ set charset = UTF-8 # value of $LANG; also fallback for send_charset
+ # Sender, email address, and sign-off line must match
+ unset use_domain # because joe@localhost is just embarrassing
+ set realname = "YOUR NAME"
+ set from = "username@gmail.com"
+ set use_from = yes
+
+Mutt文档含有更多信息:
+
+ http://dev.mutt.org/trac/wiki/UseCases/Gmail
+
+ http://dev.mutt.org/doc/manual.html
+
Pine (TUI)
+~~~~~~~~~~
Pine过去有一些空格删减问题,但是这些现在应该都被修复了。
@@ -158,8 +196,8 @@ Pine过去有一些空格删减问题,但是这些现在应该都被修复了
- "no-strip-whitespace-before-send"选项也是需要的。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sylpheed (GUI)
+~~~~~~~~~~~~~~
- 内嵌文本可以很好的工作(或者使用附件)。
- 允许使用外部的编辑器。
@@ -168,8 +206,8 @@ Sylpheed (GUI)
- 在组成窗口中有一个很有用的ruler bar。
- 给地址本中添加地址就不会正确的了解显示名。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Thunderbird (GUI)
+~~~~~~~~~~~~~~~~~
默认情况下,thunderbird很容易损坏文本,但是还有一些方法可以强制它变得更好。
@@ -191,13 +229,13 @@ Thunderbird (GUI)
$EDITOR来读取或者合并补丁到文本中。要实现它,可以下载并且安装这个扩展,然后添加一个使用它的
按键View->Toolbars->Customize...最后当你书写信息的时候仅仅点击它就可以了。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TkRat (GUI)
+~~~~~~~~~~~
可以使用它。使用"Insert file..."或者外部的编辑器。
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Gmail (Web GUI)
+~~~~~~~~~~~~~~~
不要使用它发送补丁。
diff --git a/Documentation/translations/zh_CN/HOWTO b/Documentation/translations/zh_CN/process/howto.rst
index 7a00a8a4eb15..5b671178b17b 100644
--- a/Documentation/translations/zh_CN/HOWTO
+++ b/Documentation/translations/zh_CN/process/howto.rst
@@ -1,32 +1,22 @@
-Chinese translated version of Documentation/process/howto.rst
+.. _cn_process_howto:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../disclaimer-zh_CN.rst
-Maintainer: Greg Kroah-Hartman <greg@kroah.com>
-Chinese maintainer: Li Yang <leoli@freescale.com>
----------------------------------------------------------------------
-Documentation/process/howto.rst 的中文翻译
+:Original: :ref:`Documentation/process/howto.rst <process_howto>`
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译者::
-英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
-中文版维护者: 李阳 Li Yang <leoli@freescale.com>
-中文版翻译者: 李阳 Li Yang <leoli@freescale.com>
-中文版校译者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
- 陈琦 Maggie Chen <chenqi@beyondsoft.com>
- 王聪 Wang Cong <xiyou.wangcong@gmail.com>
-
-以下为正文
----------------------------------------------------------------------
+ 英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
+ 中文版维护者: 李阳 Li Yang <leoyang.li@nxp.com>
+ 中文版翻译者: 李阳 Li Yang <leoyang.li@nxp.com>
+ 时奎亮 Alex Shi <alex.shi@linux.alibaba.com>
+ 中文版校译者:
+ 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 陈琦 Maggie Chen <chenqi@beyondsoft.com>
+ 王聪 Wang Cong <xiyou.wangcong@gmail.com>
如何参与Linux内核开发
----------------------
+=====================
这是一篇将如何参与Linux内核开发的相关问题一网打尽的终极秘笈。它将指导你
成为一名Linux内核开发者,并且学会如何同Linux内核开发社区合作。它尽可能不
@@ -47,6 +37,7 @@ Linux内核大部分是由C语言写成的,一些体系结构相关的代码
参与内核开发,你必须精通C语言。除非你想为某个架构开发底层代码,否则你并
不需要了解(任何体系结构的)汇编语言。下面列举的书籍虽然不能替代扎实的C
语言教育和多年的开发经验,但如果需要的话,做为参考还是不错的:
+
- "The C Programming Language" by Kernighan and Ritchie [Prentice Hall]
《C程序设计语言(第2版·新版)》(徐宝文 李志 译)[机械工业出版社]
- "Practical C Programming" by Steve Oualline [O'Reilly]
@@ -71,9 +62,11 @@ Linux内核使用GNU C和GNU工具链开发。虽然它遵循ISO C89标准,但
--------
Linux内核源代码都是在GPL(通用公共许可证)的保护下发布的。要了解这种许可
-的细节请查看源代码主目录下的COPYING文件。如果你对它还有更深入问题请联系
-律师,而不要在Linux内核邮件组上提问。因为邮件组里的人并不是律师,不要期
-望他们的话有法律效力。
+的细节请查看源代码主目录下的COPYING文件。Linux内核许可准则和如何使用
+`SPDX <https://spdx.org/>`_ 标识符的说明在这个文件中
+:ref:`Documentation/translations/zh_CN/process/license-rules.rst <cn_kernel_licensing>`
+如果你对它还有更深入问题请联系律师,而不要在Linux内核邮件组上提问。因为
+邮件组里的人并不是律师,不要期望他们的话有法律效力。
对于GPL的常见问题和解答,请访问以下链接:
http://www.gnu.org/licenses/gpl-faq.html
@@ -89,65 +82,75 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
的维护者解释这些变化。
以下是内核代码中需要阅读的文档:
- README
+ :ref:`Documentation/admin-guide/README.rst <readme>`
文件简要介绍了Linux内核的背景,并且描述了如何配置和编译内核。内核的
新用户应该从这里开始。
- Documentation/process/changes.rst
+
+ :ref:`Documentation/process/changes.rst <changes>`
文件给出了用来编译和使用内核所需要的最小软件包列表。
- Documentation/process/coding-style.rst
+ :ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
描述Linux内核的代码风格和理由。所有新代码需要遵守这篇文档中定义的规
范。大多数维护者只会接收符合规定的补丁,很多人也只会帮忙检查符合风格
的代码。
- Documentation/process/submitting-patches.rst
- Documentation/process/submitting-drivers.rst
+ :ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+ :ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
+
这两份文档明确描述如何创建和发送补丁,其中包括(但不仅限于):
- 邮件内容
- 邮件格式
- 选择收件人
+
遵守这些规定并不能保证提交成功(因为所有补丁需要通过严格的内容和风格
审查),但是忽视他们几乎就意味着失败。
其他关于如何正确地生成补丁的优秀文档包括:
"The Perfect Patch"
+
http://www.ozlabs.org/~akpm/stuff/tpp.txt
+
"Linux kernel patch submission format"
+
http://linux.yyz.us/patch-format.html
- Documentation/process/stable-api-nonsense.rst
+ :ref:`Documentation/translations/zh_CN/process/stable-api-nonsense.rst <cn_stable_api_nonsense>`
论证内核为什么特意不包括稳定的内核内部API,也就是说不包括像这样的特
性:
+
- 子系统中间层(为了兼容性?)
- 在不同操作系统间易于移植的驱动程序
- 减缓(甚至阻止)内核代码的快速变化
+
这篇文档对于理解Linux的开发哲学至关重要。对于将开发平台从其他操作系
统转移到Linux的人来说也很重要。
- Documentation/admin-guide/security-bugs.rst
+ :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`
如果你认为自己发现了Linux内核的安全性问题,请根据这篇文档中的步骤来
提醒其他内核开发者并帮助解决这个问题。
- Documentation/process/management-style.rst
+ :ref:`Documentation/translations/zh_CN/process/management-style.rst <cn_managementstyle>`
+
描述内核维护者的工作方法及其共有特点。这对于刚刚接触内核开发(或者对
它感到好奇)的人来说很重要,因为它解释了很多对于内核维护者独特行为的
普遍误解与迷惑。
- Documentation/process/stable-kernel-rules.rst
+ :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
解释了稳定版内核发布的规则,以及如何将改动放入这些版本的步骤。
- Documentation/process/kernel-docs.rst
+ :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
有助于内核开发的外部文档列表。如果你在内核自带的文档中没有找到你想找
的内容,可以查看这些文档。
- Documentation/process/applying-patches.rst
+ :ref:`Documentation/process/applying-patches.rst <applying_patches>`
关于补丁是什么以及如何将它打在不同内核开发分支上的好介绍
内核还拥有大量从代码自动生成的文档。它包含内核内部API的全面介绍以及如何
妥善处理加锁的规则。生成的文档会放在 Documentation/DocBook/目录下。在内
核源码的主目录中使用以下不同命令将会分别生成PDF、Postscript、HTML和手册
-页等不同格式的文档:
+页等不同格式的文档::
+
make pdfdocs
make htmldocs
@@ -155,7 +158,9 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
如何成为内核开发者
------------------
如果你对Linux内核开发一无所知,你应该访问“Linux内核新手”计划:
+
http://kernelnewbies.org
+
它拥有一个可以问各种最基本的内核开发问题的邮件列表(在提问之前一定要记得
查找已往的邮件,确认是否有人已经回答过相同的问题)。它还拥有一个可以获得
实时反馈的IRC聊天频道,以及大量对于学习Linux内核开发相当有帮助的文档。
@@ -166,23 +171,21 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
如果你想加入内核开发社区并协助完成一些任务,却找不到从哪里开始,可以访问
“Linux内核房管员”计划:
+
http://kernelnewbies.org/KernelJanitors
+
这是极佳的起点。它提供一个相对简单的任务列表,列出内核代码中需要被重新
整理或者改正的地方。通过和负责这个计划的开发者们一同工作,你会学到将补丁
集成进内核的基本原理。如果还没有决定下一步要做什么的话,你还可能会得到方
向性的指点。
-如果你已经有一些现成的代码想要放到内核中,但是需要一些帮助来使它们拥有正
-确的格式。请访问“内核导师”计划。这个计划就是用来帮助你完成这个目标的。它
-是一个邮件列表,地址如下:
- http://selenic.com/mailman/listinfo/kernel-mentors
-
在真正动手修改内核代码之前,理解要修改的代码如何运作是必需的。要达到这个
目的,没什么办法比直接读代码更有效了(大多数花招都会有相应的注释),而且
一些特制的工具还可以提供帮助。例如,“Linux代码交叉引用”项目就是一个值得
特别推荐的帮助工具,它将源代码显示在有编目和索引的网页上。其中一个更新及
时的内核源码库,可以通过以下地址访问:
- http://sosdg.org/~coywolf/lxr/
+
+ https://elixir.bootlin.com/
开发流程
@@ -190,22 +193,23 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与
目前Linux内核开发流程包括几个“主内核分支”和很多子系统相关的内核分支。这
些分支包括:
- - 2.6.x主内核源码树
- - 2.6.x.y -stable内核源码树
- - 2.6.x -mm内核补丁集
- - 子系统相关的内核源码树和补丁集
+ - Linus 的内核源码树
+ - 多个主要版本的稳定版内核树
+ - 子系统相关的内核树
+ - linux-next 集成测试树
+
+
+主线树
+------
+主线树是由Linus Torvalds 维护的。你可以在https://kernel.org 网站或者代码
+库中下找到它。它的开发遵循以下步骤:
-2.6.x内核主源码树
------------------
-2.6.x内核是由Linus Torvalds(Linux的创造者)亲自维护的。你可以在
-kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循以下步
-骤:
- 每当一个新版本的内核被发布,为期两周的集成窗口将被打开。在这段时间里
维护者可以向Linus提交大段的修改,通常这些修改已经被放到-mm内核中几个
星期了。提交大量修改的首选方式是使用git工具(内核的代码版本管理工具
- ,更多的信息可以在http://git-scm.com/获取),不过使用普通补丁也是可以
- 的。
+ ,更多的信息可以在 http://git-scm.com/ 获取),不过使用普通补丁也是
+ 可以的。
- 两个星期以后-rc1版本内核发布。之后只有不包含可能影响整个内核稳定性的
新功能的补丁才可能被接受。请注意一个全新的驱动程序(或者文件系统)有
可能在-rc1后被接受是因为这样的修改完全独立,不会影响其他的代码,所以
@@ -220,106 +224,61 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循
“没有人知道新内核何时会被发布,因为发布是根据已知bug的情况来决定
的,而不是根据一个事先制定好的时间表。”
+子系统特定树
+------------
-2.6.x.y -stable(稳定版)内核源码树
------------------------------------
-由4个数字组成的内核版本号说明此内核是-stable版本。它们包含基于2.6.x版本
-内核的相对较小且至关重要的修补,这些修补针对安全性问题或者严重的内核退步。
-
-这种版本的内核适用于那些期望获得最新的稳定版内核并且不想参与测试开发版或
-者实验版的用户。
-
-如果没有2.6.x.y版本内核存在,那么最新的2.6.x版本内核就相当于是当前的稳定
-版内核。
-
-2.6.x.y版本由“稳定版”小组(邮件地址<stable@vger.kernel.org>)维护,一般隔周发
-布新版本。
-
-内核源码中的Documentation/process/stable-kernel-rules.rst文件具体描述了可被稳定
-版内核接受的修改类型以及发布的流程。
+各种内核子系统的维护者——以及许多内核子系统开发人员——在源代码库中公开了他们
+当前的开发状态。这样,其他人就可以看到内核的不同区域发生了什么。在开发速度
+很快的领域,可能会要求开发人员将提交的内容建立在这样的子系统内核树上,这样
+就避免了提交与其他已经进行的工作之间的冲突。
+这些存储库中的大多数都是Git树,但是也有其他的scm在使用,或者补丁队列被发布
+为Quilt系列。这些子系统存储库的地址列在MAINTAINERS文件中。其中许多可以在
+https://git.kernel.org/上浏览。
-2.6.x -mm补丁集
----------------
-这是由Andrew Morton维护的试验性内核补丁集。Andrew将所有子系统的内核源码
-和补丁拼凑到一起,并且加入了大量从linux-kernel邮件列表中采集的补丁。这个
-源码树是新功能和补丁的试炼场。当补丁在-mm补丁集里证明了其价值以后Andrew
-或者相应子系统的维护者会将补丁发给Linus以便集成进主内核源码树。
+在将一个建议的补丁提交到这样的子系统树之前,需要对它进行审查,审查主要发生
+在邮件列表上(请参见下面相应的部分)。对于几个内核子系统,这个审查过程是通
+过工具补丁跟踪的。Patchwork提供了一个Web界面,显示补丁发布、对补丁的任何评
+论或修订,维护人员可以将补丁标记为正在审查、接受或拒绝。大多数补丁网站都列
+在 https://patchwork.kernel.org/
-在将所有新补丁发给Linus以集成到主内核源码树之前,我们非常鼓励先把这些补
-丁放在-mm版内核源码树中进行测试。
-
-这些内核版本不适合在需要稳定运行的系统上运行,因为运行它们比运行任何其他
-内核分支都更具有风险。
-
-如果你想为内核开发进程提供帮助,请尝试并使用这些内核版本,并在
-linux-kernel邮件列表中提供反馈,告诉大家你遇到了问题还是一切正常。
-
-通常-mm版补丁集不光包括这些额外的试验性补丁,还包括发布时-git版主源码树
-中的改动。
-
--mm版内核没有固定的发布周期,但是通常在每两个-rc版内核发布之间都会有若干
-个-mm版内核发布(一般是1至3个)。
-
-
-子系统相关内核源码树和补丁集
-----------------------------
-相当一部分内核子系统开发者会公开他们自己的开发源码树,以便其他人能了解内
-核的不同领域正在发生的事情。如上所述,这些源码树会被集成到-mm版本内核中。
-
-下面是目前可用的一些内核源码树的列表:
- 通过git管理的源码树:
- - Kbuild开发源码树, Sam Ravnborg <sam@ravnborg.org>
- git.kernel.org:/pub/scm/linux/kernel/git/sam/kbuild.git
-
- - ACPI开发源码树, Len Brown <len.brown@intel.com>
- git.kernel.org:/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
-
- - 块设备开发源码树, Jens Axboe <axboe@suse.de>
- git.kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
-
- - DRM开发源码树, Dave Airlie <airlied@linux.ie>
- git.kernel.org:/pub/scm/linux/kernel/git/airlied/drm-2.6.git
-
- - ia64开发源码树, Tony Luck <tony.luck@intel.com>
- git.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6.git
+Linux-next 集成测试树
+---------------------
- - ieee1394开发源码树, Jody McIntyre <scjody@modernduck.com>
- git.kernel.org:/pub/scm/linux/kernel/git/scjody/ieee1394.git
+在将子系统树的更新合并到主线树之前,需要对它们进行集成测试。为此,存在一个
+特殊的测试存储库,其中几乎每天都会提取所有子系统树:
- - infiniband开发源码树, Roland Dreier <rolandd@cisco.com>
- git.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
+ https://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
- - libata开发源码树, Jeff Garzik <jgarzik@pobox.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev.git
+通过这种方式,Linux-next 对下一个合并阶段将进入主线内核的内容给出了一个概要
+展望。非常欢迎喜欢冒险的测试者运行和测试 Linux-next。
- - 网络驱动程序开发源码树, Jeff Garzik <jgarzik@pobox.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git
+多个主要版本的稳定版内核树
+-----------------------------------
+由3个数字组成的内核版本号说明此内核是-stable版本。它们包含内核的相对较小且
+至关重要的修补,这些修补针对安全性问题或者严重的内核退步。
- - pcmcia开发源码树, Dominik Brodowski <linux@dominikbrodowski.net>
- git.kernel.org:/pub/scm/linux/kernel/git/brodo/pcmcia-2.6.git
+这种版本的内核适用于那些期望获得最新的稳定版内核并且不想参与测试开发版或
+者实验版的用户。
- - SCSI开发源码树, James Bottomley <James.Bottomley@SteelEye.com>
- git.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
+稳定版内核树版本由“稳定版”小组(邮件地址<stable@vger.kernel.org>)维护,一般
+隔周发布新版本。
- 使用quilt管理的补丁集:
- - USB, PCI, 驱动程序核心和I2C, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
- kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
- - x86-64, 部分i386, Andi Kleen <ak@suse.de>
- ftp.firstfloor.org:/pub/ak/x86_64/quilt/
+内核源码中的 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+文件具体描述了可被稳定版内核接受的修改类型以及发布的流程。
- 其他内核源码树可以在http://git.kernel.org的列表中和MAINTAINERS文件里
- 找到。
报告bug
-------
bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。我们鼓励用
户在这个工具中报告找到的所有bug。如何使用内核bugzilla的细节请访问:
+
http://test.kernel.org/bugzilla/faq.html
-内核源码主目录中的admin-guide/reporting-bugs.rst文件里有一个很好的模板。它指导用户如何报
-告可能的内核bug以及需要提供哪些信息来帮助内核开发者们找到问题的根源。
+内核源码主目录中的:ref:`admin-guide/reporting-bugs.rst <reportingbugs>`
+文件里有一个很好的模板。它指导用户如何报告可能的内核bug以及需要提供哪些信息
+来帮助内核开发者们找到问题的根源。
利用bug报告
@@ -330,12 +289,7 @@ bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。
者感受到你的存在。修改bug是赢得其他开发者赞誉的最好办法,因为并不是很多
人都喜欢浪费时间去修改别人报告的bug。
-要尝试修改已知的bug,请访问http://bugzilla.kernel.org网址。如果你想获得
-最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里)
-或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。
-
- https://lists.linux-foundation.org/mailman/listinfo/bugme-new
- https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
+要尝试修改已知的bug,请访问 http://bugzilla.kernel.org 网址。
邮件列表
@@ -343,10 +297,14 @@ bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。
正如上面的文档所描述,大多数的骨干内核开发者都加入了Linux Kernel邮件列
表。如何订阅和退订列表的细节可以在这里找到:
+
http://vger.kernel.org/vger-lists.html#linux-kernel
+
网上很多地方都有这个邮件列表的存档(archive)。可以使用搜索引擎来找到这些
存档。比如:
+
http://dir.gmane.org/gmane.linux.kernel
+
在发信之前,我们强烈建议你先在存档中搜索你想要讨论的问题。很多已经被详细
讨论过的问题只在邮件列表的存档中可以找到。
@@ -354,10 +312,12 @@ bugzilla.kernel.org是Linux内核开发者们用来跟踪内核Bug的网站。
MAINTAINERS文件中可以找到不同话题对应的邮件列表。
很多邮件列表架设在kernel.org服务器上。这些列表的信息可以在这里找到:
+
http://vger.kernel.org/vger-lists.html
在使用这些邮件列表时,请记住保持良好的行为习惯。下面的链接提供了与这些列
表(或任何其它邮件列表)交流的一些简单规则,虽然内容有点滥竽充数。
+
http://www.albion.com/netiquette/
当有很多人回复你的邮件时,邮件的抄送列表会变得很长。请不要将任何人从抄送
@@ -369,11 +329,12 @@ MAINTAINERS文件中可以找到不同话题对应的邮件列表。
这几行。将你的评论加在被引用的段落之间而不要放在邮件的顶部。
如果你在邮件中附带补丁,请确认它们是可以直接阅读的纯文本(如
-Documentation/process/submitting-patches.rst文档中所述)。内核开发者们不希望遇到附件
-或者被压缩了的补丁。只有这样才能保证他们可以直接评论你的每行代码。请确保
-你使用的邮件发送程序不会修改空格和制表符。一个防范性的测试方法是先将邮件
-发送给自己,然后自己尝试是否可以顺利地打上收到的补丁。如果测试不成功,请
-调整或者更换你的邮件发送程序直到它正确工作为止。
+:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+文档中所述)。内核开发者们不希望遇到附件或者被压缩了的补丁。只有这样才能
+保证他们可以直接评论你的每行代码。请确保你使用的邮件发送程序不会修改空格
+和制表符。一个防范性的测试方法是先将邮件发送给自己,然后自己尝试是否可以
+顺利地打上收到的补丁。如果测试不成功,请调整或者更换你的邮件发送程序直到
+它正确工作为止。
总而言之,请尊重其他的邮件列表订阅者。
@@ -383,6 +344,7 @@ Documentation/process/submitting-patches.rst文档中所述)。内核开发者
内核社区的目标就是提供尽善尽美的内核。所以当你提交补丁期望被接受进内核的
时候,它的技术价值以及其他方面都将被评审。那么你可能会得到什么呢?
+
- 批评
- 评论
- 要求修改
@@ -395,6 +357,7 @@ Documentation/process/submitting-patches.rst文档中所述)。内核开发者
没在茫茫信海中。
你不应该做的事情:
+
- 期望自己的补丁不受任何质疑就直接被接受
- 翻脸
- 忽略别人的评论
@@ -414,7 +377,8 @@ Documentation/process/submitting-patches.rst文档中所述)。内核开发者
内核社区的工作模式同大多数传统公司开发队伍的工作模式并不相同。下面这些例
子,可以帮助你避免某些可能发生问题:
- 用这些话介绍你的修改提案会有好处:
+用这些话介绍你的修改提案会有好处:
+
- 它同时解决了多个问题
- 它删除了2000行代码
- 这是补丁,它已经解释了我想要说明的
@@ -422,7 +386,8 @@ Documentation/process/submitting-patches.rst文档中所述)。内核开发者
- 这是一系列小补丁用来……
- 这个修改提高了普通机器的性能……
- 应该避免如下的说法:
+应该避免如下的说法:
+
- 我们在AIX/ptx/Solaris就是这么做的,所以这么做肯定是好的……
- 我做这行已经20年了,所以……
- 为了我们公司赚钱考虑必须这么做
@@ -495,6 +460,7 @@ Linux内核社区并不喜欢一下接收大段的代码。修改需要被恰当
当你发送补丁的时候,需要特别留意邮件正文的内容。因为这里的信息将会做为补
丁的修改记录(ChangeLog),会被一直保留以备大家查阅。它需要完全地描述补丁,
包括:
+
- 为什么需要这个修改
- 补丁的总体设计
- 实现细节
@@ -510,7 +476,8 @@ Linux内核社区并不喜欢一下接收大段的代码。修改需要被恰当
很多人已经做到了,而他们都曾经和现在的你站在同样的起点上。
----------------
+感谢
+----
感谢Paolo Ciarrocchi允许“开发流程”部分基于他所写的文章
(http://www.kerneltravel.net/newbie/2.6-development_process),感谢Randy
Dunlap和Gerrit Huizenga完善了应该说和不该说的列表。感谢Pat Mochel, Hanna
diff --git a/Documentation/translations/zh_CN/process/index.rst b/Documentation/translations/zh_CN/process/index.rst
new file mode 100644
index 000000000000..be1e764a80d2
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/index.rst
@@ -0,0 +1,60 @@
+.. raw:: latex
+
+ \renewcommand\thesection*
+ \renewcommand\thesubsection*
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/index.rst <process_index>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_process_index:
+
+与Linux 内核社区一起工作
+========================
+
+那么你想成为Linux内核开发人员? 欢迎! 不但从技术意义上讲有很多关于内核的知识
+需要学,而且了解我们社区的工作方式也很重要。阅读这些文章可以让您以更轻松、
+麻烦最少的方式将更改合并到内核。
+
+以下是每位开发人员应阅读的基本指南。
+
+.. toctree::
+ :maxdepth: 1
+
+ howto
+ code-of-conduct
+ code-of-conduct-interpretation
+ submitting-patches
+ programming-language
+ coding-style
+ development-process
+ email-clients
+ license-rules
+
+其它大多数开发人员感兴趣的社区指南:
+
+
+.. toctree::
+ :maxdepth: 1
+
+ submitting-drivers
+ submit-checklist
+ stable-api-nonsense
+ stable-kernel-rules
+ management-style
+
+这些是一些总体技术指南,由于缺乏更合适的位置,现在先放在这里。
+
+.. toctree::
+ :maxdepth: 1
+
+ magic-number
+ volatile-considered-harmful
+
+.. only:: subproject and html
+
+ 目录
+ ====
+
+ * :ref:`genindex`
diff --git a/Documentation/translations/zh_CN/process/license-rules.rst b/Documentation/translations/zh_CN/process/license-rules.rst
new file mode 100644
index 000000000000..30c272b2a292
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/license-rules.rst
@@ -0,0 +1,370 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/license-rules.rst <kernel_licensing>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_kernel_licensing:
+
+Linux内核许可规则
+=================
+
+Linux内核根据LICENSES/preferred/GPL-2.0中提供的GNU通用公共许可证版本2
+(GPL-2.0)的条款提供,并在LICENSES/exceptions/Linux-syscall-note中显式
+描述了例外的系统调用,如COPYING文件中所述。
+
+此文档文件提供了如何对每个源文件进行注释以使其许可证清晰明确的说明。
+它不会取代内核的许可证。
+
+内核源代码作为一个整体适用于COPYING文件中描述的许可证,但是单个源文件可以
+具有不同的与GPL-2.0兼容的许可证::
+
+ GPL-1.0+ : GNU通用公共许可证v1.0或更高版本
+ GPL-2.0+ : GNU通用公共许可证v2.0或更高版本
+ LGPL-2.0 : 仅限GNU库通用公共许可证v2
+ LGPL-2.0+: GNU 库通用公共许可证v2或更高版本
+ LGPL-2.1 : 仅限GNU宽通用公共许可证v2.1
+ LGPL-2.1+: GNU宽通用公共许可证v2.1或更高版本
+
+除此之外,单个文件可以在双重许可下提供,例如一个兼容的GPL变体,或者BSD,
+MIT等许可。
+
+用户空间API(UAPI)头文件描述了用户空间程序与内核的接口,这是一种特殊情况。
+根据内核COPYING文件中的注释,syscall接口是一个明确的边界,它不会将GPL要求
+扩展到任何使用它与内核通信的软件。由于UAPI头文件必须包含在创建在Linux内核
+上运行的可执行文件的任何源文件中,因此此例外必须记录在特别的许可证表述中。
+
+表达源文件许可证的常用方法是将匹配的样板文本添加到文件的顶部注释中。由于
+格式、拼写错误等原因,这些“样板”很难被用于验证许可证合规性的工具可靠地识别。
+
+样板文本的替代方法是在每个源文件中使用软件包数据交换(SPDX)许可证标识符。
+SPDX许可证标识符是机器可解析的,并且是用于提供文件内容的许可证的精确缩写。
+SPDX许可证标识符由Linux 基金会的SPDX 工作组管理,并得到了整个行业,工具
+供应商和法律团队的合作伙伴的一致同意。有关详细信息,请参阅
+https://spdx.org/
+
+Linux内核需要所有源文件中的精确SPDX标识符。内核中使用的有效标识符在
+`许可标识符`_ 一节中进行了解释,并且已可以在
+https://spdx.org/licenses/ 上的官方SPDX许可证列表中检索,并附带许可证
+文本。
+
+许可标识符语法
+--------------
+
+1.安置:
+
+   内核文件中的SPDX许可证标识符应添加在文件中第一个可以包含注释的行。对于大多
+ 数文件,这是第一行,除了那些在第一行中需要'#!PATH_TO_INTERPRETER'的脚本。
+ 对于这些脚本,SPDX标识符进入第二行。
+
+|
+
+2. 风格:
+
+ SPDX许可证标识符以注释的形式添加。注释样式取决于文件类型::
+
+ C source: // SPDX-License-Identifier: <SPDX License Expression>
+ C header: /* SPDX-License-Identifier: <SPDX License Expression> */
+ ASM: /* SPDX-License-Identifier: <SPDX License Expression> */
+ scripts: # SPDX-License-Identifier: <SPDX License Expression>
+ .rst: .. SPDX-License-Identifier: <SPDX License Expression>
+ .dts{i}: // SPDX-License-Identifier: <SPDX License Expression>
+
+ 如果特定工具无法处理标准注释样式,则应使用工具接受的相应注释机制。这是在
+ C 头文件中使用“/\*\*/”样式注释的原因。过去在使用生成的.lds文件中观察到
+ 构建被破坏,其中'ld'无法解析C++注释。现在已经解决了这个问题,但仍然有较
+ 旧的汇编程序工具无法处理C++样式的注释。
+
+|
+
+3. 句法:
+
+ <SPDX许可证表达式>是SPDX许可证列表中的SPDX短格式许可证标识符,或者在许可
+ 证例外适用时由“WITH”分隔的两个SPDX短格式许可证标识符的组合。当应用多个许
+ 可证时,表达式由分隔子表达式的关键字“AND”,“OR”组成,并由“(”,“)”包围。
+
+ 带有“或更高”选项的[L]GPL等许可证的许可证标识符通过使用“+”来表示“或更高”
+ 选项来构建。::
+
+ // SPDX-License-Identifier: GPL-2.0+
+ // SPDX-License-Identifier: LGPL-2.1+
+
+ 当许可证需要附加例外条款时,应使用WITH。例如,linux内核UAPI文件使用表达式::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+ // SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note
+
+ 其它在内核中使用WITH例外的事例如下::
+
+ // SPDX-License-Identifier: GPL-2.0 WITH mif-exception
+ // SPDX-License-Identifier: GPL-2.0+ WITH GCC-exception-2.0
+
+ 例外只能与特定的许可证标识符一起使用。有效的许可证标识符列在异常文本文件
+ 的标记中。有关详细信息,请参阅 `许可标识符`_ 一章中的 `例外`_ 。
+
+ 如果文件是双重许可且只选择一个许可证,则应使用OR。例如,一些dtsi文件在双
+ 许可下可用::
+
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+ 内核中双许可文件中许可表达式的示例::
+
+ // SPDX-License-Identifier: GPL-2.0 OR MIT
+ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+ // SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+ // SPDX-License-Identifier: GPL-2.0 OR MPL-1.1
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT
+ // SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause OR OpenSSL
+
+ 如果文件具有多个许可证,其条款全部适用于使用该文件,则应使用AND。例如,
+ 如果代码是从另一个项目继承的,并且已经授予了将其放入内核的权限,但原始
+ 许可条款需要保持有效::
+
+ // SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
+
+ 另一个需要遵守两套许可条款的例子是::
+
+ // SPDX-License-Identifier: GPL-1.0+ AND LGPL-2.1+
+
+许可标识符
+----------
+
+当前使用的许可证以及添加到内核的代码许可证可以分解为:
+
+1. _`优先许可`:
+
+ 应尽可能使用这些许可证,因为它们已知完全兼容并广泛使用。这些许可证在内核
+ 目录::
+
+ LICENSES/preferred/
+
+ 此目录中的文件包含完整的许可证文本和 `元标记`_ 。文件名与SPDX许可证标识
+ 符相同,后者应用于源文件中的许可证。
+
+ 例如::
+
+ LICENSES/preferred/GPL-2.0
+
+ 包含GPLv2许可证文本和所需的元标签::
+
+ LICENSES/preferred/MIT
+
+ 包含MIT许可证文本和所需的元标记
+
+ _`元标记`:
+
+ 许可证文件中必须包含以下元标记:
+
+ - Valid-License-Identifier:
+
+   一行或多行, 声明那些许可标识符在项目内有效, 以引用此特定许可的文本。通
+ 常这是一个有效的标识符,但是例如对于带有'或更高'选项的许可证,两个标识
+ 符都有效。
+
+ - SPDX-URL:
+
+ SPDX页面的URL,其中包含与许可证相关的其他信息.
+
+ - Usage-Guide:
+
+ 使用建议的自由格式文本。该文本必须包含SPDX许可证标识符的正确示例,因为
+ 它们应根据 `许可标识符语法`_ 指南放入源文件中。
+
+ - License-Text:
+
+ 此标记之后的所有文本都被视为原始许可文本
+
+ 文件格式示例::
+
+ Valid-License-Identifier: GPL-2.0
+ Valid-License-Identifier: GPL-2.0+
+ SPDX-URL: https://spdx.org/licenses/GPL-2.0.html
+ Usage-Guide:
+ To use this license in source code, put one of the following SPDX
+ tag/value pairs into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ For 'GNU General Public License (GPL) version 2 only' use:
+ SPDX-License-Identifier: GPL-2.0
+ For 'GNU General Public License (GPL) version 2 or any later version' use:
+ SPDX-License-Identifier: GPL-2.0+
+ License-Text:
+ Full license text
+
+ ::
+
+ SPDX-License-Identifier: MIT
+ SPDX-URL: https://spdx.org/licenses/MIT.html
+ Usage-Guide:
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: MIT
+ License-Text:
+ Full license text
+
+|
+
+2. 不推荐的许可证:
+
+ 这些许可证只应用于现有代码或从其他项目导入代码。这些许可证在内核目录::
+
+ LICENSES/other/
+
+ 此目录中的文件包含完整的许可证文本和 `元标记`_ 。文件名与SPDX许可证标识
+ 符相同,后者应用于源文件中的许可证。
+
+ 例如::
+
+ LICENSES/other/ISC
+
+ 包含ISC许可证文本和所需的元标签::
+
+ LICENSES/other/ZLib
+
+ 包含ZLIB许可文本和所需的元标签.
+
+ 元标签:
+
+ “其他”许可证的元标签要求与 `优先许可`_ 的要求相同。
+
+ 文件格式示例::
+
+ Valid-License-Identifier: ISC
+ SPDX-URL: https://spdx.org/licenses/ISC.html
+ Usage-Guide:
+ Usage of this license in the kernel for new code is discouraged
+ and it should solely be used for importing code from an already
+ existing project.
+ To use this license in source code, put the following SPDX
+ tag/value pair into a comment according to the placement
+ guidelines in the licensing rules documentation.
+ SPDX-License-Identifier: ISC
+ License-Text:
+ Full license text
+
+|
+
+3. _`例外`:
+
+ 某些许可证可以修改,并允许原始许可证不具有的某些例外权利。这些例外在
+ 内核目录::
+
+ LICENSES/exceptions/
+
+ 此目录中的文件包含完整的例外文本和所需的 `例外元标记`_ 。
+
+ 例如::
+
+ LICENSES/exceptions/Linux-syscall-note
+
+ 包含Linux内核的COPYING文件中记录的Linux系统调用例外,该文件用于UAPI
+ 头文件。例如::
+
+ LICENSES/exceptions/GCC-exception-2.0
+
+ 包含GCC'链接例外',它允许独立于其许可证的任何二进制文件与标记有此例外的
+ 文件的编译版本链接。这是从GPL不兼容源代码创建可运行的可执行文件所必需的。
+
+ _`例外元标记`:
+
+ 以下元标记必须在例外文件中可用:
+
+ - SPDX-Exception-Identifier:
+
+   一个可与SPDX许可证标识符一起使用的例外标识符。
+
+ - SPDX-URL:
+
+ SPDX页面的URL,其中包含与例外相关的其他信息。
+
+ - SPDX-Licenses:
+
+   以逗号分隔的例外可用的SPDX许可证标识符列表。
+
+ - Usage-Guidance:
+
+ 使用建议的自由格式文本。必须在文本后面加上SPDX许可证标识符的正确示例,
+ 因为它们应根据 `许可标识符语法`_ 指南放入源文件中。
+
+ - Exception-Text:
+
+ 此标记之后的所有文本都被视为原始异常文本
+
+ 文件格式示例::
+
+ SPDX-Exception-Identifier: Linux-syscall-note
+ SPDX-URL: https://spdx.org/licenses/Linux-syscall-note.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-1.0+, LGPL-2.0, LGPL-2.0+, LGPL-2.1, LGPL-2.1+
+ Usage-Guidance:
+ This exception is used together with one of the above SPDX-Licenses
+ to mark user-space API (uapi) header files so they can be included
+ into non GPL compliant user-space application code.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH Linux-syscall-note
+ Exception-Text:
+ Full exception text
+
+ ::
+
+ SPDX-Exception-Identifier: GCC-exception-2.0
+ SPDX-URL: https://spdx.org/licenses/GCC-exception-2.0.html
+ SPDX-Licenses: GPL-2.0, GPL-2.0+
+ Usage-Guidance:
+ The "GCC Runtime Library exception 2.0" is used together with one
+ of the above SPDX-Licenses for code imported from the GCC runtime
+ library.
+ To use this exception add it with the keyword WITH to one of the
+ identifiers in the SPDX-Licenses tag:
+ SPDX-License-Identifier: <SPDX-License> WITH GCC-exception-2.0
+ Exception-Text:
+ Full exception text
+
+
+所有SPDX许可证标识符和例外都必须在LICENSES子目录中具有相应的文件。这是允许
+工具验证(例如checkpatch.pl)所必需的,也便于直接从源码中阅读和提取许可证,
+这也是各种FOSS组织的建议,例如 `FSFE REUSE initiative <https://reuse.software/>`_.
+
+_`模块许可`
+-----------------
+
+ 可加载内核模块还需要MODULE_LICENSE()标记。此标记既不替代正确的源代码
+ 许可证信息(SPDX-License-Identifier),也不以任何方式表示或确定提供模块
+ 源代码的确切许可证。
+
+ 此标记的唯一目的是提供足够的信息,该模块是否是自由软件或者是内核模块加
+ 载器和用户空间工具的专有模块。
+
+ MODULE_LICENSE()的有效许可证字符串是:
+
+ ============================= =============================================
+ "GPL" 模块是根据GPL版本2许可的。它并不区分仅限
+ GPL-2.0与GPL-2.0或更高版本。最准确的许可证
+ 信息只能通过相应源文件中的许可证信息来确定
+
+ "GPL v2" 和"GPL"相同,它的存在是因为历史原因。
+
+ "GPL and additional rights" 表示模块源在GPL v2变体和MIT许可下双重许可的
+ 历史变体。请不要在新代码中使用。
+
+ "Dual MIT/GPL" 表达该模块在GPL v2变体或MIT许可证选择下双重
+ 许可的正确方式。
+
+ "Dual BSD/GPL" 该模块根据GPL v2变体或BSD许可证选择进行双重
+ 许可。 BSD许可证的确切变体只能通过相应源文件
+ 中的许可证信息来确定。
+
+ "Dual MPL/GPL" 该模块根据GPL v2变体或Mozilla Public License
+ (MPL)选项进行双重许可。 MPL许可证的确切变体
+ 只能通过相应的源文件中的许可证信息来确定。
+
+ "Proprietary" 该模块属于专有许可。此字符串仅用于专有的第三
+ 方模块,不能用于在内核树中具有源代码的模块。
+ 以这种方式标记的模块在加载时会使用'P'标记污
+ 染内核,并且内核模块加载器拒绝将这些模块链接
+ 到使用EXPORT_SYMBOL_GPL()导出的符号。
+ ============================= =============================================
+
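+ 下面是一个假设性的最小模块骨架(模块名与描述字符串均为虚构,仅作示意),
+ 演示源文件顶部的SPDX许可证标识符如何与MODULE_LICENSE()标记配合使用::
+
+    // SPDX-License-Identifier: GPL-2.0
+    #include <linux/init.h>
+    #include <linux/module.h>
+    #include <linux/printk.h>
+
+    static int __init demo_init(void)
+    {
+            pr_info("demo: loaded\n");
+            return 0;
+    }
+
+    static void __exit demo_exit(void)
+    {
+            pr_info("demo: unloaded\n");
+    }
+
+    module_init(demo_init);
+    module_exit(demo_exit);
+
+    /* 与上面的 SPDX-License-Identifier: GPL-2.0 保持一致 */
+    MODULE_LICENSE("GPL");
+    MODULE_DESCRIPTION("SPDX / MODULE_LICENSE demo");
+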
diff --git a/Documentation/translations/zh_CN/process/magic-number.rst b/Documentation/translations/zh_CN/process/magic-number.rst
new file mode 100644
index 000000000000..15c592518194
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/magic-number.rst
@@ -0,0 +1,151 @@
+.. _cn_magicnumbers:
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/magic-number.rst <magicnumbers>`
+
+如果想评论或更新本文的内容,请直接发信到LKML。如果你使用英文交流有困难的话,也可
+以向中文版维护者求助。如果本翻译更新不及时或者翻译存在问题,请联系中文版维护者::
+
+ 中文版维护者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
+ 中文版翻译者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
+ 中文版校译者: 贾威威 Jia Wei Wei <harryxiyou@gmail.com>
+
+Linux 魔术数
+============
+
+这个文件是有关当前使用的魔术值注册表。当你给一个结构添加了一个魔术值,你也应该把这个魔术值添加到这个文件,因为我们最好把用于各种结构的魔术值统一起来。
+
+使用魔术值来保护内核数据结构是一个非常好的主意。这就允许你在运行期检查(a)一个结构是否已经被攻击,或者(b)你已经给一个例行程序通过了一个错误的结构。后一种情况特别地有用---特别是当你通过一个空指针指向结构体的时候。tty源码,例如,经常通过特定驱动使用这种方法并且反复地排列特定方面的结构。
+
+使用魔术值的方法是在结构的开始处声明的,如下::
+
+ struct tty_ldisc {
+ int magic;
+ ...
+ };
+
+当你以后给内核添加增强功能的时候,请遵守这条规则!这样就会节省数不清的调试时间,特别是一些古怪的情况,例如,数组越界并且覆写了越界部分。遵守这个规则,这些情况就可以被快速、安全地避免。
+
+ Theodore Ts'o
+ 31 Mar 94
+
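+作为补充,下面给出一个假设性的简短示例(结构体、函数与魔术值均为虚构,仅作
+示意),演示驱动如何在运行期利用魔术值校验传入的结构体::
+
+    #include <linux/errno.h>
+    #include <linux/printk.h>
+
+    #define MYDRV_MAGIC 0x4d594452          /* 虚构的魔术值 */
+
+    struct mydrv_port {
+            int magic;                      /* 按惯例放在结构的开头 */
+            /* ... 其它成员 ... */
+    };
+
+    static int mydrv_check(struct mydrv_port *port)
+    {
+            if (!port || port->magic != MYDRV_MAGIC) {
+                    pr_err("mydrv: bad magic, corrupted structure?\n");
+                    return -EINVAL;
+            }
+            return 0;
+    }
+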
+给当前的Linux 2.1.55添加魔术表。
+
+ Michael Chastain
+ <mailto:mec@shout.net>
+ 22 Sep 1997
+
+现在更新到最新的Linux 2.1.112。因为处于特性冻结期间,在2.2.x之前不会再改变任何东西。这些条目按数字域排序。
+
+ Krzysztof G.Baranowski
+ <mailto: kgb@knm.org.pl>
+ 29 Jul 1998
+
+更新魔术表到Linux 2.5.45。刚好越过特性冻结,但是有可能还会有一些新的魔术值在2.6.x之前融入到内核中。
+
+ Petr Baudis
+ <pasky@ucw.cz>
+ 03 Nov 2002
+
+更新魔术表到Linux 2.5.74。
+
+ Fabian Frederick
+ <ffrederick@users.sourceforge.net>
+ 09 Jul 2003
+
+===================== ================ ======================== ==========================================
+魔术数名 数字 结构 文件
+===================== ================ ======================== ==========================================
+PG_MAGIC 'P' pg_{read,write}_hdr ``include/linux/pg.h``
+CMAGIC 0x0111 user ``include/linux/a.out.h``
+MKISS_DRIVER_MAGIC 0x04bf mkiss_channel ``drivers/net/mkiss.h``
+HDLC_MAGIC 0x239e n_hdlc ``drivers/char/n_hdlc.c``
+APM_BIOS_MAGIC 0x4101 apm_user ``arch/x86/kernel/apm_32.c``
+CYCLADES_MAGIC 0x4359 cyclades_port ``include/linux/cyclades.h``
+DB_MAGIC 0x4442 fc_info ``drivers/net/iph5526_novram.c``
+DL_MAGIC 0x444d fc_info ``drivers/net/iph5526_novram.c``
+FASYNC_MAGIC 0x4601 fasync_struct ``include/linux/fs.h``
+FF_MAGIC 0x4646 fc_info ``drivers/net/iph5526_novram.c``
+ISICOM_MAGIC 0x4d54 isi_port ``include/linux/isicom.h``
+PTY_MAGIC 0x5001 ``drivers/char/pty.c``
+PPP_MAGIC 0x5002 ppp ``include/linux/if_pppvar.h``
+SERIAL_MAGIC 0x5301 async_struct ``include/linux/serial.h``
+SSTATE_MAGIC 0x5302 serial_state ``include/linux/serial.h``
+SLIP_MAGIC 0x5302 slip ``drivers/net/slip.h``
+STRIP_MAGIC 0x5303 strip ``drivers/net/strip.c``
+X25_ASY_MAGIC 0x5303 x25_asy ``drivers/net/x25_asy.h``
+SIXPACK_MAGIC 0x5304 sixpack ``drivers/net/hamradio/6pack.h``
+AX25_MAGIC 0x5316 ax_disp ``drivers/net/mkiss.h``
+TTY_MAGIC 0x5401 tty_struct ``include/linux/tty.h``
+MGSL_MAGIC 0x5401 mgsl_info ``drivers/char/synclink.c``
+TTY_DRIVER_MAGIC 0x5402 tty_driver ``include/linux/tty_driver.h``
+MGSLPC_MAGIC 0x5402 mgslpc_info ``drivers/char/pcmcia/synclink_cs.c``
+TTY_LDISC_MAGIC 0x5403 tty_ldisc ``include/linux/tty_ldisc.h``
+USB_SERIAL_MAGIC 0x6702 usb_serial ``drivers/usb/serial/usb-serial.h``
+FULL_DUPLEX_MAGIC 0x6969 ``drivers/net/ethernet/dec/tulip/de2104x.c``
+USB_BLUETOOTH_MAGIC 0x6d02 usb_bluetooth ``drivers/usb/class/bluetty.c``
+RFCOMM_TTY_MAGIC 0x6d02 ``net/bluetooth/rfcomm/tty.c``
+USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port ``drivers/usb/serial/usb-serial.h``
+CG_MAGIC 0x00090255 ufs_cylinder_group ``include/linux/ufs_fs.h``
+RPORT_MAGIC 0x00525001 r_port ``drivers/char/rocket_int.h``
+LSEMAGIC 0x05091998 lse ``drivers/fc4/fc.c``
+GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str ``drivers/scsi/gdth_ioctl.h``
+RIEBL_MAGIC 0x09051990 ``drivers/net/atarilance.c``
+NBD_REQUEST_MAGIC 0x12560953 nbd_request ``include/linux/nbd.h``
+RED_MAGIC2 0x170fc2a5 (any) ``mm/slab.c``
+BAYCOM_MAGIC 0x19730510 baycom_state ``drivers/net/baycom_epp.c``
+ISDN_X25IFACE_MAGIC 0x1e75a2b9 isdn_x25iface_proto_data ``drivers/isdn/isdn_x25iface.h``
+ECP_MAGIC 0x21504345 cdkecpsig ``include/linux/cdk.h``
+LSOMAGIC 0x27091997 lso ``drivers/fc4/fc.c``
+LSMAGIC 0x2a3b4d2a ls ``drivers/fc4/fc.c``
+WANPIPE_MAGIC 0x414C4453 sdla_{dump,exec} ``include/linux/wanpipe.h``
+CS_CARD_MAGIC 0x43525553 cs_card ``sound/oss/cs46xx.c``
+LABELCL_MAGIC 0x4857434c labelcl_info_s ``include/asm/ia64/sn/labelcl.h``
+ISDN_ASYNC_MAGIC 0x49344C01 modem_info ``include/linux/isdn.h``
+CTC_ASYNC_MAGIC 0x49344C01 ctc_tty_info ``drivers/s390/net/ctctty.c``
+ISDN_NET_MAGIC 0x49344C02 isdn_net_local_s ``drivers/isdn/i4l/isdn_net_lib.h``
+SAVEKMSG_MAGIC2 0x4B4D5347 savekmsg ``arch/*/amiga/config.c``
+CS_STATE_MAGIC 0x4c4f4749 cs_state ``sound/oss/cs46xx.c``
+SLAB_C_MAGIC 0x4f17a36d kmem_cache ``mm/slab.c``
+COW_MAGIC 0x4f4f4f4d cow_header_v1 ``arch/um/drivers/ubd_user.c``
+I810_CARD_MAGIC 0x5072696E i810_card ``sound/oss/i810_audio.c``
+TRIDENT_CARD_MAGIC 0x5072696E trident_card ``sound/oss/trident.c``
+ROUTER_MAGIC 0x524d4157 wan_device [in ``wanrouter.h`` pre 3.9]
+SAVEKMSG_MAGIC1 0x53415645 savekmsg ``arch/*/amiga/config.c``
+GDA_MAGIC 0x58464552 gda ``arch/mips/include/asm/sn/gda.h``
+RED_MAGIC1 0x5a2cf071 (any) ``mm/slab.c``
+EEPROM_MAGIC_VALUE 0x5ab478d2 lanai_dev ``drivers/atm/lanai.c``
+HDLCDRV_MAGIC 0x5ac6e778 hdlcdrv_state ``include/linux/hdlcdrv.h``
+PCXX_MAGIC 0x5c6df104 channel ``drivers/char/pcxx.h``
+KV_MAGIC 0x5f4b565f kernel_vars_s ``arch/mips/include/asm/sn/klkernvars.h``
+I810_STATE_MAGIC 0x63657373 i810_state ``sound/oss/i810_audio.c``
+TRIDENT_STATE_MAGIC 0x63657373 trient_state ``sound/oss/trident.c``
+M3_CARD_MAGIC 0x646e6f50 m3_card ``sound/oss/maestro3.c``
+FW_HEADER_MAGIC 0x65726F66 fw_header ``drivers/atm/fore200e.h``
+SLOT_MAGIC 0x67267321 slot ``drivers/hotplug/cpqphp.h``
+SLOT_MAGIC 0x67267322 slot ``drivers/hotplug/acpiphp.h``
+LO_MAGIC 0x68797548 nbd_device ``include/linux/nbd.h``
+OPROFILE_MAGIC 0x6f70726f super_block ``drivers/oprofile/oprofilefs.h``
+M3_STATE_MAGIC 0x734d724d m3_state ``sound/oss/maestro3.c``
+VMALLOC_MAGIC 0x87654320 snd_alloc_track ``sound/core/memory.c``
+KMALLOC_MAGIC 0x87654321 snd_alloc_track ``sound/core/memory.c``
+PWC_MAGIC 0x89DC10AB pwc_device ``drivers/usb/media/pwc.h``
+NBD_REPLY_MAGIC 0x96744668 nbd_reply ``include/linux/nbd.h``
+ENI155_MAGIC 0xa54b872d midway_eprom ``drivers/atm/eni.h``
+CODA_MAGIC 0xC0DAC0DA coda_file_info ``fs/coda/coda_fs_i.h``
+DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram ``drivers/scsi/gdth.h``
+YAM_MAGIC 0xF10A7654 yam_port ``drivers/net/hamradio/yam.c``
+CCB_MAGIC 0xf2691ad2 ccb ``drivers/scsi/ncr53c8xx.c``
+QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry ``drivers/scsi/arm/queue.c``
+QUEUE_MAGIC_USED 0xf7e1cc33 queue_entry ``drivers/scsi/arm/queue.c``
+HTB_CMAGIC 0xFEFAFEF1 htb_class ``net/sched/sch_htb.c``
+NMI_MAGIC 0x48414d4d455201 nmi_s ``arch/mips/include/asm/sn/nmi.h``
+===================== ================ ======================== ==========================================
+
+
+请注意,在声音内存管理中仍然有一些特殊的、为每个驱动单独定义的魔术值。查看include/sound/sndmagic.h来获取它们的完整列表信息。很多OSS声音驱动拥有自己从声卡PCI ID构建的魔术值,它们也没有被列在这里。
+
+IrDA子系统也使用了大量自己的魔术值,查看include/net/irda/irda.h来获取它们的完整信息。
+
+HFS是另外一个大量使用魔术值的文件系统,你可以在fs/hfs/hfs.h中找到它们。
diff --git a/Documentation/translations/zh_CN/process/management-style.rst b/Documentation/translations/zh_CN/process/management-style.rst
new file mode 100644
index 000000000000..a181fa56d19e
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/management-style.rst
@@ -0,0 +1,207 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/management-style.rst <managementstyle>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_managementstyle:
+
+Linux内核管理风格
+=================
+
+这是一个简短的文档,描述了Linux内核首选的(或胡编的,取决于您问谁)管理风格。
+它的目的是在某种程度上参照 :ref:`process/coding-style.rst <codingstyle>`
+主要是为了避免反复回答 [#cnf1]_ 相同(或类似)的问题。
+
+管理风格是非常个人化的,比简单的编码风格规则更难以量化,因此本文档可能与实
+际情况有关,也可能与实际情况无关。起初它是一个玩笑,但这并不意味着它可能不
+是真的。你得自己决定。
+
+顺便说一句,在谈到“核心管理者”时,主要是技术负责人,而不是在公司内部进行传
+统管理的人。如果你签署了采购订单或者对你的团队的预算有任何了解,你几乎肯定
+不是一个核心管理者。这些建议可能适用于您,也可能不适用于您。
+
+首先,我建议你购买“高效人的七个习惯”,而不是阅读它。烧了它,这是一个伟大的
+象征性姿态。
+
+.. [#cnf1] 本文件并不是通过回答问题,而是通过让提问者痛苦地明白,我们不知道
+ 答案是什么。
+
+不管怎样,这里是:
+
+.. _decisions:
+
+1)决策
+-------
+
+每个人都认为管理者做决定,而且决策很重要。决定越大越痛苦,管理者就必须越高级。
+这很明显,但事实并非如此。
+
+游戏的名字是 **避免** 做出决定。尤其是,如果有人告诉你“选择(a)或(b),
+我们真的需要你来做决定”,你就是陷入麻烦的管理者。你管理的人比你更了解细节,
+所以如果他们来找你做技术决策,你完蛋了。你显然没有能力为他们做这个决定。
+
+(推论:如果你管理的人不比你更了解细节,你也会被搞砸,尽管原因完全不同。
+也就是说,你的工作是错的,他们应该管理你的才智)
+
+所以游戏的名字是 **避免** 做出决定,至少是那些大而痛苦的决定。做一些小的
+和非结果性的决定是很好的,并且使您看起来好像知道自己在做什么,所以内核管理者
+需要做的是将那些大的和痛苦的决定变成那些没有人真正关心的小事情。
+
+这有助于认识到一个大的决定和一个小的决定之间的关键区别是你是否可以在事后修正
+你的决定。只要始终确保在你错了的时候(而且你一定会错),以后可以通过回溯来挽
+回损失,那么任何决定都可以变成小决定。突然间,你要做的只是两个无关紧要的决定,
+一个是错误的,另一个是正确的。
+
+人们甚至会认为这是真正的领导能力(咳,胡说,咳)。
+
+因此,避免重大决策的关键在于避免做那些无法挽回的事情。不要被引导到一个你无法
+逃离的角落。走投无路的老鼠可能很危险——走投无路的管理者真可怜。
+
+事实证明,由于没有人会愚蠢到让内核管理者承担巨大的财政责任,所以通常很容易
+回溯。既然你不可能浪费掉你无法偿还的巨额资金,你唯一可以回溯的就是技术决策,
+而回溯很容易:只要告诉大家你是个不称职的傻瓜,说对不起,然后撤销你去年让别
+人所做的毫无价值的工作。突然间,你一年前做的决定不再是一个重大的决定,因为
+它很容易被推翻。
+
+事实证明,有些人对接受这种方法有困难,原因有两个:
+
+ - 承认你是个白痴比看起来更难。我们都喜欢保持形象,在公共场合说你错了有时
+ 确实很难。
+ - 如果有人告诉你,你去年所做的工作终究是不值得的,那么对那些可怜的低级工
+ 程师来说也是很困难的,虽然实际的 **工作** 很容易删除,但你可能已经不可
+ 挽回地失去了工程师的信任。记住:“不可撤销”是我们一开始就试图避免的,
+ 而你的决定终究是一个重大的决定。
+
+令人欣慰的是,这两个原因都可以通过预先承认你没有任何线索,提前告诉人们你的
+决定完全是初步的,而且可能是错误的事情来有效地缓解。你应该始终保留改变主意
+的权利,并让人们 **意识** 到这一点。当你 **还没有** 做过真正愚蠢的事情的时
+候,承认自己是愚蠢的要容易得多。
+
+然后,当它真的被证明是愚蠢的时候,人们就转动他们的眼珠说“哎呀,下次不要了”。
+
+这种对不称职的先发制人的承认,也可能使真正做这项工作的人也会三思是否值得做。
+毕竟,如果他们不确定这是否是一个好主意,你肯定不应该通过向他们保证他们所做
+的工作将会进入(内核)鼓励他们。在他们开始一项巨大的努力之前,至少让他们三
+思而后行。
+
+记住:他们最好比你更了解细节,而且他们通常认为他们对每件事都有答案。作为一
+个管理者,你能做的最好的事情不是灌输自信,而是对他们所做的事情进行健康的批
+判性思考。
+
+顺便说一句,另一种避免做出决定的方法是看起来很可怜的抱怨 “我们不能两者兼
+得吗?” 相信我,它是有效的。如果不清楚哪种方法更好,他们最终会弄清楚的。
+最终的答案可能是两个团队都会因为这种情况而感到沮丧,以至于他们放弃了。
+
+这听起来像是一个失败,但这通常是一个迹象,表明两个项目都有问题,而参与其中
+的人不能做决定的原因是他们都是错误的。你最终会闻到玫瑰的味道,你避免了另一
+个你本可以搞砸的决定。
+
+2)人
+-----
+
+大多数人都是白痴,做一名管理者意味着你必须处理好这件事,也许更重要的是,
+**他们** 必须处理好你。
+
+事实证明,虽然很容易纠正技术错误,但不容易纠正人格障碍。你只能和他们的和
+你的(人格障碍)共处。
+
+但是,为了做好作为内核管理者的准备,最好记住不要烧掉任何桥梁,不要轰炸任何
+无辜的村民,也不要疏远太多的内核开发人员。事实证明,疏远人是相当容易的,而
+亲近一个疏远的人是很难的。因此,“疏远”立即属于“不可逆”的范畴,并根据
+:ref:`decisions` 成为绝不可以做的事情。
+
+这里只有几个简单的规则:
+
+ (1) 不要叫人笨蛋(至少不要在公共场合)
+ (2) 学习如何在忘记规则(1)时道歉
+
+问题在于 #1 很容易去做,因为你可以用数百万种不同的方式说“你是一个笨蛋” [#cnf2]_
+有时甚至没有意识到,而且几乎总是带着一种白热化的信念,认为你是对的。
+
+你越确信自己是对的(让我们面对现实吧,你可以把几乎所有人都称为坏人,而且你
+经常是对的),事后道歉就越难。
+
+要解决此问题,您实际上只有两个选项:
+
+ - 非常擅长道歉
+ - 把“爱”均匀地散开,没有人会真正感觉到自己被不公平地瞄准了。让它有足够的
+ 创造性,他们甚至可能会觉得好笑。
+
+选择永远保持礼貌是不存在的。没有人会相信一个如此明显地隐藏了他们真实性格的人。
+
+.. [#cnf2] 保罗·西蒙演唱了“离开爱人的50种方法”,因为坦率地说,“告诉开发者
+  他们是D*CKHEAD的100万种方法”读起来远没有那么顺口。但我确信他考虑过这么写。
+
+3)人2 - 好人
+-------------
+
+虽然大多数人都是白痴,但不幸的是,据此推论你也是白痴,尽管我们都自我感觉良
+好,我们比普通人更好(让我们面对现实吧,没有人相信他们是普通人或低于普通人),
+我们也应该承认我们不是最锋利的刀,而且会有其他人比你更不像白痴。
+
+有些人对聪明人反应不好。其他人利用它们。
+
+作为内核维护人员,确保您在第二组中。接受他们,因为他们会让你的工作更容易。
+特别是,他们能够为你做决定,这就是游戏的全部内容。
+
+所以当你发现一个比你聪明的人时,就顺其自然吧。你的管理职责在很大程度上变成
+了“听起来像是个好主意——去尝试吧”,或者“听起来不错,但是XXX呢?”“。第二个版
+本尤其是一个很好的方法,要么学习一些关于“XXX”的新东西,要么通过指出一些聪明
+人没有想到的东西来显得更具管理性。无论哪种情况,你都会赢。
+
+要注意的一件事是认识到一个领域的伟大不一定会转化为其他领域。所以你可能会向
+特定的方向刺激人们,但让我们面对现实吧,他们可能擅长他们所做的事情,而且对
+其他事情都很差劲。好消息是,人们往往会自然而然地重拾他们擅长的东西,所以当
+你向某个方向刺激他们时,你并不是在做不可逆转的事情,只是不要用力推。
+
+4)责备
+-------
+
+事情总会出问题,而人们总想找人来责备。不巧,被选中的就是你。
+
+事实上,接受责备并不难,尤其是当人们意识到这不 **全是** 你的过错时。这让我
+们找到了承担责任的最佳方式:为别人承担这件事。你会感觉很好,他们会感觉很好,
+没有受到指责。而那位因为你的无能而失去了全部36GB色情收藏的人,也会勉强承认,
+你至少没有试图逃避责任。
+
+然后让真正搞砸了的开发人员(如果你能找到他们)私下知道他们搞砸了。不仅是为
+了将来可以避免,而且为了让他们知道他们欠你一个人情。而且,也许更重要的是,
+他们也可能是能够解决问题的人。因为,让我们面对现实吧,肯定不是你。
+
+承担责任也是你首先成为管理者的原因。这是让人们信任你,让你获得潜在的荣耀的
+一部分,因为你就是那个会说“我搞砸了”的人。如果你已经遵循了以前的规则,你现
+在已经很擅长说了。
+
+5)应避免的事情
+---------------
+
+有一件事人们甚至比被称为“笨蛋”更讨厌,那就是在一个神圣的声音中被称为“笨蛋”。
+第一个你可以道歉,第二个你不会真正得到机会。即使你做得很好,他们也可能不再
+倾听。
+
+我们都认为自己比别人强,这意味着当别人装腔作势时,这会让我们很恼火。你也许
+在道德和智力上比你周围的每个人都优越,但不要试图太明显,除非你真的打算激怒
+某人 [#cnf3]_
+
+同样,不要对事情太客气或太微妙。礼貌很容易落得落花流水,把问题隐藏起来,
+正如他们所说,“在互联网上,没人能听到你的含蓄。”用一个钝器把这一点锤进去,
+因为你不能真的依靠别人来获得你的观点。
+
+一些幽默可以帮助缓和直率和道德化。过度到荒谬的地步,可以灌输一个观点,而不
+会让接受者感到痛苦,他们只是认为你是愚蠢的。因此,它可以帮助我们摆脱对批评
+的个人心理障碍。
+
+.. [#cnf3] 提示:与你的工作没有直接关系的网络新闻组是消除你对他人不满的好
+ 方法。偶尔写些侮辱性的帖子,打个喷嚏,让你的情绪得到净化。别把牢骚带回家
+
+6)为什么是我?
+---------------
+
+既然你的主要责任似乎是为别人的错误承担责任,并且让别人痛苦地明白你是不称职
+的,那么显而易见的问题之一就变成了为什么首先要这样做。
+
+首先,虽然你可能会或可能不会听到十几岁女孩(或男孩,让我们不要在这里评判或
+性别歧视)敲你的更衣室门,你会得到一个巨大的个人成就感为“负责”。别介意你真
+的在领导别人,你要跟上别人,尽可能快地追赶他们。每个人都会认为你是负责人。
+
+如果你可以做到这个, 这是个伟大的工作!
diff --git a/Documentation/translations/zh_CN/process/programming-language.rst b/Documentation/translations/zh_CN/process/programming-language.rst
new file mode 100644
index 000000000000..51fd4ef48ea1
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/programming-language.rst
@@ -0,0 +1,41 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/programming-language.rst <programming_language>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_programming_language:
+
+程序设计语言
+============
+
+内核是用C语言 [c-language]_ 编写的。更准确地说,内核通常是用 ``gcc`` [gcc]_
+在 ``-std=gnu89`` [gcc-c-dialect-options]_ 下编译的:ISO C90的 GNU 方言(
+包括一些C99特性)
+
+这种方言包含对语言的许多扩展 [gnu-extensions]_ ,当然,其中许多都在内核中使用。
+
+对于一些体系结构,有一些使用 ``clang`` [clang]_ 和 ``icc`` [icc]_ 编译内核
+的支持,尽管在编写此文档时还没有完成,仍需要第三方补丁。
+
+属性
+----
+
+在整个内核中使用的一个常见扩展是属性(attributes) [gcc-attribute-syntax]_
+属性允许将实现定义的语义引入语言实体(如变量、函数或类型),而无需对语言进行
+重大的语法更改(例如添加新关键字) [n2049]_
+
+在某些情况下,属性是可选的(即不支持这些属性的编译器仍然应该生成正确的代码,
+即使其速度较慢或执行的编译时检查/诊断次数不够)
+
+内核定义了伪关键字(例如, ``pure`` ),而不是直接使用GNU属性语法(例如,
+``__attribute__((__pure__))`` ),以检测可以使用哪些关键字和/或缩短代码, 具体
+请参阅 ``include/linux/compiler_attributes.h``
+
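+下面是一个假设性的简短示例(函数名为虚构,仅作示意),演示内核伪关键字与等价
+的GNU属性语法::
+
+    #include <linux/compiler.h>     /* 提供 __pure 等伪关键字 */
+
+    /* 推荐写法:使用内核定义的伪关键字 */
+    static __pure int twice(int x)
+    {
+            return 2 * x;
+    }
+
+    /* 等价的GNU属性写法,通常不直接使用 */
+    static __attribute__((__pure__)) int twice_attr(int x)
+    {
+            return 2 * x;
+    }
+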
+.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
+.. [gcc] https://gcc.gnu.org
+.. [clang] https://clang.llvm.org
+.. [icc] https://software.intel.com/en-us/c-compilers
+.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
+.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
+.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
+.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
diff --git a/Documentation/translations/zh_CN/stable_api_nonsense.txt b/Documentation/translations/zh_CN/process/stable-api-nonsense.rst
index a2b27fab382c..b4ddb6e88d9d 100644
--- a/Documentation/translations/zh_CN/stable_api_nonsense.txt
+++ b/Documentation/translations/zh_CN/process/stable-api-nonsense.rst
@@ -1,26 +1,18 @@
-Chinese translated version of Documentation/process/stable-api-nonsense.rst
+.. _cn_stable_api_nonsense:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have problem
-communicating in English you can also ask the Chinese maintainer for help.
-Contact the Chinese maintainer, if this translation is outdated or there
-is problem with translation.
+.. include:: ../disclaimer-zh_CN.rst
-Maintainer: Greg Kroah-Hartman <greg@kroah.com>
-Chinese maintainer: TripleX Chung <zhongyu@18mail.cn>
----------------------------------------------------------------------
-Documentation/process/stable-api-nonsense.rst 的中文翻译
+:Original: :ref:`Documentation/process/stable-api-nonsense.rst
+ <stable_api_nonsense>`
-如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
-交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译者::
-英文版维护者: Greg Kroah-Hartman <greg@kroah.com>
-中文版维护者: 钟宇 TripleX Chung <zhongyu@18mail.cn>
-中文版翻译者: 钟宇 TripleX Chung <zhongyu@18mail.cn>
-中文版校译者: 李阳 Li Yang <leoli@freescale.com>
-以下为正文
----------------------------------------------------------------------
+ 中文版维护者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 中文版翻译者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 中文版校译者: 李阳 Li Yang <leoyang.li@nxp.com>
+
+Linux 内核驱动接口
+==================
写作本文档的目的,是为了解释为什么Linux既没有二进制内核接口,也没有稳定
的内核接口。这里所说的内核接口,是指内核里的接口,而不是内核和用户空间
@@ -59,18 +51,22 @@ Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选
--------------
假如我们有一个稳定的内核源代码接口,那么自然而然的,我们就拥有了稳定的
二进制接口,是这样的吗?错。让我们看看关于Linux内核的几点事实:
+
- 取决于所用的C编译器的版本,不同的内核数据结构里的结构体的对齐方
-式会有差别,代码中不同函数的表现形式也不一样(函数是不是被inline编译取
-决于编译器行为)。不同的函数的表现形式并不重要,但是数据结构内部的对齐
-方式很关键。
+ 式会有差别,代码中不同函数的表现形式也不一样(函数是不是被inline
+ 编译取决于编译器行为)。不同的函数的表现形式并不重要,但是数据
+ 结构内部的对齐方式很关键。
+
- 取决于内核的配置选项,不同的选项会让内核的很多东西发生改变:
+
- 同一个结构体可能包含不同的成员变量
- 有的函数可能根本不会被实现(比如编译的时候没有选择SMP支持
-,一些锁函数就会被定义成空函数)。
+ 一些锁函数就会被定义成空函数)。
- 内核使用的内存会以不同的方式对齐,这取决于不同的内核配置选
-项。
+ 项。
+
- Linux可以在很多的不同体系结构的处理器上运行。在某个体系结构上编
-译好的二进制驱动程序,不可能在另外一个体系结构上正确的运行。
+ 译好的二进制驱动程序,不可能在另外一个体系结构上正确的运行。
对于一个特定的内核,满足这些条件并不难,使用同一个C编译器和同样的内核配
置选项来编译驱动程序模块就可以了。这对于给一个特定Linux发布的特定版本提
@@ -90,7 +86,7 @@ Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选
如果有人不将他的内核驱动程序,放入公版内核的源代码树,而又想让驱动程序
一直保持在最新的内核中可用,那么这个话题将会变得没完没了。
- 内核开发是持续而且快节奏的,从来都不会慢下来。内核开发人员在当前接口中
+内核开发是持续而且快节奏的,从来都不会慢下来。内核开发人员在当前接口中
找到bug,或者找到更好的实现方式。一旦发现这些,他们就很快会去修改当前的
接口。修改接口意味着,函数名可能会改变,结构体可能被扩充或者删减,函数
的参数也可能发生改变。一旦接口被修改,内核中使用这些接口的地方需要同时
@@ -98,21 +94,22 @@ Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选
举一个例子,内核的USB驱动程序接口在USB子系统的整个生命周期中,至少经历
了三次重写。这些重写解决以下问题:
+
- 把数据流从同步模式改成非同步模式,这个改动减少了一些驱动程序的
-复杂度,提高了所有USB驱动程序的吞吐率,这样几乎所有的USB设备都能以最大
-速率工作了。
+ 复杂度,提高了所有USB驱动程序的吞吐率,这样几乎所有的USB设备都
+ 能以最大速率工作了。
- 修改了USB核心代码中为USB驱动分配数据包内存的方式,所有的驱动都
-需要提供更多的参数给USB核心,以修正了很多已经被记录在案的死锁。
+ 需要提供更多的参数给USB核心,以修正了很多已经被记录在案的死锁。
这和一些封闭源代码的操作系统形成鲜明的对比,在那些操作系统上,不得不额
外的维护旧的USB接口。这导致了一个可能性,新的开发者依然会不小心使用旧的
接口,以不恰当的方式编写代码,进而影响到操作系统的稳定性。
- 在上面的例子中,所有的开发者都同意这些重要的改动,在这样的情况下修改代
+在上面的例子中,所有的开发者都同意这些重要的改动,在这样的情况下修改代
价很低。如果Linux保持一个稳定的内核源代码接口,那么就得创建一个新的接口
;旧的,有问题的接口必须一直维护,给Linux USB开发者带来额外的工作。既然
所有的Linux USB驱动的作者都是利用自己的时间工作,那么要求他们去做毫无意
义的免费额外工作,是不可能的。
- 安全问题对Linux来说十分重要。一个安全问题被发现,就会在短时间内得到修
+安全问题对Linux来说十分重要。一个安全问题被发现,就会在短时间内得到修
正。在很多情况下,这将导致Linux内核中的一些接口被重写,以从根本上避免安
全问题。一旦接口被重写,所有使用这些接口的驱动程序,必须同时得到修正,
以确定安全问题已经得到修复并且不可能在未来还有同样的安全问题。如果内核
@@ -124,7 +121,7 @@ Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选
要做什么
--------
+--------
如果你写了一个Linux内核驱动,但是它还不在Linux源代码树里,作为一个开发
者,你应该怎么做?为每个发布的每个版本提供一个二进制驱动,那简直是一个
@@ -137,20 +134,21 @@ Linux能成为强壮,稳定,成熟的操作系统,这也是你最开始选
做什么事情。
把驱动放到内核源代码树里会有很多的好处:
+
- 驱动的质量会提升,而维护成本(对原始作者来说)会下降。
- 其他人会给驱动添加新特性。
- 其他人会找到驱动中的bug并修复。
- 其他人会在驱动中找到性能优化的机会。
- 当外部的接口的改变需要修改驱动程序的时候,其他人会修改驱动程序
-。
- 不需要联系任何发行商,这个驱动会自动的随着所有的Linux发布一起发
-布。
+ 布。
和别的操作系统相比,Linux为更多不同的设备提供现成的驱动,而且能在更多不
同体系结构的处理器上支持这些设备。这个经过考验的开发模式,必然是错不了
的 :)
--------------
+感谢
+----
感谢 Randy Dunlap, Andrew Morton, David Brownell, Hanna Linder,
Robert Love, and Nishanth Aravamudan 对于本文档早期版本的评审和建议。
diff --git a/Documentation/translations/zh_CN/stable_kernel_rules.txt b/Documentation/translations/zh_CN/process/stable-kernel-rules.rst
index db4ba5a0c39a..fba361f2ddfd 100644
--- a/Documentation/translations/zh_CN/stable_kernel_rules.txt
+++ b/Documentation/translations/zh_CN/process/stable-kernel-rules.rst
@@ -1,31 +1,26 @@
-Chinese translated version of Documentation/process/stable-kernel-rules.rst
+.. _cn_stable_kernel_rules:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../disclaimer-zh_CN.rst
-Chinese maintainer: TripleX Chung <triplex@zh-kernel.org>
----------------------------------------------------------------------
-Documentation/process/stable-kernel-rules.rst 的中文翻译
+:Original: :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译存在问题,请联系中文版维护者::
+ 中文版维护者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 中文版翻译者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 中文版校译者:
+ - 李阳 Li Yang <leoyang.li@nxp.com>
+ - Kangkai Yin <e12051@motorola.com>
-中文版维护者: 钟宇 TripleX Chung <triplex@zh-kernel.org>
-中文版翻译者: 钟宇 TripleX Chung <triplex@zh-kernel.org>
-中文版校译者: 李阳 Li Yang <leo@zh-kernel.org>
- Kangkai Yin <e12051@motorola.com>
-
-以下为正文
----------------------------------------------------------------------
+所有你想知道的事情 - 关于linux稳定版发布
+========================================
关于Linux 2.6稳定版发布,所有你想知道的事情。
关于哪些类型的补丁可以被接收进入稳定版代码树,哪些不可以的规则:
+----------------------------------------------------------------
- 必须是显而易见的正确,并且经过测试的。
- 连同上下文,不能大于100行。
@@ -38,9 +33,10 @@ Documentation/process/stable-kernel-rules.rst 的中文翻译
- 没有“理论上的竞争条件”,除非能给出竞争条件如何被利用的解释。
- 不能存在任何的“琐碎的”修正(拼写修正,去掉多余空格之类的)。
- 必须被相关子系统的维护者接受。
- - 必须遵循Documentation/process/submitting-patches.rst里的规则。
+ - 必须遵循Documentation/translations/zh_CN/process/submitting-patches.rst里的规则。
向稳定版代码树提交补丁的过程:
+------------------------------
- 在确认了补丁符合以上的规则后,将补丁发送到stable@vger.kernel.org。
- 如果补丁被接受到队列里,发送者会收到一个ACK回复,如果没有被接受,收
@@ -49,6 +45,7 @@ Documentation/process/stable-kernel-rules.rst 的中文翻译
- 安全方面的补丁不要发到这个列表,应该发送到security@kernel.org。
审查周期:
+----------
- 当稳定版的维护者决定开始一个审查周期,补丁将被发送到审查委员会,以
及被补丁影响的领域的维护者(除非提交者就是该领域的维护者)并且抄送
@@ -63,4 +60,5 @@ Documentation/process/stable-kernel-rules.rst 的中文翻译
通常的审查周期。请联系内核安全小组以获得关于这个过程的更多细节。
审查委员会:
+------------
- 由一些自愿承担这项任务的内核开发者,和几个非志愿的组成。
diff --git a/Documentation/translations/zh_CN/process/submit-checklist.rst b/Documentation/translations/zh_CN/process/submit-checklist.rst
new file mode 100644
index 000000000000..89061aa8fdbe
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/submit-checklist.rst
@@ -0,0 +1,107 @@
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/submit-checklist.rst <submitchecklist>`
+:Translator: Alex Shi <alex.shi@linux.alibaba.com>
+
+.. _cn_submitchecklist:
+
+Linux内核补丁提交清单
+~~~~~~~~~~~~~~~~~~~~~
+
+如果开发人员希望看到他们的内核补丁提交更快地被接受,那么他们应该做一些基本
+的事情。
+
+这些都是在
+:ref:`Documentation/translations/zh_CN/process/submitting-patches.rst <cn_submittingpatches>`
+和其他有关提交Linux内核补丁的文档中提供的。
+
+1) 如果使用工具,则包括定义/声明该工具的文件。不要依赖于其他头文件拉入您使用
+ 的头文件。
+
+2) 干净的编译:
+
+ a) 使用适用或修改的 ``CONFIG`` 选项 ``=y``、``=m`` 和 ``=n`` 。没有GCC
+ 警告/错误,没有链接器警告/错误。
+
+ b) 通过allnoconfig、allmodconfig
+
+ c) 使用 ``O=builddir`` 时可以成功编译
+
+3) 通过使用本地交叉编译工具或其他一些构建场在多个CPU体系结构上构建。
+
+4) PPC64是一种很好的交叉编译检查体系结构,因为它倾向于对64位的数使用无符号
+ 长整型。
+
+5) 如下所述 :ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`.
+   检查您的补丁是否符合常规代码风格。在提交之前,使用补丁样式检查器
+   ( ``scripts/checkpatch.pl`` )检查是否有轻微的冲突。您应该能够处理您的补丁中存在的所有
+ 违规行为。
+
+6) 任何新的或修改过的 ``CONFIG`` 选项都不会弄脏配置菜单,并默认为关闭,除非
+ 它们符合 ``Documentation/kbuild/kconfig-language.txt`` 中记录的异常条件,
+   它们符合 ``Documentation/kbuild/kconfig-language.txt`` 中“菜单属性:默认值”
+   一节所记录的例外条件。
+7) 所有新的 ``kconfig`` 选项都有帮助文本。
+
+8) 已仔细审查了相关的 ``Kconfig`` 组合。这很难用测试来纠正——脑力在这里是有
+ 回报的。
+
+9) 用 sparse 检查干净。
+
+10) 使用 ``make checkstack`` 和 ``make namespacecheck`` 并修复他们发现的任何
+ 问题。
+
+ .. note::
+
+ ``checkstack`` 并没有明确指出问题,但是任何一个在堆栈上使用超过512
+ 字节的函数都可以进行更改。
+
+11) 包括 :ref:`kernel-doc <kernel_doc>` 内核文档以记录全局内核API。(静态函数
+ 不需要,但也可以。)使用 ``make htmldocs`` 或 ``make pdfdocs`` 检查
+ :ref:`kernel-doc <kernel_doc>` 并修复任何问题。
+
+12) 通过以下选项同时启用的测试 ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``,
+ ``CONFIG_DEBUG_SLAB``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``,
+ ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``,
+ ``CONFIG_PROVE_RCU`` and ``CONFIG_DEBUG_OBJECTS_RCU_HEAD``
+
+13) 已经过构建和运行时测试,包括有或没有 ``CONFIG_SMP``, ``CONFIG_PREEMPT``.
+
+14) 如果补丁程序影响IO/磁盘等:使用或不使用 ``CONFIG_LBDAF`` 进行测试。
+
+15) 所有代码路径都已在启用所有lockdep功能的情况下运行。
+
+16) 所有新的/proc条目都记录在 ``Documentation/``
+
+17) 所有新的内核引导参数都记录在
+ Documentation/admin-guide/kernel-parameters.rst 中。
+
+18) 所有新的模块参数都记录在 ``MODULE_PARM_DESC()``
+
+19) 所有新的用户空间接口都记录在 ``Documentation/ABI/`` 中。有关详细信息,
+ 请参阅 ``Documentation/ABI/README`` 。更改用户空间接口的补丁应该抄送
+ linux-api@vger.kernel.org。
+
+20) 检查是否全部通过 ``make headers_check`` 。
+
+21) 已通过至少注入slab和page分配失败进行检查。请参阅 ``Documentation/fault-injection/``
+ 如果新代码是实质性的,那么添加子系统特定的故障注入可能是合适的。
+
+22) 新添加的代码已经用 ``gcc -W`` 编译(使用 ``make EXTRA-CFLAGS=-W`` )。这
+ 将产生大量噪声,但对于查找诸如“警告:有符号和无符号之间的比较”之类的错误
+ 很有用。
+
+23) 在它被合并到-mm补丁集中之后进行测试,以确保它仍然与所有其他排队的补丁以
+ 及VM、VFS和其他子系统中的各种更改一起工作。
+
+24) 所有内存屏障例如 ``barrier()``, ``rmb()``, ``wmb()`` 都需要源代码中的注
+ 释来解释它们正在执行的操作及其原因的逻辑。
+
+25) 如果补丁添加了任何ioctl,那么也要更新 ``Documentation/ioctl/ioctl-number.txt``
+
+26) 如果修改后的源代码依赖或使用与以下 ``Kconfig`` 符号相关的任何内核API或
+ 功能,则在禁用相关 ``Kconfig`` 符号和/或 ``=m`` (如果该选项可用)的情况
+ 下测试以下多个构建[并非所有这些都同时存在,只是它们的各种/随机组合]:
+
+ ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``, ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``,
+ ``CONFIG_NET``, ``CONFIG_INET=n`` (但是后者伴随 ``CONFIG_NET=y``).
diff --git a/Documentation/translations/zh_CN/SubmittingDrivers b/Documentation/translations/zh_CN/process/submitting-drivers.rst
index 15e73562f710..72c6cd935821 100644
--- a/Documentation/translations/zh_CN/SubmittingDrivers
+++ b/Documentation/translations/zh_CN/process/submitting-drivers.rst
@@ -1,36 +1,28 @@
-Chinese translated version of Documentation/process/submitting-drivers.rst
+.. _cn_submittingdrivers:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../disclaimer-zh_CN.rst
-Chinese maintainer: Li Yang <leo@zh-kernel.org>
----------------------------------------------------------------------
-Documentation/process/submitting-drivers.rst 的中文翻译
+:Original: :ref:`Documentation/process/submitting-drivers.rst
+ <submittingdrivers>`
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译存在问题,请联系中文版维护者::
-中文版维护者: 李阳 Li Yang <leo@zh-kernel.org>
-中文版翻译者: 李阳 Li Yang <leo@zh-kernel.org>
-中文版校译者: 陈琦 Maggie Chen <chenqi@beyondsoft.com>
- 王聪 Wang Cong <xiyou.wangcong@gmail.com>
- 张巍 Zhang Wei <Wei.Zhang@freescale.com>
-
-以下为正文
----------------------------------------------------------------------
+ 中文版维护者: 李阳 Li Yang <leoyang.li@nxp.com>
+ 中文版翻译者: 李阳 Li Yang <leoyang.li@nxp.com>
+ 中文版校译者: 陈琦 Maggie Chen <chenqi@beyondsoft.com>
+ 王聪 Wang Cong <xiyou.wangcong@gmail.com>
+ 张巍 Zhang Wei <wezhang@outlook.com>
如何向 Linux 内核提交驱动程序
------------------------------
+=============================
这篇文档将会解释如何向不同的内核源码树提交设备驱动程序。请注意,如果你感
兴趣的是显卡驱动程序,你也许应该访问 XFree86 项目(http://www.xfree86.org/)
和/或 X.org 项目 (http://x.org)。
-另请参阅 Documentation/process/submitting-patches.rst 文档。
+另请参阅 Documentation/translations/zh_CN/process/submitting-patches.rst 文档。
分配设备号
@@ -145,9 +137,13 @@ Linux 设备驱动程序,第三版(探讨 2.6.10 版内核):
LWN.net:
每周内核开发活动摘要 - http://lwn.net/
+
2.6 版中 API 的变更:
+
http://lwn.net/Articles/2.6-kernel-api/
+
将旧版内核的驱动程序移植到 2.6 版:
+
http://lwn.net/Articles/driver-porting/
内核新手(KernelNewbies):
diff --git a/Documentation/translations/zh_CN/process/submitting-patches.rst b/Documentation/translations/zh_CN/process/submitting-patches.rst
new file mode 100644
index 000000000000..437c23b367bb
--- /dev/null
+++ b/Documentation/translations/zh_CN/process/submitting-patches.rst
@@ -0,0 +1,682 @@
+.. _cn_submittingpatches:
+
+.. include:: ../disclaimer-zh_CN.rst
+
+:Original: :ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+
+译者::
+
+ 中文版维护者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 中文版翻译者: 钟宇 TripleX Chung <xxx.phy@gmail.com>
+ 时奎亮 Alex Shi <alex.shi@linux.alibaba.com>
+ 中文版校译者: 李阳 Li Yang <leoyang.li@nxp.com>
+ 王聪 Wang Cong <xiyou.wangcong@gmail.com>
+
+
+如何让你的改动进入内核
+======================
+
+对于想要将改动提交到 Linux 内核的个人或者公司来说,如果不熟悉“规矩”,
+提交的流程会让人畏惧。本文档收集了一系列建议,这些建议可以大大的提高你
+的改动被接受的机会.
+
+以下文档含有大量简洁的建议, 具体请见:
+:ref:`Documentation/process <development_process_main>`
+同样,:ref:`Documentation/translations/zh_CN/process/submit-checklist.rst <cn_submitchecklist>`
+给出在提交代码前需要检查的项目的列表。如果你在提交一个驱动程序,那么
+同时阅读一下:
+:ref:`Documentation/process/submitting-drivers.rst <submittingdrivers>`
+
+其中许多步骤描述了Git版本控制系统的默认行为;如果您使用Git来准备补丁,
+您将发现它为您完成的大部分机械工作,尽管您仍然需要准备和记录一组合理的
+补丁。一般来说,使用git将使您作为内核开发人员的生活更轻松。
+
+
+0) 获取当前源码树
+-----------------
+
+如果您没有一个可以使用当前内核源代码的存储库,请使用git获取一个。您将要
+从主线存储库开始,它可以通过以下方式获取::
+
+ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+但是,请注意,您可能不希望直接针对主线树进行开发。大多数子系统维护人员运
+行自己的树,并希望看到针对这些树准备的补丁。请参见MAINTAINERS文件中子系
+统的 **T:** 项以查找该树,或者简单地询问维护者该树是否未在其中列出。
+
+仍然可以通过tarballs下载内核版本(如下一节所述),但这是进行内核开发的
+一种困难的方式。
+
+1) "diff -up"
+-------------
+
+使用 "diff -up" 或者 "diff -uprN" 来创建补丁。
+
+所有内核的改动,都是以补丁的形式呈现的,补丁由 diff(1) 生成。创建补丁的
+时候,要确认它是以 "unified diff" 格式创建的,这种格式由 diff(1) 的 '-u'
+参数生成。而且,请使用 '-p' 参数,那样会显示每个改动所在的C函数,使得
+产生的补丁容易读得多。补丁应该基于内核源代码树的根目录,而不是里边的任
+何子目录。
+
+为一个单独的文件创建补丁,一般来说这样做就够了::
+
+ SRCTREE=linux
+ MYFILE=drivers/net/mydriver.c
+
+ cd $SRCTREE
+ cp $MYFILE $MYFILE.orig
+ vi $MYFILE # make your change
+ cd ..
+ diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch
+
+为多个文件创建补丁,你可以解开一个没有修改过的内核源代码树,然后和你自
+己的代码树之间做 diff 。例如::
+
+ MYSRC=/devel/linux
+
+ tar xvfz linux-3.19.tar.gz
+ mv linux-3.19 linux-3.19-vanilla
+ diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \
+ linux-3.19-vanilla $MYSRC > /tmp/patch
+
+"dontdiff" 是内核在编译的时候产生的文件的列表,列表中的文件在 diff(1)
+产生的补丁里会被跳过。
+
+确定你的补丁里没有包含任何不属于这次补丁提交的额外文件。记得在用diff(1)
+生成补丁之后,审阅一次补丁,以确保准确。
+
+如果你的改动很散乱,你应该研究一下如何将补丁分割成独立的部分,将改动分
+割成一系列合乎逻辑的步骤。这样更容易让其他内核开发者审核,如果你想你的
+补丁被接受,这是很重要的。请参阅:
+:ref:`cn_split_changes`
+
+如果你用 ``git`` , ``git rebase -i`` 可以帮助你这一点。如果你不用 ``git``,
+``quilt`` <http://savannah.nongnu.org/projects/quilt> 是另外一个流行的选择。
+
+.. _cn_describe_changes:
+
+2) 描述你的改动
+---------------
+
+描述你的问题。无论您的补丁是一行错误修复还是5000行新功能,都必须有一个潜在
+的问题激励您完成这项工作。让审稿人相信有一个问题值得解决,让他们读完第一段
+是有意义的。
+
+描述用户可见的影响。直接崩溃和锁定是相当有说服力的,但并不是所有的错误都那么
+明目张胆。即使在代码审查期间发现了这个问题,也要描述一下您认为它可能对用户产
+生的影响。请记住,大多数Linux安装运行的内核来自二级稳定树或特定于供应商/产品
+的树,只从上游精选特定的补丁,因此请包含任何可以帮助您将更改定位到下游的内容:
+触发的场景、DMESG的摘录、崩溃描述、性能回归、延迟尖峰、锁定等。
+
+量化优化和权衡。如果您声称在性能、内存消耗、堆栈占用空间或二进制大小方面有所
+改进,请包括支持它们的数字。但也要描述不明显的成本。优化通常不是免费的,而是
+在CPU、内存和可读性之间进行权衡;或者,探索性的工作,在不同的工作负载之间进
+行权衡。请描述优化的预期缺点,以便审阅者可以权衡成本和收益。
+
+一旦问题建立起来,就要详细地描述一下您实际在做什么。对于审阅者来说,用简单的
+英语描述代码的变化是很重要的,以验证代码的行为是否符合您的意愿。
+
+如果您将补丁描述写在一个表单中,这个表单可以很容易地作为“提交日志”放入Linux
+的源代码管理系统git中,那么维护人员将非常感谢您。见 :ref:`cn_explicit_in_reply_to`.
+
+每个补丁只解决一个问题。如果你的描述开始变长,这就表明你可能需要拆分你的补丁。
+请见 :ref:`cn_split_changes`
+
+提交或重新提交修补程序或修补程序系列时,请包括完整的修补程序说明和理由。不要
+只说这是补丁(系列)的第几版。不要期望子系统维护人员引用更早的补丁版本或引用
+URL来查找补丁描述并将其放入补丁中。也就是说,补丁(系列)及其描述应该是独立的。
+这对维护人员和审查人员都有好处。一些评审者可能甚至没有收到补丁的早期版本。
+
+用祈使语气来描述你的改动,例如“make xyzzy do frotz”而不是“[这个补丁]make
+xyzzy do frotz”或“[我]changed xyzzy to do frotz”,就好像你在命令代码库改变
+它的行为一样。
+
+如果修补程序修复了一个记录的bug条目,请按编号和URL引用该bug条目。如果补丁来
+自邮件列表讨论,请给出邮件列表存档的URL;使用带有 ``Message-ID`` 的
+https://lkml.kernel.org/ 重定向,以确保链接不会过时。
+
+但是,在没有外部资源的情况下,尽量让你的解释可理解。除了提供邮件列表存档或
+bug的URL之外,还要总结需要提交补丁的相关讨论要点。
+
+如果您想要引用一个特定的提交,不要只引用提交的 SHA-1 ID。还请包括提交的一行
+摘要,以便于审阅者了解它是关于什么的。例如::
+
+ Commit e21d2170f36602ae2708 ("video: remove unnecessary
+ platform_set_drvdata()") removed the unnecessary
+ platform_set_drvdata(), but left the variable "dev" unused,
+ delete it.
+
+您还应该确保至少使用前12位 SHA-1 ID. 内核存储库包含*许多*对象,使与较短的ID
+发生冲突的可能性很大。记住,即使现在不会与您的六个字符ID发生冲突,这种情况
+可能五年后改变。
+
+如果修补程序修复了某个特定提交中的错误,例如使用 ``git bisect`` 找到的提交,
+请使用带有前12个字符SHA-1 ID 的"Fixes:"标记和单行摘要。为了方便解析脚本,不要
+将该标记拆分为多行;此标记也不受“75列换行”规则的限制。例如::
+
+ Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed")
+
+下列 ``git config`` 设置可以添加让 ``git log``, ``git show`` 漂亮的显示格式::
+
+ [core]
+ abbrev = 12
+ [pretty]
+ fixes = Fixes: %h (\"%s\")
+
+.. _cn_split_changes:
+
+3) 拆分你的改动
+---------------
+
+将每个逻辑更改分隔成一个单独的补丁。
+
+例如,如果你的改动里同时有bug修正和性能优化,那么把这些改动拆分到两个或
+者更多的补丁文件中。如果你的改动包含对API的修改,并且修改了驱动程序来适
+应这些新的API,那么把这些修改分成两个补丁。
+
+另一方面,如果你将一个单独的改动做成多个补丁文件,那么将它们合并成一个
+单独的补丁文件。这样一个逻辑上单独的改动只被包含在一个补丁文件里。
+
+如果有一个补丁依赖另外一个补丁来完成它的改动,那没问题。简单的在你的补
+丁描述里指出“这个补丁依赖某补丁”就好了。
+
+在将您的更改划分为一系列补丁时,要特别注意确保内核在系列中的每个补丁之后
+都能正常构建和运行。使用 ``git bisect`` 来追踪问题的开发者可能会在任何时
+候分割你的补丁系列;如果你在中间引入错误,他们不会感谢你。
+
+如果你不能将补丁浓缩成更少的文件,那么每次大约发送出15个,然后等待审查
+和集成。
+
+4) 检查你的更改风格
+-------------------
+
+检查您的补丁是否存在基本样式冲突,详细信息可在
+:ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
+中找到。如果不这样做,只会浪费审稿人的时间,并且会导致你的补丁被拒绝,甚至
+可能没有被阅读。
+
+一个重要的例外是在将代码从一个文件移动到另一个文件时——在这种情况下,您不应
+该在移动代码的同一个补丁中修改移动的代码。这清楚地描述了移动代码和您的更改
+的行为。这大大有助于审查实际差异,并允许工具更好地跟踪代码本身的历史。
+
+在提交之前,使用补丁样式检查程序检查补丁(scripts/checkpatch.pl)。不过,
+请注意,样式检查程序应该被视为一个指南,而不是作为人类判断的替代品。如果您
+的代码看起来更好,但有违规行为,那么最好不要使用它。
+
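+例如,可以这样在发送之前检查补丁(补丁文件名仅为示意)::
+
+    ./scripts/checkpatch.pl --strict 0001-my-fix.patch
+
+    # 也可以直接检查尚未生成补丁文件的改动
+    git diff | ./scripts/checkpatch.pl --no-signoff -
+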
+检查者报告三个级别:
+
+ - ERROR:很可能出错的事情
+ - WARNING:需要仔细审查的事项
+ - CHECK:需要思考的事情
+
+您应该能够判断您的补丁中存在的所有违规行为。
+
+5) 选择补丁收件人
+-----------------
+
+您应该总是在任何补丁上复制相应的子系统维护人员,以获得他们维护的代码;查看
+维护人员文件和源代码修订历史记录,以了解这些维护人员是谁。脚本
+scripts/get_maintainer.pl在这个步骤中非常有用。如果您找不到正在工作的子系统
+的维护人员,那么Andrew Morton(akpm@linux-foundation.org)将充当最后的维护
+人员。
+
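+例如(补丁文件名与源文件路径仅为示意)::
+
+    ./scripts/get_maintainer.pl 0001-my-fix.patch
+    ./scripts/get_maintainer.pl -f drivers/net/mydriver.c
+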
+您通常还应该选择至少一个邮件列表来接收补丁集的副本。linux-kernel@vger.kernel.org
+只应作为最后的选择,因为该列表上的邮件流量已经让许多开发人员望而却步。
+在MAINTAINERS文件中查找子系统特定的列表;您的补丁可能会在那里得到更多的关注。
+不过,请不要发送垃圾邮件到无关的列表。
+
+许多与内核相关的列表托管在vger.kernel.org上;您可以在
+http://vger.kernel.org/vger-lists.html 上找到它们的列表。不过,也有与内核相关
+的列表托管在其他地方。
+
+不要一次发送超过15个补丁到vger邮件列表!!!!
+
+Linus Torvalds 是决定改动能否进入 Linux 内核的最终裁决者。他的 e-mail
+地址是 <torvalds@linux-foundation.org> 。他收到的 e-mail 很多,所以一般
+的说,最好别给他发 e-mail。
+
+如果您有修复可利用安全漏洞的补丁,请将该补丁发送到 security@kernel.org。对于
+严重的bug,可以考虑短期暂停以允许分销商向用户发布补丁;在这种情况下,显然不应
+将补丁发送到任何公共列表。
+
+修复已发布内核中严重错误的补丁程序应该指向稳定版维护人员,方法是放这样的一行::
+
+ Cc: stable@vger.kernel.org
+
+进入补丁的签准区(注意,不是电子邮件收件人)。除了这个文件之外,您还应该阅读
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
+
+但是,请注意,一些子系统维护人员希望得出他们自己的结论,即哪些补丁应该被放到
+稳定的树上。尤其是网络维护人员,不希望看到单个开发人员在补丁中添加像上面这样
+的行。
+
+如果更改影响到用户和内核接口,请向手册页维护人员(如维护人员文件中所列)发送
+手册页补丁,或至少发送更改通知,以便一些信息进入手册页。还应将用户空间API
+更改复制到 linux-api@vger.kernel.org。
+
+对于小的补丁,你也许会CC到搜集琐碎补丁的邮件列表(Trivial Patch Monkey)
+trivial@kernel.org,那里专门收集琐碎的补丁。下面这样的补丁会被看作“琐碎的”
+补丁:
+
+ - 文档的拼写修正。
+ - 修正会影响到 grep(1) 的拼写。
+ - 警告信息修正(频繁的打印无用的警告是不好的。)
+ - 编译错误修正(代码逻辑的确是对的,只是编译有问题。)
+ - 运行时修正(只要真的修正了错误。)
+ - 移除使用了被废弃的函数/宏的代码(例如 check_region。)
+ - 联系方式和文档修正。
+ - 用可移植的代码替换不可移植的代码(即使在体系结构相关的代码中,既然有
+ - 人拷贝,只要它是琐碎的)
+ - 任何文件的作者/维护者对该文件的改动(例如 patch monkey 在重传模式下)
+
+(译注,关于“琐碎补丁”的一些说明:因为原文的这一部分写得比较简单,所以不得不
+违例写一下译注。"trivial"这个英文单词的本意是“琐碎的,不重要的。”但是在这里
+有稍微有一些变化,例如对一些明显的NULL指针的修正,属于运行时修正,会被归类
+到琐碎补丁里。虽然NULL指针的修正很重要,但是这样的修正往往很小而且很容易得到
+检验,所以也被归入琐碎补丁。琐碎补丁更精确的归类应该是
+“simple, localized & easy to verify”,也就是说简单的,局部的和易于检验的。
+trivial@kernel.org邮件列表的目的是针对这样的补丁,为提交者提供一个中心,来
+降低提交的门槛。)
+
+6) 没有 MIME 编码,没有链接,没有压缩,没有附件,只有纯文本
+-----------------------------------------------------------
+
+Linus 和其他的内核开发者需要阅读和评论你提交的改动。对于内核开发者来说
+,可以“引用”你的改动很重要,使用一般的 e-mail 工具,他们就可以在你的
+代码的任何位置添加评论。
+
+因为这个原因,所有的提交的补丁都是 e-mail 中“内嵌”的。
+
+.. warning::
+ 如果你使用剪切-粘贴你的补丁,小心你的编辑器的自动换行功能破坏你的补丁
+
+不要将补丁作为 MIME 编码的附件,不管是否压缩。很多流行的 e-mail 软件不
+是任何时候都将 MIME 编码的附件当作纯文本发送的,这会使得别人无法在你的
+代码中加评论。另外,MIME 编码的附件会让 Linus 多花一点时间来处理,这就
+降低了你的改动被接受的可能性。
+
+例外:如果你的邮递员弄坏了补丁,那么有人可能会要求你使用mime重新发送补丁
+
+请参阅 :ref:`Documentation/translations/zh_CN/process/email-clients.rst <cn_email_clients>`
+以获取有关配置电子邮件客户端以使其不受影响地发送修补程序的提示。
+
+7) e-mail 的大小
+----------------
+
+大的改动对邮件列表不合适,对某些维护者也不合适。如果你的补丁,在不压缩
+的情况下,超过了300kB,那么你最好将补丁放在一个能通过 internet 访问的服
+务器上,然后用指向你的补丁的 URL 替代。但是请注意,如果您的补丁超过了
+300kB,那么它几乎肯定需要被拆分成更小的补丁。
+
+8)回复评审意见
+---------------
+
+你的补丁几乎肯定会得到评审者对补丁改进方法的评论。您必须对这些评论作出
+回应;让补丁被忽略的一个好办法就是忽略审阅者的意见。不会导致代码更改的
+意见或问题几乎肯定会带来注释或变更日志的改变,以便下一个评审者更好地了解
+正在发生的事情。
+
+一定要告诉审稿人你在做什么改变,并感谢他们的时间。代码审查是一个累人且
+耗时的过程,审查人员有时会变得暴躁。即使在这种情况下,也要礼貌地回应并
+解决他们指出的问题。
+
+9)不要泄气或不耐烦
+-------------------
+
+提交更改后,请耐心等待。审阅者是忙碌的人,可能无法立即访问您的修补程序。
+
+曾几何时,补丁曾在没有评论的情况下消失在空白中,但开发过程比现在更加顺利。
+您应该在一周左右的时间内收到评论;如果没有收到评论,请确保您已将补丁发送
+到正确的位置。在重新提交或联系审阅者之前至少等待一周-在诸如合并窗口之类的
+繁忙时间可能更长。
+
+10)主题中包含 PATCH
+--------------------
+
+由于到linus和linux内核的电子邮件流量很高,通常会在主题行前面加上[PATCH]
+前缀. 这使Linus和其他内核开发人员更容易将补丁与其他电子邮件讨论区分开。
+
+11)签署你的作品-开发者原始认证
+-------------------------------
+
+为了加强对谁做了何事的追踪,尤其是对那些透过好几层的维护者的补丁,我们
+建议在发送出去的补丁上加一个 “sign-off” 的过程。
+
+"sign-off" 是在补丁的注释的最后的简单的一行文字,认证你编写了它或者其他
+人有权力将它作为开放源代码的补丁传递。规则很简单:如果你能认证如下信息:
+
+开发者来源证书 1.1
+^^^^^^^^^^^^^^^^^^
+
+对于本项目的贡献,我认证如下信息:
+
+ (a)这些贡献是完全或者部分的由我创建,我有权利以文件中指出
+ 的开放源代码许可证提交它;或者
+ (b)这些贡献基于以前的工作,据我所知,这些以前的工作受恰当的开放
+ 源代码许可证保护,而且,根据许可证,我有权提交修改后的贡献,
+ 无论是完全还是部分由我创造,这些贡献都使用同一个开放源代码许可证
+ (除非我被允许用其它的许可证),正如文件中指出的;或者
+ (c)这些贡献由认证(a),(b)或者(c)的人直接提供给我,而
+ 且我没有修改它。
+ (d)我理解并同意这个项目和贡献是公开的,贡献的记录(包括我
+ 一起提交的个人记录,包括 sign-off )被永久维护并且可以和这个项目
+ 或者开放源代码的许可证同步地再发行。
+
+那么加入这样一行::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+
+使用你的真名(抱歉,不能使用假名或者匿名。)
+
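+如果你使用git,可以让它自动为你添加这一行(示意)::
+
+    git commit -s
+    # 或者为最近一次已有的提交补上签名
+    git commit --amend -s
+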
+有人在最后加上标签。现在这些东西会被忽略,但是你可以这样做,来标记公司
+内部的过程,或者只是指出关于 sign-off 的一些特殊细节。
+
+如果您是子系统或分支维护人员,有时需要稍微修改收到的补丁,以便合并它们,
+因为树和提交者中的代码不完全相同。如果你严格遵守规则(c),你应该要求提交者
+重新发布,但这完全是在浪费时间和精力。规则(b)允许您调整代码,但是更改一个
+提交者的代码并让他认可您的错误是非常不礼貌的。要解决此问题,建议在最后一个
+由签名行和您的行之间添加一行,指示更改的性质。虽然这并不是强制性的,但似乎
+在描述前加上您的邮件和/或姓名(全部用方括号括起来),这足以让人注意到您对最
+后一分钟的更改负有责任。例如::
+
+ Signed-off-by: Random J Developer <random@developer.example.org>
+ [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h]
+ Signed-off-by: Lucky K Maintainer <lucky@maintainer.example.org>
+
+如果您维护一个稳定的分支机构,同时希望对作者进行致谢、跟踪更改、合并修复并
+保护提交者不受投诉,那么这种做法尤其有用。请注意,在任何情况下都不能更改作者
+的ID(From 头),因为它是出现在更改日志中的标识。
+
+对回合(back-porters)的特别说明:在提交消息的顶部(主题行之后)插入一个补丁
+的起源指示似乎是一种常见且有用的实践,以便于跟踪。例如,下面是我们在3.x稳定
+版本中看到的内容::
+
+ Date: Tue Oct 7 07:26:38 2014 -0400
+
+ libata: Un-break ATA blacklist
+
+ commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream.
+
+还有, 这里是一个旧版内核中的一个回合补丁::
+
+ Date: Tue May 13 22:12:27 2008 +0200
+
+ wireless, airo: waitbusy() won't delay
+
+ [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a]
+
+12)何时使用Acked-by:,CC:,和Co-Developed by:
+----------------------------------------------
+
+Signed-off-by: 标记表示签名者参与了补丁的开发,或者他/她在补丁的传递路径中。
+
+如果一个人没有直接参与补丁的准备或处理,但希望表示并记录他们对补丁的批准,
+那么他们可以要求在补丁的变更日志中添加一个 Acked-by:
+
+Acked-by:通常由受影响代码的维护者使用,当该维护者既没有贡献也没有转发补丁时。
+
+Acked-by: 不像签字人那样正式。这是一个记录,确认人至少审查了补丁,并表示接受。
+因此,补丁合并有时会手动将Acker的“Yep,looks good to me”转换为 Acked-By:(但
+请注意,通常最好要求一个明确的Ack)。
+
+Acked-by:不一定表示对整个补丁的确认。例如,如果一个补丁影响多个子系统,并且
+有一个来自某个子系统维护者的 Acked-by:,那么这通常只表示对影响该维护者代码部分的确认。这里
+应该仔细判断。如有疑问,应参考邮件列表档案中的原始讨论。
+
+如果某人有机会对补丁进行评论,但没有提供此类评论,您可以选择在补丁中添加 ``Cc:``
+这是唯一一个标签,它可以在没有被它命名的人显式操作的情况下添加,但它应该表明
+这个人是在补丁上抄送的。讨论中包含了潜在利益相关方。
+
+Co-developed-by: 声明补丁是由多个开发人员共同创建的;当几个人在一个补丁上工
+作时,它用于将属性赋予共同作者(除了 From: 所赋予的作者之外)。因为
+Co-developed-by: 表示作者身份,所以每个共同开发人:必须紧跟在相关合作作者的
+签名之后。标准的签核程序要求:标记的签核顺序应尽可能反映补丁的时间历史,而不
+管作者是通过 From :还是由 Co-developed-by: 共同开发的。值得注意的是,最后一
+个签字人:必须始终是提交补丁的开发人员。
+
+注意,当作者也是电子邮件标题“发件人:”行中列出的人时,“From: ” 标记是可选的。
+
+作者提交的补丁程序示例::
+
+ <changelog>
+
+ Co-developed-by: First Co-Author <first@coauthor.example.org>
+ Signed-off-by: First Co-Author <first@coauthor.example.org>
+ Co-developed-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: Second Co-Author <second@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+
+合作开发者提交的补丁示例::
+
+ From: From Author <from@author.example.org>
+
+ <changelog>
+
+ Co-developed-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: Random Co-Author <random@coauthor.example.org>
+ Signed-off-by: From Author <from@author.example.org>
+ Co-developed-by: Submitting Co-Author <sub@coauthor.example.org>
+ Signed-off-by: Submitting Co-Author <sub@coauthor.example.org>
+
+
+13)使用报告人:、测试人:、审核人:、建议人:、修复人:
+--------------------------------------------------------
+
+Reported-by: 给那些发现错误并报告错误的人致谢,它希望激励他们在将来再次帮助
+我们。请注意,如果bug是以私有方式报告的,那么在使用Reported-by标记之前,请
+先请求权限。
+
+Tested-by: 标记表示补丁已由指定的人(在某些环境中)成功测试。这个标签通知
+维护人员已经执行了一些测试,为将来的补丁提供了一种定位测试人员的方法,并确
+保测试人员的信誉。
+
+Reviewed-by:相反,根据审查人的声明,表明该补丁已被审查并被认为是可接受的:
+
+
+审查人的监督声明
+^^^^^^^^^^^^^^^^
+
+通过提供我的 Reviewed-by,我声明:
+
+ (a) 我已经对这个补丁进行了一次技术审查,以评估它是否适合被包含到
+ 主线内核中。
+
+ (b) 与补丁相关的任何问题、顾虑或问题都已反馈给提交者。我对提交者对
+ 我的评论的回应感到满意。
+
+ (c) 虽然这一提交可能会改进一些东西,但我相信,此时,(1)对内核
+ 进行了有价值的修改,(2)没有包含争论中涉及的已知问题。
+
+ (d) 虽然我已经审查了补丁并认为它是健全的,但我不会(除非另有明确
+ 说明)作出任何保证或保证它将在任何给定情况下实现其规定的目的
+ 或正常运行。
+
+Reviewed-by 是一种观点声明,即补丁是对内核的适当修改,没有任何遗留的严重技术
+问题。任何感兴趣的审阅者(完成工作的人)都可以为一个补丁提供一个 Review-by
+标签。此标签用于向审阅者提供致谢,并通知维护者已在修补程序上完成的审阅程度。
+Reviewed-by: 当由已知了解主题区域并执行彻底检查的审阅者提供时,通常会增加
+补丁进入内核的可能性。
+
+Suggested-by: 表示补丁的想法是由指定的人提出的,并确保将此想法归功于指定的
+人。请注意,未经许可,不得添加此标签,特别是如果该想法未在公共论坛上发布。
+这就是说,如果我们勤快地致谢我们的创意者,他们很有希望在未来得到鼓舞,再次
+帮助我们。
+
+Fixes: 指示补丁在以前的提交中修复了一个问题。它可以很容易地确定错误的来源,
+这有助于检查错误修复。这个标记还帮助稳定内核团队确定应该接收修复的稳定内核
+版本。这是指示补丁修复的错误的首选方法。请参阅 :ref:`cn_describe_changes`
+描述您的更改以了解更多详细信息。
+
+.. _cn_the_canonical_patch_format:
+
+14)标准补丁格式
+----------------
+
+本节描述如何格式化补丁本身。请注意,如果您的补丁存储在 ``Git`` 存储库中,则
+可以使用 ``git format-patch`` 进行正确的补丁格式设置。但是,这些工具无法创建
+必要的文本,因此请务必阅读下面的说明。
+
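+例如,一种常见的做法是(分支名、版本号与输出目录均为示意)::
+
+    git format-patch -v2 --cover-letter -o outgoing/ origin/master
+    git send-email outgoing/*.patch
+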
+标准的补丁,标题行是::
+
+ Subject: [PATCH 001/123] 子系统:一句话概述
+
+标准补丁的信体存在如下部分:
+
+ - 一个 "from" 行指出补丁作者。后跟空行(仅当发送修补程序的人不是作者时才需要)。
+
+ - 解释的正文,行以75列包装,这将被复制到永久变更日志来描述这个补丁。
+
+ - 一个空行
+
+ - 上面描述的“Signed-off-by” 行,也将出现在更改日志中。
+
+ - 只包含 ``---`` 的标记线。
+
+ - 任何其他不适合放在变更日志的注释。
+
+ - 实际补丁( ``diff`` 输出)。
+
+标题行的格式,使得对标题行按字母序排序非常的容易 - 很多 e-mail 客户端都
+可以支持 - 因为序列号是用零填充的,所以按数字排序和按字母排序是一样的。
+
+e-mail 标题中的“子系统”标识哪个内核子系统将被打补丁。
+
+e-mail 标题中的“一句话概述”扼要的描述 e-mail 中的补丁。“一句话概述”
+不应该是一个文件名。对于一个补丁系列(“补丁系列”指一系列的多个相关补
+丁),不要对每个补丁都使用同样的“一句话概述”。
+
+记住 e-mail 的“一句话概述”会成为该补丁的全局唯一标识。它会蔓延到 git
+的改动记录里。然后“一句话概述”会被用在开发者的讨论里,用来指代这个补
+丁。用户将希望通过 google 来搜索"一句话概述"来找到那些讨论这个补丁的文
+章。当人们在两三个月后使用诸如 ``gitk`` 或 ``git log --oneline`` 之类
+的工具查看数千个补丁时,也会很快看到它。
+
+出于这些原因,概述必须不超过70-75个字符,并且必须描述补丁的更改以及为
+什么需要补丁。既要简洁又要描述性很有挑战性,但写得好的概述应该这样做。
+
+概述的前缀可以用方括号括起来:“Subject: [PATCH <tag>...] <概述>”。标记
+不被视为概述的一部分,而是描述应该如何处理补丁。如果补丁的多个版本已发
+送出来以响应评审(即“v1,v2,v3”)或“rfc”,以指示评审请求,那么通用标记
+可能包括版本描述符。如果一个补丁系列中有四个补丁,那么各个补丁可以这样
+编号:1/4、2/4、3/4、4/4。这可以确保开发人员了解补丁应用的顺序,并且他们
+已经查看或应用了补丁系列中的所有补丁。
+
+一些标题的例子::
+
+ Subject: [patch 2/5] ext2: improve scalability of bitmap searching
+ Subject: [PATCHv2 001/207] x86: fix eflags tracking
+
+"From" 行是信体里的最上面一行,具有如下格式::
+
+    From: Patch Author <author@example.com>
+
+"From" 行指明在永久改动日志里,谁会被确认为作者。如果没有 "From" 行,那
+么邮件头里的 "From: " 行会被用来决定改动日志中的作者。
+
+说明的主题将会被提交到永久的源代码改动日志里,因此对那些早已经不记得和
+这个补丁相关的讨论细节的有能力的读者来说,是有意义的。包括补丁程序定位
+错误的(内核日志消息、OOPS消息等)症状,对于搜索提交日志以寻找适用补丁的人
+尤其有用。如果一个补丁修复了一个编译失败,那么可能不需要包含所有编译失败;
+只要足够让搜索补丁的人能够找到它就行了。与概述一样,既要简洁又要描述性。
+
+"---" 标记行对于补丁处理工具要找到哪里是改动日志信息的结束,是不可缺少
+的。
+
+对于 "---" 标记之后的额外注解,一个好的用途就是用来写 diffstat,用来显
+示修改了什么文件和每个文件都增加和删除了多少行。diffstat 对于比较大的补
+丁特别有用。其余那些只是和时刻或者开发者相关的注解,不合适放到永久的改
+动日志里的,也应该放这里。
+使用 diffstat 的选项 "-p 1 -w 70",这样文件名就会从内核源代码树的目录开始,
+不会占用太宽的空间(很容易适合80列的宽度,也许会有一些缩进)。
+
+在后面的参考资料中能看到适当的补丁格式的更多细节。
+
+.. _cn_explicit_in_reply_to:
+
+15) 明确回复邮件头(In-Reply-To)
+-------------------------------
+
+手动添加回复补丁的标题头(In-Reply-To:)是有帮助的(例如,使用 ``git send-email`` )
+将补丁与以前的相关讨论关联起来,例如,将bug修复程序链接到电子邮件和bug报告。
+但是,对于多补丁系列,最好避免在回复时使用链接到该系列的旧版本。这样,
+补丁的多个版本就不会成为电子邮件客户端中无法管理的引用序列。如果链接有用,
+可以使用 https://lkml.kernel.org/ 重定向器(例如,在封面电子邮件文本中)
+链接到补丁系列的早期版本。
+
+16) 发送git pull请求
+--------------------
+
+如果您有一系列补丁,那么让维护人员通过git pull操作将它们直接拉入子系统存储
+库可能是最方便的。但是,请注意,从开发人员那里获取补丁比从邮件列表中获取补
+丁需要更高的信任度。因此,许多子系统维护人员不愿意接受请求,特别是来自新的
+未知开发人员的请求。如果有疑问,您可以在封面邮件中使用pull 请求作为补丁系列
+正常发布的一个选项,让维护人员可以选择使用其中之一。
+
+pull 请求的主题行中应该有[GIT PULL]。请求本身应该在一行中包含存储库名称和
+感兴趣的分支;它应该看起来像::
+
+ Please pull from
+
+ git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
+
+ to get these changes:
+
+
+pull 请求还应该包含一条整体消息,说明请求中将包含什么,一个补丁本身的 ``Git shortlog``
+以及一个显示补丁系列整体效果的 ``diffstat`` 。当然,将所有这些信息收集在一起
+的最简单方法是让 ``git`` 使用 ``git request-pull`` 命令为您完成这些工作。
+
+一些维护人员(包括Linus)希望看到来自已签名提交的请求;这增加了他们对你的
+请求信心。特别是,在没有签名标签的情况下,Linus 不会从像 Github 这样的公共
+托管站点拉请求。
+
+创建此类签名的第一步是生成一个 GnuPG 密钥,并由一个或多个核心内核开发人员对
+其进行签名。这一步对新开发人员来说可能很困难,但没有办法绕过它。参加会议是
+找到可以签署您的密钥的开发人员的好方法。
+
+一旦您在Git 中准备了一个您希望有人拉的补丁系列,就用 ``git tag -s`` 创建一
+个签名标记。这将创建一个新标记,标识该系列中的最后一次提交,并包含用您的私
+钥创建的签名。您还可以将changelog样式的消息添加到标记中;这是一个描述拉请求
+整体效果的理想位置。
+
+如果维护人员将要从中提取的树不是您正在使用的存储库,请不要忘记将已签名的标记
+显式推送到公共树。
+
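+例如(标记名、说明文字与远程仓库名均为示意)::
+
+    git tag -s my-signed-tag -m "Changes for the next merge window"
+    git push origin my-signed-tag
+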
+生成拉请求时,请使用已签名的标记作为目标。这样的命令可以实现::
+
+ git request-pull master git://my.public.tree/linux.git my-signed-tag
+
+参考文献
+--------
+
+Andrew Morton, "The perfect patch" (tpp).
+ <http://www.ozlabs.org/~akpm/stuff/tpp.txt>
+
+Jeff Garzik, "Linux kernel patch submission format".
+ <http://linux.yyz.us/patch-format.html>
+
+Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
+ <http://www.kroah.com/log/linux/maintainer.html>
+
+ <http://www.kroah.com/log/linux/maintainer-02.html>
+
+ <http://www.kroah.com/log/linux/maintainer-03.html>
+
+ <http://www.kroah.com/log/linux/maintainer-04.html>
+
+ <http://www.kroah.com/log/linux/maintainer-05.html>
+
+ <http://www.kroah.com/log/linux/maintainer-06.html>
+
+NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!
+ <https://lkml.org/lkml/2005/7/11/336>
+
+Kernel Documentation/process/coding-style.rst:
+ :ref:`Documentation/translations/zh_CN/process/coding-style.rst <cn_codingstyle>`
+
+Linus Torvalds's mail on the canonical patch format:
+ <http://lkml.org/lkml/2005/4/7/183>
+
+Andi Kleen, "On submitting kernel patches"
+ Some strategies to get difficult or controversial changes in.
+
+ http://halobates.de/on-submitting-patches.pdf
diff --git a/Documentation/translations/zh_CN/volatile-considered-harmful.txt b/Documentation/translations/zh_CN/process/volatile-considered-harmful.rst
index 475125967197..48b32ce58ef1 100644
--- a/Documentation/translations/zh_CN/volatile-considered-harmful.txt
+++ b/Documentation/translations/zh_CN/process/volatile-considered-harmful.rst
@@ -1,30 +1,23 @@
-Chinese translated version of Documentation/process/volatile-considered-harmful.rst
+.. _cn_volatile_considered_harmful:
-If you have any comment or update to the content, please contact the
-original document maintainer directly. However, if you have a problem
-communicating in English you can also ask the Chinese maintainer for
-help. Contact the Chinese maintainer if this translation is outdated
-or if there is a problem with the translation.
+.. include:: ../disclaimer-zh_CN.rst
-Maintainer: Jonathan Corbet <corbet@lwn.net>
-Chinese maintainer: Bryan Wu <bryan.wu@analog.com>
----------------------------------------------------------------------
-Documentation/process/volatile-considered-harmful.rst 的中文翻译
+:Original: :ref:`Documentation/process/volatile-considered-harmful.rst
+ <volatile_considered_harmful>`
如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
-译存在问题,请联系中文版维护者。
+译存在问题,请联系中文版维护者::
-英文版维护者: Jonathan Corbet <corbet@lwn.net>
-中文版维护者: 伍鹏 Bryan Wu <bryan.wu@analog.com>
-中文版翻译者: 伍鹏 Bryan Wu <bryan.wu@analog.com>
-中文版校译者: 张汉辉 Eugene Teo <eugeneteo@kernel.sg>
- 杨瑞 Dave Young <hidave.darkstar@gmail.com>
-以下为正文
----------------------------------------------------------------------
+ 英文版维护者: Jonathan Corbet <corbet@lwn.net>
+ 中文版维护者: 伍鹏 Bryan Wu <bryan.wu@analog.com>
+ 中文版翻译者: 伍鹏 Bryan Wu <bryan.wu@analog.com>
+ 中文版校译者: 张汉辉 Eugene Teo <eugeneteo@kernel.sg>
+ 杨瑞 Dave Young <hidave.darkstar@gmail.com>
+ 时奎亮 Alex Shi <alex.shi@linux.alibaba.com>
为什么不应该使用“volatile”类型
-------------------------------
+==============================
C程序员通常认为volatile表示某个变量可以在当前执行的线程之外被改变;因此,在内核
中用到共享数据结构时,常常会有C程序员喜欢使用volatile这类变量。换句话说,他们经
@@ -41,7 +34,7 @@ C程序员通常认为volatile表示某个变量可以在当前执行的线程
必要再使用volatile。如果仍然必须使用volatile,那么几乎可以肯定在代码的某处有一
个bug。在正确设计的内核代码中,volatile能带来的仅仅是使事情变慢。
-思考一下这段典型的内核代码:
+思考一下这段典型的内核代码::
spin_lock(&the_lock);
do_something_on(&shared_data);
@@ -66,7 +59,7 @@ volatile的存储类型最初是为那些内存映射的I/O寄存器而定义。
是必需的。
另一种引起用户可能使用volatile的情况是当处理器正忙着等待一个变量的值。正确执行一
-个忙等待的方法是:
+个忙等待的方法是::
while (my_variable != what_i_want)
cpu_relax();
diff --git a/Documentation/translations/zh_CN/sparse.txt b/Documentation/translations/zh_CN/sparse.txt
index 2f728962a8e2..47fc4a06ebe8 100644
--- a/Documentation/translations/zh_CN/sparse.txt
+++ b/Documentation/translations/zh_CN/sparse.txt
@@ -6,7 +6,7 @@ communicating in English you can also ask the Chinese maintainer for
help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
-Chinese maintainer: Li Yang <leo@zh-kernel.org>
+Chinese maintainer: Li Yang <leoyang.li@nxp.com>
---------------------------------------------------------------------
Documentation/dev-tools/sparse.rst 的中文翻译
@@ -14,8 +14,8 @@ Documentation/dev-tools/sparse.rst 的中文翻译
交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
译存在问题,请联系中文版维护者。
-中文版维护者: 李阳 Li Yang <leo@zh-kernel.org>
-中文版翻译者: 李阳 Li Yang <leo@zh-kernel.org>
+中文版维护者: 李阳 Li Yang <leoyang.li@nxp.com>
+中文版翻译者: 李阳 Li Yang <leoyang.li@nxp.com>
以下为正文
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index 51b4ff031586..1ee82419d8aa 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -1,5 +1,5 @@
=========================
-UNALIGNED MEMORY ACCESSES
+Unaligned Memory Accesses
=========================
:Author: Daniel Drake <dsd@gentoo.org>,
diff --git a/Documentation/usb/WUSB-Design-overview.txt b/Documentation/usb/WUSB-Design-overview.txt
index fdb47637720e..dc5e21609bb5 100644
--- a/Documentation/usb/WUSB-Design-overview.txt
+++ b/Documentation/usb/WUSB-Design-overview.txt
@@ -1,7 +1,9 @@
-
+================================
Linux UWB + Wireless USB + WiNET
+================================
+
+ Copyright (C) 2005-2006 Intel Corporation
- (C) 2005-2006 Intel Corporation
Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
This program is free software; you can redistribute it and/or
@@ -29,6 +31,7 @@ drivers for the USB based UWB radio controllers defined in the
Wireless USB 1.0 specification (including Wireless USB host controller
and an Intel WiNET controller).
+.. Contents
1. Introduction
1. HWA: Host Wire adapters, your Wireless USB dongle
@@ -51,7 +54,8 @@ and an Intel WiNET controller).
4. Glossary
- Introduction
+Introduction
+============
UWB is a wide-band communication protocol that is to serve also as the
low-level protocol for others (much like TCP sits on IP). Currently
@@ -93,7 +97,8 @@ The different logical parts of this driver are:
do the actual WUSB.
- HWA: Host Wire adapters, your Wireless USB dongle
+HWA: Host Wire adapters, your Wireless USB dongle
+-------------------------------------------------
WUSB also defines a device called a Host Wire Adaptor (HWA), which in
mere terms is a USB dongle that enables your PC to have UWB and Wireless
@@ -125,7 +130,8 @@ The HWA itself is broken in two or three main interfaces:
their type and kick into gear.
- DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
+DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
+---------------------------------------------------------------
These are the complement to HWAs. They are a USB host for connecting
wired devices, but it is connected to your PC connected via Wireless
@@ -137,7 +143,8 @@ code with the HWA-RC driver; there is a bunch of factorization work that
has been done to support that in upcoming releases.
- WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
+WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
+-------------------------------------------------------------------
This is your usual PCI device that implements WHCI. Similar in concept
to EHCI, it allows your wireless USB devices (including DWAs) to connect
@@ -148,7 +155,8 @@ There is still no driver support for this, but will be in upcoming
releases.
- The UWB stack
+The UWB stack
+=============
The main mission of the UWB stack is to keep a tally of which devices
are in radio proximity to allow drivers to connect to them. As well, it
@@ -156,7 +164,8 @@ provides an API for controlling the local radio controllers (RCs from
now on), such as to start/stop beaconing, scan, allocate bandwidth, etc.
- Devices and hosts: the basic structure
+Devices and hosts: the basic structure
+--------------------------------------
The main building block here is the UWB device (struct uwb_dev). For
each device that pops up in radio presence (ie: the UWB host receives a
@@ -187,7 +196,8 @@ the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
for the PCI connected WHCI controller.
- Host Controller life cycle
+Host Controller life cycle
+--------------------------
So let's say we connect a dongle to the system: it is detected and
firmware uploaded if needed [for Intel's i1480
@@ -209,7 +219,8 @@ When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
takes time of tearing everything down safely (or not...).
- On the air: beacons and enumerating the radio neighborhood
+On the air: beacons and enumerating the radio neighborhood
+----------------------------------------------------------
So assuming we have devices and we have agreed for a channel to connect
on (let's say 9), we put the new RC to beacon:
@@ -235,12 +246,14 @@ are received in some time, the device is considered gone and wiped out
the beacon cache of dead devices].
- Device lists
+Device lists
+------------
All UWB devices are kept in the list of the struct bus_type uwb_bus_type.
- Bandwidth allocation
+Bandwidth allocation
+--------------------
The UWB stack maintains a local copy of DRP availability through
processing of incoming *DRP Availability Change* notifications. This
@@ -260,7 +273,8 @@ completion. [Note: The bandwidth reservation work is in progress and
subject to change.]
- Wireless USB Host Controller drivers
+Wireless USB Host Controller drivers
+====================================
*WARNING* This section needs a lot of work!
@@ -296,7 +310,8 @@ starts sending MMCs.
Now it all depends on external stimuli.
-*New device connection*
+New device connection
+---------------------
A new device pops up, it scans the radio looking for MMCs that give out
the existence of Wireless USB channels. Once one (or more) are found,
@@ -322,7 +337,8 @@ has seen the port status changes, as we have been toggling them. It will
start enumerating and doing transfers through usb_hcd->urb_enqueue() to
read descriptors and move our data.
-*Device life cycle and keep alives*
+Device life cycle and keep alives
+---------------------------------
Every time there is a successful transfer to/from a device, we update a
per-device activity timestamp. If not, every now and then we check and
@@ -340,7 +356,8 @@ device list looking for whom needs refreshing.
If the device wants to disconnect, it will either die (ugly) or send a
/DN_Disconnect/ that will prompt a disconnection from the system.
-*Sending and receiving data*
+Sending and receiving data
+--------------------------
Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
@@ -394,7 +411,8 @@ finalize the transfer.
For IN xfers, we only issue URBs for the segments we want to read and
then wait for the xfer result data.
-*URB mapping into xfers*
+URB mapping into xfers
+^^^^^^^^^^^^^^^^^^^^^^
This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
rpipe to the endpoint where we have to transmit, create a transfer
@@ -407,7 +425,8 @@ and not yet done and when all that is done, the xfer callback will be
called--this will call the URB callback.
- Glossary
+Glossary
+========
*DWA* -- Device Wire Adapter
@@ -436,4 +455,3 @@ the host.
Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
InakyPerezGonzalez)
-
diff --git a/Documentation/usb/acm.txt b/Documentation/usb/acm.txt
index 903abca10517..e8bda98e9b51 100644
--- a/Documentation/usb/acm.txt
+++ b/Documentation/usb/acm.txt
@@ -1,127 +1,131 @@
- Linux ACM driver v0.16
- (c) 1999 Vojtech Pavlik <vojtech@suse.cz>
- Sponsored by SuSE
-----------------------------------------------------------------------------
+======================
+Linux ACM driver v0.16
+======================
+
+Copyright (c) 1999 Vojtech Pavlik <vojtech@suse.cz>
+
+Sponsored by SuSE
0. Disclaimer
~~~~~~~~~~~~~
- This program is free software; you can redistribute it and/or modify it
+This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option)
any later version.
- This program is distributed in the hope that it will be useful, but
+This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
- You should have received a copy of the GNU General Public License along
+You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place, Suite 330, Boston, MA 02111-1307 USA
- Should you need to contact me, the author, you can do so either by e-mail
-- mail your message to <vojtech@suse.cz>, or by paper mail: Vojtech Pavlik,
+Should you need to contact me, the author, you can do so either by e-mail -
+mail your message to <vojtech@suse.cz>, or by paper mail: Vojtech Pavlik,
Ucitelska 1576, Prague 8, 182 00 Czech Republic
- For your convenience, the GNU General Public License version 2 is included
+For your convenience, the GNU General Public License version 2 is included
in the package: See the file COPYING.
1. Usage
~~~~~~~~
- The drivers/usb/class/cdc-acm.c drivers works with USB modems and USB ISDN terminal
+The drivers/usb/class/cdc-acm.c driver works with USB modems and USB ISDN terminal
adapters that conform to the Universal Serial Bus Communication Device Class
Abstract Control Model (USB CDC ACM) specification.
- Many modems do, here is a list of those I know of:
+Many modems do; here is a list of those I know of:
- 3Com OfficeConnect 56k
- 3Com Voice FaxModem Pro
- 3Com Sportster
- MultiTech MultiModem 56k
- Zoom 2986L FaxModem
- Compaq 56k FaxModem
- ELSA Microlink 56k
+ - 3Com OfficeConnect 56k
+ - 3Com Voice FaxModem Pro
+ - 3Com Sportster
+ - MultiTech MultiModem 56k
+ - Zoom 2986L FaxModem
+ - Compaq 56k FaxModem
+ - ELSA Microlink 56k
- I know of one ISDN TA that does work with the acm driver:
+I know of one ISDN TA that does work with the acm driver:
- 3Com USR ISDN Pro TA
+ - 3Com USR ISDN Pro TA
- Some cell phones also connect via USB. I know the following phones work:
+Some cell phones also connect via USB. I know the following phones work:
- SonyEricsson K800i
+ - SonyEricsson K800i
- Unfortunately many modems and most ISDN TAs use proprietary interfaces and
+Unfortunately many modems and most ISDN TAs use proprietary interfaces and
thus won't work with this driver. Check for ACM compliance before buying.
- To use the modems you need these modules loaded:
+To use the modems you need these modules loaded::
usbcore.ko
uhci-hcd.ko ohci-hcd.ko or ehci-hcd.ko
cdc-acm.ko
- After that, the modem[s] should be accessible. You should be able to use
+After that, the modem[s] should be accessible. You should be able to use
minicom, ppp and mgetty with them.
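
As a minimal sketch only, assuming a modular kernel build and an EHCI host
controller (the host-controller module to load depends on your hardware),
loading the modules listed above could look like::

    # modprobe ehci-hcd
    # modprobe cdc-acm
    # dmesg | tail    # the new ttyACM port should be reported here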
2. Verifying that it works
~~~~~~~~~~~~~~~~~~~~~~~~~~
- The first step would be to check /sys/kernel/debug/usb/devices, it should look
-like this:
-
-T: Bus=01 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
-B: Alloc= 0/900 us ( 0%), #Int= 0, #Iso= 0
-D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
-P: Vendor=0000 ProdID=0000 Rev= 0.00
-S: Product=USB UHCI Root Hub
-S: SerialNumber=6800
-C:* #Ifs= 1 Cfg#= 1 Atr=40 MxPwr= 0mA
-I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
-E: Ad=81(I) Atr=03(Int.) MxPS= 8 Ivl=255ms
-T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
-D: Ver= 1.00 Cls=02(comm.) Sub=00 Prot=00 MxPS= 8 #Cfgs= 2
-P: Vendor=04c1 ProdID=008f Rev= 2.07
-S: Manufacturer=3Com Inc.
-S: Product=3Com U.S. Robotics Pro ISDN TA
-S: SerialNumber=UFT53A49BVT7
-C: #Ifs= 1 Cfg#= 1 Atr=60 MxPwr= 0mA
-I: If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=acm
-E: Ad=85(I) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
-E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
-E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=128ms
-C:* #Ifs= 2 Cfg#= 2 Atr=60 MxPwr= 0mA
-I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
-E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=128ms
-I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
-E: Ad=85(I) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
-E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
+
+The first step would be to check /sys/kernel/debug/usb/devices; it should look
+like this::
+
+ T: Bus=01 Lev=00 Prnt=00 Port=00 Cnt=00 Dev#= 1 Spd=12 MxCh= 2
+ B: Alloc= 0/900 us ( 0%), #Int= 0, #Iso= 0
+ D: Ver= 1.00 Cls=09(hub ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
+ P: Vendor=0000 ProdID=0000 Rev= 0.00
+ S: Product=USB UHCI Root Hub
+ S: SerialNumber=6800
+ C:* #Ifs= 1 Cfg#= 1 Atr=40 MxPwr= 0mA
+ I: If#= 0 Alt= 0 #EPs= 1 Cls=09(hub ) Sub=00 Prot=00 Driver=hub
+ E: Ad=81(I) Atr=03(Int.) MxPS= 8 Ivl=255ms
+ T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
+ D: Ver= 1.00 Cls=02(comm.) Sub=00 Prot=00 MxPS= 8 #Cfgs= 2
+ P: Vendor=04c1 ProdID=008f Rev= 2.07
+ S: Manufacturer=3Com Inc.
+ S: Product=3Com U.S. Robotics Pro ISDN TA
+ S: SerialNumber=UFT53A49BVT7
+ C: #Ifs= 1 Cfg#= 1 Atr=60 MxPwr= 0mA
+ I: If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=acm
+ E: Ad=85(I) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
+ E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
+ E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=128ms
+ C:* #Ifs= 2 Cfg#= 2 Atr=60 MxPwr= 0mA
+ I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
+ E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=128ms
+ I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
+ E: Ad=85(I) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
+ E: Ad=04(O) Atr=02(Bulk) MxPS= 64 Ivl= 0ms
The presence of these three lines (and the Cls= 'comm' and 'data' classes)
is important, it means it's an ACM device. The Driver=acm means the acm
driver is used for the device. If you see only Cls=ff(vend.) then you're out
+of luck, you have a device with a vendor-specific interface::
-
-D: Ver= 1.00 Cls=02(comm.) Sub=00 Prot=00 MxPS= 8 #Cfgs= 2
-I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
-I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
-
-In the system log you should see:
-
-usb.c: USB new device connect, assigned device number 2
-usb.c: kmalloc IF c7691fa0, numif 1
-usb.c: kmalloc IF c7b5f3e0, numif 2
-usb.c: skipped 4 class/vendor specific interface descriptors
-usb.c: new device strings: Mfr=1, Product=2, SerialNumber=3
-usb.c: USB device number 2 default language ID 0x409
-Manufacturer: 3Com Inc.
-Product: 3Com U.S. Robotics Pro ISDN TA
-SerialNumber: UFT53A49BVT7
-acm.c: probing config 1
-acm.c: probing config 2
-ttyACM0: USB ACM device
-acm.c: acm_control_msg: rq: 0x22 val: 0x0 len: 0x0 result: 0
-acm.c: acm_control_msg: rq: 0x20 val: 0x0 len: 0x7 result: 7
-usb.c: acm driver claimed interface c7b5f3e0
-usb.c: acm driver claimed interface c7b5f3f8
-usb.c: acm driver claimed interface c7691fa0
+of luck, you have a device with vendor specific-interface::
+
+ D: Ver= 1.00 Cls=02(comm.) Sub=00 Prot=00 MxPS= 8 #Cfgs= 2
+ I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
+ I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
+
+In the system log you should see::
+
+ usb.c: USB new device connect, assigned device number 2
+ usb.c: kmalloc IF c7691fa0, numif 1
+ usb.c: kmalloc IF c7b5f3e0, numif 2
+ usb.c: skipped 4 class/vendor specific interface descriptors
+ usb.c: new device strings: Mfr=1, Product=2, SerialNumber=3
+ usb.c: USB device number 2 default language ID 0x409
+ Manufacturer: 3Com Inc.
+ Product: 3Com U.S. Robotics Pro ISDN TA
+ SerialNumber: UFT53A49BVT7
+ acm.c: probing config 1
+ acm.c: probing config 2
+ ttyACM0: USB ACM device
+ acm.c: acm_control_msg: rq: 0x22 val: 0x0 len: 0x0 result: 0
+ acm.c: acm_control_msg: rq: 0x20 val: 0x0 len: 0x7 result: 7
+ usb.c: acm driver claimed interface c7b5f3e0
+ usb.c: acm driver claimed interface c7b5f3f8
+ usb.c: acm driver claimed interface c7691fa0
If all this seems to be OK, fire up minicom and set it to talk to the ttyACM
device and try typing 'at'. If it responds with 'OK', then everything is
diff --git a/Documentation/usb/authorization.txt b/Documentation/usb/authorization.txt
index 9dd1dc7b1009..9e53909d04c2 100644
--- a/Documentation/usb/authorization.txt
+++ b/Documentation/usb/authorization.txt
@@ -1,7 +1,8 @@
-
+==============================================================
Authorizing (or not) your USB devices to connect to the system
+==============================================================
-(C) 2007 Inaky Perez-Gonzalez <inaky@linux.intel.com> Intel Corporation
+Copyright (C) 2007 Inaky Perez-Gonzalez <inaky@linux.intel.com> Intel Corporation
This feature allows you to control if a USB device can be used (or
not) in a system. This feature will allow you to implement a lock-down
@@ -12,24 +13,25 @@ its interfaces are immediately made available to the users. With this
modification, only if root authorizes the device to be configured will
then it be possible to use it.
-Usage:
+Usage
+=====
-Authorize a device to connect:
+Authorize a device to connect::
-$ echo 1 > /sys/bus/usb/devices/DEVICE/authorized
+ $ echo 1 > /sys/bus/usb/devices/DEVICE/authorized
-Deauthorize a device:
+De-authorize a device::
-$ echo 0 > /sys/bus/usb/devices/DEVICE/authorized
+ $ echo 0 > /sys/bus/usb/devices/DEVICE/authorized
Set new devices connected to hostX to be deauthorized by default (ie:
-lock down):
+lock down)::
-$ echo 0 > /sys/bus/usb/devices/usbX/authorized_default
+ $ echo 0 > /sys/bus/usb/devices/usbX/authorized_default
-Remove the lock down:
+Remove the lock down::
-$ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
+ $ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
By default, Wired USB devices are authorized by default to
connect. Wireless USB hosts deauthorize by default all new connected
@@ -40,21 +42,21 @@ USB ports.
Example system lockdown (lame)
------------------------
+------------------------------
Imagine you want to implement a lockdown so only devices of type XYZ
can be connected (for example, it is a kiosk machine with a visible
-USB port):
+USB port)::
-boot up
-rc.local ->
+ boot up
+ rc.local ->
- for host in /sys/bus/usb/devices/usb*
- do
- echo 0 > $host/authorized_default
- done
+ for host in /sys/bus/usb/devices/usb*
+ do
+ echo 0 > $host/authorized_default
+ done
-Hookup an script to udev, for new USB devices
+Hook up a script to udev, for new USB devices::
if device_is_my_type $DEV
then
checking if the class, type and protocol match something is the worst
security verification you can make (or the best, for someone willing
to break it). If you need something secure, use crypto and Certificate
Authentication or stuff like that. Something simple for a storage key
-could be:
+could be::
-function device_is_my_type()
-{
+ function device_is_my_type()
+ {
echo 1 > authorized # temporarily authorize it
# FIXME: make sure none can mount it
mount DEVICENODE /mntpoint
@@ -83,7 +85,7 @@ function device_is_my_type()
else
echo 0 > authorized
fi
-}
+ }
Of course, this is lame, you'd want to do a real certificate
@@ -95,30 +97,35 @@ welcome.
Interface authorization
-----------------------
+
There is a similar approach to allow or deny specific USB interfaces.
That allows blocking only a subset of a USB device.
-Authorize an interface:
-$ echo 1 > /sys/bus/usb/devices/INTERFACE/authorized
+Authorize an interface::
-Deauthorize an interface:
-$ echo 0 > /sys/bus/usb/devices/INTERFACE/authorized
+ $ echo 1 > /sys/bus/usb/devices/INTERFACE/authorized
+
+Deauthorize an interface::
+
+ $ echo 0 > /sys/bus/usb/devices/INTERFACE/authorized
The default value for new interfaces
on a particular USB bus can be changed, too.
-Allow interfaces per default:
-$ echo 1 > /sys/bus/usb/devices/usbX/interface_authorized_default
+Allow interfaces per default::
+
+ $ echo 1 > /sys/bus/usb/devices/usbX/interface_authorized_default
+
+Deny interfaces per default::
-Deny interfaces per default:
-$ echo 0 > /sys/bus/usb/devices/usbX/interface_authorized_default
+ $ echo 0 > /sys/bus/usb/devices/usbX/interface_authorized_default
Per default the interface_authorized_default bit is 1.
So all interfaces are authorized by default.
Note:
-If a deauthorized interface will be authorized so the driver probing must
-be triggered manually by writing INTERFACE to /sys/bus/usb/drivers_probe
+ If a deauthorized interface is later authorized, the driver probing must
+ be triggered manually by writing INTERFACE to /sys/bus/usb/drivers_probe
For drivers that need multiple interfaces all needed interfaces should be
authorized first. After that the drivers should be probed.
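
A minimal sketch of the flow described above, using the sysfs paths documented
here (the interface name 1-1:1.0 is only an example), could be::

    # echo 0 > /sys/bus/usb/devices/usb1/interface_authorized_default
    # echo 1 > /sys/bus/usb/devices/1-1:1.0/authorized
    # echo 1-1:1.0 > /sys/bus/usb/drivers_probe   # trigger driver probing manually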
diff --git a/Documentation/usb/chipidea.txt b/Documentation/usb/chipidea.txt
index d1eedc01b00a..68473abe2823 100644
--- a/Documentation/usb/chipidea.txt
+++ b/Documentation/usb/chipidea.txt
@@ -1,22 +1,37 @@
+==============================================
+ChipIdea Highspeed Dual Role Controller Driver
+==============================================
+
1. How to test OTG FSM(HNP and SRP)
-----------------------------------
+
To show how to demo OTG HNP and SRP functions via sys input files
with 2 Freescale i.MX6Q sabre SD boards.
1.1 How to enable OTG FSM
----------------------------------------
+-------------------------
+
1.1.1 Select CONFIG_USB_OTG_FSM in menuconfig, rebuild kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
Image and modules. If you want to check some internal
variables for otg fsm, mount debugfs, there are 2 files
-which can show otg fsm variables and some controller registers value:
-cat /sys/kernel/debug/ci_hdrc.0/otg
-cat /sys/kernel/debug/ci_hdrc.0/registers
+which can show otg fsm variables and some controller register values::
+
+ cat /sys/kernel/debug/ci_hdrc.0/otg
+ cat /sys/kernel/debug/ci_hdrc.0/registers
+
1.1.2 Add below entries in your dts file for your controller node
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
otg-rev = <0x0200>;
adp-disable;
1.2 Test operations
-------------------
+
1) Power up 2 Freescale i.MX6Q sabre SD boards with gadget class driver loaded
(e.g. g_mass_storage).
@@ -26,19 +41,24 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
The A-device(with micro A plug inserted) should enumerate B-device.
3) Role switch
- On B-device:
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
+
+ On B-device::
+
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
B-device should take host role and enumerate A-device.
4) A-device switch back to host.
- On B-device:
- echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
+
+ On B-device::
+
+ echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
or, by introducing HNP polling, B-Host can know when A-peripheral wish
to be host role, so this role switch also can be trigged in A-peripheral
- side by answering the polling from B-Host, this can be done on A-device:
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
+ side by answering the polling from B-Host, this can be done on A-device::
+
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
A-device should switch back to host and enumerate B-device.
@@ -49,23 +69,31 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
A-device should NOT enumerate B-device.
if A-device wants to use bus:
- On A-device:
- echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
+
+ On A-device::
+
+ echo 0 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
if B-device wants to use bus:
- On B-device:
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
+
+ On B-device::
+
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
7) A-device power down the bus.
- On A-device:
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
+
+ On A-device::
+
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
A-device should disconnect with B-device and power down the bus.
8) B-device does data pulse for SRP.
- On B-device:
- echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
+
+ On B-device::
+
+ echo 1 > /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
A-device should resume usb bus and enumerate B-device.
@@ -75,22 +103,31 @@ cat /sys/kernel/debug/ci_hdrc.0/registers
July 27, 2012 Revision 2.0 version 1.1a"
2. How to enable USB as system wakeup source
------------------------------------
+--------------------------------------------
Below is the example for how to enable USB as system wakeup source
at imx6 platform.
-2.1 Enable core's wakeup
-echo enabled > /sys/bus/platform/devices/ci_hdrc.0/power/wakeup
-2.2 Enable glue layer's wakeup
-echo enabled > /sys/bus/platform/devices/2184000.usb/power/wakeup
-2.3 Enable PHY's wakeup (optional)
-echo enabled > /sys/bus/platform/devices/20c9000.usbphy/power/wakeup
-2.4 Enable roothub's wakeup
-echo enabled > /sys/bus/usb/devices/usb1/power/wakeup
-2.5 Enable related device's wakeup
-echo enabled > /sys/bus/usb/devices/1-1/power/wakeup
+2.1 Enable core's wakeup::
+
+ echo enabled > /sys/bus/platform/devices/ci_hdrc.0/power/wakeup
+
+2.2 Enable glue layer's wakeup::
+
+ echo enabled > /sys/bus/platform/devices/2184000.usb/power/wakeup
+
+2.3 Enable PHY's wakeup (optional)::
+
+ echo enabled > /sys/bus/platform/devices/20c9000.usbphy/power/wakeup
+
+2.4 Enable roothub's wakeup::
+
+ echo enabled > /sys/bus/usb/devices/usb1/power/wakeup
+
+2.5 Enable related device's wakeup::
+
+ echo enabled > /sys/bus/usb/devices/1-1/power/wakeup
If the system has only one usb port, and you want usb wakeup at this port, you
-can use below script to enable usb wakeup.
-for i in $(find /sys -name wakeup | grep usb);do echo enabled > $i;done;
+can use the below script to enable usb wakeup::
+
+ for i in $(find /sys -name wakeup | grep usb);do echo enabled > $i;done;
diff --git a/Documentation/usb/dwc3.txt b/Documentation/usb/dwc3.txt
index 1d02c01d1c7c..f94a7ba16573 100644
--- a/Documentation/usb/dwc3.txt
+++ b/Documentation/usb/dwc3.txt
@@ -1,6 +1,11 @@
+===========
+DWC3 driver
+===========
+
+
+TODO
+~~~~
- TODO
-~~~~~~
Please pick something while reading :)
- Convert interrupt handler to per-ep-thread-irq
@@ -9,6 +14,7 @@ Please pick something while reading :)
until the command completes which is bad.
Implementation idea:
+
- dwc core implements a demultiplexing irq chip for interrupts per
endpoint. The interrupt numbers are allocated during probe and belong
to the device. If MSI provides per-endpoint interrupt this dummy
@@ -19,6 +25,7 @@ Please pick something while reading :)
- dwc3_send_gadget_ep_cmd() will sleep in wait_for_completion_timeout()
until the command completes.
- the interrupt handler is split into the following pieces:
+
- primary handler of the device
goes through every event and calls generic_handle_irq() for event
it. On return from generic_handle_irq() in acknowledges the event
@@ -40,6 +47,7 @@ Please pick something while reading :)
for command completion.
Latency:
+
There should be no increase in latency since the interrupt-thread has a
high priority and will be run before an average task in user land
(except the user changed priorities).
diff --git a/Documentation/usb/ehci.txt b/Documentation/usb/ehci.txt
index 160bd6c3ab7b..31f650e7c1b4 100644
--- a/Documentation/usb/ehci.txt
+++ b/Documentation/usb/ehci.txt
@@ -1,3 +1,7 @@
+===========
+EHCI driver
+===========
+
27-Dec-2002
The EHCI driver is used to talk to high speed USB 2.0 devices using
@@ -40,7 +44,8 @@ APIs exposed to USB device drivers.
<dbrownell@users.sourceforge.net>
-FUNCTIONALITY
+Functionality
+=============
This driver is regularly tested on x86 hardware, and has also been
used on PPC hardware so big/little endianness issues should be gone.
@@ -48,6 +53,7 @@ It's believed to do all the right PCI magic so that I/O works even on
systems with interesting DMA mapping issues.
Transfer Types
+--------------
At this writing the driver should comfortably handle all control, bulk,
and interrupt transfers, including requests to USB 1.1 devices through
@@ -63,6 +69,7 @@ since EHCI represents these with a different data structure. So for now,
most USB audio and video devices can't be connected to high speed buses.
Driver Behavior
+---------------
Transfers of all types can be queued. This means that control transfers
from a driver on one interface (or through usbfs) won't interfere with
@@ -83,14 +90,15 @@ limits on the number of periodic transactions that can be scheduled,
and prevent use of polling intervals of less than one frame.
-USE BY
+Use by
+======
Assuming you have an EHCI controller (on a PCI card or motherboard)
-and have compiled this driver as a module, load this like:
+and have compiled this driver as a module, load this like::
# modprobe ehci-hcd
-and remove it by:
+and remove it by::
# rmmod ehci-hcd
@@ -112,13 +120,16 @@ If you're using this driver on a 2.5 kernel, and you've enabled USB
debugging support, you'll see three files in the "sysfs" directory for
any EHCI controller:
- "async" dumps the asynchronous schedule, used for control
+ "async"
+ dumps the asynchronous schedule, used for control
and bulk transfers. Shows each active qh and the qtds
pending, usually one qtd per urb. (Look at it with
usb-storage doing disk I/O; watch the request queues!)
- "periodic" dumps the periodic schedule, used for interrupt
+ "periodic"
+ dumps the periodic schedule, used for interrupt
and isochronous transfers. Doesn't show qtds.
- "registers" show controller register state, and
+ "registers"
+ shows controller register state.
The contents of those files can help identify driver problems.
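
On newer kernels these files are typically found under debugfs rather than
sysfs; as a sketch, with <controller> standing for the controller's bus ID::

    # mount -t debugfs none /sys/kernel/debug    # if debugfs is not mounted yet
    # cd /sys/kernel/debug/usb/ehci/<controller>
    # cat async periodic registers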
@@ -136,7 +147,8 @@ transaction translators are in use; some drivers have been seen to behave
badly when they see different faults than OHCI or UHCI report.
-PERFORMANCE
+Performance
+===========
USB 2.0 throughput is gated by two main factors: how fast the host
controller can process requests, and how fast devices can respond to
@@ -156,6 +168,7 @@ hardware and device driver software allow it. Periodic transfer modes
approach the quoted 480 MBit/sec transfer rate.
Hardware Performance
+--------------------
At this writing, individual USB 2.0 devices tend to max out at around
20 MByte/sec transfer rates. This is of course subject to change;
@@ -183,6 +196,7 @@ you issue a control or bulk request you can often expect to learn that
it completed in less than 250 usec (depending on transfer size).
Software Performance
+--------------------
To get even 20 MByte/sec transfer rates, Linux-USB device drivers will
need to keep the EHCI queue full. That means issuing large requests,
@@ -206,9 +220,11 @@ mapping (which might apply an IOMMU) and IRQ reduction, all of which will
help make high speed transfers run as fast as they can.
-TBD: Interrupt and ISO transfer performance issues. Those periodic
-transfers are fully scheduled, so the main issue is likely to be how
-to trigger "high bandwidth" modes.
+TBD:
+ Interrupt and ISO transfer performance issues. Those periodic
+ transfers are fully scheduled, so the main issue is likely to be how
+ to trigger "high bandwidth" modes.
-TBD: More than standard 80% periodic bandwidth allocation is possible
-through sysfs uframe_periodic_max parameter. Describe that.
+TBD:
+ More than standard 80% periodic bandwidth allocation is possible
+ through sysfs uframe_periodic_max parameter. Describe that.
diff --git a/Documentation/usb/functionfs.txt b/Documentation/usb/functionfs.txt
index eaaaea019fc7..7fdc6d840ac5 100644
--- a/Documentation/usb/functionfs.txt
+++ b/Documentation/usb/functionfs.txt
@@ -1,4 +1,6 @@
-*How FunctionFS works*
+====================
+How FunctionFS works
+====================
From kernel point of view it is just a composite function with some
unique behaviour. It may be added to an USB configuration only after
@@ -38,13 +40,13 @@ when mounting.
One can imagine a gadget that has an Ethernet, MTP and HID interfaces
where the last two are implemented via FunctionFS. On user space
-level it would look like this:
+level it would look like this::
-$ insmod g_ffs.ko idVendor=<ID> iSerialNumber=<string> functions=mtp,hid
-$ mkdir /dev/ffs-mtp && mount -t functionfs mtp /dev/ffs-mtp
-$ ( cd /dev/ffs-mtp && mtp-daemon ) &
-$ mkdir /dev/ffs-hid && mount -t functionfs hid /dev/ffs-hid
-$ ( cd /dev/ffs-hid && hid-daemon ) &
+ $ insmod g_ffs.ko idVendor=<ID> iSerialNumber=<string> functions=mtp,hid
+ $ mkdir /dev/ffs-mtp && mount -t functionfs mtp /dev/ffs-mtp
+ $ ( cd /dev/ffs-mtp && mtp-daemon ) &
+ $ mkdir /dev/ffs-hid && mount -t functionfs hid /dev/ffs-hid
+ $ ( cd /dev/ffs-hid && hid-daemon ) &
On kernel level the gadget checks ffs_data->dev_name to identify
whether it's FunctionFS designed for MTP ("mtp") or HID ("hid").
@@ -64,4 +66,3 @@ have been written to their ep0's.
Conversely, the gadget is unregistered after the first USB function
closes its endpoints.
-
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 5908a21fddb6..7d7f2340af42 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -1,26 +1,32 @@
+==============
+Gadget Testing
+==============
+
This file summarizes information on basic testing of USB functions
provided by gadgets.
-1. ACM function
-2. ECM function
-3. ECM subset function
-4. EEM function
-5. FFS function
-6. HID function
-7. LOOPBACK function
-8. MASS STORAGE function
-9. MIDI function
-10. NCM function
-11. OBEX function
-12. PHONET function
-13. RNDIS function
-14. SERIAL function
-15. SOURCESINK function
-16. UAC1 function (legacy implementation)
-17. UAC2 function
-18. UVC function
-19. PRINTER function
-20. UAC1 function (new API)
+.. contents
+
+ 1. ACM function
+ 2. ECM function
+ 3. ECM subset function
+ 4. EEM function
+ 5. FFS function
+ 6. HID function
+ 7. LOOPBACK function
+ 8. MASS STORAGE function
+ 9. MIDI function
+ 10. NCM function
+ 11. OBEX function
+ 12. PHONET function
+ 13. RNDIS function
+ 14. SERIAL function
+ 15. SOURCESINK function
+ 16. UAC1 function (legacy implementation)
+ 17. UAC2 function
+ 18. UVC function
+ 19. PRINTER function
+ 20. UAC1 function (new API)
1. ACM function
@@ -44,13 +50,23 @@ There can be at most 4 ACM/generic serial/OBEX ports in the system.
Testing the ACM function
------------------------
-On the host: cat > /dev/ttyACM<X>
-On the device : cat /dev/ttyGS<Y>
+On the host::
+
+ cat > /dev/ttyACM<X>
+
+On the device::
+
+ cat /dev/ttyGS<Y>
then the other way round
-On the device: cat > /dev/ttyGS<Y>
-On the host: cat /dev/ttyACM<X>
+On the device::
+
+ cat > /dev/ttyGS<Y>
+
+On the host::
+
+ cat /dev/ttyACM<X>
2. ECM function
===============
@@ -63,13 +79,15 @@ Function-specific configfs interface
The function name to use when creating the function directory is "ecm".
The ECM function provides these attributes in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
- qmult - queue length multiplier for high and super speed
- host_addr - MAC address of host's end of this
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
Ethernet over USB link
- dev_addr - MAC address of device's end of this
+ dev_addr MAC address of device's end of this
Ethernet over USB link
+ =============== ==================================================
and after creating the functions/ecm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
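
As an illustrative sketch only (the gadget directory g1 and the addresses are
made up for this example, and a configfs gadget with a config c.1 is assumed
to exist already), overriding the defaults before binding to a UDC could look
like::

    # cd /sys/kernel/config/usb_gadget/g1
    # mkdir functions/ecm.usb0
    # echo 10 > functions/ecm.usb0/qmult
    # echo "aa:bb:cc:dd:ee:f0" > functions/ecm.usb0/host_addr
    # echo "aa:bb:cc:dd:ee:f1" > functions/ecm.usb0/dev_addr
    # ln -s functions/ecm.usb0 configs/c.1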
@@ -82,8 +100,13 @@ Testing the ECM function
Configure IP addresses of the device and the host. Then:
-On the device: ping <host's IP>
-On the host: ping <device's IP>
+On the device::
+
+ ping <host's IP>
+
+On the host::
+
+ ping <device's IP>
3. ECM subset function
======================
@@ -96,13 +119,15 @@ Function-specific configfs interface
The function name to use when creating the function directory is "geth".
The ECM subset function provides these attributes in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
- qmult - queue length multiplier for high and super speed
- host_addr - MAC address of host's end of this
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
Ethernet over USB link
- dev_addr - MAC address of device's end of this
+ dev_addr MAC address of device's end of this
Ethernet over USB link
+ =============== ==================================================
and after creating the functions/geth.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
@@ -115,8 +140,13 @@ Testing the ECM subset function
Configure IP addresses of the device and the host. Then:
-On the device: ping <host's IP>
-On the host: ping <device's IP>
+On the device::
+
+ ping <host's IP>
+
+On the host::
+
+ ping <device's IP>
4. EEM function
===============
@@ -129,13 +159,15 @@ Function-specific configfs interface
The function name to use when creating the function directory is "eem".
The EEM function provides these attributes in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
- qmult - queue length multiplier for high and super speed
- host_addr - MAC address of host's end of this
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
Ethernet over USB link
- dev_addr - MAC address of device's end of this
+ dev_addr MAC address of device's end of this
Ethernet over USB link
+ =============== ==================================================
and after creating the functions/eem.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
@@ -148,8 +180,13 @@ Testing the EEM function
Configure IP addresses of the device and the host. Then:
-On the device: ping <host's IP>
-On the host: ping <device's IP>
+On the device::
+
+ ping <host's IP>
+
+On the host::
+
+ ping <device's IP>
5. FFS function
===============
@@ -172,6 +209,7 @@ Testing the FFS function
------------------------
On the device: start the function's userspace daemon, enable the gadget
+
On the host: use the USB function provided by the device
6. HID function
@@ -185,39 +223,43 @@ Function-specific configfs interface
The function name to use when creating the function directory is "hid".
The HID function provides these attributes in its function directory:
- protocol - HID protocol to use
- report_desc - data to be used in HID reports, except data
+ =============== ===========================================
+ protocol HID protocol to use
+ report_desc data to be used in HID reports, except data
passed with /dev/hidg<X>
- report_length - HID report length
- subclass - HID subclass to use
+ report_length HID report length
+ subclass HID subclass to use
+ =============== ===========================================
For a keyboard the protocol and the subclass are 1, the report_length is 8,
-while the report_desc is:
+while the report_desc is::
-$ hd my_report_desc
-00000000 05 01 09 06 a1 01 05 07 19 e0 29 e7 15 00 25 01 |..........)...%.|
-00000010 75 01 95 08 81 02 95 01 75 08 81 03 95 05 75 01 |u.......u.....u.|
-00000020 05 08 19 01 29 05 91 02 95 01 75 03 91 03 95 06 |....).....u.....|
-00000030 75 08 15 00 25 65 05 07 19 00 29 65 81 00 c0 |u...%e....)e...|
-0000003f
+ $ hd my_report_desc
+ 00000000 05 01 09 06 a1 01 05 07 19 e0 29 e7 15 00 25 01 |..........)...%.|
+ 00000010 75 01 95 08 81 02 95 01 75 08 81 03 95 05 75 01 |u.......u.....u.|
+ 00000020 05 08 19 01 29 05 91 02 95 01 75 03 91 03 95 06 |....).....u.....|
+ 00000030 75 08 15 00 25 65 05 07 19 00 29 65 81 00 c0 |u...%e....)e...|
+ 0000003f
-Such a sequence of bytes can be stored to the attribute with echo:
+Such a sequence of bytes can be stored to the attribute with echo::
-$ echo -ne \\x05\\x01\\x09\\x06\\xa1.....
+ $ echo -ne \\x05\\x01\\x09\\x06\\xa1.....
Testing the HID function
------------------------
Device:
+
- create the gadget
- connect the gadget to a host, preferably not the one used
-to control the gadget
+ to control the gadget
- run a program which writes to /dev/hidg<N>, e.g.
-a userspace program found in Documentation/usb/gadget_hid.txt:
+ a userspace program found in Documentation/usb/gadget_hid.txt::
-$ ./hid_gadget_test /dev/hidg0 keyboard
+ $ ./hid_gadget_test /dev/hidg0 keyboard
Host:
+
- observe the keystrokes from the gadget
7. LOOPBACK function
@@ -231,13 +273,16 @@ Function-specific configfs interface
The function name to use when creating the function directory is "Loopback".
The LOOPBACK function provides these attributes in its function directory:
- qlen - depth of loopback queue
- bulk_buflen - buffer length
+ =============== =======================
+ qlen depth of loopback queue
+ bulk_buflen buffer length
+ =============== =======================
Testing the LOOPBACK function
-----------------------------
device: run the gadget
+
host: test-usb (tools/usb/testusb.c)
8. MASS STORAGE function
@@ -252,18 +297,20 @@ The function name to use when creating the function directory is "mass_storage".
The MASS STORAGE function provides these attributes in its directory:
files:
- stall - Set to permit function to halt bulk endpoints.
+ =============== ==============================================
+ stall Set to permit function to halt bulk endpoints.
Disabled on some USB devices known not to work
correctly. You should set it to true.
- num_buffers - Number of pipeline buffers. Valid numbers
+ num_buffers Number of pipeline buffers. Valid numbers
are 2..4. Available only if
CONFIG_USB_GADGET_DEBUG_FILES is set.
+ =============== ==============================================
and a default lun.0 directory corresponding to SCSI LUN #0.
-A new lun can be added with mkdir:
+A new lun can be added with mkdir::
-$ mkdir functions/mass_storage.0/partition.5
+ $ mkdir functions/mass_storage.0/partition.5
Lun numbering does not have to be continuous, except for lun #0 which is
created by default. A maximum of 8 luns can be specified and they all must be
@@ -273,18 +320,20 @@ although it is not mandatory.
In each lun directory there are the following attribute files:
- file - The path to the backing file for the LUN.
+ =============== ==============================================
+ file The path to the backing file for the LUN.
Required if LUN is not marked as removable.
- ro - Flag specifying access to the LUN shall be
+ ro Flag specifying access to the LUN shall be
read-only. This is implied if CD-ROM emulation
is enabled as well as when it was impossible
to open "filename" in R/W mode.
- removable - Flag specifying that LUN shall be indicated as
+ removable Flag specifying that LUN shall be indicated as
being removable.
- cdrom - Flag specifying that LUN shall be reported as
+ cdrom Flag specifying that LUN shall be reported as
being a CD-ROM.
- nofua - Flag specifying that FUA flag
+ nofua Flag specifying that FUA flag
in SCSI WRITE(10,12)
+ =============== ==============================================
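
A minimal sketch of backing an extra LUN with a file image, using the lun
attributes listed above (the image path and the gadget directory are invented
for this example)::

    # dd if=/dev/zero of=/tmp/backing.img bs=1M count=64
    # cd /sys/kernel/config/usb_gadget/g1
    # mkdir functions/mass_storage.0/lun.1
    # echo 1 > functions/mass_storage.0/lun.1/removable
    # echo /tmp/backing.img > functions/mass_storage.0/lun.1/file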
Testing the MASS STORAGE function
---------------------------------
@@ -304,12 +353,14 @@ Function-specific configfs interface
The function name to use when creating the function directory is "midi".
The MIDI function provides these attributes in its function directory:
- buflen - MIDI buffer length
- id - ID string for the USB MIDI adapter
- in_ports - number of MIDI input ports
- index - index value for the USB MIDI adapter
- out_ports - number of MIDI output ports
- qlen - USB read request queue length
+ =============== ====================================
+ buflen MIDI buffer length
+ id ID string for the USB MIDI adapter
+ in_ports number of MIDI input ports
+ index index value for the USB MIDI adapter
+ out_ports number of MIDI output ports
+ qlen USB read request queue length
+ =============== ====================================
Testing the MIDI function
-------------------------
@@ -317,60 +368,63 @@ Testing the MIDI function
There are two cases: playing a mid from the gadget to
the host and playing a mid from the host to the gadget.
-1) Playing a mid from the gadget to the host
-host)
+1) Playing a mid from the gadget to the host:
+
+host::
-$ arecordmidi -l
- Port Client name Port name
- 14:0 Midi Through Midi Through Port-0
- 24:0 MIDI Gadget MIDI Gadget MIDI 1
-$ arecordmidi -p 24:0 from_gadget.mid
+ $ arecordmidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
+ $ arecordmidi -p 24:0 from_gadget.mid
-gadget)
+gadget::
-$ aplaymidi -l
- Port Client name Port name
- 20:0 f_midi f_midi
+ $ aplaymidi -l
+ Port Client name Port name
+ 20:0 f_midi f_midi
-$ aplaymidi -p 20:0 to_host.mid
+ $ aplaymidi -p 20:0 to_host.mid
2) Playing a mid from the host to the gadget
-gadget)
-$ arecordmidi -l
- Port Client name Port name
- 20:0 f_midi f_midi
+gadget::
+
+ $ arecordmidi -l
+ Port Client name Port name
+ 20:0 f_midi f_midi
-$ arecordmidi -p 20:0 from_host.mid
+ $ arecordmidi -p 20:0 from_host.mid
-host)
+host::
-$ aplaymidi -l
- Port Client name Port name
- 14:0 Midi Through Midi Through Port-0
- 24:0 MIDI Gadget MIDI Gadget MIDI 1
+ $ aplaymidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
-$ aplaymidi -p24:0 to_gadget.mid
+ $ aplaymidi -p24:0 to_gadget.mid
The from_gadget.mid should sound identical to the to_host.mid.
+
The from_host.mid should sound identical to the to_gadget.mid.
-MIDI files can be played to speakers/headphones with e.g. timidity installed
+MIDI files can be played to speakers/headphones with e.g. timidity installed::
-$ aplaymidi -l
- Port Client name Port name
- 14:0 Midi Through Midi Through Port-0
- 24:0 MIDI Gadget MIDI Gadget MIDI 1
-128:0 TiMidity TiMidity port 0
-128:1 TiMidity TiMidity port 1
-128:2 TiMidity TiMidity port 2
-128:3 TiMidity TiMidity port 3
+ $ aplaymidi -l
+ Port Client name Port name
+ 14:0 Midi Through Midi Through Port-0
+ 24:0 MIDI Gadget MIDI Gadget MIDI 1
+ 128:0 TiMidity TiMidity port 0
+ 128:1 TiMidity TiMidity port 1
+ 128:2 TiMidity TiMidity port 2
+ 128:3 TiMidity TiMidity port 3
-$ aplaymidi -p 128:0 file.mid
+ $ aplaymidi -p 128:0 file.mid
-MIDI ports can be logically connected using the aconnect utility, e.g.:
+MIDI ports can be logically connected using the aconnect utility, e.g.::
-$ aconnect 24:0 128:0 # try it on the host
+ $ aconnect 24:0 128:0 # try it on the host
After the gadget's MIDI port is connected to timidity's MIDI port,
whatever is played at the gadget side with aplaymidi -l is audible
@@ -387,13 +441,15 @@ Function-specific configfs interface
The function name to use when creating the function directory is "ncm".
The NCM function provides these attributes in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
- qmult - queue length multiplier for high and super speed
- host_addr - MAC address of host's end of this
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
Ethernet over USB link
- dev_addr - MAC address of device's end of this
+ dev_addr MAC address of device's end of this
Ethernet over USB link
+ =============== ==================================================
and after creating the functions/ncm.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
@@ -406,8 +462,13 @@ Testing the NCM function
Configure IP addresses of the device and the host. Then:
-On the device: ping <host's IP>
-On the host: ping <device's IP>
+On the device::
+
+ ping <host's IP>
+
+On the host::
+
+ ping <device's IP>
11. OBEX function
=================
@@ -429,13 +490,18 @@ There can be at most 4 ACM/generic serial/OBEX ports in the system.
Testing the OBEX function
-------------------------
-On device: seriald -f /dev/ttyGS<Y> -s 1024
-On host: serialc -v <vendorID> -p <productID> -i<interface#> -a1 -s1024 \
- -t<out endpoint addr> -r<in endpoint addr>
+On device::
+
+ seriald -f /dev/ttyGS<Y> -s 1024
+
+On host::
+
+ serialc -v <vendorID> -p <productID> -i<interface#> -a1 -s1024 \
+ -t<out endpoint addr> -r<in endpoint addr>
where seriald and serialc are Felipe's utilities found here:
-https://github.com/felipebalbi/usb-tools.git master
+ https://github.com/felipebalbi/usb-tools.git master
12. PHONET function
===================
@@ -448,8 +514,10 @@ Function-specific configfs interface
The function name to use when creating the function directory is "phonet".
The PHONET function provides just one attribute in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
+ =============== ==================================================
Testing the PHONET function
---------------------------
@@ -464,41 +532,41 @@ These tools are required:
git://git.gitorious.org/meego-cellular/phonet-utils.git
-On the host:
+On the host::
-$ ./phonet -a 0x10 -i usbpn0
-$ ./pnroute add 0x6c usbpn0
-$./pnroute add 0x10 usbpn0
-$ ifconfig usbpn0 up
+ $ ./phonet -a 0x10 -i usbpn0
+ $ ./pnroute add 0x6c usbpn0
+ $ ./pnroute add 0x10 usbpn0
+ $ ifconfig usbpn0 up
-On the device:
+On the device::
-$ ./phonet -a 0x6c -i upnlink0
-$ ./pnroute add 0x10 upnlink0
-$ ifconfig upnlink0 up
+ $ ./phonet -a 0x6c -i upnlink0
+ $ ./pnroute add 0x10 upnlink0
+ $ ifconfig upnlink0 up
-Then a test program can be used:
+Then a test program can be used::
-http://www.spinics.net/lists/linux-usb/msg85690.html
+ http://www.spinics.net/lists/linux-usb/msg85690.html
-On the device:
+On the device::
-$ ./pnxmit -a 0x6c -r
+ $ ./pnxmit -a 0x6c -r
-On the host:
+On the host::
-$ ./pnxmit -a 0x10 -s 0x6c
+ $ ./pnxmit -a 0x10 -s 0x6c
As a result some data should be sent from host to device.
Then the other way round:
-On the host:
+On the host::
-$ ./pnxmit -a 0x10 -r
+ $ ./pnxmit -a 0x10 -r
-On the device:
+On the device::
-$ ./pnxmit -a 0x6c -s 0x10
+ $ ./pnxmit -a 0x6c -s 0x10
13. RNDIS function
==================
@@ -511,13 +579,15 @@ Function-specific configfs interface
The function name to use when creating the function directory is "rndis".
The RNDIS function provides these attributes in its function directory:
- ifname - network device interface name associated with this
+ =============== ==================================================
+ ifname network device interface name associated with this
function instance
- qmult - queue length multiplier for high and super speed
- host_addr - MAC address of host's end of this
+ qmult queue length multiplier for high and super speed
+ host_addr MAC address of host's end of this
Ethernet over USB link
- dev_addr - MAC address of device's end of this
+ dev_addr MAC address of device's end of this
Ethernet over USB link
+ =============== ==================================================
and after creating the functions/rndis.<instance name> they contain default
values: qmult is 5, dev_addr and host_addr are randomly selected.
@@ -530,8 +600,13 @@ Testing the RNDIS function
Configure IP addresses of the device and the host. Then:
-On the device: ping <host's IP>
-On the host: ping <device's IP>
+On the device::
+
+ ping <host's IP>
+
+On the host::
+
+ ping <device's IP>
14. SERIAL function
===================
@@ -553,15 +628,28 @@ There can be at most 4 ACM/generic serial/OBEX ports in the system.
Testing the SERIAL function
---------------------------
-On host: insmod usbserial
- echo VID PID >/sys/bus/usb-serial/drivers/generic/new_id
-On host: cat > /dev/ttyUSB<X>
-On target: cat /dev/ttyGS<Y>
+On host::
+
+ insmod usbserial
+ echo VID PID >/sys/bus/usb-serial/drivers/generic/new_id
+
+On host::
+
+ cat > /dev/ttyUSB<X>
+
+On target::
+
+ cat /dev/ttyGS<Y>
then the other way round
-On target: cat > /dev/ttyGS<Y>
-On host: cat /dev/ttyUSB<X>
+On target::
+
+ cat > /dev/ttyGS<Y>
+
+On host::
+
+ cat /dev/ttyUSB<X>
15. SOURCESINK function
=======================
@@ -574,24 +662,27 @@ Function-specific configfs interface
The function name to use when creating the function directory is "SourceSink".
The SOURCESINK function provides these attributes in its function directory:
- pattern - 0 (all zeros), 1 (mod63), 2 (none)
- isoc_interval - 1..16
- isoc_maxpacket - 0 - 1023 (fs), 0 - 1024 (hs/ss)
- isoc_mult - 0..2 (hs/ss only)
- isoc_maxburst - 0..15 (ss only)
- bulk_buflen - buffer length
- bulk_qlen - depth of queue for bulk
- iso_qlen - depth of queue for iso
+ =============== ==================================
+ pattern 0 (all zeros), 1 (mod63), 2 (none)
+ isoc_interval 1..16
+ isoc_maxpacket 0 - 1023 (fs), 0 - 1024 (hs/ss)
+ isoc_mult 0..2 (hs/ss only)
+ isoc_maxburst 0..15 (ss only)
+ bulk_buflen buffer length
+ bulk_qlen depth of queue for bulk
+ iso_qlen depth of queue for iso
+ =============== ==================================
Testing the SOURCESINK function
-------------------------------
device: run the gadget
+
host: test-usb (tools/usb/testusb.c)
16. UAC1 function (legacy implementation)
-=================
+=========================================
The function is provided by usb_f_uac1_legacy.ko module.
@@ -602,12 +693,14 @@ The function name to use when creating the function directory
is "uac1_legacy".
The uac1 function provides these attributes in its function directory:
- audio_buf_size - audio buffer size
- fn_cap - capture pcm device file name
- fn_cntl - control device file name
- fn_play - playback pcm device file name
- req_buf_size - ISO OUT endpoint request buffer size
- req_count - ISO OUT endpoint request count
+ =============== ====================================
+ audio_buf_size audio buffer size
+ fn_cap capture pcm device file name
+ fn_cntl control device file name
+ fn_play playback pcm device file name
+ req_buf_size ISO OUT endpoint request buffer size
+ req_count ISO OUT endpoint request count
+ =============== ====================================
The attributes have sane default values.
@@ -615,7 +708,10 @@ Testing the UAC1 function
-------------------------
device: run the gadget
-host: aplay -l # should list our USB Audio Gadget
+
+host::
+
+ aplay -l # should list our USB Audio Gadget
17. UAC2 function
=================
@@ -628,14 +724,16 @@ Function-specific configfs interface
The function name to use when creating the function directory is "uac2".
The uac2 function provides these attributes in its function directory:
- c_chmask - capture channel mask
- c_srate - capture sampling rate
- c_ssize - capture sample size (bytes)
- p_chmask - playback channel mask
- p_srate - playback sampling rate
- p_ssize - playback sample size (bytes)
- req_number - the number of pre-allocated request for both capture
- and playback
+ =============== ====================================================
+ c_chmask capture channel mask
+ c_srate capture sampling rate
+ c_ssize capture sample size (bytes)
+ p_chmask playback channel mask
+ p_srate playback sampling rate
+ p_ssize playback sample size (bytes)
+ req_number the number of pre-allocated request for both capture
+ and playback
+ =============== ====================================================
The attributes have sane default values.
@@ -648,14 +746,14 @@ host: aplay -l # should list our USB Audio Gadget
This function does not require real hardware support, it just
sends a stream of audio data to/from the host. In order to
actually hear something at the device side, a command similar
-to this must be used at the device side:
+to this must be used at the device side::
-$ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
+ $ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
-e.g.:
+e.g.::
-$ arecord -f dat -t wav -D hw:CARD=UAC2Gadget,DEV=0 | \
-aplay -D default:CARD=OdroidU3
+ $ arecord -f dat -t wav -D hw:CARD=UAC2Gadget,DEV=0 | \
+ aplay -D default:CARD=OdroidU3
18. UVC function
================
@@ -668,66 +766,73 @@ Function-specific configfs interface
The function name to use when creating the function directory is "uvc".
The uvc function provides these attributes in its function directory:
- streaming_interval - interval for polling endpoint for data transfers
- streaming_maxburst - bMaxBurst for super speed companion descriptor
- streaming_maxpacket - maximum packet size this endpoint is capable of
- sending or receiving when this configuration is
- selected
+ =================== ================================================
+ streaming_interval interval for polling endpoint for data transfers
+ streaming_maxburst bMaxBurst for super speed companion descriptor
+ streaming_maxpacket maximum packet size this endpoint is capable of
+ sending or receiving when this configuration is
+ selected
+ =================== ================================================
There are also "control" and "streaming" subdirectories, each of which contains
a number of its own subdirectories. There are some sane defaults provided, but
the user must provide the following:
- control header - create in control/header, link from control/class/fs
- and/or control/class/ss
- streaming header - create in streaming/header, link from
- streaming/class/fs and/or streaming/class/hs and/or
- streaming/class/ss
- format description - create in streaming/mjpeg and/or
- streaming/uncompressed
- frame description - create in streaming/mjpeg/<format> and/or in
- streaming/uncompressed/<format>
+ ================== ====================================================
+ control header create in control/header, link from control/class/fs
+ and/or control/class/ss
+ streaming header create in streaming/header, link from
+ streaming/class/fs and/or streaming/class/hs and/or
+ streaming/class/ss
+ format description create in streaming/mjpeg and/or
+ streaming/uncompressed
+ frame description create in streaming/mjpeg/<format> and/or in
+ streaming/uncompressed/<format>
+ ================== ====================================================
Each frame description contains frame interval specification, and each
such specification consists of a number of lines with an interval value
-in each line. The rules stated above are best illustrated with an example:
-
-# mkdir functions/uvc.usb0/control/header/h
-# cd functions/uvc.usb0/control/
-# ln -s header/h class/fs
-# ln -s header/h class/ss
-# mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
-# cat <<EOF > functions/uvc.usb0/streaming/uncompressed/u/360p/dwFrameInterval
-666666
-1000000
-5000000
-EOF
-# cd $GADGET_CONFIGFS_ROOT
-# mkdir functions/uvc.usb0/streaming/header/h
-# cd functions/uvc.usb0/streaming/header/h
-# ln -s ../../uncompressed/u
-# cd ../../class/fs
-# ln -s ../../header/h
-# cd ../../class/hs
-# ln -s ../../header/h
-# cd ../../class/ss
-# ln -s ../../header/h
+in each line. The rules stated above are best illustrated with an example::
+
+ # mkdir functions/uvc.usb0/control/header/h
+ # cd functions/uvc.usb0/control/
+ # ln -s header/h class/fs
+ # ln -s header/h class/ss
+ # mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p
+ # cat <<EOF > functions/uvc.usb0/streaming/uncompressed/u/360p/dwFrameInterval
+ 666666
+ 1000000
+ 5000000
+ EOF
+ # cd $GADGET_CONFIGFS_ROOT
+ # mkdir functions/uvc.usb0/streaming/header/h
+ # cd functions/uvc.usb0/streaming/header/h
+ # ln -s ../../uncompressed/u
+ # cd ../../class/fs
+ # ln -s ../../header/h
+ # cd ../../class/hs
+ # ln -s ../../header/h
+ # cd ../../class/ss
+ # ln -s ../../header/h
Testing the UVC function
------------------------
-device: run the gadget, modprobe vivid
+device: run the gadget, modprobe vivid::
-# uvc-gadget -u /dev/video<uvc video node #> -v /dev/video<vivid video node #>
+ # uvc-gadget -u /dev/video<uvc video node #> -v /dev/video<vivid video node #>
where uvc-gadget is this program:
-http://git.ideasonboard.org/uvc-gadget.git
+ http://git.ideasonboard.org/uvc-gadget.git
with these patches:
-http://www.spinics.net/lists/linux-usb/msg99220.html
-host: luvcview -f yuv
+ http://www.spinics.net/lists/linux-usb/msg99220.html
+
+host::
+
+ luvcview -f yuv
19. PRINTER function
====================
@@ -740,16 +845,19 @@ Function-specific configfs interface
The function name to use when creating the function directory is "printer".
The printer function provides these attributes in its function directory:
- pnp_string - Data to be passed to the host in pnp string
- q_len - Number of requests per endpoint
+ ========== ===========================================
+ pnp_string Data to be passed to the host in pnp string
+ q_len Number of requests per endpoint
+ ========== ===========================================
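
Both attributes can be set like any other configfs attribute before the gadget
is bound; the instance name and the IEEE 1284 style ID string below are only
an illustration::

    echo "MFG:Linux;MDL:Configfs Printer;CLS:PRINTER;" > functions/printer.usb0/pnp_string
    echo 10 > functions/printer.usb0/q_len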
Testing the PRINTER function
----------------------------
The most basic testing:
-device: run the gadget
-# ls -l /devices/virtual/usb_printer_gadget/
+device: run the gadget::
+
+ # ls -l /devices/virtual/usb_printer_gadget/
should show g_printer<number>.
@@ -761,23 +869,28 @@ If udev is active, then e.g. /dev/usb/lp0 should appear.
host->device transmission:
-device:
-# cat /dev/g_printer<number>
-host:
-# cat > /dev/usb/lp0
+device::
-device->host transmission:
+ # cat /dev/g_printer<number>
-# cat > /dev/g_printer<number>
-host:
-# cat /dev/usb/lp0
+host::
+
+ # cat > /dev/usb/lp0
+
+device->host transmission::
+
+ # cat > /dev/g_printer<number>
+
+host::
+
+ # cat /dev/usb/lp0
More advanced testing can be done with the prn_example
described in Documentation/usb/gadget_printer.txt.
20. UAC1 function (virtual ALSA card, using u_audio API)
-=================
+========================================================
The function is provided by usb_f_uac1.ko module.
It will create a virtual ALSA card and the audio streams are simply
@@ -789,14 +902,16 @@ Function-specific configfs interface
The function name to use when creating the function directory is "uac1".
The uac1 function provides these attributes in its function directory:
- c_chmask - capture channel mask
- c_srate - capture sampling rate
- c_ssize - capture sample size (bytes)
- p_chmask - playback channel mask
- p_srate - playback sampling rate
- p_ssize - playback sample size (bytes)
- req_number - the number of pre-allocated request for both capture
- and playback
+ ========== ====================================================
+ c_chmask capture channel mask
+ c_srate capture sampling rate
+ c_ssize capture sample size (bytes)
+ p_chmask playback channel mask
+ p_srate playback sampling rate
+ p_ssize playback sample size (bytes)
+ req_number the number of pre-allocated requests for both capture
+ and playback
+ ========== ====================================================
The attributes have sane default values.
@@ -809,11 +924,11 @@ host: aplay -l # should list our USB Audio Gadget
This function does not require real hardware support, it just
sends a stream of audio data to/from the host. In order to
actually hear something at the device side, a command similar
-to this must be used at the device side:
+to this must be used at the device side::
-$ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
+ $ arecord -f dat -t wav -D hw:2,0 | aplay -D hw:0,0 &
-e.g.:
+e.g.::
-$ arecord -f dat -t wav -D hw:CARD=UAC1Gadget,DEV=0 | \
-aplay -D default:CARD=OdroidU3
+ $ arecord -f dat -t wav -D hw:CARD=UAC1Gadget,DEV=0 | \
+ aplay -D default:CARD=OdroidU3
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index b8cb38a98c19..54fb08baae22 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -1,11 +1,9 @@
+============================================
+Linux USB gadget configured through configfs
+============================================
-
-
- Linux USB gadget configured through configfs
-
-
- 25th April 2013
+25th April 2013
@@ -26,7 +24,7 @@ Linux provides a number of functions for gadgets to use.
Creating a gadget means deciding what configurations there will be
and which functions each configuration will provide.
-Configfs (please see Documentation/filesystems/configfs/*) lends itself nicely
+Configfs (please see `Documentation/filesystems/configfs/*`) lends itself nicely
for the purpose of telling the kernel about the above mentioned decision.
This document is about how to do it.
@@ -51,44 +49,46 @@ Usage
made available through configfs can be seen here:
http://www.spinics.net/lists/linux-usb/msg76388.html)
-$ modprobe libcomposite
-$ mount none $CONFIGFS_HOME -t configfs
+::
+
+ $ modprobe libcomposite
+ $ mount none $CONFIGFS_HOME -t configfs
where CONFIGFS_HOME is the mount point for configfs
1. Creating the gadgets
-----------------------
-For each gadget to be created its corresponding directory must be created:
+For each gadget to be created its corresponding directory must be created::
-$ mkdir $CONFIGFS_HOME/usb_gadget/<gadget name>
+ $ mkdir $CONFIGFS_HOME/usb_gadget/<gadget name>
-e.g.:
+e.g.::
-$ mkdir $CONFIGFS_HOME/usb_gadget/g1
+ $ mkdir $CONFIGFS_HOME/usb_gadget/g1
-...
-...
-...
+ ...
+ ...
+ ...
-$ cd $CONFIGFS_HOME/usb_gadget/g1
+ $ cd $CONFIGFS_HOME/usb_gadget/g1
-Each gadget needs to have its vendor id <VID> and product id <PID> specified:
+Each gadget needs to have its vendor id <VID> and product id <PID> specified::
-$ echo <VID> > idVendor
-$ echo <PID> > idProduct
+ $ echo <VID> > idVendor
+ $ echo <PID> > idProduct
A gadget also needs its serial number, manufacturer and product strings.
In order to have a place to store them, a strings subdirectory must be created
-for each language, e.g.:
+for each language, e.g.::
-$ mkdir strings/0x409
+ $ mkdir strings/0x409
-Then the strings can be specified:
+Then the strings can be specified::
-$ echo <serial number> > strings/0x409/serialnumber
-$ echo <manufacturer> > strings/0x409/manufacturer
-$ echo <product> > strings/0x409/product
+ $ echo <serial number> > strings/0x409/serialnumber
+ $ echo <manufacturer> > strings/0x409/manufacturer
+ $ echo <product> > strings/0x409/product
2. Creating the configurations
------------------------------
@@ -99,43 +99,43 @@ directories must be created:
$ mkdir configs/<name>.<number>
where <name> can be any string which is legal in a filesystem and the
-<number> is the configuration's number, e.g.:
+<number> is the configuration's number, e.g.::
-$ mkdir configs/c.1
+ $ mkdir configs/c.1
-...
-...
-...
+ ...
+ ...
+ ...
Each configuration also needs its strings, so a subdirectory must be created
-for each language, e.g.:
+for each language, e.g.::
-$ mkdir configs/c.1/strings/0x409
+ $ mkdir configs/c.1/strings/0x409
-Then the configuration string can be specified:
+Then the configuration string can be specified::
-$ echo <configuration> > configs/c.1/strings/0x409/configuration
+ $ echo <configuration> > configs/c.1/strings/0x409/configuration
-Some attributes can also be set for a configuration, e.g.:
+Some attributes can also be set for a configuration, e.g.::
-$ echo 120 > configs/c.1/MaxPower
+ $ echo 120 > configs/c.1/MaxPower
3. Creating the functions
-------------------------
The gadget will provide some functions, for each function its corresponding
-directory must be created:
+directory must be created::
-$ mkdir functions/<name>.<instance name>
+ $ mkdir functions/<name>.<instance name>
where <name> corresponds to one of allowed function names and instance name
-is an arbitrary string allowed in a filesystem, e.g.:
+is an arbitrary string allowed in a filesystem, e.g.::
-$ mkdir functions/ncm.usb0 # usb_f_ncm.ko gets loaded with request_module()
+ $ mkdir functions/ncm.usb0 # usb_f_ncm.ko gets loaded with request_module()
-...
-...
-...
+ ...
+ ...
+ ...
Each function provides its specific set of attributes, with either read-only
or read-write access. Where applicable they need to be written to as
@@ -149,17 +149,17 @@ At this moment a number of gadgets is created, each of which has a number of
configurations specified and a number of functions available. What remains
is specifying which function is available in which configuration (the same
function can be used in multiple configurations). This is achieved with
-creating symbolic links:
+creating symbolic links::
-$ ln -s functions/<name>.<instance name> configs/<name>.<number>
+ $ ln -s functions/<name>.<instance name> configs/<name>.<number>
-e.g.:
+e.g.::
-$ ln -s functions/ncm.usb0 configs/c.1
+ $ ln -s functions/ncm.usb0 configs/c.1
-...
-...
-...
+ ...
+ ...
+ ...
5. Enabling the gadget
----------------------
@@ -167,123 +167,127 @@ $ ln -s functions/ncm.usb0 configs/c.1
All the above steps serve the purpose of composing the gadget of
configurations and functions.
-An example directory structure might look like this:
-
-.
-./strings
-./strings/0x409
-./strings/0x409/serialnumber
-./strings/0x409/product
-./strings/0x409/manufacturer
-./configs
-./configs/c.1
-./configs/c.1/ncm.usb0 -> ../../../../usb_gadget/g1/functions/ncm.usb0
-./configs/c.1/strings
-./configs/c.1/strings/0x409
-./configs/c.1/strings/0x409/configuration
-./configs/c.1/bmAttributes
-./configs/c.1/MaxPower
-./functions
-./functions/ncm.usb0
-./functions/ncm.usb0/ifname
-./functions/ncm.usb0/qmult
-./functions/ncm.usb0/host_addr
-./functions/ncm.usb0/dev_addr
-./UDC
-./bcdUSB
-./bcdDevice
-./idProduct
-./idVendor
-./bMaxPacketSize0
-./bDeviceProtocol
-./bDeviceSubClass
-./bDeviceClass
+An example directory structure might look like this::
+
+ .
+ ./strings
+ ./strings/0x409
+ ./strings/0x409/serialnumber
+ ./strings/0x409/product
+ ./strings/0x409/manufacturer
+ ./configs
+ ./configs/c.1
+ ./configs/c.1/ncm.usb0 -> ../../../../usb_gadget/g1/functions/ncm.usb0
+ ./configs/c.1/strings
+ ./configs/c.1/strings/0x409
+ ./configs/c.1/strings/0x409/configuration
+ ./configs/c.1/bmAttributes
+ ./configs/c.1/MaxPower
+ ./functions
+ ./functions/ncm.usb0
+ ./functions/ncm.usb0/ifname
+ ./functions/ncm.usb0/qmult
+ ./functions/ncm.usb0/host_addr
+ ./functions/ncm.usb0/dev_addr
+ ./UDC
+ ./bcdUSB
+ ./bcdDevice
+ ./idProduct
+ ./idVendor
+ ./bMaxPacketSize0
+ ./bDeviceProtocol
+ ./bDeviceSubClass
+ ./bDeviceClass
Such a gadget must be finally enabled so that the USB host can enumerate it.
-In order to enable the gadget it must be bound to a UDC (USB Device Controller).
-$ echo <udc name> > UDC
+In order to enable the gadget it must be bound to a UDC (USB Device
+Controller)::
+
+ $ echo <udc name> > UDC
where <udc name> is one of those found in /sys/class/udc/*
-e.g.:
+e.g.::
-$ echo s3c-hsotg > UDC
+ $ echo s3c-hsotg > UDC
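
If the controller name is not known beforehand, it can simply be read from
sysfs first; the name printed depends entirely on the UDC driver present on
the board (s3c-hsotg is just the example used above)::

    $ ls /sys/class/udc
    s3c-hsotg
    $ echo s3c-hsotg > UDC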
6. Disabling the gadget
-----------------------
-$ echo "" > UDC
+::
+
+ $ echo "" > UDC
7. Cleaning up
--------------
-Remove functions from configurations:
+Remove functions from configurations::
-$ rm configs/<config name>.<number>/<function>
+ $ rm configs/<config name>.<number>/<function>
where <config name>.<number> specify the configuration and <function> is
-a symlink to a function being removed from the configuration, e.g.:
+a symlink to a function being removed from the configuration, e.g.::
-$ rm configs/c.1/ncm.usb0
+ $ rm configs/c.1/ncm.usb0
-...
-...
-...
+ ...
+ ...
+ ...
-Remove strings directories in configurations
+Remove strings directories in configurations::
-$ rmdir configs/<config name>.<number>/strings/<lang>
+ $ rmdir configs/<config name>.<number>/strings/<lang>
-e.g.:
+e.g.::
-$ rmdir configs/c.1/strings/0x409
+ $ rmdir configs/c.1/strings/0x409
-...
-...
-...
+ ...
+ ...
+ ...
-and remove the configurations
+and remove the configurations::
-$ rmdir configs/<config name>.<number>
+ $ rmdir configs/<config name>.<number>
-e.g.:
+e.g.::
-rmdir configs/c.1
+ rmdir configs/c.1
-...
-...
-...
+ ...
+ ...
+ ...
-Remove functions (function modules are not unloaded, though)
+Remove functions (function modules are not unloaded, though)::
-$ rmdir functions/<name>.<instance name>
+ $ rmdir functions/<name>.<instance name>
-e.g.:
+e.g.::
-$ rmdir functions/ncm.usb0
+ $ rmdir functions/ncm.usb0
-...
-...
-...
+ ...
+ ...
+ ...
-Remove strings directories in the gadget
+Remove strings directories in the gadget::
-$ rmdir strings/<lang>
+ $ rmdir strings/<lang>
-e.g.:
+e.g.::
-$ rmdir strings/0x409
+ $ rmdir strings/0x409
-and finally remove the gadget:
+and finally remove the gadget::
-$ cd ..
-$ rmdir <gadget name>
+ $ cd ..
+ $ rmdir <gadget name>
-e.g.:
+e.g.::
-$ rmdir g1
+ $ rmdir g1
@@ -305,16 +309,16 @@ configured elements. However, they are embedded in usage-specific
larger structures. In the picture below there is a "cs" which contains
a config_item and an "sa" which contains a configfs_attribute.
-The filesystem view would be like this:
+The filesystem view would be like this::
-./
-./cs (directory)
- |
- +--sa (file)
- |
- .
- .
- .
+ ./
+ ./cs (directory)
+ |
+ +--sa (file)
+ |
+ .
+ .
+ .
Whenever a user reads/writes the "sa" file, a function is called
which accepts a struct config_item and a struct configfs_attribute.
@@ -326,29 +330,31 @@ buffer), while the "store" is for modifying the file's contents (copy data
from the buffer to the cs), but it is up to the implementer of the
two functions to decide what they actually do.
-typedef struct configured_structure cs;
-typedef struct specific_attribute sa;
-
- sa
- +----------------------------------+
- cs | (*show)(cs *, buffer); |
-+-----------------+ | (*store)(cs *, buffer, length); |
-| | | |
-| +-------------+ | | +------------------+ |
-| | struct |-|----|------>|struct | |
-| | config_item | | | |configfs_attribute| |
-| +-------------+ | | +------------------+ |
-| | +----------------------------------+
-| data to be set | .
-| | .
-+-----------------+ .
+::
+
+ typedef struct configured_structure cs;
+ typedef struct specific_attribute sa;
+
+ sa
+ +----------------------------------+
+ cs | (*show)(cs *, buffer); |
+ +-----------------+ | (*store)(cs *, buffer, length); |
+ | | | |
+ | +-------------+ | | +------------------+ |
+ | | struct |-|----|------>|struct | |
+ | | config_item | | | |configfs_attribute| |
+ | +-------------+ | | +------------------+ |
+ | | +----------------------------------+
+ | data to be set | .
+ | | .
+ +-----------------+ .
The file names are decided by the config item/group designer, while
the directories in general can be named at will. A group can have
a number of its default sub-groups created automatically.
For more information on configfs please see
-Documentation/filesystems/configfs/*.
+`Documentation/filesystems/configfs/*`.
The concepts described above translate to USB gadgets like this:
diff --git a/Documentation/usb/gadget_hid.txt b/Documentation/usb/gadget_hid.txt
index 7a0fb8e16e27..098d563040cc 100644
--- a/Documentation/usb/gadget_hid.txt
+++ b/Documentation/usb/gadget_hid.txt
@@ -1,28 +1,31 @@
-
- Linux USB HID gadget driver
+===========================
+Linux USB HID gadget driver
+===========================
Introduction
+============
- The HID Gadget driver provides emulation of USB Human Interface
- Devices (HID). The basic HID handling is done in the kernel,
- and HID reports can be sent/received through I/O on the
- /dev/hidgX character devices.
+The HID Gadget driver provides emulation of USB Human Interface
+Devices (HID). The basic HID handling is done in the kernel,
+and HID reports can be sent/received through I/O on the
+/dev/hidgX character devices.
- For more details about HID, see the developer page on
- http://www.usb.org/developers/hidpage/
+For more details about HID, see the developer page on
+http://www.usb.org/developers/hidpage/
Configuration
+=============
- g_hid is a platform driver, so to use it you need to add
- struct platform_device(s) to your platform code defining the
- HID function descriptors you want to use - E.G. something
- like:
+g_hid is a platform driver, so to use it you need to add
+struct platform_device(s) to your platform code defining the
+HID function descriptors you want to use - E.G. something
+like::
-#include <linux/platform_device.h>
-#include <linux/usb/g_hid.h>
+ #include <linux/platform_device.h>
+ #include <linux/usb/g_hid.h>
-/* hid descriptor for a keyboard */
-static struct hidg_func_descriptor my_hid_data = {
+ /* hid descriptor for a keyboard */
+ static struct hidg_func_descriptor my_hid_data = {
.subclass = 0, /* No subclass */
.protocol = 1, /* Keyboard */
.report_length = 8,
@@ -61,85 +64,87 @@ static struct hidg_func_descriptor my_hid_data = {
0x81, 0x00, /* INPUT (Data,Ary,Abs) */
0xc0 /* END_COLLECTION */
}
-};
+ };
-static struct platform_device my_hid = {
+ static struct platform_device my_hid = {
.name = "hidg",
.id = 0,
.num_resources = 0,
.resource = 0,
.dev.platform_data = &my_hid_data,
-};
+ };
- You can add as many HID functions as you want, only limited by
- the amount of interrupt endpoints your gadget driver supports.
+You can add as many HID functions as you want, only limited by
+the number of interrupt endpoints your gadget driver supports.
Configuration with configfs
+===========================
- Instead of adding fake platform devices and drivers in order to pass
- some data to the kernel, if HID is a part of a gadget composed with
- configfs the hidg_func_descriptor.report_desc is passed to the kernel
- by writing the appropriate stream of bytes to a configfs attribute.
+Instead of adding fake platform devices and drivers in order to pass
+some data to the kernel, if HID is a part of a gadget composed with
+configfs the hidg_func_descriptor.report_desc is passed to the kernel
+by writing the appropriate stream of bytes to a configfs attribute.
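
A minimal sketch of such a composed gadget is shown below; it assumes a gadget
skeleton created as described in Documentation/usb/gadget_configfs.txt, uses
the keyboard descriptor from the example above stored in a file, and both the
instance name hid.usb0 and the descriptor file path are arbitrary::

    cd $CONFIGFS_HOME/usb_gadget/g1
    mkdir functions/hid.usb0
    echo 0 > functions/hid.usb0/subclass       # no subclass
    echo 1 > functions/hid.usb0/protocol       # keyboard
    echo 8 > functions/hid.usb0/report_length
    cat /path/to/kbd_report_desc.bin > functions/hid.usb0/report_desc
    ln -s functions/hid.usb0 configs/c.1
    echo <udc name> > UDC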
Send and receive HID reports
+============================
- HID reports can be sent/received using read/write on the
- /dev/hidgX character devices. See below for an example program
- to do this.
+HID reports can be sent/received using read/write on the
+/dev/hidgX character devices. See below for an example program
+to do this.
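
For a quick smoke test without any helper program, a raw 8-byte boot keyboard
report (modifier byte, reserved byte, six key codes, as in the descriptor
above) can also be pushed from a shell; this sketch presses and then releases
the letter 'a'::

    echo -ne "\x00\x00\x04\x00\x00\x00\x00\x00" > /dev/hidg0   # press 'a'
    echo -ne "\x00\x00\x00\x00\x00\x00\x00\x00" > /dev/hidg0   # release all keys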
- hid_gadget_test is a small interactive program to test the HID
- gadget driver. To use, point it at a hidg device and set the
- device type (keyboard / mouse / joystick) - E.G.:
+hid_gadget_test is a small interactive program to test the HID
+gadget driver. To use, point it at a hidg device and set the
+device type (keyboard / mouse / joystick) - E.G.::
- # hid_gadget_test /dev/hidg0 keyboard
+ # hid_gadget_test /dev/hidg0 keyboard
- You are now in the prompt of hid_gadget_test. You can type any
- combination of options and values. Available options and
- values are listed at program start. In keyboard mode you can
- send up to six values.
+You are now in the prompt of hid_gadget_test. You can type any
+combination of options and values. Available options and
+values are listed at program start. In keyboard mode you can
+send up to six values.
- For example type: g i s t r --left-shift
+For example type: g i s t r --left-shift
- Hit return and the corresponding report will be sent by the
- HID gadget.
+Hit return and the corresponding report will be sent by the
+HID gadget.
- Another interesting example is the caps lock test. Type
- --caps-lock and hit return. A report is then sent by the
- gadget and you should receive the host answer, corresponding
- to the caps lock LED status.
+Another interesting example is the caps lock test. Type
+--caps-lock and hit return. A report is then sent by the
+gadget and you should receive the host answer, corresponding
+to the caps lock LED status::
- --caps-lock
- recv report:2
+ --caps-lock
+ recv report:2
- With this command:
+With this command::
- # hid_gadget_test /dev/hidg1 mouse
+ # hid_gadget_test /dev/hidg1 mouse
- You can test the mouse emulation. Values are two signed numbers.
+You can test the mouse emulation. Values are two signed numbers.
-Sample code
+Sample code::
-/* hid_gadget_test */
+ /* hid_gadget_test */
-#include <pthread.h>
-#include <string.h>
-#include <stdio.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
+ #include <pthread.h>
+ #include <string.h>
+ #include <stdio.h>
+ #include <ctype.h>
+ #include <fcntl.h>
+ #include <errno.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <unistd.h>
-#define BUF_LEN 512
+ #define BUF_LEN 512
-struct options {
+ struct options {
const char *opt;
unsigned char val;
-};
+ };
-static struct options kmod[] = {
+ static struct options kmod[] = {
{.opt = "--left-ctrl", .val = 0x01},
{.opt = "--right-ctrl", .val = 0x10},
{.opt = "--left-shift", .val = 0x02},
@@ -149,9 +154,9 @@ static struct options kmod[] = {
{.opt = "--left-meta", .val = 0x08},
{.opt = "--right-meta", .val = 0x80},
{.opt = NULL}
-};
+ };
-static struct options kval[] = {
+ static struct options kval[] = {
{.opt = "--return", .val = 0x28},
{.opt = "--esc", .val = 0x29},
{.opt = "--bckspc", .val = 0x2a},
@@ -183,10 +188,10 @@ static struct options kval[] = {
{.opt = "--up", .val = 0x52},
{.opt = "--num-lock", .val = 0x53},
{.opt = NULL}
-};
+ };
-int keyboard_fill_report(char report[8], char buf[BUF_LEN], int *hold)
-{
+ int keyboard_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+ {
char *tok = strtok(buf, " ");
int key = 0;
int i = 0;
@@ -229,17 +234,17 @@ int keyboard_fill_report(char report[8], char buf[BUF_LEN], int *hold)
fprintf(stderr, "unknown option: %s\n", tok);
}
return 8;
-}
+ }
-static struct options mmod[] = {
+ static struct options mmod[] = {
{.opt = "--b1", .val = 0x01},
{.opt = "--b2", .val = 0x02},
{.opt = "--b3", .val = 0x04},
{.opt = NULL}
-};
+ };
-int mouse_fill_report(char report[8], char buf[BUF_LEN], int *hold)
-{
+ int mouse_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+ {
char *tok = strtok(buf, " ");
int mvt = 0;
int i = 0;
@@ -274,9 +279,9 @@ int mouse_fill_report(char report[8], char buf[BUF_LEN], int *hold)
fprintf(stderr, "unknown option: %s\n", tok);
}
return 3;
-}
+ }
-static struct options jmod[] = {
+ static struct options jmod[] = {
{.opt = "--b1", .val = 0x10},
{.opt = "--b2", .val = 0x20},
{.opt = "--b3", .val = 0x40},
@@ -287,10 +292,10 @@ static struct options jmod[] = {
{.opt = "--hat4", .val = 0x03},
{.opt = "--hatneutral", .val = 0x04},
{.opt = NULL}
-};
+ };
-int joystick_fill_report(char report[8], char buf[BUF_LEN], int *hold)
-{
+ int joystick_fill_report(char report[8], char buf[BUF_LEN], int *hold)
+ {
char *tok = strtok(buf, " ");
int mvt = 0;
int i = 0;
@@ -326,10 +331,10 @@ int joystick_fill_report(char report[8], char buf[BUF_LEN], int *hold)
fprintf(stderr, "unknown option: %s\n", tok);
}
return 4;
-}
+ }
-void print_options(char c)
-{
+ void print_options(char c)
+ {
int i = 0;
if (c == 'k') {
@@ -358,10 +363,10 @@ void print_options(char c)
" three signed numbers\n"
"--quit to close\n");
}
-}
+ }
-int main(int argc, const char *argv[])
-{
+ int main(int argc, const char *argv[])
+ {
const char *filename = NULL;
int fd = 0;
char buf[BUF_LEN];
@@ -449,4 +454,4 @@ int main(int argc, const char *argv[])
close(fd);
return 0;
-}
+ }
diff --git a/Documentation/usb/gadget_multi.txt b/Documentation/usb/gadget_multi.txt
index b3146dd7aa43..9806b55af301 100644
--- a/Documentation/usb/gadget_multi.txt
+++ b/Documentation/usb/gadget_multi.txt
@@ -1,6 +1,9 @@
- -*- org -*-
+==============================
+Multifunction Composite Gadget
+==============================
-* Overview
+Overview
+========
The Multifunction Composite Gadget (or g_multi) is a composite gadget
that makes extensive use of the composite framework to provide
@@ -17,13 +20,15 @@ have two configurations -- one with RNDIS and another with CDC ECM[3].
Please note that if you use non-standard configuration (that is enable
CDC ECM) you may need to change vendor and/or product ID.
-* Host drivers
+Host drivers
+============
To make use of the gadget one needs to make it work on host side --
without that there's no hope of achieving anything with the gadget.
As one might expect, the things one needs to do vary from system to system.
-** Linux host drivers
+Linux host drivers
+------------------
Since the gadget uses standard composite framework and appears as such
to Linux host it does not need any additional drivers on Linux host
@@ -34,11 +39,13 @@ This is also true for two configuration set-up with RNDIS
configuration being the first one. Linux host will use the second
configuration with CDC ECM which should work better under Linux.
-** Windows host drivers
+Windows host drivers
+--------------------
For the gadget to work under Windows two conditions have to be met:
-*** Detecting as composite gadget
+Detecting as composite gadget
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
First of all, Windows needs to detect the gadget as a USB composite
gadget, which on its own has some conditions[4]. If they are met,
@@ -53,7 +60,8 @@ The only thing to worry is that the gadget has to have a single
configuration so a dual RNDIS and CDC ECM gadget won't work unless you
create a proper INF -- and of course, if you do, submit it!
-*** Installing drivers for each function
+Installing drivers for each function
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The other, trickier thing is making Windows install drivers for each
individual function.
@@ -63,7 +71,8 @@ implementing USB Mass Storage class and selects appropriate driver.
Things are harder with RNDIS and CDC ACM.
-**** RNDIS
+RNDIS
+.....
To make Windows select RNDIS drivers for the first function in the
gadget, one needs to use the [[file:linux.inf]] file provided with this
@@ -75,11 +84,13 @@ RNDIS was not the first interface. You do not need to worry abut it
unless you are trying to develop your own gadget in which case watch
out for this bug.
-**** CDC ACM
+CDC ACM
+.......
Similarly, [[file:linux-cdc-acm.inf]] is provided for CDC ACM.
-**** Customising the gadget
+Customising the gadget
+......................
If you intend to hack the g_multi gadget be advised that rearranging
functions will obviously change interface numbers for each of the
@@ -97,14 +108,16 @@ things don't work as intended before realising Windows have cached
some drivers information (changing USB port may sometimes help plus
you might try using USBDeview[8] to remove the phantom device).
-**** INF testing
+INF testing
+...........
Provided INF files have been tested on Windows XP SP3, Windows Vista
and Windows 7, all 32-bit versions. It should work on 64-bit versions
as well. It most likely won't work on Windows prior to Windows XP
SP2.
-** Other systems
+Other systems
+-------------
At this moment, drivers for any other systems have not been tested.
Knowing that MacOS is based on BSD and that BSD is Open Source, it is
@@ -115,7 +128,8 @@ For more exotic systems I have even less to say...
Any testing and drivers *are* *welcome*!
-* Authors
+Authors
+=======
This document has been written by Michal Nazarewicz
([[mailto:mina86@mina86.com]]). INF files have been hacked with
@@ -124,7 +138,8 @@ Xiaofan Chen ([[mailto:xiaofanc@gmail.com]]) basing on the MS RNDIS
template[9], Microchip's CDC ACM INF file and David Brownell's
([[mailto:dbrownell@users.sourceforge.net]]) original INF files.
-* Footnotes
+Footnotes
+=========
[1] Remote Network Driver Interface Specification,
[[http://msdn.microsoft.com/en-us/library/ee484414.aspx]].
diff --git a/Documentation/usb/gadget_printer.txt b/Documentation/usb/gadget_printer.txt
index ad995bf0db41..5e5516c69075 100644
--- a/Documentation/usb/gadget_printer.txt
+++ b/Documentation/usb/gadget_printer.txt
@@ -1,12 +1,14 @@
+===============================
+Linux USB Printer Gadget Driver
+===============================
- Linux USB Printer Gadget Driver
- 06/04/2007
+06/04/2007
- Copyright (C) 2007 Craig W. Nadler <craig@nadler.us>
+Copyright (C) 2007 Craig W. Nadler <craig@nadler.us>
-GENERAL
+General
=======
This driver may be used if you are writing printer firmware using Linux as
@@ -29,52 +31,60 @@ user space firmware can read or write this status byte using a device file
-HOWTO USE THIS DRIVER
+Howto Use This Driver
=====================
To load the USB device controller driver and the printer gadget driver, the
-following example uses the Netchip 2280 USB device controller driver:
+following example uses the Netchip 2280 USB device controller driver::
-modprobe net2280
-modprobe g_printer
+ modprobe net2280
+ modprobe g_printer
The following command line parameters can be used when loading the printer gadget
(ex: modprobe g_printer idVendor=0x0525 idProduct=0xa4a8 ):
-idVendor - This is the Vendor ID used in the device descriptor. The default is
+idVendor
+ This is the Vendor ID used in the device descriptor. The default is
the Netchip vendor id 0x0525. YOU MUST CHANGE TO YOUR OWN VENDOR ID
BEFORE RELEASING A PRODUCT. If you plan to release a product and don't
already have a Vendor ID please see www.usb.org for details on how to
get one.
-idProduct - This is the Product ID used in the device descriptor. The default
+idProduct
+ This is the Product ID used in the device descriptor. The default
is 0xa4a8, you should change this to an ID that's not used by any of
your other USB products if you have any. It would be a good idea to
start numbering your products starting with say 0x0001.
-bcdDevice - This is the version number of your product. It would be a good idea
+bcdDevice
+ This is the version number of your product. It would be a good idea
to put your firmware version here.
-iManufacturer - A string containing the name of the Vendor.
+iManufacturer
+ A string containing the name of the Vendor.
-iProduct - A string containing the Product Name.
+iProduct
+ A string containing the Product Name.
-iSerialNum - A string containing the Serial Number. This should be changed for
+iSerialNum
+ A string containing the Serial Number. This should be changed for
each unit of your product.
-iPNPstring - The PNP ID string used for this printer. You will want to set
+iPNPstring
+ The PNP ID string used for this printer. You will want to set
either on the command line or hard code the PNP ID string used for
your printer product.
-qlen - The number of 8k buffers to use per endpoint. The default is 10, you
+qlen
+ The number of 8k buffers to use per endpoint. The default is 10, you
should tune this for your product. You may also want to tune the
size of each buffer for your product.
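
Putting several of the parameters above together, a load command might look
like the following sketch; every value shown is illustrative and the
vendor/product IDs must be replaced with your own::

    modprobe g_printer idVendor=0x0525 idProduct=0xa4a8 \
             iManufacturer="Acme" iProduct="Example Printer" \
             iSerialNum=00001 qlen=10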
-USING THE EXAMPLE CODE
+Using The Example Code
======================
This example code talks to stdout, instead of a print engine.
@@ -82,22 +92,23 @@ This example code talks to stdout, instead of a print engine.
To compile the test code below:
1) save it to a file called prn_example.c
-2) compile the code with the follow command:
+2) compile the code with the following command::
+
gcc prn_example.c -o prn_example
-To read printer data from the host to stdout:
+To read printer data from the host to stdout::
# prn_example -read_data
-To write printer data from a file (data_file) to the host:
+To write printer data from a file (data_file) to the host::
# cat data_file | prn_example -write_data
-To get the current printer status for the gadget driver:
+To get the current printer status for the gadget driver::
# prn_example -get_status
@@ -107,60 +118,62 @@ To get the current printer status for the gadget driver:
Printer OK
-To set printer to Selected/On-line:
+To set printer to Selected/On-line::
# prn_example -selected
-To set printer to Not Selected/Off-line:
+To set printer to Not Selected/Off-line::
# prn_example -not_selected
-To set paper status to paper out:
+To set paper status to paper out::
# prn_example -paper_out
-To set paper status to paper loaded:
+To set paper status to paper loaded::
# prn_example -paper_loaded
-To set error status to printer OK:
+To set error status to printer OK::
# prn_example -no_error
-To set error status to ERROR:
+To set error status to ERROR::
# prn_example -error
-EXAMPLE CODE
+Example Code
============
+::
+
-#include <stdio.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <linux/poll.h>
-#include <sys/ioctl.h>
-#include <linux/usb/g_printer.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <fcntl.h>
+ #include <linux/poll.h>
+ #include <sys/ioctl.h>
+ #include <linux/usb/g_printer.h>
-#define PRINTER_FILE "/dev/g_printer"
-#define BUF_SIZE 512
+ #define PRINTER_FILE "/dev/g_printer"
+ #define BUF_SIZE 512
-/*
- * 'usage()' - Show program usage.
- */
+ /*
+ * 'usage()' - Show program usage.
+ */
-static void
-usage(const char *option) /* I - Option string or NULL */
-{
+ static void
+ usage(const char *option) /* I - Option string or NULL */
+ {
if (option) {
fprintf(stderr,"prn_example: Unknown option \"%s\"!\n",
option);
@@ -186,12 +199,12 @@ usage(const char *option) /* I - Option string or NULL */
fputs("\n\n", stderr);
exit(1);
-}
+ }
-static int
-read_printer_data()
-{
+ static int
+ read_printer_data()
+ {
struct pollfd fd[1];
/* Open device file for printer gadget. */
@@ -236,12 +249,12 @@ read_printer_data()
close(fd[0].fd);
return 0;
-}
+ }
-static int
-write_printer_data()
-{
+ static int
+ write_printer_data()
+ {
struct pollfd fd[1];
/* Open device file for printer gadget. */
@@ -295,12 +308,12 @@ write_printer_data()
close(fd[0].fd);
return 0;
-}
+ }
-static int
-read_NB_printer_data()
-{
+ static int
+ read_NB_printer_data()
+ {
int fd;
static char buf[BUF_SIZE];
int bytes_read;
@@ -329,12 +342,12 @@ read_NB_printer_data()
close(fd);
return 0;
-}
+ }
-static int
-get_printer_status()
-{
+ static int
+ get_printer_status()
+ {
int retval;
int fd;
@@ -357,12 +370,12 @@ get_printer_status()
close(fd);
return(retval);
-}
+ }
-static int
-set_printer_status(unsigned char buf, int clear_printer_status_bit)
-{
+ static int
+ set_printer_status(unsigned char buf, int clear_printer_status_bit)
+ {
int retval;
int fd;
@@ -397,12 +410,12 @@ set_printer_status(unsigned char buf, int clear_printer_status_bit)
close(fd);
return 0;
-}
+ }
-static int
-display_printer_status()
-{
+ static int
+ display_printer_status()
+ {
char printer_status;
printer_status = get_printer_status();
@@ -429,12 +442,12 @@ display_printer_status()
}
return(0);
-}
+ }
-int
-main(int argc, char *argv[])
-{
+ int
+ main(int argc, char *argv[])
+ {
int i; /* Looping var */
int retval = 0;
@@ -507,4 +520,4 @@ main(int argc, char *argv[])
}
exit(retval);
-}
+ }
diff --git a/Documentation/usb/gadget_serial.txt b/Documentation/usb/gadget_serial.txt
index d1def3186782..dce8bc1fb1f2 100644
--- a/Documentation/usb/gadget_serial.txt
+++ b/Documentation/usb/gadget_serial.txt
@@ -1,7 +1,10 @@
+===============================
+Linux Gadget Serial Driver v2.0
+===============================
- Linux Gadget Serial Driver v2.0
- 11/20/2004
- (updated 8-May-2008 for v2.3)
+11/20/2004
+
+(updated 8-May-2008 for v2.3)
License and Disclaimer
@@ -56,7 +59,7 @@ hardware; for example, a PDA, an embedded Linux system, or a PC
with a USB development card.
The gadget serial driver talks over USB to either a CDC ACM driver
-or a generic USB serial driver running on a host PC.
+or a generic USB serial driver running on a host PC::
Host
--------------------------------------
@@ -112,11 +115,11 @@ configuring the kernel. Then rebuild and install the kernel or
modules.
Then you must load the gadget serial driver. To load it as an
-ACM device (recommended for interoperability), do this:
+ACM device (recommended for interoperability), do this::
modprobe g_serial
-To load it as a vendor specific bulk in/out device, do this:
+To load it as a vendor specific bulk in/out device, do this::
modprobe g_serial use_acm=0
@@ -127,7 +130,7 @@ desired.
Your system should use mdev (from busybox) or udev to make the
device nodes. After this gadget driver has been set up you should
-then see a /dev/ttyGS0 node:
+then see a /dev/ttyGS0 node::
# ls -l /dev/ttyGS0 | cat
crw-rw---- 1 root root 253, 0 May 8 14:10 /dev/ttyGS0
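
Once the host has enumerated the gadget (see the next sections), a quick
end-to-end check is to read the port on one side and write into the matching
port on the other; /dev/ttyACM0 is the node a Linux host typically creates for
the ACM configuration and may differ on your system::

    # on the gadget
    cat /dev/ttyGS0
    # on the Linux host
    echo hello > /dev/ttyACM0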
@@ -187,24 +190,24 @@ support".
Once the gadget serial driver is loaded and the USB device connected
to the Linux host with a USB cable, the host system should recognize
-the gadget serial device. For example, the command
+the gadget serial device. For example, the command::
cat /sys/kernel/debug/usb/devices
-should show something like this:
-
-T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 5 Spd=480 MxCh= 0
-D: Ver= 2.00 Cls=02(comm.) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
-P: Vendor=0525 ProdID=a4a7 Rev= 2.01
-S: Manufacturer=Linux 2.6.8.1 with net2280
-S: Product=Gadget Serial
-S: SerialNumber=0
-C:* #Ifs= 2 Cfg#= 2 Atr=c0 MxPwr= 2mA
-I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
-E: Ad=83(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
-I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
-E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
-E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+should show something like this::
+
+ T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 5 Spd=480 MxCh= 0
+ D: Ver= 2.00 Cls=02(comm.) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+ P: Vendor=0525 ProdID=a4a7 Rev= 2.01
+ S: Manufacturer=Linux 2.6.8.1 with net2280
+ S: Product=Gadget Serial
+ S: SerialNumber=0
+ C:* #Ifs= 2 Cfg#= 2 Atr=c0 MxPwr= 2mA
+ I: If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=01 Driver=acm
+ E: Ad=83(I) Atr=03(Int.) MxPS= 8 Ivl=32ms
+ I: If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=acm
+ E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
If the host side Linux system is configured properly, the ACM driver
should be loaded automatically. The command "lsmod" should show the
@@ -219,29 +222,29 @@ Serial Converter support", and for the "USB Generic Serial Driver".
Once the gadget serial driver is loaded and the USB device connected
to the Linux host with a USB cable, the host system should recognize
-the gadget serial device. For example, the command
+the gadget serial device. For example, the command::
cat /sys/kernel/debug/usb/devices
-should show something like this:
+should show something like this::
-T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 6 Spd=480 MxCh= 0
-D: Ver= 2.00 Cls=ff(vend.) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
-P: Vendor=0525 ProdID=a4a6 Rev= 2.01
-S: Manufacturer=Linux 2.6.8.1 with net2280
-S: Product=Gadget Serial
-S: SerialNumber=0
-C:* #Ifs= 1 Cfg#= 1 Atr=c0 MxPwr= 2mA
-I: If#= 0 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=serial
-E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
-E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ T: Bus=01 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 6 Spd=480 MxCh= 0
+ D: Ver= 2.00 Cls=ff(vend.) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+ P: Vendor=0525 ProdID=a4a6 Rev= 2.01
+ S: Manufacturer=Linux 2.6.8.1 with net2280
+ S: Product=Gadget Serial
+ S: SerialNumber=0
+ C:* #Ifs= 1 Cfg#= 1 Atr=c0 MxPwr= 2mA
+ I: If#= 0 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=serial
+ E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
You must load the usbserial driver and explicitly set its parameters
-to configure it to recognize the gadget serial device, like this:
+to configure it to recognize the gadget serial device, like this::
echo 0x0525 0xA4A6 >/sys/bus/usb-serial/drivers/generic/new_id
-The legacy way is to use module parameters:
+The legacy way is to use module parameters::
modprobe usbserial vendor=0x0525 product=0xA4A6
diff --git a/Documentation/usb/iuu_phoenix.txt b/Documentation/usb/iuu_phoenix.txt
index e5f048067da4..b76268728450 100644
--- a/Documentation/usb/iuu_phoenix.txt
+++ b/Documentation/usb/iuu_phoenix.txt
@@ -1,5 +1,6 @@
+=============================
Infinity Usb Unlimited Readme
------------------------------
+=============================
Hi all,
@@ -19,7 +20,8 @@ have his own device file(/dev/ttyUSB0,/dev/ttyUSB1,...)
-How to tune the reader speed ?
+How to tune the reader speed?
+=============================
A few parameters can be used at load time
To use parameters, just unload the module if it is
@@ -27,26 +29,33 @@ How to tune the reader speed ?
In case of prebuilt module, use the command
insmod iuu_phoenix param=value.
- Example:
+ Example::
- modprobe iuu_phoenix clockmode=3
+ modprobe iuu_phoenix clockmode=3
The parameters are:
- parm: clockmode:1=3Mhz579,2=3Mhz680,3=6Mhz (int)
- parm: boost:overclock boost percent 100 to 500 (int)
- parm: cdmode:Card detect mode 0=none, 1=CD, 2=!CD, 3=DSR, 4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING (int)
- parm: xmas:xmas color enabled or not (bool)
- parm: debug:Debug enabled or not (bool)
+clockmode:
+ 1=3Mhz579,2=3Mhz680,3=6Mhz (int)
+boost:
+ overclock boost percent 100 to 500 (int)
+cdmode:
+ Card detect mode
+ 0=none, 1=CD, 2=!CD, 3=DSR, 4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING (int)
+xmas:
+ xmas color enabled or not (bool)
+debug:
+ Debug enabled or not (bool)
- clockmode will provide 3 different base settings commonly adopted by
different software:
- 1. 3Mhz579
+
+ 1. 3Mhz579
2. 3Mhz680
3. 6Mhz
- boost provide a way to overclock the reader ( my favorite :-) )
- For example to have best performance than a simple clockmode=3, try this:
+ For example, to get better performance than a simple clockmode=3, try this::
modprobe iuu_phoenix boost=195
@@ -66,7 +75,8 @@ How to tune the reader speed ?
- debug will produce a lot of debugging messages...
- Last notes:
+Last notes
+==========
Don't worry about the serial settings, the serial emulation
is an abstraction, so use any speed or parity setting will
diff --git a/Documentation/usb/mass-storage.txt b/Documentation/usb/mass-storage.txt
index e89803a5a960..d181b47c3cb6 100644
--- a/Documentation/usb/mass-storage.txt
+++ b/Documentation/usb/mass-storage.txt
@@ -1,4 +1,9 @@
-* Overview
+=========================
+Mass Storage Gadget (MSG)
+=========================
+
+Overview
+========
Mass Storage Gadget (or MSG) acts as a USB Mass Storage device,
appearing to the host as a disk or a CD-ROM drive. It supports
@@ -24,7 +29,8 @@
(which is no longer included in Linux). It will talk only briefly
about how to use MSF within composite gadgets.
-* Module parameters
+Module parameters
+=================
The mass storage gadget accepts the following mass storage specific
module parameters:
@@ -146,7 +152,8 @@
- iProduct -- USB Product string (string)
- iSerialNumber -- SerialNumber string (string)
-* sysfs entries
+sysfs entries
+=============
For each logical unit, the gadget creates a directory in the sysfs
hierarchy. Inside of it the following three files are created:
@@ -177,7 +184,8 @@
Other than those, as usual, the values of module parameters can be
read from /sys/module/g_mass_storage/parameters/* files.
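
As a hedged example of tying the two together, the gadget can be loaded with a
freshly created backing file and its parameters inspected afterwards; the path
and size are only illustrative::

    dd if=/dev/zero of=/tmp/backing.img bs=1M count=64
    modprobe g_mass_storage file=/tmp/backing.img
    ls /sys/module/g_mass_storage/parameters/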
-* Other gadgets using mass storage function
+Other gadgets using mass storage function
+=========================================
The Mass Storage Gadget uses the Mass Storage Function to handle
mass storage protocol. As a composite function, MSF may be used by
@@ -193,7 +201,8 @@
may take a look at mass_storage.c, acm_ms.c and multi.c (sorted by
complexity).
-* Relation to file storage gadget
+Relation to file storage gadget
+===============================
The Mass Storage Function and thus the Mass Storage Gadget has been
based on the File Storage Gadget. The difference between the two is
diff --git a/Documentation/usb/misc_usbsevseg.txt b/Documentation/usb/misc_usbsevseg.txt
index 0f6be4f9930b..6274aee083ed 100644
--- a/Documentation/usb/misc_usbsevseg.txt
+++ b/Documentation/usb/misc_usbsevseg.txt
@@ -1,4 +1,7 @@
+=============================
USB 7-Segment Numeric Display
+=============================
+
Manufactured by Delcom Engineering
Device Information
@@ -13,9 +16,13 @@ Device Modes
------------
By default, the driver assumes the display is only 6 characters
The mode for 6 characters is:
+
MSB 0x06; LSB 0x3f
+
For the 8 character display:
+
MSB 0x08; LSB 0xff
+
The device can accept "text" either in raw, hex, or ascii textmode.
raw controls each segment manually,
hex expects a value between 0-15 per character,
@@ -42,5 +49,3 @@ Device Operation
To set multiple decimal points, sum up each power.
For example, to set the 0th and 3rd decimal place
echo 1001 > /sys/bus/usb/.../decimals
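
Displaying text works the same way through sysfs; the sketch below assumes the
driver exposes "textmode" and "text" attributes next to "decimals" (paths
abbreviated as above)::

    echo ascii  > /sys/bus/usb/.../textmode
    echo 123456 > /sys/bus/usb/.../text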
-
-
diff --git a/Documentation/usb/mtouchusb.txt b/Documentation/usb/mtouchusb.txt
index a91adb26ea7b..d1111b74bf75 100644
--- a/Documentation/usb/mtouchusb.txt
+++ b/Documentation/usb/mtouchusb.txt
@@ -1,19 +1,27 @@
-CHANGES
+================
+mtouchusb driver
+================
+
+Changes
+=======
- 0.3 - Created based off of scanner & INSTALL from the original touchscreen
driver on freecode (http://freecode.com/projects/3mtouchscreendriver)
- Amended for linux-2.4.18, then 2.4.19
- 0.5 - Complete rewrite using Linux Input in 2.6.3
- Unfortunately no calibration support at this time
+ Unfortunately no calibration support at this time
- 1.4 - Multiple changes to support the EXII 5000UC and house cleaning
- Changed reset from standard USB dev reset to vendor reset
- Changed data sent to host from compensated to raw coordinates
- Eliminated vendor/product module params
- Performed multiple successful tests with an EXII-5010UC
+ Changed reset from standard USB dev reset to vendor reset
+ Changed data sent to host from compensated to raw coordinates
+ Eliminated vendor/product module params
+ Performed multiple successful tests with an EXII-5010UC
+
+Supported Hardware
+==================
-SUPPORTED HARDWARE:
+::
All controllers have the Vendor: 0x0596 & Product: 0x0001
@@ -29,9 +37,10 @@ SUPPORTED HARDWARE:
USB Capacitive - Black Case EXII-5030UC
USB Capacitive - No Case EXII-5050UC
-DRIVER NOTES:
+Driver Notes
+============
-Installation is simple, you only need to add Linux Input, Linux USB, and the
+Installation is simple, you only need to add Linux Input, Linux USB, and the
driver to the kernel. The driver can also be optionally built as a module.
This driver appears to be one of possible 2 Linux USB Input Touchscreen
@@ -49,24 +58,27 @@ The controller screen resolution is now 0 to 16384 for both X and Y reporting
the raw touch data. This is the same for the old and new capacitive USB
controllers.
-Perhaps at some point an abstract function will be placed into evdev so
-generic functions like calibrations, resets, and vendor information can be
+Perhaps at some point an abstract function will be placed into evdev so
+generic functions like calibrations, resets, and vendor information can be
requested from the userspace (And the drivers would handle the vendor specific
tasks).
-TODO:
+TODO
+====
Implement a control urb again to handle requests to and from the device
such as calibration, etc once/if it becomes available.
-DISCLAIMER:
+Disclaimer
+==========
-I am not a MicroTouch/3M employee, nor have I ever been. 3M does not support
+I am not a MicroTouch/3M employee, nor have I ever been. 3M does not support
this driver! If you want touch drivers only supported within X, please go to:
http://www.3m.com/3MTouchSystems/
-THANKS:
+Thanks
+======
A huge thank you to 3M Touch Systems for the EXII-5010UC controllers for
testing!
diff --git a/Documentation/usb/ohci.txt b/Documentation/usb/ohci.txt
index 99320d9fa523..bb3c49719e6b 100644
--- a/Documentation/usb/ohci.txt
+++ b/Documentation/usb/ohci.txt
@@ -1,3 +1,7 @@
+====
+OHCI
+====
+
23-Aug-2002
The "ohci-hcd" driver is a USB Host Controller Driver (HCD) that is derived
@@ -29,4 +33,3 @@ work on while the OS is getting around to the relevant IRQ processing.
- David Brownell
<dbrownell@users.sourceforge.net>
-
diff --git a/Documentation/usb/rio.txt b/Documentation/usb/rio.txt
index aee715af7db7..ca9adcf56355 100644
--- a/Documentation/usb/rio.txt
+++ b/Documentation/usb/rio.txt
@@ -1,72 +1,80 @@
+============
+Diamonds Rio
+============
+
Copyright (C) 1999, 2000 Bruce Tenison
+
Portions Copyright (C) 1999, 2000 David Nelson
+
Thanks to David Nelson for guidance and the usage of the scanner.txt
and scanner.c files to model our driver and this informative file.
Mar. 2, 2000
-CHANGES
+Changes
+=======
- Initial Revision
-OVERVIEW
+Overview
+========
This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.
+to access a RIO 500 mp3 player.
Before I explain how to use this to access the Rio500 please be warned:
-W A R N I N G:
---------------
+.. warning::
-Please note that this software is still under development. The authors
-are in no way responsible for any damage that may occur, no matter how
-inconsequential.
+ Please note that this software is still under development. The authors
+ are in no way responsible for any damage that may occur, no matter how
+ inconsequential.
It seems that the Rio has a problem when sending .mp3 with low batteries.
I suggest when the batteries are low and you want to transfer stuff that you
replace it with a fresh one. In my case, what happened is I lost two 16kb
blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash
+know if that's normal or not; it could simply be a problem with the flash
memory.
-In an extreme case, I left my Rio playing overnight and the batteries wore
-down to nothing and appear to have corrupted the flash memory. My RIO
-needed to be replaced as a result. Diamond tech support is aware of the
-problem. Do NOT allow your batteries to wear down to nothing before
-changing them. It appears RIO 500 firmware does not handle low battery
-power well at all.
+In an extreme case, I left my Rio playing overnight and the batteries wore
+down to nothing and appear to have corrupted the flash memory. My RIO
+needed to be replaced as a result. Diamond tech support is aware of the
+problem. Do NOT allow your batteries to wear down to nothing before
+changing them. It appears RIO 500 firmware does not handle low battery
+power well at all.
-On systems with OHCI controllers, the kernel OHCI code appears to have
-power on problems with some chipsets. If you are having problems
-connecting to your RIO 500, try turning it on first and then plugging it
-into the USB cable.
+On systems with OHCI controllers, the kernel OHCI code appears to have
+power on problems with some chipsets. If you are having problems
+connecting to your RIO 500, try turning it on first and then plugging it
+into the USB cable.
-Contact information:
---------------------
+Contact Information
+-------------------
The main page for the project is hosted at sourceforge.net in the following
URL: <http://rio500.sourceforge.net>. You can also go to the project's
sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
There is also a mailing list: rio500-users@lists.sourceforge.net
-Authors:
+Authors
-------
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
+Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
Clayton <kclayton@jps.net> is in charge of the PPC port and making sure
things work there. Bruce Tenison <btenison@dibbs.net> is adding support
for .fon files and also does testing. The program will most likely be
re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use
+also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided us
with some important information regarding the communication with the Rio.
-ADDITIONAL INFORMATION and Userspace tools
+Additional Information and userspace tools
+==========================================
-http://rio500.sourceforge.net/
+ http://rio500.sourceforge.net/
-REQUIREMENTS
+Requirements
+============
A host with a USB port. Ideally, either a UHCI (Intel) or OHCI
(Compaq and others) hardware port should work.
@@ -80,11 +88,11 @@ A Linux kernel with RIO 500 support enabled.
'lspci' which is only needed to determine the type of USB hardware
available in your machine.
-CONFIGURATION
+Configuration
+=============
Using `lspci -v`, determine the type of USB hardware available.
- If you see something like:
+ If you see something like::
USB Controller: ......
Flags: .....
@@ -92,7 +100,7 @@ Using `lspci -v`, determine the type of USB hardware available.
Then you have a UHCI based controller.
- If you see something like:
+ If you see something like::
USB Controller: .....
Flags: ....
@@ -107,8 +115,9 @@ hardware (determined from the steps above), 'USB Diamond Rio500 support', and
(you may need to execute `depmod -a` to update the module
dependencies).
-Add a device for the USB rio500:
- `mknod /dev/usb/rio500 c 180 64`
+Add a device for the USB rio500::
+
+ mknod /dev/usb/rio500 c 180 64
Set appropriate permissions for /dev/usb/rio500 (don't forget about
group and world permissions). Both read and write permissions are
@@ -116,12 +125,14 @@ required for proper operation.
Load the appropriate modules (if compiled as modules):
- OHCI:
+ OHCI::
+
modprobe usbcore
modprobe usb-ohci
modprobe rio500
- UHCI:
+ UHCI::
+
modprobe usbcore
modprobe usb-uhci (or uhci)
modprobe rio500
@@ -129,10 +140,10 @@ Load the appropriate modules (if compiled as modules):
That's it. The Rio500 Utils at: http://rio500.sourceforge.net should
be able to access the rio500.
-BUGS
+Bugs
+====
If you encounter any problems feel free to drop me an email.
Bruce Tenison
btenison@dibbs.net
-
diff --git a/Documentation/usb/usb-help.txt b/Documentation/usb/usb-help.txt
index 4273ca2b86ba..dc23ecd4d802 100644
--- a/Documentation/usb/usb-help.txt
+++ b/Documentation/usb/usb-help.txt
@@ -1,16 +1,17 @@
-usb-help.txt
+==============
+USB references
+==============
+
2008-Mar-7
For USB help other than the readme files that are located in
-Documentation/usb/*, see the following:
+`Documentation/usb/*`, see the following:
-Linux-USB project: http://www.linux-usb.org
- mirrors at http://usb.in.tum.de/linux-usb/
- and http://it.linux-usb.org
-Linux USB Guide: http://linux-usb.sourceforge.net
-Linux-USB device overview (working devices and drivers):
- http://www.qbik.ch/usb/devices/
+- Linux-USB project: http://www.linux-usb.org
+ mirrors at http://usb.in.tum.de/linux-usb/
+ and http://it.linux-usb.org
+- Linux USB Guide: http://linux-usb.sourceforge.net
+- Linux-USB device overview (working devices and drivers):
+ http://www.qbik.ch/usb/devices/
The Linux-USB mailing list is at linux-usb@vger.kernel.org
-
-###
diff --git a/Documentation/usb/usb-serial.txt b/Documentation/usb/usb-serial.txt
index ab100d6ee436..8fa7dbd3da9a 100644
--- a/Documentation/usb/usb-serial.txt
+++ b/Documentation/usb/usb-serial.txt
@@ -1,4 +1,9 @@
-INTRODUCTION
+==========
+USB serial
+==========
+
+Introduction
+============
The USB serial driver currently supports a number of different USB to
serial converter products, as well as some devices that use a serial
@@ -8,13 +13,15 @@ INTRODUCTION
the different devices.
-CONFIGURATION
+Configuration
+=============
Currently the driver can handle up to 256 different serial interfaces at
- one time.
+ one time.
The major number that the driver uses is 188 so to use the driver,
- create the following nodes:
+ create the following nodes::
+
mknod /dev/ttyUSB0 c 188 0
mknod /dev/ttyUSB1 c 188 1
mknod /dev/ttyUSB2 c 188 2
@@ -28,12 +35,14 @@ CONFIGURATION
When the device is connected and recognized by the driver, the driver
will print to the system log, which node(s) the device has been bound
to.
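
As a rough illustration (not part of the original text), such a node can be
opened and configured like any other serial port. The following minimal
sketch assumes the converter came up as /dev/ttyUSB0 and simply sets a raw
9600-8-N-1 line with termios before writing a few bytes::

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <termios.h>

  int main(void)
  {
          struct termios tio;
          int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

          if (fd < 0 || tcgetattr(fd, &tio) < 0) {
                  perror("/dev/ttyUSB0");
                  return 1;
          }

          cfmakeraw(&tio);                /* raw 8-bit mode, no echo */
          cfsetispeed(&tio, B9600);       /* 9600 baud, both directions */
          cfsetospeed(&tio, B9600);

          if (tcsetattr(fd, TCSANOW, &tio) < 0) {
                  perror("tcsetattr");
                  return 1;
          }

          write(fd, "hello\r\n", 7);      /* talk to the attached device */
          close(fd);
          return 0;
  }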
-
-SPECIFIC DEVICES SUPPORTED
+
+Specific Devices Supported
+==========================
ConnectTech WhiteHEAT 4 port converter
+--------------------------------------
ConnectTech has been very forthcoming with information about their
device, including providing a unit to test with.
@@ -46,6 +55,7 @@ ConnectTech WhiteHEAT 4 port converter
HandSpring Visor, Palm USB, and Clié USB driver
+-----------------------------------------------
This driver works with all HandSpring USB, Palm USB, and Sony Clié USB
devices.
@@ -62,7 +72,7 @@ HandSpring Visor, Palm USB, and Clié USB driver
This goes against the current documentation for pilot-xfer and other
packages, but is the only way that it will work due to the hardware
in the device.
-
+
When the device is connected, try talking to it on the second port
(this is usually /dev/ttyUSB1 if you do not have any other usb-serial
devices in the system.) The system log should tell you which port is
@@ -78,10 +88,10 @@ HandSpring Visor, Palm USB, and Clié USB driver
try resetting the device, first a hot reset, and then a cold reset if
necessary. Some devices need this before they can talk to the USB port
properly.
-
+
Devices that are not compiled into the kernel can be specified with module
parameters. e.g. modprobe visor vendor=0x54c product=0x66
-
+
There is a webpage and mailing lists for this portion of the driver at:
http://sourceforge.net/projects/usbvisor/
@@ -90,6 +100,7 @@ HandSpring Visor, Palm USB, and Clié USB driver
PocketPC PDA Driver
+-------------------
This driver can be used to connect to Compaq iPAQ, HP Jornada, Casio EM500
and other PDAs running Windows CE 3.0 or PocketPC 2002 using a USB
@@ -135,12 +146,13 @@ PocketPC PDA Driver
be used to flash the ROM, as well as the microP code.. so much for needing
Toshiba's $350 serial cable for flashing!! :D
NOTE: This has NOT been tested. Use at your own risk.
-
+
For any questions or problems with the driver, please contact Ganesh
Varadarajan <ganesh@veritas.com>
Keyspan PDA Serial Adapter
+--------------------------
Single port DB-9 serial adapter, pushed as a PDA adapter for iMacs (mostly
sold in Macintosh catalogs, comes in a translucent white/green dongle).
@@ -148,32 +160,37 @@ Keyspan PDA Serial Adapter
This driver also works for the Xircom/Entrega single port serial adapter.
Current status:
+
Things that work:
- basic input/output (tested with 'cu')
- blocking write when serial line can't keep up
- changing baud rates (up to 115200)
- getting/setting modem control pins (TIOCM{GET,SET,BIS,BIC})
- sending break (although duration looks suspect)
+ - basic input/output (tested with 'cu')
+ - blocking write when serial line can't keep up
+ - changing baud rates (up to 115200)
+ - getting/setting modem control pins (TIOCM{GET,SET,BIS,BIC})
+ - sending break (although duration looks suspect)
+
Things that don't:
- device strings (as logged by kernel) have trailing binary garbage
- device ID isn't right, might collide with other Keyspan products
- changing baud rates ought to flush tx/rx to avoid mangled half characters
+ - device strings (as logged by kernel) have trailing binary garbage
+ - device ID isn't right, might collide with other Keyspan products
+ - changing baud rates ought to flush tx/rx to avoid mangled half characters
+
Big Things on the todo list:
- parity, 7 vs 8 bits per char, 1 or 2 stop bits
- HW flow control
- not all of the standard USB descriptors are handled: Get_Status, Set_Feature
- O_NONBLOCK, select()
+ - parity, 7 vs 8 bits per char, 1 or 2 stop bits
+ - HW flow control
+ - not all of the standard USB descriptors are handled:
+ Get_Status, Set_Feature, O_NONBLOCK, select()
For any questions or problems with this driver, please contact Brian
- Warner at warner@lothar.com
+ Warner at warner@lothar.com
Keyspan USA-series Serial Adapters
+----------------------------------
- Single, Dual and Quad port adapters - driver uses Keyspan supplied
+ Single, Dual and Quad port adapters - driver uses Keyspan supplied
firmware and is being developed with their support.
-
+
Current status:
+
The USA-18X, USA-28X, USA-19, USA-19W and USA-49W are supported and
have been pretty thoroughly tested at various baud rates with 8-N-1
character settings. Other character lengths and parity setups are
@@ -182,32 +199,37 @@ Keyspan USA-series Serial Adapters
The USA-28 isn't yet supported though doing so should be pretty
straightforward. Contact the maintainer if you require this
functionality.
-
+
More information is available at:
+
http://www.carnationsoftware.com/carnation/Keyspan.html
-
+
For any questions or problems with this driver, please contact Hugh
Blemings at hugh@misc.nu
FTDI Single Port Serial Driver
+------------------------------
This is a single port DB-25 serial adapter.
Devices supported include:
- -TripNav TN-200 USB GPS
- -Navis Engineering Bureau CH-4711 USB GPS
+
+ - TripNav TN-200 USB GPS
+ - Navis Engineering Bureau CH-4711 USB GPS
For any questions or problems with this driver, please contact Bill Ryder.
ZyXEL omni.net lcd plus ISDN TA
+-------------------------------
This is an ISDN TA. Please report both successes and troubles to
azummo@towertech.it
Cypress M8 CY4601 Family Serial Driver
+--------------------------------------
This driver was for the most part developed by Neil "koyama" Whelchel. It
has been improved since that previous form to support dynamic serial
@@ -215,18 +237,19 @@ Cypress M8 CY4601 Family Serial Driver
part stable and has been tested on an smp machine. (dual p2)
Chipsets supported under CY4601 family:
-
+
CY7C63723, CY7C63742, CY7C63743, CY7C64013
Devices supported:
- -DeLorme's USB Earthmate GPS (SiRF Star II lp arch)
- -Cypress HID->COM RS232 adapter
-
- Note: Cypress Semiconductor claims no affiliation with the
+ - DeLorme's USB Earthmate GPS (SiRF Star II lp arch)
+ - Cypress HID->COM RS232 adapter
+
+ Note:
+ Cypress Semiconductor claims no affiliation with the
hid->com device.
- Most devices using chipsets under the CY4601 family should
+ Most devices using chipsets under the CY4601 family should
work with the driver, as long as they stay true to the CY4601
usbserial specification.
@@ -236,8 +259,9 @@ Cypress M8 CY4601 Family Serial Driver
upon start init to this setting. usbserial core provides the rest
of the termios settings, along with some custom termios so that the
output is in proper format and parsable.
-
- The device can be put into sirf mode by issuing NMEA command:
+
+ The device can be put into sirf mode by issuing NMEA command::
+
$PSRF100,<protocol>,<baud>,<databits>,<stopbits>,<parity>*CHECKSUM
$PSRF100,0,9600,8,1,0*0C
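
As an aside (not in the original text), the trailing *0C above is the usual
NMEA checksum: the XOR of every character between the '$' and the '*',
printed as two hexadecimal digits. A small sketch that reproduces it::

  #include <stdio.h>

  /* XOR of the characters between '$' and '*', NMEA 0183 style */
  static unsigned char nmea_checksum(const char *sentence)
  {
          unsigned char sum = 0;

          if (*sentence == '$')
                  sentence++;
          while (*sentence && *sentence != '*')
                  sum ^= (unsigned char)*sentence++;
          return sum;
  }

  int main(void)
  {
          /* prints "0C", matching the example command above */
          printf("%02X\n", nmea_checksum("$PSRF100,0,9600,8,1,0*"));
          return 0;
  }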
@@ -259,11 +283,14 @@ Cypress M8 CY4601 Family Serial Driver
If you have any questions, problems, patches, feature requests, etc. you can
contact me here via email:
+
dignome@gmail.com
+
(your problems/patches can alternately be submitted to usb-devel)
Digi AccelePort Driver
+----------------------
This driver supports the Digi AccelePort USB 2 and 4 devices, 2 port
(plus a parallel port) and 4 port USB serial converters. The driver
@@ -285,42 +312,49 @@ Digi AccelePort Driver
Belkin USB Serial Adapter F5U103
+--------------------------------
Single port DB-9/PS-2 serial adapter from Belkin with firmware by eTEK Labs.
The Peracom single port serial adapter also works with this driver, as
well as the GoHubs adapter.
Current status:
+
The following have been tested and work:
- Baud rate 300-230400
- Data bits 5-8
- Stop bits 1-2
- Parity N,E,O,M,S
- Handshake None, Software (XON/XOFF), Hardware (CTSRTS,CTSDTR)*
- Break Set and clear
- Line control Input/Output query and control **
-
- * Hardware input flow control is only enabled for firmware
+
+ - Baud rate 300-230400
+ - Data bits 5-8
+ - Stop bits 1-2
+ - Parity N,E,O,M,S
+ - Handshake None, Software (XON/XOFF), Hardware (CTSRTS,CTSDTR) [1]_
+ - Break Set and clear
+ - Line control Input/Output query and control [2]_
+
+ .. [1]
+ Hardware input flow control is only enabled for firmware
levels above 2.06. Read source code comments describing Belkin
firmware errata. Hardware output flow control is working for all
firmware versions.
- ** Queries of inputs (CTS,DSR,CD,RI) show the last
+
+ .. [2]
+ Queries of inputs (CTS,DSR,CD,RI) show the last
reported state. Queries of outputs (DTR,RTS) show the last
requested state and may not reflect current state as set by
automatic hardware flow control.
TO DO List:
- -- Add true modem control line query capability. Currently tracks the
- states reported by the interrupt and the states requested.
- -- Add error reporting back to application for UART error conditions.
- -- Add support for flush ioctls.
- -- Add everything else that is missing :)
+ - Add true modem control line query capability. Currently tracks the
+ states reported by the interrupt and the states requested.
+ - Add error reporting back to application for UART error conditions.
+ - Add support for flush ioctls.
+ - Add everything else that is missing :)
For any questions or problems with this driver, please contact William
Greathouse at wgreathouse@smva.com
Empeg empeg-car Mark I/II Driver
+--------------------------------
This is an experimental driver to provide connectivity support for the
client synchronization tools for an Empeg empeg-car mp3 player.
@@ -335,6 +369,7 @@ Empeg empeg-car Mark I/II Driver
MCT USB Single Port Serial Adapter U232
+---------------------------------------
This driver is for the MCT USB-RS232 Converter (25 pin, Model No.
U232-P25) from Magic Control Technology Corp. (there is also a 9 pin
@@ -355,35 +390,39 @@ MCT USB Single Port Serial Adapter U232
Inside Out Networks Edgeport Driver
+-----------------------------------
This driver supports all devices made by Inside Out Networks, specifically
the following models:
- Edgeport/4
- Rapidport/4
- Edgeport/4t
- Edgeport/2
- Edgeport/4i
- Edgeport/2i
- Edgeport/421
- Edgeport/21
- Edgeport/8
- Edgeport/8 Dual
- Edgeport/2D8
- Edgeport/4D8
- Edgeport/8i
- Edgeport/2 DIN
- Edgeport/4 DIN
- Edgeport/16 Dual
+
+ - Edgeport/4
+ - Rapidport/4
+ - Edgeport/4t
+ - Edgeport/2
+ - Edgeport/4i
+ - Edgeport/2i
+ - Edgeport/421
+ - Edgeport/21
+ - Edgeport/8
+ - Edgeport/8 Dual
+ - Edgeport/2D8
+ - Edgeport/4D8
+ - Edgeport/8i
+ - Edgeport/2 DIN
+ - Edgeport/4 DIN
+ - Edgeport/16 Dual
For any questions or problems with this driver, please contact Greg
Kroah-Hartman at greg@kroah.com
REINER SCT cyberJack pinpad/e-com USB chipcard reader
-
+-----------------------------------------------------
+
Interface to ISO 7816 compatible contact-based chipcards, e.g. GSM SIMs.
-
+
Current status:
+
This is the kernel part of the driver for this USB card reader.
There is also a user part for a CT-API driver available. A site
for downloading is TBA. For now, you can request it from the
@@ -394,6 +433,7 @@ REINER SCT cyberJack pinpad/e-com USB chipcard reader
Prolific PL2303 Driver
+----------------------
This driver supports any device that has the PL2303 chip from Prolific
in it. This includes a number of single port USB to serial converters,
@@ -403,11 +443,13 @@ Prolific PL2303 Driver
For any questions or problems with this driver, please contact Greg
Kroah-Hartman at greg@kroah.com
-
+
KL5KUSB105 chipset / PalmConnect USB single-port adapter
-
+--------------------------------------------------------
+
Current status:
+
The driver was put together by looking at the usb bus transactions
done by Palm's driver under Windows, so a lot of functionality is
still missing. Notably, serial ioctls are sometimes faked or not yet
@@ -417,21 +459,25 @@ Current status:
are supported, but handshaking (software or hardware) is not, which is
why it is wise to cut down on the rate used for large
transfers until this is settled.
-
+
See http://www.uuhaus.de/linux/palmconnect.html for up-to-date
information on this driver.
Winchiphead CH341 Driver
+------------------------
This driver is for the Winchiphead CH341 USB-RS232 Converter. This chip
also implements an IEEE 1284 parallel port, I2C and SPI, but that is not
supported by the driver. The protocol was analyzed from the behaviour
of the Windows driver, no datasheet is available at present.
+
The manufacturer's website: http://www.winchiphead.com/.
+
For any questions or problems with this driver, please contact
frank@kingswood-consulting.co.uk.
Moschip MCS7720, MCS7715 driver
+-------------------------------
These chips are present in devices sold by various manufacturers, such as Syba
and Cables Unlimited. There may be others. The 7720 provides two serial
@@ -449,20 +495,24 @@ Moschip MCS7720, MCS7715 driver
don't have one of these devices, so I can't say for sure.
Generic Serial driver
+---------------------
If your device is not one of the above listed devices, nor compatible with
the above models, you can try out the "generic" interface. This
interface does not provide any type of control messages sent to the
device, and does not support any kind of device flow control. All that
is required of your device is that it has at least one bulk in endpoint,
- or one bulk out endpoint.
+ or one bulk out endpoint.
+
+ To enable the generic driver to recognize your device, provide::
- To enable the generic driver to recognize your device, provide
echo <vid> <pid> >/sys/bus/usb-serial/drivers/generic/new_id
+
where the <vid> and <pid> is replaced with the hex representation of your
device's vendor id and product id.
If the driver is compiled as a module you can also provide one id when
- loading the module
+ loading the module::
+
insmod usbserial vendor=0x#### product=0x####
This driver has been successfully used to connect to the NetChip USB
@@ -473,7 +523,8 @@ Generic Serial driver
Kroah-Hartman at greg@kroah.com
-CONTACT:
+Contact
+=======
If anyone has any problems using these drivers, with any of the above
specified products, please contact the specific driver's author listed
diff --git a/Documentation/usb/usbip_protocol.txt b/Documentation/usb/usbip_protocol.txt
index c7a0f4c7e7f1..988c832166cd 100644
--- a/Documentation/usb/usbip_protocol.txt
+++ b/Documentation/usb/usbip_protocol.txt
@@ -1,3 +1,7 @@
+===============
+USB/IP protocol
+===============
+
PRELIMINARY DRAFT, MAY CONTAIN MISTAKES!
28 Jun 2011
@@ -12,6 +16,8 @@ in one or more pieces at the low level transport layer). The server sends back
the OP_REP_DEVLIST packet which lists the exported USB devices. Finally the
TCP/IP connection is closed.
+::
+
virtual host controller usb host
"client" "server"
(imports USB devices) (exports USB devices)
@@ -32,6 +38,8 @@ send two types of packets: the USBIP_CMD_SUBMIT to submit an URB, and
USBIP_CMD_UNLINK to unlink a previously submitted URB. The answers of the
server may be USBIP_RET_SUBMIT and USBIP_RET_UNLINK respectively.
+::
+
virtual host controller usb host
"client" "server"
(imports USB devices) (exports USB devices)
@@ -88,270 +96,316 @@ The fields are in network (big endian) byte order meaning that the most signific
byte (MSB) is stored at the lowest address.
-OP_REQ_DEVLIST: Retrieve the list of exported USB devices.
+OP_REQ_DEVLIST:
+ Retrieve the list of exported USB devices.
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x8005 | Command code: Retrieve the list of exported USB
- | | | devices.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0 |
++-----------+--------+------------+---------------------------------------------------+
+| 2 | 2 | 0x8005 | Command code: Retrieve the list of exported USB |
+| | | | devices. |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | 0x00000000 | Status: unused, shall be set to 0 |
++-----------+--------+------------+---------------------------------------------------+
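
For illustration only (the structure and field names below are inferred from
this table rather than taken from any header), the common OP_* header can be
expressed as a packed C struct, keeping in mind that every field travels in
network (big endian) byte order::

  #include <stdio.h>
  #include <stdint.h>
  #include <arpa/inet.h>          /* htons()/htonl() */

  /* hypothetical rendering of the 8-byte OP_* header described above */
  struct __attribute__((packed)) op_common {
          uint16_t version;       /* 0x0100: BCD USBIP version 1.0.0 */
          uint16_t code;          /* 0x8005: retrieve the device list */
          uint32_t status;        /* unused in requests, set to 0 */
  };

  int main(void)
  {
          struct op_common req = {
                  .version = htons(0x0100),
                  .code    = htons(0x8005),
                  .status  = htonl(0),
          };

          /* a real client would write these 8 bytes to the TCP socket */
          fwrite(&req, sizeof(req), 1, stdout);
          return 0;
  }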
-OP_REP_DEVLIST: Reply with the list of exported USB devices.
+OP_REP_DEVLIST:
+ Reply with the list of exported USB devices.
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0.
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x0005 | Reply code: The list of exported USB devices.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: 0 for OK
------------+--------+------------+---------------------------------------------------
- 8 | 4 | n | Number of exported devices: 0 means no exported
- | | | devices.
------------+--------+------------+---------------------------------------------------
- 0x0C | | | From now on the exported n devices are described,
- | | | if any. If no devices are exported the message
- | | | ends with the previous "number of exported
- | | | devices" field.
------------+--------+------------+---------------------------------------------------
- | 256 | | path: Path of the device on the host exporting the
- | | | USB device, string closed with zero byte, e.g.
- | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
- | | | The unused bytes shall be filled with zero
- | | | bytes.
------------+--------+------------+---------------------------------------------------
- 0x10C | 32 | | busid: Bus ID of the exported device, string
- | | | closed with zero byte, e.g. "3-2". The unused
- | | | bytes shall be filled with zero bytes.
------------+--------+------------+---------------------------------------------------
- 0x12C | 4 | | busnum
------------+--------+------------+---------------------------------------------------
- 0x130 | 4 | | devnum
------------+--------+------------+---------------------------------------------------
- 0x134 | 4 | | speed
------------+--------+------------+---------------------------------------------------
- 0x138 | 2 | | idVendor
------------+--------+------------+---------------------------------------------------
- 0x13A | 2 | | idProduct
------------+--------+------------+---------------------------------------------------
- 0x13C | 2 | | bcdDevice
------------+--------+------------+---------------------------------------------------
- 0x13E | 1 | | bDeviceClass
------------+--------+------------+---------------------------------------------------
- 0x13F | 1 | | bDeviceSubClass
------------+--------+------------+---------------------------------------------------
- 0x140 | 1 | | bDeviceProtocol
------------+--------+------------+---------------------------------------------------
- 0x141 | 1 | | bConfigurationValue
------------+--------+------------+---------------------------------------------------
- 0x142 | 1 | | bNumConfigurations
------------+--------+------------+---------------------------------------------------
- 0x143 | 1 | | bNumInterfaces
------------+--------+------------+---------------------------------------------------
- 0x144 | | m_0 | From now on each interface is described, all
- | | | together bNumInterfaces times, with the
- | | | the following 4 fields:
------------+--------+------------+---------------------------------------------------
- | 1 | | bInterfaceClass
------------+--------+------------+---------------------------------------------------
- 0x145 | 1 | | bInterfaceSubClass
------------+--------+------------+---------------------------------------------------
- 0x146 | 1 | | bInterfaceProtocol
------------+--------+------------+---------------------------------------------------
- 0x147 | 1 | | padding byte for alignment, shall be set to zero
------------+--------+------------+---------------------------------------------------
- 0xC + | | | The second exported USB device starts at i=1
- i*0x138 + | | | with the busid field.
- m_(i-1)*4 | | |
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0.|
++-----------+--------+------------+---------------------------------------------------+
+| 2 | 2 | 0x0005 | Reply code: The list of exported USB devices. |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | 0x00000000 | Status: 0 for OK |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 4 | n | Number of exported devices: 0 means no exported |
+| | | | devices. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x0C | | | From now on the exported n devices are described, |
+| | | | if any. If no devices are exported the message |
+| | | | ends with the previous "number of exported |
+| | | | devices" field. |
++-----------+--------+------------+---------------------------------------------------+
+| | 256 | | path: Path of the device on the host exporting the|
+| | | | USB device, string closed with zero byte, e.g. |
+| | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2" |
+| | | | The unused bytes shall be filled with zero |
+| | | | bytes. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x10C | 32 | | busid: Bus ID of the exported device, string |
+| | | | closed with zero byte, e.g. "3-2". The unused |
+| | | | bytes shall be filled with zero bytes. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x12C | 4 | | busnum |
++-----------+--------+------------+---------------------------------------------------+
+| 0x130 | 4 | | devnum |
++-----------+--------+------------+---------------------------------------------------+
+| 0x134 | 4 | | speed |
++-----------+--------+------------+---------------------------------------------------+
+| 0x138 | 2 | | idVendor |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13A | 2 | | idProduct |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13C | 2 | | bcdDevice |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13E | 1 | | bDeviceClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13F | 1 | | bDeviceSubClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x140 | 1 | | bDeviceProtocol |
++-----------+--------+------------+---------------------------------------------------+
+| 0x141 | 1 | | bConfigurationValue |
++-----------+--------+------------+---------------------------------------------------+
+| 0x142 | 1 | | bNumConfigurations |
++-----------+--------+------------+---------------------------------------------------+
+| 0x143 | 1 | | bNumInterfaces |
++-----------+--------+------------+---------------------------------------------------+
+| 0x144 | | m_0 | From now on each interface is described, all |
+| | | | together bNumInterfaces times, with the |
+| | | | following 4 fields: |
++-----------+--------+------------+---------------------------------------------------+
+| | 1 | | bInterfaceClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x145 | 1 | | bInterfaceSubClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x146 | 1 | | bInterfaceProtocol |
++-----------+--------+------------+---------------------------------------------------+
+| 0x147 | 1 | | padding byte for alignment, shall be set to zero |
++-----------+--------+------------+---------------------------------------------------+
+| 0xC + | | | The second exported USB device starts at i=1 |
+| i*0x138 + | | | with the busid field. |
+| m_(i-1)*4 | | | |
++-----------+--------+------------+---------------------------------------------------+
-OP_REQ_IMPORT: Request to import (attach) a remote USB device.
+OP_REQ_IMPORT:
+ Request to import (attach) a remote USB device.
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x8003 | Command code: import a remote USB device.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: unused, shall be set to 0
------------+--------+------------+---------------------------------------------------
- 8 | 32 | | busid: the busid of the exported device on the
- | | | remote host. The possible values are taken
- | | | from the message field OP_REP_DEVLIST.busid.
- | | | A string closed with zero, the unused bytes
- | | | shall be filled with zeros.
------------+--------+------------+---------------------------------------------------
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0 |
++-----------+--------+------------+---------------------------------------------------+
+| 2 | 2 | 0x8003 | Command code: import a remote USB device. |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | 0x00000000 | Status: unused, shall be set to 0 |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 32 | | busid: the busid of the exported device on the |
+| | | | remote host. The possible values are taken |
+| | | | from the message field OP_REP_DEVLIST.busid. |
+| | | | A string closed with zero, the unused bytes |
+| | | | shall be filled with zeros. |
++-----------+--------+------------+---------------------------------------------------+
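
To make the byte order and padding rules concrete, here is a hedged sketch
(the structure, field and function names are invented for this example) of
how a client might build and send OP_REQ_IMPORT for busid "3-2" on an
already connected TCP socket::

  #include <string.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <arpa/inet.h>

  struct __attribute__((packed)) op_import_request {
          uint16_t version;       /* 0x0100 */
          uint16_t code;          /* 0x8003: import a remote USB device */
          uint32_t status;        /* unused, 0 */
          char     busid[32];     /* zero terminated, zero padded */
  };

  /* send OP_REQ_IMPORT for the given busid over a connected socket */
  int send_import_request(int sockfd, const char *busid)
  {
          struct op_import_request req;

          memset(&req, 0, sizeof(req));   /* unused bytes must be zero */
          req.version = htons(0x0100);
          req.code    = htons(0x8003);
          req.status  = htonl(0);
          strncpy(req.busid, busid, sizeof(req.busid) - 1);

          return write(sockfd, &req, sizeof(req)) == sizeof(req) ? 0 : -1;
  }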
-OP_REP_IMPORT: Reply to import (attach) a remote USB device.
+OP_REP_IMPORT:
+ Reply to import (attach) a remote USB device.
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0
------------+--------+------------+---------------------------------------------------
- 2 | 2 | 0x0003 | Reply code: Reply to import.
------------+--------+------------+---------------------------------------------------
- 4 | 4 | 0x00000000 | Status: 0 for OK
- | | | 1 for error
------------+--------+------------+---------------------------------------------------
- 8 | | | From now on comes the details of the imported
- | | | device, if the previous status field was OK (0),
- | | | otherwise the reply ends with the status field.
------------+--------+------------+---------------------------------------------------
- | 256 | | path: Path of the device on the host exporting the
- | | | USB device, string closed with zero byte, e.g.
- | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2"
- | | | The unused bytes shall be filled with zero
- | | | bytes.
------------+--------+------------+---------------------------------------------------
- 0x108 | 32 | | busid: Bus ID of the exported device, string
- | | | closed with zero byte, e.g. "3-2". The unused
- | | | bytes shall be filled with zero bytes.
------------+--------+------------+---------------------------------------------------
- 0x128 | 4 | | busnum
------------+--------+------------+---------------------------------------------------
- 0x12C | 4 | | devnum
------------+--------+------------+---------------------------------------------------
- 0x130 | 4 | | speed
------------+--------+------------+---------------------------------------------------
- 0x134 | 2 | | idVendor
------------+--------+------------+---------------------------------------------------
- 0x136 | 2 | | idProduct
------------+--------+------------+---------------------------------------------------
- 0x138 | 2 | | bcdDevice
------------+--------+------------+---------------------------------------------------
- 0x139 | 1 | | bDeviceClass
------------+--------+------------+---------------------------------------------------
- 0x13A | 1 | | bDeviceSubClass
------------+--------+------------+---------------------------------------------------
- 0x13B | 1 | | bDeviceProtocol
------------+--------+------------+---------------------------------------------------
- 0x13C | 1 | | bConfigurationValue
------------+--------+------------+---------------------------------------------------
- 0x13D | 1 | | bNumConfigurations
------------+--------+------------+---------------------------------------------------
- 0x13E | 1 | | bNumInterfaces
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 2 | 0x0100 | Binary-coded decimal USBIP version number: v1.0.0 |
++-----------+--------+------------+---------------------------------------------------+
+| 2 | 2 | 0x0003 | Reply code: Reply to import. |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | 0x00000000 | Status: |
+| | | | |
+| | | | - 0 for OK |
+| | | | - 1 for error |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | | | From now on come the details of the imported |
+| | | | device, if the previous status field was OK (0), |
+| | | | otherwise the reply ends with the status field. |
++-----------+--------+------------+---------------------------------------------------+
+| | 256 | | path: Path of the device on the host exporting the|
+| | | | USB device, string closed with zero byte, e.g. |
+| | | | "/sys/devices/pci0000:00/0000:00:1d.1/usb3/3-2" |
+| | | | The unused bytes shall be filled with zero |
+| | | | bytes. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x108 | 32 | | busid: Bus ID of the exported device, string |
+| | | | closed with zero byte, e.g. "3-2". The unused |
+| | | | bytes shall be filled with zero bytes. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x128 | 4 | | busnum |
++-----------+--------+------------+---------------------------------------------------+
+| 0x12C | 4 | | devnum |
++-----------+--------+------------+---------------------------------------------------+
+| 0x130 | 4 | | speed |
++-----------+--------+------------+---------------------------------------------------+
+| 0x134 | 2 | | idVendor |
++-----------+--------+------------+---------------------------------------------------+
+| 0x136 | 2 | | idProduct |
++-----------+--------+------------+---------------------------------------------------+
+| 0x138 | 2 | | bcdDevice |
++-----------+--------+------------+---------------------------------------------------+
+| 0x139 | 1 | | bDeviceClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13A | 1 | | bDeviceSubClass |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13B | 1 | | bDeviceProtocol |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13C | 1 | | bConfigurationValue |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13D | 1 | | bNumConfigurations |
++-----------+--------+------------+---------------------------------------------------+
+| 0x13E | 1 | | bNumInterfaces |
++-----------+--------+------------+---------------------------------------------------+
-USBIP_CMD_SUBMIT: Submit an URB
+USBIP_CMD_SUBMIT:
+ Submit an URB
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000001 | command: Submit an URB
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: the sequence number of the URB to submit
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number, possible values are: 0...15
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | transfer_flags: possible values depend on the
- | | | URB transfer type, see below
------------+--------+------------+---------------------------------------------------
- 0x18 | 4 | | transfer_buffer_length
------------+--------+------------+---------------------------------------------------
- 0x1C | 4 | | start_frame: specify the selected frame to
- | | | transmit an ISO frame, ignored if URB_ISO_ASAP
- | | | is specified at transfer_flags
------------+--------+------------+---------------------------------------------------
- 0x20 | 4 | | number_of_packets: number of ISO packets
------------+--------+------------+---------------------------------------------------
- 0x24 | 4 | | interval: maximum time for the request on the
- | | | server-side host controller
------------+--------+------------+---------------------------------------------------
- 0x28 | 8 | | setup: data bytes for USB setup, filled with
- | | | zeros if not used
------------+--------+------------+---------------------------------------------------
- 0x30 | | | URB data. For ISO transfers the padding between
- | | | each ISO packets is not transmitted.
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 4 | 0x00000001 | command: Submit an URB |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | | seqnum: the sequence number of the URB to submit |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 4 | | devid |
++-----------+--------+------------+---------------------------------------------------+
+| 0xC | 4 | | direction: |
+| | | | |
+| | | | - 0: USBIP_DIR_OUT |
+| | | | - 1: USBIP_DIR_IN |
++-----------+--------+------------+---------------------------------------------------+
+| 0x10 | 4 | | ep: endpoint number, possible values are: 0...15 |
++-----------+--------+------------+---------------------------------------------------+
+| 0x14 | 4 | | transfer_flags: possible values depend on the |
+| | | | URB transfer type, see below |
++-----------+--------+------------+---------------------------------------------------+
+| 0x18 | 4 | | transfer_buffer_length |
++-----------+--------+------------+---------------------------------------------------+
+| 0x1C | 4 | | start_frame: specify the selected frame to |
+| | | | transmit an ISO frame, ignored if URB_ISO_ASAP |
+| | | | is specified at transfer_flags |
++-----------+--------+------------+---------------------------------------------------+
+| 0x20 | 4 | | number_of_packets: number of ISO packets |
++-----------+--------+------------+---------------------------------------------------+
+| 0x24 | 4 | | interval: maximum time for the request on the |
+| | | | server-side host controller |
++-----------+--------+------------+---------------------------------------------------+
+| 0x28 | 8 | | setup: data bytes for USB setup, filled with |
+| | | | zeros if not used |
++-----------+--------+------------+---------------------------------------------------+
+| 0x30 | | | URB data. For ISO transfers the padding between |
+| | | | each ISO packet is not transmitted. |
++-----------+--------+------------+---------------------------------------------------+
- Allowed transfer_flags | value | control | interrupt | bulk | isochronous
- -------------------------+------------+---------+-----------+----------+-------------
- URB_SHORT_NOT_OK | 0x00000001 | only in | only in | only in | no
- URB_ISO_ASAP | 0x00000002 | no | no | no | yes
- URB_NO_TRANSFER_DMA_MAP | 0x00000004 | yes | yes | yes | yes
- URB_ZERO_PACKET | 0x00000040 | no | no | only out | no
- URB_NO_INTERRUPT | 0x00000080 | yes | yes | yes | yes
- URB_FREE_BUFFER | 0x00000100 | yes | yes | yes | yes
- URB_DIR_MASK | 0x00000200 | yes | yes | yes | yes
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | Allowed transfer_flags | value | control | interrupt | bulk | isochronous |
+ +=========================+============+=========+===========+==========+=============+
+ | URB_SHORT_NOT_OK | 0x00000001 | only in | only in | only in | no |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_ISO_ASAP | 0x00000002 | no | no | no | yes |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_NO_TRANSFER_DMA_MAP | 0x00000004 | yes | yes | yes | yes |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_ZERO_PACKET | 0x00000040 | no | no | only out | no |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_NO_INTERRUPT | 0x00000080 | yes | yes | yes | yes |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_FREE_BUFFER | 0x00000100 | yes | yes | yes | yes |
+ +-------------------------+------------+---------+-----------+----------+-------------+
+ | URB_DIR_MASK | 0x00000200 | yes | yes | yes | yes |
+ +-------------------------+------------+---------+-----------+----------+-------------+
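
The 48-byte layout above maps naturally onto a pair of packed C structures.
The sketch below simply mirrors the offsets in the table (the names are
illustrative and not copied from kernel headers); as everywhere in this
protocol, the fields are big endian on the wire::

  #include <stdint.h>

  /* shared 20-byte prefix of the USBIP_CMD_* and USBIP_RET_* packets */
  struct __attribute__((packed)) usbip_basic_header {
          uint32_t command;       /* 0x00000001 for USBIP_CMD_SUBMIT */
          uint32_t seqnum;        /* sequence number of the URB */
          uint32_t devid;
          uint32_t direction;     /* 0 = USBIP_DIR_OUT, 1 = USBIP_DIR_IN */
          uint32_t ep;            /* endpoint number, 0..15 */
  };

  /* body of USBIP_CMD_SUBMIT, offsets 0x14..0x2F in the table above */
  struct __attribute__((packed)) usbip_cmd_submit_body {
          uint32_t transfer_flags;
          int32_t  transfer_buffer_length;
          int32_t  start_frame;           /* ignored with URB_ISO_ASAP */
          int32_t  number_of_packets;     /* ISO transfers only */
          int32_t  interval;
          uint8_t  setup[8];              /* zero filled if unused */
  };

  /* URB data (and ISO descriptors) follow immediately at offset 0x30 */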
-USBIP_RET_SUBMIT: Reply for submitting an URB
+USBIP_RET_SUBMIT:
+ Reply for submitting an URB
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000003 | command
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: URB sequence number
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | status: zero for successful URB transaction,
- | | | otherwise some kind of error happened.
------------+--------+------------+---------------------------------------------------
- 0x18 | 4 | n | actual_length: number of URB data bytes
------------+--------+------------+---------------------------------------------------
- 0x1C | 4 | | start_frame: for an ISO frame the actually
- | | | selected frame for transmit.
------------+--------+------------+---------------------------------------------------
- 0x20 | 4 | | number_of_packets
------------+--------+------------+---------------------------------------------------
- 0x24 | 4 | | error_count
------------+--------+------------+---------------------------------------------------
- 0x28 | 8 | | setup: data bytes for USB setup, filled with
- | | | zeros if not used
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers the padding
- | | | between each ISO packets is not transmitted.
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 4 | 0x00000003 | command |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | | seqnum: URB sequence number |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 4 | | devid |
++-----------+--------+------------+---------------------------------------------------+
+| 0xC | 4 | | direction: |
+| | | | |
+| | | | - 0: USBIP_DIR_OUT |
+| | | | - 1: USBIP_DIR_IN |
++-----------+--------+------------+---------------------------------------------------+
+| 0x10 | 4 | | ep: endpoint number |
++-----------+--------+------------+---------------------------------------------------+
+| 0x14 | 4 | | status: zero for successful URB transaction, |
+| | | | otherwise some kind of error happened. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x18 | 4 | n | actual_length: number of URB data bytes |
++-----------+--------+------------+---------------------------------------------------+
+| 0x1C | 4 | | start_frame: for an ISO frame the actually |
+| | | | selected frame for transmit. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x20 | 4 | | number_of_packets |
++-----------+--------+------------+---------------------------------------------------+
+| 0x24 | 4 | | error_count |
++-----------+--------+------------+---------------------------------------------------+
+| 0x28 | 8 | | setup: data bytes for USB setup, filled with |
+| | | | zeros if not used |
++-----------+--------+------------+---------------------------------------------------+
+| 0x30 | n | | URB data bytes. For ISO transfers the padding |
+| | | | between each ISO packet is not transmitted. |
++-----------+--------+------------+---------------------------------------------------+
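
On the receiving side a client has to read the fixed 48-byte reply header
and convert the fields it cares about back from network byte order before
interpreting them. A hedged sketch (structure and helper names made up for
this example, with the URB data left for the caller to read afterwards)::

  #include <stdio.h>
  #include <stdint.h>
  #include <unistd.h>
  #include <arpa/inet.h>

  struct __attribute__((packed)) usbip_ret_submit_hdr {
          uint32_t command;         /* 0x00000003 */
          uint32_t seqnum;
          uint32_t devid;
          uint32_t direction;
          uint32_t ep;
          int32_t  status;          /* 0 on success */
          int32_t  actual_length;   /* number of URB data bytes */
          int32_t  start_frame;
          int32_t  number_of_packets;
          int32_t  error_count;
          uint8_t  setup[8];
  };

  /* read and byte swap one USBIP_RET_SUBMIT header from the socket */
  int read_ret_submit(int sockfd, struct usbip_ret_submit_hdr *hdr)
  {
          if (read(sockfd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr))
                  return -1;

          hdr->command       = ntohl(hdr->command);
          hdr->seqnum        = ntohl(hdr->seqnum);
          hdr->status        = ntohl(hdr->status);
          hdr->actual_length = ntohl(hdr->actual_length);

          if (hdr->command != 0x00000003) {
                  fprintf(stderr, "unexpected reply 0x%08x\n", hdr->command);
                  return -1;
          }
          return 0;
  }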
-USBIP_CMD_UNLINK: Unlink an URB
+USBIP_CMD_UNLINK:
+ Unlink an URB
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000002 | command: URB unlink command
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: URB sequence number to unlink: FIXME: is this so?
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number: zero
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | seqnum: the URB sequence number given previously
- | | | at USBIP_CMD_SUBMIT.seqnum field
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers the padding
- | | | between each ISO packets is not transmitted.
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 4 | 0x00000002 | command: URB unlink command |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | | seqnum: URB sequence number to unlink: |
+| | | | |
+| | | | FIXME: |
+| | | | is this so? |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 4 | | devid |
++-----------+--------+------------+---------------------------------------------------+
+| 0xC | 4 | | direction: |
+| | | | |
+| | | | - 0: USBIP_DIR_OUT |
+| | | | - 1: USBIP_DIR_IN |
++-----------+--------+------------+---------------------------------------------------+
+| 0x10 | 4 | | ep: endpoint number: zero |
++-----------+--------+------------+---------------------------------------------------+
+| 0x14 | 4 | | seqnum: the URB sequence number given previously |
+| | | | at USBIP_CMD_SUBMIT.seqnum field |
++-----------+--------+------------+---------------------------------------------------+
+| 0x30 | n | | URB data bytes. For ISO transfers the padding |
+| | | | between each ISO packet is not transmitted. |
++-----------+--------+------------+---------------------------------------------------+
-USBIP_RET_UNLINK: Reply for URB unlink
+USBIP_RET_UNLINK:
+ Reply for URB unlink
- Offset | Length | Value | Description
------------+--------+------------+---------------------------------------------------
- 0 | 4 | 0x00000004 | command: reply for the URB unlink command
------------+--------+------------+---------------------------------------------------
- 4 | 4 | | seqnum: the unlinked URB sequence number
------------+--------+------------+---------------------------------------------------
- 8 | 4 | | devid
------------+--------+------------+---------------------------------------------------
- 0xC | 4 | | direction: 0: USBIP_DIR_OUT
- | | | 1: USBIP_DIR_IN
------------+--------+------------+---------------------------------------------------
- 0x10 | 4 | | ep: endpoint number
------------+--------+------------+---------------------------------------------------
- 0x14 | 4 | | status: This is the value contained in the
- | | | urb->status in the URB completition handler.
- | | | FIXME: a better explanation needed.
------------+--------+------------+---------------------------------------------------
- 0x30 | n | | URB data bytes. For ISO transfers the padding
- | | | between each ISO packets is not transmitted.
++-----------+--------+------------+---------------------------------------------------+
+| Offset | Length | Value | Description |
++===========+========+============+===================================================+
+| 0 | 4 | 0x00000004 | command: reply for the URB unlink command |
++-----------+--------+------------+---------------------------------------------------+
+| 4 | 4 | | seqnum: the unlinked URB sequence number |
++-----------+--------+------------+---------------------------------------------------+
+| 8 | 4 | | devid |
++-----------+--------+------------+---------------------------------------------------+
+| 0xC | 4 | | direction: |
+| | | | |
+| | | | - 0: USBIP_DIR_OUT |
+| | | | - 1: USBIP_DIR_IN |
++-----------+--------+------------+---------------------------------------------------+
+| 0x10 | 4 | | ep: endpoint number |
++-----------+--------+------------+---------------------------------------------------+
+| 0x14 | 4 | | status: This is the value contained in the |
+| | | | urb->status in the URB completion handler. |
+| | | | |
+| | | | FIXME: |
+| | | | a better explanation needed. |
++-----------+--------+------------+---------------------------------------------------+
+| 0x30 | n | | URB data bytes. For ISO transfers the padding |
+| | | | between each ISO packet is not transmitted. |
++-----------+--------+------------+---------------------------------------------------+
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index 28425f736756..b0bd51080799 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -1,4 +1,9 @@
-* Introduction
+======
+usbmon
+======
+
+Introduction
+============
The name "usbmon" in lowercase refers to a facility in kernel which is
used to collect traces of I/O on the USB bus. This function is analogous
@@ -16,7 +21,8 @@ Two APIs are currently implemented: "text" and "binary". The binary API
is available through a character device in /dev namespace and is an ABI.
The text API is deprecated since 2.6.35, but available for convenience.
-* How to use usbmon to collect raw text traces
+How to use usbmon to collect raw text traces
+============================================
Unlike the packet socket, usbmon has an interface which provides traces
in a text format. This is used for two purposes. First, it serves as a
@@ -26,38 +32,41 @@ are finalized. Second, humans can read it in case tools are not available.
To collect a raw text trace, execute following steps.
1. Prepare
+----------
Mount debugfs (it has to be enabled in your kernel configuration), and
load the usbmon module (if built as module). The second step is skipped
-if usbmon is built into the kernel.
+if usbmon is built into the kernel::
-# mount -t debugfs none_debugs /sys/kernel/debug
-# modprobe usbmon
-#
+ # mount -t debugfs none_debugs /sys/kernel/debug
+ # modprobe usbmon
+ #
-Verify that bus sockets are present.
+Verify that bus sockets are present::
-# ls /sys/kernel/debug/usb/usbmon
-0s 0u 1s 1t 1u 2s 2t 2u 3s 3t 3u 4s 4t 4u
-#
+ # ls /sys/kernel/debug/usb/usbmon
+ 0s 0u 1s 1t 1u 2s 2t 2u 3s 3t 3u 4s 4t 4u
+ #
Now you can choose to either use the socket '0u' (to capture packets on all
buses), and skip to step #3, or find the bus used by your device with step #2.
This allows you to filter away annoying devices that talk continuously.
2. Find which bus connects to the desired device
+------------------------------------------------
Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
to the device. Usually you do it by looking for the vendor string. If you have
many similar devices, unplug one and compare the two
/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
-Example:
-T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
-D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
-P: Vendor=0557 ProdID=2004 Rev= 1.00
-S: Manufacturer=ATEN
-S: Product=UC100KM V2.00
+Example::
+
+ T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
+ D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
+ P: Vendor=0557 ProdID=2004 Rev= 1.00
+ S: Manufacturer=ATEN
+ S: Product=UC100KM V2.00
"Bus=03" means it's bus 3. Alternatively, you can look at the output from
"lsusb" and get the bus number from the appropriate line. Example:
@@ -65,23 +74,28 @@ S: Product=UC100KM V2.00
Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
3. Start 'cat'
+--------------
+
+::
-# cat /sys/kernel/debug/usb/usbmon/3u > /tmp/1.mon.out
+ # cat /sys/kernel/debug/usb/usbmon/3u > /tmp/1.mon.out
-to listen on a single bus, otherwise, to listen on all buses, type:
+to listen on a single bus, otherwise, to listen on all buses, type::
-# cat /sys/kernel/debug/usb/usbmon/0u > /tmp/1.mon.out
+ # cat /sys/kernel/debug/usb/usbmon/0u > /tmp/1.mon.out
This process will read until it is killed. Naturally, the output can be
redirected to a desirable location. This is preferred, because it is going
to be quite long.
4. Perform the desired operation on the USB bus
+-----------------------------------------------
This is where you do something that creates the traffic: plug in a flash key,
copy files, control a webcam, etc.
5. Kill cat
+-----------
Usually it's done with a keyboard interrupt (Control-C).
@@ -89,7 +103,8 @@ At this point the output file (/tmp/1.mon.out in this example) can be saved,
sent by e-mail, or inspected with a text editor. In the last case make sure
that the file size is not excessive for your favourite editor.
-* Raw text data format
+Raw text data format
+====================
Two formats are supported currently: the original, or '1t' format, and
the '1u' format. The '1t' format is deprecated in kernel 2.6.21. The '1u'
@@ -122,10 +137,14 @@ Here is the list of words, from left to right:
- "Address" word (formerly a "pipe"). It consists of four fields, separated by
colons: URB type and direction, Bus number, Device address, Endpoint number.
Type and direction are encoded with two bytes in the following manner:
+
+ == == =============================
Ci Co Control input and output
Zi Zo Isochronous input and output
Ii Io Interrupt input and output
Bi Bo Bulk input and output
+ == == =============================
+
Bus number, Device address, and Endpoint are decimal numbers, but they may
have leading zeros, for the sake of human readers.
@@ -178,24 +197,25 @@ Here is the list of words, from left to right:
Examples:
-An input control transfer to get a port status.
+An input control transfer to get a port status::
-d5ea89a0 3575914555 S Ci:1:001:0 s a3 00 0000 0003 0004 4 <
-d5ea89a0 3575914560 C Ci:1:001:0 0 4 = 01050000
+ d5ea89a0 3575914555 S Ci:1:001:0 s a3 00 0000 0003 0004 4 <
+ d5ea89a0 3575914560 C Ci:1:001:0 0 4 = 01050000
An output bulk transfer to send a SCSI command 0x28 (READ_10) in a 31-byte
-Bulk wrapper to a storage device at address 5:
+Bulk wrapper to a storage device at address 5::
-dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 ad000000 00800000 80010a28 20000000 20000040 00000000 000000
-dd65f0e8 4128379808 C Bo:1:005:2 0 31 >
+ dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 ad000000 00800000 80010a28 20000000 20000040 00000000 000000
+ dd65f0e8 4128379808 C Bo:1:005:2 0 31 >
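
For instance (a made-up helper, not part of usbmon itself), the address word
seen in these traces, e.g. Ci:1:001:0 or Bo:1:005:2, can be pulled apart
with a single sscanf::

  #include <stdio.h>

  int main(void)
  {
          char type_dir[3];
          unsigned int bus, dev, ep;

          /* "Bo:1:005:2" -> Bulk output, bus 1, device 5, endpoint 2 */
          if (sscanf("Bo:1:005:2", "%2[A-Za-z]:%u:%u:%u",
                     type_dir, &bus, &dev, &ep) == 4)
                  printf("%s bus=%u dev=%u ep=%u\n", type_dir, bus, dev, ep);
          return 0;
  }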
-* Raw binary format and API
+Raw binary format and API
+=========================
The overall architecture of the API is about the same as the one above,
only the events are delivered in binary format. Each event is sent in
-the following structure (its name is made up, so that we can refer to it):
+the following structure (its name is made up, so that we can refer to it)::
-struct usbmon_packet {
+ struct usbmon_packet {
u64 id; /* 0: URB ID - from submission to callback */
unsigned char type; /* 8: Same as text; extensible. */
unsigned char xfer_type; /* ISO (0), Intr, Control, Bulk (3) */
@@ -220,7 +240,7 @@ struct usbmon_packet {
int start_frame; /* 52: For ISO */
unsigned int xfer_flags; /* 56: copy of URB's transfer_flags */
unsigned int ndesc; /* 60: Actual number of ISO descriptors */
-}; /* 64 total length */
+ }; /* 64 total length */
These events can be received from a character device by reading with read(2),
with an ioctl(2), or by accessing the buffer with mmap. However, read(2)
@@ -244,12 +264,12 @@ no events are available.
MON_IOCG_STATS, defined as _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
-The argument is a pointer to the following structure:
+The argument is a pointer to the following structure::
-struct mon_bin_stats {
+ struct mon_bin_stats {
u32 queued;
u32 dropped;
-};
+ };
The member "queued" refers to the number of events currently queued in the
buffer (and not to the number of events processed since the last reset).
@@ -273,13 +293,13 @@ This call returns the current size of the buffer in bytes.
These calls wait for events to arrive if none were in the kernel buffer,
then return the first event. The argument is a pointer to the following
-structure:
+structure::
-struct mon_get_arg {
+ struct mon_get_arg {
struct usbmon_packet *hdr;
void *data;
size_t alloc; /* Length of data (can be zero) */
-};
+ };
Before the call, hdr, data, and alloc should be filled. Upon return, the area
pointed by hdr contains the next event structure, and the data buffer contains
@@ -290,13 +310,13 @@ The MON_IOCX_GET copies 48 bytes to hdr area, MON_IOCX_GETX copies 64 bytes.
MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
This ioctl is primarily used when the application accesses the buffer
-with mmap(2). Its argument is a pointer to the following structure:
+with mmap(2). Its argument is a pointer to the following structure::
-struct mon_mfetch_arg {
+ struct mon_mfetch_arg {
uint32_t *offvec; /* Vector of events fetched */
uint32_t nfetch; /* Number of events to fetch (out: fetched) */
uint32_t nflush; /* Number of events to flush */
-};
+ };
The ioctl operates in 3 stages.
@@ -329,7 +349,7 @@ be polled with select(2) and poll(2). But lseek(2) does not work.
The basic idea is simple:
To prepare, map the buffer by getting the current size, then using mmap(2).
-Then, execute a loop similar to the one written in pseudo-code below:
+Then, execute a loop similar to the one written in pseudo-code below::
struct mon_mfetch_arg fetch;
struct usbmon_packet *hdr;
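The pseudo-code loop is truncated by this hunk. A rough, non-authoritative sketch of the same mmap/MON_IOCX_MFETCH pattern is shown below; the /dev/usbmon0 path, the MON_IOC_MAGIC value and the MON_IOCQ_RING_SIZE request number are assumptions here, not taken from this hunk::

    /* Fetch usbmon events through the memory-mapped ring buffer.
     * struct mon_mfetch_arg is copied from the hunk above; the ioctl
     * magic and the ring-size request number are ASSUMED values --
     * check the full usbmon documentation before relying on them.
     */
    #include <fcntl.h>
    #include <linux/ioctl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define MON_IOC_MAGIC      0x92                      /* assumed */
    #define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)     /* assumed */

    struct mon_mfetch_arg {
        uint32_t *offvec;   /* out: offsets of fetched events in the ring */
        uint32_t nfetch;    /* in: events wanted, out: events fetched */
        uint32_t nflush;    /* in: events to flush before fetching */
    };
    #define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)

    int main(void)
    {
        int fd = open("/dev/usbmon0", O_RDONLY);     /* "0" = all buses */
        if (fd < 0) { perror("open"); return 1; }

        int size = ioctl(fd, MON_IOCQ_RING_SIZE);    /* current buffer size */
        unsigned char *ring = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
        if (ring == MAP_FAILED) { perror("mmap"); return 1; }

        uint32_t offs[32];
        struct mon_mfetch_arg fetch = { offs, 32, 0 };
        for (int round = 0; round < 4; round++) {
            if (ioctl(fd, MON_IOCX_MFETCH, &fetch) < 0) { perror("mfetch"); break; }
            for (uint32_t i = 0; i < fetch.nfetch; i++)
                /* ring + offs[i] points at a struct usbmon_packet */
                printf("event at ring offset %u\n", offs[i]);
            fetch.nflush = fetch.nfetch;   /* release what was consumed */
            fetch.nfetch = 32;
        }
        munmap(ring, size);
        close(fd);
        return 0;
    }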
diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst
index b1b846d8a094..bd9165241b6c 100644
--- a/Documentation/userspace-api/seccomp_filter.rst
+++ b/Documentation/userspace-api/seccomp_filter.rst
@@ -123,9 +123,9 @@ In precedence order, they are:
to userland as the errno without executing the system call.
``SECCOMP_RET_USER_NOTIF``:
- Results in a ``struct seccomp_notif`` message sent on the userspace
- notification fd, if it is attached, or ``-ENOSYS`` if it is not. See below
- on discussion of how to handle user notifications.
+ Results in a ``struct seccomp_notif`` message sent on the userspace
+ notification fd, if it is attached, or ``-ENOSYS`` if it is not. See
+ below on discussion of how to handle user notifications.
``SECCOMP_RET_TRACE``:
When returned, this value will cause the kernel to attempt to
@@ -133,7 +133,7 @@ In precedence order, they are:
call. If there is no tracer present, ``-ENOSYS`` is returned to
userland and the system call is not executed.
- A tracer will be notified if it requests ``PTRACE_O_TRACESECCOM``P
+ A tracer will be notified if it requests ``PTRACE_O_TRACESECCOMP``
using ``ptrace(PTRACE_SETOPTIONS)``. The tracer will be notified
of a ``PTRACE_EVENT_SECCOMP`` and the ``SECCOMP_RET_DATA`` portion of
the BPF program return value will be available to the tracer
diff --git a/Documentation/video-output.txt b/Documentation/video-output.txt
index e517011be4f9..56d6fa2e2368 100644
--- a/Documentation/video-output.txt
+++ b/Documentation/video-output.txt
@@ -1,34 +1,34 @@
+Video Output Switcher Control
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Video Output Switcher Control
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 2006 luming.yu@intel.com
+2006 luming.yu@intel.com
The output sysfs class driver provides an abstract video output layer that
can be used to hook platform specific methods to enable/disable video output
device through common sysfs interface. For example, on my IBM ThinkPad T42
laptop, The ACPI video driver registered its output devices and read/write
-method for 'state' with output sysfs class. The user interface under sysfs is:
+method for 'state' with output sysfs class. The user interface under sysfs is::
-linux:/sys/class/video_output # tree .
-.
-|-- CRT0
-| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
-| |-- state
-| |-- subsystem -> ../../../class/video_output
-| `-- uevent
-|-- DVI0
-| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
-| |-- state
-| |-- subsystem -> ../../../class/video_output
-| `-- uevent
-|-- LCD0
-| |-- device -> ../../../devices/pci0000:00/0000:00:01.0
-| |-- state
-| |-- subsystem -> ../../../class/video_output
-| `-- uevent
-`-- TV0
- |-- device -> ../../../devices/pci0000:00/0000:00:01.0
- |-- state
- |-- subsystem -> ../../../class/video_output
- `-- uevent
+ linux:/sys/class/video_output # tree .
+ .
+ |-- CRT0
+ | |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+ | |-- state
+ | |-- subsystem -> ../../../class/video_output
+ | `-- uevent
+ |-- DVI0
+ | |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+ | |-- state
+ | |-- subsystem -> ../../../class/video_output
+ | `-- uevent
+ |-- LCD0
+ | |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+ | |-- state
+ | |-- subsystem -> ../../../class/video_output
+ | `-- uevent
+ `-- TV0
+ |-- device -> ../../../devices/pci0000:00/0000:00:01.0
+ |-- state
+ |-- subsystem -> ../../../class/video_output
+ `-- uevent
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 67068c47c591..ba6c42c576dd 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -69,23 +69,6 @@ by and on behalf of the VM's process may not be freed/unaccounted when
the VM is shut down.
-It is important to note that althought VM ioctls may only be issued from
-the process that created the VM, a VM's lifecycle is associated with its
-file descriptor, not its creator (process). In other words, the VM and
-its resources, *including the associated address space*, are not freed
-until the last reference to the VM's file descriptor has been released.
-For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
-not be freed until both the parent (original) process and its child have
-put their references to the VM's file descriptor.
-
-Because a VM's resources are not freed until the last reference to its
-file descriptor is released, creating additional references to a VM via
-via fork(), dup(), etc... without careful consideration is strongly
-discouraged and may have unwanted side effects, e.g. memory allocated
-by and on behalf of the VM's process may not be freed/unaccounted when
-the VM is shut down.
-
-
3. Extensions
-------------
@@ -321,7 +304,7 @@ cpu's hardware control block.
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
Capability: basic
-Architectures: x86
+Architectures: all
Type: vm ioctl
Parameters: struct kvm_dirty_log (in/out)
Returns: 0 on success, -1 on error
@@ -347,7 +330,7 @@ They must be less than the value that KVM_CHECK_EXTENSION returns for
the KVM_CAP_MULTI_ADDRESS_SPACE capability.
The bits in the dirty bitmap are cleared before the ioctl returns, unless
-KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is enabled. For more information,
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
see the description of the capability.
4.9 KVM_SET_MEMORY_ALIAS
@@ -1117,9 +1100,8 @@ struct kvm_userspace_memory_region {
This ioctl allows the user to create, modify or delete a guest physical
memory slot. Bits 0-15 of "slot" specify the slot id and this value
should be less than the maximum number of user memory slots supported per
-VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
-if this capability is supported by the architecture. Slots may not
-overlap in guest physical address space.
+VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS.
+Slots may not overlap in guest physical address space.
If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
specifies the address space which is being modified. They must be
@@ -1901,6 +1883,12 @@ Architectures: all
Type: vcpu ioctl
Parameters: struct kvm_one_reg (in)
Returns: 0 on success, negative value on failure
+Errors:
+  ENOENT:   no such register
+  EINVAL:   invalid register ID, or no such register
+  EPERM:    (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
struct kvm_one_reg {
__u64 id;
@@ -1985,6 +1973,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TLB3PS | 32
PPC | KVM_REG_PPC_EPTCFG | 32
PPC | KVM_REG_PPC_ICP_STATE | 64
+ PPC | KVM_REG_PPC_VP_STATE | 128
PPC | KVM_REG_PPC_TB_OFFSET | 64
PPC | KVM_REG_PPC_SPMC1 | 32
PPC | KVM_REG_PPC_SPMC2 | 32
@@ -2137,6 +2126,37 @@ contains elements ranging from 32 to 128 bits. The index is a 32bit
value in the kvm_regs structure seen as a 32bit array.
0x60x0 0000 0010 <index into the kvm_regs struct:16>
+Specifically:
+ Encoding Register Bits kvm_regs member
+----------------------------------------------------------------
+ 0x6030 0000 0010 0000 X0 64 regs.regs[0]
+ 0x6030 0000 0010 0002 X1 64 regs.regs[1]
+ ...
+ 0x6030 0000 0010 003c X30 64 regs.regs[30]
+ 0x6030 0000 0010 003e SP 64 regs.sp
+ 0x6030 0000 0010 0040 PC 64 regs.pc
+ 0x6030 0000 0010 0042 PSTATE 64 regs.pstate
+ 0x6030 0000 0010 0044 SP_EL1 64 sp_el1
+ 0x6030 0000 0010 0046 ELR_EL1 64 elr_el1
+ 0x6030 0000 0010 0048 SPSR_EL1 64 spsr[KVM_SPSR_EL1] (alias SPSR_SVC)
+ 0x6030 0000 0010 004a SPSR_ABT 64 spsr[KVM_SPSR_ABT]
+ 0x6030 0000 0010 004c SPSR_UND 64 spsr[KVM_SPSR_UND]
+ 0x6030 0000 0010 004e SPSR_IRQ 64 spsr[KVM_SPSR_IRQ]
+ 0x6030 0000 0010 0050 SPSR_FIQ 64 spsr[KVM_SPSR_FIQ]
+ 0x6040 0000 0010 0054 V0 128 fp_regs.vregs[0] (*)
+ 0x6040 0000 0010 0058 V1 128 fp_regs.vregs[1] (*)
+ ...
+ 0x6040 0000 0010 00d0 V31 128 fp_regs.vregs[31] (*)
+ 0x6020 0000 0010 00d4 FPSR 32 fp_regs.fpsr
+ 0x6020 0000 0010 00d5 FPCR 32 fp_regs.fpcr
+
+(*) These encodings are not accepted for SVE-enabled vcpus. See
+ KVM_ARM_VCPU_INIT.
+
+ The equivalent register content can be accessed via bits [127:0] of
+ the corresponding SVE Zn registers instead for vcpus that have SVE
+ enabled (see below).
+
arm64 CCSIDR registers are demultiplexed by CSSELR value:
0x6020 0000 0011 00 <csselr:8>
@@ -2146,6 +2166,64 @@ arm64 system registers have the following id bit patterns:
arm64 firmware pseudo-registers have the following bit pattern:
0x6030 0000 0014 <regno:16>
+arm64 SVE registers have the following bit patterns:
+ 0x6080 0000 0015 00 <n:5> <slice:5> Zn bits[2048*slice + 2047 : 2048*slice]
+ 0x6050 0000 0015 04 <n:4> <slice:5> Pn bits[256*slice + 255 : 256*slice]
+ 0x6050 0000 0015 060 <slice:5> FFR bits[256*slice + 255 : 256*slice]
+ 0x6060 0000 0015 ffff KVM_REG_ARM64_SVE_VLS pseudo-register
+
+Access to register IDs where 2048 * slice >= 128 * max_vq will fail with
+ENOENT. max_vq is the vcpu's maximum supported vector length in 128-bit
+quadwords: see (**) below.
+
+These registers are only accessible on vcpus for which SVE is enabled.
+See KVM_ARM_VCPU_INIT for details.
+
+In addition, except for KVM_REG_ARM64_SVE_VLS, these registers are not
+accessible until the vcpu's SVE configuration has been finalized
+using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE). See KVM_ARM_VCPU_INIT
+and KVM_ARM_VCPU_FINALIZE for more information about this procedure.
+
+KVM_REG_ARM64_SVE_VLS is a pseudo-register that allows the set of vector
+lengths supported by the vcpu to be discovered and configured by
+userspace. When transferred to or from user memory via KVM_GET_ONE_REG
+or KVM_SET_ONE_REG, the value of this register is of type
+__u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as
+follows:
+
+__u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
+
+if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
+ ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
+ ((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1))
+ /* Vector length vq * 16 bytes supported */
+else
+ /* Vector length vq * 16 bytes not supported */
+
+(**) The maximum value vq for which the above condition is true is
+max_vq. This is the maximum vector length available to the guest on
+this vcpu, and determines which register slices are visible through
+this ioctl interface.
+
+(See Documentation/arm64/sve.txt for an explanation of the "vq"
+nomenclature.)
+
+KVM_REG_ARM64_SVE_VLS is only accessible after KVM_ARM_VCPU_INIT.
+KVM_ARM_VCPU_INIT initialises it to the best set of vector lengths that
+the host supports.
+
+Userspace may subsequently modify it if desired until the vcpu's SVE
+configuration is finalized using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).
+
+Apart from simply removing all vector lengths from the host set that
+exceed some value, support for arbitrarily chosen sets of vector lengths
+is hardware-dependent and may not be available. Attempting to configure
+an invalid set of vector lengths via KVM_SET_ONE_REG will fail with
+EINVAL.
+
+After the vcpu's SVE configuration is finalized, further attempts to
+write this register will fail with EPERM.
+
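As a small illustration of the decoding rule quoted above, max_vq can be recovered from the KVM_REG_ARM64_SVE_VLS bitmap as sketched below; the helper name and parameters are the editor's own, not defined by KVM::

    #include <stdint.h>

    /* Return the highest supported vq (vector length in 128-bit
     * quadwords) encoded in the VLS bitmap, or 0 if none is set.
     * vq_min/vq_max correspond to SVE_VQ_MIN/SVE_VQ_MAX in the text.
     */
    static unsigned int sve_vls_max_vq(const uint64_t *vls,
                                       unsigned int vq_min,
                                       unsigned int vq_max)
    {
        unsigned int vq, max_vq = 0;

        for (vq = vq_min; vq <= vq_max; vq++) {
            unsigned int bit = vq - vq_min;

            if ((vls[bit / 64] >> (bit % 64)) & 1)
                max_vq = vq;    /* vector length vq * 16 bytes supported */
        }
        return max_vq;
    }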
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@@ -2198,6 +2276,12 @@ Architectures: all
Type: vcpu ioctl
Parameters: struct kvm_one_reg (in and out)
Returns: 0 on success, negative value on failure
+Errors include:
+  ENOENT:   no such register
+  EINVAL:   invalid register ID, or no such register
+  EPERM:    (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
This ioctl allows to receive the value of a single register implemented
in a vcpu. The register to read is indicated by the "id" field of the
@@ -2690,6 +2774,49 @@ Possible features:
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
+ - KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication
+ for arm64 only.
+ Depends on KVM_CAP_ARM_PTRAUTH_ADDRESS.
+ If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+ both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+ KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+ requested.
+
+ - KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication
+ for arm64 only.
+ Depends on KVM_CAP_ARM_PTRAUTH_GENERIC.
+ If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+ both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+ KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+ requested.
+
+ - KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only).
+ Depends on KVM_CAP_ARM_SVE.
+ Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ * After KVM_ARM_VCPU_INIT:
+
+ - KVM_REG_ARM64_SVE_VLS may be read using KVM_GET_ONE_REG: the
+ initial value of this pseudo-register indicates the best set of
+ vector lengths possible for a vcpu on this host.
+
+ * Before KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ - KVM_RUN and KVM_GET_REG_LIST are not available;
+
+ - KVM_GET_ONE_REG and KVM_SET_ONE_REG cannot be used to access
+ the scalable architectural SVE registers
+ KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() or
+ KVM_REG_ARM64_SVE_FFR;
+
+ - KVM_REG_ARM64_SVE_VLS may optionally be written using
+ KVM_SET_ONE_REG, to modify the set of vector lengths available
+ for the vcpu.
+
+ * After KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+ - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
+ no longer be written using KVM_SET_ONE_REG.
4.83 KVM_ARM_PREFERRED_TARGET
@@ -3809,8 +3936,8 @@ to I/O ports.
4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
-Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
-Architectures: x86
+Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+Architectures: x86, arm, arm64, mips
Type: vm ioctl
Parameters: struct kvm_dirty_log (in)
Returns: 0 on success, -1 on error
@@ -3830,8 +3957,9 @@ The ioctl clears the dirty status of pages in a memory slot, according to
the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
field. Bit 0 of the bitmap corresponds to page "first_page" in the
memory slot, and num_pages is the size in bits of the input bitmap.
-Both first_page and num_pages must be a multiple of 64. For each bit
-that is set in the input bitmap, the corresponding page is marked "clean"
+first_page must be a multiple of 64; num_pages must also be a multiple of
+64 unless first_page + num_pages is the size of the memory slot. For each
+bit that is set in the input bitmap, the corresponding page is marked "clean"
in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
(for example via write-protection, or by clearing the dirty bit in
a page table entry).
@@ -3841,10 +3969,10 @@ the address space for which you want to return the dirty bitmap.
They must be less than the value that KVM_CHECK_EXTENSION returns for
the KVM_CAP_MULTI_ADDRESS_SPACE capability.
-This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
is enabled; for more information, see the description of the capability.
However, it can always be used as long as KVM_CHECK_EXTENSION confirms
-that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is present.
+that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present.
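Putting the two ioctls together, a minimal user-space sketch of the harvest-then-clear flow might look as follows; this is illustrative only, the structure layouts come from the uapi headers rather than from this hunk, and error handling is omitted::

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* bitmap must be large enough to cover every page of the slot. */
    static void harvest_and_clear(int vm_fd, __u32 slot, void *bitmap,
                                  __u64 first_page, __u32 num_pages)
    {
        struct kvm_dirty_log log;
        struct kvm_clear_dirty_log clear;

        /* 1. Fetch the dirty bitmap.  With manual protection enabled,
         *    this no longer clears the log or write-protects pages.
         */
        memset(&log, 0, sizeof(log));
        log.slot = slot;
        log.dirty_bitmap = bitmap;
        ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);

        /* ... copy out the pages marked dirty in the bitmap ... */

        /* 2. Re-protect the harvested range.  first_page must be a
         *    multiple of 64; num_pages must be too, unless the range
         *    ends exactly at the end of the memory slot.
         */
        memset(&clear, 0, sizeof(clear));
        clear.slot = slot;
        clear.first_page = first_page;
        clear.num_pages = num_pages;
        clear.dirty_bitmap = bitmap;
        ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
    }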
4.118 KVM_GET_SUPPORTED_HV_CPUID
@@ -3903,6 +4031,40 @@ number of valid entries in the 'entries' array, which is then filled.
'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved,
userspace should not expect to get any particular value there.
+4.119 KVM_ARM_VCPU_FINALIZE
+
+Architectures: arm, arm64
+Type: vcpu ioctl
+Parameters: int feature (in)
+Returns: 0 on success, -1 on error
+Errors:
+ EPERM: feature not enabled, needs configuration, or already finalized
+ EINVAL: feature unknown or not present
+
+Recognised values for feature:
+ arm64 KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
+
+Finalizes the configuration of the specified vcpu feature.
+
+The vcpu must already have been initialised, enabling the affected feature, by
+means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in
+features[].
+
+For affected vcpu features, this is a mandatory step that must be performed
+before the vcpu is fully usable.
+
+Between KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE, the feature may be
+configured by use of ioctls such as KVM_SET_ONE_REG. The exact configuration
+that should be performed and how to do it are feature-dependent.
+
+Other calls that depend on a particular feature being finalized, such as
+KVM_RUN, KVM_GET_REG_LIST, KVM_GET_ONE_REG and KVM_SET_ONE_REG, will fail with
+-EPERM unless the feature has already been finalized by means of a
+KVM_ARM_VCPU_FINALIZE call.
+
+See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
+using this ioctl.
+
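To make the ordering concrete, here is a rough user-space sketch of the init/configure/finalize sequence for SVE. It is illustrative only: the target value, the missing error handling and the assumption that the arm64 uapi headers are in scope are simplifications, not requirements stated by the patch::

    #include <linux/kvm.h>      /* pulls in the arm64 <asm/kvm.h> uapi */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static void init_sve_vcpu(int vcpu_fd)
    {
        struct kvm_vcpu_init init;

        /* 1. Enable the feature at init time (a real VMM would use the
         *    target returned by KVM_ARM_PREFERRED_TARGET instead).
         */
        memset(&init, 0, sizeof(init));
        init.target = KVM_ARM_TARGET_GENERIC_V8;
        init.features[0] = 1u << KVM_ARM_VCPU_SVE;
        ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);

        /* 2. Optionally configure: read the supported vector length set
         *    and, if desired, clear bits before writing it back.
         */
        uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
        struct kvm_one_reg reg = {
            .id   = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)(uintptr_t)vls,
        };
        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        /* ... drop unwanted vector lengths from vls[] here ... */
        ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);

        /* 3. Finalize: afterwards KVM_RUN and the SVE Z/P/FFR registers
         *    become usable and KVM_REG_ARM64_SVE_VLS becomes immutable.
         */
        int feature = KVM_ARM_VCPU_SVE;
        ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
    }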
5. The kvm_run structure
------------------------
@@ -4504,6 +4666,15 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
+6.75 KVM_CAP_PPC_IRQ_XIVE
+
+Architectures: ppc
+Target: vcpu
+Parameters: args[0] is the XIVE device fd
+ args[1] is the XIVE CPU number (server ID) for this vcpu
+
+This capability connects the vcpu to an in-kernel XIVE device.
+
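A minimal sketch of enabling this capability from user space (illustrative only; error handling omitted)::

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int connect_vcpu_to_xive(int vcpu_fd, int xive_dev_fd, int server_id)
    {
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_PPC_IRQ_XIVE;
        cap.args[0] = xive_dev_fd;   /* the XIVE device fd */
        cap.args[1] = server_id;     /* XIVE CPU number (server ID) for this vcpu */

        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }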
7. Capabilities that can be enabled on VMs
------------------------------------------
@@ -4797,9 +4968,9 @@ and injected exceptions.
* For the new DR6 bits, note that bit 16 is set iff the #DB exception
will clear DR6.RTM.
-7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
-Architectures: all
+Architectures: x86, arm, arm64, mips
Parameters: args[0] whether feature should be enabled or not
With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
@@ -4820,6 +4991,11 @@ while userspace can see false reports of dirty pages. Manual reprotection
helps reducing this time, improving guest performance and reducing the
number of dirty log false positives.
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
+it hard or impossible to use it correctly. The availability of
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
+Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
8. Other capabilities.
----------------------
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 95ca68d663a4..4ffb82b02468 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -141,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc {
u8 pcc[16]; # valid with Message-Security-Assist-Extension 4
u8 ppno[16]; # valid with Message-Security-Assist-Extension 5
u8 kma[16]; # valid with Message-Security-Assist-Extension 8
- u8 reserved[1808]; # reserved for future instructions
+ u8 kdsa[16]; # valid with Message-Security-Assist-Extension 9
+ u8 reserved[1792]; # reserved for future instructions
};
Parameters: address of a buffer to load the subfunction blocks from.
diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt
new file mode 100644
index 000000000000..9a24a4525253
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/xive.txt
@@ -0,0 +1,197 @@
+POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1)
+==========================================================
+
+Device types supported:
+ KVM_DEV_TYPE_XIVE POWER9 XIVE Interrupt Controller generation 1
+
+This device acts as a VM interrupt controller. It provides the KVM
+interface to configure the interrupt sources of a VM in the underlying
+POWER9 XIVE interrupt controller.
+
+Only one XIVE instance may be instantiated. A guest XIVE device
+requires a POWER9 host, and the guest OS should have support for the
+XIVE native exploitation interrupt mode. If not, it should run using
+the legacy interrupt mode, referred to as XICS (POWER7/8).
+
+* Device Mappings
+
+ The KVM device exposes different MMIO ranges of the XIVE HW which
+ are required for interrupt management. These are exposed to the
+ guest in VMAs populated with a custom VM fault handler.
+
+ 1. Thread Interrupt Management Area (TIMA)
+
+ Each thread has an associated Thread Interrupt Management context
+ composed of a set of registers. These registers let the thread
+ handle priority management and interrupt acknowledgment. The most
+ important are :
+
+ - Interrupt Pending Buffer (IPB)
+ - Current Processor Priority (CPPR)
+ - Notification Source Register (NSR)
+
+ They are exposed to software in four different pages, each providing
+ a view with a different privilege level. The first page is for the
+ physical thread context and the second for the hypervisor. Only the
+ third (operating system) and the fourth (user level) are exposed to
+ the guest.
+
+ 2. Event State Buffer (ESB)
+
+ Each source is associated with an Event State Buffer (ESB) composed
+ of a pair of even/odd pages which provide commands to manage the
+ source: to trigger it, to EOI it, or to turn it off, for instance.
+
+ 3. Device pass-through
+
+ When a device is passed-through into the guest, the source
+ interrupts are from a different HW controller (PHB4) and the ESB
+ pages exposed to the guest should accommodate this change.
+
+ The passthru_irq helpers, kvmppc_xive_set_mapped() and
+ kvmppc_xive_clr_mapped() are called when the device HW irqs are
+ mapped into or unmapped from the guest IRQ number space. The KVM
+ device extends these helpers to clear the ESB pages of the guest IRQ
+ number being mapped and then lets the VM fault handler repopulate.
+ The handler will insert the ESB page corresponding to the HW
+ interrupt of the device being passed-through or the initial IPI ESB
+ page if the device has been removed.
+
+ The ESB remapping is fully transparent to the guest and the OS
+ device driver. All handling is done within VFIO and the above
+ helpers in KVM-PPC.
+
+* Groups:
+
+ 1. KVM_DEV_XIVE_GRP_CTRL
+ Provides global controls on the device
+ Attributes:
+ 1.1 KVM_DEV_XIVE_RESET (write only)
+ Resets the interrupt controller configuration for sources and event
+ queues. To be used by kexec and kdump.
+ Errors: none
+
+ 1.2 KVM_DEV_XIVE_EQ_SYNC (write only)
+ Sync all the sources and queues and mark the EQ pages dirty. This
+ is to make sure that a consistent memory state is captured when
+ migrating the VM.
+ Errors: none
+
+ 2. KVM_DEV_XIVE_GRP_SOURCE (write only)
+ Initializes a new source in the XIVE device and masks it.
+ Attributes:
+ Interrupt source number (64-bit)
+ The kvm_device_attr.addr points to a __u64 value:
+ bits: | 63 .... 2 | 1 | 0
+ values: | unused | level | type
+ - type: 0:MSI 1:LSI
+ - level: assertion level in case of an LSI.
+ Errors:
+ -E2BIG: Interrupt source number is out of range
+ -ENOMEM: Could not create a new source block
+ -EFAULT: Invalid user pointer for attr->addr.
+ -ENXIO: Could not allocate underlying HW interrupt
+
+ 3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
+ Configures source targeting
+ Attributes:
+ Interrupt source number (64-bit)
+ The kvm_device_attr.addr points to a __u64 value:
+ bits: | 63 .... 33 | 32 | 31 .. 3 | 2 .. 0
+ values: | eisn | mask | server | priority
+ - priority: 0-7 interrupt priority level
+ - server: CPU number chosen to handle the interrupt
+ - mask: mask flag (unused)
+ - eisn: Effective Interrupt Source Number
+ Errors:
+ -ENOENT: Unknown source number
+ -EINVAL: Not initialized source number
+ -EINVAL: Invalid priority
+ -EINVAL: Invalid CPU number.
+ -EFAULT: Invalid user pointer for attr->addr.
+ -ENXIO: CPU event queues not configured or configuration of the
+ underlying HW interrupt failed
+ -EBUSY: No CPU available to serve interrupt
+
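As a rough illustration of the bit layout above (not part of the patch; it assumes a powerpc host whose uapi headers define KVM_DEV_XIVE_GRP_SOURCE_CONFIG), the attribute value could be packed and applied like this::

    #include <linux/kvm.h>
    #include <stdint.h>
    #include <sys/ioctl.h>

    static int xive_configure_source(int xive_dev_fd, uint64_t irq,
                                     uint64_t eisn, uint32_t server,
                                     uint32_t priority)
    {
        /* bits:   | 63 .... 33 | 32   | 31 .. 3 | 2 .. 0   |
         * values: | eisn       | mask | server  | priority |
         * (mask is unused, so it is left as 0 here)
         */
        uint64_t val = (eisn << 33) |
                       ((uint64_t)server << 3) |
                       (priority & 0x7);

        struct kvm_device_attr attr = {
            .group = KVM_DEV_XIVE_GRP_SOURCE_CONFIG,
            .attr  = irq,                         /* interrupt source number */
            .addr  = (uint64_t)(uintptr_t)&val,
        };

        return ioctl(xive_dev_fd, KVM_SET_DEVICE_ATTR, &attr);
    }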
+ 4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
+ Configures an event queue of a CPU
+ Attributes:
+ EQ descriptor identifier (64-bit)
+ The EQ descriptor identifier is a tuple (server, priority):
+ bits: | 63 .... 32 | 31 .. 3 | 2 .. 0
+ values: | unused | server | priority
+ The kvm_device_attr.addr points to :
+ struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+ };
+ - flags: queue flags
+ KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
+ forces notification without using the coalescing mechanism
+ provided by the XIVE END ESBs.
+ - qshift: queue size (power of 2)
+ - qaddr: real address of queue
+ - qtoggle: current queue toggle bit
+ - qindex: current queue index
+ - pad: reserved for future use
+ Errors:
+ -ENOENT: Invalid CPU number
+ -EINVAL: Invalid priority
+ -EINVAL: Invalid flags
+ -EINVAL: Invalid queue size
+ -EINVAL: Invalid queue address
+ -EFAULT: Invalid user pointer for attr->addr.
+ -EIO: Configuration of the underlying HW failed
+
+ 5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
+ Synchronize the source to flush event notifications
+ Attributes:
+ Interrupt source number (64-bit)
+ Errors:
+ -ENOENT: Unknown source number
+ -EINVAL: Not initialized source number
+
+* VCPU state
+
+ The XIVE IC maintains VP interrupt state in an internal structure
+ called the NVT. When a VP is not dispatched on a HW processor
+ thread, this structure can be updated by HW if the VP is the target
+ of an event notification.
+
+ It is important for migration to capture the cached IPB from the NVT
+ as it synthesizes the priorities of the pending interrupts. We
+ capture a bit more to report debug information.
+
+ KVM_REG_PPC_VP_STATE (2 * 64bits)
+ bits: | 63 .... 32 | 31 .... 0 |
+ values: | TIMA word0 | TIMA word1 |
+ bits: | 127 .......... 64 |
+ values: | unused |
+
+* Migration:
+
+ Saving the state of a VM using the XIVE native exploitation mode
+ should follow a specific sequence. When the VM is stopped:
+
+ 1. Mask all sources (PQ=01) to stop the flow of events.
+
+ 2. Sync the XIVE device with the KVM control KVM_DEV_XIVE_EQ_SYNC to
+ flush any in-flight event notification and to stabilize the EQs. At
+ this stage, the EQ pages are marked dirty to make sure they are
+ transferred in the migration sequence.
+
+ 3. Capture the state of the source targeting, the EQs configuration
+ and the state of thread interrupt context registers.
+
+ Restore is similar:
+
+ 1. Restore the EQ configuration, as targeting depends on it.
+ 2. Restore targeting
+ 3. Restore the thread interrupt contexts
+ 4. Restore the source states
+ 5. Let the vCPU run
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 44205f0b671f..ec1efa32af3c 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -189,20 +189,10 @@ the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use either::
- int hmm_vma_get_pfns(struct vm_area_struct *vma,
- struct hmm_range *range,
- unsigned long start,
- unsigned long end,
- hmm_pfn_t *pfns);
- int hmm_vma_fault(struct vm_area_struct *vma,
- struct hmm_range *range,
- unsigned long start,
- unsigned long end,
- hmm_pfn_t *pfns,
- bool write,
- bool block);
-
-The first one (hmm_vma_get_pfns()) will only fetch present CPU page table
+ long hmm_range_snapshot(struct hmm_range *range);
+ long hmm_range_fault(struct hmm_range *range, bool block);
+
+The first one (hmm_range_snapshot()) will only fetch present CPU page table
entries and will not trigger a page fault on missing or non-present entries.
The second one does trigger a page fault on missing or read-only entry if the
write parameter is true. Page faults use the generic mm page fault code path
@@ -220,25 +210,56 @@ respect in order to keep things properly synchronized. The usage pattern is::
{
struct hmm_range range;
...
+
+ range.start = ...;
+ range.end = ...;
+ range.pfns = ...;
+ range.flags = ...;
+ range.values = ...;
+ range.pfn_shift = ...;
+ hmm_range_register(&range);
+
+ /*
+ * Just wait for range to be valid, safe to ignore return value as we
+ * will use the return value of hmm_range_snapshot() below under the
+ * mmap_sem to ascertain the validity of the range.
+ */
+ hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
+
again:
- ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
- if (ret)
+ down_read(&mm->mmap_sem);
+ ret = hmm_range_snapshot(&range);
+ if (ret) {
+ up_read(&mm->mmap_sem);
+ if (ret == -EAGAIN) {
+ /*
+ * No need to check hmm_range_wait_until_valid() return value
+ * on retry we will get proper error with hmm_range_snapshot()
+ */
+ hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
+ goto again;
+ }
+ hmm_mirror_unregister(&range);
return ret;
+ }
take_lock(driver->update);
- if (!hmm_vma_range_done(vma, &range)) {
+ if (!range.valid) {
release_lock(driver->update);
+ up_read(&mm->mmap_sem);
goto again;
}
// Use pfns array content to update device page table
+ hmm_mirror_unregister(&range);
release_lock(driver->update);
+ up_read(&mm->mmap_sem);
return 0;
}
The driver->update lock is the same lock that the driver takes inside its
-update() callback. That lock must be held before hmm_vma_range_done() to avoid
-any race with a concurrent CPU page table update.
+update() callback. That lock must be held before checking the range.valid
+field to avoid any race with a concurrent CPU page table update.
HMM implements all this on top of the mmu_notifier API because we wanted a
simpler API and also to be able to perform optimizations latter on like doing
@@ -255,6 +276,41 @@ report commands as executed is serialized (there is no point in doing this
concurrently).
+Leverage default_flags and pfn_flags_mask
+=========================================
+
+The hmm_range struct has two fields, default_flags and pfn_flags_mask, that
+allow setting a fault or snapshot policy for a whole range instead of having
+to set it for each entry in the range.
+
+For instance if the device flags for device entries are:
+ VALID (1 << 63)
+ WRITE (1 << 62)
+
+Now let's say that the device driver wants to fault in a range with at least
+read permission; it then sets:
+ range->default_flags = (1 << 63)
+ range->pfn_flags_mask = 0;
+
+and calls hmm_range_fault() as described above. This will fault in all pages
+in the range with at least read permission.
+
+Now let's say the driver wants to do the same, except for one page in the
+range for which it wants write permission. The driver then sets:
+ range->default_flags = (1 << 63);
+ range->pfn_flags_mask = (1 << 62);
+ range->pfns[index_of_write] = (1 << 62);
+
+With this, HMM will fault in all pages with at least read permission (i.e.
+valid), and for the address == range->start + (index_of_write << PAGE_SHIFT)
+it will fault with write permission, i.e. if the CPU pte does not have write
+permission set then HMM will call handle_mm_fault().
+
+Note that HMM will populate the pfns array with write permission for any entry
+that has write permission within the CPU pte, no matter what values are set
+in default_flags or pfn_flags_mask.
+
+
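Condensing the two snippets above into one place, a kernel-side sketch could look like the following; the VALID/WRITE bit positions are the hypothetical device flags used in the text, and the helper name is the editor's own::

    #include <linux/hmm.h>

    #define DEV_PFN_VALID  (1ULL << 63)   /* hypothetical device flag */
    #define DEV_PFN_WRITE  (1ULL << 62)   /* hypothetical device flag */

    static void setup_range_policy(struct hmm_range *range,
                                   unsigned long index_of_write)
    {
        /* Every page in the range is faulted with at least read. */
        range->default_flags  = DEV_PFN_VALID;
        /* Allow per-entry WRITE requests through the mask ... */
        range->pfn_flags_mask = DEV_PFN_WRITE;
        /* ... and request write permission for this one entry. */
        range->pfns[index_of_write] = DEV_PFN_WRITE;

        /* hmm_range_fault(range, ...) is then called as described above. */
    }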
Represent and manage device memory from core kernel point of view
=================================================================
diff --git a/Documentation/vm/hugetlbfs_reserv.rst b/Documentation/vm/hugetlbfs_reserv.rst
index 9d200762114f..f143954e0d05 100644
--- a/Documentation/vm/hugetlbfs_reserv.rst
+++ b/Documentation/vm/hugetlbfs_reserv.rst
@@ -85,10 +85,10 @@ Reservation Map Location (Private or Shared)
A huge page mapping or segment is either private or shared. If private,
it is typically only available to a single address space (task). If shared,
it can be mapped into multiple address spaces (tasks). The location and
-semantics of the reservation map is significantly different for two types
+semantics of the reservation map is significantly different for the two types
of mappings. Location differences are:
-- For private mappings, the reservation map hangs off the the VMA structure.
+- For private mappings, the reservation map hangs off the VMA structure.
Specifically, vma->vm_private_data. This reserve map is created at the
time the mapping (mmap(MAP_PRIVATE)) is created.
- For shared mappings, the reservation map hangs off the inode. Specifically,
@@ -109,15 +109,15 @@ These operations result in a call to the routine hugetlb_reserve_pages()::
struct vm_area_struct *vma,
vm_flags_t vm_flags)
-The first thing hugetlb_reserve_pages() does is check for the NORESERVE
+The first thing hugetlb_reserve_pages() does is check if the NORESERVE
flag was specified in either the shmget() or mmap() call. If NORESERVE
-was specified, then this routine returns immediately as no reservation
+was specified, then this routine returns immediately as no reservations
are desired.
The arguments 'from' and 'to' are huge page indices into the mapping or
underlying file. For shmget(), 'from' is always 0 and 'to' corresponds to
the length of the segment/mapping. For mmap(), the offset argument could
-be used to specify the offset into the underlying file. In such a case
+be used to specify the offset into the underlying file. In such a case,
the 'from' and 'to' arguments have been adjusted by this offset.
One of the big differences between PRIVATE and SHARED mappings is the way
@@ -138,7 +138,8 @@ to indicate this VMA owns the reservations.
The reservation map is consulted to determine how many huge page reservations
are needed for the current mapping/segment. For private mappings, this is
-always the value (to - from). However, for shared mappings it is possible that some reservations may already exist within the range (to - from). See the
+always the value (to - from). However, for shared mappings it is possible that
+some reservations may already exist within the range (to - from). See the
section :ref:`Reservation Map Modifications <resv_map_modifications>`
for details on how this is accomplished.
@@ -165,7 +166,7 @@ these counters.
If there were enough free huge pages and the global count resv_huge_pages
was adjusted, then the reservation map associated with the mapping is
modified to reflect the reservations. In the case of a shared mapping, a
-file_region will exist that includes the range 'from' 'to'. For private
+file_region will exist that includes the range 'from' - 'to'. For private
mappings, no modifications are made to the reservation map as lack of an
entry indicates a reservation exists.
@@ -239,7 +240,7 @@ subpool accounting when the page is freed.
The routine vma_commit_reservation() is then called to adjust the reserve
map based on the consumption of the reservation. In general, this involves
ensuring the page is represented within a file_region structure of the region
-map. For shared mappings where the the reservation was present, an entry
+map. For shared mappings where the reservation was present, an entry
in the reserve map already existed so no change is made. However, if there
was no reservation in a shared mapping or this was a private mapping a new
entry must be created.
diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
index b58cc3bfe777..e8d943b21cf9 100644
--- a/Documentation/vm/index.rst
+++ b/Documentation/vm/index.rst
@@ -37,6 +37,7 @@ descriptions of data structures and algorithms.
hwpoison
hugetlbfs_reserv
ksm
+ memory-model
mmu_notifier
numa
overcommit-accounting
diff --git a/Documentation/vm/memory-model.rst b/Documentation/vm/memory-model.rst
new file mode 100644
index 000000000000..382f72ace1fc
--- /dev/null
+++ b/Documentation/vm/memory-model.rst
@@ -0,0 +1,183 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _physical_memory_model:
+
+=====================
+Physical Memory Model
+=====================
+
+Physical memory in a system may be addressed in different ways. The
+simplest case is when the physical memory starts at address 0 and
+spans a contiguous range up to the maximal address. It could be,
+however, that this range contains small holes that are not accessible
+for the CPU. Then there could be several contiguous ranges at
+completely distinct addresses. And, don't forget about NUMA, where
+different memory banks are attached to different CPUs.
+
+Linux abstracts this diversity using one of the three memory models:
+FLATMEM, DISCONTIGMEM and SPARSEMEM. Each architecture defines what
+memory models it supports, what the default memory model is and
+whether it is possible to manually override that default.
+
+.. note::
+ At time of this writing, DISCONTIGMEM is considered deprecated,
+ although it is still in use by several architectures.
+
+All the memory models track the status of physical page frames using
+:c:type:`struct page` arranged in one or more arrays.
+
+Regardless of the selected memory model, there exists a one-to-one
+mapping between the physical page frame number (PFN) and the
+corresponding `struct page`.
+
+Each memory model defines :c:func:`pfn_to_page` and :c:func:`page_to_pfn`
+helpers that allow the conversion from PFN to `struct page` and vice
+versa.
+
+FLATMEM
+=======
+
+The simplest memory model is FLATMEM. This model is suitable for
+non-NUMA systems with contiguous, or mostly contiguous, physical
+memory.
+
+In the FLATMEM memory model, there is a global `mem_map` array that
+maps the entire physical memory. For most architectures, the holes
+have entries in the `mem_map` array. The `struct page` objects
+corresponding to the holes are never fully initialized.
+
+To allocate the `mem_map` array, architecture-specific setup code
+should call the :c:func:`free_area_init_node` function or its convenience
+wrapper :c:func:`free_area_init`. Yet, the mappings array is not
+usable until the call to :c:func:`memblock_free_all` that hands all
+the memory to the page allocator.
+
+If an architecture enables `CONFIG_ARCH_HAS_HOLES_MEMORYMODEL` option,
+it may free parts of the `mem_map` array that do not cover the
+actual physical pages. In such a case, the architecture-specific
+:c:func:`pfn_valid` implementation should take the holes in the
+`mem_map` into account.
+
+With FLATMEM, the conversion between a PFN and the `struct page` is
+straightforward: `PFN - ARCH_PFN_OFFSET` is an index to the
+`mem_map` array.
+
+The `ARCH_PFN_OFFSET` defines the first page frame number for
+systems with physical memory starting at address different from 0.
+
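Expressed as code, the FLATMEM conversion described above is essentially the following; this is a conceptual illustration with editor-chosen names, while the real helpers live in include/asm-generic/memory_model.h::

    /* FLATMEM: a PFN is just an offset into the global mem_map array. */
    #define flatmem_pfn_to_page(pfn)   (mem_map + ((pfn) - ARCH_PFN_OFFSET))
    #define flatmem_page_to_pfn(page)  ((unsigned long)((page) - mem_map) + \
                                        ARCH_PFN_OFFSET)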
+DISCONTIGMEM
+============
+
+The DISCONTIGMEM model treats the physical memory as a collection of
+`nodes` similarly to how Linux NUMA support does. For each node Linux
+constructs an independent memory management subsystem represented by
+`struct pglist_data` (or `pg_data_t` for short). Among other
+things, `pg_data_t` holds the `node_mem_map` array that maps
+physical pages belonging to that node. The `node_start_pfn` field of
+`pg_data_t` is the number of the first page frame belonging to that
+node.
+
+The architecture setup code should call :c:func:`free_area_init_node` for
+each node in the system to initialize the `pg_data_t` object and its
+`node_mem_map`.
+
+Every `node_mem_map` behaves exactly as FLATMEM's `mem_map` -
+every physical page frame in a node has a `struct page` entry in the
+`node_mem_map` array. When DISCONTIGMEM is enabled, a portion of the
+`flags` field of the `struct page` encodes the node number of the
+node hosting that page.
+
+The conversion between a PFN and the `struct page` in the
+DISCONTIGMEM model is slightly more complex as it has to determine
+which node hosts the physical page and which `pg_data_t` object
+holds the `struct page`.
+
+Architectures that support DISCONTIGMEM provide :c:func:`pfn_to_nid`
+to convert PFN to the node number. The opposite conversion helper
+:c:func:`page_to_nid` is generic as it uses the node number encoded in
+page->flags.
+
+Once the node number is known, the PFN can be used to index
+the appropriate `node_mem_map` array to access the `struct page`, and
+the offset of the `struct page` from the `node_mem_map` plus
+`node_start_pfn` is the PFN of that page.
+
+SPARSEMEM
+=========
+
+SPARSEMEM is the most versatile memory model available in Linux and it
+is the only memory model that supports several advanced features such
+as hot-plug and hot-remove of the physical memory, alternative memory
+maps for non-volatile memory devices and deferred initialization of
+the memory map for larger systems.
+
+The SPARSEMEM model presents the physical memory as a collection of
+sections. A section is represented with :c:type:`struct mem_section`
+that contains `section_mem_map` that is, logically, a pointer to an
+array of struct pages. However, it is stored with some other magic
+that aids section management. The section size and maximal number
+of sections are specified using the `SECTION_SIZE_BITS` and
+`MAX_PHYSMEM_BITS` constants defined by each architecture that
+supports SPARSEMEM. While `MAX_PHYSMEM_BITS` is the actual width of a
+physical address that an architecture supports,
+`SECTION_SIZE_BITS` is an arbitrary value.
+
+The maximal number of sections is denoted `NR_MEM_SECTIONS` and
+defined as
+
+.. math::
+
+ NR\_MEM\_SECTIONS = 2 ^ {(MAX\_PHYSMEM\_BITS - SECTION\_SIZE\_BITS)}
+
+The `mem_section` objects are arranged in a two-dimensional array
+called `mem_sections`. The size and placement of this array depend
+on `CONFIG_SPARSEMEM_EXTREME` and the maximal possible number of
+sections:
+
+* When `CONFIG_SPARSEMEM_EXTREME` is disabled, the `mem_sections`
+ array is static and has `NR_MEM_SECTIONS` rows. Each row holds a
+ single `mem_section` object.
+* When `CONFIG_SPARSEMEM_EXTREME` is enabled, the `mem_sections`
+ array is dynamically allocated. Each row contains PAGE_SIZE worth of
+ `mem_section` objects and the number of rows is calculated to fit
+ all the memory sections.
+
+The architecture setup code should call :c:func:`memory_present` for
+each active memory range or use :c:func:`memblocks_present` or
+:c:func:`sparse_memory_present_with_active_regions` wrappers to
+initialize the memory sections. Next, the actual memory maps should be
+set up using :c:func:`sparse_init`.
+
+With SPARSEMEM there are two possible ways to convert a PFN to the
+corresponding `struct page` - a "classic sparse" and "sparse
+vmemmap". The selection is made at build time and it is determined by
+the value of `CONFIG_SPARSEMEM_VMEMMAP`.
+
+The classic sparse encodes the section number of a page in page->flags
+and uses high bits of a PFN to access the section that maps that page
+frame. Inside a section, the PFN is the index to the array of pages.
+
+The sparse vmemmap uses a virtually mapped memory map to optimize
+pfn_to_page and page_to_pfn operations. There is a global `struct
+page *vmemmap` pointer that points to a virtually contiguous array of
+`struct page` objects. A PFN is an index to that array and the
+offset of the `struct page` from `vmemmap` is the PFN of that
+page.
+
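Conceptually, the vmemmap conversion therefore reduces to plain pointer arithmetic (illustration with editor-chosen names)::

    /* SPARSEMEM_VMEMMAP: the PFN is a direct index into vmemmap. */
    #define vmemmap_pfn_to_page(pfn)   (vmemmap + (pfn))
    #define vmemmap_page_to_pfn(page)  ((unsigned long)((page) - vmemmap))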
+To use vmemmap, an architecture has to reserve a range of virtual
+addresses that will map the physical pages containing the memory
+map and make sure that `vmemmap` points to that range. In addition,
+the architecture should implement :c:func:`vmemmap_populate` method
+that will allocate the physical memory and create page tables for the
+virtual memory map. If an architecture does not have any special
+requirements for the vmemmap mappings, it can use default
+:c:func:`vmemmap_populate_basepages` provided by the generic memory
+management.
+
+The virtually mapped memory map allows storing `struct page` objects
+for persistent memory devices in pre-allocated storage on those
+devices. This storage is represented with :c:type:`struct vmem_altmap`
+that is eventually passed to vmemmap_populate() through a long chain
+of function calls. The vmemmap_populate() implementation may use the
+`vmem_altmap` along with :c:func:`altmap_alloc_block_buf` helper to
+allocate memory map on the persistent memory device.
diff --git a/Documentation/vm/numa.rst b/Documentation/vm/numa.rst
index 185d8a568168..5cae13e9a08b 100644
--- a/Documentation/vm/numa.rst
+++ b/Documentation/vm/numa.rst
@@ -109,8 +109,8 @@ System administrators and application designers can restrict a task's migration
to improve NUMA locality using various CPU affinity command line interfaces,
such as taskset(1) and numactl(1), and program interfaces such as
sched_setaffinity(2). Further, one can modify the kernel's default local
-allocation behavior using Linux NUMA memory policy.
-[see Documentation/admin-guide/mm/numa_memory_policy.rst.]
+allocation behavior using Linux NUMA memory policy. [see
+:ref:`Documentation/admin-guide/mm/numa_memory_policy.rst <numa_memory_policy>`].
System administrators can restrict the CPUs and nodes' memories that a non-
privileged user can specify in the scheduling or NUMA commands and functions
diff --git a/Documentation/vm/transhuge.rst b/Documentation/vm/transhuge.rst
index a8cf6809e36e..37c57ca32629 100644
--- a/Documentation/vm/transhuge.rst
+++ b/Documentation/vm/transhuge.rst
@@ -4,8 +4,9 @@
Transparent Hugepage Support
============================
-This document describes design principles Transparent Hugepage (THP)
-Support and its interaction with other parts of the memory management.
+This document describes design principles for Transparent Hugepage (THP)
+support and its interaction with other parts of the memory management
+system.
Design principles
=================
@@ -37,31 +38,25 @@ get_user_pages and follow_page
get_user_pages and follow_page if run on a hugepage, will return the
head or tail pages as usual (exactly as they would do on
-hugetlbfs). Most gup users will only care about the actual physical
+hugetlbfs). Most GUP users will only care about the actual physical
address of the page and its temporary pinning to release after the I/O
is complete, so they won't ever notice the fact the page is huge. But
if any driver is going to mangle over the page structure of the tail
page (like for checking page->mapping or other bits that are relevant
for the head page and not the tail page), it should be updated to jump
-to check head page instead. Taking reference on any head/tail page would
-prevent page from being split by anyone.
+to check head page instead. Taking a reference on any head/tail page would
+prevent the page from being split by anyone.
.. note::
these aren't new constraints to the GUP API, and they match the
- same constrains that applies to hugetlbfs too, so any driver capable
+ same constraints that apply to hugetlbfs too, so any driver capable
of handling GUP on hugetlbfs will also work fine on transparent
hugepage backed mappings.
In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as parameter to
+follow_page, the FOLL_SPLIT bit can be specified as a parameter to
follow_page, so that it will split the hugepages before returning
-them. Migration for example passes FOLL_SPLIT as parameter to
-follow_page because it's not hugepage aware and in fact it can't work
-at all on hugetlbfs (but it instead works fine on transparent
-hugepages thanks to FOLL_SPLIT). migration simply can't deal with
-hugepages being returned (as it's not only checking the pfn of the
-page and pinning it during the copy but it pretends to migrate the
-memory in regular page sizes and with regular pte/pmd mappings).
+them.
Graceful fallback
=================
@@ -72,11 +67,11 @@ pmd_offset. It's trivial to make the code transparent hugepage aware
by just grepping for "pmd_offset" and adding split_huge_pmd where
missing after pmd_offset returns the pmd. Thanks to the graceful
fallback design, with a one liner change, you can avoid to write
-hundred if not thousand of lines of complex code to make your code
+hundreds if not thousands of lines of complex code to make your code
hugepage aware.
If you're not walking pagetables but you run into a physical hugepage
-but you can't handle it natively in your code, you can split it by
+that you can't handle natively in your code, you can split it by
calling split_huge_page(page). This is what the Linux VM does before
it tries to swapout the hugepage for example. split_huge_page() can fail
if the page is pinned and you must handle this correctly.
@@ -103,18 +98,18 @@ split_huge_page() or split_huge_pmd() has a cost.
To make pagetable walks huge pmd aware, all you need to do is to call
pmd_trans_huge() on the pmd returned by pmd_offset. You must hold the
-mmap_sem in read (or write) mode to be sure an huge pmd cannot be
+mmap_sem in read (or write) mode to be sure a huge pmd cannot be
created from under you by khugepaged (khugepaged collapse_huge_page
takes the mmap_sem in write mode in addition to the anon_vma lock). If
pmd_trans_huge returns false, you just fallback in the old code
paths. If instead pmd_trans_huge returns true, you have to take the
page table lock (pmd_lock()) and re-run pmd_trans_huge. Taking the
-page table lock will prevent the huge pmd to be converted into a
+page table lock will prevent the huge pmd from being converted into a
regular pmd from under you (split_huge_pmd can run in parallel to the
pagetable walk). If the second pmd_trans_huge returns false, you
should just drop the page table lock and fallback to the old code as
-before. Otherwise you can proceed to process the huge pmd and the
-hugepage natively. Once finished you can drop the page table lock.
+before. Otherwise, you can proceed to process the huge pmd and the
+hugepage natively. Once finished, you can drop the page table lock.
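A compact sketch of the locking pattern just described follows (kernel-side, error paths trimmed, helper name chosen by the editor; the caller is assumed to hold mmap_sem for read)::

    #include <linux/huge_mm.h>
    #include <linux/mm.h>

    static void walk_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                             unsigned long addr)
    {
        spinlock_t *ptl;

        if (pmd_trans_huge(*pmd)) {
            ptl = pmd_lock(vma->vm_mm, pmd);
            if (pmd_trans_huge(*pmd)) {
                /* Stable huge pmd: process the hugepage natively. */
                /* ... work on the huge pmd covering 'addr' ... */
                spin_unlock(ptl);
                return;
            }
            /* A split raced with us: fall through to the pte path. */
            spin_unlock(ptl);
        }
        /* ... regular pte-level processing ... */
    }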
Refcounts and transparent huge pages
====================================
@@ -122,61 +117,61 @@ Refcounts and transparent huge pages
Refcounting on THP is mostly consistent with refcounting on other compound
pages:
- - get_page()/put_page() and GUP operate in head page's ->_refcount.
+ - get_page()/put_page() and GUP operate on head page's ->_refcount.
- ->_refcount in tail pages is always zero: get_page_unless_zero() never
- succeed on tail pages.
+ succeeds on tail pages.
- map/unmap of the pages with PTE entry increment/decrement ->_mapcount
on relevant sub-page of the compound page.
- - map/unmap of the whole compound page accounted in compound_mapcount
+ - map/unmap of the whole compound page is accounted for in compound_mapcount
(stored in first tail page). For file huge pages, we also increment
->_mapcount of all sub-pages in order to have race-free detection of
last unmap of subpages.
PageDoubleMap() indicates that the page is *possibly* mapped with PTEs.
-For anonymous pages PageDoubleMap() also indicates ->_mapcount in all
+For anonymous pages, PageDoubleMap() also indicates ->_mapcount in all
subpages is offset up by one. This additional reference is required to
get race-free detection of unmap of subpages when we have them mapped with
both PMDs and PTEs.
-This is optimization required to lower overhead of per-subpage mapcount
-tracking. The alternative is alter ->_mapcount in all subpages on each
+This optimization is required to lower the overhead of per-subpage mapcount
+tracking. The alternative is to alter ->_mapcount in all subpages on each
map/unmap of the whole compound page.
-For anonymous pages, we set PG_double_map when a PMD of the page got split
-for the first time, but still have PMD mapping. The additional references
-go away with last compound_mapcount.
+For anonymous pages, we set PG_double_map when a PMD of the page is split
+for the first time, but still have a PMD mapping. The additional references
+go away with the last compound_mapcount.
-File pages get PG_double_map set on first map of the page with PTE and
-goes away when the page gets evicted from page cache.
+File pages get PG_double_map set on the first map of the page with PTE, and
+it goes away when the page gets evicted from the page cache.
split_huge_page internally has to distribute the refcounts in the head
page to the tail pages before clearing all PG_head/tail bits from the page
structures. It can be done easily for refcounts taken by page table
-entries. But we don't have enough information on how to distribute any
+entries, but we don't have enough information on how to distribute any
additional pins (i.e. from get_user_pages). split_huge_page() fails any
-requests to split pinned huge page: it expects page count to be equal to
-sum of mapcount of all sub-pages plus one (split_huge_page caller must
-have reference for head page).
+requests to split pinned huge pages: it expects page count to be equal to
+the sum of mapcount of all sub-pages plus one (split_huge_page caller must
+have a reference to the head page).
split_huge_page uses migration entries to stabilize page->_refcount and
-page->_mapcount of anonymous pages. File pages just got unmapped.
+page->_mapcount of anonymous pages. File pages just get unmapped.
-We safe against physical memory scanners too: the only legitimate way
-scanner can get reference to a page is get_page_unless_zero().
+We are safe against physical memory scanners too: the only legitimate way
+a scanner can get a reference to a page is get_page_unless_zero().
All tail pages have zero ->_refcount until atomic_add(). This prevents the
scanner from getting a reference to the tail page up to that point. After the
-atomic_add() we don't care about the ->_refcount value. We already known how
+atomic_add() we don't care about the ->_refcount value. We already know how
many references should be uncharged from the head page.
For head page get_page_unless_zero() will succeed and we don't mind. It's
-clear where reference should go after split: it will stay on head page.
+clear where the reference should go after split: it will stay on the head page.
-Note that split_huge_pmd() doesn't have any limitation on refcounting:
+Note that split_huge_pmd() doesn't have any limitations on refcounting:
pmd can be split at any point and never fails.
Partial unmap and deferred_split_huge_page()
@@ -188,10 +183,10 @@ in page_remove_rmap() and queue the THP for splitting if memory pressure
comes. Splitting will free up unused subpages.
Splitting the page right away is not an option due to locking context in
-the place where we can detect partial unmap. It's also might be
+the place where we can detect partial unmap. It also might be
counterproductive since in many cases partial unmap happens during exit(2) if
a THP crosses a VMA boundary.
-Function deferred_split_huge_page() is used to queue page for splitting.
+The function deferred_split_huge_page() is used to queue a page for splitting.
The splitting itself will happen when we get memory pressure via shrinker
interface.
diff --git a/Documentation/x86/amd-memory-encryption.txt b/Documentation/x86/amd-memory-encryption.rst
index afc41f544dab..c48d452d0718 100644
--- a/Documentation/x86/amd-memory-encryption.txt
+++ b/Documentation/x86/amd-memory-encryption.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+AMD Memory Encryption
+=====================
+
Secure Memory Encryption (SME) and Secure Encrypted Virtualization (SEV) are
features found on AMD processors.
@@ -34,7 +40,7 @@ is operating in 64-bit or 32-bit PAE mode, in all other modes the SEV hardware
forces the memory encryption bit to 1.
Support for SME and SEV can be determined through the CPUID instruction. The
-CPUID function 0x8000001f reports information related to SME:
+CPUID function 0x8000001f reports information related to SME::
0x8000001f[eax]:
Bit[0] indicates support for SME
@@ -48,14 +54,14 @@ CPUID function 0x8000001f reports information related to SME:
addresses)
If support for SME is present, MSR 0xc00100010 (MSR_K8_SYSCFG) can be used to
-determine if SME is enabled and/or to enable memory encryption:
+determine if SME is enabled and/or to enable memory encryption::
0xc0010010:
Bit[23] 0 = memory encryption features are disabled
1 = memory encryption features are enabled
If SEV is supported, MSR 0xc0010131 (MSR_AMD64_SEV) can be used to determine if
-SEV is active:
+SEV is active::
0xc0010131:
Bit[0] 0 = memory encryption is not active
@@ -68,6 +74,7 @@ requirements for the system. If this bit is not set upon Linux startup then
Linux itself will not set it and memory encryption will not be possible.
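
As an illustration, a minimal user-space sketch of the CPUID check described
above, assuming a GCC/clang toolchain that provides <cpuid.h>; only the SME
support bit documented here is examined::

  #include <cpuid.h>
  #include <stdio.h>

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx;

      /* CPUID function 0x8000001f, as described above */
      if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
          printf("CPUID leaf 0x8000001f is not available\n");
          return 1;
      }
      printf("SME supported: %s\n", (eax & 1) ? "yes" : "no");  /* Bit[0] */
      return 0;
  }

Enabling the features still requires the MSR bits described above, which only
kernel code (or a tool such as rdmsr) can read.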
The state of SME in the Linux kernel can be documented as follows:
+
- Supported:
The CPU supports SME (determined through CPUID instruction).
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.rst
index f4c2a97bfdbd..08a2f100c0e6 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.rst
@@ -1,5 +1,8 @@
- THE LINUX/x86 BOOT PROTOCOL
- ---------------------------
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+The Linux/x86 Boot Protocol
+===========================
On the x86 platform, the Linux kernel uses a rather complicated boot
convention. This has evolved partially due to historical aspects, as
@@ -10,84 +13,91 @@ real-mode DOS as a mainstream operating system.
Currently, the following versions of the Linux/x86 boot protocol exist.
-Old kernels: zImage/Image support only. Some very early kernels
+============= ============================================================
+Old kernels zImage/Image support only. Some very early kernels
may not even support a command line.
-Protocol 2.00: (Kernel 1.3.73) Added bzImage and initrd support, as
+Protocol 2.00 (Kernel 1.3.73) Added bzImage and initrd support, as
well as a formalized way to communicate between the
boot loader and the kernel. setup.S made relocatable,
although the traditional setup area still assumed
writable.
-Protocol 2.01: (Kernel 1.3.76) Added a heap overrun warning.
+Protocol 2.01 (Kernel 1.3.76) Added a heap overrun warning.
-Protocol 2.02: (Kernel 2.4.0-test3-pre3) New command line protocol.
+Protocol 2.02 (Kernel 2.4.0-test3-pre3) New command line protocol.
Lower the conventional memory ceiling. No overwrite
of the traditional setup area, thus making booting
safe for systems which use the EBDA from SMM or 32-bit
BIOS entry points. zImage deprecated but still
supported.
-Protocol 2.03: (Kernel 2.4.18-pre1) Explicitly makes the highest possible
+Protocol 2.03 (Kernel 2.4.18-pre1) Explicitly makes the highest possible
initrd address available to the bootloader.
-Protocol 2.04: (Kernel 2.6.14) Extend the syssize field to four bytes.
+Protocol 2.04 (Kernel 2.6.14) Extend the syssize field to four bytes.
-Protocol 2.05: (Kernel 2.6.20) Make protected mode kernel relocatable.
+Protocol 2.05 (Kernel 2.6.20) Make protected mode kernel relocatable.
Introduce relocatable_kernel and kernel_alignment fields.
-Protocol 2.06: (Kernel 2.6.22) Added a field that contains the size of
+Protocol 2.06 (Kernel 2.6.22) Added a field that contains the size of
the boot command line.
-Protocol 2.07: (Kernel 2.6.24) Added paravirtualised boot protocol.
+Protocol 2.07 (Kernel 2.6.24) Added paravirtualised boot protocol.
Introduced hardware_subarch and hardware_subarch_data
and KEEP_SEGMENTS flag in load_flags.
-Protocol 2.08: (Kernel 2.6.26) Added crc32 checksum and ELF format
+Protocol 2.08 (Kernel 2.6.26) Added crc32 checksum and ELF format
payload. Introduced payload_offset and payload_length
fields to aid in locating the payload.
-Protocol 2.09: (Kernel 2.6.26) Added a field of 64-bit physical
+Protocol 2.09 (Kernel 2.6.26) Added a field of 64-bit physical
pointer to single linked list of struct setup_data.
-Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
+Protocol 2.10 (Kernel 2.6.31) Added a protocol for relaxed alignment
beyond the kernel_alignment added, new init_size and
pref_address fields. Added extended boot loader IDs.
-Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover
+Protocol 2.11 (Kernel 3.6) Added a field for offset of EFI handover
protocol entry point.
-Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
+Protocol 2.12 (Kernel 3.8) Added the xloadflags field and extension fields
to struct boot_params for loading bzImage and ramdisk
above 4G in 64bit.
-**** MEMORY LAYOUT
+Protocol 2.13 (Kernel 3.14) Support 32- and 64-bit flags being set in
+ xloadflags to support booting a 64-bit kernel from 32-bit
+ EFI
+============= ============================================================
-The traditional memory map for the kernel loader, used for Image or
-zImage kernels, typically looks like:
-
- | |
-0A0000 +------------------------+
- | Reserved for BIOS | Do not use. Reserved for BIOS EBDA.
-09A000 +------------------------+
- | Command line |
- | Stack/heap | For use by the kernel real-mode code.
-098000 +------------------------+
- | Kernel setup | The kernel real-mode code.
-090200 +------------------------+
- | Kernel boot sector | The kernel legacy boot sector.
-090000 +------------------------+
- | Protected-mode kernel | The bulk of the kernel image.
-010000 +------------------------+
- | Boot loader | <- Boot sector entry point 0000:7C00
-001000 +------------------------+
- | Reserved for MBR/BIOS |
-000800 +------------------------+
- | Typically used by MBR |
-000600 +------------------------+
- | BIOS use only |
-000000 +------------------------+
+Memory Layout
+=============
+
+The traditional memory map for the kernel loader, used for Image or
+zImage kernels, typically looks like::
+
+ | |
+ 0A0000 +------------------------+
+ | Reserved for BIOS | Do not use. Reserved for BIOS EBDA.
+ 09A000 +------------------------+
+ | Command line |
+ | Stack/heap | For use by the kernel real-mode code.
+ 098000 +------------------------+
+ | Kernel setup | The kernel real-mode code.
+ 090200 +------------------------+
+ | Kernel boot sector | The kernel legacy boot sector.
+ 090000 +------------------------+
+ | Protected-mode kernel | The bulk of the kernel image.
+ 010000 +------------------------+
+ | Boot loader | <- Boot sector entry point 0000:7C00
+ 001000 +------------------------+
+ | Reserved for MBR/BIOS |
+ 000800 +------------------------+
+ | Typically used by MBR |
+ 000600 +------------------------+
+ | BIOS use only |
+ 000000 +------------------------+
When using bzImage, the protected-mode kernel was relocated to
0x100000 ("high memory"), and the kernel real-mode block (boot sector,
@@ -112,36 +122,36 @@ zImage or old bzImage kernels, which need data written into the
above the 0x9A000 point; too many BIOSes will break above that point.
For a modern bzImage kernel with boot protocol version >= 2.02, a
-memory layout like the following is suggested:
-
- ~ ~
- | Protected-mode kernel |
-100000 +------------------------+
- | I/O memory hole |
-0A0000 +------------------------+
- | Reserved for BIOS | Leave as much as possible unused
- ~ ~
- | Command line | (Can also be below the X+10000 mark)
-X+10000 +------------------------+
- | Stack/heap | For use by the kernel real-mode code.
-X+08000 +------------------------+
- | Kernel setup | The kernel real-mode code.
- | Kernel boot sector | The kernel legacy boot sector.
-X +------------------------+
- | Boot loader | <- Boot sector entry point 0000:7C00
-001000 +------------------------+
- | Reserved for MBR/BIOS |
-000800 +------------------------+
- | Typically used by MBR |
-000600 +------------------------+
- | BIOS use only |
-000000 +------------------------+
-
-... where the address X is as low as the design of the boot loader
-permits.
-
-
-**** THE REAL-MODE KERNEL HEADER
+memory layout like the following is suggested::
+
+ ~ ~
+ | Protected-mode kernel |
+ 100000 +------------------------+
+ | I/O memory hole |
+ 0A0000 +------------------------+
+ | Reserved for BIOS | Leave as much as possible unused
+ ~ ~
+ | Command line | (Can also be below the X+10000 mark)
+ X+10000 +------------------------+
+ | Stack/heap | For use by the kernel real-mode code.
+ X+08000 +------------------------+
+ | Kernel setup | The kernel real-mode code.
+ | Kernel boot sector | The kernel legacy boot sector.
+ X +------------------------+
+ | Boot loader | <- Boot sector entry point 0000:7C00
+ 001000 +------------------------+
+ | Reserved for MBR/BIOS |
+ 000800 +------------------------+
+ | Typically used by MBR |
+ 000600 +------------------------+
+ | BIOS use only |
+ 000000 +------------------------+
+
+... where the address X is as low as the design of the boot loader permits.
+
+
+The Real-Mode Kernel Header
+===========================
In the following text, and anywhere in the kernel boot sequence, "a
sector" refers to 512 bytes. It is independent of the actual sector
@@ -155,61 +165,63 @@ sectors (1K) and then examine the bootup sector size.
The header looks like:
-Offset Proto Name Meaning
-/Size
-
-01F1/1 ALL(1 setup_sects The size of the setup in sectors
-01F2/2 ALL root_flags If set, the root is mounted readonly
-01F4/4 2.04+(2 syssize The size of the 32-bit code in 16-byte paras
-01F8/2 ALL ram_size DO NOT USE - for bootsect.S use only
-01FA/2 ALL vid_mode Video mode control
-01FC/2 ALL root_dev Default root device number
-01FE/2 ALL boot_flag 0xAA55 magic number
-0200/2 2.00+ jump Jump instruction
-0202/4 2.00+ header Magic signature "HdrS"
-0206/2 2.00+ version Boot protocol version supported
-0208/4 2.00+ realmode_swtch Boot loader hook (see below)
-020C/2 2.00+ start_sys_seg The load-low segment (0x1000) (obsolete)
-020E/2 2.00+ kernel_version Pointer to kernel version string
-0210/1 2.00+ type_of_loader Boot loader identifier
-0211/1 2.00+ loadflags Boot protocol option flags
-0212/2 2.00+ setup_move_size Move to high memory size (used with hooks)
-0214/4 2.00+ code32_start Boot loader hook (see below)
-0218/4 2.00+ ramdisk_image initrd load address (set by boot loader)
-021C/4 2.00+ ramdisk_size initrd size (set by boot loader)
-0220/4 2.00+ bootsect_kludge DO NOT USE - for bootsect.S use only
-0224/2 2.01+ heap_end_ptr Free memory after setup end
-0226/1 2.02+(3 ext_loader_ver Extended boot loader version
-0227/1 2.02+(3 ext_loader_type Extended boot loader ID
-0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
-022C/4 2.03+ initrd_addr_max Highest legal initrd address
-0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
-0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
-0235/1 2.10+ min_alignment Minimum alignment, as a power of two
-0236/2 2.12+ xloadflags Boot protocol option flags
-0238/4 2.06+ cmdline_size Maximum size of the kernel command line
-023C/4 2.07+ hardware_subarch Hardware subarchitecture
-0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
-0248/4 2.08+ payload_offset Offset of kernel payload
-024C/4 2.08+ payload_length Length of kernel payload
-0250/8 2.09+ setup_data 64-bit physical pointer to linked list
- of struct setup_data
-0258/8 2.10+ pref_address Preferred loading address
-0260/4 2.10+ init_size Linear memory required during initialization
-0264/4 2.11+ handover_offset Offset of handover entry point
-
-(1) For backwards compatibility, if the setup_sects field contains 0, the
- real value is 4.
-
-(2) For boot protocol prior to 2.04, the upper two bytes of the syssize
- field are unusable, which means the size of a bzImage kernel
- cannot be determined.
-
-(3) Ignored, but safe to set, for boot protocols 2.02-2.09.
+=========== ======== ===================== ============================================
+Offset/Size Proto Name Meaning
+=========== ======== ===================== ============================================
+01F1/1 ALL(1) setup_sects The size of the setup in sectors
+01F2/2 ALL root_flags If set, the root is mounted readonly
+01F4/4 2.04+(2) syssize The size of the 32-bit code in 16-byte paras
+01F8/2 ALL ram_size DO NOT USE - for bootsect.S use only
+01FA/2 ALL vid_mode Video mode control
+01FC/2 ALL root_dev Default root device number
+01FE/2 ALL boot_flag 0xAA55 magic number
+0200/2 2.00+ jump Jump instruction
+0202/4 2.00+ header Magic signature "HdrS"
+0206/2 2.00+ version Boot protocol version supported
+0208/4 2.00+ realmode_swtch Boot loader hook (see below)
+020C/2 2.00+ start_sys_seg The load-low segment (0x1000) (obsolete)
+020E/2 2.00+ kernel_version Pointer to kernel version string
+0210/1 2.00+ type_of_loader Boot loader identifier
+0211/1 2.00+ loadflags Boot protocol option flags
+0212/2 2.00+ setup_move_size Move to high memory size (used with hooks)
+0214/4 2.00+ code32_start Boot loader hook (see below)
+0218/4 2.00+ ramdisk_image initrd load address (set by boot loader)
+021C/4 2.00+ ramdisk_size initrd size (set by boot loader)
+0220/4 2.00+ bootsect_kludge DO NOT USE - for bootsect.S use only
+0224/2 2.01+ heap_end_ptr Free memory after setup end
+0226/1 2.02+(3) ext_loader_ver Extended boot loader version
+0227/1 2.02+(3) ext_loader_type Extended boot loader ID
+0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
+022C/4 2.03+ initrd_addr_max Highest legal initrd address
+0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
+0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
+0235/1 2.10+ min_alignment Minimum alignment, as a power of two
+0236/2 2.12+ xloadflags Boot protocol option flags
+0238/4 2.06+ cmdline_size Maximum size of the kernel command line
+023C/4 2.07+ hardware_subarch Hardware subarchitecture
+0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data
+0248/4 2.08+ payload_offset Offset of kernel payload
+024C/4 2.08+ payload_length Length of kernel payload
+0250/8 2.09+ setup_data 64-bit physical pointer to linked list
+ of struct setup_data
+0258/8 2.10+ pref_address Preferred loading address
+0260/4 2.10+ init_size Linear memory required during initialization
+0264/4 2.11+ handover_offset Offset of handover entry point
+=========== ======== ===================== ============================================
+
+.. note::
+ (1) For backwards compatibility, if the setup_sects field contains 0, the
+ real value is 4.
+
+ (2) For boot protocol prior to 2.04, the upper two bytes of the syssize
+ field are unusable, which means the size of a bzImage kernel
+ cannot be determined.
+
+ (3) Ignored, but safe to set, for boot protocols 2.02-2.09.
If the "HdrS" (0x53726448) magic number is not found at offset 0x202,
the boot protocol version is "old". Loading an old kernel, the
-following parameters should be assumed:
+following parameters should be assumed::
Image type = zImage
initrd not supported
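
A small C sketch of this detection, where 'image' (a name used only in this
sketch) points at the first byte of the kernel file; reporting the "old"
protocol as 0x0100 is a convention of the sketch, not of the header, and a
little-endian host is assumed::

  #include <stdint.h>
  #include <string.h>

  /* Returns the boot protocol version in (major << 8) + minor form. */
  static uint16_t boot_protocol_version(const uint8_t *image)
  {
      uint32_t magic;
      uint16_t version;

      memcpy(&magic, image + 0x202, sizeof(magic));
      if (magic != 0x53726448)          /* "HdrS" not found */
          return 0x0100;                /* treat as the "old" protocol */
      memcpy(&version, image + 0x206, sizeof(version));
      return version;                   /* e.g. 0x0204 for 2.04 */
  }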
@@ -221,7 +233,8 @@ setting fields in the header, you must make sure only to set fields
supported by the protocol version in use.
-**** DETAILS OF HEADER FIELDS
+Details of Header Fields
+========================
For each field, some are information from the kernel to the bootloader
("read"), some are expected to be filled out by the bootloader
@@ -235,106 +248,132 @@ boot loaders can ignore those fields.
The byte order of all fields is littleendian (this is x86, after all.)
+============ ===========
Field name: setup_sects
Type: read
Offset/size: 0x1f1/1
Protocol: ALL
+============ ===========
The size of the setup code in 512-byte sectors. If this field is
0, the real value is 4. The real-mode code consists of the boot
sector (always one 512-byte sector) plus the setup code.
-Field name: root_flags
-Type: modify (optional)
-Offset/size: 0x1f2/2
-Protocol: ALL
+============ =================
+Field name: root_flags
+Type: modify (optional)
+Offset/size: 0x1f2/2
+Protocol: ALL
+============ =================
If this field is nonzero, the root defaults to readonly. The use of
this field is deprecated; use the "ro" or "rw" options on the
command line instead.
+============ ===============================================
Field name: syssize
Type: read
Offset/size: 0x1f4/4 (protocol 2.04+) 0x1f4/2 (protocol ALL)
Protocol: 2.04+
+============ ===============================================
The size of the protected-mode code in units of 16-byte paragraphs.
For protocol versions older than 2.04 this field is only two bytes
wide, and therefore cannot be trusted for the size of a kernel if
the LOAD_HIGH flag is set.
+============ ===============
Field name: ram_size
Type: kernel internal
Offset/size: 0x1f8/2
Protocol: ALL
+============ ===============
This field is obsolete.
+============ ===================
Field name: vid_mode
Type: modify (obligatory)
Offset/size: 0x1fa/2
+============ ===================
Please see the section on SPECIAL COMMAND LINE OPTIONS.
+============ =================
Field name: root_dev
Type: modify (optional)
Offset/size: 0x1fc/2
Protocol: ALL
+============ =================
The default root device device number. The use of this field is
deprecated, use the "root=" option on the command line instead.
+============ =========
Field name: boot_flag
Type: read
Offset/size: 0x1fe/2
Protocol: ALL
+============ =========
Contains 0xAA55. This is the closest thing old Linux kernels have
to a magic number.
+============ =======
Field name: jump
Type: read
Offset/size: 0x200/2
Protocol: 2.00+
+============ =======
Contains an x86 jump instruction, 0xEB followed by a signed offset
relative to byte 0x202. This can be used to determine the size of
the header.
+============ =======
Field name: header
Type: read
Offset/size: 0x202/4
Protocol: 2.00+
+============ =======
Contains the magic number "HdrS" (0x53726448).
+============ =======
Field name: version
Type: read
Offset/size: 0x206/2
Protocol: 2.00+
+============ =======
Contains the boot protocol version, in (major << 8)+minor format,
e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
10.17.
+============ =================
Field name: realmode_swtch
Type: modify (optional)
Offset/size: 0x208/4
Protocol: 2.00+
+============ =================
Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
+============ =============
Field name: start_sys_seg
Type: read
Offset/size: 0x20c/2
Protocol: 2.00+
+============ =============
The load low segment (0x1000). Obsolete.
+============ ==============
Field name: kernel_version
Type: read
Offset/size: 0x20e/2
Protocol: 2.00+
+============ ==============
If set to a nonzero value, contains a pointer to a NUL-terminated
human-readable kernel version number string, less 0x200. This can
@@ -344,17 +383,19 @@ Protocol: 2.00+
For example, if this value is set to 0x1c00, the kernel version
number string can be found at offset 0x1e00 in the kernel file.
This is a valid value if and only if the "setup_sects" field
- contains the value 15 or higher, as:
+ contains the value 15 or higher, as::
0x1c00 < 15*0x200 (= 0x1e00) but
0x1c00 >= 14*0x200 (= 0x1c00)
- 0x1c00 >> 9 = 14, so the minimum value for setup_secs is 15.
+ 0x1c00 >> 9 = 14, so the minimum value for setup_sects is 15.
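
A sketch of how a loader might print this string, reading the raw fields at
the offsets given in the table above; the 'image' pointer and the bounds check
are this sketch's own framing, and a little-endian host is assumed::

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static void print_kernel_version(const uint8_t *image)
  {
      uint16_t kver;
      uint8_t sects = image[0x1f1];     /* setup_sects */

      if (!sects)
          sects = 4;
      memcpy(&kver, image + 0x20e, sizeof(kver));
      /* only valid if the string lies inside the setup area */
      if (kver && kver < (uint32_t)sects * 0x200)
          printf("kernel version: %s\n",
                 (const char *)image + kver + 0x200);
  }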
+============ ==================
Field name: type_of_loader
Type: write (obligatory)
Offset/size: 0x210/1
Protocol: 2.00+
+============ ==================
If your boot loader has an assigned id (see table below), enter
0xTV here, where T is an identifier for the boot loader and V is
@@ -365,17 +406,20 @@ Protocol: 2.00+
Similarly, the ext_loader_ver field can be used to provide more than
four bits for the bootloader version.
- For example, for T = 0x15, V = 0x234, write:
+ For example, for T = 0x15, V = 0x234, write::
- type_of_loader <- 0xE4
- ext_loader_type <- 0x05
- ext_loader_ver <- 0x23
+ type_of_loader <- 0xE4
+ ext_loader_type <- 0x05
+ ext_loader_ver <- 0x23
Assigned boot loader ids (hexadecimal):
- 0 LILO (0x00 reserved for pre-2.00 bootloader)
+ == =======================================
+ 0 LILO
+ (0x00 reserved for pre-2.00 bootloader)
1 Loadlin
- 2 bootsect-loader (0x20, all other values reserved)
+ 2 bootsect-loader
+ (0x20, all other values reserved)
3 Syslinux
4 Etherboot/gPXE/iPXE
5 ELILO
@@ -386,55 +430,70 @@ Protocol: 2.00+
B Qemu
C Arcturus Networks uCbootloader
D kexec-tools
- E Extended (see ext_loader_type)
- F Special (0xFF = undefined)
- 10 Reserved
- 11 Minimal Linux Bootloader <http://sebastian-plotz.blogspot.de>
- 12 OVMF UEFI virtualization stack
+ E Extended (see ext_loader_type)
+ F Special (0xFF = undefined)
+ 10 Reserved
+ 11 Minimal Linux Bootloader
+ <http://sebastian-plotz.blogspot.de>
+ 12 OVMF UEFI virtualization stack
+ == =======================================
- Please contact <hpa@zytor.com> if you need a bootloader ID
- value assigned.
+ Please contact <hpa@zytor.com> if you need a bootloader ID value assigned.
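
A sketch of the encoding just illustrated, written as a helper a boot loader
might use; the struct and function names are invented for the sketch and are
not part of the real header layout::

  #include <stdint.h>

  struct loader_id {
      uint8_t type_of_loader;       /* offset 0x210 */
      uint8_t ext_loader_type;      /* offset 0x227 */
      uint8_t ext_loader_ver;       /* offset 0x226 */
  };

  /* T = loader identifier, V = loader version, as in the example above
   * (T = 0x15, V = 0x234 gives 0xE4 / 0x05 / 0x23). */
  static struct loader_id encode_loader_id(unsigned int T, unsigned int V)
  {
      struct loader_id id = { 0 };

      if (T < 0xE) {
          id.type_of_loader = (T << 4) | (V & 0xf);
      } else {
          id.type_of_loader = 0xE0 | (V & 0xf);   /* "Extended" ID */
          id.ext_loader_type = T - 0x10;
      }
      id.ext_loader_ver = V >> 4;
      return id;
  }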
+============ ===================
Field name: loadflags
Type: modify (obligatory)
Offset/size: 0x211/1
Protocol: 2.00+
+============ ===================
This field is a bitmask.
Bit 0 (read): LOADED_HIGH
+
- If 0, the protected-mode code is loaded at 0x10000.
- If 1, the protected-mode code is loaded at 0x100000.
Bit 1 (kernel internal): KASLR_FLAG
+
- Used internally by the compressed kernel to communicate
KASLR status to kernel proper.
- If 1, KASLR enabled.
- If 0, KASLR disabled.
+
+ - If 1, KASLR enabled.
+ - If 0, KASLR disabled.
Bit 5 (write): QUIET_FLAG
+
- If 0, print early messages.
- If 1, suppress early messages.
+
This requests to the kernel (decompressor and early
kernel) to not write early messages that require
accessing the display hardware directly.
Bit 6 (write): KEEP_SEGMENTS
+
Protocol: 2.07+
+
- If 0, reload the segment registers in the 32bit entry point.
- If 1, do not reload the segment registers in the 32bit entry point.
+
Assume that %cs %ds %ss %es are all set to flat segments with
a base of 0 (or the equivalent for their environment).
Bit 7 (write): CAN_USE_HEAP
+
Set this bit to 1 to indicate that the value entered in the
heap_end_ptr is valid. If this field is clear, some setup code
functionality will be disabled.
+
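
A sketch of how a protocol 2.01+ loader might fill these bits together with
heap_end_ptr (described further below); the two-field struct is a simplified
stand-in for the real-mode header, and the 0xe000 figure comes from the sample
configuration later in this file::

  #include <stdint.h>

  #define QUIET_FLAG      (1 << 5)
  #define CAN_USE_HEAP    (1 << 7)

  struct rm_header {                    /* only the fields used here */
      uint8_t  loadflags;               /* offset 0x211 */
      uint16_t heap_end_ptr;            /* offset 0x224 */
  };

  /* heap_end is the offset, from the start of the real-mode code, of the
   * end of the setup stack/heap (e.g. 0xe000 when loading below 0x90000). */
  static void setup_heap(struct rm_header *hdr, uint16_t heap_end)
  {
      hdr->heap_end_ptr = heap_end - 0x200;
      hdr->loadflags |= CAN_USE_HEAP;   /* heap_end_ptr is valid */
      hdr->loadflags &= ~QUIET_FLAG;    /* keep early messages   */
  }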
+============ ===================
Field name: setup_move_size
Type: modify (obligatory)
Offset/size: 0x212/2
Protocol: 2.00-2.01
+============ ===================
When using protocol 2.00 or 2.01, if the real mode kernel is not
loaded at 0x90000, it gets moved there later in the loading
@@ -443,14 +502,16 @@ Protocol: 2.00-2.01
itself.
The unit is bytes starting with the beginning of the boot sector.
-
+
This field is can be ignored when the protocol is 2.02 or higher, or
if the real-mode code is loaded at 0x90000.
+============ ========================
Field name: code32_start
Type: modify (optional, reloc)
Offset/size: 0x214/4
Protocol: 2.00+
+============ ========================
The address to jump to in protected mode. This defaults to the load
address of the kernel, and can be used by the boot loader to
@@ -458,47 +519,57 @@ Protocol: 2.00+
This field can be modified for two purposes:
- 1. as a boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
+ 1. as a boot loader hook (see Advanced Boot Loader Hooks below.)
- 2. if a bootloader which does not install a hook loads a
- relocatable kernel at a nonstandard address it will have to modify
- this field to point to the load address.
+ 2. if a bootloader which does not install a hook loads a
+ relocatable kernel at a nonstandard address it will have to modify
+ this field to point to the load address.
+============ ==================
Field name: ramdisk_image
Type: write (obligatory)
Offset/size: 0x218/4
Protocol: 2.00+
+============ ==================
The 32-bit linear address of the initial ramdisk or ramfs. Leave at
zero if there is no initial ramdisk/ramfs.
+============ ==================
Field name: ramdisk_size
Type: write (obligatory)
Offset/size: 0x21c/4
Protocol: 2.00+
+============ ==================
Size of the initial ramdisk or ramfs. Leave at zero if there is no
initial ramdisk/ramfs.
+============ ===============
Field name: bootsect_kludge
Type: kernel internal
Offset/size: 0x220/4
Protocol: 2.00+
+============ ===============
This field is obsolete.
+============ ==================
Field name: heap_end_ptr
Type: write (obligatory)
Offset/size: 0x224/2
Protocol: 2.01+
+============ ==================
Set this field to the offset (from the beginning of the real-mode
code) of the end of the setup stack/heap, minus 0x0200.
+============ ================
Field name: ext_loader_ver
Type: write (optional)
Offset/size: 0x226/1
Protocol: 2.02+
+============ ================
This field is used as an extension of the version number in the
type_of_loader field. The total version number is considered to be
@@ -510,10 +581,12 @@ Protocol: 2.02+
Kernels prior to 2.6.31 did not recognize this field, but it is safe
to write for protocol version 2.02 or higher.
+============ =====================================================
Field name: ext_loader_type
Type: write (obligatory if (type_of_loader & 0xf0) == 0xe0)
Offset/size: 0x227/1
Protocol: 2.02+
+============ =====================================================
This field is used as an extension of the type number in
type_of_loader field. If the type in type_of_loader is 0xE, then
@@ -524,10 +597,12 @@ Protocol: 2.02+
Kernels prior to 2.6.31 did not recognize this field, but it is safe
to write for protocol version 2.02 or higher.
+============ ==================
Field name: cmd_line_ptr
Type: write (obligatory)
Offset/size: 0x228/4
Protocol: 2.02+
+============ ==================
Set this field to the linear address of the kernel command line.
The kernel command line can be located anywhere between the end of
@@ -540,10 +615,12 @@ Protocol: 2.02+
zero, the kernel will assume that your boot loader does not support
the 2.02+ protocol.
+============ ===============
Field name: initrd_addr_max
Type: read
Offset/size: 0x22c/4
Protocol: 2.03+
+============ ===============
The maximum address that may be occupied by the initial
ramdisk/ramfs contents. For boot protocols 2.02 or earlier, this
@@ -552,10 +629,12 @@ Protocol: 2.03+
your ramdisk is exactly 131072 bytes long and this field is
0x37FFFFFF, you can start your ramdisk at 0x37FE0000.)
+============ ============================
Field name: kernel_alignment
Type: read/modify (reloc)
Offset/size: 0x230/4
Protocol: 2.05+ (read), 2.10+ (modify)
+============ ============================
Alignment unit required by the kernel (if relocatable_kernel is
true.) A relocatable kernel that is loaded at an alignment
@@ -567,25 +646,29 @@ Protocol: 2.05+ (read), 2.10+ (modify)
loader to modify this field to permit a lesser alignment. See the
min_alignment and pref_address field below.
+============ ==================
Field name: relocatable_kernel
Type: read (reloc)
Offset/size: 0x234/1
Protocol: 2.05+
+============ ==================
If this field is nonzero, the protected-mode part of the kernel can
be loaded at any address that satisfies the kernel_alignment field.
After loading, the boot loader must set the code32_start field to
point to the loaded code, or to a boot loader hook.
+============ =============
Field name: min_alignment
Type: read (reloc)
Offset/size: 0x235/1
Protocol: 2.10+
+============ =============
This field, if nonzero, indicates as a power of two the minimum
alignment required, as opposed to preferred, by the kernel to boot.
If a boot loader makes use of this field, it should update the
- kernel_alignment field with the alignment unit desired; typically:
+ kernel_alignment field with the alignment unit desired; typically::
kernel_alignment = 1 << min_alignment
@@ -593,44 +676,56 @@ Protocol: 2.10+
misaligned kernel. Therefore, a loader should typically try each
power-of-two alignment from kernel_alignment down to this alignment.
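
A sketch of that loop, choosing a load address inside one free memory region;
the region_start/region_size parameters and the use of init_size (a field
described later in this table) are this sketch's own framing::

  #include <stdint.h>

  /* Round region_start up to the largest workable power-of-two alignment,
   * trying kernel_alignment first and stopping at 1 << min_alignment. */
  static uint64_t pick_load_address(uint64_t region_start, uint64_t region_size,
                                    uint64_t init_size,
                                    uint32_t kernel_alignment,
                                    uint8_t min_alignment)
  {
      uint64_t align;

      for (align = kernel_alignment; align >= (1ull << min_alignment);
           align >>= 1) {
          uint64_t addr = (region_start + align - 1) & ~(align - 1);

          if (addr + init_size <= region_start + region_size)
              return addr;
      }
      return 0;     /* no fit; the caller decides what to do */
  }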
-Field name: xloadflags
-Type: read
-Offset/size: 0x236/2
-Protocol: 2.12+
+============ ==========
+Field name: xloadflags
+Type: read
+Offset/size: 0x236/2
+Protocol: 2.12+
+============ ==========
This field is a bitmask.
Bit 0 (read): XLF_KERNEL_64
+
- If 1, this kernel has the legacy 64-bit entry point at 0x200.
Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G
+
- If 1, kernel/boot_params/cmdline/ramdisk can be above 4G.
Bit 2 (read): XLF_EFI_HANDOVER_32
+
- If 1, the kernel supports the 32-bit EFI handoff entry point
given at handover_offset.
Bit 3 (read): XLF_EFI_HANDOVER_64
+
- If 1, the kernel supports the 64-bit EFI handoff entry point
given at handover_offset + 0x200.
Bit 4 (read): XLF_EFI_KEXEC
+
- If 1, the kernel supports kexec EFI boot with EFI runtime support.
+
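
A sketch of testing these bits before placing pieces above 4 GiB; the XLF_*
names match the bit list above, while the two-field struct is only a stand-in
for the full header::

  #include <stdbool.h>
  #include <stdint.h>

  #define XLF_KERNEL_64                   (1 << 0)
  #define XLF_CAN_BE_LOADED_ABOVE_4G      (1 << 1)
  #define XLF_EFI_HANDOVER_32             (1 << 2)
  #define XLF_EFI_HANDOVER_64             (1 << 3)
  #define XLF_EFI_KEXEC                   (1 << 4)

  struct xl_hdr {
      uint16_t version;                 /* offset 0x206 */
      uint16_t xloadflags;              /* offset 0x236, protocol 2.12+ */
  };

  static bool may_load_above_4g(const struct xl_hdr *hdr)
  {
      /* the field only exists from protocol 2.12 (0x020c) onwards */
      return hdr->version >= 0x020c &&
             (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G);
  }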
+============ ============
Field name: cmdline_size
Type: read
Offset/size: 0x238/4
Protocol: 2.06+
+============ ============
The maximum size of the command line without the terminating
zero. This means that the command line can contain at most
cmdline_size characters. With protocol version 2.05 and earlier, the
maximum size was 255.
+============ ====================================
Field name: hardware_subarch
Type: write (optional, defaults to x86/PC)
Offset/size: 0x23c/4
Protocol: 2.07+
+============ ====================================
In a paravirtualized environment the hardware low level architectural
pieces such as interrupt handling, page table handling, and
@@ -639,25 +734,31 @@ Protocol: 2.07+
This field allows the bootloader to inform the kernel we are in one
one of those environments.
+ ========== ==============================
0x00000000 The default x86/PC environment
0x00000001 lguest
0x00000002 Xen
0x00000003 Moorestown MID
0x00000004 CE4100 TV Platform
+ ========== ==============================
+============ =========================
Field name: hardware_subarch_data
Type: write (subarch-dependent)
Offset/size: 0x240/8
Protocol: 2.07+
+============ =========================
A pointer to data that is specific to hardware subarch
This field is currently unused for the default x86/PC environment,
do not modify.
+============ ==============
Field name: payload_offset
Type: read
Offset/size: 0x248/4
Protocol: 2.08+
+============ ==============
If non-zero then this field contains the offset from the beginning
of the protected-mode code to the payload.
@@ -670,29 +771,33 @@ Protocol: 2.08+
02 21). The uncompressed payload is currently always ELF (magic
number 7F 45 4C 46).
+============ ==============
Field name: payload_length
Type: read
Offset/size: 0x24c/4
Protocol: 2.08+
+============ ==============
The length of the payload.
+============ ===============
Field name: setup_data
Type: write (special)
Offset/size: 0x250/8
Protocol: 2.09+
+============ ===============
The 64-bit physical pointer to NULL terminated single linked list of
struct setup_data. This is used to define a more extensible boot
parameters passing mechanism. The definition of struct setup_data is
- as follow:
+ as follows::
- struct setup_data {
- u64 next;
- u32 type;
- u32 len;
- u8 data[0];
- };
+ struct setup_data {
+ u64 next;
+ u32 type;
+ u32 len;
+ u8 data[0];
+ };
Where, the next is a 64-bit physical pointer to the next node of
linked list, the next field of the last node is 0; the type is used
@@ -704,10 +809,12 @@ Protocol: 2.09+
sure to consider the case where the linked list already contains
entries.
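
A sketch of adding one node while honouring existing entries; identity mapping
of physical addresses is assumed here, which a real loader cannot take for
granted, and the function name is invented for the sketch::

  #include <stdint.h>

  struct setup_data {
      uint64_t next;
      uint32_t type;
      uint32_t len;
      uint8_t  data[];
  };

  /* 'head' points at the 64-bit setup_data field inside struct boot_params */
  static void append_setup_data(uint64_t *head, struct setup_data *node)
  {
      node->next = 0;
      while (*head)     /* walk to the last node, whose next field is 0 */
          head = &((struct setup_data *)(uintptr_t)(*head))->next;
      *head = (uint64_t)(uintptr_t)node;
  }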
+============ ============
Field name: pref_address
Type: read (reloc)
Offset/size: 0x258/8
Protocol: 2.10+
+============ ============
This field, if nonzero, represents a preferred load address for the
kernel. A relocating bootloader should attempt to load at this
@@ -716,9 +823,11 @@ Protocol: 2.10+
A non-relocatable kernel will unconditionally move itself and to run
at this address.
+============ =======
Field name: init_size
Type: read
Offset/size: 0x260/4
+============ =======
This field indicates the amount of linear contiguous memory starting
at the kernel runtime start address that the kernel needs before it
@@ -727,16 +836,18 @@ Offset/size: 0x260/4
be used by a relocating boot loader to help select a safe load
address for the kernel.
- The kernel runtime start address is determined by the following algorithm:
+ The kernel runtime start address is determined by the following algorithm::
- if (relocatable_kernel)
+ if (relocatable_kernel)
runtime_start = align_up(load_address, kernel_alignment)
- else
+ else
runtime_start = pref_address
+============ ===============
Field name: handover_offset
Type: read
Offset/size: 0x264/4
+============ ===============
This field is the offset from the beginning of the kernel image to
the EFI handover protocol entry point. Boot loaders using the EFI
@@ -745,7 +856,8 @@ Offset/size: 0x264/4
See EFI HANDOVER PROTOCOL below for more details.
-**** THE IMAGE CHECKSUM
+The Image Checksum
+==================
From boot protocol version 2.08 onwards the CRC-32 is calculated over
the entire file using the characteristic polynomial 0x04C11DB7 and an
@@ -754,7 +866,8 @@ file; therefore the CRC of the file up to the limit specified in the
syssize field of the header is always 0.
-**** THE KERNEL COMMAND LINE
+The Kernel Command Line
+=======================
The kernel command line has become an important way for the boot
loader to communicate with the kernel. Some of its options are also
@@ -774,19 +887,20 @@ heap and 0xA0000.
If the protocol version is *not* 2.02 or higher, the kernel
command line is entered using the following protocol:
- At offset 0x0020 (word), "cmd_line_magic", enter the magic
- number 0xA33F.
+ - At offset 0x0020 (word), "cmd_line_magic", enter the magic
+ number 0xA33F.
+
+ - At offset 0x0022 (word), "cmd_line_offset", enter the offset
+ of the kernel command line (relative to the start of the
+ real-mode kernel).
- At offset 0x0022 (word), "cmd_line_offset", enter the offset
- of the kernel command line (relative to the start of the
- real-mode kernel).
-
- The kernel command line *must* be within the memory region
- covered by setup_move_size, so you may need to adjust this
- field.
+ - The kernel command line *must* be within the memory region
+ covered by setup_move_size, so you may need to adjust this
+ field.
-**** MEMORY LAYOUT OF THE REAL-MODE CODE
+Memory Layout of The Real-Mode Code
+===================================
The real-mode code requires a stack/heap to be set up, as well as
memory allocated for the kernel command line. This needs to be done
@@ -802,10 +916,11 @@ segment has to be used:
- When loading a zImage kernel ((loadflags & 0x01) == 0).
- When loading a 2.01 or earlier boot protocol kernel.
- -> For the 2.00 and 2.01 boot protocols, the real-mode code
- can be loaded at another address, but it is internally
- relocated to 0x90000. For the "old" protocol, the
- real-mode code must be loaded at 0x90000.
+.. note::
+ For the 2.00 and 2.01 boot protocols, the real-mode code
+ can be loaded at another address, but it is internally
+ relocated to 0x90000. For the "old" protocol, the
+ real-mode code must be loaded at 0x90000.
When loading at 0x90000, avoid using memory above 0x9a000.
@@ -818,24 +933,29 @@ The kernel command line should not be located below the real-mode
code, nor should it be located in high memory.
-**** SAMPLE BOOT CONFIGURATION
+Sample Boot Configuration
+=========================
As a sample configuration, assume the following layout of the real
-mode segment:
+mode segment.
When loading below 0x90000, use the entire segment:
+ ============= ===================
0x0000-0x7fff Real mode kernel
0x8000-0xdfff Stack and heap
0xe000-0xffff Kernel command line
+ ============= ===================
When loading at 0x90000 OR the protocol version is 2.01 or earlier:
+ ============= ===================
0x0000-0x7fff Real mode kernel
0x8000-0x97ff Stack and heap
0x9800-0x9fff Kernel command line
+ ============= ===================
-Such a boot loader should enter the following fields in the header:
+Such a boot loader should enter the following fields in the header::
unsigned long base_ptr; /* base address for real-mode segment */
@@ -894,7 +1014,8 @@ Such a boot loader should enter the following fields in the header:
}
-**** LOADING THE REST OF THE KERNEL
+Loading The Rest of The Kernel
+==============================
The 32-bit (non-real-mode) kernel starts at offset (setup_sects+1)*512
in the kernel file (again, if setup_sects == 0 the real value is 4.)
@@ -902,7 +1023,7 @@ It should be loaded at address 0x10000 for Image/zImage kernels and
0x100000 for bzImage kernels.
The kernel is a bzImage kernel if the protocol >= 2.00 and the 0x01
-bit (LOAD_HIGH) in the loadflags field is set:
+bit (LOAD_HIGH) in the loadflags field is set::
is_bzImage = (protocol >= 0x0200) && (loadflags & 0x01);
load_address = is_bzImage ? 0x100000 : 0x10000;
@@ -912,8 +1033,8 @@ the entire 0x10000-0x90000 range of memory. This means it is pretty
much a requirement for these kernels to load the real-mode part at
0x90000. bzImage kernels allow much more flexibility.
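
Putting the rules of this section together, a sketch that locates the
protected-mode code in the file and picks its load address; 'image' points at
the start of the kernel file and a little-endian host is assumed::

  #include <stdint.h>
  #include <string.h>

  struct pm_info {
      unsigned long file_offset;        /* (setup_sects + 1) * 512 */
      unsigned long size;               /* syssize * 16 bytes      */
      unsigned long load_address;       /* 0x10000 or 0x100000     */
  };

  static struct pm_info locate_protected_mode(const uint8_t *image)
  {
      struct pm_info pm;
      uint8_t setup_sects = image[0x1f1];
      uint8_t loadflags = image[0x211];
      uint16_t protocol;
      uint32_t syssize;

      memcpy(&protocol, image + 0x206, 2);
      memcpy(&syssize, image + 0x1f4, 4);   /* all four bytes: 2.04+ only */

      if (!setup_sects)
          setup_sects = 4;
      pm.file_offset = ((unsigned long)setup_sects + 1) * 512;
      pm.size = (unsigned long)syssize * 16;
      pm.load_address = (protocol >= 0x0200 && (loadflags & 0x01))
                        ? 0x100000 : 0x10000;   /* bzImage vs zImage */
      return pm;
  }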
-
-**** SPECIAL COMMAND LINE OPTIONS
+Special Command Line Options
+============================
If the command line provided by the boot loader is entered by the
user, the user may expect the following command line options to work.
@@ -962,7 +1083,8 @@ or configuration-specified command line. Otherwise, "init=/bin/sh"
gets confused by the "auto" option.
-**** RUNNING THE KERNEL
+Running the Kernel
+==================
The kernel is started by jumping to the kernel entry point, which is
located at *segment* offset 0x20 from the start of the real mode
@@ -976,7 +1098,7 @@ interrupts should be disabled. Furthermore, to guard against bugs in
the kernel, it is recommended that the boot loader sets fs = gs = ds =
es = ss.
-In our example from above, we would do:
+In our example from above, we would do::
/* Note: in the case of the "old" kernel protocol, base_ptr must
be == 0x90000 at this point; see the previous sample code */
@@ -999,7 +1121,8 @@ switched off, especially if the loaded kernel has the floppy driver as
a demand-loaded module!
-**** ADVANCED BOOT LOADER HOOKS
+Advanced Boot Loader Hooks
+==========================
If the boot loader runs in a particularly hostile environment (such as
LOADLIN, which runs under DOS) it may be impossible to follow the
@@ -1028,7 +1151,8 @@ IMPORTANT: All the hooks are required to preserve %esp, %ebp, %esi and
(relocated, if appropriate.)
-**** 32-bit BOOT PROTOCOL
+32-bit Boot Protocol
+====================
For machine with some new BIOS other than legacy BIOS, such as EFI,
LinuxBIOS, etc, and kexec, the 16-bit real mode setup code in kernel
@@ -1041,7 +1165,7 @@ traditionally known as "zero page"). The memory for struct boot_params
should be allocated and initialized to all zero. Then the setup header
from offset 0x01f1 of kernel image on should be loaded into struct
boot_params and examined. The end of setup header can be calculated as
-follow:
+follows::
0x0202 + byte value at offset 0x0201
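
A sketch of this step; the 4096-byte buffer standing in for struct boot_params
(the "zero page") and the function name are assumptions of the sketch::

  #include <stdint.h>
  #include <string.h>

  static void load_setup_header(uint8_t bp[4096], const uint8_t *image)
  {
      size_t end = 0x0202 + image[0x0201];      /* end of setup header */

      memset(bp, 0, 4096);                      /* zero the whole page  */
      memcpy(bp + 0x01f1, image + 0x01f1, end - 0x01f1);
  }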
@@ -1065,7 +1189,8 @@ must have read/write permission; CS must be __BOOT_CS and DS, ES, SS
must be __BOOT_DS; interrupt must be disabled; %esi must hold the base
address of the struct boot_params; %ebp, %edi and %ebx must be zero.
-**** 64-bit BOOT PROTOCOL
+64-bit Boot Protocol
+====================
For machine with 64bit cpus and 64bit kernel, we could use 64bit bootloader
and we need a 64-bit boot protocol.
@@ -1076,7 +1201,7 @@ traditionally known as "zero page"). The memory for struct boot_params
could be allocated anywhere (even above 4G) and initialized to all zero.
Then, the setup header at offset 0x01f1 of kernel image on should be
loaded into struct boot_params and examined. The end of setup header
-can be calculated as follows:
+can be calculated as follows::
0x0202 + byte value at offset 0x0201
@@ -1103,7 +1228,8 @@ must have read/write permission; CS must be __BOOT_CS and DS, ES, SS
must be __BOOT_DS; interrupt must be disabled; %rsi must hold the base
address of the struct boot_params.
-**** EFI HANDOVER PROTOCOL
+EFI Handover Protocol
+=====================
This protocol allows boot loaders to defer initialisation to the EFI
boot stub. The boot loader is required to load the kernel/initrd(s)
@@ -1111,7 +1237,7 @@ from the boot media and jump to the EFI handover protocol entry point
which is hdr->handover_offset bytes from the beginning of
startup_{32,64}.
-The function prototype for the handover entry point looks like this,
+The function prototype for the handover entry point looks like this::
efi_main(void *handle, efi_system_table_t *table, struct boot_params *bp)
@@ -1120,11 +1246,11 @@ firmware, 'table' is the EFI system table - these are the first two
arguments of the "handoff state" as described in section 2.3 of the
UEFI specification. 'bp' is the boot loader-allocated boot params.
-The boot loader *must* fill out the following fields in bp,
+The boot loader *must* fill out the following fields in bp::
- o hdr.code32_start
- o hdr.cmd_line_ptr
- o hdr.ramdisk_image (if applicable)
- o hdr.ramdisk_size (if applicable)
+ - hdr.code32_start
+ - hdr.cmd_line_ptr
+ - hdr.ramdisk_image (if applicable)
+ - hdr.ramdisk_size (if applicable)
All other fields should be zero.
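
A sketch of the final jump once the fields above are filled in; the function
and parameter names are invented, the bare 'void *' types are a
simplification, and the 0x200 adjustment for the 64-bit entry point follows
the xloadflags description earlier in this file::

  #include <stdint.h>

  typedef void (*handover_fn)(void *handle, void *table, void *bp);

  /* pm_kernel is where startup_32/64 was loaded; handover_offset comes
   * from the header field of the same name. */
  static void efi_handover(void *handle, void *systab, void *bp,
                           uint8_t *pm_kernel, uint32_t handover_offset,
                           int is_64bit)
  {
      handover_fn entry = (handover_fn)(uintptr_t)
          (pm_kernel + handover_offset + (is_64bit ? 0x200 : 0));

      entry(handle, systab, bp);        /* does not return */
  }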
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
new file mode 100644
index 000000000000..33c5c3142e20
--- /dev/null
+++ b/Documentation/x86/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "X86 architecture specific documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'x86.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.rst
index 46933e06c972..11307378acf0 100644
--- a/Documentation/x86/earlyprintk.txt
+++ b/Documentation/x86/earlyprintk.rst
@@ -1,52 +1,58 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+Early Printk
+============
Mini-HOWTO for using the earlyprintk=dbgp boot option with a
USB2 Debug port key and a debug cable, on x86 systems.
You need two computers, the 'USB debug key' special gadget and
-and two USB cables, connected like this:
+two USB cables, connected like this::
[host/target] <-------> [USB debug key] <-------> [client/console]
-1. There are a number of specific hardware requirements:
-
- a.) Host/target system needs to have USB debug port capability.
-
- You can check this capability by looking at a 'Debug port' bit in
- the lspci -vvv output:
-
- # lspci -vvv
- ...
- 00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
- Subsystem: Lenovo ThinkPad T61
- Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
- Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
- Latency: 0
- Interrupt: pin D routed to IRQ 19
- Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
- Capabilities: [50] Power Management version 2
- Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
- Status: D0 PME-Enable- DSel=0 DScale=0 PME+
- Capabilities: [58] Debug port: BAR=1 offset=00a0
+Hardware requirements
+=====================
+
+ a) Host/target system needs to have USB debug port capability.
+
+ You can check this capability by looking at a 'Debug port' bit in
+ the lspci -vvv output::
+
+ # lspci -vvv
+ ...
+ 00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
+ Subsystem: Lenovo ThinkPad T61
+ Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
+ Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
+ Latency: 0
+ Interrupt: pin D routed to IRQ 19
+ Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
+ Capabilities: [50] Power Management version 2
+ Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
+ Status: D0 PME-Enable- DSel=0 DScale=0 PME+
+ Capabilities: [58] Debug port: BAR=1 offset=00a0
^^^^^^^^^^^ <==================== [ HERE ]
- Kernel driver in use: ehci_hcd
- Kernel modules: ehci-hcd
- ...
+ Kernel driver in use: ehci_hcd
+ Kernel modules: ehci-hcd
+ ...
-( If your system does not list a debug port capability then you probably
- won't be able to use the USB debug key. )
+ .. note::
+ If your system does not list a debug port capability then you probably
+ won't be able to use the USB debug key.
- b.) You also need a NetChip USB debug cable/key:
+ b) You also need a NetChip USB debug cable/key:
http://www.plxtech.com/products/NET2000/NET20DC/default.asp
This is a small blue plastic connector with two USB connections;
it draws power from its USB connections.
- c.) You need a second client/console system with a high speed USB 2.0
- port.
+ c) You need a second client/console system with a high speed USB 2.0 port.
- d.) The NetChip device must be plugged directly into the physical
- debug port on the "host/target" system. You cannot use a USB hub in
+ d) The NetChip device must be plugged directly into the physical
+ debug port on the "host/target" system. You cannot use a USB hub in
between the physical debug port and the "host/target" system.
The EHCI debug controller is bound to a specific physical USB
@@ -65,29 +71,31 @@ and two USB cables, connected like this:
to the hardware vendor, because there is no reason not to wire
this port into one of the physically accessible ports.
- e.) It is also important to note, that many versions of the NetChip
+ e) It is also important to note that many versions of the NetChip
device require the "client/console" system to be plugged into the
right hand side of the device (with the product logo facing up and
readable left to right). The reason being is that the 5 volt
power supply is taken from only one side of the device and it
must be the side that does not get rebooted.
-2. Software requirements:
+Software requirements
+=====================
- a.) On the host/target system:
+ a) On the host/target system:
- You need to enable the following kernel config option:
+ You need to enable the following kernel config option::
CONFIG_EARLY_PRINTK_DBGP=y
And you need to add the boot command line: "earlyprintk=dbgp".
- (If you are using Grub, append it to the 'kernel' line in
- /etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
- append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
- using Grub2 on an EFI firmware system, append it to the 'linux'
- or 'linuxefi' line in /boot/grub2/grub.cfg or
- /boot/efi/EFI/<distro>/grub.cfg.)
+ .. note::
+ If you are using Grub, append it to the 'kernel' line in
+ /etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
+ append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+ using Grub2 on an EFI firmware system, append it to the 'linux'
+ or 'linuxefi' line in /boot/grub2/grub.cfg or
+ /boot/efi/EFI/<distro>/grub.cfg.
On systems with more than one EHCI debug controller you must
specify the correct EHCI debug controller number. The ordering
@@ -96,14 +104,15 @@ and two USB cables, connected like this:
controller. To use the second EHCI debug controller, you would
use the command line: "earlyprintk=dbgp1"
- NOTE: normally earlyprintk console gets turned off once the
- regular console is alive - use "earlyprintk=dbgp,keep" to keep
- this channel open beyond early bootup. This can be useful for
- debugging crashes under Xorg, etc.
+ .. note::
+ normally earlyprintk console gets turned off once the
+ regular console is alive - use "earlyprintk=dbgp,keep" to keep
+ this channel open beyond early bootup. This can be useful for
+ debugging crashes under Xorg, etc.
- b.) On the client/console system:
+ b) On the client/console system:
- You should enable the following kernel config option:
+ You should enable the following kernel config option::
CONFIG_USB_SERIAL_DEBUG=y
@@ -115,27 +124,28 @@ and two USB cables, connected like this:
it up to use /dev/ttyUSB0 - or use a raw 'cat /dev/ttyUSBx' to
see the raw output.
- c.) On Nvidia Southbridge based systems: the kernel will try to probe
+ c) On Nvidia Southbridge based systems: the kernel will try to probe
and find out which port has a debug device connected.
-3. Testing that it works fine:
+Testing
+=======
- You can test the output by using earlyprintk=dbgp,keep and provoking
- kernel messages on the host/target system. You can provoke a harmless
- kernel message by for example doing:
+You can test the output by using earlyprintk=dbgp,keep and provoking
+kernel messages on the host/target system. You can provoke a harmless
+kernel message by for example doing::
echo h > /proc/sysrq-trigger
- On the host/target system you should see this help line in "dmesg" output:
+On the host/target system you should see this help line in "dmesg" output::
SysRq : HELP : loglevel(0-9) reBoot Crashdump terminate-all-tasks(E) memory-full-oom-kill(F) kill-all-tasks(I) saK show-backtrace-all-active-cpus(L) show-memory-usage(M) nice-all-RT-tasks(N) powerOff show-registers(P) show-all-timers(Q) unRaw Sync show-task-states(T) Unmount show-blocked-tasks(W) dump-ftrace-buffer(Z)
- On the client/console system do:
+On the client/console system do::
cat /dev/ttyUSB0
- And you should see the help line above displayed shortly after you've
- provoked it on the host system.
+And you should see the help line above displayed shortly after you've
+provoked it on the host system.
If it does not work then please ask about it on the linux-kernel@vger.kernel.org
mailing list or contact the x86 maintainers.
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.rst
index c1df8eba9dfd..a48b3f6ebbe8 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+Kernel Entries
+==============
+
This file documents some of the kernel entries in
arch/x86/entry/entry_64.S. A lot of this explanation is adapted from
an email from Ingo Molnar:
@@ -59,7 +65,7 @@ Now, there's a secondary complication: there's a cheap way to test
which mode the CPU is in and an expensive way.
The cheap way is to pick this info off the entry frame on the kernel
-stack, from the CS of the ptregs area of the kernel stack:
+stack, from the CS of the ptregs area of the kernel stack::
xorl %ebx,%ebx
testl $3,CS+8(%rsp)
@@ -67,7 +73,7 @@ stack, from the CS of the ptregs area of the kernel stack:
SWAPGS
The expensive (paranoid) way is to read back the MSR_GS_BASE value
-(which is what SWAPGS modifies):
+(which is what SWAPGS modifies)::
movl $1,%ebx
movl $MSR_GS_BASE,%ecx
@@ -76,7 +82,7 @@ The expensive (paranoid) way is to read back the MSR_GS_BASE value
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx,%ebx
-1: ret
+ 1: ret
If we are at an interrupt or user-trap/gate-alike boundary then we can
use the faster check: the stack will be a reliable indicator of
diff --git a/Documentation/x86/exception-tables.txt b/Documentation/x86/exception-tables.rst
index e396bcd8d830..24596c8210b5 100644
--- a/Documentation/x86/exception-tables.txt
+++ b/Documentation/x86/exception-tables.rst
@@ -1,5 +1,10 @@
- Kernel level exception handling in Linux
- Commentary by Joerg Pommnitz <joerg@raleigh.ibm.com>
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================
+Kernel level exception handling
+===============================
+
+Commentary by Joerg Pommnitz <joerg@raleigh.ibm.com>
When a process runs in kernel mode, it often has to access user
mode memory whose address has been passed by an untrusted program.
@@ -25,9 +30,9 @@ How does this work?
Whenever the kernel tries to access an address that is currently not
accessible, the CPU generates a page fault exception and calls the
-page fault handler
+page fault handler::
-void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+ void do_page_fault(struct pt_regs *regs, unsigned long error_code)
in arch/x86/mm/fault.c. The parameters on the stack are set up by
the low level assembly glue in arch/x86/kernel/entry_32.S. The parameter
@@ -57,73 +62,74 @@ as an example. The definition is somewhat hard to follow, so let's peek at
the code generated by the preprocessor and the compiler. I selected
the get_user call in drivers/char/sysrq.c for a detailed examination.
-The original code in sysrq.c line 587:
+The original code in sysrq.c line 587::
+
get_user(c, buf);
-The preprocessor output (edited to become somewhat readable):
-
-(
- {
- long __gu_err = - 14 , __gu_val = 0;
- const __typeof__(*( ( buf ) )) *__gu_addr = ((buf));
- if (((((0 + current_set[0])->tss.segment) == 0x18 ) ||
- (((sizeof(*(buf))) <= 0xC0000000UL) &&
- ((unsigned long)(__gu_addr ) <= 0xC0000000UL - (sizeof(*(buf)))))))
- do {
- __gu_err = 0;
- switch ((sizeof(*(buf)))) {
- case 1:
- __asm__ __volatile__(
- "1: mov" "b" " %2,%" "b" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "b" " %" "b" "1,%" "b" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".text" : "=r"(__gu_err), "=q" (__gu_val): "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err )) ;
- break;
- case 2:
- __asm__ __volatile__(
- "1: mov" "w" " %2,%" "w" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "w" " %" "w" "1,%" "w" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b,3b\n"
- ".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err ));
- break;
- case 4:
- __asm__ __volatile__(
- "1: mov" "l" " %2,%" "" "1\n"
- "2:\n"
- ".section .fixup,\"ax\"\n"
- "3: movl %3,%0\n"
- " xor" "l" " %" "" "1,%" "" "1\n"
- " jmp 2b\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n" " .long 1b,3b\n"
- ".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
- ( __gu_addr )) ), "i"(- 14 ), "0"(__gu_err));
- break;
- default:
- (__gu_val) = __get_user_bad();
- }
- } while (0) ;
- ((c)) = (__typeof__(*((buf))))__gu_val;
- __gu_err;
- }
-);
+The preprocessor output (edited to become somewhat readable)::
+
+ (
+ {
+ long __gu_err = - 14 , __gu_val = 0;
+ const __typeof__(*( ( buf ) )) *__gu_addr = ((buf));
+ if (((((0 + current_set[0])->tss.segment) == 0x18 ) ||
+ (((sizeof(*(buf))) <= 0xC0000000UL) &&
+ ((unsigned long)(__gu_addr ) <= 0xC0000000UL - (sizeof(*(buf)))))))
+ do {
+ __gu_err = 0;
+ switch ((sizeof(*(buf)))) {
+ case 1:
+ __asm__ __volatile__(
+ "1: mov" "b" " %2,%" "b" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "b" " %" "b" "1,%" "b" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
+ ".text" : "=r"(__gu_err), "=q" (__gu_val): "m"((*(struct __large_struct *)
+ ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err )) ;
+ break;
+ case 2:
+ __asm__ __volatile__(
+ "1: mov" "w" " %2,%" "w" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "w" " %" "w" "1,%" "w" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
+ ".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
+ ( __gu_addr )) ), "i"(- 14 ), "0"( __gu_err ));
+ break;
+ case 4:
+ __asm__ __volatile__(
+ "1: mov" "l" " %2,%" "" "1\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %3,%0\n"
+ " xor" "l" " %" "" "1,%" "" "1\n"
+ " jmp 2b\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n" " .long 1b,3b\n"
+ ".text" : "=r"(__gu_err), "=r" (__gu_val) : "m"((*(struct __large_struct *)
+ ( __gu_addr )) ), "i"(- 14 ), "0"(__gu_err));
+ break;
+ default:
+ (__gu_val) = __get_user_bad();
+ }
+ } while (0) ;
+ ((c)) = (__typeof__(*((buf))))__gu_val;
+ __gu_err;
+ }
+ );
WOW! Black GCC/assembly magic. This is impossible to follow, so let's
-see what code gcc generates:
+see what code gcc generates::
> xorl %edx,%edx
> movl current_set,%eax
@@ -154,7 +160,7 @@ understand. Can we? The actual user access is quite obvious. Thanks
to the unified address space we can just access the address in user
memory. But what does the .section stuff do?????
-To understand this we have to look at the final kernel:
+To understand this we have to look at the final kernel::
> objdump --section-headers vmlinux
>
@@ -181,7 +187,7 @@ To understand this we have to look at the final kernel:
There are obviously 2 non standard ELF sections in the generated object
file. But first we want to find out what happened to our code in the
-final kernel executable:
+final kernel executable::
> objdump --disassemble --section=.text vmlinux
>
@@ -199,7 +205,7 @@ final kernel executable:
The whole user memory access is reduced to 10 x86 machine instructions.
The instructions bracketed in the .section directives are no longer
in the normal execution path. They are located in a different section
-of the executable file:
+of the executable file::
> objdump --disassemble --section=.fixup vmlinux
>
@@ -207,14 +213,15 @@ of the executable file:
> c0199ffa <.fixup+10ba> xorb %dl,%dl
> c0199ffc <.fixup+10bc> jmp c017e7a7 <do_con_write+e3>
-And finally:
+And finally::
+
> objdump --full-contents --section=__ex_table vmlinux
>
> c01aa7c4 93c017c0 e09f19c0 97c017c0 99c017c0 ................
> c01aa7d4 f6c217c0 e99f19c0 a5e717c0 f59f19c0 ................
> c01aa7e4 080a18c0 01a019c0 0a0a18c0 04a019c0 ................
-or in human readable byte order:
+or in human readable byte order::
> c01aa7c4 c017c093 c0199fe0 c017c097 c017c099 ................
> c01aa7d4 c017c2f6 c0199fe9 c017e7a5 c0199ff5 ................
@@ -222,18 +229,22 @@ or in human readable byte order:
this is the interesting part!
> c01aa7e4 c0180a08 c019a001 c0180a0a c019a004 ................
-What happened? The assembly directives
+What happened? The assembly directives::
-.section .fixup,"ax"
-.section __ex_table,"a"
+ .section .fixup,"ax"
+ .section __ex_table,"a"
told the assembler to move the following code to the specified
-sections in the ELF object file. So the instructions
-3: movl $-14,%eax
- xorb %dl,%dl
- jmp 2b
-ended up in the .fixup section of the object file and the addresses
+sections in the ELF object file. So the instructions::
+
+ 3: movl $-14,%eax
+ xorb %dl,%dl
+ jmp 2b
+
+ended up in the .fixup section of the object file and the addresses::
+
.long 1b,3b
+
ended up in the __ex_table section of the object file. 1b and 3b
are local labels. The local label 1b (1b stands for next label 1
backward) is the address of the instruction that might fault, i.e.
@@ -246,35 +257,39 @@ the fault, in our case the actual value is c0199ff5:
the original assembly code: > 3: movl $-14,%eax
and linked in vmlinux : > c0199ff5 <.fixup+10b5> movl $0xfffffff2,%eax
-The assembly code
+The assembly code::
+
> .section __ex_table,"a"
> .align 4
> .long 1b,3b
-becomes the value pair
+becomes the value pair::
+
> c01aa7d4 c017c2f6 c0199fe9 c017e7a5 c0199ff5 ................
^this is ^this is
1b 3b
+
c017e7a5,c0199ff5 in the exception table of the kernel.
So, what actually happens if a fault from kernel mode with no suitable
vma occurs?
-1.) access to invalid address:
- > c017e7a5 <do_con_write+e1> movb (%ebx),%dl
-2.) MMU generates exception
-3.) CPU calls do_page_fault
-4.) do page fault calls search_exception_table (regs->eip == c017e7a5);
-5.) search_exception_table looks up the address c017e7a5 in the
- exception table (i.e. the contents of the ELF section __ex_table)
- and returns the address of the associated fault handle code c0199ff5.
-6.) do_page_fault modifies its own return address to point to the fault
- handle code and returns.
-7.) execution continues in the fault handling code.
-8.) 8a) EAX becomes -EFAULT (== -14)
- 8b) DL becomes zero (the value we "read" from user space)
- 8c) execution continues at local label 2 (address of the
- instruction immediately after the faulting user access).
+#. access to invalid address::
+
+ > c017e7a5 <do_con_write+e1> movb (%ebx),%dl
+#. MMU generates exception
+#. CPU calls do_page_fault
+#. do_page_fault calls search_exception_table (regs->eip == c017e7a5);
+#. search_exception_table looks up the address c017e7a5 in the
+ exception table (i.e. the contents of the ELF section __ex_table)
+ and returns the address of the associated fault handle code c0199ff5.
+#. do_page_fault modifies its own return address to point to the fault
+ handle code and returns.
+#. execution continues in the fault handling code.
+#. a) EAX becomes -EFAULT (== -14)
+ b) DL becomes zero (the value we "read" from user space)
+ c) execution continues at local label 2 (address of the
+ instruction immediately after the faulting user access).
The steps 8a to 8c in a certain way emulate the faulting instruction.
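+
+In later kernels the table walk and the redirect to the fixup code live in
+a small helper called from the page fault path. A minimal sketch of steps
+4 to 6 above (simplified; exact names and signatures vary between kernel
+versions)::
+
+    /*
+     * Sketch only: look up the faulting instruction address in the
+     * exception table and, if an entry exists, resume at its fixup code.
+     */
+    int fixup_exception(struct pt_regs *regs)
+    {
+        const struct exception_table_entry *e;
+
+        e = search_exception_tables(regs->ip);
+        if (!e)
+            return 0;                    /* no fixup - a real kernel bug */
+
+        regs->ip = ex_fixup_addr(e);     /* continue at local label 3: */
+        return 1;
+    }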
@@ -295,14 +310,15 @@ Things changed when 64-bit support was added to x86 Linux. Rather than
double the size of the exception table by expanding the two entries
from 32-bits to 64 bits, a clever trick was used to store addresses
as relative offsets from the table itself. The assembly code changed
-from:
- .long 1b,3b
-to:
- .long (from) - .
- .long (to) - .
+from::
+
+ .long 1b,3b
+
+to::
+
+ .long (from) - .
+ .long (to) - .
and the C-code that uses these values converts back to absolute addresses
-like this:
+like this::
ex_insn_addr(const struct exception_table_entry *x)
{
@@ -313,15 +329,18 @@ In v4.6 the exception table entry was expanded with a new field "handler".
This is also 32-bits wide and contains a third relative function
pointer which points to one of:
-1) int ex_handler_default(const struct exception_table_entry *fixup)
- This is legacy case that just jumps to the fixup code
-2) int ex_handler_fault(const struct exception_table_entry *fixup)
- This case provides the fault number of the trap that occurred at
- entry->insn. It is used to distinguish page faults from machine
- check.
-3) int ex_handler_ext(const struct exception_table_entry *fixup)
- This case is used for uaccess_err ... we need to set a flag
- in the task structure. Before the handler functions existed this
- case was handled by adding a large offset to the fixup to tag
- it as special.
+1) ``int ex_handler_default(const struct exception_table_entry *fixup)``
+   This is the legacy case that just jumps to the fixup code.
+
+2) ``int ex_handler_fault(const struct exception_table_entry *fixup)``
+ This case provides the fault number of the trap that occurred at
+ entry->insn. It is used to distinguish page faults from machine
+ check.
+
+3) ``int ex_handler_ext(const struct exception_table_entry *fixup)``
+ This case is used for uaccess_err ... we need to set a flag
+ in the task structure. Before the handler functions existed this
+ case was handled by adding a large offset to the fixup to tag
+ it as special.
+
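+As a sketch in C (the struct layout follows the description above;
+``ex_handler_t`` stands in for the handler function pointer type and
+``e`` for a looked-up table entry)::
+
+    struct exception_table_entry {
+        int insn, fixup, handler;    /* all stored as relative offsets */
+    };
+
+    /* offset -> absolute address: add the offset to the address of the
+     * field that stores it, the same trick used for insn and fixup
+     */
+    handler = (ex_handler_t)((unsigned long)&e->handler + e->handler);
+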
More functions can easily be added.
diff --git a/Documentation/x86/i386/IO-APIC.txt b/Documentation/x86/i386/IO-APIC.rst
index 15f5baf7e1b6..ce4d8df15e7c 100644
--- a/Documentation/x86/i386/IO-APIC.txt
+++ b/Documentation/x86/i386/IO-APIC.rst
@@ -1,3 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+IO-APIC
+=======
+
+:Author: Ingo Molnar <mingo@kernel.org>
+
Most (all) Intel-MP compliant SMP boards have the so-called 'IO-APIC',
which is an enhanced interrupt controller. It enables us to route
hardware interrupts to multiple CPUs, or to CPU groups. Without an
@@ -13,9 +21,8 @@ usually worked around by the kernel. If your MP-compliant SMP board does
not boot Linux, then consult the linux-smp mailing list archives first.
If your box boots fine with enabled IO-APIC IRQs, then your
-/proc/interrupts will look like this one:
+/proc/interrupts will look like this one::
- ---------------------------->
hell:~> cat /proc/interrupts
CPU0
0: 1360293 IO-APIC-edge timer
@@ -28,7 +35,6 @@ If your box boots fine with enabled IO-APIC IRQs, then your
NMI: 0
ERR: 0
hell:~>
- <----------------------------
Some interrupts are still listed as 'XT PIC', but this is not a problem;
none of those IRQ sources is performance-critical.
@@ -37,14 +43,14 @@ none of those IRQ sources is performance-critical.
In the unlikely case that your board does not create a working mp-table,
you can use the pirq= boot parameter to 'hand-construct' IRQ entries. This
is non-trivial though and cannot be automated. One sample /etc/lilo.conf
-entry:
+entry::
append="pirq=15,11,10"
The actual numbers depend on your system, on your PCI cards and on their
PCI slot position. Usually PCI slots are 'daisy chained' before they are
connected to the PCI chipset IRQ routing facility (the incoming PIRQ1-4
-lines):
+lines)::
,-. ,-. ,-. ,-. ,-.
PIRQ4 ----| |-. ,-| |-. ,-| |-. ,-| |--------| |
@@ -56,7 +62,7 @@ lines):
PIRQ1 ----| |- `----| |- `----| |- `----| |--------| |
`-' `-' `-' `-' `-'
-Every PCI card emits a PCI IRQ, which can be INTA, INTB, INTC or INTD:
+Every PCI card emits a PCI IRQ, which can be INTA, INTB, INTC or INTD::
,-.
INTD--| |
@@ -78,19 +84,19 @@ to have non shared interrupts). Slot5 should be used for videocards, they
do not use interrupts normally, thus they are not daisy chained either.
so if you have your SCSI card (IRQ11) in Slot1, Tulip card (IRQ9) in
-Slot2, then you'll have to specify this pirq= line:
+Slot2, then you'll have to specify this pirq= line::
append="pirq=11,9"
the following script tries to figure out such a default pirq= line from
-your PCI configuration:
+your PCI configuration::
echo -n pirq=; echo `scanpci | grep T_L | cut -c56-` | sed 's/ /,/g'
note that this script won't work if you have skipped a few slots or if your
board does not do default daisy-chaining. (or the IO-APIC has the PIRQ pins
connected in some strange way). E.g. if in the above case you have your SCSI
-card (IRQ11) in Slot3, and have Slot1 empty:
+card (IRQ11) in Slot3, and have Slot1 empty::
append="pirq=0,9,11"
@@ -105,7 +111,7 @@ won't function properly (e.g. if it's inserted as a module).
If you have 2 PCI buses, then you can use up to 8 pirq values, although such
boards tend to have a good configuration.
-Be prepared that it might happen that you need some strange pirq line:
+Be prepared that it might happen that you need some strange pirq line::
append="pirq=0,0,0,0,0,0,9,11"
@@ -115,5 +121,3 @@ Good luck and mail to linux-smp@vger.kernel.org or
linux-kernel@vger.kernel.org if you have any problems that are not covered
by this document.
--- mingo
-
diff --git a/Documentation/x86/i386/index.rst b/Documentation/x86/i386/index.rst
new file mode 100644
index 000000000000..8747cf5bbd49
--- /dev/null
+++ b/Documentation/x86/i386/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+i386 Support
+============
+
+.. toctree::
+ :maxdepth: 2
+
+ IO-APIC
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
new file mode 100644
index 000000000000..ae36fc5fc649
--- /dev/null
+++ b/Documentation/x86/index.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+x86-specific Documentation
+==========================
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ boot
+ topology
+ exception-tables
+ kernel-stacks
+ entry_64
+ earlyprintk
+ orc-unwinder
+ zero-page
+ tlb
+ mtrr
+ pat
+ protection-keys
+ intel_mpx
+ amd-memory-encryption
+ pti
+ mds
+ microcode
+ resctrl_ui
+ usb-legacy-support
+ i386/index
+ x86_64/index
diff --git a/Documentation/x86/intel_mpx.txt b/Documentation/x86/intel_mpx.rst
index 85d0549ad846..387a640941a6 100644
--- a/Documentation/x86/intel_mpx.txt
+++ b/Documentation/x86/intel_mpx.rst
@@ -1,5 +1,11 @@
-1. Intel(R) MPX Overview
-========================
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================
+Intel(R) Memory Protection Extensions (MPX)
+===========================================
+
+Intel(R) MPX Overview
+=====================
Intel(R) Memory Protection Extensions (Intel(R) MPX) is a new capability
introduced into Intel Architecture. Intel MPX provides hardware features
@@ -7,7 +13,7 @@ that can be used in conjunction with compiler changes to check memory
references, for those references whose compile-time normal intentions are
usurped at runtime due to buffer overflow or underflow.
-You can tell if your CPU supports MPX by looking in /proc/cpuinfo:
+You can tell if your CPU supports MPX by looking in /proc/cpuinfo::
cat /proc/cpuinfo | grep ' mpx '
@@ -21,8 +27,8 @@ can be downloaded from
http://software.intel.com/en-us/articles/intel-software-development-emulator
-2. How to get the advantage of MPX
-==================================
+How to get the advantage of MPX
+===============================
For MPX to work, changes are required in the kernel, binutils and compiler.
No source changes are required for applications, just a recompile.
@@ -84,14 +90,15 @@ Kernel MPX Code:
is unmapped.
-3. How does MPX kernel code work
-================================
+How does MPX kernel code work
+=============================
Handling #BR faults caused by MPX
---------------------------------
When MPX is enabled, there are 2 new situations that can generate
#BR faults.
+
* new bounds tables (BT) need to be allocated to save bounds.
* bounds violation caused by MPX instructions.
@@ -124,37 +131,37 @@ the kernel. It can theoretically be done completely from userspace. Here
are a few ways this could be done. We don't think any of them are practical
in the real-world, but here they are.
-Q: Can virtual space simply be reserved for the bounds tables so that we
- never have to allocate them?
-A: MPX-enabled application will possibly create a lot of bounds tables in
- process address space to save bounds information. These tables can take
- up huge swaths of memory (as much as 80% of the memory on the system)
- even if we clean them up aggressively. In the worst-case scenario, the
- tables can be 4x the size of the data structure being tracked. IOW, a
- 1-page structure can require 4 bounds-table pages. An X-GB virtual
- area needs 4*X GB of virtual space, plus 2GB for the bounds directory.
- If we were to preallocate them for the 128TB of user virtual address
- space, we would need to reserve 512TB+2GB, which is larger than the
- entire virtual address space today. This means they can not be reserved
- ahead of time. Also, a single process's pre-populated bounds directory
- consumes 2GB of virtual *AND* physical memory. IOW, it's completely
- infeasible to prepopulate bounds directories.
-
-Q: Can we preallocate bounds table space at the same time memory is
- allocated which might contain pointers that might eventually need
- bounds tables?
-A: This would work if we could hook the site of each and every memory
- allocation syscall. This can be done for small, constrained applications.
- But, it isn't practical at a larger scale since a given app has no
- way of controlling how all the parts of the app might allocate memory
- (think libraries). The kernel is really the only place to intercept
- these calls.
-
-Q: Could a bounds fault be handed to userspace and the tables allocated
- there in a signal handler instead of in the kernel?
-A: mmap() is not on the list of safe async handler functions and even
- if mmap() would work it still requires locking or nasty tricks to
- keep track of the allocation state there.
+:Q: Can virtual space simply be reserved for the bounds tables so that we
+ never have to allocate them?
+:A: MPX-enabled application will possibly create a lot of bounds tables in
+ process address space to save bounds information. These tables can take
+ up huge swaths of memory (as much as 80% of the memory on the system)
+ even if we clean them up aggressively. In the worst-case scenario, the
+ tables can be 4x the size of the data structure being tracked. IOW, a
+ 1-page structure can require 4 bounds-table pages. An X-GB virtual
+ area needs 4*X GB of virtual space, plus 2GB for the bounds directory.
+ If we were to preallocate them for the 128TB of user virtual address
+ space, we would need to reserve 512TB+2GB, which is larger than the
+ entire virtual address space today. This means they can not be reserved
+ ahead of time. Also, a single process's pre-populated bounds directory
+ consumes 2GB of virtual *AND* physical memory. IOW, it's completely
+ infeasible to prepopulate bounds directories.
+
+:Q: Can we preallocate bounds table space at the same time memory is
+ allocated which might contain pointers that might eventually need
+ bounds tables?
+:A: This would work if we could hook the site of each and every memory
+ allocation syscall. This can be done for small, constrained applications.
+ But, it isn't practical at a larger scale since a given app has no
+ way of controlling how all the parts of the app might allocate memory
+ (think libraries). The kernel is really the only place to intercept
+ these calls.
+
+:Q: Could a bounds fault be handed to userspace and the tables allocated
+ there in a signal handler instead of in the kernel?
+:A: mmap() is not on the list of safe async handler functions and even
+ if mmap() would work it still requires locking or nasty tricks to
+ keep track of the allocation state there.
Having ruled out all of the userspace-only approaches for managing
bounds tables that we could think of, we create them on demand in
@@ -167,20 +174,20 @@ If a #BR is generated due to a bounds violation caused by MPX.
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.
-The _sigfault field of struct siginfo is extended as follow:
-
-87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
-88 struct {
-89 void __user *_addr; /* faulting insn/memory ref. */
-90 #ifdef __ARCH_SI_TRAPNO
-91 int _trapno; /* TRAP # which caused the signal */
-92 #endif
-93 short _addr_lsb; /* LSB of the reported address */
-94 struct {
-95 void __user *_lower;
-96 void __user *_upper;
-97 } _addr_bnd;
-98 } _sigfault;
+The _sigfault field of struct siginfo is extended as follows::
+
+ 87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ 88 struct {
+ 89 void __user *_addr; /* faulting insn/memory ref. */
+ 90 #ifdef __ARCH_SI_TRAPNO
+ 91 int _trapno; /* TRAP # which caused the signal */
+ 92 #endif
+ 93 short _addr_lsb; /* LSB of the reported address */
+ 94 struct {
+ 95 void __user *_lower;
+ 96 void __user *_upper;
+ 97 } _addr_bnd;
+ 98 } _sigfault;
The '_addr' field refers to the violation address, and the new '_addr_bnd'
field refers to the upper/lower bounds when a #BR is caused.
@@ -209,9 +216,10 @@ Adding new prctl commands
Two new prctl commands are added to enable and disable MPX bounds tables
management in kernel.
+::
-155 #define PR_MPX_ENABLE_MANAGEMENT 43
-156 #define PR_MPX_DISABLE_MANAGEMENT 44
+ 155 #define PR_MPX_ENABLE_MANAGEMENT 43
+ 156 #define PR_MPX_DISABLE_MANAGEMENT 44
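+
+An illustrative sketch of how a runtime library might issue these commands
+(plain prctl(2) usage; the unused arguments are simply passed as zero
+here)::
+
+    #include <stdio.h>
+    #include <sys/prctl.h>
+
+    #ifndef PR_MPX_ENABLE_MANAGEMENT
+    #define PR_MPX_ENABLE_MANAGEMENT    43
+    #define PR_MPX_DISABLE_MANAGEMENT   44
+    #endif
+
+    int main(void)
+    {
+        if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0))
+            perror("PR_MPX_ENABLE_MANAGEMENT");
+
+        /* ... run with kernel-managed bounds tables ... */
+
+        if (prctl(PR_MPX_DISABLE_MANAGEMENT, 0, 0, 0, 0))
+            perror("PR_MPX_DISABLE_MANAGEMENT");
+        return 0;
+    }
+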
Runtime library in userspace is responsible for allocation of bounds
directory. So kernel have to use XSAVE instruction to get the base
@@ -223,8 +231,8 @@ into struct mm_struct to be used in future during PR_MPX_ENABLE_MANAGEMENT
command execution.
-4. Special rules
-================
+Special rules
+=============
1) If userspace is requesting help from the kernel to do the management
of bounds tables, it may not create or modify entries in the bounds directory.
diff --git a/Documentation/x86/kernel-stacks b/Documentation/x86/kernel-stacks.rst
index 9a0aa4d3a866..6b0bcf027ff1 100644
--- a/Documentation/x86/kernel-stacks
+++ b/Documentation/x86/kernel-stacks.rst
@@ -1,5 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Kernel Stacks
+=============
+
Kernel stacks on x86-64 bit
----------------------------
+===========================
Most of the text from Keith Owens, hacked by AK
@@ -57,9 +63,9 @@ IST events with the same code to be nested. However in most cases, the
stack size allocated to an IST assumes no nesting for the same code.
If that assumption is ever broken then the stacks will become corrupt.
-The currently assigned IST stacks are :-
+The currently assigned IST stacks are:
-* DOUBLEFAULT_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+* ESTACK_DF. EXCEPTION_STKSZ (PAGE_SIZE).
Used for interrupt 8 - Double Fault Exception (#DF).
@@ -68,7 +74,7 @@ The currently assigned IST stacks are :-
Using a separate stack allows the kernel to recover from it well enough
in many cases to still output an oops.
-* NMI_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+* ESTACK_NMI. EXCEPTION_STKSZ (PAGE_SIZE).
Used for non-maskable interrupts (NMI).
@@ -76,7 +82,7 @@ The currently assigned IST stacks are :-
middle of switching stacks. Using IST for NMI events avoids making
assumptions about the previous state of the kernel stack.
-* DEBUG_STACK. DEBUG_STKSZ
+* ESTACK_DB. EXCEPTION_STKSZ (PAGE_SIZE).
Used for hardware debug interrupts (interrupt 1) and for software
debug interrupts (INT3).
@@ -86,7 +92,12 @@ The currently assigned IST stacks are :-
avoids making assumptions about the previous state of the kernel
stack.
-* MCE_STACK. EXCEPTION_STKSZ (PAGE_SIZE).
+ To handle nested #DB correctly there exist two instances of DB stacks. On
+ #DB entry the IST stackpointer for #DB is switched to the second instance
+ so a nested #DB starts from a clean stack. The nested #DB switches
+ the IST stackpointer to a guard hole to catch triple nesting.
+
+* ESTACK_MCE. EXCEPTION_STKSZ (PAGE_SIZE).
Used for interrupt 18 - Machine Check Exception (#MC).
@@ -98,7 +109,7 @@ For more details see the Intel IA32 or AMD AMD64 architecture manuals.
Printing backtraces on x86
---------------------------
+==========================
The question about the '?' preceding function names in an x86 stacktrace
keeps popping up, here's an indepth explanation. It helps if the reader
@@ -108,7 +119,7 @@ arch/x86/kernel/dumpstack.c.
Adapted from Ingo's mail, Message-ID: <20150521101614.GA10889@gmail.com>:
We always scan the full kernel stack for return addresses stored on
-the kernel stack(s) [*], from stack top to stack bottom, and print out
+the kernel stack(s) [1]_, from stack top to stack bottom, and print out
anything that 'looks like' a kernel text address.
If it fits into the frame pointer chain, we print it without a question
@@ -136,6 +147,6 @@ that look like kernel text addresses, so if debug information is wrong,
we still print out the real call chain as well - just with more question
marks than ideal.
-[*] For things like IRQ and IST stacks, we also scan those stacks, in
- the right order, and try to cross from one stack into another
- reconstructing the call chain. This works most of the time.
+.. [1] For things like IRQ and IST stacks, we also scan those stacks, in
+ the right order, and try to cross from one stack into another
+ reconstructing the call chain. This works most of the time.
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
new file mode 100644
index 000000000000..5d4330be200f
--- /dev/null
+++ b/Documentation/x86/mds.rst
@@ -0,0 +1,193 @@
+Microarchitectural Data Sampling (MDS) mitigation
+=================================================
+
+.. _mds:
+
+Overview
+--------
+
+Microarchitectural Data Sampling (MDS) is a family of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+ - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from
+memory that takes a fault or assist can leave data in a microarchitectural
+structure that may later be observed using one of the same methods used by
+MSBDS, MFBDS or MLPDS.
+
+Exposure assumptions
+--------------------
+
+It is assumed that attack code resides in user space or in a guest with one
+exception. The rationale behind this assumption is that the code construct
+needed for exploiting MDS requires:
+
+ - to control the load to trigger a fault or assist
+
+ - to have a disclosure gadget which exposes the speculatively accessed
+ data for consumption through a side channel.
+
+ - to control the pointer through which the disclosure gadget exposes the
+ data
+
+The existence of such a construct in the kernel cannot be excluded with
+100% certainty, but the complexity involved makes it extremely unlikely.
+
+There is one exception, which is untrusted BPF. The functionality of
+untrusted BPF is limited, but it needs to be thoroughly investigated
+whether it can be used to create such a construct.
+
+
+Mitigation strategy
+-------------------
+
+All variants have the same mitigation strategy at least for the single CPU
+thread case (SMT off): Force the CPU to clear the affected buffers.
+
+This is achieved by using the otherwise unused and obsolete VERW
+instruction in combination with a microcode update. The microcode clears
+the affected CPU buffers when the VERW instruction is executed.
+
+For virtualization there are two ways to achieve CPU buffer
+clearing. Either the modified VERW instruction or via the L1D Flush
+command. The latter is issued when L1TF mitigation is enabled so the extra
+VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to
+be issued.
+
+If the VERW instruction with the supplied segment selector argument is
+executed on a CPU without the microcode update there is no side effect
+other than a small number of pointlessly wasted CPU cycles.
+
+This does not protect against cross Hyper-Thread attacks except for MSBDS
+which is only exploitable cross Hyper-thread when one of the Hyper-Threads
+enters a C-state.
+
+The kernel provides a function to invoke the buffer clearing:
+
+ mds_clear_cpu_buffers()
+
+The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+(idle) transitions.
+
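+On CPUs with the updated microcode this boils down to executing VERW with
+a valid segment selector. A minimal sketch of such a helper (simplified;
+the selector choice and constraints are illustrative)::
+
+    static inline void mds_clear_cpu_buffers(void)
+    {
+        static const u16 ds = __KERNEL_DS;
+
+        /* VERW with a valid selector triggers the microcode based buffer
+         * clear; without the microcode update it only wastes a few cycles.
+         */
+        asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+    }
+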
+As a special quirk to address virtualization scenarios where the host has
+the microcode updated, but the hypervisor does not (yet) expose the
+MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
+hope that it might actually clear the buffers. The state is reflected
+accordingly.
+
+According to current knowledge additional mitigations inside the kernel
+itself are not required because the necessary gadgets to expose the leaked
+data cannot be controlled in a way which allows exploitation from malicious
+user space or VM guests.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ======= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ mds=off is supplied on the kernel command line
+
+ full Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
+ ======= ============================================================
+
+If the CPU is affected and mds=off is not supplied on the kernel command
+line then the kernel selects the appropriate mitigation mode depending on
+the availability of the MD_CLEAR CPUID bit.
+
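+Expressed as a sketch (``mds_off_on_cmdline`` stands in for the command
+line handling; the mode names mirror the table above and the feature/bug
+flags follow the usual X86_BUG_* / X86_FEATURE_* convention)::
+
+    if (!boot_cpu_has_bug(X86_BUG_MDS) || mds_off_on_cmdline)
+        mds_mitigation = MDS_MITIGATION_OFF;
+    else if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+        mds_mitigation = MDS_MITIGATION_FULL;
+    else
+        mds_mitigation = MDS_MITIGATION_VMWERV;
+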
+Mitigation points
+-----------------
+
+1. Return to user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ When transitioning from kernel to user space the CPU buffers are flushed
+ on affected CPUs when the mitigation is not disabled on the kernel
+  command line. The mitigation is enabled through the static key
+ mds_user_clear.
+
+ The mitigation is invoked in prepare_exit_to_usermode() which covers
+ all but one of the kernel to user space transitions. The exception
+ is when we return from a Non Maskable Interrupt (NMI), which is
+ handled directly in do_nmi().
+
+ (The reason that NMI is special is that prepare_exit_to_usermode() can
+ enable IRQs. In NMI context, NMIs are blocked, and we don't want to
+ enable IRQs with NMIs blocked.)
+
+
+2. C-State transition
+^^^^^^^^^^^^^^^^^^^^^
+
+ When a CPU goes idle and enters a C-State the CPU buffers need to be
+ cleared on affected CPUs when SMT is active. This addresses the
+ repartitioning of the store buffer when one of the Hyper-Threads enters
+ a C-State.
+
+ When SMT is inactive, i.e. either the CPU does not support it or all
+  sibling threads are offline, CPU buffer clearing is not required.
+
+ The idle clearing is enabled on CPUs which are only affected by MSBDS
+ and not by any other MDS variant. The other MDS variants cannot be
+ protected against cross Hyper-Thread attacks because the Fill Buffer and
+ the Load Ports are shared. So on CPUs affected by other variants, the
+ idle clearing would be a window dressing exercise and is therefore not
+ activated.
+
+ The invocation is controlled by the static key mds_idle_clear which is
+ switched depending on the chosen mitigation mode and the SMT state of
+ the system.
+
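+  In code this gating is just a static branch in front of the clear (a
+  sketch; the user space clear can be keyed on mds_user_clear the same
+  way)::
+
+      static inline void mds_idle_clear_cpu_buffers(void)
+      {
+          if (static_branch_likely(&mds_idle_clear))
+              mds_clear_cpu_buffers();
+      }
+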
+  The buffer clear is only invoked before entering the C-State, to prevent
+  stale data from the idling CPU from spilling to the Hyper-Thread sibling
+  after the store buffer has been repartitioned and all entries are
+  available to the non-idle sibling.
+
+  When coming out of idle the store buffer is partitioned again so each
+  sibling has half of it available. The CPU coming back from idle could
+  then be speculatively exposed to the contents of the sibling. The
+  buffers are flushed either on exit to user space or on VMENTER, so
+  malicious code in user space or in the guest cannot speculatively
+  access them.
+
+  The mitigation is hooked into all variants of halt()/mwait(), but does
+  not cover the legacy ACPI IO-Port mechanism, because the ACPI idle
+  driver was superseded by the intel_idle driver around 2010 and
+  intel_idle is preferred on all affected CPUs, which are expected to
+  gain the MD_CLEAR functionality in microcode. Aside from that, the
+  IO-Port mechanism is a legacy interface which is only used on older
+  systems that are either not affected or no longer receive microcode
+  updates.
diff --git a/Documentation/x86/microcode.txt b/Documentation/x86/microcode.rst
index 79fdb4a8148a..a320d37982ed 100644
--- a/Documentation/x86/microcode.txt
+++ b/Documentation/x86/microcode.rst
@@ -1,7 +1,11 @@
- The Linux Microcode Loader
+.. SPDX-License-Identifier: GPL-2.0
-Authors: Fenghua Yu <fenghua.yu@intel.com>
- Borislav Petkov <bp@suse.de>
+==========================
+The Linux Microcode Loader
+==========================
+
+:Authors: - Fenghua Yu <fenghua.yu@intel.com>
+ - Borislav Petkov <bp@suse.de>
The kernel has a x86 microcode loading facility which is supposed to
provide microcode loading methods in the OS. Potential use cases are
@@ -10,8 +14,8 @@ and updating the microcode on long-running systems without rebooting.
The loader supports three loading methods:
-1. Early load microcode
-=======================
+Early load microcode
+====================
The kernel can update microcode very early during boot. Loading
microcode early can fix CPU issues before they are observed during
@@ -26,8 +30,10 @@ loader parses the combined initrd image during boot.
The microcode files in cpio name space are:
-on Intel: kernel/x86/microcode/GenuineIntel.bin
-on AMD : kernel/x86/microcode/AuthenticAMD.bin
+on Intel:
+ kernel/x86/microcode/GenuineIntel.bin
+on AMD:
+ kernel/x86/microcode/AuthenticAMD.bin
During BSP (BootStrapping Processor) boot (pre-SMP), the kernel
scans the microcode file in the initrd. If microcode matching the
@@ -42,8 +48,8 @@ Here's a crude example how to prepare an initrd with microcode (this is
normally done automatically by the distribution, when recreating the
initrd, so you don't really have to do it yourself. It is documented
here for future reference only).
+::
----
#!/bin/bash
if [ -z "$1" ]; then
@@ -76,15 +82,15 @@ here for future reference only).
cat ucode.cpio $INITRD.orig > $INITRD
rm -rf $TMPDIR
----
+
The system needs to have the microcode packages installed into
/lib/firmware or you need to fixup the paths above if yours are
somewhere else and/or you've downloaded them directly from the processor
vendor's site.
-2. Late loading
-===============
+Late loading
+============
There are two legacy user space interfaces to load microcode, either through
/dev/cpu/microcode or through /sys/devices/system/cpu/microcode/reload file
@@ -94,9 +100,9 @@ The /dev/cpu/microcode method is deprecated because it needs a special
userspace tool for that.
The easier method is simply installing the microcode packages your distro
-supplies and running:
+supplies and running::
-# echo 1 > /sys/devices/system/cpu/microcode/reload
+ # echo 1 > /sys/devices/system/cpu/microcode/reload
as root.
@@ -104,29 +110,29 @@ The loading mechanism looks for microcode blobs in
/lib/firmware/{intel-ucode,amd-ucode}. The default distro installation
packages already put them there.
-3. Builtin microcode
-====================
+Builtin microcode
+=================
The loader supports also loading of a builtin microcode supplied through
the regular builtin firmware method CONFIG_EXTRA_FIRMWARE. Only 64-bit is
currently supported.
-Here's an example:
+Here's an example::
-CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
-CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
+ CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
+ CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
-This basically means, you have the following tree structure locally:
+This basically means you have the following tree structure locally::
-/lib/firmware/
-|-- amd-ucode
-...
-| |-- microcode_amd_fam15h.bin
-...
-|-- intel-ucode
-...
-| |-- 06-3a-09
-...
+ /lib/firmware/
+ |-- amd-ucode
+ ...
+ | |-- microcode_amd_fam15h.bin
+ ...
+ |-- intel-ucode
+ ...
+ | |-- 06-3a-09
+ ...
so that the build system can find those files and integrate them into
the final kernel image. The early loader finds them and applies them.
diff --git a/Documentation/x86/mtrr.rst b/Documentation/x86/mtrr.rst
new file mode 100644
index 000000000000..c5b695d75349
--- /dev/null
+++ b/Documentation/x86/mtrr.rst
@@ -0,0 +1,354 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
+MTRR (Memory Type Range Register) control
+=========================================
+
+:Authors: - Richard Gooch <rgooch@atnf.csiro.au> - 3 Jun 1999
+ - Luis R. Rodriguez <mcgrof@do-not-panic.com> - April 9, 2015
+
+
+Phasing out MTRR use
+====================
+
+MTRR use is replaced on modern x86 hardware with PAT. Direct MTRR use by
+drivers on Linux is now completely phased out; device drivers should use
+arch_phys_wc_add() in combination with ioremap_wc() to make MTRR effective
+on non-PAT systems, while being a no-op but equally effective on PAT
+enabled systems.
+
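+An illustrative sketch of that driver pattern (``par``, ``fb_base`` and
+``fb_size`` are placeholders; error handling and the surrounding driver
+structure are omitted)::
+
+    /* ask for write-combining on a prefetchable range, e.g. video RAM */
+    par->wc_cookie   = arch_phys_wc_add(fb_base, fb_size);
+    par->screen_base = ioremap_wc(fb_base, fb_size);
+
+    /* ... and undo it on teardown ... */
+    iounmap(par->screen_base);
+    arch_phys_wc_del(par->wc_cookie);
+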
+Even if Linux does not use MTRRs directly, some x86 platform firmware may still
+set up MTRRs early before booting the OS. They do this as some platform
+firmware may still have implemented access to MTRRs which would be controlled
+and handled by the platform firmware directly. An example of platform use of
+MTRRs is through the use of SMI handlers, one case could be for fan control,
+the platform code would need uncachable access to some of its fan control
+registers. Such platform access does not need any Operating System MTRR code in
+place other than mtrr_type_lookup() to ensure any OS specific mapping requests
+are aligned with platform MTRR setup. If MTRRs are only set up by the platform
+firmware code though and the OS does not make any specific MTRR mapping
+requests mtrr_type_lookup() should always return MTRR_TYPE_INVALID.
+
+For details refer to :doc:`pat`.
+
+.. tip::
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+ processor access to memory ranges. This is most useful when you have
+ a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+ allows bus write transfers to be combined into a larger transfer
+ before bursting over the PCI/AGP bus. This can increase performance
+ of image write operations 2.5 times or more.
+
+ The Cyrix 6x86, 6x86MX and M II processors have Address Range
+ Registers (ARRs) which provide a similar functionality to MTRRs. For
+ these, the ARRs are used to emulate the MTRRs.
+
+ The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+ MTRRs. These are supported. The AMD Athlon family provide 8 Intel
+ style MTRRs.
+
+ The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
+ are supported.
+
+ The VIA Cyrix III and VIA C3 CPUs offer 8 Intel style MTRRs.
+
+ The CONFIG_MTRR option creates a /proc/mtrr file which may be used
+ to manipulate your MTRRs. Typically the X server should use
+ this. This should have a reasonably generic interface so that
+ similar control registers on other processors can be easily
+ supported.
+
+There are two interfaces to /proc/mtrr: one is an ASCII interface
+which allows you to read and write. The other is an ioctl()
+interface. The ASCII interface is meant for administration. The
+ioctl() interface is meant for C programs (i.e. the X server). The
+interfaces are described below, with sample commands and C code.
+
+
+Reading MTRRs from the shell
+============================
+::
+
+ % cat /proc/mtrr
+ reg00: base=0x00000000 ( 0MB), size= 128MB: write-back, count=1
+ reg01: base=0x08000000 ( 128MB), size= 64MB: write-back, count=1
+
+Creating MTRRs from the C-shell::
+
+ # echo "base=0xf8000000 size=0x400000 type=write-combining" >! /proc/mtrr
+
+or if you use bash::
+
+ # echo "base=0xf8000000 size=0x400000 type=write-combining" >| /proc/mtrr
+
+And the result thereof::
+
+ % cat /proc/mtrr
+ reg00: base=0x00000000 ( 0MB), size= 128MB: write-back, count=1
+ reg01: base=0x08000000 ( 128MB), size= 64MB: write-back, count=1
+ reg02: base=0xf8000000 (3968MB), size= 4MB: write-combining, count=1
+
+This is for video RAM at base address 0xf8000000 and size 4 megabytes. To
+find out your base address, you need to look at the output of your X
+server, which tells you where the linear framebuffer address is. A
+typical line that you may get is::
+
+ (--) S3: PCI: 968 rev 0, Linear FB @ 0xf8000000
+
+Note that you should only use the value from the X server, as it may
+move the framebuffer base address, so the only value you can trust is
+that reported by the X server.
+
+To find out the size of your framebuffer (what, you don't actually
+know?), the following line will tell you::
+
+ (--) S3: videoram: 4096k
+
+That's 4 megabytes, which is 0x400000 bytes (in hexadecimal).
+A patch is being written for XFree86 which will make this automatic:
+in other words the X server will manipulate /proc/mtrr using the
+ioctl() interface, so users won't have to do anything. If you use a
+commercial X server, lobby your vendor to add support for MTRRs.
+
+
+Creating overlapping MTRRs
+==========================
+::
+
+ %echo "base=0xfb000000 size=0x1000000 type=write-combining" >/proc/mtrr
+ %echo "base=0xfb000000 size=0x1000 type=uncachable" >/proc/mtrr
+
+And the results::
+
+ % cat /proc/mtrr
+ reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1
+ reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1
+ reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1
+
+Some cards (especially Voodoo Graphics boards) need this 4 kB area
+excluded from the beginning of the region because it is used for
+registers.
+
+NOTE: You can only create type=uncachable region, if the first
+region that you created is type=write-combining.
+
+
+Removing MTRRs from the C-shell
+===============================
+::
+
+ % echo "disable=2" >! /proc/mtrr
+
+or using bash::
+
+ % echo "disable=2" >| /proc/mtrr
+
+
+Reading MTRRs from a C program using ioctl()'s
+==============================================
+::
+
+ /* mtrr-show.c
+
+ Source file for mtrr-show (example program to show MTRRs using ioctl()'s)
+
+ Copyright (C) 1997-1998 Richard Gooch
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Richard Gooch may be reached by email at rgooch@atnf.csiro.au
+ The postal address is:
+ Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+ */
+
+ /*
+ This program will use an ioctl() on /proc/mtrr to show the current MTRR
+ settings. This is an alternative to reading /proc/mtrr.
+
+
+ Written by Richard Gooch 17-DEC-1997
+
+ Last updated by Richard Gooch 2-MAY-1998
+
+
+ */
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <sys/ioctl.h>
+ #include <errno.h>
+ #include <asm/mtrr.h>
+
+ #define TRUE 1
+ #define FALSE 0
+ #define ERRSTRING strerror (errno)
+
+ static char *mtrr_strings[MTRR_NUM_TYPES] =
+ {
+ "uncachable", /* 0 */
+ "write-combining", /* 1 */
+ "?", /* 2 */
+ "?", /* 3 */
+ "write-through", /* 4 */
+ "write-protect", /* 5 */
+ "write-back", /* 6 */
+ };
+
+ int main ()
+ {
+ int fd;
+ struct mtrr_gentry gentry;
+
+ if ( ( fd = open ("/proc/mtrr", O_RDONLY, 0) ) == -1 )
+ {
+ if (errno == ENOENT)
+ {
+ fputs ("/proc/mtrr not found: not supported or you don't have a PPro?\n",
+ stderr);
+ exit (1);
+ }
+ fprintf (stderr, "Error opening /proc/mtrr\t%s\n", ERRSTRING);
+ exit (2);
+ }
+ for (gentry.regnum = 0; ioctl (fd, MTRRIOC_GET_ENTRY, &gentry) == 0;
+ ++gentry.regnum)
+ {
+ if (gentry.size < 1)
+ {
+ fprintf (stderr, "Register: %u disabled\n", gentry.regnum);
+ continue;
+ }
+ fprintf (stderr, "Register: %u base: 0x%lx size: 0x%lx type: %s\n",
+ gentry.regnum, gentry.base, gentry.size,
+ mtrr_strings[gentry.type]);
+ }
+ if (errno == EINVAL) exit (0);
+ fprintf (stderr, "Error doing ioctl(2) on /dev/mtrr\t%s\n", ERRSTRING);
+ exit (3);
+ } /* End Function main */
+
+
+Creating MTRRs from a C programme using ioctl()'s
+=================================================
+::
+
+ /* mtrr-add.c
+
+ Source file for mtrr-add (example programme to add an MTRRs using ioctl())
+
+ Copyright (C) 1997-1998 Richard Gooch
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ Richard Gooch may be reached by email at rgooch@atnf.csiro.au
+ The postal address is:
+ Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
+ */
+
+ /*
+ This programme will use an ioctl() on /proc/mtrr to add an entry. The first
+ available mtrr is used. This is an alternative to writing /proc/mtrr.
+
+
+ Written by Richard Gooch 17-DEC-1997
+
+ Last updated by Richard Gooch 2-MAY-1998
+
+
+ */
+ #include <stdio.h>
+ #include <string.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
+ #include <sys/ioctl.h>
+ #include <errno.h>
+ #include <asm/mtrr.h>
+
+ #define TRUE 1
+ #define FALSE 0
+ #define ERRSTRING strerror (errno)
+
+ static char *mtrr_strings[MTRR_NUM_TYPES] =
+ {
+ "uncachable", /* 0 */
+ "write-combining", /* 1 */
+ "?", /* 2 */
+ "?", /* 3 */
+ "write-through", /* 4 */
+ "write-protect", /* 5 */
+ "write-back", /* 6 */
+ };
+
+ int main (int argc, char **argv)
+ {
+ int fd;
+ struct mtrr_sentry sentry;
+
+ if (argc != 4)
+ {
+ fprintf (stderr, "Usage:\tmtrr-add base size type\n");
+ exit (1);
+ }
+ sentry.base = strtoul (argv[1], NULL, 0);
+ sentry.size = strtoul (argv[2], NULL, 0);
+ for (sentry.type = 0; sentry.type < MTRR_NUM_TYPES; ++sentry.type)
+ {
+ if (strcmp (argv[3], mtrr_strings[sentry.type]) == 0) break;
+ }
+ if (sentry.type >= MTRR_NUM_TYPES)
+ {
+ fprintf (stderr, "Illegal type: \"%s\"\n", argv[3]);
+ exit (2);
+ }
+ if ( ( fd = open ("/proc/mtrr", O_WRONLY, 0) ) == -1 )
+ {
+ if (errno == ENOENT)
+ {
+ fputs ("/proc/mtrr not found: not supported or you don't have a PPro?\n",
+ stderr);
+ exit (3);
+ }
+ fprintf (stderr, "Error opening /proc/mtrr\t%s\n", ERRSTRING);
+ exit (4);
+ }
+ if (ioctl (fd, MTRRIOC_ADD_ENTRY, &sentry) == -1)
+ {
+ fprintf (stderr, "Error doing ioctl(2) on /dev/mtrr\t%s\n", ERRSTRING);
+ exit (5);
+ }
+ fprintf (stderr, "Sleeping for 5 seconds so you can see the new entry\n");
+ sleep (5);
+ close (fd);
+ fputs ("I've just closed /proc/mtrr so now the new entry should be gone\n",
+ stderr);
+ } /* End Function main */
diff --git a/Documentation/x86/mtrr.txt b/Documentation/x86/mtrr.txt
deleted file mode 100644
index dc3e703913ac..000000000000
--- a/Documentation/x86/mtrr.txt
+++ /dev/null
@@ -1,329 +0,0 @@
-MTRR (Memory Type Range Register) control
-
-Richard Gooch <rgooch@atnf.csiro.au> - 3 Jun 1999
-Luis R. Rodriguez <mcgrof@do-not-panic.com> - April 9, 2015
-
-===============================================================================
-Phasing out MTRR use
-
-MTRR use is replaced on modern x86 hardware with PAT. Direct MTRR use by
-drivers on Linux is now completely phased out, device drivers should use
-arch_phys_wc_add() in combination with ioremap_wc() to make MTRR effective on
-non-PAT systems while a no-op but equally effective on PAT enabled systems.
-
-Even if Linux does not use MTRRs directly, some x86 platform firmware may still
-set up MTRRs early before booting the OS. They do this as some platform
-firmware may still have implemented access to MTRRs which would be controlled
-and handled by the platform firmware directly. An example of platform use of
-MTRRs is through the use of SMI handlers, one case could be for fan control,
-the platform code would need uncachable access to some of its fan control
-registers. Such platform access does not need any Operating System MTRR code in
-place other than mtrr_type_lookup() to ensure any OS specific mapping requests
-are aligned with platform MTRR setup. If MTRRs are only set up by the platform
-firmware code though and the OS does not make any specific MTRR mapping
-requests mtrr_type_lookup() should always return MTRR_TYPE_INVALID.
-
-For details refer to Documentation/x86/pat.txt.
-
-===============================================================================
-
- On Intel P6 family processors (Pentium Pro, Pentium II and later)
- the Memory Type Range Registers (MTRRs) may be used to control
- processor access to memory ranges. This is most useful when you have
- a video (VGA) card on a PCI or AGP bus. Enabling write-combining
- allows bus write transfers to be combined into a larger transfer
- before bursting over the PCI/AGP bus. This can increase performance
- of image write operations 2.5 times or more.
-
- The Cyrix 6x86, 6x86MX and M II processors have Address Range
- Registers (ARRs) which provide a similar functionality to MTRRs. For
- these, the ARRs are used to emulate the MTRRs.
-
- The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
- MTRRs. These are supported. The AMD Athlon family provide 8 Intel
- style MTRRs.
-
- The Centaur C6 (WinChip) has 8 MCRs, allowing write-combining. These
- are supported.
-
- The VIA Cyrix III and VIA C3 CPUs offer 8 Intel style MTRRs.
-
- The CONFIG_MTRR option creates a /proc/mtrr file which may be used
- to manipulate your MTRRs. Typically the X server should use
- this. This should have a reasonably generic interface so that
- similar control registers on other processors can be easily
- supported.
-
-
-There are two interfaces to /proc/mtrr: one is an ASCII interface
-which allows you to read and write. The other is an ioctl()
-interface. The ASCII interface is meant for administration. The
-ioctl() interface is meant for C programs (i.e. the X server). The
-interfaces are described below, with sample commands and C code.
-
-===============================================================================
-Reading MTRRs from the shell:
-
-% cat /proc/mtrr
-reg00: base=0x00000000 ( 0MB), size= 128MB: write-back, count=1
-reg01: base=0x08000000 ( 128MB), size= 64MB: write-back, count=1
-===============================================================================
-Creating MTRRs from the C-shell:
-# echo "base=0xf8000000 size=0x400000 type=write-combining" >! /proc/mtrr
-or if you use bash:
-# echo "base=0xf8000000 size=0x400000 type=write-combining" >| /proc/mtrr
-
-And the result thereof:
-% cat /proc/mtrr
-reg00: base=0x00000000 ( 0MB), size= 128MB: write-back, count=1
-reg01: base=0x08000000 ( 128MB), size= 64MB: write-back, count=1
-reg02: base=0xf8000000 (3968MB), size= 4MB: write-combining, count=1
-
-This is for video RAM at base address 0xf8000000 and size 4 megabytes. To
-find out your base address, you need to look at the output of your X
-server, which tells you where the linear framebuffer address is. A
-typical line that you may get is:
-
-(--) S3: PCI: 968 rev 0, Linear FB @ 0xf8000000
-
-Note that you should only use the value from the X server, as it may
-move the framebuffer base address, so the only value you can trust is
-that reported by the X server.
-
-To find out the size of your framebuffer (what, you don't actually
-know?), the following line will tell you:
-
-(--) S3: videoram: 4096k
-
-That's 4 megabytes, which is 0x400000 bytes (in hexadecimal).
-A patch is being written for XFree86 which will make this automatic:
-in other words the X server will manipulate /proc/mtrr using the
-ioctl() interface, so users won't have to do anything. If you use a
-commercial X server, lobby your vendor to add support for MTRRs.
-===============================================================================
-Creating overlapping MTRRs:
-
-%echo "base=0xfb000000 size=0x1000000 type=write-combining" >/proc/mtrr
-%echo "base=0xfb000000 size=0x1000 type=uncachable" >/proc/mtrr
-
-And the results: cat /proc/mtrr
-reg00: base=0x00000000 ( 0MB), size= 64MB: write-back, count=1
-reg01: base=0xfb000000 (4016MB), size= 16MB: write-combining, count=1
-reg02: base=0xfb000000 (4016MB), size= 4kB: uncachable, count=1
-
-Some cards (especially Voodoo Graphics boards) need this 4 kB area
-excluded from the beginning of the region because it is used for
-registers.
-
-NOTE: You can only create type=uncachable region, if the first
-region that you created is type=write-combining.
-===============================================================================
-Removing MTRRs from the C-shell:
-% echo "disable=2" >! /proc/mtrr
-or using bash:
-% echo "disable=2" >| /proc/mtrr
-===============================================================================
-Reading MTRRs from a C program using ioctl()'s:
-
-/* mtrr-show.c
-
- Source file for mtrr-show (example program to show MTRRs using ioctl()'s)
-
- Copyright (C) 1997-1998 Richard Gooch
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Richard Gooch may be reached by email at rgooch@atnf.csiro.au
- The postal address is:
- Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
-*/
-
-/*
- This program will use an ioctl() on /proc/mtrr to show the current MTRR
- settings. This is an alternative to reading /proc/mtrr.
-
-
- Written by Richard Gooch 17-DEC-1997
-
- Last updated by Richard Gooch 2-MAY-1998
-
-
-*/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <errno.h>
-#include <asm/mtrr.h>
-
-#define TRUE 1
-#define FALSE 0
-#define ERRSTRING strerror (errno)
-
-static char *mtrr_strings[MTRR_NUM_TYPES] =
-{
- "uncachable", /* 0 */
- "write-combining", /* 1 */
- "?", /* 2 */
- "?", /* 3 */
- "write-through", /* 4 */
- "write-protect", /* 5 */
- "write-back", /* 6 */
-};
-
-int main ()
-{
- int fd;
- struct mtrr_gentry gentry;
-
- if ( ( fd = open ("/proc/mtrr", O_RDONLY, 0) ) == -1 )
- {
- if (errno == ENOENT)
- {
- fputs ("/proc/mtrr not found: not supported or you don't have a PPro?\n",
- stderr);
- exit (1);
- }
- fprintf (stderr, "Error opening /proc/mtrr\t%s\n", ERRSTRING);
- exit (2);
- }
- for (gentry.regnum = 0; ioctl (fd, MTRRIOC_GET_ENTRY, &gentry) == 0;
- ++gentry.regnum)
- {
- if (gentry.size < 1)
- {
- fprintf (stderr, "Register: %u disabled\n", gentry.regnum);
- continue;
- }
- fprintf (stderr, "Register: %u base: 0x%lx size: 0x%lx type: %s\n",
- gentry.regnum, gentry.base, gentry.size,
- mtrr_strings[gentry.type]);
- }
- if (errno == EINVAL) exit (0);
- fprintf (stderr, "Error doing ioctl(2) on /dev/mtrr\t%s\n", ERRSTRING);
- exit (3);
-} /* End Function main */
-===============================================================================
-Creating MTRRs from a C programme using ioctl()'s:
-
-/* mtrr-add.c
-
- Source file for mtrr-add (example programme to add an MTRRs using ioctl())
-
- Copyright (C) 1997-1998 Richard Gooch
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Richard Gooch may be reached by email at rgooch@atnf.csiro.au
- The postal address is:
- Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
-*/
-
-/*
- This programme will use an ioctl() on /proc/mtrr to add an entry. The first
- available mtrr is used. This is an alternative to writing /proc/mtrr.
-
-
- Written by Richard Gooch 17-DEC-1997
-
- Last updated by Richard Gooch 2-MAY-1998
-
-
-*/
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <errno.h>
-#include <asm/mtrr.h>
-
-#define TRUE 1
-#define FALSE 0
-#define ERRSTRING strerror (errno)
-
-static char *mtrr_strings[MTRR_NUM_TYPES] =
-{
- "uncachable", /* 0 */
- "write-combining", /* 1 */
- "?", /* 2 */
- "?", /* 3 */
- "write-through", /* 4 */
- "write-protect", /* 5 */
- "write-back", /* 6 */
-};
-
-int main (int argc, char **argv)
-{
- int fd;
- struct mtrr_sentry sentry;
-
- if (argc != 4)
- {
- fprintf (stderr, "Usage:\tmtrr-add base size type\n");
- exit (1);
- }
- sentry.base = strtoul (argv[1], NULL, 0);
- sentry.size = strtoul (argv[2], NULL, 0);
- for (sentry.type = 0; sentry.type < MTRR_NUM_TYPES; ++sentry.type)
- {
- if (strcmp (argv[3], mtrr_strings[sentry.type]) == 0) break;
- }
- if (sentry.type >= MTRR_NUM_TYPES)
- {
- fprintf (stderr, "Illegal type: \"%s\"\n", argv[3]);
- exit (2);
- }
- if ( ( fd = open ("/proc/mtrr", O_WRONLY, 0) ) == -1 )
- {
- if (errno == ENOENT)
- {
- fputs ("/proc/mtrr not found: not supported or you don't have a PPro?\n",
- stderr);
- exit (3);
- }
- fprintf (stderr, "Error opening /proc/mtrr\t%s\n", ERRSTRING);
- exit (4);
- }
- if (ioctl (fd, MTRRIOC_ADD_ENTRY, &sentry) == -1)
- {
- fprintf (stderr, "Error doing ioctl(2) on /dev/mtrr\t%s\n", ERRSTRING);
- exit (5);
- }
- fprintf (stderr, "Sleeping for 5 seconds so you can see the new entry\n");
- sleep (5);
- close (fd);
- fputs ("I've just closed /proc/mtrr so now the new entry should be gone\n",
- stderr);
-} /* End Function main */
-===============================================================================
diff --git a/Documentation/x86/orc-unwinder.txt b/Documentation/x86/orc-unwinder.rst
index cd4b29be29af..d811576c1f3e 100644
--- a/Documentation/x86/orc-unwinder.txt
+++ b/Documentation/x86/orc-unwinder.rst
@@ -1,8 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
ORC unwinder
============
Overview
---------
+========
The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is
similar in concept to a DWARF unwinder. The difference is that the
@@ -23,12 +26,12 @@ correlate instruction addresses with their stack states at run time.
ORC vs frame pointers
----------------------
+=====================
With frame pointers enabled, GCC adds instrumentation code to every
function in the kernel. The kernel's .text size increases by about
3.2%, resulting in a broad kernel-wide slowdown. Measurements by Mel
-Gorman [1] have shown a slowdown of 5-10% for some workloads.
+Gorman [1]_ have shown a slowdown of 5-10% for some workloads.
In contrast, the ORC unwinder has no effect on text size or runtime
performance, because the debuginfo is out of band. So if you disable
@@ -55,7 +58,7 @@ depending on the kernel config.
ORC vs DWARF
-------------
+============
ORC debuginfo's advantage over DWARF itself is that it's much simpler.
It gets rid of the complex DWARF CFI state machine and also gets rid of
@@ -65,7 +68,7 @@ mission critical oops code.
The simpler debuginfo format also enables the unwinder to be much faster
than DWARF, which is important for perf and lockdep. In a basic
-performance test by Jiri Slaby [2], the ORC unwinder was about 20x
+performance test by Jiri Slaby [2]_, the ORC unwinder was about 20x
faster than an out-of-tree DWARF unwinder. (Note: That measurement was
taken before some performance tweaks were added, which doubled
performance, so the speedup over DWARF may be closer to 40x.)
@@ -85,7 +88,7 @@ still be able to control the format, e.g. no complex state machines.
ORC unwind table generation
----------------------------
+===========================
The ORC data is generated by objtool. With the existing compile-time
stack metadata validation feature, objtool already follows all code
@@ -133,7 +136,7 @@ objtool follows GCC code quite well.
Unwinder implementation details
--------------------------------
+===============================
Objtool generates the ORC data by integrating with the compile-time
stack metadata validation feature, which is described in detail in
@@ -154,7 +157,7 @@ subset of the table needs to be searched.
Etymology
----------
+=========
Orcs, fearsome creatures of medieval folklore, are the Dwarves' natural
enemies. Similarly, the ORC unwinder was created in opposition to the
@@ -162,7 +165,7 @@ complexity and slowness of DWARF.
"Although Orcs rarely consider multiple solutions to a problem, they do
excel at getting things done because they are creatures of action, not
-thought." [3] Similarly, unlike the esoteric DWARF unwinder, the
+thought." [3]_ Similarly, unlike the esoteric DWARF unwinder, the
veracious ORC unwinder wastes no time or siloconic effort decoding
variable-length zero-extended unsigned-integer byte-coded
state-machine-based debug information entries.
@@ -174,6 +177,6 @@ brutal, unyielding efficiency.
ORC stands for Oops Rewind Capability.
-[1] https://lkml.kernel.org/r/20170602104048.jkkzssljsompjdwy@suse.de
-[2] https://lkml.kernel.org/r/d2ca5435-6386-29b8-db87-7f227c2b713a@suse.cz
-[3] http://dustin.wikidot.com/half-orcs-and-orcs
+.. [1] https://lkml.kernel.org/r/20170602104048.jkkzssljsompjdwy@suse.de
+.. [2] https://lkml.kernel.org/r/d2ca5435-6386-29b8-db87-7f227c2b713a@suse.cz
+.. [3] http://dustin.wikidot.com/half-orcs-and-orcs
diff --git a/Documentation/x86/pat.rst b/Documentation/x86/pat.rst
new file mode 100644
index 000000000000..9a298fd97d74
--- /dev/null
+++ b/Documentation/x86/pat.rst
@@ -0,0 +1,242 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+PAT (Page Attribute Table)
+==========================
+
+x86 Page Attribute Table (PAT) allows for setting the memory attribute at
+page-level granularity. PAT is complementary to the MTRR settings, which allow
+setting of memory types over physical address ranges. However, PAT is more
+flexible than MTRR because it can set attributes at page level and because
+there is no hardware limit on the number of such attribute settings. This
+added flexibility comes with the requirement to avoid memory type aliasing
+for the same physical memory mapped at multiple virtual addresses.
+
+PAT allows for different types of memory attributes. The most commonly used
+ones, supported at this time, are:
+
+=== ==============
+WB Write-back
+UC Uncached
+WC Write-combined
+WT Write-through
+UC- Uncached Minus
+=== ==============
+
+
+PAT APIs
+========
+
+There are many different APIs in the kernel that allow setting of memory
+attributes at the page level. In order to avoid aliasing, these interfaces
+should be used thoughtfully. Below is a table of interfaces available,
+their intended usage and their memory attribute relationships. Internally,
+these APIs use a reserve_memtype()/free_memtype() interface on the physical
+address range to avoid any aliasing.
+
++------------------------+----------+--------------+------------------+
+| API | RAM | ACPI,... | Reserved/Holes |
++------------------------+----------+--------------+------------------+
+| ioremap | -- | UC- | UC- |
++------------------------+----------+--------------+------------------+
+| ioremap_cache | -- | WB | WB |
++------------------------+----------+--------------+------------------+
+| ioremap_uc | -- | UC | UC |
++------------------------+----------+--------------+------------------+
+| ioremap_nocache | -- | UC- | UC- |
++------------------------+----------+--------------+------------------+
+| ioremap_wc | -- | -- | WC |
++------------------------+----------+--------------+------------------+
+| ioremap_wt | -- | -- | WT |
++------------------------+----------+--------------+------------------+
+| set_memory_uc, | UC- | -- | -- |
+| set_memory_wb | | | |
++------------------------+----------+--------------+------------------+
+| set_memory_wc, | WC | -- | -- |
+| set_memory_wb | | | |
++------------------------+----------+--------------+------------------+
+| set_memory_wt, | WT | -- | -- |
+| set_memory_wb | | | |
++------------------------+----------+--------------+------------------+
+| pci sysfs resource | -- | -- | UC- |
++------------------------+----------+--------------+------------------+
+| pci sysfs resource_wc | -- | -- | WC |
+| is IORESOURCE_PREFETCH | | | |
++------------------------+----------+--------------+------------------+
+| pci proc | -- | -- | UC- |
+| !PCIIOC_WRITE_COMBINE | | | |
++------------------------+----------+--------------+------------------+
+| pci proc | -- | -- | WC |
+| PCIIOC_WRITE_COMBINE | | | |
++------------------------+----------+--------------+------------------+
+| /dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
+| read-write | | | |
++------------------------+----------+--------------+------------------+
+| /dev/mem | -- | UC- | UC- |
+| mmap SYNC flag | | | |
++------------------------+----------+--------------+------------------+
+| /dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
+| mmap !SYNC flag | | | |
+| and | |(from existing| (from existing |
+| any alias to this area | |alias) | alias) |
++------------------------+----------+--------------+------------------+
+| /dev/mem | -- | WB | WB |
+| mmap !SYNC flag | | | |
+| no alias to this area | | | |
+| and | | | |
+| MTRR says WB | | | |
++------------------------+----------+--------------+------------------+
+| /dev/mem | -- | -- | UC- |
+| mmap !SYNC flag | | | |
+| no alias to this area | | | |
+| and | | | |
+| MTRR says !WB | | | |
++------------------------+----------+--------------+------------------+
+
+
+Advanced APIs for drivers
+=========================
+
+A. Exporting pages to users with remap_pfn_range, io_remap_pfn_range,
+vmf_insert_pfn.
+
+Drivers wanting to export some pages to userspace do so by using the mmap
+interface and a combination of:
+
+ 1) pgprot_noncached()
+ 2) io_remap_pfn_range() or remap_pfn_range() or vmf_insert_pfn()
+
+With PAT support, a new API pgprot_writecombine is being added. So, drivers can
+continue to use the above sequence, with either pgprot_noncached() or
+pgprot_writecombine() in step 1, followed by step 2.
+
+In addition, step 2 internally tracks the region as UC or WC in the memtype
+list in order to ensure that there is no conflicting mapping.
+
+Note that this set of APIs only works with IO (non-RAM) regions. If a driver
+wants to export a RAM region, it has to do set_memory_uc() or set_memory_wc()
+as step 0 above, and also track the usage of those pages and use set_memory_wb()
+before the page is freed to the free pool.
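+
+As a rough illustration, a hypothetical driver's mmap handler might combine
+step 1 and step 2 for an IO region as sketched below; the mydev structure,
+its fields and the handler name are made-up examples, not kernel APIs::
+
+  /* Hypothetical sketch: expose a device MMIO region write-combined. */
+  #include <linux/fs.h>
+  #include <linux/io.h>
+  #include <linux/mm.h>
+
+  struct mydev {                     /* hypothetical driver state */
+          phys_addr_t mmio_phys;     /* physical base of the IO region */
+          size_t mmio_len;           /* length of the IO region */
+  };
+
+  static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
+  {
+          struct mydev *dev = file->private_data;
+          unsigned long size = vma->vm_end - vma->vm_start;
+
+          if (size > dev->mmio_len)
+                  return -EINVAL;
+
+          /* Step 1: request a write-combining (or noncached) mapping. */
+          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+          /* Step 2: map the PFNs; the memtype is tracked internally. */
+          return io_remap_pfn_range(vma, vma->vm_start,
+                                    dev->mmio_phys >> PAGE_SHIFT,
+                                    size, vma->vm_page_prot);
+  }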
+
+MTRR effects on PAT / non-PAT systems
+=====================================
+
+The following table provides the effects of using write-combining MTRRs when
+using ioremap*() calls on x86 for both non-PAT and PAT systems. Ideally
+mtrr_add() usage will be phased out in favor of arch_phys_wc_add(), which will
+be a no-op on PAT enabled systems. The region over which an arch_phys_wc_add()
+is made should already have been ioremapped with WC attributes or PAT entries;
+this can be done by using ioremap_wc() / set_memory_wc(). Devices which
+combine areas of IO memory desired to remain uncacheable with areas where
+write-combining is desirable should consider use of ioremap_uc() followed by
+set_memory_wc() to white-list effective write-combined areas. Such use is
+nevertheless discouraged as the effective memory type is considered
+implementation defined; still, this strategy can be used as a last resort on
+devices with size-constrained regions where MTRR write-combining would
+otherwise not be effective.
+::
+
+ ==== ======= === ========================= =====================
+ MTRR Non-PAT PAT Linux ioremap value Effective memory type
+ ==== ======= === ========================= =====================
+ PAT Non-PAT | PAT
+ |PCD |
+ ||PWT |
+ ||| |
+ WC 000 WB _PAGE_CACHE_MODE_WB WC | WC
+ WC 001 WC _PAGE_CACHE_MODE_WC WC* | WC
+ WC 010 UC- _PAGE_CACHE_MODE_UC_MINUS WC* | UC
+ WC 011 UC _PAGE_CACHE_MODE_UC UC | UC
+ ==== ======= === ========================= =====================
+
+ (*) denotes implementation defined and is discouraged
+
+.. note:: A "--" in the above table means "not suggested usage for the API".
+   Some of the --'s are strictly enforced by the kernel. Some others are not
+   really enforced today, but may be enforced in future.
+
+For ioremap and PCI access through /sys or /proc, the actual type returned
+can be more restrictive if there is any existing aliasing for that address.
+For example, if there is an existing uncached mapping, a new ioremap_wc can
+return an uncached mapping in place of the requested write-combine.
+
+set_memory_[uc|wc|wt] and set_memory_wb should be used in pairs: the driver
+first makes a region uc, wc or wt and switches it back to wb after use.
+
+Over time, writes to /proc/mtrr will be deprecated in favor of PAT based
+interfaces. Users writing to /proc/mtrr are encouraged to use the above
+interfaces instead.
+
+Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
+types.
+
+Drivers should use set_memory_[uc|wc|wt] to set access type for RAM ranges.
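+
+As a hypothetical sketch of the rules above (pdev, buf, nr_pages and the
+function names are made-up, and BAR 2 is an arbitrary choice), a driver might
+map a prefetchable PCI BAR write-combined and temporarily switch a RAM buffer
+to WC::
+
+  #include <linux/io.h>
+  #include <linux/pci.h>
+  #include <asm/set_memory.h>
+
+  static void __iomem *regs;
+  static int wc_cookie;
+
+  static int mydev_setup_wc(struct pci_dev *pdev, void *buf, int nr_pages)
+  {
+          /* IO region: ioremap_wc(), plus a WC MTRR on non-PAT systems. */
+          regs = ioremap_wc(pci_resource_start(pdev, 2),
+                            pci_resource_len(pdev, 2));
+          if (!regs)
+                  return -ENOMEM;
+          wc_cookie = arch_phys_wc_add(pci_resource_start(pdev, 2),
+                                       pci_resource_len(pdev, 2));
+
+          /* RAM region: set_memory_wc() paired with a later set_memory_wb(). */
+          set_memory_wc((unsigned long)buf, nr_pages);
+          return 0;
+  }
+
+  static void mydev_teardown_wc(void *buf, int nr_pages)
+  {
+          /* Switch the RAM range back to WB before it is freed. */
+          set_memory_wb((unsigned long)buf, nr_pages);
+          arch_phys_wc_del(wc_cookie);
+          iounmap(regs);
+  }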
+
+
+PAT debugging
+=============
+
+With CONFIG_DEBUG_FS enabled, PAT memtype list can be examined by::
+
+ # mount -t debugfs debugfs /sys/kernel/debug
+ # cat /sys/kernel/debug/x86/pat_memtype_list
+ PAT memtype list:
+ uncached-minus @ 0x7fadf000-0x7fae0000
+ uncached-minus @ 0x7fb19000-0x7fb1a000
+ uncached-minus @ 0x7fb1a000-0x7fb1b000
+ uncached-minus @ 0x7fb1b000-0x7fb1c000
+ uncached-minus @ 0x7fb1c000-0x7fb1d000
+ uncached-minus @ 0x7fb1d000-0x7fb1e000
+ uncached-minus @ 0x7fb1e000-0x7fb25000
+ uncached-minus @ 0x7fb25000-0x7fb26000
+ uncached-minus @ 0x7fb26000-0x7fb27000
+ uncached-minus @ 0x7fb27000-0x7fb28000
+ uncached-minus @ 0x7fb28000-0x7fb2e000
+ uncached-minus @ 0x7fb2e000-0x7fb2f000
+ uncached-minus @ 0x7fb2f000-0x7fb30000
+ uncached-minus @ 0x7fb31000-0x7fb32000
+ uncached-minus @ 0x80000000-0x90000000
+
+This list shows physical address ranges and various PAT settings used to
+access those physical address ranges.
+
+Another, more verbose way of getting PAT related debug messages is with the
+"debugpat" boot parameter. With this parameter, various debug messages are
+printed to the dmesg log.
+
+PAT Initialization
+==================
+
+The following table describes how PAT is initialized under various
+configurations. The PAT MSR must be updated by Linux in order to support WC
+and WT attributes. Otherwise, the PAT MSR has the value programmed in it
+by the firmware. Note that Xen enables the WC attribute in the PAT MSR for
+guests.
+
+ ==== ===== ========================== ========= =======
+ MTRR PAT Call Sequence PAT State PAT MSR
+ ==== ===== ========================== ========= =======
+ E E MTRR -> PAT init Enabled OS
+ E D MTRR -> PAT init Disabled -
+ D E MTRR -> PAT disable Disabled BIOS
+ D D MTRR -> PAT disable Disabled -
+ - np/E PAT -> PAT disable Disabled BIOS
+ - np/D PAT -> PAT disable Disabled -
+ E !P/E MTRR -> PAT init Disabled BIOS
+ D !P/E MTRR -> PAT disable Disabled BIOS
+ !M !P/E MTRR stub -> PAT disable Disabled BIOS
+ ==== ===== ========================== ========= =======
+
+ Legend
+
+ ========= =======================================
+ E Feature enabled in CPU
+ D Feature disabled/unsupported in CPU
+ np "nopat" boot option specified
+ !P CONFIG_X86_PAT option unset
+ !M CONFIG_MTRR option unset
+ Enabled PAT state set to enabled
+ Disabled PAT state set to disabled
+ OS PAT initializes PAT MSR with OS setting
+ BIOS PAT keeps PAT MSR with BIOS setting
+ ========= =======================================
+
diff --git a/Documentation/x86/pat.txt b/Documentation/x86/pat.txt
deleted file mode 100644
index 481d8d8536ac..000000000000
--- a/Documentation/x86/pat.txt
+++ /dev/null
@@ -1,230 +0,0 @@
-
-PAT (Page Attribute Table)
-
-x86 Page Attribute Table (PAT) allows for setting the memory attribute at the
-page level granularity. PAT is complementary to the MTRR settings which allows
-for setting of memory types over physical address ranges. However, PAT is
-more flexible than MTRR due to its capability to set attributes at page level
-and also due to the fact that there are no hardware limitations on number of
-such attribute settings allowed. Added flexibility comes with guidelines for
-not having memory type aliasing for the same physical memory with multiple
-virtual addresses.
-
-PAT allows for different types of memory attributes. The most commonly used
-ones that will be supported at this time are Write-back, Uncached,
-Write-combined, Write-through and Uncached Minus.
-
-
-PAT APIs
---------
-
-There are many different APIs in the kernel that allows setting of memory
-attributes at the page level. In order to avoid aliasing, these interfaces
-should be used thoughtfully. Below is a table of interfaces available,
-their intended usage and their memory attribute relationships. Internally,
-these APIs use a reserve_memtype()/free_memtype() interface on the physical
-address range to avoid any aliasing.
-
-
--------------------------------------------------------------------
-API | RAM | ACPI,... | Reserved/Holes |
------------------------|----------|------------|------------------|
- | | | |
-ioremap | -- | UC- | UC- |
- | | | |
-ioremap_cache | -- | WB | WB |
- | | | |
-ioremap_uc | -- | UC | UC |
- | | | |
-ioremap_nocache | -- | UC- | UC- |
- | | | |
-ioremap_wc | -- | -- | WC |
- | | | |
-ioremap_wt | -- | -- | WT |
- | | | |
-set_memory_uc | UC- | -- | -- |
- set_memory_wb | | | |
- | | | |
-set_memory_wc | WC | -- | -- |
- set_memory_wb | | | |
- | | | |
-set_memory_wt | WT | -- | -- |
- set_memory_wb | | | |
- | | | |
-pci sysfs resource | -- | -- | UC- |
- | | | |
-pci sysfs resource_wc | -- | -- | WC |
- is IORESOURCE_PREFETCH| | | |
- | | | |
-pci proc | -- | -- | UC- |
- !PCIIOC_WRITE_COMBINE | | | |
- | | | |
-pci proc | -- | -- | WC |
- PCIIOC_WRITE_COMBINE | | | |
- | | | |
-/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
- read-write | | | |
- | | | |
-/dev/mem | -- | UC- | UC- |
- mmap SYNC flag | | | |
- | | | |
-/dev/mem | -- | WB/WC/UC- | WB/WC/UC- |
- mmap !SYNC flag | |(from exist-| (from exist- |
- and | | ing alias)| ing alias) |
- any alias to this area| | | |
- | | | |
-/dev/mem | -- | WB | WB |
- mmap !SYNC flag | | | |
- no alias to this area | | | |
- and | | | |
- MTRR says WB | | | |
- | | | |
-/dev/mem | -- | -- | UC- |
- mmap !SYNC flag | | | |
- no alias to this area | | | |
- and | | | |
- MTRR says !WB | | | |
- | | | |
--------------------------------------------------------------------
-
-Advanced APIs for drivers
--------------------------
-A. Exporting pages to users with remap_pfn_range, io_remap_pfn_range,
-vmf_insert_pfn
-
-Drivers wanting to export some pages to userspace do it by using mmap
-interface and a combination of
-1) pgprot_noncached()
-2) io_remap_pfn_range() or remap_pfn_range() or vmf_insert_pfn()
-
-With PAT support, a new API pgprot_writecombine is being added. So, drivers can
-continue to use the above sequence, with either pgprot_noncached() or
-pgprot_writecombine() in step 1, followed by step 2.
-
-In addition, step 2 internally tracks the region as UC or WC in memtype
-list in order to ensure no conflicting mapping.
-
-Note that this set of APIs only works with IO (non RAM) regions. If driver
-wants to export a RAM region, it has to do set_memory_uc() or set_memory_wc()
-as step 0 above and also track the usage of those pages and use set_memory_wb()
-before the page is freed to free pool.
-
-MTRR effects on PAT / non-PAT systems
--------------------------------------
-
-The following table provides the effects of using write-combining MTRRs when
-using ioremap*() calls on x86 for both non-PAT and PAT systems. Ideally
-mtrr_add() usage will be phased out in favor of arch_phys_wc_add() which will
-be a no-op on PAT enabled systems. The region over which a arch_phys_wc_add()
-is made, should already have been ioremapped with WC attributes or PAT entries,
-this can be done by using ioremap_wc() / set_memory_wc(). Devices which
-combine areas of IO memory desired to remain uncacheable with areas where
-write-combining is desirable should consider use of ioremap_uc() followed by
-set_memory_wc() to white-list effective write-combined areas. Such use is
-nevertheless discouraged as the effective memory type is considered
-implementation defined, yet this strategy can be used as last resort on devices
-with size-constrained regions where otherwise MTRR write-combining would
-otherwise not be effective.
-
-----------------------------------------------------------------------
-MTRR Non-PAT PAT Linux ioremap value Effective memory type
-----------------------------------------------------------------------
- Non-PAT | PAT
- PAT
- |PCD
- ||PWT
- |||
-WC 000 WB _PAGE_CACHE_MODE_WB WC | WC
-WC 001 WC _PAGE_CACHE_MODE_WC WC* | WC
-WC 010 UC- _PAGE_CACHE_MODE_UC_MINUS WC* | UC
-WC 011 UC _PAGE_CACHE_MODE_UC UC | UC
-----------------------------------------------------------------------
-
-(*) denotes implementation defined and is discouraged
-
-Notes:
-
--- in the above table mean "Not suggested usage for the API". Some of the --'s
-are strictly enforced by the kernel. Some others are not really enforced
-today, but may be enforced in future.
-
-For ioremap and pci access through /sys or /proc - The actual type returned
-can be more restrictive, in case of any existing aliasing for that address.
-For example: If there is an existing uncached mapping, a new ioremap_wc can
-return uncached mapping in place of write-combine requested.
-
-set_memory_[uc|wc|wt] and set_memory_wb should be used in pairs, where driver
-will first make a region uc, wc or wt and switch it back to wb after use.
-
-Over time writes to /proc/mtrr will be deprecated in favor of using PAT based
-interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.
-
-Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
-types.
-
-Drivers should use set_memory_[uc|wc|wt] to set access type for RAM ranges.
-
-
-PAT debugging
--------------
-
-With CONFIG_DEBUG_FS enabled, PAT memtype list can be examined by
-
-# mount -t debugfs debugfs /sys/kernel/debug
-# cat /sys/kernel/debug/x86/pat_memtype_list
-PAT memtype list:
-uncached-minus @ 0x7fadf000-0x7fae0000
-uncached-minus @ 0x7fb19000-0x7fb1a000
-uncached-minus @ 0x7fb1a000-0x7fb1b000
-uncached-minus @ 0x7fb1b000-0x7fb1c000
-uncached-minus @ 0x7fb1c000-0x7fb1d000
-uncached-minus @ 0x7fb1d000-0x7fb1e000
-uncached-minus @ 0x7fb1e000-0x7fb25000
-uncached-minus @ 0x7fb25000-0x7fb26000
-uncached-minus @ 0x7fb26000-0x7fb27000
-uncached-minus @ 0x7fb27000-0x7fb28000
-uncached-minus @ 0x7fb28000-0x7fb2e000
-uncached-minus @ 0x7fb2e000-0x7fb2f000
-uncached-minus @ 0x7fb2f000-0x7fb30000
-uncached-minus @ 0x7fb31000-0x7fb32000
-uncached-minus @ 0x80000000-0x90000000
-
-This list shows physical address ranges and various PAT settings used to
-access those physical address ranges.
-
-Another, more verbose way of getting PAT related debug messages is with
-"debugpat" boot parameter. With this parameter, various debug messages are
-printed to dmesg log.
-
-PAT Initialization
-------------------
-
-The following table describes how PAT is initialized under various
-configurations. The PAT MSR must be updated by Linux in order to support WC
-and WT attributes. Otherwise, the PAT MSR has the value programmed in it
-by the firmware. Note, Xen enables WC attribute in the PAT MSR for guests.
-
- MTRR PAT Call Sequence PAT State PAT MSR
- =========================================================
- E E MTRR -> PAT init Enabled OS
- E D MTRR -> PAT init Disabled -
- D E MTRR -> PAT disable Disabled BIOS
- D D MTRR -> PAT disable Disabled -
- - np/E PAT -> PAT disable Disabled BIOS
- - np/D PAT -> PAT disable Disabled -
- E !P/E MTRR -> PAT init Disabled BIOS
- D !P/E MTRR -> PAT disable Disabled BIOS
- !M !P/E MTRR stub -> PAT disable Disabled BIOS
-
- Legend
- ------------------------------------------------
- E Feature enabled in CPU
- D Feature disabled/unsupported in CPU
- np "nopat" boot option specified
- !P CONFIG_X86_PAT option unset
- !M CONFIG_MTRR option unset
- Enabled PAT state set to enabled
- Disabled PAT state set to disabled
- OS PAT initializes PAT MSR with OS setting
- BIOS PAT keeps PAT MSR with BIOS setting
-
diff --git a/Documentation/x86/protection-keys.txt b/Documentation/x86/protection-keys.rst
index ecb0d2dadfb7..49d9833af871 100644
--- a/Documentation/x86/protection-keys.txt
+++ b/Documentation/x86/protection-keys.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+Memory Protection Keys
+======================
+
Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
which is found on Intel's Skylake "Scalable Processor" Server CPUs.
It will be available in future non-server parts.
@@ -23,9 +29,10 @@ even though there is theoretically space in the PAE PTEs. These
permissions are enforced on data access only and have no effect on
instruction fetches.
-=========================== Syscalls ===========================
+Syscalls
+========
-There are 3 system calls which directly interact with pkeys:
+There are 3 system calls which directly interact with pkeys::
int pkey_alloc(unsigned long flags, unsigned long init_access_rights)
int pkey_free(int pkey);
@@ -37,6 +44,7 @@ pkey_alloc(). An application calls the WRPKRU instruction
directly in order to change access permissions to memory covered
with a key. In this example WRPKRU is wrapped by a C function
called pkey_set().
+::
int real_prot = PROT_READ|PROT_WRITE;
pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
@@ -45,43 +53,44 @@ called pkey_set().
... application runs here
Now, if the application needs to update the data at 'ptr', it can
-gain access, do the update, then remove its write access:
+gain access, do the update, then remove its write access::
pkey_set(pkey, 0); // clear PKEY_DISABLE_WRITE
*ptr = foo; // assign something
pkey_set(pkey, PKEY_DISABLE_WRITE); // set PKEY_DISABLE_WRITE again
Now when it frees the memory, it will also free the pkey since it
-is no longer in use:
+is no longer in use::
munmap(ptr, PAGE_SIZE);
pkey_free(pkey);
-(Note: pkey_set() is a wrapper for the RDPKRU and WRPKRU instructions.
- An example implementation can be found in
- tools/testing/selftests/x86/protection_keys.c)
+.. note:: pkey_set() is a wrapper for the RDPKRU and WRPKRU instructions.
+ An example implementation can be found in
+ tools/testing/selftests/x86/protection_keys.c.
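+
+For reference, a minimal user-space sketch of such a wrapper (an illustrative
+assumption, not the selftest code itself) could look like::
+
+  #define PKEY_BITS_PER_PKEY 2
+
+  /* Read the PKRU register (RDPKRU, opcode 0f 01 ee; ECX must be 0). */
+  static unsigned int rdpkru(void)
+  {
+          unsigned int eax, edx;
+
+          asm volatile(".byte 0x0f,0x01,0xee"
+                       : "=a" (eax), "=d" (edx) : "c" (0));
+          return eax;
+  }
+
+  /* Write the PKRU register (WRPKRU, opcode 0f 01 ef; ECX/EDX must be 0). */
+  static void wrpkru(unsigned int pkru)
+  {
+          asm volatile(".byte 0x0f,0x01,0xef"
+                       : : "a" (pkru), "c" (0), "d" (0));
+  }
+
+  /* Replace the two access-rights bits for 'pkey' with 'rights'. */
+  static void pkey_set(int pkey, unsigned int rights)
+  {
+          unsigned int pkru = rdpkru();
+          int shift = pkey * PKEY_BITS_PER_PKEY;
+
+          pkru &= ~(0x3u << shift);
+          pkru |= rights << shift;
+          wrpkru(pkru);
+  }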
-=========================== Behavior ===========================
+Behavior
+========
The kernel attempts to make protection keys consistent with the
-behavior of a plain mprotect(). For instance if you do this:
+behavior of a plain mprotect(). For instance if you do this::
mprotect(ptr, size, PROT_NONE);
something(ptr);
-you can expect the same effects with protection keys when doing this:
+you can expect the same effects with protection keys when doing this::
pkey = pkey_alloc(0, PKEY_DISABLE_WRITE | PKEY_DISABLE_READ);
pkey_mprotect(ptr, size, PROT_READ|PROT_WRITE, pkey);
something(ptr);
That should be true whether something() is a direct access to 'ptr'
-like:
+like::
*ptr = foo;
or when the kernel does the access on the application's behalf like
-with a read():
+with a read()::
read(fd, ptr, 1);
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.rst
index 5cd58439ad2d..4b858a9bad8d 100644
--- a/Documentation/x86/pti.txt
+++ b/Documentation/x86/pti.rst
@@ -1,9 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========================
+Page Table Isolation (PTI)
+==========================
+
Overview
========
-Page Table Isolation (pti, previously known as KAISER[1]) is a
+Page Table Isolation (pti, previously known as KAISER [1]_) is a
countermeasure against attacks on the shared user/kernel address
-space such as the "Meltdown" approach[2].
+space such as the "Meltdown" approach [2]_.
To mitigate this class of attacks, we create an independent set of
page tables for use only when running userspace applications. When
@@ -60,6 +66,7 @@ Protection against side-channel attacks is important. But,
this protection comes at a cost:
1. Increased Memory Use
+
a. Each process now needs an order-1 PGD instead of order-0.
(Consumes an additional 4k per process).
b. The 'cpu_entry_area' structure must be 2MB in size and 2MB
@@ -68,6 +75,7 @@ this protection comes at a cost:
is decompressed, but no space in the kernel image itself.
2. Runtime Cost
+
a. CR3 manipulation to switch between the page table copies
must be done at interrupt, syscall, and exception entry
and exit (it can be skipped when the kernel is interrupted,
@@ -142,6 +150,7 @@ ideally doing all of these in parallel:
interrupted, including nested NMIs. Using "-c" boosts the rate of
NMIs, and using two -c with separate counters encourages nested NMIs
and less deterministic behavior.
+ ::
while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done
@@ -182,5 +191,5 @@ that are worth noting here.
tended to be TLB invalidation issues. Usually invalidating
the wrong PCID, or otherwise missing an invalidation.
-1. https://gruss.cc/files/kaiser.pdf
-2. https://meltdownattack.com/meltdown.pdf
+.. [1] https://gruss.cc/files/kaiser.pdf
+.. [2] https://meltdownattack.com/meltdown.pdf
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.rst
index c1f95b59e14d..225cfd4daaee 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.rst
@@ -1,33 +1,44 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. include:: <isonum.txt>
+
+===========================================
User Interface for Resource Control feature
+===========================================
-Intel refers to this feature as Intel Resource Director Technology(Intel(R) RDT).
-AMD refers to this feature as AMD Platform Quality of Service(AMD QoS).
+:Copyright: |copy| 2016 Intel Corporation
+:Authors: - Fenghua Yu <fenghua.yu@intel.com>
+ - Tony Luck <tony.luck@intel.com>
+ - Vikas Shivappa <vikas.shivappa@intel.com>
-Copyright (C) 2016 Intel Corporation
-Fenghua Yu <fenghua.yu@intel.com>
-Tony Luck <tony.luck@intel.com>
-Vikas Shivappa <vikas.shivappa@intel.com>
+Intel refers to this feature as Intel Resource Director Technology (Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service (AMD QoS).
This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo
flag bits:
-RDT (Resource Director Technology) Allocation - "rdt_a"
-CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
-CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
-CQM (Cache QoS Monitoring) - "cqm_llc", "cqm_occup_llc"
-MBM (Memory Bandwidth Monitoring) - "cqm_mbm_total", "cqm_mbm_local"
-MBA (Memory Bandwidth Allocation) - "mba"
-To use the feature mount the file system:
+============================================= ================================
+RDT (Resource Director Technology) Allocation "rdt_a"
+CAT (Cache Allocation Technology) "cat_l3", "cat_l2"
+CDP (Code and Data Prioritization) "cdp_l3", "cdp_l2"
+CQM (Cache QoS Monitoring) "cqm_llc", "cqm_occup_llc"
+MBM (Memory Bandwidth Monitoring) "cqm_mbm_total", "cqm_mbm_local"
+MBA (Memory Bandwidth Allocation) "mba"
+============================================= ================================
+
+To use the feature mount the file system::
# mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl
mount options are:
-"cdp": Enable code/data prioritization in L3 cache allocations.
-"cdpl2": Enable code/data prioritization in L2 cache allocations.
-"mba_MBps": Enable the MBA Software Controller(mba_sc) to specify MBA
- bandwidth in MBps
+"cdp":
+ Enable code/data prioritization in L3 cache allocations.
+"cdpl2":
+ Enable code/data prioritization in L2 cache allocations.
+"mba_MBps":
+ Enable the MBA Software Controller(mba_sc) to specify MBA
+ bandwidth in MBps
L2 and L3 CDP are controlled separately.
@@ -44,7 +55,7 @@ For more details on the behavior of the interface during monitoring
and allocation, see the "Resource alloc and monitor groups" section.
Info directory
---------------
+==============
The 'info' directory contains information about the enabled
resources. Each resource has its own subdirectory. The subdirectory
@@ -56,77 +67,93 @@ allocation:
Cache resource(L3/L2) subdirectory contains the following files
related to allocation:
-"num_closids": The number of CLOSIDs which are valid for this
- resource. The kernel uses the smallest number of
- CLOSIDs of all enabled resources as limit.
-
-"cbm_mask": The bitmask which is valid for this resource.
- This mask is equivalent to 100%.
-
-"min_cbm_bits": The minimum number of consecutive bits which
- must be set when writing a mask.
-
-"shareable_bits": Bitmask of shareable resource with other executing
- entities (e.g. I/O). User can use this when
- setting up exclusive cache partitions. Note that
- some platforms support devices that have their
- own settings for cache use which can over-ride
- these bits.
-"bit_usage": Annotated capacity bitmasks showing how all
- instances of the resource are used. The legend is:
- "0" - Corresponding region is unused. When the system's
+"num_closids":
+ The number of CLOSIDs which are valid for this
+ resource. The kernel uses the smallest number of
+ CLOSIDs of all enabled resources as limit.
+"cbm_mask":
+ The bitmask which is valid for this resource.
+ This mask is equivalent to 100%.
+"min_cbm_bits":
+ The minimum number of consecutive bits which
+ must be set when writing a mask.
+
+"shareable_bits":
+ Bitmask of shareable resource with other executing
+ entities (e.g. I/O). User can use this when
+ setting up exclusive cache partitions. Note that
+ some platforms support devices that have their
+ own settings for cache use which can over-ride
+ these bits.
+"bit_usage":
+ Annotated capacity bitmasks showing how all
+ instances of the resource are used. The legend is:
+
+ "0":
+ Corresponding region is unused. When the system's
resources have been allocated and a "0" is found
in "bit_usage" it is a sign that resources are
wasted.
- "H" - Corresponding region is used by hardware only
+
+ "H":
+ Corresponding region is used by hardware only
but available for software use. If a resource
has bits set in "shareable_bits" but not all
of these bits appear in the resource groups'
schematas then the bits appearing in
"shareable_bits" but no resource group will
be marked as "H".
- "X" - Corresponding region is available for sharing and
+ "X":
+ Corresponding region is available for sharing and
used by hardware and software. These are the
bits that appear in "shareable_bits" as
well as a resource group's allocation.
- "S" - Corresponding region is used by software
+ "S":
+ Corresponding region is used by software
and available for sharing.
- "E" - Corresponding region is used exclusively by
+ "E":
+ Corresponding region is used exclusively by
one resource group. No sharing allowed.
- "P" - Corresponding region is pseudo-locked. No
+ "P":
+ Corresponding region is pseudo-locked. No
sharing allowed.
Memory bandwidth (MB) subdirectory contains the following files
with respect to allocation:
-"min_bandwidth": The minimum memory bandwidth percentage which
- user can request.
+"min_bandwidth":
+ The minimum memory bandwidth percentage which
+ user can request.
-"bandwidth_gran": The granularity in which the memory bandwidth
- percentage is allocated. The allocated
- b/w percentage is rounded off to the next
- control step available on the hardware. The
- available bandwidth control steps are:
- min_bandwidth + N * bandwidth_gran.
+"bandwidth_gran":
+ The granularity in which the memory bandwidth
+ percentage is allocated. The allocated
+ b/w percentage is rounded off to the next
+ control step available on the hardware. The
+ available bandwidth control steps are:
+ min_bandwidth + N * bandwidth_gran.
-"delay_linear": Indicates if the delay scale is linear or
- non-linear. This field is purely informational
- only.
+"delay_linear":
+ Indicates if the delay scale is linear or
+ non-linear. This field is purely informational
+ only.
If RDT monitoring is available there will be an "L3_MON" directory
with the following files:
-"num_rmids": The number of RMIDs available. This is the
- upper bound for how many "CTRL_MON" + "MON"
- groups can be created.
+"num_rmids":
+ The number of RMIDs available. This is the
+ upper bound for how many "CTRL_MON" + "MON"
+ groups can be created.
-"mon_features": Lists the monitoring events if
- monitoring is enabled for the resource.
+"mon_features":
+ Lists the monitoring events if
+ monitoring is enabled for the resource.
"max_threshold_occupancy":
- Read/write file provides the largest value (in
- bytes) at which a previously used LLC_occupancy
- counter can be considered for re-use.
+ Read/write file provides the largest value (in
+ bytes) at which a previously used LLC_occupancy
+ counter can be considered for re-use.
Finally, in the top level of the "info" directory there is a file
named "last_cmd_status". This is reset with every "command" issued
@@ -134,6 +161,7 @@ via the file system (making new directories or writing to any of the
control files). If the command was successful, it will read as "ok".
If the command failed, it will provide more information that can be
conveyed in the error returns from file operations. E.g.
+::
# echo L3:0=f7 > schemata
bash: echo: write error: Invalid argument
@@ -141,7 +169,7 @@ conveyed in the error returns from file operations. E.g.
mask f7 has non-consecutive 1-bits
Resource alloc and monitor groups
----------------------------------
+=================================
Resource groups are represented as directories in the resctrl file
system. The default group is the root directory which, immediately
@@ -226,6 +254,7 @@ When monitoring is enabled all MON groups will also contain:
Resource allocation rules
-------------------------
+
When a task is running the following rules define which resources are
available to it:
@@ -252,7 +281,7 @@ Resource monitoring rules
Notes on cache occupancy monitoring and control
------------------------------------------------
+===============================================
When moving a task from one group to another you should remember that
this only affects *new* cache allocations by the task. E.g. you may have
a task in a monitor group showing 3 MB of cache occupancy. If you move
@@ -321,7 +350,7 @@ of the capacity of the cache. You could partition the cache into four
equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
Memory bandwidth Allocation and monitoring
-------------------------------------------
+==========================================
For Memory bandwidth resource, by default the user controls the resource
by indicating the percentage of total memory bandwidth.
@@ -369,7 +398,7 @@ In order to mitigate this and make the interface more user friendly,
resctrl added support for specifying the bandwidth in MBps as well. The
kernel underneath would use a software feedback mechanism or a "Software
Controller(mba_sc)" which reads the actual bandwidth using MBM counters
-and adjust the memowy bandwidth percentages to ensure
+and adjusts the memory bandwidth percentages to ensure::
"actual bandwidth < user specified bandwidth".
@@ -380,14 +409,14 @@ sections.
L3 schemata file details (code and data prioritization disabled)
----------------------------------------------------------------
-With CDP disabled the L3 schemata format is:
+With CDP disabled the L3 schemata format is::
L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
L3 schemata file details (CDP enabled via mount option to resctrl)
------------------------------------------------------------------
When CDP is enabled L3 control is split into two separate resources
-so you can specify independent masks for code and data like this:
+so you can specify independent masks for code and data like this::
L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
@@ -395,7 +424,7 @@ so you can specify independent masks for code and data like this:
L2 schemata file details
------------------------
L2 cache does not support code and data prioritization, so the
-schemata format is always:
+schemata format is always::
L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
@@ -403,6 +432,7 @@ Memory bandwidth Allocation (default mode)
------------------------------------------
Memory b/w domain is L3 cache.
+::
MB:<cache_id0>=bandwidth0;<cache_id1>=bandwidth1;...
@@ -410,6 +440,7 @@ Memory bandwidth Allocation specified in MBps
---------------------------------------------
Memory bandwidth domain is L3 cache.
+::
MB:<cache_id0>=bw_MBps0;<cache_id1>=bw_MBps1;...
@@ -418,17 +449,18 @@ Reading/writing the schemata file
Reading the schemata file will show the state of all resources
on all domains. When writing you only need to specify those values
which you wish to change. E.g.
+::
-# cat schemata
-L3DATA:0=fffff;1=fffff;2=fffff;3=fffff
-L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
-# echo "L3DATA:2=3c0;" > schemata
-# cat schemata
-L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
-L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+ # cat schemata
+ L3DATA:0=fffff;1=fffff;2=fffff;3=fffff
+ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
+ # echo "L3DATA:2=3c0;" > schemata
+ # cat schemata
+ L3DATA:0=fffff;1=fffff;2=3c0;3=fffff
+ L3CODE:0=fffff;1=fffff;2=fffff;3=fffff
Cache Pseudo-Locking
---------------------
+====================
CAT enables a user to specify the amount of cache space that an
application can fill. Cache pseudo-locking builds on the fact that a
CPU can still read and write data pre-allocated outside its current
@@ -442,6 +474,7 @@ a region of memory with reduced average read latency.
The creation of a cache pseudo-locked region is triggered by a request
from the user to do so that is accompanied by a schemata of the region
to be pseudo-locked. The cache pseudo-locked region is created as follows:
+
- Create a CAT allocation CLOSNEW with a CBM matching the schemata
from the user of the cache region that will contain the pseudo-locked
memory. This region must not overlap with any current CAT allocation/CLOS
@@ -480,6 +513,7 @@ initial mmap() handling, there is no enforcement afterwards and the
application self needs to ensure it remains affine to the correct cores.
Pseudo-locking is accomplished in two stages:
+
1) During the first stage the system administrator allocates a portion
of cache that should be dedicated to pseudo-locking. At this time an
equivalent portion of memory is allocated, loaded into allocated
@@ -506,7 +540,7 @@ by user space in order to obtain access to the pseudo-locked memory region.
An example of cache pseudo-locked region creation and usage can be found below.
Cache Pseudo-Locking Debugging Interface
----------------------------------------
+----------------------------------------
The pseudo-locking debugging interface is enabled by default (if
CONFIG_DEBUG_FS is enabled) and can be found in /sys/kernel/debug/resctrl.
@@ -514,6 +548,7 @@ There is no explicit way for the kernel to test if a provided memory
location is present in the cache. The pseudo-locking debugging interface uses
the tracing infrastructure to provide two ways to measure cache residency of
the pseudo-locked region:
+
1) Memory access latency using the pseudo_lock_mem_latency tracepoint. Data
from these measurements are best visualized using a hist trigger (see
example below). In this test the pseudo-locked region is traversed at
@@ -529,87 +564,97 @@ it in debugfs as /sys/kernel/debug/resctrl/<newdir>. A single
write-only file, pseudo_lock_measure, is present in this directory. The
measurement of the pseudo-locked region depends on the number written to this
debugfs file:
-1 - writing "1" to the pseudo_lock_measure file will trigger the latency
+
+1:
+ writing "1" to the pseudo_lock_measure file will trigger the latency
measurement captured in the pseudo_lock_mem_latency tracepoint. See
example below.
-2 - writing "2" to the pseudo_lock_measure file will trigger the L2 cache
+2:
+ writing "2" to the pseudo_lock_measure file will trigger the L2 cache
residency (cache hits and misses) measurement captured in the
pseudo_lock_l2 tracepoint. See example below.
-3 - writing "3" to the pseudo_lock_measure file will trigger the L3 cache
+3:
+ writing "3" to the pseudo_lock_measure file will trigger the L3 cache
residency (cache hits and misses) measurement captured in the
pseudo_lock_l3 tracepoint.
All measurements are recorded with the tracing infrastructure. This requires
the relevant tracepoints to be enabled before the measurement is triggered.
-Example of latency debugging interface:
+Example of latency debugging interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example a pseudo-locked region named "newlock" was created. Here is
how we can measure the latency in cycles of reading from this region and
visualize this data with a histogram that is available if CONFIG_HIST_TRIGGERS
-is set:
-# :> /sys/kernel/debug/tracing/trace
-# echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
-# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
-# echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
-# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
-# cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
-
-# event histogram
-#
-# trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
-#
-
-{ latency: 456 } hitcount: 1
-{ latency: 50 } hitcount: 83
-{ latency: 36 } hitcount: 96
-{ latency: 44 } hitcount: 174
-{ latency: 48 } hitcount: 195
-{ latency: 46 } hitcount: 262
-{ latency: 42 } hitcount: 693
-{ latency: 40 } hitcount: 3204
-{ latency: 38 } hitcount: 3484
-
-Totals:
- Hits: 8192
- Entries: 9
- Dropped: 0
-
-Example of cache hits/misses debugging:
+is set::
+
+ # :> /sys/kernel/debug/tracing/trace
+ # echo 'hist:keys=latency' > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/trigger
+ # echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+ # echo 1 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+ # echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/enable
+ # cat /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_mem_latency/hist
+
+ # event histogram
+ #
+ # trigger info: hist:keys=latency:vals=hitcount:sort=hitcount:size=2048 [active]
+ #
+
+ { latency: 456 } hitcount: 1
+ { latency: 50 } hitcount: 83
+ { latency: 36 } hitcount: 96
+ { latency: 44 } hitcount: 174
+ { latency: 48 } hitcount: 195
+ { latency: 46 } hitcount: 262
+ { latency: 42 } hitcount: 693
+ { latency: 40 } hitcount: 3204
+ { latency: 38 } hitcount: 3484
+
+ Totals:
+ Hits: 8192
+ Entries: 9
+ Dropped: 0
+
+Example of cache hits/misses debugging
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example a pseudo-locked region named "newlock" was created on the L2
cache of a platform. Here is how we can obtain details of the cache hits
and misses using the platform's precision counters.
+::
-# :> /sys/kernel/debug/tracing/trace
-# echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
-# echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
-# echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
-# cat /sys/kernel/debug/tracing/trace
+ # :> /sys/kernel/debug/tracing/trace
+ # echo 1 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+ # echo 2 > /sys/kernel/debug/resctrl/newlock/pseudo_lock_measure
+ # echo 0 > /sys/kernel/debug/tracing/events/resctrl/pseudo_lock_l2/enable
+ # cat /sys/kernel/debug/tracing/trace
-# tracer: nop
-#
-# _-----=> irqs-off
-# / _----=> need-resched
-# | / _---=> hardirq/softirq
-# || / _--=> preempt-depth
-# ||| / delay
-# TASK-PID CPU# |||| TIMESTAMP FUNCTION
-# | | | |||| | |
- pseudo_lock_mea-1672 [002] .... 3132.860500: pseudo_lock_l2: hits=4097 miss=0
+ # tracer: nop
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / delay
+ # TASK-PID CPU# |||| TIMESTAMP FUNCTION
+ # | | | |||| | |
+ pseudo_lock_mea-1672 [002] .... 3132.860500: pseudo_lock_l2: hits=4097 miss=0
-Examples for RDT allocation usage:
+Examples for RDT allocation usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+1) Example 1
-Example 1
----------
On a two socket machine (one L3 cache per socket) with just four bits
for cache bit masks, minimum b/w of 10% with a memory bandwidth
-granularity of 10%
+granularity of 10%.
+::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
-# mkdir p0 p1
-# echo "L3:0=3;1=c\nMB:0=50;1=50" > /sys/fs/resctrl/p0/schemata
-# echo "L3:0=3;1=3\nMB:0=50;1=50" > /sys/fs/resctrl/p1/schemata
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
+ # mkdir p0 p1
+ # echo "L3:0=3;1=c\nMB:0=50;1=50" > /sys/fs/resctrl/p0/schemata
+ # echo "L3:0=3;1=3\nMB:0=50;1=50" > /sys/fs/resctrl/p1/schemata
The default resource group is unmodified, so we have access to all parts
of all caches (its schemata file reads "L3:0=f;1=f").
@@ -628,100 +673,106 @@ the b/w accordingly.
If the MBA is specified in MB (megabytes) then the user can enter the max b/w in MB
rather than the percentage values.
+::
-# echo "L3:0=3;1=c\nMB:0=1024;1=500" > /sys/fs/resctrl/p0/schemata
-# echo "L3:0=3;1=3\nMB:0=1024;1=500" > /sys/fs/resctrl/p1/schemata
+ # echo "L3:0=3;1=c\nMB:0=1024;1=500" > /sys/fs/resctrl/p0/schemata
+ # echo "L3:0=3;1=3\nMB:0=1024;1=500" > /sys/fs/resctrl/p1/schemata
In the above example the tasks in "p1" and "p0" on socket 0 would use a max b/w
of 1024MB whereas on socket 1 they would use 500MB.
-Example 2
----------
+2) Example 2
+
Again two sockets, but this time with a more realistic 20-bit mask.
Two real time tasks pid=1234 running on processor 0 and pid=5678 running on
processor 1 on socket 0 on a 2-socket and dual core machine. To avoid noisy
neighbors, each of the two real-time tasks exclusively occupies one quarter
of L3 cache on socket 0.
+::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
First we reset the schemata for the default group so that the "upper"
50% of the L3 cache on socket 0 and 50% of memory b/w cannot be used by
-ordinary tasks:
+ordinary tasks::
-# echo "L3:0=3ff;1=fffff\nMB:0=50;1=100" > schemata
+ # echo "L3:0=3ff;1=fffff\nMB:0=50;1=100" > schemata
Next we make a resource group for our first real time task and give
it access to the "top" 25% of the cache on socket 0.
+::
-# mkdir p0
-# echo "L3:0=f8000;1=fffff" > p0/schemata
+ # mkdir p0
+ # echo "L3:0=f8000;1=fffff" > p0/schemata
Finally we move our first real time task into this resource group. We
also use taskset(1) to ensure the task always runs on a dedicated CPU
on socket 0. Most uses of resource groups will also constrain which
processors tasks run on.
+::
-# echo 1234 > p0/tasks
-# taskset -cp 1 1234
+ # echo 1234 > p0/tasks
+ # taskset -cp 1 1234
-Ditto for the second real time task (with the remaining 25% of cache):
+Ditto for the second real time task (with the remaining 25% of cache)::
-# mkdir p1
-# echo "L3:0=7c00;1=fffff" > p1/schemata
-# echo 5678 > p1/tasks
-# taskset -cp 2 5678
+ # mkdir p1
+ # echo "L3:0=7c00;1=fffff" > p1/schemata
+ # echo 5678 > p1/tasks
+ # taskset -cp 2 5678
For the same 2 socket system with memory b/w resource and CAT L3 the
schemata would look like (assume min_bandwidth 10 and bandwidth_gran is
10):
-For our first real time task this would request 20% memory b/w on socket
-0.
+For our first real time task this would request 20% memory b/w on socket 0.
+::
-# echo -e "L3:0=f8000;1=fffff\nMB:0=20;1=100" > p0/schemata
+ # echo -e "L3:0=f8000;1=fffff\nMB:0=20;1=100" > p0/schemata
For our second real time task this would request another 20% memory b/w
on socket 0.
+::
-# echo -e "L3:0=f8000;1=fffff\nMB:0=20;1=100" > p0/schemata
+ # echo -e "L3:0=f8000;1=fffff\nMB:0=20;1=100" > p0/schemata
-Example 3
----------
+3) Example 3
A single socket system which has real-time tasks running on core 4-7 and
non real-time workload assigned to core 0-3. The real-time tasks share text
and data, so a per task association is not required and due to interaction
with the kernel it's desired that the kernel on these cores shares L3 with
the tasks.
+::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
First we reset the schemata for the default group so that the "upper"
50% of the L3 cache on socket 0, and 50% of memory bandwidth on socket 0
-cannot be used by ordinary tasks:
+cannot be used by ordinary tasks::
-# echo "L3:0=3ff\nMB:0=50" > schemata
+ # echo "L3:0=3ff\nMB:0=50" > schemata
Next we make a resource group for our real time cores and give it access
to the "top" 50% of the cache on socket 0 and 50% of memory bandwidth on
socket 0.
+::
-# mkdir p0
-# echo "L3:0=ffc00\nMB:0=50" > p0/schemata
+ # mkdir p0
+ # echo "L3:0=ffc00\nMB:0=50" > p0/schemata
Finally we move core 4-7 over to the new group and make sure that the
kernel and the tasks running there get 50% of the cache. They should
also get 50% of memory bandwidth assuming that the cores 4-7 are SMT
siblings and only the real time threads are scheduled on the cores 4-7.
+::
-# echo F0 > p0/cpus
+ # echo F0 > p0/cpus
-Example 4
----------
+4) Example 4
The resource groups in previous examples were all in the default "shareable"
mode allowing sharing of their cache allocations. If one resource group
@@ -732,157 +783,168 @@ In this example a new exclusive resource group will be created on a L2 CAT
system with two L2 cache instances that can be configured with an 8-bit
capacity bitmask. The new exclusive resource group will be configured to use
25% of each cache instance.
+::
-# mount -t resctrl resctrl /sys/fs/resctrl/
-# cd /sys/fs/resctrl
+ # mount -t resctrl resctrl /sys/fs/resctrl/
+ # cd /sys/fs/resctrl
First, we observe that the default group is configured to allocate to all L2
-cache:
+cache::
-# cat schemata
-L2:0=ff;1=ff
+ # cat schemata
+ L2:0=ff;1=ff
We could attempt to create the new resource group at this point, but it will
-fail because of the overlap with the schemata of the default group:
-# mkdir p0
-# echo 'L2:0=0x3;1=0x3' > p0/schemata
-# cat p0/mode
-shareable
-# echo exclusive > p0/mode
--sh: echo: write error: Invalid argument
-# cat info/last_cmd_status
-schemata overlaps
+fail because of the overlap with the schemata of the default group::
+
+ # mkdir p0
+ # echo 'L2:0=0x3;1=0x3' > p0/schemata
+ # cat p0/mode
+ shareable
+ # echo exclusive > p0/mode
+ -sh: echo: write error: Invalid argument
+ # cat info/last_cmd_status
+ schemata overlaps
To ensure that there is no overlap with another resource group the default
resource group's schemata has to change, making it possible for the new
resource group to become exclusive.
-# echo 'L2:0=0xfc;1=0xfc' > schemata
-# echo exclusive > p0/mode
-# grep . p0/*
-p0/cpus:0
-p0/mode:exclusive
-p0/schemata:L2:0=03;1=03
-p0/size:L2:0=262144;1=262144
+::
+
+ # echo 'L2:0=0xfc;1=0xfc' > schemata
+ # echo exclusive > p0/mode
+ # grep . p0/*
+ p0/cpus:0
+ p0/mode:exclusive
+ p0/schemata:L2:0=03;1=03
+ p0/size:L2:0=262144;1=262144
A new resource group will on creation not overlap with an exclusive resource
-group:
-# mkdir p1
-# grep . p1/*
-p1/cpus:0
-p1/mode:shareable
-p1/schemata:L2:0=fc;1=fc
-p1/size:L2:0=786432;1=786432
-
-The bit_usage will reflect how the cache is used:
-# cat info/L2/bit_usage
-0=SSSSSSEE;1=SSSSSSEE
-
-A resource group cannot be forced to overlap with an exclusive resource group:
-# echo 'L2:0=0x1;1=0x1' > p1/schemata
--sh: echo: write error: Invalid argument
-# cat info/last_cmd_status
-overlaps with exclusive group
+group::
+
+ # mkdir p1
+ # grep . p1/*
+ p1/cpus:0
+ p1/mode:shareable
+ p1/schemata:L2:0=fc;1=fc
+ p1/size:L2:0=786432;1=786432
+
+The bit_usage will reflect how the cache is used::
+
+ # cat info/L2/bit_usage
+ 0=SSSSSSEE;1=SSSSSSEE
+
+A resource group cannot be forced to overlap with an exclusive resource group::
+
+ # echo 'L2:0=0x1;1=0x1' > p1/schemata
+ -sh: echo: write error: Invalid argument
+ # cat info/last_cmd_status
+ overlaps with exclusive group
Example of Cache Pseudo-Locking
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lock portion of L2 cache from cache id 1 using CBM 0x3. Pseudo-locked
region is exposed at /dev/pseudo_lock/newlock that can be provided to
application for argument to mmap().
+::
-# mount -t resctrl resctrl /sys/fs/resctrl/
-# cd /sys/fs/resctrl
+ # mount -t resctrl resctrl /sys/fs/resctrl/
+ # cd /sys/fs/resctrl
Ensure that there are bits available that can be pseudo-locked, since only
unused bits can be pseudo-locked, the bits to be pseudo-locked need to be
-removed from the default resource group's schemata:
-# cat info/L2/bit_usage
-0=SSSSSSSS;1=SSSSSSSS
-# echo 'L2:1=0xfc' > schemata
-# cat info/L2/bit_usage
-0=SSSSSSSS;1=SSSSSS00
+removed from the default resource group's schemata::
+
+ # cat info/L2/bit_usage
+ 0=SSSSSSSS;1=SSSSSSSS
+ # echo 'L2:1=0xfc' > schemata
+ # cat info/L2/bit_usage
+ 0=SSSSSSSS;1=SSSSSS00
Create a new resource group that will be associated with the pseudo-locked
region, indicate that it will be used for a pseudo-locked region, and
-configure the requested pseudo-locked region capacity bitmask:
+configure the requested pseudo-locked region capacity bitmask::
-# mkdir newlock
-# echo pseudo-locksetup > newlock/mode
-# echo 'L2:1=0x3' > newlock/schemata
+ # mkdir newlock
+ # echo pseudo-locksetup > newlock/mode
+ # echo 'L2:1=0x3' > newlock/schemata
On success the resource group's mode will change to pseudo-locked, the
bit_usage will reflect the pseudo-locked region, and the character device
-exposing the pseudo-locked region will exist:
-
-# cat newlock/mode
-pseudo-locked
-# cat info/L2/bit_usage
-0=SSSSSSSS;1=SSSSSSPP
-# ls -l /dev/pseudo_lock/newlock
-crw------- 1 root root 243, 0 Apr 3 05:01 /dev/pseudo_lock/newlock
-
-/*
- * Example code to access one page of pseudo-locked cache region
- * from user space.
- */
-#define _GNU_SOURCE
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/mman.h>
-
-/*
- * It is required that the application runs with affinity to only
- * cores associated with the pseudo-locked region. Here the cpu
- * is hardcoded for convenience of example.
- */
-static int cpuid = 2;
-
-int main(int argc, char *argv[])
-{
- cpu_set_t cpuset;
- long page_size;
- void *mapping;
- int dev_fd;
- int ret;
-
- page_size = sysconf(_SC_PAGESIZE);
-
- CPU_ZERO(&cpuset);
- CPU_SET(cpuid, &cpuset);
- ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
- if (ret < 0) {
- perror("sched_setaffinity");
- exit(EXIT_FAILURE);
- }
-
- dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
- if (dev_fd < 0) {
- perror("open");
- exit(EXIT_FAILURE);
- }
-
- mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- dev_fd, 0);
- if (mapping == MAP_FAILED) {
- perror("mmap");
- close(dev_fd);
- exit(EXIT_FAILURE);
- }
-
- /* Application interacts with pseudo-locked memory @mapping */
-
- ret = munmap(mapping, page_size);
- if (ret < 0) {
- perror("munmap");
- close(dev_fd);
- exit(EXIT_FAILURE);
- }
-
- close(dev_fd);
- exit(EXIT_SUCCESS);
-}
+exposing the pseudo-locked region will exist::
+
+ # cat newlock/mode
+ pseudo-locked
+ # cat info/L2/bit_usage
+ 0=SSSSSSSS;1=SSSSSSPP
+ # ls -l /dev/pseudo_lock/newlock
+ crw------- 1 root root 243, 0 Apr 3 05:01 /dev/pseudo_lock/newlock
+
+::
+
+ /*
+ * Example code to access one page of pseudo-locked cache region
+ * from user space.
+ */
+ #define _GNU_SOURCE
+ #include <fcntl.h>
+ #include <sched.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <unistd.h>
+ #include <sys/mman.h>
+
+ /*
+ * It is required that the application runs with affinity to only
+ * cores associated with the pseudo-locked region. Here the cpu
+ * is hardcoded for convenience of example.
+ */
+ static int cpuid = 2;
+
+ int main(int argc, char *argv[])
+ {
+ cpu_set_t cpuset;
+ long page_size;
+ void *mapping;
+ int dev_fd;
+ int ret;
+
+ page_size = sysconf(_SC_PAGESIZE);
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpuid, &cpuset);
+ ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
+ if (ret < 0) {
+ perror("sched_setaffinity");
+ exit(EXIT_FAILURE);
+ }
+
+ dev_fd = open("/dev/pseudo_lock/newlock", O_RDWR);
+ if (dev_fd < 0) {
+ perror("open");
+ exit(EXIT_FAILURE);
+ }
+
+ mapping = mmap(0, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev_fd, 0);
+ if (mapping == MAP_FAILED) {
+ perror("mmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ /* Application interacts with pseudo-locked memory @mapping */
+
+ ret = munmap(mapping, page_size);
+ if (ret < 0) {
+ perror("munmap");
+ close(dev_fd);
+ exit(EXIT_FAILURE);
+ }
+
+ close(dev_fd);
+ exit(EXIT_SUCCESS);
+ }
Locking between applications
----------------------------
@@ -921,86 +983,86 @@ Read lock:
B) If success read the directory structure.
C) funlock
-Example with bash:
-
-# Atomically read directory structure
-$ flock -s /sys/fs/resctrl/ find /sys/fs/resctrl
-
-# Read directory contents and create new subdirectory
-
-$ cat create-dir.sh
-find /sys/fs/resctrl/ > output.txt
-mask = function-of(output.txt)
-mkdir /sys/fs/resctrl/newres/
-echo mask > /sys/fs/resctrl/newres/schemata
-
-$ flock /sys/fs/resctrl/ ./create-dir.sh
-
-Example with C:
-
-/*
- * Example code do take advisory locks
- * before accessing resctrl filesystem
- */
-#include <sys/file.h>
-#include <stdlib.h>
-
-void resctrl_take_shared_lock(int fd)
-{
- int ret;
-
- /* take shared lock on resctrl filesystem */
- ret = flock(fd, LOCK_SH);
- if (ret) {
- perror("flock");
- exit(-1);
- }
-}
-
-void resctrl_take_exclusive_lock(int fd)
-{
- int ret;
-
- /* release lock on resctrl filesystem */
- ret = flock(fd, LOCK_EX);
- if (ret) {
- perror("flock");
- exit(-1);
- }
-}
-
-void resctrl_release_lock(int fd)
-{
- int ret;
-
- /* take shared lock on resctrl filesystem */
- ret = flock(fd, LOCK_UN);
- if (ret) {
- perror("flock");
- exit(-1);
- }
-}
-
-void main(void)
-{
- int fd, ret;
-
- fd = open("/sys/fs/resctrl", O_DIRECTORY);
- if (fd == -1) {
- perror("open");
- exit(-1);
- }
- resctrl_take_shared_lock(fd);
- /* code to read directory contents */
- resctrl_release_lock(fd);
-
- resctrl_take_exclusive_lock(fd);
- /* code to read and write directory contents */
- resctrl_release_lock(fd);
-}
-
-Examples for RDT Monitoring along with allocation usage:
-
+Example with bash::
+
+ # Atomically read directory structure
+ $ flock -s /sys/fs/resctrl/ find /sys/fs/resctrl
+
+ # Read directory contents and create new subdirectory
+
+ $ cat create-dir.sh
+ find /sys/fs/resctrl/ > output.txt
+  mask=$(function-of output.txt)
+  mkdir /sys/fs/resctrl/newres/
+  echo "$mask" > /sys/fs/resctrl/newres/schemata
+
+ $ flock /sys/fs/resctrl/ ./create-dir.sh
+
+Example with C::
+
+ /*
+  * Example code to take advisory locks
+ * before accessing resctrl filesystem
+ */
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <sys/file.h>
+
+ void resctrl_take_shared_lock(int fd)
+ {
+ int ret;
+
+ /* take shared lock on resctrl filesystem */
+ ret = flock(fd, LOCK_SH);
+ if (ret) {
+ perror("flock");
+ exit(-1);
+ }
+ }
+
+ void resctrl_take_exclusive_lock(int fd)
+ {
+ int ret;
+
+        /* take exclusive lock on resctrl filesystem */
+ ret = flock(fd, LOCK_EX);
+ if (ret) {
+ perror("flock");
+ exit(-1);
+ }
+ }
+
+ void resctrl_release_lock(int fd)
+ {
+ int ret;
+
+        /* release lock on resctrl filesystem */
+ ret = flock(fd, LOCK_UN);
+ if (ret) {
+ perror("flock");
+ exit(-1);
+ }
+ }
+
+ void main(void)
+ {
+        int fd;
+
+ fd = open("/sys/fs/resctrl", O_DIRECTORY);
+ if (fd == -1) {
+ perror("open");
+ exit(-1);
+ }
+ resctrl_take_shared_lock(fd);
+ /* code to read directory contents */
+ resctrl_release_lock(fd);
+
+ resctrl_take_exclusive_lock(fd);
+ /* code to read and write directory contents */
+ resctrl_release_lock(fd);
+ }
+
+Examples for RDT Monitoring along with allocation usage
+=======================================================
Reading monitored data
----------------------
Reading an event file (for ex: mon_data/mon_L3_00/llc_occupancy) would
@@ -1009,17 +1071,17 @@ group or CTRL_MON group.
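+
+A minimal userspace C sketch (illustrative only; the "p1" group and L3 domain
+in the path are assumptions, not part of the interface) that reads one such
+event file::
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  int main(void)
+  {
+          /* Hypothetical CTRL_MON group "p1", cache domain L3_00. */
+          const char *path =
+                  "/sys/fs/resctrl/p1/mon_data/mon_L3_00/llc_occupancy";
+          unsigned long long bytes;
+          FILE *f = fopen(path, "r");
+
+          if (!f) {
+                  perror("fopen");
+                  return EXIT_FAILURE;
+          }
+          if (fscanf(f, "%llu", &bytes) != 1) {
+                  fprintf(stderr, "could not parse %s\n", path);
+                  fclose(f);
+                  return EXIT_FAILURE;
+          }
+          fclose(f);
+          /* The value is a point-in-time occupancy snapshot, in bytes. */
+          printf("llc_occupancy: %llu bytes\n", bytes);
+          return EXIT_SUCCESS;
+  }
+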
Example 1 (Monitor CTRL_MON group and subset of tasks in CTRL_MON group)
----------
+------------------------------------------------------------------------
On a two socket machine (one L3 cache per socket) with just four bits
-for cache bit masks
+for cache bit masks::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
-# mkdir p0 p1
-# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
-# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
-# echo 5678 > p1/tasks
-# echo 5679 > p1/tasks
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
+ # mkdir p0 p1
+ # echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+ # echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+ # echo 5678 > p1/tasks
+ # echo 5679 > p1/tasks
The default resource group is unmodified, so we have access to all parts
of all caches (its schemata file reads "L3:0=f;1=f").
@@ -1029,47 +1091,51 @@ Tasks that are under the control of group "p0" may only allocate from the
Tasks in group "p1" use the "lower" 50% of cache on both sockets.
Create monitor groups and assign a subset of tasks to each monitor group.
+::
-# cd /sys/fs/resctrl/p1/mon_groups
-# mkdir m11 m12
-# echo 5678 > m11/tasks
-# echo 5679 > m12/tasks
+ # cd /sys/fs/resctrl/p1/mon_groups
+ # mkdir m11 m12
+ # echo 5678 > m11/tasks
+ # echo 5679 > m12/tasks
fetch data (data shown in bytes)
+::
-# cat m11/mon_data/mon_L3_00/llc_occupancy
-16234000
-# cat m11/mon_data/mon_L3_01/llc_occupancy
-14789000
-# cat m12/mon_data/mon_L3_00/llc_occupancy
-16789000
+ # cat m11/mon_data/mon_L3_00/llc_occupancy
+ 16234000
+ # cat m11/mon_data/mon_L3_01/llc_occupancy
+ 14789000
+ # cat m12/mon_data/mon_L3_00/llc_occupancy
+ 16789000
The parent ctrl_mon group shows the aggregated data.
+::
-# cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
-31234000
+ # cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
+ 31234000
Example 2 (Monitor a task from its creation)
----------
-On a two socket machine (one L3 cache per socket)
+--------------------------------------------
+On a two socket machine (one L3 cache per socket)::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
-# mkdir p0 p1
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
+ # mkdir p0 p1
An RMID is allocated to the group once its created and hence the <cmd>
below is monitored from its creation.
+::
-# echo $$ > /sys/fs/resctrl/p1/tasks
-# <cmd>
+ # echo $$ > /sys/fs/resctrl/p1/tasks
+ # <cmd>
-Fetch the data
+Fetch the data::
-# cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
-31789000
+ # cat /sys/fs/resctrl/p1/mon_data/mon_l3_00/llc_occupancy
+ 31789000
Example 3 (Monitor without CAT support or before creating CAT groups)
----------
+---------------------------------------------------------------------
Assume a system like HSW has only CQM and no CAT support. In this case
the resctrl will still mount but cannot create CTRL_MON directories.
@@ -1078,27 +1144,29 @@ able to monitor all tasks including kernel threads.
This can also be used to profile jobs cache size footprint before being
able to allocate them to different allocation groups.
+::
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
-# mkdir mon_groups/m01
-# mkdir mon_groups/m02
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
+ # mkdir mon_groups/m01
+ # mkdir mon_groups/m02
-# echo 3478 > /sys/fs/resctrl/mon_groups/m01/tasks
-# echo 2467 > /sys/fs/resctrl/mon_groups/m02/tasks
+ # echo 3478 > /sys/fs/resctrl/mon_groups/m01/tasks
+ # echo 2467 > /sys/fs/resctrl/mon_groups/m02/tasks
Monitor the groups separately and also get per domain data. From the
below its apparent that the tasks are mostly doing work on
domain(socket) 0.
+::
-# cat /sys/fs/resctrl/mon_groups/m01/mon_L3_00/llc_occupancy
-31234000
-# cat /sys/fs/resctrl/mon_groups/m01/mon_L3_01/llc_occupancy
-34555
-# cat /sys/fs/resctrl/mon_groups/m02/mon_L3_00/llc_occupancy
-31234000
-# cat /sys/fs/resctrl/mon_groups/m02/mon_L3_01/llc_occupancy
-32789
+ # cat /sys/fs/resctrl/mon_groups/m01/mon_L3_00/llc_occupancy
+ 31234000
+ # cat /sys/fs/resctrl/mon_groups/m01/mon_L3_01/llc_occupancy
+ 34555
+ # cat /sys/fs/resctrl/mon_groups/m02/mon_L3_00/llc_occupancy
+ 31234000
+ # cat /sys/fs/resctrl/mon_groups/m02/mon_L3_01/llc_occupancy
+ 32789
Example 4 (Monitor real time tasks)
@@ -1107,15 +1175,17 @@ Example 4 (Monitor real time tasks)
A single socket system which has real time tasks running on cores 4-7
and non real time tasks on other cpus. We want to monitor the cache
occupancy of the real time threads on these cores.
+::
+
+ # mount -t resctrl resctrl /sys/fs/resctrl
+ # cd /sys/fs/resctrl
+ # mkdir p1
-# mount -t resctrl resctrl /sys/fs/resctrl
-# cd /sys/fs/resctrl
-# mkdir p1
+Move the cpus 4-7 over to p1::
-Move the cpus 4-7 over to p1
-# echo f0 > p1/cpus
+ # echo f0 > p1/cpus
-View the llc occupancy snapshot
+View the llc occupancy snapshot::
-# cat /sys/fs/resctrl/p1/mon_data/mon_L3_00/llc_occupancy
-11234000
+ # cat /sys/fs/resctrl/p1/mon_data/mon_L3_00/llc_occupancy
+ 11234000
diff --git a/Documentation/x86/tlb.txt b/Documentation/x86/tlb.rst
index 6a0607b99ed8..82ec58ae63a8 100644
--- a/Documentation/x86/tlb.txt
+++ b/Documentation/x86/tlb.rst
@@ -1,5 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+The TLB
+=======
+
When the kernel unmaps or modifies the attributes of a range of
memory, it has two choices:
+
1. Flush the entire TLB with a two-instruction sequence. This is
a quick operation, but it causes collateral damage: TLB entries
from areas other than the one we are trying to flush will be
@@ -10,6 +17,7 @@ memory, it has two choices:
damage to other TLB entries.
Which method to do depends on a few things:
+
1. The size of the flush being performed. A flush of the entire
address space is obviously better performed by flushing the
entire TLB than doing 2^48/PAGE_SIZE individual flushes.
@@ -33,7 +41,7 @@ well. There is essentially no "right" point to choose.
You may be doing too many individual invalidations if you see the
invlpg instruction (or instructions _near_ it) show up high in
profiles. If you believe that individual invalidations being
-called too often, you can lower the tunable:
+called too often, you can lower the tunable::
/sys/kernel/debug/x86/tlb_single_page_flush_ceiling
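+
+As an illustrative sketch (equivalent to echoing a value from the shell; the
+value 1 is only an example and debugfs must be mounted), the tunable can also
+be written from C::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char *path =
+                  "/sys/kernel/debug/x86/tlb_single_page_flush_ceiling";
+          FILE *f = fopen(path, "w");
+
+          if (!f) {
+                  perror("fopen");
+                  return 1;
+          }
+          /* Ceiling of 1: flush the whole TLB when more than one page changes. */
+          fprintf(f, "1\n");
+          fclose(f);
+          return 0;
+  }
+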
@@ -43,7 +51,7 @@ Setting it to 1 is a very conservative setting and it should
never need to be 0 under normal circumstances.
Despite the fact that a single individual flush on x86 is
-guaranteed to flush a full 2MB [1], hugetlbfs always uses the full
+guaranteed to flush a full 2MB [1]_, hugetlbfs always uses the full
flushes. THP is treated exactly the same as normal memory.
You might see invlpg inside of flush_tlb_mm_range() show up in
@@ -54,15 +62,15 @@ Essentially, you are balancing the cycles you spend doing invlpg
with the cycles that you spend refilling the TLB later.
You can measure how expensive TLB refills are by using
-performance counters and 'perf stat', like this:
+performance counters and 'perf stat', like this::
-perf stat -e
- cpu/event=0x8,umask=0x84,name=dtlb_load_misses_walk_duration/,
- cpu/event=0x8,umask=0x82,name=dtlb_load_misses_walk_completed/,
- cpu/event=0x49,umask=0x4,name=dtlb_store_misses_walk_duration/,
- cpu/event=0x49,umask=0x2,name=dtlb_store_misses_walk_completed/,
- cpu/event=0x85,umask=0x4,name=itlb_misses_walk_duration/,
- cpu/event=0x85,umask=0x2,name=itlb_misses_walk_completed/
+ perf stat -e
+ cpu/event=0x8,umask=0x84,name=dtlb_load_misses_walk_duration/,
+ cpu/event=0x8,umask=0x82,name=dtlb_load_misses_walk_completed/,
+ cpu/event=0x49,umask=0x4,name=dtlb_store_misses_walk_duration/,
+ cpu/event=0x49,umask=0x2,name=dtlb_store_misses_walk_completed/,
+ cpu/event=0x85,umask=0x4,name=itlb_misses_walk_duration/,
+ cpu/event=0x85,umask=0x2,name=itlb_misses_walk_completed/
That works on an IvyBridge-era CPU (i5-3320M). Different CPUs
may have differently-named counters, but they should at least
@@ -70,6 +78,6 @@ be there in some form. You can use pmu-tools 'ocperf list'
(https://github.com/andikleen/pmu-tools) to find the right
counters for a given CPU.
-1. A footnote in Intel's SDM "4.10.4.2 Recommended Invalidation"
+.. [1] A footnote in Intel's SDM "4.10.4.2 Recommended Invalidation"
says: "One execution of INVLPG is sufficient even for a page
with size greater than 4 KBytes."
diff --git a/Documentation/x86/topology.txt b/Documentation/x86/topology.rst
index 2953e3ec9a02..6e28dbe818ab 100644
--- a/Documentation/x86/topology.txt
+++ b/Documentation/x86/topology.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
x86 Topology
============
@@ -33,14 +36,14 @@ The topology of a system is described in the units of:
- cores
- threads
-* Package:
-
- Packages contain a number of cores plus shared resources, e.g. DRAM
- controller, shared caches etc.
+Package
+=======
+Packages contain a number of cores plus shared resources, e.g. DRAM
+controller, shared caches etc.
- AMD nomenclature for package is 'Node'.
+AMD nomenclature for package is 'Node'.
- Package-related topology information in the kernel:
+Package-related topology information in the kernel:
- cpuinfo_x86.x86_max_cores:
@@ -51,7 +54,7 @@ The topology of a system is described in the units of:
The physical ID of the package. This information is retrieved via CPUID
and deduced from the APIC IDs of the cores in the package.
- - cpuinfo_x86.logical_id:
+ - cpuinfo_x86.logical_proc_id:
The logical ID of the package. As we do not trust BIOSes to enumerate the
packages in a consistent way, we introduced the concept of logical package
@@ -66,40 +69,41 @@ The topology of a system is described in the units of:
- cpu_llc_id:
A per-CPU variable containing:
- - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
- Cache
- - On AMD, the Node ID or Core Complex ID containing the Last Level
- Cache. In general, it is a number identifying an LLC uniquely on the
- system.
+ - On Intel, the first APIC ID of the list of CPUs sharing the Last Level
+ Cache
-* Cores:
+ - On AMD, the Node ID or Core Complex ID containing the Last Level
+ Cache. In general, it is a number identifying an LLC uniquely on the
+ system.
- A core consists of 1 or more threads. It does not matter whether the threads
- are SMT- or CMT-type threads.
+Cores
+=====
+A core consists of 1 or more threads. It does not matter whether the threads
+are SMT- or CMT-type threads.
- AMDs nomenclature for a CMT core is "Compute Unit". The kernel always uses
- "core".
+AMD's nomenclature for a CMT core is "Compute Unit". The kernel always uses
+"core".
- Core-related topology information in the kernel:
+Core-related topology information in the kernel:
- smp_num_siblings:
The number of threads in a core. The number of threads in a package can be
- calculated by:
+ calculated by::
threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
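+
+      e.g., for a hypothetical package with cpuinfo_x86.x86_max_cores == 4
+      and smp_num_siblings == 2, threads_per_package = 4 * 2 = 8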
-* Threads:
+Threads
+=======
+A thread is a single scheduling unit. It's the equivalent of a logical Linux
+CPU.
- A thread is a single scheduling unit. It's the equivalent to a logical Linux
- CPU.
+AMD's nomenclature for CMT threads is "Compute Unit Core". The kernel always
+uses "thread".
- AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always
- uses "thread".
-
- Thread-related topology information in the kernel:
+Thread-related topology information in the kernel:
- topology_core_cpumask():
@@ -113,15 +117,15 @@ The topology of a system is described in the units of:
The cpumask contains all online threads in the core to which a thread
belongs.
- - topology_logical_package_id():
+ - topology_logical_package_id():
The logical package ID to which a thread belongs.
- - topology_physical_package_id():
+ - topology_physical_package_id():
The physical package ID to which a thread belongs.
- - topology_core_id();
+  - topology_core_id():
The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
"core_id."
@@ -129,41 +133,41 @@ The topology of a system is described in the units of:
System topology examples
+========================
-Note:
-
-The alternative Linux CPU enumeration depends on how the BIOS enumerates the
-threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
-That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
-the same whether threads are enabled or not. That's merely an implementation
-detail and has no practical impact.
+.. note::
+ The alternative Linux CPU enumeration depends on how the BIOS enumerates the
+ threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
+ That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
+ the same whether threads are enabled or not. That's merely an implementation
+ detail and has no practical impact.
-1) Single Package, Single Core
+1) Single Package, Single Core::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
2) Single Package, Dual Core
- a) One thread per core
+ a) One thread per core::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [core 1] -> [thread 0] -> Linux CPU 1
- b) Two threads per core
+ b) Two threads per core::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 1
-> [core 1] -> [thread 0] -> Linux CPU 2
-> [thread 1] -> Linux CPU 3
- Alternative enumeration:
+ Alternative enumeration::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 2
-> [core 1] -> [thread 0] -> Linux CPU 1
-> [thread 1] -> Linux CPU 3
- AMD nomenclature for CMT systems:
+ AMD nomenclature for CMT systems::
[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
-> [Compute Unit Core 1] -> Linux CPU 1
@@ -172,7 +176,7 @@ detail and has no practical impact.
4) Dual Package, Dual Core
- a) One thread per core
+ a) One thread per core::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [core 1] -> [thread 0] -> Linux CPU 1
@@ -180,7 +184,7 @@ detail and has no practical impact.
[package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
-> [core 1] -> [thread 0] -> Linux CPU 3
- b) Two threads per core
+ b) Two threads per core::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 1
@@ -192,7 +196,7 @@ detail and has no practical impact.
-> [core 1] -> [thread 0] -> Linux CPU 6
-> [thread 1] -> Linux CPU 7
- Alternative enumeration:
+ Alternative enumeration::
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 4
@@ -204,7 +208,7 @@ detail and has no practical impact.
-> [core 1] -> [thread 0] -> Linux CPU 3
-> [thread 1] -> Linux CPU 7
- AMD nomenclature for CMT systems:
+ AMD nomenclature for CMT systems::
[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
-> [Compute Unit Core 1] -> Linux CPU 1
diff --git a/Documentation/x86/usb-legacy-support.txt b/Documentation/x86/usb-legacy-support.rst
index 1894cdfc69d9..e01c08b7c981 100644
--- a/Documentation/x86/usb-legacy-support.txt
+++ b/Documentation/x86/usb-legacy-support.rst
@@ -1,7 +1,11 @@
+
+.. SPDX-License-Identifier: GPL-2.0
+
+==================
USB Legacy support
-~~~~~~~~~~~~~~~~~~
+==================
-Vojtech Pavlik <vojtech@suse.cz>, January 2004
+:Author: Vojtech Pavlik <vojtech@suse.cz>, January 2004
Also known as "USB Keyboard" or "USB Mouse support" in the BIOS Setup is a
@@ -27,18 +31,20 @@ It has several drawbacks, though:
Solutions:
-Problem 1) can be solved by loading the USB drivers prior to loading the
-PS/2 mouse driver. Since the PS/2 mouse driver is in 2.6 compiled into
-the kernel unconditionally, this means the USB drivers need to be
-compiled-in, too.
-
-Problem 2) can currently only be solved by either disabling HIGHMEM64G
-in the kernel config or USB Legacy support in the BIOS. A BIOS update
-could help, but so far no such update exists.
-
-Problem 3) is usually fixed by a BIOS update. Check the board
-manufacturers web site. If an update is not available, disable USB
-Legacy support in the BIOS. If this alone doesn't help, try also adding
-idle=poll on the kernel command line. The BIOS may be entering the SMM
-on the HLT instruction as well.
-
+Problem 1)
+ can be solved by loading the USB drivers prior to loading the
+  PS/2 mouse driver. Since the PS/2 mouse driver is compiled into the 2.6
+  kernel unconditionally, this means the USB drivers need to be
+ compiled-in, too.
+
+Problem 2)
+ can currently only be solved by either disabling HIGHMEM64G
+ in the kernel config or USB Legacy support in the BIOS. A BIOS update
+ could help, but so far no such update exists.
+
+Problem 3)
+ is usually fixed by a BIOS update. Check the board
+  manufacturer's web site. If an update is not available, disable USB
+ Legacy support in the BIOS. If this alone doesn't help, try also adding
+ idle=poll on the kernel command line. The BIOS may be entering the SMM
+ on the HLT instruction as well.
diff --git a/Documentation/x86/x86_64/5level-paging.txt b/Documentation/x86/x86_64/5level-paging.rst
index 2432a5ef86d9..ab88a4514163 100644
--- a/Documentation/x86/x86_64/5level-paging.txt
+++ b/Documentation/x86/x86_64/5level-paging.rst
@@ -1,5 +1,11 @@
-== Overview ==
+.. SPDX-License-Identifier: GPL-2.0
+==============
+5-level paging
+==============
+
+Overview
+========
Original x86-64 was limited by 4-level paging to 256 TiB of virtual address
space and 64 TiB of physical address space. We are already bumping into
this limit: some vendors offer servers with 64 TiB of memory today.
@@ -16,16 +22,17 @@ QEMU 2.9 and later support 5-level paging.
Virtual memory layout for 5-level paging is described in
Documentation/x86/x86_64/mm.txt
-== Enabling 5-level paging ==
+Enabling 5-level paging
+=======================
CONFIG_X86_5LEVEL=y enables the feature.
Kernel with CONFIG_X86_5LEVEL=y still able to boot on 4-level hardware.
In this case additional page table level -- p4d -- will be folded at
runtime.
-== User-space and large virtual address space ==
-
+User-space and large virtual address space
+==========================================
On x86, 5-level paging enables 56-bit userspace virtual address space.
Not all user space is ready to handle wide addresses. It's known that
at least some JIT compilers use higher bits in pointers to encode their
@@ -58,4 +65,3 @@ One important case we need to handle here is interaction with MPX.
MPX (without MAWA extension) cannot handle addresses above 47-bit, so we
need to make sure that MPX cannot be enabled we already have VMA above
the boundary and forbid creating such VMAs once MPX is enabled.
-
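+
+A minimal illustrative sketch (assuming a CONFIG_X86_5LEVEL=y kernel on
+57-bit capable hardware; the hint address and mapping size are arbitrary) of a
+process opting in to addresses above 47 bits by passing a high mmap() hint::
+
+  #define _GNU_SOURCE
+  #include <stdio.h>
+  #include <sys/mman.h>
+
+  int main(void)
+  {
+          /* A hint above the default 47-bit window asks for a wide address. */
+          void *hint = (void *)(1UL << 48);
+          void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
+                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+          if (p == MAP_FAILED) {
+                  perror("mmap");
+                  return 1;
+          }
+          printf("mapped at %p\n", p);
+          munmap(p, 4096);
+          return 0;
+  }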
diff --git a/Documentation/x86/x86_64/boot-options.rst b/Documentation/x86/x86_64/boot-options.rst
new file mode 100644
index 000000000000..2f69836b8445
--- /dev/null
+++ b/Documentation/x86/x86_64/boot-options.rst
@@ -0,0 +1,335 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+AMD64 Specific Boot Options
+===========================
+
+There are many others (usually documented in driver documentation), but
+only the AMD64 specific ones are listed here.
+
+Machine check
+=============
+Please see Documentation/x86/x86_64/machinecheck for sysfs runtime tunables.
+
+ mce=off
+ Disable machine check
+ mce=no_cmci
+ Disable CMCI(Corrected Machine Check Interrupt) that
+ Intel processor supports. Usually this disablement is
+ not recommended, but it might be handy if your hardware
+ is misbehaving.
+ Note that you'll get more problems without CMCI than with
+ due to the shared banks, i.e. you might get duplicated
+ error logs.
+ mce=dont_log_ce
+ Don't make logs for corrected errors. All events reported
+ as corrected are silently cleared by OS.
+ This option will be useful if you have no interest in any
+ of corrected errors.
+ mce=ignore_ce
+ Disable features for corrected errors, e.g. polling timer
+ and CMCI. All events reported as corrected are not cleared
+    by the OS and remain in its error banks.
+ Usually this disablement is not recommended, however if
+ there is an agent checking/clearing corrected errors
+ (e.g. BIOS or hardware monitoring applications), conflicting
+ with OS's error handling, and you cannot deactivate the agent,
+ then this option will be a help.
+ mce=no_lmce
+ Do not opt-in to Local MCE delivery. Use legacy method
+ to broadcast MCEs.
+ mce=bootlog
+ Enable logging of machine checks left over from booting.
+ Disabled by default on AMD Fam10h and older because some BIOS
+ leave bogus ones.
+ If your BIOS doesn't do that it's a good idea to enable though
+ to make sure you log even machine check events that result
+ in a reboot. On Intel systems it is enabled by default.
+ mce=nobootlog
+ Disable boot machine check logging.
+ mce=tolerancelevel[,monarchtimeout] (number,number)
+ tolerance levels:
+ 0: always panic on uncorrected errors, log corrected errors
+ 1: panic or SIGBUS on uncorrected errors, log corrected errors
+ 2: SIGBUS or log uncorrected errors, log corrected errors
+ 3: never panic or SIGBUS, log all errors (for testing only)
+ Default is 1
+ Can be also set using sysfs which is preferable.
+ monarchtimeout:
+ Sets the time in us to wait for other CPUs on machine checks. 0
+ to disable.
+ mce=bios_cmci_threshold
+ Don't overwrite the bios-set CMCI threshold. This boot option
+ prevents Linux from overwriting the CMCI threshold set by the
+ bios. Without this option, Linux always sets the CMCI
+ threshold to 1. Enabling this may make memory predictive failure
+ analysis less effective if the bios sets thresholds for memory
+ errors since we will not see details for all errors.
+ mce=recovery
+ Force-enable recoverable machine check code paths
+
+ nomce (for compatibility with i386)
+ same as mce=off
+
+ Everything else is in sysfs now.
+
+APICs
+=====
+
+ apic
+ Use IO-APIC. Default
+
+ noapic
+ Don't use the IO-APIC.
+
+ disableapic
+ Don't use the local APIC
+
+ nolapic
+ Don't use the local APIC (alias for i386 compatibility)
+
+ pirq=...
+ See Documentation/x86/i386/IO-APIC.txt
+
+ noapictimer
+ Don't set up the APIC timer
+
+ no_timer_check
+ Don't check the IO-APIC timer. This can work around
+ problems with incorrect timer initialization on some boards.
+
+ apicpmtimer
+ Do APIC timer calibration using the pmtimer. Implies
+ apicmaintimer. Useful when your PIT timer is totally broken.
+
+Timing
+======
+
+ notsc
+ Deprecated, use tsc=unstable instead.
+
+ nohpet
+ Don't use the HPET timer.
+
+Idle loop
+=========
+
+ idle=poll
+ Don't do power saving in the idle loop using HLT, but poll for rescheduling
+ event. This will make the CPUs eat a lot more power, but may be useful
+ to get slightly better performance in multiprocessor benchmarks. It also
+ makes some profiling using performance counters more accurate.
+ Please note that on systems with MONITOR/MWAIT support (like Intel EM64T
+ CPUs) this option has no performance advantage over the normal idle loop.
+ It may also interact badly with hyperthreading.
+
+Rebooting
+=========
+
+ reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] [, [w]arm | [c]old]
+ bios
+ Use the CPU reboot vector for warm reset
+ warm
+ Don't set the cold reboot flag
+ cold
+ Set the cold reboot flag
+ triple
+ Force a triple fault (init)
+ kbd
+ Use the keyboard controller. cold reset (default)
+ acpi
+ Use the ACPI RESET_REG in the FADT. If ACPI is not configured or
+ the ACPI reset does not work, the reboot path attempts the reset
+ using the keyboard controller.
+ efi
+ Use efi reset_system runtime service. If EFI is not configured or
+ the EFI reset does not work, the reboot path attempts the reset using
+ the keyboard controller.
+
+ Using warm reset will be much faster especially on big memory
+ systems because the BIOS will not go through the memory check.
+ Disadvantage is that not all hardware will be completely reinitialized
+ on reboot so there may be boot problems on some systems.
+
+ reboot=force
+ Don't stop other CPUs on reboot. This can make reboot more reliable
+ in some cases.
+
+Non Executable Mappings
+=======================
+
+ noexec=on|off
+ on
+ Enable(default)
+ off
+ Disable
+
+NUMA
+====
+
+ numa=off
+ Only set up a single NUMA node spanning all memory.
+
+ numa=noacpi
+ Don't parse the SRAT table for NUMA setup
+
+ numa=fake=<size>[MG]
+ If given as a memory unit, fills all system RAM with nodes of
+ size interleaved over physical nodes.
+
+ numa=fake=<N>
+ If given as an integer, fills all system RAM with N fake nodes
+ interleaved over physical nodes.
+
+ numa=fake=<N>U
+ If given as an integer followed by 'U', it will divide each
+ physical node into N emulated nodes.
+
+ACPI
+====
+
+ acpi=off
+ Don't enable ACPI
+ acpi=ht
+ Use ACPI boot table parsing, but don't enable ACPI interpreter
+ acpi=force
+ Force ACPI on (currently not needed)
+ acpi=strict
+ Disable out of spec ACPI workarounds.
+ acpi_sci={edge,level,high,low}
+ Set up ACPI SCI interrupt.
+ acpi=noirq
+ Don't route interrupts
+ acpi=nocmcff
+ Disable firmware first mode for corrected errors. This
+ disables parsing the HEST CMC error source to check if
+ firmware has set the FF flag. This may result in
+ duplicate corrected error reports.
+
+PCI
+===
+
+ pci=off
+ Don't use PCI
+ pci=conf1
+ Use conf1 access.
+ pci=conf2
+ Use conf2 access.
+ pci=rom
+ Assign ROMs.
+ pci=assign-busses
+ Assign busses
+ pci=irqmask=MASK
+ Set PCI interrupt mask to MASK
+ pci=lastbus=NUMBER
+ Scan up to NUMBER busses, no matter what the mptable says.
+ pci=noacpi
+ Don't use ACPI to set up PCI interrupt routing.
+
+IOMMU (input/output memory management unit)
+===========================================
+Multiple x86-64 PCI-DMA mapping implementations exist, for example:
+
+ 1. <lib/dma-direct.c>: use no hardware/software IOMMU at all
+ (e.g. because you have < 3 GB memory).
+ Kernel boot message: "PCI-DMA: Disabling IOMMU"
+
+ 2. <arch/x86/kernel/amd_gart_64.c>: AMD GART based hardware IOMMU.
+ Kernel boot message: "PCI-DMA: using GART IOMMU"
+
+ 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used
+     e.g. if there is no hardware IOMMU in the system and it is needed because
+     you have >3GB memory or told the kernel to use it (iommu=soft).
+ Kernel boot message: "PCI-DMA: Using software bounce buffering
+ for IO (SWIOTLB)"
+
+ 4. <arch/x86_64/pci-calgary.c> : IBM Calgary hardware IOMMU. Used in IBM
+ pSeries and xSeries servers. This hardware IOMMU supports DMA address
+ mapping with memory protection, etc.
+ Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
+
+::
+
+ iommu=[<size>][,noagp][,off][,force][,noforce]
+ [,memaper[=<order>]][,merge][,fullflush][,nomerge]
+ [,noaperture][,calgary]
+
+General iommu options:
+
+ off
+ Don't initialize and use any kind of IOMMU.
+ noforce
+ Don't force hardware IOMMU usage when it is not needed. (default).
+ force
+ Force the use of the hardware IOMMU even when it is
+ not actually needed (e.g. because < 3 GB memory).
+ soft
+ Use software bounce buffering (SWIOTLB) (default for
+ Intel machines). This can be used to prevent the usage
+ of an available hardware IOMMU.
+
+iommu options only relevant to the AMD GART hardware IOMMU:
+
+ <size>
+ Set the size of the remapping area in bytes.
+ allowed
+ Overwrite iommu off workarounds for specific chipsets.
+ fullflush
+ Flush IOMMU on each allocation (default).
+ nofullflush
+ Don't use IOMMU fullflush.
+ memaper[=<order>]
+ Allocate an own aperture over RAM with size 32MB<<order.
+ (default: order=1, i.e. 64MB)
+ merge
+ Do scatter-gather (SG) merging. Implies "force" (experimental).
+ nomerge
+ Don't do scatter-gather (SG) merging.
+ noaperture
+ Ask the IOMMU not to touch the aperture for AGP.
+ noagp
+ Don't initialize the AGP driver and use full aperture.
+ panic
+ Always panic when IOMMU overflows.
+ calgary
+ Use the Calgary IOMMU if it is available
+
+iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
+implementation:
+
+ swiotlb=<pages>[,force]
+ <pages>
+ Prereserve that many 128K pages for the software IO bounce buffering.
+ force
+ Force all IO through the software TLB.
+
+Settings for the IBM Calgary hardware IOMMU currently found in IBM
+pSeries and xSeries machines:
+
+ calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
+ Set the size of each PCI slot's translation table when using the
+ Calgary IOMMU. This is the size of the translation table itself
+ in main memory. The smallest table, 64k, covers an IO space of
+ 32MB; the largest, 8MB table, can cover an IO space of 4GB.
+ Normally the kernel will make the right choice by itself.
+ calgary=[translate_empty_slots]
+ Enable translation even on slots that have no devices attached to
+ them, in case a device will be hotplugged in the future.
+ calgary=[disable=<PCI bus number>]
+ Disable translation on a given PHB. For
+ example, the built-in graphics adapter resides on the first bridge
+ (PCI bus number 0); if translation (isolation) is enabled on this
+ bridge, X servers that access the hardware directly from user
+ space might stop working. Use this option if you have devices that
+ are accessed from userspace directly on some PCI host bridge.
+ panic
+ Always panic when IOMMU overflows
+
+
+Miscellaneous
+=============
+
+ nogbpages
+ Do not use GB pages for kernel direct mappings.
+ gbpages
+ Use GB pages for kernel direct mappings.
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
deleted file mode 100644
index abc53886655e..000000000000
--- a/Documentation/x86/x86_64/boot-options.txt
+++ /dev/null
@@ -1,278 +0,0 @@
-AMD64 specific boot options
-
-There are many others (usually documented in driver documentation), but
-only the AMD64 specific ones are listed here.
-
-Machine check
-
- Please see Documentation/x86/x86_64/machinecheck for sysfs runtime tunables.
-
- mce=off
- Disable machine check
- mce=no_cmci
- Disable CMCI(Corrected Machine Check Interrupt) that
- Intel processor supports. Usually this disablement is
- not recommended, but it might be handy if your hardware
- is misbehaving.
- Note that you'll get more problems without CMCI than with
- due to the shared banks, i.e. you might get duplicated
- error logs.
- mce=dont_log_ce
- Don't make logs for corrected errors. All events reported
- as corrected are silently cleared by OS.
- This option will be useful if you have no interest in any
- of corrected errors.
- mce=ignore_ce
- Disable features for corrected errors, e.g. polling timer
- and CMCI. All events reported as corrected are not cleared
- by OS and remained in its error banks.
- Usually this disablement is not recommended, however if
- there is an agent checking/clearing corrected errors
- (e.g. BIOS or hardware monitoring applications), conflicting
- with OS's error handling, and you cannot deactivate the agent,
- then this option will be a help.
- mce=no_lmce
- Do not opt-in to Local MCE delivery. Use legacy method
- to broadcast MCEs.
- mce=bootlog
- Enable logging of machine checks left over from booting.
- Disabled by default on AMD Fam10h and older because some BIOS
- leave bogus ones.
- If your BIOS doesn't do that it's a good idea to enable though
- to make sure you log even machine check events that result
- in a reboot. On Intel systems it is enabled by default.
- mce=nobootlog
- Disable boot machine check logging.
- mce=tolerancelevel[,monarchtimeout] (number,number)
- tolerance levels:
- 0: always panic on uncorrected errors, log corrected errors
- 1: panic or SIGBUS on uncorrected errors, log corrected errors
- 2: SIGBUS or log uncorrected errors, log corrected errors
- 3: never panic or SIGBUS, log all errors (for testing only)
- Default is 1
- Can be also set using sysfs which is preferable.
- monarchtimeout:
- Sets the time in us to wait for other CPUs on machine checks. 0
- to disable.
- mce=bios_cmci_threshold
- Don't overwrite the bios-set CMCI threshold. This boot option
- prevents Linux from overwriting the CMCI threshold set by the
- bios. Without this option, Linux always sets the CMCI
- threshold to 1. Enabling this may make memory predictive failure
- analysis less effective if the bios sets thresholds for memory
- errors since we will not see details for all errors.
- mce=recovery
- Force-enable recoverable machine check code paths
-
- nomce (for compatibility with i386): same as mce=off
-
- Everything else is in sysfs now.
-
-APICs
-
- apic Use IO-APIC. Default
-
- noapic Don't use the IO-APIC.
-
- disableapic Don't use the local APIC
-
- nolapic Don't use the local APIC (alias for i386 compatibility)
-
- pirq=... See Documentation/x86/i386/IO-APIC.txt
-
- noapictimer Don't set up the APIC timer
-
- no_timer_check Don't check the IO-APIC timer. This can work around
- problems with incorrect timer initialization on some boards.
- apicpmtimer
- Do APIC timer calibration using the pmtimer. Implies
- apicmaintimer. Useful when your PIT timer is totally
- broken.
-
-Timing
-
- notsc
- Deprecated, use tsc=unstable instead.
-
- nohpet
- Don't use the HPET timer.
-
-Idle loop
-
- idle=poll
- Don't do power saving in the idle loop using HLT, but poll for rescheduling
- event. This will make the CPUs eat a lot more power, but may be useful
- to get slightly better performance in multiprocessor benchmarks. It also
- makes some profiling using performance counters more accurate.
- Please note that on systems with MONITOR/MWAIT support (like Intel EM64T
- CPUs) this option has no performance advantage over the normal idle loop.
- It may also interact badly with hyperthreading.
-
-Rebooting
-
- reboot=b[ios] | t[riple] | k[bd] | a[cpi] | e[fi] [, [w]arm | [c]old]
- bios Use the CPU reboot vector for warm reset
- warm Don't set the cold reboot flag
- cold Set the cold reboot flag
- triple Force a triple fault (init)
- kbd Use the keyboard controller. cold reset (default)
- acpi Use the ACPI RESET_REG in the FADT. If ACPI is not configured or the
- ACPI reset does not work, the reboot path attempts the reset using
- the keyboard controller.
- efi Use efi reset_system runtime service. If EFI is not configured or the
- EFI reset does not work, the reboot path attempts the reset using
- the keyboard controller.
-
- Using warm reset will be much faster especially on big memory
- systems because the BIOS will not go through the memory check.
- Disadvantage is that not all hardware will be completely reinitialized
- on reboot so there may be boot problems on some systems.
-
- reboot=force
-
- Don't stop other CPUs on reboot. This can make reboot more reliable
- in some cases.
-
-Non Executable Mappings
-
- noexec=on|off
-
- on Enable(default)
- off Disable
-
-NUMA
-
- numa=off Only set up a single NUMA node spanning all memory.
-
- numa=noacpi Don't parse the SRAT table for NUMA setup
-
- numa=fake=<size>[MG]
- If given as a memory unit, fills all system RAM with nodes of
- size interleaved over physical nodes.
-
- numa=fake=<N>
- If given as an integer, fills all system RAM with N fake nodes
- interleaved over physical nodes.
-
- numa=fake=<N>U
- If given as an integer followed by 'U', it will divide each
- physical node into N emulated nodes.
-
-ACPI
-
- acpi=off Don't enable ACPI
- acpi=ht Use ACPI boot table parsing, but don't enable ACPI
- interpreter
- acpi=force Force ACPI on (currently not needed)
-
- acpi=strict Disable out of spec ACPI workarounds.
-
- acpi_sci={edge,level,high,low} Set up ACPI SCI interrupt.
-
- acpi=noirq Don't route interrupts
-
- acpi=nocmcff Disable firmware first mode for corrected errors. This
- disables parsing the HEST CMC error source to check if
- firmware has set the FF flag. This may result in
- duplicate corrected error reports.
-
-PCI
-
- pci=off Don't use PCI
- pci=conf1 Use conf1 access.
- pci=conf2 Use conf2 access.
- pci=rom Assign ROMs.
- pci=assign-busses Assign busses
- pci=irqmask=MASK Set PCI interrupt mask to MASK
- pci=lastbus=NUMBER Scan up to NUMBER busses, no matter what the mptable says.
- pci=noacpi Don't use ACPI to set up PCI interrupt routing.
-
-IOMMU (input/output memory management unit)
-
- Multiple x86-64 PCI-DMA mapping implementations exist, for example:
-
- 1. <lib/dma-direct.c>: use no hardware/software IOMMU at all
- (e.g. because you have < 3 GB memory).
- Kernel boot message: "PCI-DMA: Disabling IOMMU"
-
- 2. <arch/x86/kernel/amd_gart_64.c>: AMD GART based hardware IOMMU.
- Kernel boot message: "PCI-DMA: using GART IOMMU"
-
- 3. <arch/x86_64/kernel/pci-swiotlb.c> : Software IOMMU implementation. Used
- e.g. if there is no hardware IOMMU in the system and it is need because
- you have >3GB memory or told the kernel to us it (iommu=soft))
- Kernel boot message: "PCI-DMA: Using software bounce buffering
- for IO (SWIOTLB)"
-
- 4. <arch/x86_64/pci-calgary.c> : IBM Calgary hardware IOMMU. Used in IBM
- pSeries and xSeries servers. This hardware IOMMU supports DMA address
- mapping with memory protection, etc.
- Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
-
- iommu=[<size>][,noagp][,off][,force][,noforce]
- [,memaper[=<order>]][,merge][,fullflush][,nomerge]
- [,noaperture][,calgary]
-
- General iommu options:
- off Don't initialize and use any kind of IOMMU.
- noforce Don't force hardware IOMMU usage when it is not needed.
- (default).
- force Force the use of the hardware IOMMU even when it is
- not actually needed (e.g. because < 3 GB memory).
- soft Use software bounce buffering (SWIOTLB) (default for
- Intel machines). This can be used to prevent the usage
- of an available hardware IOMMU.
-
- iommu options only relevant to the AMD GART hardware IOMMU:
- <size> Set the size of the remapping area in bytes.
- allowed Overwrite iommu off workarounds for specific chipsets.
- fullflush Flush IOMMU on each allocation (default).
- nofullflush Don't use IOMMU fullflush.
- memaper[=<order>] Allocate an own aperture over RAM with size 32MB<<order.
- (default: order=1, i.e. 64MB)
- merge Do scatter-gather (SG) merging. Implies "force"
- (experimental).
- nomerge Don't do scatter-gather (SG) merging.
- noaperture Ask the IOMMU not to touch the aperture for AGP.
- noagp Don't initialize the AGP driver and use full aperture.
- panic Always panic when IOMMU overflows.
- calgary Use the Calgary IOMMU if it is available
-
- iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
- implementation:
- swiotlb=<pages>[,force]
- <pages> Prereserve that many 128K pages for the software IO
- bounce buffering.
- force Force all IO through the software TLB.
-
- Settings for the IBM Calgary hardware IOMMU currently found in IBM
- pSeries and xSeries machines:
-
- calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
- calgary=[translate_empty_slots]
- calgary=[disable=<PCI bus number>]
- panic Always panic when IOMMU overflows
-
- 64k,...,8M - Set the size of each PCI slot's translation table
- when using the Calgary IOMMU. This is the size of the translation
- table itself in main memory. The smallest table, 64k, covers an IO
- space of 32MB; the largest, 8MB table, can cover an IO space of
- 4GB. Normally the kernel will make the right choice by itself.
-
- translate_empty_slots - Enable translation even on slots that have
- no devices attached to them, in case a device will be hotplugged
- in the future.
-
- disable=<PCI bus number> - Disable translation on a given PHB. For
- example, the built-in graphics adapter resides on the first bridge
- (PCI bus number 0); if translation (isolation) is enabled on this
- bridge, X servers that access the hardware directly from user
- space might stop working. Use this option if you have devices that
- are accessed from userspace directly on some PCI host bridge.
-
-Miscellaneous
-
- nogbpages
- Do not use GB pages for kernel direct mappings.
- gbpages
- Use GB pages for kernel direct mappings.
diff --git a/Documentation/x86/x86_64/cpu-hotplug-spec b/Documentation/x86/x86_64/cpu-hotplug-spec.rst
index 3c23e0587db3..8d1c91f0c880 100644
--- a/Documentation/x86/x86_64/cpu-hotplug-spec
+++ b/Documentation/x86/x86_64/cpu-hotplug-spec.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================
Firmware support for CPU hotplug under Linux/x86-64
----------------------------------------------------
+===================================================
Linux/x86-64 supports CPU hotplug now. For various reasons Linux wants to
know in advance of boot time the maximum number of CPUs that could be plugged
diff --git a/Documentation/x86/x86_64/fake-numa-for-cpusets b/Documentation/x86/x86_64/fake-numa-for-cpusets.rst
index 4b09f18831f8..74fbb78b3c67 100644
--- a/Documentation/x86/x86_64/fake-numa-for-cpusets
+++ b/Documentation/x86/x86_64/fake-numa-for-cpusets.rst
@@ -1,5 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+Fake NUMA For CPUSets
+=====================
+
+:Author: David Rientjes <rientjes@cs.washington.edu>
+
Using numa=fake and CPUSets for Resource Management
-Written by David Rientjes <rientjes@cs.washington.edu>
This document describes how the numa=fake x86_64 command-line option can be used
in conjunction with cpusets for coarse memory management. Using this feature,
@@ -20,7 +27,7 @@ you become more familiar with using this combination for resource control,
you'll determine a better setup to minimize the number of nodes you have to deal
with.
-A machine may be split as follows with "numa=fake=4*512," as reported by dmesg:
+A machine may be split as follows with "numa=fake=4*512," as reported by dmesg::
Faking node 0 at 0000000000000000-0000000020000000 (512MB)
Faking node 1 at 0000000020000000-0000000040000000 (512MB)
@@ -34,7 +41,7 @@ A machine may be split as follows with "numa=fake=4*512," as reported by dmesg:
Now following the instructions for mounting the cpusets filesystem from
Documentation/cgroup-v1/cpusets.txt, you can assign fake nodes (i.e. contiguous memory
-address spaces) to individual cpusets:
+address spaces) to individual cpusets::
[root@xroads /]# mkdir exampleset
[root@xroads /]# mount -t cpuset none exampleset
@@ -47,7 +54,7 @@ Now this cpuset, 'ddset', will only allowed access to fake nodes 0 and 1 for
memory allocations (1G).
You can now assign tasks to these cpusets to limit the memory resources
-available to them according to the fake nodes assigned as mems:
+available to them according to the fake nodes assigned as mems::
[root@xroads /exampleset/ddset]# echo $$ > tasks
[root@xroads /exampleset/ddset]# dd if=/dev/zero of=tmp bs=1024 count=1G
@@ -57,9 +64,13 @@ Notice the difference between the system memory usage as reported by
/proc/meminfo between the restricted cpuset case above and the unrestricted
case (i.e. running the same 'dd' command without assigning it to a fake NUMA
cpuset):
- Unrestricted Restricted
- MemTotal: 3091900 kB 3091900 kB
- MemFree: 42113 kB 1513236 kB
+
+ ======== ============ ==========
+ Name Unrestricted Restricted
+ ======== ============ ==========
+ MemTotal 3091900 kB 3091900 kB
+ MemFree 42113 kB 1513236 kB
+ ======== ============ ==========
This allows for coarse memory management for the tasks you assign to particular
cpusets. Since cpusets can form a hierarchy, you can create some pretty
diff --git a/Documentation/x86/x86_64/index.rst b/Documentation/x86/x86_64/index.rst
new file mode 100644
index 000000000000..d6eaaa5a35fc
--- /dev/null
+++ b/Documentation/x86/x86_64/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+x86_64 Support
+==============
+
+.. toctree::
+ :maxdepth: 2
+
+ boot-options
+ uefi
+ mm
+ 5level-paging
+ fake-numa-for-cpusets
+ cpu-hotplug-spec
+ machinecheck
diff --git a/Documentation/x86/x86_64/machinecheck b/Documentation/x86/x86_64/machinecheck.rst
index d0648a74fceb..e189168406fa 100644
--- a/Documentation/x86/x86_64/machinecheck
+++ b/Documentation/x86/x86_64/machinecheck.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
-Configurable sysfs parameters for the x86-64 machine check code.
+===============================================================
+Configurable sysfs parameters for the x86-64 machine check code
+===============================================================
Machine checks report internal hardware error conditions detected
by the CPU. Uncorrected errors typically cause a machine check
@@ -16,14 +19,13 @@ log then mcelog should run to collect and decode machine check entries
from /dev/mcelog. Normally mcelog should be run regularly from a cronjob.
Each CPU has a directory in /sys/devices/system/machinecheck/machinecheckN
-(N = CPU number)
+(N = CPU number).
The directory contains some configurable entries:
-Entries:
-
bankNctl
-(N bank number)
+  (N = bank number)
+
64bit Hex bitmask enabling/disabling specific subevents for bank N
When a bit in the bitmask is zero then the respective
subevent will not be reported.
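+
+For illustration, a small userspace C sketch (CPU 0 and bank 0 are arbitrary
+choices here) that reads one bank control mask::
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          const char *path =
+                  "/sys/devices/system/machinecheck/machinecheck0/bank0ctl";
+          unsigned long long ctl;
+          FILE *f = fopen(path, "r");
+
+          if (!f) {
+                  perror("fopen");
+                  return 1;
+          }
+          if (fscanf(f, "%llx", &ctl) != 1) {
+                  fclose(f);
+                  return 1;
+          }
+          fclose(f);
+          /* Each set bit enables reporting of the corresponding subevent. */
+          printf("bank0ctl: 0x%llx\n", ctl);
+          return 0;
+  }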
diff --git a/Documentation/x86/x86_64/mm.rst b/Documentation/x86/x86_64/mm.rst
new file mode 100644
index 000000000000..267fc4808945
--- /dev/null
+++ b/Documentation/x86/x86_64/mm.rst
@@ -0,0 +1,161 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Memory Management
+=================
+
+Complete virtual memory map with 4-level page tables
+====================================================
+
+.. note::
+
+ - Negative addresses such as "-23 TB" are absolute addresses in bytes, counted down
+ from the top of the 64-bit address space. It's easier to understand the layout
+ when seen both in absolute addresses and in distance-from-top notation.
+
+ For example 0xffffe90000000000 == -23 TB, it's 23 TB lower than the top of the
+ 64-bit address space (ffffffffffffffff).
+
+ Note that as we get closer to the top of the address space, the notation changes
+ from TB to GB and then MB/KB.
+
+ - "16M TB" might look weird at first sight, but it's an easier to visualize size
+ notation than "16 EB", which few will recognize at first sight as 16 exabytes.
+    It also shows nicely how incredibly large the 64-bit address space is.
+
+::
+
+ ========================================================================================================================
+ Start addr | Offset | End addr | Size | VM area description
+ ========================================================================================================================
+ | | | |
+ 0000000000000000 | 0 | 00007fffffffffff | 128 TB | user-space virtual memory, different per mm
+ __________________|____________|__________________|_________|___________________________________________________________
+ | | | |
+ 0000800000000000 | +128 TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical
+ | | | | virtual memory addresses up to the -128 TB
+ | | | | starting offset of kernel mappings.
+ __________________|____________|__________________|_________|___________________________________________________________
+ |
+ | Kernel-space virtual memory, shared between all processes:
+ ____________________________________________________________|___________________________________________________________
+ | | | |
+ ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
+ ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
+ ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
+ ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
+ ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
+ ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
+ ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
+ __________________|____________|__________________|_________|____________________________________________________________
+ |
+ | Identical layout to the 56-bit one from here on:
+ ____________________________________________________________|____________________________________________________________
+ | | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
+ ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
+ ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
+ ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
+ ffffffff80000000 |-2048 MB | | |
+ ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
+ ffffffffff000000 | -16 MB | | |
+ FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
+ ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
+ ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
+ __________________|____________|__________________|_________|___________________________________________________________
+
+
+Complete virtual memory map with 5-level page tables
+====================================================
+
+.. note::
+
+ - With 56-bit addresses, user-space memory gets expanded by a factor of 512x,
+ from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PB starting
+ offset and many of the regions expand to support the much larger physical
+ memory supported.
+
+::
+
+ ========================================================================================================================
+ Start addr | Offset | End addr | Size | VM area description
+ ========================================================================================================================
+ | | | |
+ 0000000000000000 | 0 | 00ffffffffffffff | 64 PB | user-space virtual memory, different per mm
+ __________________|____________|__________________|_________|___________________________________________________________
+ | | | |
+ 0100000000000000 | +64 PB | feffffffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
+ | | | | virtual memory addresses up to the -64 PB
+ | | | | starting offset of kernel mappings.
+ __________________|____________|__________________|_________|___________________________________________________________
+ |
+ | Kernel-space virtual memory, shared between all processes:
+ ____________________________________________________________|___________________________________________________________
+ | | | |
+ ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
+ ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
+ ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
+ ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
+ ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
+ ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
+ ffdf000000000000 | -8.25 PB | fffffbffffffffff | ~8 PB | KASAN shadow memory
+ __________________|____________|__________________|_________|____________________________________________________________
+ |
+ | Identical layout to the 47-bit one from here on:
+ ____________________________________________________________|____________________________________________________________
+ | | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
+ ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
+ ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
+ ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
+ ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
+ ffffffff80000000 |-2048 MB | | |
+ ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
+ ffffffffff000000 | -16 MB | | |
+ FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
+ ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
+ ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
+ __________________|____________|__________________|_________|___________________________________________________________
+
+The architecture defines a 64-bit virtual address; implementations can support
+less. Currently 48- and 57-bit virtual addresses are supported. Bits 63
+through to the most-significant implemented bit are sign extended.
+This causes a hole between user-space and kernel addresses if you interpret
+them as unsigned.
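
The sign-extension rule above is easy to check outside the kernel. The following
stand-alone sketch is illustrative only and not part of this patch; the helper name
is made up, and 48 is used as the implemented-bit count of the 4-level layout::

    /*
     * Illustrative check: an address is canonical when bits (va_bits-1)..63
     * are all zero (user half) or all one (kernel half).
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static bool is_canonical(uint64_t vaddr, unsigned int va_bits)
    {
            uint64_t top = ~((1ULL << (va_bits - 1)) - 1);  /* bits 47..63 for va_bits = 48 */

            return (vaddr & top) == 0 || (vaddr & top) == top;
    }

    int main(void)
    {
            printf("%d\n", is_canonical(0x00007fffffffffffULL, 48)); /* 1: top of user space */
            printf("%d\n", is_canonical(0x0000800000000000ULL, 48)); /* 0: start of the hole */
            printf("%d\n", is_canonical(0xffff800000000000ULL, 48)); /* 1: start of kernel space */
            return 0;
    }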
+
+The direct mapping covers all memory in the system up to the highest
+memory address (this means in some cases it can also include PCI memory
+holes).
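
A small aside, not part of this patch: because the direct map is one linear window,
turning a physical address into its direct-map virtual address is just an addition of
page_offset_base. The sketch below hard-codes the non-randomized 4-level base from the
table above (the PAGE_OFFSET_BASE macro name is mine); with KASLR or 5-level paging the
base differs::

    #include <stdio.h>
    #include <stdint.h>

    /* Non-randomized 4-level page_offset_base from the table above. */
    #define PAGE_OFFSET_BASE 0xffff888000000000ULL

    int main(void)
    {
            uint64_t phys = 0x100000;                /* example: 1 MiB physical */
            uint64_t virt = PAGE_OFFSET_BASE + phys; /* what __va() would return */

            printf("phys 0x%llx -> direct-map virt 0x%llx\n",
                   (unsigned long long)phys, (unsigned long long)virt);
            return 0;
    }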
+
+vmalloc space is lazily synchronized into the different PML4/PML5 pages of
+the processes using the page fault handler, with init_top_pgt as
+reference.
+
+We map EFI runtime services in the 'efi_pgd' PGD in a 64 GB large virtual
+memory window (this size is arbitrary; it can be raised later if needed).
+The mappings are not part of any other kernel PGD and are only available
+during EFI runtime calls.
+
+Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
+physical memory, vmalloc/ioremap space and virtual memory map are randomized.
+Their order is preserved but their base will be offset early at boot time.
+
+Be very careful vs. KASLR when changing anything here. The KASLR address
+range must not overlap with anything except the KASAN shadow area, which is
+correct as KASAN disables KASLR.
+
+For both 4- and 5-level layouts, the STACKLEAK_POISON value lives in the last
+2 MB hole: ffffffffffff4111
diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
deleted file mode 100644
index 804f9426ed17..000000000000
--- a/Documentation/x86/x86_64/mm.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-====================================================
-Complete virtual memory map with 4-level page tables
-====================================================
-
-Notes:
-
- - Negative addresses such as "-23 TB" are absolute addresses in bytes, counted down
- from the top of the 64-bit address space. It's easier to understand the layout
- when seen both in absolute addresses and in distance-from-top notation.
-
- For example 0xffffe90000000000 == -23 TB, it's 23 TB lower than the top of the
- 64-bit address space (ffffffffffffffff).
-
- Note that as we get closer to the top of the address space, the notation changes
- from TB to GB and then MB/KB.
-
- - "16M TB" might look weird at first sight, but it's an easier to visualize size
- notation than "16 EB", which few will recognize at first sight as 16 exabytes.
- It also shows it nicely how incredibly large 64-bit address space is.
-
-========================================================================================================================
- Start addr | Offset | End addr | Size | VM area description
-========================================================================================================================
- | | | |
- 0000000000000000 | 0 | 00007fffffffffff | 128 TB | user-space virtual memory, different per mm
-__________________|____________|__________________|_________|___________________________________________________________
- | | | |
- 0000800000000000 | +128 TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of non-canonical
- | | | | virtual memory addresses up to the -128 TB
- | | | | starting offset of kernel mappings.
-__________________|____________|__________________|_________|___________________________________________________________
- |
- | Kernel-space virtual memory, shared between all processes:
-____________________________________________________________|___________________________________________________________
- | | | |
- ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
- ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
- ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
- ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
- ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
- ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
- ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
-__________________|____________|__________________|_________|____________________________________________________________
- |
- | Identical layout to the 56-bit one from here on:
-____________________________________________________________|____________________________________________________________
- | | | |
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
- ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
- ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
- ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
- ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
- ffffffff80000000 |-2048 MB | | |
- ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
- ffffffffff000000 | -16 MB | | |
- FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
- ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
- ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
-__________________|____________|__________________|_________|___________________________________________________________
-
-
-====================================================
-Complete virtual memory map with 5-level page tables
-====================================================
-
-Notes:
-
- - With 56-bit addresses, user-space memory gets expanded by a factor of 512x,
- from 0.125 PB to 64 PB. All kernel mappings shift down to the -64 PT starting
- offset and many of the regions expand to support the much larger physical
- memory supported.
-
-========================================================================================================================
- Start addr | Offset | End addr | Size | VM area description
-========================================================================================================================
- | | | |
- 0000000000000000 | 0 | 00ffffffffffffff | 64 PB | user-space virtual memory, different per mm
-__________________|____________|__________________|_________|___________________________________________________________
- | | | |
- 0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
- | | | | virtual memory addresses up to the -64 PB
- | | | | starting offset of kernel mappings.
-__________________|____________|__________________|_________|___________________________________________________________
- |
- | Kernel-space virtual memory, shared between all processes:
-____________________________________________________________|___________________________________________________________
- | | | |
- ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
- ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
- ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
- ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
- ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
- ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
- ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
- ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
-__________________|____________|__________________|_________|____________________________________________________________
- |
- | Identical layout to the 47-bit one from here on:
-____________________________________________________________|____________________________________________________________
- | | | |
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
- ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
- ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
- ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
- ffffffff80000000 | -2 GB | ffffffff9fffffff | 512 MB | kernel text mapping, mapped to physical address 0
- ffffffff80000000 |-2048 MB | | |
- ffffffffa0000000 |-1536 MB | fffffffffeffffff | 1520 MB | module mapping space
- ffffffffff000000 | -16 MB | | |
- FIXADDR_START | ~-11 MB | ffffffffff5fffff | ~0.5 MB | kernel-internal fixmap range, variable size and offset
- ffffffffff600000 | -10 MB | ffffffffff600fff | 4 kB | legacy vsyscall ABI
- ffffffffffe00000 | -2 MB | ffffffffffffffff | 2 MB | ... unused hole
-__________________|____________|__________________|_________|___________________________________________________________
-
-Architecture defines a 64-bit virtual address. Implementations can support
-less. Currently supported are 48- and 57-bit virtual addresses. Bits 63
-through to the most-significant implemented bit are sign extended.
-This causes hole between user space and kernel addresses if you interpret them
-as unsigned.
-
-The direct mapping covers all memory in the system up to the highest
-memory address (this means in some cases it can also include PCI memory
-holes).
-
-vmalloc space is lazily synchronized into the different PML4/PML5 pages of
-the processes using the page fault handler, with init_top_pgt as
-reference.
-
-We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
-memory window (this size is arbitrary, it can be raised later if needed).
-The mappings are not part of any other kernel PGD and are only available
-during EFI runtime calls.
-
-Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
-physical memory, vmalloc/ioremap space and virtual memory map are randomized.
-Their order is preserved but their base will be offset early at boot time.
-
-Be very careful vs. KASLR when changing anything here. The KASLR address
-range must not overlap with anything except the KASAN shadow area, which is
-correct as KASAN disables KASLR.
-
-For both 4- and 5-level layouts, the STACKLEAK_POISON value in the last 2MB
-hole: ffffffffffff4111
diff --git a/Documentation/x86/x86_64/uefi.txt b/Documentation/x86/x86_64/uefi.rst
index a5e2b4fdb170..88c3ba32546f 100644
--- a/Documentation/x86/x86_64/uefi.txt
+++ b/Documentation/x86/x86_64/uefi.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
General note on [U]EFI x86_64 support
--------------------------------------
+=====================================
The nomenclature EFI and UEFI are used interchangeably in this document.
@@ -14,29 +17,42 @@ with EFI firmware and specifications are listed below.
3. x86_64 platform with EFI/UEFI firmware.
-Mechanics:
+Mechanics
---------
-- Build the kernel with the following configuration.
+
+- Build the kernel with the following configuration::
+
CONFIG_FB_EFI=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+
If EFI runtime services are expected, the following configuration should
- be selected.
+ be selected::
+
CONFIG_EFI=y
CONFIG_EFI_VARS=y or m # optional
+
- Create a VFAT partition on the disk
- Copy the following to the VFAT partition:
+
elilo bootloader with x86_64 support, elilo configuration file,
kernel image built in first step and corresponding
initrd. Instructions on building elilo and its dependencies
can be found in the elilo sourceforge project.
+
- Boot to EFI shell and invoke elilo choosing the kernel image built
in first step.
- If some or all EFI runtime services don't work, you can try following
kernel command line parameters to turn off some or all EFI runtime
services.
- noefi turn off all EFI runtime services
- reboot_type=k turn off EFI reboot runtime service
+
+ noefi
+ turn off all EFI runtime services
+ reboot_type=k
+ turn off EFI reboot runtime service
+
- If the EFI memory map has additional entries not in the E820 map,
you can include those entries in the kernels memory map of available
physical RAM by using the following kernel command line parameter.
- add_efi_memmap include EFI memory map of available physical RAM
+
+ add_efi_memmap
+ include EFI memory map of available physical RAM
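
For illustration only (not part of this patch), these parameters are simply appended to
the kernel command line by the bootloader. The elilo configuration below is a hypothetical
example; the file name, kernel image name and root device are assumptions::

    # hypothetical /boot/efi/elilo.conf
    image=vmlinuz-5.2
            label=linux
            initrd=initrd.img-5.2
            append="root=/dev/sda2 reboot_type=k add_efi_memmap"
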
diff --git a/Documentation/x86/zero-page.rst b/Documentation/x86/zero-page.rst
new file mode 100644
index 000000000000..f088f5881666
--- /dev/null
+++ b/Documentation/x86/zero-page.rst
@@ -0,0 +1,45 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========
+Zero Page
+=========
+This document describes the additional fields in struct boot_params that are
+part of the kernel's 32-bit boot protocol. They should be filled in by the
+bootloader or by the kernel's 16-bit real-mode setup code. References to and
+settings of these fields are mainly in::
+
+ arch/x86/include/uapi/asm/bootparam.h
+
+=========== ===== ======================= =================================================
+Offset/Size Proto Name                    Meaning
+
+000/040     ALL   screen_info             Text mode or frame buffer information
+                                          (struct screen_info)
+040/014     ALL   apm_bios_info           APM BIOS information (struct apm_bios_info)
+058/008     ALL   tboot_addr              Physical address of tboot shared page
+060/010     ALL   ist_info                Intel SpeedStep (IST) BIOS support information
+                                          (struct ist_info)
+080/010     ALL   hd0_info                hd0 disk parameter, OBSOLETE!!
+090/010     ALL   hd1_info                hd1 disk parameter, OBSOLETE!!
+0A0/010     ALL   sys_desc_table          System description table (struct sys_desc_table),
+                                          OBSOLETE!!
+0B0/010     ALL   olpc_ofw_header         OLPC's OpenFirmware CIF and friends
+0C0/004     ALL   ext_ramdisk_image       ramdisk_image high 32bits
+0C4/004     ALL   ext_ramdisk_size        ramdisk_size high 32bits
+0C8/004     ALL   ext_cmd_line_ptr        cmd_line_ptr high 32bits
+140/080     ALL   edid_info               Video mode setup (struct edid_info)
+1C0/020     ALL   efi_info                EFI 32 information (struct efi_info)
+1E0/004     ALL   alt_mem_k               Alternative mem check, in KB
+1E4/004     ALL   scratch                 Scratch field for the kernel setup code
+1E8/001     ALL   e820_entries            Number of entries in e820_table (below)
+1E9/001     ALL   eddbuf_entries          Number of entries in eddbuf (below)
+1EA/001     ALL   edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
+                                          (below)
+1EB/001     ALL   kbd_status              Numlock is enabled
+1EC/001     ALL   secure_boot             Secure boot is enabled in the firmware
+1EF/001     ALL   sentinel                Used to detect broken bootloaders
+290/040     ALL   edd_mbr_sig_buffer      EDD MBR signatures
+2D0/A00     ALL   e820_table              E820 memory map table
+                                          (array of struct e820_entry)
+D00/1EC     ALL   eddbuf                  EDD data (array of struct edd_info)
+=========== ===== ======================= =================================================
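
As a cross-check (illustrative only, not part of this patch), the offsets in the table can
be compared against the structure layout exported by the UAPI header. The field names below
assume the current bootparam.h, and the program assumes the x86 UAPI headers are installed::

    #include <stdio.h>
    #include <stddef.h>
    #include <asm/bootparam.h>      /* struct boot_params */

    int main(void)
    {
            /* Expected per the table above: 0x1e8 and 0x2d0. */
            printf("e820_entries at 0x%zx\n",
                   offsetof(struct boot_params, e820_entries));
            printf("e820_table   at 0x%zx\n",
                   offsetof(struct boot_params, e820_table));
            return 0;
    }
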
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
deleted file mode 100644
index 68aed077f7b6..000000000000
--- a/Documentation/x86/zero-page.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-The additional fields in struct boot_params as a part of 32-bit boot
-protocol of kernel. These should be filled by bootloader or 16-bit
-real-mode setup code of the kernel. References/settings to it mainly
-are in:
-
- arch/x86/include/uapi/asm/bootparam.h
-
-
-Offset Proto Name Meaning
-/Size
-
-000/040 ALL screen_info Text mode or frame buffer information
- (struct screen_info)
-040/014 ALL apm_bios_info APM BIOS information (struct apm_bios_info)
-058/008 ALL tboot_addr Physical address of tboot shared page
-060/010 ALL ist_info Intel SpeedStep (IST) BIOS support information
- (struct ist_info)
-080/010 ALL hd0_info hd0 disk parameter, OBSOLETE!!
-090/010 ALL hd1_info hd1 disk parameter, OBSOLETE!!
-0A0/010 ALL sys_desc_table System description table (struct sys_desc_table),
- OBSOLETE!!
-0B0/010 ALL olpc_ofw_header OLPC's OpenFirmware CIF and friends
-0C0/004 ALL ext_ramdisk_image ramdisk_image high 32bits
-0C4/004 ALL ext_ramdisk_size ramdisk_size high 32bits
-0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
-140/080 ALL edid_info Video mode setup (struct edid_info)
-1C0/020 ALL efi_info EFI 32 information (struct efi_info)
-1E0/004 ALL alt_mem_k Alternative mem check, in KB
-1E4/004 ALL scratch Scratch field for the kernel setup code
-1E8/001 ALL e820_entries Number of entries in e820_table (below)
-1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
-1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer
- (below)
-1EB/001 ALL kbd_status Numlock is enabled
-1EC/001 ALL secure_boot Secure boot is enabled in the firmware
-1EF/001 ALL sentinel Used to detect broken bootloaders
-290/040 ALL edd_mbr_sig_buffer EDD MBR signatures
-2D0/A00 ALL e820_table E820 memory map table
- (array of struct e820_entry)
-D00/1EC ALL eddbuf EDD data (array of struct edd_info)
diff --git a/Documentation/xilinx/eemi.txt b/Documentation/xilinx/eemi.txt
index 0ab686c173be..5f39b4ffdcd4 100644
--- a/Documentation/xilinx/eemi.txt
+++ b/Documentation/xilinx/eemi.txt
@@ -41,8 +41,8 @@ Example of EEMI ops usage:
int ret;
eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops)
- return -ENXIO;
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
ret = eemi_ops->query_data(qdata, ret_payload);
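
To see the hunk above in context, a complete caller under the new convention could look like
the sketch below. Only zynqmp_pm_get_eemi_ops(), the query_data() op, IS_ERR()/PTR_ERR() and
PAYLOAD_ARG_CNT come from the documented API; the function name and the qdata argument are
invented for illustration::

    /* Hypothetical caller showing the IS_ERR()/PTR_ERR() pattern. */
    #include <linux/err.h>
    #include <linux/types.h>
    #include <linux/firmware/xlnx-zynqmp.h>

    static int example_query(struct zynqmp_pm_query_data qdata)
    {
            const struct zynqmp_eemi_ops *eemi_ops;
            u32 ret_payload[PAYLOAD_ARG_CNT];

            eemi_ops = zynqmp_pm_get_eemi_ops();
            if (IS_ERR(eemi_ops))
                    return PTR_ERR(eemi_ops);

            return eemi_ops->query_data(qdata, ret_payload);
    }
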
diff --git a/LICENSES/other/GPL-1.0 b/LICENSES/deprecated/GPL-1.0
index 3a4fa969e4c2..3a4fa969e4c2 100644
--- a/LICENSES/other/GPL-1.0
+++ b/LICENSES/deprecated/GPL-1.0
diff --git a/LICENSES/other/ISC b/LICENSES/deprecated/ISC
index 8953c3142079..8953c3142079 100644
--- a/LICENSES/other/ISC
+++ b/LICENSES/deprecated/ISC
diff --git a/LICENSES/other/Linux-OpenIB b/LICENSES/deprecated/Linux-OpenIB
index 1ad85f6b3a89..1ad85f6b3a89 100644
--- a/LICENSES/other/Linux-OpenIB
+++ b/LICENSES/deprecated/Linux-OpenIB
diff --git a/LICENSES/other/X11 b/LICENSES/deprecated/X11
index fe4353fd0000..fe4353fd0000 100644
--- a/LICENSES/other/X11
+++ b/LICENSES/deprecated/X11
diff --git a/LICENSES/other/Apache-2.0 b/LICENSES/dual/Apache-2.0
index 7cd903f573e5..6e89ddeab187 100644
--- a/LICENSES/other/Apache-2.0
+++ b/LICENSES/dual/Apache-2.0
@@ -1,6 +1,10 @@
Valid-License-Identifier: Apache-2.0
SPDX-URL: https://spdx.org/licenses/Apache-2.0.html
Usage-Guide:
+ Do NOT use. The Apache-2.0 is not GPL2 compatible. It may only be used
+ for dual-licensed files where the other license is GPL2 compatible.
+ If you end up using this it MUST be used together with a GPL2 compatible
+ license using "OR".
To use the Apache License version 2.0 put the following SPDX tag/value
pair into a comment according to the placement guidelines in the
licensing rules documentation:
diff --git a/LICENSES/other/CDDL-1.0 b/LICENSES/dual/CDDL-1.0
index 25f614276ddd..b0ca1016db79 100644
--- a/LICENSES/other/CDDL-1.0
+++ b/LICENSES/dual/CDDL-1.0
@@ -1,8 +1,8 @@
Valid-License-Identifier: CDDL-1.0
SPDX-URL: https://spdx.org/licenses/CDDL-1.0.html
Usage-Guide:
- Do NOT use. The CDDL-1.0 is not GPL compatible. It may only be used for
- dual-licensed files where the other license is GPL compatible.
+ Do NOT use. The CDDL-1.0 is not GPL2 compatible. It may only be used for
+ dual-licensed files where the other license is GPL2 compatible.
If you end up using this it MUST be used together with a GPL2 compatible
license using "OR".
To use the Common Development and Distribution License 1.0 put the
diff --git a/LICENSES/other/MPL-1.1 b/LICENSES/dual/MPL-1.1
index 568b6049efe6..61706859e1b2 100644
--- a/LICENSES/other/MPL-1.1
+++ b/LICENSES/dual/MPL-1.1
@@ -1,6 +1,10 @@
Valid-License-Identifier: MPL-1.1
SPDX-URL: https://spdx.org/licenses/MPL-1.1.html
Usage-Guide:
+ Do NOT use. The MPL-1.1 is not GPL2 compatible. It may only be used for
+ dual-licensed files where the other license is GPL2 compatible.
+ If you end up using this it MUST be used together with a GPL2 compatible
+ license using "OR".
To use the Mozilla Public License version 1.1 put the following SPDX
tag/value pair into a comment according to the placement guidelines in
the licensing rules documentation:
diff --git a/MAINTAINERS b/MAINTAINERS
index 5c38f21aee78..5cfbea4ce575 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -268,12 +268,13 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-104-idio-16.c
-ACCES 104-QUAD-8 IIO DRIVER
+ACCES 104-QUAD-8 DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
L: linux-iio@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
F: Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
-F: drivers/iio/counter/104-quad-8.c
+F: drivers/counter/104-quad-8.c
ACCES PCI-IDIO-16 GPIO DRIVER
M: William Breathitt Gray <vilhelm.gray@gmail.com>
@@ -468,7 +469,7 @@ ADM1025 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/adm1025
+F: Documentation/hwmon/adm1025.rst
F: drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER
@@ -520,7 +521,7 @@ ADS1015 HARDWARE MONITOR DRIVER
M: Dirk Eibach <eibach@gdsys.de>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ads1015
+F: Documentation/hwmon/ads1015.rst
F: drivers/hwmon/ads1015.c
F: include/linux/platform_data/ads1015.h
@@ -533,7 +534,7 @@ ADT7475 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/adt7475
+F: Documentation/hwmon/adt7475.rst
F: drivers/hwmon/adt7475.c
ADVANSYS SCSI DRIVER
@@ -709,6 +710,12 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-altera.c
+ALTERA SYSTEM MANAGER DRIVER
+M: Thor Thayer <thor.thayer@linux.intel.com>
+S: Maintained
+F: drivers/mfd/altera-sysmgr.c
+F: include/linux/mfd/altera-sysmgr.h
+
ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
M: Thor Thayer <thor.thayer@linux.intel.com>
S: Maintained
@@ -735,6 +742,12 @@ F: drivers/tty/serial/altera_jtaguart.c
F: include/linux/altera_uart.h
F: include/linux/altera_jtaguart.h
+AMAZON ANNAPURNA LABS THERMAL MMIO DRIVER
+M: Talel Shenhar <talel@amazon.com>
+S: Maintained
+F: Documentation/devicetree/bindings/thermal/amazon,al-thermal.txt
+F: drivers/thermal/thermal_mmio.c
+
AMAZON ETHERNET DRIVERS
M: Netanel Belgazal <netanel@amazon.com>
R: Saeed Bishara <saeedb@amazon.com>
@@ -744,6 +757,15 @@ S: Supported
F: Documentation/networking/device_drivers/amazon/ena.txt
F: drivers/net/ethernet/amazon/
+AMAZON RDMA EFA DRIVER
+M: Gal Pressman <galpress@amazon.com>
+R: Yossi Leybovich <sleybo@amazon.com>
+L: linux-rdma@vger.kernel.org
+Q: https://patchwork.kernel.org/project/linux-rdma/list/
+S: Supported
+F: drivers/infiniband/hw/efa/
+F: include/uapi/rdma/efa-abi.h
+
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
M: Tom Lendacky <thomas.lendacky@amd.com>
M: Gary Hook <gary.hook@amd.com>
@@ -764,7 +786,7 @@ AMD FAM15H PROCESSOR POWER MONITORING DRIVER
M: Huang Rui <ray.huang@amd.com>
L: linux-hwmon@vger.kernel.org
S: Supported
-F: Documentation/hwmon/fam15h_power
+F: Documentation/hwmon/fam15h_power.rst
F: drivers/hwmon/fam15h_power.c
AMD FCH GPIO DRIVER
@@ -816,6 +838,14 @@ F: drivers/gpu/drm/amd/include/vi_structs.h
F: drivers/gpu/drm/amd/include/v9_structs.h
F: include/uapi/linux/kfd_ioctl.h
+AMD MP2 I2C DRIVER
+M: Elie Morisse <syniurge@gmail.com>
+M: Nehal Shah <nehal-bakulchandra.shah@amd.com>
+M: Shyam Sundar S K <shyam-sundar.s-k@amd.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-amd-mp2*
+
AMD POWERPLAY
M: Rex Zhu <rex.zhu@amd.com>
M: Evan Quan <evan.quan@amd.com>
@@ -868,7 +898,7 @@ L: linux-iio@vger.kernel.org
W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/iio/adc/ad7606.c
-F: Documentation/devicetree/bindings/iio/adc/ad7606.txt
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
ANALOG DEVICES INC AD7768-1 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
@@ -950,6 +980,7 @@ F: drivers/dma/dma-axi-dmac.c
ANALOG DEVICES INC IIO DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
M: Michael Hennerich <Michael.Hennerich@analog.com>
+M: Stefan Popa <stefan.popa@analog.com>
W: http://wiki.analog.com/
W: http://ez.analog.com/community/linux-device-drivers
S: Supported
@@ -960,10 +991,16 @@ F: drivers/iio/adc/ltc2497*
X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
+ANALOGBITS PLL LIBRARIES
+M: Paul Walmsley <paul.walmsley@sifive.com>
+S: Supported
+F: drivers/clk/analogbits/*
+F: include/linux/clk/analogbits*
+
ANDES ARCHITECTURE
M: Greentime Hu <green.hu@gmail.com>
M: Vincent Chen <deanbo422@gmail.com>
-T: git https://github.com/andestech/linux.git
+T: git https://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux.git
S: Supported
F: arch/nds32/
F: Documentation/devicetree/bindings/interrupt-controller/andestech,ativic32.txt
@@ -1167,7 +1204,7 @@ S: Supported
T: git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
F: drivers/gpu/drm/arm/display/include/
F: drivers/gpu/drm/arm/display/komeda/
-F: Documentation/devicetree/bindings/display/arm/arm,komeda.txt
+F: Documentation/devicetree/bindings/display/arm,komeda.txt
F: Documentation/gpu/komeda-kms.rst
ARM MALI-DP DRM DRIVER
@@ -1180,6 +1217,15 @@ F: drivers/gpu/drm/arm/
F: Documentation/devicetree/bindings/display/arm,malidp.txt
F: Documentation/gpu/afbc.rst
+ARM MALI PANFROST DRM DRIVER
+M: Rob Herring <robh@kernel.org>
+M: Tomeu Vizoso <tomeu.vizoso@collabora.com>
+L: dri-devel@lists.freedesktop.org
+S: Supported
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/panfrost/
+F: include/uapi/drm/panfrost_drm.h
+
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro@f2s.com>
S: Maintained
@@ -1416,7 +1462,9 @@ M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm64/boot/dts/bitmain/
+F: drivers/pinctrl/pinctrl-bm1880.c
F: Documentation/devicetree/bindings/arm/bitmain.yaml
+F: Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
ARM/CALXEDA HIGHBANK ARCHITECTURE
M: Rob Herring <robh@kernel.org>
@@ -1685,11 +1733,21 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IXP4XX ARM ARCHITECTURE
+M: Linus Walleij <linusw@kernel.org>
M: Imre Kaloz <kaloz@openwrt.org>
M: Krzysztof Halasa <khalasa@piap.pl>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
+F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
+F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
+F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
F: arch/arm/mach-ixp4xx/
+F: drivers/clocksource/timer-ixp4xx.c
+F: drivers/gpio/gpio-ixp4xx.c
+F: drivers/irqchip/irq-ixp4xx.c
+F: include/linux/irqchip/irq-ixp4xx.h
+F: include/linux/platform_data/timer-ixp4xx.h
ARM/INTEL RESEARCH IMOTE/STARGATE 2 MACHINE SUPPORT
M: Jonathan Cameron <jic23@cam.ac.uk>
@@ -1985,7 +2043,7 @@ W: http://www.armlinux.org.uk/
S: Maintained
ARM/QUALCOMM SUPPORT
-M: Andy Gross <andy.gross@linaro.org>
+M: Andy Gross <agross@kernel.org>
M: David Brown <david.brown@linaro.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
@@ -2190,6 +2248,7 @@ F: arch/arm/mach-socfpga/
F: arch/arm/boot/dts/socfpga*
F: arch/arm/configs/socfpga_defconfig
F: arch/arm64/boot/dts/altera/
+F: arch/arm64/boot/dts/intel/
W: http://www.rocketboards.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
@@ -2513,7 +2572,7 @@ ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/asc7621
+F: Documentation/hwmon/asc7621.rst
F: drivers/hwmon/asc7621.c
ASPEED VIDEO ENGINE DRIVER
@@ -2560,7 +2619,7 @@ F: include/linux/dmaengine.h
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-i2c@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
S: Maintained
@@ -2794,10 +2853,13 @@ M: Simon Wunderlich <sw@simonwunderlich.de>
M: Antonio Quartulli <a@unstable.cc>
L: b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
W: https://www.open-mesh.org/
+B: https://www.open-mesh.org/projects/batman-adv/issues
+C: irc://chat.freenode.net/batman
Q: https://patchwork.open-mesh.org/project/batman/list/
+T: git https://git.open-mesh.org/linux-merge.git
S: Maintained
-F: Documentation/ABI/testing/sysfs-class-net-batman-adv
-F: Documentation/ABI/testing/sysfs-class-net-mesh
+F: Documentation/ABI/obsolete/sysfs-class-net-batman-adv
+F: Documentation/ABI/obsolete/sysfs-class-net-mesh
F: Documentation/networking/batman-adv.rst
F: include/uapi/linux/batadv_packet.h
F: include/uapi/linux/batman_adv.h
@@ -3353,7 +3415,7 @@ F: include/uapi/linux/bsg.h
BT87X AUDIO DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: Documentation/sound/cards/bt87x.rst
F: sound/pci/bt87x.c
@@ -3406,7 +3468,7 @@ F: drivers/scsi/FlashPoint.*
C-MEDIA CMI8788 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: sound/pci/oxygen/
@@ -3730,8 +3792,8 @@ F: scripts/checkpatch.pl
CHINESE DOCUMENTATION
M: Harry Wei <harryxiyou@gmail.com>
+M: Alex Shi <alex.shi@linux.alibaba.com>
L: xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
-L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
F: Documentation/translations/zh_CN/
@@ -3798,16 +3860,21 @@ M: Richard Fitzgerald <rf@opensource.cirrus.com>
L: patches@opensource.cirrus.com
S: Supported
F: drivers/clk/clk-lochnagar.c
+F: drivers/hwmon/lochnagar-hwmon.c
F: drivers/mfd/lochnagar-i2c.c
F: drivers/pinctrl/cirrus/pinctrl-lochnagar.c
F: drivers/regulator/lochnagar-regulator.c
+F: sound/soc/codecs/lochnagar-sc.c
F: include/dt-bindings/clk/lochnagar.h
F: include/dt-bindings/pinctrl/lochnagar.h
F: include/linux/mfd/lochnagar*
F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
+F: Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
+F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
+F: Documentation/hwmon/lochnagar
CISCO FCOE HBA DRIVER
M: Satish Kharat <satishkh@cisco.com>
@@ -4045,7 +4112,7 @@ CORETEMP HARDWARE MONITORING DRIVER
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/coretemp
+F: Documentation/hwmon/coretemp.rst
F: drivers/hwmon/coretemp.c
COSA/SRP SYNC SERIAL DRIVER
@@ -4054,6 +4121,16 @@ W: http://www.fi.muni.cz/~kas/cosa/
S: Maintained
F: drivers/net/wan/cosa*
+COUNTER SUBSYSTEM
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-counter*
+F: Documentation/driver-api/generic-counter.rst
+F: drivers/counter/
+F: include/linux/counter.h
+F: include/linux/counter_enum.h
+
CPMAC ETHERNET DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
@@ -4073,7 +4150,9 @@ F: Documentation/admin-guide/pm/intel_pstate.rst
F: Documentation/cpu-freq/
F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
+F: kernel/sched/cpufreq*.c
F: include/linux/cpufreq.h
+F: include/linux/sched/cpufreq.h
F: tools/testing/selftests/cpufreq/
CPU FREQUENCY DRIVERS - ARM BIG LITTLE
@@ -4248,7 +4327,7 @@ S: Supported
F: drivers/scsi/cxgbi/cxgb3i
CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
-M: Steve Wise <swise@chelsio.com>
+M: Potnuri Bharat Teja <bharat@chelsio.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org
S: Supported
@@ -4256,7 +4335,7 @@ F: drivers/infiniband/hw/cxgb3/
F: include/uapi/rdma/cxgb3-abi.h
CXGB4 CRYPTO DRIVER (chcr)
-M: Harsh Jain <harsh@chelsio.com>
+M: Atul Gupta <atul.gupta@chelsio.com>
L: linux-crypto@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4277,7 +4356,7 @@ S: Supported
F: drivers/scsi/cxgbi/cxgb4i
CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
-M: Steve Wise <swise@chelsio.com>
+M: Potnuri Bharat Teja <bharat@chelsio.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org
S: Supported
@@ -4293,7 +4372,7 @@ F: drivers/net/ethernet/chelsio/cxgb4vf/
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
M: Frederic Barrat <fbarrat@linux.ibm.com>
-M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+M: Andrew Donnellan <ajd@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/platforms/powernv/pci-cxl.c
@@ -4553,6 +4632,7 @@ S: Maintained
F: drivers/devfreq/
F: include/linux/devfreq.h
F: Documentation/devicetree/bindings/devfreq/
+F: include/trace/events/devfreq.h
DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
M: Chanwoo Choi <cw00.choi@samsung.com>
@@ -4600,7 +4680,7 @@ DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource@diasemi.com>
W: http://www.dialog-semiconductor.com/products
S: Supported
-F: Documentation/hwmon/da90??
+F: Documentation/hwmon/da90??.rst
F: Documentation/devicetree/bindings/mfd/da90*.txt
F: Documentation/devicetree/bindings/input/da90??-onkey.txt
F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
@@ -4751,7 +4831,7 @@ DME1737 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/dme1737
+F: Documentation/hwmon/dme1737.rst
F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT
@@ -4896,6 +4976,14 @@ M: Dave Airlie <airlied@redhat.com>
S: Odd Fixes
F: drivers/gpu/drm/ast/
+DRM DRIVER FOR ASPEED BMC GFX
+M: Joel Stanley <joel@jms.id.au>
+L: linux-aspeed@lists.ozlabs.org
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Supported
+F: drivers/gpu/drm/aspeed/
+F: Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
+
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux-foundation.org
@@ -4909,6 +4997,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tve200/
+DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
+M: Jagan Teki <jagan@amarulasolutions.com>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+F: Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
+
DRM DRIVER FOR ILITEK ILI9225 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
@@ -5000,6 +5094,12 @@ S: Orphan / Obsolete
F: drivers/gpu/drm/r128/
F: include/uapi/drm/r128_drm.h
+DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
+M: Guido Günther <agx@sigxcpu.org>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
+
DRM DRIVER FOR SAVAGE VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/savage/
@@ -5047,6 +5147,13 @@ S: Odd Fixes
F: drivers/gpu/drm/udl/
T: git git://anongit.freedesktop.org/drm/drm-misc
+DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
+M: Hans de Goede <hdegoede@redhat.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/vboxvideo/
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
M: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
R: Haneen Mohammed <hamohammed.sa@gmail.com>
@@ -5181,6 +5288,15 @@ S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/
+DRM DRIVERS FOR LIMA
+M: Qiang Yu <yuq825@gmail.com>
+L: dri-devel@lists.freedesktop.org
+L: lima@lists.freedesktop.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/gpu/drm/lima/
+F: include/uapi/drm/lima_drm.h
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR MEDIATEK
M: CK Hu <ck.hu@mediatek.com>
M: Philipp Zabel <p.zabel@pengutronix.de>
@@ -5598,6 +5714,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/ghes_edac.c
+EDAC-I10NM
+M: Tony Luck <tony.luck@intel.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/i10nm_base.c
+
EDAC-I3000
L: linux-edac@vger.kernel.org
S: Orphan
@@ -5679,7 +5801,7 @@ EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
-F: drivers/edac/skx_edac.c
+F: drivers/edac/skx_*.c
EDAC-TI
M: Tero Kristo <t-kristo@ti.com>
@@ -5698,7 +5820,7 @@ F: drivers/edac/qcom_edac.c
EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: sound/usb/misc/ua101.c
@@ -5937,7 +6059,7 @@ F71805F HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/f71805f
+F: Documentation/hwmon/f71805f.rst
F: drivers/hwmon/f71805f.c
FADDR2LINE
@@ -6038,7 +6160,7 @@ F: include/linux/f75375s.h
FIREWIRE AUDIO DRIVERS
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: sound/firewire/
@@ -6462,7 +6584,7 @@ S: Maintained
F: drivers/media/radio/radio-gemtek*
GENERIC GPIO I2C DRIVER
-M: Haavard Skinnemoen <hskinnemoen@gmail.com>
+M: Wolfram Sang <wsa+renesas@sang-engineering.com>
S: Supported
F: drivers/i2c/busses/i2c-gpio.c
F: include/linux/platform_data/i2c-gpio.h
@@ -6594,7 +6716,7 @@ M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
L: linux-gpio@vger.kernel.org
L: linux-acpi@vger.kernel.org
S: Maintained
-F: Documentation/acpi/gpio-properties.txt
+F: Documentation/firmware-guide/acpi/gpio-properties.rst
F: drivers/gpio/gpiolib-acpi.c
GPIO IR Transmitter
@@ -7386,13 +7508,12 @@ S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*
IBM Power Virtual Accelerator Switchboard
-M: Sukadev Bhattiprolu
+M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/platforms/powernv/vas*
F: arch/powerpc/platforms/powernv/copy-paste.h
F: arch/powerpc/include/asm/vas.h
-F: arch/powerpc/include/uapi/asm/vas.h
IBM Power Virtual Ethernet Device Driver
M: Thomas Falcon <tlfalcon@linux.ibm.com>
@@ -7439,14 +7560,14 @@ F: drivers/crypto/vmx/ghash*
F: drivers/crypto/vmx/ppc-xlate.pl
IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
-M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: drivers/pci/hotplug/rpaphp*
IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
-M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-pci@vger.kernel.org
L: linuxppc-dev@lists.ozlabs.org
S: Supported
@@ -7618,7 +7739,7 @@ INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ina209
+F: Documentation/hwmon/ina209.rst
F: Documentation/devicetree/bindings/hwmon/ina2xx.txt
F: drivers/hwmon/ina209.c
@@ -7626,7 +7747,7 @@ INA2XX HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ina2xx
+F: Documentation/hwmon/ina2xx.rst
F: drivers/hwmon/ina2xx.c
F: include/linux/platform_data/ina2xx.h
@@ -7653,6 +7774,10 @@ F: drivers/infiniband/
F: include/uapi/linux/if_infiniband.h
F: include/uapi/rdma/
F: include/rdma/
+F: include/trace/events/ib_mad.h
+F: include/trace/events/ib_umad.h
+F: samples/bpf/ibumad_kern.c
+F: samples/bpf/ibumad_user.c
INGENIC JZ4780 DMA Driver
M: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
@@ -7876,10 +8001,10 @@ F: Documentation/media/v4l-drivers/ipu3.rst
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
S: Maintained
-F: arch/arm/mach-ixp4xx/include/mach/qmgr.h
-F: arch/arm/mach-ixp4xx/include/mach/npe.h
-F: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
-F: arch/arm/mach-ixp4xx/ixp4xx_npe.c
+F: include/linux/soc/ixp4xx/qmgr.h
+F: include/linux/soc/ixp4xx/npe.h
+F: drivers/soc/ixp4xx/ixp4xx-qmgr.c
+F: drivers/soc/ixp4xx/ixp4xx-npe.c
F: drivers/net/ethernet/xscale/ixp4xx_eth.c
F: drivers/net/wan/ixp4xx_hss.c
@@ -8046,6 +8171,7 @@ F: drivers/gpio/gpio-intel-mid.c
INTERCONNECT API
M: Georgi Djakov <georgi.djakov@linaro.org>
+L: linux-pm@vger.kernel.org
S: Maintained
F: Documentation/interconnect/
F: Documentation/devicetree/bindings/interconnect/
@@ -8254,7 +8380,7 @@ IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/it87
+F: Documentation/hwmon/it87.rst
F: drivers/hwmon/it87.c
IT913X MEDIA DRIVER
@@ -8298,7 +8424,7 @@ M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
F: drivers/hwmon/jc42.c
-F: Documentation/hwmon/jc42
+F: Documentation/hwmon/jc42.rst
JFS FILESYSTEM
M: Dave Kleikamp <shaggy@kernel.org>
@@ -8317,9 +8443,11 @@ F: drivers/net/ethernet/jme.*
JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
M: David Woodhouse <dwmw2@infradead.org>
+M: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/doc/jffs2.html
-S: Maintained
+T: git git://git.infradead.org/ubifs-2.6.git
+S: Odd Fixes
F: fs/jffs2/
F: include/uapi/linux/jffs2.h
@@ -8346,14 +8474,14 @@ K10TEMP HARDWARE MONITORING DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/k10temp
+F: Documentation/hwmon/k10temp.rst
F: drivers/hwmon/k10temp.c
K8TEMP HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/k8temp
+F: Documentation/hwmon/k8temp.rst
F: drivers/hwmon/k8temp.c
KASAN
@@ -8414,6 +8542,7 @@ F: scripts/Kbuild*
F: scripts/Makefile*
F: scripts/basic/
F: scripts/mk*
+F: scripts/*vmlinux*
F: scripts/mod/
F: scripts/package/
@@ -8994,7 +9123,7 @@ R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
L: linux-arch@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: tools/memory-model/
F: Documentation/atomic_bitops.txt
F: Documentation/atomic_t.txt
@@ -9045,21 +9174,21 @@ LM78 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm78
+F: Documentation/hwmon/lm78.rst
F: drivers/hwmon/lm78.c
LM83 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm83
+F: Documentation/hwmon/lm83.rst
F: drivers/hwmon/lm83.c
LM90 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm90
+F: Documentation/hwmon/lm90.rst
F: Documentation/devicetree/bindings/hwmon/lm90.txt
F: drivers/hwmon/lm90.c
F: include/dt-bindings/thermal/lm90.h
@@ -9068,7 +9197,7 @@ LM95234 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/lm95234
+F: Documentation/hwmon/lm95234.rst
F: drivers/hwmon/lm95234.c
LME2510 MEDIA DRIVER
@@ -9100,7 +9229,6 @@ F: arch/*/include/asm/spinlock*.h
F: include/linux/rwlock*.h
F: include/linux/mutex*.h
F: include/linux/rwsem*.h
-F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
F: lib/locking*.[ch]
F: kernel/locking/
@@ -9142,7 +9270,7 @@ LTC4261 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/ltc4261
+F: Documentation/hwmon/ltc4261.rst
F: drivers/hwmon/ltc4261.c
LTC4306 I2C MULTIPLEXER DRIVER
@@ -9373,7 +9501,7 @@ MAX16065 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/max16065
+F: Documentation/hwmon/max16065.rst
F: drivers/hwmon/max16065.c
MAX2175 SDR TUNER DRIVER
@@ -9389,14 +9517,14 @@ F: include/uapi/linux/max2175.h
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
L: linux-hwmon@vger.kernel.org
S: Orphan
-F: Documentation/hwmon/max6650
+F: Documentation/hwmon/max6650.rst
F: drivers/hwmon/max6650.c
MAX6697 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/max6697
+F: Documentation/hwmon/max6697.rst
F: Documentation/devicetree/bindings/hwmon/max6697.txt
F: drivers/hwmon/max6697.c
F: include/linux/platform_data/max6697.h
@@ -9408,6 +9536,27 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/max9860.txt
F: sound/soc/codecs/max9860.*
+MAXBOTIX ULTRASONIC RANGER IIO DRIVER
+M: Andreas Klinger <ak@it-klinger.de>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+F: drivers/iio/proximity/mb1232.c
+
+MAXIM MAX77650 PMIC MFD DRIVER
+M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/*/*max77650.txt
+F: Documentation/devicetree/bindings/*/max77650*.txt
+F: include/linux/mfd/max77650.h
+F: drivers/mfd/max77650.c
+F: drivers/regulator/max77650-regulator.c
+F: drivers/power/supply/max77650-charger.c
+F: drivers/input/misc/max77650-onkey.c
+F: drivers/leds/leds-max77650.c
+F: drivers/gpio/gpio-max77650.c
+
MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
M: Javier Martinez Canillas <javier@dowhile0.org>
L: linux-kernel@vger.kernel.org
@@ -9776,9 +9925,17 @@ F: drivers/media/platform/mtk-vpu/
F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
+MEDIATEK MMC/SD/SDIO DRIVER
+M: Chaotian Jing <chaotian.jing@mediatek.com>
+S: Maintained
+F: drivers/mmc/host/mtk-sd.c
+F: Documentation/devicetree/bindings/mmc/mtk-sd.txt
+
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd@nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+R: Ryder Lee <ryder.lee@mediatek.com>
+R: Roy Luo <royluo@google.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@@ -9877,15 +10034,6 @@ F: drivers/net/ethernet/mellanox/mlx5/core/accel/*
F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h
-MELLANOX ETHERNET INNOVA IPSEC DRIVER
-R: Boris Pismenny <borisp@mellanox.com>
-L: netdev@vger.kernel.org
-S: Supported
-W: http://www.mellanox.com
-Q: http://patchwork.ozlabs.org/project/netdev/list/
-F: drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
-F: drivers/net/ethernet/mellanox/mlx5/core/ipsec*
-
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
@@ -10008,14 +10156,15 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
-M: Boris Brezillon <bbrezillon@kernel.org>
M: Marek Vasut <marek.vasut@gmail.com>
+M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Richard Weinberger <richard@nod.at>
+M: Vignesh Raghavendra <vigneshr@ti.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
-T: git git://git.infradead.org/linux-mtd.git master
-T: git git://git.infradead.org/linux-mtd.git mtd/next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/fixes
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git mtd/next
S: Maintained
F: Documentation/devicetree/bindings/mtd/
F: drivers/mtd/
@@ -10042,7 +10191,7 @@ F: drivers/mfd/menf21bmc.c
F: drivers/watchdog/menf21bmc_wdt.c
F: drivers/leds/leds-menf21bmc.c
F: drivers/hwmon/menf21bmc_hwmon.c
-F: Documentation/hwmon/menf21bmc
+F: Documentation/hwmon/menf21bmc.rst
MEN Z069 WATCHDOG DRIVER
M: Johannes Thumshirn <jth@kernel.org>
@@ -10057,6 +10206,7 @@ L: linux-amlogic@lists.infradead.org
W: http://linux-meson.com/
S: Supported
F: drivers/media/platform/meson/ao-cec.c
+F: drivers/media/platform/meson/ao-cec-g12a.c
F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
T: git git://linuxtv.org/media_tree.git
@@ -10113,7 +10263,8 @@ MICROCHIP I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
-F: drivers/i2c/busses/i2c-at91.c
+F: drivers/i2c/busses/i2c-at91.h
+F: drivers/i2c/busses/i2c-at91-*.c
MICROCHIP ISC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
@@ -10389,7 +10540,7 @@ F: arch/arm/mach-mmp/
MMU GATHER AND TLB INVALIDATION
M: Will Deacon <will.deacon@arm.com>
-M: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+M: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Nick Piggin <npiggin@gmail.com>
M: Peter Zijlstra <peterz@infradead.org>
@@ -10435,7 +10586,7 @@ F: include/uapi/linux/meye.h
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <jirislaby@gmail.com>
S: Maintained
-F: Documentation/serial/moxa-smartio
+F: Documentation/serial/moxa-smartio.rst
F: drivers/tty/mxser.*
MR800 AVERMEDIA USB FM RADIO DRIVER
@@ -10622,14 +10773,12 @@ S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
NAND FLASH SUBSYSTEM
-M: Boris Brezillon <bbrezillon@kernel.org>
M: Miquel Raynal <miquel.raynal@bootlin.com>
R: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
-T: git git://git.infradead.org/linux-mtd.git nand/fixes
-T: git git://git.infradead.org/linux-mtd.git nand/next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next
S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
@@ -10670,7 +10819,7 @@ NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/nct6775
+F: Documentation/hwmon/nct6775.rst
F: drivers/hwmon/nct6775.c
NET_FAILOVER MODULE
@@ -10748,6 +10897,7 @@ L: linux-block@vger.kernel.org
L: nbd@other.debian.org
F: Documentation/blockdev/nbd.txt
F: drivers/block/nbd.c
+F: include/trace/events/nbd.h
F: include/uapi/linux/nbd.h
NETWORK DROP MONITOR
@@ -11118,6 +11268,16 @@ F: Documentation/ABI/stable/sysfs-bus-nvmem
F: include/linux/nvmem-consumer.h
F: include/linux/nvmem-provider.h
+NXP FXAS21002C DRIVER
+M: Rui Miguel Silva <rmfrfs@gmail.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt
+F: drivers/iio/gyro/fxas21002c_core.c
+F: drivers/iio/gyro/fxas21002c.h
+F: drivers/iio/gyro/fxas21002c_i2c.c
+F: drivers/iio/gyro/fxas21002c_spi.c
+
NXP SGTL5000 DRIVER
M: Fabio Estevam <festevam@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -11125,6 +11285,12 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/sgtl5000.txt
F: sound/soc/codecs/sgtl5000*
+NXP SJA1105 ETHERNET SWITCH DRIVER
+M: Vladimir Oltean <olteanv@gmail.com>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+F: drivers/net/dsa/sja1105
+
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
S: Maintained
@@ -11173,7 +11339,7 @@ F: tools/objtool/
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
M: Frederic Barrat <fbarrat@linux.ibm.com>
-M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
+M: Andrew Donnellan <ajd@linux.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
F: arch/powerpc/platforms/powernv/ocxl.c
@@ -11604,7 +11770,7 @@ F: Documentation/devicetree/bindings/opp/
OPL4 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: sound/drivers/opl4/
@@ -11620,6 +11786,7 @@ F: include/linux/oprofile.h
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
M: Mark Fasheh <mark@fasheh.com>
M: Joel Becker <jlbec@evilplan.org>
+M: Joseph Qi <joseph.qi@linux.alibaba.com>
L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
W: http://ocfs2.wiki.kernel.org
S: Supported
@@ -11678,6 +11845,14 @@ L: linux-i2c@vger.kernel.org
S: Orphan
F: drivers/i2c/busses/i2c-pasemi.c
+PACKING
+M: Vladimir Oltean <olteanv@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: lib/packing.c
+F: include/linux/packing.h
+F: Documentation/packing.txt
+
PADATA PARALLEL EXECUTION MECHANISM
M: Steffen Klassert <steffen.klassert@secunet.com>
L: linux-crypto@vger.kernel.org
@@ -11764,7 +11939,7 @@ PC87360 HARDWARE MONITORING DRIVER
M: Jim Cromie <jim.cromie@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/pc87360
+F: Documentation/hwmon/pc87360.rst
F: drivers/hwmon/pc87360.c
PC8736x GPIO DRIVER
@@ -11776,7 +11951,7 @@ PC87427 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/pc87427
+F: Documentation/hwmon/pc87427.rst
F: drivers/hwmon/pc87427.c
PCA9532 LED DRIVER
@@ -11891,7 +12066,8 @@ F: include/linux/switchtec.h
F: drivers/ntb/hw/mscc/
PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in>
+M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -12025,6 +12201,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/
S: Supported
F: drivers/pci/controller/
+PCIE DRIVER FOR ANNAPURNA LABS
+M: Jonathan Chocron <jonnyc@amazon.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/controller/dwc/pcie-al.c
+
PCIE DRIVER FOR AMLOGIC MESON
M: Yue Wang <yue.wang@Amlogic.com>
L: linux-pci@vger.kernel.org
@@ -12176,6 +12358,7 @@ F: arch/*/kernel/*/*/perf_event*.c
F: arch/*/include/asm/perf_event.h
F: arch/*/kernel/perf_callchain.c
F: arch/*/events/*
+F: arch/*/events/*/*
F: tools/perf/
PERSONALITY HANDLING
@@ -12344,23 +12527,23 @@ S: Maintained
F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
F: Documentation/devicetree/bindings/hwmon/max31785.txt
F: Documentation/devicetree/bindings/hwmon/ltc2978.txt
-F: Documentation/hwmon/adm1275
-F: Documentation/hwmon/ibm-cffps
-F: Documentation/hwmon/ir35221
-F: Documentation/hwmon/lm25066
-F: Documentation/hwmon/ltc2978
-F: Documentation/hwmon/ltc3815
-F: Documentation/hwmon/max16064
-F: Documentation/hwmon/max20751
-F: Documentation/hwmon/max31785
-F: Documentation/hwmon/max34440
-F: Documentation/hwmon/max8688
-F: Documentation/hwmon/pmbus
-F: Documentation/hwmon/pmbus-core
-F: Documentation/hwmon/tps40422
-F: Documentation/hwmon/ucd9000
-F: Documentation/hwmon/ucd9200
-F: Documentation/hwmon/zl6100
+F: Documentation/hwmon/adm1275.rst
+F: Documentation/hwmon/ibm-cffps.rst
+F: Documentation/hwmon/ir35221.rst
+F: Documentation/hwmon/lm25066.rst
+F: Documentation/hwmon/ltc2978.rst
+F: Documentation/hwmon/ltc3815.rst
+F: Documentation/hwmon/max16064.rst
+F: Documentation/hwmon/max20751.rst
+F: Documentation/hwmon/max31785.rst
+F: Documentation/hwmon/max34440.rst
+F: Documentation/hwmon/max8688.rst
+F: Documentation/hwmon/pmbus.rst
+F: Documentation/hwmon/pmbus-core.rst
+F: Documentation/hwmon/tps40422.rst
+F: Documentation/hwmon/ucd9000.rst
+F: Documentation/hwmon/ucd9200.rst
+F: Documentation/hwmon/zl6100.rst
F: drivers/hwmon/pmbus/
F: include/linux/pmbus.h
@@ -12416,7 +12599,7 @@ M: Mark Rutland <mark.rutland@arm.com>
M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
L: linux-arm-kernel@lists.infradead.org
S: Maintained
-F: drivers/firmware/psci*.c
+F: drivers/firmware/psci/
F: include/linux/psci.h
F: include/uapi/linux/psci.h
@@ -12624,7 +12807,7 @@ M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
-F: Documentation/hwmon/pwm-fan
+F: Documentation/hwmon/pwm-fan.rst
F: drivers/hwmon/pwm-fan.c
PWM IR Transmitter
@@ -13042,9 +13225,9 @@ M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: tools/testing/selftests/rcutorture
RDC R-321X SoC
@@ -13090,10 +13273,10 @@ R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
R: Joel Fernandes <joel@joelfernandes.org>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/RCU/
X: Documentation/RCU/torture.txt
F: include/linux/rcu*
@@ -13319,7 +13502,7 @@ ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
S: Maintained
-F: Documentation/serial/rocket.txt
+F: Documentation/serial/rocket.rst
F: drivers/tty/rocket*
ROCKETPORT EXPRESS/INFINITY DRIVER
@@ -13403,6 +13586,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
S: Maintained
F: drivers/net/wireless/realtek/rtlwifi/
+REALTEK WIRELESS DRIVER (rtw88)
+M: Yan-Hsuan Chuang <yhchuang@realtek.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/realtek/rtw88/
+
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -13948,7 +14137,7 @@ W: https://selinuxproject.org
W: https://github.com/SELinuxProject
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
S: Supported
-F: include/linux/selinux*
+F: include/uapi/linux/selinux_netlink.h
F: security/selinux/
F: scripts/selinux/
F: Documentation/admin-guide/LSM/SELinux.rst
@@ -14245,10 +14434,10 @@ M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-L: linux-kernel@vger.kernel.org
+L: rcu@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: include/linux/srcu*.h
F: kernel/rcu/srcu*.c
@@ -14289,21 +14478,21 @@ SMM665 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/smm665
+F: Documentation/hwmon/smm665.rst
F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/emc2103
+F: Documentation/hwmon/emc2103.rst
F: drivers/hwmon/emc2103.c
SMSC SCH5627 HARDWARE MONITOR DRIVER
M: Hans de Goede <hdegoede@redhat.com>
L: linux-hwmon@vger.kernel.org
S: Supported
-F: Documentation/hwmon/sch5627
+F: Documentation/hwmon/sch5627.rst
F: drivers/hwmon/sch5627.c
SMSC UFX6000 and UFX7000 USB to VGA DRIVER
@@ -14316,7 +14505,7 @@ SMSC47B397 HARDWARE MONITOR DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/smsc47b397
+F: Documentation/hwmon/smsc47b397.rst
F: drivers/hwmon/smsc47b397.c
SMSC911x ETHERNET DRIVER
@@ -14336,9 +14525,8 @@ SOC-CAMERA V4L2 SUBSYSTEM
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
S: Orphan
-F: include/media/soc*
-F: drivers/media/i2c/soc_camera/
-F: drivers/media/platform/soc_camera/
+F: include/media/soc_camera.h
+F: drivers/staging/media/soc_camera/
SOCIONEXT SYNQUACER I2C DRIVER
M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
@@ -14474,16 +14662,15 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/imx355.c
-SONY MEMORYSTICK CARD SUPPORT
-M: Alex Dubov <oakad@yahoo.com>
-W: http://tifmxx.berlios.de/
-S: Maintained
-F: drivers/memstick/host/tifm_ms.c
-
-SONY MEMORYSTICK STANDARD SUPPORT
+SONY MEMORYSTICK SUBSYSTEM
M: Maxim Levitsky <maximlevitsky@gmail.com>
+M: Alex Dubov <oakad@yahoo.com>
+M: Ulf Hansson <ulf.hansson@linaro.org>
+L: linux-mmc@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
S: Maintained
-F: drivers/memstick/core/ms_block.*
+F: drivers/memstick/
+F: include/linux/memstick.h
SONY VAIO CONTROL DEVICE DRIVER
M: Mattia Dongili <malattia@linux.it>
@@ -14501,7 +14688,6 @@ M: Takashi Iwai <tiwai@suse.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://www.alsa-project.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
-T: git git://git.alsa-project.org/alsa-kernel.git
Q: http://patchwork.kernel.org/project/alsa-devel/list/
S: Maintained
F: Documentation/sound/
@@ -14614,8 +14800,7 @@ M: Tudor Ambarus <tudor.ambarus@microchip.com>
L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
-T: git git://git.infradead.org/linux-mtd.git spi-nor/fixes
-T: git git://git.infradead.org/linux-mtd.git spi-nor/next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git spi-nor/next
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
@@ -14679,6 +14864,14 @@ S: Maintained
F: drivers/iio/imu/st_lsm6dsx/
F: Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
+ST MIPID02 CSI-2 TO PARALLEL BRIDGE DRIVER
+M: Mickael Guene <mickael.guene@st.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/st-mipid02.c
+F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
+
ST STM32 I2C/SMBUS DRIVER
M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
L: linux-i2c@vger.kernel.org
@@ -15315,6 +15508,11 @@ M: Laxman Dewangan <ldewangan@nvidia.com>
S: Supported
F: drivers/spi/spi-tegra*
+TEGRA XUSB PADCTL DRIVER
+M: JC Kuo <jckuo@nvidia.com>
+S: Supported
+F: drivers/phy/tegra/xusb*
+
TEHUTI ETHERNET DRIVER
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
@@ -15350,6 +15548,12 @@ F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt
F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt
F: drivers/clk/keystone/sci-clk.c
F: drivers/reset/reset-ti-sci.c
+F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.txt
+F: Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.txt
+F: drivers/irqchip/irq-ti-sci-intr.c
+F: drivers/irqchip/irq-ti-sci-inta.c
+F: include/linux/soc/ti/ti_sci_inta_msi.h
+F: drivers/soc/ti/ti_sci_inta_msi.c
Texas Instruments ASoC drivers
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -15468,7 +15672,7 @@ F: include/linux/clk/ti.h
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
-M: Kevin Hilman <khilman@kernel.org>
+R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
S: Supported
@@ -15508,9 +15712,11 @@ S: Maintained
F: drivers/net/ethernet/ti/cpsw*
F: drivers/net/ethernet/ti/davinci*
-TI FLASH MEDIA INTERFACE DRIVER
+TI FLASH MEDIA MEMORYSTICK/MMC DRIVERS
M: Alex Dubov <oakad@yahoo.com>
S: Maintained
+W: http://tifmxx.berlios.de/
+F: drivers/memstick/host/tifm_ms.c
F: drivers/misc/tifm*
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
@@ -15662,7 +15868,7 @@ TMP401 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/tmp401
+F: Documentation/hwmon/tmp401.rst
F: drivers/hwmon/tmp401.c
TMPFS (SHMEM FILESYSTEM)
@@ -15695,7 +15901,7 @@ M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
L: linux-kernel@vger.kernel.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
F: Documentation/RCU/torture.txt
F: kernel/torture.c
F: kernel/rcu/rcutorture.c
@@ -15937,6 +16143,12 @@ F: drivers/uwb/
F: include/linux/uwb.h
F: include/linux/uwb/
+UNICODE SUBSYSTEM:
+M: Gabriel Krisman Bertazi <krisman@collabora.com>
+L: linux-fsdevel@vger.kernel.org
+S: Supported
+F: fs/unicode/
+
UNICORE32 ARCHITECTURE:
M: Guan Xuetao <gxt@pku.edu.cn>
W: http://mprc.pku.edu.cn/~guanxuetao/linux
@@ -15982,6 +16194,13 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ufs/*dwc*
+UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
+M: Stanley Chu <stanley.chu@mediatek.com>
+L: linux-scsi@vger.kernel.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/scsi/ufs/ufs-mediatek*
+
UNSORTED BLOCK IMAGES (UBI)
M: Artem Bityutskiy <dedekind1@gmail.com>
M: Richard Weinberger <richard@nod.at>
@@ -16085,6 +16304,14 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/roles/intel-xhci-usb-role-switch.c
+USB IP DRIVER FOR HISILICON KIRIN
+M: Yu Chen <chenyu56@huawei.com>
+M: Binghui Wang <wangbinghui@hisilicon.com>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
+F: drivers/phy/hisilicon/phy-hi3660-usb3.c
+
USB ISP116X DRIVER
M: Olav Kongas <ok@artecdesign.ee>
L: linux-usb@vger.kernel.org
@@ -16111,7 +16338,7 @@ F: drivers/usb/storage/
USB MIDI DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-T: git git://git.alsa-project.org/alsa-kernel.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
S: Maintained
F: sound/usb/midi.*
@@ -16384,6 +16611,7 @@ F: fs/fat/
VFIO DRIVER
M: Alex Williamson <alex.williamson@redhat.com>
+R: Cornelia Huck <cohuck@redhat.com>
L: kvm@vger.kernel.org
T: git git://github.com/awilliam/linux-vfio.git
S: Maintained
@@ -16700,7 +16928,7 @@ VT1211 HARDWARE MONITOR DRIVER
M: Juerg Haefliger <juergh@gmail.com>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/vt1211
+F: Documentation/hwmon/vt1211.rst
F: drivers/hwmon/vt1211.c
VT8231 HARDWARE MONITOR DRIVER
@@ -16728,14 +16956,14 @@ W83791D HARDWARE MONITORING DRIVER
M: Marc Hulsman <m.hulsman@tudelft.nl>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/w83791d
+F: Documentation/hwmon/w83791d.rst
F: drivers/hwmon/w83791d.c
W83793 HARDWARE MONITORING DRIVER
M: Rudolf Marek <r.marek@assembler.cz>
L: linux-hwmon@vger.kernel.org
S: Maintained
-F: Documentation/hwmon/w83793
+F: Documentation/hwmon/w83793.rst
F: drivers/hwmon/w83793.c
W83795 HARDWARE MONITORING DRIVER
@@ -16844,7 +17072,7 @@ L: patches@opensource.cirrus.com
T: git https://github.com/CirrusLogic/linux-drivers.git
W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
-F: Documentation/hwmon/wm83??
+F: Documentation/hwmon/wm83??.rst
F: Documentation/devicetree/bindings/extcon/extcon-arizona.txt
F: Documentation/devicetree/bindings/regulator/arizona-regulator.txt
F: Documentation/devicetree/bindings/mfd/arizona.txt
@@ -16934,7 +17162,7 @@ M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@alien8.de>
L: linux-edac@vger.kernel.org
S: Maintained
-F: arch/x86/kernel/cpu/mcheck/*
+F: arch/x86/kernel/cpu/mce/*
X86 MICROCODE UPDATE SUPPORT
M: Borislav Petkov <bp@alien8.de>
diff --git a/Makefile b/Makefile
index 2b99679148dc..a45f84a7e811 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 1
+PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc1
NAME = Shy Crocodile
# *DOCUMENTATION*
@@ -96,56 +96,65 @@ endif
export quiet Q KBUILD_VERBOSE
-# kbuild supports saving output files in a separate directory.
-# To locate output files in a separate directory two syntaxes are supported.
-# In both cases the working directory must be the root of the kernel src.
+# Kbuild will save output files in the current working directory.
+# This does not need to match the root of the kernel source tree.
+#
+# For example, you can do this:
+#
+# cd /dir/to/store/output/files; make -f /dir/to/kernel/source/Makefile
+#
+# If you want to save output files in a different location, there are
+# two syntaxes to specify it.
+#
# 1) O=
# Use "make O=dir/to/store/output/files/"
#
# 2) Set KBUILD_OUTPUT
-# Set the environment variable KBUILD_OUTPUT to point to the directory
-# where the output files shall be placed.
-# export KBUILD_OUTPUT=dir/to/store/output/files/
-# make
+# Set the environment variable KBUILD_OUTPUT to point to the output directory.
+# export KBUILD_OUTPUT=dir/to/store/output/files/; make
#
# The O= assignment takes precedence over the KBUILD_OUTPUT environment
# variable.
-# KBUILD_SRC is not intended to be used by the regular user (for now),
-# it is set on invocation of make with KBUILD_OUTPUT or O= specified.
-
-# OK, Make called in directory where kernel src resides
-# Do we want to locate output files in a separate directory?
+# Do we want to change the working directory?
ifeq ("$(origin O)", "command line")
KBUILD_OUTPUT := $(O)
endif
-ifneq ($(words $(subst :, ,$(CURDIR))), 1)
- $(error main directory cannot contain spaces nor colons)
+ifneq ($(KBUILD_OUTPUT),)
+# Make's built-in functions such as $(abspath ...), $(realpath ...) cannot
+# expand a shell special character '~'. We use a somewhat tedious way here.
+abs_objtree := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) && pwd)
+$(if $(abs_objtree),, \
+ $(error failed to create output directory "$(KBUILD_OUTPUT)"))
+
+# $(realpath ...) resolves symlinks
+abs_objtree := $(realpath $(abs_objtree))
+else
+abs_objtree := $(CURDIR)
+endif # ifneq ($(KBUILD_OUTPUT),)
+
+ifeq ($(abs_objtree),$(CURDIR))
+# Suppress "Entering directory ..." unless we are changing the work directory.
+MAKEFLAGS += --no-print-directory
+else
+need-sub-make := 1
endif
-ifneq ($(KBUILD_OUTPUT),)
-# check that the output directory actually exists
-saved-output := $(KBUILD_OUTPUT)
-KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
- && pwd)
-$(if $(KBUILD_OUTPUT),, \
- $(error failed to create output directory "$(saved-output)"))
+abs_srctree := $(realpath $(dir $(lastword $(MAKEFILE_LIST))))
+ifneq ($(words $(subst :, ,$(abs_srctree))), 1)
+$(error source directory cannot contain spaces or colons)
+endif
+
+ifneq ($(abs_srctree),$(abs_objtree))
# Look for make include files relative to root of kernel src
#
# This does not become effective immediately because MAKEFLAGS is re-parsed
-# once after the Makefile is read. It is OK since we are going to invoke
-# 'sub-make' below.
-MAKEFLAGS += --include-dir=$(CURDIR)
-
+# once after the Makefile is read. We need to invoke sub-make.
+MAKEFLAGS += --include-dir=$(abs_srctree)
need-sub-make := 1
-else
-
-# Do not print "Entering directory ..." at all for in-tree build.
-MAKEFLAGS += --no-print-directory
-
-endif # ifneq ($(KBUILD_OUTPUT),)
+endif
ifneq ($(filter 3.%,$(MAKE_VERSION)),)
# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
@@ -155,20 +164,19 @@ need-sub-make := 1
$(lastword $(MAKEFILE_LIST)): ;
endif
+export abs_srctree abs_objtree
export sub_make_done := 1
ifeq ($(need-sub-make),1)
PHONY += $(MAKECMDGOALS) sub-make
-$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
+$(filter-out _all sub-make $(lastword $(MAKEFILE_LIST)), $(MAKECMDGOALS)) _all: sub-make
@:
# Invoke a second make in the output directory, passing relevant variables
sub-make:
- $(Q)$(MAKE) \
- $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
- -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
+ $(Q)$(MAKE) -C $(abs_objtree) -f $(abs_srctree)/Makefile $(MAKECMDGOALS)
endif # need-sub-make
endif # sub_make_done
@@ -213,16 +221,21 @@ ifeq ("$(origin M)", "command line")
KBUILD_EXTMOD := $(M)
endif
-ifeq ($(KBUILD_SRC),)
+ifeq ($(abs_srctree),$(abs_objtree))
# building in the source tree
srctree := .
else
- ifeq ($(KBUILD_SRC)/,$(dir $(CURDIR)))
+ ifeq ($(abs_srctree)/,$(dir $(abs_objtree)))
# building in a subdirectory of the source tree
srctree := ..
else
- srctree := $(KBUILD_SRC)
+ srctree := $(abs_srctree)
endif
+
+ # TODO:
+ # KBUILD_SRC is only used to distinguish in-tree/out-of-tree build.
+ # Replace it with $(srctree) or something.
+ KBUILD_SRC := $(abs_srctree)
endif
export KBUILD_CHECKSRC KBUILD_EXTMOD KBUILD_SRC
@@ -401,6 +414,7 @@ NM = $(CROSS_COMPILE)nm
STRIP = $(CROSS_COMPILE)strip
OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump
+PAHOLE = pahole
LEX = flex
YACC = bison
AWK = awk
@@ -435,7 +449,7 @@ USERINCLUDE := \
LINUXINCLUDE := \
-I$(srctree)/arch/$(SRCARCH)/include \
-I$(objtree)/arch/$(SRCARCH)/include/generated \
- $(if $(KBUILD_SRC), -I$(srctree)/include) \
+ $(if $(filter .,$(srctree)),,-I$(srctree)/include) \
-I$(objtree)/include \
$(USERINCLUDE)
@@ -455,7 +469,7 @@ KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
+export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE
export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
@@ -496,7 +510,7 @@ PHONY += outputmakefile
# At the same time as the output Makefile is generated, generate .gitignore to
# ignore the whole output directory
outputmakefile:
-ifneq ($(KBUILD_SRC),)
+ifneq ($(srctree),.)
$(Q)ln -fsn $(srctree) source
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
$(Q)test -e .gitignore || \
@@ -519,20 +533,11 @@ KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
endif
-RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
-RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
-RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
-RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
-RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
-RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
-export RETPOLINE_CFLAGS
-export RETPOLINE_VDSO_CFLAGS
-
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
# and from include/config/auto.conf.cmd to detect the compiler upgrade.
-CC_VERSION_TEXT = $(shell $(CC) --version | head -n 1)
+CC_VERSION_TEXT = $(shell $(CC) --version 2>/dev/null | head -n 1)
ifeq ($(config-targets),1)
# ===========================================================================
@@ -594,20 +599,21 @@ endif
export KBUILD_MODULES KBUILD_BUILTIN
+ifeq ($(dot-config),1)
+include include/config/auto.conf
+endif
+
ifeq ($(KBUILD_EXTMOD),)
# Objects we will link into vmlinux / subdirs we need to visit
init-y := init/
drivers-y := drivers/ sound/
+drivers-$(CONFIG_SAMPLES) += samples/
net-y := net/
libs-y := lib/
core-y := usr/
virt-y := virt/
endif # KBUILD_EXTMOD
-ifeq ($(dot-config),1)
-include include/config/auto.conf
-endif
-
# The all: target is the default when no target is given on the
# command line.
# This allows a user to issue only 'make' to build a kernel including modules
@@ -624,6 +630,15 @@ ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
+RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
+RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
+RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
+export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
+
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
@@ -636,7 +651,7 @@ ifeq ($(may-sync-config),1)
# Read in dependencies to all Kconfig* files, make sure to run syncconfig if
# changes are detected. This should be included after arch/$(SRCARCH)/Makefile
# because some architectures define CROSS_COMPILE there.
--include include/config/auto.conf.cmd
+include include/config/auto.conf.cmd
$(KCONFIG_CONFIG):
@echo >&2 '***'
@@ -677,7 +692,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
-KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
@@ -716,17 +731,15 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
ifdef CONFIG_CC_IS_CLANG
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+KBUILD_CPPFLAGS += -Qunused-arguments
+KBUILD_CFLAGS += -Wno-format-invalid-specifier
+KBUILD_CFLAGS += -Wno-gnu
# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+KBUILD_CFLAGS += -Wno-tautological-compare
# CLANG uses _MergedGlobals as an optimization, but this breaks modpost, as the
# source of a reference will be _MergedGlobals and not one of the whitelisted names.
# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += -mno-global-merge
else
# These warnings generated too much noise in a regular build.
@@ -748,6 +761,11 @@ KBUILD_CFLAGS += -fomit-frame-pointer
endif
endif
+# Initialize all stack variables with a pattern, if desired.
+ifdef CONFIG_INIT_STACK_ALL
+KBUILD_CFLAGS += -ftrivial-auto-var-init=pattern
+endif
+
DEBUG_CFLAGS := $(call cc-option, -fno-var-tracking-assignments)
ifdef CONFIG_DEBUG_INFO
@@ -811,6 +829,10 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
+ifdef CONFIG_LIVEPATCH
+KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
+endif
+
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
@@ -818,7 +840,7 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
KBUILD_CFLAGS += -Wdeclaration-after-statement
# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
-KBUILD_CFLAGS += $(call cc-option,-Wvla)
+KBUILD_CFLAGS += -Wvla
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += -Wno-pointer-sign
@@ -975,8 +997,9 @@ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
$(net-y) $(net-m) $(libs-y) $(libs-m) $(virt-y)))
-vmlinux-alldirs := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
- $(init-) $(core-) $(drivers-) $(net-) $(libs-) $(virt-))))
+vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
+ $(patsubst %/,%,$(filter %/, $(init-) $(core-) \
+ $(drivers-) $(net-) $(libs-) $(virt-))))
init-y := $(patsubst %/, %/built-in.a, $(init-y))
core-y := $(patsubst %/, %/built-in.a, $(core-y))
@@ -993,7 +1016,7 @@ export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/package/Makefile
-export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
+export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
@@ -1030,11 +1053,8 @@ vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE
targets := vmlinux
-# Build samples along the rest of the kernel. This needs headers_install.
-ifdef CONFIG_SAMPLES
-vmlinux-dirs += samples
+# Some samples need headers_install.
samples: headers_install
-endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -1054,7 +1074,7 @@ filechk_kernel.release = \
echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
# Store (new) KERNELRELEASE string in include/config/kernel.release
-include/config/kernel.release: $(srctree)/Makefile FORCE
+include/config/kernel.release: FORCE
$(call filechk,kernel.release)
# Additional helpers built in scripts/
@@ -1076,9 +1096,11 @@ PHONY += prepare archprepare prepare1 prepare3
# and if so do:
# 1) Check that make has not been executed in the kernel src $(srctree)
prepare3: include/config/kernel.release
-ifneq ($(KBUILD_SRC),)
+ifneq ($(srctree),.)
@$(kecho) ' Using $(srctree) as source for kernel'
- $(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
+ $(Q)if [ -f $(srctree)/.config -o \
+ -d $(srctree)/include/config -o \
+ -d $(srctree)/arch/$(SRCARCH)/include/generated ]; then \
echo >&2 " $(srctree) is not clean, please run 'make mrproper'"; \
echo >&2 " in the '$(srctree)' directory.";\
/bin/false; \
@@ -1266,6 +1288,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh
modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
$(Q)$(AWK) '!x[$$0]++' $^ > $(objtree)/modules.builtin
@@ -1294,6 +1317,7 @@ _modinst_:
fi
@cp -f $(objtree)/modules.order $(MODLIB)/
@cp -f $(objtree)/modules.builtin $(MODLIB)/
+ @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
# This depmod is only for convenience to give the initial
@@ -1334,10 +1358,11 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_DIRS += $(MODVERDIR) include/ksym
+CLEAN_FILES += modules.builtin.modinfo
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config usr/include include/generated \
- arch/*/include/generated .tmp_objdiff
+ arch/$(SRCARCH)/include/generated .tmp_objdiff
MRPROPER_FILES += .config .config.old .version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.pem signing_key.priv signing_key.x509 \
@@ -1348,7 +1373,7 @@ MRPROPER_FILES += .config .config.old .version \
#
clean: rm-dirs := $(CLEAN_DIRS)
clean: rm-files := $(CLEAN_FILES)
-clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs) Documentation samples)
+clean-dirs := $(addprefix _clean_, . $(vmlinux-alldirs))
PHONY += $(clean-dirs) clean archclean vmlinuxclean
$(clean-dirs):
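As the updated comments in this Makefile hunk describe, there are two ways to direct build output to a separate directory, and the O= assignment takes precedence over the KBUILD_OUTPUT environment variable. A minimal usage sketch, run from the kernel source tree (the output paths below are placeholders, not part of the patch):

  # 1) Pass O= on the make command line
  make O=/path/to/output defconfig
  make O=/path/to/output

  # 2) Point KBUILD_OUTPUT at the output directory
  export KBUILD_OUTPUT=/path/to/output
  make defconfig
  make

  # If both are given, the O= value wins.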
diff --git a/arch/Kconfig b/arch/Kconfig
index 33687dddd86a..c47b328eada0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -245,10 +245,21 @@ config ARCH_HAS_FORTIFY_SOURCE
An architecture should select this when it can successfully
build and run with CONFIG_FORTIFY_SOURCE.
+#
+# Select if the arch provides a historic keepinitrd alias for the retain_initrd
+# command line option
+#
+config ARCH_HAS_KEEPINITRD
+ bool
+
# Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h
config ARCH_HAS_SET_MEMORY
bool
+# Select if arch has all set_direct_map_invalid/default() functions
+config ARCH_HAS_SET_DIRECT_MAP
+ bool
+
# Select if arch init_task must go in the __init_task_data section
config ARCH_TASK_STRUCT_ON_STACK
bool
@@ -383,7 +394,13 @@ config HAVE_ARCH_JUMP_LABEL_RELATIVE
config HAVE_RCU_TABLE_FREE
bool
-config HAVE_RCU_TABLE_INVALIDATE
+config HAVE_RCU_TABLE_NO_INVALIDATE
+ bool
+
+config HAVE_MMU_GATHER_PAGE_SIZE
+ bool
+
+config HAVE_MMU_GATHER_NO_GATHER
bool
config ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -764,7 +781,7 @@ config COMPAT_OLD_SIGACTION
bool
config 64BIT_TIME
- def_bool ARCH_HAS_64BIT_TIME
+ def_bool y
help
This should be selected by all architectures that need to support
new system calls with a 64-bit time_t. This is relevant on all 32-bit
@@ -901,6 +918,15 @@ config HAVE_ARCH_PREL32_RELOCATIONS
config ARCH_USE_MEMREMAP_PROT
bool
+config LOCK_EVENT_COUNTS
+ bool "Locking event counts collection"
+ depends on DEBUG_FS
+ ---help---
+ Enable light-weight counting of various locking related events
+ in the system with minimal performance impact. This reduces
+ the chance of application behavior change because of timing
+ differences. The counts are reported via debugfs.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 584a6e114853..f7b19b813a70 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -36,6 +36,7 @@ config ALPHA
select ODD_RT_SIGACTION
select OLD_SIGSUSPEND
select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67
+ select MMU_GATHER_NO_RANGE
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -49,13 +50,6 @@ config MMU
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config ARCH_HAS_ILOG2_U32
bool
default n
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile
index 12dee59b011c..b3314e0dcb6f 100644
--- a/arch/alpha/Makefile
+++ b/arch/alpha/Makefile
@@ -8,6 +8,8 @@
# Copyright (C) 1994 by Linus Torvalds
#
+KBUILD_DEFCONFIG := defconfig
+
NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
diff --git a/arch/alpha/defconfig b/arch/alpha/configs/defconfig
index f4ec420d7f2d..f4ec420d7f2d 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/configs/defconfig
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index 70b783333965..89e87bbc987f 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += preempt.h
generic-y += sections.h
generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 4c533fc94d62..ccf9d65166bb 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -513,8 +513,6 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
#define writel_relaxed(b, addr) __raw_writel(b, addr)
#define writeq_relaxed(b, addr) __raw_writeq(b, addr)
-#define mmiowb()
-
/*
* String version of IO memory access ops:
*/
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
deleted file mode 100644
index cf8fc8f9a2ed..000000000000
--- a/arch/alpha/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_RWSEM_H
-#define _ALPHA_RWSEM_H
-
-/*
- * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-
-#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
-#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-static inline int ___down_read(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
- return (oldcount < 0);
-}
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(___down_read(sem)))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (unlikely(___down_read(sem)))
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long old, new, res;
-
- res = atomic_long_read(&sem->count);
- do {
- new = res + RWSEM_ACTIVE_READ_BIAS;
- if (new <= 0)
- break;
- old = res;
- res = atomic_long_cmpxchg(&sem->count, old, new);
- } while (res != old);
- return res >= 0 ? 1 : 0;
-}
-
-static inline long ___down_write(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
- return oldcount;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- if (unlikely(___down_write(sem)))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (unlikely(___down_write(sem))) {
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- if (ret == RWSEM_UNLOCKED_VALUE)
- return 1;
- return 0;
-}
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- " mb\n"
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(oldcount < 0))
- if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
- rwsem_wake(sem);
-}
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
- long count;
-#ifndef CONFIG_SMP
- sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
- count = sem->count.counter;
-#else
- long temp;
- __asm__ __volatile__(
- " mb\n"
- "1: ldq_l %0,%1\n"
- " subq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " subq %0,%3,%0\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (count), "=m" (sem->count), "=&r" (temp)
- :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(count))
- if ((int)count == 0)
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long oldcount;
-#ifndef CONFIG_SMP
- oldcount = sem->count.counter;
- sem->count.counter -= RWSEM_WAITING_BIAS;
-#else
- long temp;
- __asm__ __volatile__(
- "1: ldq_l %0,%1\n"
- " addq %0,%3,%2\n"
- " stq_c %2,%1\n"
- " beq %2,2f\n"
- " mb\n"
- ".subsection 2\n"
- "2: br 1b\n"
- ".previous"
- :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
- :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
-#endif
- if (unlikely(oldcount < 0))
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ALPHA_RWSEM_H */
diff --git a/arch/alpha/include/asm/segment.h b/arch/alpha/include/asm/segment.h
deleted file mode 100644
index 0453d97daae7..000000000000
--- a/arch/alpha/include/asm/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ALPHA_SEGMENT_H
-#define __ALPHA_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif
diff --git a/arch/alpha/include/asm/syscall.h b/arch/alpha/include/asm/syscall.h
index d73a6fcb519c..11c688c1d7ec 100644
--- a/arch/alpha/include/asm/syscall.h
+++ b/arch/alpha/include/asm/syscall.h
@@ -4,7 +4,7 @@
#include <uapi/linux/audit.h>
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_ALPHA;
}
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index 8f5042b61875..4f79e331af5e 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -2,12 +2,6 @@
#ifndef _ALPHA_TLB_H
#define _ALPHA_TLB_H
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
diff --git a/arch/alpha/include/uapi/asm/sockios.h b/arch/alpha/include/uapi/asm/sockios.h
index ba287e4b01bf..af92bc27c3be 100644
--- a/arch/alpha/include/uapi/asm/sockios.h
+++ b/arch/alpha/include/uapi/asm/sockios.h
@@ -11,7 +11,7 @@
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
#endif /* _ASM_ALPHA_SOCKIOS_H */
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 3034d6d936d2..242108439f42 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -249,7 +249,7 @@ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
ok = 0;
/* If both conditions above are met, we are fine. */
- DBGA("pci_dac_dma_supported %s from %pf\n",
+ DBGA("pci_dac_dma_supported %s from %ps\n",
ok ? "yes" : "no", __builtin_return_address(0));
return ok;
@@ -281,7 +281,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
&& paddr + size <= __direct_map_size) {
ret = paddr + __direct_map_base;
- DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n",
+ DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
@@ -292,7 +292,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
if (dac_allowed) {
ret = paddr + alpha_mv.pci_dac_offset;
- DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n",
+ DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
cpu_addr, size, ret, __builtin_return_address(0));
return ret;
@@ -329,7 +329,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
ret = arena->dma_base + dma_ofs * PAGE_SIZE;
ret += (unsigned long)cpu_addr & ~PAGE_MASK;
- DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n",
+ DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
cpu_addr, size, npages, ret, __builtin_return_address(0));
return ret;
@@ -396,14 +396,14 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
&& dma_addr < __direct_map_base + __direct_map_size) {
/* Nothing to do. */
- DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n",
+ DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
dma_addr, size, __builtin_return_address(0));
return;
}
if (dma_addr > 0xffffffff) {
- DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n",
+ DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
dma_addr, size, __builtin_return_address(0));
return;
}
@@ -435,7 +435,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
spin_unlock_irqrestore(&arena->lock, flags);
- DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n",
+ DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
dma_addr, size, npages, __builtin_return_address(0));
}
@@ -458,7 +458,7 @@ try_again:
cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
if (! cpu_addr) {
printk(KERN_INFO "pci_alloc_consistent: "
- "get_free_pages failed from %pf\n",
+ "get_free_pages failed from %ps\n",
__builtin_return_address(0));
/* ??? Really atomic allocation? Otherwise we could play
with vmalloc and sg if we can't find contiguous memory. */
@@ -477,7 +477,7 @@ try_again:
goto try_again;
}
- DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n",
+ DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
size, cpu_addr, *dma_addrp, __builtin_return_address(0));
return cpu_addr;
@@ -497,7 +497,7 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
free_pages((unsigned long)cpu_addr, get_order(size));
- DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n",
+ DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
dma_addr, size, __builtin_return_address(0));
}
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index 4dbd4e415041..bbbd34586de0 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -10,7 +10,6 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
-#include <asm/segment.h>
#if 0
# define DBG_DEVS(args) printk args
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 733f08966fd2..71cd7aca38ce 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -11,7 +11,6 @@
#include <asm/hwrpb.h>
#include <asm/io.h>
-#include <asm/segment.h>
#define SMC_DEBUG 0
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 165f268beafc..9e7704e44f6d 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -467,3 +467,9 @@
535 common io_uring_setup sys_io_uring_setup
536 common io_uring_enter sys_io_uring_enter
537 common io_uring_register sys_io_uring_register
+538 common open_tree sys_open_tree
+539 common move_mount sys_move_mount
+540 common fsopen sys_fsopen
+541 common fsconfig sys_fsconfig
+542 common fsmount sys_fsmount
+543 common fspick sys_fspick
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index a42fc5c4db89..e2cbec3789e8 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -285,17 +285,3 @@ mem_init(void)
memblock_free_all();
mem_init_print_info(NULL);
}
-
-void
-free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void
-free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c781e45d1d99..23e063df5d2c 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -63,9 +63,6 @@ config SCHED_OMIT_FRAME_POINTER
config GENERIC_CSUM
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config ARCH_DISCONTIGMEM_ENABLE
def_bool n
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 69bc1c9e8e50..7425bb0f2d1b 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -18,8 +18,8 @@
model = "snps,hsdk";
compatible = "snps,hsdk";
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
chosen {
bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
@@ -105,7 +105,7 @@
#size-cells = <1>;
interrupt-parent = <&idu_intc>;
- ranges = <0x00000000 0xf0000000 0x10000000>;
+ ranges = <0x00000000 0x0 0xf0000000 0x10000000>;
cgu_rst: reset-controller@8a0 {
compatible = "snps,hsdk-reset";
@@ -269,9 +269,10 @@
};
memory@80000000 {
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
device_type = "memory";
- reg = <0x80000000 0x40000000>; /* 1 GiB */
+ reg = <0x0 0x80000000 0x0 0x40000000>; /* 1 GB lowmem */
+ /* 0x1 0x00000000 0x0 0x40000000>; 1 GB highmem */
};
};
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index f56cc2070c11..b117e6c16d41 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -15,7 +15,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index b6f2482c7e74..33a787c375e2 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -17,7 +17,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 318e4cd29629..de398c7b10b3 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -18,7 +18,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index c15807b0e0c1..2dbd34a9ff07 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 65e983fd942b..c7135f1e2583 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -18,7 +18,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 08c5b99ac341..385a71d3c478 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -18,7 +18,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 5b5e26d67955..248a2c3bdc12 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -17,7 +17,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 26af9b2f7fcb..1a4bc7b660fb 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -12,7 +12,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index decc306a3b52..393d4f5e1450 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index aa2d6da9d187..2b80c184c9c8 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -10,13 +10,9 @@
#define __ASM_ARC_ELF_H
#include <linux/types.h>
+#include <linux/elf-em.h>
#include <uapi/asm/elf.h>
-/* These ELF defines belong to uapi but libc elf.h already defines them */
-#define EM_ARCOMPACT 93
-
-#define EM_ARCV2 195 /* ARCv2 Cores */
-
#define EM_ARC_INUSE (IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
EM_ARCOMPACT : EM_ARCV2)
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index c7a4201ed62b..9cac959ca4e8 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -9,6 +9,7 @@
#ifndef _ASM_ARC_SYSCALL_H
#define _ASM_ARC_SYSCALL_H 1
+#include <uapi/linux/audit.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <asm/unistd.h>
@@ -67,4 +68,14 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
}
}
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_ISA_ARCOMPACT)
+ ? (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCOMPACTBE : AUDIT_ARCH_ARCOMPACT)
+ : (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_ARCV2BE : AUDIT_ARCH_ARCV2);
+}
+
#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index a9db5f62aaf3..90cac97643a4 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -9,38 +9,6 @@
#ifndef _ASM_ARC_TLB_H
#define _ASM_ARC_TLB_H
-#define tlb_flush(tlb) \
-do { \
- if (tlb->fullmm) \
- flush_tlb_mm((tlb)->mm); \
-} while (0)
-
-/*
- * This pair is called at time of munmap/exit to flush cache and TLB entries
- * for mappings being torn down.
- * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$
- * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range
- *
- * Note, read http://lkml.org/lkml/2004/1/15/6
- */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-#define tlb_start_vma(tlb, vma)
-#else
-#define tlb_start_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while(0)
-#endif
-
-#define tlb_end_vma(tlb, vma) \
-do { \
- if (!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, ptep, address)
-
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index eabc3efa6c6d..526418543379 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -742,6 +742,7 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n);
#endif
+#include <asm/segment.h>
#include <asm-generic/uaccess.h>
#endif
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index f230bb7092fd..b3373f5c88e0 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -30,10 +30,10 @@
#else
-.macro PREALLOC_INSTR
+.macro PREALLOC_INSTR reg, off
.endm
-.macro PREFETCHW_INSTR
+.macro PREFETCHW_INSTR reg, off
.endm
#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 4135abec3fb0..63e6e6504699 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -113,10 +113,24 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
- if (cbcr.c)
+ if (cbcr.c) {
ioc_exists = 1;
- else
+
+ /*
+ * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+ * simultaneously. This happens because as of today IOC aperture covers
+ * only ZONE_NORMAL (low mem) and any dma transactions outside this
+ * region won't be HW coherent.
+ * If we want to use both IOC and ZONE_HIGHMEM we can use
+ * bounce_buffer to handle dma transactions to HIGHMEM.
+ * Also it is possible to modify dma_direct cache ops or increase IOC
+ * aperture size if we are planning to use HIGHMEM without PAE.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
+ ioc_enable = 0;
+ } else {
ioc_enable = 0;
+ }
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1158,19 +1172,6 @@ noinline void __init arc_ioc_setup(void)
if (!ioc_enable)
return;
- /*
- * As for today we don't support both IOC and ZONE_HIGHMEM enabled
- * simultaneously. This happens because as of today IOC aperture covers
- * only ZONE_NORMAL (low mem) and any dma transactions outside this
- * region won't be HW coherent.
- * If we want to use both IOC and ZONE_HIGHMEM we can use
- * bounce_buffer to handle dma transactions to HIGHMEM.
- * Also it is possible to modify dma_direct cache ops or increase IOC
- * aperture size if we are planning to use HIGHMEM without PAE.
- */
- if (IS_ENABLED(CONFIG_HIGHMEM))
- panic("IOC and HIGHMEM can't be used simultaneously");
-
/* Flush + invalidate + disable L1 dcache */
__dc_disable();
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index e1ab2d7f1d64..02b7a3b20d7c 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -206,18 +206,3 @@ void __init mem_init(void)
memblock_free_all();
mem_init_print_info(NULL);
}
-
-/*
- * free_initmem: Free all the __init memory.
- */
-void __ref free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9aed25a6019b..8869742a85df 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,11 +4,11 @@ config ARM
default y
select ARCH_32BIT_OFF_T
select ARCH_CLOCKSOURCE_DATA
- select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_KEEPINITRD
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL if ARM_LPAE
@@ -21,6 +21,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@@ -178,10 +179,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default !CPU_V7M
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config ARCH_HAS_ILOG2_U32
bool
@@ -429,12 +426,15 @@ config ARCH_IXP4XX
depends on MMU
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_SUPPORTS_BIG_ENDIAN
- select CLKSRC_MMIO
select CPU_XSCALE
select DMABOUNCE if PCI
select GENERIC_CLOCKEVENTS
+ select GENERIC_IRQ_MULTI_HANDLER
+ select GPIO_IXP4XX
select GPIOLIB
select HAVE_PCI
+ select IXP4XX_IRQ
+ select IXP4XX_TIMER
select NEED_MACH_IO_H
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
@@ -900,8 +900,6 @@ config PLAT_PXA
config PLAT_VERSATILE
bool
-source "arch/arm/firmware/Kconfig"
-
source "arch/arm/mm/Kconfig"
config IWMMXT
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e388af4594a6..9a8862fee738 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1676,6 +1676,7 @@ config DEBUG_UART_PHYS
default 0xe6e68000 if DEBUG_RCAR_GEN2_SCIF1
default 0xe6ee0000 if DEBUG_RCAR_GEN2_SCIF4
default 0xe8008000 if DEBUG_R7S72100_SCIF2
+ default 0xf0000000 if DEBUG_DIGICOLOR_UA0
default 0xf0000be0 if ARCH_EBSA110
default 0xf1012000 if DEBUG_MVEBU_UART0_ALTERNATE
default 0xf1012100 if DEBUG_MVEBU_UART1_ALTERNATE
@@ -1727,6 +1728,7 @@ config DEBUG_UART_VIRT
default 0xe0010fe0 if ARCH_RPC
default 0xf0000be0 if ARCH_EBSA110
default 0xf0010000 if DEBUG_ASM9260_UART
+ default 0xf0100000 if DEBUG_DIGICOLOR_UA0
default 0xf01fb000 if DEBUG_NOMADIK_UART
default 0xf0201000 if DEBUG_BCM2835 || DEBUG_BCM2836
default 0xf1000300 if DEBUG_BCM_5301X
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 807a7d06c2a0..f863c6935d0e 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -116,8 +116,7 @@ endif
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
ifeq ($(CONFIG_THUMB2_KERNEL),y)
-AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
-CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
+CFLAGS_ISA :=-mthumb -Wa,-mimplicit-it=always $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
@@ -290,7 +289,6 @@ core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
core-y += arch/arm/probes/
core-y += arch/arm/net/
core-y += arch/arm/crypto/
-core-y += arch/arm/firmware/
core-y += $(machdirs) $(platdirs)
drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f4f5aeaf3298..dab2914fa293 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -229,6 +229,9 @@ dtb-$(CONFIG_ARCH_HIX5HD2) += \
dtb-$(CONFIG_ARCH_INTEGRATOR) += \
integratorap.dtb \
integratorcp.dtb
+dtb-$(CONFIG_ARCH_IXP4XX) += \
+ intel-ixp42x-linksys-nslu2.dtb \
+ intel-ixp43x-gateworks-gw2358.dtb
dtb-$(CONFIG_ARCH_KEYSTONE) += \
keystone-k2hk-evm.dtb \
keystone-k2l-evm.dtb \
@@ -363,7 +366,8 @@ dtb-$(CONFIG_SOC_IMX35) += \
imx35-eukrea-mbimxsd35-baseboard.dtb \
imx35-pdk.dtb
dtb-$(CONFIG_SOC_IMX50) += \
- imx50-evk.dtb
+ imx50-evk.dtb \
+ imx50-kobo-aura.dtb
dtb-$(CONFIG_SOC_IMX51) += \
imx51-apf51.dtb \
imx51-apf51dev.dtb \
@@ -380,6 +384,7 @@ dtb-$(CONFIG_SOC_IMX53) += \
imx53-kp-ddc.dtb \
imx53-kp-hsc.dtb \
imx53-m53evk.dtb \
+ imx53-m53menlo.dtb \
imx53-mba53.dtb \
imx53-ppd.dtb \
imx53-qsb.dtb \
@@ -400,6 +405,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
imx6dl-cubox-i-emmc-som-v15.dtb \
imx6dl-cubox-i-som-v15.dtb \
imx6dl-dfi-fs700-m60.dtb \
+ imx6dl-eckelmann-ci4x10.dtb \
imx6dl-emcon-avari.dtb \
imx6dl-gw51xx.dtb \
imx6dl-gw52xx.dtb \
@@ -579,6 +585,7 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-cl-som-imx7.dtb \
imx7d-colibri-emmc-eval-v3.dtb \
imx7d-colibri-eval-v3.dtb \
+ imx7d-mba7.dtb \
imx7d-nitrogen7.dtb \
imx7d-pico-hobbit.dtb \
imx7d-pico-pi.dtb \
@@ -586,7 +593,9 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-sdb.dtb \
imx7d-sdb-reva.dtb \
imx7d-sdb-sht11.dtb \
+ imx7d-zii-rpu2.dtb \
imx7s-colibri-eval-v3.dtb \
+ imx7s-mba7.dtb \
imx7s-warp.dtb
dtb-$(CONFIG_SOC_IMX7ULP) += \
imx7ulp-evk.dtb
@@ -606,6 +615,7 @@ dtb-$(CONFIG_SOC_VF610) += \
vf610-zii-dev-rev-b.dtb \
vf610-zii-dev-rev-c.dtb \
vf610-zii-scu4-aib.dtb \
+ vf610-zii-spb4.dtb \
vf610-zii-ssmb-dtu.dtb \
vf610-zii-ssmb-spu3.dtb
dtb-$(CONFIG_ARCH_MXS) += \
@@ -909,6 +919,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += \
rk3288-veyron-jaq.dtb \
rk3288-veyron-jerry.dtb \
rk3288-veyron-mickey.dtb \
+ rk3288-veyron-mighty.dtb \
rk3288-veyron-minnie.dtb \
rk3288-veyron-pinky.dtb \
rk3288-veyron-speedy.dtb \
@@ -964,6 +975,8 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32746g-eval.dtb \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
+ stm32mp157a-dk1.dtb \
+ stm32mp157c-dk2.dtb \
stm32mp157c-ed1.dtb \
stm32mp157c-ev1.dtb
dtb-$(CONFIG_MACH_SUN4I) += \
@@ -1091,6 +1104,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-h3-orangepi-plus.dtb \
sun8i-h3-orangepi-plus2e.dtb \
sun8i-h3-orangepi-zero-plus2.dtb \
+ sun8i-h3-rervision-dvk.dtb \
sun8i-r16-bananapi-m2m.dtb \
sun8i-r16-nintendo-nes-classic.dtb \
sun8i-r16-nintendo-super-nes-classic.dtb \
diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
index 50dcf1290ac6..2f650a736b44 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir2110.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts
@@ -23,14 +23,14 @@
&am33xx_pinmux {
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0) /* uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0) /* uart1_txd */
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
index f3f1abd26470..1ba66d5e21e8 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir3220.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts
@@ -23,35 +23,35 @@
&am33xx_pinmux {
tca6416_pins: pinmux_tca6416_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_INPUT_PULLUP | MUX_MODE7) /* xdma_event_intr1.gpio0[20] tca6416 stuff */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_INPUT_PULLUP, MUX_MODE7) /* xdma_event_intr1.gpio0[20] tca6416 stuff */
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0) /* uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0) /* uart1_txd */
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
>;
};
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1) /* spi0_sclk.uart2_rxd_mux3 */
- AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1) /* spi0_d0.uart2_txd_mux3 */
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE2) /* i2c0_sda.uart2_ctsn_mux0 */
- AM33XX_IOPAD(0x98c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* i2c0_scl.uart2_rtsn_mux0 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad12.gpio1[12] DTR */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad13.gpio1[13] DSR */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad14.gpio1[14] DCD */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad15.gpio1[15] RI */
-
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkr.gpio3[18], INPUT_PULLDOWN | MODE7 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE1) /* spi0_sclk.uart2_rxd_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE1) /* spi0_d0.uart2_txd_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLDOWN, MUX_MODE2) /* i2c0_sda.uart2_ctsn_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* i2c0_scl.uart2_rtsn_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad12.gpio1[12] DTR */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad13.gpio1[13] DSR */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad14.gpio1[14] DCD */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad15.gpio1[15] RI */
+
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLUP, MUX_MODE7) /* mcasp0_aclkr.gpio3[18], INPUT_PULLDOWN | MODE7 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
index 42f473f0ed77..eed65fc0e8e6 100644
--- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts
+++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts
@@ -23,43 +23,43 @@
&am33xx_pinmux {
tca6416_pins: pinmux_tca6416_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_INPUT_PULLUP | MUX_MODE7) /* xdma_event_intr1.gpio0[20] tca6416 stuff */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_INPUT_PULLUP, MUX_MODE7) /* xdma_event_intr1.gpio0[20] tca6416 stuff */
>;
};
dcan1_pins: pinmux_dcan1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE2) /* uart0_ctsn.dcan1_tx_mux0 */
- AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE2) /* uart0_rtsn.dcan1_rx_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT, MUX_MODE2) /* uart0_ctsn.dcan1_tx_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT, MUX_MODE2) /* uart0_rtsn.dcan1_rx_mux0 */
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0) /* uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE0) /* uart1_txd */
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* lcd_vsync.gpio2[22] DTR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_hsync.gpio2[23] DSR */
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_pclk.gpio2[24] DCD */
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_ac_bias_en.gpio2[25] RI */
>;
};
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1) /* spi0_sclk.uart2_rxd_mux3 */
- AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1) /* spi0_d0.uart2_txd_mux3 */
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE2) /* i2c0_sda.uart2_ctsn_mux0 */
- AM33XX_IOPAD(0x98c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* i2c0_scl.uart2_rtsn_mux0 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad12.gpio1[12] DTR */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad13.gpio1[13] DSR */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad14.gpio1[14] DCD */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad15.gpio1[15] RI */
-
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkr.gpio3[18], INPUT_PULLDOWN | MODE7 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE1) /* spi0_sclk.uart2_rxd_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE1) /* spi0_d0.uart2_txd_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLDOWN, MUX_MODE2) /* i2c0_sda.uart2_ctsn_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* i2c0_scl.uart2_rtsn_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad12.gpio1[12] DTR */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad13.gpio1[13] DSR */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad14.gpio1[14] DCD */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad15.gpio1[15] RI */
+
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLUP, MUX_MODE7) /* mcasp0_aclkr.gpio3[18], INPUT_PULLDOWN | MODE7 */
>;
};
diff --git a/arch/arm/boot/dts/am335x-baltos-leds.dtsi b/arch/arm/boot/dts/am335x-baltos-leds.dtsi
index 3ab1767d5c13..fe75050c016f 100644
--- a/arch/arm/boot/dts/am335x-baltos-leds.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos-leds.dtsi
@@ -42,9 +42,9 @@
&am33xx_pinmux {
user_leds: pinmux_user_leds {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x908, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_col.gpio3_0 PWR LED */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_txd3.gpio0_16 WLAN LED */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mii1_txd2.gpio0_17 APP LED */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* mii1_col.gpio3_0 PWR LED */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* mii1_txd3.gpio0_16 WLAN LED */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* mii1_txd2.gpio0_17 APP LED */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index 8c6fc4161ad7..b572ad1f1377 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -53,130 +53,130 @@
&am33xx_pinmux {
mmc2_pins: pinmux_mmc2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_ad8.mmc1_dat0_mux0 */
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_ad9.mmc1_dat1_mux0 */
- AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_ad10.mmc1_dat2_mux0 */
- AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_ad11.mmc1_dat3_mux0 */
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk_mux0 */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd_mux0 */
- AM33XX_IOPAD(0x9e4, PIN_INPUT_PULLUP | MUX_MODE7) /* emu0.gpio3[7] */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_ad8.mmc1_dat0_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_ad9.mmc1_dat1_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_ad10.mmc1_dat2_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_ad11.mmc1_dat3_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd_mux0 */
+ AM33XX_PADCONF(AM335X_PIN_EMU0, PIN_INPUT_PULLUP, MUX_MODE7) /* emu0.gpio3[7] */
>;
};
wl12xx_gpio: pinmux_wl12xx_gpio {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9e8, PIN_OUTPUT_PULLUP | MUX_MODE7) /* emu1.gpio3[8] */
+ AM33XX_PADCONF(AM335X_PIN_EMU1, PIN_OUTPUT_PULLUP, MUX_MODE7) /* emu1.gpio3[8] */
>;
};
tps65910_pins: pinmux_tps65910_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_ben1.gpio1[28] */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_ben1.gpio1[28] */
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE2) /* spi0_d1.i2c1_sda_mux3 */
- AM33XX_IOPAD(0x95c, PIN_INPUT | MUX_MODE2) /* spi0_cs0.i2c1_scl_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT, MUX_MODE2) /* spi0_d1.i2c1_sda_mux3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT, MUX_MODE2) /* spi0_cs0.i2c1_scl_mux3 */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_tx_en.rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_ref_clk.rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_tx_en.rmii1_txen */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0) /* rmii1_ref_clk.rmii1_refclk */
/* Slave 2 */
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
- AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* Slave 2 reset value*/
- AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x848, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x84c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0) /* mdio_data.mdio_data */
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0) /* mdio_clk.mdio_clk */
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
nandflash_pins_s0: nandflash_pins_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_wpn.gpio0_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0) /* gpmc_wen.gpmc_wen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-base0033.dts b/arch/arm/boot/dts/am335x-base0033.dts
index 29782be07605..cbd5bd8c57de 100644
--- a/arch/arm/boot/dts/am335x-base0033.dts
+++ b/arch/arm/boot/dts/am335x-base0033.dts
@@ -46,39 +46,39 @@
&am33xx_pinmux {
nxp_hdmi_pins: pinmux_nxp_hdmi_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0) /* lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0) /* lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0) /* lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT, MUX_MODE3) /* xdma_event_intr0.clkout1 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
>;
};
nxp_hdmi_off_pins: pinmux_nxp_hdmi_off_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE3) /* xdma_event_intr0.clkout1 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT, MUX_MODE3) /* xdma_event_intr0.clkout1 */
>;
};
leds_base_pins: pinmux_leds_base_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- AM33XX_IOPAD(0x888, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn3.gpio2_0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_csn3.gpio2_0 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index 456eef57ef89..42cfc3b37c32 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -71,118 +71,118 @@
user_leds_s0: user_leds_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- AM33XX_IOPAD(0x85c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
- AM33XX_IOPAD(0x860, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a8.gpio1_24 */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0) /* i2c0_scl.i2c0_scl */
>;
};
i2c2_pins: pinmux_i2c2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_ctsn.i2c2_sda */
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_rtsn.i2c2_scl */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_ctsn.i2c2_sda */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_rtsn.i2c2_scl */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
clkout2_pin: pinmux_clkout2_pin {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spio0_cs1.gpio0_6 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* spio0_cs1.gpio0_6 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
emmc_pins: pinmux_emmc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-boneblack-common.dtsi b/arch/arm/boot/dts/am335x-boneblack-common.dtsi
index e543c2bee8c2..283e288b6e42 100644
--- a/arch/arm/boot/dts/am335x-boneblack-common.dtsi
+++ b/arch/arm/boot/dts/am335x-boneblack-common.dtsi
@@ -30,43 +30,43 @@
&am33xx_pinmux {
nxp_hdmi_bonelt_pins: nxp_hdmi_bonelt_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr0 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
nxp_hdmi_bonelt_off_pins: nxp_hdmi_bonelt_off_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr0 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
>;
};
mcasp0_pins: mcasp0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
- AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
- AM33XX_IOPAD(0x994, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
- AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
- AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.GPIO1_27 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-boneblack-wireless.dts b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
index 83f49f616b19..5b275c96fccf 100644
--- a/arch/arm/boot/dts/am335x-boneblack-wireless.dts
+++ b/arch/arm/boot/dts/am335x-boneblack-wireless.dts
@@ -32,35 +32,35 @@
&am33xx_pinmux {
bt_pins: pinmux_bt_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gmii1_txd0.gpio0_28 - BT_EN */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gmii1_txd0.gpio0_28 - BT_EN */
>;
};
mmc3_pins: pinmux_mmc3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE6 ) /* (L15) gmii1_rxd1.mmc2_clk */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLUP | MUX_MODE6 ) /* (J16) gmii1_txen.mmc2_cmd */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE5 ) /* (J17) gmii1_rxdv.mmc2_dat0 */
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLUP | MUX_MODE5 ) /* (J18) gmii1_txd3.mmc2_dat1 */
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLUP | MUX_MODE5 ) /* (K15) gmii1_txd2.mmc2_dat2 */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLUP | MUX_MODE5 ) /* (H16) gmii1_col.mmc2_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE6 ) /* (L15) gmii1_rxd1.mmc2_clk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLUP, MUX_MODE6 ) /* (J16) gmii1_txen.mmc2_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE5 ) /* (J17) gmii1_rxdv.mmc2_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLUP, MUX_MODE5 ) /* (J18) gmii1_txd3.mmc2_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLUP, MUX_MODE5 ) /* (K15) gmii1_txd2.mmc2_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLUP, MUX_MODE5 ) /* (H16) gmii1_col.mmc2_dat3 */
>;
};
uart3_pins: pinmux_uart3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* gmii1_rxd3.uart3_rxd */
- AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* gmii1_rxd2.uart3_txd */
- AM33XX_IOPAD(0x948, PIN_INPUT | MUX_MODE3) /* mdio_data.uart3_ctsn */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* mdio_clk.uart3_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gmii1_rxd3.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* gmii1_rxd2.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT, MUX_MODE3) /* mdio_data.uart3_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* mdio_clk.uart3_rtsn */
>;
};
wl18xx_pins: pinmux_wl18xx_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gmii1_txclk.gpio3_9 WL_EN */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_refclk.gpio0_29 WL_IRQ */
- AM33XX_IOPAD(0x930, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gmii1_rxclk.gpio3_10 LS_BUF_EN */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gmii1_txclk.gpio3_9 WL_EN */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_refclk.gpio0_29 WL_IRQ */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gmii1_rxclk.gpio3_10 LS_BUF_EN */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-boneblue.dts b/arch/arm/boot/dts/am335x-boneblue.dts
index ccb147e70d17..8d241c856c8d 100644
--- a/arch/arm/boot/dts/am335x-boneblue.dts
+++ b/arch/arm/boot/dts/am335x-boneblue.dts
@@ -130,135 +130,135 @@
&am33xx_pinmux {
user_leds_s0: user_leds_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT | MUX_MODE7) /* (U15) gpmc_a6.gpio1[22] - USR_LED_1 */
- AM33XX_IOPAD(0x85c, PIN_OUTPUT | MUX_MODE7) /* (T15) gpmc_a7.gpio1[23] - USR_LED_2 */
- AM33XX_IOPAD(0x860, PIN_OUTPUT | MUX_MODE7) /* (V16) gpmc_a8.gpio1[24] - USR_LED_3 */
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] - WIFI_LED */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE7) /* (R7) gpmc_advn_ale.gpio2[2] - P8.7, LED_RED, GP1_PIN_5 */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE7) /* (T7) gpmc_oen_ren.gpio2[3] - P8.8, LED_GREEN, GP1_PIN_6 */
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE7) /* (U12) gpmc_ad11.gpio0[27] - P8.17, BATT_LED_1 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE7) /* (T5) lcd_data15.gpio0[11] - P8.32, BATT_LED_2 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE7) /* (V6) gpmc_csn0.gpio1[29] - P8.26, BATT_LED_3 */
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE7) /* (T11) gpmc_ad10.gpio0[26] - P8.14, BATT_LED_4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT, MUX_MODE7) /* (U15) gpmc_a6.gpio1[22] - USR_LED_1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT, MUX_MODE7) /* (T15) gpmc_a7.gpio1[23] - USR_LED_2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT, MUX_MODE7) /* (V16) gpmc_a8.gpio1[24] - USR_LED_3 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT, MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] - WIFI_LED */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE7) /* (R7) gpmc_advn_ale.gpio2[2] - P8.7, LED_RED, GP1_PIN_5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE7) /* (T7) gpmc_oen_ren.gpio2[3] - P8.8, LED_GREEN, GP1_PIN_6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE7) /* (U12) gpmc_ad11.gpio0[27] - P8.17, BATT_LED_1 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE7) /* (T5) lcd_data15.gpio0[11] - P8.32, BATT_LED_2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE7) /* (V6) gpmc_csn0.gpio1[29] - P8.26, BATT_LED_3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE7) /* (T11) gpmc_ad10.gpio0[26] - P8.14, BATT_LED_4 */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* (C17) I2C0_SDA.I2C0_SDA */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* (C16) I2C0_SCL.I2C0_SCL */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0) /* (C17) I2C0_SDA.I2C0_SDA */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0) /* (C16) I2C0_SCL.I2C0_SCL */
>;
};
i2c2_pins: pinmux_i2c2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3) /* (D18) uart1_ctsn.I2C2_SDA */
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3) /* (D17) uart1_rtsn.I2C2_SCL */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* (D18) uart1_ctsn.I2C2_SDA */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* (D17) uart1_rtsn.I2C2_SCL */
>;
};
/* UT0 */
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* (E15) uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* (E16) uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
/* UT1 */
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* (D16) uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* (D15) uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
/* GPS */
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE1) /* (A17) spi0_sclk.uart2_rxd */
- AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* (B17) spi0_d0.uart2_txd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE1) /* (A17) spi0_sclk.uart2_rxd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* (B17) spi0_d0.uart2_txd */
>;
};
/* DSM2 */
uart4_pins: pinmux_uart4_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE6) /* (T17) gpmc_wait0.uart4_rxd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6) /* (T17) gpmc_wait0.uart4_rxd */
>;
};
/* UT5 */
uart5_pins: pinmux_uart5_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8C4, PIN_INPUT_PULLUP | MUX_MODE4) /* (U2) lcd_data9.uart5_rxd */
- AM33XX_IOPAD(0x8C0, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* (U1) lcd_data8.uart5_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_INPUT_PULLUP, MUX_MODE4) /* (U2) lcd_data9.uart5_rxd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT_PULLDOWN, MUX_MODE4) /* (U1) lcd_data8.uart5_txd */
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
>;
};
mmc2_pins: pinmux_mmc2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* (U9) gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* (V9) gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* (U7) gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* (V7) gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* (R8) gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* (T8) gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* (U8) gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* (V8) gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* (R9) gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* (T9) gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* (U9) gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* (V9) gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* (U7) gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* (V7) gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* (R8) gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* (T8) gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* (U8) gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* (V8) gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* (R9) gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* (T9) gpmc_ad7.mmc1_dat7 */
>;
};
mmc3_pins: pinmux_mmc3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE6) /* (L15) gmii1_rxd1.mmc2_clk */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLUP | MUX_MODE6) /* (J16) gmii1_txen.mmc2_cmd */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE5) /* (J17) gmii1_rxdv.mmc2_dat0 */
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLUP | MUX_MODE5) /* (J18) gmii1_txd3.mmc2_dat1 */
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLUP | MUX_MODE5) /* (K15) gmii1_txd2.mmc2_dat2 */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLUP | MUX_MODE5) /* (H16) gmii1_col.mmc2_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE6) /* (L15) gmii1_rxd1.mmc2_clk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLUP, MUX_MODE6) /* (J16) gmii1_txen.mmc2_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE5) /* (J17) gmii1_rxdv.mmc2_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLUP, MUX_MODE5) /* (J18) gmii1_txd3.mmc2_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLUP, MUX_MODE5) /* (K15) gmii1_txd2.mmc2_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLUP, MUX_MODE5) /* (H16) gmii1_col.mmc2_dat3 */
>;
};
bt_pins: pinmux_bt_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLUP | MUX_MODE7) /* (K17) gmii1_txd0.gpio0[28] - BT_EN */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLUP, MUX_MODE7) /* (K17) gmii1_txd0.gpio0[28] - BT_EN */
>;
};
uart3_pins: pinmux_uart3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* (L17) gmii1_rxd3.uart3_rxd */
- AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
- AM33XX_IOPAD(0x948, PIN_INPUT | MUX_MODE3) /* (M17) mdio_data.uart3_ctsn */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* (M18) mdio_clk.uart3_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1) /* (L17) gmii1_rxd3.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT, MUX_MODE3) /* (M17) mdio_data.uart3_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* (M18) mdio_clk.uart3_rtsn */
>;
};
wl18xx_pins: pinmux_wl18xx_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* (K18) gmii1_txclk.gpio3[9] - WL_EN */
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7) /* (K16) gmii1_txd1.gpio0[21] - WL_IRQ */
- AM33XX_IOPAD(0x930, PIN_OUTPUT_PULLUP | MUX_MODE7) /* (L18) gmii1_rxclk.gpio3[10] - LS_BUF_EN */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* (K18) gmii1_txclk.gpio3[9] - WL_EN */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* (K16) gmii1_txd1.gpio0[21] - WL_IRQ */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_OUTPUT_PULLUP, MUX_MODE7) /* (L18) gmii1_rxclk.gpio3[10] - LS_BUF_EN */
>;
};
/* DCAN */
dcan1_pins: pinmux_dcan1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE2) /* (E17) uart0_rtsn.dcan1_rx */
- AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE2) /* (E18) uart0_ctsn.dcan1_tx */
- AM33XX_IOPAD(0x940, PIN_OUTPUT | MUX_MODE7) /* (M16) gmii1_rxd0.gpio2[21] */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT, MUX_MODE2) /* (E17) uart0_rtsn.dcan1_rx */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT, MUX_MODE2) /* (E18) uart0_ctsn.dcan1_tx */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_OUTPUT, MUX_MODE7) /* (M16) gmii1_rxd0.gpio2[21] */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-common.dtsi b/arch/arm/boot/dts/am335x-bonegreen-common.dtsi
index 853e6d3a028d..71317e372ec7 100644
--- a/arch/arm/boot/dts/am335x-bonegreen-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bonegreen-common.dtsi
@@ -27,8 +27,8 @@
&am33xx_pinmux {
uart2_pins: uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1) /* spi0_sclk.uart2_rxd */
- AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1) /* spi0_d0.uart2_txd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE1) /* spi0_sclk.uart2_rxd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE1) /* spi0_d0.uart2_txd */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
index 57731f0daf10..7db86a9c836a 100644
--- a/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
+++ b/arch/arm/boot/dts/am335x-bonegreen-wireless.dts
@@ -32,35 +32,35 @@
&am33xx_pinmux {
bt_pins: pinmux_bt_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x878, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_ad12.gpio1_28 BT_EN */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_ad12.gpio1_28 BT_EN */
>;
};
mmc3_pins: pinmux_mmc3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad12.mmc2_dat0 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad13.mmc2_dat1 */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad14.mmc2_dat2 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad15.mmc2_dat3 */
- AM33XX_IOPAD(0x888, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
- AM33XX_IOPAD(0x88c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_clk.mmc2_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad12.mmc2_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad13.mmc2_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad14.mmc2_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad15.mmc2_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CLK, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_clk.mmc2_clk */
>;
};
uart3_pins: pinmux_uart3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* gmii1_rxd3.uart3_rxd */
- AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* gmii1_rxd2.uart3_txd */
- AM33XX_IOPAD(0x948, PIN_INPUT | MUX_MODE3) /* mdio_data.uart3_ctsn */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* mdio_clk.uart3_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gmii1_rxd3.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* gmii1_rxd2.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT, MUX_MODE3) /* mdio_data.uart3_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* mdio_clk.uart3_rtsn */
>;
};
wl18xx_pins: pinmux_wl18xx_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x828, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad10.gpio0_26 WL_EN */
- AM33XX_IOPAD(0x82C, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad11.gpio0_27 WL_IRQ */
- AM33XX_IOPAD(0x87C, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_csn0.gpio1_29 LS_BUF_EN */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad10.gpio0_26 WL_EN */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad11.gpio0_27 WL_IRQ */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_csn0.gpio1_29 LS_BUF_EN */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts
index bffa5dce54ec..31da68355e57 100644
--- a/arch/arm/boot/dts/am335x-chiliboard.dts
+++ b/arch/arm/boot/dts/am335x-chiliboard.dts
@@ -41,79 +41,79 @@
&am33xx_pinmux {
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_ref_clk.rmii_ref_clk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* mdio_data.mdio_data */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
/* mdio_clk.mdio_clk */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
usb1_drvvbus: usb1_drvvbus {
pinctrl-single,pins = <
- AM33XX_IOPAD(0xa34, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* usb1_drvvbus.usb1_drvvbus */
+ AM33XX_PADCONF(AM335X_PIN_USB1_DRVVBUS, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
sd_pins: pinmux_sd_card {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x900, PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* spi0_cs1.gpio0_6 */
>;
};
led_gpio_pins: led_gpio_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9e4, PIN_OUTPUT | MUX_MODE7) /* emu0.gpio3_7 */
- AM33XX_IOPAD(0x9e8, PIN_OUTPUT | MUX_MODE7) /* emu1.gpio3_8 */
+ AM33XX_PADCONF(AM335X_PIN_EMU0, PIN_OUTPUT, MUX_MODE7) /* emu0.gpio3_7 */
+ AM33XX_PADCONF(AM335X_PIN_EMU1, PIN_OUTPUT, MUX_MODE7) /* emu1.gpio3_8 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index 1b43ebd08b38..8b88bf6dafc4 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -30,28 +30,28 @@
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
nandflash_pins: nandflash_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
-
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x890, PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLDOWN, MUX_MODE0)
+
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-cm-t335.dts b/arch/arm/boot/dts/am335x-cm-t335.dts
index 2c724bb60417..3b0bb88dfc12 100644
--- a/arch/arm/boot/dts/am335x-cm-t335.dts
+++ b/arch/arm/boot/dts/am335x-cm-t335.dts
@@ -94,108 +94,85 @@
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0)
- /* i2c0_scl.i2c0_scl */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
/* uart0_ctsn.i2c1_sda */
- AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE2)
/* uart0_rtsn.i2c1_scl */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE2)
>;
};
gpio_led_pins: pinmux_gpio_led_pins {
pinctrl-single,pins = <
/* gpmc_csn3.gpio2_0 */
- AM33XX_IOPAD(0x888, PIN_OUTPUT | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_OUTPUT, MUX_MODE7)
>;
};
nandflash_pins: pinmux_nandflash_pins {
pinctrl-single,pins = <
- /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0)
- /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
/* gpmc_wpn.gpio0_30 */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7)
- /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0)
- /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0)
- /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0)
- /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0)
- /* gpmc_ben0_cle.gpmc_ben0_cle */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0)
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)
- /* uart0_txd.uart0_txd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0)
- /* uart1_rtsn.uart1_rtsn */
- AM33XX_IOPAD(0x97C, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
- /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0)
- /* uart1_txd.uart1_txd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
dcan0_pins: pinmux_dcan0_pins {
pinctrl-single,pins = <
/* uart1_ctsn.dcan0_tx */
- AM33XX_IOPAD(0x978, PIN_OUTPUT | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_OUTPUT, MUX_MODE2)
/* uart1_rtsn.dcan0_rx */
- AM33XX_IOPAD(0x97C, PIN_INPUT | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT, MUX_MODE2)
>;
};
dcan1_pins: pinmux_dcan1_pins {
pinctrl-single,pins = <
/* uart1_rxd.dcan1_tx */
- AM33XX_IOPAD(0x980, PIN_OUTPUT | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_OUTPUT, MUX_MODE2)
/* uart1_txd.dcan1_rx */
- AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT, MUX_MODE2)
>;
};
ecap0_pins: pinmux_ecap0_pins {
pinctrl-single,pins = <
- /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out MODE0 */
- AM33XX_IOPAD(0x964, 0x0)
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, 0x0, MUX_MODE0)
>;
};
@@ -203,96 +180,83 @@
pinctrl-single,pins = <
/* Slave 1 */
/* mii1_tx_en.rgmii1_tctl */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2)
/* mii1_txd3.rgmii1_td3 */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_txd2.rgmii1_td2 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_txd1.rgmii1_td1 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_txd0.rgmii1_td0 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_txclk.rgmii1_tclk */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxclk.rgmii1_rclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxd3.rgmii1_rd3 */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxd2.rgmii1_rd2 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxd1.rgmii1_rd1 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2)
/* mii1_rxd0.rgmii1_rd0 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2)
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
- /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
- /* mdio_clk.mdio_clk */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)
- /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0)
- /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0)
- /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0)
- /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)
- /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
spi0_pins: pinmux_spi0_pins {
pinctrl-single,pins = <
- /* spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE0)
- /* spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLUP | MUX_MODE0)
- /* spi0_d1.spi0_d1 */
- AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE0)
- /* spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x95C, PIN_OUTPUT | MUX_MODE0)
- /* spi0_cs1.spi0_cs1 */
- AM33XX_IOPAD(0x960, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_OUTPUT, MUX_MODE0)
>;
};
@@ -300,7 +264,7 @@
bluetooth_pins: pinmux_bluetooth_pins {
pinctrl-single,pins = <
/* XDMA_EVENT_INTR0.gpio0_19 - bluetooth enable */
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLUP | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLUP, MUX_MODE7)
>;
};
@@ -308,13 +272,13 @@
mcasp1_pins: pinmux_mcasp1_pins {
pinctrl-single,pins = <
/* MII1_CRS.mcasp1_aclkx */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE4)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE4)
/* MII1_RX_ER.mcasp1_fsx */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE4)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE4)
/* MII1_COL.mcasp1_axr2 */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE4)
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE4)
/* RMII1_REF_CLK.mcasp1_axr3 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE4)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE4)
>;
};
@@ -322,9 +286,9 @@
wifi_pins: pinmux_wifi_pins {
pinctrl-single,pins = <
/* EMU1.gpio3_8 - WiFi IRQ */
- AM33XX_IOPAD(0x9e8, PIN_INPUT_PULLUP | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_EMU1, PIN_INPUT_PULLUP, MUX_MODE7)
/* XDMA_EVENT_INTR1.gpio0_20 - WiFi enable */
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT, MUX_MODE7)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index edcff79879e7..55d4392bb7a1 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -190,222 +190,222 @@
matrix_keypad_s0: matrix_keypad_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a9.gpio1_25 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a10.gpio1_26 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.gpio1_27 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_a9.gpio1_25 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_a10.gpio1_26 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.gpio1_27 */
>;
};
volume_keys_s0: volume_keys_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_sclk.gpio0_2 */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d0.gpio0_3 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* spi0_sclk.gpio0_2 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* spi0_d0.gpio0_3 */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0) /* i2c0_sda.i2c0_sda */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0) /* i2c0_scl.i2c0_scl */
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d1.i2c1_sda */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_cs0.i2c1_scl */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_d1.i2c1_sda */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_cs0.i2c1_scl */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97C, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
clkout2_pin: pinmux_clkout2_pin {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
nandflash_pins_s0: nandflash_pins_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_wpn.gpio0_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0)
>;
};
ecap0_pins: backlight_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x964, MUX_MODE0) /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, 0x0, MUX_MODE0)
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
>;
};
mmc3_pins: pinmux_mmc3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a1.mmc2_dat0, INPUT_PULLUP | MODE3 */
- AM33XX_IOPAD(0x848, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a2.mmc2_dat1, INPUT_PULLUP | MODE3 */
- AM33XX_IOPAD(0x84c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a3.mmc2_dat2, INPUT_PULLUP | MODE3 */
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ben1.mmc2_dat3, INPUT_PULLUP | MODE3 */
- AM33XX_IOPAD(0x888, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_csn3.mmc2_cmd, INPUT_PULLUP | MODE3 */
- AM33XX_IOPAD(0x88c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_clk.mmc2_clk, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a1.mmc2_dat0, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a2.mmc2_dat1, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a3.mmc2_dat2, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ben1.mmc2_dat3, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_csn3.mmc2_cmd, INPUT_PULLUP | MODE3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CLK, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_clk.mmc2_clk, INPUT_PULLUP | MODE3 */
>;
};
wlan_pins: pinmux_wlan_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a0.gpio1_16 */
- AM33XX_IOPAD(0x99c, PIN_INPUT | MUX_MODE7) /* mcasp0_ahclkr.gpio3_17 */
- AM33XX_IOPAD(0x9ac, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* mcasp0_ahclkx.gpio3_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a0.gpio1_16 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_INPUT, MUX_MODE7) /* mcasp0_ahclkr.gpio3_17 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* mcasp0_ahclkx.gpio3_21 */
>;
};
lcd_pins_s0: lcd_pins_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x820, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad8.lcd_data23 */
- AM33XX_IOPAD(0x824, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad9.lcd_data22 */
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad10.lcd_data21 */
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad11.lcd_data20 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad12.lcd_data19 */
- AM33XX_IOPAD(0x834, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad13.lcd_data18 */
- AM33XX_IOPAD(0x838, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad14.lcd_data17 */
- AM33XX_IOPAD(0x83c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad15.lcd_data16 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad8.lcd_data23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad9.lcd_data22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad10.lcd_data21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad11.lcd_data20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad12.lcd_data19 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad13.lcd_data18 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad14.lcd_data17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad15.lcd_data16 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
>;
};
mcasp1_pins: mcasp1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
- AM33XX_IOPAD(0x908, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_OUTPUT_PULLDOWN, MUX_MODE4) /* mii1_col.mcasp1_axr2 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
>;
};
mcasp1_pins_sleep: mcasp1_pins_sleep {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
dcan1_pins_default: dcan1_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE2) /* uart0_ctsn.d_can1_tx */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* uart0_rtsn.d_can1_rx */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT, MUX_MODE2) /* uart0_ctsn.d_can1_tx */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLDOWN, MUX_MODE2) /* uart0_rtsn.d_can1_rx */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 2c2d8b5b8cf5..8fc8056db94f 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -227,241 +227,241 @@
lcd_pins_default: lcd_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x820, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad8.lcd_data23 */
- AM33XX_IOPAD(0x824, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad9.lcd_data22 */
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad10.lcd_data21 */
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad11.lcd_data20 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad12.lcd_data19 */
- AM33XX_IOPAD(0x834, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad13.lcd_data18 */
- AM33XX_IOPAD(0x838, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad14.lcd_data17 */
- AM33XX_IOPAD(0x83c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad15.lcd_data16 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad8.lcd_data23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad9.lcd_data22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad10.lcd_data21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad11.lcd_data20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad12.lcd_data19 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad13.lcd_data18 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad14.lcd_data17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad15.lcd_data16 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
>;
};
lcd_pins_sleep: lcd_pins_sleep {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad8.lcd_data23 */
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad9.lcd_data22 */
- AM33XX_IOPAD(0x828, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad10.lcd_data21 */
- AM33XX_IOPAD(0x82c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad11.lcd_data20 */
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad12.lcd_data19 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad13.lcd_data18 */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad14.lcd_data17 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad15.lcd_data16 */
- AM33XX_IOPAD(0x8a0, PULL_DISABLE | MUX_MODE7) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PULL_DISABLE | MUX_MODE7) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PULL_DISABLE | MUX_MODE7) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PULL_DISABLE | MUX_MODE7) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PULL_DISABLE | MUX_MODE7) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PULL_DISABLE | MUX_MODE7) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PULL_DISABLE | MUX_MODE7) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PULL_DISABLE | MUX_MODE7) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PULL_DISABLE | MUX_MODE7) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PULL_DISABLE | MUX_MODE7) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PULL_DISABLE | MUX_MODE7) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PULL_DISABLE | MUX_MODE7) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PULL_DISABLE | MUX_MODE7) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PULL_DISABLE | MUX_MODE7) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PULL_DISABLE | MUX_MODE7) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PULL_DISABLE | MUX_MODE7) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad8.lcd_data23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad9.lcd_data22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad10.lcd_data21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad11.lcd_data20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad12.lcd_data19 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad13.lcd_data18 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad14.lcd_data17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad15.lcd_data16 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
user_leds_s0: user_leds_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x810, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad4.gpio1_4 */
- AM33XX_IOPAD(0x814, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad5.gpio1_5 */
- AM33XX_IOPAD(0x818, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad6.gpio1_6 */
- AM33XX_IOPAD(0x81c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad7.gpio1_7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad4.gpio1_4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad5.gpio1_5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad6.gpio1_6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad7.gpio1_7 */
>;
};
gpio_keys_s0: gpio_keys_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x894, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_oen_ren.gpio2_3 */
- AM33XX_IOPAD(0x890, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_wait0.gpio0_30 */
- AM33XX_IOPAD(0x89c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ben0_cle.gpio2_5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_oen_ren.gpio2_3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_wait0.gpio0_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ben0_cle.gpio2_5 */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
clkout2_pin: pinmux_clkout2_pin {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
ecap2_pins: backlight_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x99c, MUX_MODE4) /* mcasp0_ahclkr.ecap2_in_pwm2_out */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, 0x0, MUX_MODE4) /* mcasp0_ahclkr.ecap2_in_pwm2_out */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
/* Slave 2 */
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
- AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* Slave 2 reset value*/
- AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x848, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x84c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
>;
};
mcasp1_pins: mcasp1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
- AM33XX_IOPAD(0x908, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* mii1_col.mcasp1_axr2 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_OUTPUT_PULLDOWN, MUX_MODE4) /* mii1_col.mcasp1_axr2 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE4) /* rmii1_ref_clk.mcasp1_axr3 */
>;
};
mcasp1_pins_sleep: mcasp1_pins_sleep {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc2_pins: pinmux_mmc2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_31 */
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_wpn.gpio0_31 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
>;
};
wl12xx_gpio: pinmux_wl12xx_gpio {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x87c, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_csn0.gpio1_29 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_csn0.gpio1_29 */
>;
};
};
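
For reference, the pattern repeated throughout these hunks is mechanical: each AM33XX_IOPAD entry, which takes a literal padconf register offset and a single OR'd value, is rewritten as an AM33XX_PADCONF entry that names the pad via an AM335X_PIN_* constant (presumably from the SoC's dt-bindings pinctrl header) and passes the pinconf flags and the mux mode as separate arguments. A minimal before/after sketch, lifted from the uart0 entries above rather than anything new:

	/* old form: literal offset, flags and mux mode OR'd into one value */
	AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0)	/* uart0_rxd.uart0_rxd */
	/* new form: named pad, flags and mux mode as separate arguments */
	AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)

The trailing signal-name comments are dropped where the macro arguments already make the mapping obvious, and kept where the pad is muxed to a different function (for example gpmc_ad8.lcd_data23 above).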
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 9ac775c71072..4365684fa66f 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -157,111 +157,111 @@
&am33xx_pinmux {
user_leds: user_leds {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x91c, PIN_OUTPUT | MUX_MODE7) /* (J18) gmii1_txd3.gpio0[16] */
- AM33XX_IOPAD(0x920, PIN_OUTPUT | MUX_MODE7) /* (K15) gmii1_txd2.gpio0[17] */
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT | MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] */
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT | MUX_MODE7) /* (D14) xdma_event_intr1.gpio0[20] */
- AM33XX_IOPAD(0x880, PIN_OUTPUT | MUX_MODE7) /* (U9) gpmc_csn1.gpio1[30] */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT | MUX_MODE7) /* (K18) gmii1_txclk.gpio3[9] */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT, MUX_MODE7) /* (J18) gmii1_txd3.gpio0[16] */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT, MUX_MODE7) /* (K15) gmii1_txd2.gpio0[17] */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT, MUX_MODE7) /* (A15) xdma_event_intr0.gpio0[19] */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT, MUX_MODE7) /* (D14) xdma_event_intr1.gpio0[20] */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_OUTPUT, MUX_MODE7) /* (U9) gpmc_csn1.gpio1[30] */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT, MUX_MODE7) /* (K18) gmii1_txclk.gpio3[9] */
>;
};
mmc0_pins_default: mmc0_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* (F17) mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* (F18) mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* (G15) mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c0_pins_default: i2c0_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT | MUX_MODE0) /* (C17) I2C0_SDA.I2C0_SDA */
- AM33XX_IOPAD(0x98c, PIN_INPUT | MUX_MODE0) /* (C16) I2C0_SCL.I2C0_SCL */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT, MUX_MODE0)
>;
};
spi0_pins_default: spi0_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* (A17) spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* (B17) spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* (B16) spi0_d1.spi0_d1 */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* (A16) spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE0) /* (C15) spi0_cs1.spi0_cs1 */
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* (B12) mcasp0_aclkr.gpio3[18] */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLUP, MUX_MODE7) /* (B12) mcasp0_aclkr.gpio3[18] */
>;
};
uart3_pins_default: uart3_pins_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* (L17) gmii1_rxd3.uart3_rxd */
- AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLUP | MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1) /* (L17) gmii1_rxd3.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLUP, MUX_MODE1) /* (L16) gmii1_rxd2.uart3_txd */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1, RMII mode */
- AM33XX_IOPAD(0x90c, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_crs.rmii1_crs_dv */
- AM33XX_IOPAD(0x944, (PIN_INPUT_PULLUP | MUX_MODE0)) /* rmii1_refclk.rmii1_refclk */
- AM33XX_IOPAD(0x940, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x93c, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x910, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_rxerr.rmii1_rxerr */
- AM33XX_IOPAD(0x928, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x924, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x914, (PIN_OUTPUT_PULLDOWN | MUX_MODE1)) /* mii1_txen.rmii1_txen */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txen.rmii1_txen */
/* Slave 2, RMII mode */
- AM33XX_IOPAD(0x870, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_wait0.rmii2_crs_dv */
- AM33XX_IOPAD(0x908, (PIN_INPUT_PULLUP | MUX_MODE1)) /* mii1_col.rmii2_refclk */
- AM33XX_IOPAD(0x86c, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_a11.rmii2_rxd0 */
- AM33XX_IOPAD(0x868, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_a10.rmii2_rxd1 */
- AM33XX_IOPAD(0x874, (PIN_INPUT_PULLUP | MUX_MODE3)) /* gpmc_wpn.rmii2_rxerr */
- AM33XX_IOPAD(0x854, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a5.rmii2_txd0 */
- AM33XX_IOPAD(0x850, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a4.rmii2_txd1 */
- AM33XX_IOPAD(0x840, (PIN_OUTPUT_PULLDOWN | MUX_MODE3)) /* gpmc_a0.rmii2_txen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_wait0.rmii2_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_col.rmii2_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a11.rmii2_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a10.rmii2_rxd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_wpn.rmii2_rxerr */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* gpmc_a5.rmii2_txd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* gpmc_a4.rmii2_txd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* gpmc_a0.rmii2_txen */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x90c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x944, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x940, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x93c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x910, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x928, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x924, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x914, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* Slave 2 reset value */
- AM33XX_IOPAD(0x870, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x908, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x86c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x868, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x874, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x854, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x850, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x840, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, (PIN_OUTPUT_PULLUP | MUX_MODE0)) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, (PIN_INPUT_PULLDOWN | MUX_MODE7))
- AM33XX_IOPAD(0x94c, (PIN_INPUT_PULLDOWN | MUX_MODE7))
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index cbd22f25de95..312deb6cf6a2 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -57,41 +57,41 @@
&am33xx_pinmux {
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
nandflash_pins: pinmux_nandflash_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_30 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_wpn.gpio0_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0)
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
leds_pins: pinmux_leds_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x85c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a7.gpio1_23 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts
index d0e8e720a4d6..aa4cd2b8d4b6 100644
--- a/arch/arm/boot/dts/am335x-lxm.dts
+++ b/arch/arm/boot/dts/am335x-lxm.dts
@@ -46,109 +46,109 @@
&am33xx_pinmux {
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT, MUX_MODE0)
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_int */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii1_crs_dv */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii1_rxer */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* rmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* rmii1_td0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii1_rd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii1_rd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_int */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii1_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii1_rxer */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* rmii1_txen */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* rmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* rmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii1_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
/* Slave 2 */
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_txen */
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td0 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd0 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_crs_dv */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rxer */
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_int */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii2_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_txen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rxer */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_int */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii2_refclk */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_int */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_crs_dv */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_rxer */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_td0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_rd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_rd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_int */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_rxer */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_txen */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii1_refclk */
/* Slave 2 reset value*/
- AM33XX_IOPAD(0x840, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_txen */
- AM33XX_IOPAD(0x850, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_td0 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_rd0 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_crs_dv */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_rxer */
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_int */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE7) /* rmii2_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_txen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_rxer */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_int */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7) /* rmii2_refclk */
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
emmc_pins: pinmux_emmc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
index cb5913a69837..671d4a5da9c4 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
+++ b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
@@ -33,54 +33,54 @@
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
push_button_pins: pinmux_push_button {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_hsync.gpio2_23 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_hsync.gpio2_23 */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
mmc1_pins_default: pinmux_mmc1_pins {
pinctrl-single,pins = <
/* eMMC */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad12.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad13.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad14.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad15.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad8.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad9.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad10.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad11.mmc1_dat7 */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad12.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad13.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad14.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad15.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad8.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad9.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad10.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad11.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
>;
};
spi0_pins: pinmux_spi0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d1.spi0_d1 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2101.dts b/arch/arm/boot/dts/am335x-moxa-uc-2101.dts
index 48aee6de4cdb..5923b6e7e1cb 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-2101.dts
+++ b/arch/arm/boot/dts/am335x-moxa-uc-2101.dts
@@ -31,23 +31,23 @@
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mii1_refclk.rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txen.rmii1_txen */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
>;
};
spi1_pins: pinmux_spi1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x964, PIN_INPUT_PULLUP | MUX_MODE4) /* ecap0_in_pwm0_out.spi1_sclk */
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE4) /* uart1_ctsn.spi1_cs0 */
- AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE4) /* uart0_ctsn.spi1_d0 */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE4) /* uart0_rtsn.spi1_d1 */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_INPUT_PULLUP, MUX_MODE4) /* ecap0_in_pwm0_out.spi1_sclk */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE4) /* uart1_ctsn.spi1_cs0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE4) /* uart0_ctsn.spi1_d0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE4) /* uart0_rtsn.spi1_d1 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
index e562ce40f290..5a2fb4bd4e02 100644
--- a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
+++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
@@ -104,79 +104,79 @@
minipcie_pins: pinmux_minipcie {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_pclk.gpio2_24 */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_ac_bias_en.gpio2_25 */
- AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | MUX_MODE7) /* lcd_vsync.gpio2_22 Power off PIN*/
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_pclk.gpio2_24 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_ac_bias_en.gpio2_25 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) /* lcd_vsync.gpio2_22 Power off PIN*/
>;
};
push_button_pins: pinmux_push_button {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLDOWN | MUX_MODE7) /* mcasp0_ahcklx.gpio3_21 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mcasp0_ahcklx.gpio3_21 */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_ctsn.i2c1_sda */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart0_rtsn.i2c1_scl */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart0_ctsn.i2c1_sda */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart0_rtsn.i2c1_scl */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97C, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT, MUX_MODE0)
>;
};
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8d8, PIN_INPUT | MUX_MODE6) /* lcd_data14.uart5_ctsn */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT_PULLDOWN | MUX_MODE6) /* lcd_data15.uart5_rtsn */
- AM33XX_IOPAD(0x8c4, PIN_INPUT_PULLUP | MUX_MODE4) /* lcd_data9.uart5_rxd */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE4) /* lcd_data8.uart5_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_INPUT, MUX_MODE6) /* lcd_data14.uart5_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT_PULLDOWN, MUX_MODE6) /* lcd_data15.uart5_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_INPUT_PULLUP, MUX_MODE4) /* lcd_data9.uart5_rxd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE4) /* lcd_data8.uart5_txd */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txen.rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mii1_refclk.rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
/* Slave 2 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_crs_dv */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rxer */
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_txen */
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* rmii2_td0 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE3) /* rmii2_rd0 */
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE1) /* rmii2_refclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_crs_dv */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rxer */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_txen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* rmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE3) /* rmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE1) /* rmii2_refclk */
>;
};
@@ -184,46 +184,46 @@
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
mmc0_pins_default: pinmux_mmc0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd */
- AM33XX_IOPAD(0x990, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkx.gpio3_14 */
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkx.gpio3_18 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLUP, MUX_MODE7) /* mcasp0_aclkx.gpio3_14 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLUP, MUX_MODE7) /* mcasp0_aclkx.gpio3_18 */
>;
};
mmc2_pins_default: pinmux_mmc2_pins {
pinctrl-single,pins = <
/* eMMC */
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad12.mmc2_dat0 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad13.mmc2_dat1 */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad14.mmc2_dat2 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad15.mmc2_dat3 */
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad8.mmc2_dat4 */
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad9.mmc2_dat5 */
- AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad10.mmc2_dat6 */
- AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad11.mmc2_dat7 */
- AM33XX_IOPAD(0x888, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
- AM33XX_IOPAD(0x88c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_clk.mmc2_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad12.mmc2_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad13.mmc2_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad14.mmc2_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad15.mmc2_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad8.mmc2_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad9.mmc2_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad10.mmc2_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ad11.mmc2_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CLK, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_clk.mmc2_clk */
>;
};
spi0_pins: pinmux_spi0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x95C, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d1.spi0_d1 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 9c9143ed4003..0052657331ee 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -41,121 +41,121 @@
misc_pins: misc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x95c, PIN_OUTPUT | MUX_MODE7) /* spi0_cs0.gpio0_5 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_OUTPUT, MUX_MODE7) /* spi0_cs0.gpio0_5 */
>;
};
gpmc_pins: gpmc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad8.gpmc_ad8 */
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad9.gpmc_ad9 */
- AM33XX_IOPAD(0x828, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad10.gpmc_ad10 */
- AM33XX_IOPAD(0x82c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad11.gpmc_ad11 */
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad12.gpmc_ad12 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad13.gpmc_ad13 */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad14.gpmc_ad14 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad15.gpmc_ad15 */
-
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x880, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn1.gpmc_csn1 */
- AM33XX_IOPAD(0x884, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn2.gpmc_csn2 */
- AM33XX_IOPAD(0x888, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn3.gpmc_csn3 */
-
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) /* gpmc_ben0_cle.gpmc_ben0_cle */
-
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE1) /* lcd_data1.gpmc_a1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE1) /* lcd_data2.gpmc_a2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE1) /* lcd_data3.gpmc_a3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE1) /* lcd_data4.gpmc_a4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE1) /* lcd_data5.gpmc_a5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE1) /* lcd_data6.gpmc_a6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE1) /* lcd_data7.gpmc_a7 */
-
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE1) /* lcd_vsync.gpmc_a8 */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE1) /* lcd_hsync.gpmc_a9 */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE1) /* lcd_pclk.gpmc_a10 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLUP, MUX_MODE0)
+
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_OUTPUT, MUX_MODE0)
+
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0)
+
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE1) /* lcd_data1.gpmc_a1 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE1) /* lcd_data2.gpmc_a2 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE1) /* lcd_data3.gpmc_a3 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE1) /* lcd_data4.gpmc_a4 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE1) /* lcd_data5.gpmc_a5 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE1) /* lcd_data6.gpmc_a6 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE1) /* lcd_data7.gpmc_a7 */
+
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE1) /* lcd_vsync.gpmc_a8 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE1) /* lcd_hsync.gpmc_a9 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE1) /* lcd_pclk.gpmc_a10 */
>;
};
i2c0_pins: i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLDOWN | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLDOWN, MUX_MODE0)
>;
};
uart0_pins: uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT, MUX_MODE0)
>;
};
uart1_pins: uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_OUTPUT | MUX_MODE7) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT | MUX_MODE7) /* uart1_rtsn.uart1_rtsn */
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT, MUX_MODE0)
>;
};
uart2_pins: uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8c0, PIN_INPUT_PULLUP | MUX_MODE7) /* lcd_data8.gpio2[14] */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE7) /* lcd_data9.gpio2[15] */
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1) /* spi0_sclk.uart2_rxd */
- AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1) /* spi0_d0.uart2_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_INPUT_PULLUP, MUX_MODE7) /* lcd_data8.gpio2[14] */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE7) /* lcd_data9.gpio2[15] */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE1) /* spi0_sclk.uart2_rxd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE1) /* spi0_d0.uart2_txd */
>;
};
uart3_pins: uart3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8c8, PIN_INPUT_PULLUP | MUX_MODE6) /* lcd_data10.uart3_ctsn */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE6) /* lcd_data11.uart3_rtsn */
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE1) /* spi0_cs1.uart3_rxd */
- AM33XX_IOPAD(0x964, PIN_OUTPUT | MUX_MODE1) /* ecap0_in_pwm0_out.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_INPUT_PULLUP, MUX_MODE6) /* lcd_data10.uart3_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE6) /* lcd_data11.uart3_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE1) /* spi0_cs1.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_OUTPUT, MUX_MODE1) /* ecap0_in_pwm0_out.uart3_txd */
>;
};
uart4_pins: uart4_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8d0, PIN_INPUT_PULLUP | MUX_MODE6) /* lcd_data12.uart4_ctsn */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE6) /* lcd_data13.uart4_rtsn */
- AM33XX_IOPAD(0x968, PIN_INPUT | MUX_MODE1) /* uart0_ctsn.uart4_rxd */
- AM33XX_IOPAD(0x96c, PIN_OUTPUT | MUX_MODE1) /* uart0_rtsn.uart4_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_INPUT_PULLUP, MUX_MODE6) /* lcd_data12.uart4_ctsn */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE6) /* lcd_data13.uart4_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT, MUX_MODE1) /* uart0_ctsn.uart4_rxd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_OUTPUT, MUX_MODE1) /* uart0_rtsn.uart4_txd */
>;
};
uart5_pins: uart5_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8d8, PIN_INPUT | MUX_MODE4) /* lcd_data14.uart5_rxd */
- AM33XX_IOPAD(0x944, PIN_OUTPUT | MUX_MODE3) /* rmiii1_refclk.uart5_txd */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_INPUT, MUX_MODE4) /* lcd_data14.uart5_rxd */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT, MUX_MODE3) /* rmiii1_refclk.uart5_txd */
>;
};
mmc1_pins: mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x9e8, PIN_INPUT_PULLUP | MUX_MODE7) /* emu1.gpio3[8] */
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLUP | MUX_MODE7) /* mcasp0_aclkr.gpio3[18] */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_clk.mmc0_clk */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
+ AM33XX_PADCONF(AM335X_PIN_EMU1, PIN_INPUT_PULLUP, MUX_MODE7) /* emu1.gpio3[8] */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLUP, MUX_MODE7) /* mcasp0_aclkr.gpio3[18] */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
index 95d54cf3849e..f47cc9fea253 100644
--- a/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
+++ b/arch/arm/boot/dts/am335x-osd3358-sm-red.dts
@@ -40,61 +40,61 @@
&am33xx_pinmux {
nxp_hdmi_bonelt_pins: nxp-hdmi-bonelt-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr0 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
nxp_hdmi_bonelt_off_pins: nxp-hdmi-bonelt-off-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr0 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_OUTPUT_PULLDOWN, MUX_MODE3)
>;
};
mcasp0_pins: mcasp0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
- AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
- AM33XX_IOPAD(0x994, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
- AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
- AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.GPIO1_27 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2*/
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.GPIO1_27 */
>;
};
flash_enable: flash-enable {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x944, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* rmii1_ref_clk.gpio0_29 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* rmii1_ref_clk.gpio0_29 */
>;
};
imu_interrupt: imu-interrupt {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7) /* mii1_rx_er.gpio3_2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_rx_er.gpio3_2 */
>;
};
ethernet_interrupt: ethernet-interrupt{
pinctrl-single,pins = <
- AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE7) /* mii1_col.gpio3_0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT_PULLDOWN, MUX_MODE7) /* mii1_col.gpio3_0 */
>;
};
};
@@ -269,109 +269,109 @@
user_leds_s0: user-leds-s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- AM33XX_IOPAD(0x85c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_a7.gpio1_23 */
- AM33XX_IOPAD(0x860, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a8.gpio1_24 */
>;
};
i2c2_pins: pinmux-i2c2-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_ctsn.i2c2_sda */
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_rtsn.i2c2_scl */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_ctsn.i2c2_sda */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_rtsn.i2c2_scl */
>;
};
uart0_pins: pinmux-uart0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
clkout2_pin: pinmux-clkout2-pin {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
cpsw_default: cpsw-default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_txd3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_txd2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_txd0 */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_txclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rxclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rxd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rxd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2)
>;
};
cpsw_sleep: cpsw-sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci-mdio-default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci-mdio-sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux-mmc1-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* (G15) mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* (F18) mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* (F17) mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
emmc_pins: pinmux-emmc-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-osd335x-common.dtsi b/arch/arm/boot/dts/am335x-osd335x-common.dtsi
index f8ff473f94f0..a8b6842489f7 100644
--- a/arch/arm/boot/dts/am335x-osd335x-common.dtsi
+++ b/arch/arm/boot/dts/am335x-osd335x-common.dtsi
@@ -36,8 +36,8 @@
&am33xx_pinmux {
i2c0_pins: pinmux-i2c0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* (C17) I2C0_SDA.I2C0_SDA */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* (C16) I2C0_SCL.I2C0_SCL */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 1ec8e0d80191..baceaa7bb33b 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -79,15 +79,15 @@
&am33xx_pinmux {
user_buttons_pins: pinmux_user_buttons {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9e4, PIN_INPUT_PULLDOWN | MUX_MODE7) /* emu0.gpio3_7 */
- AM33XX_IOPAD(0x9e8, PIN_INPUT_PULLDOWN | MUX_MODE7) /* emu1.gpio3_8 */
+ AM33XX_PADCONF(AM335X_PIN_EMU0, PIN_INPUT_PULLDOWN, MUX_MODE7) /* emu0.gpio3_7 */
+ AM33XX_PADCONF(AM335X_PIN_EMU1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* emu1.gpio3_8 */
>;
};
user_leds_pins: pinmux_user_leds {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn1.gpio1_30 */
- AM33XX_IOPAD(0x884, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn2.gpio1_31 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_csn1.gpio1_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* gpmc_csn2.gpio1_31 */
>;
};
};
@@ -96,8 +96,8 @@
&am33xx_pinmux {
dcan1_pins: pinmux_dcan1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_OUTPUT_PULLUP | MUX_MODE2) /* uart1_rxd.dcan1_tx_mux2 */
- AM33XX_IOPAD(0x984, PIN_INPUT_PULLUP | MUX_MODE2) /* uart1_txd.dcan1_rx_mux2 */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_OUTPUT_PULLUP, MUX_MODE2) /* uart1_rxd.dcan1_tx_mux2 */
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_INPUT_PULLUP, MUX_MODE2) /* uart1_txd.dcan1_rx_mux2 */
>;
};
};
@@ -112,18 +112,18 @@
&am33xx_pinmux {
ethernet1_pins: pinmux_ethernet1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x840, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
- AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a0.rgmii2_tctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a1.rgmii2_rctl */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a2.rgmii2_td3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a3.rgmii2_td2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a4.rgmii2_td1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a5.rgmii2_td0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* gpmc_a6.rgmii2_tclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a7.rgmii2_rclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a8.rgmii2_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a9.rgmii2_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a10.rgmii2_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE2) /* gpmc_a11.rgmii2_rd0 */
>;
};
};
@@ -171,8 +171,8 @@
cb_gpio_pins: pinmux_cb_gpio {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* uart0_ctsn.gpio1_8 */
- AM33XX_IOPAD(0x96c, PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* uart0_ctsn.gpio1_8 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE7) /* uart0_rtsn.gpio1_9 */
>;
};
};
@@ -181,13 +181,13 @@
&am33xx_pinmux {
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE7) /* spi0_cs1.mmc0_sdcd */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT_PULLUP, MUX_MODE7) /* spi0_cs1.mmc0_sdcd */
>;
};
};
@@ -205,31 +205,31 @@
&am33xx_pinmux {
uart0_pins: pinmux_uart0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
- AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart2_pins: pinmux_uart2 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_tx_clk.uart2_rxd */
- AM33XX_IOPAD(0x930, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_rx_clk.uart2_txd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_tx_clk.uart2_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_rx_clk.uart2_txd */
>;
};
uart3_pins: pinmux_uart3 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE1) /* mii1_rxd3.uart3_rxd */
- AM33XX_IOPAD(0x938, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd2.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE1) /* mii1_rxd3.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* mii1_rxd2.uart3_txd */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-pdu001.dts b/arch/arm/boot/dts/am335x-pdu001.dts
index ae43d61f4e8b..3141255f72c2 100644
--- a/arch/arm/boot/dts/am335x-pdu001.dts
+++ b/arch/arm/boot/dts/am335x-pdu001.dts
@@ -92,162 +92,162 @@
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c1_pins: pinmux_i2c1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d1.i2c1_sda */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_cs0.i2c1_scl */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_d1.i2c1_sda */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_cs0.i2c1_scl */
>;
};
i2c2_pins: pinmux_i2c2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_clk.i2c2_sda */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE2) /* spi0_d0.i2c2_scl */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_clk.i2c2_sda */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE2) /* spi0_d0.i2c2_scl */
>;
};
spi1_pins: pinmux_spi1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x990, PIN_OUTPUT | MUX_MODE3) /* mcasp0_aclkx.spi1_sclk */
- AM33XX_IOPAD(0x994, PIN_OUTPUT | MUX_MODE3) /* mcasp0_fsx.spi1_d0 */
- AM33XX_IOPAD(0x998, PIN_INPUT_PULLDOWN | MUX_MODE3) /* mcasp0_axr0.spi1_d1 */
- AM33XX_IOPAD(0x99C, PIN_OUTPUT | MUX_MODE3) /* mcasp0_ahclkr.spi1_cs0 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT, MUX_MODE3) /* mcasp0_aclkx.spi1_sclk */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_OUTPUT, MUX_MODE3) /* mcasp0_fsx.spi1_d0 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR0, PIN_INPUT_PULLDOWN, MUX_MODE3) /* mcasp0_axr0.spi1_d1 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT, MUX_MODE3) /* mcasp0_ahclkr.spi1_cs0 */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x96C, PIN_OUTPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart3_pins: pinmux_uart3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE1) /* spi0_cs1.uart3_rxd */
- AM33XX_IOPAD(0x964, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* ecap0_in_pwm0_out.uart3_txd */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT_PULLUP, MUX_MODE1) /* spi0_cs1.uart3_rxd */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* ecap0_in_pwm0_out.uart3_txd */
>;
};
clkout2_pin: pinmux_clkout2_pin {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Port 1 (emac0) */
- AM33XX_IOPAD(0x908, PIN_INPUT | MUX_MODE0) /* mii1_col.mii1_col */
- AM33XX_IOPAD(0x90C, PIN_INPUT | MUX_MODE0) /* mii1_crs.mii1_crs */
- AM33XX_IOPAD(0x910, PIN_INPUT | MUX_MODE0) /* mii1_rxer.mii1_rxer */
- AM33XX_IOPAD(0x914, PIN_OUTPUT | MUX_MODE0) /* mii1_txen.mii1_txen */
- AM33XX_IOPAD(0x918, PIN_INPUT | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
- AM33XX_IOPAD(0x92c, PIN_INPUT | MUX_MODE0) /* mii1_txclk.mii1_txclk */
- AM33XX_IOPAD(0x930, PIN_INPUT | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
- AM33XX_IOPAD(0x934, PIN_INPUT | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT, MUX_MODE0)
/* Port 2 (emac1) */
- AM33XX_IOPAD(0x840, PIN_OUTPUT | MUX_MODE1) /* mii2_txen.gpmc_a0 */
- AM33XX_IOPAD(0x844, PIN_INPUT | MUX_MODE1) /* mii2_rxdv.gpmc_a1 */
- AM33XX_IOPAD(0x848, PIN_OUTPUT | MUX_MODE1) /* mii2_txd3.gpmc_a2 */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT | MUX_MODE1) /* mii2_txd2.gpmc_a3 */
- AM33XX_IOPAD(0x850, PIN_OUTPUT | MUX_MODE1) /* mii2_txd1.gpmc_a4 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE1) /* mii2_txd0.gpmc_a5 */
- AM33XX_IOPAD(0x858, PIN_INPUT | MUX_MODE1) /* mii2_txclk.gpmc_a6 */
- AM33XX_IOPAD(0x85c, PIN_INPUT | MUX_MODE1) /* mii2_rxclk.gpmc_a7 */
- AM33XX_IOPAD(0x860, PIN_INPUT | MUX_MODE1) /* mii2_rxd3.gpmc_a8 */
- AM33XX_IOPAD(0x864, PIN_INPUT | MUX_MODE1) /* mii2_rxd2.gpmc_a9 */
- AM33XX_IOPAD(0x868, PIN_INPUT | MUX_MODE1) /* mii2_rxd1.gpmc_a10 */
- AM33XX_IOPAD(0x86C, PIN_INPUT | MUX_MODE1) /* mii2_rxd0.gpmc_a11 */
- AM33XX_IOPAD(0x870, PIN_INPUT | MUX_MODE1) /* mii2_crs.gpmc_wait0 */
- AM33XX_IOPAD(0x874, PIN_INPUT | MUX_MODE1) /* mii2_rxer.gpmc_wpn */
- AM33XX_IOPAD(0x878, PIN_INPUT | MUX_MODE1) /* mii2_col.gpmc_ben1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT, MUX_MODE1) /* mii2_txen.gpmc_a0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT, MUX_MODE1) /* mii2_rxdv.gpmc_a1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT, MUX_MODE1) /* mii2_txd3.gpmc_a2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT, MUX_MODE1) /* mii2_txd2.gpmc_a3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT, MUX_MODE1) /* mii2_txd1.gpmc_a4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE1) /* mii2_txd0.gpmc_a5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT, MUX_MODE1) /* mii2_txclk.gpmc_a6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT, MUX_MODE1) /* mii2_rxclk.gpmc_a7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT, MUX_MODE1) /* mii2_rxd3.gpmc_a8 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT, MUX_MODE1) /* mii2_rxd2.gpmc_a9 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT, MUX_MODE1) /* mii2_rxd1.gpmc_a10 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT, MUX_MODE1) /* mii2_rxd0.gpmc_a11 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT, MUX_MODE1) /* mii2_crs.gpmc_wait0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT, MUX_MODE1) /* mii2_rxer.gpmc_wpn */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT, MUX_MODE1) /* mii2_col.gpmc_ben1 */
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
mmc1_pins: pinmux_mmc1_pins {
/* eMMC */
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
mmc2_pins: pinmux_mmc2_pins {
/* SD cardcage */
pinctrl-single,pins = <
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
/* card change signal for frontpanel SD cardcage */
- AM33XX_IOPAD(0x890, PIN_INPUT | MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_INPUT, MUX_MODE7) /* gpmc_advn_ale.gpio2_2 */
>;
};
lcd_pins_s0: lcd_pins_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
>;
};
dcan0_pins: pinmux_dcan0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_OUTPUT | MUX_MODE2) /* uart1_ctsn.d_can0_tx */
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* uart1_rtsn.d_can0_rx */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_OUTPUT, MUX_MODE2) /* uart1_ctsn.d_can0_tx */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLDOWN, MUX_MODE2) /* uart1_rtsn.d_can0_rx */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 6be79b8349ac..5c3e49f93ac4 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -93,14 +93,14 @@
&am33xx_pinmux {
i2c0_pins: pinmux_i2c0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c1_pins: pinmux_i2c1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90C, PIN_INPUT_PULLUP | MUX_MODE3) /* mii1_crs,i2c1_sda */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE3) /* mii1_rxerr,i2c1_scl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLUP, MUX_MODE3) /* mii1_crs,i2c1_sda */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE3) /* mii1_rxerr,i2c1_scl */
>;
};
};
@@ -130,7 +130,7 @@
&am33xx_pinmux {
accel_pins: pinmux_accel {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x898, PIN_INPUT | MUX_MODE7) /* gpmc_wen.gpio2_4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_INPUT, MUX_MODE7) /* gpmc_wen.gpio2_4 */
>;
};
};
@@ -177,12 +177,12 @@
&am33xx_pinmux {
audio_pins: pinmux_audio {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
- AM33XX_IOPAD(0x994, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
- AM33XX_IOPAD(0x990, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
- AM33XX_IOPAD(0x998, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr0.mcasp0_axr0 */
- AM33XX_IOPAD(0x9a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr1.mcasp0_axr1 */
- AM33XX_IOPAD(0x840, PIN_OUTPUT | MUX_MODE7) /* gpmc_a0.gpio1_16 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR1, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT, MUX_MODE7) /* gpmc_a0.gpio1_16 */
>;
};
};
@@ -228,36 +228,36 @@
&am33xx_pinmux {
lcd_pins: pinmux_lcd {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x820, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad8.lcd_data16 */
- AM33XX_IOPAD(0x824, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad9.lcd_data17 */
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad10.lcd_data18 */
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad11.lcd_data19 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad12.lcd_data20 */
- AM33XX_IOPAD(0x834, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad13.lcd_data21 */
- AM33XX_IOPAD(0x838, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad14.lcd_data22 */
- AM33XX_IOPAD(0x83c, PIN_OUTPUT | MUX_MODE1) /* gpmc_ad15.lcd_data23 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad8.lcd_data16 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad9.lcd_data17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad10.lcd_data18 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad11.lcd_data19 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad12.lcd_data20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad13.lcd_data21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad14.lcd_data22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad15.lcd_data23 */
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
/* Display Enable */
- AM33XX_IOPAD(0x86c, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a11.gpio1_27 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a11.gpio1_27 */
>;
};
};
@@ -299,29 +299,29 @@
&am33xx_pinmux {
ethernet_pins: pinmux_ethernet {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd3.rgmii1_rxd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd2.rgmii1_rxd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd1.rgmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE2) /* mii1_rxd0.rgmii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE2)
/* ethernet interrupt */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLUP | MUX_MODE7) /* rmii2_refclk.gpio0_29 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLUP, MUX_MODE7) /* rmii2_refclk.gpio0_29 */
/* ethernet PHY nReset */
- AM33XX_IOPAD(0x908, PIN_OUTPUT_PULLUP | MUX_MODE7) /* mii1_col.gpio3_0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_COL, PIN_OUTPUT_PULLUP, MUX_MODE7) /* mii1_col.gpio3_0 */
>;
};
mdio_pins: pinmux_mdio {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
};
@@ -364,45 +364,45 @@
&am33xx_pinmux {
sd_pins: pinmux_sd_card {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* spi0_cs1.gpio0_6 */
>;
};
emmc_pins: pinmux_emmc {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
/* EMMC nReset */
- AM33XX_IOPAD(0x874, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_31 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_wpn.gpio0_31 */
>;
};
wireless_pins: pinmux_wireless {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a1.mmc2_dat0 */
- AM33XX_IOPAD(0x848, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a2.mmc2_dat1 */
- AM33XX_IOPAD(0x84c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_a3.mmc2_dat2 */
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ben1.mmc2_dat3 */
- AM33XX_IOPAD(0x888, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
- AM33XX_IOPAD(0x88c, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_clk.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a1.mmc2_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a2.mmc2_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_a3.mmc2_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_ben1.mmc2_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_csn3.mmc2_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CLK, PIN_INPUT_PULLUP, MUX_MODE3) /* gpmc_clk.mmc1_clk */
/* WLAN nReset */
- AM33XX_IOPAD(0x860, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a8.gpio1_24 */
/* WLAN nPower down */
- AM33XX_IOPAD(0x870, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_wait0.gpio0_30 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_wait0.gpio0_30 */
/* 32kHz Clock */
- AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* xdma_event_intr1.clkout2 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_OUTPUT_PULLDOWN, MUX_MODE3) /* xdma_event_intr1.clkout2 */
>;
};
};
@@ -498,10 +498,10 @@
&am33xx_pinmux {
spi0_pins: pinmux_spi0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x95C, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d1.spi0_d1 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
};
@@ -539,16 +539,16 @@
&am33xx_pinmux {
uart0_pins: pinmux_uart0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97C, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
};
@@ -591,9 +591,9 @@
usb_pins: pinmux_usb {
pinctrl-single,pins = <
/* USB0 Over-Current (active low) */
- AM33XX_IOPAD(0x864, PIN_INPUT | MUX_MODE7) /* gpmc_a9.gpio1_25 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT, MUX_MODE7) /* gpmc_a9.gpio1_25 */
/* USB1 Over-Current (active low) */
- AM33XX_IOPAD(0x868, PIN_INPUT | MUX_MODE7) /* gpmc_a10.gpio1_26 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT, MUX_MODE7) /* gpmc_a10.gpio1_26 */
>;
};
};
@@ -649,16 +649,16 @@
&am33xx_pinmux {
user_leds_pins: pinmux_user_leds {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x850, PIN_OUTPUT | MUX_MODE7) /* gpmc_a4.gpio1_20 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT, MUX_MODE7) /* gpmc_a4.gpio1_20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7) /* gpmc_a5.gpio1_21 */
>;
};
user_buttons_pins: pinmux_user_buttons {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x858, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- AM33XX_IOPAD(0x85C, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_a7.gpio1_21 */
- AM33XX_IOPAD(0x964, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_a8.gpio0_7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_INPUT_PULLUP, MUX_MODE7) /* ecap0_in_pwm0_out.gpio0_7 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 015adb626b03..23c3039c567e 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -57,22 +57,22 @@
&am33xx_pinmux {
ethernet0_pins: pinmux_ethernet0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_crs.rmii1_crs_dv */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxerr.rmii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT | MUX_MODE1) /* mii1_txen.rmii1_txen */
- AM33XX_IOPAD(0x924, PIN_OUTPUT | MUX_MODE1) /* mii1_txd1.rmii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT | MUX_MODE1) /* mii1_txd0.rmii1_txd0 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd1.rmii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mii1_rxd0.rmii1_rxd0 */
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii1_refclk.rmii1_refclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_CRS, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
>;
};
mdio_pins: pinmux_mdio {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
};
@@ -104,8 +104,8 @@
&am33xx_pinmux {
i2c0_pins: pinmux_i2c0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT, MUX_MODE0)
>;
};
};
@@ -144,20 +144,20 @@
&am33xx_pinmux {
nandflash_pins: pinmux_nandflash {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad0.gpmc_ad0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad1.gpmc_ad1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad2.gpmc_ad2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad3.gpmc_ad3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad4.gpmc_ad4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad5.gpmc_ad5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad6.gpmc_ad6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_ad7.gpmc_ad7 */
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE0) /* gpmc_wait0.gpmc_wait0 */
- AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) /* gpmc_csn0.gpmc_csn0 */
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) /* gpmc_advn_ale.gpmc_advn_ale */
- AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) /* gpmc_oen_ren.gpmc_oen_ren */
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) /* gpmc_wen.gpmc_wen */
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) /* gpmc_be0n_cle.gpmc_be0n_cle */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE0)
>;
};
};
@@ -296,10 +296,10 @@
&am33xx_pinmux {
spi0_pins: pinmux_spi0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLDOWN | MUX_MODE0) /* spi0_clk.spi0_clk */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLDOWN | MUX_MODE0) /* spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_d1.spi0_d1 */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* spi0_cs0.spi0_cs0 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
index 62fe5cab9fae..ff4f919d22f6 100644
--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
+++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
@@ -62,74 +62,74 @@
&am33xx_pinmux {
i2c2_pins: pinmux-i2c2-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3) /* (D17) uart1_rtsn.I2C2_SCL */
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3) /* (D18) uart1_ctsn.I2C2_SDA */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* (D17) uart1_rtsn.I2C2_SCL */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* (D18) uart1_ctsn.I2C2_SDA */
>;
};
ehrpwm0_pins: pinmux-ehrpwm0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x990, PIN_OUTPUT_PULLDOWN | MUX_MODE1) /* (A13) mcasp0_aclkx.ehrpwm0A */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE1) /* (A13) mcasp0_aclkx.ehrpwm0A */
>;
};
ehrpwm1_pins: pinmux-ehrpwm1-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x848, PIN_OUTPUT_PULLDOWN | MUX_MODE6) /* (U14) gpmc_a2.ehrpwm1A */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT_PULLDOWN, MUX_MODE6) /* (U14) gpmc_a2.ehrpwm1A */
>;
};
mmc0_pins: pinmux-mmc0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* (G15) mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* (F18) mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* (F17) mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE7) /* (C15) spi0_cs1.gpio0[6] */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4) /* (B12) mcasp0_aclkr.mmc0_sdwp */
>;
};
spi0_pins: pinmux-spi0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* (A17) spi0_sclk.spi0_sclk */
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* (B17) spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* (B16) spi0_d1.spi0_d1 */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* (A16) spi0_cs0.spi0_cs0 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
spi1_pins: pinmux-spi1-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x964, PIN_INPUT_PULLUP | MUX_MODE4) /* (C18) eCAP0_in_PWM0_out.spi1_sclk */
- AM33XX_IOPAD(0x968, PIN_INPUT_PULLUP | MUX_MODE4) /* (E18) uart0_ctsn.spi1_d0 */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE4) /* (E17) uart0_rtsn.spi1_d1 */
- AM33XX_IOPAD(0x9b0, PIN_INPUT_PULLUP | MUX_MODE4) /* (A15) xdma_event_intr0.spi1_cs1 */
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_INPUT_PULLUP, MUX_MODE4) /* (C18) eCAP0_in_PWM0_out.spi1_sclk */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLUP, MUX_MODE4) /* (E18) uart0_ctsn.spi1_d0 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE4) /* (E17) uart0_rtsn.spi1_d1 */
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR0, PIN_INPUT_PULLUP, MUX_MODE4) /* (A15) xdma_event_intr0.spi1_cs1 */
>;
};
usr_leds_pins: pinmux-usr-leds-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT | MUX_MODE7) /* (U15) gpmc_a6.gpio1[22] - USR_LED_1 */
- AM33XX_IOPAD(0x85c, PIN_OUTPUT | MUX_MODE7) /* (T15) gpmc_a7.gpio1[23] - USR_LED_2 */
- AM33XX_IOPAD(0x860, PIN_OUTPUT | MUX_MODE7) /* (V16) gpmc_a8.gpio1[24] - USR_LED_3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7) /* (V15) gpmc_a5.gpio1[21] - USR_LED_0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT, MUX_MODE7) /* (U15) gpmc_a6.gpio1[22] - USR_LED_1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT, MUX_MODE7) /* (T15) gpmc_a7.gpio1[23] - USR_LED_2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT, MUX_MODE7) /* (V16) gpmc_a8.gpio1[24] - USR_LED_3 */
>;
};
uart0_pins: pinmux-uart0-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* (E15) uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* (E16) uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart4_pins: pinmux-uart4-pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE6) /* (T17) gpmc_wait0.uart4_rxd */
- AM33XX_IOPAD(0x874, PIN_OUTPUT_PULLDOWN | MUX_MODE6) /* (U17) gpmc_wpn.uart4_txd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6) /* (T17) gpmc_wait0.uart4_rxd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLDOWN, MUX_MODE6) /* (U17) gpmc_wpn.uart4_txd */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-sancloud-bbe.dts b/arch/arm/boot/dts/am335x-sancloud-bbe.dts
index 35527fdf56cc..7ed27b5c4756 100644
--- a/arch/arm/boot/dts/am335x-sancloud-bbe.dts
+++ b/arch/arm/boot/dts/am335x-sancloud-bbe.dts
@@ -23,70 +23,70 @@
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txen.rgmii1_tctl */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
- AM33XX_IOPAD(0x92c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txen.rgmii1_tctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxdv.rgmii1_rctl */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd3.rgmii1_td3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd2.rgmii1_td2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd1.rgmii1_td1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txd0.rgmii1_td0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_OUTPUT_PULLDOWN, MUX_MODE2) /* mii1_txclk.rgmii1_tclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxclk.rgmii1_rclk */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd3.rgmii1_rd3 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd2.rgmii1_rd2 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd1.rgmii1_rd1 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE2) /* mii1_rxd0.rgmii1_rd0 */
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
usb_hub_ctrl: usb_hub_ctrl {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x944, PIN_OUTPUT_PULLUP | MUX_MODE7) /* rmii1_refclk.gpio0_29 */
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_OUTPUT_PULLUP, MUX_MODE7) /* rmii1_refclk.gpio0_29 */
>;
};
mpu6050_pins: pinmux_mpu6050_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_INPUT | MUX_MODE7) /* uart0_ctsn.gpio1_8 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT, MUX_MODE7) /* uart0_ctsn.gpio1_8 */
>;
};
lps3331ap_pins: pinmux_lps3331ap_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x868, PIN_INPUT | MUX_MODE7) /* gpmc_a10.gpio1_26 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT, MUX_MODE7) /* gpmc_a10.gpio1_26 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-sbc-t335.dts b/arch/arm/boot/dts/am335x-sbc-t335.dts
index 917d7ccc9109..07c46a59f1d2 100644
--- a/arch/arm/boot/dts/am335x-sbc-t335.dts
+++ b/arch/arm/boot/dts/am335x-sbc-t335.dts
@@ -70,122 +70,82 @@
lcd_pins_default: lcd_pins_default {
pinctrl-single,pins = <
/* gpmc_ad8.lcd_data23 */
- AM33XX_IOPAD(0x820, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad9.lcd_data22 */
- AM33XX_IOPAD(0x824, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad10.lcd_data21 */
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad11.lcd_data20 */
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad12.lcd_data19 */
- AM33XX_IOPAD(0x830, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad13.lcd_data18 */
- AM33XX_IOPAD(0x834, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad14.lcd_data17 */
- AM33XX_IOPAD(0x838, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_OUTPUT, MUX_MODE1)
/* gpmc_ad15.lcd_data16 */
- AM33XX_IOPAD(0x83c, PIN_OUTPUT | MUX_MODE1)
- /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0)
- /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0)
- /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE0)
- /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE0)
- /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE0)
- /* lcd_ac_bias_en.lcd_ac_bias_en */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_OUTPUT, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0)
>;
};
lcd_pins_sleep: lcd_pins_sleep {
pinctrl-single,pins = <
/* gpmc_ad8.lcd_data23 */
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad9.lcd_data22 */
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad10.lcd_data21 */
- AM33XX_IOPAD(0x828, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad11.lcd_data20 */
- AM33XX_IOPAD(0x82c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad12.lcd_data19 */
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad13.lcd_data18 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad14.lcd_data17 */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLDOWN, MUX_MODE7)
/* gpmc_ad15.lcd_data16 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a0, PULL_DISABLE | MUX_MODE7)
- /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a4, PULL_DISABLE | MUX_MODE7)
- /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8a8, PULL_DISABLE | MUX_MODE7)
- /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8ac, PULL_DISABLE | MUX_MODE7)
- /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b0, PULL_DISABLE | MUX_MODE7)
- /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b4, PULL_DISABLE | MUX_MODE7)
- /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8b8, PULL_DISABLE | MUX_MODE7)
- /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8bc, PULL_DISABLE | MUX_MODE7)
- /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c0, PULL_DISABLE | MUX_MODE7)
- /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c4, PULL_DISABLE | MUX_MODE7)
- /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8c8, PULL_DISABLE | MUX_MODE7)
- /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8cc, PULL_DISABLE | MUX_MODE7)
- /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d0, PULL_DISABLE | MUX_MODE7)
- /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d4, PULL_DISABLE | MUX_MODE7)
- /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8d8, PULL_DISABLE | MUX_MODE7)
- /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8dc, PULL_DISABLE | MUX_MODE7)
- /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | MUX_MODE7)
- /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | MUX_MODE7)
- /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | MUX_MODE7)
- /* lcd_ac_bias_en.lcd_ac_bias_en */
- AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PULL_DISABLE, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index bfbe27a80006..5b0368504015 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -382,193 +382,191 @@
clkout2_pin: pinmux_clkout2_pin {
pinctrl-single,pins = <
/* xdma_event_intr1.clkout2 */
- AM33XX_IOPAD(0x9b4, PIN_INPUT | MUX_MODE6)
+ AM33XX_PADCONF(AM335X_PIN_XDMA_EVENT_INTR1, PIN_INPUT, MUX_MODE6)
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLUP | MUX_MODE0)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE0)
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
- /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)
- /* mdio_clk.mdio_clk */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
ehrpwm1_pins: pinmux_ehrpwm1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x84c, PIN_OUTPUT | MUX_MODE6) /* gpmc_a3.gpio1_19 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT, MUX_MODE6) /* gpmc_a3.gpio1_19 */
>;
};
emmc_pins: pinmux_emmc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT | MUX_MODE2)
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2)
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1)
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1)
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT | MUX_MODE0)
- AM33XX_IOPAD(0x98c, PIN_INPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT, MUX_MODE0)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE5)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT, MUX_MODE5)
>;
};
mmc3_pins: pinmux_mmc3_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x830, PIN_INPUT | MUX_MODE3)
- AM33XX_IOPAD(0x834, PIN_INPUT | MUX_MODE3)
- AM33XX_IOPAD(0x838, PIN_INPUT | MUX_MODE3)
- AM33XX_IOPAD(0x83c, PIN_INPUT | MUX_MODE3)
- AM33XX_IOPAD(0x888, PIN_INPUT | MUX_MODE3)
- AM33XX_IOPAD(0x88c, PIN_INPUT | MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN3, PIN_INPUT, MUX_MODE3)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CLK, PIN_INPUT, MUX_MODE3)
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x96c, PIN_OUTPUT | MUX_MODE0)
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x974, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLDOWN | MUX_MODE0)
- AM33XX_IOPAD(0x97C, PIN_OUTPUT | MUX_MODE0)
- AM33XX_IOPAD(0x980, PIN_INPUT | MUX_MODE0)
- AM33XX_IOPAD(0x984, PIN_OUTPUT | MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT, MUX_MODE0)
>;
};
uart2_pins: pinmux_uart2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE1)
- AM33XX_IOPAD(0x954, PIN_OUTPUT | MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT, MUX_MODE1)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_OUTPUT, MUX_MODE1)
>;
};
uart4_pins: pinmux_uart4_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE6)
- AM33XX_IOPAD(0x874, PIN_OUTPUT_PULLUP | MUX_MODE6)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLUP, MUX_MODE6)
>;
};
user_leds_s0: user_leds_s0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x820, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x824, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x82c, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x840, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x844, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x848, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x858, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x85c, PIN_OUTPUT_PULLUP | MUX_MODE7)
- AM33XX_IOPAD(0x860, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x864, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x868, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x86c, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x878, PIN_OUTPUT_PULLUP | MUX_MODE7)
- AM33XX_IOPAD(0x87c, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x894, PIN_INPUT | MUX_MODE7)
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x958, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x95c, PIN_OUTPUT | MUX_MODE7)
- AM33XX_IOPAD(0x964, PIN_OUTPUT_PULLUP | MUX_MODE7)
- AM33XX_IOPAD(0x9a0, PIN_OUTPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x9a4, PIN_OUTPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x9a8, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLUP | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT_PULLUP, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_OUTPUT_PULLUP, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN0, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_ADVN_ALE, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_OEN_REN, PIN_INPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN0_CLE, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_RMII1_REF_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_OUTPUT, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_ECAP0_IN_PWM0_OUT, PIN_OUTPUT_PULLUP, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSR, PIN_OUTPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLUP, MUX_MODE7)
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index 38d57b89f7d3..1ac0c8aa98c5 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -218,227 +218,227 @@
audio_pins: pinmux_audio_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_ahcklx.mcasp0_ahclkx */
- AM33XX_IOPAD(0x994, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
- AM33XX_IOPAD(0x990, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
- AM33XX_IOPAD(0x998, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr0.mcasp0_axr0 */
- AM33XX_IOPAD(0x99c, PIN_OUTPUT_PULLDOWN | MUX_MODE2) /* mcasp0_ahclkr.mcasp0_axr2 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKR, PIN_OUTPUT_PULLDOWN, MUX_MODE2)
>;
};
audio_pa_pins: pinmux_audio_pa_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9a0, PIN_INPUT_PULLDOWN | MUX_MODE7) /* SoundPA_en - mcasp0_aclkr.gpio3_18 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT_PULLDOWN, MUX_MODE7) /* SoundPA_en - mcasp0_aclkr.gpio3_18 */
>;
};
audio_mclk_pins: pinmux_audio_mclk_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_a11.gpio1_27 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_a11.gpio1_27 */
>;
};
backlight0_pins: pinmux_backlight0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE7) /* gpmc_wen.gpio2_4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WEN, PIN_OUTPUT, MUX_MODE7) /* gpmc_wen.gpio2_4 */
>;
};
backlight1_pins: pinmux_backlight1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x828, PIN_OUTPUT | MUX_MODE7) /* gpmc_ad10.gpio0_26 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE7) /* gpmc_ad10.gpio0_26 */
>;
};
lcd_pins: pinmux_lcd_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8a0, PIN_OUTPUT | MUX_MODE0) /* lcd_data0.lcd_data0 */
- AM33XX_IOPAD(0x8a4, PIN_OUTPUT | MUX_MODE0) /* lcd_data1.lcd_data1 */
- AM33XX_IOPAD(0x8a8, PIN_OUTPUT | MUX_MODE0) /* lcd_data2.lcd_data2 */
- AM33XX_IOPAD(0x8ac, PIN_OUTPUT | MUX_MODE0) /* lcd_data3.lcd_data3 */
- AM33XX_IOPAD(0x8b0, PIN_OUTPUT | MUX_MODE0) /* lcd_data4.lcd_data4 */
- AM33XX_IOPAD(0x8b4, PIN_OUTPUT | MUX_MODE0) /* lcd_data5.lcd_data5 */
- AM33XX_IOPAD(0x8b8, PIN_OUTPUT | MUX_MODE0) /* lcd_data6.lcd_data6 */
- AM33XX_IOPAD(0x8bc, PIN_OUTPUT | MUX_MODE0) /* lcd_data7.lcd_data7 */
- AM33XX_IOPAD(0x8c0, PIN_OUTPUT | MUX_MODE0) /* lcd_data8.lcd_data8 */
- AM33XX_IOPAD(0x8c4, PIN_OUTPUT | MUX_MODE0) /* lcd_data9.lcd_data9 */
- AM33XX_IOPAD(0x8c8, PIN_OUTPUT | MUX_MODE0) /* lcd_data10.lcd_data10 */
- AM33XX_IOPAD(0x8cc, PIN_OUTPUT | MUX_MODE0) /* lcd_data11.lcd_data11 */
- AM33XX_IOPAD(0x8d0, PIN_OUTPUT | MUX_MODE0) /* lcd_data12.lcd_data12 */
- AM33XX_IOPAD(0x8d4, PIN_OUTPUT | MUX_MODE0) /* lcd_data13.lcd_data13 */
- AM33XX_IOPAD(0x8d8, PIN_OUTPUT | MUX_MODE0) /* lcd_data14.lcd_data14 */
- AM33XX_IOPAD(0x8dc, PIN_OUTPUT | MUX_MODE0) /* lcd_data15.lcd_data15 */
- AM33XX_IOPAD(0x8e0, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_vsync.lcd_vsync */
- AM33XX_IOPAD(0x8e4, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_hsync.lcd_hsync */
- AM33XX_IOPAD(0x8e8, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_pclk.lcd_pclk */
- AM33XX_IOPAD(0x8ec, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* lcd_ac_bias_en.lcd_ac_bias_en */
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
led_pins: pinmux_led_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE7) /* gpmc_a5.gpio1_21 */
- AM33XX_IOPAD(0x858, PIN_OUTPUT | MUX_MODE7) /* gpmc_a6.gpio1_22 */
- AM33XX_IOPAD(0x85c, PIN_OUTPUT | MUX_MODE7) /* gpmc_a7.gpio1_23 */
- AM33XX_IOPAD(0x860, PIN_OUTPUT | MUX_MODE7) /* gpmc_a8.gpio1_24 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE7) /* gpmc_a5.gpio1_21 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_OUTPUT, MUX_MODE7) /* gpmc_a6.gpio1_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_OUTPUT, MUX_MODE7) /* gpmc_a7.gpio1_23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_OUTPUT, MUX_MODE7) /* gpmc_a8.gpio1_24 */
>;
};
uart0_pins: pinmux_uart0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart4_pins: pinmux_uart4_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x870, PIN_INPUT_PULLUP | MUX_MODE6) /* gpmc_wait0.uart4_rxd */
- AM33XX_IOPAD(0x874, PIN_OUTPUT_PULLDOWN | MUX_MODE6) /* gpmc_wpn.uart4_txd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WAIT0, PIN_INPUT_PULLUP, MUX_MODE6) /* gpmc_wait0.uart4_rxd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_OUTPUT_PULLDOWN, MUX_MODE6) /* gpmc_wpn.uart4_txd */
>;
};
i2c0_pins: pinmux_i2c0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_sda.i2c0_sda */
- AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c0_scl.i2c0_scl */
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SDA, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_I2C0_SCL, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
i2c2_pins: pinmux_i2c2_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x978, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_ctsn.i2c2_sda */
- AM33XX_IOPAD(0x97c, PIN_INPUT_PULLUP | MUX_MODE3) /* uart1_rtsn.i2c2_scl */
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_ctsn.i2c2_sda */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_INPUT_PULLUP, MUX_MODE3) /* uart1_rtsn.i2c2_scl */
>;
};
cpsw_default: cpsw_default {
pinctrl-single,pins = <
/* Slave 1 */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxerr.mii1_rxerr */
- AM33XX_IOPAD(0x914, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txen.mii1_txen */
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxdv.mii1_rxdv */
- AM33XX_IOPAD(0x91c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd3.mii1_txd3 */
- AM33XX_IOPAD(0x920, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd2.mii1_txd2 */
- AM33XX_IOPAD(0x924, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd1.mii1_txd1 */
- AM33XX_IOPAD(0x928, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mii1_txd0.mii1_txd0 */
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_txclk.mii1_txclk */
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxclk.mii1_rxclk */
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd3.mii1_rxd3 */
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd2.mii1_rxd2 */
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd1.mii1_rxd1 */
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLUP | MUX_MODE0) /* mii1_rxd0.mii1_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLUP, MUX_MODE0)
>;
};
cpsw_sleep: cpsw_sleep {
pinctrl-single,pins = <
/* Slave 1 reset value */
- AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x914, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x918, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x91c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x920, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x924, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x928, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x92c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x930, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x934, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x938, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x93c, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x940, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_ER, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_EN, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_DV, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_TX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RX_CLK, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD3, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD2, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD1, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MII1_RXD0, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
davinci_mdio_default: davinci_mdio_default {
pinctrl-single,pins = <
/* MDIO */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0) /* mdio_data.mdio_data */
- AM33XX_IOPAD(0x94c, PIN_OUTPUT_PULLUP | MUX_MODE0) /* mdio_clk.mdio_clk */
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0)
/* Ethernet */
- AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* Ethernet_nRST - gpmc_ad14.gpio1_14 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLUP, MUX_MODE7) /* Ethernet_nRST - gpmc_ad14.gpio1_14 */
>;
};
davinci_mdio_sleep: davinci_mdio_sleep {
pinctrl-single,pins = <
/* MDIO reset value */
- AM33XX_IOPAD(0x948, PIN_INPUT_PULLDOWN | MUX_MODE7)
- AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7)
+ AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7)
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT, MUX_MODE7) /* uart0_rtsn.gpio1_9 */
>;
};
emmc_pwrseq_pins: pinmux_emmc_pwrseq_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x850, PIN_OUTPUT_PULLUP | MUX_MODE7) /* gpmc_a4.gpio1_20 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT_PULLUP, MUX_MODE7) /* gpmc_a4.gpio1_20 */
>;
};
emmc_pins: pinmux_emmc_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x880, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
- AM33XX_IOPAD(0x884, PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
- AM33XX_IOPAD(0x800, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
- AM33XX_IOPAD(0x804, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
- AM33XX_IOPAD(0x808, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
- AM33XX_IOPAD(0x80c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
- AM33XX_IOPAD(0x810, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
- AM33XX_IOPAD(0x814, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
- AM33XX_IOPAD(0x818, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
- AM33XX_IOPAD(0x81c, PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN1, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_CSN2, PIN_INPUT_PULLUP, MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD0, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD1, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD2, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD3, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD4, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD5, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD6, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD7, PIN_INPUT_PULLUP, MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
>;
};
ehrpwm1_pins: pinmux_ehrpwm1a_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x848, PIN_OUTPUT | MUX_MODE6) /* gpmc_a2.ehrpwm1a */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT | MUX_MODE6) /* gpmc_a3.ehrpwm1b */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT, MUX_MODE6) /* gpmc_a2.ehrpwm1a */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT, MUX_MODE6) /* gpmc_a3.ehrpwm1b */
>;
};
rtc0_irq_pins: pinmux_rtc0_irq_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x824, PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_ad9.gpio0_23 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLUP, MUX_MODE7) /* gpmc_ad9.gpio0_23 */
>;
};
spi0_pins: pinmux_spi0_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE0) /* SPI0_MOSI - spi0_d0.spi0_d0 */
- AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) /* SPI0_MISO - spi0_d1.spi0_d1 */
- AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE0) /* SPI0_CLK - spi0_clk.spi0_clk */
- AM33XX_IOPAD(0x95c, PIN_INPUT_PULLUP | MUX_MODE0) /* SPI0_CS0 (NBATTSS) - spi0_cs0.spi0_cs0 */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE0) /* SPI0_CS1 (FPGA_FLASH_NCS) - spi0_cs1.spi0_cs1 */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D0, PIN_INPUT_PULLUP, MUX_MODE0) /* SPI0_MOSI */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_D1, PIN_INPUT_PULLUP, MUX_MODE0) /* SPI0_MISO */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_SCLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS0, PIN_INPUT_PULLUP, MUX_MODE0) /* SPI0_CS0 (NBATTSS) */
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT_PULLUP, MUX_MODE0) /* SPI0_CS1 (FPGA_FLASH_NCS) */
>;
};
lwb_pins: pinmux_lwb_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdInt - gpmc_ad12.gpio1_12 */
- AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLUP, MUX_MODE7) /* nKbdInt - gpmc_ad12.gpio1_12 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLUP, MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLUP, MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
/* PDI Bus - Battery system */
- AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
- AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLUP, MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLUP, MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
/* FPGA */
- AM33XX_IOPAD(0x820, PIN_INPUT_PULLUP | MUX_MODE7) /* FPGA_DONE - gpmc_ad8.gpio0_22 */
- AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* FPGA_NRST - gpmc_a0.gpio1_16 */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE7) /* FPGA_RUN - gpmc_a1.gpio1_17 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLUP | MUX_MODE7) /* ENFPGA - gpmc_a9.gpio1_25 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE7) /* FPGA_PROGRAM - gpmc_a10.gpio1_26 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLUP, MUX_MODE7) /* FPGA_DONE - gpmc_ad8.gpio0_22 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_INPUT_PULLUP, MUX_MODE7) /* FPGA_NRST - gpmc_a0.gpio1_16 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE7) /* FPGA_RUN - gpmc_a1.gpio1_17 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLUP, MUX_MODE7) /* ENFPGA - gpmc_a9.gpio1_25 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE7) /* FPGA_PROGRAM - gpmc_a10.gpio1_26 */
>;
};
};
diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
index 8ce541739b24..b7d28a20341f 100644
--- a/arch/arm/boot/dts/am335x-wega.dtsi
+++ b/arch/arm/boot/dts/am335x-wega.dtsi
@@ -32,11 +32,11 @@
&am33xx_pinmux {
mcasp0_pins: pinmux_mcasp0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x9AC, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_ahclkx.mcasp0_ahclkx */
- AM33XX_IOPAD(0x990, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_aclkx.mcasp0_aclkx */
- AM33XX_IOPAD(0x994, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_fsx.mcasp0_fsx */
- AM33XX_IOPAD(0x998, PIN_INPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr0.mcasp0_axr0 */
- AM33XX_IOPAD(0x9A8, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* mcasp0_axr1.mcasp0_axr1 */
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AHCLKX, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_FSX, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR0, PIN_INPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MCASP0_AXR1, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
};
@@ -84,8 +84,8 @@
&am33xx_pinmux {
dcan1_pins: pinmux_dcan1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x968, PIN_OUTPUT_PULLUP | MUX_MODE2) /* uart0_ctsn.d_can1_tx */
- AM33XX_IOPAD(0x96c, PIN_INPUT_PULLUP | MUX_MODE2) /* uart0_rtsn.d_can1_rx */
+ AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT_PULLUP, MUX_MODE2) /* uart0_ctsn.d_can1_tx */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RTSN, PIN_INPUT_PULLUP, MUX_MODE2) /* uart0_rtsn.d_can1_rx */
>;
};
};
@@ -100,20 +100,20 @@
&am33xx_pinmux {
ethernet1_pins: pinmux_ethernet1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x840, PIN_OUTPUT | MUX_MODE1) /* gpmc_a0.mii2_txen */
- AM33XX_IOPAD(0x844, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a1.mii2_rxdv */
- AM33XX_IOPAD(0x848, PIN_OUTPUT | MUX_MODE1) /* gpmc_a2.mii2_txd3 */
- AM33XX_IOPAD(0x84c, PIN_OUTPUT | MUX_MODE1) /* gpmc_a3.mii2_txd2 */
- AM33XX_IOPAD(0x850, PIN_OUTPUT | MUX_MODE1) /* gpmc_a4.mii2_txd1 */
- AM33XX_IOPAD(0x854, PIN_OUTPUT | MUX_MODE1) /* gpmc_a5.mii2_txd0 */
- AM33XX_IOPAD(0x858, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a6.mii2_txclk */
- AM33XX_IOPAD(0x85c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a7.mii2_rxclk */
- AM33XX_IOPAD(0x860, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a8.mii2_rxd3 */
- AM33XX_IOPAD(0x864, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a9.mii2_rxd2 */
- AM33XX_IOPAD(0x868, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a10.mii2_rxd1 */
- AM33XX_IOPAD(0x86c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_a11.mii2_rxd0 */
- AM33XX_IOPAD(0x874, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_wpn.mii2_rxerr */
- AM33XX_IOPAD(0x878, PIN_INPUT_PULLDOWN | MUX_MODE1) /* gpmc_ben1.mii2_col */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A0, PIN_OUTPUT, MUX_MODE1) /* gpmc_a0.mii2_txen */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A1, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a1.mii2_rxdv */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A2, PIN_OUTPUT, MUX_MODE1) /* gpmc_a2.mii2_txd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A3, PIN_OUTPUT, MUX_MODE1) /* gpmc_a3.mii2_txd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A4, PIN_OUTPUT, MUX_MODE1) /* gpmc_a4.mii2_txd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A5, PIN_OUTPUT, MUX_MODE1) /* gpmc_a5.mii2_txd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A6, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a6.mii2_txclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A7, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a7.mii2_rxclk */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A8, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a8.mii2_rxd3 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A9, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a9.mii2_rxd2 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A10, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a10.mii2_rxd1 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_A11, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_a11.mii2_rxd0 */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_WPN, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_wpn.mii2_rxerr */
+ AM33XX_PADCONF(AM335X_PIN_GPMC_BEN1, PIN_INPUT_PULLDOWN, MUX_MODE1) /* gpmc_ben1.mii2_col */
>;
};
};
@@ -141,13 +141,13 @@
&am33xx_pinmux {
mmc1_pins: pinmux_mmc1 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
- AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat2.mmc0_dat2 */
- AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat1.mmc0_dat1 */
- AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat0.mmc0_dat0 */
- AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE7) /* spi0_cs1.mmc0_sdcd */
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT2, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT1, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_DAT0, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_SPI0_CS1, PIN_INPUT_PULLUP, MUX_MODE7) /* spi0_cs1.mmc0_sdcd */
>;
};
};
@@ -171,17 +171,17 @@
&am33xx_pinmux {
uart0_pins: pinmux_uart0 {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) /* uart0_rxd.uart0_rxd */
- AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+ AM33XX_PADCONF(AM335X_PIN_UART0_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART0_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
uart1_pins: pinmux_uart1_pins {
pinctrl-single,pins = <
- AM33XX_IOPAD(0x980, PIN_INPUT_PULLUP | MUX_MODE0) /* uart1_rxd.uart1_rxd */
- AM33XX_IOPAD(0x984, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_txd.uart1_txd */
- AM33XX_IOPAD(0x978, PIN_INPUT | MUX_MODE0) /* uart1_ctsn.uart1_ctsn */
- AM33XX_IOPAD(0x97c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart1_rtsn.uart1_rtsn */
+ AM33XX_PADCONF(AM335X_PIN_UART1_RXD, PIN_INPUT_PULLUP, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_TXD, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_CTSN, PIN_INPUT, MUX_MODE0)
+ AM33XX_PADCONF(AM335X_PIN_UART1_RTSN, PIN_OUTPUT_PULLDOWN, MUX_MODE0)
>;
};
};
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9dfd80e3b76e..9b8b132b04e1 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -80,6 +80,7 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&matrix_keypad_default>;
pinctrl-1 = <&matrix_keypad_sleep>;
+ wakeup-source;
row-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH /* Bank0, pin12 */
&gpio0 13 GPIO_ACTIVE_HIGH /* Bank0, pin13 */
@@ -620,6 +621,12 @@
regulator-name = "vdcdc3";
regulator-boot-on;
regulator-always-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ regulator-state-disk {
+ regulator-off-in-suspend;
+ };
};
dcdc4: regulator-dcdc4 {
@@ -634,12 +641,16 @@
regulator-name = "v1_0bat";
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
+ regulator-boot-on;
+ regulator-always-on;
};
dcdc6: regulator-dcdc6 {
regulator-name = "v1_8bat";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
};
ldo1: regulator-ldo1 {
diff --git a/arch/arm/boot/dts/am5718.dtsi b/arch/arm/boot/dts/am5718.dtsi
new file mode 100644
index 000000000000..d51007c3e8c4
--- /dev/null
+++ b/arch/arm/boot/dts/am5718.dtsi
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include "dra72x.dtsi"
+
+/ {
+ compatible = "ti,am5718", "ti,dra7";
+};
+
+/*
+ * These modules are not present on AM5718
+ *
+ * ATL
+ * VCP1, VCP2
+ * MLB
+ * ISS
+ * USB3, USB4
+ */
+
+&usb3_tm {
+ status = "disabled";
+};
+
+&usb4_tm {
+ status = "disabled";
+};
+
+&atl_tm {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 6432309b39e3..66116ad3f9f4 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -7,7 +7,7 @@
*/
/dts-v1/;
-#include "dra72x.dtsi"
+#include "am5718.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include "dra7-mmc-iodelay.dtsi"
diff --git a/arch/arm/boot/dts/am5728.dtsi b/arch/arm/boot/dts/am5728.dtsi
new file mode 100644
index 000000000000..82e5427ef6a9
--- /dev/null
+++ b/arch/arm/boot/dts/am5728.dtsi
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include "dra74x.dtsi"
+
+/ {
+ compatible = "ti,am5728", "ti,dra7";
+};
+
+/*
+ * These modules are not present on AM5728
+ *
+ * EVE1, EVE2
+ * ATL
+ * VCP1, VCP2
+ * MLB
+ * ISS
+ * USB3, USB4
+ */
+
+&usb3_tm {
+ status = "disabled";
+};
+
+&usb4_tm {
+ status = "disabled";
+};
+
+&atl_tm {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index b2fb6e097be7..4f835222c266 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -8,15 +8,14 @@
/dts-v1/;
-#include "dra74x.dtsi"
+#include "am5728.dtsi"
#include "dra7-mmc-iodelay.dtsi"
#include "dra74x-mmc-iodelay.dtsi"
#include "am572x-idk-common.dtsi"
/ {
model = "TI AM5728 IDK";
- compatible = "ti,am5728-idk", "ti,am5728", "ti,dra742", "ti,dra74",
- "ti,dra7";
+ compatible = "ti,am5728-idk", "ti,am5728", "ti,dra7";
};
&mmc1 {
diff --git a/arch/arm/boot/dts/am5748.dtsi b/arch/arm/boot/dts/am5748.dtsi
new file mode 100644
index 000000000000..5e129759d04a
--- /dev/null
+++ b/arch/arm/boot/dts/am5748.dtsi
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include "dra76x.dtsi"
+
+/ {
+ compatible = "ti,am5748", "ti,dra762", "ti,dra7";
+};
+
+/*
+ * These modules are not present on AM5748
+ *
+ * EVE1, EVE2
+ * ATL
+ * VCP1, VCP2
+ * MLB
+ * ISS
+ * USB3, USB4
+ */
+
+&usb3_tm {
+ status = "disabled";
+};
+
+&usb4_tm {
+ status = "disabled";
+};
+
+&atl_tm {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 378dfa780ac1..dc5141c35610 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -6,14 +6,14 @@
/dts-v1/;
-#include "dra76x.dtsi"
+#include "am5748.dtsi"
#include "dra7-mmc-iodelay.dtsi"
#include "dra76x-mmc-iodelay.dtsi"
#include "am572x-idk-common.dtsi"
/ {
model = "TI AM5748 IDK";
- compatible = "ti,am5728-idk", "ti,dra762", "ti,dra7";
+ compatible = "ti,am5748-idk", "ti,am5748", "ti,dra762", "ti,dra7";
};
&qspi {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index 1e6620f139dd..2341a56ebab9 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -7,7 +7,7 @@
*/
/dts-v1/;
-#include "dra74x.dtsi"
+#include "am5728.dtsi"
#include "am57xx-commercial-grade.dtsi"
#include "dra74x-mmc-iodelay.dtsi"
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 4748ce8747ad..0460de0da2bf 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -13,7 +13,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include "dra74x.dtsi"
+#include "am5728.dtsi"
/ {
model = "CompuLab CL-SOM-AM57x";
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 96c18703e471..3f4bb44d85f0 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -415,6 +415,8 @@
reg = <0x20300 0x34>, <0x20704 0x4>, <0x18260 0x4>;
clocks = <&coreclk 2>, <&refclk>;
clock-names = "nbclk", "fixed";
+ interrupts-extended = <&gic GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <&gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
cpurst: cpurst@20800 {
diff --git a/arch/arm/boot/dts/aspeed-ast2500-evb.dts b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
index 2375449c02d0..556ed469830c 100644
--- a/arch/arm/boot/dts/aspeed-ast2500-evb.dts
+++ b/arch/arm/boot/dts/aspeed-ast2500-evb.dts
@@ -13,12 +13,25 @@
chosen {
stdout-path = &uart5;
- bootargs = "console=ttyS4,115200 earlyprintk";
+ bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
};
memory@80000000 {
reg = <0x80000000 0x20000000>;
};
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
+ };
};
&fmc {
@@ -27,6 +40,7 @@
status = "okay";
m25p,fast-read;
label = "bmc";
+#include "openbmc-flash-layout.dtsi"
};
};
@@ -97,3 +111,8 @@
&uhci {
status = "okay";
};
+
+&gfx {
+ status = "okay";
+ memory-region = <&gfx_memory>;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
index 9f194b5eeba4..43aba4071a5c 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-cmm.dts
@@ -56,6 +56,12 @@
memory@80000000 {
reg = <0x80000000 0x20000000>;
};
+
+ ast-adc-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>,
+ <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>;
+ };
};
&pinctrl {
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
index 4c2dcac738e8..c4521eda787c 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
@@ -64,6 +64,11 @@
status = "okay";
};
+&vuart {
+ // VUART Host Console
+ status = "okay";
+};
+
&uart1 {
// Host Console
status = "okay";
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
index b854ac0bae9a..b249da80fb83 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts
@@ -32,9 +32,9 @@
no-map;
};
- flash_memory: region@98000000 {
+ flash_memory: region@5c000000 {
no-map;
- reg = <0x98000000 0x01000000>; /* 16MB */
+ reg = <0x5C000000 0x02000000>; /* 32MB */
};
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
index 76fe994f2ba4..418a1988b262 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-romulus.dts
@@ -35,6 +35,13 @@
reg = <0x9ef00000 0x00100000>;
no-map;
};
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
};
leds {
@@ -238,6 +245,7 @@
&gfx {
status = "okay";
+ memory-region = <&gfx_memory>;
};
&pinctrl {
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
index ad54117c075e..f1356ca794d8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-witherspoon.dts
@@ -26,6 +26,13 @@
no-map;
reg = <0x98000000 0x04000000>; /* 64M */
};
+
+ gfx_memory: framebuffer {
+ size = <0x01000000>;
+ alignment = <0x01000000>;
+ compatible = "shared-dma-pool";
+ reusable;
+ };
};
gpio-keys {
@@ -186,13 +193,49 @@
status = "okay";
label = "bmc";
m25p,fast-read;
-#include "openbmc-flash-layout.dtsi"
+
+ partitions {
+ #address-cells = < 1 >;
+ #size-cells = < 1 >;
+ compatible = "fixed-partitions";
+ u-boot@0 {
+ reg = < 0 0x60000 >;
+ label = "u-boot";
+ };
+ u-boot-env@60000 {
+ reg = < 0x60000 0x20000 >;
+ label = "u-boot-env";
+ };
+ obmc-ubi@80000 {
+ reg = < 0x80000 0x1F80000 >;
+ label = "obmc-ubi";
+ };
+ };
};
flash@1 {
status = "okay";
- label = "alt";
+ label = "alt-bmc";
m25p,fast-read;
+
+ partitions {
+ #address-cells = < 1 >;
+ #size-cells = < 1 >;
+ compatible = "fixed-partitions";
+ u-boot@0 {
+ reg = < 0 0x60000 >;
+ label = "alt-u-boot";
+ };
+ u-boot-env@60000 {
+ reg = < 0x60000 0x20000 >;
+ label = "alt-u-boot-env";
+ };
+ obmc-ubi@80000 {
+ reg = < 0x80000 0x1F80000 >;
+ label = "alt-obmc-ubi";
+ };
+ };
+
};
};
@@ -565,6 +608,7 @@
&gfx {
status = "okay";
+ memory-region = <&gfx_memory>;
};
&pinctrl {
@@ -592,3 +636,7 @@
&adc {
status = "okay";
};
+
+&vhub {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index 9549f867aa1e..5d7050d00874 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -209,6 +209,12 @@
clock-names = "PCLK";
};
+ rtc: rtc@1e781000 {
+ compatible = "aspeed,ast2400-rtc";
+ reg = <0x1e781000 0x18>;
+ status = "disabled";
+ };
+
uart1: serial@1e783000 {
compatible = "ns16550a";
reg = <0x1e783000 0x20>;
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 85ed9dbec196..4345c3153ca7 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -232,6 +232,10 @@
compatible = "aspeed,ast2500-gfx", "syscon";
reg = <0x1e6e6000 0x1000>;
reg-io-width = <4>;
+ clocks = <&syscon ASPEED_CLK_GATE_D1CLK>;
+ resets = <&syscon ASPEED_RESET_CRT1>;
+ status = "disabled";
+ interrupts = <0x19>;
};
adc: adc@1e6e9000 {
@@ -243,6 +247,16 @@
status = "disabled";
};
+ video: video@1e700000 {
+ compatible = "aspeed,ast2500-video-engine";
+ reg = <0x1e700000 0x1000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>,
+ <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ interrupts = <7>;
+ status = "disabled";
+ };
+
sram: sram@1e720000 {
compatible = "mmio-sram";
reg = <0x1e720000 0x9000>; // 36K
@@ -260,6 +274,12 @@
#interrupt-cells = <2>;
};
+ rtc: rtc@1e781000 {
+ compatible = "aspeed,ast2500-rtc";
+ reg = <0x1e781000 0x18>;
+ status = "disabled";
+ };
+
timer: timer@1e782000 {
/* This timer is a Faraday FTTMR010 derivative */
compatible = "aspeed,ast2400-timer";
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 33a159c0163f..7788d5db65c2 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -1,47 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91-sama5d27_som1.dtsi - Device Tree file for SAMA5D27 SoM1 board
*
* Copyright (c) 2017, Microchip Technology Inc.
* 2017 Cristian Birsan <cristian.birsan@microchip.com>
* 2017 Claudiu Beznea <claudiu.beznea@microchip.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sama5d2.dtsi"
#include "sama5d2-pinfunc.h"
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index a48180555ef5..89f0c9979b89 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91-sama5d27_som1_ek.dts - Device Tree file for SAMA5D27-SOM1-EK board
*
@@ -5,44 +6,6 @@
* 2016 Nicolas Ferre <nicolas.ferre@atmel.com>
* 2017 Cristian Birsan <cristian.birsan@microchip.com>
* 2017 Claudiu Beznea <claudiu.beznea@microchip.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "at91-sama5d27_som1.dtsi"
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index fa54e8866f1e..808e399fd39a 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -1,52 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91-sama5d2_xplained.dts - Device Tree file for SAMA5D2 Xplained board
*
* Copyright (C) 2015 Atmel,
* 2015 Nicolas Ferre <nicolas.ferre@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sama5d2.dtsi"
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
/ {
model = "Atmel SAMA5D2 Xplained";
@@ -181,49 +145,102 @@
regulator-name = "VDD_1V35";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-min-microvolt=<1400000>;
+ regulator-suspend-max-microvolt=<1400000>;
+ regulator-changeable-in-suspend;
+ regulator-mode=<ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ };
};
vdd_1v2_reg: REG_DCDC2 {
regulator-name = "VDD_1V2";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_3v3_reg: REG_DCDC3 {
regulator-name = "VDD_3V3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_fuse_reg: REG_LDO1 {
regulator-name = "VDD_FUSE";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_NORMAL>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_NORMAL>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_3v3_lp_reg: REG_LDO2 {
regulator-name = "VDD_3V3_LP";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_NORMAL>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_NORMAL>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_led_reg: REG_LDO3 {
regulator-name = "VDD_LED";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_NORMAL>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_NORMAL>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_sdhc_1v8_reg: REG_LDO4 {
regulator-name = "VDD_SDHC_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_NORMAL>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_NORMAL>;
regulator-always-on;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 43aef56ac74a..fdfc37d716e0 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91-sama5d4_xplained.dts - Device Tree file for SAMA5D4 Xplained board
*
* Copyright (C) 2015 Atmel,
* 2015 Josh Wu <josh.wu@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sama5d4.dtsi"
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 12d5af938aa3..0cc1cff13e46 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91-sama5d4ek.dts - Device Tree file for SAMA5D4 Evaluation Kit
*
* Copyright (C) 2014 Atmel,
* 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sama5d4.dtsi"
diff --git a/arch/arm/boot/dts/at91-vinco.dts b/arch/arm/boot/dts/at91-vinco.dts
index 430277291e02..15050fdd479d 100644
--- a/arch/arm/boot/dts/at91-vinco.dts
+++ b/arch/arm/boot/dts/at91-vinco.dts
@@ -1,47 +1,10 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for VInCo platform
*
* Copyright (C) 2014 Atmel,
* 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
* 2015 Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sama5d4.dtsi"
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
index 07d1b571e601..81f808a10931 100644
--- a/arch/arm/boot/dts/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for Atmel at91sam9260 Evaluation Kit
*
* Copyright (C) 2016 Atmel,
* 2016 Nicolas Ferre <nicolas.ferre@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "at91sam9260.dtsi"
diff --git a/arch/arm/boot/dts/at91sam9xe.dtsi b/arch/arm/boot/dts/at91sam9xe.dtsi
index 1304452f0fae..3f9d8caf8b0a 100644
--- a/arch/arm/boot/dts/at91sam9xe.dtsi
+++ b/arch/arm/boot/dts/at91sam9xe.dtsi
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* at91sam9xe.dtsi - Device Tree Include file for AT91SAM9XE family SoC
*
* Copyright (C) 2015 Atmel,
* 2015 Alexandre Belloni <alexandre.Belloni@free-electrons.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include "at91sam9260.dtsi"
diff --git a/arch/arm/boot/dts/axp81x.dtsi b/arch/arm/boot/dts/axp81x.dtsi
index bd83962d3627..1dfeeceabf4c 100644
--- a/arch/arm/boot/dts/axp81x.dtsi
+++ b/arch/arm/boot/dts/axp81x.dtsi
@@ -171,4 +171,8 @@
status = "disabled";
};
};
+
+ usb_power_supply: usb-power-supply {
+ compatible = "x-powers,axp813-usb-power-supply";
+ };
};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 414f1cd68733..fe9f0bc29fec 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -2499,7 +2499,7 @@
ranges = <0x0 0x3a000 0x1000>;
};
- target-module@3c000 { /* 0x4843c000, ap 23 08.0 */
+ atl_tm: target-module@3c000 { /* 0x4843c000, ap 23 08.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
reg = <0x3c000 0x4>;
reg-names = "rev";
@@ -4099,7 +4099,7 @@
};
};
- target-module@100000 { /* 0x48900000, ap 85 04.0 */
+ usb3_tm: target-module@100000 { /* 0x48900000, ap 85 04.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
ti,hwmods = "usb_otg_ss3";
reg = <0x100000 0x4>,
@@ -4148,7 +4148,7 @@
};
};
- target-module@140000 { /* 0x48940000, ap 75 3c.0 */
+ usb4_tm: target-module@140000 { /* 0x48940000, ap 75 3c.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
ti,hwmods = "usb_otg_ss4";
reg = <0x140000 0x4>,
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2bc9add8b7a5..d87e932f45bd 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -193,6 +193,7 @@
ti,hwmods = "pcie1";
phys = <&pcie1_phy>;
phy-names = "pcie-phy0";
+ ti,syscon-lane-sel = <&scm_conf_pcie 0x18>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie1_intc 1>,
<0 0 0 2 &pcie1_intc 2>,
@@ -218,6 +219,7 @@
phys = <&pcie1_phy>;
phy-names = "pcie-phy0";
ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
+ ti,syscon-lane-sel = <&scm_conf_pcie 0x18>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index 1bb8e5c9d029..abfff54d6de5 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -25,7 +25,7 @@
};
chosen {
- bootargs = "ignore_loglevel root=/dev/nfs ip=dhcp";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial1:115200n8";
};
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 5892a9f7622f..8ce3a7786b19 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -97,42 +97,46 @@
};
};
- soc: soc {
- compatible = "simple-bus";
+ fixed-rate-clocks {
#address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- fixed-rate-clocks {
- #address-cells = <1>;
- #size-cells = <0>;
+ #size-cells = <0>;
- xusbxti: clock@0 {
- compatible = "fixed-clock";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- clock-frequency = <0>;
- #clock-cells = <0>;
- clock-output-names = "xusbxti";
- };
+ xusbxti: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ clock-frequency = <0>;
+ #clock-cells = <0>;
+ clock-output-names = "xusbxti";
+ };
- xxti: clock@1 {
- compatible = "fixed-clock";
- reg = <1>;
- clock-frequency = <0>;
- #clock-cells = <0>;
- clock-output-names = "xxti";
- };
+ xxti: clock@1 {
+ compatible = "fixed-clock";
+ reg = <1>;
+ clock-frequency = <0>;
+ #clock-cells = <0>;
+ clock-output-names = "xxti";
+ };
- xtcxo: clock@2 {
- compatible = "fixed-clock";
- reg = <2>;
- clock-frequency = <0>;
- #clock-cells = <0>;
- clock-output-names = "xtcxo";
- };
+ xtcxo: clock@2 {
+ compatible = "fixed-clock";
+ reg = <2>;
+ clock-frequency = <0>;
+ #clock-cells = <0>;
+ clock-output-names = "xtcxo";
};
+ };
+
+ pmu {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
sysram@2020000 {
compatible = "mmio-sram";
@@ -673,12 +677,6 @@
status = "disabled";
};
- pmu {
- compatible = "arm,cortex-a7-pmu";
- interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- };
-
ppmu_dmc0: ppmu_dmc0@106a0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x106a0000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index 6085e92ac2d7..36ccf227434d 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -51,6 +51,12 @@
serial3 = &serial_3;
};
+ pmu: pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupt-parent = <&combiner>;
+ interrupts = <2 2>, <3 2>;
+ };
+
soc: soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -169,12 +175,6 @@
reg = <0x10440000 0x1000>;
};
- pmu: pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupt-parent = <&combiner>;
- interrupts = <2 2>, <3 2>;
- };
-
sys_reg: syscon@10010000 {
compatible = "samsung,exynos4-sysreg", "syscon";
reg = <0x10010000 0x400>;
@@ -675,7 +675,7 @@
status = "disabled";
};
- amba {
+ amba: amba {
#address-cells = <1>;
#size-cells = <1>;
compatible = "simple-bus";
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index dd9ec05eb0f7..36b1edea254a 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -30,8 +30,8 @@
};
chosen {
- bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
- stdout-path = &serial_2;
+ bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M init=/linuxrc";
+ stdout-path = "serial2:115200n8";
};
mmc_reg: voltage-regulator {
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 7a3e621edede..77fc11e593ad 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -26,8 +26,8 @@
};
chosen {
- bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
- stdout-path = &serial_1;
+ bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M init=/linuxrc";
+ stdout-path = "serial1:115200n8";
};
fixed-rate-clocks {
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 8dbc47d627a5..6882480dbaf7 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -26,8 +26,8 @@
};
chosen {
- bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
- stdout-path = &serial_2;
+ bootargs = "root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
+ stdout-path = "serial2:115200n8";
};
regulators {
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 5c3d98654f13..bf092e97e14f 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -24,8 +24,8 @@
};
chosen {
- bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rw rootwait earlyprintk panic=5 maxcpus=1";
- stdout-path = &serial_2;
+ bootargs = "root=/dev/mmcblk0p5 rw rootwait earlyprintk panic=5 maxcpus=1";
+ stdout-path = "serial2:115200n8";
};
@@ -177,6 +177,20 @@
};
};
+&amba {
+ mdma0: mdma@12840000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x12840000 0x1000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock CLK_MDMA>;
+ clock-names = "apb_pclk";
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <1>;
+ power-domains = <&pd_lcd0>;
+ };
+};
+
&camera {
status = "okay";
@@ -491,7 +505,8 @@
};
&mdma1 {
- reg = <0x12840000 0x1000>;
+ /* Use the secure mdma0 */
+ status = "disabled";
};
&mixer {
diff --git a/arch/arm/boot/dts/exynos4412-odroidu3.dts b/arch/arm/boot/dts/exynos4412-odroidu3.dts
index 2bdf899df436..96d99887bceb 100644
--- a/arch/arm/boot/dts/exynos4412-odroidu3.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidu3.dts
@@ -34,8 +34,6 @@
fan0: pwm-fan {
compatible = "pwm-fan";
pwms = <&pwm 0 10000 0>;
- cooling-min-state = <0>;
- cooling-max-state = <3>;
#cooling-cells = <2>;
cooling-levels = <0 102 170 230>;
};
@@ -66,6 +64,11 @@
};
};
+&adc {
+ vdd-supply = <&ldo10_reg>;
+ /* Nothing connected to ADC inputs, keep it disabled */
+};
+
/* Supply for LAN9730/SMSC95xx */
&buck8_reg {
regulator-name = "BUCK8_P3V3";
diff --git a/arch/arm/boot/dts/exynos4412-origen.dts b/arch/arm/boot/dts/exynos4412-origen.dts
index 346f71932457..698de4345d16 100644
--- a/arch/arm/boot/dts/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/exynos4412-origen.dts
@@ -25,8 +25,7 @@
};
chosen {
- bootargs ="console=ttySAC2,115200";
- stdout-path = &serial_2;
+ stdout-path = "serial2:115200n8";
};
firmware@203f000 {
diff --git a/arch/arm/boot/dts/exynos4412-smdk4412.dts b/arch/arm/boot/dts/exynos4412-smdk4412.dts
index 5c5c2887c14f..e70fb6e601f0 100644
--- a/arch/arm/boot/dts/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/exynos4412-smdk4412.dts
@@ -23,8 +23,8 @@
};
chosen {
- bootargs ="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc";
- stdout-path = &serial_1;
+ bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M init=/linuxrc";
+ stdout-path = "serial1:115200n8";
};
fixed-rate-clocks {
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index 327ee980d3a5..aac533933c61 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -22,6 +22,7 @@
};
chosen {
- bootargs = "console=ttySAC2,115200N8 root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
+ bootargs = "root=/dev/mmcblk0p5 rootwait earlyprintk panic=5";
+ stdout-path = "serial2:115200n8";
};
};
diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi
index 26ad6ab3c6af..e5c041ec0756 100644
--- a/arch/arm/boot/dts/exynos4412.dtsi
+++ b/arch/arm/boot/dts/exynos4412.dtsi
@@ -274,7 +274,7 @@
};
adc: adc@126c0000 {
- compatible = "samsung,exynos-adc-v1";
+ compatible = "samsung,exynos4212-adc";
reg = <0x126C0000 0x100>;
interrupt-parent = <&combiner>;
interrupts = <10 3>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index d5e66189ed2a..6dc96948a9cc 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -24,7 +24,8 @@
};
chosen {
- bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC2,115200 init=/linuxrc";
+ bootargs = "root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M init=/linuxrc";
+ stdout-path = "serial2:115200n8";
};
vdd: fixed-regulator-vdd {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 80986b97dfe5..d5e0392b409e 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -157,6 +157,12 @@
};
};
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupt-parent = <&combiner>;
+ interrupts = <1 2>, <22 4>;
+ };
+
soc: soc {
sysram@2020000 {
compatible = "mmio-sram";
@@ -227,20 +233,6 @@
power-domains = <&pd_mau>;
};
- timer {
- compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
- /*
- * Unfortunately we need this since some versions
- * of U-Boot on Exynos don't set the CNTFRQ register,
- * so we need the value from DT.
- */
- clock-frequency = <24000000>;
- };
-
mct@101c0000 {
compatible = "samsung,exynos4210-mct";
reg = <0x101C0000 0x800>;
@@ -265,12 +257,6 @@
};
};
- pmu {
- compatible = "arm,cortex-a15-pmu";
- interrupt-parent = <&combiner>;
- interrupts = <1 2>, <22 4>;
- };
-
pinctrl_0: pinctrl@11400000 {
compatible = "samsung,exynos5250-pinctrl";
reg = <0x11400000 0x1000>;
@@ -1097,6 +1083,20 @@
};
};
};
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ /*
+ * Unfortunately we need this since some versions
+ * of U-Boot on Exynos don't set the CNTFRQ register,
+ * so we need the value from DT.
+ */
+ clock-frequency = <24000000>;
+ };
};
&dp {
diff --git a/arch/arm/boot/dts/exynos5260-pinctrl.dtsi b/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
index b1edb20b789e..17e2f3e0d71e 100644
--- a/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5260-pinctrl.dtsi
@@ -153,6 +153,14 @@
#gpio-cells = <2>;
interrupt-controller;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
};
@@ -161,6 +169,14 @@
#gpio-cells = <2>;
interrupt-controller;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
};
diff --git a/arch/arm/boot/dts/exynos5260-xyref5260.dts b/arch/arm/boot/dts/exynos5260-xyref5260.dts
index fa19c59b2fb6..36a2b77eeb9d 100644
--- a/arch/arm/boot/dts/exynos5260-xyref5260.dts
+++ b/arch/arm/boot/dts/exynos5260-xyref5260.dts
@@ -19,7 +19,7 @@
};
chosen {
- bootargs = "console=ttySAC2,115200";
+ stdout-path = "serial2:115200n8";
};
fin_pll: xxti {
diff --git a/arch/arm/boot/dts/exynos5260.dtsi b/arch/arm/boot/dts/exynos5260.dtsi
index 55167850619c..3581b57fbbf7 100644
--- a/arch/arm/boot/dts/exynos5260.dtsi
+++ b/arch/arm/boot/dts/exynos5260.dtsi
@@ -17,6 +17,10 @@
#size-cells = <1>;
aliases {
+ i2c0 = &hsi2c_0;
+ i2c1 = &hsi2c_1;
+ i2c2 = &hsi2c_2;
+ i2c3 = &hsi2c_3;
pinctrl0 = &pinctrl_0;
pinctrl1 = &pinctrl_1;
pinctrl2 = &pinctrl_2;
@@ -223,7 +227,7 @@
wakeup-interrupt-controller {
compatible = "samsung,exynos4210-wakeup-eint";
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -288,6 +292,14 @@
#size-cells = <0>;
clocks = <&clock_fsys FSYS_CLK_MMC0>, <&clock_top TOP_SCLK_MMC0>;
clock-names = "biu", "ciu";
+ assigned-clocks =
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_B>,
+ <&clock_top TOP_SCLK_MMC0>;
+ assigned-clock-parents =
+ <&clock_top TOP_MOUT_BUSTOP_PLL_USER>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A>;
+ assigned-clock-rates = <0>, <0>, <800000000>;
fifo-depth = <64>;
status = "disabled";
};
@@ -300,6 +312,14 @@
#size-cells = <0>;
clocks = <&clock_fsys FSYS_CLK_MMC1>, <&clock_top TOP_SCLK_MMC1>;
clock-names = "biu", "ciu";
+ assigned-clocks =
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_B>,
+ <&clock_top TOP_SCLK_MMC1>;
+ assigned-clock-parents =
+ <&clock_top TOP_MOUT_BUSTOP_PLL_USER>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A>;
+ assigned-clock-rates = <0>, <0>, <800000000>;
fifo-depth = <64>;
status = "disabled";
};
@@ -312,9 +332,69 @@
#size-cells = <0>;
clocks = <&clock_fsys FSYS_CLK_MMC2>, <&clock_top TOP_SCLK_MMC2>;
clock-names = "biu", "ciu";
+ assigned-clocks =
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_B>,
+ <&clock_top TOP_SCLK_MMC2>;
+ assigned-clock-parents =
+ <&clock_top TOP_MOUT_BUSTOP_PLL_USER>,
+ <&clock_top TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A>;
+ assigned-clock-rates = <0>, <0>, <800000000>;
fifo-depth = <64>;
status = "disabled";
};
+
+ hsi2c_0: hsi2c@12da0000 {
+ compatible = "samsung,exynos5260-hsi2c";
+ reg = <0x12DA0000 0x1000>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_hs_bus>;
+ clocks = <&clock_peri PERI_CLK_HSIC0>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_1: hsi2c@12db0000 {
+ compatible = "samsung,exynos5260-hsi2c";
+ reg = <0x12DB0000 0x1000>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_hs_bus>;
+ clocks = <&clock_peri PERI_CLK_HSIC1>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_2: hsi2c@12dc0000 {
+ compatible = "samsung,exynos5260-hsi2c";
+ reg = <0x12DC0000 0x1000>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_hs_bus>;
+ clocks = <&clock_peri PERI_CLK_HSIC2>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
+
+ hsi2c_3: hsi2c@12dd0000 {
+ compatible = "samsung,exynos5260-hsi2c";
+ reg = <0x12DD0000 0x1000>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_hs_bus>;
+ clocks = <&clock_peri PERI_CLK_HSIC3>;
+ clock-names = "hsi2c";
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index 434a7591ff63..8f9e08f940ab 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -38,8 +38,6 @@
fan0: pwm-fan {
compatible = "pwm-fan";
pwms = <&pwm 0 20972 0>;
- cooling-min-state = <0>;
- cooling-max-state = <3>;
#cooling-cells = <2>;
cooling-levels = <0 130 170 230>;
};
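
The cooling-min-state/cooling-max-state properties dropped above are not consumed by the pwm-fan driver; the fan is described by cooling-levels and #cooling-cells, and its usable state range comes from whichever thermal cooling-map references it. A minimal sketch of such a map, assuming a passive trip labelled cpu_alert0 (hypothetical label; the 0..3 range matches the four cooling-levels above):

	cooling-maps {
		map0 {
			trip = <&cpu_alert0>;
			cooling-device = <&fan0 0 3>;
		};
	};
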
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index 8fc8c841d34b..dffa5e3ed90c 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -19,7 +19,7 @@
};
chosen {
- bootargs = "console=ttySAC2,115200";
+ stdout-path = "serial2:115200n8";
};
fin_pll: xxti {
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index 3447160e1fbf..dbf0306896f6 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -24,7 +24,7 @@
};
chosen {
- bootargs = "console=ttySAC3,115200";
+ stdout-path = "serial3:115200n8";
};
firmware@2073000 {
@@ -51,6 +51,15 @@
};
};
+&adc {
+ vdd-supply = <&ldo4_reg>;
+ status = "okay";
+};
+
+&cci {
+ status = "disabled";
+};
+
&cpu0 {
cpu-supply = <&buck2_reg>;
};
@@ -59,12 +68,268 @@
cpu-supply = <&buck6_reg>;
};
-&usbdrd_dwc3_1 {
- dr_mode = "host";
+&cpu0_thermal {
+ trips {
+ cpu0_alert0: cpu-alert-0 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu0_alert1: cpu-alert-1 {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu0_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu0_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ /*
+ * Reduce the CPU speed by 2 steps, down to: 1600 MHz
+ * and 1100 MHz.
+ */
+ map0 {
+ trip = <&cpu0_alert0>;
+ cooling-device = <&cpu0 0 2>,
+ <&cpu1 0 2>,
+ <&cpu2 0 2>,
+ <&cpu3 0 2>,
+ <&cpu4 0 2>,
+ <&cpu5 0 2>,
+ <&cpu6 0 2>,
+ <&cpu7 0 2>;
+ };
+
+ /*
+ * Reduce the CPU speed down to 1200 MHz big (6 steps)
+ * and 800 MHz LITTLE (5 steps).
+ */
+ map1 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&cpu0 3 6>,
+ <&cpu1 3 6>,
+ <&cpu2 3 6>,
+ <&cpu3 3 6>,
+ <&cpu4 3 5>,
+ <&cpu5 3 5>,
+ <&cpu6 3 5>,
+ <&cpu7 3 5>;
+ };
+
+ /*
+ * Reduce the CPU speed as much as possible, down to 700 MHz
+ * big (11 steps) and 600 MHz LITTLE (7 steps).
+ */
+ map2 {
+ trip = <&cpu0_alert2>;
+ cooling-device = <&cpu0 6 11>,
+ <&cpu1 6 11>,
+ <&cpu2 6 11>,
+ <&cpu3 6 11>,
+ <&cpu4 5 7>,
+ <&cpu5 5 7>,
+ <&cpu6 5 7>,
+ <&cpu7 5 7>;
+ };
+ };
};
-&cci {
- status = "disabled";
+&cpu1_thermal {
+ trips {
+ cpu1_alert0: cpu-alert-0 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu1_alert1: cpu-alert-1 {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu1_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu1_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert0>;
+ cooling-device = <&cpu0 0 2>,
+ <&cpu1 0 2>,
+ <&cpu2 0 2>,
+ <&cpu3 0 2>,
+ <&cpu4 0 2>,
+ <&cpu5 0 2>,
+ <&cpu6 0 2>,
+ <&cpu7 0 2>;
+ };
+
+ map1 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&cpu0 3 6>,
+ <&cpu1 3 6>,
+ <&cpu2 3 6>,
+ <&cpu3 3 6>,
+ <&cpu4 3 5>,
+ <&cpu5 3 5>,
+ <&cpu6 3 5>,
+ <&cpu7 3 5>;
+ };
+
+ map2 {
+ trip = <&cpu1_alert2>;
+ cooling-device = <&cpu0 6 11>,
+ <&cpu1 6 11>,
+ <&cpu2 6 11>,
+ <&cpu3 6 11>,
+ <&cpu4 5 7>,
+ <&cpu5 5 7>,
+ <&cpu6 5 7>,
+ <&cpu7 5 7>;
+ };
+ };
+};
+
+&cpu2_thermal {
+ trips {
+ cpu2_alert0: cpu-alert-0 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu2_alert1: cpu-alert-1 {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu2_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu2_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert0>;
+ cooling-device = <&cpu0 0 2>,
+ <&cpu1 0 2>,
+ <&cpu2 0 2>,
+ <&cpu3 0 2>,
+ <&cpu4 0 2>,
+ <&cpu5 0 2>,
+ <&cpu6 0 2>,
+ <&cpu7 0 2>;
+ };
+
+ map1 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&cpu0 3 6>,
+ <&cpu1 3 6>,
+ <&cpu2 3 6>,
+ <&cpu3 3 6>,
+ <&cpu4 3 5>,
+ <&cpu5 3 5>,
+ <&cpu6 3 5>,
+ <&cpu7 3 5>;
+ };
+
+ map2 {
+ trip = <&cpu2_alert2>;
+ cooling-device = <&cpu0 6 11>,
+ <&cpu1 6 11>,
+ <&cpu2 6 11>,
+ <&cpu3 6 11>,
+ <&cpu4 6 7>,
+ <&cpu5 6 7>,
+ <&cpu6 6 7>,
+ <&cpu7 6 7>;
+ };
+ };
+};
+
+&cpu3_thermal {
+ trips {
+ cpu3_alert0: cpu-alert-0 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <5000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu3_alert1: cpu-alert-1 {
+ temperature = <80000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu3_alert2: cpu-alert-2 {
+ temperature = <110000>; /* millicelsius */
+ hysteresis = <10000>; /* millicelsius */
+ type = "passive";
+ };
+ cpu3_crit0: cpu-crit-0 {
+ temperature = <120000>; /* millicelsius */
+ hysteresis = <0>; /* millicelsius */
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert0>;
+ cooling-device = <&cpu0 0 2>,
+ <&cpu1 0 2>,
+ <&cpu2 0 2>,
+ <&cpu3 0 2>,
+ <&cpu4 0 2>,
+ <&cpu5 0 2>,
+ <&cpu6 0 2>,
+ <&cpu7 0 2>;
+ };
+
+ map1 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&cpu0 3 6>,
+ <&cpu1 3 6>,
+ <&cpu2 3 6>,
+ <&cpu3 3 6>,
+ <&cpu4 3 5>,
+ <&cpu5 3 5>,
+ <&cpu6 3 5>,
+ <&cpu7 3 5>;
+ };
+
+ map2 {
+ trip = <&cpu3_alert2>;
+ cooling-device = <&cpu0 6 11>,
+ <&cpu1 6 11>,
+ <&cpu2 6 11>,
+ <&cpu3 6 11>,
+ <&cpu4 5 7>,
+ <&cpu5 5 7>,
+ <&cpu6 5 7>,
+ <&cpu7 5 7>;
+ };
+ };
};
&hdmi {
@@ -107,12 +372,19 @@
regulator-name = "PVDD_APIO_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo3_reg: LDO3 {
regulator-name = "PVDD_APIO_MMCON_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ /*
+ * Must be always on, even though there is
+ * a consumer (mmc_0). Otherwise the board
+ * does not reboot with vendor U-Boot
+ * (Linaro for Arndale Octa, v2012.07).
+ */
regulator-always-on;
};
@@ -145,6 +417,7 @@
regulator-name = "PVDD_ABB_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-always-on;
};
ldo9_reg: LDO9 {
@@ -176,10 +449,17 @@
ldo13_reg: LDO13 {
regulator-name = "PVDD_APIO_MMCOFF_2V8";
- regulator-min-microvolt = <2800000>;
+ regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2800000>;
};
+ ldo14_reg: LDO14 {
+ /* Unused */
+ regulator-name = "PVDD_LDO14";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo15_reg: LDO15 {
regulator-name = "PVDD_PERI_2V8";
regulator-min-microvolt = <3300000>;
@@ -192,6 +472,13 @@
regulator-max-microvolt = <2200000>;
};
+ ldo17_reg: LDO17 {
+ /* Unused */
+ regulator-name = "PVDD_LDO17";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo18_reg: LDO18 {
regulator-name = "PVDD_EMMC_1V8";
regulator-min-microvolt = <1800000>;
@@ -216,10 +503,17 @@
regulator-max-microvolt = <1800000>;
};
+ ldo22_reg: LDO22 {
+ /* Unused */
+ regulator-name = "PVDD_LDO22";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2375000>;
+ };
+
ldo23_reg: LDO23 {
regulator-name = "PVDD_MIFS_1V1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
regulator-always-on;
};
@@ -229,6 +523,13 @@
regulator-max-microvolt = <2800000>;
};
+ ldo25_reg: LDO25 {
+ /* Unused */
+ regulator-name = "PVDD_LDO25";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo26_reg: LDO26 {
regulator-name = "PVDD_CAM0_AF_2V8";
regulator-min-microvolt = <3000000>;
@@ -237,8 +538,8 @@
ldo27_reg: LDO27 {
regulator-name = "PVDD_G3DS_1V0";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1100000>;
};
ldo28_reg: LDO28 {
@@ -253,6 +554,13 @@
regulator-max-microvolt = <1800000>;
};
+ ldo30_reg: LDO30 {
+ /* Unused */
+ regulator-name = "PVDD_LDO30";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo31_reg: LDO31 {
regulator-name = "PVDD_PERI_1V8";
regulator-min-microvolt = <1800000>;
@@ -271,12 +579,33 @@
regulator-max-microvolt = <1800000>;
};
+ ldo34_reg: LDO34 {
+ /* Unused */
+ regulator-name = "PVDD_LDO34";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo35_reg: LDO35 {
regulator-name = "PVDD_CAM0_DVDD_1V2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
};
+ ldo36_reg: LDO36 {
+ /* Unused */
+ regulator-name = "PVDD_LDO36";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
+ ldo37_reg: LDO37 {
+ /* Unused */
+ regulator-name = "PVDD_LDO37";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3950000>;
+ };
+
ldo38_reg: LDO38 {
regulator-name = "PVDD_CAM0_AVDD_2V8";
regulator-min-microvolt = <2800000>;
@@ -364,7 +693,7 @@
&mmc_0 {
status = "okay";
- broken-cd;
+ non-removable;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
@@ -372,22 +701,27 @@
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8>;
vmmc-supply = <&ldo10_reg>;
+ vqmmc-supply = <&ldo3_reg>;
bus-width = <8>;
cap-mmc-highspeed;
+ mmc-hs200-1_8v;
};
&mmc_2 {
status = "okay";
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
- samsung,dw-mshc-sdr-timing = <2 3>;
- samsung,dw-mshc-ddr-timing = <1 2>;
+ samsung,dw-mshc-sdr-timing = <0 4>;
+ samsung,dw-mshc-ddr-timing = <0 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_cd &sd2_bus1 &sd2_bus4>;
vmmc-supply = <&ldo19_reg>;
vqmmc-supply = <&ldo13_reg>;
bus-width = <4>;
cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ sd-uhs-ddr50;
};
&pinctrl_0 {
@@ -404,3 +738,7 @@
clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
clock-names = "rtc", "rtc_src";
};
+
+&usbdrd_dwc3_1 {
+ dr_mode = "host";
+};
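
In the cooling-maps added above, each cooling-device entry is a <&cpuN min-state max-state> triplet, where state 0 is the fastest OPP and higher states step the frequency down, so a trip pins the CPUs to a window of allowed throttling steps. A minimal sketch of one trip plus map (labels illustrative, values copied from the first map above):

	trips {
		cpu_alert: cpu-alert {
			temperature = <60000>;	/* millicelsius */
			hysteresis = <5000>;	/* millicelsius */
			type = "passive";
		};
	};

	cooling-maps {
		map0 {
			trip = <&cpu_alert>;
			/* allow at most two frequency steps down from the top OPP */
			cooling-device = <&cpu0 0 2>;
		};
	};
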
diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts
index 3cf905047893..8240e5186972 100644
--- a/arch/arm/boot/dts/exynos5420-smdk5420.dts
+++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts
@@ -21,7 +21,8 @@
};
chosen {
- bootargs = "console=ttySAC2,115200 init=/linuxrc";
+ bootargs = "init=/linuxrc";
+ stdout-path = "serial2:115200n8";
};
fixed-rate-clocks {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index aaff15880761..5fb2326875dc 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -5,7 +5,7 @@
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * SAMSUNG EXYNOS54200 SoC device nodes are listed in this file.
+ * SAMSUNG EXYNOS5420 SoC device nodes are listed in this file.
* EXYNOS5420 based board files can include this file and provide
* values for board specfic bindings.
*/
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index 51a843bd65ed..c3c2d85267da 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -22,11 +22,12 @@
"Headphone Jack", "HPL",
"Headphone Jack", "HPR",
"Headphone Jack", "MICBIAS",
- "IN1", "Headphone Jack",
+ "IN12", "Headphone Jack",
"Speakers", "SPKL",
"Speakers", "SPKR",
"I2S Playback", "Mixer DAI TX",
- "HiFi Playback", "Mixer DAI TX";
+ "HiFi Playback", "Mixer DAI TX",
+ "Mixer DAI RX", "HiFi Capture";
assigned-clocks = <&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 5f195ad7e467..93a48f2dda49 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -44,8 +44,6 @@
fan0: pwm-fan {
compatible = "pwm-fan";
pwms = <&pwm 0 20972 0>;
- cooling-min-state = <0>;
- cooling-max-state = <3>;
#cooling-cells = <2>;
cooling-levels = <0 130 170 230>;
};
diff --git a/arch/arm/boot/dts/exynos54xx.dtsi b/arch/arm/boot/dts/exynos54xx.dtsi
index de26e5ee0d2d..ae866bcc30c4 100644
--- a/arch/arm/boot/dts/exynos54xx.dtsi
+++ b/arch/arm/boot/dts/exynos54xx.dtsi
@@ -25,27 +25,27 @@
usbdrdphy1 = &usbdrd_phy1;
};
- soc: soc {
- arm_a7_pmu: arm-a7-pmu {
- compatible = "arm,cortex-a7-pmu";
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
+ arm_a7_pmu: arm-a7-pmu {
+ compatible = "arm,cortex-a7-pmu";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
- arm_a15_pmu: arm-a15-pmu {
- compatible = "arm,cortex-a15-pmu";
- interrupt-parent = <&combiner>;
- interrupts = <1 2>,
- <7 0>,
- <16 6>,
- <19 2>;
- status = "disabled";
- };
+ arm_a15_pmu: arm-a15-pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupt-parent = <&combiner>;
+ interrupts = <1 2>,
+ <7 0>,
+ <16 6>,
+ <19 2>;
+ status = "disabled";
+ };
+ soc: soc {
sysram@2020000 {
compatible = "mmio-sram";
reg = <0x02020000 0x54000>;
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index 592111c8d6fd..cfbfbc91a1e1 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -267,44 +267,50 @@
/* 32MB of flash */
reg = <0x30000000 0x02000000>;
- /*
- * This "RedBoot" is the Storlink derivative.
- */
- partition@0 {
- label = "RedBoot";
- reg = <0x00000000 0x00040000>;
- read-only;
- };
- /*
- * This firmware image contains the kernel catenated
- * with the squashfs root filesystem. For some reason
- * this is called "upgrade" on the vendor system.
- */
- partition@40000 {
- label = "upgrade";
- reg = <0x00040000 0x01f40000>;
- read-only;
- };
- /* RGDB, Residental Gateway Database? */
- partition@1f80000 {
- label = "rgdb";
- reg = <0x01f80000 0x00040000>;
- read-only;
- };
- /*
- * This partition contains MAC addresses for WAN,
- * WLAN and LAN, and the country code (for wireless
- * I guess).
- */
- partition@1fc0000 {
- label = "nvram";
- reg = <0x01fc0000 0x00020000>;
- read-only;
- };
- partition@1fe0000 {
- label = "LangPack";
- reg = <0x01fe0000 0x00020000>;
- read-only;
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /*
+ * This "RedBoot" is the Storlink derivative.
+ */
+ partition@0 {
+ label = "RedBoot";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+ /*
+ * This firmware image contains the kernel catenated
+ * with the squashfs root filesystem. For some reason
+ * this is called "upgrade" on the vendor system.
+ */
+ partition@40000 {
+ label = "upgrade";
+ reg = <0x00040000 0x01f40000>;
+ read-only;
+ };
+ /* RGDB, Residental Gateway Database? */
+ partition@1f80000 {
+ label = "rgdb";
+ reg = <0x01f80000 0x00040000>;
+ read-only;
+ };
+ /*
+ * This partition contains MAC addresses for WAN,
+ * WLAN and LAN, and the country code (for wireless
+ * I guess).
+ */
+ partition@1fc0000 {
+ label = "nvram";
+ reg = <0x01fc0000 0x00020000>;
+ read-only;
+ };
+ partition@1fe0000 {
+ label = "LangPack";
+ reg = <0x01fe0000 0x00020000>;
+ read-only;
+ };
};
};
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index 59cadeee23ed..9cbdc1a15cda 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -21,6 +21,12 @@
gpio0 = &gpio1;
gpio1 = &gpio2;
gpio2 = &gpio3;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ mmc0 = &esdhc1;
+ mmc1 = &esdhc2;
+ mmc2 = &esdhc3;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/imx50-kobo-aura.dts b/arch/arm/boot/dts/imx50-kobo-aura.dts
new file mode 100644
index 000000000000..a0eaf869b913
--- /dev/null
+++ b/arch/arm/boot/dts/imx50-kobo-aura.dts
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2019 Jonathan Neuschäfer
+//
+// The Kobo Aura e-book reader, model N514. The mainboard is marked as E606F0B.
+
+/dts-v1/;
+#include "imx50.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Kobo Aura (N514)";
+ compatible = "kobo,aura", "fsl,imx50";
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory@70000000 {
+ device_type = "memory";
+ reg = <0x70000000 0x10000000>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+ on {
+ label = "kobo_aura:orange:on";
+ gpios = <&gpio6 24 GPIO_ACTIVE_LOW>;
+ panic-indicator;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpiokeys>;
+
+ power {
+ label = "Power Button";
+ gpios = <&gpio4 10 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_POWER>;
+ };
+
+ hallsensor {
+ label = "Hallsensor";
+ gpios = <&gpio5 15 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_RESERVED>;
+ linux,input-type = <EV_SW>;
+ };
+
+ frontlight {
+ label = "Frontlight";
+ gpios = <&gpio4 1 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_DISPLAYTOGGLE>;
+ };
+ };
+
+ sd2_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd2_reset>;
+ reset-gpios = <&gpio4 17 GPIO_ACTIVE_LOW>;
+ };
+
+ sd2_vmmc: gpio-regulator {
+ compatible = "regulator-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd2_vmmc>;
+ regulator-name = "vmmc";
+ states = <3300000 0>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-gpio = <&gpio4 12 GPIO_ACTIVE_LOW>;
+ startup-delay-us = <100000>;
+ };
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd1>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ cd-gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
+ disable-wp;
+ status = "okay";
+
+ /* External µSD card */
+};
+
+&esdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd2>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ disable-wp;
+ mmc-pwrseq = <&sd2_pwrseq>;
+ vmmc-supply = <&sd2_vmmc>;
+ status = "okay";
+
+ /* CyberTan WC121 SDIO WiFi (BCM43362) */
+};
+
+&esdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sd3>;
+ bus-width = <8>;
+ non-removable;
+ max-frequency = <50000000>;
+ disable-wp;
+ status = "okay";
+
+ /* Internal eMMC */
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ /* TODO: ektf2132 touch controller at 0x15 */
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ /* TODO: TPS65185 PMIC for E Ink at 0x68 */
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ /* TODO: embedded controller at 0x43 */
+};
+
+&iomuxc {
+ pinctrl_gpiokeys: gpiokeys {
+ fsl,pins = <
+ MX50_PAD_CSPI_MISO__GPIO4_10 0x0
+ MX50_PAD_SD2_D7__GPIO5_15 0x0
+ MX50_PAD_KEY_ROW0__GPIO4_1 0x0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1 {
+ fsl,pins = <
+ MX50_PAD_I2C1_SCL__I2C1_SCL 0x400001fd
+ MX50_PAD_I2C1_SDA__I2C1_SDA 0x400001fd
+ >;
+ };
+
+ pinctrl_i2c2: i2c2 {
+ fsl,pins = <
+ MX50_PAD_I2C2_SCL__I2C2_SCL 0x400001fd
+ MX50_PAD_I2C2_SDA__I2C2_SDA 0x400001fd
+ >;
+ };
+
+ pinctrl_i2c3: i2c3 {
+ fsl,pins = <
+ MX50_PAD_I2C3_SCL__I2C3_SCL 0x400001fd
+ MX50_PAD_I2C3_SDA__I2C3_SDA 0x400001fd
+ >;
+ };
+
+ pinctrl_leds: leds {
+ fsl,pins = <
+ MX50_PAD_PWM1__GPIO6_24 0x0
+ >;
+ };
+
+ pinctrl_sd1: sd1 {
+ fsl,pins = <
+ MX50_PAD_SD1_CMD__ESDHC1_CMD 0x1e4
+ MX50_PAD_SD1_CLK__ESDHC1_CLK 0xd4
+ MX50_PAD_SD1_D0__ESDHC1_DAT0 0x1d4
+ MX50_PAD_SD1_D1__ESDHC1_DAT1 0x1d4
+ MX50_PAD_SD1_D2__ESDHC1_DAT2 0x1d4
+ MX50_PAD_SD1_D3__ESDHC1_DAT3 0x1d4
+
+ MX50_PAD_SD2_CD__GPIO5_17 0x0
+ >;
+ };
+
+ pinctrl_sd2: sd2 {
+ fsl,pins = <
+ MX50_PAD_SD2_CMD__ESDHC2_CMD 0x1e4
+ MX50_PAD_SD2_CLK__ESDHC2_CLK 0xd4
+ MX50_PAD_SD2_D0__ESDHC2_DAT0 0x1d4
+ MX50_PAD_SD2_D1__ESDHC2_DAT1 0x1d4
+ MX50_PAD_SD2_D2__ESDHC2_DAT2 0x1d4
+ MX50_PAD_SD2_D3__ESDHC2_DAT3 0x1d4
+ >;
+ };
+
+ pinctrl_sd2_reset: sd2-reset {
+ fsl,pins = <
+ MX50_PAD_ECSPI2_MOSI__GPIO4_17 0x0
+ >;
+ };
+
+ pinctrl_sd2_vmmc: sd2-vmmc {
+ fsl,pins = <
+ MX50_PAD_ECSPI1_SCLK__GPIO4_12 0x0
+ >;
+ };
+
+ pinctrl_sd3: sd3 {
+ fsl,pins = <
+ MX50_PAD_SD3_CMD__ESDHC3_CMD 0x1e4
+ MX50_PAD_SD3_CLK__ESDHC3_CLK 0xd4
+ MX50_PAD_SD3_D0__ESDHC3_DAT0 0x1d4
+ MX50_PAD_SD3_D1__ESDHC3_DAT1 0x1d4
+ MX50_PAD_SD3_D2__ESDHC3_DAT2 0x1d4
+ MX50_PAD_SD3_D3__ESDHC3_DAT3 0x1d4
+ MX50_PAD_SD3_D4__ESDHC3_DAT4 0x1d4
+ MX50_PAD_SD3_D5__ESDHC3_DAT5 0x1d4
+ MX50_PAD_SD3_D6__ESDHC3_DAT6 0x1d4
+ MX50_PAD_SD3_D7__ESDHC3_DAT7 0x1d4
+ >;
+ };
+
+ pinctrl_uart2: uart2 {
+ fsl,pins = <
+ MX50_PAD_UART2_TXD__UART2_TXD_MUX 0x1e4
+ MX50_PAD_UART2_RXD__UART2_RXD_MUX 0x1e4
+ >;
+ };
+
+ pinctrl_usbphy: usbphy {
+ fsl,pins = <
+ MX50_PAD_ECSPI2_SS0__GPIO4_19 0x0
+ >;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usbotg {
+ phy_type = "utmi_wide";
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbphy0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbphy>;
+ vbus-detect-gpio = <&gpio4 19 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/imx50.dtsi b/arch/arm/boot/dts/imx50.dtsi
index ee1e3e8bf4ec..0bfe7c91d0eb 100644
--- a/arch/arm/boot/dts/imx50.dtsi
+++ b/arch/arm/boot/dts/imx50.dtsi
@@ -26,11 +26,21 @@
gpio3 = &gpio4;
gpio4 = &gpio5;
gpio5 = &gpio6;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ mmc0 = &esdhc1;
+ mmc1 = &esdhc2;
+ mmc2 = &esdhc3;
+ mmc3 = &esdhc4;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &cspi;
};
cpus {
@@ -76,6 +86,14 @@
};
};
+ usbphy0: usbphy-0 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clks IMX5_CLK_USB_PHY1_GATE>;
+ clock-names = "main_clk";
+ #phy-cells = <0>;
+ status = "okay";
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -187,7 +205,8 @@
compatible = "fsl,imx50-usb", "fsl,imx27-usb";
reg = <0x53f80000 0x0200>;
interrupts = <18>;
- clocks = <&clks IMX5_CLK_USB_PHY1_GATE>;
+ clocks = <&clks IMX5_CLK_USBOH3_GATE>;
+ fsl,usbphy = <&usbphy0>;
status = "disabled";
};
@@ -411,7 +430,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx50.bin";
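
The SDMA clock fix above (repeated for i.MX51 and i.MX53 further down) stops listing the same gate for both clock inputs: "ipg" keeps the SDMA gate while "ahb" now points at the real AHB bus clock. A minimal sketch of the corrected wiring (reg and clock indices taken from the hunk; other required properties omitted):

	sdma@63fb0000 {
		reg = <0x63fb0000 0x4000>;
		clocks = <&clks IMX5_CLK_SDMA_GATE>,	/* ipg */
			 <&clks IMX5_CLK_AHB>;		/* ahb */
		clock-names = "ipg", "ahb";
	};
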
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index a8220f08dcbf..3596060f52e7 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2017 Zodiac Inflight Innovations
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index a5ee25cedc10..0a4b9a5d9a9c 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -489,7 +489,7 @@
reg = <0x83fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx51.bin";
diff --git a/arch/arm/boot/dts/imx53-m53.dtsi b/arch/arm/boot/dts/imx53-m53.dtsi
index db2e5bce9b6a..d1770e1d5e50 100644
--- a/arch/arm/boot/dts/imx53-m53.dtsi
+++ b/arch/arm/boot/dts/imx53-m53.dtsi
@@ -52,7 +52,7 @@
clock-frequency = <400000>;
status = "okay";
- stmpe610@41 {
+ touchscreen@41 {
compatible = "st,stmpe610";
reg = <0x41>;
id = <0>;
diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
new file mode 100644
index 000000000000..f0a3fde0739c
--- /dev/null
+++ b/arch/arm/boot/dts/imx53-m53menlo.dts
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Marek Vasut <marex@denx.de>
+ */
+
+/dts-v1/;
+#include "imx53-m53.dtsi"
+
+/ {
+ model = "MENLO M53 EMBEDDED DEVICE";
+ compatible = "menlo,m53menlo", "fsl,imx53";
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+ user1 {
+ label = "TestLed601";
+ gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+
+ user2 {
+ label = "TestLed602";
+ gpios = <&gpio6 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ eth {
+ label = "EthLedYe";
+ gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "none";
+ };
+ };
+
+ panel {
+ compatible = "edt,etm070080dh6";
+ enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
+
+ reg_usbh1_vbus: regulator-usbh1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_can2>;
+ status = "okay";
+};
+
+&clks {
+ assigned-clocks = <&clks IMX5_CLK_CKO1_SEL>,
+ <&clks IMX5_CLK_CKO1_PODF>,
+ <&clks IMX5_CLK_CKO1>;
+ assigned-clock-parents = <&clks IMX5_CLK_AHB>;
+ assigned-clock-rates = <133333334>, <33333334>, <33333334>;
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec>;
+ phy-mode = "rmii";
+ status = "okay";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5x06";
+ reg = <0x38>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_edt_ft5x06>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ reset-gpios = <&gpio2 9 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ };
+
+ dac@60 {
+ compatible = "microchip,mcp4725";
+ reg = <0x60>;
+ };
+};
+
+&i2c2 {
+ touchscreen@41 {
+ status = "disabled";
+ };
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ imx53-m53evk {
+ hoggrp {
+ fsl,pins = <
+ MX53_PAD_GPIO_0__CCM_SSI_EXT1_CLK 0x1c4
+ MX53_PAD_EIM_EB3__GPIO2_31 0x1d5
+ MX53_PAD_PATA_DA_0__GPIO7_6 0x1d5
+ MX53_PAD_GPIO_19__CCM_CLKO 0x1d5
+ MX53_PAD_CSI0_MCLK__CCM_CSI0_MCLK 0x1d5
+ MX53_PAD_CSI0_DAT4__GPIO5_22 0x1d5
+ MX53_PAD_CSI0_DAT5__GPIO5_23 0x1d5
+ MX53_PAD_CSI0_DAT6__GPIO5_24 0x1d5
+ MX53_PAD_CSI0_DAT7__GPIO5_25 0x1d5
+ MX53_PAD_CSI0_DAT8__GPIO5_26 0x1d5
+ MX53_PAD_CSI0_DAT9__GPIO5_27 0x1d5
+ MX53_PAD_CSI0_DAT10__GPIO5_28 0x1d5
+ MX53_PAD_CSI0_DAT11__GPIO5_29 0x1d5
+ MX53_PAD_CSI0_DAT14__GPIO6_0 0x1d5
+ >;
+ };
+
+ pinctrl_led: ledgrp {
+ fsl,pins = <
+ MX53_PAD_CSI0_DAT15__GPIO6_1 0x1d5
+ MX53_PAD_CSI0_DAT16__GPIO6_2 0x1d5
+ >;
+ };
+
+ pinctrl_can1: can1grp {
+ fsl,pins = <
+ MX53_PAD_GPIO_7__CAN1_TXCAN 0x1c4
+ MX53_PAD_GPIO_8__CAN1_RXCAN 0x1c4
+ >;
+ };
+
+ pinctrl_can2: can2grp {
+ fsl,pins = <
+ MX53_PAD_KEY_COL4__CAN2_TXCAN 0x1c4
+ MX53_PAD_KEY_ROW4__CAN2_RXCAN 0x1c4
+ >;
+ };
+
+ pinctrl_display_gpio: display-gpiogrp {
+ fsl,pins = <
+ MX53_PAD_CSI0_DAT12__GPIO5_30 0x1d5 /* Reset */
+ MX53_PAD_CSI0_DAT13__GPIO5_31 0x1d5 /* Interrupt */
+ >;
+ };
+
+ pinctrl_edt_ft5x06: edt-ft5x06grp {
+ fsl,pins = <
+ MX53_PAD_PATA_DATA9__GPIO2_9 0x1d5 /* Reset */
+ MX53_PAD_CSI0_DAT19__GPIO6_5 0x1d5 /* Interrupt */
+ MX53_PAD_PATA_DATA10__GPIO2_10 0x1d5 /* Wake */
+ >;
+ };
+
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ MX53_PAD_SD1_DATA0__ESDHC1_DAT0 0x1d5
+ MX53_PAD_SD1_DATA1__ESDHC1_DAT1 0x1d5
+ MX53_PAD_SD1_DATA2__ESDHC1_DAT2 0x1d5
+ MX53_PAD_SD1_DATA3__ESDHC1_DAT3 0x1d5
+ MX53_PAD_SD1_CMD__ESDHC1_CMD 0x1d5
+ MX53_PAD_SD1_CLK__ESDHC1_CLK 0x1d5
+ >;
+ };
+
+ pinctrl_fec: fecgrp {
+ fsl,pins = <
+ MX53_PAD_FEC_MDC__FEC_MDC 0x4
+ MX53_PAD_FEC_MDIO__FEC_MDIO 0x1fc
+ MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x180
+ MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x180
+ MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x180
+ MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x180
+ MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x180
+ MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x4
+ MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x4
+ MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x4
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX53_PAD_EIM_D21__I2C1_SCL 0x400001e4
+ MX53_PAD_EIM_D28__I2C1_SDA 0x400001e4
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX53_PAD_GPIO_6__I2C3_SDA 0x400001e4
+ MX53_PAD_GPIO_5__I2C3_SCL 0x400001e4
+ >;
+ };
+
+ pinctrl_lvds0: lvds0grp {
+ /* LVDS pins only have pin mux configuration */
+ fsl,pins = <
+ MX53_PAD_LVDS0_CLK_P__LDB_LVDS0_CLK 0x80000000
+ MX53_PAD_LVDS0_TX0_P__LDB_LVDS0_TX0 0x80000000
+ MX53_PAD_LVDS0_TX1_P__LDB_LVDS0_TX1 0x80000000
+ MX53_PAD_LVDS0_TX2_P__LDB_LVDS0_TX2 0x80000000
+ MX53_PAD_LVDS0_TX3_P__LDB_LVDS0_TX3 0x80000000
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
+ MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
+ MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
+ >;
+ };
+
+ pinctrl_usb: usbgrp {
+ fsl,pins = <
+ MX53_PAD_GPIO_2__GPIO1_2 0x1d5
+ MX53_PAD_GPIO_3__USBOH3_USBH1_OC 0x1d5
+ >;
+ };
+ };
+};
+
+&ldb {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lvds0>;
+ status = "okay";
+
+ lvds0: lvds-channel@0 {
+ reg = <0>;
+ fsl,data-mapping = "spwg";
+ fsl,data-width = <18>;
+ status = "okay";
+
+ port@2 {
+ reg = <2>;
+
+ lvds0_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usbh1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb>;
+ vbus-supply = <&reg_usbh1_vbus>;
+ phy_type = "utmi";
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usbotg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index b3300300aabe..9b672ed2486d 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -702,7 +702,7 @@
reg = <0x63fb0000 0x4000>;
interrupts = <6>;
clocks = <&clks IMX5_CLK_SDMA_GATE>,
- <&clks IMX5_CLK_SDMA_GATE>;
+ <&clks IMX5_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx53.bin";
diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
index fb01fa6e4224..2a6ce87071f9 100644
--- a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
+++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi
@@ -88,6 +88,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <70000>;
enable-active-high;
};
@@ -99,6 +100,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <70000>;
enable-active-high;
regulator-always-on;
};
@@ -216,7 +218,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-duration = <10>;
phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
phy-supply = <&reg_enet>;
@@ -247,9 +249,9 @@
gpio-cfg = <
0x0000 /* 0:Default */
0x0000 /* 1:Default */
- 0x0013 /* 2:FN_DMICCLK */
+ 0x0000 /* 2:FN_DMICCLK */
0x0000 /* 3:Default */
- 0x8014 /* 4:FN_DMICCDAT */
+ 0x0000 /* 4:FN_DMICCDAT */
0x0000 /* 5:Default */
>;
};
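
Several boards below switch phy-mode from "rgmii" to "rgmii-id", which asks the PHY to insert both RX and TX clock delays itself instead of relying on MAC pad settings or bootloader configuration. A minimal sketch of the pattern on a FEC node (GPIO values copied from the hunk above; treat them as board-specific):

	&fec {
		phy-mode = "rgmii-id";
		phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
		status = "okay";
	};
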
diff --git a/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts
new file mode 100644
index 000000000000..9eb2b73951b2
--- /dev/null
+++ b/arch/arm/boot/dts/imx6dl-eckelmann-ci4x10.dts
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016 Eckelmann AG.
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+
+#include "imx6dl.dtsi"
+
+/ {
+ model = "Eckelmann CI 4X10 Board";
+ compatible = "eckelmann,imx6dl-ci4x10", "fsl,imx6dl";
+
+ chosen {
+ stdout-path = &uart3;
+ };
+
+ memory@10000000 {
+ device_type = "memory";
+ reg = <0x10000000 0x40000000>;
+ };
+
+ rmii_clk: clock-rmii {
+ /* This clock is provided by the phy (KSZ8091RNB) */
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+
+ reg_usb_h1_vbus: regulator-usb-h1-vbus {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usb_h1_vbus>;
+ compatible = "regulator-fixed";
+ regulator-name = "usb_h1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ siox {
+ compatible = "eckelmann,siox-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_siox>;
+ din-gpios = <&gpio6 11 GPIO_ACTIVE_HIGH>;
+ dout-gpios = <&gpio6 8 GPIO_ACTIVE_HIGH>;
+ dclk-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>;
+ dld-gpios = <&gpio6 10 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ status = "okay";
+};
+
+&can2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ status = "okay";
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ cs-gpios = <&gpio5 12 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "everspin,mr25h256";
+ reg = <0>;
+ spi-max-frequency = <15000000>;
+ };
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio5 25 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ tpm@0 {
+ compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ };
+};
+
+&gpio2 {
+ gpio-line-names = "buzzer", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio4 {
+ gpio-line-names = "", "", "", "", "", "", "", "in2",
+ "prio2", "prio1", "aux", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&gpio6 {
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "in1",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "";
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ temperature-sensor@49 {
+ compatible = "ad,ad7414";
+ reg = <0x49>;
+ };
+
+ rtc@51 {
+ compatible = "nxp,pcf2127";
+ reg = <0x51>;
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hog {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_D0__GPIO2_IO00 0x00000018 /* buzzer */
+ MX6QDL_PAD_KEY_COL1__GPIO4_IO08 0x00000018 /* OUT_1 */
+ MX6QDL_PAD_KEY_ROW1__GPIO4_IO09 0x00000018 /* OUT_2 */
+ MX6QDL_PAD_KEY_COL2__GPIO4_IO10 0x00000018 /* OUT_3 */
+ MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x00000000 /* In1 */
+ MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x00000000 /* In2 */
+ MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x00000018 /* unused watchdog pin */
+ MX6QDL_PAD_SD1_DAT2__GPIO1_IO19 0x00000018 /* unused watchdog pin */
+
+ >;
+ };
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT4__ECSPI1_SCLK 0x000100a0
+ MX6QDL_PAD_CSI0_DAT5__ECSPI1_MOSI 0x000100a0
+ MX6QDL_PAD_CSI0_DAT6__ECSPI1_MISO 0x000100a0
+ MX6QDL_PAD_CSI0_DAT7__GPIO5_IO25 0x000100a0
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT19__ECSPI2_SCLK 0x000100b1
+ MX6QDL_PAD_EIM_CS1__ECSPI2_MOSI 0x000100b1
+ MX6QDL_PAD_EIM_OE__ECSPI2_MISO 0x000100b1
+ MX6QDL_PAD_DISP0_DAT18__GPIO5_IO12 0x000100b1
+ >;
+ };
+
+ pinctrl_enet: enetgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8
+ MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x0001b098
+ MX6QDL_PAD_ENET_MDC__ENET_MDC 0x0001b098
+ MX6QDL_PAD_ENET_TXD0__ENET_TX_DATA0 0x0001b098
+ MX6QDL_PAD_ENET_TXD1__ENET_TX_DATA1 0x0001b098
+ MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x0001b098
+ MX6QDL_PAD_ENET_RX_ER__ENET_RX_ER 0x0001b0b0
+ MX6QDL_PAD_ENET_RXD0__ENET_RX_DATA0 0x0001b0b0
+ MX6QDL_PAD_ENET_RXD1__ENET_RX_DATA1 0x0001b0b0
+ MX6QDL_PAD_ENET_CRS_DV__ENET_RX_EN 0x0001b0b0
+ MX6QDL_PAD_SD1_CMD__GPIO1_IO18 0x00000018
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__FLEXCAN1_TX 0x0001b020
+ MX6QDL_PAD_GPIO_8__FLEXCAN1_RX 0x0001b0b0
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL4__FLEXCAN2_TX 0x0001b020
+ MX6QDL_PAD_KEY_ROW4__FLEXCAN2_RX 0x0001b0b0
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ /* without SION i2c doesn't detect bus busy */
+ MX6QDL_PAD_CSI0_DAT9__I2C1_SCL 0x4001b820
+ MX6QDL_PAD_CSI0_DAT8__I2C1_SDA 0x4001b820
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ MX6QDL_PAD_SD1_CLK__GPIO1_IO20 0x00000018
+ >;
+ };
+
+ pinctrl_reg_usb_h1_vbus: reg_usb_h1_vbusgrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x0001b0b0
+ >;
+ };
+
+ pinctrl_siox: sioxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_NANDF_CS0__GPIO6_IO11 0x0001b010 /* DIN */
+ MX6QDL_PAD_NANDF_ALE__GPIO6_IO08 0x0001b010 /* DOUT */
+ MX6QDL_PAD_NANDF_WP_B__GPIO6_IO09 0x0001b010 /* DCLK */
+ MX6QDL_PAD_NANDF_RB0__GPIO6_IO10 0x0001b010 /* DLD */
+ >;
+ };
+
+ pinctrl_uart1_dte: uart1grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT11__UART1_TX_DATA 0x0001b010
+ MX6QDL_PAD_CSI0_DAT10__UART1_RX_DATA 0x0001b010
+ MX6QDL_PAD_EIM_D19__UART1_RTS_B 0x0001b010
+ MX6QDL_PAD_EIM_D20__UART1_CTS_B 0x0001b010
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x0001b010 /* DCD */
+ MX6QDL_PAD_EIM_D24__GPIO3_IO24 0x0001b010 /* DTR */
+ MX6QDL_PAD_EIM_D25__GPIO3_IO25 0x0001b010 /* DSR */
+ >;
+ };
+
+ pinctrl_uart2_dte: uart2grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D27__UART2_TX_DATA 0x0001b010
+ MX6QDL_PAD_EIM_D26__UART2_RX_DATA 0x0001b010
+ MX6QDL_PAD_EIM_D28__UART2_RTS_B 0x0001b010
+ MX6QDL_PAD_EIM_D29__UART2_CTS_B 0x0001b010
+ MX6QDL_PAD_NANDF_D1__GPIO2_IO01 0x0001b010 /* DCD */
+ MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x0001b010 /* DTR */
+ MX6QDL_PAD_NANDF_CS3__GPIO6_IO16 0x0001b010 /* DSR */
+ >;
+ };
+
+ pinctrl_uart3_dce: uart3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD4_CLK__UART3_RX_DATA 0x0001b010
+ MX6QDL_PAD_SD4_CMD__UART3_TX_DATA 0x0001b010
+ >;
+ };
+
+ pinctrl_uart4_dce: uart4grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x0001b010
+ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x0001b010
+ MX6QDL_PAD_CSI0_DAT17__GPIO6_IO03 0x0001b010
+ >;
+ };
+
+ pinctrl_uart5_dce: uart5grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT15__UART5_RX_DATA 0x0001b010
+ MX6QDL_PAD_CSI0_DAT14__UART5_TX_DATA 0x0001b010
+ MX6QDL_PAD_CSI0_DAT19__GPIO6_IO05 0x0001b010 /* RTS */
+ >;
+ };
+
+ pinctrl_usbh1: usbh1grp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D30__USB_H1_OC 0x0001b0b0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CMD__SD3_CMD 0x00017059
+ MX6QDL_PAD_SD3_CLK__SD3_CLK 0x00010059
+ MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x00017059
+ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x00017059
+ MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x00017059
+ MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x00017059
+ MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x00017059
+ MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x00017059
+ MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x00017059
+ MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x00017059
+ >;
+ };
+};
+
+&fec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet>;
+ phy-mode = "rmii";
+ phy-reset-gpios = <&gpio1 18 GPIO_ACTIVE_LOW>;
+ phy-handle = <&phy>;
+ clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ reset-gpio = <&gpio1 20 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_dte>;
+ uart-has-rtscts;
+ fsl,dte-mode;
+ dcd-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
+ dtr-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ dsr-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_dte>;
+ uart-has-rtscts;
+ fsl,dte-mode;
+ dcd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ dtr-gpios = <&gpio7 13 GPIO_ACTIVE_LOW>;
+ dsr-gpios = <&gpio6 16 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3_dce>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4_dce>;
+ rts-gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5_dce>;
+ rts-gpios = <&gpio6 5 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&usbh1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1>;
+ vbus-supply = <&reg_usb_h1_vbus>;
+ status = "okay";
+};
+
+&usbotg {
+ dr_mode = "peripheral";
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 65c184bb8fb0..d9de49efa802 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -92,7 +92,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx6dl-sabreauto.dts b/arch/arm/boot/dts/imx6dl-sabreauto.dts
index 660d52a245ba..ff3283c83a39 100644
--- a/arch/arm/boot/dts/imx6dl-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6dl-sabreauto.dts
@@ -11,3 +11,18 @@
model = "Freescale i.MX6 DualLite/Solo SABRE Automotive Board";
compatible = "fsl,imx6dl-sabreauto", "fsl,imx6dl";
};
+
+&cpu0 {
+ operating-points = <
+ /* kHz uV */
+ 996000 1275000
+ 792000 1175000
+ 396000 1150000
+ >;
+ fsl,soc-operating-points = <
+ /* ARM kHz SOC-PU uV */
+ 996000 1200000
+ 792000 1175000
+ 396000 1175000
+ >;
+};
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index adc9455e42c7..37c63402157b 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -171,7 +171,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-gw54xx.dts b/arch/arm/boot/dts/imx6q-gw54xx.dts
index 56e5b5050fcf..cb0a5f7d5a19 100644
--- a/arch/arm/boot/dts/imx6q-gw54xx.dts
+++ b/arch/arm/boot/dts/imx6q-gw54xx.dts
@@ -12,10 +12,30 @@
/dts-v1/;
#include "imx6q.dtsi"
#include "imx6qdl-gw54xx.dtsi"
+#include <dt-bindings/media/tda1997x.h>
/ {
model = "Gateworks Ventana i.MX6 Dual/Quad GW54XX";
compatible = "gw,imx6q-gw54xx", "gw,ventana", "fsl,imx6q";
+
+ sound-digital {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "tda1997x-audio";
+
+ simple-audio-card,dai-link@0 {
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&ssi2>;
+ };
+
+ codec {
+ bitclock-master;
+ frame-master;
+ sound-dai = <&hdmi_receiver>;
+ };
+ };
+ };
};
&i2c3 {
@@ -35,6 +55,61 @@
};
};
};
+
+ hdmi_receiver: hdmi-receiver@48 {
+ compatible = "nxp,tda19971";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tda1997x>;
+ reg = <0x48>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ DOVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&sw4_reg>;
+ DVDD-supply = <&sw4_reg>;
+ #sound-dai-cells = <0>;
+ nxp,audout-format = "i2s";
+ nxp,audout-layout = <0>;
+ nxp,audout-width = <16>;
+ nxp,audout-mclk-fs = <128>;
+ /*
+ * The 8bpp YUV422 semi-planar mode outputs CbCr[11:4]
+ * and Y[11:4] across 16bits in the same cycle
+ * which we map to VP[15:08]<->CSI_DATA[19:12]
+ */
+ nxp,vidout-portcfg =
+ /*G_Y_11_8<->VP[15:12]<->CSI_DATA[19:16]*/
+ < TDA1997X_VP24_V15_12 TDA1997X_G_Y_11_8 >,
+ /*G_Y_7_4<->VP[11:08]<->CSI_DATA[15:12]*/
+ < TDA1997X_VP24_V11_08 TDA1997X_G_Y_7_4 >,
+ /*R_CR_CBCR_11_8<->VP[07:04]<->CSI_DATA[11:08]*/
+ < TDA1997X_VP24_V07_04 TDA1997X_R_CR_CBCR_11_8 >,
+ /*R_CR_CBCR_7_4<->VP[03:00]<->CSI_DATA[07:04]*/
+ < TDA1997X_VP24_V03_00 TDA1997X_R_CR_CBCR_7_4 >;
+
+ port {
+ tda1997x_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ bus-width = <16>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ data-active = <1>;
+ };
+ };
+ };
+};
+
+&ipu1_csi0_from_ipu1_csi0_mux {
+ bus-width = <16>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&tda1997x_to_ipu1_csi0_mux>;
+ bus-width = <16>;
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
};
&ipu2_csi1_from_ipu2_csi1_mux {
@@ -63,6 +138,30 @@
>;
};
+ pinctrl_ipu1_csi0: ipu1_csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT4__IPU1_CSI0_DATA04 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT5__IPU1_CSI0_DATA05 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT6__IPU1_CSI0_DATA06 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT7__IPU1_CSI0_DATA07 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT8__IPU1_CSI0_DATA08 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT9__IPU1_CSI0_DATA09 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT10__IPU1_CSI0_DATA10 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT11__IPU1_CSI0_DATA11 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x1b0b0
+ >;
+ };
+
pinctrl_ipu2_csi1: ipu2_csi1grp {
fsl,pins = <
MX6QDL_PAD_EIM_EB2__IPU2_CSI1_DATA19 0x1b0b0
@@ -78,4 +177,10 @@
MX6QDL_PAD_EIM_A16__IPU2_CSI1_PIXCLK 0x1b0b0
>;
};
+
+ pinctrl_tda1997x: tda1997xgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0
+ >;
+ };
};
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index 45eb0b7f75f8..d96ae54be338 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -21,6 +21,8 @@
panel-lvds0 {
compatible = "okaya,rs800480t-7x0gp";
+ power-supply = <&reg_lcd_reset>;
+ backlight = <&backlight>;
port {
panel_in_lvds0: endpoint {
@@ -38,7 +40,6 @@
regulator-max-microvolt = <3300000>;
gpio = <&gpio4 17 GPIO_ACTIVE_HIGH>;
enable-active-high;
- regulator-always-on;
vin-supply = <&reg_3v3>;
startup-delay-us = <500000>;
};
@@ -52,7 +53,6 @@
regulator-max-microvolt = <3300000>;
gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>;
enable-active-high;
- regulator-always-on;
vin-supply = <&reg_lcd>;
};
};
diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
index d8ccb533b6b7..84b30bd6908f 100644
--- a/arch/arm/boot/dts/imx6q-marsboard.dts
+++ b/arch/arm/boot/dts/imx6q-marsboard.dts
@@ -110,7 +110,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index 2ce8399a10ba..bfff87ce2e1f 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -98,7 +98,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6q-zii-rdu2.dts b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
index 0f0743db2779..a1c5e69d81ba 100644
--- a/arch/arm/boot/dts/imx6q-zii-rdu2.dts
+++ b/arch/arm/boot/dts/imx6q-zii-rdu2.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2016-2017 Zodiac Inflight Innovations
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6qdl-apf6.dtsi b/arch/arm/boot/dts/imx6qdl-apf6.dtsi
index 1ebf29f43a24..4738c3c1ab50 100644
--- a/arch/arm/boot/dts/imx6qdl-apf6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-apf6.dtsi
@@ -51,7 +51,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-duration = <10>;
phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
index 397e205551c4..70d26616d771 100644
--- a/arch/arm/boot/dts/imx6qdl-emcon.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
@@ -77,8 +77,6 @@
pwm_fan: pwm-fan {
compatible = "pwm-fan";
- cooling-min-state = <0>;
- cooling-max-state = <4>;
#cooling-cells = <2>;
pwms = <&pwm4 0 50000>;
cooling-levels = <0 64 127 191 255>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 81b2fcf6eedf..e4d1c5250d1e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -10,6 +10,7 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
/ {
/* these are used by bootloader for disabling nodes */
@@ -115,12 +116,12 @@
};
};
- sound {
+ sound-analog {
compatible = "fsl,imx6q-ventana-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "sgtl5000-audio";
ssi-controller = <&ssi1>;
- audio-codec = <&codec>;
+ audio-codec = <&sgtl5000>;
audio-routing =
"MIC_IN", "Mic Jack",
"Mic Jack", "Mic Bias",
@@ -134,6 +135,25 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_audmux>; /* AUD4<->sgtl5000 */
status = "okay";
+
+ ssi2 {
+ fsl,audmux-port = <1>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSEL(4+8) | /* RXFS */
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(4+8) | /* RXC */
+ IMX_AUDMUX_V2_PTCR_SYN)
+ IMX_AUDMUX_V2_PDCR_RXDSEL(4)
+ >;
+ };
+
+ aud5 {
+ fsl,audmux-port = <4>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN
+ IMX_AUDMUX_V2_PDCR_RXDSEL(1)>;
+ };
};
&can1 {
@@ -332,7 +352,7 @@
pinctrl-0 = <&pinctrl_i2c3>;
status = "okay";
- codec: sgtl5000@a {
+ sgtl5000: audio-codec@a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
@@ -476,6 +496,9 @@
MX6QDL_PAD_SD2_DAT2__AUD4_TXD 0x110b0
MX6QDL_PAD_SD2_DAT1__AUD4_TXFS 0x130b0
MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0 /* AUD4_MCK */
+ MX6QDL_PAD_EIM_D25__AUD5_RXC 0x130b0
+ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0
+ MX6QDL_PAD_EIM_D24__AUD5_RXFS 0x130b0
>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
index 8e46a80f57a4..c23ba229fd05 100644
--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
@@ -46,6 +46,8 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/media/tda1997x.h>
+#include <dt-bindings/sound/fsl-imx-audmux.h>
/ {
/* these are used by bootloader for disabling nodes */
@@ -99,6 +101,50 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
};
+
+ sound-digital {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "tda1997x-audio";
+
+ simple-audio-card,dai-link@0 {
+ format = "i2s";
+
+ cpu {
+ sound-dai = <&ssi2>;
+ };
+
+ codec {
+ bitclock-master;
+ frame-master;
+ sound-dai = <&hdmi_receiver>;
+ };
+ };
+ };
+};
+
+&audmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux>; /* AUD5<->tda1997x */
+ status = "okay";
+
+ ssi1 {
+ fsl,audmux-port = <0>;
+ fsl,port-config = <
+ (IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSEL(4+8) | /* RXFS */
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(4+8) | /* RXC */
+ IMX_AUDMUX_V2_PTCR_SYN)
+ IMX_AUDMUX_V2_PDCR_RXDSEL(4)
+ >;
+ };
+
+ aud5 {
+ fsl,audmux-port = <4>;
+ fsl,port-config = <
+ IMX_AUDMUX_V2_PTCR_SYN
+ IMX_AUDMUX_V2_PDCR_RXDSEL(0)>;
+ };
};
&can1 {
@@ -264,6 +310,60 @@
#gpio-cells = <2>;
};
+ hdmi_receiver: hdmi-receiver@48 {
+ compatible = "nxp,tda19971";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tda1997x>;
+ reg = <0x48>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ DOVDD-supply = <&reg_3p3>;
+ AVDD-supply = <&reg_1p8b>;
+ DVDD-supply = <&reg_1p8a>;
+ #sound-dai-cells = <0>;
+ nxp,audout-format = "i2s";
+ nxp,audout-layout = <0>;
+ nxp,audout-width = <16>;
+ nxp,audout-mclk-fs = <128>;
+ /*
+ * The 8bpp YUV422 semi-planar mode outputs CbCr[11:4]
+ * and Y[11:4] across 16bits in the same cycle
+ * which we map to VP[15:08]<->CSI_DATA[19:12]
+ */
+ nxp,vidout-portcfg =
+ /*G_Y_11_8<->VP[15:12]<->CSI_DATA[19:16]*/
+ < TDA1997X_VP24_V15_12 TDA1997X_G_Y_11_8 >,
+ /*G_Y_7_4<->VP[11:08]<->CSI_DATA[15:12]*/
+ < TDA1997X_VP24_V11_08 TDA1997X_G_Y_7_4 >,
+ /*R_CR_CBCR_11_8<->VP[07:04]<->CSI_DATA[11:08]*/
+ < TDA1997X_VP24_V07_04 TDA1997X_R_CR_CBCR_11_8 >,
+ /*R_CR_CBCR_7_4<->VP[03:00]<->CSI_DATA[07:04]*/
+ < TDA1997X_VP24_V03_00 TDA1997X_R_CR_CBCR_7_4 >;
+
+ port {
+ tda1997x_to_ipu1_csi0_mux: endpoint {
+ remote-endpoint = <&ipu1_csi0_mux_from_parallel_sensor>;
+ bus-width = <16>;
+ hsync-active = <1>;
+ vsync-active = <1>;
+ data-active = <1>;
+ };
+ };
+ };
+};
+
+&ipu1_csi0_from_ipu1_csi0_mux {
+ bus-width = <16>;
+};
+
+&ipu1_csi0_mux_from_parallel_sensor {
+ remote-endpoint = <&tda1997x_to_ipu1_csi0_mux>;
+ bus-width = <16>;
+};
+
+&ipu1_csi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ipu1_csi0>;
};
&pcie {
@@ -321,6 +421,14 @@
};
&iomuxc {
+ pinctrl_audmux: audmuxgrp {
+ fsl,pins = <
+ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0
+ MX6QDL_PAD_DISP0_DAT14__AUD5_RXC 0x130b0
+ MX6QDL_PAD_DISP0_DAT13__AUD5_RXFS 0x130b0
+ >;
+ };
+
pinctrl_flexcan1: flexcan1grp {
fsl,pins = <
MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX 0x1b0b1
@@ -376,6 +484,30 @@
>;
};
+ pinctrl_ipu1_csi0: ipu1_csi0grp {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT4__IPU1_CSI0_DATA04 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT5__IPU1_CSI0_DATA05 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT6__IPU1_CSI0_DATA06 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT7__IPU1_CSI0_DATA07 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT8__IPU1_CSI0_DATA08 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT9__IPU1_CSI0_DATA09 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT10__IPU1_CSI0_DATA10 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT11__IPU1_CSI0_DATA11 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT12__IPU1_CSI0_DATA12 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT13__IPU1_CSI0_DATA13 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT14__IPU1_CSI0_DATA14 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT15__IPU1_CSI0_DATA15 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT16__IPU1_CSI0_DATA16 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT17__IPU1_CSI0_DATA17 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT18__IPU1_CSI0_DATA18 0x1b0b0
+ MX6QDL_PAD_CSI0_DAT19__IPU1_CSI0_DATA19 0x1b0b0
+ MX6QDL_PAD_CSI0_MCLK__IPU1_CSI0_HSYNC 0x1b0b0
+ MX6QDL_PAD_CSI0_PIXCLK__IPU1_CSI0_PIXCLK 0x1b0b0
+ MX6QDL_PAD_CSI0_VSYNC__IPU1_CSI0_VSYNC 0x1b0b0
+ >;
+ };
+
pinctrl_pcie: pciegrp {
fsl,pins = <
MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 /* PCIE RST */
@@ -400,6 +532,12 @@
>;
};
+ pinctrl_tda1997x: tda1997xgrp {
+ fsl,pins = <
+ MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0
+ >;
+ };
+
pinctrl_uart2: uart2grp {
fsl,pins = <
MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1
diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
index 9cb9a7439121..aee9221f0f29 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
@@ -311,7 +311,7 @@
tlv320aic3105: codec@18 {
compatible = "ti,tlv320aic3x";
reg = <0x18>;
- gpio-reset = <&gpio5 17 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
clocks = <&clks IMX6QDL_CLK_CKO>;
ai3x-micbias-vg = <2>; /* MICBIAS_2_5V */
/* Regulators */
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 027df06c5dc7..7e53ac6cfa8a 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -79,7 +79,7 @@
status = "okay";
cs-gpios = <&gpio4 24 0>;
- flash@0 {
+ som_flash: flash@0 {
compatible = "m25p80", "jedec,spi-nor";
spi-max-frequency = <20000000>;
reg = <0>;
@@ -121,7 +121,7 @@
pinctrl-0 = <&pinctrl_i2c1>;
status = "okay";
- eeprom@50 {
+ som_eeprom: eeprom@50 {
compatible = "atmel,24c32";
reg = <0x50>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 1280de50a984..f3404dd10537 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -292,7 +292,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
fsl,err006687-workaround-present;
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a0705066ccba..185fb17a3500 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -202,7 +202,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
index 4ccb7afc4b35..6d7f6b9035bc 100644
--- a/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sr-som.dtsi
@@ -53,7 +53,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-duration = <2>;
phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-var-dart.dtsi b/arch/arm/boot/dts/imx6qdl-var-dart.dtsi
index 8752a4961c47..c41cac502bac 100644
--- a/arch/arm/boot/dts/imx6qdl-var-dart.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-var-dart.dtsi
@@ -183,7 +183,7 @@
IOVDD-supply = <&reg_3p3v>;
DVDD-supply = <&reg_3p3v>;
ai3x-ocmv = <0>;
- gpio-reset = <&gpio5 5 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index b7d5fb421404..50d9a989e06a 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -224,7 +224,7 @@
&fec {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-reset-gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_HIGH>,
<&intc 0 119 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index 69942c7ff89d..93be00a60c88 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2016-2017 Zodiac Inflight Innovations
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <dt-bindings/gpio/gpio.h>
@@ -210,6 +174,7 @@
panel {
power-supply = <&reg_3p3v_display>;
+ backlight = <&sp_backlight>;
status = "disabled";
port {
@@ -327,7 +292,7 @@
compatible = "zii,rave-sp-watchdog";
};
- backlight {
+ sp_backlight: backlight {
compatible = "zii,rave-sp-backlight";
};
@@ -384,7 +349,7 @@
AVDD-supply = <&reg_3p3v>;
IOVDD-supply = <&reg_3p3v>;
DVDD-supply = <&vgen4_reg>;
- gpio-reset = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
};
accel@1c {
@@ -528,6 +493,11 @@
};
};
+ watchdog@38 {
+ compatible = "zii,rave-wdt";
+ reg = <0x38>;
+ };
+
temp-sense@48 {
compatible = "national,lm75";
reg = <0x48>;
@@ -572,7 +542,7 @@
AVDD-supply = <&reg_3p3v>;
IOVDD-supply = <&reg_3p3v>;
DVDD-supply = <&vgen4_reg>;
- gpio-reset = <&gpio1 0 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
};
touchscreen@20 {
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index fe17a3405edc..b3a77bcf00d5 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -4,6 +4,7 @@
// Copyright 2011 Linaro Ltd.
#include <dt-bindings/clock/imx6qdl-clock.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
/ {
@@ -279,6 +280,7 @@
ranges = <0x81000000 0 0 0x01f80000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x01000000 0x01000000 0 0x00f00000>; /* non-prefetchable memory */
num-lanes = <1>;
+ num-viewport = <4>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
@@ -833,6 +835,14 @@
status = "disabled";
};
+ snvs_pwrkey: snvs-powerkey {
+ compatible = "fsl,sec-v4.0-pwrkey";
+ regmap = <&snvs>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ linux,keycode = <KEY_POWER>;
+ wakeup-source;
+ };
+
snvs_lpgpr: snvs-lpgpr {
compatible = "fsl,imx6q-snvs-lpgpr";
};
@@ -918,7 +928,7 @@
compatible = "fsl,imx6q-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6QDL_CLK_SDMA>,
+ clocks = <&clks IMX6QDL_CLK_IPG>,
<&clks IMX6QDL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
@@ -1129,14 +1139,16 @@
reg = <0x021ac000 0x4000>;
};
- mmdc0: mmdc@21b0000 { /* MMDC0 */
+ mmdc0: memory-controller@21b0000 { /* MMDC0 */
compatible = "fsl,imx6q-mmdc";
reg = <0x021b0000 0x4000>;
clocks = <&clks IMX6QDL_CLK_MMDC_P0_IPG>;
};
- mmdc1: mmdc@21b4000 { /* MMDC1 */
+ mmdc1: memory-controller@21b4000 { /* MMDC1 */
+ compatible = "fsl,imx6q-mmdc";
reg = <0x021b4000 0x4000>;
+ status = "disabled";
};
weim: weim@21b8000 {
diff --git a/arch/arm/boot/dts/imx6qp-zii-rdu2.dts b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
index 98bf7a6b2850..57de447c4609 100644
--- a/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
+++ b/arch/arm/boot/dts/imx6qp-zii-rdu2.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2016-2017 Zodiac Inflight Innovations
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 4b4813f176cd..9ddbeea64b72 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -23,6 +23,13 @@
gpio2 = &gpio3;
gpio3 = &gpio4;
gpio4 = &gpio5;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ mmc3 = &usdhc4;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -741,7 +748,7 @@
reg = <0x020ec000 0x4000>;
interrupts = <0 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SL_CLK_SDMA>,
- <&clks IMX6SL_CLK_SDMA>;
+ <&clks IMX6SL_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
/* imx6sl reuses imx6q sdma firmware */
@@ -922,7 +929,7 @@
status = "disabled";
};
- mmdc: mmdc@21b0000 {
+ memory-controller@21b0000 {
compatible = "fsl,imx6sl-mmdc", "fsl,imx6q-mmdc";
reg = <0x021b0000 0x4000>;
clocks = <&clks IMX6SL_CLK_MMDC_P0_IPG>;
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
index 62847c68330b..1b4899f0fcde 100644
--- a/arch/arm/boot/dts/imx6sll.dtsi
+++ b/arch/arm/boot/dts/imx6sll.dtsi
@@ -64,6 +64,7 @@
198000 1175000
>;
clock-latency = <61036>; /* two CLK32 periods */
+ #cooling-cells = <2>;
clocks = <&clks IMX6SLL_CLK_ARM>,
<&clks IMX6SLL_CLK_PLL2_PFD2>,
<&clks IMX6SLL_CLK_STEP>,
@@ -621,7 +622,7 @@
compatible = "fsl,imx6sll-sdma", "fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SLL_CLK_SDMA>,
+ clocks = <&clks IMX6SLL_CLK_IPG>,
<&clks IMX6SLL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index b0ee324afe58..315044ccd65f 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -75,7 +75,7 @@
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
fsl,magic-packet;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 08ede56c3f10..f6972deb5e39 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -191,7 +191,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy1>;
phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 5b16e65f7696..b16a123990a2 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -820,7 +820,7 @@
compatible = "fsl,imx6sx-sdma", "fsl,imx6q-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SX_CLK_SDMA>,
+ clocks = <&clks IMX6SX_CLK_IPG>,
<&clks IMX6SX_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
@@ -1017,7 +1017,7 @@
status = "disabled";
};
- mmdc: mmdc@21b0000 {
+ memory-controller@21b0000 {
compatible = "fsl,imx6sx-mmdc", "fsl,imx6q-mmdc";
reg = <0x021b0000 0x4000>;
clocks = <&clks IMX6SX_CLK_MMDC_P0_IPG>;
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index 62ed30c781ed..bbf010c73336 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -708,7 +708,7 @@
"fsl,imx35-sdma";
reg = <0x020ec000 0x4000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6UL_CLK_SDMA>,
+ clocks = <&clks IMX6UL_CLK_IPG>,
<&clks IMX6UL_CLK_SDMA>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
@@ -914,7 +914,7 @@
status = "disabled";
};
- mmdc: mmdc@21b0000 {
+ memory-controller@21b0000 {
compatible = "fsl,imx6ul-mmdc", "fsl,imx6q-mmdc";
reg = <0x021b0000 0x4000>;
clocks = <&clks IMX6UL_CLK_MMDC_P0_IPG>;
diff --git a/arch/arm/boot/dts/imx7-mba7.dtsi b/arch/arm/boot/dts/imx7-mba7.dtsi
new file mode 100644
index 000000000000..50abf18ad30b
--- /dev/null
+++ b/arch/arm/boot/dts/imx7-mba7.dtsi
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Include file for TQ Systems MBa7 carrier board.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ *
+ * Note: This file does not include nodes for all peripheral devices.
+ * As device driver coverage increases additional nodes can be added.
+ */
+
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ beeper {
+ compatible = "gpio-beeper";
+ gpios = <&pca9555 0 GPIO_ACTIVE_HIGH>;
+ };
+
+ chosen {
+ stdout-path = &uart6;
+ };
+
+ gpio_buttons: gpio-keys {
+ compatible = "gpio-keys";
+
+ button-0 {
+ /* #SWITCH_A */
+ label = "S11";
+ linux,code = <KEY_1>;
+ gpios = <&pca9555 13 GPIO_ACTIVE_LOW>;
+ };
+
+ button-1 {
+ /* #SWITCH_B */
+ label = "S12";
+ linux,code = <KEY_2>;
+ gpios = <&pca9555 14 GPIO_ACTIVE_LOW>;
+ };
+
+ button-2 {
+ /* #SWITCH_C */
+ label = "S13";
+ linux,code = <KEY_3>;
+ gpios = <&pca9555 15 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led1 {
+ label = "led1";
+ gpios = <&pca9555 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
+ };
+
+ led2 {
+ label = "led2";
+ gpios = <&pca9555 9 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ reg_sd1_vmmc: regulator-sd1-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3_SD1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_fec1_pwdn: regulator-fec1-pwdn {
+ compatible = "regulator-fixed";
+ regulator-name = "PWDN_FEC1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_fec2_pwdn: regulator-fec2-pwdn {
+ compatible = "regulator-fixed";
+ regulator-name = "PWDN_FEC2";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "VBUS_USBOTG1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "VBUS_USBOTG2";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_mpcie_1v5: regulator-mpcie-1v5 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC1V5_MPCIE";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
+ gpio = <&pca9555 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_mpcie_3v3: regulator-mpcie-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3_MPCIE";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca9555 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ };
+
+ reg_mba_12v0: regulator-mba-12v0 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC12V0_MBA7";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ gpio = <&pca9555 11 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_lvds_transmitter: regulator-lvds-transmitter {
+ compatible = "regulator-fixed";
+ regulator-name = "#SHTDN_LVDS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&pca9555 1 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_vref_1v8: regulator-vref-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC1V8_REF";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ vin-supply = <&sw2_reg>;
+ };
+
+ reg_audio_3v3: regulator-audio-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3_AUDIO";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&adc2 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ num-chipselects = <3>;
+ cs-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>, <&gpio4 1 GPIO_ACTIVE_LOW>,
+ <&gpio4 2 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&ecspi2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ num-chipselects = <1>;
+ status = "okay";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <1>;
+ phy-reset-delay = <1>;
+ phy-supply = <&reg_fec1_pwdn>;
+ phy-handle = <&ethphy1_0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1_0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ /* LED1: Link/Activity, LED2: Error */
+ ti,led-function = <0x0db0>;
+ /* Active low, LED1 and LED2 driven by phy */
+ ti,led-ctrl = <0x1001>;
+ };
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ status = "okay";
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ status = "okay";
+};
+
+&i2c1 {
+ lm75: temperature-sensor@49 {
+ compatible = "national,lm75";
+ reg = <0x49>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ tlv320aic32x4: audio-codec@18 {
+ compatible = "ti,tlv320aic32x4";
+ reg = <0x18>;
+ clocks = <&clks IMX7D_AUDIO_MCLK_ROOT_CLK>;
+ clock-names = "mclk";
+ ldoin-supply = <&reg_audio_3v3>;
+ iov-supply = <&reg_audio_3v3>;
+ };
+
+ pca9555: gpio-expander@20 {
+ compatible = "nxp,pca9555";
+ reg = <0x20>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pca9555>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio7>;
+ interrupts = <12 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog_mba7_1>;
+
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI1_MISO__ECSPI1_MISO 0x7c
+ MX7D_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x74
+ MX7D_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x74
+ MX7D_PAD_UART1_RX_DATA__GPIO4_IO0 0x74
+ MX7D_PAD_UART1_TX_DATA__GPIO4_IO1 0x74
+ MX7D_PAD_UART2_RX_DATA__GPIO4_IO2 0x74
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI2_MISO__ECSPI2_MISO 0x7c
+ MX7D_PAD_ECSPI2_MOSI__ECSPI2_MOSI 0x74
+ MX7D_PAD_ECSPI2_SCLK__ECSPI2_SCLK 0x74
+ MX7D_PAD_ECSPI2_SS0__ECSPI2_SS0 0x74
+ >;
+ };
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO10__ENET1_MDIO 0x02
+ MX7D_PAD_GPIO1_IO11__ENET1_MDC 0x00
+ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x71
+ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x71
+ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x71
+ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x71
+ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x71
+ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x71
+ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x79
+ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x79
+ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x79
+ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x79
+ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x79
+ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x79
+ /* Reset: SION, 100kPU, SRE_FAST, DSE_X1 */
+ MX7D_PAD_ENET1_COL__GPIO7_IO15 0x40000070
+ /* INT/PWDN: SION, 100kPU, HYS, SRE_FAST, DSE_X1 */
+ MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x40000078
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO12__FLEXCAN1_RX 0x5a
+ MX7D_PAD_GPIO1_IO13__FLEXCAN1_TX 0x52
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x5a
+ MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x52
+ >;
+ };
+
+ pinctrl_hog_mba7_1: hogmba71grp {
+ fsl,pins = <
+ /* Limitation: WDOG2_B / WDOG2_RESET not usable */
+ MX7D_PAD_ENET1_RX_CLK__GPIO7_IO13 0x4000007c
+ MX7D_PAD_ENET1_CRS__GPIO7_IO14 0x40000074
+ /* #BOOT_EN */
+ MX7D_PAD_UART2_TX_DATA__GPIO4_IO3 0x40000010
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SCL__I2C2_SCL 0x40000078
+ MX7D_PAD_I2C2_SDA__I2C2_SDA 0x40000078
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX7D_PAD_I2C3_SCL__I2C3_SCL 0x40000078
+ MX7D_PAD_I2C3_SDA__I2C3_SDA 0x40000078
+ >;
+ };
+
+
+ pinctrl_pca9555: pca95550grp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_TX_CLK__GPIO7_IO12 0x78
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX7D_PAD_UART3_RX_DATA__UART3_DCE_RX 0x7e
+ MX7D_PAD_UART3_TX_DATA__UART3_DCE_TX 0x76
+ MX7D_PAD_UART3_CTS_B__UART3_DCE_CTS 0x76
+ MX7D_PAD_UART3_RTS_B__UART3_DCE_RTS 0x7e
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX7D_PAD_SAI2_TX_SYNC__UART4_DCE_RX 0x7e
+ MX7D_PAD_SAI2_TX_BCLK__UART4_DCE_TX 0x76
+ MX7D_PAD_SAI2_RX_DATA__UART4_DCE_CTS 0x76
+ MX7D_PAD_SAI2_TX_DATA__UART4_DCE_RTS 0x7e
+ >;
+ };
+
+ pinctrl_uart5: uart5grp {
+ fsl,pins = <
+ MX7D_PAD_I2C4_SCL__UART5_DCE_RX 0x7e
+ MX7D_PAD_I2C4_SDA__UART5_DCE_TX 0x76
+ >;
+ };
+
+ pinctrl_uart6: uart6grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA08__UART6_DCE_RX 0x7d
+ MX7D_PAD_EPDC_DATA09__UART6_DCE_TX 0x75
+ MX7D_PAD_EPDC_DATA11__UART6_DCE_CTS 0x75
+ MX7D_PAD_EPDC_DATA10__UART6_DCE_RTS 0x7d
+ >;
+ };
+
+ pinctrl_uart7: uart7grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA12__UART7_DCE_RX 0x7e
+ MX7D_PAD_EPDC_DATA13__UART7_DCE_TX 0x76
+ MX7D_PAD_EPDC_DATA15__UART7_DCE_CTS 0x76
+ /* Limitation: RTS is not connected */
+ MX7D_PAD_EPDC_DATA14__UART7_DCE_RTS 0x7e
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1grp_gpio {
+ fsl,pins = <
+ /* WP */
+ MX7D_PAD_SD1_WP__GPIO5_IO1 0x7c
+ /* CD */
+ MX7D_PAD_SD1_CD_B__GPIO5_IO0 0x7c
+ /* VSELECT */
+ MX7D_PAD_GPIO1_IO08__SD1_VSELECT 0x59
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x5e
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x57
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5e
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5e
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5e
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5e
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1grp_100mhz {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x5a
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x57
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5a
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5a
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5a
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5a
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1grp_200mhz {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x5b
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x57
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x5b
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x5b
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x5b
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x5b
+ >;
+ };
+};
+
+&iomuxc_lpsr {
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ /* LCD_CONTRAST */
+ MX7D_PAD_LPSR_GPIO1_IO01__PWM1_OUT 0x50
+ >;
+ };
+
+ pinctrl_usbotg1: usbotg1grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO04__USB_OTG1_OC 0x5c
+ MX7D_PAD_LPSR_GPIO1_IO05__GPIO1_IO5 0x59
+ >;
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ assigned-clocks = <&clks IMX7D_UART3_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ assigned-clocks = <&clks IMX7D_UART4_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart5>;
+ assigned-clocks = <&clks IMX7D_UART5_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart6>;
+ assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart7 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart7>;
+ assigned-clocks = <&clks IMX7D_UART7_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+&usbh {
+ status = "okay";
+};
+
+&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
+ vbus-supply = <&reg_usb_otg1_vbus>;
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>, <&pinctrl_usdhc1_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>, <&pinctrl_usdhc1_gpio>;
+ cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_sd1_vmmc>;
+ bus-width = <4>;
+ no-1-8-v;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7-tqma7.dtsi b/arch/arm/boot/dts/imx7-tqma7.dtsi
new file mode 100644
index 000000000000..9aaed85138cb
--- /dev/null
+++ b/arch/arm/boot/dts/imx7-tqma7.dtsi
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Include file for TQ Systems TQMa7x boards with full mounted PCB.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ */
+
+/ {
+ memory@80000000 {
+ device_type = "memory";
+ /* 512 MB - default configuration */
+ reg = <0x80000000 0x20000000>;
+ };
+};
+
+&cpu0 {
+ arm-supply = <&sw1a_reg>;
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ clock-frequency = <100000>;
+ status = "okay";
+
+ pfuze3000: pmic@8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic1>;
+ compatible = "fsl,pfuze3000";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1a {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ /* use sw1c_reg to align with pfuze100/pfuze200 */
+ sw1c_reg: sw1b {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ /* NXP SE97BTP with temperature sensor + eeprom */
+ se97b: temperature-sensor-eeprom@1e {
+ compatible = "nxp,se97b", "jedec,jc-42.4-temp";
+ reg = <0x1e>;
+ status = "okay";
+ };
+
+ /* ST M24C64 */
+ m24c64: eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ pagesize = <32>;
+ status = "okay";
+ };
+
+ at24c02: eeprom@56 {
+ compatible = "atmel,24c02";
+ reg = <0x56>;
+ pagesize = <16>;
+ status = "okay";
+ };
+
+ ds1339: rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+};
+
+&iomuxc {
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SDA__I2C1_SDA 0x40000078
+ MX7D_PAD_I2C1_SCL__I2C1_SCL 0x40000078
+ >;
+ };
+
+ pinctrl_pmic1: pmic1grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_RESET_B__GPIO5_IO11 0x4000005C
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x59
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x56
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x51
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x51
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b
+ >;
+ };
+};
+
+&iomuxc_lpsr {
+ pinctrl_wdog1: wdog1grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x30
+ >;
+ };
+};
+
+&sdma {
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ non-removable;
+ vmmc-supply = <&vgen4_reg>;
+ vqmmc-supply = <&sw2_reg>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog1>;
+ /*
+ * Errata e10574:
+ * WDOG reset needs to run with WDOG_RESET_B signal enabled.
+ * X1-51 (WDOG1#) signal needs carrier board handling to reset
+ * TQMa7 on X1-22 (RESET_IN#).
+ */
+ fsl,ext-reset-output;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-mba7.dts b/arch/arm/boot/dts/imx7d-mba7.dts
new file mode 100644
index 000000000000..221274c73dbd
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-mba7.dts
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Source for TQ Systems TQMa7D board on MBa7 carrier board.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "imx7d-tqma7.dtsi"
+#include "imx7-mba7.dtsi"
+
+/ {
+ model = "TQ Systems TQMa7D board on MBa7 carrier board";
+ compatible = "tq,imx7d-mba7", "fsl,imx7d";
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ phy-mode = "rgmii-id";
+ phy-reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ phy-reset-duration = <1>;
+ phy-reset-delay = <1>;
+ phy-supply = <&reg_fec2_pwdn>;
+ phy-handle = <&ethphy2_0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy2_0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ /* LED1: Link/Activity, LED2: error */
+ ti,led-function = <0x0db0>;
+ /* active low, LED1/2 driven by phy */
+ ti,led-ctrl = <0x1001>;
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog_mba7_1>;
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_CD_B__ENET2_MDIO 0x02
+ MX7D_PAD_SD2_WP__ENET2_MDC 0x00
+ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x71
+ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x71
+ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x71
+ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x71
+ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x71
+ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x71
+ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x79
+ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x79
+ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x79
+ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x79
+ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x79
+ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x79
+ /* Reset: SION, 100kPU, SRE_FAST, DSE_X1 */
+ MX7D_PAD_EPDC_BDR0__GPIO2_IO28 0x40000070
+ /* INT/PWDN: SION, 100kPU, HYS, SRE_FAST, DSE_X1 */
+ MX7D_PAD_EPDC_PWR_STAT__GPIO2_IO31 0x40000078
+ >;
+ };
+
+ pinctrl_pcie: pciegrp {
+ fsl,pins = <
+ /* #pcie_wake */
+ MX7D_PAD_EPDC_PWR_COM__GPIO2_IO30 0x70
+ /* #pcie_rst */
+ MX7D_PAD_SD2_CLK__GPIO5_IO12 0x70
+ /* #pcie_dis */
+ MX7D_PAD_EPDC_BDR1__GPIO2_IO29 0x70
+ >;
+ };
+};
+
+&iomuxc_lpsr {
+ pinctrl_usbotg2: usbotg2grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO06__USB_OTG2_OC 0x5c
+ MX7D_PAD_LPSR_GPIO1_IO07__GPIO1_IO7 0x59
+ >;
+ };
+};
+
+&pcie {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie>;
+ /* 1.5V logically from 3.3V */
+ /* probe deferral not supported */
+ /* pcie-bus-supply = <&reg_mpcie_1v5>; */
+ reset-gpio = <&gpio5 12 GPIO_ACTIVE_LOW>;
+ disable-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ power-on-gpio = <&gpio2 30 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
+&usbotg2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg2>;
+ vbus-supply = <&reg_usb_otg2_vbus>;
+ srp-disable;
+ hnp-disable;
+ adp-disable;
+ dr_mode = "host";
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 3fd595a71202..6f50ebf31a0a 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -92,7 +92,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
phy-reset-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/imx7d-tqma7.dtsi b/arch/arm/boot/dts/imx7d-tqma7.dtsi
new file mode 100644
index 000000000000..8ad3048dac0d
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-tqma7.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Include file for TQ Systems TQMa7D board with NXP i.MX7Dual SoC.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ */
+
+#include "imx7d.dtsi"
+#include "imx7-tqma7.dtsi"
diff --git a/arch/arm/boot/dts/imx7d-zii-rpu2.dts b/arch/arm/boot/dts/imx7d-zii-rpu2.dts
new file mode 100644
index 000000000000..3e467a94e8a6
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-zii-rpu2.dts
@@ -0,0 +1,941 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Device tree file for ZII's RPU2 board
+ *
+ * RPU - Remote Peripheral Unit
+ *
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+
+/dts-v1/;
+#include <dt-bindings/thermal/thermal.h>
+#include "imx7d.dtsi"
+
+/ {
+ model = "ZII RPU2 Board";
+ compatible = "zii,imx7d-rpu2", "fsl,imx7d";
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ cs2000_ref: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+
+ cs2000_in_dummy: dummy-oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pinctrl_leds_debug>;
+ pinctrl-names = "default";
+
+ debug {
+ label = "zii:green:debug1";
+ gpios = <&gpio2 8 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>,
+ <&adc2 1>;
+ };
+
+ reg_can1_stby: regulator-can1-stby {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1_stby>;
+ regulator-name = "can1-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_can2_stby: regulator-can2-stby {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2_stby>;
+ regulator-name = "can2-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_vref_1v8: regulator-vref-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ reg_3p3v: regulator-3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "GEN_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0v_main: regulator-5p0v-main {
+ compatible = "regulator-fixed";
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ sound1 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Audio Output 1";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound1_codec>;
+ simple-audio-card,frame-master = <&sound1_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa1>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ };
+
+ sound1_codec: simple-audio-card,codec {
+ sound-dai = <&codec1>;
+ clocks = <&cs2000>;
+ };
+ };
+
+ sound2 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Audio Output 2";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound2_codec>;
+ simple-audio-card,frame-master = <&sound2_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa2>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ sound2_codec: simple-audio-card,codec {
+ sound-dai = <&codec2>;
+ clocks = <&cs2000>;
+ };
+ };
+
+ sound3 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "Audio Output 3";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,bitclock-master = <&sound3_codec>;
+ simple-audio-card,frame-master = <&sound3_codec>;
+ simple-audio-card,widgets =
+ "Headphone", "Headphone Jack";
+ simple-audio-card,routing =
+ "Headphone Jack", "HPLEFT",
+ "Headphone Jack", "HPRIGHT",
+ "LEFTIN", "HPL",
+ "RIGHTIN", "HPR";
+ simple-audio-card,aux-devs = <&hpa3>;
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai3>;
+ };
+
+ sound3_codec: simple-audio-card,codec {
+ sound-dai = <&codec3>;
+ clocks = <&cs2000>;
+ };
+ };
+};
+
+&adc1 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&adc2 {
+ vref-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+&cpu0 {
+ arm-supply = <&sw1a_reg>;
+};
+
+&clks {
+ assigned-clocks = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <884736000>;
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet1>;
+ assigned-clocks = <&clks IMX7D_ENET1_TIME_ROOT_SRC>,
+ <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+ phy-mode = "rgmii";
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
+ mdio1: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ switch: switch@0 {
+ compatible = "marvell,mv88e6085";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_switch>;
+ reg = <0>;
+ eeprom-length = <512>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "eth_cu_1000_1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eth_cu_1000_2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "pic";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "gigabit_proc";
+ ethernet = <&fec2>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+};
+
+&fec2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_enet2>;
+ assigned-clocks = <&clks IMX7D_ENET2_TIME_ROOT_SRC>,
+ <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
+ assigned-clock-rates = <0>, <100000000>;
+ phy-mode = "rgmii";
+ fsl,magic-packet;
+ status = "okay";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
+
+&flexcan1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan1>;
+ xceiver-supply = <&reg_can1_stby>;
+ status = "okay";
+};
+
+&flexcan2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexcan2>;
+ xceiver-supply = <&reg_can2_stby>;
+ status = "okay";
+};
+
+&gpio1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio1>;
+
+ gpio-line-names = "", "", "", "", "", "", "", "",
+ "", "",
+ "usb_1_en_b",
+ "usb_2_en_b",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "";
+};
+
+&gpio2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio2>;
+
+ gpio-line-names = "12v_out_en_1",
+ "12v_out_en_2",
+ "12v_out_en_3",
+ "28v_out_en_5",
+ "28v_out_en_1",
+ "28v_out_en_2",
+ "28v_out_en_3",
+ "28v_out_en_4",
+ "", "",
+ "usb_3_en_b",
+ "usb_4_en_b",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ "", "", "", "";
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic: pmic@8 {
+ compatible = "fsl,pfuze3000";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1a {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1b {
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1475000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1650000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen2_reg: vldo2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vccsd {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: v33 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vldo3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vldo4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ cs2000: clkgen@4e {
+ compatible = "cirrus,cs2000-cp";
+ reg = <0x4e>;
+ #clock-cells = <0>;
+ clock-names = "clk_in", "ref_clk";
+ clocks = <&cs2000_in_dummy>, <&cs2000_ref>;
+ assigned-clocks = <&cs2000>;
+ assigned-clock-rates = <24000000>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c04";
+ reg = <0x50>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c04";
+ reg = <0x52>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ codec2: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec2>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ gpio-reset = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ };
+
+ hpa2: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa2>;
+ reg = <0x60>;
+ power-gpio = <&gpio3 27 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0v_main>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ codec3: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec3>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ gpio-reset = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ };
+
+ hpa3: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa3>;
+ reg = <0x60>;
+ power-gpio = <&gpio3 28 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0v_main>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ codec1: codec@18 {
+ compatible = "ti,tlv320dac3100";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_codec1>;
+ reg = <0x18>;
+ #sound-dai-cells = <0>;
+ HPVDD-supply = <&reg_3p3v>;
+ SPRVDD-supply = <&reg_3p3v>;
+ SPLVDD-supply = <&reg_3p3v>;
+ AVDD-supply = <&reg_3p3v>;
+ IOVDD-supply = <&reg_3p3v>;
+ DVDD-supply = <&vgen4_reg>;
+ gpio-reset = <&gpio1 5 GPIO_ACTIVE_LOW>;
+ };
+
+ hpa1: amp@60 {
+ compatible = "ti,tpa6130a2";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_tpa1>;
+ reg = <0x60>;
+ power-gpio = <&gpio3 26 GPIO_ACTIVE_HIGH>;
+ Vdd-supply = <&reg_5p0v_main>;
+ };
+};
+
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ assigned-clocks = <&clks IMX7D_SAI1_ROOT_SRC>,
+ <&clks IMX7D_SAI1_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <0>, <36864000>;
+ status = "okay";
+};
+
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clks IMX7D_SAI2_ROOT_SRC>,
+ <&clks IMX7D_SAI2_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <0>, <36864000>;
+ status = "okay";
+};
+
+&sai3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai3>;
+ assigned-clocks = <&clks IMX7D_SAI3_ROOT_SRC>,
+ <&clks IMX7D_SAI3_ROOT_CLK>;
+ assigned-clock-parents = <&clks IMX7D_PLL_AUDIO_POST_DIV>;
+ assigned-clock-rates = <0>, <36864000>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clks IMX7D_UART2_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
+ assigned-clocks = <&clks IMX7D_UART4_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_PLL_SYS_MAIN_240M_CLK>;
+ status = "okay";
+
+ rave-sp {
+ compatible = "zii,rave-sp-rdu2";
+ current-speed = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ };
+
+ eeprom@a3 {
+ compatible = "zii,rave-sp-eeprom";
+ reg = <0xa3 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ zii,eeprom-name = "main-eeprom";
+ };
+ };
+};
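
In the rave-sp node above, #address-cells = <1> and #size-cells = <1> make each child's reg a two-cell <address size> pair, so reg = <0xa3 0x4000> describes a 16 KiB (0x4000-byte) EEPROM at address 0xa3 on the supervisory processor. A second EEPROM would follow the same pattern; a minimal sketch only, with a made-up address and name, not part of this patch:

	eeprom@a4 {
		compatible = "zii,rave-sp-eeprom";
		reg = <0xa4 0x4000>;
		#address-cells = <1>;
		#size-cells = <1>;
		zii,eeprom-name = "aux-eeprom";
	};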
+
+&usbotg1 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usbotg2 {
+ dr_mode = "host";
+ disable-over-current;
+ status = "okay";
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <4>;
+ no-1-8-v;
+ no-sdio;
+ keep-power-in-suspend;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ bus-width = <8>;
+ no-1-8-v;
+ non-removable;
+ no-sdio;
+ no-sd;
+ keep-power-in-suspend;
+ status = "okay";
+};
+
+&wdog1 {
+ status = "disabled";
+};
+
+&snvs_rtc {
+ status = "disabled";
+};
+
+&snvs_pwrkey {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX7D_PAD_ECSPI1_SCLK__ECSPI1_SCLK 0x2
+ MX7D_PAD_ECSPI1_MOSI__ECSPI1_MOSI 0x2
+ MX7D_PAD_ECSPI1_MISO__ECSPI1_MISO 0x2
+ MX7D_PAD_ECSPI1_SS0__GPIO4_IO19 0x59
+ >;
+ };
+
+ pinctrl_enet1: enet1grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_CD_B__ENET1_MDIO 0x3
+ MX7D_PAD_SD2_WP__ENET1_MDC 0x3
+ MX7D_PAD_ENET1_RGMII_TXC__ENET1_RGMII_TXC 0x1
+ MX7D_PAD_ENET1_RGMII_TD0__ENET1_RGMII_TD0 0x1
+ MX7D_PAD_ENET1_RGMII_TD1__ENET1_RGMII_TD1 0x1
+ MX7D_PAD_ENET1_RGMII_TD2__ENET1_RGMII_TD2 0x1
+ MX7D_PAD_ENET1_RGMII_TD3__ENET1_RGMII_TD3 0x1
+ MX7D_PAD_ENET1_RGMII_TX_CTL__ENET1_RGMII_TX_CTL 0x1
+ MX7D_PAD_ENET1_RGMII_RXC__ENET1_RGMII_RXC 0x1
+ MX7D_PAD_ENET1_RGMII_RD0__ENET1_RGMII_RD0 0x1
+ MX7D_PAD_ENET1_RGMII_RD1__ENET1_RGMII_RD1 0x1
+ MX7D_PAD_ENET1_RGMII_RD2__ENET1_RGMII_RD2 0x1
+ MX7D_PAD_ENET1_RGMII_RD3__ENET1_RGMII_RD3 0x1
+ MX7D_PAD_ENET1_RGMII_RX_CTL__ENET1_RGMII_RX_CTL 0x1
+ >;
+ };
+
+ pinctrl_enet2: enet2grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_GDSP__ENET2_RGMII_TXC 0x1
+ MX7D_PAD_EPDC_SDCE2__ENET2_RGMII_TD0 0x1
+ MX7D_PAD_EPDC_SDCE3__ENET2_RGMII_TD1 0x1
+ MX7D_PAD_EPDC_GDCLK__ENET2_RGMII_TD2 0x1
+ MX7D_PAD_EPDC_GDOE__ENET2_RGMII_TD3 0x1
+ MX7D_PAD_EPDC_GDRL__ENET2_RGMII_TX_CTL 0x1
+ MX7D_PAD_EPDC_SDCE1__ENET2_RGMII_RXC 0x1
+ MX7D_PAD_EPDC_SDCLK__ENET2_RGMII_RD0 0x1
+ MX7D_PAD_EPDC_SDLE__ENET2_RGMII_RD1 0x1
+ MX7D_PAD_EPDC_SDOE__ENET2_RGMII_RD2 0x1
+ MX7D_PAD_EPDC_SDSHR__ENET2_RGMII_RD3 0x1
+ MX7D_PAD_EPDC_SDCE0__ENET2_RGMII_RX_CTL 0x1
+ MX7D_PAD_UART1_TX_DATA__ENET2_1588_EVENT0_OUT 0x1
+ >;
+ };
+
+ pinctrl_flexcan1: flexcan1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO12__FLEXCAN1_RX 0x59
+ MX7D_PAD_GPIO1_IO13__FLEXCAN1_TX 0x59
+ >;
+ };
+
+ pinctrl_flexcan1_stby: flexcan1stbygrp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO08__GPIO1_IO8 0x59
+ >;
+ };
+
+ pinctrl_flexcan2: flexcan2grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO14__FLEXCAN2_RX 0x59
+ MX7D_PAD_GPIO1_IO15__FLEXCAN2_TX 0x59
+ >;
+ };
+
+ pinctrl_flexcan2_stby: flexcan2stbygrp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO09__GPIO1_IO9 0x59
+ >;
+ };
+
+ pinctrl_gpio1: gpio1grp {
+ fsl,pins = <
+ MX7D_PAD_GPIO1_IO10__GPIO1_IO10 0x00
+ MX7D_PAD_GPIO1_IO11__GPIO1_IO11 0x00
+ >;
+ };
+
+ pinctrl_gpio2: gpio2grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA00__GPIO2_IO0 0x00
+ MX7D_PAD_EPDC_DATA01__GPIO2_IO1 0x00
+ MX7D_PAD_EPDC_DATA02__GPIO2_IO2 0x00
+ MX7D_PAD_EPDC_DATA03__GPIO2_IO3 0x03
+ MX7D_PAD_EPDC_DATA04__GPIO2_IO4 0x03
+ MX7D_PAD_EPDC_DATA05__GPIO2_IO5 0x03
+ MX7D_PAD_EPDC_DATA06__GPIO2_IO6 0x03
+ MX7D_PAD_EPDC_DATA07__GPIO2_IO7 0x03
+ MX7D_PAD_EPDC_DATA10__GPIO2_IO10 0x00
+ MX7D_PAD_EPDC_DATA11__GPIO2_IO11 0x00
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SDA__I2C1_SDA 0x4000007f
+ MX7D_PAD_I2C1_SCL__I2C1_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_I2C1_SDA__GPIO4_IO9 0x4000007f
+ MX7D_PAD_I2C1_SCL__GPIO4_IO8 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SDA__I2C2_SDA 0x4000007f
+ MX7D_PAD_I2C2_SCL__I2C2_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_I2C2_SDA__GPIO4_IO11 0x4000007f
+ MX7D_PAD_I2C2_SCL__GPIO4_IO10 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX7D_PAD_I2C3_SDA__I2C3_SDA 0x4000007f
+ MX7D_PAD_I2C3_SCL__I2C3_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c3_gpio: i2c3gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_I2C3_SDA__GPIO4_IO13 0x4000007f
+ MX7D_PAD_I2C3_SCL__GPIO4_IO12 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX7D_PAD_I2C4_SDA__I2C4_SDA 0x4000007f
+ MX7D_PAD_I2C4_SCL__I2C4_SCL 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c4_gpio: i2c4gpiogrp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_RX_BCLK__GPIO6_IO17 0x4000007f
+ MX7D_PAD_SAI1_RX_SYNC__GPIO6_IO16 0x4000007f
+ >;
+ };
+
+ pinctrl_leds_debug: debuggrp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA08__GPIO2_IO8 0x59
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ MX7D_PAD_SAI1_TX_BCLK__SAI1_TX_BCLK 0x1f
+ MX7D_PAD_SAI1_TX_SYNC__SAI1_TX_SYNC 0x1f
+ MX7D_PAD_SAI1_TX_DATA__SAI1_TX_DATA0 0x30
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX7D_PAD_SAI2_TX_BCLK__SAI2_TX_BCLK 0x1f
+ MX7D_PAD_SAI2_TX_SYNC__SAI2_TX_SYNC 0x1f
+ MX7D_PAD_SAI2_TX_DATA__SAI2_TX_DATA0 0x30
+ >;
+ };
+
+ pinctrl_sai3: sai3grp {
+ fsl,pins = <
+ MX7D_PAD_UART3_TX_DATA__SAI3_TX_BCLK 0x1f
+ MX7D_PAD_UART3_CTS_B__SAI3_TX_SYNC 0x1f
+ MX7D_PAD_UART3_RTS_B__SAI3_TX_DATA0 0x30
+ >;
+ };
+
+ pinctrl_tpa1: tpa6130-1grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA21__GPIO3_IO26 0x40000038
+ >;
+ };
+
+ pinctrl_tpa2: tpa6130-2grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA22__GPIO3_IO27 0x40000038
+ >;
+ };
+
+ pinctrl_tpa3: tpa6130-3grp {
+ fsl,pins = <
+ MX7D_PAD_LCD_DATA23__GPIO3_IO28 0x40000038
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX7D_PAD_UART2_RX_DATA__UART2_DCE_RX 0x79
+ MX7D_PAD_UART2_TX_DATA__UART2_DCE_TX 0x79
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX7D_PAD_SD2_DATA0__UART4_DCE_RX 0x79
+ MX7D_PAD_SD2_DATA1__UART4_DCE_TX 0x79
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX7D_PAD_SD1_CMD__SD1_CMD 0x59
+ MX7D_PAD_SD1_CLK__SD1_CLK 0x19
+ MX7D_PAD_SD1_DATA0__SD1_DATA0 0x59
+ MX7D_PAD_SD1_DATA1__SD1_DATA1 0x59
+ MX7D_PAD_SD1_DATA2__SD1_DATA2 0x59
+ MX7D_PAD_SD1_DATA3__SD1_DATA3 0x59
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x59
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x19
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
+ MX7D_PAD_SD3_RESET_B__SD3_RESET_B 0x59
+ >;
+ };
+};
+
+&iomuxc_lpsr {
+ pinctrl_codec1: dac1grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO05__GPIO1_IO5 0x40000038
+ >;
+ };
+
+ pinctrl_codec2: dac2grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO06__GPIO1_IO6 0x40000038
+ >;
+ };
+
+ pinctrl_codec3: dac3grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO07__GPIO1_IO7 0x40000038
+ >;
+ };
+
+ pinctrl_switch: switchgrp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO02__GPIO1_IO2 0x08
+ >;
+ };
+};
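
The three TLV320DAC3100 codecs and TPA6130A2 headphone amplifiers above all set #sound-dai-cells, so they are consumed as DAI endpoints by sound-card nodes presumably defined earlier in this board file, outside this hunk. A minimal sketch of how one such link could look with the generic simple-audio-card binding, assuming codec1 is driven from SAI1 in I2S format; illustrative only, not the board's actual sound node:

	sound-sketch {
		compatible = "simple-audio-card";
		simple-audio-card,name = "dac3100-sai1";
		simple-audio-card,format = "i2s";

		simple-audio-card,cpu {
			sound-dai = <&sai1>;
		};

		simple-audio-card,codec {
			sound-dai = <&codec1>;
		};
	};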
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 6eb98e7c568d..f33b560821b8 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -154,6 +154,7 @@
ranges = <0x81000000 0 0 0x4ff80000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x40000000 0x40000000 0 0x0ff00000>; /* non-prefetchable memory */
num-lanes = <1>;
+ num-viewport = <4>;
interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/imx7s-mba7.dts b/arch/arm/boot/dts/imx7s-mba7.dts
new file mode 100644
index 000000000000..a143d566a38b
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-mba7.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Source for TQ Systems TQMa7S board on MBa7 carrier board.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "imx7s-tqma7.dtsi"
+#include "imx7-mba7.dtsi"
+
+/ {
+ model = "TQ Systems TQMa7S board on MBa7 carrier board";
+ compatible = "tq,imx7s-mba7", "fsl,imx7s";
+};
diff --git a/arch/arm/boot/dts/imx7s-tqma7.dtsi b/arch/arm/boot/dts/imx7s-tqma7.dtsi
new file mode 100644
index 000000000000..5f5433eb7dd7
--- /dev/null
+++ b/arch/arm/boot/dts/imx7s-tqma7.dtsi
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
+/*
+ * Device Tree Include file for TQ Systems TQMa7S board with NXP i.MX7Solo SoC.
+ *
+ * Copyright (C) 2016 TQ Systems GmbH
+ * Author: Markus Niebel <Markus.Niebel@tq-group.com>
+ * Copyright (C) 2019 Bruno Thomsen <bruno.thomsen@gmail.com>
+ */
+
+#include "imx7s.dtsi"
+#include "imx7-tqma7.dtsi"
diff --git a/arch/arm/boot/dts/imx7s-warp.dts b/arch/arm/boot/dts/imx7s-warp.dts
index 23431faecaf4..d6b4888fa686 100644
--- a/arch/arm/boot/dts/imx7s-warp.dts
+++ b/arch/arm/boot/dts/imx7s-warp.dts
@@ -55,6 +55,14 @@
regulator-always-on;
};
+ reg_peri_3p15v: regulator-peri-3p15v {
+ compatible = "regulator-fixed";
+ regulator-name = "peri_3p15v_reg";
+ regulator-min-microvolt = <3150000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-always-on;
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "imx7-sgtl5000";
@@ -77,6 +85,10 @@
assigned-clock-rates = <884736000>;
};
+&csi {
+ status = "okay";
+};
+
&i2c1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
@@ -121,6 +133,8 @@
swbst_reg: swbst {
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5150000>;
+ regulator-boot-on;
+ regulator-always-on;
};
snvs_reg: vsnvs {
@@ -178,6 +192,27 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
+
+ ov2680: camera@36 {
+ compatible = "ovti,ov2680";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov2680>;
+ reg = <0x36>;
+ clocks = <&osc>;
+ clock-names = "xvclk";
+ reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ DOVDD-supply = <&sw2_reg>;
+ DVDD-supply = <&sw2_reg>;
+ AVDD-supply = <&reg_peri_3p15v>;
+
+ port {
+ ov2680_to_mipi: endpoint {
+ remote-endpoint = <&mipi_from_sensor>;
+ clock-lanes = <0>;
+ data-lanes = <1>;
+ };
+ };
+ };
};
&i2c3 {
@@ -211,6 +246,22 @@
};
};
+&mipi_csi {
+ clock-frequency = <166000000>;
+ fsl,csis-hs-settle = <3>;
+ status = "okay";
+
+ port@0 {
+ reg = <0>;
+
+ mipi_from_sensor: endpoint {
+ remote-endpoint = <&ov2680_to_mipi>;
+ data-lanes = <1>;
+ };
+
+ };
+};
+
&sai1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai1>;
@@ -277,6 +328,10 @@
status = "okay";
};
+&video_mux {
+ status = "okay";
+};
+
&wdog1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog>;
@@ -331,6 +386,12 @@
>;
};
+ pinctrl_ov2680: ov2660grp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO03__GPIO1_IO3 0x14
+ >;
+ };
+
pinctrl_sai1: sai1grp {
fsl,pins = <
MX7D_PAD_SAI1_RX_DATA__SAI1_RX_DATA0 0x1f
diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
index e88f53a4c7f4..106711d2c01b 100644
--- a/arch/arm/boot/dts/imx7s.dtsi
+++ b/arch/arm/boot/dts/imx7s.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/imx7-reset.h>
#include "imx7d-pinfunc.h"
/ {
@@ -497,8 +498,43 @@
gpr: iomuxc-gpr@30340000 {
compatible = "fsl,imx7d-iomuxc-gpr",
- "fsl,imx6q-iomuxc-gpr", "syscon";
+ "fsl,imx6q-iomuxc-gpr", "syscon",
+ "simple-mfd";
reg = <0x30340000 0x10000>;
+
+ mux: mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <0>;
+ mux-reg-masks = <0x14 0x00000010>;
+ };
+
+ video_mux: csi-mux {
+ compatible = "video-mux";
+ mux-controls = <&mux 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+
+ csi_mux_from_mipi_vc0: endpoint {
+ remote-endpoint = <&mipi_vc0_to_csi_mux>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ csi_mux_to_csi: endpoint {
+ remote-endpoint = <&csi_from_csi_mux>;
+ };
+ };
+ };
};
ocotp: ocotp-ctrl@30350000 {
@@ -606,7 +642,13 @@
#address-cells = <1>;
#size-cells = <0>;
- pgc_pcie_phy: pgc-power-domain@1 {
+ pgc_mipi_phy: power-domain@0 {
+ #power-domain-cells = <0>;
+ reg = <0>;
+ power-supply = <&reg_1p0d>;
+ };
+
+ pgc_pcie_phy: power-domain@1 {
#power-domain-cells = <0>;
reg = <1>;
power-supply = <&reg_1p0d>;
@@ -628,6 +670,7 @@
interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_ADC_ROOT_CLK>;
clock-names = "adc";
+ #io-channel-cells = <1>;
status = "disabled";
};
@@ -637,6 +680,7 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_ADC_ROOT_CLK>;
clock-names = "adc";
+ #io-channel-cells = <1>;
status = "disabled";
};
@@ -696,6 +740,23 @@
status = "disabled";
};
+ csi: csi@30710000 {
+ compatible = "fsl,imx7-csi";
+ reg = <0x30710000 0x10000>;
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_CLK_DUMMY>,
+ <&clks IMX7D_CSI_MCLK_ROOT_CLK>,
+ <&clks IMX7D_CLK_DUMMY>;
+ clock-names = "axi", "mclk", "dcic";
+ status = "disabled";
+
+ port {
+ csi_from_csi_mux: endpoint {
+ remote-endpoint = <&csi_mux_to_csi>;
+ };
+ };
+ };
+
lcdif: lcdif@30730000 {
compatible = "fsl,imx7d-lcdif", "fsl,imx28-lcdif";
reg = <0x30730000 0x10000>;
@@ -705,6 +766,35 @@
clock-names = "pix", "axi";
status = "disabled";
};
+
+ mipi_csi: mipi-csi@30750000 {
+ compatible = "fsl,imx7-mipi-csi2";
+ reg = <0x30750000 0x10000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+ <&clks IMX7D_MIPI_CSI_ROOT_CLK>,
+ <&clks IMX7D_MIPI_DPHY_ROOT_CLK>;
+ clock-names = "pclk", "wrap", "phy";
+ power-domains = <&pgc_mipi_phy>;
+ phy-supply = <&reg_1p0d>;
+ resets = <&src IMX7_RESET_MIPI_PHY_MRST>;
+ reset-names = "mrst";
+ status = "disabled";
+
+ port@0 {
+ reg = <0>;
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mipi_vc0_to_csi_mux: endpoint {
+ remote-endpoint = <&csi_mux_from_mipi_vc0>;
+ };
+ };
+ };
};
aips3: aips-bus@30800000 {
@@ -1067,8 +1157,8 @@
compatible = "fsl,imx7d-sdma", "fsl,imx35-sdma";
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX7D_SDMA_CORE_CLK>,
- <&clks IMX7D_AHB_CHANNEL_ROOT_CLK>;
+ clocks = <&clks IMX7D_IPG_ROOT_CLK>,
+ <&clks IMX7D_SDMA_CORE_CLK>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
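
Read together with the imx7s-warp.dts hunks above, the capture pipeline is linked through the usual OF-graph remote-endpoint pairs: ov2680_to_mipi <-> mipi_from_sensor (the board supplies port@0 of mipi_csi), mipi_vc0_to_csi_mux <-> csi_mux_from_mipi_vc0 (mipi_csi port@1 into video_mux port@1), and csi_mux_to_csi <-> csi_from_csi_mux (video_mux port@2 into the CSI bridge). The empty video_mux port@0 is presumably the alternative parallel-sensor input selected by the GPR bit described in mux-reg-masks.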
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index fca6e50f37c8..d6b711011cba 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -286,6 +286,12 @@
status = "disabled";
};
+ memory-controller@40ab0000 {
+ compatible = "fsl,imx7ulp-mmdc", "fsl,imx6q-mmdc";
+ reg = <0x40ab0000 0x1000>;
+ clocks = <&pcc3 IMX7ULP_CLK_MMDC>;
+ };
+
iomuxc1: pinctrl@40ac0000 {
compatible = "fsl,imx7ulp-iomuxc1";
reg = <0x40ac0000 0x1000>;
@@ -359,5 +365,11 @@
compatible = "fsl,imx7ulp-sim", "syscon";
reg = <0x410a3000 0x1000>;
};
+
+ ocotp: ocotp-ctrl@410a6000 {
+ compatible = "fsl,imx7ulp-ocotp", "syscon";
+ reg = <0x410a6000 0x4000>;
+ clocks = <&scg1 IMX7ULP_CLK_DUMMY>;
+ };
};
};
diff --git a/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
new file mode 100644
index 000000000000..8fcd95805ff4
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp42x-linksys-nslu2.dts
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Linksys NSLU2
+ */
+
+/dts-v1/;
+
+#include "intel-ixp42x.dtsi"
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "Linksys NSLU2 (Network Storage Link for USB 2.0 Disk Drives)";
+ compatible = "linksys,nslu2", "intel,ixp42x";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory@0 {
+ /* 32 MB SDRAM */
+ device_type = "memory";
+ reg = <0x00000000 0x2000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 root=/dev/mtdblock2 rw rootfstype=squashfs,jffs2 rootwait";
+ stdout-path = "uart0:115200n8";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-status {
+ label = "nslu2:red:status";
+ gpios = <&gpio0 0 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ led-ready {
+ label = "nslu2:green:ready";
+ gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ led-disk-1 {
+ label = "nslu2:green:disk-1";
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ led-disk-2 {
+ label = "nslu2:green:disk-2";
+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ button-power {
+ wakeup-source;
+ linux,code = <KEY_POWER>;
+ label = "power";
+ gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+ };
+ button-reset {
+ wakeup-source;
+ linux,code = <KEY_ESC>;
+ label = "reset";
+ gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ i2c {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio0 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio0 6 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@6f {
+ compatible = "xicor,x1205";
+ reg = <0x6f>;
+ };
+ };
+
+ gpio-poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+ timeout-ms = <5000>;
+ };
+
+ /* The first 16MB region on the expansion bus */
+ flash@50000000 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ bank-width = <2>;
+ /*
+ * 8 MB of Flash in 0x20000 byte blocks
+ * mapped in at 0x50000000
+ */
+ reg = <0x50000000 0x800000>;
+
+ partitions {
+ compatible = "redboot-fis";
+ /* Eraseblock at 0x7e0000 */
+ fis-index-block = <0x3f>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/intel-ixp42x.dtsi b/arch/arm/boot/dts/intel-ixp42x.dtsi
new file mode 100644
index 000000000000..a9622ca850cc
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp42x.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Intel XScale Network Processors
+ * in the IXP 42x series. This series has 32 interrupts.
+ */
+#include "intel-ixp4xx.dtsi"
+
+/ {
+ soc {
+ interrupt-controller@c8003000 {
+ compatible = "intel,ixp42x-interrupt";
+ };
+
+ /*
+ * This is the USB Device Mode (UDC) controller, which is used
+ * to present the IXP4xx as a device on a USB bus.
+ */
+ usb@c800b000 {
+ compatible = "intel,ixp4xx-udc";
+ reg = <0xc800b000 0x1000>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
new file mode 100644
index 000000000000..ba1163a1e1e7
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp43x-gateworks-gw2358.dts
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Gateworks IXP43x-based Cambria GW2358
+ */
+
+/dts-v1/;
+
+#include "intel-ixp43x.dtsi"
+
+/ {
+ model = "Gateworks Cambria GW2358";
+ compatible = "gateworks,gw2358", "intel,ixp43x";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory@0 {
+ /* 128 MB SDRAM */
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 root=/dev/mtdblock2 rw rootfstype=squashfs,jffs2 rootwait";
+ stdout-path = "uart0:115200n8";
+ };
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-user {
+ label = "gw2358:green:LED";
+ gpios = <&pld1 0 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+
+ i2c {
+ compatible = "i2c-gpio";
+ sda-gpios = <&gpio0 7 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio0 6 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hwmon@28 {
+ compatible = "adi,ad7418";
+ reg = <0x28>;
+ };
+ rtc: ds1672@68 {
+ compatible = "dallas,ds1672";
+ reg = <0x68>;
+ };
+ eeprom@51 {
+ compatible = "atmel,24c08";
+ reg = <0x51>;
+ pagesize = <16>;
+ size = <1024>;
+ read-only;
+ };
+ pld0: pld@56 {
+ compatible = "gateworks,pld-gpio";
+ reg = <0x56>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ /* This PLD just handles the LED and user button */
+ pld1: pld@57 {
+ compatible = "gateworks,pld-gpio";
+ reg = <0x57>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ flash@50000000 {
+ compatible = "intel,ixp4xx-flash", "cfi-flash";
+ bank-width = <2>;
+ /*
+ * 32 MB of Flash in 0x20000 byte blocks
+ * mapped in at 0x50000000
+ */
+ reg = <0x50000000 0x2000000>;
+
+ partitions {
+ compatible = "redboot-fis";
+ /* Eraseblock at 0x1fe0000 */
+ fis-index-block = <0xff>;
+ };
+ };
+};
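
For both IXP4xx flash nodes above, fis-index-block is the FIS directory location expressed in 0x20000-byte erase blocks, matching the inline comments: 0x3f * 0x20000 = 0x7e0000 on the 8 MB NSLU2 part and 0xff * 0x20000 = 0x1fe0000 on the 32 MB Cambria part, i.e. the last erase block of each chip.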
diff --git a/arch/arm/boot/dts/intel-ixp43x.dtsi b/arch/arm/boot/dts/intel-ixp43x.dtsi
new file mode 100644
index 000000000000..494fb2ff57a0
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp43x.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Intel XScale Network Processors
+ * in the IXP 43x series. This series has 64 interrupts and adds a few more
+ * peripherals over the 42x series.
+ */
+#include "intel-ixp4xx.dtsi"
+
+/ {
+ soc {
+ interrupt-controller@c8003000 {
+ compatible = "intel,ixp43x-interrupt";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi b/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi
new file mode 100644
index 000000000000..f8cd506659dc
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp45x-ixp46x.dtsi
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Intel XScale Network Processors
+ * in the IXP45x and IXP46x series. This series has 64 interrupts and adds a
+ * few more peripherals over the 42x and 43x series so this extends the
+ * basic IXP4xx DTSI.
+ */
+#include "intel-ixp4xx.dtsi"
+
+/ {
+ soc {
+ interrupt-controller@c8003000 {
+ compatible = "intel,ixp43x-interrupt";
+ };
+
+ /*
+ * This is the USB Device Mode (UDC) controller, which is used
+ * to present the IXP4xx as a device on a USB bus.
+ */
+ usb@c800b000 {
+ compatible = "intel,ixp4xx-udc";
+ reg = <0xc800b000 0x1000>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ i2c@c8011000 {
+ compatible = "intel,ixp4xx-i2c";
+ reg = <0xc8011000 0x18>;
+ interrupts = <33 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/intel-ixp4xx.dtsi b/arch/arm/boot/dts/intel-ixp4xx.dtsi
new file mode 100644
index 000000000000..d4a09584f417
--- /dev/null
+++ b/arch/arm/boot/dts/intel-ixp4xx.dtsi
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Device Tree file for Intel XScale Network Processors
+ * in the IXP 4xx series.
+ */
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+ interrupt-parent = <&intcon>;
+
+ qmgr: queue-manager@60000000 {
+ compatible = "intel,ixp4xx-ahb-queue-manager";
+ reg = <0x60000000 0x4000>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>, <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: serial@c8000000 {
+ compatible = "intel,xscale-uart";
+ reg = <0xc8000000 0x1000>;
+ /*
+ * The reg-offset and reg-shift are a side effect
+ * of running the platform in big endian mode.
+ */
+ reg-offset = <3>;
+ reg-shift = <2>;
+ interrupts = <15 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <14745600>;
+ no-loopback-test;
+ };
+
+ gpio0: gpio@c8004000 {
+ compatible = "intel,ixp4xx-gpio";
+ reg = <0xc8004000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ intcon: interrupt-controller@c8003000 {
+ /*
+ * Note: no compatible string. The subvariant of the
+ * chip needs to define what version it is. The
+ * location of the interrupt controller is fixed in
+ * memory across all variants.
+ */
+ reg = <0xc8003000 0x100>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ timer@c8005000 {
+ compatible = "intel,ixp4xx-timer";
+ reg = <0xc8005000 0x100>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ npe@c8006000 {
+ compatible = "intel,ixp4xx-network-processing-engine";
+ reg = <0xc8006000 0x1000>, <0xc8007000 0x1000>, <0xc8008000 0x1000>;
+ };
+ };
+};
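
A worked example for the reg-offset/reg-shift comment in the uart0 node above, assuming the standard 8250 semantics of address = base + reg-offset + (register << reg-shift): with reg-shift = <2> the byte-wide UART registers are spaced one per 32-bit word, and reg-offset = <3> selects the byte lane they occupy on this big-endian bus, so register 3 (LCR) is accessed at 0xc8000000 + 3 + (3 << 2) = 0xc800000f.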
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 4990ed90dcea..3e39b9a1f35d 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -153,7 +153,7 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
- cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
+ cd-gpios = <&gpio4 14 GPIO_ACTIVE_LOW>; /* gpio_110 */
vmmc-supply = <&vmmc1>;
bus-width = <4>;
cap-power-off-card;
diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts
index f46a11827ef6..4adf4c96f798 100644
--- a/arch/arm/boot/dts/lpc3250-ea3250.dts
+++ b/arch/arm/boot/dts/lpc3250-ea3250.dts
@@ -201,6 +201,7 @@
&mac {
phy-mode = "rmii";
use-iram;
+ status = "okay";
};
/* Here, choose exactly one from: ohci, usbd */
diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
index ebd19258e22b..1b15f798794b 100644
--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
+++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
@@ -134,6 +134,7 @@
&mac {
phy-mode = "rmii";
use-iram;
+ status = "okay";
};
/* Here, choose exactly one from: ohci, usbd */
@@ -201,8 +202,6 @@
};
&ssp0 {
- #address-cells = <1>;
- #size-cells = <0>;
num-cs = <1>;
cs-gpios = <&gpio 3 5 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 20b38f4ade37..7b7ec7b1217b 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -1,14 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* NXP LPC32xx SoC
*
+ * Copyright (C) 2015-2019 Vladimir Zapolskiy <vz@mleia.com>
* Copyright 2012 Roland Stigge <stigge@antcom.de>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#include <dt-bindings/clock/lpc32xx-clock.h>
@@ -152,6 +147,7 @@
reg = <0x31060000 0x1000>;
interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk LPC32XX_CLK_MAC>;
+ status = "disabled";
};
emc: memory-controller@31080000 {
@@ -185,6 +181,8 @@
interrupts = <20 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk LPC32XX_CLK_SSP0>;
clock-names = "apb_pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -192,6 +190,8 @@
compatible = "nxp,lpc3220-spi";
reg = <0x20088000 0x1000>;
clocks = <&clk LPC32XX_CLK_SPI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -205,6 +205,8 @@
interrupts = <21 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk LPC32XX_CLK_SSP1>;
clock-names = "apb_pclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
@@ -212,12 +214,15 @@
compatible = "nxp,lpc3220-spi";
reg = <0x20090000 0x1000>;
clocks = <&clk LPC32XX_CLK_SPI2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
i2s0: i2s@20094000 {
compatible = "nxp,lpc3220-i2s";
reg = <0x20094000 0x1000>;
+ status = "disabled";
};
sd: sd@20098000 {
@@ -232,7 +237,8 @@
i2s1: i2s@2009c000 {
compatible = "nxp,lpc3220-i2s";
- reg = <0x2009C000 0x1000>;
+ reg = <0x2009c000 0x1000>;
+ status = "disabled";
};
/* UART5 first since it is the default console, ttyS0 */
@@ -275,7 +281,7 @@
i2c1: i2c@400a0000 {
compatible = "nxp,pnx-i2c";
- reg = <0x400A0000 0x100>;
+ reg = <0x400a0000 0x100>;
interrupt-parent = <&sic1>;
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
#address-cells = <1>;
@@ -286,7 +292,7 @@
i2c2: i2c@400a8000 {
compatible = "nxp,pnx-i2c";
- reg = <0x400A8000 0x100>;
+ reg = <0x400a8000 0x100>;
interrupt-parent = <&sic1>;
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
#address-cells = <1>;
@@ -297,7 +303,7 @@
mpwm: mpwm@400e8000 {
compatible = "nxp,lpc3220-motor-pwm";
- reg = <0x400E8000 0x78>;
+ reg = <0x400e8000 0x78>;
status = "disabled";
#pwm-cells = <2>;
};
@@ -396,7 +402,7 @@
timer4: timer@4002c000 {
compatible = "nxp,lpc3220-timer";
- reg = <0x4002C000 0x1000>;
+ reg = <0x4002c000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
clocks = <&clk LPC32XX_CLK_TIMER4>;
clock-names = "timerclk";
@@ -414,7 +420,7 @@
watchdog: watchdog@4003c000 {
compatible = "nxp,pnx4008-wdt";
- reg = <0x4003C000 0x1000>;
+ reg = <0x4003c000 0x1000>;
clocks = <&clk LPC32XX_CLK_WDOG>;
};
@@ -453,7 +459,7 @@
timer1: timer@4004c000 {
compatible = "nxp,lpc3220-timer";
- reg = <0x4004C000 0x1000>;
+ reg = <0x4004c000 0x1000>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
clocks = <&clk LPC32XX_CLK_TIMER1>;
clock-names = "timerclk";
@@ -479,7 +485,7 @@
pwm1: pwm@4005c000 {
compatible = "nxp,lpc3220-pwm";
- reg = <0x4005C000 0x4>;
+ reg = <0x4005c000 0x4>;
clocks = <&clk LPC32XX_CLK_PWM1>;
assigned-clocks = <&clk LPC32XX_CLK_PWM1>;
assigned-clock-parents = <&clk LPC32XX_CLK_PERIPH>;
@@ -488,7 +494,7 @@
pwm2: pwm@4005c004 {
compatible = "nxp,lpc3220-pwm";
- reg = <0x4005C004 0x4>;
+ reg = <0x4005c004 0x4>;
clocks = <&clk LPC32XX_CLK_PWM2>;
assigned-clocks = <&clk LPC32XX_CLK_PWM2>;
assigned-clock-parents = <&clk LPC32XX_CLK_PERIPH>;
diff --git a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
index ba1ddd93b8f8..dcb1d9bd0922 100644
--- a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
+++ b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts
@@ -204,7 +204,6 @@
};
&qspi {
- fsl,qspi-has-second-chip;
status = "okay";
flash: flash@0 {
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index ca60730dda40..74a67604876c 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -146,6 +146,10 @@
status = "okay";
};
+&esdhc {
+ status = "okay";
+};
+
&i2c0 {
status = "okay";
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index 97e1fb7ea932..9b1fe99d55b1 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -145,7 +145,7 @@
};
&enet0 {
- tbi-handle = <&tbi1>;
+ tbi-handle = <&tbi0>;
phy-handle = <&sgmii_phy2>;
phy-connection-type = "sgmii";
status = "okay";
@@ -225,6 +225,13 @@
sgmii_phy2: ethernet-phy@2 {
reg = <0x2>;
};
+ tbi0: tbi-phy@1f {
+ reg = <0x1f>;
+ device_type = "tbi-phy";
+ };
+};
+
+&mdio1 {
tbi1: tbi-phy@1f {
reg = <0x1f>;
device_type = "tbi-phy";
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index b4f2723ecd86..464df4290ffc 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -186,7 +186,6 @@
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
clocks = <&clockgen 4 1>, <&clockgen 4 1>;
- big-endian;
status = "disabled";
};
@@ -446,6 +445,34 @@
status = "disabled";
};
+ counter0: counter@29d0000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x0 0x29d0000 0x0 0x10000>;
+ big-endian;
+ status = "disabled";
+ };
+
+ counter1: counter@29e0000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x0 0x29e0000 0x0 0x10000>;
+ big-endian;
+ status = "disabled";
+ };
+
+ counter2: counter@29f0000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x0 0x29f0000 0x0 0x10000>;
+ big-endian;
+ status = "disabled";
+ };
+
+ counter3: counter@2a00000 {
+ compatible = "fsl,ftm-quaddec";
+ reg = <0x0 0x2a00000 0x0 0x10000>;
+ big-endian;
+ status = "disabled";
+ };
+
gpio0: gpio@2300000 {
compatible = "fsl,ls1021a-gpio", "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
@@ -701,7 +728,7 @@
};
mdio0: mdio@2d24000 {
- compatible = "gianfar";
+ compatible = "fsl,etsec2-mdio";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
@@ -709,6 +736,15 @@
<0x0 0x2d10030 0x0 0x4>;
};
+ mdio1: mdio@2d64000 {
+ compatible = "fsl,etsec2-mdio";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0 0x2d64000 0x0 0x4000>,
+ <0x0 0x2d50030 0x0 0x4>;
+ };
+
ptp_clock@2d10e00 {
compatible = "fsl,etsec-ptp";
reg = <0x0 0x2d10e00 0x0 0xb0>;
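
The four fsl,ftm-quaddec counters above are added disabled at the SoC level. In the same way that ls1021a-qds.dts enables &esdhc earlier in this series, a board that actually wires up the FlexTimer quadrature inputs would switch one on from its own .dts; an illustrative sketch, not part of this patch:

	&counter0 {
		status = "okay";
	};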
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 6f54a8897574..8841783aceec 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -252,6 +252,15 @@
#size-cells = <0>;
status = "disabled";
};
+
+ rtc: rtc@740 {
+ compatible = "amlogic,meson6-rtc";
+ reg = <0x740 0x14>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ };
};
usb0: usb@c9040000 {
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index a9781243453e..7ef442462ea4 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -345,6 +345,11 @@
status = "disabled";
};
+ clock-measure@8758 {
+ compatible = "amlogic,meson8-clk-measure";
+ reg = <0x8758 0x1c>;
+ };
+
pinctrl_cbus: pinctrl@9880 {
compatible = "amlogic,meson8-cbus-pinctrl";
reg = <0x9880 0x10>;
@@ -536,6 +541,11 @@
compatible = "amlogic,meson8-pwm", "amlogic,meson8b-pwm";
};
+&rtc {
+ compatible = "amlogic,meson8-rtc";
+ resets = <&reset RESET_RTC>;
+};
+
&saradc {
compatible = "amlogic,meson8-saradc", "amlogic,meson-saradc";
clocks = <&clkc CLKID_XTAL>,
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
index 3ca9638fad09..9bf4249cb60d 100644
--- a/arch/arm/boot/dts/meson8b-ec100.dts
+++ b/arch/arm/boot/dts/meson8b-ec100.dts
@@ -88,6 +88,14 @@
};
};
+ rtc32k_xtal: rtc32k-xtal-clk {
+ /* X2 in the schematics */
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "RTC32K";
+ #clock-cells = <0>;
+ };
+
usb_vbus: regulator-usb-vbus {
/*
* Silergy SY6288CCAC-GP 2A Power Distribution Switch.
@@ -347,6 +355,12 @@
clock-names = "clkin0";
};
+&rtc {
+ status = "okay";
+ clocks = <&rtc32k_xtal>;
+ vdd-supply = <&vcc_rtc>;
+};
+
/* exposed through the pin headers labeled "URDUG1" on the top of the PCB */
&uart_AO {
status = "okay";
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index 3b0e0f8fbc23..f3ad9397f670 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -124,6 +124,14 @@
io-channels = <&saradc 8>;
};
+ rtc32k_xtal: rtc32k-xtal-clk {
+ /* X3 in the schematics */
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "RTC32K";
+ #clock-cells = <0>;
+ };
+
vcc_1v8: regulator-vcc-1v8 {
/*
* RICHTEK RT9179 configured for a fixed output voltage of
@@ -234,7 +242,59 @@
};
};
+&gpio {
+ gpio-line-names = /* Bank GPIOX */
+ "J2 Header Pin 35", "J2 Header Pin 36",
+ "J2 Header Pin 32", "J2 Header Pin 31",
+ "J2 Header Pin 29", "J2 Header Pin 18",
+ "J2 Header Pin 22", "J2 Header Pin 16",
+ "J2 Header Pin 23", "J2 Header Pin 21",
+ "J2 Header Pin 19", "J2 Header Pin 33",
+ "J2 Header Pin 8", "J2 Header Pin 10",
+ "J2 Header Pin 15", "J2 Header Pin 13",
+ "J2 Header Pin 24", "J2 Header Pin 26",
+ /* Bank GPIOY */
+ "Revision (upper)", "Revision (lower)",
+ "J2 Header Pin 7", "", "J2 Header Pin 12",
+ "J2 Header Pin 11", "", "", "",
+ "TFLASH_VDD_EN", "", "",
+ /* Bank GPIODV */
+ "VCCK_PWM (PWM_C)", "I2CA_SDA", "I2CA_SCL",
+ "I2CB_SDA", "I2CB_SCL", "VDDEE_PWM (PWM_D)",
+ "",
+ /* Bank GPIOH */
+ "HDMI_HPD", "HDMI_I2C_SDA", "HDMI_I2C_SCL",
+ "ETH_PHY_INTR", "ETH_PHY_NRST", "ETH_TXD1",
+ "ETH_TXD0", "ETH_TXD3", "ETH_TXD2",
+ "ETH_RGMII_TX_CLK",
+ /* Bank CARD */
+ "SD_DATA1 (SDB_D1)", "SD_DATA0 (SDB_D0)",
+ "SD_CLK", "SD_CMD", "SD_DATA3 (SDB_D3)",
+ "SD_DATA2 (SDB_D2)", "SD_CDN (SD_DET_N)",
+ /* Bank BOOT */
+ "SDC_D0 (EMMC)", "SDC_D1 (EMMC)",
+ "SDC_D2 (EMMC)", "SDC_D3 (EMMC)",
+ "SDC_D4 (EMMC)", "SDC_D5 (EMMC)",
+ "SDC_D6 (EMMC)", "SDC_D7 (EMMC)",
+ "SDC_CLK (EMMC)", "SDC_RSTn (EMMC)",
+ "SDC_CMD (EMMC)", "BOOT_SEL", "", "", "",
+ "", "", "", "",
+ /* Bank DIF */
+ "ETH_RXD1", "ETH_RXD0", "ETH_RX_DV",
+ "RGMII_RX_CLK", "ETH_RXD3", "ETH_RXD2",
+ "ETH_TXEN", "ETH_PHY_REF_CLK_25MOUT",
+ "ETH_MDC", "ETH_MDIO";
+};
+
&gpio_ao {
+ gpio-line-names = "UART TX", "UART RX", "",
+ "TF_3V3N_1V8_EN", "USB_HUB_RST_N",
+ "USB_OTG_PWREN", "J7 Header Pin 2",
+ "IR_IN", "J7 Header Pin 4",
+ "J7 Header Pin 6", "J7 Header Pin 5",
+ "J7 Header Pin 7", "HDMI_CEC",
+ "SYS_LED", "", "";
+
/*
* WARNING: The USB Hub on the Odroid-C1/C1+ needs a reset signal
* to be turned high in order to be detected by the USB Controller.
@@ -293,6 +353,12 @@
clock-names = "clkin0";
};
+&rtc {
+ /* needs to be enabled manually when a battery is connected */
+ clocks = <&rtc32k_xtal>;
+ vdd-supply = <&vdd_rtc>;
+};
+
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index fe84a8c3ce81..800cd65fc50a 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -294,6 +294,11 @@
status = "disabled";
};
+ clock-measure@8758 {
+ compatible = "amlogic,meson8b-clk-measure";
+ reg = <0x8758 0x1c>;
+ };
+
pinctrl_cbus: pinctrl@9880 {
compatible = "amlogic,meson8b-cbus-pinctrl";
reg = <0x9880 0x10>;
@@ -505,6 +510,11 @@
compatible = "amlogic,meson8b-pwm";
};
+&rtc {
+ compatible = "amlogic,meson8b-rtc";
+ resets = <&reset RESET_RTC>;
+};
+
&saradc {
compatible = "amlogic,meson8b-saradc", "amlogic,meson-saradc";
clocks = <&clkc CLKID_XTAL>,
diff --git a/arch/arm/boot/dts/omap2420-n810.dts b/arch/arm/boot/dts/omap2420-n810.dts
index 96b9913ecc1f..09c1dbc0bb69 100644
--- a/arch/arm/boot/dts/omap2420-n810.dts
+++ b/arch/arm/boot/dts/omap2420-n810.dts
@@ -48,7 +48,7 @@
pinctrl-names = "default";
pinctrl-0 = <&aic33_pins>;
- gpio-reset = <&gpio4 22 GPIO_ACTIVE_LOW>; /* gpio118 */
+ reset-gpios = <&gpio4 22 GPIO_ACTIVE_LOW>; /* gpio118 */
ai3x-gpio-func = <
10 /* AIC3X_GPIO1_FUNC_DIGITAL_MIC_MODCLK */
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index e21ec929f096..714863f8f261 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -214,7 +214,6 @@
width-mm = <50>;
height-mm = <89>;
- backlight = <&lcd_backlight>;
panel-timing {
clock-frequency = <0>; /* Calculated by dsi */
@@ -383,20 +382,30 @@
};
&i2c1 {
- lm3532@38 {
+ led-controller@38 {
compatible = "ti,lm3532";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x38>;
enable-gpios = <&gpio6 12 GPIO_ACTIVE_HIGH>;
- lcd_backlight: backlight {
- compatible = "ti,lm3532-backlight";
+ ramp-up-us = <1024>;
+ ramp-down-us = <8193>;
- lcd {
- led-sources = <0 1 2>;
- ramp-up-msec = <1>;
- ramp-down-msec = <0>;
- };
+ led@0 {
+ reg = <0>;
+ led-sources = <2>;
+ ti,led-mode = <0>;
+ label = ":backlight";
+ linux,default-trigger = "backlight";
+ };
+
+ led@1 {
+ reg = <1>;
+ led-sources = <1>;
+ ti,led-mode = <0>;
+ label = ":kbd_backlight";
};
};
};
diff --git a/arch/arm/boot/dts/omap4-duovero.dtsi b/arch/arm/boot/dts/omap4-duovero.dtsi
index 5e8169153414..a1dacb8a6987 100644
--- a/arch/arm/boot/dts/omap4-duovero.dtsi
+++ b/arch/arm/boot/dts/omap4-duovero.dtsi
@@ -7,6 +7,7 @@
*/
#include "omap443x.dtsi"
+#include "omap4-mcpdm.dtsi"
/ {
model = "Gumstix Duovero";
@@ -82,16 +83,6 @@
>;
};
- mcpdm_pins: pinmux_mcpdm_pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_ul_data.abe_pdm_ul_data */
- OMAP4_IOPAD(0x108, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_dl_data.abe_pdm_dl_data */
- OMAP4_IOPAD(0x10a, PIN_INPUT_PULLUP | MUX_MODE0) /* abe_pdm_frame.abe_pdm_frame */
- OMAP4_IOPAD(0x10c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_lb_clk.abe_pdm_lb_clk */
- OMAP4_IOPAD(0x10e, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_clks.abe_clks */
- >;
- };
-
mcbsp1_pins: pinmux_mcbsp1_pins {
pinctrl-single,pins = <
OMAP4_IOPAD(0x0fe, PIN_INPUT | MUX_MODE0) /* abe_mcbsp1_clkx.abe_mcbsp1_clkx */
@@ -210,16 +201,6 @@
status = "okay";
};
-&mcpdm {
- pinctrl-names = "default";
- pinctrl-0 = <&mcpdm_pins>;
-
- clocks = <&twl6040>;
- clock-names = "pdmclk";
-
- status = "okay";
-};
-
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
diff --git a/arch/arm/boot/dts/omap4-l4-abe.dtsi b/arch/arm/boot/dts/omap4-l4-abe.dtsi
new file mode 100644
index 000000000000..67072df39bc7
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-l4-abe.dtsi
@@ -0,0 +1,501 @@
+&l4_abe { /* 0x40100000 */
+ compatible = "ti,omap4-l4-abe", "simple-bus";
+ reg = <0x40100000 0x400>,
+ <0x40100400 0x400>;
+ reg-names = "la", "ap";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x40100000 0x100000>, /* segment 0 */
+ <0x49000000 0x49000000 0x100000>;
+ segment@0 { /* 0x40100000 */
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges =
+ /* CPU to L4 ABE mapping */
+ <0x00000000 0x00000000 0x000400>, /* ap 0 */
+ <0x00000400 0x00000400 0x000400>, /* ap 1 */
+ <0x00022000 0x00022000 0x001000>, /* ap 2 */
+ <0x00023000 0x00023000 0x001000>, /* ap 3 */
+ <0x00024000 0x00024000 0x001000>, /* ap 4 */
+ <0x00025000 0x00025000 0x001000>, /* ap 5 */
+ <0x00026000 0x00026000 0x001000>, /* ap 6 */
+ <0x00027000 0x00027000 0x001000>, /* ap 7 */
+ <0x00028000 0x00028000 0x001000>, /* ap 8 */
+ <0x00029000 0x00029000 0x001000>, /* ap 9 */
+ <0x0002a000 0x0002a000 0x001000>, /* ap 10 */
+ <0x0002b000 0x0002b000 0x001000>, /* ap 11 */
+ <0x0002e000 0x0002e000 0x001000>, /* ap 12 */
+ <0x0002f000 0x0002f000 0x001000>, /* ap 13 */
+ <0x00030000 0x00030000 0x001000>, /* ap 14 */
+ <0x00031000 0x00031000 0x001000>, /* ap 15 */
+ <0x00032000 0x00032000 0x001000>, /* ap 16 */
+ <0x00033000 0x00033000 0x001000>, /* ap 17 */
+ <0x00038000 0x00038000 0x001000>, /* ap 18 */
+ <0x00039000 0x00039000 0x001000>, /* ap 19 */
+ <0x0003a000 0x0003a000 0x001000>, /* ap 20 */
+ <0x0003b000 0x0003b000 0x001000>, /* ap 21 */
+ <0x0003c000 0x0003c000 0x001000>, /* ap 22 */
+ <0x0003d000 0x0003d000 0x001000>, /* ap 23 */
+ <0x0003e000 0x0003e000 0x001000>, /* ap 24 */
+ <0x0003f000 0x0003f000 0x001000>, /* ap 25 */
+ <0x00080000 0x00080000 0x010000>, /* ap 26 */
+ <0x00080000 0x00080000 0x001000>, /* ap 27 */
+ <0x000a0000 0x000a0000 0x010000>, /* ap 28 */
+ <0x000a0000 0x000a0000 0x001000>, /* ap 29 */
+ <0x000c0000 0x000c0000 0x010000>, /* ap 30 */
+ <0x000c0000 0x000c0000 0x001000>, /* ap 31 */
+ <0x000f1000 0x000f1000 0x001000>, /* ap 32 */
+ <0x000f2000 0x000f2000 0x001000>, /* ap 33 */
+
+ /* L3 to L4 ABE mapping */
+ <0x49000000 0x49000000 0x000400>, /* ap 0 */
+ <0x49000400 0x49000400 0x000400>, /* ap 1 */
+ <0x49022000 0x49022000 0x001000>, /* ap 2 */
+ <0x49023000 0x49023000 0x001000>, /* ap 3 */
+ <0x49024000 0x49024000 0x001000>, /* ap 4 */
+ <0x49025000 0x49025000 0x001000>, /* ap 5 */
+ <0x49026000 0x49026000 0x001000>, /* ap 6 */
+ <0x49027000 0x49027000 0x001000>, /* ap 7 */
+ <0x49028000 0x49028000 0x001000>, /* ap 8 */
+ <0x49029000 0x49029000 0x001000>, /* ap 9 */
+ <0x4902a000 0x4902a000 0x001000>, /* ap 10 */
+ <0x4902b000 0x4902b000 0x001000>, /* ap 11 */
+ <0x4902e000 0x4902e000 0x001000>, /* ap 12 */
+ <0x4902f000 0x4902f000 0x001000>, /* ap 13 */
+ <0x49030000 0x49030000 0x001000>, /* ap 14 */
+ <0x49031000 0x49031000 0x001000>, /* ap 15 */
+ <0x49032000 0x49032000 0x001000>, /* ap 16 */
+ <0x49033000 0x49033000 0x001000>, /* ap 17 */
+ <0x49038000 0x49038000 0x001000>, /* ap 18 */
+ <0x49039000 0x49039000 0x001000>, /* ap 19 */
+ <0x4903a000 0x4903a000 0x001000>, /* ap 20 */
+ <0x4903b000 0x4903b000 0x001000>, /* ap 21 */
+ <0x4903c000 0x4903c000 0x001000>, /* ap 22 */
+ <0x4903d000 0x4903d000 0x001000>, /* ap 23 */
+ <0x4903e000 0x4903e000 0x001000>, /* ap 24 */
+ <0x4903f000 0x4903f000 0x001000>, /* ap 25 */
+ <0x49080000 0x49080000 0x010000>, /* ap 26 */
+ <0x49080000 0x49080000 0x001000>, /* ap 27 */
+ <0x490a0000 0x490a0000 0x010000>, /* ap 28 */
+ <0x490a0000 0x490a0000 0x001000>, /* ap 29 */
+ <0x490c0000 0x490c0000 0x010000>, /* ap 30 */
+ <0x490c0000 0x490c0000 0x001000>, /* ap 31 */
+ <0x490f1000 0x490f1000 0x001000>, /* ap 32 */
+ <0x490f2000 0x490f2000 0x001000>; /* ap 33 */
+
+ target-module@22000 { /* 0x40122000, ap 2 02.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp1";
+ reg = <0x2208c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x22000 0x1000>,
+ <0x49022000 0x49022000 0x1000>;
+
+ mcbsp1: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49022000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 33>,
+ <&sdma 34>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@24000 { /* 0x40124000, ap 4 04.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp2";
+ reg = <0x2408c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x24000 0x1000>,
+ <0x49024000 0x49024000 0x1000>;
+
+ mcbsp2: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49024000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 17>,
+ <&sdma 18>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@26000 { /* 0x40126000, ap 6 06.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp3";
+ reg = <0x2608c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x1000>,
+ <0x49026000 0x49026000 0x1000>;
+
+ mcbsp3: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49026000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 19>,
+ <&sdma 20>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@28000 { /* 0x40128000, ap 8 08.0 */
+ compatible = "ti,sysc-mcasp", "ti,sysc";
+ ti,hwmods = "mcasp";
+ reg = <0x28000 0x4>,
+ <0x28004 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_MCASP_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x28000 0x1000>,
+ <0x49028000 0x49028000 0x1000>;
+
+ /*
+ * The child device is unsupported by davinci-mcasp: at
+ * least the RX path is disabled on omap4, and only DIT
+ * mode works (no I2S). See also the old Android kernel
+ * omap-mcasp driver for more information.
+ */
+ };
+
+ target-module@2a000 { /* 0x4012a000, ap 10 0a.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2a000 0x1000>,
+ <0x4902a000 0x4902a000 0x1000>;
+ };
+
+ target-module@2e000 { /* 0x4012e000, ap 12 0c.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ ti,hwmods = "dmic";
+ reg = <0x2e000 0x4>,
+ <0x2e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_DMIC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2e000 0x1000>,
+ <0x4902e000 0x4902e000 0x1000>;
+
+ dmic: dmic@0 {
+ compatible = "ti,omap4-dmic";
+ reg = <0x0 0x7f>, /* MPU private access */
+ <0x4902e000 0x7f>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 67>;
+ dma-names = "up_link";
+ status = "disabled";
+ };
+ };
+
+ target-module@30000 { /* 0x40130000, ap 14 0e.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "wd_timer3";
+ reg = <0x30000 0x4>,
+ <0x30010 0x4>,
+ <0x30014 0x4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-mask = <(SYSC_OMAP2_EMUFREE |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,syss-mask = <1>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_WD_TIMER3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x30000 0x1000>,
+ <0x49030000 0x49030000 0x1000>;
+
+ wdt3: wdt@0 {
+ compatible = "ti,omap4-wdt", "ti,omap3-wdt";
+ reg = <0x0 0x80>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ mcpdm_module: target-module@32000 { /* 0x40132000, ap 16 10.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ ti,hwmods = "mcpdm";
+ reg = <0x32000 0x4>,
+ <0x32010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_MCPDM_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x32000 0x1000>,
+ <0x49032000 0x49032000 0x1000>;
+
+ /* Must only be enabled on boards with pdmclk wired */
+ status = "disabled";
+
+ mcpdm: mcpdm@0 {
+ compatible = "ti,omap4-mcpdm";
+ reg = <0x0 0x7f>, /* MPU private access */
+ <0x49032000 0x7f>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 65>,
+ <&sdma 66>;
+ dma-names = "up_link", "dn_link";
+ };
+ };
+
+ target-module@38000 { /* 0x40138000, ap 18 12.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer5";
+ reg = <0x38000 0x4>,
+ <0x38010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_TIMER5_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x38000 0x1000>,
+ <0x49038000 0x49038000 0x1000>;
+
+ timer5: timer@0 {
+ compatible = "ti,omap4430-timer";
+ reg = <0x00000000 0x80>,
+ <0x49038000 0x80>;
+ clocks = <&abe_clkctrl OMAP4_TIMER5_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ };
+ };
+
+ target-module@3a000 { /* 0x4013a000, ap 20 14.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer6";
+ reg = <0x3a000 0x4>,
+ <0x3a010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_TIMER6_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3a000 0x1000>,
+ <0x4903a000 0x4903a000 0x1000>;
+
+ timer6: timer@0 {
+ compatible = "ti,omap4430-timer";
+ reg = <0x00000000 0x80>,
+ <0x4903a000 0x80>;
+ clocks = <&abe_clkctrl OMAP4_TIMER6_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ };
+ };
+
+ target-module@3c000 { /* 0x4013c000, ap 22 16.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer7";
+ reg = <0x3c000 0x4>,
+ <0x3c010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_TIMER7_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3c000 0x1000>,
+ <0x4903c000 0x4903c000 0x1000>;
+
+ timer7: timer@0 {
+ compatible = "ti,omap4430-timer";
+ reg = <0x00000000 0x80>,
+ <0x4903c000 0x80>;
+ clocks = <&abe_clkctrl OMAP4_TIMER7_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ };
+ };
+
+ target-module@3e000 { /* 0x4013e000, ap 24 18.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer8";
+ reg = <0x3e000 0x4>,
+ <0x3e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3e000 0x1000>,
+ <0x4903e000 0x4903e000 0x1000>;
+
+ timer8: timer@0 {
+ compatible = "ti,omap4430-timer";
+ reg = <0x00000000 0x80>,
+ <0x4903e000 0x80>;
+ clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
+ ti,timer-dsp;
+ };
+ };
+
+ target-module@80000 { /* 0x40180000, ap 26 1a.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80000 0x10000>,
+ <0x49080000 0x49080000 0x10000>;
+ };
+
+ target-module@a0000 { /* 0x401a0000, ap 28 1c.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xa0000 0x10000>,
+ <0x490a0000 0x490a0000 0x10000>;
+ };
+
+ target-module@c0000 { /* 0x401c0000, ap 30 1e.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xc0000 0x10000>,
+ <0x490c0000 0x490c0000 0x10000>;
+ };
+
+ target-module@f1000 { /* 0x401f1000, ap 32 20.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ ti,hwmods = "aess";
+ reg = <0xf1000 0x4>,
+ <0xf1010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): iva, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP4_AESS_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xf1000 0x1000>,
+ <0x490f1000 0x490f1000 0x1000>;
+
+ /*
+ * No child device binding or driver in mainline.
+ * See Android tree and related upstreaming efforts
+ * for the old driver.
+ */
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/omap4-mcpdm.dtsi b/arch/arm/boot/dts/omap4-mcpdm.dtsi
new file mode 100644
index 000000000000..915a9b31a33b
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-mcpdm.dtsi
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common omap4 mcpdm configuration
+ *
+ * Only include this file if your board has pdmclk wired from the
+ * pmic to ABE as mcpdm uses an external clock for the module.
+ */
+
+&omap4_pmx_core {
+ mcpdm_pins: pinmux_mcpdm_pins {
+ pinctrl-single,pins = <
+ /* 0x4a100106 abe_pdm_ul_data.abe_pdm_ul_data ag25 */
+ OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0)
+
+ /* 0x4a100108 abe_pdm_dl_data.abe_pdm_dl_data af25 */
+ OMAP4_IOPAD(0x108, PIN_INPUT_PULLDOWN | MUX_MODE0)
+
+ /* 0x4a10010a abe_pdm_frame.abe_pdm_frame ae25 */
+ OMAP4_IOPAD(0x10a, PIN_INPUT_PULLUP | MUX_MODE0)
+
+ /* 0x4a10010c abe_pdm_lb_clk.abe_pdm_lb_clk af26 */
+ OMAP4_IOPAD(0x10c, PIN_INPUT_PULLDOWN | MUX_MODE0)
+
+ /* 0x4a10010e abe_clks.abe_clks ah26 */
+ OMAP4_IOPAD(0x10e, PIN_INPUT_PULLDOWN | MUX_MODE0)
+ >;
+ };
+};
+
+&mcpdm_module {
+ /*
+ * McPDM pads must be muxed at the interconnect target module
+ * level as the module on the SoC needs an external clock
+ * from the PMIC.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcpdm_pins>;
+ status = "okay";
+};
+
+&mcpdm {
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 926f018823a4..68e1894df713 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -7,6 +7,7 @@
*/
#include <dt-bindings/input/input.h>
#include "elpida_ecb240abacn.dtsi"
+#include "omap4-mcpdm.dtsi"
/ {
memory@80000000 {
@@ -226,16 +227,6 @@
>;
};
- mcpdm_pins: pinmux_mcpdm_pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_ul_data.abe_pdm_ul_data */
- OMAP4_IOPAD(0x108, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_dl_data.abe_pdm_dl_data */
- OMAP4_IOPAD(0x10a, PIN_INPUT_PULLUP | MUX_MODE0) /* abe_pdm_frame.abe_pdm_frame */
- OMAP4_IOPAD(0x10c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_lb_clk.abe_pdm_lb_clk */
- OMAP4_IOPAD(0x10e, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_clks.abe_clks */
- >;
- };
-
mcbsp1_pins: pinmux_mcbsp1_pins {
pinctrl-single,pins = <
OMAP4_IOPAD(0x0fe, PIN_INPUT | MUX_MODE0) /* abe_mcbsp1_clkx.abe_mcbsp1_clkx */
@@ -509,16 +500,6 @@
status = "okay";
};
-&mcpdm {
- pinctrl-names = "default";
- pinctrl-0 = <&mcpdm_pins>;
-
- clocks = <&twl6040>;
- clock-names = "pdmclk";
-
- status = "okay";
-};
-
&twl_usb_comparator {
usb-supply = <&vusb>;
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index c88817bdcc56..fb51a4bffd35 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -9,6 +9,7 @@
#include "omap443x.dtsi"
#include "elpida_ecb240abacn.dtsi"
+#include "omap4-mcpdm.dtsi"
/ {
model = "TI OMAP4 SDP board";
@@ -246,16 +247,6 @@
>;
};
- mcpdm_pins: pinmux_mcpdm_pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_ul_data.abe_pdm_ul_data */
- OMAP4_IOPAD(0x108, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_dl_data.abe_pdm_dl_data */
- OMAP4_IOPAD(0x10a, PIN_INPUT_PULLUP | MUX_MODE0) /* abe_pdm_frame.abe_pdm_frame */
- OMAP4_IOPAD(0x10c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_lb_clk.abe_pdm_lb_clk */
- OMAP4_IOPAD(0x10e, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_clks.abe_clks */
- >;
- };
-
dmic_pins: pinmux_dmic_pins {
pinctrl-single,pins = <
OMAP4_IOPAD(0x110, PIN_OUTPUT | MUX_MODE0) /* abe_dmic_clk1.abe_dmic_clk1 */
@@ -649,16 +640,6 @@
status = "okay";
};
-&mcpdm {
- pinctrl-names = "default";
- pinctrl-0 = <&mcpdm_pins>;
-
- clocks = <&twl6040>;
- clock-names = "pdmclk";
-
- status = "okay";
-};
-
&twl_usb_comparator {
usb-supply = <&vusb>;
};
diff --git a/arch/arm/boot/dts/omap4-var-som-om44.dtsi b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
index 10fce28ceb5b..9562d372077c 100644
--- a/arch/arm/boot/dts/omap4-var-som-om44.dtsi
+++ b/arch/arm/boot/dts/omap4-var-som-om44.dtsi
@@ -7,6 +7,7 @@
* published by the Free Software Foundation.
*/
#include "omap4460.dtsi"
+#include "omap4-mcpdm.dtsi"
/ {
model = "Variscite VAR-SOM-OM44";
@@ -74,16 +75,6 @@
>;
};
- mcpdm_pins: pinmux_mcpdm_pins {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x106, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_ul_data.abe_pdm_ul_data */
- OMAP4_IOPAD(0x108, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_dl_data.abe_pdm_dl_data */
- OMAP4_IOPAD(0x10a, PIN_INPUT_PULLUP | MUX_MODE0) /* abe_pdm_frame.abe_pdm_frame */
- OMAP4_IOPAD(0x10c, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_pdm_lb_clk.abe_pdm_lb_clk */
- OMAP4_IOPAD(0x10e, PIN_INPUT_PULLDOWN | MUX_MODE0) /* abe_clks.abe_clks */
- >;
- };
-
tsc2004_pins: pinmux_tsc2004_pins {
pinctrl-single,pins = <
OMAP4_IOPAD(0x090, PIN_INPUT | MUX_MODE3) /* gpmc_ncs4.gpio_101 (irq) */
@@ -251,16 +242,6 @@
status = "disabled";
};
-&mcpdm {
- pinctrl-names = "default";
- pinctrl-0 = <&mcpdm_pins>;
-
- clocks = <&twl6040>;
- clock-names = "pdmclk";
-
- status = "okay";
-};
-
&gpmc {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 1a96d4317c97..442a737f35fe 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -148,6 +148,9 @@
l4_per: interconnect@48000000 {
};
+ l4_abe: interconnect@40100000 {
+ };
+
ocmcram: ocmcram@40304000 {
compatible = "mmio-sram";
reg = <0x40304000 0xa000>; /* 40k */
@@ -214,130 +217,6 @@
#iommu-cells = <0>;
ti,iommu-bus-err-back;
};
- target-module@40130000 {
- compatible = "ti,sysc-omap2", "ti,sysc";
- ti,hwmods = "wd_timer3";
- reg = <0x40130000 0x4>,
- <0x40130010 0x4>,
- <0x40130014 0x4>;
- reg-names = "rev", "sysc", "syss";
- ti,sysc-mask = <(SYSC_OMAP2_EMUFREE |
- SYSC_OMAP2_SOFTRESET)>;
- ti,sysc-sidle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- ti,syss-mask = <1>;
- /* Domains (V, P, C): abe, abe_pwrdm, abe_clkdm */
- clocks = <&abe_clkctrl OMAP4_WD_TIMER3_CLKCTRL 0>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x40130000 0x1000>, /* MPU private access */
- <0x49030000 0x49030000 0x0080>; /* L3 Interconnect */
-
- wdt3: wdt@0 {
- compatible = "ti,omap4-wdt", "ti,omap3-wdt";
- reg = <0x0 0x80>;
- interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
-
- mcpdm: mcpdm@40132000 {
- compatible = "ti,omap4-mcpdm";
- reg = <0x40132000 0x7f>, /* MPU private access */
- <0x49032000 0x7f>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "mcpdm";
- dmas = <&sdma 65>,
- <&sdma 66>;
- dma-names = "up_link", "dn_link";
- status = "disabled";
- };
-
- dmic: dmic@4012e000 {
- compatible = "ti,omap4-dmic";
- reg = <0x4012e000 0x7f>, /* MPU private access */
- <0x4902e000 0x7f>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dmic";
- dmas = <&sdma 67>;
- dma-names = "up_link";
- status = "disabled";
- };
-
- mcbsp1: mcbsp@40122000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40122000 0xff>, /* MPU private access */
- <0x49022000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp1";
- dmas = <&sdma 33>,
- <&sdma 34>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- mcbsp2: mcbsp@40124000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40124000 0xff>, /* MPU private access */
- <0x49024000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp2";
- dmas = <&sdma 17>,
- <&sdma 18>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- mcbsp3: mcbsp@40126000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40126000 0xff>, /* MPU private access */
- <0x49026000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp3";
- dmas = <&sdma 19>,
- <&sdma 20>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- target-module@40128000 {
- compatible = "ti,sysc-mcasp", "ti,sysc";
- ti,hwmods = "mcasp";
- reg = <0x40128000 0x4>,
- <0x40128004 0x4>;
- reg-names = "rev", "sysc";
- ti,sysc-sidle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- clocks = <&abe_clkctrl OMAP4_MCASP_CLKCTRL 0>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x40128000 0x1000>, /* MPU */
- <0x49028000 0x49028000 0x1000>; /* L3 */
-
- /*
- * Child device unsupported by davinci-mcasp. At least
- * RX path is disabled for omap4, and only DIT mode
- * works with no I2S. See also old Android kernel
- * omap-mcasp driver for more information.
- */
- };
-
target-module@4012c000 {
compatible = "ti,sysc-omap4", "ti,sysc";
ti,hwmods = "slimbus1";
@@ -359,33 +238,6 @@
/* No child device binding or driver in mainline */
};
- target-module@401f1000 {
- compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "aess";
- reg = <0x401f1000 0x4>,
- <0x401f1010 0x4>;
- reg-names = "rev", "sysc";
- ti,sysc-midle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>,
- <SYSC_IDLE_SMART_WKUP>;
- ti,sysc-sidle = <SYSC_IDLE_FORCE>,
- <SYSC_IDLE_NO>,
- <SYSC_IDLE_SMART>;
- clocks = <&abe_clkctrl OMAP4_AESS_CLKCTRL 0>;
- clock-names = "fck";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x00000000 0x401f1000 0x1000>, /* MPU */
- <0x490f1000 0x490f1000 0x1000>; /* L3 */
-
- /*
- * No child device binding or driver in mainline.
- * See Android tree and related upstreaming efforts
- * for the old driver.
- */
- };
-
dmm@4e000000 {
compatible = "ti,omap4-dmm";
reg = <0x4e000000 0x800>;
@@ -417,43 +269,6 @@
hw-caps-temp-alert;
};
- timer5: timer@40138000 {
- compatible = "ti,omap4430-timer";
- reg = <0x40138000 0x80>,
- <0x49038000 0x80>;
- interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer5";
- ti,timer-dsp;
- };
-
- timer6: timer@4013a000 {
- compatible = "ti,omap4430-timer";
- reg = <0x4013a000 0x80>,
- <0x4903a000 0x80>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer6";
- ti,timer-dsp;
- };
-
- timer7: timer@4013c000 {
- compatible = "ti,omap4430-timer";
- reg = <0x4013c000 0x80>,
- <0x4903c000 0x80>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer7";
- ti,timer-dsp;
- };
-
- timer8: timer@4013e000 {
- compatible = "ti,omap4430-timer";
- reg = <0x4013e000 0x80>,
- <0x4903e000 0x80>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer8";
- ti,timer-pwm;
- ti,timer-dsp;
- };
-
aes1: aes@4b501000 {
compatible = "ti,omap4-aes";
ti,hwmods = "aes1";
@@ -629,4 +444,5 @@
};
#include "omap4-l4.dtsi"
+#include "omap4-l4-abe.dtsi"
#include "omap44xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index 61a06f6add3c..2dc3e1950c96 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -667,14 +667,16 @@
};
};
-&mcpdm {
+&mcpdm_module {
+ /* The module on the SoC needs an external clock from the PMIC */
pinctrl-names = "default";
pinctrl-0 = <&mcpdm_pins>;
+ status = "okay";
+};
+&mcpdm {
clocks = <&twl6040>;
clock-names = "pdmclk";
-
- status = "okay";
};
&mcbsp1 {
diff --git a/arch/arm/boot/dts/omap5-l4-abe.dtsi b/arch/arm/boot/dts/omap5-l4-abe.dtsi
new file mode 100644
index 000000000000..dc9d0532f4cf
--- /dev/null
+++ b/arch/arm/boot/dts/omap5-l4-abe.dtsi
@@ -0,0 +1,447 @@
+&l4_abe { /* 0x40100000 */
+ compatible = "ti,omap5-l4-abe", "simple-bus";
+ reg = <0x40100000 0x400>,
+ <0x40100400 0x400>;
+ reg-names = "la", "ap";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x40100000 0x100000>, /* segment 0 */
+ <0x49000000 0x49000000 0x100000>;
+ segment@0 { /* 0x40100000 */
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges =
+ /* CPU to L4 ABE mapping */
+ <0x00000000 0x00000000 0x000400>, /* ap 0 */
+ <0x00000400 0x00000400 0x000400>, /* ap 1 */
+ <0x00022000 0x00022000 0x001000>, /* ap 2 */
+ <0x00023000 0x00023000 0x001000>, /* ap 3 */
+ <0x00024000 0x00024000 0x001000>, /* ap 4 */
+ <0x00025000 0x00025000 0x001000>, /* ap 5 */
+ <0x00026000 0x00026000 0x001000>, /* ap 6 */
+ <0x00027000 0x00027000 0x001000>, /* ap 7 */
+ <0x00028000 0x00028000 0x001000>, /* ap 8 */
+ <0x00029000 0x00029000 0x001000>, /* ap 9 */
+ <0x0002a000 0x0002a000 0x001000>, /* ap 10 */
+ <0x0002b000 0x0002b000 0x001000>, /* ap 11 */
+ <0x0002e000 0x0002e000 0x001000>, /* ap 12 */
+ <0x0002f000 0x0002f000 0x001000>, /* ap 13 */
+ <0x00030000 0x00030000 0x001000>, /* ap 14 */
+ <0x00031000 0x00031000 0x001000>, /* ap 15 */
+ <0x00032000 0x00032000 0x001000>, /* ap 16 */
+ <0x00033000 0x00033000 0x001000>, /* ap 17 */
+ <0x00038000 0x00038000 0x001000>, /* ap 18 */
+ <0x00039000 0x00039000 0x001000>, /* ap 19 */
+ <0x0003a000 0x0003a000 0x001000>, /* ap 20 */
+ <0x0003b000 0x0003b000 0x001000>, /* ap 21 */
+ <0x0003c000 0x0003c000 0x001000>, /* ap 22 */
+ <0x0003d000 0x0003d000 0x001000>, /* ap 23 */
+ <0x0003e000 0x0003e000 0x001000>, /* ap 24 */
+ <0x0003f000 0x0003f000 0x001000>, /* ap 25 */
+ <0x00080000 0x00080000 0x010000>, /* ap 26 */
+ <0x00080000 0x00080000 0x001000>, /* ap 27 */
+ <0x000a0000 0x000a0000 0x010000>, /* ap 28 */
+ <0x000a0000 0x000a0000 0x001000>, /* ap 29 */
+ <0x000c0000 0x000c0000 0x010000>, /* ap 30 */
+ <0x000c0000 0x000c0000 0x001000>, /* ap 31 */
+ <0x000f1000 0x000f1000 0x001000>, /* ap 32 */
+ <0x000f2000 0x000f2000 0x001000>, /* ap 33 */
+
+ /* L3 to L4 ABE mapping */
+ <0x49000000 0x49000000 0x000400>, /* ap 0 */
+ <0x49000400 0x49000400 0x000400>, /* ap 1 */
+ <0x49022000 0x49022000 0x001000>, /* ap 2 */
+ <0x49023000 0x49023000 0x001000>, /* ap 3 */
+ <0x49024000 0x49024000 0x001000>, /* ap 4 */
+ <0x49025000 0x49025000 0x001000>, /* ap 5 */
+ <0x49026000 0x49026000 0x001000>, /* ap 6 */
+ <0x49027000 0x49027000 0x001000>, /* ap 7 */
+ <0x49028000 0x49028000 0x001000>, /* ap 8 */
+ <0x49029000 0x49029000 0x001000>, /* ap 9 */
+ <0x4902a000 0x4902a000 0x001000>, /* ap 10 */
+ <0x4902b000 0x4902b000 0x001000>, /* ap 11 */
+ <0x4902e000 0x4902e000 0x001000>, /* ap 12 */
+ <0x4902f000 0x4902f000 0x001000>, /* ap 13 */
+ <0x49030000 0x49030000 0x001000>, /* ap 14 */
+ <0x49031000 0x49031000 0x001000>, /* ap 15 */
+ <0x49032000 0x49032000 0x001000>, /* ap 16 */
+ <0x49033000 0x49033000 0x001000>, /* ap 17 */
+ <0x49038000 0x49038000 0x001000>, /* ap 18 */
+ <0x49039000 0x49039000 0x001000>, /* ap 19 */
+ <0x4903a000 0x4903a000 0x001000>, /* ap 20 */
+ <0x4903b000 0x4903b000 0x001000>, /* ap 21 */
+ <0x4903c000 0x4903c000 0x001000>, /* ap 22 */
+ <0x4903d000 0x4903d000 0x001000>, /* ap 23 */
+ <0x4903e000 0x4903e000 0x001000>, /* ap 24 */
+ <0x4903f000 0x4903f000 0x001000>, /* ap 25 */
+ <0x49080000 0x49080000 0x010000>, /* ap 26 */
+ <0x49080000 0x49080000 0x001000>, /* ap 27 */
+ <0x490a0000 0x490a0000 0x010000>, /* ap 28 */
+ <0x490a0000 0x490a0000 0x001000>, /* ap 29 */
+ <0x490c0000 0x490c0000 0x010000>, /* ap 30 */
+ <0x490c0000 0x490c0000 0x001000>, /* ap 31 */
+ <0x490f1000 0x490f1000 0x001000>, /* ap 32 */
+ <0x490f2000 0x490f2000 0x001000>; /* ap 33 */
+
+ target-module@22000 { /* 0x40122000, ap 2 02.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp1";
+ reg = <0x2208c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x22000 0x1000>,
+ <0x49022000 0x49022000 0x1000>;
+
+ mcbsp1: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49022000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 33>,
+ <&sdma 34>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@24000 { /* 0x40124000, ap 4 04.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp2";
+ reg = <0x2408c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x24000 0x1000>,
+ <0x49024000 0x49024000 0x1000>;
+
+ mcbsp2: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49024000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 17>,
+ <&sdma 18>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@26000 { /* 0x40126000, ap 6 06.0 */
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ ti,hwmods = "mcbsp3";
+ reg = <0x2608c 0x4>;
+ reg-names = "sysc";
+ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
+ SYSC_OMAP2_ENAWAKEUP |
+ SYSC_OMAP2_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x1000>,
+ <0x49026000 0x49026000 0x1000>;
+
+ mcbsp3: mcbsp@0 {
+ compatible = "ti,omap4-mcbsp";
+ reg = <0x0 0xff>, /* MPU private access */
+ <0x49026000 0xff>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common";
+ ti,buffer-size = <128>;
+ dmas = <&sdma 19>,
+ <&sdma 20>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+ };
+
+ target-module@28000 { /* 0x40128000, ap 8 08.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x28000 0x1000>,
+ <0x49028000 0x49028000 0x1000>;
+ };
+
+ target-module@2a000 { /* 0x4012a000, ap 10 0a.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2a000 0x1000>,
+ <0x4902a000 0x4902a000 0x1000>;
+ };
+
+ target-module@2e000 { /* 0x4012e000, ap 12 0c.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ ti,hwmods = "dmic";
+ reg = <0x2e000 0x4>,
+ <0x2e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_DMIC_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x2e000 0x1000>,
+ <0x4902e000 0x4902e000 0x1000>;
+
+ dmic: dmic@0 {
+ compatible = "ti,omap4-dmic";
+ reg = <0x0 0x7f>, /* MPU private access */
+ <0x4902e000 0x7f>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 67>;
+ dma-names = "up_link";
+ status = "disabled";
+ };
+ };
+
+ target-module@30000 { /* 0x40130000, ap 14 0e.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x30000 0x1000>,
+ <0x49030000 0x49030000 0x1000>;
+ };
+
+ mcpdm_module: target-module@32000 { /* 0x40132000, ap 16 10.0 */
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ ti,hwmods = "mcpdm";
+ reg = <0x32000 0x4>,
+ <0x32010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_MCPDM_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x32000 0x1000>,
+ <0x49032000 0x49032000 0x1000>;
+
+ /* Must only be enabled for boards with pdmclk wired */
+ status = "disabled";
+
+ mcpdm: mcpdm@0 {
+ compatible = "ti,omap4-mcpdm";
+ reg = <0x0 0x7f>, /* MPU private access */
+ <0x49032000 0x7f>; /* L3 Interconnect */
+ reg-names = "mpu", "dma";
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 65>,
+ <&sdma 66>;
+ dma-names = "up_link", "dn_link";
+ };
+ };
+
+ target-module@38000 { /* 0x40138000, ap 18 12.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer5";
+ reg = <0x38000 0x4>,
+ <0x38010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_TIMER5_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x38000 0x1000>,
+ <0x49038000 0x49038000 0x1000>;
+
+ timer5: timer@0 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x0 0x80>,
+ <0x49038000 0x80>;
+ clocks = <&abe_clkctrl OMAP5_TIMER5_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ ti,timer-pwm;
+ };
+ };
+
+ target-module@3a000 { /* 0x4013a000, ap 20 14.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer6";
+ reg = <0x3a000 0x4>,
+ <0x3a010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_TIMER6_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3a000 0x1000>,
+ <0x4903a000 0x4903a000 0x1000>;
+
+ timer6: timer@0 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x0 0x80>,
+ <0x4903a000 0x80>;
+ clocks = <&abe_clkctrl OMAP5_TIMER6_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ ti,timer-pwm;
+ };
+ };
+
+ target-module@3c000 { /* 0x4013c000, ap 22 16.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer7";
+ reg = <0x3c000 0x4>,
+ <0x3c010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_TIMER7_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3c000 0x1000>,
+ <0x4903c000 0x4903c000 0x1000>;
+
+ timer7: timer@0 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x0 0x80>,
+ <0x4903c000 0x80>;
+ clocks = <&abe_clkctrl OMAP5_TIMER7_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ };
+ };
+
+ target-module@3e000 { /* 0x4013e000, ap 24 18.0 */
+ compatible = "ti,sysc-omap4-timer", "ti,sysc";
+ ti,hwmods = "timer8";
+ reg = <0x3e000 0x4>,
+ <0x3e010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
+ SYSC_OMAP4_SOFTRESET)>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ /* Domains (V, P, C): core, abe_pwrdm, abe_clkdm */
+ clocks = <&abe_clkctrl OMAP5_TIMER8_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3e000 0x1000>,
+ <0x4903e000 0x4903e000 0x1000>;
+
+ timer8: timer@0 {
+ compatible = "ti,omap5430-timer";
+ reg = <0x0 0x80>,
+ <0x4903e000 0x80>;
+ clocks = <&abe_clkctrl OMAP5_TIMER8_CLKCTRL 24>;
+ clock-names = "fck";
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-dsp;
+ ti,timer-pwm;
+ };
+ };
+
+ target-module@80000 { /* 0x40180000, ap 26 1a.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x80000 0x10000>,
+ <0x49080000 0x49080000 0x10000>;
+ };
+
+ target-module@a0000 { /* 0x401a0000, ap 28 1c.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xa0000 0x10000>,
+ <0x490a0000 0x490a0000 0x10000>;
+ };
+
+ target-module@c0000 { /* 0x401c0000, ap 30 1e.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xc0000 0x10000>,
+ <0x490c0000 0x490c0000 0x10000>;
+ };
+
+ target-module@f1000 { /* 0x401f1000, ap 32 20.0 */
+ compatible = "ti,sysc";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0xf1000 0x1000>,
+ <0x490f1000 0x490f1000 0x1000>;
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 2fefaafdf901..4b40e4748649 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -161,6 +161,9 @@
l4_per: interconnect@48000000 {
};
+ l4_abe: interconnect@40100000 {
+ };
+
ocmcram: ocmcram@40300000 {
compatible = "mmio-sram";
reg = <0x40300000 0x20000>; /* 128k */
@@ -202,115 +205,6 @@
ti,iommu-bus-err-back;
};
- mcpdm: mcpdm@40132000 {
- compatible = "ti,omap4-mcpdm";
- reg = <0x40132000 0x7f>, /* MPU private access */
- <0x49032000 0x7f>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "mcpdm";
- dmas = <&sdma 65>,
- <&sdma 66>;
- dma-names = "up_link", "dn_link";
- status = "disabled";
- };
-
- dmic: dmic@4012e000 {
- compatible = "ti,omap4-dmic";
- reg = <0x4012e000 0x7f>, /* MPU private access */
- <0x4902e000 0x7f>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dmic";
- dmas = <&sdma 67>;
- dma-names = "up_link";
- status = "disabled";
- };
-
- mcbsp1: mcbsp@40122000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40122000 0xff>, /* MPU private access */
- <0x49022000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp1";
- dmas = <&sdma 33>,
- <&sdma 34>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- mcbsp2: mcbsp@40124000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40124000 0xff>, /* MPU private access */
- <0x49024000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp2";
- dmas = <&sdma 17>,
- <&sdma 18>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- mcbsp3: mcbsp@40126000 {
- compatible = "ti,omap4-mcbsp";
- reg = <0x40126000 0xff>, /* MPU private access */
- <0x49026000 0xff>; /* L3 Interconnect */
- reg-names = "mpu", "dma";
- interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "common";
- ti,buffer-size = <128>;
- ti,hwmods = "mcbsp3";
- dmas = <&sdma 19>,
- <&sdma 20>;
- dma-names = "tx", "rx";
- status = "disabled";
- };
-
- timer5: timer@40138000 {
- compatible = "ti,omap5430-timer";
- reg = <0x40138000 0x80>,
- <0x49038000 0x80>;
- interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer5";
- ti,timer-dsp;
- ti,timer-pwm;
- };
-
- timer6: timer@4013a000 {
- compatible = "ti,omap5430-timer";
- reg = <0x4013a000 0x80>,
- <0x4903a000 0x80>;
- interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer6";
- ti,timer-dsp;
- ti,timer-pwm;
- };
-
- timer7: timer@4013c000 {
- compatible = "ti,omap5430-timer";
- reg = <0x4013c000 0x80>,
- <0x4903c000 0x80>;
- interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer7";
- ti,timer-dsp;
- };
-
- timer8: timer@4013e000 {
- compatible = "ti,omap5430-timer";
- reg = <0x4013e000 0x80>,
- <0x4903e000 0x80>;
- interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "timer8";
- ti,timer-dsp;
- ti,timer-pwm;
- };
-
dmm@4e000000 {
compatible = "ti,omap5-dmm";
reg = <0x4e000000 0x800>;
@@ -517,3 +411,6 @@
&core_thermal {
coefficients = <0 2000>;
};
+
+#include "omap5-l4-abe.dtsi"
+#include "omap54xx-clocks.dtsi"
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index bd6907db615b..65975df6a8c3 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -714,6 +714,7 @@
interrupt-controller;
#interrupt-cells = <2>;
gpio-controller;
+ gpio-ranges = <&pm8921_gpio 0 0 44>;
#gpio-cells = <2>;
};
@@ -1302,8 +1303,9 @@
<0x04700300 0x200>,
<0x04700500 0x5c>;
reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
- clock-names = "iface_clk";
- clocks = <&mmcc DSI_M_AHB_CLK>;
+ clock-names = "iface_clk", "ref";
+ clocks = <&mmcc DSI_M_AHB_CLK>,
+ <&cxo_board>;
};
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 9e75f97770ce..1008dfbcb972 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -400,8 +400,8 @@
#address-cells = <3>;
#size-cells = <2>;
- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
- 0x82000000 0 0x40300000 0x40300000 0 0x400000>;
+ ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
+ <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 02afc6a42005..356e9535f7a6 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -326,6 +326,7 @@
interrupt-controller;
#interrupt-cells = <2>;
gpio-controller;
+ gpio-ranges = <&pmicgpio 0 0 6>;
#gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index 65a994f0e09b..ec5cbc468bd3 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -295,6 +295,7 @@
interrupt-controller;
#interrupt-cells = <2>;
gpio-controller;
+ gpio-ranges = <&pm8058_gpio 0 0 44>;
#gpio-cells = <2>;
};
diff --git a/arch/arm/boot/dts/qcom-pma8084.dtsi b/arch/arm/boot/dts/qcom-pma8084.dtsi
index 8f5ea7add20f..ea1ca166165c 100644
--- a/arch/arm/boot/dts/qcom-pma8084.dtsi
+++ b/arch/arm/boot/dts/qcom-pma8084.dtsi
@@ -31,6 +31,7 @@
compatible = "qcom,pma8084-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pma8084_gpios 0 0 22>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/r7s72100-rskrza1.dts b/arch/arm/boot/dts/r7s72100-rskrza1.dts
index 8ee44a100e9a..ff24301dc1be 100644
--- a/arch/arm/boot/dts/r7s72100-rskrza1.dts
+++ b/arch/arm/boot/dts/r7s72100-rskrza1.dts
@@ -34,12 +34,23 @@
};
leds {
- status = "okay";
compatible = "gpio-leds";
led0 {
gpios = <&port7 1 GPIO_ACTIVE_LOW>;
};
+
+ led1 {
+ gpios = <&io_expander1 0 GPIO_ACTIVE_LOW>;
+ };
+
+ led2 {
+ gpios = <&io_expander1 1 GPIO_ACTIVE_LOW>;
+ };
+
+ led3 {
+ gpios = <&io_expander1 2 GPIO_ACTIVE_LOW>;
+ };
};
};
@@ -47,6 +58,34 @@
clock-frequency = <13330000>;
};
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins>;
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ io_expander1: gpio@20 {
+ compatible = "onnn,cat9554";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ io_expander2: gpio@21 {
+ compatible = "onnn,cat9554";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "renesas,r1ex24016", "atmel,24c16";
+ reg = <0x50>;
+ pagesize = <16>;
+ };
+};
+
&usb_x1_clk {
clock-frequency = <48000000>;
};
@@ -56,6 +95,11 @@
};
&pinctrl {
+ /* RIIC ch3 (Port Expander, EEPROM (MAC Addr), Audio Codec) */
+ i2c3_pins: i2c3 {
+ pinmux = <RZA1_PINMUX(1, 6, 1)>, /* RIIC3SCL */
+ <RZA1_PINMUX(1, 7, 1)>; /* RIIC3SDA */
+ };
/* Serial Console */
scif2_pins: serial2 {
diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
index d530f451467e..f70f4a3e5c43 100644
--- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts
@@ -19,7 +19,7 @@
};
chosen {
- bootargs = "ignore_loglevel root=/dev/nfs ip=dhcp rw";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial0:115200n8";
};
@@ -166,6 +166,33 @@
};
&bsc {
+ flash@0 {
+ compatible = "cfi-flash", "mtd-rom";
+ reg = <0x0 0x08000000>;
+ bank-width = <2>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x00000000 0x00040000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "uboot-env";
+ reg = <0x00040000 0x00040000>;
+ read-only;
+ };
+ partition@80000 {
+ label = "flash";
+ reg = <0x00080000 0x07f80000>;
+ };
+ };
+ };
+
ethernet@8000000 {
compatible = "smsc,lan9220", "smsc,lan9115";
reg = <0x08000000 0x1000>;
diff --git a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
index 77d18242ef59..2840eb0d6fd4 100644
--- a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
+++ b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts
@@ -22,6 +22,17 @@
stdout-path = "serial1:115200n8";
};
+ hdmi-out {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&bridge_out>;
+ };
+ };
+ };
+
memory@40000000 {
device_type = "memory";
reg = <0 0x40000000 0 0x20000000>;
@@ -80,10 +91,42 @@
status = "okay";
};
+&du {
+ pinctrl-0 = <&du0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+
+ ports {
+ port@0 {
+ endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+ };
+};
+
+&ehci1 {
+ status = "okay";
+};
+
&extal_clk {
clock-frequency = <20000000>;
};
+&gpio2 {
+ interrupt-fixup {
+ gpio-hog;
+ gpios = <29 GPIO_ACTIVE_HIGH>;
+ line-name = "hdmi-hpd-int";
+ input;
+ };
+};
+
+&hsusb0 {
+ status = "okay";
+};
+
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
@@ -97,12 +140,60 @@
};
};
+&i2c4 {
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <100000>;
+
+ hdmi@39 {
+ compatible = "sil,sii9022";
+ reg = <0x39>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ bridge_in: endpoint {
+ remote-endpoint = <&du_out_rgb0>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ bridge_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+ };
+};
+
+&ohci1 {
+ status = "okay";
+};
+
&pfc {
avb_pins: avb {
groups = "avb_mdio", "avb_gmii_tx_rx";
function = "avb";
};
+ du0_pins: du0 {
+ groups = "du0_rgb888", "du0_sync", "du0_disp", "du0_clk0_out";
+ function = "du0";
+ };
+
+ i2c4_pins: i2c4 {
+ groups = "i2c4_e";
+ function = "i2c4";
+ };
+
i2c3_pins: i2c3 {
groups = "i2c3_c";
function = "i2c3";
@@ -135,6 +226,16 @@
function = "sdhi2";
power-source = <1800>;
};
+
+ usb0_pins: usb0 {
+ groups = "usb0";
+ function = "usb0";
+ };
+
+ usb1_pins: usb1 {
+ groups = "usb1";
+ function = "usb1";
+ };
};
&qspi0 {
@@ -195,3 +296,25 @@
sd-uhs-sdr50;
status = "okay";
};
+
+&usb2_phy0 {
+ status = "okay";
+};
+
+&usb2_phy1 {
+ status = "okay";
+};
+
+&usbphy0 {
+ pinctrl-0 = <&usb0_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
+
+&usbphy1 {
+ pinctrl-0 = <&usb1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi
index f4e232bf9d03..56cb10b42ed9 100644
--- a/arch/arm/boot/dts/r8a77470.dtsi
+++ b/arch/arm/boot/dts/r8a77470.dtsi
@@ -325,6 +325,77 @@
status = "disabled";
};
+ hsusb0: hsusb@e6590000 {
+ compatible = "renesas,usbhs-r8a77470",
+ "renesas,rcar-gen2-usbhs";
+ reg = <0 0xe6590000 0 0x100>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 704>;
+ dmas = <&usb_dmac00 0>, <&usb_dmac00 1>,
+ <&usb_dmac10 0>, <&usb_dmac10 1>;
+ dma-names = "ch0", "ch1", "ch2", "ch3";
+ renesas,buswait = <4>;
+ phys = <&usb0 1>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
+ status = "disabled";
+ };
+
+ usbphy0: usb-phy@e6590100 {
+ compatible = "renesas,usb-phy-r8a77470",
+ "renesas,rcar-gen2-usb-phy";
+ reg = <0 0xe6590100 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cpg CPG_MOD 704>;
+ clock-names = "usbhs";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 704>;
+ status = "disabled";
+
+ usb0: usb-channel@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+ };
+
+ hsusb1: hsusb@e6598000 {
+ compatible = "renesas,usbhs-r8a77470",
+ "renesas,rcar-gen2-usbhs";
+ reg = <0 0xe6598000 0 0x100>;
+ interrupts = <GIC_SPI 291 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 706>;
+ dmas = <&usb_dmac01 0>, <&usb_dmac01 1>,
+ <&usb_dmac11 0>, <&usb_dmac11 1>;
+ dma-names = "ch0", "ch1", "ch2", "ch3";
+ renesas,buswait = <4>;
+ /* We need to turn on usbphy0 to make usbphy1 work */
+ phys = <&usb1 1>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 706>;
+ status = "disabled";
+ };
+
+ usbphy1: usb-phy@e6598100 {
+ compatible = "renesas,usb-phy-r8a77470",
+ "renesas,rcar-gen2-usb-phy";
+ reg = <0 0xe6598100 0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cpg CPG_MOD 706>;
+ clock-names = "usbhs";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 706>;
+ status = "disabled";
+
+ usb1: usb-channel@0 {
+ reg = <0>;
+ #phy-cells = <1>;
+ };
+ };
+
usb_dmac00: dma-controller@e65a0000 {
compatible = "renesas,r8a77470-usb-dmac",
"renesas,usb-dmac";
@@ -588,6 +659,216 @@
status = "disabled";
};
+ hscif0: serial@e62c0000 {
+ compatible = "renesas,hscif-r8a77470",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c0000 0 0x60>;
+ interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 717>,
+ <&cpg CPG_CORE R8A77470_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x39>, <&dmac0 0x3a>,
+ <&dmac1 0x39>, <&dmac1 0x3a>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
+ status = "disabled";
+ };
+
+ hscif1: serial@e62c8000 {
+ compatible = "renesas,hscif-r8a77470",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62c8000 0 0x60>;
+ interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 716>,
+ <&cpg CPG_CORE R8A77470_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x4d>, <&dmac0 0x4e>,
+ <&dmac1 0x4d>, <&dmac1 0x4e>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
+ };
+
+ hscif2: serial@e62d0000 {
+ compatible = "renesas,hscif-r8a77470",
+ "renesas,rcar-gen2-hscif", "renesas,hscif";
+ reg = <0 0xe62d0000 0 0x60>;
+ interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 713>,
+ <&cpg CPG_CORE R8A77470_CLK_ZS>, <&scif_clk>;
+ clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac0 0x3b>, <&dmac0 0x3c>,
+ <&dmac1 0x3b>, <&dmac1 0x3c>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 713>;
+ status = "disabled";
+ };
+
+ pwm0: pwm@e6e30000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e30000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm1: pwm@e6e31000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e31000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@e6e32000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e32000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@e6e33000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e33000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@e6e34000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e34000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm5: pwm@e6e35000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e35000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm6: pwm@e6e36000 {
+ compatible = "renesas,pwm-r8a77470", "renesas,pwm-rcar";
+ reg = <0 0xe6e36000 0 0x8>;
+ clocks = <&cpg CPG_MOD 523>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 523>;
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ vin0: video@e6ef0000 {
+ compatible = "renesas,vin-r8a77470",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef0000 0 0x1000>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 811>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 811>;
+ status = "disabled";
+ };
+
+ vin1: video@e6ef1000 {
+ compatible = "renesas,vin-r8a77470",
+ "renesas,rcar-gen2-vin";
+ reg = <0 0xe6ef1000 0 0x1000>;
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 810>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 810>;
+ status = "disabled";
+ };
+
+ ohci0: usb@ee080000 {
+ compatible = "generic-ohci";
+ reg = <0 0xee080000 0 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>;
+ phys = <&usb0 0>, <&usb2_phy0>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ status = "disabled";
+ };
+
+ ehci0: usb@ee080100 {
+ compatible = "generic-ehci";
+ reg = <0 0xee080100 0 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 703>;
+ phys = <&usb0 0>, <&usb2_phy0>;
+ phy-names = "usb";
+ companion = <&ohci0>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ status = "disabled";
+ };
+
+ usb2_phy0: usb-phy@ee080200 {
+ compatible = "renesas,usb2-phy-r8a77470";
+ reg = <0 0xee080200 0 0x700>;
+ clocks = <&cpg CPG_MOD 703>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 703>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
+ ohci1: usb@ee0c0000 {
+ compatible = "generic-ohci";
+ reg = <0 0xee0c0000 0 0x100>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>;
+ phys = <&usb0 1>, <&usb2_phy1>, <&usb1 0>;
+ phy-names = "usb";
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 705>;
+ status = "disabled";
+ };
+
+ ehci1: usb@ee0c0100 {
+ compatible = "generic-ehci";
+ reg = <0 0xee0c0100 0 0x100>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 705>;
+ phys = <&usb0 1>, <&usb2_phy1>, <&usb1 0>;
+ phy-names = "usb";
+ companion = <&ohci1>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 705>;
+ status = "disabled";
+ };
+
+ usb2_phy1: usb-phy@ee0c0200 {
+ compatible = "renesas,usb2-phy-r8a77470";
+ reg = <0 0xee0c0200 0 0x700>;
+ clocks = <&cpg CPG_MOD 705>;
+ power-domains = <&sysc R8A77470_PD_ALWAYS_ON>;
+ resets = <&cpg 705>;
+ #phy-cells = <0>;
+ status = "disabled";
+ };
+
sdhi0: sd@ee100000 {
compatible = "renesas,sdhi-r8a77470",
"renesas,rcar-gen2-sdhi";
@@ -643,6 +924,38 @@
resets = <&cpg 408>;
};
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a77470";
+ reg = <0 0xfeb00000 0 0x40000>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>;
+ clock-names = "du.0", "du.1";
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ du_out_rgb0: endpoint {
+ };
+ };
+ port@1 {
+ reg = <1>;
+ du_out_rgb1: endpoint {
+ };
+ };
+ port@2 {
+ reg = <2>;
+ du_out_lvds0: endpoint {
+ };
+ };
+ };
+ };
+
prr: chipid@ff000044 {
compatible = "renesas,prr";
reg = <0 0xff000044 0 4>;
diff --git a/arch/arm/boot/dts/r8a7778-bockw.dts b/arch/arm/boot/dts/r8a7778-bockw.dts
index cecb22924ec4..0b49956069fc 100644
--- a/arch/arm/boot/dts/r8a7778-bockw.dts
+++ b/arch/arm/boot/dts/r8a7778-bockw.dts
@@ -25,7 +25,7 @@
};
chosen {
- bootargs = "ignore_loglevel ip=dhcp root=/dev/nfs rw";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm/boot/dts/r8a7779-marzen.dts b/arch/arm/boot/dts/r8a7779-marzen.dts
index abc14e7a4c93..d4bee1ec9044 100644
--- a/arch/arm/boot/dts/r8a7779-marzen.dts
+++ b/arch/arm/boot/dts/r8a7779-marzen.dts
@@ -21,7 +21,7 @@
};
chosen {
- bootargs = "ignore_loglevel root=/dev/nfs ip=on";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
index f92301290b02..b6fa80c3b07e 100644
--- a/arch/arm/boot/dts/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/r8a7792-blanche.dts
@@ -308,6 +308,26 @@
};
};
+&iic3 {
+ status = "okay";
+
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ interrupt-parent = <&irqc>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+ };
+};
+
&du {
pinctrl-0 = <&du0_pins &du1_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index 8e9eb4b704d3..38fb43d11b27 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -22,6 +22,7 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
+ i2c6 = &iic3;
spi0 = &qspi;
spi1 = &msiof0;
spi2 = &msiof1;
@@ -444,6 +445,23 @@
status = "disabled";
};
+ iic3: i2c@e60b0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "renesas,iic-r8a7792",
+ "renesas,rcar-gen2-iic",
+ "renesas,rmobile-iic";
+ reg = <0 0xe60b0000 0 0x425>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+ dmas = <&dmac0 0x77>, <&dmac0 0x78>,
+ <&dmac1 0x77>, <&dmac1 0x78>;
+ dma-names = "tx", "rx", "tx", "rx";
+ power-domains = <&sysc R8A7792_PD_ALWAYS_ON>;
+ resets = <&cpg 926>;
+ status = "disabled";
+ };
+
dmac0: dma-controller@e6700000 {
compatible = "renesas,dmac-r8a7792",
"renesas,rcar-dmac";
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index ef7e2a837df6..0ab3d8d57f6d 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -199,6 +199,22 @@
};
};
+&pci0 {
+ status = "okay";
+ pinctrl-0 = <&usb0_pins>;
+ pinctrl-names = "default";
+};
+
+&pci1 {
+ status = "okay";
+ pinctrl-0 = <&usb1_pins>;
+ pinctrl-names = "default";
+};
+
+&usbphy {
+ status = "okay";
+};
+
&du {
pinctrl-0 = <&du_pins>;
pinctrl-names = "default";
@@ -293,6 +309,16 @@
function = "sdhi1";
power-source = <1800>;
};
+
+ usb0_pins: usb0 {
+ groups = "usb0";
+ function = "usb0";
+ };
+
+ usb1_pins: usb1 {
+ groups = "usb1";
+ function = "usb1";
+ };
};
&cmt0 {
@@ -377,6 +403,27 @@
pinctrl-names = "i2c-exio4";
};
+&i2c7 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ pmic@58 {
+ compatible = "dlg,da9063";
+ reg = <0x58>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+ };
+};
+
&vin0 {
status = "okay";
pinctrl-0 = <&vin0_pins>;
diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts
index 0173eb11ec28..fb3cf005cc90 100644
--- a/arch/arm/boot/dts/rk3036-kylin.dts
+++ b/arch/arm/boot/dts/rk3036-kylin.dts
@@ -368,31 +368,31 @@
&pinctrl {
leds {
led_ctl: led-ctl {
- rockchip,pins = <2 30 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <2 2 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PA2 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
sdio {
bt_wake_h: bt-wake-h {
- rockchip,pins = <2 8 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PB0 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
sdmmc {
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <2 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sleep {
global_pwroff: global-pwroff {
- rockchip,pins = <2 7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA7 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 59c90863b0e7..0290ea4edd32 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -551,71 +551,71 @@
pwm0 {
pwm0_pin: pwm0-pin {
- rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
- rockchip,pins = <0 1 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA1 2 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_pin: pwm2-pin {
- rockchip,pins = <0 1 2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA1 2 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_pin: pwm3-pin {
- rockchip,pins = <0 27 1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD3 1 &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <1 16 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC0 1 &pcfg_pull_none>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <1 15 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PB7 1 &pcfg_pull_default>;
};
sdmmc_cd: sdmmc-cd {
- rockchip,pins = <1 17 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PC1 1 &pcfg_pull_default>;
};
sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins = <1 18 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PC2 1 &pcfg_pull_default>;
};
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <1 18 RK_FUNC_1 &pcfg_pull_default>,
- <1 19 RK_FUNC_1 &pcfg_pull_default>,
- <1 20 RK_FUNC_1 &pcfg_pull_default>,
- <1 21 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PC2 1 &pcfg_pull_default>,
+ <1 RK_PC3 1 &pcfg_pull_default>,
+ <1 RK_PC4 1 &pcfg_pull_default>,
+ <1 RK_PC5 1 &pcfg_pull_default>;
};
};
sdio {
sdio_bus1: sdio-bus1 {
- rockchip,pins = <0 11 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PB3 1 &pcfg_pull_default>;
};
sdio_bus4: sdio-bus4 {
- rockchip,pins = <0 11 RK_FUNC_1 &pcfg_pull_default>,
- <0 12 RK_FUNC_1 &pcfg_pull_default>,
- <0 13 RK_FUNC_1 &pcfg_pull_default>,
- <0 14 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PB3 1 &pcfg_pull_default>,
+ <0 RK_PB4 1 &pcfg_pull_default>,
+ <0 RK_PB5 1 &pcfg_pull_default>,
+ <0 RK_PB6 1 &pcfg_pull_default>;
};
sdio_cmd: sdio-cmd {
- rockchip,pins = <0 8 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PB0 1 &pcfg_pull_default>;
};
sdio_clk: sdio-clk {
- rockchip,pins = <0 9 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB1 1 &pcfg_pull_none>;
};
};
@@ -625,135 +625,135 @@
* We also have external pulls, so disable the internal ones.
*/
emmc_clk: emmc-clk {
- rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <2 1 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PA1 2 &pcfg_pull_default>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 24 RK_FUNC_2 &pcfg_pull_default>,
- <1 25 RK_FUNC_2 &pcfg_pull_default>,
- <1 26 RK_FUNC_2 &pcfg_pull_default>,
- <1 27 RK_FUNC_2 &pcfg_pull_default>,
- <1 28 RK_FUNC_2 &pcfg_pull_default>,
- <1 29 RK_FUNC_2 &pcfg_pull_default>,
- <1 30 RK_FUNC_2 &pcfg_pull_default>,
- <1 31 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PD0 2 &pcfg_pull_default>,
+ <1 RK_PD1 2 &pcfg_pull_default>,
+ <1 RK_PD2 2 &pcfg_pull_default>,
+ <1 RK_PD3 2 &pcfg_pull_default>,
+ <1 RK_PD4 2 &pcfg_pull_default>,
+ <1 RK_PD5 2 &pcfg_pull_default>,
+ <1 RK_PD6 2 &pcfg_pull_default>,
+ <1 RK_PD7 2 &pcfg_pull_default>;
};
};
emac {
emac_xfer: emac-xfer {
- rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_default>, /* crs_dvalid */
- <2 13 RK_FUNC_1 &pcfg_pull_default>, /* tx_en */
- <2 14 RK_FUNC_1 &pcfg_pull_default>, /* mac_clk */
- <2 15 RK_FUNC_1 &pcfg_pull_default>, /* rx_err */
- <2 16 RK_FUNC_1 &pcfg_pull_default>, /* rxd1 */
- <2 17 RK_FUNC_1 &pcfg_pull_default>, /* rxd0 */
- <2 18 RK_FUNC_1 &pcfg_pull_default>, /* txd1 */
- <2 19 RK_FUNC_1 &pcfg_pull_default>; /* txd0 */
+ rockchip,pins = <2 RK_PB2 1 &pcfg_pull_default>, /* crs_dvalid */
+ <2 RK_PB5 1 &pcfg_pull_default>, /* tx_en */
+ <2 RK_PB6 1 &pcfg_pull_default>, /* mac_clk */
+ <2 RK_PB7 1 &pcfg_pull_default>, /* rx_err */
+ <2 RK_PC0 1 &pcfg_pull_default>, /* rxd1 */
+ <2 RK_PC1 1 &pcfg_pull_default>, /* rxd0 */
+ <2 RK_PC2 1 &pcfg_pull_default>, /* txd1 */
+ <2 RK_PC3 1 &pcfg_pull_default>; /* txd0 */
};
emac_mdio: emac-mdio {
- rockchip,pins = <2 12 RK_FUNC_1 &pcfg_pull_default>, /* mac_md */
- <2 25 RK_FUNC_1 &pcfg_pull_default>; /* mac_mdclk */
+ rockchip,pins = <2 RK_PB4 1 &pcfg_pull_default>, /* mac_md */
+ <2 RK_PD1 1 &pcfg_pull_default>; /* mac_mdclk */
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>,
- <0 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 1 &pcfg_pull_none>,
+ <0 RK_PA1 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <0 2 RK_FUNC_1 &pcfg_pull_none>,
- <0 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA2 1 &pcfg_pull_none>,
+ <0 RK_PA3 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <2 20 RK_FUNC_1 &pcfg_pull_none>,
- <2 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC4 1 &pcfg_pull_none>,
+ <2 RK_PC5 1 &pcfg_pull_none>;
};
};
i2s {
i2s_bus: i2s-bus {
- rockchip,pins = <1 0 RK_FUNC_1 &pcfg_pull_default>,
- <1 1 RK_FUNC_1 &pcfg_pull_default>,
- <1 2 RK_FUNC_1 &pcfg_pull_default>,
- <1 3 RK_FUNC_1 &pcfg_pull_default>,
- <1 4 RK_FUNC_1 &pcfg_pull_default>,
- <1 5 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_default>,
+ <1 RK_PA1 1 &pcfg_pull_default>,
+ <1 RK_PA2 1 &pcfg_pull_default>,
+ <1 RK_PA3 1 &pcfg_pull_default>,
+ <1 RK_PA4 1 &pcfg_pull_default>,
+ <1 RK_PA5 1 &pcfg_pull_default>;
};
};
hdmi {
hdmi_ctl: hdmi-ctl {
- rockchip,pins = <1 8 RK_FUNC_1 &pcfg_pull_none>,
- <1 9 RK_FUNC_1 &pcfg_pull_none>,
- <1 10 RK_FUNC_1 &pcfg_pull_none>,
- <1 11 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB0 1 &pcfg_pull_none>,
+ <1 RK_PB1 1 &pcfg_pull_none>,
+ <1 RK_PB2 1 &pcfg_pull_none>,
+ <1 RK_PB3 1 &pcfg_pull_none>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <0 16 RK_FUNC_1 &pcfg_pull_default>,
- <0 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC0 1 &pcfg_pull_default>,
+ <0 RK_PC1 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <0 18 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PC2 1 &pcfg_pull_default>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <0 19 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC3 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <2 22 RK_FUNC_1 &pcfg_pull_default>,
- <2 23 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC6 1 &pcfg_pull_default>,
+ <2 RK_PC7 1 &pcfg_pull_none>;
};
/* no rts / cts for uart1 */
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_default>,
- <1 19 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_default>,
+ <1 RK_PC3 2 &pcfg_pull_none>;
};
/* no rts / cts for uart2 */
};
spi-pins {
spi_txd:spi-txd {
- rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PD5 3 &pcfg_pull_default>;
};
spi_rxd:spi-rxd {
- rockchip,pins = <1 28 RK_FUNC_3 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PD4 3 &pcfg_pull_default>;
};
spi_clk:spi-clk {
- rockchip,pins = <2 0 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PA0 2 &pcfg_pull_default>;
};
spi_cs0:spi-cs0 {
- rockchip,pins = <1 30 RK_FUNC_3 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PD6 3 &pcfg_pull_default>;
};
spi_cs1:spi-cs1 {
- rockchip,pins = <1 31 RK_FUNC_3 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PD7 3 &pcfg_pull_default>;
};
};
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index ce525b956ae5..7e01f6406a86 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -168,7 +168,7 @@
&pinctrl {
lan8720a {
phy_int: phy-int {
- rockchip,pins = <RK_GPIO1 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts
index 9d2216d71f70..365eff621113 100644
--- a/arch/arm/boot/dts/rk3066a-mk808.dts
+++ b/arch/arm/boot/dts/rk3066a-mk808.dts
@@ -30,6 +30,17 @@
};
};
+ hdmi_con {
+ compatible = "hdmi-connector";
+ type = "c";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
vcc_io: vcc-io {
compatible = "regulator-fixed";
regulator-name = "vcc_io";
@@ -91,6 +102,20 @@
};
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_in_vop1 {
+ status = "disabled";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
&mmc0 {
bus-width = <4>;
cap-mmc-highspeed;
@@ -111,25 +136,25 @@
&pinctrl {
usb-host {
host_drv: host-drv {
- rockchip,pins = <RK_GPIO0 6 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
usb-otg {
otg_drv: otg-drv {
- rockchip,pins = <RK_GPIO0 5 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
sdmmc {
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <RK_GPIO3 7 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
sdio {
wifi_pwr: wifi-pwr {
- rockchip,pins = <RK_GPIO3 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
@@ -150,6 +175,10 @@
status = "okay";
};
+&vop0 {
+ status = "okay";
+};
+
&wdt {
status = "okay";
};
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index 949fa800582d..f9db6bb9fa11 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -322,71 +322,71 @@
ak8963 {
comp_int: comp-int {
- rockchip,pins = <4 17 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PC1 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
emac {
rmii_rst: rmii-rst {
- rockchip,pins = <1 30 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <1 RK_PD6 RK_FUNC_GPIO &pcfg_output_high>;
};
};
ir {
ir_int: ir-int {
- rockchip,pins = <6 1 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <6 RK_PA1 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <6 2 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <6 RK_PA2 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
mma8452 {
gsensor_int: gsensor-int {
- rockchip,pins = <4 16 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PC0 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
mmc {
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <3 7 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PA7 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
usb_host {
host_drv: host-drv {
- rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_default>;
};
hub_rst: hub-rst {
- rockchip,pins = <1 31 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <1 RK_PD7 RK_FUNC_GPIO &pcfg_output_high>;
};
sata_pwr: sata-pwr {
- rockchip,pins = <4 22 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PC6 RK_FUNC_GPIO &pcfg_pull_default>;
};
sata_reset: sata-reset {
- rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_output_high>;
};
};
usb_otg {
otg_drv: otg-drv {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_default>;
};
};
tps {
pmic_int: pmic-int {
- rockchip,pins = <6 4 RK_FUNC_GPIO &pcfg_pull_default>;
+ rockchip,pins = <6 RK_PA4 RK_FUNC_GPIO &pcfg_pull_default>;
};
pwr_hold: pwr-hold {
- rockchip,pins = <6 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <6 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index 653127a377fa..3d1b02f45ffd 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -80,6 +80,11 @@
vop0_out: port {
#address-cells = <1>;
#size-cells = <0>;
+
+ vop0_out_hdmi: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&hdmi_in_vop0>;
+ };
};
};
@@ -101,6 +106,49 @@
vop1_out: port {
#address-cells = <1>;
#size-cells = <0>;
+
+ vop1_out_hdmi: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&hdmi_in_vop1>;
+ };
+ };
+ };
+
+ hdmi: hdmi@10116000 {
+ compatible = "rockchip,rk3066-hdmi";
+ reg = <0x10116000 0x2000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_HDMI>;
+ clock-names = "hclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmii2c_xfer>, <&hdmi_hpd>;
+ power-domains = <&power RK3066_PD_VIO>;
+ rockchip,grf = <&grf>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in_vop0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vop0_out_hdmi>;
+ };
+
+ hdmi_in_vop1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vop1_out_hdmi>;
+ };
+ };
+
+ hdmi_out: port@1 {
+ reg = <1>;
+ };
};
};
@@ -343,33 +391,33 @@
emac {
emac_xfer: emac-xfer {
- rockchip,pins = <RK_GPIO1 16 RK_FUNC_2 &pcfg_pull_none>, /* mac_clk */
- <RK_GPIO1 17 RK_FUNC_2 &pcfg_pull_none>, /* tx_en */
- <RK_GPIO1 18 RK_FUNC_2 &pcfg_pull_none>, /* txd1 */
- <RK_GPIO1 19 RK_FUNC_2 &pcfg_pull_none>, /* txd0 */
- <RK_GPIO1 20 RK_FUNC_2 &pcfg_pull_none>, /* rx_err */
- <RK_GPIO1 21 RK_FUNC_2 &pcfg_pull_none>, /* crs_dvalid */
- <RK_GPIO1 22 RK_FUNC_2 &pcfg_pull_none>, /* rxd1 */
- <RK_GPIO1 23 RK_FUNC_2 &pcfg_pull_none>; /* rxd0 */
+ rockchip,pins = <1 RK_PC0 2 &pcfg_pull_none>, /* mac_clk */
+ <1 RK_PC1 2 &pcfg_pull_none>, /* tx_en */
+ <1 RK_PC2 2 &pcfg_pull_none>, /* txd1 */
+ <1 RK_PC3 2 &pcfg_pull_none>, /* txd0 */
+ <1 RK_PC4 2 &pcfg_pull_none>, /* rx_err */
+ <1 RK_PC5 2 &pcfg_pull_none>, /* crs_dvalid */
+ <1 RK_PC6 2 &pcfg_pull_none>, /* rxd1 */
+ <1 RK_PC7 2 &pcfg_pull_none>; /* rxd0 */
};
emac_mdio: emac-mdio {
- rockchip,pins = <RK_GPIO1 24 RK_FUNC_2 &pcfg_pull_none>, /* mac_md */
- <RK_GPIO1 25 RK_FUNC_2 &pcfg_pull_none>; /* mac_mdclk */
+ rockchip,pins = <1 RK_PD0 2 &pcfg_pull_none>, /* mac_md */
+ <1 RK_PD1 2 &pcfg_pull_none>; /* mac_mdclk */
};
};
emmc {
emmc_clk: emmc-clk {
- rockchip,pins = <RK_GPIO3 31 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PD7 2 &pcfg_pull_default>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <RK_GPIO4 9 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PB1 2 &pcfg_pull_default>;
};
emmc_rst: emmc-rst {
- rockchip,pins = <RK_GPIO4 10 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PB2 2 &pcfg_pull_default>;
};
/*
@@ -380,245 +428,256 @@
*/
};
+ hdmi {
+ hdmi_hpd: hdmi-hpd {
+ rockchip,pins = <0 RK_PA0 1 &pcfg_pull_default>;
+ };
+
+ hdmii2c_xfer: hdmii2c-xfer {
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>,
+ <0 RK_PA2 1 &pcfg_pull_none>;
+ };
+ };
+
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <RK_GPIO2 28 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO2 29 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD4 1 &pcfg_pull_none>,
+ <2 RK_PD5 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <RK_GPIO2 30 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO2 31 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD6 1 &pcfg_pull_none>,
+ <2 RK_PD7 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <RK_GPIO3 0 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA0 1 &pcfg_pull_none>,
+ <3 RK_PA1 1 &pcfg_pull_none>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <RK_GPIO3 2 RK_FUNC_2 &pcfg_pull_none>,
- <RK_GPIO3 3 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA2 2 &pcfg_pull_none>,
+ <3 RK_PA3 2 &pcfg_pull_none>;
};
};
i2c4 {
i2c4_xfer: i2c4-xfer {
- rockchip,pins = <RK_GPIO3 4 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 1 &pcfg_pull_none>,
+ <3 RK_PA5 1 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_out: pwm0-out {
- rockchip,pins = <RK_GPIO0 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 1 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_out: pwm1-out {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 1 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_out: pwm2-out {
- rockchip,pins = <RK_GPIO0 30 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD6 1 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_out: pwm3-out {
- rockchip,pins = <RK_GPIO0 31 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD7 1 &pcfg_pull_none>;
};
};
spi0 {
spi0_clk: spi0-clk {
- rockchip,pins = <RK_GPIO1 5 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA5 2 &pcfg_pull_default>;
};
spi0_cs0: spi0-cs0 {
- rockchip,pins = <RK_GPIO1 4 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA4 2 &pcfg_pull_default>;
};
spi0_tx: spi0-tx {
- rockchip,pins = <RK_GPIO1 7 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA7 2 &pcfg_pull_default>;
};
spi0_rx: spi0-rx {
- rockchip,pins = <RK_GPIO1 6 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA6 2 &pcfg_pull_default>;
};
spi0_cs1: spi0-cs1 {
- rockchip,pins = <RK_GPIO4 15 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <4 RK_PB7 1 &pcfg_pull_default>;
};
};
spi1 {
spi1_clk: spi1-clk {
- rockchip,pins = <RK_GPIO2 19 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PC3 2 &pcfg_pull_default>;
};
spi1_cs0: spi1-cs0 {
- rockchip,pins = <RK_GPIO2 20 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PC4 2 &pcfg_pull_default>;
};
spi1_rx: spi1-rx {
- rockchip,pins = <RK_GPIO2 22 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PC6 2 &pcfg_pull_default>;
};
spi1_tx: spi1-tx {
- rockchip,pins = <RK_GPIO2 21 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PC5 2 &pcfg_pull_default>;
};
spi1_cs1: spi1-cs1 {
- rockchip,pins = <RK_GPIO2 23 RK_FUNC_2 &pcfg_pull_default>;
+ rockchip,pins = <2 RK_PC7 2 &pcfg_pull_default>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_default>,
+ <1 RK_PA1 1 &pcfg_pull_default>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <RK_GPIO1 2 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA2 1 &pcfg_pull_default>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <RK_GPIO1 3 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA3 1 &pcfg_pull_default>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_default>,
+ <1 RK_PA5 1 &pcfg_pull_default>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <RK_GPIO1 6 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_default>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <RK_GPIO1 7 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PA7 1 &pcfg_pull_default>;
};
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <1 RK_PB0 1 &pcfg_pull_default>,
+ <1 RK_PB1 1 &pcfg_pull_default>;
};
/* no rts / cts for uart2 */
};
uart3 {
uart3_xfer: uart3-xfer {
- rockchip,pins = <RK_GPIO3 27 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 28 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PD3 1 &pcfg_pull_default>,
+ <3 RK_PD4 1 &pcfg_pull_default>;
};
uart3_cts: uart3-cts {
- rockchip,pins = <RK_GPIO3 29 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PD5 1 &pcfg_pull_default>;
};
uart3_rts: uart3-rts {
- rockchip,pins = <RK_GPIO3 30 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PD6 1 &pcfg_pull_default>;
};
};
sd0 {
sd0_clk: sd0-clk {
- rockchip,pins = <RK_GPIO3 8 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB0 1 &pcfg_pull_default>;
};
sd0_cmd: sd0-cmd {
- rockchip,pins = <RK_GPIO3 9 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB1 1 &pcfg_pull_default>;
};
sd0_cd: sd0-cd {
- rockchip,pins = <RK_GPIO3 14 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB6 1 &pcfg_pull_default>;
};
sd0_wp: sd0-wp {
- rockchip,pins = <RK_GPIO3 15 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB7 1 &pcfg_pull_default>;
};
sd0_bus1: sd0-bus-width1 {
- rockchip,pins = <RK_GPIO3 10 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB2 1 &pcfg_pull_default>;
};
sd0_bus4: sd0-bus-width4 {
- rockchip,pins = <RK_GPIO3 10 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 11 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 12 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 13 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PB2 1 &pcfg_pull_default>,
+ <3 RK_PB3 1 &pcfg_pull_default>,
+ <3 RK_PB4 1 &pcfg_pull_default>,
+ <3 RK_PB5 1 &pcfg_pull_default>;
};
};
sd1 {
sd1_clk: sd1-clk {
- rockchip,pins = <RK_GPIO3 21 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC5 1 &pcfg_pull_default>;
};
sd1_cmd: sd1-cmd {
- rockchip,pins = <RK_GPIO3 16 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC0 1 &pcfg_pull_default>;
};
sd1_cd: sd1-cd {
- rockchip,pins = <RK_GPIO3 22 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC6 1 &pcfg_pull_default>;
};
sd1_wp: sd1-wp {
- rockchip,pins = <RK_GPIO3 23 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC7 1 &pcfg_pull_default>;
};
sd1_bus1: sd1-bus-width1 {
- rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC1 1 &pcfg_pull_default>;
};
sd1_bus4: sd1-bus-width4 {
- rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 18 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 19 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO3 20 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <3 RK_PC1 1 &pcfg_pull_default>,
+ <3 RK_PC2 1 &pcfg_pull_default>,
+ <3 RK_PC3 1 &pcfg_pull_default>,
+ <3 RK_PC4 1 &pcfg_pull_default>;
};
};
i2s0 {
i2s0_bus: i2s0-bus {
- rockchip,pins = <RK_GPIO0 7 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 8 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 9 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 10 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 11 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 12 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 13 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 14 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 15 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PA7 1 &pcfg_pull_default>,
+ <0 RK_PB0 1 &pcfg_pull_default>,
+ <0 RK_PB1 1 &pcfg_pull_default>,
+ <0 RK_PB2 1 &pcfg_pull_default>,
+ <0 RK_PB3 1 &pcfg_pull_default>,
+ <0 RK_PB4 1 &pcfg_pull_default>,
+ <0 RK_PB5 1 &pcfg_pull_default>,
+ <0 RK_PB6 1 &pcfg_pull_default>,
+ <0 RK_PB7 1 &pcfg_pull_default>;
};
};
i2s1 {
i2s1_bus: i2s1-bus {
- rockchip,pins = <RK_GPIO0 16 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 17 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 18 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 19 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 20 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 21 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PC0 1 &pcfg_pull_default>,
+ <0 RK_PC1 1 &pcfg_pull_default>,
+ <0 RK_PC2 1 &pcfg_pull_default>,
+ <0 RK_PC3 1 &pcfg_pull_default>,
+ <0 RK_PC4 1 &pcfg_pull_default>,
+ <0 RK_PC5 1 &pcfg_pull_default>;
};
};
i2s2 {
i2s2_bus: i2s2-bus {
- rockchip,pins = <RK_GPIO0 24 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 25 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 26 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 27 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 28 RK_FUNC_1 &pcfg_pull_default>,
- <RK_GPIO0 29 RK_FUNC_1 &pcfg_pull_default>;
+ rockchip,pins = <0 RK_PD0 1 &pcfg_pull_default>,
+ <0 RK_PD1 1 &pcfg_pull_default>,
+ <0 RK_PD2 1 &pcfg_pull_default>,
+ <0 RK_PD3 1 &pcfg_pull_default>,
+ <0 RK_PD4 1 &pcfg_pull_default>,
+ <0 RK_PD5 1 &pcfg_pull_default>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts
index c0eaa9c5490b..c32e1d441cf7 100644
--- a/arch/arm/boot/dts/rk3188-px3-evb.dts
+++ b/arch/arm/boot/dts/rk3188-px3-evb.dts
@@ -247,10 +247,10 @@
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <2 31 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 94bc81c24049..c9a7f5409960 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -307,40 +307,40 @@
act8846 {
act8846_dvs0_ctl: act8846-dvs0-ctl {
- rockchip,pins = <RK_GPIO3 27 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <3 RK_PD3 RK_FUNC_GPIO &pcfg_output_low>;
};
};
hym8563 {
rtc_int: rtc-int {
- rockchip,pins = <RK_GPIO0 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
lan8720a {
phy_int: phy-int {
- rockchip,pins = <RK_GPIO3 26 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
ir-receiver {
ir_recv_pin: ir-recv-pin {
- rockchip,pins = <RK_GPIO0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sd0 {
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <RK_GPIO3 1 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <2 31 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 3ed49898f4b2..10ede65d90f3 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -315,15 +315,15 @@
emmc {
emmc_clk: emmc-clk {
- rockchip,pins = <RK_GPIO0 24 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD0 2 &pcfg_pull_none>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <RK_GPIO0 26 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PD2 2 &pcfg_pull_up>;
};
emmc_rst: emmc-rst {
- rockchip,pins = <RK_GPIO0 27 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD3 2 &pcfg_pull_none>;
};
/*
@@ -336,291 +336,291 @@
emac {
emac_xfer: emac-xfer {
- rockchip,pins = <RK_GPIO3 16 RK_FUNC_2 &pcfg_pull_none>, /* tx_en */
- <RK_GPIO3 17 RK_FUNC_2 &pcfg_pull_none>, /* txd1 */
- <RK_GPIO3 18 RK_FUNC_2 &pcfg_pull_none>, /* txd0 */
- <RK_GPIO3 19 RK_FUNC_2 &pcfg_pull_none>, /* rxd0 */
- <RK_GPIO3 20 RK_FUNC_2 &pcfg_pull_none>, /* rxd1 */
- <RK_GPIO3 21 RK_FUNC_2 &pcfg_pull_none>, /* mac_clk */
- <RK_GPIO3 22 RK_FUNC_2 &pcfg_pull_none>, /* rx_err */
- <RK_GPIO3 23 RK_FUNC_2 &pcfg_pull_none>; /* crs_dvalid */
+ rockchip,pins = <3 RK_PC0 2 &pcfg_pull_none>, /* tx_en */
+ <3 RK_PC1 2 &pcfg_pull_none>, /* txd1 */
+ <3 RK_PC2 2 &pcfg_pull_none>, /* txd0 */
+ <3 RK_PC3 2 &pcfg_pull_none>, /* rxd0 */
+ <3 RK_PC4 2 &pcfg_pull_none>, /* rxd1 */
+ <3 RK_PC5 2 &pcfg_pull_none>, /* mac_clk */
+ <3 RK_PC6 2 &pcfg_pull_none>, /* rx_err */
+ <3 RK_PC7 2 &pcfg_pull_none>; /* crs_dvalid */
};
emac_mdio: emac-mdio {
- rockchip,pins = <RK_GPIO3 24 RK_FUNC_2 &pcfg_pull_none>,
- <RK_GPIO3 25 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD0 2 &pcfg_pull_none>,
+ <3 RK_PD1 2 &pcfg_pull_none>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <RK_GPIO1 24 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 25 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD0 1 &pcfg_pull_none>,
+ <1 RK_PD1 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <RK_GPIO1 26 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 27 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD2 1 &pcfg_pull_none>,
+ <1 RK_PD3 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <RK_GPIO1 28 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 29 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD4 1 &pcfg_pull_none>,
+ <1 RK_PD5 1 &pcfg_pull_none>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <RK_GPIO3 14 RK_FUNC_2 &pcfg_pull_none>,
- <RK_GPIO3 15 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB6 2 &pcfg_pull_none>,
+ <3 RK_PB7 2 &pcfg_pull_none>;
};
};
i2c4 {
i2c4_xfer: i2c4-xfer {
- rockchip,pins = <RK_GPIO1 30 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 31 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD6 1 &pcfg_pull_none>,
+ <1 RK_PD7 1 &pcfg_pull_none>;
};
};
lcdc1 {
lcdc1_dclk: lcdc1-dclk {
- rockchip,pins = <2 RK_PD0 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD0 1 &pcfg_pull_none>;
};
lcdc1_den: lcdc1-den {
- rockchip,pins = <2 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD1 1 &pcfg_pull_none>;
};
lcdc1_hsync: lcdc1-hsync {
- rockchip,pins = <2 RK_PD2 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD2 1 &pcfg_pull_none>;
};
lcdc1_vsync: lcdc1-vsync {
- rockchip,pins = <2 RK_PD3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
lcdc1_rgb24: ldcd1-rgb24 {
- rockchip,pins = <2 RK_PA0 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA1 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA2 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA3 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA4 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA5 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA6 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PA7 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB0 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB1 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB2 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB3 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB4 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB5 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB6 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PB7 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC0 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC1 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC2 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC3 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC4 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC5 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC6 RK_FUNC_1 &pcfg_pull_none>,
- <2 RK_PC7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
+ <2 RK_PA1 1 &pcfg_pull_none>,
+ <2 RK_PA2 1 &pcfg_pull_none>,
+ <2 RK_PA3 1 &pcfg_pull_none>,
+ <2 RK_PA4 1 &pcfg_pull_none>,
+ <2 RK_PA5 1 &pcfg_pull_none>,
+ <2 RK_PA6 1 &pcfg_pull_none>,
+ <2 RK_PA7 1 &pcfg_pull_none>,
+ <2 RK_PB0 1 &pcfg_pull_none>,
+ <2 RK_PB1 1 &pcfg_pull_none>,
+ <2 RK_PB2 1 &pcfg_pull_none>,
+ <2 RK_PB3 1 &pcfg_pull_none>,
+ <2 RK_PB4 1 &pcfg_pull_none>,
+ <2 RK_PB5 1 &pcfg_pull_none>,
+ <2 RK_PB6 1 &pcfg_pull_none>,
+ <2 RK_PB7 1 &pcfg_pull_none>,
+ <2 RK_PC0 1 &pcfg_pull_none>,
+ <2 RK_PC1 1 &pcfg_pull_none>,
+ <2 RK_PC2 1 &pcfg_pull_none>,
+ <2 RK_PC3 1 &pcfg_pull_none>,
+ <2 RK_PC4 1 &pcfg_pull_none>,
+ <2 RK_PC5 1 &pcfg_pull_none>,
+ <2 RK_PC6 1 &pcfg_pull_none>,
+ <2 RK_PC7 1 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_out: pwm0-out {
- rockchip,pins = <RK_GPIO3 27 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD3 1 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_out: pwm1-out {
- rockchip,pins = <RK_GPIO3 28 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD4 1 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_out: pwm2-out {
- rockchip,pins = <RK_GPIO3 29 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD5 1 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_out: pwm3-out {
- rockchip,pins = <RK_GPIO3 30 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD6 1 &pcfg_pull_none>;
};
};
spi0 {
spi0_clk: spi0-clk {
- rockchip,pins = <RK_GPIO1 6 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA6 2 &pcfg_pull_up>;
};
spi0_cs0: spi0-cs0 {
- rockchip,pins = <RK_GPIO1 7 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA7 2 &pcfg_pull_up>;
};
spi0_tx: spi0-tx {
- rockchip,pins = <RK_GPIO1 5 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA5 2 &pcfg_pull_up>;
};
spi0_rx: spi0-rx {
- rockchip,pins = <RK_GPIO1 4 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA4 2 &pcfg_pull_up>;
};
spi0_cs1: spi0-cs1 {
- rockchip,pins = <RK_GPIO1 15 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB7 1 &pcfg_pull_up>;
};
};
spi1 {
spi1_clk: spi1-clk {
- rockchip,pins = <RK_GPIO0 30 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PD6 1 &pcfg_pull_up>;
};
spi1_cs0: spi1-cs0 {
- rockchip,pins = <RK_GPIO0 31 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PD7 1 &pcfg_pull_up>;
};
spi1_rx: spi1-rx {
- rockchip,pins = <RK_GPIO0 28 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PD4 1 &pcfg_pull_up>;
};
spi1_tx: spi1-tx {
- rockchip,pins = <RK_GPIO0 29 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PD5 1 &pcfg_pull_up>;
};
spi1_cs1: spi1-cs1 {
- rockchip,pins = <RK_GPIO1 14 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB6 2 &pcfg_pull_up>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <RK_GPIO1 0 RK_FUNC_1 &pcfg_pull_up>,
- <RK_GPIO1 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up>,
+ <1 RK_PA1 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <RK_GPIO1 2 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA2 1 &pcfg_pull_none>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <RK_GPIO1 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA3 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <RK_GPIO1 4 RK_FUNC_1 &pcfg_pull_up>,
- <RK_GPIO1 5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up>,
+ <1 RK_PA5 1 &pcfg_pull_none>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <RK_GPIO1 6 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <RK_GPIO1 7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA7 1 &pcfg_pull_none>;
};
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <RK_GPIO1 8 RK_FUNC_1 &pcfg_pull_up>,
- <RK_GPIO1 9 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB0 1 &pcfg_pull_up>,
+ <1 RK_PB1 1 &pcfg_pull_none>;
};
/* no rts / cts for uart2 */
};
uart3 {
uart3_xfer: uart3-xfer {
- rockchip,pins = <RK_GPIO1 10 RK_FUNC_1 &pcfg_pull_up>,
- <RK_GPIO1 11 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB2 1 &pcfg_pull_up>,
+ <1 RK_PB3 1 &pcfg_pull_none>;
};
uart3_cts: uart3-cts {
- rockchip,pins = <RK_GPIO1 12 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB4 1 &pcfg_pull_none>;
};
uart3_rts: uart3-rts {
- rockchip,pins = <RK_GPIO1 13 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB5 1 &pcfg_pull_none>;
};
};
sd0 {
sd0_clk: sd0-clk {
- rockchip,pins = <RK_GPIO3 2 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA2 1 &pcfg_pull_none>;
};
sd0_cmd: sd0-cmd {
- rockchip,pins = <RK_GPIO3 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA3 1 &pcfg_pull_none>;
};
sd0_cd: sd0-cd {
- rockchip,pins = <RK_GPIO3 8 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB0 1 &pcfg_pull_none>;
};
sd0_wp: sd0-wp {
- rockchip,pins = <RK_GPIO3 9 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB1 1 &pcfg_pull_none>;
};
sd0_pwr: sd0-pwr {
- rockchip,pins = <RK_GPIO3 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA1 1 &pcfg_pull_none>;
};
sd0_bus1: sd0-bus-width1 {
- rockchip,pins = <RK_GPIO3 4 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 1 &pcfg_pull_none>;
};
sd0_bus4: sd0-bus-width4 {
- rockchip,pins = <RK_GPIO3 4 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 5 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 6 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 1 &pcfg_pull_none>,
+ <3 RK_PA5 1 &pcfg_pull_none>,
+ <3 RK_PA6 1 &pcfg_pull_none>,
+ <3 RK_PA7 1 &pcfg_pull_none>;
};
};
sd1 {
sd1_clk: sd1-clk {
- rockchip,pins = <RK_GPIO3 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC5 1 &pcfg_pull_none>;
};
sd1_cmd: sd1-cmd {
- rockchip,pins = <RK_GPIO3 16 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC0 1 &pcfg_pull_none>;
};
sd1_cd: sd1-cd {
- rockchip,pins = <RK_GPIO3 22 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC6 1 &pcfg_pull_none>;
};
sd1_wp: sd1-wp {
- rockchip,pins = <RK_GPIO3 23 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC7 1 &pcfg_pull_none>;
};
sd1_bus1: sd1-bus-width1 {
- rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC1 1 &pcfg_pull_none>;
};
sd1_bus4: sd1-bus-width4 {
- rockchip,pins = <RK_GPIO3 17 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 18 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 19 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 20 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC1 1 &pcfg_pull_none>,
+ <3 RK_PC2 1 &pcfg_pull_none>,
+ <3 RK_PC3 1 &pcfg_pull_none>,
+ <3 RK_PC4 1 &pcfg_pull_none>;
};
};
i2s0 {
i2s0_bus: i2s0-bus {
- rockchip,pins = <RK_GPIO1 16 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 17 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 18 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 19 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 20 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO1 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC0 1 &pcfg_pull_none>,
+ <1 RK_PC1 1 &pcfg_pull_none>,
+ <1 RK_PC2 1 &pcfg_pull_none>,
+ <1 RK_PC3 1 &pcfg_pull_none>,
+ <1 RK_PC4 1 &pcfg_pull_none>,
+ <1 RK_PC5 1 &pcfg_pull_none>;
};
};
spdif {
spdif_tx: spdif-tx {
- rockchip,pins = <RK_GPIO1 14 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB6 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 29f19076dceb..da102fff96a2 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -865,228 +865,228 @@
emmc {
emmc_clk: emmc-clk {
- rockchip,pins = <2 7 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA7 2 &pcfg_pull_none>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <1 22 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC6 2 &pcfg_pull_none>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 24 RK_FUNC_2 &pcfg_pull_none>,
- <1 25 RK_FUNC_2 &pcfg_pull_none>,
- <1 26 RK_FUNC_2 &pcfg_pull_none>,
- <1 27 RK_FUNC_2 &pcfg_pull_none>,
- <1 28 RK_FUNC_2 &pcfg_pull_none>,
- <1 29 RK_FUNC_2 &pcfg_pull_none>,
- <1 30 RK_FUNC_2 &pcfg_pull_none>,
- <1 31 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD0 2 &pcfg_pull_none>,
+ <1 RK_PD1 2 &pcfg_pull_none>,
+ <1 RK_PD2 2 &pcfg_pull_none>,
+ <1 RK_PD3 2 &pcfg_pull_none>,
+ <1 RK_PD4 2 &pcfg_pull_none>,
+ <1 RK_PD5 2 &pcfg_pull_none>,
+ <1 RK_PD6 2 &pcfg_pull_none>,
+ <1 RK_PD7 2 &pcfg_pull_none>;
};
};
gmac {
rgmii_pins: rgmii-pins {
- rockchip,pins = <2 14 RK_FUNC_1 &pcfg_pull_none>,
- <2 12 RK_FUNC_1 &pcfg_pull_none>,
- <2 25 RK_FUNC_1 &pcfg_pull_none>,
- <2 19 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 18 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 22 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 23 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 9 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 13 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 17 RK_FUNC_1 &pcfg_pull_none>,
- <2 16 RK_FUNC_1 &pcfg_pull_none>,
- <2 21 RK_FUNC_2 &pcfg_pull_none>,
- <2 20 RK_FUNC_2 &pcfg_pull_none>,
- <2 11 RK_FUNC_1 &pcfg_pull_none>,
- <2 8 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB6 1 &pcfg_pull_none>,
+ <2 RK_PB4 1 &pcfg_pull_none>,
+ <2 RK_PD1 1 &pcfg_pull_none>,
+ <2 RK_PC3 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC2 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC6 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC7 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PB1 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PB5 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC1 1 &pcfg_pull_none>,
+ <2 RK_PC0 1 &pcfg_pull_none>,
+ <2 RK_PC5 2 &pcfg_pull_none>,
+ <2 RK_PC4 2 &pcfg_pull_none>,
+ <2 RK_PB3 1 &pcfg_pull_none>,
+ <2 RK_PB0 1 &pcfg_pull_none>;
};
rmii_pins: rmii-pins {
- rockchip,pins = <2 14 RK_FUNC_1 &pcfg_pull_none>,
- <2 12 RK_FUNC_1 &pcfg_pull_none>,
- <2 25 RK_FUNC_1 &pcfg_pull_none>,
- <2 19 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 18 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 13 RK_FUNC_1 &pcfg_pull_none_drv_12ma>,
- <2 17 RK_FUNC_1 &pcfg_pull_none>,
- <2 16 RK_FUNC_1 &pcfg_pull_none>,
- <2 8 RK_FUNC_1 &pcfg_pull_none>,
- <2 15 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB6 1 &pcfg_pull_none>,
+ <2 RK_PB4 1 &pcfg_pull_none>,
+ <2 RK_PD1 1 &pcfg_pull_none>,
+ <2 RK_PC3 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC2 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PB5 1 &pcfg_pull_none_drv_12ma>,
+ <2 RK_PC1 1 &pcfg_pull_none>,
+ <2 RK_PC0 1 &pcfg_pull_none>,
+ <2 RK_PB0 1 &pcfg_pull_none>,
+ <2 RK_PB7 1 &pcfg_pull_none>;
};
phy_pins: phy-pins {
- rockchip,pins = <2 14 RK_FUNC_2 &pcfg_pull_none>,
- <2 8 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB6 2 &pcfg_pull_none>,
+ <2 RK_PB0 2 &pcfg_pull_none>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>,
- <0 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 1 &pcfg_pull_none>,
+ <0 RK_PA1 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <0 2 RK_FUNC_1 &pcfg_pull_none>,
- <0 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA2 1 &pcfg_pull_none>,
+ <0 RK_PA3 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <2 20 RK_FUNC_1 &pcfg_pull_none>,
- <2 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC4 1 &pcfg_pull_none>,
+ <2 RK_PC5 1 &pcfg_pull_none>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <0 6 RK_FUNC_1 &pcfg_pull_none>,
- <0 7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA6 1 &pcfg_pull_none>,
+ <0 RK_PA7 1 &pcfg_pull_none>;
};
};
spi-0 {
spi0_clk: spi0-clk {
- rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 2 &pcfg_pull_up>;
};
spi0_cs0: spi0-cs0 {
- rockchip,pins = <0 14 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB6 2 &pcfg_pull_up>;
};
spi0_tx: spi0-tx {
- rockchip,pins = <0 11 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB3 2 &pcfg_pull_up>;
};
spi0_rx: spi0-rx {
- rockchip,pins = <0 13 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB5 2 &pcfg_pull_up>;
};
spi0_cs1: spi0-cs1 {
- rockchip,pins = <1 12 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB4 1 &pcfg_pull_up>;
};
};
spi-1 {
spi1_clk: spi1-clk {
- rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PC7 2 &pcfg_pull_up>;
};
spi1_cs0: spi1-cs0 {
- rockchip,pins = <2 2 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA2 2 &pcfg_pull_up>;
};
spi1_rx: spi1-rx {
- rockchip,pins = <2 0 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA0 2 &pcfg_pull_up>;
};
spi1_tx: spi1-tx {
- rockchip,pins = <2 1 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA1 2 &pcfg_pull_up>;
};
spi1_cs1: spi1-cs1 {
- rockchip,pins = <2 3 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA3 2 &pcfg_pull_up>;
};
};
i2s1 {
i2s1_bus: i2s1-bus {
- rockchip,pins = <0 8 RK_FUNC_1 &pcfg_pull_none>,
- <0 9 RK_FUNC_1 &pcfg_pull_none>,
- <0 11 RK_FUNC_1 &pcfg_pull_none>,
- <0 12 RK_FUNC_1 &pcfg_pull_none>,
- <0 13 RK_FUNC_1 &pcfg_pull_none>,
- <0 14 RK_FUNC_1 &pcfg_pull_none>,
- <1 2 RK_FUNC_2 &pcfg_pull_none>,
- <1 4 RK_FUNC_2 &pcfg_pull_none>,
- <1 5 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB0 1 &pcfg_pull_none>,
+ <0 RK_PB1 1 &pcfg_pull_none>,
+ <0 RK_PB3 1 &pcfg_pull_none>,
+ <0 RK_PB4 1 &pcfg_pull_none>,
+ <0 RK_PB5 1 &pcfg_pull_none>,
+ <0 RK_PB6 1 &pcfg_pull_none>,
+ <1 RK_PA2 2 &pcfg_pull_none>,
+ <1 RK_PA4 2 &pcfg_pull_none>,
+ <1 RK_PA5 2 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_pin: pwm0-pin {
- rockchip,pins = <3 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC5 1 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
- rockchip,pins = <0 30 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD6 2 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_pin: pwm2-pin {
- rockchip,pins = <1 12 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB4 2 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_pin: pwm3-pin {
- rockchip,pins = <1 11 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB3 2 &pcfg_pull_none>;
};
};
spdif {
spdif_tx: spdif-tx {
- rockchip,pins = <3 31 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD7 2 &pcfg_pull_none>;
};
};
tsadc {
otp_gpio: otp-gpio {
- rockchip,pins = <0 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
otp_out: otp-out {
- rockchip,pins = <0 24 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD0 2 &pcfg_pull_none>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <2 26 RK_FUNC_1 &pcfg_pull_none>,
- <2 27 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD2 1 &pcfg_pull_none>,
+ <2 RK_PD3 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <2 29 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD5 1 &pcfg_pull_none>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <0 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC1 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <1 9 RK_FUNC_1 &pcfg_pull_none>,
- <1 10 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>,
+ <1 RK_PB2 1 &pcfg_pull_none>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <1 8 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB0 1 &pcfg_pull_none>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <1 11 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB3 1 &pcfg_pull_none>;
};
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
- <1 19 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up>,
+ <1 RK_PC3 2 &pcfg_pull_none>;
};
uart21_xfer: uart21-xfer {
- rockchip,pins = <1 10 RK_FUNC_2 &pcfg_pull_up>,
- <1 9 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB2 2 &pcfg_pull_up>,
+ <1 RK_PB1 2 &pcfg_pull_none>;
};
uart2_cts: uart2-cts {
- rockchip,pins = <0 25 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD1 1 &pcfg_pull_none>;
};
uart2_rts: uart2-rts {
- rockchip,pins = <0 24 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD0 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-evb-act8846.dts b/arch/arm/boot/dts/rk3288-evb-act8846.dts
index 6592c809e2a5..80080767c365 100644
--- a/arch/arm/boot/dts/rk3288-evb-act8846.dts
+++ b/arch/arm/boot/dts/rk3288-evb-act8846.dts
@@ -175,13 +175,13 @@
&pinctrl {
lcd {
lcd_en: lcd-en {
- rockchip,pins = <7 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
wifi {
wifi_pwr: wifi-pwr {
- rockchip,pins = <7 9 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 97e4d552ff0f..820440715302 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -314,25 +314,25 @@
backlight {
bl_en: bl-en {
- rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buttons {
pwrbtn: pwrbtn {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
lcd {
lcd_cs: lcd-cs {
- rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
@@ -342,34 +342,34 @@
* high-speed mode on EVB board so bump up to 8ma.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_drv_8ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
eth_phy {
eth_phy_pwr: eth-phy-pwr {
- rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts
index 29af26e6d442..4847cf902a15 100644
--- a/arch/arm/boot/dts/rk3288-fennec.dts
+++ b/arch/arm/boot/dts/rk3288-fennec.dts
@@ -278,27 +278,27 @@
gmac {
phy_int: phy-int {
- rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_pmeb: phy-pmeb {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usbphy {
host_drv: host-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly-beta.dts b/arch/arm/boot/dts/rk3288-firefly-beta.dts
index 0f3c29d7fbab..135e8832141f 100644
--- a/arch/arm/boot/dts/rk3288-firefly-beta.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-beta.dts
@@ -18,13 +18,13 @@
&pinctrl {
act8846 {
pmic_vsel: pmic-vsel {
- rockchip,pins = <7 1 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <7 RK_PA1 RK_FUNC_GPIO &pcfg_output_low>;
};
};
ir {
ir_int: ir-int {
- rockchip,pins = <7 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
index f57f286a93c3..61435d8ee37b 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly-reload-core.dtsi
@@ -224,25 +224,25 @@
act8846 {
pwr_hold: pwr-hold {
- rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_output_high>;
};
pmic_vsel: pmic-vsel {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_output_low>;
};
};
gmac {
phy_int: phy-int {
- rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_pmeb: phy-pmeb {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts
index 3a646c5f4fcf..1574383fd2dc 100644
--- a/arch/arm/boot/dts/rk3288-firefly-reload.dts
+++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts
@@ -306,39 +306,39 @@
&pinctrl {
ir {
ir_int: ir-int {
- rockchip,pins = <7 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
dvp {
dvp_pwr: dvp-pwr {
- rockchip,pins = <0 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
cif_pwr: cif-pwr {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hym8563 {
rtc_int: rtc-int {
- rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
power_led: power-led {
- rockchip,pins = <8 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <8 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
work_led: work-led {
- rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -348,44 +348,44 @@
* high-speed mode on firefly board so bump up to 12ma.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_12ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdio {
wifi_enable: wifi-enable {
- rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_host {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
usbhub_rst: usbhub-rst {
- rockchip,pins = <8 3 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <8 RK_PA3 RK_FUNC_GPIO &pcfg_output_high>;
};
};
usb_otg {
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly.dts b/arch/arm/boot/dts/rk3288-firefly.dts
index 556ab42dd81c..313459dab2e4 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dts
+++ b/arch/arm/boot/dts/rk3288-firefly.dts
@@ -18,13 +18,13 @@
&pinctrl {
act8846 {
pmic_vsel: pmic-vsel {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_output_low>;
};
};
ir {
ir_int: ir-int {
- rockchip,pins = <7 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index a6ff7eac4aa8..5e0a19004e46 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -392,49 +392,49 @@
act8846 {
pwr_hold: pwr-hold {
- rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_output_high>;
};
};
dvp {
dvp_pwr: dvp-pwr {
- rockchip,pins = <0 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
gmac {
phy_int: phy-int {
- rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_pmeb: phy-pmeb {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
hym8563 {
rtc_int: rtc-int {
- rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
power_led: power-led {
- rockchip,pins = <8 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <8 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
work_led: work-led {
- rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -444,38 +444,38 @@
* high-speed mode on firefly board so bump up to 12ma.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_12ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_host {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
usbhub_rst: usbhub-rst {
- rockchip,pins = <8 3 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <8 RK_PA3 RK_FUNC_GPIO &pcfg_output_high>;
};
};
usb_otg {
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts
index fb7365b604bb..c41d012c8850 100644
--- a/arch/arm/boot/dts/rk3288-miqi.dts
+++ b/arch/arm/boot/dts/rk3288-miqi.dts
@@ -296,29 +296,29 @@
act8846 {
pmic_int: pmic-int {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
pmic_sleep: pmic-sleep {
- rockchip,pins = <0 0 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_output_low>;
};
pmic_vsel: pmic-vsel {
- rockchip,pins = <7 1 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <7 RK_PA1 RK_FUNC_GPIO &pcfg_output_low>;
};
};
gmac {
phy_int: phy-int {
- rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_pmeb: phy-pmeb {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
@@ -328,28 +328,28 @@
* high-speed mode on firefly board so bump up to 12ma.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_12ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_host {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-phycore-rdk.dts b/arch/arm/boot/dts/rk3288-phycore-rdk.dts
index 7077c3403483..1e33859de484 100644
--- a/arch/arm/boot/dts/rk3288-phycore-rdk.dts
+++ b/arch/arm/boot/dts/rk3288-phycore-rdk.dts
@@ -160,15 +160,15 @@
buttons {
user_button_pins: user-button-pins {
/* button 1 */
- rockchip,pins = <8 3 RK_FUNC_GPIO &pcfg_pull_up>,
+ rockchip,pins = <8 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>,
/* button 2 */
- <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ <8 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
rv4162 {
i2c_rtc_int: i2c-rtc-int {
- rockchip,pins = <5 10 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
@@ -178,44 +178,44 @@
* high-speed mode on pcm-947 board so bump up to 12 mA.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_12ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_12ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_12ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
touchscreen {
ts_irq_pin: ts-irq-pin {
- rockchip,pins = <5 15 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_host {
host0_vbus_drv: host0-vbus-drv {
- rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
host1_vbus_drv: host1-vbus-drv {
- rockchip,pins = <2 0 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_otg {
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-phycore-som.dtsi b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
index c218dd54c9b5..77a47b9b756d 100644
--- a/arch/arm/boot/dts/rk3288-phycore-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-phycore-som.dtsi
@@ -342,49 +342,49 @@
* We also have external pulls, so disable the internal ones.
*/
emmc_clk: emmc-clk {
- rockchip,pins = <3 18 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ rockchip,pins = <3 RK_PC2 2 &pcfg_pull_none_12ma>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ rockchip,pins = <3 RK_PC0 2 &pcfg_pull_none_12ma>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 1 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 2 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 3 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 4 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 5 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 6 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <3 7 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ rockchip,pins = <3 RK_PA0 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA1 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA2 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA3 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA4 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA5 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA6 2 &pcfg_pull_none_12ma>,
+ <3 RK_PA7 2 &pcfg_pull_none_12ma>;
};
};
gmac {
phy_int: phy-int {
- rockchip,pins = <4 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
leds {
user_led: user-led {
- rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_output_high>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
/* Pin for switching state between sleep and non-sleep state */
pmic_sleep: pmic-sleep {
- rockchip,pins = <RK_GPIO0 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts
index 28972fb4e221..a6ffc381abaa 100644
--- a/arch/arm/boot/dts/rk3288-r89.dts
+++ b/arch/arm/boot/dts/rk3288-r89.dts
@@ -265,39 +265,39 @@
act8846 {
pmic_vsel: pmic-vsel {
- rockchip,pins = <7 1 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <7 RK_PA1 RK_FUNC_GPIO &pcfg_output_low>;
};
pwr_hold: pwr-hold {
- rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_output_high>;
};
};
buttons {
pwrbtn: pwrbtn {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
ir {
ir_int: ir-int {
- rockchip,pins = <7 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-rock2-som.dtsi b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
index 32e1ab336662..9f9e2bfd1295 100644
--- a/arch/arm/boot/dts/rk3288-rock2-som.dtsi
+++ b/arch/arm/boot/dts/rk3288-rock2-som.dtsi
@@ -231,13 +231,13 @@
emmc {
emmc_reset: emmc-reset {
- rockchip,pins = <3 9 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
gmac {
phy_rst: phy-rst {
- rockchip,pins = <4 8 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <4 RK_PB0 RK_FUNC_GPIO &pcfg_output_high>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts
index 5b7e1c9e92e1..cdcdc921ee09 100644
--- a/arch/arm/boot/dts/rk3288-rock2-square.dts
+++ b/arch/arm/boot/dts/rk3288-rock2-square.dts
@@ -204,53 +204,53 @@
&pinctrl {
ir {
ir_int: ir-int {
- rockchip,pins = <8 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
headphone {
hp_det: hp-det {
- rockchip,pins = <7 7 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
};
phone_ctl: phone-ctl {
- rockchip,pins = <8 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sata {
sata_pwr_en: sata-pwr-en {
- rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdio {
wifi_enable: wifi-enable {
- rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-tinker-s.dts b/arch/arm/boot/dts/rk3288-tinker-s.dts
index d97da89bcd51..970e13859198 100644
--- a/arch/arm/boot/dts/rk3288-tinker-s.dts
+++ b/arch/arm/boot/dts/rk3288-tinker-s.dts
@@ -23,3 +23,8 @@
mmc-ddr-1_8v;
status = "okay";
};
+
+&hdmi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_cec_c0>;
+};
diff --git a/arch/arm/boot/dts/rk3288-tinker.dtsi b/arch/arm/boot/dts/rk3288-tinker.dtsi
index ef653c3209bc..293576869546 100644
--- a/arch/arm/boot/dts/rk3288-tinker.dtsi
+++ b/arch/arm/boot/dts/rk3288-tinker.dtsi
@@ -5,6 +5,7 @@
#include "rk3288.dtsi"
#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/rockchip,rk808.h>
/ {
chosen {
@@ -61,6 +62,16 @@
};
};
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 RK808_CLKOUT1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable>;
+ reset-gpios = <&gpio4 RK_PD3 GPIO_ACTIVE_LOW>,
+ <&gpio4 RK_PD4 GPIO_ACTIVE_LOW>;
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,format = "i2s";
@@ -338,6 +349,7 @@
status = "okay";
sdcard-supply = <&vccio_sd>;
+ wifi-supply = <&vcc_18>;
};
&pinctrl {
@@ -352,68 +364,75 @@
backlight {
bl_en: bl-en {
- rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buttons {
pwrbtn: pwrbtn {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
eth_phy {
eth_phy_pwr: eth-phy-pwr {
- rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO \
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO \
&pcfg_pull_up>;
};
dvs_1: dvs-1 {
- rockchip,pins = <RK_GPIO0 11 RK_FUNC_GPIO \
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO \
&pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <RK_GPIO0 12 RK_FUNC_GPIO \
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO \
&pcfg_pull_down>;
};
};
sdmmc {
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC1 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC2 1 &pcfg_pull_up_drv_8ma>,
+ <6 RK_PC3 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 \
+ rockchip,pins = <6 RK_PC4 1 \
&pcfg_pull_none_drv_8ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_pwr: sdmmc-pwr {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
pwr_3g: pwr-3g {
- rockchip,pins = <7 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio {
+ wifi_enable: wifi-enable {
+ rockchip,pins = <4 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>,
+ <4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
@@ -440,6 +459,24 @@
vqmmc-supply = <&vccio_sd>;
};
+&sdio0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ max-frequency = <50000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4>, <&sdio0_cmd>, <&sdio0_clk>, <&sdio0_int>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc_18>;
+ status = "okay";
+};
+
&tsadc {
rockchip,hw-tshut-mode = <1>; /* tshut mode 0:CRU 1:GPIO */
rockchip,hw-tshut-polarity = <1>; /* tshut polarity 0:LOW 1:HIGH */
diff --git a/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi b/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi
index eaf921694e68..445270aa136e 100644
--- a/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-analog-audio.dtsi
@@ -73,7 +73,7 @@
&pinctrl {
codec {
hp_det: hp-det {
- rockchip,pins = <6 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
/*
@@ -82,17 +82,17 @@
* we've got a ts3a227e chip but the driver requires it.
*/
int_codec: int-codec {
- rockchip,pins = <6 7 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <6 RK_PA7 RK_FUNC_GPIO &pcfg_pull_down>;
};
mic_det: mic-det {
- rockchip,pins = <6 11 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
headset {
ts3a227e_int_l: ts3a227e-int-l {
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-brain.dts b/arch/arm/boot/dts/rk3288-veyron-brain.dts
index 5c94a33d695d..406146cbff29 100644
--- a/arch/arm/boot/dts/rk3288-veyron-brain.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-brain.dts
@@ -42,23 +42,23 @@
&pinctrl {
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
- rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
usb-host {
usb2_pwr_en: usb2-pwr-en {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
index b54746df3661..fbef34578100 100644
--- a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
@@ -176,8 +176,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-off-in-suspend;
};
};
};
@@ -229,6 +228,8 @@
&pinctrl {
pinctrl-0 = <
/* Common for sleep and wake, but no owners */
+ &ddr0_retention
+ &ddrio_pwroff
&global_pwroff
/* Wake only */
@@ -236,6 +237,8 @@
>;
pinctrl-1 = <
/* Common for sleep and wake, but no owners */
+ &ddr0_retention
+ &ddrio_pwroff
&global_pwroff
/* Sleep only */
@@ -244,51 +247,51 @@
backlight {
bl_en: bl-en {
- rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buttons {
ap_lid_int_l: ap-lid-int-l {
- rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
charger {
ac_present_ap: ac-present-ap {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
cros-ec {
ec_int: ec-int {
- rockchip,pins = <7 7 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
suspend {
suspend_l_wake: suspend-l-wake {
- rockchip,pins = <0 17 RK_FUNC_GPIO &pcfg_output_low>;
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_output_low>;
};
suspend_l_sleep: suspend-l-sleep {
- rockchip,pins = <0 17 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <0 RK_PC1 RK_FUNC_GPIO &pcfg_output_high>;
};
};
trackpad {
trackpad_int: trackpad-int {
- rockchip,pins = <7 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usb-host {
host1_pwr_en: host1-pwr-en {
- rockchip,pins = <0 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
usbotg_pwren_h: usbotg-pwren-h {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-jaq.dts b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
index 9d6814c7f285..e248f55ee8d2 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jaq.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jaq.dts
@@ -138,39 +138,39 @@
&pinctrl {
backlight {
bl_pwr_en: bl_pwr_en {
- rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buck-5v {
drv_5v: drv-5v {
- rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
- rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
lcd {
lcd_enable_h: lcd-en {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
avdd_1v8_disp_en: avdd-1v8-disp-en {
- rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-jerry.dts b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
index 2ba89895c33a..b1613af83d5d 100644
--- a/arch/arm/boot/dts/rk3288-veyron-jerry.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
@@ -11,7 +11,10 @@
/ {
model = "Google Jerry";
- compatible = "google,veyron-jerry-rev7", "google,veyron-jerry-rev6",
+ compatible = "google,veyron-jerry-rev15", "google,veyron-jerry-rev14",
+ "google,veyron-jerry-rev13", "google,veyron-jerry-rev12",
+ "google,veyron-jerry-rev11", "google,veyron-jerry-rev10",
+ "google,veyron-jerry-rev7", "google,veyron-jerry-rev6",
"google,veyron-jerry-rev5", "google,veyron-jerry-rev4",
"google,veyron-jerry-rev3", "google,veyron-jerry",
"google,veyron", "rockchip,rk3288";
@@ -61,7 +64,9 @@
&rk808 {
pinctrl-names = "default";
- pinctrl-0 = <&pmic_int_l>;
+ pinctrl-0 = <&pmic_int_l &dvs_1 &dvs_2>;
+ dvs-gpios = <&gpio7 RK_PB4 GPIO_ACTIVE_HIGH>,
+ <&gpio7 RK_PB7 GPIO_ACTIVE_HIGH>;
regulators {
mic_vcc: LDO_REG2 {
@@ -101,39 +106,39 @@
&pinctrl {
backlight {
bl_pwr_en: bl_pwr_en {
- rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buck-5v {
drv_5v: drv-5v {
- rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
- rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
lcd {
lcd_enable_h: lcd-en {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
avdd_1v8_disp_en: avdd-1v8-disp-en {
- rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
index d889ab3c8235..e852594417b5 100644
--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
@@ -186,17 +186,17 @@
&pinctrl {
hdmi {
power_hdmi_on: power-hdmi-on {
- rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-mighty.dts b/arch/arm/boot/dts/rk3288-veyron-mighty.dts
new file mode 100644
index 000000000000..27fbc07476d2
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-mighty.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Veyron Mighty Rev 1+ board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ */
+
+/dts-v1/;
+
+#include "rk3288-veyron-jaq.dts"
+
+/ {
+ model = "Google Mighty";
+ compatible = "google,veyron-mighty-rev5", "google,veyron-mighty-rev4",
+ "google,veyron-mighty-rev3", "google,veyron-mighty-rev2",
+ "google,veyron-mighty-rev1", "google,veyron-mighty",
+ "google,veyron", "rockchip,rk3288";
+};
+
+&sdmmc {
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_gpio
+ &sdmmc_wp_gpio &sdmmc_bus4>;
+ wp-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+
+ /delete-property/ disable-wp;
+};
+
+&pinctrl {
+ sdmmc {
+ sdmmc_wp_gpio: sdmmc-wp-gpio {
+ rockchip,pins = <7 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index f95d0c5fcf71..468a1818545d 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -191,65 +191,65 @@
&pinctrl {
backlight {
bl_pwr_en: bl_pwr_en {
- rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buck-5v {
drv_5v: drv-5v {
- rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buttons {
volum_down_l: volum-down-l {
- rockchip,pins = <5 11 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>;
};
volum_up_l: volum-up-l {
- rockchip,pins = <5 10 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
- rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
lcd {
lcd_enable_h: lcd-en {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
avdd_1v8_disp_en: avdd-1v8-disp-en {
- rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
prochot {
gpio_prochot: gpio-prochot {
- rockchip,pins = <2 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
touchscreen {
touch_int: touch-int {
- rockchip,pins = <2 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
touch_rst: touch-rst {
- rockchip,pins = <2 15 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-pinky.dts b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
index 2950aadf49f0..9645be7b3d8c 100644
--- a/arch/arm/boot/dts/rk3288-veyron-pinky.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
@@ -55,19 +55,19 @@
&pinctrl {
buttons {
pwr_key_h: pwr-key-h {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
emmc {
emmc_reset: emmc-reset {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
sdmmc {
sdmmc_wp_gpio: sdmmc-wp-gpio {
- rockchip,pins = <7 10 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
index a4570444cc79..fe950f9863e8 100644
--- a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
@@ -16,18 +16,18 @@
* We also have external pulls, so disable the internal ones.
*/
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <6 17 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <6 18 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <6 19 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_none_drv_8ma>,
+ <6 RK_PC1 1 &pcfg_pull_none_drv_8ma>,
+ <6 RK_PC2 1 &pcfg_pull_none_drv_8ma>,
+ <6 RK_PC3 1 &pcfg_pull_none_drv_8ma>;
};
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none_drv_8ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_none_drv_8ma>;
};
/*
@@ -37,12 +37,12 @@
* think there's a card inserted
*/
sdmmc_cd_disabled: sdmmc-cd-disabled {
- rockchip,pins = <6 22 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <6 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
};
/* This is where we actually hook up CD */
sdmmc_cd_gpio: sdmmc-cd-gpio {
- rockchip,pins = <7 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-speedy.dts b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
index e16421d80d22..2ac8748a3a0c 100644
--- a/arch/arm/boot/dts/rk3288-veyron-speedy.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
@@ -104,39 +104,39 @@
&pinctrl {
backlight {
bl_pwr_en: bl_pwr_en {
- rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
buck-5v {
drv_5v: drv-5v {
- rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
hdmi {
vcc50_hdmi_en: vcc50-hdmi-en {
- rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
lcd {
lcd_enable_h: lcd-en {
- rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>;
};
avdd_1v8_disp_en: avdd-1v8-disp-en {
- rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
dvs_1: dvs-1 {
- rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB4 RK_FUNC_GPIO &pcfg_pull_down>;
};
dvs_2: dvs-2 {
- rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB7 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 192dbc089ade..1252522392c7 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -60,12 +60,19 @@
pinctrl-0 = <&bt_enable_l>, <&wifi_enable_h>;
/*
- * On the module itself this is one of these (depending
- * on the actual card populated):
+ * Depending on the actual card populated GPIO4 D4 and D5
+ * correspond to one of these signals on the module:
+ *
+ * D4:
* - SDIO_RESET_L_WL_REG_ON
* - PDN (power down when low)
+ *
+ * D5:
+ * - BT_I2S_WS_BT_RFDISABLE_L
+ * - No connect
*/
- reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio4 RK_PD4 GPIO_ACTIVE_LOW>,
+ <&gpio4 RK_PD5 GPIO_ACTIVE_LOW>;
};
vcc_5v: vcc-5v {
@@ -93,6 +100,23 @@
regulator-boot-on;
vin-supply = <&vcc_5v>;
};
+
+ vdd_logic: vdd-logic {
+ compatible = "pwm-regulator";
+ regulator-name = "vdd_logic";
+
+ pwms = <&pwm1 0 1994 0>;
+ pwm-supply = <&vcc33_sys>;
+
+ pwm-dutycycle-range = <0x7b 0>;
+ pwm-dutycycle-unit = <0x94>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <4000>;
+ };
};
&cpu0 {
@@ -193,8 +217,7 @@
regulator-max-microvolt = <1250000>;
regulator-ramp-delay = <6001>;
regulator-state-mem {
- regulator-on-in-suspend;
- regulator-suspend-microvolt = <1000000>;
+ regulator-off-in-suspend;
};
};
@@ -376,10 +399,6 @@
&uart0 {
status = "okay";
- /* We need to go faster than 24MHz, so adjust clock parents / rates */
- assigned-clocks = <&cru SCLK_UART0>;
- assigned-clock-rates = <48000000>;
-
/* Pins don't include flow control by default; add that in */
pinctrl-names = "default";
pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
@@ -431,10 +450,14 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <
/* Common for sleep and wake, but no owners */
+ &ddr0_retention
+ &ddrio_pwroff
&global_pwroff
>;
pinctrl-1 = <
/* Common for sleep and wake, but no owners */
+ &ddr0_retention
+ &ddrio_pwroff
&global_pwroff
>;
@@ -458,13 +481,13 @@
buttons {
pwr_key_l: pwr-key-l {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
emmc {
emmc_reset: emmc-reset {
- rockchip,pins = <2 9 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
};
/*
@@ -472,51 +495,51 @@
* We also have external pulls, so disable the internal ones.
*/
emmc_clk: emmc-clk {
- rockchip,pins = <3 18 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <3 RK_PC2 2 &pcfg_pull_none_drv_8ma>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <3 RK_PC0 2 &pcfg_pull_none_drv_8ma>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 1 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 2 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 3 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 5 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 6 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
- <3 7 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <3 RK_PA0 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA1 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA2 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA3 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA4 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA5 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA6 2 &pcfg_pull_none_drv_8ma>,
+ <3 RK_PA7 2 &pcfg_pull_none_drv_8ma>;
};
};
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
reboot {
ap_warm_reset_h: ap-warm-reset-h {
- rockchip,pins = <RK_GPIO0 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
recovery-switch {
rec_mode_l: rec-mode-l {
- rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
sdio0 {
wifi_enable_h: wifienable-h {
- rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
/* NOTE: mislabelled on schematic; should be bt_enable_h */
bt_enable_l: bt-enable-l {
- rockchip,pins = <4 29 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
};
/*
@@ -524,30 +547,30 @@
* We also have external pulls, so disable the internal ones.
*/
sdio0_bus4: sdio0-bus4 {
- rockchip,pins = <4 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <4 21 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <4 22 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
- <4 23 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <4 RK_PC4 1 &pcfg_pull_none_drv_8ma>,
+ <4 RK_PC5 1 &pcfg_pull_none_drv_8ma>,
+ <4 RK_PC6 1 &pcfg_pull_none_drv_8ma>,
+ <4 RK_PC7 1 &pcfg_pull_none_drv_8ma>;
};
sdio0_cmd: sdio0-cmd {
- rockchip,pins = <4 24 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <4 RK_PD0 1 &pcfg_pull_none_drv_8ma>;
};
sdio0_clk: sdio0-clk {
- rockchip,pins = <4 25 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <4 RK_PD1 1 &pcfg_pull_none_drv_8ma>;
};
};
tpm {
tpm_int_h: tpm-int-h {
- rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
write-protect {
fw_wp_ap: fw-wp-ap {
- rockchip,pins = <7 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rk3288-vyasa.dts b/arch/arm/boot/dts/rk3288-vyasa.dts
index 40b232eb5011..ba06e9f97ddc 100644
--- a/arch/arm/boot/dts/rk3288-vyasa.dts
+++ b/arch/arm/boot/dts/rk3288-vyasa.dts
@@ -448,13 +448,13 @@
pmic {
pmic_int: pmic-int {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usb_host {
phy_pwr_en: phy-pwr-en {
- rockchip,pins = <RK_GPIO2 RK_PB1 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <2 RK_PB1 RK_FUNC_GPIO &pcfg_output_high>;
};
usb2_pwr_en: usb2-pwr-en {
@@ -464,7 +464,7 @@
usb_otg {
otg_vbus_drv: otg-vbus-drv {
- rockchip,pins = <RK_GPIO0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index a024d1e7e74c..aa017abf4f42 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -64,6 +64,7 @@
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
+ dynamic-power-coefficient = <370>;
};
cpu1: cpu@501 {
device_type = "cpu";
@@ -74,6 +75,7 @@
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
+ dynamic-power-coefficient = <370>;
};
cpu2: cpu@502 {
device_type = "cpu";
@@ -84,6 +86,7 @@
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
+ dynamic-power-coefficient = <370>;
};
cpu3: cpu@503 {
device_type = "cpu";
@@ -94,6 +97,7 @@
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
+ dynamic-power-coefficient = <370>;
};
};
@@ -569,6 +573,7 @@
pinctrl-1 = <&otp_out>;
pinctrl-2 = <&otp_gpio>;
#thermal-sensor-cells = <1>;
+ rockchip,grf = <&grf>;
rockchip,hw-tshut-temp = <95000>;
status = "disabled";
};
@@ -616,6 +621,7 @@
dr_mode = "host";
phys = <&usbphy2>;
phy-names = "usb2-phy";
+ snps,reset-phy-on-wake;
status = "disabled";
};
@@ -904,6 +910,8 @@
clocks = <&cru SCLK_OTGPHY0>;
clock-names = "phyclk";
#clock-cells = <0>;
+ resets = <&cru SRST_USBOTG_PHY>;
+ reset-names = "phy-reset";
};
usbphy1: usb-phy@334 {
@@ -912,6 +920,8 @@
clocks = <&cru SCLK_OTGPHY1>;
clock-names = "phyclk";
#clock-cells = <0>;
+ resets = <&cru SRST_USBHOST0_PHY>;
+ reset-names = "phy-reset";
};
usbphy2: usb-phy@348 {
@@ -920,6 +930,8 @@
clocks = <&cru SCLK_OTGPHY2>;
clock-names = "phyclk";
#clock-cells = <0>;
+ resets = <&cru SRST_USBHOST1_PHY>;
+ reset-names = "phy-reset";
};
};
};
@@ -1376,19 +1388,6 @@
reg = <0x0 0xffaf0080 0x0 0x20>;
};
- gic: interrupt-controller@ffc01000 {
- compatible = "arm,gic-400";
- interrupt-controller;
- #interrupt-cells = <3>;
- #address-cells = <0>;
-
- reg = <0x0 0xffc01000 0x0 0x1000>,
- <0x0 0xffc02000 0x0 0x2000>,
- <0x0 0xffc04000 0x0 0x2000>,
- <0x0 0xffc06000 0x0 0x2000>;
- interrupts = <GIC_PPI 9 0xf04>;
- };
-
efuse: efuse@ffb40000 {
compatible = "rockchip,rk3288-efuse";
reg = <0x0 0xffb40000 0x0 0x20>;
@@ -1402,6 +1401,19 @@
};
};
+ gic: interrupt-controller@ffc01000 {
+ compatible = "arm,gic-400";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+
+ reg = <0x0 0xffc01000 0x0 0x1000>,
+ <0x0 0xffc02000 0x0 0x2000>,
+ <0x0 0xffc04000 0x0 0x2000>,
+ <0x0 0xffc06000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9 0xf04>;
+ };
+
pinctrl: pinctrl {
compatible = "rockchip,rk3288-pinctrl";
rockchip,grf = <&grf>;
@@ -1529,16 +1541,16 @@
hdmi {
hdmi_cec_c0: hdmi-cec-c0 {
- rockchip,pins = <7 RK_PC0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC0 2 &pcfg_pull_none>;
};
hdmi_cec_c7: hdmi-cec-c7 {
- rockchip,pins = <7 RK_PC7 RK_FUNC_4 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC7 4 &pcfg_pull_none>;
};
hdmi_ddc: hdmi-ddc {
- rockchip,pins = <7 19 RK_FUNC_2 &pcfg_pull_none>,
- <7 20 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC3 2 &pcfg_pull_none>,
+ <7 RK_PC4 2 &pcfg_pull_none>;
};
};
@@ -1561,421 +1573,421 @@
sleep {
global_pwroff: global-pwroff {
- rockchip,pins = <0 0 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 1 &pcfg_pull_none>;
};
ddrio_pwroff: ddrio-pwroff {
- rockchip,pins = <0 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>;
};
ddr0_retention: ddr0-retention {
- rockchip,pins = <0 2 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA2 1 &pcfg_pull_up>;
};
ddr1_retention: ddr1-retention {
- rockchip,pins = <0 3 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA3 1 &pcfg_pull_up>;
};
};
edp {
edp_hpd: edp-hpd {
- rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_down>;
+ rockchip,pins = <7 RK_PB3 2 &pcfg_pull_down>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <0 15 RK_FUNC_1 &pcfg_pull_none>,
- <0 16 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB7 1 &pcfg_pull_none>,
+ <0 RK_PC0 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <8 4 RK_FUNC_1 &pcfg_pull_none>,
- <8 5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <8 RK_PA4 1 &pcfg_pull_none>,
+ <8 RK_PA5 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <6 9 RK_FUNC_1 &pcfg_pull_none>,
- <6 10 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <6 RK_PB1 1 &pcfg_pull_none>,
+ <6 RK_PB2 1 &pcfg_pull_none>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <2 16 RK_FUNC_1 &pcfg_pull_none>,
- <2 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC0 1 &pcfg_pull_none>,
+ <2 RK_PC1 1 &pcfg_pull_none>;
};
};
i2c4 {
i2c4_xfer: i2c4-xfer {
- rockchip,pins = <7 17 RK_FUNC_1 &pcfg_pull_none>,
- <7 18 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC1 1 &pcfg_pull_none>,
+ <7 RK_PC2 1 &pcfg_pull_none>;
};
};
i2c5 {
i2c5_xfer: i2c5-xfer {
- rockchip,pins = <7 19 RK_FUNC_1 &pcfg_pull_none>,
- <7 20 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC3 1 &pcfg_pull_none>,
+ <7 RK_PC4 1 &pcfg_pull_none>;
};
};
i2s0 {
i2s0_bus: i2s0-bus {
- rockchip,pins = <6 0 RK_FUNC_1 &pcfg_pull_none>,
- <6 1 RK_FUNC_1 &pcfg_pull_none>,
- <6 2 RK_FUNC_1 &pcfg_pull_none>,
- <6 3 RK_FUNC_1 &pcfg_pull_none>,
- <6 4 RK_FUNC_1 &pcfg_pull_none>,
- <6 8 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <6 RK_PA0 1 &pcfg_pull_none>,
+ <6 RK_PA1 1 &pcfg_pull_none>,
+ <6 RK_PA2 1 &pcfg_pull_none>,
+ <6 RK_PA3 1 &pcfg_pull_none>,
+ <6 RK_PA4 1 &pcfg_pull_none>,
+ <6 RK_PB0 1 &pcfg_pull_none>;
};
};
lcdc {
lcdc_ctl: lcdc-ctl {
- rockchip,pins = <1 24 RK_FUNC_1 &pcfg_pull_none>,
- <1 25 RK_FUNC_1 &pcfg_pull_none>,
- <1 26 RK_FUNC_1 &pcfg_pull_none>,
- <1 27 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD0 1 &pcfg_pull_none>,
+ <1 RK_PD1 1 &pcfg_pull_none>,
+ <1 RK_PD2 1 &pcfg_pull_none>,
+ <1 RK_PD3 1 &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <6 RK_PC4 1 &pcfg_pull_none>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PC5 1 &pcfg_pull_up>;
};
sdmmc_cd: sdmmc-cd {
- rockchip,pins = <6 22 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PC6 1 &pcfg_pull_up>;
};
sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up>;
};
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up>,
- <6 17 RK_FUNC_1 &pcfg_pull_up>,
- <6 18 RK_FUNC_1 &pcfg_pull_up>,
- <6 19 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <6 RK_PC0 1 &pcfg_pull_up>,
+ <6 RK_PC1 1 &pcfg_pull_up>,
+ <6 RK_PC2 1 &pcfg_pull_up>,
+ <6 RK_PC3 1 &pcfg_pull_up>;
};
};
sdio0 {
sdio0_bus1: sdio0-bus1 {
- rockchip,pins = <4 20 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PC4 1 &pcfg_pull_up>;
};
sdio0_bus4: sdio0-bus4 {
- rockchip,pins = <4 20 RK_FUNC_1 &pcfg_pull_up>,
- <4 21 RK_FUNC_1 &pcfg_pull_up>,
- <4 22 RK_FUNC_1 &pcfg_pull_up>,
- <4 23 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PC4 1 &pcfg_pull_up>,
+ <4 RK_PC5 1 &pcfg_pull_up>,
+ <4 RK_PC6 1 &pcfg_pull_up>,
+ <4 RK_PC7 1 &pcfg_pull_up>;
};
sdio0_cmd: sdio0-cmd {
- rockchip,pins = <4 24 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD0 1 &pcfg_pull_up>;
};
sdio0_clk: sdio0-clk {
- rockchip,pins = <4 25 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD1 1 &pcfg_pull_none>;
};
sdio0_cd: sdio0-cd {
- rockchip,pins = <4 26 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD2 1 &pcfg_pull_up>;
};
sdio0_wp: sdio0-wp {
- rockchip,pins = <4 27 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD3 1 &pcfg_pull_up>;
};
sdio0_pwr: sdio0-pwr {
- rockchip,pins = <4 28 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD4 1 &pcfg_pull_up>;
};
sdio0_bkpwr: sdio0-bkpwr {
- rockchip,pins = <4 29 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD5 1 &pcfg_pull_up>;
};
sdio0_int: sdio0-int {
- rockchip,pins = <4 30 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PD6 1 &pcfg_pull_up>;
};
};
sdio1 {
sdio1_bus1: sdio1-bus1 {
- rockchip,pins = <3 24 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD0 4 &pcfg_pull_up>;
};
sdio1_bus4: sdio1-bus4 {
- rockchip,pins = <3 24 4 &pcfg_pull_up>,
- <3 25 4 &pcfg_pull_up>,
- <3 26 4 &pcfg_pull_up>,
- <3 27 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD0 4 &pcfg_pull_up>,
+ <3 RK_PD1 4 &pcfg_pull_up>,
+ <3 RK_PD2 4 &pcfg_pull_up>,
+ <3 RK_PD3 4 &pcfg_pull_up>;
};
sdio1_cd: sdio1-cd {
- rockchip,pins = <3 28 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD4 4 &pcfg_pull_up>;
};
sdio1_wp: sdio1-wp {
- rockchip,pins = <3 29 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD5 4 &pcfg_pull_up>;
};
sdio1_bkpwr: sdio1-bkpwr {
- rockchip,pins = <3 30 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD6 4 &pcfg_pull_up>;
};
sdio1_int: sdio1-int {
- rockchip,pins = <3 31 4 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD7 4 &pcfg_pull_up>;
};
sdio1_cmd: sdio1-cmd {
- rockchip,pins = <4 6 4 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PA6 4 &pcfg_pull_up>;
};
sdio1_clk: sdio1-clk {
- rockchip,pins = <4 7 4 &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PA7 4 &pcfg_pull_none>;
};
sdio1_pwr: sdio1-pwr {
- rockchip,pins = <4 9 4 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PB1 4 &pcfg_pull_up>;
};
};
emmc {
emmc_clk: emmc-clk {
- rockchip,pins = <3 18 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC2 2 &pcfg_pull_none>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PC0 2 &pcfg_pull_up>;
};
emmc_pwr: emmc-pwr {
- rockchip,pins = <3 9 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PB1 2 &pcfg_pull_up>;
};
emmc_bus1: emmc-bus1 {
- rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA0 2 &pcfg_pull_up>;
};
emmc_bus4: emmc-bus4 {
- rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_up>,
- <3 1 RK_FUNC_2 &pcfg_pull_up>,
- <3 2 RK_FUNC_2 &pcfg_pull_up>,
- <3 3 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA0 2 &pcfg_pull_up>,
+ <3 RK_PA1 2 &pcfg_pull_up>,
+ <3 RK_PA2 2 &pcfg_pull_up>,
+ <3 RK_PA3 2 &pcfg_pull_up>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_up>,
- <3 1 RK_FUNC_2 &pcfg_pull_up>,
- <3 2 RK_FUNC_2 &pcfg_pull_up>,
- <3 3 RK_FUNC_2 &pcfg_pull_up>,
- <3 4 RK_FUNC_2 &pcfg_pull_up>,
- <3 5 RK_FUNC_2 &pcfg_pull_up>,
- <3 6 RK_FUNC_2 &pcfg_pull_up>,
- <3 7 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA0 2 &pcfg_pull_up>,
+ <3 RK_PA1 2 &pcfg_pull_up>,
+ <3 RK_PA2 2 &pcfg_pull_up>,
+ <3 RK_PA3 2 &pcfg_pull_up>,
+ <3 RK_PA4 2 &pcfg_pull_up>,
+ <3 RK_PA5 2 &pcfg_pull_up>,
+ <3 RK_PA6 2 &pcfg_pull_up>,
+ <3 RK_PA7 2 &pcfg_pull_up>;
};
};
spi0 {
spi0_clk: spi0-clk {
- rockchip,pins = <5 12 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB4 1 &pcfg_pull_up>;
};
spi0_cs0: spi0-cs0 {
- rockchip,pins = <5 13 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB5 1 &pcfg_pull_up>;
};
spi0_tx: spi0-tx {
- rockchip,pins = <5 14 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB6 1 &pcfg_pull_up>;
};
spi0_rx: spi0-rx {
- rockchip,pins = <5 15 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB7 1 &pcfg_pull_up>;
};
spi0_cs1: spi0-cs1 {
- rockchip,pins = <5 16 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PC0 1 &pcfg_pull_up>;
};
};
spi1 {
spi1_clk: spi1-clk {
- rockchip,pins = <7 12 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB4 2 &pcfg_pull_up>;
};
spi1_cs0: spi1-cs0 {
- rockchip,pins = <7 13 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB5 2 &pcfg_pull_up>;
};
spi1_rx: spi1-rx {
- rockchip,pins = <7 14 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB6 2 &pcfg_pull_up>;
};
spi1_tx: spi1-tx {
- rockchip,pins = <7 15 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB7 2 &pcfg_pull_up>;
};
};
spi2 {
spi2_cs1: spi2-cs1 {
- rockchip,pins = <8 3 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PA3 1 &pcfg_pull_up>;
};
spi2_clk: spi2-clk {
- rockchip,pins = <8 6 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PA6 1 &pcfg_pull_up>;
};
spi2_cs0: spi2-cs0 {
- rockchip,pins = <8 7 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PA7 1 &pcfg_pull_up>;
};
spi2_rx: spi2-rx {
- rockchip,pins = <8 8 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PB0 1 &pcfg_pull_up>;
};
spi2_tx: spi2-tx {
- rockchip,pins = <8 9 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <8 RK_PB1 1 &pcfg_pull_up>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <4 16 RK_FUNC_1 &pcfg_pull_up>,
- <4 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PC0 1 &pcfg_pull_up>,
+ <4 RK_PC1 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <4 18 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <4 RK_PC2 1 &pcfg_pull_up>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <4 19 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PC3 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <5 8 RK_FUNC_1 &pcfg_pull_up>,
- <5 9 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PB0 1 &pcfg_pull_up>,
+ <5 RK_PB1 1 &pcfg_pull_none>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <5 10 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB2 1 &pcfg_pull_up>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <5 11 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PB3 1 &pcfg_pull_none>;
};
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <7 22 RK_FUNC_1 &pcfg_pull_up>,
- <7 23 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC6 1 &pcfg_pull_up>,
+ <7 RK_PC7 1 &pcfg_pull_none>;
};
/* no rts / cts for uart2 */
};
uart3 {
uart3_xfer: uart3-xfer {
- rockchip,pins = <7 7 RK_FUNC_1 &pcfg_pull_up>,
- <7 8 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA7 1 &pcfg_pull_up>,
+ <7 RK_PB0 1 &pcfg_pull_none>;
};
uart3_cts: uart3-cts {
- rockchip,pins = <7 9 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <7 RK_PB1 1 &pcfg_pull_up>;
};
uart3_rts: uart3-rts {
- rockchip,pins = <7 10 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PB2 1 &pcfg_pull_none>;
};
};
uart4 {
uart4_xfer: uart4-xfer {
- rockchip,pins = <5 15 3 &pcfg_pull_up>,
- <5 14 3 &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PB7 3 &pcfg_pull_up>,
+ <5 RK_PB6 3 &pcfg_pull_none>;
};
uart4_cts: uart4-cts {
- rockchip,pins = <5 12 3 &pcfg_pull_up>;
+ rockchip,pins = <5 RK_PB4 3 &pcfg_pull_up>;
};
uart4_rts: uart4-rts {
- rockchip,pins = <5 13 3 &pcfg_pull_none>;
+ rockchip,pins = <5 RK_PB5 3 &pcfg_pull_none>;
};
};
tsadc {
otp_gpio: otp-gpio {
- rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
};
otp_out: otp-out {
- rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB2 1 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_pin: pwm0-pin {
- rockchip,pins = <7 0 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA0 1 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
- rockchip,pins = <7 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PA1 1 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_pin: pwm2-pin {
- rockchip,pins = <7 22 3 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC6 3 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_pin: pwm3-pin {
- rockchip,pins = <7 23 3 &pcfg_pull_none>;
+ rockchip,pins = <7 RK_PC7 3 &pcfg_pull_none>;
};
};
gmac {
rgmii_pins: rgmii-pins {
- rockchip,pins = <3 30 3 &pcfg_pull_none>,
- <3 31 3 &pcfg_pull_none>,
- <3 26 3 &pcfg_pull_none>,
- <3 27 3 &pcfg_pull_none>,
- <3 28 3 &pcfg_pull_none_12ma>,
- <3 29 3 &pcfg_pull_none_12ma>,
- <3 24 3 &pcfg_pull_none_12ma>,
- <3 25 3 &pcfg_pull_none_12ma>,
- <4 0 3 &pcfg_pull_none>,
- <4 5 3 &pcfg_pull_none>,
- <4 6 3 &pcfg_pull_none>,
- <4 9 3 &pcfg_pull_none_12ma>,
- <4 4 3 &pcfg_pull_none_12ma>,
- <4 1 3 &pcfg_pull_none>,
- <4 3 3 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD6 3 &pcfg_pull_none>,
+ <3 RK_PD7 3 &pcfg_pull_none>,
+ <3 RK_PD2 3 &pcfg_pull_none>,
+ <3 RK_PD3 3 &pcfg_pull_none>,
+ <3 RK_PD4 3 &pcfg_pull_none_12ma>,
+ <3 RK_PD5 3 &pcfg_pull_none_12ma>,
+ <3 RK_PD0 3 &pcfg_pull_none_12ma>,
+ <3 RK_PD1 3 &pcfg_pull_none_12ma>,
+ <4 RK_PA0 3 &pcfg_pull_none>,
+ <4 RK_PA5 3 &pcfg_pull_none>,
+ <4 RK_PA6 3 &pcfg_pull_none>,
+ <4 RK_PB1 3 &pcfg_pull_none_12ma>,
+ <4 RK_PA4 3 &pcfg_pull_none_12ma>,
+ <4 RK_PA1 3 &pcfg_pull_none>,
+ <4 RK_PA3 3 &pcfg_pull_none>;
};
rmii_pins: rmii-pins {
- rockchip,pins = <3 30 3 &pcfg_pull_none>,
- <3 31 3 &pcfg_pull_none>,
- <3 28 3 &pcfg_pull_none>,
- <3 29 3 &pcfg_pull_none>,
- <4 0 3 &pcfg_pull_none>,
- <4 5 3 &pcfg_pull_none>,
- <4 4 3 &pcfg_pull_none>,
- <4 1 3 &pcfg_pull_none>,
- <4 2 3 &pcfg_pull_none>,
- <4 3 3 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD6 3 &pcfg_pull_none>,
+ <3 RK_PD7 3 &pcfg_pull_none>,
+ <3 RK_PD4 3 &pcfg_pull_none>,
+ <3 RK_PD5 3 &pcfg_pull_none>,
+ <4 RK_PA0 3 &pcfg_pull_none>,
+ <4 RK_PA5 3 &pcfg_pull_none>,
+ <4 RK_PA4 3 &pcfg_pull_none>,
+ <4 RK_PA1 3 &pcfg_pull_none>,
+ <4 RK_PA2 3 &pcfg_pull_none>,
+ <4 RK_PA3 3 &pcfg_pull_none>;
};
};
spdif {
spdif_tx: spdif-tx {
- rockchip,pins = <RK_GPIO6 11 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <6 RK_PB3 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rv1108-elgin-r1.dts
index 1c4507b66fdd..b1db924710c8 100644
--- a/arch/arm/boot/dts/rv1108-elgin-r1.dts
+++ b/arch/arm/boot/dts/rv1108-elgin-r1.dts
@@ -37,7 +37,6 @@
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
- disable-wp;
no-sd;
no-sdio;
non-removable;
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index f47ac86d2852..5876690ee09e 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -682,58 +682,58 @@
emmc {
emmc_bus8: emmc-bus8 {
- rockchip,pins = <2 RK_PA0 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA1 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA2 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA3 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA4 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA5 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA6 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <2 RK_PA7 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PA0 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA1 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA2 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA3 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA4 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA5 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA6 2 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA7 2 &pcfg_pull_up_drv_8ma>;
};
emmc_clk: emmc-clk {
- rockchip,pins = <2 RK_PB6 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <2 RK_PB6 1 &pcfg_pull_none_drv_8ma>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <2 RK_PB4 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PB4 2 &pcfg_pull_up_drv_8ma>;
};
};
gmac {
rmii_pins: rmii-pins {
- rockchip,pins = <1 RK_PC5 RK_FUNC_2 &pcfg_pull_none>,
- <1 RK_PC3 RK_FUNC_2 &pcfg_pull_none>,
- <1 RK_PC4 RK_FUNC_2 &pcfg_pull_none>,
- <1 RK_PB2 RK_FUNC_3 &pcfg_pull_none_drv_12ma>,
- <1 RK_PB3 RK_FUNC_3 &pcfg_pull_none_drv_12ma>,
- <1 RK_PB4 RK_FUNC_3 &pcfg_pull_none_drv_12ma>,
- <1 RK_PB5 RK_FUNC_3 &pcfg_pull_none>,
- <1 RK_PB6 RK_FUNC_3 &pcfg_pull_none>,
- <1 RK_PB7 RK_FUNC_3 &pcfg_pull_none>,
- <1 RK_PC2 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC5 2 &pcfg_pull_none>,
+ <1 RK_PC3 2 &pcfg_pull_none>,
+ <1 RK_PC4 2 &pcfg_pull_none>,
+ <1 RK_PB2 3 &pcfg_pull_none_drv_12ma>,
+ <1 RK_PB3 3 &pcfg_pull_none_drv_12ma>,
+ <1 RK_PB4 3 &pcfg_pull_none_drv_12ma>,
+ <1 RK_PB5 3 &pcfg_pull_none>,
+ <1 RK_PB6 3 &pcfg_pull_none>,
+ <1 RK_PB7 3 &pcfg_pull_none>,
+ <1 RK_PC2 3 &pcfg_pull_none>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <0 RK_PB1 RK_FUNC_1 &pcfg_pull_none_smt>,
- <0 RK_PB2 RK_FUNC_1 &pcfg_pull_none_smt>;
+ rockchip,pins = <0 RK_PB1 1 &pcfg_pull_none_smt>,
+ <0 RK_PB2 1 &pcfg_pull_none_smt>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <2 RK_PD3 RK_FUNC_1 &pcfg_pull_up>,
- <2 RK_PD4 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PD3 1 &pcfg_pull_up>,
+ <2 RK_PD4 1 &pcfg_pull_up>;
};
};
i2c2m1 {
i2c2m1_xfer: i2c2m1-xfer {
- rockchip,pins = <0 RK_PC2 RK_FUNC_2 &pcfg_pull_none>,
- <0 RK_PC6 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC2 2 &pcfg_pull_none>,
+ <0 RK_PC6 3 &pcfg_pull_none>;
};
i2c2m1_gpio: i2c2m1-gpio {
@@ -744,8 +744,8 @@
i2c2m05v {
i2c2m05v_xfer: i2c2m05v-xfer {
- rockchip,pins = <1 RK_PD5 RK_FUNC_2 &pcfg_pull_none>,
- <1 RK_PD4 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD5 2 &pcfg_pull_none>,
+ <1 RK_PD4 2 &pcfg_pull_none>;
};
i2c2m05v_gpio: i2c2m05v-gpio {
@@ -756,123 +756,123 @@
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <0 RK_PB6 RK_FUNC_1 &pcfg_pull_none>,
- <0 RK_PC4 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB6 1 &pcfg_pull_none>,
+ <0 RK_PC4 2 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_pin: pwm0-pin {
- rockchip,pins = <0 RK_PC5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC5 1 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
- rockchip,pins = <0 RK_PC4 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC4 1 &pcfg_pull_none>;
};
};
pwm2 {
pwm2_pin: pwm2-pin {
- rockchip,pins = <0 RK_PC6 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC6 1 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_pin: pwm3-pin {
- rockchip,pins = <0 RK_PC0 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC0 1 &pcfg_pull_none>;
};
};
pwm4 {
pwm4_pin: pwm4-pin {
- rockchip,pins = <1 RK_PC1 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC1 3 &pcfg_pull_none>;
};
};
pwm5 {
pwm5_pin: pwm5-pin {
- rockchip,pins = <1 RK_PA7 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA7 2 &pcfg_pull_none>;
};
};
pwm6 {
pwm6_pin: pwm6-pin {
- rockchip,pins = <1 RK_PB0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB0 2 &pcfg_pull_none>;
};
};
pwm7 {
pwm7_pin: pwm7-pin {
- rockchip,pins = <1 RK_PB1 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB1 2 &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <3 RK_PC4 RK_FUNC_1 &pcfg_pull_none_drv_4ma>;
+ rockchip,pins = <3 RK_PC4 1 &pcfg_pull_none_drv_4ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <3 RK_PC5 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+ rockchip,pins = <3 RK_PC5 1 &pcfg_pull_up_drv_4ma>;
};
sdmmc_cd: sdmmc-cd {
- rockchip,pins = <0 RK_PA1 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_up_drv_4ma>;
};
sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins = <3 RK_PC3 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+ rockchip,pins = <3 RK_PC3 1 &pcfg_pull_up_drv_4ma>;
};
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <3 RK_PC3 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
- <3 RK_PC2 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
- <3 RK_PC1 RK_FUNC_1 &pcfg_pull_up_drv_4ma>,
- <3 RK_PC0 RK_FUNC_1 &pcfg_pull_up_drv_4ma>;
+ rockchip,pins = <3 RK_PC3 1 &pcfg_pull_up_drv_4ma>,
+ <3 RK_PC2 1 &pcfg_pull_up_drv_4ma>,
+ <3 RK_PC1 1 &pcfg_pull_up_drv_4ma>,
+ <3 RK_PC0 1 &pcfg_pull_up_drv_4ma>;
};
};
spim0 {
spim0_clk: spim0-clk {
- rockchip,pins = <1 RK_PD0 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD0 2 &pcfg_pull_up>;
};
spim0_cs0: spim0-cs0 {
- rockchip,pins = <1 RK_PD1 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD1 2 &pcfg_pull_up>;
};
spim0_tx: spim0-tx {
- rockchip,pins = <1 RK_PD3 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD3 2 &pcfg_pull_up>;
};
spim0_rx: spim0-rx {
- rockchip,pins = <1 RK_PD2 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD2 2 &pcfg_pull_up>;
};
};
spim1 {
spim1_clk: spim1-clk {
- rockchip,pins = <0 RK_PA3 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA3 1 &pcfg_pull_up>;
};
spim1_cs0: spim1-cs0 {
- rockchip,pins = <0 RK_PA4 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA4 1 &pcfg_pull_up>;
};
spim1_rx: spim1-rx {
- rockchip,pins = <0 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB0 1 &pcfg_pull_up>;
};
spim1_tx: spim1-tx {
- rockchip,pins = <0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA7 1 &pcfg_pull_up>;
};
};
tsadc {
otp_out: otp-out {
- rockchip,pins = <0 RK_PB7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB7 1 &pcfg_pull_none>;
};
otp_gpio: otp-gpio {
@@ -882,16 +882,16 @@
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <3 RK_PA6 RK_FUNC_1 &pcfg_pull_up>,
- <3 RK_PA5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA6 1 &pcfg_pull_up>,
+ <3 RK_PA5 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <3 RK_PA4 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 1 &pcfg_pull_none>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <3 RK_PA3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA3 1 &pcfg_pull_none>;
};
uart0_rts_gpio: uart0-rts-gpio {
@@ -901,40 +901,40 @@
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <1 RK_PD3 RK_FUNC_1 &pcfg_pull_up>,
- <1 RK_PD2 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD3 1 &pcfg_pull_up>,
+ <1 RK_PD2 1 &pcfg_pull_none>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <1 RK_PD0 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD0 1 &pcfg_pull_none>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <1 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD1 1 &pcfg_pull_none>;
};
};
uart2m0 {
uart2m0_xfer: uart2m0-xfer {
- rockchip,pins = <2 RK_PD2 RK_FUNC_1 &pcfg_pull_up>,
- <2 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD2 1 &pcfg_pull_up>,
+ <2 RK_PD1 1 &pcfg_pull_none>;
};
};
uart2m1 {
uart2m1_xfer: uart2m1-xfer {
- rockchip,pins = <3 RK_PC3 RK_FUNC_2 &pcfg_pull_up>,
- <3 RK_PC2 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC3 2 &pcfg_pull_up>,
+ <3 RK_PC2 2 &pcfg_pull_none>;
};
};
uart2_5v {
uart2_5v_cts: uart2_5v-cts {
- rockchip,pins = <1 RK_PD4 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD4 1 &pcfg_pull_none>;
};
uart2_5v_rts: uart2_5v-rts {
- rockchip,pins = <1 RK_PD5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PD5 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm/boot/dts/s5pv210-goni.dts b/arch/arm/boot/dts/s5pv210-goni.dts
index eb6d1926c0d6..fbbd93707404 100644
--- a/arch/arm/boot/dts/s5pv210-goni.dts
+++ b/arch/arm/boot/dts/s5pv210-goni.dts
@@ -376,7 +376,7 @@
vdd_core-supply = <&ldo14_reg>;
clock-frequency = <16000000>;
- clocks = <&clock_cam 0>;
+ clocks = <&camera 0>;
clock-names = "mclk";
nreset-gpios = <&gpb 2 0>;
nstby-gpios = <&gpb 0 0>;
diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
index a44d5eb56bed..2ad642f51fd9 100644
--- a/arch/arm/boot/dts/s5pv210.dtsi
+++ b/arch/arm/boot/dts/s5pv210.dtsi
@@ -585,12 +585,10 @@
clock-names = "sclk_cam0", "sclk_cam1";
#address-cells = <1>;
#size-cells = <1>;
+ #clock-cells = <1>;
+ clock-output-names = "cam_a_clkout", "cam_b_clkout";
ranges;
- clock_cam: clock-controller {
- #clock-cells = <1>;
- };
-
csis0: csis@fa600000 {
compatible = "samsung,s5pv210-csis";
reg = <0xfa600000 0x4000>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index d159ee42ef29..2e2c1a7b1d1d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* sama5d2.dtsi - Device Tree Include file for SAMA5D2 family SoC
*
* Copyright (C) 2015 Atmel,
* 2015 Ludovic Desroches <ludovic.desroches@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <dt-bindings/dma/at91.h>
@@ -688,13 +651,13 @@
ranges = <0 0xf8044000 0x1420>;
};
- rstc@f8048000 {
+ reset_controller: rstc@f8048000 {
compatible = "atmel,sama5d3-rstc";
reg = <0xf8048000 0x10>;
clocks = <&clk32k>;
};
- shdwc@f8048010 {
+ shutdown_controller: shdwc@f8048010 {
compatible = "atmel,sama5d2-shdwc";
reg = <0xf8048010 0x10>;
clocks = <&clk32k>;
@@ -710,7 +673,7 @@
clocks = <&pmc PMC_TYPE_CORE PMC_MCK2>;
};
- watchdog@f8048040 {
+ watchdog: watchdog@f8048040 {
compatible = "atmel,sama5d4-wdt";
reg = <0xf8048040 0x10>;
interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
diff --git a/arch/arm/boot/dts/sama5d36ek_cmp.dts b/arch/arm/boot/dts/sama5d36ek_cmp.dts
index b632143844e5..66695b9a3e77 100644
--- a/arch/arm/boot/dts/sama5d36ek_cmp.dts
+++ b/arch/arm/boot/dts/sama5d36ek_cmp.dts
@@ -1,45 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* sama5d36ek_cmp.dts - Device Tree file for SAMA5D36-EK CMP board
*
* Copyright (C) 2016 Atmel,
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
#include "sama5d36.dtsi"
diff --git a/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi b/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
index a02f59021364..9d2563602cbe 100644
--- a/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm_cmp.dtsi
@@ -1,45 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* sama5d3xcm_cmp.dtsi - Device Tree Include file for SAMA5D36 CMP CPU Module
*
* Copyright (C) 2016 Atmel,
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/ {
diff --git a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
index 97e171db5970..8a6916a69da4 100644
--- a/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
@@ -1,45 +1,8 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* sama5d3xmb_cmp.dts - Device Tree file for SAMA5D3x CMP mother board
*
* Copyright (C) 2016 Atmel,
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include "sama5d3xcm_cmp.dtsi"
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 6c1e41f94549..6ab27a7b388d 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1,46 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* sama5d4.dtsi - Device Tree Include file for SAMA5D4 family SoC
*
* Copyright (C) 2014 Atmel,
* 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
#include <dt-bindings/clock/at91.h>
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
index df2bab1624d4..64dc0799f3d7 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_sdmmc.dts
@@ -9,6 +9,7 @@
&mmc {
status = "okay";
cap-sd-highspeed;
+ cap-mmc-highspeed;
broken-cd;
bus-width = <4>;
};
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index e6ed7c0354a2..81fabf031eff 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1196,21 +1196,73 @@
status = "disabled";
};
+ gpu@a0300000 {
+ /*
+ * This block is referred to as "Smart Graphics Adapter SGA500"
+ * in documentation but is in practice a pretty straight-forward
+ * MALI-400 GPU block.
+ */
+ compatible = "stericsson,db8500-mali", "arm,mali-400";
+ reg = <0xa0300000 0x10000>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gp",
+ "gpmmu",
+ "pp0",
+ "ppmmu0",
+ "combined";
+ clocks = <&prcmu_clk PRCMU_ACLK>, <&prcmu_clk PRCMU_SGACLK>;
+ clock-names = "bus", "core";
+ mali-supply = <&db8500_sga_reg>;
+ power-domains = <&pm_domains DOMAIN_VAPE>;
+ };
+
mcde@a0350000 {
- compatible = "stericsson,mcde";
- reg = <0xa0350000 0x1000>, /* MCDE */
- <0xa0351000 0x1000>, /* DSI link 1 */
- <0xa0352000 0x1000>, /* DSI link 2 */
- <0xa0353000 0x1000>; /* DSI link 3 */
+ compatible = "ste,mcde";
+ reg = <0xa0350000 0x1000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+ epod-supply = <&db8500_b2r2_mcde_reg>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
clocks = <&prcmu_clk PRCMU_MCDECLK>, /* Main MCDE clock */
<&prcmu_clk PRCMU_LCDCLK>, /* LCD clock */
- <&prcmu_clk PRCMU_PLLDSI>, /* HDMI clock */
- <&prcmu_clk PRCMU_DSI0CLK>, /* DSI 0 */
- <&prcmu_clk PRCMU_DSI1CLK>, /* DSI 1 */
- <&prcmu_clk PRCMU_DSI0ESCCLK>, /* TVout clock 0 */
- <&prcmu_clk PRCMU_DSI1ESCCLK>, /* TVout clock 1 */
- <&prcmu_clk PRCMU_DSI2ESCCLK>; /* TVout clock 2 */
+ <&prcmu_clk PRCMU_PLLDSI>; /* HDMI clock */
+ clock-names = "mcde", "lcd", "hdmi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ status = "disabled";
+
+ dsi0: dsi@a0351000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0351000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI0CLK>, <&prcmu_clk PRCMU_DSI0ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ dsi1: dsi@a0352000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0352000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ clocks = <&prcmu_clk PRCMU_DSI1CLK>, <&prcmu_clk PRCMU_DSI1ESCCLK>;
+ clock-names = "hs", "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ dsi2: dsi@a0353000 {
+ compatible = "ste,mcde-dsi";
+ reg = <0xa0353000 0x1000>;
+ vana-supply = <&ab8500_ldo_ana_reg>;
+ /* This DSI port only has the Low Power / Energy Save clock */
+ clocks = <&prcmu_clk PRCMU_DSI2ESCCLK>;
+ clock-names = "lp";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
cryp@a03cb000 {
diff --git a/arch/arm/boot/dts/ste-href-stuib.dtsi b/arch/arm/boot/dts/ste-href-stuib.dtsi
index 35e944d8b5c4..eeaea21f5eca 100644
--- a/arch/arm/boot/dts/ste-href-stuib.dtsi
+++ b/arch/arm/boot/dts/ste-href-stuib.dtsi
@@ -190,5 +190,18 @@
};
};
};
+
+ mcde@a0350000 {
+ status = "okay";
+
+ dsi@a0351000 {
+ panel {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&ab8500_ldo_aux1_reg>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
index 0e7d77d719d7..76868444caa4 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
@@ -274,5 +274,18 @@
};
};
};
+
+ mcde@a0350000 {
+ status = "okay";
+
+ dsi@a0351000 {
+ panel {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&ab8500_ldo_aux1_reg>;
+ reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 588b6ef94e93..4a4954492ed1 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -80,6 +80,19 @@
};
soc {
+ romem: nvmem@1fff7800 {
+ compatible = "st,stm32f4-otp";
+ reg = <0x1fff7800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ts_cal1: calib@22c {
+ reg = <0x22c 0x2>;
+ };
+ ts_cal2: calib@22e {
+ reg = <0x22e 0x2>;
+ };
+ };
+
timer2: timer@40000000 {
compatible = "st,stm32-timer";
reg = <0x40000000 0x400>;
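
The calibration cells exposed by the new OTP area follow the generic nvmem provider layout, so a consumer would reference them through nvmem-cells; a minimal sketch of a hypothetical consumer node (node name, placement and cell names are assumed, not part of this patch):

	/* hypothetical consumer of the OTP calibration cells, illustrative only */
	temperature-sensor {
		nvmem-cells = <&ts_cal1>, <&ts_cal2>;
		nvmem-cell-names = "ts_cal1", "ts_cal2";
	};
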
diff --git a/arch/arm/boot/dts/stm32f769-disco.dts b/arch/arm/boot/dts/stm32f769-disco.dts
index 3c7216844a9b..6f1d0ac8c31c 100644
--- a/arch/arm/boot/dts/stm32f769-disco.dts
+++ b/arch/arm/boot/dts/stm32f769-disco.dts
@@ -102,6 +102,10 @@
};
};
+&rcc {
+ compatible = "st,stm32f769-rcc", "st,stm32f746-rcc", "st,stm32-rcc";
+};
+
&cec {
pinctrl-0 = <&cec_pins_a>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
index 980b2769caf9..e44e7baa3f17 100644
--- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
@@ -188,6 +188,74 @@
};
};
+ sdmmc1_b4_pins_a: sdmmc1-b4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2{
+ pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-open-drain;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
+ };
+ };
+
+ sdmmc1_dir_pins_a: sdmmc1-dir-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 6, AF8)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, AF7)>; /* SDMMC1_CDIR */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2{
+ pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */
+ bias-pull-up;
+ };
+ };
+
+ sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
+ <STM32_PINMUX('B', 8, ANALOG)>; /* SDMMC1_CKIN */
+ };
+ };
+
usart1_pins: usart1@0 {
pins1 {
pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 5cac79ebebb1..c065266ee377 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -339,6 +339,20 @@
dma-requests = <32>;
};
+ sdmmc1: sdmmc@52007000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x10153180>;
+ reg = <0x52007000 0x1000>;
+ interrupts = <49>;
+ interrupt-names = "cmd_irq";
+ clocks = <&rcc SDMMC1_CK>;
+ clock-names = "apb_pclk";
+ resets = <&rcc STM32H7_AHB3_RESET(SDMMC1)>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ };
+
exti: interrupt-controller@58000000 {
compatible = "st,stm32h7-exti";
interrupt-controller;
diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts
index dd06c8f3d09a..3acd2e9c434e 100644
--- a/arch/arm/boot/dts/stm32h743i-disco.dts
+++ b/arch/arm/boot/dts/stm32h743i-disco.dts
@@ -61,6 +61,14 @@
aliases {
serial0 = &usart2;
};
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
};
&clk_hse {
@@ -84,6 +92,18 @@
};
};
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
&usart2 {
pinctrl-0 = <&usart2_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
index ebc3f0933f5c..ab78ad532375 100644
--- a/arch/arm/boot/dts/stm32h743i-eval.dts
+++ b/arch/arm/boot/dts/stm32h743i-eval.dts
@@ -70,13 +70,20 @@
regulator-always-on;
};
+ v2v9_sd: regulator-v2v9_sd {
+ compatible = "regulator-fixed";
+ regulator-name = "v2v9_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-always-on;
+ };
+
usbotg_hs_phy: usb-phy {
#phy-cells = <0>;
compatible = "usb-nop-xceiv";
clocks = <&rcc USB1ULPI_CK>;
clock-names = "main_clk";
};
-
};
&adc_12 {
@@ -122,6 +129,20 @@
};
};
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
+ broken-cd;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&v2v9_sd>;
+ status = "okay";
+};
+
&usart1 {
pinctrl-0 = <&usart1_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
index 9ec4694e93a7..85c417d9983b 100644
--- a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
@@ -157,6 +157,27 @@
};
};
+ cec_pins_sleep_a: cec-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 15, ANALOG)>; /* HDMI_CEC */
+ };
+ };
+
+ cec_pins_b: cec-1 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 6, AF5)>;
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ cec_pins_sleep_b: cec-sleep-1 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 6, ANALOG)>; /* HDMI_CEC */
+ };
+ };
+
ethernet0_rgmii_pins_a: rgmii-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 5, AF11)>, /* ETH_RGMII_CLK125 */
@@ -213,6 +234,13 @@
};
};
+ i2c1_pins_sleep_a: i2c1-1 {
+ pins {
+ pinmux = <STM32_PINMUX('D', 12, ANALOG)>, /* I2C1_SCL */
+ <STM32_PINMUX('F', 15, ANALOG)>; /* I2C1_SDA */
+ };
+ };
+
i2c2_pins_a: i2c2-0 {
pins {
pinmux = <STM32_PINMUX('H', 4, AF4)>, /* I2C2_SCL */
@@ -223,6 +251,13 @@
};
};
+ i2c2_pins_sleep_a: i2c2-1 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 4, ANALOG)>, /* I2C2_SCL */
+ <STM32_PINMUX('H', 5, ANALOG)>; /* I2C2_SDA */
+ };
+ };
+
i2c5_pins_a: i2c5-0 {
pins {
pinmux = <STM32_PINMUX('A', 11, AF4)>, /* I2C5_SCL */
@@ -233,6 +268,152 @@
};
};
+ i2c5_pins_sleep_a: i2c5-1 {
+ pins {
+ pinmux = <STM32_PINMUX('A', 11, ANALOG)>, /* I2C5_SCL */
+ <STM32_PINMUX('A', 12, ANALOG)>; /* I2C5_SDA */
+
+ };
+ };
+
+ ltdc_pins_a: ltdc-a-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 7, AF14)>, /* LCD_CLK */
+ <STM32_PINMUX('I', 10, AF14)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 9, AF14)>, /* LCD_VSYNC */
+ <STM32_PINMUX('F', 10, AF14)>, /* LCD_DE */
+ <STM32_PINMUX('H', 2, AF14)>, /* LCD_R0 */
+ <STM32_PINMUX('H', 3, AF14)>, /* LCD_R1 */
+ <STM32_PINMUX('H', 8, AF14)>, /* LCD_R2 */
+ <STM32_PINMUX('H', 9, AF14)>, /* LCD_R3 */
+ <STM32_PINMUX('H', 10, AF14)>, /* LCD_R4 */
+ <STM32_PINMUX('C', 0, AF14)>, /* LCD_R5 */
+ <STM32_PINMUX('H', 12, AF14)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 15, AF14)>, /* LCD_R7 */
+ <STM32_PINMUX('E', 5, AF14)>, /* LCD_G0 */
+ <STM32_PINMUX('E', 6, AF14)>, /* LCD_G1 */
+ <STM32_PINMUX('H', 13, AF14)>, /* LCD_G2 */
+ <STM32_PINMUX('H', 14, AF14)>, /* LCD_G3 */
+ <STM32_PINMUX('H', 15, AF14)>, /* LCD_G4 */
+ <STM32_PINMUX('I', 0, AF14)>, /* LCD_G5 */
+ <STM32_PINMUX('I', 1, AF14)>, /* LCD_G6 */
+ <STM32_PINMUX('I', 2, AF14)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 9, AF14)>, /* LCD_B0 */
+ <STM32_PINMUX('G', 12, AF14)>, /* LCD_B1 */
+ <STM32_PINMUX('G', 10, AF14)>, /* LCD_B2 */
+ <STM32_PINMUX('D', 10, AF14)>, /* LCD_B3 */
+ <STM32_PINMUX('I', 4, AF14)>, /* LCD_B4 */
+ <STM32_PINMUX('A', 3, AF14)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 8, AF14)>, /* LCD_B6 */
+ <STM32_PINMUX('D', 8, AF14)>; /* LCD_B7 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ ltdc_pins_sleep_a: ltdc-a-1 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 7, ANALOG)>, /* LCD_CLK */
+ <STM32_PINMUX('I', 10, ANALOG)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 9, ANALOG)>, /* LCD_VSYNC */
+ <STM32_PINMUX('F', 10, ANALOG)>, /* LCD_DE */
+ <STM32_PINMUX('H', 2, ANALOG)>, /* LCD_R0 */
+ <STM32_PINMUX('H', 3, ANALOG)>, /* LCD_R1 */
+ <STM32_PINMUX('H', 8, ANALOG)>, /* LCD_R2 */
+ <STM32_PINMUX('H', 9, ANALOG)>, /* LCD_R3 */
+ <STM32_PINMUX('H', 10, ANALOG)>, /* LCD_R4 */
+ <STM32_PINMUX('C', 0, ANALOG)>, /* LCD_R5 */
+ <STM32_PINMUX('H', 12, ANALOG)>, /* LCD_R6 */
+ <STM32_PINMUX('E', 15, ANALOG)>, /* LCD_R7 */
+ <STM32_PINMUX('E', 5, ANALOG)>, /* LCD_G0 */
+ <STM32_PINMUX('E', 6, ANALOG)>, /* LCD_G1 */
+ <STM32_PINMUX('H', 13, ANALOG)>, /* LCD_G2 */
+ <STM32_PINMUX('H', 14, ANALOG)>, /* LCD_G3 */
+ <STM32_PINMUX('H', 15, ANALOG)>, /* LCD_G4 */
+ <STM32_PINMUX('I', 0, ANALOG)>, /* LCD_G5 */
+ <STM32_PINMUX('I', 1, ANALOG)>, /* LCD_G6 */
+ <STM32_PINMUX('I', 2, ANALOG)>, /* LCD_G7 */
+ <STM32_PINMUX('D', 9, ANALOG)>, /* LCD_B0 */
+ <STM32_PINMUX('G', 12, ANALOG)>, /* LCD_B1 */
+ <STM32_PINMUX('G', 10, ANALOG)>, /* LCD_B2 */
+ <STM32_PINMUX('D', 10, ANALOG)>, /* LCD_B3 */
+ <STM32_PINMUX('I', 4, ANALOG)>, /* LCD_B4 */
+ <STM32_PINMUX('A', 3, ANALOG)>, /* LCD_B5 */
+ <STM32_PINMUX('B', 8, ANALOG)>, /* LCD_B6 */
+ <STM32_PINMUX('D', 8, ANALOG)>; /* LCD_B7 */
+ };
+ };
+
+ ltdc_pins_b: ltdc-b-0 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 14, AF14)>, /* LCD_CLK */
+ <STM32_PINMUX('I', 12, AF14)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 13, AF14)>, /* LCD_VSYNC */
+ <STM32_PINMUX('K', 7, AF14)>, /* LCD_DE */
+ <STM32_PINMUX('I', 15, AF14)>, /* LCD_R0 */
+ <STM32_PINMUX('J', 0, AF14)>, /* LCD_R1 */
+ <STM32_PINMUX('J', 1, AF14)>, /* LCD_R2 */
+ <STM32_PINMUX('J', 2, AF14)>, /* LCD_R3 */
+ <STM32_PINMUX('J', 3, AF14)>, /* LCD_R4 */
+ <STM32_PINMUX('J', 4, AF14)>, /* LCD_R5 */
+ <STM32_PINMUX('J', 5, AF14)>, /* LCD_R6 */
+ <STM32_PINMUX('J', 6, AF14)>, /* LCD_R7 */
+ <STM32_PINMUX('J', 7, AF14)>, /* LCD_G0 */
+ <STM32_PINMUX('J', 8, AF14)>, /* LCD_G1 */
+ <STM32_PINMUX('J', 9, AF14)>, /* LCD_G2 */
+ <STM32_PINMUX('J', 10, AF14)>, /* LCD_G3 */
+ <STM32_PINMUX('J', 11, AF14)>, /* LCD_G4 */
+ <STM32_PINMUX('K', 0, AF14)>, /* LCD_G5 */
+ <STM32_PINMUX('K', 1, AF14)>, /* LCD_G6 */
+ <STM32_PINMUX('K', 2, AF14)>, /* LCD_G7 */
+ <STM32_PINMUX('J', 12, AF14)>, /* LCD_B0 */
+ <STM32_PINMUX('J', 13, AF14)>, /* LCD_B1 */
+ <STM32_PINMUX('J', 14, AF14)>, /* LCD_B2 */
+ <STM32_PINMUX('J', 15, AF14)>, /* LCD_B3 */
+ <STM32_PINMUX('K', 3, AF14)>, /* LCD_B4 */
+ <STM32_PINMUX('K', 4, AF14)>, /* LCD_B5 */
+ <STM32_PINMUX('K', 5, AF14)>, /* LCD_B6 */
+ <STM32_PINMUX('K', 6, AF14)>; /* LCD_B7 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <1>;
+ };
+ };
+
+ ltdc_pins_sleep_b: ltdc-b-1 {
+ pins {
+ pinmux = <STM32_PINMUX('I', 14, ANALOG)>, /* LCD_CLK */
+ <STM32_PINMUX('I', 12, ANALOG)>, /* LCD_HSYNC */
+ <STM32_PINMUX('I', 13, ANALOG)>, /* LCD_VSYNC */
+ <STM32_PINMUX('K', 7, ANALOG)>, /* LCD_DE */
+ <STM32_PINMUX('I', 15, ANALOG)>, /* LCD_R0 */
+ <STM32_PINMUX('J', 0, ANALOG)>, /* LCD_R1 */
+ <STM32_PINMUX('J', 1, ANALOG)>, /* LCD_R2 */
+ <STM32_PINMUX('J', 2, ANALOG)>, /* LCD_R3 */
+ <STM32_PINMUX('J', 3, ANALOG)>, /* LCD_R4 */
+ <STM32_PINMUX('J', 4, ANALOG)>, /* LCD_R5 */
+ <STM32_PINMUX('J', 5, ANALOG)>, /* LCD_R6 */
+ <STM32_PINMUX('J', 6, ANALOG)>, /* LCD_R7 */
+ <STM32_PINMUX('J', 7, ANALOG)>, /* LCD_G0 */
+ <STM32_PINMUX('J', 8, ANALOG)>, /* LCD_G1 */
+ <STM32_PINMUX('J', 9, ANALOG)>, /* LCD_G2 */
+ <STM32_PINMUX('J', 10, ANALOG)>, /* LCD_G3 */
+ <STM32_PINMUX('J', 11, ANALOG)>, /* LCD_G4 */
+ <STM32_PINMUX('K', 0, ANALOG)>, /* LCD_G5 */
+ <STM32_PINMUX('K', 1, ANALOG)>, /* LCD_G6 */
+ <STM32_PINMUX('K', 2, ANALOG)>, /* LCD_G7 */
+ <STM32_PINMUX('J', 12, ANALOG)>, /* LCD_B0 */
+ <STM32_PINMUX('J', 13, ANALOG)>, /* LCD_B1 */
+ <STM32_PINMUX('J', 14, ANALOG)>, /* LCD_B2 */
+ <STM32_PINMUX('J', 15, ANALOG)>, /* LCD_B3 */
+ <STM32_PINMUX('K', 3, ANALOG)>, /* LCD_B4 */
+ <STM32_PINMUX('K', 4, ANALOG)>, /* LCD_B5 */
+ <STM32_PINMUX('K', 5, ANALOG)>, /* LCD_B6 */
+ <STM32_PINMUX('K', 6, ANALOG)>; /* LCD_B7 */
+ };
+ };
+
m_can1_pins_a: m-can1-0 {
pins1 {
pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
@@ -325,6 +506,87 @@
};
};
+ sdmmc1_b4_pins_a: sdmmc1-b4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2{
+ pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-open-drain;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
+ };
+ };
+
+ sdmmc1_dir_pins_a: sdmmc1-dir-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('F', 2, AF11)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, AF11)>; /* SDMMC1_CDIR */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2{
+ pinmux = <STM32_PINMUX('E', 4, AF8)>; /* SDMMC1_CKIN */
+ bias-pull-up;
+ };
+ };
+
+ sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('F', 2, ANALOG)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
+ <STM32_PINMUX('E', 4, ANALOG)>; /* SDMMC1_CKIN */
+ };
+ };
+
+ spdifrx_pins_a: spdifrx-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 12, AF8)>; /* SPDIF_IN1 */
+ bias-disable;
+ };
+ };
+
+ spdifrx_sleep_pins_a: spdifrx-1 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 12, ANALOG)>; /* SPDIF_IN1 */
+ };
+ };
+
uart4_pins_a: uart4-0 {
pins1 {
pinmux = <STM32_PINMUX('G', 11, AF6)>; /* UART4_TX */
@@ -371,6 +633,13 @@
};
};
+ i2c4_pins_sleep_a: i2c4-1 {
+ pins {
+ pinmux = <STM32_PINMUX('Z', 4, ANALOG)>, /* I2C4_SCL */
+ <STM32_PINMUX('Z', 5, ANALOG)>; /* I2C4_SDA */
+ };
+ };
+
spi1_pins_a: spi1-0 {
pins1 {
pinmux = <STM32_PINMUX('Z', 0, AF5)>, /* SPI1_SCK */
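
The new *_sleep_* pin groups above are meant to be bound as a second pinctrl state on the peripheral nodes; a minimal sketch of the expected consumer pattern (illustrative only; the default group i2c1_pins_a is assumed to be defined elsewhere in this file):

	/* hypothetical consumer pairing a default state with the new sleep state */
	&i2c1 {
		pinctrl-names = "default", "sleep";
		pinctrl-0 = <&i2c1_pins_a>;		/* assumed existing default group */
		pinctrl-1 = <&i2c1_pins_sleep_a>;	/* sleep group added above */
		status = "okay";
	};
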
diff --git a/arch/arm/boot/dts/stm32mp157a-dk1.dts b/arch/arm/boot/dts/stm32mp157a-dk1.dts
new file mode 100644
index 000000000000..098dbfb06b61
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-dk1.dts
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+/dts-v1/;
+
+#include "stm32mp157c.dtsi"
+#include "stm32mp157-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
+
+/ {
+ model = "STMicroelectronics STM32MP157A-DK1 Discovery Board";
+ compatible = "st,stm32mp157a-dk1", "st,stm32mp157";
+
+ aliases {
+ ethernet0 = &ethernet0;
+ serial0 = &uart4;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@c0000000 {
+ reg = <0xc0000000 0x20000000>;
+ };
+
+ led {
+ compatible = "gpio-leds";
+ blue {
+ label = "heartbeat";
+ gpios = <&gpiod 11 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+ };
+};
+
+&cec {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cec_pins_b>;
+ pinctrl-1 = <&cec_pins_sleep_b>;
+ status = "okay";
+};
+
+&ethernet0 {
+ status = "okay";
+ pinctrl-0 = <&ethernet0_rgmii_pins_a>;
+ pinctrl-1 = <&ethernet0_rgmii_pins_sleep_a>;
+ pinctrl-names = "default", "sleep";
+ phy-mode = "rgmii";
+ max-speed = <1000>;
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ i2c-scl-rising-time-ns = <185>;
+ i2c-scl-falling-time-ns = <20>;
+ status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+ ldo1-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ v1v8_audio: ldo1 {
+ regulator-name = "v1v8_audio";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v3v3_hdmi: ldo2 {
+ regulator-name = "v3v3_hdmi";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdda: ldo5 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v2_hdmi: ldo6 {
+ regulator-name = "v1v2_hdmi";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ power-off-time-sec = <10>;
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart4_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-dk2.dts b/arch/arm/boot/dts/stm32mp157c-dk2.dts
new file mode 100644
index 000000000000..20ea601a546d
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157c-dk2.dts
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (C) STMicroelectronics 2019 - All Rights Reserved
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+/dts-v1/;
+
+#include "stm32mp157a-dk1.dts"
+
+/ {
+ model = "STMicroelectronics STM32MP157C-DK2 Discovery Board";
+ compatible = "st,stm32mp157c-dk2", "st,stm32mp157";
+
+ reg18: reg18 {
+ compatible = "regulator-fixed";
+ regulator-name = "reg18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+};
+
+&dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ phy-dsi-supply = <&reg18>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dsi_in: endpoint {
+ remote-endpoint = <&ltdc_ep1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+ panel@0 {
+ compatible = "orisetech,otm8009a";
+ reg = <0>;
+ reset-gpios = <&gpioe 4 GPIO_ACTIVE_LOW>;
+ power-supply = <&v3v3>;
+ status = "okay";
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+};
+
+&ltdc {
+ status = "okay";
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ltdc_ep1_out: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&dsi_in>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts
index d66edb0c66cd..62a8c78e7e2e 100644
--- a/arch/arm/boot/dts/stm32mp157c-ed1.dts
+++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts
@@ -7,6 +7,8 @@
#include "stm32mp157c.dtsi"
#include "stm32mp157-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/mfd/st,stpmic1.h>
/ {
model = "STMicroelectronics STM32MP157C eval daughter";
@@ -41,12 +43,17 @@
regulator-always-on;
};
- vdd_usb: vdd-usb {
- compatible = "regulator-fixed";
- regulator-name = "vdd_usb";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
+ sd_switch: regulator-sd_switch {
+ compatible = "regulator-gpio";
+ regulator-name = "sd_switch";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-type = "voltage";
regulator-always-on;
+
+ gpios = <&gpiof 14 GPIO_ACTIVE_HIGH>;
+ gpios-states = <0>;
+ states = <1800000 0x1 2900000 0x0>;
};
};
@@ -60,6 +67,149 @@
i2c-scl-rising-time-ns = <185>;
i2c-scl-falling-time-ns = <20>;
status = "okay";
+ /* spare dmas for other usage */
+ /delete-property/dmas;
+ /delete-property/dma-names;
+
+ pmic: stpmic@33 {
+ compatible = "st,stpmic1";
+ reg = <0x33>;
+ interrupts-extended = <&gpioa 0 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "okay";
+
+ regulators {
+ compatible = "st,stpmic1-regulators";
+ ldo1-supply = <&v3v3>;
+ ldo2-supply = <&v3v3>;
+ ldo3-supply = <&vdd_ddr>;
+ ldo5-supply = <&v3v3>;
+ ldo6-supply = <&v3v3>;
+ pwr_sw1-supply = <&bst_out>;
+ pwr_sw2-supply = <&bst_out>;
+
+ vddcore: buck1 {
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd_ddr: buck2 {
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ vdd: buck3 {
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ st,mask-reset;
+ regulator-initial-mode = <0>;
+ regulator-over-current-protection;
+ };
+
+ v3v3: buck4 {
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ regulator-initial-mode = <0>;
+ };
+
+ vdda: ldo1 {
+ regulator-name = "vdda";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO1 0>;
+ };
+
+ v2v8: ldo2 {
+ regulator-name = "v2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ interrupts = <IT_CURLIM_LDO2 0>;
+ };
+
+ vtt_ddr: ldo3 {
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <750000>;
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ vdd_usb: ldo4 {
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ interrupts = <IT_CURLIM_LDO4 0>;
+ };
+
+ vdd_sd: ldo5 {
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <2900000>;
+ regulator-max-microvolt = <2900000>;
+ interrupts = <IT_CURLIM_LDO5 0>;
+ regulator-boot-on;
+ };
+
+ v1v8: ldo6 {
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ interrupts = <IT_CURLIM_LDO6 0>;
+ };
+
+ vref_ddr: vref_ddr {
+ regulator-name = "vref_ddr";
+ regulator-always-on;
+ regulator-over-current-protection;
+ };
+
+ bst_out: boost {
+ regulator-name = "bst_out";
+ interrupts = <IT_OCP_BOOST 0>;
+ };
+
+ vbus_otg: pwr_sw1 {
+ regulator-name = "vbus_otg";
+ interrupts = <IT_OCP_OTG 0>;
+ };
+
+ vbus_sw: pwr_sw2 {
+ regulator-name = "vbus_sw";
+ interrupts = <IT_OCP_SWOUT 0>;
+ regulator-active-discharge;
+ };
+ };
+
+ onkey {
+ compatible = "st,stpmic1-onkey";
+ interrupts = <IT_PONKEY_F 0>, <IT_PONKEY_R 0>;
+ interrupt-names = "onkey-falling", "onkey-rising";
+ power-off-time-sec = <10>;
+ status = "okay";
+ };
+
+ watchdog {
+ compatible = "st,stpmic1-wdt";
+ status = "disabled";
+ };
+ };
+};
+
+&ipcc {
+ status = "okay";
};
&iwdg2 {
@@ -75,6 +225,21 @@
status = "okay";
};
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
+ broken-cd;
+ st,sig-dir;
+ st,neg-edge;
+ st,use-ckin;
+ bus-width = <4>;
+ vmmc-supply = <&vdd_sd>;
+ vqmmc-supply = <&sd_switch>;
+ status = "okay";
+};
+
&timers6 {
status = "okay";
/* spare dmas for other usage */
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index f8bbfff5950b..2afeee65c3ea 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -379,6 +379,19 @@
status = "disabled";
};
+ spdifrx: audio-controller@4000d000 {
+ compatible = "st,stm32h7-spdifrx";
+ #sound-dai-cells = <0>;
+ reg = <0x4000d000 0x400>;
+ clocks = <&rcc SPDIF_K>;
+ clock-names = "kclk";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&dmamux1 93 0x400 0x01>,
+ <&dmamux1 94 0x400 0x01>;
+ dma-names = "rx", "rx-ctrl";
+ status = "disabled";
+ };
+
usart2: serial@4000e000 {
compatible = "st,stm32h7-uart";
reg = <0x4000e000 0x400>;
@@ -886,6 +899,21 @@
status = "disabled";
};
+ ipcc: mailbox@4c001000 {
+ compatible = "st,stm32mp1-ipcc";
+ #mbox-cells = <1>;
+ reg = <0x4c001000 0x400>;
+ st,proc-id = <0>;
+ interrupts-extended =
+ <&intc GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <&exti 61 1>;
+ interrupt-names = "rx", "tx", "wakeup";
+ clocks = <&rcc IPCC>;
+ wakeup-source;
+ status = "disabled";
+ };
+
rcc: rcc@50000000 {
compatible = "st,stm32mp1-rcc", "syscon";
reg = <0x50000000 0x1000>;
@@ -903,6 +931,7 @@
syscfg: syscon@50020000 {
compatible = "st,stm32mp157-syscfg", "syscon";
reg = <0x50020000 0x400>;
+ clocks = <&rcc SYSCFG>;
};
lptimer2: timer@50021000 {
@@ -1050,6 +1079,20 @@
status = "disabled";
};
+ sdmmc1: sdmmc@58005000 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x10153180>;
+ reg = <0x58005000 0x1000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cmd_irq";
+ clocks = <&rcc SDMMC1_K>;
+ clock-names = "apb_pclk";
+ resets = <&rcc SDMMC1_R>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ };
+
crc1: crc@58009000 {
compatible = "st,stm32f7-crc";
reg = <0x58009000 0x400>;
@@ -1199,6 +1242,19 @@
status = "disabled";
};
+ bsec: nvmem@5c005000 {
+ compatible = "st,stm32mp15-bsec";
+ reg = <0x5c005000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ts_cal1: calib@5c {
+ reg = <0x5c 0x2>;
+ };
+ ts_cal2: calib@5e {
+ reg = <0x5e 0x2>;
+ };
+ };
+
i2c6: i2c@5c009000 {
compatible = "st,stm32f7-i2c";
reg = <0x5c009000 0x400>;
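
Tying the new spdifrx controller to the spdifrx pin groups added earlier in this series, a board-level enable would be expected to look roughly like the sketch below (illustrative only; the pin group choice is assumed):

	/* hypothetical board-level enable of the new SPDIF receiver */
	&spdifrx {
		pinctrl-names = "default", "sleep";
		pinctrl-0 = <&spdifrx_pins_a>;
		pinctrl-1 = <&spdifrx_sleep_pins_a>;
		status = "okay";
	};
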
diff --git a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
index cf7b392dff31..74262988881c 100644
--- a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
+++ b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
@@ -131,20 +131,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&reg_usb0_vbus {
status = "okay";
};
@@ -165,10 +151,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index 197a1f2b75ff..7306c65df88a 100644
--- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -184,12 +184,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
};
&reg_ahci_5v {
@@ -254,9 +248,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
index 896e27a08727..8ee3ff42bd55 100644
--- a/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
+++ b/arch/arm/boot/dts/sun4i-a10-dserve-dsrv9703c.dts
@@ -158,20 +158,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
@@ -223,10 +209,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts b/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
index f63767cddd8e..bf2044bac42f 100644
--- a/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
+++ b/arch/arm/boot/dts/sun4i-a10-hyundai-a7hd.dts
@@ -86,20 +86,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&reg_usb0_vbus {
status = "okay";
};
@@ -121,10 +107,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-inet1.dts b/arch/arm/boot/dts/sun4i-a10-inet1.dts
index 26d0c1d6a02b..ca878384e902 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet1.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet1.dts
@@ -164,20 +164,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
@@ -233,10 +219,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts b/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
index 71c27ea0b53e..76016f2ca29d 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
@@ -150,20 +150,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -209,10 +195,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
index 2f0d966f39ad..0a562b2cc5bc 100644
--- a/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
+++ b/arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
@@ -61,8 +61,6 @@
gpio-keys {
compatible = "gpio-keys-polled";
- pinctrl-names = "default";
- pinctrl-0 = <&key_pins_inet9f>;
poll-interval = <20>;
left-joystick-left {
@@ -70,7 +68,7 @@
linux,code = <ABS_X>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 0 6 GPIO_ACTIVE_LOW>; /* PA6 */
+ gpios = <&pio 0 6 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA6 */
};
left-joystick-right {
@@ -78,7 +76,7 @@
linux,code = <ABS_X>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 0 5 GPIO_ACTIVE_LOW>; /* PA5 */
+ gpios = <&pio 0 5 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA5 */
};
left-joystick-up {
@@ -86,7 +84,7 @@
linux,code = <ABS_Y>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 0 8 GPIO_ACTIVE_LOW>; /* PA8 */
+ gpios = <&pio 0 8 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA8 */
};
left-joystick-down {
@@ -94,7 +92,7 @@
linux,code = <ABS_Y>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
+ gpios = <&pio 0 9 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA9 */
};
right-joystick-left {
@@ -102,7 +100,7 @@
linux,code = <ABS_Z>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 0 1 GPIO_ACTIVE_LOW>; /* PA1 */
+ gpios = <&pio 0 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA1 */
};
right-joystick-right {
@@ -110,7 +108,7 @@
linux,code = <ABS_Z>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 0 0 GPIO_ACTIVE_LOW>; /* PA0 */
+ gpios = <&pio 0 0 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA0 */
};
right-joystick-up {
@@ -118,7 +116,7 @@
linux,code = <ABS_RZ>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 0 3 GPIO_ACTIVE_LOW>; /* PA3 */
+ gpios = <&pio 0 3 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA3 */
};
right-joystick-down {
@@ -126,7 +124,7 @@
linux,code = <ABS_RZ>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 0 4 GPIO_ACTIVE_LOW>; /* PA4 */
+ gpios = <&pio 0 4 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA4 */
};
dpad-left {
@@ -134,7 +132,7 @@
linux,code = <ABS_HAT0X>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 7 23 GPIO_ACTIVE_LOW>; /* PH23 */
+ gpios = <&pio 7 23 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PH23 */
};
dpad-right {
@@ -142,7 +140,7 @@
linux,code = <ABS_HAT0X>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 7 24 GPIO_ACTIVE_LOW>; /* PH24 */
+ gpios = <&pio 7 24 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PH24 */
};
dpad-up {
@@ -150,7 +148,7 @@
linux,code = <ABS_HAT0Y>;
linux,input-type = <EV_ABS>;
linux,input-value = <0xffffffff>; /* -1 */
- gpios = <&pio 7 25 GPIO_ACTIVE_LOW>; /* PH25 */
+ gpios = <&pio 7 25 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PH25 */
};
dpad-down {
@@ -158,55 +156,55 @@
linux,code = <ABS_HAT0Y>;
linux,input-type = <EV_ABS>;
linux,input-value = <1>;
- gpios = <&pio 7 26 GPIO_ACTIVE_LOW>; /* PH26 */
+ gpios = <&pio 7 26 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PH26 */
};
x {
label = "Button X";
linux,code = <BTN_X>;
- gpios = <&pio 0 16 GPIO_ACTIVE_LOW>; /* PA16 */
+ gpios = <&pio 0 16 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA16 */
};
y {
label = "Button Y";
linux,code = <BTN_Y>;
- gpios = <&pio 0 14 GPIO_ACTIVE_LOW>; /* PA14 */
+ gpios = <&pio 0 14 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA14 */
};
a {
label = "Button A";
linux,code = <BTN_A>;
- gpios = <&pio 0 17 GPIO_ACTIVE_LOW>; /* PA17 */
+ gpios = <&pio 0 17 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA17 */
};
b {
label = "Button B";
linux,code = <BTN_B>;
- gpios = <&pio 0 15 GPIO_ACTIVE_LOW>; /* PA15 */
+ gpios = <&pio 0 15 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA15 */
};
select {
label = "Select Button";
linux,code = <BTN_SELECT>;
- gpios = <&pio 0 11 GPIO_ACTIVE_LOW>; /* PA11 */
+ gpios = <&pio 0 11 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA11 */
};
start {
label = "Start Button";
linux,code = <BTN_START>;
- gpios = <&pio 0 12 GPIO_ACTIVE_LOW>; /* PA12 */
+ gpios = <&pio 0 12 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA12 */
};
top-left {
label = "Top Left Button";
linux,code = <BTN_TL>;
- gpios = <&pio 7 22 GPIO_ACTIVE_LOW>; /* PH22 */
+ gpios = <&pio 7 22 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PH22 */
};
top-right {
label = "Top Right Button";
linux,code = <BTN_TR>;
- gpios = <&pio 0 13 GPIO_ACTIVE_LOW>; /* PA13 */
+ gpios = <&pio 0 13 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>; /* PA13 */
};
};
};
@@ -306,30 +304,6 @@
status = "okay";
};
-&pio {
- key_pins_inet9f: key-pins {
- pins = "PA0", "PA1", "PA3", "PA4",
- "PA5", "PA6", "PA8", "PA9",
- "PA11", "PA12", "PA13",
- "PA14", "PA15", "PA16", "PA17",
- "PH22", "PH23", "PH24", "PH25", "PH26";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -375,10 +349,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-marsboard.dts b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
index 0dbf69576512..58ad2ad9041f 100644
--- a/arch/arm/boot/dts/sun4i-a10-marsboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-marsboard.dts
@@ -148,14 +148,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_usb1_vbus {
status = "okay";
};
@@ -183,9 +175,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index b74a61496537..a8e537fd4bd6 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -186,18 +186,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
};
&reg_ahci_5v {
@@ -229,10 +217,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun4i-a10-pcduino.dts b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
index d82a604f3d9c..0f1e781069e9 100644
--- a/arch/arm/boot/dts/sun4i-a10-pcduino.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pcduino.dts
@@ -154,14 +154,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
#include "axp209.dtsi"
&reg_dcdc2 {
@@ -201,9 +193,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_vcc5v0>; /* USB1 VBUS is always on */
usb2_vbus-supply = <&reg_vcc5v0>; /* USB2 VBUS is always on */
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
index 84b25be1ac94..24a3d23e1952 100644
--- a/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
+++ b/arch/arm/boot/dts/sun4i-a10-pov-protab2-ips9.dts
@@ -146,20 +146,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
-};
-
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
@@ -211,10 +197,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 73c3ac42095f..e88daa4ef1af 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -267,7 +267,7 @@
#dma-cells = <2>;
};
- nfc: nand@1c03000 {
+ nfc: nand-controller@1c03000 {
compatible = "allwinner,sun4i-a10-nand";
reg = <0x01c03000 0x1000>;
interrupts = <37>;
@@ -342,6 +342,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon0-pixel-clock";
+ #clock-cells = <0>;
dmas = <&dma SUN4I_DMA_DEDICATED 14>;
ports {
@@ -391,6 +392,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon1-pixel-clock";
+ #clock-cells = <0>;
dmas = <&dma SUN4I_DMA_DEDICATED 15>;
ports {
@@ -494,13 +496,14 @@
phy-names = "usb";
extcon = <&usbphy 0>;
allwinner,sram = <&otg_sram 1>;
+ dr_mode = "otg";
status = "disabled";
};
usbphy: phy@1c13400 {
#phy-cells = <1>;
compatible = "allwinner,sun4i-a10-usb-phy";
- reg = <0x01c13400 0x10 0x01c14800 0x4 0x01c1c800 0x4>;
+ reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>;
reg-names = "phy_ctrl", "pmu1", "pmu2";
clocks = <&ccu CLK_USB_PHY>;
clock-names = "usb_phy";
@@ -517,7 +520,6 @@
interrupts = <39>;
clocks = <&ccu CLK_AHB_EHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -527,7 +529,6 @@
interrupts = <64>;
clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -607,7 +608,6 @@
interrupts = <40>;
clocks = <&ccu CLK_AHB_EHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -617,7 +617,6 @@
interrupts = <65>;
clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
index c88f08984483..8af0eae2ddc1 100644
--- a/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-auxtek-t004.dts
@@ -119,12 +119,6 @@
};
&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG12";
- function = "gpio_in";
- bias-pull-up;
- };
-
led_pins_t004: led-pin {
pins = "PB2";
function = "gpio_out";
@@ -149,9 +143,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ usb0_id_det-gpios = <&pio 6 12 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG12 */
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index 262c2ffbdcfa..5340b4164df2 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -218,12 +218,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG12";
- function = "gpio_in";
- bias-pull-up;
- };
};
&reg_usb0_vbus {
@@ -271,9 +265,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ usb0_id_det-gpios = <&pio 6 12 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG12 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
index f3cede9beb63..a23bf24792ec 100644
--- a/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
+++ b/arch/arm/boot/dts/sun5i-a13-empire-electronix-d709.dts
@@ -127,20 +127,6 @@
status = "okay";
};
-&pio {
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PG1";
- function = "gpio_in";
- bias-pull-down;
- };
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG2";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
@@ -195,10 +181,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PG1 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_ldo3>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index 9369f7453beb..9b9f2a574851 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -124,14 +124,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG2";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
#include "axp209.dtsi"
&reg_dcdc2 {
@@ -182,9 +174,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
usb0_vbus_det-gpios = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_ldo3>;
diff --git a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
index ca8f3fd1ddfe..ba8d75b3c716 100644
--- a/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
+++ b/arch/arm/boot/dts/sun5i-a13-licheepi-one.dts
@@ -206,9 +206,8 @@
};
&usbphy {
- pinctrl-names = "default";
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_vcc5v0>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 943868e495bc..5df398d77238 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -109,18 +109,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG2";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PG1";
- function = "gpio_in";
- bias-pull-down;
- };
};
&reg_usb0_vbus {
@@ -145,10 +133,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PG1 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 9409c232d48a..39101228a755 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -74,8 +74,6 @@
bridge {
compatible = "dumb-vga-dac";
- #address-cells = <1>;
- #size-cells = <0>;
ports {
#address-cells = <1>;
@@ -204,18 +202,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG2";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PG1";
- function = "gpio_in";
- bias-pull-down;
- };
};
&reg_usb0_vbus {
@@ -253,10 +239,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PG1 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
index 7257f39b31ce..fde559a8b61e 100644
--- a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts
@@ -53,16 +53,9 @@
power-supply = <&reg_vcc3v3>;
enable-gpios = <&axp_gpio 0 GPIO_ACTIVE_HIGH>; /* AXP GPIO0 */
backlight = <&backlight>;
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel_input: endpoint@0 {
- reg = <0>;
+ port {
+ panel_input: endpoint {
remote-endpoint = <&tcon0_out_lcd>;
};
};
diff --git a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
index 732873cbeedc..be486d28d04f 100644
--- a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
+++ b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
@@ -58,13 +58,11 @@
/delete-property/stdout-path;
};
- i2c_lcd: i2c-gpio {
+ i2c_lcd: i2c {
/* The lcd panel i2c interface is hooked up via gpios */
compatible = "i2c-gpio";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c_lcd_pins>;
- gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>, /* PG12, sda */
- <&pio 6 10 GPIO_ACTIVE_HIGH>; /* PG10, scl */
+ sda-gpios = <&pio 6 12 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG12 */
+ scl-gpios = <&pio 6 10 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG10 */
i2c-gpio,delay-us = <5>;
};
};
@@ -94,14 +92,6 @@
};
};
-&pio {
- i2c_lcd_pins: i2c-lcd-pin {
- pins = "PG10", "PG12";
- function = "gpio_out";
- bias-pull-up;
- };
-};
-
&reg_usb0_vbus {
gpio = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
};
diff --git a/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
index 3f70b8c53132..a32cde3e32eb 100644
--- a/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-chip-pro.dts
@@ -133,8 +133,6 @@
status = "okay";
nand@0 {
- #address-cells = <2>;
- #size-cells = <2>;
reg = <0>;
allwinner,rb = <0>;
nand-ecc-mode = "hw";
@@ -233,7 +231,7 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb1_vbus-supply = <&reg_vcc5v0>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts
index 86e46aa59134..d003b895a696 100644
--- a/arch/arm/boot/dts/sun5i-gr8-evb.dts
+++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts
@@ -325,8 +325,8 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index f4298facf9dc..4bf4943d4eb7 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -84,9 +84,7 @@
onewire {
compatible = "w1-gpio";
- gpios = <&pio 3 2 GPIO_ACTIVE_HIGH>; /* PD2 */
- pinctrl-names = "default";
- pinctrl-0 = <&chip_w1_pin>;
+ gpios = <&pio 3 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PD2 */
};
};
@@ -173,14 +171,6 @@
status = "okay";
};
-&pio {
- chip_w1_pin: chip-w1-pin {
- pins = "PD2";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_dcdc2 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1400000>;
@@ -279,7 +269,7 @@
&usbphy {
status = "okay";
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+ usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_vcc5v0>;
diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
index 5b1f0e198eb6..1a9926d71410 100644
--- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi
@@ -132,20 +132,6 @@
status = "okay";
};
-&pio {
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PG1";
- function = "gpio_in";
- bias-pull-down;
- };
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PG2";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -198,10 +184,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
- usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+ usb0_id_det-gpios = <&pio 6 2 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PG2 */
+ usb0_vbus_det-gpios = <&pio 6 1 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PG1 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_ldo3>;
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 5497d985c54a..2fb438c4fe9d 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -127,6 +127,7 @@
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
+ dma-ranges;
ranges;
system-control@1c00000 {
@@ -181,6 +182,14 @@
};
};
+ mbus: dram-controller@1c01000 {
+ compatible = "allwinner,sun5i-a13-mbus";
+ reg = <0x01c01000 0x1000>;
+ clocks = <&ccu 99>;
+ dma-ranges = <0x00000000 0x40000000 0x20000000>;
+ #interconnect-cells = <1>;
+ };
+
dma: dma-controller@1c02000 {
compatible = "allwinner,sun4i-a10-dma";
reg = <0x01c02000 0x1000>;
@@ -189,7 +198,7 @@
#dma-cells = <2>;
};
- nfc: nand@1c03000 {
+ nfc: nand-controller@1c03000 {
compatible = "allwinner,sun4i-a10-nand";
reg = <0x01c03000 0x1000>;
interrupts = <37>;
@@ -238,11 +247,8 @@
status = "disabled";
port {
- #address-cells = <1>;
- #size-cells = <0>;
- tve0_in_tcon0: endpoint@0 {
- reg = <0>;
+ tve0_in_tcon0: endpoint {
remote-endpoint = <&tcon0_out_tve0>;
};
};
@@ -278,6 +284,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon-pixel-clock";
+ #clock-cells = <0>;
status = "disabled";
ports {
@@ -285,12 +292,9 @@
#size-cells = <0>;
tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon0_in_be0: endpoint@0 {
- reg = <0>;
+ tcon0_in_be0: endpoint {
remote-endpoint = <&be0_out_tcon0>;
};
};
@@ -365,13 +369,14 @@
phy-names = "usb";
extcon = <&usbphy 0>;
allwinner,sram = <&otg_sram 1>;
+ dr_mode = "otg";
status = "disabled";
};
usbphy: phy@1c13400 {
#phy-cells = <1>;
compatible = "allwinner,sun5i-a13-usb-phy";
- reg = <0x01c13400 0x10 0x01c14800 0x4>;
+ reg = <0x01c13400 0x10>, <0x01c14800 0x4>;
reg-names = "phy_ctrl", "pmu1";
clocks = <&ccu CLK_USB_PHY0>;
clock-names = "usb_phy";
@@ -386,7 +391,6 @@
interrupts = <39>;
clocks = <&ccu CLK_AHB_EHCI>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -396,7 +400,6 @@
interrupts = <40>;
clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -501,18 +504,18 @@
bias-pull-up;
};
- mmc2_8bit_pins: mmc2-8bit-pins {
+ mmc2_4bit_pc_pins: mmc2-4bit-pc-pins {
pins = "PC6", "PC7", "PC8", "PC9",
- "PC10", "PC11", "PC12", "PC13",
- "PC14", "PC15";
+ "PC10", "PC11";
function = "mmc2";
drive-strength = <30>;
bias-pull-up;
};
- mmc2_4bit_pc_pins: mmc2-4bit-pc-pins {
+ mmc2_8bit_pins: mmc2-8bit-pins {
pins = "PC6", "PC7", "PC8", "PC9",
- "PC10", "PC11";
+ "PC10", "PC11", "PC12", "PC13",
+ "PC14", "PC15";
function = "mmc2";
drive-strength = <30>;
bias-pull-up;
@@ -536,6 +539,11 @@
function = "nand0";
};
+ pwm0_pin: pwm0-pin {
+ pins = "PB2";
+ function = "pwm";
+ };
+
spi2_pe_pins: spi2-pe-pins {
pins = "PE1", "PE2", "PE3";
function = "spi2";
@@ -575,11 +583,6 @@
pins = "PG11", "PG12";
function = "uart3";
};
-
- pwm0_pin: pwm0-pin {
- pins = "PB2";
- function = "pwm";
- };
};
timer@1c20c00 {
@@ -727,6 +730,8 @@
clock-names = "ahb", "mod",
"ram";
resets = <&ccu RST_DE_FE>;
+ interconnects = <&mbus 19>;
+ interconnect-names = "dma-mem";
status = "disabled";
ports {
@@ -734,12 +739,9 @@
#size-cells = <0>;
fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- fe0_out_be0: endpoint@0 {
- reg = <0>;
+ fe0_out_be0: endpoint {
remote-endpoint = <&be0_in_fe0>;
};
};
@@ -755,6 +757,8 @@
clock-names = "ahb", "mod",
"ram";
resets = <&ccu RST_DE_BE>;
+ interconnects = <&mbus 18>;
+ interconnect-names = "dma-mem";
status = "disabled";
assigned-clocks = <&ccu CLK_DE_BE>;
@@ -765,23 +769,17 @@
#size-cells = <0>;
be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- be0_in_fe0: endpoint@0 {
- reg = <0>;
+ be0_in_fe0: endpoint {
remote-endpoint = <&fe0_out_be0>;
};
};
be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- be0_out_tcon0: endpoint@0 {
- reg = <0>;
+ be0_out_tcon0: endpoint {
remote-endpoint = <&tcon0_in_be0>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31-colombus.dts b/arch/arm/boot/dts/sun6i-a31-colombus.dts
index 0b7bedf85fb9..c3d56dc93513 100644
--- a/arch/arm/boot/dts/sun6i-a31-colombus.dts
+++ b/arch/arm/boot/dts/sun6i-a31-colombus.dts
@@ -63,10 +63,8 @@
i2c_lcd: i2c {
/* The lcd panel i2c interface is hooked up via gpios */
compatible = "i2c-gpio";
- pinctrl-names = "default";
- pinctrl-0 = <&i2c_lcd_pins>;
- gpios = <&pio 0 23 GPIO_ACTIVE_HIGH>, /* PA23, sda */
- <&pio 0 24 GPIO_ACTIVE_HIGH>; /* PA24, scl */
+ sda-gpios = <&pio 0 23 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PA23 */
+ scl-gpios = <&pio 0 24 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PA24 */
i2c-gpio,delay-us = <5>;
};
};
@@ -113,14 +111,6 @@
status = "okay";
};
-&pio {
- i2c_lcd_pins: i2c-lcd-pins {
- pins = "PA23", "PA24";
- function = "gpio_out";
- bias-pull-up;
- };
-};
-
&reg_usb2_vbus {
gpio = <&pio 7 24 GPIO_ACTIVE_HIGH>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index e17a65b3561e..09832b4e8fc8 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -86,31 +86,23 @@
vga-dac {
compatible = "dumb-vga-dac";
vdd-supply = <&reg_vga_3v3>;
- #address-cells = <1>;
- #size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- vga_dac_in: endpoint@0 {
- reg = <0>;
+ vga_dac_in: endpoint {
remote-endpoint = <&tcon0_out_vga>;
};
};
port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- vga_dac_out: endpoint@0 {
- reg = <0>;
+ vga_dac_out: endpoint {
remote-endpoint = <&vga_con_in>;
};
};
@@ -335,8 +327,8 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
- usb0_vbus_det-gpio = <&pio 0 16 GPIO_ACTIVE_HIGH>; /* PA16 */
+ usb0_id_det-gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+ usb0_vbus_det-gpios = <&pio 0 16 GPIO_ACTIVE_HIGH>; /* PA16 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun6i-a31-i7.dts b/arch/arm/boot/dts/sun6i-a31-i7.dts
index 0832ac5ae3ec..091eb2ac53b3 100644
--- a/arch/arm/boot/dts/sun6i-a31-i7.dts
+++ b/arch/arm/boot/dts/sun6i-a31-i7.dts
@@ -157,7 +157,6 @@
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&spdif_tx_pin>;
- spdif-out = "okay";
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 13304b8c5139..c04efad81bbc 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -292,6 +292,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon0-pixel-clock";
+ #clock-cells = <0>;
ports {
#address-cells = <1>;
@@ -340,6 +341,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon1-pixel-clock";
+ #clock-cells = <0>;
ports {
#address-cells = <1>;
@@ -491,8 +493,6 @@
};
hdmi_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
};
};
@@ -508,6 +508,7 @@
phys = <&usbphy 0>;
phy-names = "usb";
extcon = <&usbphy 0>;
+ dr_mode = "otg";
status = "disabled";
};
@@ -542,7 +543,6 @@
clocks = <&ccu CLK_AHB1_EHCI0>;
resets = <&ccu RST_AHB1_EHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -553,7 +553,6 @@
clocks = <&ccu CLK_AHB1_OHCI0>, <&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_AHB1_OHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -564,7 +563,6 @@
clocks = <&ccu CLK_AHB1_EHCI1>;
resets = <&ccu RST_AHB1_EHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -575,7 +573,6 @@
clocks = <&ccu CLK_AHB1_OHCI1>, <&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_AHB1_OHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -1029,7 +1026,7 @@
};
gic: interrupt-controller@1c81000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
<0x01c82000 0x2000>,
<0x01c84000 0x2000>,
@@ -1229,12 +1226,9 @@
};
be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- be0_out_drc0: endpoint@0 {
- reg = <0>;
+ be0_out_drc0: endpoint {
remote-endpoint = <&drc0_in_be0>;
};
};
@@ -1259,12 +1253,9 @@
#size-cells = <0>;
drc0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- drc0_in_be0: endpoint@0 {
- reg = <0>;
+ drc0_in_be0: endpoint {
remote-endpoint = <&be0_out_drc0>;
};
};
@@ -1380,7 +1371,6 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
- #size-cells = <0>;
#gpio-cells = <3>;
s_ir_rx_pin: s-ir-rx-pin {
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 60b355f7184c..bc3170a0b8b5 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -260,7 +260,7 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+ usb0_id_det-gpios = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_dldo1>;
diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
index 86143de21c22..7de2abd541c1 100644
--- a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
@@ -73,14 +73,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PA15";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&p2wi {
status = "okay";
@@ -173,9 +165,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 0 15 GPIO_ACTIVE_HIGH>; /* PA15 */
+ usb0_id_det-gpios = <&pio 0 15 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PA15 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_dldo1>;
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 81bc85d398c1..4df921632f7a 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -246,12 +246,6 @@
"SPI-MISO", "SPI-CE1", "",
"IO-6", "IO-3", "IO-2", "IO-0", "", "", "", "",
"", "", "", "", "", "", "", "";
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
};
#include "axp209.dtsi"
@@ -329,9 +323,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
index 200685b0b1cb..08e5a5abf8cc 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubieboard2.dts
@@ -173,14 +173,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_ahci_5v {
status = "okay";
};
@@ -236,9 +228,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index f91e1bee44e8..3e170cfac86a 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -229,14 +229,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
#include "axp209.dtsi"
&ac_power_supply {
@@ -322,9 +314,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts b/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
index 823aabce0462..c34a83f666c7 100644
--- a/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olimex-som204-evb.dts
@@ -314,8 +314,8 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
index 5e411194bf62..e40dd47df8ce 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
@@ -174,18 +174,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
};
&reg_ahci_5v {
@@ -217,10 +205,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
index 4e1c590eb098..95c6f8949076 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
@@ -174,23 +174,17 @@
};
&pio {
+ vcc-pa-supply = <&reg_vcc3v3>;
+ vcc-pc-supply = <&reg_vcc3v3>;
+ vcc-pe-supply = <&reg_ldo3>;
+ vcc-pf-supply = <&reg_vcc3v3>;
+ vcc-pg-supply = <&reg_ldo4>;
+
led_pins_olinuxinolime: led-pins {
pins = "PH2";
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
};
&reg_ahci_5v {
@@ -267,10 +261,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 840ae1194a66..0dcba070444a 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -252,18 +252,6 @@
function = "gpio_out";
drive-strength = <20>;
};
-
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-
- usb0_vbus_detect_pin: usb0-vbus-detect-pin {
- pins = "PH5";
- function = "gpio_in";
- bias-pull-down;
- };
};
#include "axp209.dtsi"
@@ -355,10 +343,8 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
- usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 (GPIO_ACTIVE_HIGH | GPIO_PULL_DOWN)>; /* PH5 */
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
index 15881081cac4..9628041bb3a3 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi-mini.dts
@@ -176,14 +176,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -239,9 +231,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-orangepi.dts b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
index d64de2e73a9f..7b3532665c28 100644
--- a/arch/arm/boot/dts/sun7i-a20-orangepi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-orangepi.dts
@@ -135,14 +135,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -198,9 +190,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
index 538ea15fa32f..173b676436e9 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
@@ -168,14 +168,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_ahci_5v {
gpio = <&pio 7 2 GPIO_ACTIVE_HIGH>; /* PH2 */
status = "okay";
@@ -226,9 +218,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
index a72ed4318d04..14a88aa16a97 100644
--- a/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
+++ b/arch/arm/boot/dts/sun7i-a20-pcduino3.dts
@@ -168,14 +168,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_ahci_5v {
gpio = <&pio 7 2 GPIO_ACTIVE_HIGH>;
status = "okay";
@@ -226,9 +218,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
index ffade253d129..6a66b0432dfa 100644
--- a/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
+++ b/arch/arm/boot/dts/sun7i-a20-wexler-tab7200.dts
@@ -156,14 +156,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&pwm {
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
@@ -223,9 +215,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts b/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
index c27e56091fb1..f8475a39777b 100644
--- a/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
+++ b/arch/arm/boot/dts/sun7i-a20-wits-pro-a20-dkt.dts
@@ -145,14 +145,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH4";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&reg_dcdc2 {
regulator-always-on;
regulator-min-microvolt = <1000000>;
@@ -206,9 +198,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_id_det-gpios = <&pio 7 4 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH4 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_usb0_vbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 641a8fa6d428..9ad8e445b240 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -333,7 +333,7 @@
#dma-cells = <2>;
};
- nfc: nand@1c03000 {
+ nfc: nand-controller@1c03000 {
compatible = "allwinner,sun4i-a10-nand";
reg = <0x01c03000 0x1000>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -406,6 +406,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon0-pixel-clock";
+ #clock-cells = <0>;
dmas = <&dma SUN4I_DMA_DEDICATED 14>;
ports {
@@ -455,6 +456,7 @@
"tcon-ch0",
"tcon-ch1";
clock-output-names = "tcon1-pixel-clock";
+ #clock-cells = <0>;
dmas = <&dma SUN4I_DMA_DEDICATED 15>;
ports {
@@ -586,13 +588,14 @@
phy-names = "usb";
extcon = <&usbphy 0>;
allwinner,sram = <&otg_sram 1>;
+ dr_mode = "otg";
status = "disabled";
};
usbphy: phy@1c13400 {
#phy-cells = <1>;
compatible = "allwinner,sun7i-a20-usb-phy";
- reg = <0x01c13400 0x10 0x01c14800 0x4 0x01c1c800 0x4>;
+ reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>;
reg-names = "phy_ctrl", "pmu1", "pmu2";
clocks = <&ccu CLK_USB_PHY>;
clock-names = "usb_phy";
@@ -609,7 +612,6 @@
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB_EHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -619,7 +621,6 @@
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -702,7 +703,6 @@
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_AHB_EHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -712,7 +712,6 @@
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -751,21 +750,31 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ /omit-if-no-ref/
+ can_pa_pins: can-pa-pins {
+ pins = "PA16", "PA17";
+ function = "can";
+ };
+
+ /omit-if-no-ref/
can_ph_pins: can-ph-pins {
pins = "PH20", "PH21";
function = "can";
};
+ /omit-if-no-ref/
clk_out_a_pin: clk-out-a-pin {
pins = "PI12";
function = "clk_out_a";
};
+ /omit-if-no-ref/
clk_out_b_pin: clk-out-b-pin {
pins = "PI13";
function = "clk_out_b";
};
+ /omit-if-no-ref/
emac_pa_pins: emac-pa-pins {
pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
@@ -775,6 +784,17 @@
function = "emac";
};
+ /omit-if-no-ref/
+ emac_ph_pins: emac-ph-pins {
+ pins = "PH8", "PH9", "PH10", "PH11",
+ "PH14", "PH15", "PH16", "PH17",
+ "PH18", "PH19", "PH20", "PH21",
+ "PH22", "PH23", "PH24", "PH25",
+ "PH26";
+ function = "emac";
+ };
+
+ /omit-if-no-ref/
gmac_mii_pins: gmac-mii-pins {
pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
@@ -784,6 +804,7 @@
function = "gmac";
};
+ /omit-if-no-ref/
gmac_rgmii_pins: gmac-rgmii-pins {
pins = "PA0", "PA1", "PA2",
"PA3", "PA4", "PA5", "PA6",
@@ -798,46 +819,55 @@
drive-strength = <40>;
};
+ /omit-if-no-ref/
i2c0_pins: i2c0-pins {
pins = "PB0", "PB1";
function = "i2c0";
};
+ /omit-if-no-ref/
i2c1_pins: i2c1-pins {
pins = "PB18", "PB19";
function = "i2c1";
};
+ /omit-if-no-ref/
i2c2_pins: i2c2-pins {
pins = "PB20", "PB21";
function = "i2c2";
};
+ /omit-if-no-ref/
i2c3_pins: i2c3-pins {
pins = "PI0", "PI1";
function = "i2c3";
};
+ /omit-if-no-ref/
ir0_rx_pin: ir0-rx-pin {
pins = "PB4";
function = "ir0";
};
+ /omit-if-no-ref/
ir0_tx_pin: ir0-tx-pin {
pins = "PB3";
function = "ir0";
};
+ /omit-if-no-ref/
ir1_rx_pin: ir1-rx-pin {
pins = "PB23";
function = "ir1";
};
+ /omit-if-no-ref/
ir1_tx_pin: ir1-tx-pin {
pins = "PB22";
function = "ir1";
};
+ /omit-if-no-ref/
mmc0_pins: mmc0-pins {
pins = "PF0", "PF1", "PF2",
"PF3", "PF4", "PF5";
@@ -846,6 +876,7 @@
bias-pull-up;
};
+ /omit-if-no-ref/
mmc2_pins: mmc2-pins {
pins = "PC6", "PC7", "PC8",
"PC9", "PC10", "PC11";
@@ -854,6 +885,7 @@
bias-pull-up;
};
+ /omit-if-no-ref/
mmc3_pins: mmc3-pins {
pins = "PI4", "PI5", "PI6",
"PI7", "PI8", "PI9";
@@ -862,127 +894,206 @@
bias-pull-up;
};
+ /omit-if-no-ref/
ps2_0_pins: ps2-0-pins {
pins = "PI20", "PI21";
function = "ps2";
};
+ /omit-if-no-ref/
ps2_1_ph_pins: ps2-1-ph-pins {
pins = "PH12", "PH13";
function = "ps2";
};
+ /omit-if-no-ref/
pwm0_pin: pwm0-pin {
pins = "PB2";
function = "pwm";
};
+ /omit-if-no-ref/
pwm1_pin: pwm1-pin {
pins = "PI3";
function = "pwm";
};
+ /omit-if-no-ref/
spdif_tx_pin: spdif-tx-pin {
pins = "PB13";
function = "spdif";
bias-pull-up;
};
+ /omit-if-no-ref/
spi0_pi_pins: spi0-pi-pins {
pins = "PI11", "PI12", "PI13";
function = "spi0";
};
+ /omit-if-no-ref/
spi0_cs0_pi_pin: spi0-cs0-pi-pin {
pins = "PI10";
function = "spi0";
};
+ /omit-if-no-ref/
spi0_cs1_pi_pin: spi0-cs1-pi-pin {
pins = "PI14";
function = "spi0";
};
+ /omit-if-no-ref/
spi1_pi_pins: spi1-pi-pins {
pins = "PI17", "PI18", "PI19";
function = "spi1";
};
+ /omit-if-no-ref/
spi1_cs0_pi_pin: spi1-cs0-pi-pin {
pins = "PI16";
function = "spi1";
};
+ /omit-if-no-ref/
spi2_pb_pins: spi2-pb-pins {
pins = "PB15", "PB16", "PB17";
function = "spi2";
};
+ /omit-if-no-ref/
spi2_cs0_pb_pin: spi2-cs0-pb-pin {
pins = "PB14";
function = "spi2";
};
+ /omit-if-no-ref/
spi2_pc_pins: spi2-pc-pins {
pins = "PC20", "PC21", "PC22";
function = "spi2";
};
+ /omit-if-no-ref/
spi2_cs0_pc_pin: spi2-cs0-pc-pin {
pins = "PC19";
function = "spi2";
};
+ /omit-if-no-ref/
uart0_pb_pins: uart0-pb-pins {
pins = "PB22", "PB23";
function = "uart0";
};
+ /omit-if-no-ref/
+ uart0_pf_pins: uart0-pf-pins {
+ pins = "PF2", "PF4";
+ function = "uart0";
+ };
+
+ /omit-if-no-ref/
+ uart1_pa_pins: uart1-pa-pins {
+ pins = "PA10", "PA11";
+ function = "uart1";
+ };
+
+ /omit-if-no-ref/
+ uart1_cts_rts_pa_pins: uart1-cts-rts-pa-pins {
+ pins = "PA12", "PA13";
+ function = "uart1";
+ };
+
+ /omit-if-no-ref/
+ uart2_pa_pins: uart2-pa-pins {
+ pins = "PA2", "PA3";
+ function = "uart2";
+ };
+
+ /omit-if-no-ref/
+ uart2_cts_rts_pa_pins: uart2-cts-rts-pa-pins {
+ pins = "PA0", "PA1";
+ function = "uart2";
+ };
+
+ /omit-if-no-ref/
uart2_pi_pins: uart2-pi-pins {
pins = "PI18", "PI19";
function = "uart2";
};
+ /omit-if-no-ref/
uart2_cts_rts_pi_pins: uart2-cts-rts-pi-pins {
pins = "PI16", "PI17";
function = "uart2";
};
+ /omit-if-no-ref/
uart3_pg_pins: uart3-pg-pins {
pins = "PG6", "PG7";
function = "uart3";
};
+ /omit-if-no-ref/
uart3_cts_rts_pg_pins: uart3-cts-rts-pg-pins {
pins = "PG8", "PG9";
function = "uart3";
};
+ /omit-if-no-ref/
uart3_ph_pins: uart3-ph-pins {
pins = "PH0", "PH1";
function = "uart3";
};
+ /omit-if-no-ref/
+ uart3_cts_rts_ph_pins: uart3-cts-rts-ph-pins {
+ pins = "PH2", "PH3";
+ function = "uart3";
+ };
+
+ /omit-if-no-ref/
uart4_pg_pins: uart4-pg-pins {
pins = "PG10", "PG11";
function = "uart4";
};
+ /omit-if-no-ref/
uart4_ph_pins: uart4-ph-pins {
pins = "PH4", "PH5";
function = "uart4";
};
+ /omit-if-no-ref/
+ uart5_ph_pins: uart5-ph-pins {
+ pins = "PH6", "PH7";
+ function = "uart5";
+ };
+
+ /omit-if-no-ref/
uart5_pi_pins: uart5-pi-pins {
pins = "PI10", "PI11";
function = "uart5";
};
+ /omit-if-no-ref/
+ uart6_pa_pins: uart6-pa-pins {
+ pins = "PA12", "PA13";
+ function = "uart6";
+ };
+
+ /omit-if-no-ref/
uart6_pi_pins: uart6-pi-pins {
pins = "PI12", "PI13";
function = "uart6";
};
+ /omit-if-no-ref/
+ uart7_pa_pins: uart7-pa-pins {
+ pins = "PA14", "PA15";
+ function = "uart7";
+ };
+
+ /omit-if-no-ref/
uart7_pi_pins: uart7-pi-pins {
pins = "PI20", "PI21";
function = "uart7";
@@ -1341,7 +1452,7 @@
};
gic: interrupt-controller@1c81000 {
- compatible = "arm,gic-400", "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
<0x01c82000 0x2000>,
<0x01c84000 0x2000>,
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 43fe215e83ea..af2fa694a467 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -161,14 +161,18 @@
#dma-cells = <1>;
};
- nfc: nand@1c03000 {
- compatible = "allwinner,sun4i-a10-nand";
+ nfc: nand-controller@1c03000 {
+ compatible = "allwinner,sun8i-a23-nand-controller";
reg = <0x01c03000 0x1000>;
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
clock-names = "ahb", "mod";
resets = <&ccu RST_BUS_NAND>;
reset-names = "ahb";
+ dmas = <&dma 5>;
+ dma-names = "rxtx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&nand_pins &nand_cs0_pin &nand_rb0_pin>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -183,6 +187,7 @@
clock-names = "ahb",
"tcon-ch0";
clock-output-names = "tcon-pixel-clock";
+ #clock-cells = <0>;
resets = <&ccu RST_BUS_LCD>;
reset-names = "lcd";
status = "disabled";
@@ -192,19 +197,14 @@
#size-cells = <0>;
tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon0_in_drc0: endpoint@0 {
- reg = <0>;
+ tcon0_in_drc0: endpoint {
remote-endpoint = <&drc0_out_tcon0>;
};
};
tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
};
};
@@ -279,6 +279,7 @@
phys = <&usbphy 0>;
phy-names = "usb";
extcon = <&usbphy 0>;
+ dr_mode = "otg";
status = "disabled";
};
@@ -306,7 +307,6 @@
clocks = <&ccu CLK_BUS_EHCI>;
resets = <&ccu RST_BUS_EHCI>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -317,7 +317,6 @@
clocks = <&ccu CLK_BUS_OHCI>, <&ccu CLK_USB_OHCI>;
resets = <&ccu RST_BUS_OHCI>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -396,25 +395,25 @@
function = "nand0";
};
- nand_pins_cs0: nand-pins-cs0 {
+ nand_cs0_pin: nand-cs0-pin {
pins = "PC4";
function = "nand0";
bias-pull-up;
};
- nand_pins_cs1: nand-pins-cs1 {
+ nand_cs1_pin: nand-cs1-pin {
pins = "PC3";
function = "nand0";
bias-pull-up;
};
- nand_pins_rb0: nand-pins-rb0 {
+ nand_rb0_pin: nand-rb0-pin {
pins = "PC6";
function = "nand0";
bias-pull-up;
};
- nand_pins_rb1: nand-pins-rb1 {
+ nand_rb1_pin: nand-rb1-pin {
pins = "PC7";
function = "nand0";
bias-pull-up;
@@ -602,7 +601,7 @@
};
gic: interrupt-controller@1c81000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
<0x01c82000 0x2000>,
<0x01c84000 0x2000>,
@@ -627,12 +626,9 @@
#size-cells = <0>;
fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- fe0_out_be0: endpoint@0 {
- reg = <0>;
+ fe0_out_be0: endpoint {
remote-endpoint = <&be0_in_fe0>;
};
};
@@ -654,23 +650,17 @@
#size-cells = <0>;
be0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- be0_in_fe0: endpoint@0 {
- reg = <0>;
+ be0_in_fe0: endpoint {
remote-endpoint = <&fe0_out_be0>;
};
};
be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- be0_out_drc0: endpoint@0 {
- reg = <0>;
+ be0_out_drc0: endpoint {
remote-endpoint = <&drc0_in_be0>;
};
};
@@ -694,23 +684,17 @@
#size-cells = <0>;
drc0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- drc0_in_be0: endpoint@0 {
- reg = <0>;
+ drc0_in_be0: endpoint {
remote-endpoint = <&be0_out_drc0>;
};
};
drc0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- drc0_out_tcon0: endpoint@0 {
- reg = <0>;
+ drc0_out_tcon0: endpoint {
remote-endpoint = <&tcon0_in_drc0>;
};
};
@@ -799,6 +783,20 @@
status = "disabled";
};
+ r_i2c: i2c@1f02400 {
+ compatible = "allwinner,sun8i-a23-i2c",
+ "allwinner,sun6i-a31-i2c";
+ reg = <0x01f02400 0x400>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&r_i2c_pins>;
+ clocks = <&apb0_gates 6>;
+ resets = <&apb0_rst 6>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun8i-a23-r-pinctrl";
reg = <0x01f02c00 0x400>;
@@ -811,6 +809,12 @@
#interrupt-cells = <3>;
#gpio-cells = <3>;
+ r_i2c_pins: r-i2c-pins {
+ pins = "PL0", "PL1";
+ function = "s_i2c";
+ bias-pull-up;
+ };
+
r_rsb_pins: r-rsb-pins {
pins = "PL0", "PL1";
function = "s_rsb";
diff --git a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
index d4dab7c28398..5659c63d7d77 100644
--- a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts
@@ -65,3 +65,9 @@
&panel {
compatible = "bananapi,s070wv20-ct16", "simple-panel";
};
+
+&tcon0_out {
+ tcon0_out_lcd: endpoint {
+ remote-endpoint = <&panel_input>;
+ };
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
index b0bc2360f8c4..9c5750c25613 100644
--- a/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
+++ b/arch/arm/boot/dts/sun8i-a33-q8-tablet.dts
@@ -48,3 +48,10 @@
model = "Q8 A33 Tablet";
compatible = "allwinner,q8-a33", "allwinner,sun8i-a33";
};
+
+&tcon0_out {
+ tcon0_out_lcd: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index f3667268adde..785798e3a104 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -63,16 +63,9 @@
panel {
compatible = "netron-dy,e231732";
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel_input: endpoint@0 {
- reg = <0>;
+ port {
+ panel_input: endpoint {
remote-endpoint = <&tcon0_out_panel>;
};
};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 1111a6498102..1532a0e59af4 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -265,19 +265,12 @@
phys = <&dphy>;
phy-names = "dphy";
status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
-
- dsi_in_tcon0: endpoint {
- remote-endpoint = <&tcon0_out_dsi>;
- };
+ port {
+ dsi_in_tcon0: endpoint {
+ remote-endpoint = <&tcon0_out_dsi>;
};
};
};
@@ -420,6 +413,9 @@
};
&tcon0_out {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
tcon0_out_dsi: endpoint@1 {
reg = <1>;
remote-endpoint = <&dsi_in_tcon0>;
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 838be7b3715f..9d34eabba121 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -389,7 +389,19 @@
};
};
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
&usbphy {
+ usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index fcbec3d7ccd7..ea299d3d84d0 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -420,7 +420,19 @@
};
};
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
&usbphy {
+ usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
+ usb0_vbus_power-supply = <&usb_power_supply>;
+ usb0_vbus-supply = <&reg_drivevbus>;
usb1_vbus-supply = <&reg_usb1_vbus>;
usb2_vbus-supply = <&reg_usb2_vbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 98e8cea26dbe..66d078053d5f 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -46,6 +46,7 @@
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/input/input.h>
/ {
model = "TBS A711 Tablet";
@@ -98,6 +99,13 @@
};
};
+ reg_gps: reg-gps {
+ compatible = "regulator-fixed";
+ regulator-name = "gps";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+
reg_vbat: reg-vbat {
compatible = "regulator-fixed";
regulator-name = "vbat";
@@ -156,6 +164,18 @@
status = "okay";
};
+&i2c1 {
+ clock-frequency = <400000>;
+ status = "okay";
+
+ accelerometer@18 {
+ compatible = "bosch,bma250";
+ reg = <0x18>;
+ interrupt-parent = <&pio>;
+ interrupts = <7 10 IRQ_TYPE_EDGE_RISING>; /* PH10 / EINT10 */
+ };
+};
+
&mmc0 {
vmmc-supply = <&reg_dcdc1>;
pinctrl-names = "default";
@@ -200,6 +220,25 @@
status = "okay";
};
+&r_lradc {
+ vref-supply = <&reg_aldo2>;
+ status = "okay";
+
+ button@210 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <210000>;
+ };
+
+ button@410 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <410000>;
+ };
+};
+
&r_rsb {
status = "okay";
@@ -391,8 +430,7 @@
};
&tcon0_out {
- tcon0_out_lcd: endpoint@0 {
- reg = <0>;
+ tcon0_out_lcd: endpoint {
remote-endpoint = <&panel_input>;
};
};
@@ -407,7 +445,34 @@
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm20702a1";
+ clocks = <&ac100_rtc 1>;
+ clock-names = "lpo";
+ vbat-supply = <&reg_vbat>;
+ vddio-supply = <&reg_dldo1>;
+ device-wakeup-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ host-wakeup-gpios = <&r_pio 0 5 GPIO_ACTIVE_HIGH>; /* PL5 */
+ shutdown-gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
+ max-speed = <1500000>;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pb_pins>;
status = "okay";
+
+ gnss {
+ compatible = "u-blox,neo-6m";
+
+ v-bckp-supply = <&reg_rtc_ldo>;
+ vcc-supply = <&reg_gps>;
+ current-speed = <9600>;
+ };
};
&usb_otg {
@@ -418,7 +483,7 @@
&usbphy {
usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
usb0_vbus-supply = <&reg_drivevbus>;
- usb1_vbus_supply = <&reg_vmain>;
- usb2_vbus_supply = <&reg_vmain>;
+ usb1_vbus-supply = <&reg_vmain>;
+ usb2_vbus-supply = <&reg_vmain>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index b099d2fbb5cd..392b0cabbf0d 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -61,79 +61,91 @@
#size-cells = <0>;
cpu0: cpu@0 {
- clocks = <&ccu CLK_C0CPUX>;
- clock-names = "cpu";
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C0CPUX>;
operating-points-v2 = <&cpu0_opp_table>;
cci-control-port = <&cci_control0>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <0>;
+ #cooling-cells = <2>;
};
cpu@1 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C0CPUX>;
operating-points-v2 = <&cpu0_opp_table>;
cci-control-port = <&cci_control0>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <1>;
+ #cooling-cells = <2>;
};
cpu@2 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C0CPUX>;
operating-points-v2 = <&cpu0_opp_table>;
cci-control-port = <&cci_control0>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <2>;
+ #cooling-cells = <2>;
};
cpu@3 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C0CPUX>;
operating-points-v2 = <&cpu0_opp_table>;
cci-control-port = <&cci_control0>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <3>;
+ #cooling-cells = <2>;
};
cpu100: cpu@100 {
- clocks = <&ccu CLK_C1CPUX>;
- clock-names = "cpu";
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C1CPUX>;
operating-points-v2 = <&cpu1_opp_table>;
cci-control-port = <&cci_control1>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <0x100>;
+ #cooling-cells = <2>;
};
cpu@101 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C1CPUX>;
operating-points-v2 = <&cpu1_opp_table>;
cci-control-port = <&cci_control1>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <0x101>;
+ #cooling-cells = <2>;
};
cpu@102 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C1CPUX>;
operating-points-v2 = <&cpu1_opp_table>;
cci-control-port = <&cci_control1>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <0x102>;
+ #cooling-cells = <2>;
};
cpu@103 {
compatible = "arm,cortex-a7";
device_type = "cpu";
+ clocks = <&ccu CLK_C1CPUX>;
operating-points-v2 = <&cpu1_opp_table>;
cci-control-port = <&cci_control1>;
enable-method = "allwinner,sun8i-a83t-smp";
reg = <0x103>;
+ #cooling-cells = <2>;
};
};
@@ -333,6 +345,11 @@
reg = <0>;
remote-endpoint = <&tcon0_in_mixer0>;
};
+
+ mixer0_out_tcon1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&tcon1_in_mixer0>;
+ };
};
};
};
@@ -351,9 +368,17 @@
#size-cells = <0>;
mixer1_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
- mixer1_out_tcon1: endpoint {
+ mixer1_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_mixer1>;
+ };
+
+ mixer1_out_tcon1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&tcon1_in_mixer1>;
};
};
@@ -420,6 +445,7 @@
clocks = <&ccu CLK_BUS_TCON0>, <&ccu CLK_TCON0>;
clock-names = "ahb", "tcon-ch0";
clock-output-names = "tcon-pixel-clock";
+ #clock-cells = <0>;
resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>;
reset-names = "lcd", "lvds";
@@ -436,11 +462,14 @@
reg = <0>;
remote-endpoint = <&mixer0_out_tcon0>;
};
+
+ tcon0_in_mixer1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&mixer1_out_tcon0>;
+ };
};
tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
};
};
@@ -460,9 +489,17 @@
#size-cells = <0>;
tcon1_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- tcon1_in_mixer1: endpoint {
+ tcon1_in_mixer0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&mixer0_out_tcon1>;
+ };
+
+ tcon1_in_mixer1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&mixer1_out_tcon1>;
};
};
@@ -557,6 +594,7 @@
phys = <&usbphy 0>;
phy-names = "usb";
extcon = <&usbphy 0>;
+ dr_mode = "otg";
status = "disabled";
};
@@ -594,7 +632,6 @@
clocks = <&ccu CLK_BUS_EHCI0>;
resets = <&ccu RST_BUS_EHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -606,7 +643,6 @@
clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>;
resets = <&ccu RST_BUS_OHCI0>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -618,7 +654,6 @@
clocks = <&ccu CLK_BUS_EHCI1>;
resets = <&ccu RST_BUS_EHCI1>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -671,6 +706,12 @@
function = "i2c1";
};
+ /omit-if-no-ref/
+ i2c2_pe_pins: i2c2-pe-pins {
+ pins = "PE14", "PE15";
+ function = "i2c2";
+ };
+
i2c2_ph_pins: i2c2-ph-pins {
pins = "PH4", "PH5";
function = "i2c2";
@@ -742,6 +783,12 @@
pins = "PG8", "PG9";
function = "uart1";
};
+
+ /omit-if-no-ref/
+ uart2_pb_pins: uart2-pb-pins {
+ pins = "PB0", "PB1";
+ function = "uart2";
+ };
};
timer@1c20c00 {
@@ -847,6 +894,39 @@
status = "disabled";
};
+ uart2: serial@1c28800 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28800 0x400>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART2>;
+ resets = <&ccu RST_BUS_UART2>;
+ status = "disabled";
+ };
+
+ uart3: serial@1c28c00 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c28c00 0x400>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART3>;
+ resets = <&ccu RST_BUS_UART3>;
+ status = "disabled";
+ };
+
+ uart4: serial@1c29000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x01c29000 0x400>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clocks = <&ccu CLK_BUS_UART4>;
+ resets = <&ccu RST_BUS_UART4>;
+ status = "disabled";
+ };
+
i2c0: i2c@1c2ac00 {
compatible = "allwinner,sun8i-a83t-i2c",
"allwinner,sun6i-a31-i2c";
@@ -907,7 +987,7 @@
};
gic: interrupt-controller@1c81000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
<0x01c82000 0x2000>,
<0x01c84000 0x2000>,
@@ -998,6 +1078,13 @@
status = "disabled";
};
+ r_lradc: lradc@1f03c00 {
+ compatible = "allwinner,sun8i-a83t-r-lradc";
+ reg = <0x01f03c00 0x100>;
+ interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun8i-a83t-r-pinctrl";
reg = <0x01f02c00 0x400>;
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index 1db2541135a7..78a37a47185a 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -28,7 +28,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
pwr_led {
label = "bananapi-m2-zero:red:pwr";
@@ -39,7 +38,6 @@
gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
sw4 {
label = "power";
@@ -67,8 +65,9 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
+ clocks = <&rtc 1>;
+ clock-names = "ext_clock";
};
};
@@ -115,14 +114,27 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>;
+ uart-has-rtscts;
status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rtc 1>;
+ clock-names = "lpo";
+ vbat-supply = <&reg_vcc3v3>;
+ vddio-supply = <&reg_vcc3v3>;
+ device-wakeup-gpios = <&pio 6 13 GPIO_ACTIVE_HIGH>; /* PG13 */
+ host-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
+ shutdown-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+ };
+
};
&usb_otg {
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
index 84cd9c061227..4970eda2877e 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts
@@ -178,7 +178,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 25540b7694d5..6277f13f3eb3 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -142,7 +142,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -193,13 +193,13 @@
&spdif {
pinctrl-names = "default";
- pinctrl-0 = <&spdif_tx_pins_a>;
+ pinctrl-0 = <&spdif_tx_pin>;
status = "okay";
};
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-mapleboard-mp130.dts b/arch/arm/boot/dts/sun8i-h3-mapleboard-mp130.dts
index 2c952eacfef5..ff0a7a952e0c 100644
--- a/arch/arm/boot/dts/sun8i-h3-mapleboard-mp130.dts
+++ b/arch/arm/boot/dts/sun8i-h3-mapleboard-mp130.dts
@@ -84,15 +84,14 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
&mmc0 {
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
- cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 */
- cd-inverted;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
status = "okay";
};
@@ -120,7 +119,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
index 4ec94d72f021..4ba533b0340f 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts
@@ -64,7 +64,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
};
@@ -121,7 +120,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts
index 9412668bb888..69243dcb30a6 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1.dts
@@ -93,7 +93,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
index 6246d3eff39d..07867a0d569b 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi-neo-air.dts
@@ -105,7 +105,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
index f110ee382239..4df29a65316d 100644
--- a/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3-nanopi.dtsi
@@ -59,8 +59,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_npi>, <&leds_r_npi>;
status {
label = "nanopi:blue:status";
@@ -78,8 +76,6 @@
r_gpio_keys {
compatible = "gpio-keys";
input-name = "k1";
- pinctrl-names = "default";
- pinctrl-0 = <&sw_r_npi>;
k1 {
label = "k1";
@@ -104,28 +100,9 @@
status = "okay";
};
-&pio {
- leds_npi: led_pins {
- pins = "PA10";
- function = "gpio_out";
- };
-};
-
-&r_pio {
- leds_r_npi: led_pins {
- pins = "PL10";
- function = "gpio_out";
- };
-
- sw_r_npi: key_pins {
- pins = "PL3";
- function = "gpio_in";
- };
-};
-
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
index f1fc6bdca8be..597c425d08ec 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-2.dts
@@ -75,8 +75,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
status_led {
label = "orangepi:red:status";
@@ -92,8 +90,6 @@
r_gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&sw_r_opc>;
sw2 {
label = "sw2";
@@ -110,8 +106,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
- pinctrl-0 = <&wifi_pwrseq_pin_orangepi>;
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 WIFI_EN */
};
};
@@ -152,7 +146,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -179,30 +173,6 @@
};
};
-&pio {
- leds_opc: led_pins {
- pins = "PA15";
- function = "gpio_out";
- };
-};
-
-&r_pio {
- leds_r_opc: led_pins {
- pins = "PL10";
- function = "gpio_out";
- };
-
- sw_r_opc: key_pins {
- pins = "PL3", "PL4";
- function = "gpio_in";
- };
-
- wifi_pwrseq_pin_orangepi: wifi_pwrseq_pin {
- pins = "PL7";
- function = "gpio_out";
- };
-};
-
&reg_usb1_vbus {
gpio = <&pio 6 13 GPIO_ACTIVE_HIGH>;
status = "okay";
@@ -210,7 +180,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
index 476ae8e387ca..6f9c97add54e 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-lite.dts
@@ -74,8 +74,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
pwr_led {
label = "orangepi:green:pwr";
@@ -91,8 +89,6 @@
r_gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&sw_r_opc>;
sw4 {
label = "sw4";
@@ -126,7 +122,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -160,28 +156,9 @@
status = "okay";
};
-&pio {
- leds_opc: led_pins {
- pins = "PA15";
- function = "gpio_out";
- };
-};
-
-&r_pio {
- leds_r_opc: led_pins {
- pins = "PL10";
- function = "gpio_out";
- };
-
- sw_r_opc: key_pins {
- pins = "PL3";
- function = "gpio_in";
- };
-};
-
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
index 245fd658defb..840849169bed 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-one.dts
@@ -73,8 +73,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
pwr_led {
label = "orangepi:green:pwr";
@@ -90,8 +88,6 @@
r_gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&sw_r_opc>;
sw4 {
label = "sw4";
@@ -166,25 +162,6 @@
status = "okay";
};
-&pio {
- leds_opc: led_pins {
- pins = "PA15";
- function = "gpio_out";
- };
-};
-
-&r_pio {
- leds_r_opc: led_pins {
- pins = "PL10";
- function = "gpio_out";
- };
-
- sw_r_opc: key_pins {
- pins = "PL3";
- function = "gpio_in";
- };
-};
-
&reg_usb0_vbus {
gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
status = "okay";
@@ -192,7 +169,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
index 46240334128f..5aff8ecc66cb 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-pc.dts
@@ -73,8 +73,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
- pinctrl-0 = <&leds_opc>, <&leds_r_opc>;
pwr_led {
label = "orangepi:green:pwr";
@@ -90,8 +88,6 @@
r_gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
- pinctrl-0 = <&sw_r_opc>;
sw4 {
label = "sw4";
@@ -152,7 +148,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -179,13 +175,6 @@
status = "okay";
};
-&pio {
- leds_opc: led_pins {
- pins = "PA15";
- function = "gpio_out";
- };
-};
-
&r_i2c {
status = "okay";
@@ -210,18 +199,6 @@
};
};
-&r_pio {
- leds_r_opc: led_pins {
- pins = "PL10";
- function = "gpio_out";
- };
-
- sw_r_opc: key_pins {
- pins = "PL3";
- function = "gpio_in";
- };
-};
-
&reg_usb0_vbus {
gpio = <&r_pio 0 2 GPIO_ACTIVE_HIGH>; /* PL2 */
status = "okay";
@@ -229,7 +206,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
index ac8438c2cff1..97f497854e05 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-plus.dts
@@ -63,8 +63,6 @@
reg_usb3_vbus: usb3-vbus {
compatible = "regulator-fixed";
- pinctrl-names = "default";
- pinctrl-0 = <&usb3_vbus_pin_a>;
regulator-name = "usb3-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -116,13 +114,6 @@
bias-pull-up;
};
-&pio {
- usb3_vbus_pin_a: usb3_vbus_pin {
- pins = "PG11";
- function = "gpio_out";
- };
-};
-
&r_i2c {
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts b/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
index c834048c325e..b8f46e2802fd 100644
--- a/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-orangepi-zero-plus2.dts
@@ -79,7 +79,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
post-power-on-delay-ms = <200>;
};
@@ -135,6 +134,6 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-h3-rervision-dvk.dts b/arch/arm/boot/dts/sun8i-h3-rervision-dvk.dts
new file mode 100644
index 000000000000..4738f3a9efe4
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-h3-rervision-dvk.dts
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Bootlin
+ * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+ */
+
+/dts-v1/;
+#include "sun8i-h3.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+ model = "RerVision H3-DVK";
+ compatible = "rervision,h3-dvk", "allwinner,sun8i-h3";
+
+ aliases {
+ ethernet0 = &emac;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+};
+
+&de {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&ehci2 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&emac {
+ phy-handle = <&int_mii_phy>;
+ phy-mode = "mii";
+ allwinner,leds-active-low;
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&mmc0 {
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ status = "okay";
+ vmmc-supply = <&reg_vcc3v3>;
+};
+
+&mmc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_8bit_pins>;
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&ohci2 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pa_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ status = "okay";
+ dr_mode = "peripheral";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-h3.dtsi b/arch/arm/boot/dts/sun8i-h3.dtsi
index 959d265e7254..e37c30e811d3 100644
--- a/arch/arm/boot/dts/sun8i-h3.dtsi
+++ b/arch/arm/boot/dts/sun8i-h3.dtsi
@@ -231,3 +231,7 @@
&rtc {
compatible = "allwinner,sun8i-h3-rtc";
};
+
+&sid {
+ compatible = "allwinner,sun8i-h3-sid";
+};
diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi
index 53104f4ccacc..3d9a1524e17e 100644
--- a/arch/arm/boot/dts/sun8i-q8-common.dtsi
+++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi
@@ -54,16 +54,9 @@
backlight = <&backlight>;
enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
power-supply = <&reg_dc1sw>;
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel_input: endpoint@0 {
- reg = <0>;
+ port {
+ panel_input: endpoint {
remote-endpoint = <&tcon0_out_lcd>;
};
};
@@ -120,13 +113,6 @@
status = "okay";
};
-&tcon0_out {
- tcon0_out_lcd: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&panel_input>;
- };
-};
-
&usbphy {
usb1_vbus-supply = <&reg_dldo1>;
};
diff --git a/arch/arm/boot/dts/sun8i-r16-nintendo-nes-classic.dts b/arch/arm/boot/dts/sun8i-r16-nintendo-nes-classic.dts
index 32cf1ab33aab..246dec5846a4 100644
--- a/arch/arm/boot/dts/sun8i-r16-nintendo-nes-classic.dts
+++ b/arch/arm/boot/dts/sun8i-r16-nintendo-nes-classic.dts
@@ -34,8 +34,6 @@
/* 2Gb Macronix MX30LF2G18AC (3V) */
nand@0 {
- #address-cells = <1>;
- #size-cells = <1>;
reg = <0>;
allwinner,rb = <0>;
nand-ecc-mode = "hw";
diff --git a/arch/arm/boot/dts/sun8i-r16-parrot.dts b/arch/arm/boot/dts/sun8i-r16-parrot.dts
index 316998e9ec5d..4f48eec6b2ef 100644
--- a/arch/arm/boot/dts/sun8i-r16-parrot.dts
+++ b/arch/arm/boot/dts/sun8i-r16-parrot.dts
@@ -158,14 +158,6 @@
status = "okay";
};
-&pio {
- usb0_id_det: usb0-id-detect-pin {
- pins = "PD10";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&r_rsb {
status = "okay";
@@ -314,10 +306,8 @@
&usbphy {
status = "okay";
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_det>;
usb0_vbus-supply = <&reg_drivevbus>;
- usb0_id_det-gpios = <&pio 3 10 GPIO_ACTIVE_HIGH>; /* PD10 */
+ usb0_id_det-gpios = <&pio 3 10 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PD10 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb1_vbus-supply = <&reg_usb1_vbus>;
};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index 06b685869f52..bb856e53b806 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -273,7 +273,6 @@
clocks = <&ccu CLK_BUS_EHCI1>;
resets = <&ccu RST_BUS_EHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -285,7 +284,6 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -296,7 +294,6 @@
clocks = <&ccu CLK_BUS_EHCI2>;
resets = <&ccu RST_BUS_EHCI2>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -308,7 +305,6 @@
<&ccu CLK_USB_OHCI2>;
resets = <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -562,9 +558,7 @@
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
resets = <&ccu RST_BUS_SATA>;
- resets-name = "ahci";
- #address-cells = <1>;
- #size-cells = <0>;
+ reset-names = "ahci";
status = "disabled";
};
@@ -614,12 +608,9 @@
#size-cells = <0>;
tcon_top_mixer0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon_top_mixer0_in_mixer0: endpoint@0 {
- reg = <0>;
+ tcon_top_mixer0_in_mixer0: endpoint {
remote-endpoint = <&mixer0_out_tcon_top>;
};
};
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 189e479eb95a..b3d8b8f056cd 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -86,14 +86,6 @@
status = "okay";
};
-&pio {
- usb0_id_detect_pin: usb0-id-detect-pin {
- pins = "PH8";
- function = "gpio_in";
- bias-pull-up;
- };
-};
-
&r_rsb {
status = "okay";
@@ -224,9 +216,7 @@
};
&usbphy {
- pinctrl-names = "default";
- pinctrl-0 = <&usb0_id_detect_pin>;
- usb0_id_det-gpio = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */
+ usb0_id_det-gpios = <&pio 7 8 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH8 */
usb0_vbus_power-supply = <&usb_power_supply>;
usb0_vbus-supply = <&reg_drivevbus>;
status = "okay";
diff --git a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts
index 99c8cf7bb86c..2e4587d26ce5 100644
--- a/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts
+++ b/arch/arm/boot/dts/sun8i-v3s-licheepi-zero.dts
@@ -96,6 +96,6 @@
};
&usbphy {
- usb0_id_det-gpio = <&pio 5 6 GPIO_ACTIVE_HIGH>;
+ usb0_id_det-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
index 21e1806ca509..df72b1719c34 100644
--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
+++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
@@ -129,12 +129,9 @@
#size-cells = <0>;
mixer0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- mixer0_out_tcon0: endpoint@0 {
- reg = <0>;
+ mixer0_out_tcon0: endpoint {
remote-endpoint = <&tcon0_in_mixer0>;
};
};
@@ -150,6 +147,7 @@
clock-names = "ahb",
"tcon-ch0";
clock-output-names = "tcon-pixel-clock";
+ #clock-cells = <0>;
resets = <&ccu RST_BUS_TCON0>;
reset-names = "lcd";
status = "disabled";
@@ -159,12 +157,9 @@
#size-cells = <0>;
tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon0_in_mixer0: endpoint@0 {
- reg = <0>;
+ tcon0_in_mixer0: endpoint {
remote-endpoint = <&mixer0_out_tcon0>;
};
};
@@ -419,7 +414,7 @@
};
gic: interrupt-controller@1c81000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c81000 0x1000>,
<0x01c82000 0x1000>,
<0x01c84000 0x2000>,
diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
index bf97f6244c23..f05cabd34b8e 100644
--- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
+++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
@@ -105,6 +105,24 @@
#include "axp22x.dtsi"
+&mmc0 {
+ vmmc-supply = <&reg_dcdc1>;
+ bus-width = <4>;
+ cd-gpios = <&pio 7 13 GPIO_ACTIVE_LOW>; /* PH13 */
+ status = "okay";
+};
+
+&mmc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pg_pins>;
+ vmmc-supply = <&reg_dldo2>;
+ vqmmc-supply = <&reg_dldo1>;
+ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+};
+
&reg_aldo3 {
regulator-always-on;
regulator-min-microvolt = <2700000>;
@@ -152,24 +170,6 @@
regulator-name = "vcc-wifi";
};
-&mmc0 {
- vmmc-supply = <&reg_dcdc1>;
- bus-width = <4>;
- cd-gpios = <&pio 7 13 GPIO_ACTIVE_LOW>; /* PH13 */
- status = "okay";
-};
-
-&mmc1 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pg_pins>;
- vmmc-supply = <&reg_dldo2>;
- vqmmc-supply = <&reg_dldo1>;
- mmc-pwrseq = <&wifi_pwrseq>;
- bus-width = <4>;
- non-removable;
- status = "okay";
-};
-
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pb_pins>;
diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
index 28c034928d67..18156ffa3ce9 100644
--- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
+++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
@@ -89,31 +89,23 @@
vga-dac {
compatible = "corpro,gm7123", "adi,adv7123", "dumb-vga-dac";
vdd-supply = <&reg_dcdc1>;
- #address-cells = <1>;
- #size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- vga_dac_in: endpoint@0 {
- reg = <0>;
+ vga_dac_in: endpoint {
remote-endpoint = <&tcon0_out_vga>;
};
};
port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- vga_dac_out: endpoint@0 {
- reg = <0>;
+ vga_dac_out: endpoint {
remote-endpoint = <&vga_con_in>;
};
};
@@ -502,8 +494,7 @@
};
&tcon0_out {
- tcon0_out_vga: endpoint@0 {
- reg = <0>;
+ tcon0_out_vga: endpoint {
remote-endpoint = <&vga_dac_in>;
};
};
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 864715ec3cb0..2ed28d9e2787 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -82,7 +82,7 @@
reg_usb1_vbus: usb1-vbus {
compatible = "regulator-fixed";
- pinctrl-names = "default";
+ regulator-name = "usb1-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
@@ -91,7 +91,7 @@
reg_usb3_vbus: usb3-vbus {
compatible = "regulator-fixed";
- pinctrl-names = "default";
+ regulator-name = "usb3-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
enable-active-high;
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 6fb292e0b662..0c1eec9000e3 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -289,7 +289,7 @@
status = "disabled";
};
- soc {
+ soc@20000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
@@ -342,7 +342,6 @@
clocks = <&usb_clocks CLK_BUS_HCI0>;
resets = <&usb_clocks RST_USB0_HCI>;
phys = <&usbphy1>;
- phy-names = "usb";
status = "disabled";
};
@@ -354,7 +353,6 @@
<&usb_clocks CLK_USB_OHCI0>;
resets = <&usb_clocks RST_USB0_HCI>;
phys = <&usbphy1>;
- phy-names = "usb";
status = "disabled";
};
@@ -376,7 +374,6 @@
clocks = <&usb_clocks CLK_BUS_HCI1>;
resets = <&usb_clocks RST_USB1_HCI>;
phys = <&usbphy2>;
- phy-names = "usb";
status = "disabled";
};
@@ -406,7 +403,6 @@
clocks = <&usb_clocks CLK_BUS_HCI2>;
resets = <&usb_clocks RST_USB2_HCI>;
phys = <&usbphy3>;
- phy-names = "usb";
status = "disabled";
};
@@ -418,7 +414,6 @@
<&usb_clocks CLK_USB_OHCI2>;
resets = <&usb_clocks RST_USB2_HCI>;
phys = <&usbphy3>;
- phy-names = "usb";
status = "disabled";
};
@@ -527,7 +522,7 @@
};
gic: interrupt-controller@1c41000 {
- compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+ compatible = "arm,gic-400";
reg = <0x01c41000 0x1000>,
<0x01c42000 0x2000>,
<0x01c44000 0x2000>,
@@ -596,12 +591,9 @@
#size-cells = <0>;
fe0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- fe0_out_deu0: endpoint@0 {
- reg = <0>;
+ fe0_out_deu0: endpoint {
remote-endpoint = <&deu0_in_fe0>;
};
};
@@ -623,12 +615,9 @@
#size-cells = <0>;
fe1_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- fe1_out_deu1: endpoint@0 {
- reg = <0>;
+ fe1_out_deu1: endpoint {
remote-endpoint = <&deu1_in_fe1>;
};
};
@@ -666,12 +655,9 @@
};
be0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- be0_out_drc0: endpoint@0 {
- reg = <0>;
+ be0_out_drc0: endpoint {
remote-endpoint = <&drc0_in_be0>;
};
};
@@ -709,12 +695,9 @@
};
be1_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- be1_out_drc1: endpoint@0 {
- reg = <0>;
+ be1_out_drc1: endpoint {
remote-endpoint = <&drc1_in_be1>;
};
};
@@ -738,12 +721,9 @@
#size-cells = <0>;
deu0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- deu0_in_fe0: endpoint@0 {
- reg = <0>;
+ deu0_in_fe0: endpoint {
remote-endpoint = <&fe0_out_deu0>;
};
};
@@ -783,12 +763,9 @@
#size-cells = <0>;
deu1_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- deu1_in_fe1: endpoint@0 {
- reg = <0>;
+ deu1_in_fe1: endpoint {
remote-endpoint = <&fe1_out_deu1>;
};
};
@@ -828,23 +805,17 @@
#size-cells = <0>;
drc0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- drc0_in_be0: endpoint@0 {
- reg = <0>;
+ drc0_in_be0: endpoint {
remote-endpoint = <&be0_out_drc0>;
};
};
drc0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- drc0_out_tcon0: endpoint@0 {
- reg = <0>;
+ drc0_out_tcon0: endpoint {
remote-endpoint = <&tcon0_in_drc0>;
};
};
@@ -868,23 +839,17 @@
#size-cells = <0>;
drc1_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- drc1_in_be1: endpoint@0 {
- reg = <0>;
+ drc1_in_be1: endpoint {
remote-endpoint = <&be1_out_drc1>;
};
};
drc1_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- drc1_out_tcon1: endpoint@0 {
- reg = <0>;
+ drc1_out_tcon1: endpoint {
remote-endpoint = <&tcon1_in_drc1>;
};
};
@@ -900,25 +865,21 @@
resets = <&ccu RST_BUS_LCD0>, <&ccu RST_BUS_EDP>;
reset-names = "lcd", "edp";
clock-output-names = "tcon0-pixel-clock";
+ #clock-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
tcon0_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon0_in_drc0: endpoint@0 {
- reg = <0>;
+ tcon0_in_drc0: endpoint {
remote-endpoint = <&drc0_out_tcon0>;
};
};
tcon0_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
};
};
@@ -938,19 +899,14 @@
#size-cells = <0>;
tcon1_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- tcon1_in_drc1: endpoint@0 {
- reg = <0>;
+ tcon1_in_drc1: endpoint {
remote-endpoint = <&drc1_out_tcon1>;
};
};
tcon1_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
};
};
@@ -997,15 +953,13 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <3>;
- #size-cells = <0>;
#gpio-cells = <3>;
gmac_rgmii_pins: gmac-rgmii-pins {
- allwinner,pins = "PA0", "PA1", "PA2", "PA3",
- "PA4", "PA5", "PA7", "PA8",
- "PA9", "PA10", "PA12", "PA13",
- "PA15", "PA16", "PA17";
- allwinner,function = "gmac";
+ pins = "PA0", "PA1", "PA2", "PA3", "PA4", "PA5",
+ "PA7", "PA8", "PA9", "PA10", "PA12",
+ "PA13", "PA15", "PA16", "PA17";
+ function = "gmac";
/*
* data lines in RGMII mode use DDR mode
* and need a higher signal drive strength
diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
index 3bed375b9c03..39263e74fbb5 100644
--- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
+++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi
@@ -69,7 +69,6 @@
leds {
compatible = "gpio-leds";
- pinctrl-names = "default";
pwr_led {
label = "bananapi-m2-plus:red:pwr";
@@ -80,7 +79,6 @@
gpio_keys {
compatible = "gpio-keys";
- pinctrl-names = "default";
sw4 {
label = "power";
@@ -101,7 +99,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
clocks = <&rtc 1>;
clock-names = "ext_clock";
@@ -153,7 +150,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -210,7 +207,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index d74a6cbbfdf4..84977d4eb97a 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -227,6 +227,11 @@
#size-cells = <0>;
};
+ sid: eeprom@1c14000 {
+ /* compatible is in per SoC .dtsi file */
+ reg = <0x1c14000 0x400>;
+ };
+
usb_otg: usb@1c19000 {
compatible = "allwinner,sun8i-h3-musb";
reg = <0x01c19000 0x400>;
@@ -237,6 +242,7 @@
phys = <&usbphy 0>;
phy-names = "usb";
extcon = <&usbphy 0>;
+ dr_mode = "otg";
status = "disabled";
};
@@ -298,7 +304,6 @@
clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -310,7 +315,6 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -321,7 +325,6 @@
clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -333,7 +336,6 @@
<&ccu CLK_USB_OHCI2>;
resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
phys = <&usbphy 2>;
- phy-names = "usb";
status = "disabled";
};
@@ -344,7 +346,6 @@
clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -356,7 +357,6 @@
<&ccu CLK_USB_OHCI3>;
resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
phys = <&usbphy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -381,14 +381,14 @@
interrupt-controller;
#interrupt-cells = <3>;
- csi_pins: csi {
+ csi_pins: csi-pins {
pins = "PE0", "PE2", "PE3", "PE4", "PE5",
"PE6", "PE7", "PE8", "PE9", "PE10",
"PE11";
function = "csi";
};
- emac_rgmii_pins: emac0 {
+ emac_rgmii_pins: emac-rgmii-pins {
pins = "PD0", "PD1", "PD2", "PD3", "PD4",
"PD5", "PD7", "PD8", "PD9", "PD10",
"PD12", "PD13", "PD15", "PD16", "PD17";
@@ -396,22 +396,22 @@
drive-strength = <40>;
};
- i2c0_pins: i2c0 {
+ i2c0_pins: i2c0-pins {
pins = "PA11", "PA12";
function = "i2c0";
};
- i2c1_pins: i2c1 {
+ i2c1_pins: i2c1-pins {
pins = "PA18", "PA19";
function = "i2c1";
};
- i2c2_pins: i2c2 {
+ i2c2_pins: i2c2-pins {
pins = "PE12", "PE13";
function = "i2c2";
};
- mmc0_pins: mmc0 {
+ mmc0_pins: mmc0-pins {
pins = "PF0", "PF1", "PF2", "PF3",
"PF4", "PF5";
function = "mmc0";
@@ -419,7 +419,7 @@
bias-pull-up;
};
- mmc1_pins: mmc1 {
+ mmc1_pins: mmc1-pins {
pins = "PG0", "PG1", "PG2", "PG3",
"PG4", "PG5";
function = "mmc1";
@@ -427,7 +427,7 @@
bias-pull-up;
};
- mmc2_8bit_pins: mmc2_8bit {
+ mmc2_8bit_pins: mmc2-8bit-pins {
pins = "PC5", "PC6", "PC8",
"PC9", "PC10", "PC11",
"PC12", "PC13", "PC14",
@@ -437,47 +437,47 @@
bias-pull-up;
};
- spdif_tx_pins_a: spdif {
+ spdif_tx_pin: spdif-tx-pin {
pins = "PA17";
function = "spdif";
};
- spi0_pins: spi0 {
+ spi0_pins: spi0-pins {
pins = "PC0", "PC1", "PC2", "PC3";
function = "spi0";
};
- spi1_pins: spi1 {
+ spi1_pins: spi1-pins {
pins = "PA15", "PA16", "PA14", "PA13";
function = "spi1";
};
- uart0_pins_a: uart0 {
+ uart0_pa_pins: uart0-pa-pins {
pins = "PA4", "PA5";
function = "uart0";
};
- uart1_pins: uart1 {
+ uart1_pins: uart1-pins {
pins = "PG6", "PG7";
function = "uart1";
};
- uart1_rts_cts_pins: uart1_rts_cts {
+ uart1_rts_cts_pins: uart1-rts-cts-pins {
pins = "PG8", "PG9";
function = "uart1";
};
- uart2_pins: uart2 {
+ uart2_pins: uart2-pins {
pins = "PA0", "PA1";
function = "uart2";
};
- uart3_pins: uart3 {
+ uart3_pins: uart3-pins {
pins = "PA13", "PA14";
function = "uart3";
};
- uart3_rts_cts_pins: uart3_rts_cts {
+ uart3_rts_cts_pins: uart3-rts-cts-pins {
pins = "PA15", "PA16";
function = "uart3";
};
@@ -855,12 +855,12 @@
interrupt-controller;
#interrupt-cells = <3>;
- ir_pins_a: ir {
+ r_ir_rx_pin: r-ir-rx-pin {
pins = "PL11";
function = "s_cir_rx";
};
- r_i2c_pins: r-i2c {
+ r_i2c_pins: r-i2c-pins {
pins = "PL0", "PL1";
function = "s_i2c";
};
diff --git a/arch/arm/boot/dts/sunxi-libretech-all-h3-cc.dtsi b/arch/arm/boot/dts/sunxi-libretech-all-h3-cc.dtsi
index 1eadc132390c..19b3b23cfaa8 100644
--- a/arch/arm/boot/dts/sunxi-libretech-all-h3-cc.dtsi
+++ b/arch/arm/boot/dts/sunxi-libretech-all-h3-cc.dtsi
@@ -167,7 +167,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -205,7 +205,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/tegra124-apalis-emc.dtsi b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
index ca2c3a557895..d18eaf4a4a3a 100644
--- a/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis-emc.dtsi
@@ -1,42 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
/*
- * Copyright 2016 Toradex AG
+ * Copyright 2016-2019 Toradex AG
*
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/ {
diff --git a/arch/arm/boot/dts/tegra124-apalis-eval.dts b/arch/arm/boot/dts/tegra124-apalis-eval.dts
index eaee10ef6512..ceb3f6388c7d 100644
--- a/arch/arm/boot/dts/tegra124-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra124-apalis-eval.dts
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
/*
- * Copyright 2016-2018 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2019 Toradex AG
*/
/dts-v1/;
diff --git a/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts b/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts
index 7961eb4bd803..826b776fbe6f 100644
--- a/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts
+++ b/arch/arm/boot/dts/tegra124-apalis-v1.2-eval.dts
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2018 Toradex AG
*/
diff --git a/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi b/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
index 367eb8c86098..0462ed2dd8b8 100644
--- a/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis-v1.2.dtsi
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Copyright 2016-2018 Toradex AG
*/
@@ -17,6 +17,7 @@
pcie@1003000 {
status = "okay";
+
avddio-pex-supply = <&reg_1v05_vdd>;
avdd-pex-pll-supply = <&reg_1v05_vdd>;
avdd-pll-erefe-supply = <&reg_1v05_avdd>;
@@ -1796,6 +1797,7 @@
<&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>,
<&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>;
phy-names = "usb2-0", "usb3-1", "usb2-1", "usb2-2", "usb3-0";
+
avddio-pex-supply = <&reg_1v05_vdd>;
avdd-pll-erefe-supply = <&reg_1v05_avdd>;
avdd-pll-utmip-supply = <&reg_1v8_vddio>;
@@ -1807,6 +1809,11 @@
};
padctl@7009f000 {
+ avdd-pll-utmip-supply = <&reg_1v8_vddio>;
+ avdd-pll-erefe-supply = <&reg_1v05_avdd>;
+ avdd-pex-pll-supply = <&reg_1v05_vdd>;
+ hvdd-pex-pll-e-supply = <&reg_module_3v3>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra124-apalis.dtsi b/arch/arm/boot/dts/tegra124-apalis.dtsi
index 13c93cd507d8..d1e8593ef0d9 100644
--- a/arch/arm/boot/dts/tegra124-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra124-apalis.dtsi
@@ -1,42 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR X11
/*
- * Copyright 2016-2018 Toradex AG
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright 2016-2019 Toradex AG
*/
#include "tegra124.dtsi"
@@ -1837,6 +1801,11 @@
};
padctl@7009f000 {
+ avdd-pll-utmip-supply = <&reg_1v8_vddio>;
+ avdd-pll-erefe-supply = <&reg_1v05_avdd>;
+ avdd-pex-pll-supply = <&reg_1v05_vdd>;
+ hvdd-pex-pll-e-supply = <&reg_module_3v3>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 33bbb1c5285d..d5fd642f8b77 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1721,6 +1721,11 @@
padctl@7009f000 {
status = "okay";
+ avdd-pll-utmip-supply = <&vddio_1v8>;
+ avdd-pll-erefe-supply = <&avdd_1v05_run>;
+ avdd-pex-pll-supply = <&vdd_1v05_run>;
+ hvdd-pex-pll-e-supply = <&vdd_3v3_lp0>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index a1acd872bcf2..3b10f475037f 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -414,6 +414,11 @@
padctl@7009f000 {
status = "okay";
+ avdd-pll-utmip-supply = <&vddio_1v8>;
+ avdd-pll-erefe-supply = <&avdd_1v05_run>;
+ avdd-pex-pll-supply = <&vdd_1v05_run>;
+ hvdd-pex-pll-e-supply = <&vdd_3v3_lp0>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 4882b61fb680..5d5e6e18bc7b 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -921,6 +921,11 @@
};
padctl@7009f000 {
+ avdd-pll-utmip-supply = <&vddio_1v8>;
+ avdd-pll-erefe-supply = <&avdd_1v05_run>;
+ avdd-pex-pll-supply = <&vdd_1v05_run>;
+ hvdd-pex-pll-e-supply = <&vdd_3v3_lp0>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index d2b553f76719..e074258d4518 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -370,6 +370,17 @@
reg = <0x6000c000 0x150>; /* AHB Arbitration + Gizmo Controller */
};
+ actmon@6000c800 {
+ compatible = "nvidia,tegra30-actmon";
+ reg = <0x6000c800 0x400>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA30_CLK_ACTMON>,
+ <&tegra_car TEGRA30_CLK_EMC>;
+ clock-names = "actmon", "emc";
+ resets = <&tegra_car TEGRA30_CLK_ACTMON>;
+ reset-names = "actmon";
+ };
+
gpio: gpio@6000d000 {
compatible = "nvidia,tegra30-gpio";
reg = <0x6000d000 0x1000>;
diff --git a/arch/arm/boot/dts/vf610-zii-cfu1.dts b/arch/arm/boot/dts/vf610-zii-cfu1.dts
index 445c7dc306b2..9466913693ac 100644
--- a/arch/arm/boot/dts/vf610-zii-cfu1.dts
+++ b/arch/arm/boot/dts/vf610-zii-cfu1.dts
@@ -29,35 +29,30 @@
label = "zii:green:debug1";
gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
- max-brightness = <1>;
};
led-fail {
label = "zii:red:fail";
gpios = <&gpio3 12 GPIO_ACTIVE_LOW>;
default-state = "off";
- max-brightness = <1>;
};
led-status {
label = "zii:green:status";
gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>;
default-state = "off";
- max-brightness = <1>;
};
led-debug-a {
label = "zii:green:debug_a";
gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
default-state = "off";
- max-brightness = <1>;
};
led-debug-b {
label = "zii:green:debug_b";
gpios = <&gpio3 15 GPIO_ACTIVE_HIGH>;
default-state = "off";
- max-brightness = <1>;
};
};
@@ -92,9 +87,14 @@
bus-num = <1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dspi1>;
- status = "okay";
-
- m25p128@0 {
+ /*
+ * Some CFU1s come with SPI-NOR chip DNPed, so we leave this
+ * node disabled by default and rely on bootloader to enable
+ * it when appropriate.
+ */
+ status = "disabled";
+
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "m25p128", "jedec,spi-nor";
@@ -212,7 +212,7 @@
pinctrl-0 = <&pinctrl_i2c0>;
status = "okay";
- pca9554@22 {
+ io-expander@22 {
compatible = "nxp,pca9554";
reg = <0x22>;
gpio-controller;
@@ -223,19 +223,23 @@
reg = <0x48>;
};
- at24c04@52 {
+ eeprom@52 {
compatible = "atmel,24c04";
reg = <0x52>;
label = "nvm";
};
- at24c04@54 {
+ eeprom@54 {
compatible = "atmel,24c04";
reg = <0x54>;
label = "nameplate";
};
};
+&snvsrtc {
+ status = "disabled";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
index bd79e00bf615..48086c5e8549 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
@@ -1,45 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2015, 2016 Zodiac Inflight Innovations
- *
- * Based on an original 'vf610-twr.dts' which is Copyright 2015,
- * Freescale Semiconductor, Inc.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
@@ -334,11 +295,11 @@
gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
gpio-mosi = <&gpio1 11 GPIO_ACTIVE_HIGH>;
gpio-miso = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- cs-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH
+ cs-gpios = <&gpio1 9 GPIO_ACTIVE_LOW
&gpio1 8 GPIO_ACTIVE_HIGH>;
num-chipselects = <2>;
- m25p128@0 {
+ flash@0 {
compatible = "m25p128", "jedec,spi-nor";
#address-cells = <1>;
#size-cells = <1>;
@@ -367,7 +328,7 @@
pinctrl-0 = <&pinctrl_i2c0>;
status = "okay";
- gpio5: pca9554@20 {
+ gpio5: io-expander@20 {
compatible = "nxp,pca9554";
reg = <0x20>;
gpio-controller;
@@ -375,7 +336,7 @@
};
- gpio6: pca9554@22 {
+ gpio6: io-expander@22 {
compatible = "nxp,pca9554";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pca9554_22>;
@@ -408,7 +369,7 @@
#size-cells = <0>;
reg = <0>;
- sfp1: at24c04@50 {
+ sfp1: eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
};
@@ -419,7 +380,7 @@
#size-cells = <0>;
reg = <1>;
- sfp2: at24c04@50 {
+ sfp2: eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
};
@@ -430,7 +391,7 @@
#size-cells = <0>;
reg = <2>;
- sfp3: at24c04@50 {
+ sfp3: eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
};
@@ -441,7 +402,7 @@
#size-cells = <0>;
reg = <3>;
- sfp4: at24c04@50 {
+ sfp4: eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
};
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
index 6f4a5602cefd..778e02c000d1 100644
--- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
+++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
@@ -1,45 +1,6 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Copyright (C) 2015, 2016 Zodiac Inflight Innovations
- *
- * Based on an original 'vf610-twr.dts' which is Copyright 2015,
- * Freescale Semiconductor, Inc.
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED , WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
*/
/dts-v1/;
@@ -277,7 +238,7 @@
status = "okay";
spi-num-chipselects = <2>;
- m25p128@0 {
+ flash@0 {
compatible = "m25p128", "jedec,spi-nor";
#address-cells = <1>;
#size-cells = <1>;
@@ -313,7 +274,7 @@
* P1 - WE2_CMD
* P2 - WE2_CLK
*/
- gpio5: pca9557@18 {
+ gpio5: io-expander@18 {
compatible = "nxp,pca9557";
reg = <0x18>;
gpio-controller;
@@ -361,7 +322,7 @@
* IO0 - WE1_CLK
* IO1 - WE1_CMD
*/
- gpio7: pca9554@22 {
+ gpio7: io-expander@22 {
compatible = "nxp,pca9554";
reg = <0x22>;
gpio-controller;
@@ -371,7 +332,7 @@
};
&i2c1 {
- at24mac602@50 {
+ eeprom@50 {
compatible = "atmel,24c02";
reg = <0x50>;
read-only;
diff --git a/arch/arm/boot/dts/vf610-zii-dev.dtsi b/arch/arm/boot/dts/vf610-zii-dev.dtsi
index 19eb4a849efb..0507e6dcbb21 100644
--- a/arch/arm/boot/dts/vf610-zii-dev.dtsi
+++ b/arch/arm/boot/dts/vf610-zii-dev.dtsi
@@ -138,7 +138,7 @@
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c0>;
pinctrl-1 = <&pinctrl_i2c0_gpio>;
- scl-gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio1 4 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
status = "okay";
@@ -147,12 +147,12 @@
reg = <0x48>;
};
- at24c04@50 {
+ eeprom@50 {
compatible = "atmel,24c04";
reg = <0x50>;
};
- at24c04@52 {
+ eeprom@52 {
compatible = "atmel,24c04";
reg = <0x52>;
};
diff --git a/arch/arm/boot/dts/vf610-zii-scu4-aib.dts b/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
index de6dfa57bec5..d7019e89f588 100644
--- a/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
+++ b/arch/arm/boot/dts/vf610-zii-scu4-aib.dts
@@ -505,14 +505,14 @@
pinctrl-0 = <&pinctrl_i2c0>;
status = "okay";
- gpio5: pca9554@20 {
+ gpio5: io-expander@20 {
compatible = "nxp,pca9554";
reg = <0x20>;
gpio-controller;
#gpio-cells = <2>;
};
- gpio6: pca9554@22 {
+ gpio6: io-expander@22 {
compatible = "nxp,pca9554";
reg = <0x22>;
gpio-controller;
@@ -524,12 +524,12 @@
reg = <0x48>;
};
- at24c04@50 {
+ eeprom@50 {
compatible = "atmel,24c04";
reg = <0x50>;
};
- at24c04@52 {
+ eeprom@52 {
compatible = "atmel,24c04";
reg = <0x52>;
};
@@ -577,7 +577,7 @@
reg = <0x4f>;
};
- gpio7: pca9555@23 {
+ gpio7: io-expander@23 {
compatible = "nxp,pca9555";
gpio-controller;
#gpio-cells = <2>;
@@ -671,6 +671,10 @@
};
};
+&snvsrtc {
+ status = "disabled";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
diff --git a/arch/arm/boot/dts/vf610-zii-spb4.dts b/arch/arm/boot/dts/vf610-zii-spb4.dts
new file mode 100644
index 000000000000..9dde83ccb9d1
--- /dev/null
+++ b/arch/arm/boot/dts/vf610-zii-spb4.dts
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+/*
+ * Device tree file for ZII's SPB4 board
+ *
+ * SPB - Seat Power Box
+ *
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+
+/dts-v1/;
+#include "vf610.dtsi"
+
+/ {
+ model = "ZII VF610 SPB4 Board";
+ compatible = "zii,vf610spb4", "zii,vf610dev", "fsl,vf610";
+
+ chosen {
+ stdout-path = &uart0;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+ pinctrl-0 = <&pinctrl_leds_debug>;
+ pinctrl-names = "default";
+
+ led-debug {
+ label = "zii:green:debug1";
+ gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ reg_vcc_3v3_mcu: regulator-vcc-3v3-mcu {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_mcu";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+};
+
+&adc0 {
+ vref-supply = <&reg_vcc_3v3_mcu>;
+ status = "okay";
+};
+
+&adc1 {
+ vref-supply = <&reg_vcc_3v3_mcu>;
+ status = "okay";
+};
+
+&dspi1 {
+ bus-num = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dspi1>;
+ status = "okay";
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p128", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&edma0 {
+ status = "okay";
+};
+
+&edma1 {
+ status = "okay";
+};
+
+&esdhc0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc0>;
+ bus-width = <8>;
+ non-removable;
+ no-1-8-v;
+ keep-power-in-suspend;
+ no-sdio;
+ no-sd;
+ status = "okay";
+};
+
+&esdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_esdhc1>;
+ bus-width = <4>;
+ no-sdio;
+ status = "okay";
+};
+
+&fec1 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ status = "okay";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+
+ mdio1: mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ switch0: switch0@0 {
+ compatible = "marvell,mv88e6190";
+ pinctrl-0 = <&pinctrl_gpio_switch0>;
+ pinctrl-names = "default";
+ reg = <0>;
+ eeprom-length = <65536>;
+ reset-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&fec1>;
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "eth_cu_1000_1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "eth_cu_1000_2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "eth_cu_1000_3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "eth_cu_1000_4";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "eth_cu_1000_5";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "eth_cu_1000_6";
+ };
+ };
+ };
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c0>;
+ status = "okay";
+
+ io-expander@22 {
+ compatible = "nxp,pca9554";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c04";
+ reg = <0x50>;
+ label = "nameplate";
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c04";
+ reg = <0x52>;
+ };
+};
+
+&snvsrtc {
+ status = "disabled";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+
+ rave-sp {
+ compatible = "zii,rave-sp-rdu2";
+ current-speed = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ };
+
+ eeprom@a3 {
+ compatible = "zii,rave-sp-eeprom";
+ reg = <0xa3 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ zii,eeprom-name = "main-eeprom";
+ };
+ };
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ status = "okay";
+};
+
+&wdoga5 {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_dspi1: dspi1grp {
+ fsl,pins = <
+ VF610_PAD_PTD5__DSPI1_CS0 0x1182
+ VF610_PAD_PTD4__DSPI1_CS1 0x1182
+ VF610_PAD_PTC6__DSPI1_SIN 0x1181
+ VF610_PAD_PTC7__DSPI1_SOUT 0x1182
+ VF610_PAD_PTC8__DSPI1_SCK 0x1182
+ >;
+ };
+
+ pinctrl_esdhc0: esdhc0grp {
+ fsl,pins = <
+ VF610_PAD_PTC0__ESDHC0_CLK 0x31ef
+ VF610_PAD_PTC1__ESDHC0_CMD 0x31ef
+ VF610_PAD_PTC2__ESDHC0_DAT0 0x31ef
+ VF610_PAD_PTC3__ESDHC0_DAT1 0x31ef
+ VF610_PAD_PTC4__ESDHC0_DAT2 0x31ef
+ VF610_PAD_PTC5__ESDHC0_DAT3 0x31ef
+ VF610_PAD_PTD23__ESDHC0_DAT4 0x31ef
+ VF610_PAD_PTD22__ESDHC0_DAT5 0x31ef
+ VF610_PAD_PTD21__ESDHC0_DAT6 0x31ef
+ VF610_PAD_PTD20__ESDHC0_DAT7 0x31ef
+ >;
+ };
+
+ pinctrl_esdhc1: esdhc1grp {
+ fsl,pins = <
+ VF610_PAD_PTA24__ESDHC1_CLK 0x31ef
+ VF610_PAD_PTA25__ESDHC1_CMD 0x31ef
+ VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef
+ VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef
+ VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef
+ VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef
+ >;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ VF610_PAD_PTA6__RMII_CLKIN 0x30d1
+ VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2
+ VF610_PAD_PTC10__ENET_RMII1_MDIO 0x30d3
+ VF610_PAD_PTC11__ENET_RMII1_CRS 0x30d1
+ VF610_PAD_PTC12__ENET_RMII1_RXD1 0x30d1
+ VF610_PAD_PTC13__ENET_RMII1_RXD0 0x30d1
+ VF610_PAD_PTC14__ENET_RMII1_RXER 0x30d1
+ VF610_PAD_PTC15__ENET_RMII1_TXD1 0x30d2
+ VF610_PAD_PTC16__ENET_RMII1_TXD0 0x30d2
+ VF610_PAD_PTC17__ENET_RMII1_TXEN 0x30d2
+ >;
+ };
+
+ pinctrl_gpio_switch0: pinctrl-gpio-switch0 {
+ fsl,pins = <
+ VF610_PAD_PTE2__GPIO_107 0x31c2
+ VF610_PAD_PTB28__GPIO_98 0x219d
+ >;
+ };
+
+ pinctrl_i2c0: i2c0grp {
+ fsl,pins = <
+ VF610_PAD_PTB14__I2C0_SCL 0x37ff
+ VF610_PAD_PTB15__I2C0_SDA 0x37ff
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ VF610_PAD_PTB16__I2C1_SCL 0x37ff
+ VF610_PAD_PTB17__I2C1_SDA 0x37ff
+ >;
+ };
+
+ pinctrl_leds_debug: pinctrl-leds-debug {
+ fsl,pins = <
+ VF610_PAD_PTD3__GPIO_82 0x31c2
+ >;
+ };
+
+ pinctrl_uart0: uart0grp {
+ fsl,pins = <
+ VF610_PAD_PTB10__UART0_TX 0x21a2
+ VF610_PAD_PTB11__UART0_RX 0x21a1
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ VF610_PAD_PTB23__UART1_TX 0x21a2
+ VF610_PAD_PTB24__UART1_RX 0x21a1
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ VF610_PAD_PTD0__UART2_TX 0x21a2
+ VF610_PAD_PTD1__UART2_RX 0x21a1
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ VF610_PAD_PTA30__UART3_TX 0x21a2
+ VF610_PAD_PTA31__UART3_RX 0x21a1
+ >;
+ };
+};
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
index 2b10672fadbd..847c5858fea1 100644
--- a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts
@@ -37,7 +37,6 @@
label = "zii:green:debug1";
gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
- max-brightness = <1>;
};
};
@@ -211,6 +210,10 @@
};
};
+&snvsrtc {
+ status = "disabled";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
index 0d9fe5ac83a3..453fce80f858 100644
--- a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
+++ b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts
@@ -37,7 +37,6 @@
label = "zii:green:debug1";
gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
- max-brightness = <1>;
};
};
@@ -70,7 +69,7 @@
*/
status = "disabled";
- m25p128@0 {
+ flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "m25p128", "jedec,spi-nor";
@@ -195,7 +194,7 @@
pinctrl-0 = <&pinctrl_i2c0>;
status = "okay";
- gpio6: pca9505@22 {
+ gpio6: io-expander@22 {
compatible = "nxp,pca9554";
reg = <0x22>;
gpio-controller;
@@ -207,18 +206,22 @@
reg = <0x48>;
};
- at24c04@50 {
+ eeprom@50 {
compatible = "atmel,24c04";
reg = <0x50>;
label = "nameplate";
};
- at24c04@52 {
+ eeprom@52 {
compatible = "atmel,24c04";
reg = <0x52>;
};
};
+&snvsrtc {
+ status = "disabled";
+};
+
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart0>;
@@ -250,6 +253,10 @@
};
};
+&wdoga5 {
+ status = "disabled";
+};
+
&iomuxc {
pinctrl_dspi1: dspi1grp {
fsl,pins = <
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 45412d21aa6b..179ca8757a74 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -32,7 +32,7 @@
#include <mach/hardware.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/hardware/sa1111.h>
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index 1446262921b4..190d6e9d3296 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -23,7 +23,6 @@ CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_GCC_PLUGINS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEBUG_FS is not set
# CONFIG_IOSCHED_DEADLINE is not set
@@ -248,7 +247,6 @@ CONFIG_PANIC_TIMEOUT=-1
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_FUNCTION_TRACER=y
-# CONFIG_TRACING_EVENTS_GPIO is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_DEBUG_WX=y
CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 02fa3a41add5..407ffb7655a8 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -23,7 +23,6 @@ CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_GCC_PLUGINS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEBUG_FS is not set
# CONFIG_IOSCHED_DEADLINE is not set
@@ -248,7 +247,6 @@ CONFIG_PANIC_TIMEOUT=-1
# CONFIG_SCHED_DEBUG is not set
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_FUNCTION_TRACER=y
-# CONFIG_TRACING_EVENTS_GPIO is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_DEBUG_WX=y
CONFIG_DEBUG_USER=y
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index e4b1be66b3f5..a88e31449880 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -9,7 +9,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
@@ -56,7 +55,7 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_DATAFLASH=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
diff --git a/arch/arm/configs/clps711x_defconfig b/arch/arm/configs/clps711x_defconfig
index fc105c9178cc..c255dab36bde 100644
--- a/arch/arm/configs/clps711x_defconfig
+++ b/arch/arm/configs/clps711x_defconfig
@@ -6,7 +6,6 @@ CONFIG_RD_LZMA=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_CLPS711X=y
@@ -36,7 +35,7 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPIO=y
CONFIG_NETDEVICES=y
# CONFIG_NET_CADENCE is not set
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index fb45b4983d3c..5344434df652 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -58,7 +58,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PXA2XX=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPIO=m
CONFIG_MTD_NAND_CM_X270=y
CONFIG_MTD_NAND_PLATFORM=y
diff --git a/arch/arm/configs/cm_x300_defconfig b/arch/arm/configs/cm_x300_defconfig
index 5e349c625b71..3707a014cbc4 100644
--- a/arch/arm/configs/cm_x300_defconfig
+++ b/arch/arm/configs/cm_x300_defconfig
@@ -48,7 +48,7 @@ CONFIG_LIB80211=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index 8995695fc118..8d484e4d51cc 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -64,7 +64,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PXA2XX=y
CONFIG_MTD_BLOCK2MTD=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DISKONCHIP=y
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x4000000
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 09e1672777c9..d99725984947 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -87,7 +87,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_ROM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_SHARPSL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 207962a656a2..4a8cad4d3707 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -74,7 +74,7 @@ CONFIG_MTD_CFI_INTELEXT=m
CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_PHYSMAP=m
CONFIG_MTD_M25P80=m
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_MTD_NAND_DAVINCI=m
CONFIG_MTD_SPI_NOR=m
CONFIG_MTD_UBI=m
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
index ee42158f41ec..10ea92513a69 100644
--- a/arch/arm/configs/efm32_defconfig
+++ b/arch/arm/configs/efm32_defconfig
@@ -11,7 +11,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index 30a67523f860..61228a25ba8d 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -54,7 +54,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PXA2XX=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 78cd73d1c795..14889a785f07 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -63,7 +63,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_ROM=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_BLK_DEV_NBD=y
CONFIG_EEPROM_LEGACY=y
CONFIG_SCSI=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index eabb784cf7da..b85575867d21 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -43,7 +43,7 @@ CONFIG_MAC80211_RC_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
CONFIG_MTD=m
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_MTD_NAND_TMIO=m
CONFIG_BLK_DEV_LOOP=m
# CONFIG_SCSI_PROC_FS is not set
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index d635edfb6ff2..c95c54284da2 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -9,7 +9,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_EXYNOS=y
CONFIG_ARCH_EXYNOS3=y
-CONFIG_EXYNOS5420_MCPM=y
CONFIG_SMP=y
CONFIG_BIG_LITTLE=y
CONFIG_NR_CPUS=8
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 484e51fbd4a6..e3afca5bd9d6 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -13,7 +13,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index ebeca11faa48..175881b7da7c 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -4,7 +4,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index f204017c26b9..9b779e13e05d 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -12,7 +12,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index b37f8e675e40..f2cf0722e8e1 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -61,7 +61,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_CFI_I2 is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_MXC=y
CONFIG_MTD_UBI=y
CONFIG_EEPROM_AT24=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 50fb01d70b10..8116648a8efd 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -110,7 +110,7 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_DATAFLASH=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SST25L=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPMI_NAND=y
CONFIG_MTD_NAND_VF610_NFC=y
CONFIG_MTD_NAND_MXC=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 8c3c99cd6de9..39ebcce3bc2f 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -112,7 +112,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_IXP4XX=y
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 3ded35a07f45..72fee57aad2f 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -124,7 +124,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_PLATRAM=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DAVINCI=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index e752fb704df0..4b3b2c693c29 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -47,7 +47,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_SLC_LPC32XX=y
CONFIG_MTD_NAND_MLC_LPC32XX=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
index d95a8059d30b..7d26ca0b1302 100644
--- a/arch/arm/configs/mini2440_defconfig
+++ b/arch/arm/configs/mini2440_defconfig
@@ -92,7 +92,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_S3C2410=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_LPDDR=y
@@ -152,7 +152,7 @@ CONFIG_SPI_S3C24XX=y
CONFIG_SPI_SPIDEV=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_LM75=y
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_FB=y
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 1eeee7f11d91..94deb0ed0541 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -28,7 +28,7 @@ CONFIG_IP_PNP=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_GENERIC=y
# CONFIG_BLK_DEV is not set
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 078228a19339..6a11669fa536 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -15,7 +15,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_MULTI_V4=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
index 9a6390c172d6..0b42bddfbc82 100644
--- a/arch/arm/configs/multi_v4t_defconfig
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -5,7 +5,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTI_V4T=y
@@ -39,7 +38,7 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPIO=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 318b76fa26d1..63b5a8824f0f 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -87,7 +87,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_NAND_ORION=y
CONFIG_MTD_SPI_NOR=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index c75051b9392c..6b748f214eae 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -5,10 +5,6 @@ CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_CMDLINE_PARTITION=y
CONFIG_ARCH_VIRT=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_ARTPEC=y
@@ -33,7 +29,6 @@ CONFIG_MACH_BERLIN_BG2CD=y
CONFIG_MACH_BERLIN_BG2Q=y
CONFIG_ARCH_DIGICOLOR=y
CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
CONFIG_ARCH_HIGHBANK=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_HI3xxx=y
@@ -48,8 +43,8 @@ CONFIG_SOC_IMX6Q=y
CONFIG_SOC_IMX6SL=y
CONFIG_SOC_IMX6SX=y
CONFIG_SOC_IMX6UL=y
-CONFIG_SOC_IMX7D=y
CONFIG_SOC_LS1021A=y
+CONFIG_SOC_IMX7D=y
CONFIG_SOC_VF610=y
CONFIG_ARCH_KEYSTONE=y
CONFIG_ARCH_MEDIATEK=y
@@ -76,24 +71,6 @@ CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_RENESAS=y
-CONFIG_ARCH_EMEV2=y
-CONFIG_ARCH_R7S72100=y
-CONFIG_ARCH_R7S9210=y
-CONFIG_ARCH_R8A73A4=y
-CONFIG_ARCH_R8A7740=y
-CONFIG_ARCH_R8A7743=y
-CONFIG_ARCH_R8A7744=y
-CONFIG_ARCH_R8A7745=y
-CONFIG_ARCH_R8A77470=y
-CONFIG_ARCH_R8A7778=y
-CONFIG_ARCH_R8A7779=y
-CONFIG_ARCH_R8A7790=y
-CONFIG_ARCH_R8A7791=y
-CONFIG_ARCH_R8A7792=y
-CONFIG_ARCH_R8A7793=y
-CONFIG_ARCH_R8A7794=y
-CONFIG_ARCH_R9A06G032=y
-CONFIG_ARCH_SH73A0=y
CONFIG_ARCH_SOCFPGA=y
CONFIG_PLAT_SPEAR=y
CONFIG_ARCH_SPEAR13XX=y
@@ -109,16 +86,6 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
CONFIG_ARCH_WM8850=y
CONFIG_ARCH_ZYNQ=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MVEBU=y
-CONFIG_PCI_TEGRA=y
-CONFIG_PCI_RCAR_GEN2=y
-CONFIG_PCIE_RCAR=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_ENDPOINT=y
-CONFIG_PCI_ENDPOINT_CONFIGFS=y
-CONFIG_PCI_EPF_TEST=m
CONFIG_SMP=y
CONFIG_NR_CPUS=16
CONFIG_SECCOMP=y
@@ -141,6 +108,29 @@ CONFIG_ARM_CPUIDLE=y
CONFIG_ARM_ZYNQ_CPUIDLE=y
CONFIG_ARM_EXYNOS_CPUIDLE=y
CONFIG_KERNEL_MODE_NEON=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_TRUSTED_FOUNDATIONS=y
+CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA1_ARM_CE=m
+CONFIG_CRYPTO_SHA2_ARM_CE=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
+CONFIG_CRYPTO_AES_ARM_CE=m
+CONFIG_CRYPTO_GHASH_ARM_CE=m
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_GCC_PLUGINS=y
+CONFIG_GCC_PLUGIN_STRUCTLEAK=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMDLINE_PARTITION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -175,16 +165,29 @@ CONFIG_MAC80211=m
CONFIG_RFKILL=y
CONFIG_RFKILL_INPUT=y
CONFIG_RFKILL_GPIO=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MVEBU=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCI_RCAR_GEN2=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_PCI_ENDPOINT_CONFIGFS=y
+CONFIG_PCI_EPF_TEST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_CMA_SIZE_MBYTES=64
CONFIG_OMAP_OCP2SCP=y
CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_NAND_OMAP_BCH=y
@@ -195,7 +198,6 @@ CONFIG_MTD_NAND_BRCMNAND=y
CONFIG_MTD_NAND_VF610_NFC=y
CONFIG_MTD_NAND_DAVINCI=y
CONFIG_MTD_SPI_NOR=y
-CONFIG_SPI_FSL_QUADSPI=m
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@@ -230,7 +232,6 @@ CONFIG_VIRTIO_NET=y
CONFIG_B53_SPI_DRIVER=m
CONFIG_B53_MDIO_DRIVER=m
CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
CONFIG_NET_DSA_BCM_SF2=m
CONFIG_SUN4I_EMAC=y
CONFIG_BCMGENET=m
@@ -259,7 +260,6 @@ CONFIG_BROADCOM_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_MARVELL_PHY=y
CONFIG_MICREL_PHY=y
-CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
CONFIG_SMSC_PHY=y
CONFIG_USB_PEGASUS=y
@@ -288,6 +288,7 @@ CONFIG_MOUSE_ELAN_I2C=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADC=m
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_ELAN=m
CONFIG_TOUCHSCREEN_MMS114=m
CONFIG_TOUCHSCREEN_WM97XX=m
CONFIG_TOUCHSCREEN_ST1232=m
@@ -299,6 +300,7 @@ CONFIG_INPUT_MAX8997_HAPTIC=m
CONFIG_INPUT_CPCAP_PWRBUTTON=m
CONFIG_INPUT_AXP20X_PEK=m
CONFIG_INPUT_ADXL34X=m
+CONFIG_INPUT_STPMIC1_ONKEY=y
CONFIG_SERIO_AMBAKMI=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -349,6 +351,8 @@ CONFIG_SERIAL_DEV_BUS=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_ST=y
+CONFIG_TCG_TPM=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_ARB_GPIO_CHALLENGE=m
CONFIG_I2C_MUX_PCA954x=y
@@ -386,6 +390,7 @@ CONFIG_SPI_BCM2835=y
CONFIG_SPI_BCM2835AUX=y
CONFIG_SPI_CADENCE=y
CONFIG_SPI_DAVINCI=y
+CONFIG_SPI_FSL_QUADSPI=m
CONFIG_SPI_GPIO=m
CONFIG_SPI_FSL_DSPI=m
CONFIG_SPI_OMAP24XX=y
@@ -444,9 +449,11 @@ CONFIG_POWER_RESET_RMOBILE=y
CONFIG_BATTERY_ACT8945A=y
CONFIG_BATTERY_CPCAP=m
CONFIG_BATTERY_SBS=y
+CONFIG_BATTERY_BQ27XXX=m
CONFIG_AXP20X_POWER=m
CONFIG_BATTERY_MAX17040=m
CONFIG_BATTERY_MAX17042=m
+CONFIG_CHARGER_GPIO=m
CONFIG_CHARGER_CPCAP=m
CONFIG_CHARGER_MAX14577=m
CONFIG_CHARGER_MAX77693=m
@@ -486,6 +493,7 @@ CONFIG_TEGRA_WATCHDOG=m
CONFIG_MESON_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_RENESAS_WDT=m
+CONFIG_STPMIC1_WATCHDOG=y
CONFIG_BCM47XX_WDT=y
CONFIG_BCM2835_WDT=y
CONFIG_BCM_KONA_WDT=y
@@ -505,6 +513,7 @@ CONFIG_MFD_AXP20X_RSB=y
CONFIG_MFD_CROS_EC=m
CONFIG_CROS_EC_I2C=m
CONFIG_CROS_EC_SPI=m
+CONFIG_MFD_CROS_EC_CHARDEV=m
CONFIG_MFD_DA9063=m
CONFIG_MFD_MAX14577=y
CONFIG_MFD_MAX77686=y
@@ -527,6 +536,7 @@ CONFIG_MFD_TPS65218=y
CONFIG_MFD_TPS6586X=y
CONFIG_MFD_TPS65910=y
CONFIG_MFD_STM32_LPTIMER=m
+CONFIG_MFD_STPMIC1=y
CONFIG_REGULATOR_ACT8865=y
CONFIG_REGULATOR_ACT8945A=y
CONFIG_REGULATOR_ANATOP=y
@@ -559,6 +569,7 @@ CONFIG_REGULATOR_RN5T618=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_STM32_VREFBUF=m
+CONFIG_REGULATOR_STPMIC1=y
CONFIG_REGULATOR_TI_ABB=y
CONFIG_REGULATOR_TPS51632=y
CONFIG_REGULATOR_TPS62360=y
@@ -579,8 +590,6 @@ CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_STM32_DCMI=m
-CONFIG_SOC_CAMERA=m
-CONFIG_SOC_CAMERA_PLATFORM=m
CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
CONFIG_VIDEO_S5P_FIMC=m
CONFIG_VIDEO_S5P_MIPI_CSIS=m
@@ -626,10 +635,12 @@ CONFIG_DRM_RCAR_LVDS=y
CONFIG_DRM_SUN4I=m
CONFIG_DRM_FSL_DCU=m
CONFIG_DRM_TEGRA=y
-CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
-CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
+CONFIG_DRM_STM=m
+CONFIG_DRM_STM_DSI=m
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
+CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
+CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
CONFIG_DRM_DUMB_VGA_DAC=m
@@ -641,8 +652,6 @@ CONFIG_DRM_TOSHIBA_TC358764=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_DRM_STI=m
-CONFIG_DRM_STM=m
-CONFIG_DRM_STM_DSI=m
CONFIG_DRM_VC4=m
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_MXSFB=m
@@ -701,7 +710,6 @@ CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
CONFIG_SND_SOC_STI_SAS=m
CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
@@ -877,7 +885,6 @@ CONFIG_UNIPHIER_MDMAC=y
CONFIG_XILINX_DMA=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_DW_DMAC=y
-CONFIG_SH_DMAE=y
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=m
CONFIG_VIRTIO_PCI=y
@@ -910,6 +917,24 @@ CONFIG_QCOM_GSBI=y
CONFIG_QCOM_PM=y
CONFIG_QCOM_SMD_RPM=m
CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ARCH_EMEV2=y
+CONFIG_ARCH_R7S72100=y
+CONFIG_ARCH_R7S9210=y
+CONFIG_ARCH_R8A73A4=y
+CONFIG_ARCH_R8A7740=y
+CONFIG_ARCH_R8A7743=y
+CONFIG_ARCH_R8A7744=y
+CONFIG_ARCH_R8A7745=y
+CONFIG_ARCH_R8A77470=y
+CONFIG_ARCH_R8A7778=y
+CONFIG_ARCH_R8A7779=y
+CONFIG_ARCH_R8A7790=y
+CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7792=y
+CONFIG_ARCH_R8A7793=y
+CONFIG_ARCH_R8A7794=y
+CONFIG_ARCH_R9A06G032=y
+CONFIG_ARCH_SH73A0=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_2x_SOC=y
CONFIG_ARCH_TEGRA_3x_SOC=y
@@ -925,6 +950,7 @@ CONFIG_AT91_SAMA5D2_ADC=m
CONFIG_BERLIN2_ADC=m
CONFIG_CPCAP_ADC=m
CONFIG_EXYNOS_ADC=m
+CONFIG_MESON_SARADC=m
CONFIG_STM32_ADC_CORE=m
CONFIG_STM32_ADC=m
CONFIG_STM32_DFSDM_ADC=m
@@ -932,8 +958,12 @@ CONFIG_VF610_ADC=m
CONFIG_XILINX_XADC=y
CONFIG_STM32_LPTIMER_CNT=m
CONFIG_STM32_DAC=m
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
CONFIG_MPU3050_I2C=y
CONFIG_CM36651=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
CONFIG_SENSORS_ISL29018=y
CONFIG_SENSORS_ISL29028=y
CONFIG_AK8975=y
@@ -969,24 +999,21 @@ CONFIG_PHY_RCAR_GEN2=m
CONFIG_PHY_ROCKCHIP_DP=m
CONFIG_PHY_ROCKCHIP_USB=y
CONFIG_PHY_SAMSUNG_USB2=m
+CONFIG_PHY_UNIPHIER_USB2=y
+CONFIG_PHY_UNIPHIER_USB3=y
CONFIG_PHY_MIPHY28LP=y
CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_STM32_USBPHYC=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_PHY_DM816X_USB=m
-CONFIG_PHY_UNIPHIER_USB3=y
-CONFIG_PHY_UNIPHIER_USB2=y
CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
CONFIG_TWL4030_USB=m
+CONFIG_MESON_MX_EFUSE=m
+CONFIG_ROCKCHIP_EFUSE=m
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_BCM47XX_NVRAM=y
-CONFIG_BCM47XX_SPROM=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
@@ -1008,8 +1035,6 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1023,16 +1048,6 @@ CONFIG_CRYPTO_DEV_ATMEL_TDES=m
CONFIG_CRYPTO_DEV_ATMEL_SHA=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
-CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM_NEON=m
-CONFIG_CRYPTO_SHA1_ARM_CE=m
-CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA512_ARM=m
-CONFIG_CRYPTO_AES_ARM=m
-CONFIG_CRYPTO_AES_ARM_BS=m
-CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_GCC_PLUGINS=y
-CONFIG_GCC_PLUGIN_STRUCTLEAK=y
+CONFIG_CMA_SIZE_MBYTES=64
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index 0448bd8075ac..e9567513f068 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -47,7 +47,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ORION=y
CONFIG_BLK_DEV_LOOP=y
# CONFIG_SCSI_PROC_FS is not set
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 4b598da0d086..0e5577a31851 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -77,7 +77,7 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ORION=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 55140219ab11..48f7b4277b8d 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -52,7 +52,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 38480596c449..ed570a0d1f2a 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -50,7 +50,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_DATAFLASH=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SST25L=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPMI_NAND=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 5f4c6aaa07f6..cfc094189d09 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -53,8 +53,8 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_GENERIC=y
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index cfc00b0961ec..82af77c093f1 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -17,7 +17,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
@@ -90,7 +89,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 3f03ec6d2644..c7bf9c493646 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -143,8 +143,8 @@ CONFIG_MTD_M25P80=m
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
CONFIG_MTD_ONENAND_OMAP2=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_NAND_OMAP2=y
CONFIG_MTD_NAND_OMAP_BCH=y
CONFIG_MTD_SPI_NOR=m
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index bf9046331f6e..077e0fde1ff9 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -70,7 +70,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_NAND_ORION=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/arm/configs/oxnas_v6_defconfig b/arch/arm/configs/oxnas_v6_defconfig
index f6ba32c9d173..cae0db6b4eaf 100644
--- a/arch/arm/configs/oxnas_v6_defconfig
+++ b/arch/arm/configs/oxnas_v6_defconfig
@@ -50,7 +50,7 @@ CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_OXNAS=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig
index 3e0de035ab77..7681eea60127 100644
--- a/arch/arm/configs/pxa3xx_defconfig
+++ b/arch/arm/configs/pxa3xx_defconfig
@@ -31,7 +31,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_ONENAND=y
CONFIG_MTD_ONENAND_VERIFY_WRITE=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index d4654755b09c..07ebbdce3645 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -185,8 +185,8 @@ CONFIG_MTD_PXA2XX=m
CONFIG_MTD_M25P80=m
CONFIG_MTD_BLOCK2MTD=y
CONFIG_MTD_DOCG3=m
-CONFIG_MTD_NAND=m
-CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_NAND_GPIO=m
CONFIG_MTD_NAND_DISKONCHIP=m
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y
@@ -387,7 +387,7 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM95245=m
CONFIG_SENSORS_NTC_THERMISTOR=m
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=m
CONFIG_SA1100_WATCHDOG=m
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index bd6440f23493..c1854751c99a 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -50,14 +50,15 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
CONFIG_RFKILL=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_QCOM=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
@@ -72,6 +73,8 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=m
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_ATL1C=y
@@ -85,6 +88,7 @@ CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_WCN36XX=m
CONFIG_BRCMFMAC=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
@@ -94,6 +98,8 @@ CONFIG_KEYBOARD_PMIC8XXX=y
CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MSM_VIBRATOR=m
+CONFIG_INPUT_PM8941_PWRKEY=m
CONFIG_INPUT_PM8XXX_VIBRATOR=y
CONFIG_INPUT_PMIC8XXX_PWRKEY=y
CONFIG_INPUT_UINPUT=y
@@ -127,6 +133,7 @@ CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_MSM=y
CONFIG_CHARGER_QCOM_SMBB=y
+CONFIG_CHARGER_BQ24190=m
CONFIG_THERMAL=y
CONFIG_QCOM_TSENS=y
CONFIG_MFD_PM8XXX=y
@@ -226,7 +233,11 @@ CONFIG_IIO=y
CONFIG_IIO_BUFFER_CB=y
CONFIG_IIO_SW_TRIGGER=y
CONFIG_KXSD9=y
+CONFIG_QCOM_SPMI_IADC=m
+CONFIG_QCOM_SPMI_VADC=m
CONFIG_MPU3050_I2C=y
+CONFIG_INV_MPU6050_I2C=m
+CONFIG_TSL2772=m
CONFIG_AK8975=y
CONFIG_IIO_HRTIMER_TRIGGER=y
CONFIG_BMP280=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 2afb359f3168..39c648594d93 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -192,7 +192,7 @@ CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_S3C2410=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=m
diff --git a/arch/arm/configs/s3c6400_defconfig b/arch/arm/configs/s3c6400_defconfig
index 507d7ad7523a..6e2656567da6 100644
--- a/arch/arm/configs/s3c6400_defconfig
+++ b/arch/arm/configs/s3c6400_defconfig
@@ -23,7 +23,7 @@ CONFIG_CMDLINE="console=ttySAC0,115200 root=/dev/ram init=/linuxrc initrd=0x5100
CONFIG_VFP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_S3C2410=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index b0026f73083d..d5341b0bd88d 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -66,7 +66,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
@@ -150,7 +150,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_VIDEO_ATMEL_ISI=y
-CONFIG_SOC_CAMERA_OV2640=y
+CONFIG_SOC_CAMERA_OV2640=m
CONFIG_DRM=y
CONFIG_DRM_ATMEL_HLCDC=y
CONFIG_DRM_PANEL_SIMPLE=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 9b0efac101ab..eb02ba9ec6e6 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -43,11 +43,13 @@ CONFIG_PCI_RCAR_GEN2=y
CONFIG_PCIE_RCAR=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=64
CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_EEPROM_AT24=y
@@ -123,7 +125,6 @@ CONFIG_VIDEO_ADV7604=y
CONFIG_VIDEO_ML86V7667=y
CONFIG_DRM=y
CONFIG_DRM_RCAR_DU=y
-CONFIG_DRM_RCAR_LVDS=y
CONFIG_DRM_DUMB_VGA_DAC=y
CONFIG_DRM_SII902X=y
CONFIG_DRM_I2C_ADV7511=y
@@ -141,12 +142,13 @@ CONFIG_SND_SOC_RCAR=y
CONFIG_SND_SOC_AK4642=y
CONFIG_SND_SOC_SGTL5000=y
CONFIG_SND_SOC_WM8978=y
-CONFIG_SND_SIMPLE_SCU_CARD=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_R8A66597_HCD=y
CONFIG_USB_RENESAS_USBHS=y
CONFIG_USB_GADGET=y
@@ -197,6 +199,7 @@ CONFIG_PWM_RENESAS_TPU=y
CONFIG_RESET_CONTROLLER=y
CONFIG_GENERIC_PHY=y
CONFIG_PHY_RCAR_GEN2=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
# CONFIG_DNOTIFY is not set
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -209,6 +212,8 @@ CONFIG_NFS_V4_1=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 08d1b3e11d68..6701a975e785 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -21,7 +21,6 @@ CONFIG_NEON=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -51,7 +50,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
@@ -106,6 +105,7 @@ CONFIG_SENSORS_LTC2978_REGULATOR=y
CONFIG_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_MFD_ALTERA_A10SR=y
+CONFIG_MFD_ALTERA_SYSMGR=y
CONFIG_MFD_STMPE=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -127,6 +127,8 @@ CONFIG_RTC_DRV_DS1307=y
CONFIG_DMADEVICES=y
CONFIG_PL330_DMA=y
CONFIG_DMATEST=m
+CONFIG_IIO=y
+CONFIG_LTC2497=y
CONFIG_FPGA=y
CONFIG_FPGA_MGR_SOCFPGA=y
CONFIG_FPGA_MGR_SOCFPGA_A10=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
index 7b36eeb928bb..8ee3679ca8b2 100644
--- a/arch/arm/configs/spear13xx_defconfig
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -32,7 +32,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index f1b52fb3461b..ddd73b25f75e 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -17,7 +17,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index 124c244d8df1..5b410f0a365b 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -14,7 +14,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index 9ea82c118661..f6d2f674517c 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -84,7 +84,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_ROM=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_SHARPSL=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_SD=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 0258ba891376..152321d2893e 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/tango4_defconfig b/arch/arm/configs/tango4_defconfig
index 68725d4eae45..68eb16e583ac 100644
--- a/arch/arm/configs/tango4_defconfig
+++ b/arch/arm/configs/tango4_defconfig
@@ -39,7 +39,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_TANGO=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index c7b99ebf5fcf..8f5c6a5b444c 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,6 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -14,23 +15,9 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_TEGRA=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_TEGRA=y
CONFIG_SMP=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
CONFIG_HIGHMEM=y
-CONFIG_CMA=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_KEXEC=y
@@ -40,6 +27,13 @@ CONFIG_CPUFREQ_DT=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_TRUSTED_FOUNDATIONS=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -73,10 +67,12 @@ CONFIG_MAC80211=y
CONFIG_RFKILL=y
CONFIG_RFKILL_INPUT=y
CONFIG_RFKILL_GPIO=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_TEGRA=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=64
CONFIG_TEGRA_GMI=y
CONFIG_MTD=y
CONFIG_MTD_M25P80=y
@@ -152,7 +148,6 @@ CONFIG_WATCHDOG=y
CONFIG_TEGRA_WATCHDOG=y
CONFIG_MFD_AS3722=y
CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_SPI=y
CONFIG_MFD_MAX8907=y
CONFIG_MFD_STMPE=y
CONFIG_MFD_PALMAS=y
@@ -180,6 +175,7 @@ CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_TEGRA=y
CONFIG_DRM_PANEL_SIMPLE=y
# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_BACKLIGHT_GENERIC is not set
CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -288,6 +284,10 @@ CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRC_CCITT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
@@ -300,5 +300,3 @@ CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 2b5a224d2da1..ecad22501b48 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -76,7 +76,7 @@ CONFIG_MTD_DOC2001PLUS=y
CONFIG_MTD_DOCPROBE_ADVANCED=y
CONFIG_MTD_DOCPROBE_ADDRESS=0x4000000
CONFIG_MTD_DOCPROBE_HIGH=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DISKONCHIP=y
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED=y
CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0x4000000
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 36d77406e31b..bedf397c75de 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -9,7 +9,6 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
@@ -27,7 +26,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSMC=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 392ed3b3613c..484d77a7f589 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -14,7 +14,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 07e31941dc67..617c2c99ebfb 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
index 9d6fda81986d..48a89537b828 100644
--- a/arch/arm/crypto/chacha-neon-glue.c
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
crypto_chacha_init(state, ctx, req->iv);
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index cd9e93b46c2d..e712c2a7d387 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/hwcap.h>
#include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
u32 *crc = shash_desc_ctx(desc);
unsigned int l;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
if ((u32)data % SCALE_F) {
l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c
index 3d6b800b8396..3b24f2872592 100644
--- a/arch/arm/crypto/crct10dif-ce-glue.c
+++ b/arch/arm/crypto/crct10dif-ce-glue.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull(*crc, data, length);
kernel_neon_end();
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index b7d30b6cf49c..39d1ccec1aab 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -14,6 +14,7 @@
#include <asm/unaligned.h>
#include <crypto/cryptd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/gf128mul.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -185,7 +186,6 @@ static int ghash_async_init(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -196,7 +196,7 @@ static int ghash_async_update(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +214,7 @@ static int ghash_async_final(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +232,7 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -242,7 +242,6 @@ static int ghash_async_digest(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
}
@@ -255,7 +254,6 @@ static int ghash_async_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm);
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
index 49aae87cb2bc..ae5aefc44a4d 100644
--- a/arch/arm/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !may_use_simd())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
diff --git a/arch/arm/crypto/sha1-ce-glue.c b/arch/arm/crypto/sha1-ce-glue.c
index b732522e20f8..4c6c6900853c 100644
--- a/arch/arm/crypto/sha1-ce-glue.c
+++ b/arch/arm/crypto/sha1-ce-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);
@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index d15e0ea2c95e..d6c95c213d42 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -19,6 +19,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return sha1_update_arm(desc, data, len);
@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha1_finup_arm(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha2-ce-glue.c b/arch/arm/crypto/sha2-ce-glue.c
index 1211a5c129fc..a47a9d4b663e 100644
--- a/arch/arm/crypto/sha2-ce-glue.c
+++ b/arch/arm/crypto/sha2-ce-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
index 1d82c6cd31a4..f3f6b1624fc3 100644
--- a/arch/arm/crypto/sha256_neon_glue.c
+++ b/arch/arm/crypto/sha256_neon_glue.c
@@ -15,6 +15,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha256_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
index 8a5642b41fd6..d33ab59c26c0 100644
--- a/arch/arm/crypto/sha512-neon-glue.c
+++ b/arch/arm/crypto/sha512-neon-glue.c
@@ -9,6 +9,7 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return sha512_arm_update(desc, data, len);
@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha512_arm_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm/firmware/Kconfig b/arch/arm/firmware/Kconfig
deleted file mode 100644
index ad396af68e47..000000000000
--- a/arch/arm/firmware/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-config ARCH_SUPPORTS_FIRMWARE
-	bool
-
-config ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
-	bool
-	select ARCH_SUPPORTS_FIRMWARE
-
-menu "Firmware options"
-	depends on ARCH_SUPPORTS_FIRMWARE
-
-config TRUSTED_FOUNDATIONS
-	bool "Trusted Foundations secure monitor support"
-	depends on ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
-	default y
-	help
-	  Some devices (including most Tegra-based consumer devices on the
-	  market) are booted with the Trusted Foundations secure monitor
-	  active, requiring some core operations to be performed by the secure
-	  monitor instead of the kernel.
-
-	  This option allows the kernel to invoke the secure monitor whenever
-	  required on devices using Trusted Foundations. See
-	  arch/arm/include/asm/trusted_foundations.h or the
-	  tlm,trusted-foundations device tree binding documentation for details
-	  on how to use it.
-
-	  Say n if you don't know what this is about.
-
-endmenu
diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile
deleted file mode 100644
index 6e41336b0bc4..000000000000
--- a/arch/arm/firmware/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
-
-# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
-KCOV_INSTRUMENT := n
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index a8a4eb7f6dae..60de9d13181a 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -9,15 +9,13 @@ generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += parport.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += seccomp.h
-generic-y += segment.h
generic-y += serial.h
generic-y += simd.h
-generic-y += sizes.h
generic-y += trace_clock.h
generated-y += mach-types.h
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 0a8d7bba2cb0..4b66ecd6be99 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -11,6 +11,10 @@
#include <clocksource/arm_arch_timer.h>
#ifdef CONFIG_ARM_ARCH_TIMER
+/* 32bit ARM doesn't know anything about timer errata... */
+#define has_erratum_handler(h) (false)
+#define erratum_handler(h) (arch_timer_##h)
+
int arch_timer_arch_init(void);
/*
@@ -79,7 +83,7 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline u64 __arch_counter_get_cntpct(void)
{
u64 cval;
@@ -88,7 +92,12 @@ static inline u64 arch_counter_get_cntpct(void)
return cval;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static inline u64 __arch_counter_get_cntvct(void)
{
u64 cval;
@@ -97,6 +106,11 @@ static inline u64 arch_counter_get_cntvct(void)
return cval;
}
+static inline u64 __arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
index 07e27f212dc7..d2453e2d3f1f 100644
--- a/arch/arm/include/asm/cp15.h
+++ b/arch/arm/include/asm/cp15.h
@@ -68,6 +68,8 @@
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
+#define CNTVCT __ACCESS_CP15_64(1, c14)
+
extern unsigned long cr_alignment; /* defined in entry-armv.S */
static inline unsigned long get_cr(void)
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 99d9f630d6b6..1888c2d15da5 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -133,9 +133,11 @@ static inline void modify_domain(unsigned dom, unsigned type) { }
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
-#define TUSER(instr) #instr "t"
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr "t" #cond
#else
-#define TUSER(instr) #instr
+#define TUSER(instr) TUSERCOND(instr, )
+#define TUSERCOND(instr, cond) #instr #cond
#endif
#else /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/firmware.h b/arch/arm/include/asm/firmware.h
index 34c1d96ef46d..6698272bbcbf 100644
--- a/arch/arm/include/asm/firmware.h
+++ b/arch/arm/include/asm/firmware.h
@@ -24,7 +24,7 @@ struct firmware_ops {
/*
* Inform the firmware we intend to enter CPU idle mode
*/
- int (*prepare_idle)(void);
+ int (*prepare_idle)(unsigned long mode);
/*
* Enters CPU idle mode
*/
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 0a46676b4245..83c391b597d4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -110,10 +110,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
preempt_disable();
__ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ " .syntax unified\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n"
- "2: " TUSER(streq) " %3, [%4]\n"
+ "2: " TUSERCOND(str, eq) " %3, [%4]\n"
__futex_atomic_ex_table("%5")
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index cba23eaa6072..7a88f160b1fb 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -6,6 +6,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
+/* number of IPIs _not_ including IPI_CPU_BACKTRACE */
#define NR_IPI 7
typedef struct {
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 6b51826ab3d1..7e22c81398c4 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -281,8 +281,6 @@ extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
-#define mmiowb()
-
/*
* Memory access primitives
* ------------------------
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 8927cae7c966..efb0e2c0d84c 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -343,4 +343,6 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
}
}
+static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
+
#endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 770d73257ad9..075e1921fdd9 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -19,6 +19,7 @@
#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__
+#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cputype.h>
@@ -53,6 +54,8 @@
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+static inline int kvm_arm_init_sve(void) { return 0; }
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -150,9 +153,13 @@ struct kvm_cpu_context {
u32 cp15[NR_CP15_REGS];
};
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+ struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -182,7 +189,7 @@ struct kvm_vcpu_arch {
struct kvm_vcpu_fault_info fault;
/* Host FP context */
- kvm_cpu_context_t *host_cpu_context;
+ struct kvm_cpu_context *host_cpu_context;
/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -361,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+
static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}
@@ -409,4 +419,14 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
return 0;
}
+static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ return -EINVAL;
+}
+
+static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/limits.h b/arch/arm/include/asm/limits.h
deleted file mode 100644
index ab159371d786..000000000000
--- a/arch/arm/include/asm/limits.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_PIPE_H
-#define __ASM_PIPE_H
-
-#ifndef PAGE_SIZE
-#include <asm/page.h>
-#endif
-
-#define PIPE_BUF PAGE_SIZE
-
-#endif
-
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 57fe73ea0f72..5d06f75ffad4 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -135,8 +135,8 @@ static inline void prefetchw(const void *ptr)
__asm__ __volatile__(
".arch_extension mp\n"
__ALT_SMP_ASM(
- WASM(pldw) "\t%a0",
- WASM(pld) "\t%a0"
+ "pldw\t%a0",
+ "pld\t%a0"
)
:: "p" (ptr));
}
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index 9e11dce55e06..9587517649bd 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -32,14 +32,14 @@
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(kvm, pud) pud_free(NULL, pud)
+#define stage2_pud_free(kvm, pud) do { } while (0)
#define stage2_pud_none(kvm, pud) pud_none(pud)
#define stage2_pud_clear(kvm, pud) pud_clear(pud)
#define stage2_pud_present(kvm, pud) pud_present(pud)
#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd)
+#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
#define stage2_pud_huge(kvm, pud) pud_huge(pud)
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 080ce70cab12..fd02761ba06c 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -73,7 +73,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
/* ARM tasks don't change audit architectures on the fly. */
return AUDIT_ARCH_ARM;
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f854148c8d7c..bc6d04a09899 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,271 +33,42 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#define MMU_GATHER_BUNDLE 8
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
free_page_and_swap_cache((struct page *)_table);
}
-struct mmu_table_batch {
- struct rcu_head rcu;
- unsigned int nr;
- void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
-#else
-#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- struct mmu_table_batch *batch;
- unsigned int need_flush;
-#endif
- unsigned int fullmm;
- struct vm_area_struct *vma;
- unsigned long start, end;
- unsigned long range_start;
- unsigned long range_end;
- unsigned int nr;
- unsigned int max;
- struct page **pages;
- struct page *local[MMU_GATHER_BUNDLE];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/*
- * This is unnecessarily complex. There's three ways the TLB shootdown
- * code is used:
- * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
- * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL.
- * 2. Unmapping all vmas. See exit_mmap().
- * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- * tlb->vma will be non-NULL. Additionally, page tables will be freed.
- * 3. Unmapping argument pages. See shift_arg_pages().
- * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- * tlb->vma will be NULL.
- */
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- if (tlb->fullmm || !tlb->vma)
- flush_tlb_mm(tlb->mm);
- else if (tlb->range_end > 0) {
- flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
-
-static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
-{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(struct page *);
- }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- free_pages_and_swap_cache(tlb->pages, tlb->nr);
- tlb->nr = 0;
- if (tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->fullmm = !(start | (end+1));
- tlb->start = start;
- tlb->end = end;
- tlb->vma = NULL;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- __tlb_alloc_page(tlb);
+#include <asm-generic/tlb.h>
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
+#ifndef CONFIG_HAVE_RCU_TABLE_FREE
+#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
#endif
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->range_start = start;
- tlb->range_end = end;
- }
-
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Memorize the range for the TLB flush.
- */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm) {
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
- tlb->vma = vma;
- tlb->range_start = TASK_SIZE;
- tlb->range_end = 0;
- }
-}
static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- tlb_flush(tlb);
-}
-
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->pages[tlb->nr++] = page;
- VM_WARN_ON(tlb->nr > tlb->max);
- if (tlb->nr == tlb->max)
- return true;
- return false;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long addr)
+__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
pgtable_page_dtor(pte);
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
-#else
+#ifndef CONFIG_ARM_LPAE
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
*/
- addr &= PMD_MASK;
- tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
- tlb_add_flush(tlb, addr + SZ_1M);
+ addr = (addr & PMD_MASK) + SZ_1M;
+ __tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
- tlb_remove_entry(tlb, pte);
-}
-
-static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
- unsigned long addr)
-{
-#ifdef CONFIG_ARM_LPAE
- tlb_add_flush(tlb, addr);
- tlb_remove_entry(tlb, virt_to_page(pmdp));
-#endif
+ tlb_remove_table(tlb, pte);
}
static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
- tlb_add_flush(tlb, addr);
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
+__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
-}
-
-static inline void tlb_flush_remove_tables(struct mm_struct *mm)
-{
-}
+#ifdef CONFIG_ARM_LPAE
+ struct page *page = virt_to_page(pmdp);
-static inline void tlb_flush_remove_tables_local(void *arg)
-{
+ tlb_remove_table(tlb, page);
+#endif
}
#endif /* CONFIG_MMU */
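[Editor's note] The large deletion above converts 32-bit ARM to the generic mmu_gather: the hand-rolled page batching, range tracking and tlb_start_vma()/tlb_end_vma() handling move into <asm-generic/tlb.h>, and the arch now only supplies __tlb_remove_table() plus the __pte_free_tlb()/__pmd_free_tlb() callbacks kept in the surviving lines. The call pattern used by core mm code is unchanged; a hedged sketch, assuming the v5.2-era tlb_gather_mmu()/tlb_finish_mmu() signatures:

        #include <asm/tlb.h>

        /* Illustrative only: how core mm drives the now-generic gather on ARM. */
        static void example_unmap_range(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
        {
                struct mmu_gather tlb;

                tlb_gather_mmu(&tlb, mm, start, end);
                /* ... unmap_page_range() et al. feed pages/tables into the gather ... */
                tlb_finish_mmu(&tlb, start, end);
        }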
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index dff49845eb87..d49ce8f48be3 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -112,10 +112,11 @@ static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
unsigned long tmp;
asm volatile(
+ " .syntax unified\n"
" sub %1, %3, #1\n"
" subs %1, %1, %0\n"
" addhs %1, %1, #1\n"
- " subhss %1, %1, %2\n"
+ " subshs %1, %1, %2\n"
" movlo %0, #0\n"
: "+r" (safe_ptr), "=&r" (tmp)
: "r" (size), "r" (current_thread_info()->addr_limit)
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index 201100226301..067e12edc341 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -5,7 +5,7 @@ void convert_to_tag_list(struct tag *tags);
const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
unsigned int machine_nr);
#else
-static inline const struct machine_desc *
+static inline const struct machine_desc * __init __noreturn
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
{
early_print("no ATAGS support: can't continue\n");
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 84363fe7bad2..10c45cc6b957 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -55,6 +55,12 @@ static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
return chan < 4 ? count : (count << 1);
}
+static struct device isa_dma_dev = {
+ .init_name = "fallback device",
+ .coherent_dma_mask = ~(dma_addr_t)0,
+ .dma_mask = &isa_dma_dev.coherent_dma_mask,
+};
+
static void isa_enable_dma(unsigned int chan, dma_t *dma)
{
if (dma->invalid) {
@@ -89,7 +95,7 @@ static void isa_enable_dma(unsigned int chan, dma_t *dma)
dma->sg = &dma->buf;
dma->sgcount = 1;
dma->buf.length = dma->count;
- dma->buf.dma_address = dma_map_single(NULL,
+ dma->buf.dma_address = dma_map_single(&isa_dma_dev,
dma->addr, dma->count,
direction);
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 76bb8de6bf6b..be5edfdde558 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -549,8 +549,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/*
- * Increment event counter and perform fixup for the pre-signal
- * frame.
+ * Perform fixup for the pre-signal frame.
*/
rseq_signal_deliver(ksig, regs);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index facd4240ca02..ebc53804d57b 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -70,6 +70,10 @@ enum ipi_msg_type {
IPI_CPU_STOP,
IPI_IRQ_WORK,
IPI_COMPLETION,
+ /*
+ * CPU_BACKTRACE is special and not included in NR_IPI
+ * or traceable with trace_ipi_*
+ */
IPI_CPU_BACKTRACE,
/*
* SGI8-15 can be reserved by secure firmware, and thus may
@@ -754,15 +758,20 @@ static int cpufreq_callback(struct notifier_block *nb,
unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
- int cpu = freq->cpu;
+ struct cpumask *cpus = freq->policy->cpus;
+ int cpu, first = cpumask_first(cpus);
+ unsigned int lpj;
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
- if (!per_cpu(l_p_j_ref, cpu)) {
- per_cpu(l_p_j_ref, cpu) =
- per_cpu(cpu_data, cpu).loops_per_jiffy;
- per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ if (!per_cpu(l_p_j_ref, first)) {
+ for_each_cpu(cpu, cpus) {
+ per_cpu(l_p_j_ref, cpu) =
+ per_cpu(cpu_data, cpu).loops_per_jiffy;
+ per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ }
+
if (!global_l_p_j_ref) {
global_l_p_j_ref = loops_per_jiffy;
global_l_p_j_ref_freq = freq->old;
@@ -774,10 +783,11 @@ static int cpufreq_callback(struct notifier_block *nb,
loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
global_l_p_j_ref_freq,
freq->new);
- per_cpu(cpu_data, cpu).loops_per_jiffy =
- cpufreq_scale(per_cpu(l_p_j_ref, cpu),
- per_cpu(l_p_j_ref_freq, cpu),
- freq->new);
+
+ lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+ per_cpu(l_p_j_ref_freq, first), freq->new);
+ for_each_cpu(cpu, cpus)
+ per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
}
return NOTIFY_OK;
}
@@ -797,7 +807,7 @@ core_initcall(register_cpufreq_notifier);
static void raise_nmi(cpumask_t *mask)
{
- smp_cross_call(mask, IPI_CPU_BACKTRACE);
+ __smp_cross_call(mask, IPI_CPU_BACKTRACE);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
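[Editor's note] With cpufreq notifiers now handing over the whole policy, the loops_per_jiffy recalibration above captures one reference value for the first CPU of freq->policy->cpus and writes the rescaled result to every CPU in that mask, instead of touching only freq->cpu. A standalone sketch of the scaling rule, assuming cpufreq_scale(old, div, mult) computes old * mult / div:

        #include <linux/math64.h>

        /* Illustrative helper, not from this patch. */
        static unsigned long scale_lpj(unsigned long lpj_ref, unsigned int ref_khz,
                                       unsigned int new_khz)
        {
                return (unsigned long)div_u64((u64)lpj_ref * new_khz, ref_khz);
        }

        /* e.g. scale_lpj(4000000, 1000000, 500000) == 2000000 for a 1 GHz -> 500 MHz step */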
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index a56e7c856ab5..86870f40f9a0 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
* running on another CPU? For now, ignore it as we
* can't guarantee we won't explode.
*/
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
frame.fp = thread_saved_fp(tsk);
@@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
}
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
@@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 3f5320f46de2..f591026347a5 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -22,7 +22,6 @@ config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on MMU && OF
select PREEMPT_NOTIFIERS
- select ANON_INODES
select ARM_GIC
select ARM_GIC_V3
select ARM_GIC_V3_ITS
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 903f23c309df..a2220e522f62 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -21,7 +21,6 @@ config SOC_SAMA5D2
depends on ARCH_MULTI_V7
select SOC_SAMA5
select CACHE_L2X0
- select HAVE_FB_ATMEL
select HAVE_AT91_UTMI
select HAVE_AT91_USB_CLK
select HAVE_AT91_H32MX
@@ -36,7 +35,6 @@ config SOC_SAMA5D3
bool "SAMA5D3 family"
depends on ARCH_MULTI_V7
select SOC_SAMA5
- select HAVE_FB_ATMEL
select HAVE_AT91_UTMI
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
@@ -50,7 +48,6 @@ config SOC_SAMA5D4
depends on ARCH_MULTI_V7
select SOC_SAMA5
select CACHE_L2X0
- select HAVE_FB_ATMEL
select HAVE_AT91_UTMI
select HAVE_AT91_SMD
select HAVE_AT91_USB_CLK
@@ -107,6 +104,29 @@ config SOC_AT91SAM9
AT91SAM9X35
AT91SAM9XE
+comment "Clocksource driver selection"
+
+config ATMEL_CLOCKSOURCE_PIT
+ bool "Periodic Interval Timer (PIT) support"
+ depends on SOC_AT91SAM9 || SOC_SAMA5
+ default SOC_AT91SAM9 || SOC_SAMA5
+ select ATMEL_PIT
+ help
+ Select this to get a clocksource based on the Atmel Periodic Interval
+ Timer. It has a relatively low resolution and the TC Block clocksource
+ should be preferred.
+
+config ATMEL_CLOCKSOURCE_TCB
+ bool "Timer Counter Blocks (TCB) support"
+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
+ select ATMEL_TCB_CLKSRC
+ help
+ Select this to get a high precision clocksource based on a
+ TC block with a 5+ MHz base clock rate.
+ On platforms with 16-bit counters, two timer channels are combined
+ to make a single 32-bit timer.
+ It can also be used as a clock event device supporting oneshot mode.
+
config HAVE_AT91_UTMI
bool
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index 3dbdef4d3cbf..c12563b09656 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -32,3 +32,21 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM9")
.init_machine = at91sam9_init,
.dt_compat = at91_dt_board_compat,
MACHINE_END
+
+static void __init sam9x60_init(void)
+{
+ of_platform_default_populate(NULL, NULL, NULL);
+
+ sam9x60_pm_init();
+}
+
+static const char *const sam9x60_dt_board_compat[] __initconst = {
+ "microchip,sam9x60",
+ NULL
+};
+
+DT_MACHINE_START(sam9x60_dt, "Microchip SAM9X60")
+ /* Maintainer: Microchip */
+ .init_machine = sam9x60_init,
+ .dt_compat = sam9x60_dt_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index e2bd17237964..72b45accfa0f 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -14,11 +14,13 @@
#ifdef CONFIG_PM
extern void __init at91rm9200_pm_init(void);
extern void __init at91sam9_pm_init(void);
+extern void __init sam9x60_pm_init(void);
extern void __init sama5_pm_init(void);
extern void __init sama5d2_pm_init(void);
#else
static inline void __init at91rm9200_pm_init(void) { }
static inline void __init at91sam9_pm_init(void) { }
+static inline void __init sam9x60_pm_init(void) { }
static inline void __init sama5_pm_init(void) { }
static inline void __init sama5d2_pm_init(void) { }
#endif
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 2a757dcaa1a5..6c8147536f3d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -39,6 +39,20 @@ extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#endif
+struct at91_soc_pm {
+ int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
+ int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
+ const struct of_device_id *ws_ids;
+ struct at91_pm_data data;
+};
+
+static struct at91_soc_pm soc_pm = {
+ .data = {
+ .standby_mode = AT91_PM_STANDBY,
+ .suspend_mode = AT91_PM_ULP0,
+ },
+};
+
static const match_table_t pm_modes __initconst = {
{ AT91_PM_STANDBY, "standby" },
{ AT91_PM_ULP0, "ulp0" },
@@ -47,16 +61,11 @@ static const match_table_t pm_modes __initconst = {
{ -1, NULL },
};
-static struct at91_pm_data pm_data = {
- .standby_mode = AT91_PM_STANDBY,
- .suspend_mode = AT91_PM_ULP0,
-};
-
#define at91_ramc_read(id, field) \
- __raw_readl(pm_data.ramc[id] + field)
+ __raw_readl(soc_pm.data.ramc[id] + field)
#define at91_ramc_write(id, field, value) \
- __raw_writel(value, pm_data.ramc[id] + field)
+ __raw_writel(value, soc_pm.data.ramc[id] + field)
static int at91_pm_valid_state(suspend_state_t state)
{
@@ -91,6 +100,8 @@ static const struct wakeup_source_info ws_info[] = {
{ .pmc_fsmr_bit = AT91_PMC_RTCAL, .shdwc_mr_bit = BIT(17) },
{ .pmc_fsmr_bit = AT91_PMC_USBAL },
{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
+ { .pmc_fsmr_bit = AT91_PMC_RTTAL },
+ { .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
};
static const struct of_device_id sama5d2_ws_ids[] = {
@@ -105,6 +116,17 @@ static const struct of_device_id sama5d2_ws_ids[] = {
{ /* sentinel */ }
};
+static const struct of_device_id sam9x60_ws_ids[] = {
+ { .compatible = "atmel,at91sam9x5-rtc", .data = &ws_info[1] },
+ { .compatible = "atmel,at91rm9200-ohci", .data = &ws_info[2] },
+ { .compatible = "usb-ohci", .data = &ws_info[2] },
+ { .compatible = "atmel,at91sam9g45-ehci", .data = &ws_info[2] },
+ { .compatible = "usb-ehci", .data = &ws_info[2] },
+ { .compatible = "atmel,at91sam9260-rtt", .data = &ws_info[4] },
+ { .compatible = "cdns,sam9x60-macb", .data = &ws_info[5] },
+ { /* sentinel */ }
+};
+
static int at91_pm_config_ws(unsigned int pm_mode, bool set)
{
const struct wakeup_source_info *wsi;
@@ -116,24 +138,22 @@ static int at91_pm_config_ws(unsigned int pm_mode, bool set)
if (pm_mode != AT91_PM_ULP1)
return 0;
- if (!pm_data.pmc || !pm_data.shdwc)
+ if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
return -EPERM;
if (!set) {
- writel(mode, pm_data.pmc + AT91_PMC_FSMR);
+ writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
return 0;
}
- /* SHDWC.WUIR */
- val = readl(pm_data.shdwc + 0x0c);
- mode |= (val & 0x3ff);
- polarity |= ((val >> 16) & 0x3ff);
+ if (soc_pm.config_shdwc_ws)
+ soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
/* SHDWC.MR */
- val = readl(pm_data.shdwc + 0x04);
+ val = readl(soc_pm.data.shdwc + 0x04);
/* Loop through defined wakeup sources. */
- for_each_matching_node_and_match(np, sama5d2_ws_ids, &match) {
+ for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
pdev = of_find_device_by_node(np);
if (!pdev)
continue;
@@ -155,8 +175,8 @@ put_device:
}
if (mode) {
- writel(mode, pm_data.pmc + AT91_PMC_FSMR);
- writel(polarity, pm_data.pmc + AT91_PMC_FSPR);
+ if (soc_pm.config_pmc_ws)
+ soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
} else {
pr_err("AT91: PM: no ULP1 wakeup sources found!");
}
@@ -164,6 +184,34 @@ put_device:
return mode ? 0 : -EPERM;
}
+static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
+ u32 *polarity)
+{
+ u32 val;
+
+ /* SHDWC.WUIR */
+ val = readl(shdwc + 0x0c);
+ *mode |= (val & 0x3ff);
+ *polarity |= ((val >> 16) & 0x3ff);
+
+ return 0;
+}
+
+static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
+{
+ writel(mode, pmc + AT91_PMC_FSMR);
+ writel(polarity, pmc + AT91_PMC_FSPR);
+
+ return 0;
+}
+
+static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
+{
+ writel(mode, pmc + AT91_PMC_FSMR);
+
+ return 0;
+}
+
/*
* Called after processes are frozen, but before we shutdown devices.
*/
@@ -171,18 +219,18 @@ static int at91_pm_begin(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_MEM:
- pm_data.mode = pm_data.suspend_mode;
+ soc_pm.data.mode = soc_pm.data.suspend_mode;
break;
case PM_SUSPEND_STANDBY:
- pm_data.mode = pm_data.standby_mode;
+ soc_pm.data.mode = soc_pm.data.standby_mode;
break;
default:
- pm_data.mode = -1;
+ soc_pm.data.mode = -1;
}
- return at91_pm_config_ws(pm_data.mode, true);
+ return at91_pm_config_ws(soc_pm.data.mode, true);
}
/*
@@ -194,10 +242,10 @@ static int at91_pm_verify_clocks(void)
unsigned long scsr;
int i;
- scsr = readl(pm_data.pmc + AT91_PMC_SCSR);
+ scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
/* USB must not be using PLLB */
- if ((scsr & pm_data.uhp_udp_mask) != 0) {
+ if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
return 0;
}
@@ -208,7 +256,7 @@ static int at91_pm_verify_clocks(void)
if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
continue;
- css = readl(pm_data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
+ css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
if (css != AT91_PMC_CSS_SLOW) {
pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
return 0;
@@ -230,7 +278,7 @@ static int at91_pm_verify_clocks(void)
*/
int at91_suspend_entering_slow_clock(void)
{
- return (pm_data.mode >= AT91_PM_ULP0);
+ return (soc_pm.data.mode >= AT91_PM_ULP0);
}
EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
@@ -243,14 +291,14 @@ static int at91_suspend_finish(unsigned long val)
flush_cache_all();
outer_disable();
- at91_suspend_sram_fn(&pm_data);
+ at91_suspend_sram_fn(&soc_pm.data);
return 0;
}
static void at91_pm_suspend(suspend_state_t state)
{
- if (pm_data.mode == AT91_PM_BACKUP) {
+ if (soc_pm.data.mode == AT91_PM_BACKUP) {
pm_bu->suspended = 1;
cpu_suspend(0, at91_suspend_finish);
@@ -289,7 +337,7 @@ static int at91_pm_enter(suspend_state_t state)
/*
* Ensure that clocks are in a valid state.
*/
- if (pm_data.mode >= AT91_PM_ULP0 &&
+ if (soc_pm.data.mode >= AT91_PM_ULP0 &&
!at91_pm_verify_clocks())
goto error;
@@ -318,7 +366,7 @@ error:
*/
static void at91_pm_end(void)
{
- at91_pm_config_ws(pm_data.mode, false);
+ at91_pm_config_ws(soc_pm.data.mode, false);
}
@@ -351,7 +399,7 @@ static void at91rm9200_standby(void)
" str %2, [%1, %3]\n\t"
" mcr p15, 0, %0, c7, c0, 4\n\t"
:
- : "r" (0), "r" (pm_data.ramc[0]),
+ : "r" (0), "r" (soc_pm.data.ramc[0]),
"r" (1), "r" (AT91_MC_SDRAMC_SRR));
}
@@ -374,7 +422,7 @@ static void at91_ddr_standby(void)
at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
}
- if (pm_data.ramc[1]) {
+ if (soc_pm.data.ramc[1]) {
saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
@@ -392,14 +440,14 @@ static void at91_ddr_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
- if (pm_data.ramc[1])
+ if (soc_pm.data.ramc[1])
at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
- if (pm_data.ramc[1]) {
+ if (soc_pm.data.ramc[1]) {
at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
@@ -429,7 +477,7 @@ static void at91sam9_sdram_standby(void)
u32 lpr0, lpr1 = 0;
u32 saved_lpr0, saved_lpr1 = 0;
- if (pm_data.ramc[1]) {
+ if (soc_pm.data.ramc[1]) {
saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
@@ -441,13 +489,13 @@ static void at91sam9_sdram_standby(void)
/* self-refresh mode now */
at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
- if (pm_data.ramc[1])
+ if (soc_pm.data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
cpu_do_idle();
at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
- if (pm_data.ramc[1])
+ if (soc_pm.data.ramc[1])
at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
@@ -480,14 +528,14 @@ static __init void at91_dt_ramc(void)
const struct ramc_info *ramc;
for_each_matching_node_and_match(np, ramc_ids, &of_id) {
- pm_data.ramc[idx] = of_iomap(np, 0);
- if (!pm_data.ramc[idx])
+ soc_pm.data.ramc[idx] = of_iomap(np, 0);
+ if (!soc_pm.data.ramc[idx])
panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx);
ramc = of_id->data;
if (!standby)
standby = ramc->idle;
- pm_data.memctrl = ramc->memctrl;
+ soc_pm.data.memctrl = ramc->memctrl;
idx++;
}
@@ -509,12 +557,17 @@ static void at91rm9200_idle(void)
* Disable the processor clock. The processor will be automatically
* re-enabled by an interrupt or by a reset.
*/
- writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
+ writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
+}
+
+static void at91sam9x60_idle(void)
+{
+ cpu_do_idle();
}
static void at91sam9_idle(void)
{
- writel(AT91_PMC_PCK, pm_data.pmc + AT91_PMC_SCDR);
+ writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
cpu_do_idle();
}
@@ -566,8 +619,8 @@ static void __init at91_pm_sram_init(void)
static bool __init at91_is_pm_mode_active(int pm_mode)
{
- return (pm_data.standby_mode == pm_mode ||
- pm_data.suspend_mode == pm_mode);
+ return (soc_pm.data.standby_mode == pm_mode ||
+ soc_pm.data.suspend_mode == pm_mode);
}
static int __init at91_pm_backup_init(void)
@@ -577,6 +630,9 @@ static int __init at91_pm_backup_init(void)
struct platform_device *pdev = NULL;
int ret = -ENODEV;
+ if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+ return -EPERM;
+
if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
return 0;
@@ -586,7 +642,7 @@ static int __init at91_pm_backup_init(void)
return ret;
}
- pm_data.sfrbu = of_iomap(np, 0);
+ soc_pm.data.sfrbu = of_iomap(np, 0);
of_node_put(np);
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
@@ -622,8 +678,8 @@ static int __init at91_pm_backup_init(void)
securam_fail:
put_device(&pdev->dev);
securam_fail_no_ref_dev:
- iounmap(pm_data.sfrbu);
- pm_data.sfrbu = NULL;
+ iounmap(soc_pm.data.sfrbu);
+ soc_pm.data.sfrbu = NULL;
return ret;
}
@@ -632,10 +688,10 @@ static void __init at91_pm_use_default_mode(int pm_mode)
if (pm_mode != AT91_PM_ULP1 && pm_mode != AT91_PM_BACKUP)
return;
- if (pm_data.standby_mode == pm_mode)
- pm_data.standby_mode = AT91_PM_ULP0;
- if (pm_data.suspend_mode == pm_mode)
- pm_data.suspend_mode = AT91_PM_ULP0;
+ if (soc_pm.data.standby_mode == pm_mode)
+ soc_pm.data.standby_mode = AT91_PM_ULP0;
+ if (soc_pm.data.suspend_mode == pm_mode)
+ soc_pm.data.suspend_mode = AT91_PM_ULP0;
}
static void __init at91_pm_modes_init(void)
@@ -653,7 +709,7 @@ static void __init at91_pm_modes_init(void)
goto ulp1_default;
}
- pm_data.shdwc = of_iomap(np, 0);
+ soc_pm.data.shdwc = of_iomap(np, 0);
of_node_put(np);
ret = at91_pm_backup_init();
@@ -667,8 +723,8 @@ static void __init at91_pm_modes_init(void)
return;
unmap:
- iounmap(pm_data.shdwc);
- pm_data.shdwc = NULL;
+ iounmap(soc_pm.data.shdwc);
+ soc_pm.data.shdwc = NULL;
ulp1_default:
at91_pm_use_default_mode(AT91_PM_ULP1);
backup_default:
@@ -711,14 +767,14 @@ static void __init at91_pm_init(void (*pm_idle)(void))
platform_device_register(&at91_cpuidle_device);
pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
- pm_data.pmc = of_iomap(pmc_np, 0);
- if (!pm_data.pmc) {
+ soc_pm.data.pmc = of_iomap(pmc_np, 0);
+ if (!soc_pm.data.pmc) {
pr_err("AT91: PM not supported, PMC not found\n");
return;
}
pmc = of_id->data;
- pm_data.uhp_udp_mask = pmc->uhp_udp_mask;
+ soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
if (pm_idle)
arm_pm_idle = pm_idle;
@@ -728,8 +784,8 @@ static void __init at91_pm_init(void (*pm_idle)(void))
if (at91_suspend_sram_fn) {
suspend_set_ops(&at91_pm_ops);
pr_info("AT91: PM: standby: %s, suspend: %s\n",
- pm_modes[pm_data.standby_mode].pattern,
- pm_modes[pm_data.suspend_mode].pattern);
+ pm_modes[soc_pm.data.standby_mode].pattern,
+ pm_modes[soc_pm.data.suspend_mode].pattern);
} else {
pr_info("AT91: PM not supported, due to no SRAM allocated\n");
}
@@ -750,6 +806,19 @@ void __init at91rm9200_pm_init(void)
at91_pm_init(at91rm9200_idle);
}
+void __init sam9x60_pm_init(void)
+{
+ if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+ return;
+
+ at91_pm_modes_init();
+ at91_dt_ramc();
+ at91_pm_init(at91sam9x60_idle);
+
+ soc_pm.ws_ids = sam9x60_ws_ids;
+ soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
+}
+
void __init at91sam9_pm_init(void)
{
if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
@@ -775,6 +844,10 @@ void __init sama5d2_pm_init(void)
at91_pm_modes_init();
sama5_pm_init();
+
+ soc_pm.ws_ids = sama5d2_ws_ids;
+ soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
+ soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
}
static int __init at91_pm_modes_select(char *str)
@@ -795,8 +868,8 @@ static int __init at91_pm_modes_select(char *str)
if (suspend < 0)
return 0;
- pm_data.standby_mode = standby;
- pm_data.suspend_mode = suspend;
+ soc_pm.data.standby_mode = standby;
+ soc_pm.data.suspend_mode = suspend;
return 0;
}
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index bfe1c4d06901..77e29309cc6e 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -51,15 +51,6 @@ tmp2 .req r5
.endm
/*
- * Wait until PLLA has locked.
- */
- .macro wait_pllalock
-1: ldr tmp1, [pmc, #AT91_PMC_SR]
- tst tmp1, #AT91_PMC_LOCKA
- beq 1b
- .endm
-
-/*
* Put the processor to enter the idle state
*/
.macro at91_cpu_idle
@@ -178,11 +169,46 @@ ENDPROC(at91_backup_mode)
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
+ /* Save RC oscillator state */
+ ldr tmp1, [pmc, #AT91_PMC_SR]
+ str tmp1, .saved_osc_status
+ tst tmp1, #AT91_PMC_MOSCRCS
+ bne 1f
+
+ /* Turn off RC oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCRCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Wait until the main RC oscillator is disabled */
+2: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCRCS
+ bne 2b
+
/* Wait for interrupt */
- at91_cpu_idle
+1: at91_cpu_idle
- /* Turn on the crystal oscillator */
+ /* Restore RC oscillator state */
+ ldr tmp1, .saved_osc_status
+ tst tmp1, #AT91_PMC_MOSCRCS
+ beq 4f
+
+ /* Turn on RC oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCRCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Wait for the main RC oscillator to stabilize */
+3: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCRCS
+ beq 3b
+
+ /* Turn on the crystal oscillator */
+4: ldr tmp1, [pmc, #AT91_CKGR_MOR]
orr tmp1, tmp1, #AT91_PMC_MOSCEN
orr tmp1, tmp1, #AT91_PMC_KEY
str tmp1, [pmc, #AT91_CKGR_MOR]
@@ -197,8 +223,26 @@ ENDPROC(at91_backup_mode)
.macro at91_pm_ulp1_mode
ldr pmc, .pmc_base
- /* Switch the main clock source to 12-MHz RC oscillator */
+ /* Save RC oscillator state and check if it is enabled. */
+ ldr tmp1, [pmc, #AT91_PMC_SR]
+ str tmp1, .saved_osc_status
+ tst tmp1, #AT91_PMC_MOSCRCS
+ bne 2f
+
+ /* Enable RC oscillator */
ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ orr tmp1, tmp1, #AT91_PMC_MOSCRCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Wait for the main RC oscillator to stabilize */
+1: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCRCS
+ beq 1b
+
+ /* Switch the main clock source to 12-MHz RC oscillator */
+2: ldr tmp1, [pmc, #AT91_CKGR_MOR]
bic tmp1, tmp1, #AT91_PMC_MOSCSEL
bic tmp1, tmp1, #AT91_PMC_KEY_MASK
orr tmp1, tmp1, #AT91_PMC_KEY
@@ -262,6 +306,25 @@ ENDPROC(at91_backup_mode)
str tmp1, [pmc, #AT91_PMC_MCKR]
wait_mckrdy
+
+ /* Restore RC oscillator state */
+ ldr tmp1, .saved_osc_status
+ tst tmp1, #AT91_PMC_MOSCRCS
+ bne 3f
+
+ /* Disable RC oscillator */
+ ldr tmp1, [pmc, #AT91_CKGR_MOR]
+ bic tmp1, tmp1, #AT91_PMC_MOSCRCEN
+ bic tmp1, tmp1, #AT91_PMC_KEY_MASK
+ orr tmp1, tmp1, #AT91_PMC_KEY
+ str tmp1, [pmc, #AT91_CKGR_MOR]
+
+ /* Wait until the RC oscillator is disabled */
+4: ldr tmp1, [pmc, #AT91_PMC_SR]
+ tst tmp1, #AT91_PMC_MOSCRCS
+ bne 4b
+
+3:
.endm
ENTRY(at91_ulp_mode)
@@ -279,14 +342,6 @@ ENTRY(at91_ulp_mode)
wait_mckrdy
- /* Save PLLA setting and disable it */
- ldr tmp1, [pmc, #AT91_CKGR_PLLAR]
- str tmp1, .saved_pllar
-
- mov tmp1, #AT91_PMC_PLLCOUNT
- orr tmp1, tmp1, #(1 << 29) /* bit 29 always set */
- str tmp1, [pmc, #AT91_CKGR_PLLAR]
-
ldr r0, .pm_mode
cmp r0, #AT91_PM_ULP1
beq ulp1_mode
@@ -301,18 +356,6 @@ ulp1_mode:
ulp_exit:
ldr pmc, .pmc_base
- /* Restore PLLA setting */
- ldr tmp1, .saved_pllar
- str tmp1, [pmc, #AT91_CKGR_PLLAR]
-
- tst tmp1, #(AT91_PMC_MUL & 0xff0000)
- bne 3f
- tst tmp1, #(AT91_PMC_MUL & ~0xff0000)
- beq 4f
-3:
- wait_pllalock
-4:
-
/*
* Restore master clock setting
*/
@@ -465,8 +508,6 @@ ENDPROC(at91_sramc_self_refresh)
.word 0
.saved_mckr:
.word 0
-.saved_pllar:
- .word 0
.saved_sam9_lpr:
.word 0
.saved_sam9_lpr1:
@@ -475,6 +516,8 @@ ENDPROC(at91_sramc_self_refresh)
.word 0
.saved_sam9_mdr1:
.word 0
+.saved_osc_status:
+ .word 0
ENTRY(at91_pm_suspend_in_sram_sz)
.word .-at91_pm_suspend_in_sram
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index ff097ecfa451..51a892702e27 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -29,6 +29,7 @@
#include <linux/platform_data/spi-davinci.h>
#include <linux/platform_data/usb-davinci.h>
#include <linux/platform_data/ti-aemif.h>
+#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/nvmem-provider.h>
@@ -53,14 +54,50 @@ static const short da830_evm_usb11_pins[] = {
-1
};
-static struct gpiod_lookup_table da830_evm_usb_gpio_lookup = {
+static struct regulator_consumer_supply da830_evm_usb_supplies[] = {
+ REGULATOR_SUPPLY("vbus", NULL),
+};
+
+static struct regulator_init_data da830_evm_usb_vbus_data = {
+ .consumer_supplies = da830_evm_usb_supplies,
+ .num_consumer_supplies = ARRAY_SIZE(da830_evm_usb_supplies),
+};
+
+static struct fixed_voltage_config da830_evm_usb_vbus = {
+ .supply_name = "vbus",
+ .microvolts = 3300000,
+ .init_data = &da830_evm_usb_vbus_data,
+};
+
+static struct platform_device da830_evm_usb_vbus_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &da830_evm_usb_vbus,
+ },
+};
+
+static struct gpiod_lookup_table da830_evm_usb_oc_gpio_lookup = {
.dev_id = "ohci-da8xx",
.table = {
- GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0),
GPIO_LOOKUP("davinci_gpio", ON_BD_USB_OVC, "oc", 0),
+ { }
},
};
+static struct gpiod_lookup_table da830_evm_usb_vbus_gpio_lookup = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table *da830_evm_usb_gpio_lookups[] = {
+ &da830_evm_usb_oc_gpio_lookup,
+ &da830_evm_usb_vbus_gpio_lookup,
+};
+
static struct da8xx_ohci_root_hub da830_evm_usb11_pdata = {
/* TPS2065 switch @ 5V */
.potpgt = (3 + 1) / 2, /* 3 ms max */
@@ -75,6 +112,9 @@ static __init void da830_evm_usb_init(void)
pr_warn("%s: USB PHY CLK registration failed: %d\n",
__func__, ret);
+ gpiod_add_lookup_tables(da830_evm_usb_gpio_lookups,
+ ARRAY_SIZE(da830_evm_usb_gpio_lookups));
+
ret = da8xx_register_usb_phy();
if (ret)
pr_warn("%s: USB PHY registration failed: %d\n",
@@ -100,7 +140,11 @@ static __init void da830_evm_usb_init(void)
return;
}
- gpiod_add_lookup_table(&da830_evm_usb_gpio_lookup);
+ ret = platform_device_register(&da830_evm_usb_vbus_device);
+ if (ret) {
+ pr_warn("%s: Unable to register the vbus supply\n", __func__);
+ return;
+ }
ret = da8xx_register_usb11(&da830_evm_usb11_pdata);
if (ret)
@@ -156,6 +200,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
GPIO_ACTIVE_LOW),
GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_LOW),
+ { }
},
};
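[Editor's note] Instead of handing the VBUS GPIO straight to ohci-da8xx, the da830-evm hunks above (and the identical omapl138-hawk conversion further down) model VBUS as a fixed regulator: the GPIO is looked up by the "reg-fixed-voltage.0" device, and the OHCI driver keeps only the overcurrent pin. Any consumer can then claim the supply through the regulator framework; a minimal consumer-side sketch (illustrative, not the actual ohci-da8xx code):

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/regulator/consumer.h>

        static int example_vbus_on(struct device *dev)
        {
                struct regulator *vbus = devm_regulator_get(dev, "vbus");

                if (IS_ERR(vbus))
                        return PTR_ERR(vbus);

                return regulator_enable(vbus);  /* switches the fixed "vbus" supply on */
        }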
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 1fdc9283a8c5..4ee65a8a3b80 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -784,6 +784,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
GPIO_ACTIVE_LOW),
GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
GPIO_ACTIVE_HIGH),
+ { }
},
};
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 64d81fc86f14..5113273fda69 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -121,6 +121,7 @@ static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ { }
},
};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index de15f782816e..9d87d4e440ea 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -663,6 +663,7 @@ static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ { }
},
};
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 0896af2bed24..db177a6a7e48 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -21,6 +21,7 @@
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
#include <linux/platform_data/ti-aemif.h>
+#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
@@ -298,14 +299,50 @@ static const short da850_hawk_usb11_pins[] = {
-1
};
-static struct gpiod_lookup_table hawk_usb_gpio_lookup = {
+static struct regulator_consumer_supply hawk_usb_supplies[] = {
+ REGULATOR_SUPPLY("vbus", NULL),
+};
+
+static struct regulator_init_data hawk_usb_vbus_data = {
+ .consumer_supplies = hawk_usb_supplies,
+ .num_consumer_supplies = ARRAY_SIZE(hawk_usb_supplies),
+};
+
+static struct fixed_voltage_config hawk_usb_vbus = {
+ .supply_name = "vbus",
+ .microvolts = 3300000,
+ .init_data = &hawk_usb_vbus_data,
+};
+
+static struct platform_device hawk_usb_vbus_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &hawk_usb_vbus,
+ },
+};
+
+static struct gpiod_lookup_table hawk_usb_oc_gpio_lookup = {
.dev_id = "ohci-da8xx",
.table = {
- GPIO_LOOKUP("davinci_gpio", DA850_USB1_VBUS_PIN, "vbus", 0),
GPIO_LOOKUP("davinci_gpio", DA850_USB1_OC_PIN, "oc", 0),
+ { }
},
};
+static struct gpiod_lookup_table hawk_usb_vbus_gpio_lookup = {
+ .dev_id = "reg-fixed-voltage.0",
+ .table = {
+ GPIO_LOOKUP("davinci_gpio", DA850_USB1_VBUS_PIN, NULL, 0),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table *hawk_usb_gpio_lookups[] = {
+ &hawk_usb_oc_gpio_lookup,
+ &hawk_usb_vbus_gpio_lookup,
+};
+
static struct da8xx_ohci_root_hub omapl138_hawk_usb11_pdata = {
/* TPS2087 switch @ 5V */
.potpgt = (3 + 1) / 2, /* 3 ms max */
@@ -326,12 +363,19 @@ static __init void omapl138_hawk_usb_init(void)
pr_warn("%s: USB PHY CLK registration failed: %d\n",
__func__, ret);
+ gpiod_add_lookup_tables(hawk_usb_gpio_lookups,
+ ARRAY_SIZE(hawk_usb_gpio_lookups));
+
ret = da8xx_register_usb_phy();
if (ret)
pr_warn("%s: USB PHY registration failed: %d\n",
__func__, ret);
- gpiod_add_lookup_table(&hawk_usb_gpio_lookup);
+ ret = platform_device_register(&hawk_usb_vbus_device);
+ if (ret) {
+ pr_warn("%s: Unable to register the vbus supply\n", __func__);
+ return;
+ }
ret = da8xx_register_usb11(&omapl138_hawk_usb11_pdata);
if (ret)
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 63511f638ce4..e6b8ffd934a1 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -12,6 +12,7 @@
#include <linux/clk/davinci.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 67ab71ba3ad3..77bc64d6e39b 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -18,6 +18,7 @@
#include <linux/cpufreq.h>
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/platform_data/clk-da8xx-cfgchip.h>
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index b8dc674e06bc..036139fe0d0f 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -17,6 +17,7 @@
#include <linux/dma-contiguous.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 4a482445b9a2..c6073326be2e 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 8e0a77315add..2f9ae6431bf5 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index cecc7ceb8d34..1b9e9a6192ef 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -14,6 +14,7 @@
#include <linux/clkdev.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index f33392f77a03..62ca952fe161 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irqchip/irq-davinci-aintc.h>
#include <linux/platform_data/edma.h>
#include <linux/platform_data/gpio-davinci.h>
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 0d420a2bfe3e..d7b826d2695c 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_data/dma-mv_xor.h>
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index bda6c3a5c923..5d3a3e302012 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/sizes.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index d2eee707d27f..b9f523d9dc8c 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -20,8 +20,9 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/clkdev.h>
+#include <linux/soc/cirrus/ep93xx.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <asm/div64.h>
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 706515faee06..cc1382f879af 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -39,11 +39,13 @@
#include <linux/usb/ohci_pdriver.h>
#include <linux/random.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <linux/platform_data/video-ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>
-#include <mach/gpio-ep93xx.h>
+#include <linux/soc/cirrus/ep93xx.h>
+
+#include "gpio-ep93xx.h"
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -123,7 +125,7 @@ void ep93xx_devcfg_set_clear(unsigned int set_bits, unsigned int clear_bits)
/**
* ep93xx_chip_revision() - returns the EP93xx chip revision
*
- * See <mach/platform.h> for more information.
+ * See "platform.h" for more information.
*/
unsigned int ep93xx_chip_revision(void)
{
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
index 88a4c9b089a5..821427107b11 100644
--- a/arch/arm/mach-ep93xx/dma.c
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -26,7 +26,7 @@
#include <linux/platform_device.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include "soc.h"
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 34e18e9556d9..c8c47122cf1d 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -33,10 +33,10 @@
#include <sound/cs4271.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <linux/platform_data/video-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>
-#include <mach/gpio-ep93xx.h>
+#include "gpio-ep93xx.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index 0cca5b183309..ac48e3476587 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/sizes.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/include/mach/gpio-ep93xx.h b/arch/arm/mach-ep93xx/gpio-ep93xx.h
index 242af4a401ea..242af4a401ea 100644
--- a/arch/arm/mach-ep93xx/include/mach/gpio-ep93xx.h
+++ b/arch/arm/mach-ep93xx/gpio-ep93xx.h
diff --git a/arch/arm/mach-ep93xx/include/mach/hardware.h b/arch/arm/mach-ep93xx/hardware.h
index 8938906e780a..e7d850e04782 100644
--- a/arch/arm/mach-ep93xx/include/mach/hardware.h
+++ b/arch/arm/mach-ep93xx/hardware.h
@@ -6,7 +6,7 @@
#ifndef __ASM_ARCH_HARDWARE_H
#define __ASM_ARCH_HARDWARE_H
-#include <mach/platform.h>
+#include "platform.h"
/*
* The EP93xx has two external crystal oscillators. To generate the
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index 373583c29825..c7f64e4ff6c7 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/platform.h
index 6c41c794bed5..b4045a186239 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/platform.h
@@ -5,6 +5,7 @@
#ifndef __ASSEMBLY__
+#include <linux/platform_data/eth-ep93xx.h>
#include <linux/reboot.h>
struct device;
@@ -15,23 +16,9 @@ struct ep93xxfb_mach_info;
struct ep93xx_keypad_platform_data;
struct ep93xx_spi_info;
-struct ep93xx_eth_data
-{
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
void ep93xx_map_io(void);
void ep93xx_init_irq(void);
-#define EP93XX_CHIP_REV_D0 3
-#define EP93XX_CHIP_REV_D1 4
-#define EP93XX_CHIP_REV_E0 5
-#define EP93XX_CHIP_REV_E1 6
-#define EP93XX_CHIP_REV_E2 7
-
-unsigned int ep93xx_chip_revision(void);
-
void ep93xx_register_flash(unsigned int width,
resource_size_t start, resource_size_t size);
@@ -41,19 +28,11 @@ void ep93xx_register_spi(struct ep93xx_spi_info *info,
struct spi_board_info *devices, int num);
void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
void ep93xx_register_pwm(int pwm0, int pwm1);
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
void ep93xx_register_keypad(struct ep93xx_keypad_platform_data *data);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
void ep93xx_register_i2s(void);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
void ep93xx_register_ac97(void);
void ep93xx_register_ide(void);
void ep93xx_register_adc(void);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
struct device *ep93xx_init_devices(void);
extern void ep93xx_timer_init(void);
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f0f38c0dba52..5a3c32fa7ace 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -27,8 +27,8 @@
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
-#include <mach/hardware.h>
-#include <mach/gpio-ep93xx.h>
+#include "hardware.h"
+#include "gpio-ep93xx.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index cf0cb58b3454..f8f89551dbed 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -25,9 +25,9 @@
#include <linux/mtd/platnand.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <linux/platform_data/video-ep93xx.h>
-#include <mach/gpio-ep93xx.h>
+#include "gpio-ep93xx.h"
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index a3a20c83c6b8..e9f369067293 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -24,8 +24,8 @@
#include <linux/platform_data/spi-ep93xx.h>
#include <linux/gpio/machine.h>
-#include <mach/gpio-ep93xx.h>
-#include <mach/hardware.h>
+#include "gpio-ep93xx.h"
+#include "hardware.h"
#include <mach/irqs.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index f95a644769e4..d44db6d67f35 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -31,10 +31,10 @@
#include <sound/cs4271.h>
-#include <mach/hardware.h>
+#include "hardware.h"
#include <linux/platform_data/video-ep93xx.h>
#include <linux/platform_data/spi-ep93xx.h>
-#include <mach/gpio-ep93xx.h>
+#include "gpio-ep93xx.h"
#include <asm/mach-types.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index b40963cf91c7..1c518b8ee520 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -106,21 +106,15 @@ config SOC_EXYNOS5420
bool "SAMSUNG EXYNOS5420"
default y
depends on ARCH_EXYNOS5
+ select MCPM if SMP
+ select ARM_CCI400_PORT_CTRL
+ select ARM_CPU_SUSPEND
config SOC_EXYNOS5800
bool "SAMSUNG EXYNOS5800"
default y
depends on SOC_EXYNOS5420
-config EXYNOS5420_MCPM
- bool "Exynos5420 Multi-Cluster PM support"
- depends on MCPM && SOC_EXYNOS5420
- select ARM_CCI400_PORT_CTRL
- select ARM_CPU_SUSPEND
- help
- This is needed to provide CPU and cluster power management
- on Exynos5420 implementing big.LITTLE.
-
config EXYNOS_CPU_SUSPEND
bool
select ARM_CPU_SUSPEND
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index cd00c82a1add..264dbaa89c3d 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -18,5 +18,5 @@ plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_sleep.o :=-Wa,-march=armv7-a$(plus_sec)
-obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
+obj-$(CONFIG_MCPM) += mcpm-exynos.o
CFLAGS_mcpm-exynos.o += -march=armv7-a
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 1b8699e94098..c93356a8d662 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -91,6 +91,7 @@ extern u32 cp15_save_power;
extern void __iomem *sysram_ns_base_addr;
extern void __iomem *sysram_base_addr;
+extern phys_addr_t sysram_base_phys;
extern void __iomem *pmu_base_addr;
void exynos_sysram_init(void);
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 865dcc4c3181..9aa483366ebc 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -33,6 +33,7 @@ static struct platform_device exynos_cpuidle = {
};
void __iomem *sysram_base_addr __ro_after_init;
+phys_addr_t sysram_base_phys __ro_after_init;
void __iomem *sysram_ns_base_addr __ro_after_init;
void __init exynos_sysram_init(void)
@@ -43,6 +44,8 @@ void __init exynos_sysram_init(void)
if (!of_device_is_available(node))
continue;
sysram_base_addr = of_iomap(node, 0);
+ sysram_base_phys = of_translate_address(node,
+ of_get_address(node, 0, NULL, NULL));
break;
}
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index d602e3bf3f96..2eaf2dbb8e81 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -196,6 +196,7 @@ bool __init exynos_secure_firmware_available(void)
return false;
addr = of_get_address(nd, 0, NULL, NULL);
+ of_node_put(nd);
if (!addr) {
pr_err("%s: No address specified.\n", __func__);
return false;
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 72bc035bedbe..9a681b421ae1 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -75,14 +75,25 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
*/
if (cluster &&
cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ unsigned int timeout = 16;
+
/*
* Before we reset the Little cores, we should wait
* the SPARE2 register is set to 1 because the init
* codes of the iROM will set the register after
* initialization.
*/
- while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) {
+ timeout--;
udelay(10);
+ }
+
+ if (timeout == 0) {
+ pr_err("cpu %u cluster %u powerup failed\n",
+ cpu, cluster);
+ exynos_cpu_power_down(cpunr);
+ return -ETIMEDOUT;
+ }
pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
EXYNOS_SWRESET);
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index abcac6164233..0cbbae8bf1f8 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -214,13 +214,20 @@ static inline void __iomem *cpu_boot_reg(int cpu)
*/
void exynos_core_restart(u32 core_id)
{
+ unsigned int timeout = 16;
u32 val;
if (!of_machine_is_compatible("samsung,exynos3250"))
return;
- while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ while (timeout && !pmu_raw_readl(S5P_PMU_SPARE2)) {
+ timeout--;
udelay(10);
+ }
+ if (timeout == 0) {
+ pr_err("cpu core %u restart failed\n", core_id);
+ return;
+ }
udelay(10);
val = pmu_raw_readl(EXYNOS_ARM_CORE_STATUS(core_id));
diff --git a/arch/arm/mach-exynos/smc.h b/arch/arm/mach-exynos/smc.h
index f355185d4239..98832e50852d 100644
--- a/arch/arm/mach-exynos/smc.h
+++ b/arch/arm/mach-exynos/smc.h
@@ -25,6 +25,13 @@
#define SMC_CMD_L2X0INVALL (-24)
#define SMC_CMD_L2X0DEBUG (-25)
+/* For Accessing CP15/SFR (General) */
+#define SMC_CMD_REG (-101)
+
+/* defines for SMC_CMD_REG */
+#define SMC_REG_CLASS_SFR_W (0x1 << 30)
+#define SMC_REG_ID_SFR_W(addr) (SMC_REG_CLASS_SFR_W | ((addr) >> 2))
+
#ifndef __ASSEMBLY__
extern void exynos_smc(u32 cmd, u32 arg1, u32 arg2, u32 arg3);
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 0850505ac78b..be122af0de8f 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -31,6 +31,7 @@
#include <asm/suspend.h>
#include "common.h"
+#include "smc.h"
#define REG_TABLE_END (-1U)
@@ -62,6 +63,8 @@ struct exynos_pm_state {
int cpu_state;
unsigned int pmu_spare3;
void __iomem *sysram_base;
+ phys_addr_t sysram_phys;
+ bool secure_firmware;
};
static const struct exynos_pm_data *pm_data __ro_after_init;
@@ -265,9 +268,7 @@ static int exynos5420_cpu_suspend(unsigned long arg)
unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- writel_relaxed(0x0, pm_state.sysram_base + EXYNOS5420_CPU_STATE);
-
- if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) {
+ if (IS_ENABLED(CONFIG_MCPM)) {
mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
mcpm_cpu_suspend();
}
@@ -341,11 +342,16 @@ static void exynos5420_pm_prepare(void)
*/
pm_state.cpu_state = readl_relaxed(pm_state.sysram_base +
EXYNOS5420_CPU_STATE);
+ writel_relaxed(0x0, pm_state.sysram_base + EXYNOS5420_CPU_STATE);
+ if (pm_state.secure_firmware)
+ exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W(pm_state.sysram_phys +
+ EXYNOS5420_CPU_STATE),
+ 0, 0);
exynos_pm_enter_sleep_mode();
/* ensure at least INFORM0 has the resume address */
- if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
+ if (IS_ENABLED(CONFIG_MCPM))
pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0);
tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0));
@@ -444,8 +450,27 @@ early_wakeup:
static void exynos5420_prepare_pm_resume(void)
{
- if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
+ unsigned int mpidr, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ if (IS_ENABLED(CONFIG_MCPM))
WARN_ON(mcpm_cpu_powered_up());
+
+ if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
+ /*
+ * When system is resumed on the LITTLE/KFC core (cluster 1),
+ * the DSCR is not properly updated until the power is turned
+ * on also for the cluster 0. Enable it for a while to
+ * propagate the SPNIDEN and SPIDEN signals from Secure JTAG
+ * block and avoid undefined instruction issue on CP14 reset.
+ */
+ pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ pmu_raw_writel(0,
+ EXYNOS_COMMON_CONFIGURATION(0));
+ }
}
static void exynos5420_pm_resume(void)
@@ -460,6 +485,11 @@ static void exynos5420_pm_resume(void)
/* Restore the sysram cpu state register */
writel_relaxed(pm_state.cpu_state,
pm_state.sysram_base + EXYNOS5420_CPU_STATE);
+ if (pm_state.secure_firmware)
+ exynos_smc(SMC_CMD_REG,
+ SMC_REG_ID_SFR_W(pm_state.sysram_phys +
+ EXYNOS5420_CPU_STATE),
+ EXYNOS_AFTR_MAGIC, 0);
pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL,
S5P_CENTRAL_SEQ_OPTION);
@@ -639,8 +669,10 @@ void __init exynos_pm_init(void)
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+ of_node_put(np);
return;
}
+ of_node_put(np);
pm_data = (const struct exynos_pm_data *) match->data;
@@ -659,8 +691,11 @@ void __init exynos_pm_init(void)
* Applicable as of now only to Exynos542x. If booted under secure
* firmware, the non-secure region of sysram should be used.
*/
- if (exynos_secure_firmware_available())
+ if (exynos_secure_firmware_available()) {
+ pm_state.sysram_phys = sysram_base_phys;
pm_state.sysram_base = sysram_ns_base_addr;
- else
+ pm_state.secure_firmware = true;
+ } else {
pm_state.sysram_base = sysram_base_addr;
+ }
}
diff --git a/arch/arm/mach-imx/devices/platform-fec.c b/arch/arm/mach-imx/devices/platform-fec.c
index b403a4fe2892..605c0af5851d 100644
--- a/arch/arm/mach-imx/devices/platform-fec.c
+++ b/arch/arm/mach-imx/devices/platform-fec.c
@@ -7,7 +7,7 @@
* Free Software Foundation.
*/
#include <linux/dma-mapping.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "../hardware.h"
#include "devices-common.h"
diff --git a/arch/arm/mach-imx/devices/platform-gpio_keys.c b/arch/arm/mach-imx/devices/platform-gpio_keys.c
index 486282539c76..9f0a132ea1bc 100644
--- a/arch/arm/mach-imx/devices/platform-gpio_keys.c
+++ b/arch/arm/mach-imx/devices/platform-gpio_keys.c
@@ -15,7 +15,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "../hardware.h"
#include "devices-common.h"
diff --git a/arch/arm/mach-imx/devices/platform-imx2-wdt.c b/arch/arm/mach-imx/devices/platform-imx2-wdt.c
index 8c134c8d7500..0c6d3c05fd6d 100644
--- a/arch/arm/mach-imx/devices/platform-imx2-wdt.c
+++ b/arch/arm/mach-imx/devices/platform-imx2-wdt.c
@@ -6,7 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "../hardware.h"
#include "devices-common.h"
diff --git a/arch/arm/mach-imx/devices/platform-mxc_nand.c b/arch/arm/mach-imx/devices/platform-mxc_nand.c
index 676df4920c7b..046e0cc826c1 100644
--- a/arch/arm/mach-imx/devices/platform-mxc_nand.c
+++ b/arch/arm/mach-imx/devices/platform-mxc_nand.c
@@ -6,7 +6,7 @@
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "../hardware.h"
#include "devices-common.h"
diff --git a/arch/arm/mach-imx/hardware.h b/arch/arm/mach-imx/hardware.h
index 90e10cbd8fd1..b5ca8cebe1d6 100644
--- a/arch/arm/mach-imx/hardware.h
+++ b/arch/arm/mach-imx/hardware.h
@@ -24,7 +24,7 @@
#include <asm/io.h>
#include <soc/imx/revision.h>
#endif
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#define addr_in_module(addr, mod) \
((unsigned long)(addr) - mod ## _BASE_ADDR < mod ## _SIZE)
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 87f45b926c78..e527532f6931 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -354,9 +354,11 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
*
* Note that IRQ #32 is GIC SPI #0.
*/
- imx_gpc_hwirq_unmask(0);
+ if (mode != WAIT_CLOCKED)
+ imx_gpc_hwirq_unmask(0);
writel_relaxed(val, ccm_base + CLPCR);
- imx_gpc_hwirq_mask(0);
+ if (mode != WAIT_CLOCKED)
+ imx_gpc_hwirq_mask(0);
return 0;
}
@@ -631,7 +633,7 @@ static void imx6_pm_stby_poweroff(void)
static int imx6_pm_stby_poweroff_probe(void)
{
if (pm_power_off) {
- pr_warn("%s: pm_power_off already claimed %p %pf!\n",
+ pr_warn("%s: pm_power_off already claimed %p %ps!\n",
__func__, pm_power_off, pm_power_off);
return -EBUSY;
}
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 8dfad012dfae..6ddbe153910a 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -27,7 +27,7 @@
#include <linux/irqchip/arm-vic.h>
#include <linux/gpio/machine.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "lm.h"
#include "impd1.h"
diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
index 070d92ae1b6f..8426ab9e2f5a 100644
--- a/arch/arm/mach-iop13xx/pci.c
+++ b/arch/arm/mach-iop13xx/pci.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <asm/irq.h>
#include <mach/hardware.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/signal.h>
#include <asm/mach/pci.h>
#include "pci.h"
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index 116feb6b261e..d3d8c78e7d10 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -23,7 +23,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/irqs.h>
/* assumes CONTROLLER_ONLY# is never asserted in the ESSR register */
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index fea008123eb1..83afb80d38a8 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -4,6 +4,20 @@ menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
+config MACH_IXP4XX_OF
+ bool
+ prompt "Device Tree IXP4xx boards"
+ default y
+ select ARM_APPENDED_DTB # Old Redboot bootloaders deployed
+ select I2C
+ select I2C_IOP3XX
+ select PCI
+ select SERIAL_OF_PLATFORM
+ select TIMER_OF
+ select USE_OF
+ help
+ Say 'Y' here to support Device Tree-based IXP4xx platforms.
+
config MACH_NSLU2
bool
prompt "Linksys NSLU2"
@@ -222,19 +236,6 @@ config IXP4XX_INDIRECT_PCI
need to use the indirect method instead. If you don't know
what you need, leave this option unselected.
-config IXP4XX_QMGR
- tristate "IXP4xx Queue Manager support"
- help
- This driver supports IXP4xx built-in hardware queue manager
- and is automatically selected by Ethernet and HSS drivers.
-
-config IXP4XX_NPE
- tristate "IXP4xx Network Processor Engine support"
- select FW_LOADER
- help
- This driver supports IXP4xx built-in network coprocessors
- and is automatically selected by Ethernet and HSS drivers.
-
endmenu
endif
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index f09994500a34..1fa4e6605782 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -6,6 +6,9 @@
obj-pci-y :=
obj-pci-n :=
+# Device tree platform
+obj-pci-$(CONFIG_MACH_IXP4XX_OF) += ixp4xx-of.o
+
obj-pci-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o
obj-pci-$(CONFIG_MACH_AVILA) += avila-pci.o
obj-pci-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o
@@ -40,5 +43,3 @@ obj-$(CONFIG_MACH_GORAMO_MLR) += goramo_mlr.o
obj-$(CONFIG_MACH_ARCOM_VULCAN) += vulcan-setup.o
obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o
-obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
-obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
diff --git a/arch/arm/mach-ixp4xx/avila-pci.c b/arch/arm/mach-ixp4xx/avila-pci.c
index 548c7d43ade6..9c834f0f4231 100644
--- a/arch/arm/mach-ixp4xx/avila-pci.c
+++ b/arch/arm/mach-ixp4xx/avila-pci.c
@@ -27,6 +27,8 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define AVILA_MAX_DEV 4
#define LOFT_MAX_DEV 6
#define IRQ_LINES 4
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 44cbbce6bda6..1981b33109cb 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -28,6 +28,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
#define AVILA_SDA_PIN 7
#define AVILA_SCL_PIN 6
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 6835b17113e5..a53104bb28f5 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -31,7 +31,7 @@
#include <asm/cputype.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/pci.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 846e033c56fa..381f452de28d 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -22,41 +22,29 @@
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
-#include <linux/time.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/export.h>
-#include <linux/gpio/driver.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/sched_clock.h>
+#include <linux/irqchip/irq-ixp4xx.h>
+#include <linux/platform_data/timer-ixp4xx.h>
#include <mach/udc.h>
#include <mach/hardware.h>
#include <mach/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
+#include <asm/exception.h>
#include <asm/irq.h>
#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-#define IXP4XX_TIMER_FREQ 66666000
-
-/*
- * The timer register doesn't allow to specify the two least significant bits of
- * the timeout value and assumes them being zero. So make sure IXP4XX_LATCH is
- * the best value with the two least significant bits unset.
- */
-#define IXP4XX_LATCH DIV_ROUND_CLOSEST(IXP4XX_TIMER_FREQ, \
- (IXP4XX_OST_RELOAD_MASK + 1) * HZ) * \
- (IXP4XX_OST_RELOAD_MASK + 1)
+#include "irqs.h"
-static void __init ixp4xx_clocksource_init(void);
-static void __init ixp4xx_clockevent_init(void);
-static struct clock_event_device clockevent_ixp4xx;
+#define IXP4XX_TIMER_FREQ 66666000
/*************************************************************************
* IXP4xx chipset I/O mapping
@@ -77,11 +65,6 @@ static struct map_desc ixp4xx_io_desc[] __initdata = {
.pfn = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS),
.length = IXP4XX_PCI_CFG_REGION_SIZE,
.type = MT_DEVICE
- }, { /* Queue Manager */
- .virtual = (unsigned long)IXP4XX_QMGR_BASE_VIRT,
- .pfn = __phys_to_pfn(IXP4XX_QMGR_BASE_PHYS),
- .length = IXP4XX_QMGR_REGION_SIZE,
- .type = MT_DEVICE
},
};
@@ -90,258 +73,23 @@ void __init ixp4xx_map_io(void)
iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc));
}
-/*
- * GPIO-functions
- */
-/*
- * The following converted to the real HW bits the gpio_line_config
- */
-/* GPIO pin types */
-#define IXP4XX_GPIO_OUT 0x1
-#define IXP4XX_GPIO_IN 0x2
-
-/* GPIO signal types */
-#define IXP4XX_GPIO_LOW 0
-#define IXP4XX_GPIO_HIGH 1
-
-/* GPIO Clocks */
-#define IXP4XX_GPIO_CLK_0 14
-#define IXP4XX_GPIO_CLK_1 15
-
-static void gpio_line_config(u8 line, u32 direction)
-{
- if (direction == IXP4XX_GPIO_IN)
- *IXP4XX_GPIO_GPOER |= (1 << line);
- else
- *IXP4XX_GPIO_GPOER &= ~(1 << line);
-}
-
-static void gpio_line_get(u8 line, int *value)
-{
- *value = (*IXP4XX_GPIO_GPINR >> line) & 0x1;
-}
-
-static void gpio_line_set(u8 line, int value)
-{
- if (value == IXP4XX_GPIO_HIGH)
- *IXP4XX_GPIO_GPOUTR |= (1 << line);
- else if (value == IXP4XX_GPIO_LOW)
- *IXP4XX_GPIO_GPOUTR &= ~(1 << line);
-}
-
-/*************************************************************************
- * IXP4xx chipset IRQ handling
- *
- * TODO: GPIO IRQs should be marked invalid until the user of the IRQ
- * (be it PCI or something else) configures that GPIO line
- * as an IRQ.
- **************************************************************************/
-enum ixp4xx_irq_type {
- IXP4XX_IRQ_LEVEL, IXP4XX_IRQ_EDGE
-};
-
-/* Each bit represents an IRQ: 1: edge-triggered, 0: level triggered */
-static unsigned long long ixp4xx_irq_edge = 0;
-
-/*
- * IRQ -> GPIO mapping table
- */
-static signed char irq2gpio[32] = {
- -1, -1, -1, -1, -1, -1, 0, 1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, -1, -1,
-};
-
-static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
- int irq;
-
- for (irq = 0; irq < 32; irq++) {
- if (irq2gpio[irq] == gpio)
- return irq;
- }
- return -EINVAL;
-}
-
-static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
-{
- int line = irq2gpio[d->irq];
- u32 int_style;
- enum ixp4xx_irq_type irq_type;
- volatile u32 *int_reg;
-
- /*
- * Only for GPIO IRQs
- */
- if (line < 0)
- return -EINVAL;
-
- switch (type){
- case IRQ_TYPE_EDGE_BOTH:
- int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
- irq_type = IXP4XX_IRQ_EDGE;
- break;
- case IRQ_TYPE_EDGE_RISING:
- int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
- irq_type = IXP4XX_IRQ_EDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
- irq_type = IXP4XX_IRQ_EDGE;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
- irq_type = IXP4XX_IRQ_LEVEL;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
- irq_type = IXP4XX_IRQ_LEVEL;
- break;
- default:
- return -EINVAL;
- }
-
- if (irq_type == IXP4XX_IRQ_EDGE)
- ixp4xx_irq_edge |= (1 << d->irq);
- else
- ixp4xx_irq_edge &= ~(1 << d->irq);
-
- if (line >= 8) { /* pins 8-15 */
- line -= 8;
- int_reg = IXP4XX_GPIO_GPIT2R;
- } else { /* pins 0-7 */
- int_reg = IXP4XX_GPIO_GPIT1R;
- }
-
- /* Clear the style for the appropriate pin */
- *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR <<
- (line * IXP4XX_GPIO_STYLE_SIZE));
-
- *IXP4XX_GPIO_GPISR = (1 << line);
-
- /* Set the new style */
- *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
-
- /* Configure the line as an input */
- gpio_line_config(irq2gpio[d->irq], IXP4XX_GPIO_IN);
-
- return 0;
-}
-
-static void ixp4xx_irq_mask(struct irq_data *d)
-{
- if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
- *IXP4XX_ICMR2 &= ~(1 << (d->irq - 32));
- else
- *IXP4XX_ICMR &= ~(1 << d->irq);
-}
-
-static void ixp4xx_irq_ack(struct irq_data *d)
-{
- int line = (d->irq < 32) ? irq2gpio[d->irq] : -1;
-
- if (line >= 0)
- *IXP4XX_GPIO_GPISR = (1 << line);
-}
-
-/*
- * Level triggered interrupts on GPIO lines can only be cleared when the
- * interrupt condition disappears.
- */
-static void ixp4xx_irq_unmask(struct irq_data *d)
-{
- if (!(ixp4xx_irq_edge & (1 << d->irq)))
- ixp4xx_irq_ack(d);
-
- if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && d->irq >= 32)
- *IXP4XX_ICMR2 |= (1 << (d->irq - 32));
- else
- *IXP4XX_ICMR |= (1 << d->irq);
-}
-
-static struct irq_chip ixp4xx_irq_chip = {
- .name = "IXP4xx",
- .irq_ack = ixp4xx_irq_ack,
- .irq_mask = ixp4xx_irq_mask,
- .irq_unmask = ixp4xx_irq_unmask,
- .irq_set_type = ixp4xx_set_irq_type,
-};
-
void __init ixp4xx_init_irq(void)
{
- int i = 0;
-
/*
* ixp4xx does not implement the XScale PWRMODE register
* so it must not call cpu_do_idle().
*/
cpu_idle_poll_ctrl(true);
- /* Route all sources to IRQ instead of FIQ */
- *IXP4XX_ICLR = 0x0;
-
- /* Disable all interrupt */
- *IXP4XX_ICMR = 0x0;
-
- if (cpu_is_ixp46x() || cpu_is_ixp43x()) {
- /* Route upper 32 sources to IRQ instead of FIQ */
- *IXP4XX_ICLR2 = 0x00;
-
- /* Disable upper 32 interrupts */
- *IXP4XX_ICMR2 = 0x00;
- }
-
- /* Default to all level triggered */
- for(i = 0; i < NR_IRQS; i++) {
- irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
- handle_level_irq);
- irq_clear_status_flags(i, IRQ_NOREQUEST);
- }
-}
-
-
-/*************************************************************************
- * IXP4xx timer tick
- * We use OS timer1 on the CPU for the timer tick and the timestamp
- * counter as a source of real clock ticks to account for missed jiffies.
- *************************************************************************/
-
-static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- /* Clear Pending Interrupt by writing '1' to it */
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
+ ixp4xx_irq_init(IXP4XX_INTC_BASE_PHYS,
+ (cpu_is_ixp46x() || cpu_is_ixp43x()));
}
-static struct irqaction ixp4xx_timer_irq = {
- .name = "timer1",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = ixp4xx_timer_interrupt,
- .dev_id = &clockevent_ixp4xx,
-};
-
void __init ixp4xx_timer_init(void)
{
- /* Reset/disable counter */
- *IXP4XX_OSRT1 = 0;
-
- /* Clear Pending Interrupt by writing '1' to it */
- *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
-
- /* Reset time-stamp counter */
- *IXP4XX_OSTS = 0;
-
- /* Connect the interrupt handler and enable the interrupt */
- setup_irq(IRQ_IXP4XX_TIMER1, &ixp4xx_timer_irq);
-
- ixp4xx_clocksource_init();
- ixp4xx_clockevent_init();
+ return ixp4xx_timer_setup(IXP4XX_TIMER_BASE_PHYS,
+ IRQ_IXP4XX_TIMER1,
+ IXP4XX_TIMER_FREQ);
}
static struct pxa2xx_udc_mach_info ixp4xx_udc_info;
@@ -364,6 +112,24 @@ static struct resource ixp4xx_udc_resources[] = {
},
};
+static struct resource ixp4xx_gpio_resource[] = {
+ {
+ .start = IXP4XX_GPIO_BASE_PHYS,
+ .end = IXP4XX_GPIO_BASE_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ixp4xx_gpio_device = {
+ .name = "ixp4xx-gpio",
+ .id = -1,
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .resource = ixp4xx_gpio_resource,
+ .num_resources = ARRAY_SIZE(ixp4xx_gpio_resource),
+};
+
/*
* USB device controller. The IXP4xx uses the same controller as PXA25X,
* so we just use the same device.
@@ -378,7 +144,61 @@ static struct platform_device ixp4xx_udc_device = {
},
};
+static struct resource ixp4xx_npe_resources[] = {
+ {
+ .start = IXP4XX_NPEA_BASE_PHYS,
+ .end = IXP4XX_NPEA_BASE_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IXP4XX_NPEB_BASE_PHYS,
+ .end = IXP4XX_NPEB_BASE_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IXP4XX_NPEC_BASE_PHYS,
+ .end = IXP4XX_NPEC_BASE_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+
+};
+
+static struct platform_device ixp4xx_npe_device = {
+ .name = "ixp4xx-npe",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ixp4xx_npe_resources),
+ .resource = ixp4xx_npe_resources,
+};
+
+static struct resource ixp4xx_qmgr_resources[] = {
+ {
+ .start = IXP4XX_QMGR_BASE_PHYS,
+ .end = IXP4XX_QMGR_BASE_PHYS + 0x3fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_IXP4XX_QM1,
+ .end = IRQ_IXP4XX_QM1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_IXP4XX_QM2,
+ .end = IRQ_IXP4XX_QM2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device ixp4xx_qmgr_device = {
+ .name = "ixp4xx-qmgr",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ixp4xx_qmgr_resources),
+ .resource = ixp4xx_qmgr_resources,
+};
+
static struct platform_device *ixp4xx_devices[] __initdata = {
+ &ixp4xx_npe_device,
+ &ixp4xx_qmgr_device,
+ &ixp4xx_gpio_device,
&ixp4xx_udc_device,
};
@@ -413,56 +233,12 @@ static struct platform_device *ixp46x_devices[] __initdata = {
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
-static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- gpio_line_config(gpio, IXP4XX_GPIO_IN);
-
- return 0;
-}
-
-static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
- int level)
-{
- gpio_line_set(gpio, level);
- gpio_line_config(gpio, IXP4XX_GPIO_OUT);
-
- return 0;
-}
-
-static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
- int value;
-
- gpio_line_get(gpio, &value);
-
- return value;
-}
-
-static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
- int value)
-{
- gpio_line_set(gpio, value);
-}
-
-static struct gpio_chip ixp4xx_gpio_chip = {
- .label = "IXP4XX_GPIO_CHIP",
- .direction_input = ixp4xx_gpio_direction_input,
- .direction_output = ixp4xx_gpio_direction_output,
- .get = ixp4xx_gpio_get_value,
- .set = ixp4xx_gpio_set_value,
- .to_irq = ixp4xx_gpio_to_irq,
- .base = 0,
- .ngpio = 16,
-};
-
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
- gpiochip_add_data(&ixp4xx_gpio_chip, NULL);
-
if (cpu_is_ixp46x()) {
int region;
@@ -481,103 +257,8 @@ void __init ixp4xx_sys_init(void)
ixp4xx_exp_bus_size >> 20);
}
-/*
- * sched_clock()
- */
-static u64 notrace ixp4xx_read_sched_clock(void)
-{
- return *IXP4XX_OSTS;
-}
-
-/*
- * clocksource
- */
-
-static u64 ixp4xx_clocksource_read(struct clocksource *c)
-{
- return *IXP4XX_OSTS;
-}
-
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
-static void __init ixp4xx_clocksource_init(void)
-{
- sched_clock_register(ixp4xx_read_sched_clock, 32, ixp4xx_timer_freq);
-
- clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
- ixp4xx_clocksource_read);
-}
-
-/*
- * clockevents
- */
-static int ixp4xx_set_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
-
- *IXP4XX_OSRT1 = (evt & ~IXP4XX_OST_RELOAD_MASK) | opts;
-
- return 0;
-}
-
-static int ixp4xx_shutdown(struct clock_event_device *evt)
-{
- unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
- unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;
-
- opts &= ~IXP4XX_OST_ENABLE;
- *IXP4XX_OSRT1 = osrt | opts;
- return 0;
-}
-
-static int ixp4xx_set_oneshot(struct clock_event_device *evt)
-{
- unsigned long opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT;
- unsigned long osrt = 0;
-
- /* period set by 'set next_event' */
- *IXP4XX_OSRT1 = osrt | opts;
- return 0;
-}
-
-static int ixp4xx_set_periodic(struct clock_event_device *evt)
-{
- unsigned long opts = IXP4XX_OST_ENABLE;
- unsigned long osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK;
-
- *IXP4XX_OSRT1 = osrt | opts;
- return 0;
-}
-
-static int ixp4xx_resume(struct clock_event_device *evt)
-{
- unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
- unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;
-
- opts |= IXP4XX_OST_ENABLE;
- *IXP4XX_OSRT1 = osrt | opts;
- return 0;
-}
-
-static struct clock_event_device clockevent_ixp4xx = {
- .name = "ixp4xx timer1",
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .rating = 200,
- .set_state_shutdown = ixp4xx_shutdown,
- .set_state_periodic = ixp4xx_set_periodic,
- .set_state_oneshot = ixp4xx_set_oneshot,
- .tick_resume = ixp4xx_resume,
- .set_next_event = ixp4xx_set_next_event,
-};
-
-static void __init ixp4xx_clockevent_init(void)
-{
- clockevent_ixp4xx.cpumask = cpumask_of(0);
- clockevents_config_and_register(&clockevent_ixp4xx, IXP4XX_TIMER_FREQ,
- 0xf, 0xfffffffe);
-}
void ixp4xx_restart(enum reboot_mode mode, const char *cmd)
{
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c
index 5d14ce2aee6d..a16c35d2bb96 100644
--- a/arch/arm/mach-ixp4xx/coyote-pci.c
+++ b/arch/arm/mach-ixp4xx/coyote-pci.c
@@ -23,6 +23,8 @@
#include <asm/irq.h>
#include <asm/mach/pci.h>
+#include "irqs.h"
+
#define SLOT0_DEVID 14
#define SLOT1_DEVID 15
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 7e40fe70933b..7ca43ca2816d 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -25,6 +25,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
#define COYOTE_IDE_BASE_PHYS IXP4XX_EXP_BUS_BASE(3)
#define COYOTE_IDE_BASE_VIRT 0xFFFE1000
#define COYOTE_IDE_REGION_SIZE 0x1000
diff --git a/arch/arm/mach-ixp4xx/dsmg600-pci.c b/arch/arm/mach-ixp4xx/dsmg600-pci.c
index 8dca76937723..6899023bd1b7 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-pci.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-pci.c
@@ -22,6 +22,8 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define MAX_DEV 4
#define IRQ_LINES 3
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 397190f3a8da..4d4c62fced71 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -35,6 +35,8 @@
#include <asm/mach/flash.h>
#include <asm/mach/time.h>
+#include "irqs.h"
+
#define DSMG600_SDA_PIN 5
#define DSMG600_SCL_PIN 4
@@ -268,9 +270,6 @@ static void __init dsmg600_init(void)
{
ixp4xx_sys_init();
- /* Make sure that GPIO14 and GPIO15 are not used as clocks */
- *IXP4XX_GPIO_GPCLKR = 0;
-
dsmg600_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
dsmg600_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
diff --git a/arch/arm/mach-ixp4xx/fsg-pci.c b/arch/arm/mach-ixp4xx/fsg-pci.c
index fd4a8625b4ae..6c08bb9d9807 100644
--- a/arch/arm/mach-ixp4xx/fsg-pci.c
+++ b/arch/arm/mach-ixp4xx/fsg-pci.c
@@ -22,6 +22,8 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define MAX_DEV 3
#define IRQ_LINES 3
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index f0a152e365b1..648932d8d7a8 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -29,6 +29,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
#define FSG_SDA_PIN 12
#define FSG_SCL_PIN 13
diff --git a/arch/arm/mach-ixp4xx/gateway7001-pci.c b/arch/arm/mach-ixp4xx/gateway7001-pci.c
index d9d6cc089707..903c75330b76 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-pci.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-pci.c
@@ -27,6 +27,8 @@
#include <asm/mach/pci.h>
+#include "irqs.h"
+
void __init gateway7001_pci_preinit(void)
{
irq_set_irq_type(IRQ_IXP4XX_GPIO10, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index 1be6faf6da9a..678e7dfff0e5 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -28,6 +28,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
static struct flash_platform_data gateway7001_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-pci.c b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
index 551d114c9e14..1223d160448f 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-pci.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-pci.c
@@ -30,6 +30,8 @@
#include <mach/hardware.h>
#include <asm/mach/pci.h>
+#include "irqs.h"
+
#define SLOT0_DEVID 0
#define SLOT1_DEVID 1
#define INTA 10 /* slot 1 has INTA and INTB crossed */
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 16a12994fb53..5dbdde8e2338 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -36,6 +36,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
/* GPIO 5,6,7 and 12 are hard wired to the Kendin KS8995M Switch
and operate as an SPI type interface. The details of the interface
are available on Kendin/Micrel's web site. */
diff --git a/arch/arm/mach-ixp4xx/include/mach/entry-macro.S b/arch/arm/mach-ixp4xx/include/mach/entry-macro.S
deleted file mode 100644
index 79adf83e2c3d..000000000000
--- a/arch/arm/mach-ixp4xx/include/mach/entry-macro.S
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/entry-macro.S
- *
- * Low-level IRQ helper macros for IXP4xx-based platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <mach/hardware.h>
-
- .macro get_irqnr_preamble, base, tmp
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP_OFFSET)
- ldr \irqstat, [\irqstat] @ get interrupts
- cmp \irqstat, #0
- beq 1001f @ upper IRQ?
- clz \irqnr, \irqstat
- mov \base, #31
- sub \irqnr, \base, \irqnr
- b 1002f @ lower IRQ being
- @ handled
-
-1001:
- /*
- * IXP465/IXP435 has an upper IRQ status register
- */
-#if defined(CONFIG_CPU_IXP46X) || defined(CONFIG_CPU_IXP43X)
- ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP2_OFFSET)
- ldr \irqstat, [\irqstat] @ get upper interrupts
- mov \irqnr, #63
- clz \irqstat, \irqstat
- cmp \irqstat, #32
- subne \irqnr, \irqnr, \irqstat
-#endif
-1002:
- .endm
-
-
diff --git a/arch/arm/mach-ixp4xx/include/mach/irqs.h b/arch/arm/mach-ixp4xx/include/mach/irqs.h
deleted file mode 100644
index 7e6d4cce7c27..000000000000
--- a/arch/arm/mach-ixp4xx/include/mach/irqs.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * arch/arm/mach-ixp4xx/include/mach/irqs.h
- *
- * IRQ definitions for IXP4XX based systems
- *
- * Copyright (C) 2002 Intel Corporation.
- * Copyright (C) 2003 MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef _ARCH_IXP4XX_IRQS_H_
-#define _ARCH_IXP4XX_IRQS_H_
-
-#define IRQ_IXP4XX_NPEA 0
-#define IRQ_IXP4XX_NPEB 1
-#define IRQ_IXP4XX_NPEC 2
-#define IRQ_IXP4XX_QM1 3
-#define IRQ_IXP4XX_QM2 4
-#define IRQ_IXP4XX_TIMER1 5
-#define IRQ_IXP4XX_GPIO0 6
-#define IRQ_IXP4XX_GPIO1 7
-#define IRQ_IXP4XX_PCI_INT 8
-#define IRQ_IXP4XX_PCI_DMA1 9
-#define IRQ_IXP4XX_PCI_DMA2 10
-#define IRQ_IXP4XX_TIMER2 11
-#define IRQ_IXP4XX_USB 12
-#define IRQ_IXP4XX_UART2 13
-#define IRQ_IXP4XX_TIMESTAMP 14
-#define IRQ_IXP4XX_UART1 15
-#define IRQ_IXP4XX_WDOG 16
-#define IRQ_IXP4XX_AHB_PMU 17
-#define IRQ_IXP4XX_XSCALE_PMU 18
-#define IRQ_IXP4XX_GPIO2 19
-#define IRQ_IXP4XX_GPIO3 20
-#define IRQ_IXP4XX_GPIO4 21
-#define IRQ_IXP4XX_GPIO5 22
-#define IRQ_IXP4XX_GPIO6 23
-#define IRQ_IXP4XX_GPIO7 24
-#define IRQ_IXP4XX_GPIO8 25
-#define IRQ_IXP4XX_GPIO9 26
-#define IRQ_IXP4XX_GPIO10 27
-#define IRQ_IXP4XX_GPIO11 28
-#define IRQ_IXP4XX_GPIO12 29
-#define IRQ_IXP4XX_SW_INT1 30
-#define IRQ_IXP4XX_SW_INT2 31
-#define IRQ_IXP4XX_USB_HOST 32
-#define IRQ_IXP4XX_I2C 33
-#define IRQ_IXP4XX_SSP 34
-#define IRQ_IXP4XX_TSYNC 35
-#define IRQ_IXP4XX_EAU_DONE 36
-#define IRQ_IXP4XX_SHA_DONE 37
-#define IRQ_IXP4XX_SWCP_PE 58
-#define IRQ_IXP4XX_QM_PE 60
-#define IRQ_IXP4XX_MCU_ECC 61
-#define IRQ_IXP4XX_EXP_PE 62
-
-#define _IXP4XX_GPIO_IRQ(n) (IRQ_IXP4XX_GPIO ## n)
-#define IXP4XX_GPIO_IRQ(n) _IXP4XX_GPIO_IRQ(n)
-
-/*
- * Only first 32 sources are valid if running on IXP42x systems
- */
-#if defined(CONFIG_CPU_IXP46X) || defined(CONFIG_CPU_IXP43X)
-#define NR_IRQS 64
-#else
-#define NR_IRQS 32
-#endif
-
-#define XSCALE_PMU_IRQ (IRQ_IXP4XX_XSCALE_PMU)
-
-#endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
index b7ddd27419c2..588b76651085 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp4xx-regs.h
@@ -43,8 +43,6 @@
* Queue Manager
*/
#define IXP4XX_QMGR_BASE_PHYS 0x60000000
-#define IXP4XX_QMGR_BASE_VIRT IOMEM(0xFEF15000)
-#define IXP4XX_QMGR_REGION_SIZE 0x00004000
/*
* Peripheral space, including debug UART. Must be section-aligned so that
@@ -132,9 +130,6 @@
#define IXP4XX_INTC_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x3000)
#define IXP4XX_GPIO_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x4000)
#define IXP4XX_TIMER_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x5000)
-#define IXP4XX_NPEA_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x6000)
-#define IXP4XX_NPEB_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x7000)
-#define IXP4XX_NPEC_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x8000)
#define IXP4XX_EthB_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x9000)
#define IXP4XX_EthC_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0xA000)
#define IXP4XX_USB_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0xB000)
@@ -148,95 +143,6 @@
#define IXP4XX_SSP_BASE_VIRT (IXP4XX_PERIPHERAL_BASE_VIRT + 0x12000)
/*
- * Constants to make it easy to access Interrupt Controller registers
- */
-#define IXP4XX_ICPR_OFFSET 0x00 /* Interrupt Status */
-#define IXP4XX_ICMR_OFFSET 0x04 /* Interrupt Enable */
-#define IXP4XX_ICLR_OFFSET 0x08 /* Interrupt IRQ/FIQ Select */
-#define IXP4XX_ICIP_OFFSET 0x0C /* IRQ Status */
-#define IXP4XX_ICFP_OFFSET 0x10 /* FIQ Status */
-#define IXP4XX_ICHR_OFFSET 0x14 /* Interrupt Priority */
-#define IXP4XX_ICIH_OFFSET 0x18 /* IRQ Highest Pri Int */
-#define IXP4XX_ICFH_OFFSET 0x1C /* FIQ Highest Pri Int */
-
-/*
- * IXP465-only
- */
-#define IXP4XX_ICPR2_OFFSET 0x20 /* Interrupt Status 2 */
-#define IXP4XX_ICMR2_OFFSET 0x24 /* Interrupt Enable 2 */
-#define IXP4XX_ICLR2_OFFSET 0x28 /* Interrupt IRQ/FIQ Select 2 */
-#define IXP4XX_ICIP2_OFFSET 0x2C /* IRQ Status */
-#define IXP4XX_ICFP2_OFFSET 0x30 /* FIQ Status */
-#define IXP4XX_ICEEN_OFFSET 0x34 /* Error High Pri Enable */
-
-
-/*
- * Interrupt Controller Register Definitions.
- */
-
-#define IXP4XX_INTC_REG(x) ((volatile u32 *)(IXP4XX_INTC_BASE_VIRT+(x)))
-
-#define IXP4XX_ICPR IXP4XX_INTC_REG(IXP4XX_ICPR_OFFSET)
-#define IXP4XX_ICMR IXP4XX_INTC_REG(IXP4XX_ICMR_OFFSET)
-#define IXP4XX_ICLR IXP4XX_INTC_REG(IXP4XX_ICLR_OFFSET)
-#define IXP4XX_ICIP IXP4XX_INTC_REG(IXP4XX_ICIP_OFFSET)
-#define IXP4XX_ICFP IXP4XX_INTC_REG(IXP4XX_ICFP_OFFSET)
-#define IXP4XX_ICHR IXP4XX_INTC_REG(IXP4XX_ICHR_OFFSET)
-#define IXP4XX_ICIH IXP4XX_INTC_REG(IXP4XX_ICIH_OFFSET)
-#define IXP4XX_ICFH IXP4XX_INTC_REG(IXP4XX_ICFH_OFFSET)
-#define IXP4XX_ICPR2 IXP4XX_INTC_REG(IXP4XX_ICPR2_OFFSET)
-#define IXP4XX_ICMR2 IXP4XX_INTC_REG(IXP4XX_ICMR2_OFFSET)
-#define IXP4XX_ICLR2 IXP4XX_INTC_REG(IXP4XX_ICLR2_OFFSET)
-#define IXP4XX_ICIP2 IXP4XX_INTC_REG(IXP4XX_ICIP2_OFFSET)
-#define IXP4XX_ICFP2 IXP4XX_INTC_REG(IXP4XX_ICFP2_OFFSET)
-#define IXP4XX_ICEEN IXP4XX_INTC_REG(IXP4XX_ICEEN_OFFSET)
-
-/*
- * Constants to make it easy to access GPIO registers
- */
-#define IXP4XX_GPIO_GPOUTR_OFFSET 0x00
-#define IXP4XX_GPIO_GPOER_OFFSET 0x04
-#define IXP4XX_GPIO_GPINR_OFFSET 0x08
-#define IXP4XX_GPIO_GPISR_OFFSET 0x0C
-#define IXP4XX_GPIO_GPIT1R_OFFSET 0x10
-#define IXP4XX_GPIO_GPIT2R_OFFSET 0x14
-#define IXP4XX_GPIO_GPCLKR_OFFSET 0x18
-#define IXP4XX_GPIO_GPDBSELR_OFFSET 0x1C
-
-/*
- * GPIO Register Definitions.
- * [Only perform 32bit reads/writes]
- */
-#define IXP4XX_GPIO_REG(x) ((volatile u32 *)(IXP4XX_GPIO_BASE_VIRT+(x)))
-
-#define IXP4XX_GPIO_GPOUTR IXP4XX_GPIO_REG(IXP4XX_GPIO_GPOUTR_OFFSET)
-#define IXP4XX_GPIO_GPOER IXP4XX_GPIO_REG(IXP4XX_GPIO_GPOER_OFFSET)
-#define IXP4XX_GPIO_GPINR IXP4XX_GPIO_REG(IXP4XX_GPIO_GPINR_OFFSET)
-#define IXP4XX_GPIO_GPISR IXP4XX_GPIO_REG(IXP4XX_GPIO_GPISR_OFFSET)
-#define IXP4XX_GPIO_GPIT1R IXP4XX_GPIO_REG(IXP4XX_GPIO_GPIT1R_OFFSET)
-#define IXP4XX_GPIO_GPIT2R IXP4XX_GPIO_REG(IXP4XX_GPIO_GPIT2R_OFFSET)
-#define IXP4XX_GPIO_GPCLKR IXP4XX_GPIO_REG(IXP4XX_GPIO_GPCLKR_OFFSET)
-#define IXP4XX_GPIO_GPDBSELR IXP4XX_GPIO_REG(IXP4XX_GPIO_GPDBSELR_OFFSET)
-
-/*
- * GPIO register bit definitions
- */
-
-/* Interrupt styles
- */
-#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
-#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
-#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
-#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
-#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
-
-/*
- * Mask used to clear interrupt styles
- */
-#define IXP4XX_GPIO_STYLE_CLEAR 0x7
-#define IXP4XX_GPIO_STYLE_SIZE 3
-
-/*
* Constants to make it easy to access Timer Control/Status registers
*/
#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous TimeStamp */
diff --git a/arch/arm/mach-ixp4xx/include/mach/qmgr.h b/arch/arm/mach-ixp4xx/include/mach/qmgr.h
deleted file mode 100644
index 4de8da536dbb..000000000000
--- a/arch/arm/mach-ixp4xx/include/mach/qmgr.h
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License
- * as published by the Free Software Foundation.
- */
-
-#ifndef IXP4XX_QMGR_H
-#define IXP4XX_QMGR_H
-
-#include <linux/io.h>
-#include <linux/kernel.h>
-
-#define DEBUG_QMGR 0
-
-#define HALF_QUEUES 32
-#define QUEUES 64
-#define MAX_QUEUE_LENGTH 4 /* in dwords */
-
-#define QUEUE_STAT1_EMPTY 1 /* queue status bits */
-#define QUEUE_STAT1_NEARLY_EMPTY 2
-#define QUEUE_STAT1_NEARLY_FULL 4
-#define QUEUE_STAT1_FULL 8
-#define QUEUE_STAT2_UNDERFLOW 1
-#define QUEUE_STAT2_OVERFLOW 2
-
-#define QUEUE_WATERMARK_0_ENTRIES 0
-#define QUEUE_WATERMARK_1_ENTRY 1
-#define QUEUE_WATERMARK_2_ENTRIES 2
-#define QUEUE_WATERMARK_4_ENTRIES 3
-#define QUEUE_WATERMARK_8_ENTRIES 4
-#define QUEUE_WATERMARK_16_ENTRIES 5
-#define QUEUE_WATERMARK_32_ENTRIES 6
-#define QUEUE_WATERMARK_64_ENTRIES 7
-
-/* queue interrupt request conditions */
-#define QUEUE_IRQ_SRC_EMPTY 0
-#define QUEUE_IRQ_SRC_NEARLY_EMPTY 1
-#define QUEUE_IRQ_SRC_NEARLY_FULL 2
-#define QUEUE_IRQ_SRC_FULL 3
-#define QUEUE_IRQ_SRC_NOT_EMPTY 4
-#define QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY 5
-#define QUEUE_IRQ_SRC_NOT_NEARLY_FULL 6
-#define QUEUE_IRQ_SRC_NOT_FULL 7
-
-struct qmgr_regs {
- u32 acc[QUEUES][MAX_QUEUE_LENGTH]; /* 0x000 - 0x3FF */
- u32 stat1[4]; /* 0x400 - 0x40F */
- u32 stat2[2]; /* 0x410 - 0x417 */
- u32 statne_h; /* 0x418 - queue nearly empty */
- u32 statf_h; /* 0x41C - queue full */
- u32 irqsrc[4]; /* 0x420 - 0x42F IRC source */
- u32 irqen[2]; /* 0x430 - 0x437 IRQ enabled */
- u32 irqstat[2]; /* 0x438 - 0x43F - IRQ access only */
- u32 reserved[1776];
- u32 sram[2048]; /* 0x2000 - 0x3FFF - config and buffer */
-};
-
-void qmgr_set_irq(unsigned int queue, int src,
- void (*handler)(void *pdev), void *pdev);
-void qmgr_enable_irq(unsigned int queue);
-void qmgr_disable_irq(unsigned int queue);
-
-/* request_ and release_queue() must be called from non-IRQ context */
-
-#if DEBUG_QMGR
-extern char qmgr_queue_descs[QUEUES][32];
-
-int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
- unsigned int nearly_empty_watermark,
- unsigned int nearly_full_watermark,
- const char *desc_format, const char* name);
-#else
-int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
- unsigned int nearly_empty_watermark,
- unsigned int nearly_full_watermark);
-#define qmgr_request_queue(queue, len, nearly_empty_watermark, \
- nearly_full_watermark, desc_format, name) \
- __qmgr_request_queue(queue, len, nearly_empty_watermark, \
- nearly_full_watermark)
-#endif
-
-void qmgr_release_queue(unsigned int queue);
-
-
-static inline void qmgr_put_entry(unsigned int queue, u32 val)
-{
- struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
-#if DEBUG_QMGR
- BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
-
- printk(KERN_DEBUG "Queue %s(%i) put %X\n",
- qmgr_queue_descs[queue], queue, val);
-#endif
- __raw_writel(val, &qmgr_regs->acc[queue][0]);
-}
-
-static inline u32 qmgr_get_entry(unsigned int queue)
-{
- u32 val;
- const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
- val = __raw_readl(&qmgr_regs->acc[queue][0]);
-#if DEBUG_QMGR
- BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
-
- printk(KERN_DEBUG "Queue %s(%i) get %X\n",
- qmgr_queue_descs[queue], queue, val);
-#endif
- return val;
-}
-
-static inline int __qmgr_get_stat1(unsigned int queue)
-{
- const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
- return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
- >> ((queue & 7) << 2)) & 0xF;
-}
-
-static inline int __qmgr_get_stat2(unsigned int queue)
-{
- const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
- BUG_ON(queue >= HALF_QUEUES);
- return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
- >> ((queue & 0xF) << 1)) & 0x3;
-}
-
-/**
- * qmgr_stat_empty() - checks if a hardware queue is empty
- * @queue: queue number
- *
- * Returns non-zero value if the queue is empty.
- */
-static inline int qmgr_stat_empty(unsigned int queue)
-{
- BUG_ON(queue >= HALF_QUEUES);
- return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
-}
-
-/**
- * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
- * @queue: queue number
- *
- * Returns non-zero value if the queue is below low watermark.
- */
-static inline int qmgr_stat_below_low_watermark(unsigned int queue)
-{
- const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
- if (queue >= HALF_QUEUES)
- return (__raw_readl(&qmgr_regs->statne_h) >>
- (queue - HALF_QUEUES)) & 0x01;
- return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
-}
-
-/**
- * qmgr_stat_above_high_watermark() - checks if a queue is above high watermark
- * @queue: queue number
- *
- * Returns non-zero value if the queue is above high watermark
- */
-static inline int qmgr_stat_above_high_watermark(unsigned int queue)
-{
- BUG_ON(queue >= HALF_QUEUES);
- return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL;
-}
-
-/**
- * qmgr_stat_full() - checks if a hardware queue is full
- * @queue: queue number
- *
- * Returns non-zero value if the queue is full.
- */
-static inline int qmgr_stat_full(unsigned int queue)
-{
- const struct qmgr_regs __iomem *qmgr_regs = IXP4XX_QMGR_BASE_VIRT;
- if (queue >= HALF_QUEUES)
- return (__raw_readl(&qmgr_regs->statf_h) >>
- (queue - HALF_QUEUES)) & 0x01;
- return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
-}
-
-/**
- * qmgr_stat_underflow() - checks if a hardware queue experienced underflow
- * @queue: queue number
- *
- * Returns non-zero value if the queue experienced underflow.
- */
-static inline int qmgr_stat_underflow(unsigned int queue)
-{
- return __qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW;
-}
-
-/**
- * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
- * @queue: queue number
- *
- * Returns non-zero value if the queue experienced overflow.
- */
-static inline int qmgr_stat_overflow(unsigned int queue)
-{
- return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
-}
-
-#endif
diff --git a/arch/arm/mach-ixp4xx/irqs.h b/arch/arm/mach-ixp4xx/irqs.h
new file mode 100644
index 000000000000..6b7f220cf9e0
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/irqs.h
@@ -0,0 +1,68 @@
+/*
+ * arch/arm/mach-ixp4xx/include/mach/irqs.h
+ *
+ * IRQ definitions for IXP4XX based systems
+ *
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ARCH_IXP4XX_IRQS_H_
+#define _ARCH_IXP4XX_IRQS_H_
+
+#define IRQ_IXP4XX_BASE 16
+
+#define IRQ_IXP4XX_NPEA (IRQ_IXP4XX_BASE + 0)
+#define IRQ_IXP4XX_NPEB (IRQ_IXP4XX_BASE + 1)
+#define IRQ_IXP4XX_NPEC (IRQ_IXP4XX_BASE + 2)
+#define IRQ_IXP4XX_QM1 (IRQ_IXP4XX_BASE + 3)
+#define IRQ_IXP4XX_QM2 (IRQ_IXP4XX_BASE + 4)
+#define IRQ_IXP4XX_TIMER1 (IRQ_IXP4XX_BASE + 5)
+#define IRQ_IXP4XX_GPIO0 (IRQ_IXP4XX_BASE + 6)
+#define IRQ_IXP4XX_GPIO1 (IRQ_IXP4XX_BASE + 7)
+#define IRQ_IXP4XX_PCI_INT (IRQ_IXP4XX_BASE + 8)
+#define IRQ_IXP4XX_PCI_DMA1 (IRQ_IXP4XX_BASE + 9)
+#define IRQ_IXP4XX_PCI_DMA2 (IRQ_IXP4XX_BASE + 10)
+#define IRQ_IXP4XX_TIMER2 (IRQ_IXP4XX_BASE + 11)
+#define IRQ_IXP4XX_USB (IRQ_IXP4XX_BASE + 12)
+#define IRQ_IXP4XX_UART2 (IRQ_IXP4XX_BASE + 13)
+#define IRQ_IXP4XX_TIMESTAMP (IRQ_IXP4XX_BASE + 14)
+#define IRQ_IXP4XX_UART1 (IRQ_IXP4XX_BASE + 15)
+#define IRQ_IXP4XX_WDOG (IRQ_IXP4XX_BASE + 16)
+#define IRQ_IXP4XX_AHB_PMU (IRQ_IXP4XX_BASE + 17)
+#define IRQ_IXP4XX_XSCALE_PMU (IRQ_IXP4XX_BASE + 18)
+#define IRQ_IXP4XX_GPIO2 (IRQ_IXP4XX_BASE + 19)
+#define IRQ_IXP4XX_GPIO3 (IRQ_IXP4XX_BASE + 20)
+#define IRQ_IXP4XX_GPIO4 (IRQ_IXP4XX_BASE + 21)
+#define IRQ_IXP4XX_GPIO5 (IRQ_IXP4XX_BASE + 22)
+#define IRQ_IXP4XX_GPIO6 (IRQ_IXP4XX_BASE + 23)
+#define IRQ_IXP4XX_GPIO7 (IRQ_IXP4XX_BASE + 24)
+#define IRQ_IXP4XX_GPIO8 (IRQ_IXP4XX_BASE + 25)
+#define IRQ_IXP4XX_GPIO9 (IRQ_IXP4XX_BASE + 26)
+#define IRQ_IXP4XX_GPIO10 (IRQ_IXP4XX_BASE + 27)
+#define IRQ_IXP4XX_GPIO11 (IRQ_IXP4XX_BASE + 28)
+#define IRQ_IXP4XX_GPIO12 (IRQ_IXP4XX_BASE + 29)
+#define IRQ_IXP4XX_SW_INT1 (IRQ_IXP4XX_BASE + 30)
+#define IRQ_IXP4XX_SW_INT2 (IRQ_IXP4XX_BASE + 31)
+#define IRQ_IXP4XX_USB_HOST (IRQ_IXP4XX_BASE + 32)
+#define IRQ_IXP4XX_I2C (IRQ_IXP4XX_BASE + 33)
+#define IRQ_IXP4XX_SSP (IRQ_IXP4XX_BASE + 34)
+#define IRQ_IXP4XX_TSYNC (IRQ_IXP4XX_BASE + 35)
+#define IRQ_IXP4XX_EAU_DONE (IRQ_IXP4XX_BASE + 36)
+#define IRQ_IXP4XX_SHA_DONE (IRQ_IXP4XX_BASE + 37)
+#define IRQ_IXP4XX_SWCP_PE (IRQ_IXP4XX_BASE + 58)
+#define IRQ_IXP4XX_QM_PE (IRQ_IXP4XX_BASE + 60)
+#define IRQ_IXP4XX_MCU_ECC (IRQ_IXP4XX_BASE + 61)
+#define IRQ_IXP4XX_EXP_PE (IRQ_IXP4XX_BASE + 62)
+
+#define _IXP4XX_GPIO_IRQ(n) (IRQ_IXP4XX_GPIO ## n)
+#define IXP4XX_GPIO_IRQ(n) _IXP4XX_GPIO_IRQ(n)
+
+#define XSCALE_PMU_IRQ (IRQ_IXP4XX_XSCALE_PMU)
+
+#endif
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c
index 318424dd3c50..c1340465b2ea 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c
@@ -24,6 +24,8 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define MAX_DEV 4
#define IRQ_LINES 4
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 57d7df79d838..6f0f7ed18ea8 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -32,6 +32,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
#define IXDP425_SDA_PIN 7
#define IXDP425_SCL_PIN 6
diff --git a/arch/arm/mach-ixp4xx/ixdpg425-pci.c b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
index 1f8717ba13dc..ac0e9bc6eb4d 100644
--- a/arch/arm/mach-ixp4xx/ixdpg425-pci.c
+++ b/arch/arm/mach-ixp4xx/ixdpg425-pci.c
@@ -23,6 +23,8 @@
#include <asm/mach/pci.h>
+#include "irqs.h"
+
void __init ixdpg425_pci_preinit(void)
{
irq_set_irq_type(IRQ_IXP4XX_GPIO6, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/arm/mach-ixp4xx/ixp4xx-of.c b/arch/arm/mach-ixp4xx/ixp4xx-of.c
new file mode 100644
index 000000000000..7449b8319c8a
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/ixp4xx-of.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4xx Device Tree boot support
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/ixp4xx-regs.h>
+
+static struct map_desc ixp4xx_of_io_desc[] __initdata = {
+ /*
+ * This is needed for runtime system configuration checks,
+ * such as reading if hardware so-and-so is present. This
+ * could eventually be converted into a syscon once all boards
+ * are converted to device tree.
+ */
+ {
+ .virtual = IXP4XX_EXP_CFG_BASE_VIRT,
+ .pfn = __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+#ifdef CONFIG_DEBUG_UART_8250
+ /* This is needed for LL-debug/earlyprintk/debug-macro.S */
+ {
+ .virtual = CONFIG_DEBUG_UART_VIRT,
+ .pfn = __phys_to_pfn(CONFIG_DEBUG_UART_PHYS),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ },
+#endif
+};
+
+static void __init ixp4xx_of_map_io(void)
+{
+ iotable_init(ixp4xx_of_io_desc, ARRAY_SIZE(ixp4xx_of_io_desc));
+}
+
+/*
+ * We handle 4 different SoC families. These compatible strings are enough
+ * to provide the core so that different boards can add their more detailed
+ * specifics.
+ */
+static const char *ixp4xx_of_board_compat[] = {
+ "intel,ixp42x",
+ "intel,ixp43x",
+ "intel,ixp45x",
+ "intel,ixp46x",
+ NULL,
+};
+
+DT_MACHINE_START(IXP4XX_DT, "IXP4xx (Device Tree)")
+ .map_io = ixp4xx_of_map_io,
+ .dt_compat = ixp4xx_of_board_compat,
+MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nas100d-pci.c b/arch/arm/mach-ixp4xx/nas100d-pci.c
index 8f0eba0a6800..925ef805f966 100644
--- a/arch/arm/mach-ixp4xx/nas100d-pci.c
+++ b/arch/arm/mach-ixp4xx/nas100d-pci.c
@@ -21,6 +21,8 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define MAX_DEV 3
#define IRQ_LINES 3
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 4138d6aa4c52..c142cfa8c5d6 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -34,6 +34,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
#define NAS100D_SDA_PIN 5
#define NAS100D_SCL_PIN 6
@@ -279,9 +281,6 @@ static void __init nas100d_init(void)
ixp4xx_sys_init();
- /* gpio 14 and 15 are _not_ clocks */
- *IXP4XX_GPIO_GPCLKR = 0;
-
nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
nas100d_flash_resource.end =
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
index 032defe111aa..d69ee4066d20 100644
--- a/arch/arm/mach-ixp4xx/nslu2-pci.c
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -21,6 +21,8 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
+#include "irqs.h"
+
#define MAX_DEV 3
#define IRQ_LINES 3
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index 341b263482ef..ee1877fcfafe 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -32,6 +32,8 @@
#include <asm/mach/flash.h>
#include <asm/mach/time.h>
+#include "irqs.h"
+
#define NSLU2_SDA_PIN 7
#define NSLU2_SCL_PIN 6
@@ -125,10 +127,18 @@ static struct platform_device nslu2_i2c_gpio = {
},
};
+static struct resource nslu2_beeper_resources[] = {
+ {
+ .start = IRQ_IXP4XX_TIMER2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct platform_device nslu2_beeper = {
.name = "ixp4xx-beeper",
.id = NSLU2_GPIO_BUZZ,
- .num_resources = 0,
+ .resource = nslu2_beeper_resources,
+ .num_resources = ARRAY_SIZE(nslu2_beeper_resources),
};
static struct resource nslu2_uart_resources[] = {
diff --git a/arch/arm/mach-ixp4xx/wg302v2-pci.c b/arch/arm/mach-ixp4xx/wg302v2-pci.c
index c92e5b82af36..cf83f7e24179 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-pci.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-pci.c
@@ -27,6 +27,8 @@
#include <asm/mach/pci.h>
+#include "irqs.h"
+
void __init wg302v2_pci_preinit(void)
{
irq_set_irq_type(IRQ_IXP4XX_GPIO8, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index 90b3c604e8b6..8711e299229b 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -29,6 +29,8 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
+#include "irqs.h"
+
static struct flash_platform_data wg302v2_flash_data = {
.map_name = "cfi_probe",
.width = 2,
diff --git a/arch/arm/mach-ks8695/include/mach/hardware.h b/arch/arm/mach-ks8695/include/mach/hardware.h
index 959c748ee8bb..877629b3d944 100644
--- a/arch/arm/mach-ks8695/include/mach/hardware.h
+++ b/arch/arm/mach-ks8695/include/mach/hardware.h
@@ -14,7 +14,7 @@
#ifndef __ASM_ARCH_HARDWARE_H
#define __ASM_ARCH_HARDWARE_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/*
* Clocks are derived from MCLK, which is 25MHz
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index b3be60a8e467..66701bf43248 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Platform support for LPC32xx SoC
*
@@ -5,44 +6,14 @@
*
* Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
* Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
#include <linux/amba/pl08x.h>
-#include <linux/amba/mmci.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/mtd/lpc32xx_slc.h>
#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/mtd/lpc32xx_slc.h>
+#include <linux/of_platform.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-
-#include <mach/hardware.h>
-#include <mach/platform.h>
-#include <mach/board.h>
#include "common.h"
static struct pl08x_channel_data pl08x_slave_channels[] = {
@@ -90,8 +61,6 @@ static struct lpc32xx_mlc_platform_data lpc32xx_mlc_data = {
};
static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
- OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL),
- OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL),
OF_DEV_AUXDATA("arm,pl080", 0x31000000, "pl08xdmac", &pl08x_pd),
OF_DEV_AUXDATA("nxp,lpc3220-slc", 0x20020000, "20020000.flash",
&lpc32xx_slc_data),
@@ -104,11 +73,6 @@ static void __init lpc3250_machine_init(void)
{
lpc32xx_serial_init();
- /* Test clock needed for UDA1380 initial init */
- __raw_writel(LPC32XX_CLKPWR_TESTCLK2_SEL_MOSC |
- LPC32XX_CLKPWR_TESTCLK_TESTCLK2_EN,
- LPC32XX_CLKPWR_TEST_CLK_SEL);
-
of_platform_default_populate(NULL, lpc32xx_auxdata_lookup, NULL);
}
diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c
index b6a81ba1ce32..5a9c016b3c6c 100644
--- a/arch/arm/mach-mediatek/mediatek.c
+++ b/arch/arm/mach-mediatek/mediatek.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/clk-provider.h>
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index f72e1e9f5fc5..dd762d1b083f 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 0b10acd7d1b9..d2df5ef9382b 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -136,7 +136,6 @@ static void __init i2c_quirk(void)
of_update_property(np, new_compat);
}
- return;
}
static void __init mvebu_dt_init(void)
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 8b2fbc8b6bc6..2d962fe48821 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -66,7 +66,7 @@ ENDPROC(ll_get_coherency_base)
* fabric registers
*/
ENTRY(ll_get_coherency_cpumask)
- mrc 15, 0, r3, cr0, cr0, 5
+ mrc p15, 0, r3, cr0, cr0, 5
and r3, r3, #15
mov r2, #(1 << 24)
lsl r3, r2, r3
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index 0aa88105d46e..ceaad6d5927e 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -92,7 +92,8 @@ static void __init kirkwood_dt_eth_fixup(void)
continue;
/* skip disabled nodes or nodes with valid MAC address*/
- if (!of_device_is_available(pnp) || of_get_mac_address(np))
+ if (!of_device_is_available(pnp) ||
+ !IS_ERR(of_get_mac_address(np)))
goto eth_fixup_skip;
clk = of_clk_get(pnp, 0);
@@ -107,8 +108,6 @@ static void __init kirkwood_dt_eth_fixup(void)
clk_prepare_enable(clk);
/* store MAC address register contents in local-mac-address */
- pr_err(FW_INFO "%pOF: local-mac-address is not set\n", np);
-
pmac = kzalloc(sizeof(*pmac) + 6, GFP_KERNEL);
if (!pmac)
goto eth_fixup_no_mem;
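The kirkwood fixup above switches to !IS_ERR() apparently because of_get_mac_address() now signals failure with an ERR_PTR-encoded pointer rather than NULL. A rough standalone sketch of the ERR_PTR convention, using simplified stand-ins for the helpers from linux/err.h and a hypothetical get_mac_address() lookup:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers */
#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)
{
        return (void *)(intptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical lookup that fails the way of_get_mac_address() now can */
static const void *get_mac_address(int have_mac)
{
        static const unsigned char mac[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };

        return have_mac ? mac : ERR_PTR(-ENODEV);
}

int main(void)
{
        const void *mac = get_mac_address(0);

        if (IS_ERR(mac))
                printf("no MAC address, error %ld\n", PTR_ERR(mac));
        else
                printf("MAC address found\n");
        return 0;
}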
diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
index db17121d7d63..070552511699 100644
--- a/arch/arm/mach-mvebu/pm-board.c
+++ b/arch/arm/mach-mvebu/pm-board.c
@@ -79,7 +79,7 @@ static void mvebu_armada_pm_enter(void __iomem *sdram_reg, u32 srcmd)
static int __init mvebu_armada_pm_init(void)
{
struct device_node *np;
- struct device_node *gpio_ctrl_np;
+ struct device_node *gpio_ctrl_np = NULL;
int ret = 0, i;
if (!of_machine_is_compatible("marvell,axp-gp"))
@@ -126,18 +126,23 @@ static int __init mvebu_armada_pm_init(void)
goto out;
}
+ if (gpio_ctrl_np)
+ of_node_put(gpio_ctrl_np);
gpio_ctrl_np = args.np;
pic_raw_gpios[i] = args.args[0];
}
gpio_ctrl = of_iomap(gpio_ctrl_np, 0);
- if (!gpio_ctrl)
- return -ENOMEM;
+ if (!gpio_ctrl) {
+ ret = -ENOMEM;
+ goto out;
+ }
mvebu_pm_suspend_init(mvebu_armada_pm_enter);
out:
of_node_put(np);
+ of_node_put(gpio_ctrl_np);
return ret;
}
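The pm-board change above drops the previously held node reference before gpio_ctrl_np is overwritten inside the loop, and releases the final reference on every exit path. A toy standalone sketch of that reference-counting pattern, with a made-up refcounted struct standing in for device_node:

#include <stdio.h>

struct node {
        const char *name;
        int refcount;
};

static struct node *node_get(struct node *n)
{
        if (n)
                n->refcount++;
        return n;
}

static void node_put(struct node *n)
{
        if (n)
                n->refcount--;
}

int main(void)
{
        struct node a = { "gpio0", 1 }, b = { "gpio1", 1 };
        struct node *candidates[] = { &a, &b };
        struct node *ctrl = NULL;
        int i;

        for (i = 0; i < 2; i++) {
                /* Drop the reference taken in the previous iteration */
                node_put(ctrl);
                ctrl = node_get(candidates[i]);
        }

        node_put(ctrl); /* single put on the exit path */
        printf("refcounts: %s=%d %s=%d\n", a.name, a.refcount, b.name, b.refcount);
        return 0;
}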
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
index 88651221dbdd..7aae9a25cfeb 100644
--- a/arch/arm/mach-mvebu/pmsu_ll.S
+++ b/arch/arm/mach-mvebu/pmsu_ll.S
@@ -16,7 +16,7 @@
ENTRY(armada_38x_scu_power_up)
mrc p15, 4, r1, c15, c0 @ get SCU base address
orr r1, r1, #0x8 @ SCU CPU Power Status Register
- mrc 15, 0, r0, cr0, cr0, 5 @ get the CPU ID
+ mrc p15, 0, r0, cr0, cr0, 5 @ get the CPU ID
and r0, r0, #15
add r1, r1, r0
mov r0, #0x0
@@ -56,7 +56,6 @@ ENDPROC(armada_38x_cpu_resume)
/* The following code will be executed from SRAM */
ENTRY(mvebu_boot_wa_start)
-mvebu_boot_wa_start:
ARM_BE8(setend be)
adr r0, 1f
ldr r0, [r0] @ load the address of the
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 1b15d593837e..b6e814166ee0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -749,7 +749,7 @@ static void __init ams_delta_init(void)
ARRAY_SIZE(ams_delta_gpio_tables));
leds_pdev = gpio_led_register_device(PLATFORM_DEVID_NONE, &leds_pdata);
- if (!IS_ERR(leds_pdev)) {
+ if (!IS_ERR_OR_NULL(leds_pdev)) {
leds_gpio_table.dev_id = dev_name(&leds_pdev->dev);
gpiod_add_lookup_table(&leds_gpio_table);
}
diff --git a/arch/arm/mach-omap1/include/mach/hardware.h b/arch/arm/mach-omap1/include/mach/hardware.h
index 5875a5098d35..e7c8ac7d83e3 100644
--- a/arch/arm/mach-omap1/include/mach/hardware.h
+++ b/arch/arm/mach-omap1/include/mach/hardware.h
@@ -36,7 +36,7 @@
#ifndef __ASM_ARCH_OMAP_HARDWARE_H
#define __ASM_ARCH_OMAP_HARDWARE_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#ifndef __ASSEMBLER__
#include <asm/types.h>
#include <mach/soc.h>
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 42881f21cede..3e0f09cc0028 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -119,6 +119,9 @@ void __init ti_clk_init_features(void)
if (cpu_is_omap343x())
features.flags |= TI_CLK_DPLL_HAS_FREQSEL;
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ features.flags |= TI_CLK_DEVICE_TYPE_GP;
+
/* Idlest value for interface clocks.
* 24xx uses 0 to indicate not ready, and 1 to indicate ready.
* 34xx reverses this, just to keep us on our toes
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 129455e822e4..6316da3623b3 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -336,6 +336,15 @@ static inline void omap5_secondary_hyp_startup(void)
}
#endif
+#ifdef CONFIG_SOC_DRA7XX
+extern int dra7xx_pciess_reset(struct omap_hwmod *oh);
+#else
+static inline int dra7xx_pciess_reset(struct omap_hwmod *oh)
+{
+ return 0;
+}
+#endif
+
void pdata_quirks_init(const struct of_device_id *);
void omap_auxdata_legacy_init(struct device *dev);
void omap_pcs_legacy_init(int irq, void (*rearm)(void));
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index 37ff25ee3d89..1d8efc303daf 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -53,15 +53,10 @@ int omap_i2c_reset(struct omap_hwmod *oh)
u16 i2c_con;
int c = 0;
- if (oh->class->rev == OMAP_I2C_IP_VERSION_2) {
- i2c_con = OMAP4_I2C_CON_OFFSET;
- } else if (oh->class->rev == OMAP_I2C_IP_VERSION_1) {
+ if (soc_is_omap24xx() || soc_is_omap34xx() || soc_is_am35xx())
i2c_con = OMAP2_I2C_CON_OFFSET;
- } else {
- WARN(1, "Cannot reset I2C block %s: unsupported revision\n",
- oh->name);
- return -EINVAL;
- }
+ else
+ i2c_con = OMAP4_I2C_CON_OFFSET;
/* Disable I2C */
v = omap_hwmod_read(oh, i2c_con);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index bb8e0bb7ef5d..5e69c8caa1db 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -411,14 +411,9 @@ static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
static void __init __maybe_unused omap_hwmod_init_postsetup(void)
{
- u8 postsetup_state;
+ u8 postsetup_state = _HWMOD_STATE_DEFAULT;
/* Set the default postsetup state for all hwmods */
-#ifdef CONFIG_PM
- postsetup_state = _HWMOD_STATE_IDLE;
-#else
- postsetup_state = _HWMOD_STATE_ENABLED;
-#endif
omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);
}
diff --git a/arch/arm/mach-omap2/mmc.h b/arch/arm/mach-omap2/mmc.h
index 9145a6f720fc..7f4e053c3434 100644
--- a/arch/arm/mach-omap2/mmc.h
+++ b/arch/arm/mach-omap2/mmc.h
@@ -7,7 +7,15 @@
#define OMAP4_MMC_REG_OFFSET 0x100
struct omap_hwmod;
+
+#ifdef CONFIG_SOC_OMAP2420
int omap_msdi_reset(struct omap_hwmod *oh);
+#else
+static inline int omap_msdi_reset(struct omap_hwmod *oh)
+{
+ return 0;
+}
+#endif
/* called from board-specific card detection service routine */
extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 17558be4bf0a..7dcbe1736f7e 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -436,13 +436,13 @@ static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
switch (cmd) {
case CPU_CLUSTER_PM_ENTER:
- if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
irq_save_context();
else
irq_save_secure_context();
break;
case CPU_CLUSTER_PM_EXIT:
- if (omap_type() == OMAP2_DEVICE_TYPE_GP)
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP || soc_is_am43xx())
irq_restore_context();
break;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 3a04c73ac03c..405ac24def05 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -155,6 +155,8 @@
#include "soc.h"
#include "common.h"
#include "clockdomain.h"
+#include "hdq1w.h"
+#include "mmc.h"
#include "powerdomain.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
@@ -165,6 +167,7 @@
#include "prm33xx.h"
#include "prminst44xx.h"
#include "pm.h"
+#include "wd_timer.h"
/* Name of the OMAP hwmod for the MPU */
#define MPU_INITIATOR_NAME "mpu"
@@ -205,6 +208,20 @@ struct clkctrl_provider {
static LIST_HEAD(clkctrl_providers);
/**
+ * struct omap_hwmod_reset - IP specific reset functions
+ * @match: string to match against the module name
+ * @len: number of characters to match
+ * @reset: IP specific reset function
+ *
+ * Used only in cases where struct omap_hwmod is dynamically allocated.
+ */
+struct omap_hwmod_reset {
+ const char *match;
+ int len;
+ int (*reset)(struct omap_hwmod *oh);
+};
+
+/**
* struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
* @enable_module: function to enable a module (via MODULEMODE)
* @disable_module: function to disable a module (via MODULEMODE)
@@ -235,6 +252,7 @@ static struct omap_hwmod_soc_ops soc_ops;
/* omap_hwmod_list contains all registered struct omap_hwmods */
static LIST_HEAD(omap_hwmod_list);
+static DEFINE_MUTEX(list_lock);
/* mpu_oh: used to add/remove MPU initiator from sleepdep list */
static struct omap_hwmod *mpu_oh;
@@ -648,10 +666,10 @@ static struct clockdomain *_get_clkdm(struct omap_hwmod *oh)
if (oh->clkdm) {
return oh->clkdm;
} else if (oh->_clk) {
- if (__clk_get_flags(oh->_clk) & CLK_IS_BASIC)
+ if (!omap2_clk_is_hw_omap(__clk_get_hw(oh->_clk)))
return NULL;
clk = to_clk_hw_omap(__clk_get_hw(oh->_clk));
- return clk->clkdm;
+ return clk->clkdm;
}
return NULL;
}
@@ -2465,7 +2483,7 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
*/
static int _setup_reset(struct omap_hwmod *oh)
{
- int r;
+ int r = 0;
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
@@ -2624,7 +2642,7 @@ static int _setup(struct omap_hwmod *oh, void *data)
* that the copy process would be relatively complex due to the large number
* of substructures.
*/
-static int __init _register(struct omap_hwmod *oh)
+static int _register(struct omap_hwmod *oh)
{
if (!oh || !oh->name || !oh->class || !oh->class->name ||
(oh->_state != _HWMOD_STATE_UNKNOWN))
@@ -2663,7 +2681,7 @@ static int __init _register(struct omap_hwmod *oh)
* locking in this code. Changes to this assumption will require
* additional locking. Returns 0.
*/
-static int __init _add_link(struct omap_hwmod_ocp_if *oi)
+static int _add_link(struct omap_hwmod_ocp_if *oi)
{
pr_debug("omap_hwmod: %s -> %s: adding link\n", oi->master->name,
oi->slave->name);
@@ -3241,9 +3259,10 @@ static int omap_hwmod_init_regbits(struct device *dev,
* @sysc_offs: sysc register offset
* @syss_offs: syss register offset
*/
-int omap_hwmod_init_reg_offs(struct device *dev,
- const struct ti_sysc_module_data *data,
- s32 *rev_offs, s32 *sysc_offs, s32 *syss_offs)
+static int omap_hwmod_init_reg_offs(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ s32 *rev_offs, s32 *sysc_offs,
+ s32 *syss_offs)
{
*rev_offs = -ENODEV;
*sysc_offs = 0;
@@ -3267,9 +3286,9 @@ int omap_hwmod_init_reg_offs(struct device *dev,
* @data: module data
* @sysc_flags: module configuration
*/
-int omap_hwmod_init_sysc_flags(struct device *dev,
- const struct ti_sysc_module_data *data,
- u32 *sysc_flags)
+static int omap_hwmod_init_sysc_flags(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ u32 *sysc_flags)
{
*sysc_flags = 0;
@@ -3341,9 +3360,9 @@ int omap_hwmod_init_sysc_flags(struct device *dev,
* @data: module data
* @idlemodes: module supported idle modes
*/
-int omap_hwmod_init_idlemodes(struct device *dev,
- const struct ti_sysc_module_data *data,
- u32 *idlemodes)
+static int omap_hwmod_init_idlemodes(struct device *dev,
+ const struct ti_sysc_module_data *data,
+ u32 *idlemodes)
{
*idlemodes = 0;
@@ -3434,14 +3453,18 @@ static int omap_hwmod_check_module(struct device *dev,
*
* Note that the allocations here cannot use devm as ti-sysc can rebind.
*/
-int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
- const struct ti_sysc_module_data *data,
- struct sysc_regbits *sysc_fields,
- s32 rev_offs, s32 sysc_offs, s32 syss_offs,
- u32 sysc_flags, u32 idlemodes)
+static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
+ const struct ti_sysc_module_data *data,
+ struct sysc_regbits *sysc_fields,
+ s32 rev_offs, s32 sysc_offs,
+ s32 syss_offs, u32 sysc_flags,
+ u32 idlemodes)
{
struct omap_hwmod_class_sysconfig *sysc;
- struct omap_hwmod_class *class;
+ struct omap_hwmod_class *class = NULL;
+ struct omap_hwmod_ocp_if *oi = NULL;
+ struct clockdomain *clkdm = NULL;
+ struct clk *clk = NULL;
void __iomem *regs = NULL;
unsigned long flags;
@@ -3465,26 +3488,128 @@ int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
}
/*
- * We need new oh->class as the other devices in the same class
+ * We may need a new oh->class as the other devices in the same class
* may not yet have ioremapped their registers.
*/
- class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
- if (!class)
- return -ENOMEM;
+ if (oh->class->name && strcmp(oh->class->name, data->name)) {
+ class = kmemdup(oh->class, sizeof(*oh->class), GFP_KERNEL);
+ if (!class)
+ return -ENOMEM;
+ }
- class->sysc = sysc;
+ if (list_empty(&oh->slave_ports)) {
+ oi = kcalloc(1, sizeof(*oi), GFP_KERNEL);
+ if (!oi)
+ return -ENOMEM;
+
+ /*
+ * Note that we assume interconnect interface clocks will be
+	 * managed by the interconnect driver for the OCPIF_SWSUP_IDLE case
+ * on omap24xx and omap3.
+ */
+ oi->slave = oh;
+ oi->user = OCP_USER_MPU | OCP_USER_SDMA;
+ }
+
+ if (!oh->_clk) {
+ struct clk_hw_omap *hwclk;
+
+ clk = of_clk_get_by_name(dev->of_node, "fck");
+ if (!IS_ERR(clk))
+ clk_prepare(clk);
+ else
+ clk = NULL;
+
+ /*
+ * Populate clockdomain based on dts clock. It is needed for
+	 * clkdm_deny_idle() and clkdm_allow_idle() until we have an
+ * interconnect driver and reset driver capable of blocking
+ * clockdomain idle during reset, enable and idle.
+ */
+ if (clk) {
+ hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+ if (hwclk && hwclk->clkdm_name)
+ clkdm = clkdm_lookup(hwclk->clkdm_name);
+ }
+
+ /*
+	 * Note that we assume the interconnect driver manages the clocks,
+	 * so oh->_clk does not need to be populated for dynamically
+	 * allocated modules.
+ */
+ clk_unprepare(clk);
+ clk_put(clk);
+ }
spin_lock_irqsave(&oh->_lock, flags);
if (regs)
oh->_mpu_rt_va = regs;
- oh->class = class;
+ if (class)
+ oh->class = class;
+ oh->class->sysc = sysc;
+ if (oi)
+ _add_link(oi);
+ if (clkdm)
+ oh->clkdm = clkdm;
oh->_state = _HWMOD_STATE_INITIALIZED;
+ oh->_postsetup_state = _HWMOD_STATE_DEFAULT;
_setup(oh, NULL);
spin_unlock_irqrestore(&oh->_lock, flags);
return 0;
}
+static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
+ { .match = "msdi", .len = 4, .reset = omap_msdi_reset, },
+};
+
+static const struct omap_hwmod_reset dra7_reset_quirks[] = {
+ { .match = "pcie", .len = 4, .reset = dra7xx_pciess_reset, },
+};
+
+static const struct omap_hwmod_reset omap_reset_quirks[] = {
+ { .match = "dss", .len = 3, .reset = omap_dss_reset, },
+ { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
+ { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
+ { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
+};
+
+static void
+omap_hwmod_init_reset_quirk(struct device *dev, struct omap_hwmod *oh,
+ const struct ti_sysc_module_data *data,
+ const struct omap_hwmod_reset *quirks,
+ int quirks_sz)
+{
+ const struct omap_hwmod_reset *quirk;
+ int i;
+
+ for (i = 0; i < quirks_sz; i++) {
+ quirk = &quirks[i];
+ if (!strncmp(data->name, quirk->match, quirk->len)) {
+ oh->class->reset = quirk->reset;
+
+ return;
+ }
+ }
+}
+
+static void
+omap_hwmod_init_reset_quirks(struct device *dev, struct omap_hwmod *oh,
+ const struct ti_sysc_module_data *data)
+{
+ if (soc_is_omap24xx())
+ omap_hwmod_init_reset_quirk(dev, oh, data,
+ omap24xx_reset_quirks,
+ ARRAY_SIZE(omap24xx_reset_quirks));
+
+ if (soc_is_dra7xx())
+ omap_hwmod_init_reset_quirk(dev, oh, data, dra7_reset_quirks,
+ ARRAY_SIZE(dra7_reset_quirks));
+
+ omap_hwmod_init_reset_quirk(dev, oh, data, omap_reset_quirks,
+ ARRAY_SIZE(omap_reset_quirks));
+}
+
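omap_hwmod_init_reset_quirk() above picks the first table entry whose prefix matches the module name, so for example an "i2c1" module gets the "i2c" reset hook. A standalone sketch of that prefix-match lookup, with strings standing in for the reset function pointers:

#include <stdio.h>
#include <string.h>

struct reset_quirk {
        const char *match;      /* name prefix to match */
        size_t len;             /* number of characters to compare */
        const char *handler;    /* stand-in for the reset function pointer */
};

static const struct reset_quirk quirks[] = {
        { "dss",      3, "omap_dss_reset" },
        { "hdq1w",    5, "omap_hdq1w_reset" },
        { "i2c",      3, "omap_i2c_reset" },
        { "wd_timer", 8, "omap2_wd_timer_reset" },
};

static const char *lookup_reset(const char *name)
{
        size_t i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (!strncmp(name, quirks[i].match, quirks[i].len))
                        return quirks[i].handler;

        return NULL;
}

int main(void)
{
        printf("i2c1      -> %s\n", lookup_reset("i2c1"));
        printf("wd_timer2 -> %s\n", lookup_reset("wd_timer2"));
        return 0;
}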
/**
* omap_hwmod_init_module - initialize new module
* @dev: struct device
@@ -3505,8 +3630,31 @@ int omap_hwmod_init_module(struct device *dev,
return -EINVAL;
oh = _lookup(data->name);
- if (!oh)
- return -ENODEV;
+ if (!oh) {
+ oh = kzalloc(sizeof(*oh), GFP_KERNEL);
+ if (!oh)
+ return -ENOMEM;
+
+ oh->name = data->name;
+ oh->_state = _HWMOD_STATE_UNKNOWN;
+ lockdep_register_key(&oh->hwmod_key);
+
+		/* Unused, resets can be handled by the PRM driver */
+ oh->prcm.omap4.flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT;
+
+ oh->class = kzalloc(sizeof(*oh->class), GFP_KERNEL);
+ if (!oh->class) {
+ kfree(oh);
+ return -ENOMEM;
+ }
+
+ omap_hwmod_init_reset_quirks(dev, oh, data);
+
+ oh->class->name = data->name;
+ mutex_lock(&list_lock);
+ error = _register(oh);
+ mutex_unlock(&list_lock);
+ }
cookie->data = oh;
@@ -3527,10 +3675,20 @@ int omap_hwmod_init_module(struct device *dev,
if (error)
return error;
+ if (data->cfg->quirks & SYSC_QUIRK_NO_IDLE)
+ oh->flags |= HWMOD_NO_IDLE;
if (data->cfg->quirks & SYSC_QUIRK_NO_IDLE_ON_INIT)
oh->flags |= HWMOD_INIT_NO_IDLE;
if (data->cfg->quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
oh->flags |= HWMOD_INIT_NO_RESET;
+ if (data->cfg->quirks & SYSC_QUIRK_USE_CLOCKACT)
+ oh->flags |= HWMOD_SET_DEFAULT_CLOCKACT;
+ if (data->cfg->quirks & SYSC_QUIRK_SWSUP_SIDLE)
+ oh->flags |= HWMOD_SWSUP_SIDLE;
+ if (data->cfg->quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT)
+ oh->flags |= HWMOD_SWSUP_SIDLE_ACT;
+ if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
+ oh->flags |= HWMOD_SWSUP_MSTANDBY;
error = omap_hwmod_check_module(dev, oh, data, sysc_fields,
rev_offs, sysc_offs, syss_offs,
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index b70cdc21f8a2..fca9e072154b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -493,11 +493,16 @@ struct omap_hwmod_omap4_prcm {
#define _HWMOD_STATE_IDLE 5
#define _HWMOD_STATE_DISABLED 6
+#ifdef CONFIG_PM
+#define _HWMOD_STATE_DEFAULT _HWMOD_STATE_IDLE
+#else
+#define _HWMOD_STATE_DEFAULT _HWMOD_STATE_ENABLED
+#endif
+
/**
* struct omap_hwmod_class - the type of an IP block
* @name: name of the hwmod_class
* @sysc: device SYSCONFIG/SYSSTATUS register data
- * @rev: revision of the IP class
* @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
* @reset: ptr to fn to be executed in place of the standard hwmod reset fn
* @enable_preprogram: ptr to fn to be executed during device enable
@@ -523,7 +528,6 @@ struct omap_hwmod_omap4_prcm {
struct omap_hwmod_class {
const char *name;
struct omap_hwmod_class_sysconfig *sysc;
- u32 rev;
int (*pre_shutdown)(struct omap_hwmod *oh);
int (*reset)(struct omap_hwmod *oh);
int (*enable_preprogram)(struct omap_hwmod *oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index d684fac8f592..8122c8d4b69a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -91,7 +91,6 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = {
static struct omap_hwmod_class i2c_class = {
.name = "i2c",
.sysc = &i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_1,
.reset = &omap_i2c_reset,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index abef9f6f9bf5..f27cb60bde77 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -68,7 +68,6 @@ static struct omap_hwmod_class_sysconfig i2c_sysc = {
static struct omap_hwmod_class i2c_class = {
.name = "i2c",
.sysc = &i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_1,
.reset = &omap_i2c_reset,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
index 9b30b6b471ae..e19f620c4074 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_interconnect_data.c
@@ -11,7 +11,7 @@
* XXX handle crossbar/shared link difference for L3?
* XXX these should be marked initdata for multi-OMAP kernels
*/
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include "omap_hwmod.h"
#include "l3_2xxx.h"
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 5345919a81f8..ed5f39d948de 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -96,7 +96,6 @@ static struct omap_hwmod_class_sysconfig omap2xxx_gpio_sysc = {
struct omap_hwmod_class omap2xxx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &omap2xxx_gpio_sysc,
- .rev = 0,
};
/* system dma */
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
index 6f81d7a4fec1..aaa6092426ea 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_common_data.h
@@ -30,24 +30,16 @@ extern struct omap_hwmod_ocp_if am33xx_l3_main__gfx;
extern struct omap_hwmod_ocp_if am33xx_l4_wkup__rtc;
extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan0;
extern struct omap_hwmod_ocp_if am33xx_l4_per__dcan1;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio1;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio2;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__gpio3;
extern struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__elm;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss0;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss1;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__epwmss2;
extern struct omap_hwmod_ocp_if am33xx_l3_s__gpmc;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c2;
-extern struct omap_hwmod_ocp_if am33xx_l4_per__i2c3;
extern struct omap_hwmod_ocp_if am33xx_l4_per__mailbox;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__spinlock;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp0;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp1;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__mmc0;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__mmc1;
-extern struct omap_hwmod_ocp_if am33xx_l3_s__mmc2;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi1;
extern struct omap_hwmod_ocp_if am33xx_l4_ls__timer2;
@@ -60,11 +52,6 @@ extern struct omap_hwmod_ocp_if am33xx_l3_main__tpcc;
extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc0;
extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc1;
extern struct omap_hwmod_ocp_if am33xx_l3_main__tptc2;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart2;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart3;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart4;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart5;
-extern struct omap_hwmod_ocp_if am33xx_l4_ls__uart6;
extern struct omap_hwmod_ocp_if am33xx_l3_main__ocmc;
extern struct omap_hwmod_ocp_if am33xx_l3_main__sha0;
extern struct omap_hwmod_ocp_if am33xx_l3_main__aes0;
@@ -93,19 +80,10 @@ extern struct omap_hwmod am33xx_elm_hwmod;
extern struct omap_hwmod am33xx_epwmss0_hwmod;
extern struct omap_hwmod am33xx_epwmss1_hwmod;
extern struct omap_hwmod am33xx_epwmss2_hwmod;
-extern struct omap_hwmod am33xx_gpio1_hwmod;
-extern struct omap_hwmod am33xx_gpio2_hwmod;
-extern struct omap_hwmod am33xx_gpio3_hwmod;
extern struct omap_hwmod am33xx_gpmc_hwmod;
-extern struct omap_hwmod am33xx_i2c1_hwmod;
-extern struct omap_hwmod am33xx_i2c2_hwmod;
-extern struct omap_hwmod am33xx_i2c3_hwmod;
extern struct omap_hwmod am33xx_mailbox_hwmod;
extern struct omap_hwmod am33xx_mcasp0_hwmod;
extern struct omap_hwmod am33xx_mcasp1_hwmod;
-extern struct omap_hwmod am33xx_mmc0_hwmod;
-extern struct omap_hwmod am33xx_mmc1_hwmod;
-extern struct omap_hwmod am33xx_mmc2_hwmod;
extern struct omap_hwmod am33xx_rtc_hwmod;
extern struct omap_hwmod am33xx_spi0_hwmod;
extern struct omap_hwmod am33xx_spi1_hwmod;
@@ -121,19 +99,12 @@ extern struct omap_hwmod am33xx_tpcc_hwmod;
extern struct omap_hwmod am33xx_tptc0_hwmod;
extern struct omap_hwmod am33xx_tptc1_hwmod;
extern struct omap_hwmod am33xx_tptc2_hwmod;
-extern struct omap_hwmod am33xx_uart1_hwmod;
-extern struct omap_hwmod am33xx_uart2_hwmod;
-extern struct omap_hwmod am33xx_uart3_hwmod;
-extern struct omap_hwmod am33xx_uart4_hwmod;
-extern struct omap_hwmod am33xx_uart5_hwmod;
-extern struct omap_hwmod am33xx_uart6_hwmod;
extern struct omap_hwmod am33xx_wd_timer1_hwmod;
extern struct omap_hwmod_class am33xx_emif_hwmod_class;
extern struct omap_hwmod_class am33xx_l4_hwmod_class;
extern struct omap_hwmod_class am33xx_wkup_m3_hwmod_class;
extern struct omap_hwmod_class am33xx_control_hwmod_class;
-extern struct omap_hwmod_class am33xx_gpio_hwmod_class;
extern struct omap_hwmod_class am33xx_timer_hwmod_class;
extern struct omap_hwmod_class am33xx_epwmss_hwmod_class;
extern struct omap_hwmod_class am33xx_ehrpwm_hwmod_class;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
index e0001232bb4f..47a0e301b193 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_interconnect_data.c
@@ -122,30 +122,6 @@ struct omap_hwmod_ocp_if am33xx_l4_per__dcan1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4 per/ls -> GPIO2 */
-struct omap_hwmod_ocp_if am33xx_l4_per__gpio1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> gpio3 */
-struct omap_hwmod_ocp_if am33xx_l4_per__gpio2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4 per/ls -> gpio4 */
-struct omap_hwmod_ocp_if am33xx_l4_per__gpio3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_gpio3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
struct omap_hwmod_ocp_if am33xx_cpgmac0__mdio = {
.master = &am33xx_cpgmac0_hwmod,
.slave = &am33xx_mdio_hwmod,
@@ -188,21 +164,6 @@ struct omap_hwmod_ocp_if am33xx_l3_s__gpmc = {
.user = OCP_USER_MPU,
};
-/* i2c2 */
-struct omap_hwmod_ocp_if am33xx_l4_per__i2c2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_i2c2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-struct omap_hwmod_ocp_if am33xx_l4_per__i2c3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_i2c3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l4 ls -> mailbox */
struct omap_hwmod_ocp_if am33xx_l4_per__mailbox = {
.master = &am33xx_l4_ls_hwmod,
@@ -235,30 +196,6 @@ struct omap_hwmod_ocp_if am33xx_l4_ls__mcasp1 = {
.user = OCP_USER_MPU,
};
-/* l4 ls -> mmc0 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__mmc0 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mmc0_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> mmc1 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__mmc1 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_mmc1_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l3 s -> mmc2 */
-struct omap_hwmod_ocp_if am33xx_l3_s__mmc2 = {
- .master = &am33xx_l3_s_hwmod,
- .slave = &am33xx_mmc2_hwmod,
- .clk = "l3s_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l4 ls -> mcspi0 */
struct omap_hwmod_ocp_if am33xx_l4_ls__mcspi0 = {
.master = &am33xx_l4_ls_hwmod,
@@ -355,46 +292,6 @@ struct omap_hwmod_ocp_if am33xx_l3_main__tptc2 = {
.user = OCP_USER_MPU,
};
-/* l4 ls -> uart2 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__uart2 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart2_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart3 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__uart3 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart3_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart4 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__uart4 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart4_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart5 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__uart5 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart5_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
-/* l4 ls -> uart6 */
-struct omap_hwmod_ocp_if am33xx_l4_ls__uart6 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am33xx_uart6_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU,
-};
-
/* l3 main -> ocmc */
struct omap_hwmod_ocp_if am33xx_l3_main__ocmc = {
.master = &am33xx_l3_main_hwmod,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 9ded7bf972e7..4c3543bae562 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -16,9 +16,7 @@
#include <linux/types.h>
-#include <linux/platform_data/hsmmc-omap.h>
#include "omap_hwmod.h"
-#include "i2c.h"
#include "wd_timer.h"
#include "cm33xx.h"
#include "prm33xx.h"
@@ -534,7 +532,6 @@ static struct omap_hwmod_class_sysconfig am33xx_gpio_sysc = {
struct omap_hwmod_class am33xx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &am33xx_gpio_sysc,
- .rev = 2,
};
/* gpio1 */
@@ -627,68 +624,6 @@ struct omap_hwmod am33xx_gpmc_hwmod = {
},
};
-/* 'i2c' class */
-static struct omap_hwmod_class_sysconfig am33xx_i2c_sysc = {
- .rev_offs = 0,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0090,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class i2c_class = {
- .name = "i2c",
- .sysc = &am33xx_i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_2,
- .reset = &omap_i2c_reset,
-};
-
-/* i2c1 */
-struct omap_hwmod am33xx_i2c1_hwmod = {
- .name = "i2c1",
- .class = &i2c_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_wkupdm_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c1 */
-struct omap_hwmod am33xx_i2c2_hwmod = {
- .name = "i2c2",
- .class = &i2c_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c3 */
-struct omap_hwmod am33xx_i2c3_hwmod = {
- .name = "i2c3",
- .class = &i2c_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/*
* 'mailbox' class
* mailbox module allowing communication between the on-chip processors using a
@@ -762,76 +697,6 @@ struct omap_hwmod am33xx_mcasp1_hwmod = {
},
};
-/* 'mmc' class */
-static struct omap_hwmod_class_sysconfig am33xx_mmc_sysc = {
- .rev_offs = 0x2fc,
- .sysc_offs = 0x110,
- .syss_offs = 0x114,
- .sysc_flags = (SYSC_HAS_CLOCKACTIVITY | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SOFTRESET |
- SYSC_HAS_AUTOIDLE | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class am33xx_mmc_hwmod_class = {
- .name = "mmc",
- .sysc = &am33xx_mmc_sysc,
-};
-
-/* mmc0 */
-static struct omap_hsmmc_dev_attr am33xx_mmc0_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-struct omap_hwmod am33xx_mmc0_hwmod = {
- .name = "mmc1",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc0_dev_attr,
-};
-
-/* mmc1 */
-static struct omap_hsmmc_dev_attr am33xx_mmc1_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-struct omap_hwmod am33xx_mmc1_hwmod = {
- .name = "mmc2",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc1_dev_attr,
-};
-
-/* mmc2 */
-static struct omap_hsmmc_dev_attr am33xx_mmc2_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-struct omap_hwmod am33xx_mmc2_hwmod = {
- .name = "mmc3",
- .class = &am33xx_mmc_hwmod_class,
- .clkdm_name = "l3s_clkdm",
- .main_clk = "mmc_clk",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &am33xx_mmc2_dev_attr,
-};
-
/*
* 'rtc' class
* rtc subsystem
@@ -1132,102 +997,6 @@ struct omap_hwmod am33xx_tptc2_hwmod = {
},
};
-/* 'uart' class */
-static struct omap_hwmod_class_sysconfig uart_sysc = {
- .rev_offs = 0x50,
- .sysc_offs = 0x54,
- .syss_offs = 0x58,
- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class uart_class = {
- .name = "uart",
- .sysc = &uart_sysc,
-};
-
-struct omap_hwmod am33xx_uart1_hwmod = {
- .name = "uart1",
- .class = &uart_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_wkupdm_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-struct omap_hwmod am33xx_uart2_hwmod = {
- .name = "uart2",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart3 */
-struct omap_hwmod am33xx_uart3_hwmod = {
- .name = "uart3",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-struct omap_hwmod am33xx_uart4_hwmod = {
- .name = "uart4",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-struct omap_hwmod am33xx_uart5_hwmod = {
- .name = "uart5",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-struct omap_hwmod am33xx_uart6_hwmod = {
- .name = "uart6",
- .class = &uart_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "dpll_per_m2_div4_ck",
- .prcm = {
- .omap4 = {
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/* 'wd_timer' class */
static struct omap_hwmod_class_sysconfig wdt_sysc = {
.rev_offs = 0x0,
@@ -1265,11 +1034,6 @@ struct omap_hwmod am33xx_wd_timer1_hwmod = {
static void omap_hwmod_am33xx_clkctrl(void)
{
- CLKCTRL(am33xx_uart2_hwmod, AM33XX_CM_PER_UART1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart3_hwmod, AM33XX_CM_PER_UART2_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart4_hwmod, AM33XX_CM_PER_UART3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart5_hwmod, AM33XX_CM_PER_UART4_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart6_hwmod, AM33XX_CM_PER_UART5_CLKCTRL_OFFSET);
CLKCTRL(am33xx_dcan0_hwmod, AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_dcan1_hwmod, AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_elm_hwmod, AM33XX_CM_PER_ELM_CLKCTRL_OFFSET);
@@ -1279,13 +1043,9 @@ static void omap_hwmod_am33xx_clkctrl(void)
CLKCTRL(am33xx_gpio1_hwmod, AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpio2_hwmod, AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpio3_hwmod, AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c2_hwmod, AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c3_hwmod, AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mailbox_hwmod, AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mcasp0_hwmod, AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mcasp1_hwmod, AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc0_hwmod, AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc1_hwmod, AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spi0_hwmod, AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spi1_hwmod, AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spinlock_hwmod, AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
@@ -1299,13 +1059,10 @@ static void omap_hwmod_am33xx_clkctrl(void)
AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex1_hwmod,
AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart1_hwmod, AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_timer1_hwmod, AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_wkup_hwmod, AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
@@ -1340,11 +1097,6 @@ void omap_hwmod_am33xx_reg(void)
static void omap_hwmod_am43xx_clkctrl(void)
{
- CLKCTRL(am33xx_uart2_hwmod, AM43XX_CM_PER_UART1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart3_hwmod, AM43XX_CM_PER_UART2_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart4_hwmod, AM43XX_CM_PER_UART3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart5_hwmod, AM43XX_CM_PER_UART4_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart6_hwmod, AM43XX_CM_PER_UART5_CLKCTRL_OFFSET);
CLKCTRL(am33xx_dcan0_hwmod, AM43XX_CM_PER_DCAN0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_dcan1_hwmod, AM43XX_CM_PER_DCAN1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_elm_hwmod, AM43XX_CM_PER_ELM_CLKCTRL_OFFSET);
@@ -1354,13 +1106,9 @@ static void omap_hwmod_am43xx_clkctrl(void)
CLKCTRL(am33xx_gpio1_hwmod, AM43XX_CM_PER_GPIO1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpio2_hwmod, AM43XX_CM_PER_GPIO2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpio3_hwmod, AM43XX_CM_PER_GPIO3_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c2_hwmod, AM43XX_CM_PER_I2C1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c3_hwmod, AM43XX_CM_PER_I2C2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mailbox_hwmod, AM43XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mcasp0_hwmod, AM43XX_CM_PER_MCASP0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mcasp1_hwmod, AM43XX_CM_PER_MCASP1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc0_hwmod, AM43XX_CM_PER_MMC0_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc1_hwmod, AM43XX_CM_PER_MMC1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spi0_hwmod, AM43XX_CM_PER_SPI0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spi1_hwmod, AM43XX_CM_PER_SPI1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_spinlock_hwmod, AM43XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET);
@@ -1374,12 +1122,9 @@ static void omap_hwmod_am43xx_clkctrl(void)
AM43XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_smartreflex1_hwmod,
AM43XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_uart1_hwmod, AM43XX_CM_WKUP_UART0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_timer1_hwmod, AM43XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_i2c1_hwmod, AM43XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_wd_timer1_hwmod, AM43XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_rtc_hwmod, AM43XX_CM_RTC_RTC_CLKCTRL_OFFSET);
- CLKCTRL(am33xx_mmc2_hwmod, AM43XX_CM_PER_MMC2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpmc_hwmod, AM43XX_CM_PER_GPMC_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_ls_hwmod, AM43XX_CM_PER_L4LS_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_wkup_hwmod, AM43XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET);
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index c9483bc06228..c965af275e34 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -14,8 +14,6 @@
* GNU General Public License for more details.
*/
-#include <linux/platform_data/i2c-omap.h>
-
#include "omap_hwmod.h"
#include "omap_hwmod_common_data.h"
@@ -23,7 +21,6 @@
#include "cm33xx.h"
#include "prm33xx.h"
#include "prm-regbits-33xx.h"
-#include "i2c.h"
#include "wd_timer.h"
#include "omap_hwmod_33xx_43xx_common_data.h"
@@ -230,27 +227,6 @@ static struct omap_hwmod am33xx_control_hwmod = {
},
};
-/* gpio0 */
-static struct omap_hwmod_opt_clk gpio0_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio0_dbclk" },
-};
-
-static struct omap_hwmod am33xx_gpio0_hwmod = {
- .name = "gpio1",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "dpll_core_m4_div2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio0_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio0_opt_clks),
-};
-
/* lcdc */
static struct omap_hwmod_class_sysconfig lcdc_sysc = {
.rev_offs = 0x0,
@@ -388,22 +364,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__control = {
.user = OCP_USER_MPU,
};
-/* L4 WKUP -> I2C1 */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__i2c1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_i2c1_hwmod,
- .clk = "dpll_core_m4_div2_ck",
- .user = OCP_USER_MPU,
-};
-
-/* L4 WKUP -> GPIO1 */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__gpio0 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_gpio0_hwmod,
- .clk = "dpll_core_m4_div2_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* L4 WKUP -> ADC_TSC */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__adc_tsc = {
.master = &am33xx_l4_wkup_hwmod,
@@ -434,14 +394,6 @@ static struct omap_hwmod_ocp_if am33xx_l4_wkup__timer1 = {
.user = OCP_USER_MPU,
};
-/* l4 wkup -> uart1 */
-static struct omap_hwmod_ocp_if am33xx_l4_wkup__uart1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_uart1_hwmod,
- .clk = "dpll_core_m4_div2_ck",
- .user = OCP_USER_MPU,
-};
-
/* l4 wkup -> wd_timer1 */
static struct omap_hwmod_ocp_if am33xx_l4_wkup__wd_timer1 = {
.master = &am33xx_l4_wkup_hwmod,
@@ -479,27 +431,16 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_wkup__control,
&am33xx_l4_wkup__smartreflex0,
&am33xx_l4_wkup__smartreflex1,
- &am33xx_l4_wkup__uart1,
&am33xx_l4_wkup__timer1,
&am33xx_l4_wkup__rtc,
- &am33xx_l4_wkup__i2c1,
- &am33xx_l4_wkup__gpio0,
&am33xx_l4_wkup__adc_tsc,
&am33xx_l4_wkup__wd_timer1,
&am33xx_l4_hs__pruss,
&am33xx_l4_per__dcan0,
&am33xx_l4_per__dcan1,
- &am33xx_l4_per__gpio1,
- &am33xx_l4_per__gpio2,
- &am33xx_l4_per__gpio3,
- &am33xx_l4_per__i2c2,
- &am33xx_l4_per__i2c3,
&am33xx_l4_per__mailbox,
&am33xx_l4_ls__mcasp0,
&am33xx_l4_ls__mcasp1,
- &am33xx_l4_ls__mmc0,
- &am33xx_l4_ls__mmc1,
- &am33xx_l3_s__mmc2,
&am33xx_l4_ls__timer2,
&am33xx_l4_ls__timer3,
&am33xx_l4_ls__timer4,
@@ -507,11 +448,6 @@ static struct omap_hwmod_ocp_if *am33xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_ls__timer6,
&am33xx_l4_ls__timer7,
&am33xx_l3_main__tpcc,
- &am33xx_l4_ls__uart2,
- &am33xx_l4_ls__uart3,
- &am33xx_l4_ls__uart4,
- &am33xx_l4_ls__uart5,
- &am33xx_l4_ls__uart6,
&am33xx_l4_ls__spinlock,
&am33xx_l4_ls__elm,
&am33xx_l4_ls__epwmss0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 23e6a41a18eb..edff39921bf8 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -484,7 +484,6 @@ static struct omap_hwmod am35xx_uart4_hwmod = {
static struct omap_hwmod_class i2c_class = {
.name = "i2c",
.sysc = &i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_1,
.reset = &omap_i2c_reset,
};
@@ -707,7 +706,6 @@ static struct omap_hwmod_class_sysconfig omap3xxx_gpio_sysc = {
static struct omap_hwmod_class omap3xxx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &omap3xxx_gpio_sysc,
- .rev = 1,
};
/* gpio1 */
@@ -1029,7 +1027,6 @@ static struct omap_hwmod_class_sysconfig omap34xx_sr_sysc = {
static struct omap_hwmod_class omap34xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &omap34xx_sr_sysc,
- .rev = 1,
};
static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = {
@@ -1044,7 +1041,6 @@ static struct omap_hwmod_class_sysconfig omap36xx_sr_sysc = {
static struct omap_hwmod_class omap36xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &omap36xx_sr_sysc,
- .rev = 2,
};
/* SR1 */
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index aa271ac5ebac..69571abc14fd 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -87,26 +87,6 @@ static struct omap_hwmod am43xx_control_hwmod = {
},
};
-static struct omap_hwmod_opt_clk gpio0_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio0_dbclk" },
-};
-
-static struct omap_hwmod am43xx_gpio0_hwmod = {
- .name = "gpio1",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "sys_clkin_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio0_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio0_opt_clks),
-};
-
static struct omap_hwmod_class_sysconfig am43xx_synctimer_sysc = {
.rev_offs = 0x0,
.sysc_offs = 0x4,
@@ -264,46 +244,6 @@ static struct omap_hwmod am43xx_spi4_hwmod = {
},
};
-static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio4_dbclk" },
-};
-
-static struct omap_hwmod am43xx_gpio4_hwmod = {
- .name = "gpio5",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_PER_GPIO4_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio4_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
-};
-
-static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio5_dbclk" },
-};
-
-static struct omap_hwmod am43xx_gpio5_hwmod = {
- .name = "gpio6",
- .class = &am33xx_gpio_hwmod_class,
- .clkdm_name = "l4ls_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4ls_gclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = AM43XX_CM_PER_GPIO5_CLKCTRL_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = gpio5_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
-};
-
static struct omap_hwmod_class am43xx_ocp2scp_hwmod_class = {
.name = "ocp2scp",
};
@@ -650,20 +590,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__control = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_l4_wkup__i2c1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_i2c1_hwmod,
- .clk = "sys_clkin_ck",
- .user = OCP_USER_MPU,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_wkup__gpio0 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am43xx_gpio0_hwmod,
- .clk = "sys_clkin_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_wkup__adc_tsc = {
.master = &am33xx_l4_wkup_hwmod,
.slave = &am43xx_adc_tsc_hwmod,
@@ -685,13 +611,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_wkup__timer1 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_l4_wkup__uart1 = {
- .master = &am33xx_l4_wkup_hwmod,
- .slave = &am33xx_uart1_hwmod,
- .clk = "sys_clkin_ck",
- .user = OCP_USER_MPU,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_wkup__wd_timer1 = {
.master = &am33xx_l4_wkup_hwmod,
.slave = &am33xx_wd_timer1_hwmod,
@@ -776,20 +695,6 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__mcspi4 = {
.user = OCP_USER_MPU,
};
-static struct omap_hwmod_ocp_if am43xx_l4_ls__gpio4 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am43xx_gpio4_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if am43xx_l4_ls__gpio5 = {
- .master = &am33xx_l4_ls_hwmod,
- .slave = &am43xx_gpio5_hwmod,
- .clk = "l4ls_gclk",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
static struct omap_hwmod_ocp_if am43xx_l4_ls__ocp2scp0 = {
.master = &am33xx_l4_ls_hwmod,
.slave = &am43xx_ocp2scp0_hwmod,
@@ -907,8 +812,6 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_ls__mcspi2,
&am43xx_l4_ls__mcspi3,
&am43xx_l4_ls__mcspi4,
- &am43xx_l4_ls__gpio4,
- &am43xx_l4_ls__gpio5,
&am43xx_l3_main__pruss,
&am33xx_mpu__l3_main,
&am33xx_mpu__prcm,
@@ -927,27 +830,16 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am43xx_l4_wkup__control,
&am43xx_l4_wkup__smartreflex0,
&am43xx_l4_wkup__smartreflex1,
- &am43xx_l4_wkup__uart1,
&am43xx_l4_wkup__timer1,
- &am43xx_l4_wkup__i2c1,
- &am43xx_l4_wkup__gpio0,
&am43xx_l4_wkup__wd_timer1,
&am43xx_l4_wkup__adc_tsc,
&am43xx_l3_s__qspi,
&am33xx_l4_per__dcan0,
&am33xx_l4_per__dcan1,
- &am33xx_l4_per__gpio1,
- &am33xx_l4_per__gpio2,
- &am33xx_l4_per__gpio3,
- &am33xx_l4_per__i2c2,
- &am33xx_l4_per__i2c3,
&am33xx_l4_per__mailbox,
&am33xx_l4_per__rng,
&am33xx_l4_ls__mcasp0,
&am33xx_l4_ls__mcasp1,
- &am33xx_l4_ls__mmc0,
- &am33xx_l4_ls__mmc1,
- &am33xx_l3_s__mmc2,
&am33xx_l4_ls__timer2,
&am33xx_l4_ls__timer3,
&am33xx_l4_ls__timer4,
@@ -955,11 +847,6 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
&am33xx_l4_ls__timer6,
&am33xx_l4_ls__timer7,
&am33xx_l3_main__tpcc,
- &am33xx_l4_ls__uart2,
- &am33xx_l4_ls__uart3,
- &am33xx_l4_ls__uart4,
- &am33xx_l4_ls__uart5,
- &am33xx_l4_ls__uart6,
&am33xx_l4_ls__spinlock,
&am33xx_l4_ls__elm,
&am33xx_l4_ls__epwmss0,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index a95dbac57a81..b8de550a15b4 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -21,9 +21,7 @@
*/
#include <linux/io.h>
-#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
-#include <linux/platform_data/i2c-omap.h>
#include <linux/omap-dma.h>
@@ -33,7 +31,6 @@
#include "cm2_44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"
-#include "i2c.h"
#include "wd_timer.h"
/* Base offset for all OMAP4 interrupts external to MPUSS */
@@ -1056,160 +1053,6 @@ static struct omap_hwmod omap44xx_fdif_hwmod = {
};
/*
- * 'gpio' class
- * general purpose io module
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0114,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &omap44xx_gpio_sysc,
- .rev = 2,
-};
-
-/* gpio1 */
-static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio1_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio1_hwmod = {
- .name = "gpio1",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .main_clk = "l4_wkup_clk_mux_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_WKUP_GPIO1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_WKUP_GPIO1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
-};
-
-/* gpio2 */
-static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio2_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio2_hwmod = {
- .name = "gpio2",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_GPIO2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_GPIO2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
-};
-
-/* gpio3 */
-static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio3_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio3_hwmod = {
- .name = "gpio3",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_GPIO3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_GPIO3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio3_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
-};
-
-/* gpio4 */
-static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio4_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio4_hwmod = {
- .name = "gpio4",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_GPIO4_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_GPIO4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio4_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
-};
-
-/* gpio5 */
-static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio5_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio5_hwmod = {
- .name = "gpio5",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_GPIO5_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_GPIO5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio5_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
-};
-
-/* gpio6 */
-static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio6_dbclk" },
-};
-
-static struct omap_hwmod omap44xx_gpio6_hwmod = {
- .name = "gpio6",
- .class = &omap44xx_gpio_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_GPIO6_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_GPIO6_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio6_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks),
-};
-
-/*
* 'gpmc' class
* general purpose memory controller
*/
@@ -1355,94 +1198,6 @@ static struct omap_hwmod omap44xx_hsi_hwmod = {
};
/*
- * 'i2c' class
- * multimaster high-speed i2c controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_i2c_sysc = {
- .rev_offs = 0,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0090,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_i2c_hwmod_class = {
- .name = "i2c",
- .sysc = &omap44xx_i2c_sysc,
- .rev = OMAP_I2C_IP_VERSION_2,
- .reset = &omap_i2c_reset,
-};
-
-/* i2c1 */
-static struct omap_hwmod omap44xx_i2c1_hwmod = {
- .name = "i2c1",
- .class = &omap44xx_i2c_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_I2C1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_I2C1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c2 */
-static struct omap_hwmod omap44xx_i2c2_hwmod = {
- .name = "i2c2",
- .class = &omap44xx_i2c_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_I2C2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_I2C2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c3 */
-static struct omap_hwmod omap44xx_i2c3_hwmod = {
- .name = "i2c3",
- .class = &omap44xx_i2c_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_I2C3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_I2C3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c4 */
-static struct omap_hwmod omap44xx_i2c4_hwmod = {
- .name = "i2c4",
- .class = &omap44xx_i2c_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_I2C4_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_I2C4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'ipu' class
* imaging processor unit
*/
@@ -1819,189 +1574,6 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
};
/*
- * 'mcspi' class
- * multichannel serial port interface (mcspi) / master/slave synchronous serial
- * bus
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_mcspi_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_mcspi_hwmod_class = {
- .name = "mcspi",
- .sysc = &omap44xx_mcspi_sysc,
-};
-
-/* mcspi1 */
-static struct omap_hwmod omap44xx_mcspi1_hwmod = {
- .name = "mcspi1",
- .class = &omap44xx_mcspi_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MCSPI1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MCSPI1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mcspi2 */
-static struct omap_hwmod omap44xx_mcspi2_hwmod = {
- .name = "mcspi2",
- .class = &omap44xx_mcspi_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MCSPI2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MCSPI2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mcspi3 */
-static struct omap_hwmod omap44xx_mcspi3_hwmod = {
- .name = "mcspi3",
- .class = &omap44xx_mcspi_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MCSPI3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MCSPI3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mcspi4 */
-static struct omap_hwmod omap44xx_mcspi4_hwmod = {
- .name = "mcspi4",
- .class = &omap44xx_mcspi_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MCSPI4_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MCSPI4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'mmc' class
- * multimedia card high-speed/sd/sdio (mmc/sd/sdio) host controller
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_mmc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_mmc_hwmod_class = {
- .name = "mmc",
- .sysc = &omap44xx_mmc_sysc,
-};
-
-/* mmc1 */
-static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-static struct omap_hwmod omap44xx_mmc1_hwmod = {
- .name = "mmc1",
- .class = &omap44xx_mmc_hwmod_class,
- .clkdm_name = "l3_init_clkdm",
- .main_clk = "hsmmc1_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3INIT_MMC1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3INIT_MMC1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .dev_attr = &mmc1_dev_attr,
-};
-
-/* mmc2 */
-static struct omap_hwmod omap44xx_mmc2_hwmod = {
- .name = "mmc2",
- .class = &omap44xx_mmc_hwmod_class,
- .clkdm_name = "l3_init_clkdm",
- .main_clk = "hsmmc2_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3INIT_MMC2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3INIT_MMC2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc3 */
-static struct omap_hwmod omap44xx_mmc3_hwmod = {
- .name = "mmc3",
- .class = &omap44xx_mmc_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MMCSD3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MMCSD3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc4 */
-static struct omap_hwmod omap44xx_mmc4_hwmod = {
- .name = "mmc4",
- .class = &omap44xx_mmc_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MMCSD4_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MMCSD4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc5 */
-static struct omap_hwmod omap44xx_mmc5_hwmod = {
- .name = "mmc5",
- .class = &omap44xx_mmc_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_MMCSD5_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_MMCSD5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'mmu' class
* The memory management unit performs virtual to physical address translation
* for its requestors.
@@ -2367,7 +1939,6 @@ static struct omap_hwmod_class_sysconfig omap44xx_smartreflex_sysc = {
static struct omap_hwmod_class omap44xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &omap44xx_smartreflex_sysc,
- .rev = 2,
};
/* smartreflex_core */
@@ -2673,92 +2244,6 @@ static struct omap_hwmod omap44xx_timer11_hwmod = {
};
/*
- * 'uart' class
- * universal asynchronous receiver/transmitter (uart)
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_uart_sysc = {
- .rev_offs = 0x0050,
- .sysc_offs = 0x0054,
- .syss_offs = 0x0058,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap44xx_uart_hwmod_class = {
- .name = "uart",
- .sysc = &omap44xx_uart_sysc,
-};
-
-/* uart1 */
-static struct omap_hwmod omap44xx_uart1_hwmod = {
- .name = "uart1",
- .class = &omap44xx_uart_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_UART1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_UART1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart2 */
-static struct omap_hwmod omap44xx_uart2_hwmod = {
- .name = "uart2",
- .class = &omap44xx_uart_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_UART2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_UART2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart3 */
-static struct omap_hwmod omap44xx_uart3_hwmod = {
- .name = "uart3",
- .class = &omap44xx_uart_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_UART3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_UART3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart4 */
-static struct omap_hwmod omap44xx_uart4_hwmod = {
- .name = "uart4",
- .class = &omap44xx_uart_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_UART4_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_UART4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'usb_host_fs' class
* full-speed usb host controller
*/
@@ -3082,22 +2567,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* mmc1 -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_mmc1__l3_main_1 = {
- .master = &omap44xx_mmc1_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mmc2 -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_mmc2__l3_main_1 = {
- .master = &omap44xx_mmc2_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* mpu -> l3_main_1 */
static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
.master = &omap44xx_mpu_hwmod,
@@ -3554,54 +3023,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__fdif = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> gpio1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__gpio1 = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_gpio1_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_gpio2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio3 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_gpio3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio4 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_gpio4_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio5 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio5 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_gpio5_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio6 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__gpio6 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_gpio6_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_2 -> gpmc */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__gpmc = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -3634,38 +3055,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__hsi = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per -> i2c1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c1 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_i2c1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_i2c2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c3 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_i2c3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__i2c4 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_i2c4_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_2 -> ipu */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ipu = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -3770,78 +3159,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__mcpdm = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per -> mcspi1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi1 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mcspi1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mcspi2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mcspi2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mcspi3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi3 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mcspi3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mcspi4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mcspi4 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mcspi4_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc1 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mmc1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mmc2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc3 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mmc3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc4 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mmc4_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc5 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__mmc5 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_mmc5_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_2 -> ocmc_ram */
static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ocmc_ram = {
.master = &omap44xx_l3_main_2_hwmod,
@@ -4050,38 +3367,6 @@ static struct omap_hwmod_ocp_if omap44xx_l4_per__timer11 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per -> uart1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__uart1 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_uart1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__uart2 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_uart2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__uart3 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_uart3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart4 */
-static struct omap_hwmod_ocp_if omap44xx_l4_per__uart4 = {
- .master = &omap44xx_l4_per_hwmod,
- .slave = &omap44xx_uart4_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> usb_host_fs */
static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
.master = &omap44xx_l4_cfg_hwmod,
@@ -4164,8 +3449,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_dss__l3_main_1,
&omap44xx_l3_main_2__l3_main_1,
&omap44xx_l4_cfg__l3_main_1,
- &omap44xx_mmc1__l3_main_1,
- &omap44xx_mmc2__l3_main_1,
&omap44xx_mpu__l3_main_1,
&omap44xx_debugss__l3_main_2,
&omap44xx_dma_system__l3_main_2,
@@ -4222,20 +3505,10 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__dss_venc,
&omap44xx_l4_per__elm,
&omap44xx_l4_cfg__fdif,
- &omap44xx_l4_wkup__gpio1,
- &omap44xx_l4_per__gpio2,
- &omap44xx_l4_per__gpio3,
- &omap44xx_l4_per__gpio4,
- &omap44xx_l4_per__gpio5,
- &omap44xx_l4_per__gpio6,
&omap44xx_l3_main_2__gpmc,
&omap44xx_l3_main_2__gpu,
&omap44xx_l4_per__hdq1w,
&omap44xx_l4_cfg__hsi,
- &omap44xx_l4_per__i2c1,
- &omap44xx_l4_per__i2c2,
- &omap44xx_l4_per__i2c3,
- &omap44xx_l4_per__i2c4,
&omap44xx_l3_main_2__ipu,
&omap44xx_l3_main_2__iss,
/* &omap44xx_iva__sl2if, */
@@ -4249,15 +3522,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_abe__mcbsp3,
&omap44xx_l4_per__mcbsp4,
&omap44xx_l4_abe__mcpdm,
- &omap44xx_l4_per__mcspi1,
- &omap44xx_l4_per__mcspi2,
- &omap44xx_l4_per__mcspi3,
- &omap44xx_l4_per__mcspi4,
- &omap44xx_l4_per__mmc1,
- &omap44xx_l4_per__mmc2,
- &omap44xx_l4_per__mmc3,
- &omap44xx_l4_per__mmc4,
- &omap44xx_l4_per__mmc5,
&omap44xx_l3_main_2__mmu_ipu,
&omap44xx_l4_cfg__mmu_dsp,
&omap44xx_l3_main_2__ocmc_ram,
@@ -4286,10 +3550,6 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__timer9,
&omap44xx_l4_per__timer10,
&omap44xx_l4_per__timer11,
- &omap44xx_l4_per__uart1,
- &omap44xx_l4_per__uart2,
- &omap44xx_l4_per__uart3,
- &omap44xx_l4_per__uart4,
/* &omap44xx_l4_cfg__usb_host_fs, */
&omap44xx_l4_cfg__usb_host_hs,
&omap44xx_l4_cfg__usb_otg_hs,
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 115473d441cd..29805cc9d74c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -18,9 +18,7 @@
*/
#include <linux/io.h>
-#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
-#include <linux/platform_data/i2c-omap.h>
#include <linux/omap-dma.h>
@@ -29,7 +27,6 @@
#include "cm1_54xx.h"
#include "cm2_54xx.h"
#include "prm54xx.h"
-#include "i2c.h"
#include "wd_timer.h"
/* Base offset for all OMAP5 interrupts external to MPUSS */
@@ -601,308 +598,6 @@ static struct omap_hwmod omap54xx_emif2_hwmod = {
};
/*
- * 'gpio' class
- * general purpose io module
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0114,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &omap54xx_gpio_sysc,
- .rev = 2,
-};
-
-/* gpio1 */
-static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio1_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio1_hwmod = {
- .name = "gpio1",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .main_clk = "wkupaon_iclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_WKUPAON_GPIO1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_WKUPAON_GPIO1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
-};
-
-/* gpio2 */
-static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio2_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio2_hwmod = {
- .name = "gpio2",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
-};
-
-/* gpio3 */
-static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio3_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio3_hwmod = {
- .name = "gpio3",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO3_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio3_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
-};
-
-/* gpio4 */
-static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio4_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio4_hwmod = {
- .name = "gpio4",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO4_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio4_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
-};
-
-/* gpio5 */
-static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio5_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio5_hwmod = {
- .name = "gpio5",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO5_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio5_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
-};
-
-/* gpio6 */
-static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio6_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio6_hwmod = {
- .name = "gpio6",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO6_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO6_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio6_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks),
-};
-
-/* gpio7 */
-static struct omap_hwmod_opt_clk gpio7_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio7_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio7_hwmod = {
- .name = "gpio7",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO7_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO7_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio7_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio7_opt_clks),
-};
-
-/* gpio8 */
-static struct omap_hwmod_opt_clk gpio8_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio8_dbclk" },
-};
-
-static struct omap_hwmod omap54xx_gpio8_hwmod = {
- .name = "gpio8",
- .class = &omap54xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_GPIO8_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_GPIO8_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio8_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio8_opt_clks),
-};
-
-/*
- * 'i2c' class
- * multimaster high-speed i2c controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_i2c_sysc = {
- .rev_offs = 0,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0090,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_i2c_hwmod_class = {
- .name = "i2c",
- .sysc = &omap54xx_i2c_sysc,
- .reset = &omap_i2c_reset,
- .rev = OMAP_I2C_IP_VERSION_2,
-};
-
-/* i2c1 */
-static struct omap_hwmod omap54xx_i2c1_hwmod = {
- .name = "i2c1",
- .class = &omap54xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_I2C1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_I2C1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c2 */
-static struct omap_hwmod omap54xx_i2c2_hwmod = {
- .name = "i2c2",
- .class = &omap54xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_I2C2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_I2C2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c3 */
-static struct omap_hwmod omap54xx_i2c3_hwmod = {
- .name = "i2c3",
- .class = &omap54xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_I2C3_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_I2C3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c4 */
-static struct omap_hwmod omap54xx_i2c4_hwmod = {
- .name = "i2c4",
- .class = &omap54xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_I2C4_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_I2C4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c5 */
-static struct omap_hwmod omap54xx_i2c5_hwmod = {
- .name = "i2c5",
- .class = &omap54xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_I2C5_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_I2C5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'kbd' class
* keyboard controller
*/
@@ -1185,115 +880,6 @@ static struct omap_hwmod omap54xx_mcspi4_hwmod = {
};
/*
- * 'mmc' class
- * multimedia card high-speed/sd/sdio (mmc/sd/sdio) host controller
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_mmc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_mmc_hwmod_class = {
- .name = "mmc",
- .sysc = &omap54xx_mmc_sysc,
-};
-
-/* mmc1 */
-static struct omap_hwmod_opt_clk mmc1_opt_clks[] = {
- { .role = "32khz_clk", .clk = "mmc1_32khz_clk" },
-};
-
-/* mmc1 dev_attr */
-static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-static struct omap_hwmod omap54xx_mmc1_hwmod = {
- .name = "mmc1",
- .class = &omap54xx_mmc_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .main_clk = "mmc1_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3INIT_MMC1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3INIT_MMC1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = mmc1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(mmc1_opt_clks),
- .dev_attr = &mmc1_dev_attr,
-};
-
-/* mmc2 */
-static struct omap_hwmod omap54xx_mmc2_hwmod = {
- .name = "mmc2",
- .class = &omap54xx_mmc_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .main_clk = "mmc2_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3INIT_MMC2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3INIT_MMC2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc3 */
-static struct omap_hwmod omap54xx_mmc3_hwmod = {
- .name = "mmc3",
- .class = &omap54xx_mmc_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_MMC3_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_MMC3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc4 */
-static struct omap_hwmod omap54xx_mmc4_hwmod = {
- .name = "mmc4",
- .class = &omap54xx_mmc_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_MMC4_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_MMC4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* mmc5 */
-static struct omap_hwmod omap54xx_mmc5_hwmod = {
- .name = "mmc5",
- .class = &omap54xx_mmc_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_MMC5_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_MMC5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'mmu' class
* The memory management unit performs virtual to physical address translation
* for its requestors.
@@ -1658,124 +1244,6 @@ static struct omap_hwmod omap54xx_timer11_hwmod = {
};
/*
- * 'uart' class
- * universal asynchronous receiver/transmitter (uart)
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_uart_sysc = {
- .rev_offs = 0x0050,
- .sysc_offs = 0x0054,
- .syss_offs = 0x0058,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class omap54xx_uart_hwmod_class = {
- .name = "uart",
- .sysc = &omap54xx_uart_sysc,
-};
-
-/* uart1 */
-static struct omap_hwmod omap54xx_uart1_hwmod = {
- .name = "uart1",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart2 */
-static struct omap_hwmod omap54xx_uart2_hwmod = {
- .name = "uart2",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart3 */
-static struct omap_hwmod omap54xx_uart3_hwmod = {
- .name = "uart3",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart4 */
-static struct omap_hwmod omap54xx_uart4_hwmod = {
- .name = "uart4",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART4_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart5 */
-static struct omap_hwmod omap54xx_uart5_hwmod = {
- .name = "uart5",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART5_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart6 */
-static struct omap_hwmod omap54xx_uart6_hwmod = {
- .name = "uart6",
- .class = &omap54xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .main_clk = "func_48m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_UART6_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_UART6_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'usb_host_hs' class
* high-speed multi-port usb host controller
*/
@@ -2274,110 +1742,6 @@ static struct omap_hwmod_ocp_if omap54xx_mpu__emif2 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> gpio1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_wkup__gpio1 = {
- .master = &omap54xx_l4_wkup_hwmod,
- .slave = &omap54xx_gpio1_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio2 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio2_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio3 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio3_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio4 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio4 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio4_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio5 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio5 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio5_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio6 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio6 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio6_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio7 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio7 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio7_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> gpio8 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__gpio8 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_gpio8_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__i2c1 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_i2c1_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__i2c2 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_i2c2_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__i2c3 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_i2c3_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c4 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__i2c4 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_i2c4_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> i2c5 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__i2c5 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_i2c5_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_wkup -> kbd */
static struct omap_hwmod_ocp_if omap54xx_l4_wkup__kbd = {
.master = &omap54xx_l4_wkup_hwmod,
@@ -2458,46 +1822,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_per__mcspi4 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per -> mmc1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__mmc1 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_mmc1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__mmc2 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_mmc2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__mmc3 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_mmc3_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc4 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__mmc4 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_mmc4_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> mmc5 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__mmc5 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_mmc5_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> mpu */
static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
.master = &omap54xx_l4_cfg_hwmod,
@@ -2610,54 +1934,6 @@ static struct omap_hwmod_ocp_if omap54xx_l4_per__timer11 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per -> uart1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart1 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart1_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart2 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart2_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart3 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart3_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart4 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart4 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart4_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart5 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart5 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart5_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per -> uart6 */
-static struct omap_hwmod_ocp_if omap54xx_l4_per__uart6 = {
- .master = &omap54xx_l4_per_hwmod,
- .slave = &omap54xx_uart6_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> usb_host_hs */
static struct omap_hwmod_ocp_if omap54xx_l4_cfg__usb_host_hs = {
.master = &omap54xx_l4_cfg_hwmod,
@@ -2719,19 +1995,6 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l3_main_2__dss_rfbi,
&omap54xx_mpu__emif1,
&omap54xx_mpu__emif2,
- &omap54xx_l4_wkup__gpio1,
- &omap54xx_l4_per__gpio2,
- &omap54xx_l4_per__gpio3,
- &omap54xx_l4_per__gpio4,
- &omap54xx_l4_per__gpio5,
- &omap54xx_l4_per__gpio6,
- &omap54xx_l4_per__gpio7,
- &omap54xx_l4_per__gpio8,
- &omap54xx_l4_per__i2c1,
- &omap54xx_l4_per__i2c2,
- &omap54xx_l4_per__i2c3,
- &omap54xx_l4_per__i2c4,
- &omap54xx_l4_per__i2c5,
&omap54xx_l3_main_2__mmu_ipu,
&omap54xx_l4_wkup__kbd,
&omap54xx_l4_cfg__mailbox,
@@ -2743,11 +2006,6 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l4_per__mcspi2,
&omap54xx_l4_per__mcspi3,
&omap54xx_l4_per__mcspi4,
- &omap54xx_l4_per__mmc1,
- &omap54xx_l4_per__mmc2,
- &omap54xx_l4_per__mmc3,
- &omap54xx_l4_per__mmc4,
- &omap54xx_l4_per__mmc5,
&omap54xx_l4_cfg__mpu,
&omap54xx_l4_cfg__spinlock,
&omap54xx_l4_cfg__ocp2scp1,
@@ -2762,12 +2020,6 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
&omap54xx_l4_per__timer9,
&omap54xx_l4_per__timer10,
&omap54xx_l4_per__timer11,
- &omap54xx_l4_per__uart1,
- &omap54xx_l4_per__uart2,
- &omap54xx_l4_per__uart3,
- &omap54xx_l4_per__uart4,
- &omap54xx_l4_per__uart5,
- &omap54xx_l4_per__uart6,
&omap54xx_l4_cfg__usb_host_hs,
&omap54xx_l4_cfg__usb_tll_hs,
&omap54xx_l4_cfg__usb_otg_ss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index e6c7061a8e73..7e85bd27ce9a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -18,9 +18,7 @@
*/
#include <linux/io.h>
-#include <linux/platform_data/hsmmc-omap.h>
#include <linux/power/smartreflex.h>
-#include <linux/platform_data/i2c-omap.h>
#include <linux/omap-dma.h>
@@ -29,7 +27,6 @@
#include "cm1_7xx.h"
#include "cm2_7xx.h"
#include "prm7xx.h"
-#include "i2c.h"
#include "wd_timer.h"
#include "soc.h"
@@ -693,7 +690,6 @@ static struct omap_hwmod_class_sysconfig dra7xx_aes_sysc = {
static struct omap_hwmod_class dra7xx_aes_hwmod_class = {
.name = "aes",
.sysc = &dra7xx_aes_sysc,
- .rev = 2,
};
/* AES1 */
@@ -737,7 +733,6 @@ static struct omap_hwmod_class_sysconfig dra7xx_sha0_sysc = {
static struct omap_hwmod_class dra7xx_sha0_hwmod_class = {
.name = "sham",
.sysc = &dra7xx_sha0_sysc,
- .rev = 2,
};
struct omap_hwmod dra7xx_sha0_hwmod = {
@@ -792,205 +787,6 @@ static struct omap_hwmod dra7xx_elm_hwmod = {
};
/*
- * 'gpio' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_gpio_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0114,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_gpio_hwmod_class = {
- .name = "gpio",
- .sysc = &dra7xx_gpio_sysc,
- .rev = 2,
-};
-
-/* gpio1 */
-static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio1_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio1_hwmod = {
- .name = "gpio1",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "wkupaon_iclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_WKUPAON_GPIO1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_WKUPAON_GPIO1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio1_opt_clks),
-};
-
-/* gpio2 */
-static struct omap_hwmod_opt_clk gpio2_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio2_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio2_hwmod = {
- .name = "gpio2",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio2_opt_clks),
-};
-
-/* gpio3 */
-static struct omap_hwmod_opt_clk gpio3_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio3_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio3_hwmod = {
- .name = "gpio3",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO3_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio3_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks),
-};
-
-/* gpio4 */
-static struct omap_hwmod_opt_clk gpio4_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio4_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio4_hwmod = {
- .name = "gpio4",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO4_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio4_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks),
-};
-
-/* gpio5 */
-static struct omap_hwmod_opt_clk gpio5_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio5_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio5_hwmod = {
- .name = "gpio5",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO5_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio5_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio5_opt_clks),
-};
-
-/* gpio6 */
-static struct omap_hwmod_opt_clk gpio6_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio6_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio6_hwmod = {
- .name = "gpio6",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO6_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO6_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio6_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio6_opt_clks),
-};
-
-/* gpio7 */
-static struct omap_hwmod_opt_clk gpio7_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio7_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio7_hwmod = {
- .name = "gpio7",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO7_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO7_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio7_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio7_opt_clks),
-};
-
-/* gpio8 */
-static struct omap_hwmod_opt_clk gpio8_opt_clks[] = {
- { .role = "dbclk", .clk = "gpio8_dbclk" },
-};
-
-static struct omap_hwmod dra7xx_gpio8_hwmod = {
- .name = "gpio8",
- .class = &dra7xx_gpio_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_GPIO8_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_GPIO8_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
- .opt_clks = gpio8_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(gpio8_opt_clks),
-};
-
-/*
* 'gpmc' class
*
*/
@@ -1065,110 +861,6 @@ static struct omap_hwmod dra7xx_hdq1w_hwmod = {
};
/*
- * 'i2c' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_i2c_sysc = {
- .rev_offs = 0,
- .sysc_offs = 0x0010,
- .syss_offs = 0x0090,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_i2c_hwmod_class = {
- .name = "i2c",
- .sysc = &dra7xx_i2c_sysc,
- .reset = &omap_i2c_reset,
- .rev = OMAP_I2C_IP_VERSION_2,
-};
-
-/* i2c1 */
-static struct omap_hwmod dra7xx_i2c1_hwmod = {
- .name = "i2c1",
- .class = &dra7xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_I2C1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_I2C1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c2 */
-static struct omap_hwmod dra7xx_i2c2_hwmod = {
- .name = "i2c2",
- .class = &dra7xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_I2C2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_I2C2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c3 */
-static struct omap_hwmod dra7xx_i2c3_hwmod = {
- .name = "i2c3",
- .class = &dra7xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_I2C3_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_I2C3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c4 */
-static struct omap_hwmod dra7xx_i2c4_hwmod = {
- .name = "i2c4",
- .class = &dra7xx_i2c_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_I2C4_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_I2C4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* i2c5 */
-static struct omap_hwmod dra7xx_i2c5_hwmod = {
- .name = "i2c5",
- .class = &dra7xx_i2c_hwmod_class,
- .clkdm_name = "ipu_clkdm",
- .flags = HWMOD_16BIT_REG | HWMOD_SET_DEFAULT_CLOCKACT,
- .main_clk = "func_96m_fclk",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_IPU_I2C5_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_IPU_I2C5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
* 'mailbox' class
*
*/
@@ -1632,118 +1324,6 @@ static struct omap_hwmod dra7xx_mcasp8_hwmod = {
};
/*
- * 'mmc' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_mmc_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = (SYSC_HAS_EMUFREE | SYSC_HAS_MIDLEMODE |
- SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_mmc_hwmod_class = {
- .name = "mmc",
- .sysc = &dra7xx_mmc_sysc,
-};
-
-/* mmc1 */
-static struct omap_hwmod_opt_clk mmc1_opt_clks[] = {
- { .role = "clk32k", .clk = "mmc1_clk32k" },
-};
-
-/* mmc1 dev_attr */
-static struct omap_hsmmc_dev_attr mmc1_dev_attr = {
- .flags = OMAP_HSMMC_SUPPORTS_DUAL_VOLT,
-};
-
-static struct omap_hwmod dra7xx_mmc1_hwmod = {
- .name = "mmc1",
- .class = &dra7xx_mmc_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .main_clk = "mmc1_fclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INIT_MMC1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INIT_MMC1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = mmc1_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(mmc1_opt_clks),
- .dev_attr = &mmc1_dev_attr,
-};
-
-/* mmc2 */
-static struct omap_hwmod_opt_clk mmc2_opt_clks[] = {
- { .role = "clk32k", .clk = "mmc2_clk32k" },
-};
-
-static struct omap_hwmod dra7xx_mmc2_hwmod = {
- .name = "mmc2",
- .class = &dra7xx_mmc_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .main_clk = "mmc2_fclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INIT_MMC2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INIT_MMC2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = mmc2_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(mmc2_opt_clks),
-};
-
-/* mmc3 */
-static struct omap_hwmod_opt_clk mmc3_opt_clks[] = {
- { .role = "clk32k", .clk = "mmc3_clk32k" },
-};
-
-static struct omap_hwmod dra7xx_mmc3_hwmod = {
- .name = "mmc3",
- .class = &dra7xx_mmc_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "mmc3_gfclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_MMC3_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_MMC3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = mmc3_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(mmc3_opt_clks),
-};
-
-/* mmc4 */
-static struct omap_hwmod_opt_clk mmc4_opt_clks[] = {
- { .role = "clk32k", .clk = "mmc4_clk32k" },
-};
-
-static struct omap_hwmod dra7xx_mmc4_hwmod = {
- .name = "mmc4",
- .class = &dra7xx_mmc_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "mmc4_gfclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_MMC4_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_MMC4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = mmc4_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(mmc4_opt_clks),
-};
-
-/*
* 'mpu' class
*
*/
@@ -1832,7 +1412,7 @@ static struct omap_hwmod dra7xx_ocp2scp3_hwmod = {
* We use a PCIeSS HWMOD class specific reset handler to deassert the hardreset
* lines after asserting them.
*/
-static int dra7xx_pciess_reset(struct omap_hwmod *oh)
+int dra7xx_pciess_reset(struct omap_hwmod *oh)
{
int i;
@@ -2019,7 +1599,6 @@ static struct omap_hwmod_class_sysconfig dra7xx_smartreflex_sysc = {
static struct omap_hwmod_class dra7xx_smartreflex_hwmod_class = {
.name = "smartreflex",
.sysc = &dra7xx_smartreflex_sysc,
- .rev = 2,
};
/* smartreflex_core */
@@ -2375,188 +1954,6 @@ static struct omap_hwmod dra7xx_timer16_hwmod = {
},
};
-/*
- * 'uart' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_uart_sysc = {
- .rev_offs = 0x0050,
- .sysc_offs = 0x0054,
- .syss_offs = 0x0058,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
- SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type1,
-};
-
-static struct omap_hwmod_class dra7xx_uart_hwmod_class = {
- .name = "uart",
- .sysc = &dra7xx_uart_sysc,
-};
-
-/* uart1 */
-static struct omap_hwmod dra7xx_uart1_hwmod = {
- .name = "uart1",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "uart1_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_UART1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart2 */
-static struct omap_hwmod dra7xx_uart2_hwmod = {
- .name = "uart2",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "uart2_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_UART2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_UART2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart3 */
-static struct omap_hwmod dra7xx_uart3_hwmod = {
- .name = "uart3",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "uart3_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_UART3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart4 */
-static struct omap_hwmod dra7xx_uart4_hwmod = {
- .name = "uart4",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "uart4_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART4_FLAGS,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_UART4_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_UART4_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart5 */
-static struct omap_hwmod dra7xx_uart5_hwmod = {
- .name = "uart5",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .main_clk = "uart5_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_UART5_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER_UART5_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart6 */
-static struct omap_hwmod dra7xx_uart6_hwmod = {
- .name = "uart6",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "ipu_clkdm",
- .main_clk = "uart6_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_IPU_UART6_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_IPU_UART6_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart7 */
-static struct omap_hwmod dra7xx_uart7_hwmod = {
- .name = "uart7",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per2_clkdm",
- .main_clk = "uart7_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER2_UART7_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER2_UART7_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart8 */
-static struct omap_hwmod dra7xx_uart8_hwmod = {
- .name = "uart8",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per2_clkdm",
- .main_clk = "uart8_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER2_UART8_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER2_UART8_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart9 */
-static struct omap_hwmod dra7xx_uart9_hwmod = {
- .name = "uart9",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "l4per2_clkdm",
- .main_clk = "uart9_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER2_UART9_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER2_UART9_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* uart10 */
-static struct omap_hwmod dra7xx_uart10_hwmod = {
- .name = "uart10",
- .class = &dra7xx_uart_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .main_clk = "uart10_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_WKUPAON_UART10_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_WKUPAON_UART10_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
/* DES (the 'P' (public) device) */
static struct omap_hwmod_class_sysconfig dra7xx_des_sysc = {
.rev_offs = 0x0030,
@@ -3113,70 +2510,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_wkup -> gpio1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__gpio1 = {
- .master = &dra7xx_l4_wkup_hwmod,
- .slave = &dra7xx_gpio1_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio2 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio3 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio4 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio4 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio4_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio5 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio5 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio5_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio6 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio6 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio6_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio7 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio7 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio7_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> gpio8 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__gpio8 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_gpio8_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l3_main_1 -> gpmc */
static struct omap_hwmod_ocp_if dra7xx_l3_main_1__gpmc = {
.master = &dra7xx_l3_main_1_hwmod,
@@ -3193,46 +2526,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__hdq1w = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per1 -> i2c1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c1 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_i2c1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> i2c2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c2 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_i2c2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> i2c3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c3 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_i2c3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> i2c4 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c4 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_i2c4_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> i2c5 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__i2c5 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_i2c5_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> mailbox1 */
static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mailbox1 = {
.master = &dra7xx_l4_cfg_hwmod,
@@ -3369,38 +2662,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__mcspi4 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per1 -> mmc1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc1 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_mmc1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> mmc2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc2 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_mmc2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> mmc3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc3 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_mmc3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> mmc4 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__mmc4 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_mmc4_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_cfg -> mpu */
static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = {
.master = &dra7xx_l4_cfg_hwmod,
@@ -3633,62 +2894,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per3__timer16 = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per1 -> uart1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart1 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> uart2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart2 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> uart3 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart3 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> uart4 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart4 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart4_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> uart5 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart5 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart5_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per1 -> uart6 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per1__uart6 = {
- .master = &dra7xx_l4_per1_hwmod,
- .slave = &dra7xx_uart6_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> uart7 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__uart7 = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_uart7_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_per1 -> des */
static struct omap_hwmod_ocp_if dra7xx_l4_per1__des = {
.master = &dra7xx_l4_per1_hwmod,
@@ -3697,30 +2902,6 @@ static struct omap_hwmod_ocp_if dra7xx_l4_per1__des = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
-/* l4_per2 -> uart8 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__uart8 = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_uart8_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> uart9 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__uart9 = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_uart9_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> uart10 */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__uart10 = {
- .master = &dra7xx_l4_wkup_hwmod,
- .slave = &dra7xx_uart10_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
/* l4_per1 -> rng */
static struct omap_hwmod_ocp_if dra7xx_l4_per1__rng = {
.master = &dra7xx_l4_per1_hwmod,
@@ -3866,21 +3047,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l3_main_1__aes2,
&dra7xx_l3_main_1__sha0,
&dra7xx_l4_per1__elm,
- &dra7xx_l4_wkup__gpio1,
- &dra7xx_l4_per1__gpio2,
- &dra7xx_l4_per1__gpio3,
- &dra7xx_l4_per1__gpio4,
- &dra7xx_l4_per1__gpio5,
- &dra7xx_l4_per1__gpio6,
- &dra7xx_l4_per1__gpio7,
- &dra7xx_l4_per1__gpio8,
&dra7xx_l3_main_1__gpmc,
&dra7xx_l4_per1__hdq1w,
- &dra7xx_l4_per1__i2c1,
- &dra7xx_l4_per1__i2c2,
- &dra7xx_l4_per1__i2c3,
- &dra7xx_l4_per1__i2c4,
- &dra7xx_l4_per1__i2c5,
&dra7xx_l4_cfg__mailbox1,
&dra7xx_l4_per3__mailbox2,
&dra7xx_l4_per3__mailbox3,
@@ -3898,10 +3066,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_per1__mcspi2,
&dra7xx_l4_per1__mcspi3,
&dra7xx_l4_per1__mcspi4,
- &dra7xx_l4_per1__mmc1,
- &dra7xx_l4_per1__mmc2,
- &dra7xx_l4_per1__mmc3,
- &dra7xx_l4_per1__mmc4,
&dra7xx_l4_cfg__mpu,
&dra7xx_l4_cfg__ocp2scp1,
&dra7xx_l4_cfg__ocp2scp3,
@@ -3929,16 +3093,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
&dra7xx_l4_per3__timer14,
&dra7xx_l4_per3__timer15,
&dra7xx_l4_per3__timer16,
- &dra7xx_l4_per1__uart1,
- &dra7xx_l4_per1__uart2,
- &dra7xx_l4_per1__uart3,
- &dra7xx_l4_per1__uart4,
- &dra7xx_l4_per1__uart5,
- &dra7xx_l4_per1__uart6,
- &dra7xx_l4_per2__uart7,
- &dra7xx_l4_per2__uart8,
- &dra7xx_l4_per2__uart9,
- &dra7xx_l4_wkup__uart10,
&dra7xx_l4_per1__des,
&dra7xx_l4_per3__usb_otg_ss1,
&dra7xx_l4_per3__usb_otg_ss2,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index debcd88ab971..83230d9ce5ed 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -484,7 +484,6 @@ static struct omap_hwmod_class_sysconfig dm81xx_gpio_sysc = {
static struct omap_hwmod_class dm81xx_gpio_hwmod_class = {
.name = "gpio",
.sysc = &dm81xx_gpio_sysc,
- .rev = 2,
};
static struct omap_hwmod_opt_clk gpio1_opt_clks[] = {
diff --git a/arch/arm/mach-omap2/pm33xx-core.c b/arch/arm/mach-omap2/pm33xx-core.c
index 724cf5774a6c..f11442ed3eff 100644
--- a/arch/arm/mach-omap2/pm33xx-core.c
+++ b/arch/arm/mach-omap2/pm33xx-core.c
@@ -10,6 +10,12 @@
#include <asm/suspend.h>
#include <linux/errno.h>
#include <linux/platform_data/pm33xx.h>
+#include <linux/clk.h>
+#include <linux/platform_data/gpio-omap.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/wkup_m3_ipc.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
#include "cm33xx.h"
#include "common.h"
@@ -38,6 +44,29 @@ static int am43xx_map_scu(void)
return 0;
}
+static int am33xx_check_off_mode_enable(void)
+{
+ if (enable_off_mode)
+ pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");
+
+ /* off mode not supported on am335x so return 0 always */
+ return 0;
+}
+
+static int am43xx_check_off_mode_enable(void)
+{
+ /*
+ * Check for am437x-gp-evm, which has the right hardware design to
+ * support this mode reliably.
+ */
+ if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode)
+ return enable_off_mode;
+ else if (enable_off_mode)
+ pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");
+
+ return 0;
+}
+
static int amx3_common_init(void)
{
gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
@@ -51,10 +80,12 @@ static int amx3_common_init(void)
/* CEFUSE domain can be turned off post bootup */
cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
- if (cefuse_pwrdm)
- omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
- else
+ if (!cefuse_pwrdm)
pr_err("PM: Failed to get cefuse_pwrdm\n");
+ else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+ pr_info("PM: Leaving EFUSE power domain active\n");
+ else
+ omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
return 0;
}
@@ -139,7 +170,9 @@ static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
scu_power_mode(scu_base, SCU_PM_POWEROFF);
ret = cpu_suspend(args, fn);
scu_power_mode(scu_base, SCU_PM_NORMAL);
- amx3_post_suspend_common();
+
+ if (!am43xx_check_off_mode_enable())
+ amx3_post_suspend_common();
return ret;
}
@@ -161,10 +194,48 @@ void __iomem *am43xx_get_rtc_base_addr(void)
return omap_hwmod_get_mpu_rt_va(rtc_oh);
}
+static void am43xx_save_context(void)
+{
+}
+
+static void am33xx_save_context(void)
+{
+ omap_intc_save_context();
+}
+
+static void am33xx_restore_context(void)
+{
+ omap_intc_restore_context();
+}
+
+static void am43xx_restore_context(void)
+{
+ /*
+ * HACK: restore dpll_per_clkdcoldo register contents, to avoid
+ * breaking suspend-resume
+ */
+ writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14));
+}
+
+static void am43xx_prepare_rtc_suspend(void)
+{
+ omap_hwmod_enable(rtc_oh);
+}
+
+static void am43xx_prepare_rtc_resume(void)
+{
+ omap_hwmod_idle(rtc_oh);
+}
+
static struct am33xx_pm_platform_data am33xx_ops = {
.init = am33xx_suspend_init,
.soc_suspend = am33xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
+ .save_context = am33xx_save_context,
+ .restore_context = am33xx_restore_context,
+ .prepare_rtc_suspend = am43xx_prepare_rtc_suspend,
+ .prepare_rtc_resume = am43xx_prepare_rtc_resume,
+ .check_off_mode_enable = am33xx_check_off_mode_enable,
.get_rtc_base_addr = am43xx_get_rtc_base_addr,
};
@@ -172,6 +243,11 @@ static struct am33xx_pm_platform_data am43xx_ops = {
.init = am43xx_suspend_init,
.soc_suspend = am43xx_suspend,
.get_sram_addrs = amx3_get_sram_addrs,
+ .save_context = am43xx_save_context,
+ .restore_context = am43xx_restore_context,
+ .prepare_rtc_suspend = am43xx_prepare_rtc_suspend,
+ .prepare_rtc_resume = am43xx_prepare_rtc_resume,
+ .check_off_mode_enable = am43xx_check_off_mode_enable,
.get_rtc_base_addr = am43xx_get_rtc_base_addr,
};
diff --git a/arch/arm/mach-omap2/sleep43xx.S b/arch/arm/mach-omap2/sleep43xx.S
index 5b9343b58fc7..0c1031442571 100644
--- a/arch/arm/mach-omap2/sleep43xx.S
+++ b/arch/arm/mach-omap2/sleep43xx.S
@@ -368,6 +368,9 @@ wait_emif_enable1:
mov r1, #AM43XX_EMIF_POWEROFF_DISABLE
str r1, [r2, #0x0]
+ ldr r1, [r9, #EMIF_PM_RUN_HW_LEVELING]
+ blx r1
+
#ifdef CONFIG_CACHE_L2X0
ldr r2, l2_cache_base
ldr r0, [r2, #L2X0_CTRL]
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 0854ed9ff379..248f6d9a1bb3 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -119,7 +119,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
}
sr_data->name = oh->name;
- sr_data->ip_type = oh->class->rev;
+ if (cpu_is_omap343x())
+ sr_data->ip_type = 1;
+ else
+ sr_data->ip_type = 2;
sr_data->senn_mod = 0x1;
sr_data->senp_mod = 0x1;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index c67f92bfa30e..7bcb41137bbf 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index ffe05c27087e..1607deab5290 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -8,7 +8,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 4bcbd3d55b36..1f24e0259f99 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -35,7 +35,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
index e68acdd0cdbb..510625dde3cb 100644
--- a/arch/arm/mach-pxa/colibri-pxa270.c
+++ b/arch/arm/mach-pxa/colibri-pxa270.c
@@ -24,7 +24,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach-types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/audio.h>
#include "colibri.h"
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 6a5558d95d4e..2f635bdc797f 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <asm/mach-types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 17067a3039a8..ffcefe6dbc82 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -19,7 +19,7 @@
#include <linux/usb/gpio_vbus.h>
#include <asm/mach-types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index e31a591e949f..0c88e4e417b4 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -17,7 +17,7 @@
#include <linux/etherdevice.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/system_info.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
index 4764acca5480..eb03283ccdee 100644
--- a/arch/arm/mach-pxa/gumstix.c
+++ b/arch/arm/mach-pxa/gumstix.c
@@ -33,7 +33,7 @@
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index e9f401b0a432..5c03c4f7b82e 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -33,7 +33,7 @@
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index c1bd0d544981..825939877839 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -39,7 +39,7 @@
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d6e17d407ac0..b3f8592eebe6 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -40,7 +40,7 @@
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index c76f1daecfc9..99a2ee433f1f 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -35,7 +35,7 @@
#include <asm/memory.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index ab2f89266bbd..c4c25a2f24f6 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -58,7 +58,7 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/system_info.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index 51984a40b097..4675d9202000 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -245,6 +245,7 @@ static int __init rockchip_smp_prepare_pmu(void)
}
pmu_base = of_iomap(node, 0);
+ of_node_put(node);
if (!pmu_base) {
pr_err("%s: could not map pmu registers\n", __func__);
return -ENOMEM;
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index 0592534e0b88..065b09e6f1eb 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -59,7 +59,7 @@ static inline u32 rk3288_l2_config(void)
return l2ctlr;
}
-static void rk3288_config_bootdata(void)
+static void __init rk3288_config_bootdata(void)
{
rkpm_bootdata_cpusp = rk3288_bootram_phy + (SZ_4K - 8);
rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume);
@@ -230,7 +230,7 @@ static void rk3288_suspend_finish(void)
pr_err("%s: Suspend finish failed\n", __func__);
}
-static int rk3288_suspend_init(struct device_node *np)
+static int __init rk3288_suspend_init(struct device_node *np)
{
struct device_node *sram_np;
struct resource res;
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index e41cabc4dc2b..06ab03b93109 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
#include <linux/clk-provider.h>
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index fb48f3141fb4..f2703ca17954 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -151,6 +151,12 @@ static void iomd_free_dma(unsigned int chan, dma_t *dma)
free_irq(idma->irq, idma);
}
+static struct device isa_dma_dev = {
+ .init_name = "fallback device",
+ .coherent_dma_mask = ~(dma_addr_t)0,
+ .dma_mask = &isa_dma_dev.coherent_dma_mask,
+};
+
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
@@ -168,7 +174,7 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
idma->dma.sg = &idma->dma.buf;
idma->dma.sgcount = 1;
idma->dma.buf.length = idma->dma.count;
- idma->dma.buf.dma_address = dma_map_single(NULL,
+ idma->dma.buf.dma_address = dma_map_single(&isa_dma_dev,
idma->dma.addr, idma->dma.count,
idma->dma.dma_mode == DMA_MODE_READ ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
diff --git a/arch/arm/mach-s3c24xx/include/mach/hardware.h b/arch/arm/mach-s3c24xx/include/mach/hardware.h
index 1b2975708e3f..f28ac6c78d82 100644
--- a/arch/arm/mach-s3c24xx/include/mach/hardware.h
+++ b/arch/arm/mach-s3c24xx/include/mach/hardware.h
@@ -15,7 +15,7 @@ extern unsigned int s3c2410_modify_misccr(unsigned int clr, unsigned int chg);
#endif /* __ASSEMBLY__ */
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/map.h>
#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 76c4855a03bc..937d0a83f8fd 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -328,6 +328,8 @@ static const struct {
int num_i2c_devs;
const struct spi_board_info *spi_devs;
int num_spi_devs;
+
+ struct gpiod_lookup_table *gpiod_table;
} gf_mods[] = {
{ .id = 0x01, .rev = 0xff, .name = "1250-EV1 Springbank" },
{ .id = 0x02, .rev = 0xff, .name = "1251-EV1 Jura" },
@@ -362,13 +364,16 @@ static const struct {
.i2c_devs = wm1255_devs, .num_i2c_devs = ARRAY_SIZE(wm1255_devs) },
{ .id = 0x3c, .rev = 0xff, .name = "1273-EV1 Longmorn" },
{ .id = 0x3d, .rev = 0xff, .name = "1277-EV1 Littlemill",
- .i2c_devs = wm1277_devs, .num_i2c_devs = ARRAY_SIZE(wm1277_devs) },
+ .i2c_devs = wm1277_devs, .num_i2c_devs = ARRAY_SIZE(wm1277_devs),
+ .gpiod_table = &wm8994_gpiod_table },
{ .id = 0x3e, .rev = 0, .name = "WM5102-6271-EV1-CS127 Amrut",
.spi_devs = wm5102_reva_spi_devs,
- .num_spi_devs = ARRAY_SIZE(wm5102_reva_spi_devs) },
+ .num_spi_devs = ARRAY_SIZE(wm5102_reva_spi_devs),
+ .gpiod_table = &wm5102_reva_gpiod_table },
{ .id = 0x3e, .rev = -1, .name = "WM5102-6271-EV1-CS127 Amrut",
.spi_devs = wm5102_spi_devs,
- .num_spi_devs = ARRAY_SIZE(wm5102_spi_devs) },
+ .num_spi_devs = ARRAY_SIZE(wm5102_spi_devs),
+ .gpiod_table = &wm5102_gpiod_table },
{ .id = 0x3f, .rev = -1, .name = "WM2200-6271-CS90-M-REV1",
.i2c_devs = wm2200_i2c, .num_i2c_devs = ARRAY_SIZE(wm2200_i2c) },
};
@@ -408,6 +413,9 @@ static int wlf_gf_module_probe(struct i2c_client *i2c,
spi_register_board_info(gf_mods[i].spi_devs,
gf_mods[i].num_spi_devs);
+
+ if (gf_mods[i].gpiod_table)
+ gpiod_add_lookup_table(gf_mods[i].gpiod_table);
} else {
dev_warn(&i2c->dev, "Unknown module ID 0x%x revision %d\n",
id, rev + 1);
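
The Cragganmore hunk above wires the codec GPIOs through gpiod lookup tables that get registered once the fitted module has been identified. A rough sketch of what such a board-file table looks like; the chip label, hardware offset, consumer name and dev_id below are placeholders, not the actual wm5102/wm8994 entries:

#include <linux/gpio/machine.h>

/* Placeholder table; real boards key this on the probed module ID. */
static struct gpiod_lookup_table example_codec_gpiod_table = {
	.dev_id = "1-001a",	/* hypothetical I2C client name */
	.table = {
		/* GPIO_LOOKUP(chip label, hw offset, consumer name, flags) */
		GPIO_LOOKUP("GPIOA", 4, "reset", GPIO_ACTIVE_HIGH),
		{ },
	},
};

/* Registered from probe once the module has been identified:
 *	gpiod_add_lookup_table(&example_codec_gpiod_table);
 */
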
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index fa5cf4744992..3b19296f5062 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -8,7 +8,7 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/*
* Because of the wide memory address space between physical RAM banks on the
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index eb60a71cf125..a671e4c994cf 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -21,7 +21,7 @@
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/hardware/sa1111.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/hardware.h>
#include <mach/assabet.h>
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
index 8c2a20591524..e84599dd96f1 100644
--- a/arch/arm/mach-shmobile/pm-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -72,6 +72,7 @@ void __init rcar_gen2_pm_init(void)
}
error = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (error) {
pr_err("Failed to get smp-sram address: %d\n", error);
return;
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index dc526ef2e9b3..ee949255ced3 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * R-Car Generation 2 da9063/da9210 regulator quirk
+ * R-Car Generation 2 da9063(L)/da9210 regulator quirk
*
* Certain Gen2 development boards have an da9063 and one or more da9210
* regulators. All of these regulators have their interrupt request lines
@@ -65,6 +65,7 @@ static struct i2c_msg da9210_msg = {
static const struct of_device_id rcar_gen2_quirk_match[] = {
{ .compatible = "dlg,da9063", .data = &da9063_msg },
+ { .compatible = "dlg,da9063l", .data = &da9063_msg },
{ .compatible = "dlg,da9210", .data = &da9210_msg },
{},
};
@@ -147,6 +148,7 @@ static int __init rcar_gen2_regulator_quirk(void)
if (!of_machine_is_compatible("renesas,koelsch") &&
!of_machine_is_compatible("renesas,lager") &&
+ !of_machine_is_compatible("renesas,porter") &&
!of_machine_is_compatible("renesas,stout") &&
!of_machine_is_compatible("renesas,gose"))
return -ENODEV;
@@ -210,7 +212,7 @@ static int __init rcar_gen2_regulator_quirk(void)
goto err_free;
}
- pr_info("IRQ2 is asserted, installing da9063/da9210 regulator quirk\n");
+ pr_info("IRQ2 is asserted, installing regulator quirk\n");
bus_register_notifier(&i2c_bus_type, &regulator_quirk_nb);
return 0;
diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig
index 713c068b953f..651bdf4f9c9e 100644
--- a/arch/arm/mach-stm32/Kconfig
+++ b/arch/arm/mach-stm32/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_STM32
select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
select ARM_GIC if ARCH_MULTI_V7
select ARM_PSCI if ARCH_MULTI_V7
+ select ARM_AMBA
select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_STM32
select PINCTRL
@@ -18,22 +19,18 @@ if ARM_SINGLE_ARMV7M
config MACH_STM32F429
bool "STMicroelectronics STM32F429"
- select ARM_AMBA
default y
config MACH_STM32F469
bool "STMicroelectronics STM32F469"
- select ARM_AMBA
default y
config MACH_STM32F746
bool "STMicroelectronics STM32F746"
- select ARM_AMBA
default y
config MACH_STM32F769
bool "STMicroelectronics STM32F769"
- select ARM_AMBA
default y
config MACH_STM32H743
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
index b4037b603897..239084cf8192 100644
--- a/arch/arm/mach-sunxi/mc_smp.c
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -89,6 +89,7 @@ static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
{
struct device_node *node;
int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
+ bool is_compatible;
node = of_cpu_device_node_get(cpu);
@@ -107,7 +108,9 @@ static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
return false;
}
- return of_device_is_compatible(node, "arm,cortex-a15");
+ is_compatible = of_device_is_compatible(node, "arm,cortex-a15");
+ of_node_put(node);
+ return is_compatible;
}
static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
diff --git a/arch/arm/mach-sunxi/platsmp.c b/arch/arm/mach-sunxi/platsmp.c
index 8fb5088464db..bdde9ef3aaa9 100644
--- a/arch/arm/mach-sunxi/platsmp.c
+++ b/arch/arm/mach-sunxi/platsmp.c
@@ -50,6 +50,7 @@ static void __init sun6i_smp_prepare_cpus(unsigned int max_cpus)
}
prcm_membase = of_iomap(node, 0);
+ of_node_put(node);
if (!prcm_membase) {
pr_err("Couldn't map A31 PRCM registers\n");
return;
@@ -63,6 +64,7 @@ static void __init sun6i_smp_prepare_cpus(unsigned int max_cpus)
}
cpucfg_membase = of_iomap(node, 0);
+ of_node_put(node);
if (!cpucfg_membase)
pr_err("Couldn't map A31 CPU config registers\n");
@@ -133,6 +135,7 @@ static void __init sun8i_smp_prepare_cpus(unsigned int max_cpus)
}
prcm_membase = of_iomap(node, 0);
+ of_node_put(node);
if (!prcm_membase) {
pr_err("Couldn't map A23 PRCM registers\n");
return;
@@ -146,6 +149,7 @@ static void __init sun8i_smp_prepare_cpus(unsigned int max_cpus)
}
cpucfg_membase = of_iomap(node, 0);
+ of_node_put(node);
if (!cpucfg_membase)
pr_err("Couldn't map A23 CPU config registers\n");
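
The rockchip, shmobile and sunxi hunks above all add an of_node_put() once the node has been mapped or its resource read, because of_iomap() and of_address_to_resource() do not consume the reference taken by the lookup. A minimal sketch of that rule, using a made-up compatible string and helper name:

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *example_map_ctrl_regs(void)
{
	struct device_node *np;
	void __iomem *base;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-ctrl");
	if (!np)
		return NULL;

	base = of_iomap(np, 0);	/* does not take over the node reference */
	of_node_put(np);	/* so drop it here, on both paths */

	return base;
}
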
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 7f3b83e0d324..3a06ba263e34 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -2,7 +2,7 @@
menuconfig ARCH_TEGRA
bool "NVIDIA Tegra"
depends on ARCH_MULTI_V7
- select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
+ select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GIC
select CLKSRC_MMIO
@@ -10,8 +10,8 @@ menuconfig ARCH_TEGRA
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select PINCTRL
+ select PM
select PM_OPP
- select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
select SOC_BUS
select ZONE_DMA if ARM_LPAE
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index e3fbcfedf845..43c695d83f03 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -21,6 +21,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/firmware/trusted_foundations.h>
+
#include <asm/cpuidle.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
@@ -46,7 +48,7 @@ static int tegra114_idle_power_down(struct cpuidle_device *dev,
tegra_set_cpu_in_lp2();
cpu_pm_enter();
- call_firmware_op(prepare_idle);
+ call_firmware_op(prepare_idle, TF_PM_MODE_LP2_NOFLUSH_L2);
/* Do suspend by ourselves if the firmware does not implement it */
if (call_firmware_op(do_idle, 0) == -ENOSYS)
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 3f24addd7972..6620d61b5ec5 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -61,7 +61,8 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 5000,
.target_residency = 10000,
.power_usage = 0,
- .flags = CPUIDLE_FLAG_COUPLED,
+ .flags = CPUIDLE_FLAG_COUPLED |
+ CPUIDLE_FLAG_TIMER_STOP,
.name = "powered-down",
.desc = "CPU power gated",
},
@@ -136,12 +137,8 @@ static bool tegra20_cpu_cluster_power_down(struct cpuidle_device *dev,
if (tegra20_reset_cpu_1() || !tegra_cpu_rail_off_ready())
return false;
- tick_broadcast_enter();
-
tegra_idle_lp2_last();
- tick_broadcast_exit();
-
if (cpu_online(1))
tegra20_wake_cpu1_from_reset();
@@ -153,14 +150,10 @@ static bool tegra20_idle_enter_lp2_cpu_1(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- tick_broadcast_enter();
-
cpu_suspend(0, tegra20_sleep_cpu_secondary_finish);
tegra20_cpu_clear_resettable();
- tick_broadcast_exit();
-
return true;
}
#else
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index c1417361e10e..c8fe0447e3a9 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -56,6 +56,7 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 2000,
.target_residency = 2200,
.power_usage = 0,
+ .flags = CPUIDLE_FLAG_TIMER_STOP,
.name = "powered-down",
.desc = "CPU power gated",
},
@@ -76,12 +77,8 @@ static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
return false;
}
- tick_broadcast_enter();
-
tegra_idle_lp2_last();
- tick_broadcast_exit();
-
return true;
}
@@ -90,14 +87,10 @@ static bool tegra30_cpu_core_power_down(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- tick_broadcast_enter();
-
smp_wmb();
cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
- tick_broadcast_exit();
-
return true;
}
#else
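
In the two cpuidle hunks above, the hand-rolled tick_broadcast_enter()/tick_broadcast_exit() pairs are replaced by CPUIDLE_FLAG_TIMER_STOP, which makes the cpuidle core switch to the broadcast timer around the state's enter callback. A minimal sketch of a state declared that way; the driver and callback names are invented for illustration and the enter path is stubbed out:

#include <linux/cpuidle.h>
#include <linux/module.h>

static int example_powered_down_enter(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
{
	/* The SoC-specific power-gating sequence would go here. */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.owner = THIS_MODULE,
	.states[0] = {
		.enter			= example_powered_down_enter,
		.exit_latency		= 5000,
		.target_residency	= 10000,
		/* Core handles the broadcast timer; no tick_broadcast_*() calls. */
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.name			= "powered-down",
		.desc			= "CPU power gated",
	},
	.state_count = 1,
};
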
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 9bc291e76887..ba61db7fe533 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -20,7 +20,7 @@
#define __MACH_TEGRA_IOMAP_H
#include <asm/pgtable.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#define TEGRA_IRAM_BASE 0x40000000
#define TEGRA_IRAM_SIZE SZ_256K
@@ -79,24 +79,15 @@
#define TEGRA_PMC_BASE 0x7000E400
#define TEGRA_PMC_SIZE SZ_256
-#define TEGRA_MC_BASE 0x7000F000
-#define TEGRA_MC_SIZE SZ_1K
-
#define TEGRA_EMC_BASE 0x7000F400
#define TEGRA_EMC_SIZE SZ_1K
-#define TEGRA114_MC_BASE 0x70019000
-#define TEGRA114_MC_SIZE SZ_4K
-
#define TEGRA_EMC0_BASE 0x7001A000
#define TEGRA_EMC0_SIZE SZ_2K
#define TEGRA_EMC1_BASE 0x7001A800
#define TEGRA_EMC1_SIZE SZ_2K
-#define TEGRA124_MC_BASE 0x70019000
-#define TEGRA124_MC_SIZE SZ_4K
-
#define TEGRA124_EMC_BASE 0x7001B000
#define TEGRA124_EMC_SIZE SZ_2K
diff --git a/arch/arm/mach-tegra/irammap.h b/arch/arm/mach-tegra/irammap.h
index e32e1742c9a1..6a7bb887585e 100644
--- a/arch/arm/mach-tegra/irammap.h
+++ b/arch/arm/mach-tegra/irammap.h
@@ -17,7 +17,7 @@
#ifndef __MACH_TEGRA_IRAMMAP_H
#define __MACH_TEGRA_IRAMMAP_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/* The first 1K of IRAM is permanently reserved for the CPU reset handler */
#define TEGRA_IRAM_RESET_HANDLER_OFFSET 0
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 1ad5719779b0..1b0ade06f204 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -27,12 +27,15 @@
#include <linux/spinlock.h>
#include <linux/suspend.h>
+#include <linux/firmware/trusted_foundations.h>
+
#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pm.h>
#include <soc/tegra/pmc.h>
#include <asm/cacheflush.h>
+#include <asm/firmware.h>
#include <asm/idmap.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
@@ -159,6 +162,28 @@ int tegra_cpu_do_idle(void)
static int tegra_sleep_cpu(unsigned long v2p)
{
+ /*
+ * Disabling the L2 cache through the kernel API is only allowed when
+ * all secondary CPUs are offline. The cache has to be disabled with
+ * the MMU on if cache maintenance is done via the Trusted Foundations
+ * firmware. Note that CPUIDLE never enters powergate on Tegra30 if
+ * any secondary CPU is online, and this is the LP2-idle code path
+ * only for Tegra20/30.
+ */
+ if (trusted_foundations_registered())
+ outer_disable();
+
+ /*
+ * Note that besides setting up the CPU reset vector, this firmware
+ * call may also do the following, depending on the FW version:
+ * 1) Disable the L2. This doesn't matter, since we have already
+ * disabled the L2 above.
+ * 2) Disable the D-cache. This needs to be taken into account, in
+ * particular by tegra_disable_clean_inv_dcache(), which must avoid
+ * re-disabling it.
+ */
+ call_firmware_op(prepare_idle, TF_PM_MODE_LP2);
+
setup_mm_for_reboot();
tegra_sleep_cpu_finish(v2p);
@@ -197,6 +222,14 @@ void tegra_idle_lp2_last(void)
cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);
+ /*
+ * Resume the L2 cache if it wasn't re-enabled early during resume,
+ * which is the case on Tegra30, where the cache has to be re-enabled
+ * via a firmware call. In other cases the cache is already enabled,
+ * so re-enabling is a no-op. This is always a no-op on Tegra114+.
+ */
+ outer_resume();
+
restore_cpu_complex();
cpu_cluster_pm_exit();
}
@@ -215,6 +248,15 @@ enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
static int tegra_sleep_core(unsigned long v2p)
{
+ /*
+ * The cache has to be disabled with the MMU on if cache maintenance is done
+ * via Trusted Foundations firmware. This is a no-op on Tegra114+.
+ */
+ if (trusted_foundations_registered())
+ outer_disable();
+
+ call_firmware_op(prepare_idle, TF_PM_MODE_LP1);
+
setup_mm_for_reboot();
tegra_sleep_core_finish(v2p);
@@ -342,6 +384,14 @@ static int tegra_suspend_enter(suspend_state_t state)
cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);
+ /*
+ * Resume the L2 cache if it wasn't re-enabled early during resume,
+ * which is the case on Tegra30, where the cache has to be re-enabled
+ * via a firmware call. In other cases the cache is already enabled,
+ * so re-enabling is a no-op.
+ */
+ outer_resume();
+
switch (mode) {
case TEGRA_SUSPEND_LP1:
tegra_suspend_exit_lp1();
diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
index e22ccf87eded..cd94d7c41fc0 100644
--- a/arch/arm/mach-tegra/reset-handler.S
+++ b/arch/arm/mach-tegra/reset-handler.S
@@ -20,6 +20,7 @@
#include <soc/tegra/flowctrl.h>
#include <soc/tegra/fuse.h>
+#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
@@ -29,8 +30,6 @@
#define PMC_SCRATCH41 0x140
-#define RESET_DATA(x) ((TEGRA_RESET_##x)*4)
-
#ifdef CONFIG_PM_SLEEP
/*
* tegra_resume
@@ -78,6 +77,7 @@ ENTRY(tegra_resume)
orr r1, r1, #1
str r1, [r0]
#endif
+ bl tegra_resume_trusted_foundations
#ifdef CONFIG_CACHE_L2X0
/* L2 cache resume & re-enable */
@@ -90,6 +90,30 @@ end_ca9_scu_l2_resume:
b cpu_resume
ENDPROC(tegra_resume)
+
+/*
+ * tegra_resume_trusted_foundations
+ *
+ * Trusted Foundations firmware initialization.
+ *
+ * Doesn't return if the firmware is present.
+ * Corrupted registers: r1, r2
+ */
+ENTRY(tegra_resume_trusted_foundations)
+ /* Check whether the Trusted Foundations firmware is present. */
+ mov32 r2, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
+ ldr r1, =__tegra_cpu_reset_handler_data_offset + \
+ RESET_DATA(TF_PRESENT)
+ ldr r1, [r2, r1]
+ cmp r1, #0
+ reteq lr
+
+ .arch_extension sec
+ /* First call after suspend wakes firmware. No arguments required. */
+ smc #0
+
+ b cpu_resume
+ENDPROC(tegra_resume_trusted_foundations)
#endif
.align L1_CACHE_SHIFT
@@ -115,12 +139,19 @@ ENTRY(__tegra_cpu_reset_handler_start)
* must be position-independent.
*/
+ .arm
.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler)
cpsid aif, 0x13 @ SVC mode, interrupts disabled
tegra_get_soc_id TEGRA_APB_MISC_BASE, r6
+
+ adr r12, __tegra_cpu_reset_handler_data
+ ldr r5, [r12, #RESET_DATA(TF_PRESENT)]
+ cmp r5, #0
+ bne after_errata
+
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
t20_check:
cmp r6, #TEGRA20
@@ -155,7 +186,6 @@ after_errata:
and r10, r10, #0x3 @ R10 = CPU number
mov r11, #1
mov r11, r11, lsl r10 @ R11 = CPU mask
- adr r12, __tegra_cpu_reset_handler_data
#ifdef CONFIG_SMP
/* Does the OS know about this CPU? */
@@ -169,10 +199,9 @@ after_errata:
cmp r6, #TEGRA20
bne 1f
/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
- mov32 r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
mov r0, #CPU_NOT_RESETTABLE
cmp r10, #0
- strbne r0, [r5, #__tegra20_cpu1_resettable_status_offset]
+ strbne r0, [r12, #RESET_DATA(RESETTABLE_STATUS)]
1:
#endif
@@ -277,14 +306,13 @@ ENDPROC(__tegra_cpu_reset_handler)
.align L1_CACHE_SHIFT
.type __tegra_cpu_reset_handler_data, %object
.globl __tegra_cpu_reset_handler_data
+ .globl __tegra_cpu_reset_handler_data_offset
+ .equ __tegra_cpu_reset_handler_data_offset, \
+ . - __tegra_cpu_reset_handler_start
__tegra_cpu_reset_handler_data:
- .rept TEGRA_RESET_DATA_SIZE
- .long 0
+ .rept TEGRA_RESET_DATA_SIZE
+ .long 0
.endr
- .globl __tegra20_cpu1_resettable_status_offset
- .equ __tegra20_cpu1_resettable_status_offset, \
- . - __tegra_cpu_reset_handler_start
- .byte 0
.align L1_CACHE_SHIFT
ENTRY(__tegra_cpu_reset_handler_end)
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index dc558892753c..35dc5d419b6f 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -19,6 +19,8 @@
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/firmware/trusted_foundations.h>
+
#include <soc/tegra/fuse.h>
#include <asm/cacheflush.h>
@@ -89,6 +91,8 @@ static void __init tegra_cpu_reset_handler_enable(void)
void __init tegra_cpu_reset_handler_init(void)
{
+ __tegra_cpu_reset_handler_data[TEGRA_RESET_TF_PRESENT] =
+ trusted_foundations_registered();
#ifdef CONFIG_SMP
__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
index 9c479c7925b8..db0e6b3097ab 100644
--- a/arch/arm/mach-tegra/reset.h
+++ b/arch/arm/mach-tegra/reset.h
@@ -25,7 +25,11 @@
#define TEGRA_RESET_STARTUP_SECONDARY 3
#define TEGRA_RESET_STARTUP_LP2 4
#define TEGRA_RESET_STARTUP_LP1 5
-#define TEGRA_RESET_DATA_SIZE 6
+#define TEGRA_RESET_RESETTABLE_STATUS 6
+#define TEGRA_RESET_TF_PRESENT 7
+#define TEGRA_RESET_DATA_SIZE 8
+
+#define RESET_DATA(x) ((TEGRA_RESET_##x)*4)
#ifndef __ASSEMBLY__
@@ -49,7 +53,8 @@ void __tegra_cpu_reset_handler_end(void);
(u32)__tegra_cpu_reset_handler_start)))
#define tegra20_cpu1_resettable_status \
(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
- (u32)__tegra20_cpu1_resettable_status_offset))
+ ((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_RESETTABLE_STATUS] - \
+ (u32)__tegra_cpu_reset_handler_start)))
#endif
#define tegra_cpu_reset_handler_offset \
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index dedeebfccc55..50d51d3465f6 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -28,6 +28,7 @@
#include <asm/cache.h>
#include "irammap.h"
+#include "reset.h"
#include "sleep.h"
#define EMC_CFG 0xc
@@ -53,6 +54,9 @@
#define APB_MISC_XM2CFGCPADCTRL2 0x8e4
#define APB_MISC_XM2CFGDPADCTRL2 0x8e8
+#define __tegra20_cpu1_resettable_status_offset \
+ (__tegra_cpu_reset_handler_data_offset + RESET_DATA(RESETTABLE_STATUS))
+
.macro pll_enable, rd, r_car_base, pll_base
ldr \rd, [\r_car_base, #\pll_base]
tst \rd, #(1 << 30)
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index d0b4c486ddbf..7727e005c30e 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -44,8 +44,6 @@
#define EMC_XM2VTTGENPADCTRL 0x310
#define EMC_XM2VTTGENPADCTRL2 0x314
-#define MC_EMEM_ARB_CFG 0x90
-
#define PMC_CTRL 0x0
#define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */
@@ -420,22 +418,6 @@ _pll_m_c_x_done:
movweq r0, #:lower16:TEGRA124_EMC_BASE
movteq r0, #:upper16:TEGRA124_EMC_BASE
- cmp r10, #TEGRA30
- moveq r2, #0x20
- movweq r4, #:lower16:TEGRA_MC_BASE
- movteq r4, #:upper16:TEGRA_MC_BASE
- cmp r10, #TEGRA114
- moveq r2, #0x34
- movweq r4, #:lower16:TEGRA114_MC_BASE
- movteq r4, #:upper16:TEGRA114_MC_BASE
- cmp r10, #TEGRA124
- moveq r2, #0x20
- movweq r4, #:lower16:TEGRA124_MC_BASE
- movteq r4, #:upper16:TEGRA124_MC_BASE
-
- ldr r1, [r5, r2] @ restore MC_EMEM_ARB_CFG
- str r1, [r4, #MC_EMEM_ARB_CFG]
-
exit_self_refresh:
ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL
str r1, [r0, #EMC_XM2VTTGENPADCTRL]
@@ -564,7 +546,6 @@ tegra30_sdram_pad_address:
.word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
.word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
.word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
- .word TEGRA_MC_BASE + MC_EMEM_ARB_CFG @0x20
tegra30_sdram_pad_address_end:
tegra114_sdram_pad_address:
@@ -581,7 +562,6 @@ tegra114_sdram_pad_address:
.word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c
.word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30
- .word TEGRA114_MC_BASE + MC_EMEM_ARB_CFG @0x34
tegra114_sdram_pad_adress_end:
tegra124_sdram_pad_address:
@@ -593,7 +573,6 @@ tegra124_sdram_pad_address:
.word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14
.word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18
.word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c
- .word TEGRA124_MC_BASE + MC_EMEM_ARB_CFG @0x20
tegra124_sdram_pad_address_end:
tegra30_sdram_pad_size:
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 5e3496753df1..1735ded5a812 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -49,8 +49,9 @@ ENTRY(tegra_disable_clean_inv_dcache)
/* Disable the D-cache */
mrc p15, 0, r2, c1, c0, 0
+ tst r2, #CR_C @ see tegra_sleep_cpu()
bic r2, r2, #CR_C
- mcr p15, 0, r2, c1, c0, 0
+ mcrne p15, 0, r2, c1, c0, 0
isb
/* Flush the D-cache */
@@ -132,10 +133,13 @@ ENTRY(tegra_shut_off_mmu)
#ifdef CONFIG_CACHE_L2X0
/* Disable L2 cache */
check_cpu_part_num 0xc09, r9, r10
- movweq r2, #:lower16:(TEGRA_ARM_PERIF_BASE + 0x3000)
- movteq r2, #:upper16:(TEGRA_ARM_PERIF_BASE + 0x3000)
- moveq r3, #0
- streq r3, [r2, #L2X0_CTRL]
+ retne r0
+
+ mov32 r2, TEGRA_ARM_PERIF_BASE + 0x3000
+ ldr r3, [r2, #L2X0_CTRL]
+ tst r3, #L2X0_CTRL_EN @ see tegra_sleep_cpu()
+ mov r3, #0
+ strne r3, [r2, #L2X0_CTRL]
#endif
ret r0
ENDPROC(tegra_shut_off_mmu)
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index f9587be48235..3e88f67dd521 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -35,15 +35,17 @@
#include <linux/sys_soc.h>
#include <linux/usb/tegra_usb_phy.h>
+#include <linux/firmware/trusted_foundations.h>
+
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
+#include <asm/firmware.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
-#include <asm/trusted_foundations.h>
#include "board.h"
#include "common.h"
@@ -74,6 +76,7 @@ static void __init tegra_init_early(void)
{
of_register_trusted_foundations();
tegra_cpu_reset_handler_init();
+ call_firmware_op(l2x0_init);
}
static void __init tegra_dt_init_irq(void)
diff --git a/arch/arm/mach-u300/regulator.c b/arch/arm/mach-u300/regulator.c
index 595b574c2c50..96ec72bd3928 100644
--- a/arch/arm/mach-u300/regulator.c
+++ b/arch/arm/mach-u300/regulator.c
@@ -130,3 +130,5 @@ static int __init u300_init_boardpower(void)
}
device_initcall(u300_init_boardpower);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Linus Walleij");
diff --git a/arch/arm/mach-w90x900/include/mach/hardware.h b/arch/arm/mach-w90x900/include/mach/hardware.h
index fe3c6265a466..2e6555df538e 100644
--- a/arch/arm/mach-w90x900/include/mach/hardware.h
+++ b/arch/arm/mach-w90x900/include/mach/hardware.h
@@ -18,7 +18,7 @@
#ifndef __ASM_ARCH_HARDWARE_H
#define __ASM_ARCH_HARDWARE_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/map.h>
#endif /* __ASM_ARCH_HARDWARE_H */
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 6aba9ebf8041..7f634eaeaf10 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -15,6 +15,7 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/platform_device.h>
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b54f8f8def36..e376883ab35b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -133,7 +133,7 @@ static const char *usermode_action[] = {
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
- seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
+ seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc);
seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
seq_printf(m, "Half:\t\t%lu\n", ai_half);
seq_printf(m, "Word:\t\t%lu\n", ai_word);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 43f46aa7ef33..0a75058c11f3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1577,31 +1577,21 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long off = vma->vm_pgoff;
+ int err;
if (!pages)
return -ENXIO;
- if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+ if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
- pages += off;
-
- do {
- int ret = vm_insert_page(vma, uaddr, *pages++);
- if (ret) {
- pr_err("Remapping memory failed: %d\n", ret);
- return ret;
- }
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
+ err = vm_map_pages(vma, pages, nr_pages);
+ if (err)
+ pr_err("Remapping memory failed: %d\n", err);
- return 0;
+ return err;
}
static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
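The hunk above drops the open-coded vm_insert_page() loop in favour of vm_map_pages(), which performs the vm_pgoff and size checks itself. A minimal sketch of a driver mmap handler built on the same helper (struct my_dev and its fields are hypothetical, not part of this patch):

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state: an array of pre-allocated pages. */
struct my_dev {
	struct page **pages;
	unsigned long nr_pages;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *dev = file->private_data;

	/*
	 * vm_map_pages() inserts dev->pages into the VMA starting at
	 * vma->vm_pgoff and returns -ENXIO if the requested window does
	 * not fit -- the bookkeeping the removed loop did by hand.
	 */
	return vm_map_pages(vma, dev->pages, dev->nr_pages);
}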
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c2daabbe0af0..be0b42937888 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -182,21 +182,6 @@ int pfn_valid(unsigned long pfn)
EXPORT_SYMBOL(pfn_valid);
#endif
-#ifndef CONFIG_SPARSEMEM
-static void __init arm_memory_present(void)
-{
-}
-#else
-static void __init arm_memory_present(void)
-{
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg)
- memory_present(0, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
-}
-#endif
-
static bool arm_memblock_steal_permitted = true;
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
@@ -293,7 +278,7 @@ void __init bootmem_init(void)
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
*/
- arm_memory_present();
+ memblocks_present();
/*
* sparse_init() needs the bootmem allocator up and running.
@@ -695,27 +680,14 @@ void free_initmem(void)
}
#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
- poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
- }
-}
+ if (start == initrd_start)
+ start = round_down(start, PAGE_SIZE);
+ if (end == initrd_end)
+ end = round_up(end, PAGE_SIZE);
-static int __init keepinitrd_setup(char *__unused)
-{
- keep_initrd = 1;
- return 1;
+ poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
-
-__setup("keepinitrd", keepinitrd_setup);
#endif
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index 1365e8650843..ee34c76e6624 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -147,7 +147,7 @@ void float_raise(signed char flags)
#ifdef CONFIG_DEBUG_USER
if (flags & debug)
printk(KERN_DEBUG
- "NWFPE: %s[%d] takes exception %08x at %pf from %08lx\n",
+ "NWFPE: %s[%d] takes exception %08x at %ps from %08lx\n",
current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()->ARM_pc);
#endif
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index d4012d6c0dcb..5ca4c5fd627a 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1449,7 +1449,6 @@ static void __exit omap_system_dma_exit(void)
MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
/*
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index f51919974183..bf25f780c1c9 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -183,18 +183,12 @@ static int pxa_ssp_probe(struct platform_device *pdev)
static int pxa_ssp_remove(struct platform_device *pdev)
{
- struct resource *res;
struct ssp_device *ssp;
ssp = platform_get_drvdata(pdev);
if (ssp == NULL)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- clk_put(ssp->clk);
-
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 0393917eaa57..aaf479a9e92d 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -441,3 +441,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
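The six syscalls added above form the new mount API. A hedged userspace sketch exercising three of them with the raw numbers from this table; the FSCONFIG_CMD_CREATE value is an assumption taken from the uapi headers of this release, and no glibc wrappers are assumed:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Numbers as listed in arch/arm/tools/syscall.tbl above. */
#define NR_fsopen   430
#define NR_fsconfig 431
#define NR_fsmount  432

/* Assumed from <linux/mount.h>: create the superblock. */
#define FSCONFIG_CMD_CREATE 6

int main(void)
{
	long fsfd, mntfd;

	fsfd = syscall(NR_fsopen, "tmpfs", 0);		/* open a filesystem context */
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}

	if (syscall(NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig");
		return 1;
	}

	mntfd = syscall(NR_fsmount, fsfd, 0, 0);	/* detached mount object */
	printf("fsmount() returned fd %ld\n", mntfd);
	return 0;
}

Running this needs a kernel with these syscalls wired up and CAP_SYS_ADMIN; the detached mount fd would normally be attached with move_mount() afterwards.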
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index f4efff9d3afb..fadf554d9391 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -10,12 +10,12 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
ccflags-y += -DDISABLE_BRANCH_PROFILING
-VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
-VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
-VDSO_LDFLAGS += -nostdlib -shared
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
+ldflags-y = -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
+ -z max-page-size=4096 -z common-page-size=4096 \
+ -nostdlib -shared \
+ $(call ld-option, --hash-style=sysv) \
+ $(call ld-option, --build-id) \
+ -T
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
@@ -37,8 +37,8 @@ KCOV_INSTRUMENT := n
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file
-$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
+$(obj)/vdso.so.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,ld)
$(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/vdsomunge FORCE
$(call if_changed,vdsomunge)
@@ -48,11 +48,6 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
-# Actual build commands
-quiet_cmd_vdsold = VDSO $@
- cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
- -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-
quiet_cmd_vdsomunge = MUNGE $@
cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index a9dd619c6c29..7bdbf5d5c47d 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -18,9 +18,9 @@
#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h>
-#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/bug.h>
+#include <asm/cp15.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>
@@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
u64 cycle_now;
u64 nsec;
- cycle_now = arch_counter_get_cntvct();
+ isb();
+ cycle_now = read_sysreg(CNTVCT);
cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
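The change above open-codes the virtual counter read so the vDSO only depends on <asm/cp15.h>. Taken in isolation, the replacement is roughly equivalent to the helper below (a sketch; the isb() is the part that keeps the counter read ordered):

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cp15.h>

/* Sketch of the open-coded virtual counter read used in get_ns() above. */
static inline u64 vdso_read_cntvct(void)
{
	isb();				/* order the counter read against earlier code */
	return read_sysreg(CNTVCT);	/* CP15 64-bit virtual counter */
}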
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e70a49fc8dcd..da2a7044a124 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -70,8 +70,9 @@ unsigned long __pfn_to_mfn(unsigned long pfn)
entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (entry->pfn <= pfn &&
entry->pfn + entry->nr_pages > pfn) {
+ unsigned long mfn = entry->mfn + (pfn - entry->pfn);
read_unlock_irqrestore(&p2m_lock, irqflags);
- return entry->mfn + (pfn - entry->pfn);
+ return mfn;
}
if (pfn < entry->pfn)
n = n->rb_left;
@@ -156,6 +157,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
rc = xen_add_phys_to_mach_entry(p2m_entry);
if (rc < 0) {
write_unlock_irqrestore(&p2m_lock, irqflags);
+ kfree(p2m_entry);
return false;
}
write_unlock_irqrestore(&p2m_lock, irqflags);
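Both hunks above are lifetime fixes around p2m_lock: the first copies entry->mfn into a local before dropping the read lock (the entry may be freed once the lock is released), the second frees the entry that was never inserted. A generic sketch of the first pattern, with hypothetical names standing in for the rbtree types used in p2m.c:

#include <linux/spinlock.h>

struct table {
	rwlock_t lock;
};

struct entry {
	unsigned long value;
};

/* Hypothetical stand-in for the rbtree walk in __pfn_to_mfn(). */
static struct entry *find_entry(struct table *t, unsigned long key)
{
	return NULL;
}

static unsigned long lookup_value(struct table *t, unsigned long key)
{
	struct entry *e;
	unsigned long value = 0;
	unsigned long flags;

	read_lock_irqsave(&t->lock, flags);
	e = find_entry(t, key);
	if (e)
		value = e->value;	/* copy while the lock pins the entry */
	read_unlock_irqrestore(&t->lock, flags);

	return value;			/* never dereference 'e' after the unlock */
}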
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7e34b9eba5de..4780eb7af842 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -13,13 +13,15 @@ config ARM64
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_COHERENT_TO_PFN
select ARCH_HAS_DMA_MMAP_PGPROT
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+ select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
+ select ARCH_HAS_KEEPINITRD
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SETUP_DMA_OPS
@@ -58,6 +60,7 @@ config ARM64
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
+ select ARCH_KEEP_MEMBLOCK
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
@@ -90,6 +93,7 @@ config ARM64
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
@@ -148,8 +152,8 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_RCU_TABLE_FREE
- select HAVE_RCU_TABLE_INVALIDATE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
@@ -237,9 +241,6 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_BUG
def_bool y
depends on BUG
@@ -297,7 +298,7 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
config ARM64_WORKAROUND_CLEAN_CACHE
- def_bool n
+ bool
config ARM64_ERRATUM_826319
bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
@@ -464,26 +465,28 @@ config ARM64_ERRATUM_1024718
bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
default y
help
- This option adds work around for Arm Cortex-A55 Erratum 1024718.
+ This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
update of the hardware dirty bit when the DBM/AP bits are updated
- without a break-before-make. The work around is to disable the usage
+ without a break-before-make. The workaround is to disable the usage
of hardware DBM locally on the affected cores. CPUs not affected by
- erratum will continue to use the feature.
+ this erratum will continue to use the feature.
If unsure, say Y.
config ARM64_ERRATUM_1188873
- bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
+ depends on COMPAT
select ARM_ARCH_TIMER_OOL_WORKAROUND
help
- This option adds work arounds for ARM Cortex-A76 erratum 1188873
+ This option adds a workaround for ARM Cortex-A76/Neoverse-N1
+ erratum 1188873.
- Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
- register corruption when accessing the timer registers from
- AArch32 userspace.
+ Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
+ cause register corruption when accessing the timer registers
+ from AArch32 userspace.
If unsure, say Y.
@@ -491,7 +494,7 @@ config ARM64_ERRATUM_1165522
bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
default y
help
- This option adds work arounds for ARM Cortex-A76 erratum 1165522
+ This option adds a workaround for ARM Cortex-A76 erratum 1165522.
Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with
corrupted TLBs by speculating an AT instruction during a guest
@@ -504,7 +507,7 @@ config ARM64_ERRATUM_1286807
default y
select ARM64_WORKAROUND_REPEAT_TLBI
help
- This option adds workaround for ARM Cortex-A76 erratum 1286807
+ This option adds a workaround for ARM Cortex-A76 erratum 1286807.
On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
address for a cacheable mapping of a location is being
@@ -521,10 +524,10 @@ config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
help
- Enable workaround for erratum 22375, 24313.
+ Enable workaround for errata 22375 and 24313.
This implements two gicv3-its errata workarounds for ThunderX. Both
- with small impact affecting only ITS table allocation.
+ have a small impact, affecting only ITS table allocation.
erratum 22375: only alloc 8MB table size
erratum 24313: ignore memory access type
@@ -588,9 +591,6 @@ config QCOM_FALKOR_ERRATUM_1003
config ARM64_WORKAROUND_REPEAT_TLBI
bool
- help
- Enable the repeat TLBI workaround for Falkor erratum 1009 and
- Cortex-A76 erratum 1286807.
config QCOM_FALKOR_ERRATUM_1009
bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
@@ -626,7 +626,7 @@ config HISILICON_ERRATUM_161600802
bool "Hip07 161600802: Erroneous redistributor VLPI base"
default y
help
- The HiSilicon Hip07 SoC usees the wrong redistributor base
+ The HiSilicon Hip07 SoC uses the wrong redistributor base
when issued ITS commands such as VMOVP and VMAPP, and requires
a 128kB offset to be applied to the target address in these commands.
@@ -646,7 +646,7 @@ config FUJITSU_ERRATUM_010001
bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
default y
help
- This option adds workaround for Fujitsu-A64FX erratum E#010001.
+ This option adds a workaround for Fujitsu-A64FX erratum E#010001.
On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
accesses may cause undefined fault (Data abort, DFSC=0b111111).
This fault occurs under a specific hardware condition when a
@@ -657,7 +657,7 @@ config FUJITSU_ERRATUM_010001
case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1.
The workaround is to ensure these bits are clear in TCR_ELx.
- The workaround only affect the Fujitsu-A64FX.
+ The workaround only affects the Fujitsu-A64FX.
If unsure, say Y.
@@ -889,6 +889,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+ def_bool y if PGTABLE_LEVELS > 2
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
---help---
@@ -1078,9 +1081,65 @@ config RODATA_FULL_DEFAULT_ENABLED
This requires the linear region to be mapped down to pages,
which may adversely affect performance in some cases.
+config ARM64_SW_TTBR0_PAN
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+ help
+ Enabling this option prevents the kernel from accessing
+ user-space memory directly by pointing TTBR0_EL1 to a reserved
+ zeroed area and reserved ASID. The user access routines
+ restore the valid TTBR0_EL1 temporarily.
+
+menuconfig COMPAT
+ bool "Kernel support for 32-bit EL0"
+ depends on ARM64_4K_PAGES || EXPERT
+ select COMPAT_BINFMT_ELF if BINFMT_ELF
+ select HAVE_UID16
+ select OLD_SIGSUSPEND3
+ select COMPAT_OLD_SIGACTION
+ help
+ This option enables support for a 32-bit EL0 running under a 64-bit
+ kernel at EL1. AArch32-specific components such as system calls,
+ the user helper functions, VFP support and the ptrace interface are
+ handled appropriately by the kernel.
+
+ If you use a page size other than 4KB (i.e., 16KB or 64KB), please be aware
+ that you will only be able to execute AArch32 binaries that were compiled
+ with page size aligned segments.
+
+ If you want to execute 32-bit userspace applications, say Y.
+
+if COMPAT
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers page for 32 bit applications"
+ default y
+ help
+ Warning: disabling this option may break 32-bit user programs.
+
+ Provide kuser helpers to compat tasks. The kernel provides
+ helper code to userspace in read only form at a fixed location
+ to allow userspace to be independent of the CPU type fitted to
+ the system. This permits binaries to be run on ARMv4 through
+ to ARMv8 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+ by ROP (return oriented programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will not function correctly.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
+
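As a concrete illustration of the fixed-address property the help text above warns about: 32-bit user code reaches the helpers through hard-wired vectors described in Documentation/arm/kernel_user_helpers.txt. A hedged sketch (the addresses come from that document, not from this patch, and only work on AArch32 with KUSER_HELPERS=y):

#include <stdio.h>

/* Fixed helper addresses documented in kernel_user_helpers.txt. */
typedef unsigned int (*kuser_get_tls_t)(void);
#define __kuser_get_tls		((kuser_get_tls_t)0xffff0fe0)
#define __kuser_helper_version	(*(int *)0xffff0ffc)

int main(void)
{
	/* Both accesses fault if the kuser helpers page is not mapped. */
	printf("kuser helper version: %d\n", __kuser_helper_version);
	printf("TLS value: %#x\n", __kuser_get_tls());
	return 0;
}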
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
- depends on COMPAT
depends on SYSCTL
help
Legacy software support may require certain instructions
@@ -1146,13 +1205,7 @@ config SETEND_EMULATION
If unsure, say Y
endif
-config ARM64_SW_TTBR0_PAN
- bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
- help
- Enabling this option prevents the kernel from accessing
- user-space memory directly by pointing TTBR0_EL1 to a reserved
- zeroed area and reserved ASID. The user access routines
- restore the valid TTBR0_EL1 temporarily.
+endif
menu "ARMv8.1 architectural features"
@@ -1288,6 +1341,7 @@ menu "ARMv8.3 architectural features"
config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
+ depends on !KVM || ARM64_VHE
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1301,8 +1355,9 @@ config ARM64_PTR_AUTH
context-switched along with the process.
The feature is detected at runtime. If the feature is not present in
- hardware it will not be advertised to userspace nor will it be
- enabled.
+ hardware it will not be advertised to userspace or KVM guests, nor will
+ it be enabled. However, KVM guests also require VHE mode, and hence the
+ CONFIG_ARM64_VHE=y option, to use this feature.
endmenu
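Since the help text above stresses that pointer authentication is detected at runtime, userspace that wants to know whether it is available can check the ELF hwcaps. A hedged sketch; the HWCAP_PACA/HWCAP_PACG bit values are assumptions taken from the arm64 uapi headers, not from this patch:

#include <stdio.h>
#include <sys/auxv.h>

/* Assumed values from arch/arm64/include/uapi/asm/hwcap.h. */
#ifndef HWCAP_PACA
#define HWCAP_PACA (1UL << 30)
#endif
#ifndef HWCAP_PACG
#define HWCAP_PACG (1UL << 31)
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("address authentication: %s\n",
	       (hwcaps & HWCAP_PACA) ? "yes" : "no");
	printf("generic authentication: %s\n",
	       (hwcaps & HWCAP_PACG) ? "yes" : "no");
	return 0;
}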
@@ -1318,6 +1373,9 @@ config ARM64_SVE
To enable use of this extension on CPUs that implement it, say Y.
+ On CPUs that support the SVE2 extensions, this option will enable
+ those too.
+
Note that for architectural reasons, firmware _must_ implement SVE
support when running on SVE capable hardware. The required support
is present in:
@@ -1351,7 +1409,7 @@ config ARM64_PSEUDO_NMI
help
Adds support for mimicking Non-Maskable Interrupts through the use of
GIC interrupt priority. This support requires version 3 or later of
- Arm GIC.
+ ARM GIC.
This high priority configuration for interrupts needs to be
explicitly enabled by setting the kernel parameter
@@ -1475,25 +1533,6 @@ config DMI
endmenu
-config COMPAT
- bool "Kernel support for 32-bit EL0"
- depends on ARM64_4K_PAGES || EXPERT
- select COMPAT_BINFMT_ELF if BINFMT_ELF
- select HAVE_UID16
- select OLD_SIGSUSPEND3
- select COMPAT_OLD_SIGACTION
- help
- This option enables support for a 32-bit EL0 running under a 64-bit
- kernel at EL1. AArch32-specific components such as system calls,
- the user helper functions, VFP support and the ptrace interface are
- handled appropriately by the kernel.
-
- If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
- that you will only be able to execute AArch32 binaries that were compiled
- with page size aligned segments.
-
- If you want to execute 32-bit userspace applications, say Y.
-
config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index b5ca9c50876d..42eca656faa8 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -7,6 +7,11 @@ config ARCH_ACTIONS
help
This enables support for the Actions Semiconductor S900 SoC family.
+config ARCH_AGILEX
+ bool "Intel's Agilex SoCFPGA Family"
+ help
+ This enables support for Intel's Agilex SoCFPGA Family.
+
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select ARCH_HAS_RESET_CONTROLLER
@@ -82,6 +87,11 @@ config ARCH_EXYNOS
config ARCH_K3
bool "Texas Instruments Inc. K3 multicore SoC architecture"
select PM_GENERIC_DOMAINS if PM
+ select MAILBOX
+ select TI_MESSAGE_MANAGER
+ select TI_SCI_PROTOCOL
+ select TI_SCI_INTR_IRQCHIP
+ select TI_SCI_INTA_IRQCHIP
help
This enables support for Texas Instruments' K3 multicore SoC
architecture.
@@ -210,6 +220,7 @@ config ARCH_SYNQUACER
config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"
select ARCH_HAS_RESET_CONTROLLER
+ select ARM_GIC_PM
select CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 5bc7533a12c7..f19b762c008d 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -13,6 +13,7 @@ subdir-y += cavium
subdir-y += exynos
subdir-y += freescale
subdir-y += hisilicon
+subdir-y += intel
subdir-y += lg
subdir-y += marvell
subdir-y += mediatek
diff --git a/arch/arm64/boot/dts/allwinner/Makefile b/arch/arm64/boot/dts/allwinner/Makefile
index 0b0917111099..f6db0611cb85 100644
--- a/arch/arm64/boot/dts/allwinner/Makefile
+++ b/arch/arm64/boot/dts/allwinner/Makefile
@@ -2,6 +2,7 @@
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-amarula-relic.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-bananapi-m64.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-nanopi-a64.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-oceanic-5205-5inmfd.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-olinuxino.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-orangepi-win.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-a64-pine64-lts.dtb
@@ -19,6 +20,8 @@ dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-pc2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-prime.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h5-orangepi-zero-plus2.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-beelink-gs1.dtb
+dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-orangepi-3.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-orangepi-lite2.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-orangepi-one-plus.dtb
dtb-$(CONFIG_ARCH_SUNXI) += sun50i-h6-pine-h64.dtb
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
index 6cb2b7f0c817..019ae09ea0fd 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
@@ -22,6 +22,41 @@
stdout-path = "serial0:115200n8";
};
+ i2c {
+ compatible = "i2c-gpio";
+ sda-gpios = <&pio 4 13 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&pio 4 12 GPIO_ACTIVE_HIGH>;
+ i2c-gpio,delay-us = <5>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ov5640: camera@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&csi_mclk_pin>;
+ clocks = <&ccu CLK_CSI_MCLK>;
+ clock-names = "xclk";
+
+ AVDD-supply = <&reg_aldo1>;
+ DOVDD-supply = <&reg_dldo3>;
+ DVDD-supply = <&reg_eldo3>;
+ reset-gpios = <&pio 4 14 GPIO_ACTIVE_LOW>; /* CSI-RST-R: PE14 */
+ powerdown-gpios = <&pio 4 15 GPIO_ACTIVE_HIGH>; /* CSI-STBY-R: PE15 */
+
+ port {
+ ov5640_ep: endpoint {
+ remote-endpoint = <&csi_ep>;
+ bus-width = <8>;
+ hsync-active = <1>; /* Active high */
+ vsync-active = <0>; /* Active low */
+ data-active = <1>; /* Active high */
+ pclk-sample = <1>; /* Rising */
+ };
+ };
+ };
+ };
+
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rtc 1>;
@@ -30,10 +65,40 @@
};
};
+&csi {
+ status = "okay";
+
+ port {
+ csi_ep: endpoint {
+ remote-endpoint = <&ov5640_ep>;
+ bus-width = <8>;
+ hsync-active = <1>; /* Active high */
+ vsync-active = <0>; /* Active low */
+ data-active = <1>; /* Active high */
+ pclk-sample = <1>; /* Rising */
+ };
+ };
+};
+
&ehci0 {
status = "okay";
};
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+
+ sensor@48 {
+ compatible = "st,stlm75";
+ reg = <0x48>;
+ };
+};
+
+&i2c0_pins {
+ bias-pull-up;
+};
+
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index 7793ebb5d2b8..0a56c0c23ba1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -104,7 +104,7 @@
};
&codec_analog {
- hpvcc-supply = <&reg_eldo1>;
+ cpvdd-supply = <&reg_eldo1>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts
new file mode 100644
index 000000000000..6a2154525d1e
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-oceanic-5205-5inmfd.dts
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Oceanic Systems (UK) Ltd.
+ * Copyright (C) 2019 Amarula Solutions B.V.
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+/dts-v1/;
+
+#include "sun50i-a64-sopine.dtsi"
+
+/ {
+ model = "Oceanic 5205 5inMFD";
+ compatible = "oceanic,5205-5inmfd", "allwinner,sun50i-a64";
+
+ aliases {
+ ethernet0 = &emac;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_dc1sw>;
+ allwinner,tx-delay-ps = <600>;
+ status = "okay";
+};
+
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&reg_dc1sw {
+ regulator-name = "vcc-phy";
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usbphy {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index c0b9cc7a6b3a..b7ac6374b178 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -80,7 +80,7 @@
};
&codec_analog {
- hpvcc-supply = <&reg_eldo1>;
+ cpvdd-supply = <&reg_eldo1>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index d22736a62481..2b6345db7dc0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -94,7 +94,7 @@
};
&codec_analog {
- hpvcc-supply = <&reg_eldo1>;
+ cpvdd-supply = <&reg_eldo1>;
status = "okay";
};
@@ -104,7 +104,6 @@
&ehci0 {
phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
@@ -151,7 +150,6 @@
&ohci0 {
phys = <&usbphy 0>;
- phy-names = "usb";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index d2651f284aa0..9d20e13f0c02 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -48,7 +48,7 @@
#include <dt-bindings/gpio/gpio.h>
&codec_analog {
- hpvcc-supply = <&reg_eldo1>;
+ cpvdd-supply = <&reg_eldo1>;
};
&mmc0 {
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index 7b7b14ba58e6..0ec46b969a75 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -21,6 +21,15 @@
serial0 = &uart0;
};
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 50000 0>;
+ power-supply = <&reg_dcdc1>;
+ brightness-levels = <0 5 7 10 14 20 28 40 56 80 112>;
+ default-brightness-level = <5>;
+ enable-gpios = <&pio 3 23 GPIO_ACTIVE_HIGH>; /* PD23 */
+ };
+
chosen {
stdout-path = "serial0:115200n8";
@@ -131,6 +140,10 @@
status = "okay";
};
+&pwm {
+ status = "okay";
+};
+
&r_rsb {
status = "okay";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index e628d063931b..8c5b521e6389 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -217,7 +217,7 @@
#size-cells = <1>;
ranges;
- de2@1000000 {
+ bus@1000000 {
compatible = "allwinner,sun50i-a64-de2";
reg = <0x1000000 0x400000>;
allwinner,sram = <&de2_sram 1>;
@@ -251,11 +251,19 @@
#size-cells = <0>;
mixer0_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
- mixer0_out_tcon0: endpoint {
+ mixer0_out_tcon0: endpoint@0 {
+ reg = <0>;
remote-endpoint = <&tcon0_in_mixer0>;
};
+
+ mixer0_out_tcon1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&tcon1_in_mixer0>;
+ };
};
};
};
@@ -274,9 +282,17 @@
#size-cells = <0>;
mixer1_out: port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <1>;
- mixer1_out_tcon1: endpoint {
+ mixer1_out_tcon0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&tcon0_in_mixer1>;
+ };
+
+ mixer1_out_tcon1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&tcon1_in_mixer1>;
};
};
@@ -338,6 +354,7 @@
clocks = <&ccu CLK_BUS_TCON0>, <&ccu CLK_TCON0>;
clock-names = "ahb", "tcon-ch0";
clock-output-names = "tcon-pixel-clock";
+ #clock-cells = <0>;
resets = <&ccu RST_BUS_TCON0>, <&ccu RST_BUS_LVDS>;
reset-names = "lcd", "lvds";
@@ -354,6 +371,11 @@
reg = <0>;
remote-endpoint = <&mixer0_out_tcon0>;
};
+
+ tcon0_in_mixer1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&mixer1_out_tcon0>;
+ };
};
tcon0_out: port@1 {
@@ -379,9 +401,17 @@
#size-cells = <0>;
tcon1_in: port@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0>;
- tcon1_in_mixer1: endpoint {
+ tcon1_in_mixer0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&mixer0_out_tcon1>;
+ };
+
+ tcon1_in_mixer1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&mixer1_out_tcon1>;
};
};
@@ -467,6 +497,7 @@
phys = <&usbphy 0>;
phy-names = "usb";
extcon = <&usbphy 0>;
+ dr_mode = "otg";
status = "disabled";
};
@@ -522,7 +553,6 @@
resets = <&ccu RST_BUS_OHCI1>,
<&ccu RST_BUS_EHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -534,7 +564,6 @@
<&ccu CLK_USB_OHCI1>;
resets = <&ccu RST_BUS_OHCI1>;
phys = <&usbphy 1>;
- phy-names = "usb";
status = "disabled";
};
@@ -553,7 +582,8 @@
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ccu 58>;
+ clocks = <&ccu 58>, <&osc24M>, <&rtc 0>;
+ clock-names = "apb", "hosc", "losc";
gpio-controller;
#gpio-cells = <3>;
interrupt-controller;
@@ -565,12 +595,18 @@
function = "csi";
};
- i2c0_pins: i2c0_pins {
+ /omit-if-no-ref/
+ csi_mclk_pin: csi-mclk-pin {
+ pins = "PE1";
+ function = "csi";
+ };
+
+ i2c0_pins: i2c0-pins {
pins = "PH0", "PH1";
function = "i2c0";
};
- i2c1_pins: i2c1_pins {
+ i2c1_pins: i2c1-pins {
pins = "PH2", "PH3";
function = "i2c1";
};
@@ -607,19 +643,19 @@
bias-pull-up;
};
- pwm_pin: pwm_pin {
+ pwm_pin: pwm-pin {
pins = "PD22";
function = "pwm";
};
- rmii_pins: rmii_pins {
+ rmii_pins: rmii-pins {
pins = "PD10", "PD11", "PD13", "PD14", "PD17",
"PD18", "PD19", "PD20", "PD22", "PD23";
function = "emac";
drive-strength = <40>;
};
- rgmii_pins: rgmii_pins {
+ rgmii_pins: rgmii-pins {
pins = "PD8", "PD9", "PD10", "PD11", "PD12",
"PD13", "PD15", "PD16", "PD17", "PD18",
"PD19", "PD20", "PD21", "PD22", "PD23";
@@ -627,17 +663,17 @@
drive-strength = <40>;
};
- spdif_tx_pin: spdif {
+ spdif_tx_pin: spdif-tx-pin {
pins = "PH8";
function = "spdif";
};
- spi0_pins: spi0 {
+ spi0_pins: spi0-pins {
pins = "PC0", "PC1", "PC2", "PC3";
function = "spi0";
};
- spi1_pins: spi1 {
+ spi1_pins: spi1-pins {
pins = "PD0", "PD1", "PD2", "PD3";
function = "spi1";
};
@@ -647,12 +683,12 @@
function = "uart0";
};
- uart1_pins: uart1_pins {
+ uart1_pins: uart1-pins {
pins = "PG6", "PG7";
function = "uart1";
};
- uart1_rts_cts_pins: uart1_rts_cts_pins {
+ uart1_rts_cts_pins: uart1-rts-cts-pins {
pins = "PG8", "PG9";
function = "uart1";
};
@@ -730,7 +766,6 @@
clocks = <&ccu CLK_BUS_CODEC>, <&ccu CLK_AC_DIG>;
clock-names = "apb", "mod";
resets = <&ccu RST_BUS_CODEC>;
- reset-names = "rst";
dmas = <&dma 15>, <&dma 15>;
dma-names = "rx", "tx";
status = "disabled";
@@ -1064,12 +1099,12 @@
function = "s_i2c";
};
- r_pwm_pin: pwm {
+ r_pwm_pin: r-pwm-pin {
pins = "PL10";
function = "s_pwm";
};
- r_rsb_pins: rsb {
+ r_rsb_pins: r-rsb-pins {
pins = "PL0", "PL1";
function = "s_rsb";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
index 85e7993a74e7..62409afbaf06 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5-devboard.dts
@@ -46,7 +46,6 @@
vdd_cpux: gpio-regulator {
compatible = "regulator-gpio";
- pinctrl-names = "default";
regulator-name = "vdd-cpux";
regulator-type = "voltage";
regulator-boot-on;
@@ -133,7 +132,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
index e4d50373c8ef..82f4b44d525f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-emlid-neutis-n5.dtsi
@@ -21,7 +21,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&pio 2 7 GPIO_ACTIVE_LOW>; /* PC7 */
post-power-on-delay-ms = <200>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
index 506e25ba028a..9887948d5c86 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo-plus2.dts
@@ -78,7 +78,6 @@
reg_gmac_3v3: gmac-3v3 {
compatible = "regulator-fixed";
- pinctrl-names = "default";
regulator-name = "gmac-3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
@@ -96,7 +95,6 @@
vdd_cpux: gpio-regulator {
compatible = "regulator-gpio";
- pinctrl-names = "default";
regulator-name = "vdd-cpux";
regulator-type = "voltage";
regulator-boot-on;
@@ -112,7 +110,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
post-power-on-delay-ms = <200>;
};
@@ -191,7 +188,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
index cc268a69786c..57a6f45036c1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-nanopi-neo2.dts
@@ -142,7 +142,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
index 3e0d5a9c096d..e126c1c9f05c 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-pc2.dts
@@ -180,7 +180,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -221,7 +221,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
index b75ca4d7d001..d9b3ed257088 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-prime.dts
@@ -187,7 +187,7 @@
&ir {
pinctrl-names = "default";
- pinctrl-0 = <&ir_pins_a>;
+ pinctrl-0 = <&r_ir_rx_pin>;
status = "okay";
};
@@ -224,7 +224,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index 1238de25a969..db6ea7b58999 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -127,7 +127,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
index 53c8c11620e0..dacf61399527 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts
@@ -78,7 +78,6 @@
wifi_pwrseq: wifi_pwrseq {
compatible = "mmc-pwrseq-simple";
- pinctrl-names = "default";
reset-gpios = <&pio 0 9 GPIO_ACTIVE_LOW>; /* PA9 */
post-power-on-delay-ms = <200>;
};
@@ -134,7 +133,7 @@
&uart0 {
pinctrl-names = "default";
- pinctrl-0 = <&uart0_pins_a>;
+ pinctrl-0 = <&uart0_pa_pins>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
index 96acafd3a852..f002a496d7cb 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
@@ -209,3 +209,7 @@
&rtc {
compatible = "allwinner,sun50i-h5-rtc";
};
+
+&sid {
+ compatible = "allwinner,sun50i-h5-sid";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
new file mode 100644
index 000000000000..0dc33c90dd60
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Clément Péron <peron.clem@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "sun50i-h6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Beelink GS1";
+ compatible = "azw,beelink-gs1", "allwinner,sun50i-h6";
+
+ aliases {
+ ethernet0 = &emac;
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ label = "beelink:white:power";
+ gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
+ default-state = "on";
+ };
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply directly from the DC jack */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+};
+
+&de {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&emac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ext_rgmii_pins>;
+ phy-mode = "rgmii";
+ phy-handle = <&ext_rgmii_phy>;
+ phy-supply = <&reg_aldo2>;
+ status = "okay";
+};
+
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
+&mdio {
+ ext_rgmii_phy: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_cldo1>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ status = "okay";
+};
+
+&mmc2 {
+ vmmc-supply = <&reg_cldo1>;
+ vqmmc-supply = <&reg_bldo2>;
+ non-removable;
+ cap-mmc-hw-reset;
+ bus-width = <8>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pd-supply = <&reg_cldo1>;
+ vcc-pg-supply = <&reg_aldo1>;
+};
+
+&r_i2c {
+ status = "okay";
+
+ axp805: pmic@36 {
+ compatible = "x-powers,axp805", "x-powers,axp806";
+ reg = <0x36>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ x-powers,self-working-mode;
+ vina-supply = <&reg_vcc5v>;
+ vinb-supply = <&reg_vcc5v>;
+ vinc-supply = <&reg_vcc5v>;
+ vind-supply = <&reg_vcc5v>;
+ vine-supply = <&reg_vcc5v>;
+ aldoin-supply = <&reg_vcc5v>;
+ bldoin-supply = <&reg_vcc5v>;
+ cldoin-supply = <&reg_vcc5v>;
+
+ regulators {
+ reg_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pl";
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-ac200";
+ regulator-enable-ramp-delay = <100000>;
+ };
+
+ reg_aldo3: aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc25-dram";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-bias-pll";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-efuse-pcie-hdmi-io";
+ };
+
+ reg_bldo3: bldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-dcxoio";
+ };
+
+ bldo4 {
+ /* unused */
+ };
+
+ reg_cldo1: cldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-3v3";
+ };
+
+ reg_cldo2: cldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi-1";
+ };
+
+ reg_cldo3: cldo3 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-wifi-2";
+ };
+
+ reg_dcdca: dcdca {
+ regulator-always-on;
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1080000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdcc: dcdcc {
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1080000>;
+ regulator-name = "vdd-gpu";
+ };
+
+ reg_dcdcd: dcdcd {
+ regulator-always-on;
+ regulator-min-microvolt = <960000>;
+ regulator-max-microvolt = <960000>;
+ regulator-name = "vdd-sys";
+ };
+
+ reg_dcdce: dcdce {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dram";
+ };
+
+ sw {
+ /* unused */
+ };
+ };
+ };
+};
+
+&r_pio {
+ /*
+ * PL0 and PL1 are used for the PMIC I2C bus;
+ * do not enable the PL supply here,
+ * otherwise the board fails to boot
+ *
+ * vcc-pl-supply = <&reg_aldo1>;
+ */
+ vcc-pm-supply = <&reg_aldo1>;
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ph_pins>;
+ status = "okay";
+};
+
+&usb2otg {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb2phy {
+ usb0_vbus-supply = <&reg_vcc5v>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
new file mode 100644
index 000000000000..17d496990108
--- /dev/null
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Ondřej Jirman <megous@megous.com>
+ */
+
+/dts-v1/;
+
+#include "sun50i-h6.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "OrangePi 3";
+ compatible = "xunlong,orangepi-3", "allwinner,sun50i-h6";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ label = "orangepi:red:power";
+ gpios = <&r_pio 0 4 GPIO_ACTIVE_HIGH>; /* PL4 */
+ default-state = "on";
+ };
+
+ status {
+ label = "orangepi:green:status";
+ gpios = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
+ };
+ };
+
+ reg_vcc5v: vcc5v {
+ /* board wide 5V supply directly from the DC jack */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc-5v";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdca>;
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci3 {
+ status = "okay";
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_cldo1>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ bus-width = <4>;
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci3 {
+ status = "okay";
+};
+
+&pio {
+ vcc-pc-supply = <&reg_bldo2>;
+ vcc-pd-supply = <&reg_cldo1>;
+};
+
+&r_i2c {
+ status = "okay";
+
+ axp805: pmic@36 {
+ compatible = "x-powers,axp805", "x-powers,axp806";
+ reg = <0x36>;
+ interrupt-parent = <&r_intc>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ x-powers,self-working-mode;
+ vina-supply = <&reg_vcc5v>;
+ vinb-supply = <&reg_vcc5v>;
+ vinc-supply = <&reg_vcc5v>;
+ vind-supply = <&reg_vcc5v>;
+ vine-supply = <&reg_vcc5v>;
+ aldoin-supply = <&reg_vcc5v>;
+ bldoin-supply = <&reg_vcc5v>;
+ cldoin-supply = <&reg_vcc5v>;
+
+ regulators {
+ reg_aldo1: aldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc-pl-led-ir";
+ };
+
+ reg_aldo2: aldo2 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc33-audio-tv-ephy-mac";
+ };
+
+ /* ALDO3 is shorted to CLDO1 */
+ reg_aldo3: aldo3 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc33-io-pd-emmc-sd-usb-uart-1";
+ };
+
+ reg_bldo1: bldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc18-dram-bias-pll";
+ };
+
+ reg_bldo2: bldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-name = "vcc-efuse-pcie-hdmi-pc";
+ };
+
+ bldo3 {
+ /* unused */
+ };
+
+ bldo4 {
+ /* unused */
+ };
+
+ reg_cldo1: cldo1 {
+ regulator-always-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "vcc33-io-pd-emmc-sd-usb-uart-2";
+ };
+
+ cldo2 {
+ /* unused */
+ };
+
+ cldo3 {
+ /* unused */
+ };
+
+ reg_dcdca: dcdca {
+ regulator-always-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1160000>;
+ regulator-name = "vdd-cpu";
+ };
+
+ reg_dcdcc: dcdcc {
+ regulator-min-microvolt = <810000>;
+ regulator-max-microvolt = <1080000>;
+ regulator-name = "vdd-gpu";
+ };
+
+ reg_dcdcd: dcdcd {
+ regulator-always-on;
+ regulator-min-microvolt = <960000>;
+ regulator-max-microvolt = <960000>;
+ regulator-name = "vdd-sys";
+ };
+
+ reg_dcdce: dcdce {
+ regulator-always-on;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-name = "vcc-dram";
+ };
+
+ sw {
+ /* unused */
+ };
+ };
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_ph_pins>;
+ status = "okay";
+};
+
+&usb2otg {
+ /*
+ * This board doesn't have a controllable VBUS even though it
+ * does have an ID pin. Using it as anything but a USB host is
+ * unsafe.
+ */
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb2phy {
+ usb0_id_det-gpios = <&pio 2 15 GPIO_ACTIVE_HIGH>; /* PC15 */
+ usb0_vbus-supply = <&reg_vcc5v>;
+ usb3_vbus-supply = <&reg_vcc5v>;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index b2526dac2fcf..62e27948a3fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -56,8 +56,6 @@
};
&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index bdb8470fc8dc..4802902e128f 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -104,8 +104,6 @@
};
&mmc0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
bus-width = <4>;
@@ -113,8 +111,6 @@
};
&mmc2 {
- pinctrl-names = "default";
- pinctrl-0 = <&mmc2_pins>;
vmmc-supply = <&reg_cldo1>;
vqmmc-supply = <&reg_bldo2>;
non-removable;
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index c9e861a50a63..16c5c3d0fd81 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -101,7 +101,7 @@
#size-cells = <1>;
ranges;
- display-engine@1000000 {
+ bus@1000000 {
compatible = "allwinner,sun50i-h6-de3",
"allwinner,sun50i-a64-de2";
reg = <0x1000000 0x400000>;
@@ -146,6 +146,17 @@
};
};
+ video-codec@1c0e000 {
+ compatible = "allwinner,sun50i-h6-video-engine";
+ reg = <0x01c0e000 0x2000>;
+ clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>,
+ <&ccu CLK_MBUS_VE>;
+ clock-names = "ahb", "mod", "ram";
+ resets = <&ccu RST_BUS_VE>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ allwinner,sram = <&ve_sram 1>;
+ };
+
syscon: syscon@3000000 {
compatible = "allwinner,sun50i-h6-system-control",
"allwinner,sun50i-a64-system-control";
@@ -192,6 +203,11 @@
#reset-cells = <1>;
};
+ sid: sid@3006000 {
+ compatible = "allwinner,sun50i-h6-sid";
+ reg = <0x03006000 0x400>;
+ };
+
pio: pinctrl@300b000 {
compatible = "allwinner,sun50i-h6-pinctrl";
reg = <0x0300b000 0x400>;
@@ -206,7 +222,7 @@
interrupt-controller;
#interrupt-cells = <3>;
- ext_rgmii_pins: rgmii_pins {
+ ext_rgmii_pins: rgmii-pins {
pins = "PD0", "PD1", "PD2", "PD3", "PD4",
"PD5", "PD7", "PD8", "PD9", "PD10",
"PD11", "PD12", "PD13", "PD19", "PD20";
@@ -227,6 +243,15 @@
bias-pull-up;
};
+ /omit-if-no-ref/
+ mmc1_pins: mmc1-pins {
+ pins = "PG0", "PG1", "PG2", "PG3",
+ "PG4", "PG5";
+ function = "mmc1";
+ drive-strength = <30>;
+ bias-pull-up;
+ };
+
mmc2_pins: mmc2-pins {
pins = "PC1", "PC4", "PC5", "PC6",
"PC7", "PC8", "PC9", "PC10",
@@ -236,7 +261,7 @@
bias-pull-up;
};
- uart0_ph_pins: uart0-ph {
+ uart0_ph_pins: uart0-ph-pins {
pins = "PH0", "PH1";
function = "uart0";
};
@@ -262,6 +287,8 @@
resets = <&ccu RST_BUS_MMC0>;
reset-names = "ahb";
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -276,6 +303,8 @@
resets = <&ccu RST_BUS_MMC1>;
reset-names = "ahb";
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -290,6 +319,8 @@
resets = <&ccu RST_BUS_MMC2>;
reset-names = "ahb";
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
status = "disabled";
#address-cells = <1>;
#size-cells = <0>;
@@ -425,7 +456,6 @@
resets = <&ccu RST_BUS_OHCI3>,
<&ccu RST_BUS_EHCI3>;
phys = <&usb2phy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -437,7 +467,6 @@
<&ccu CLK_USB_OHCI3>;
resets = <&ccu RST_BUS_OHCI3>;
phys = <&usb2phy 3>;
- phy-names = "usb";
status = "disabled";
};
@@ -614,7 +643,7 @@
interrupt-controller;
#interrupt-cells = <3>;
- r_i2c_pins: r-i2c {
+ r_i2c_pins: r-i2c-pins {
pins = "PL0", "PL1";
function = "s_i2c";
};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index cd7c76e58b09..fe107ce115ef 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -393,7 +393,7 @@
};
sysmgr: sysmgr@ffd12000 {
- compatible = "altr,sys-mgr", "syscon";
+ compatible = "altr,sys-mgr-s10","altr,sys-mgr";
reg = <0xffd12000 0x228>;
};
@@ -534,11 +534,12 @@
};
eccmgr {
- compatible = "altr,socfpga-a10-ecc-manager";
+ compatible = "altr,socfpga-s10-ecc-manager",
+ "altr,socfpga-a10-ecc-manager";
altr,sysmgr-syscon = <&sysmgr>;
#address-cells = <1>;
#size-cells = <1>;
- interrupts = <0 15 4>, <0 95 4>;
+ interrupts = <0 15 4>;
interrupt-controller;
#interrupt-cells = <2>;
ranges;
@@ -546,31 +547,31 @@
sdramedac {
compatible = "altr,sdram-edac-s10";
altr,sdr-syscon = <&sdr>;
- interrupts = <16 4>, <48 4>;
+ interrupts = <16 4>;
};
usb0-ecc@ff8c4000 {
- compatible = "altr,socfpga-usb-ecc";
+ compatible = "altr,socfpga-s10-usb-ecc",
+ "altr,socfpga-usb-ecc";
reg = <0xff8c4000 0x100>;
altr,ecc-parent = <&usb0>;
- interrupts = <2 4>,
- <34 4>;
+ interrupts = <2 4>;
};
emac0-rx-ecc@ff8c0000 {
- compatible = "altr,socfpga-eth-mac-ecc";
+ compatible = "altr,socfpga-s10-eth-mac-ecc",
+ "altr,socfpga-eth-mac-ecc";
reg = <0xff8c0000 0x100>;
altr,ecc-parent = <&gmac0>;
- interrupts = <4 4>,
- <36 4>;
+ interrupts = <4 4>;
};
emac0-tx-ecc@ff8c0400 {
- compatible = "altr,socfpga-eth-mac-ecc";
+ compatible = "altr,socfpga-s10-eth-mac-ecc",
+ "altr,socfpga-eth-mac-ecc";
reg = <0xff8c0400 0x100>;
altr,ecc-parent = <&gmac0>;
- interrupts = <5 4>,
- <37 4>;
+ interrupts = <5 4>;
};
};
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
index 2e3863ee12b3..d037563ad21c 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts
@@ -107,6 +107,7 @@
&mmc {
status = "okay";
cap-sd-highspeed;
+ cap-mmc-highspeed;
broken-cd;
bus-width = <4>;
};
@@ -159,7 +160,7 @@
#size-cells = <1>;
compatible = "n25q00a";
reg = <0>;
- spi-max-frequency = <50000000>;
+ spi-max-frequency = <100000000>;
m25p,fast-read;
cdns,page-size = <256>;
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 0821fed4c074..e129c03ced14 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-g12a-sei510.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
new file mode 100644
index 000000000000..34b40587e5ef
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 BayLibre SAS. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "meson-g12a.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+/ {
+ compatible = "seirobotics,sei510", "amlogic,g12a";
+ model = "SEI Robotics SEI510";
+
+ aliases {
+ serial0 = &uart_AO;
+ };
+
+ adc_keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+
+ button-onoff {
+ label = "On/Off";
+ linux,code = <KEY_POWER>;
+ press-threshold-microvolt = <1700000>;
+ };
+ };
+
+ ao_5v: regulator-ao_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "AO_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_in>;
+ regulator-always-on;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ dc_in: regulator-dc_in {
+ compatible = "regulator-fixed";
+ regulator-name = "DC_IN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ emmc_1v8: regulator-emmc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "EMMC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ reserved-memory {
+ /* TEE Reserved Memory */
+ bl32_reserved: bl32@5000000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_in>;
+ regulator-always-on;
+ };
+
+ vddao_3v3_t: regulator-vddao_3v3_t {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3_T";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ };
+
+ vddio_ao1v8: regulator-vddio_ao1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDIO_AO1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+};
+
+&cec_AO {
+ pinctrl-0 = <&cec_ao_a_h_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cecb_AO {
+ pinctrl-0 = <&cec_ao_b_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&saradc {
+ status = "okay";
+ vref-supply = <&vddio_ao1v8>;
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&uart_AO {
+ status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "host";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
index c44dbdddf2cf..0e8045b8a915 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-u200.dts
@@ -6,6 +6,8 @@
/dts-v1/;
#include "meson-g12a.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
/ {
compatible = "amlogic,u200", "amlogic,g12a";
@@ -21,9 +23,154 @@
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ flash_1v8: regulator-flash_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "FLASH_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ main_12v: regulator-main_12v {
+ compatible = "regulator-fixed";
+ regulator-name = "12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ vcc_1v8: regulator-vcc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ /* FIXME: actually controlled by VDDCPU_B_EN */
+ };
+
+ vcc_5v: regulator-vcc_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&main_12v>;
+
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-high;
+ };
+
+ usb_pwr_en: regulator-usb_pwr_en {
+ compatible = "regulator-fixed";
+ regulator-name = "USB_PWR_EN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc_5v>;
+
+ gpio = <&gpio GPIOH_6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ vddao_1v8: regulator-vddao_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&main_12v>;
+ regulator-always-on;
+ };
+
+};
+
+&cec_AO {
+ pinctrl-0 = <&cec_ao_a_h_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cecb_AO {
+ pinctrl-0 = <&cec_ao_b_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
};
&uart_AO {
status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
};
+&usb {
+ status = "okay";
+ vbus-supply = <&usb_pwr_en>;
+};
+
+&usb2_phy0 {
+ phy-supply = <&vcc_5v>;
+};
+
+&usb2_phy1 {
+ phy-supply = <&vcc_5v>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
index c62d3d5706ff..b3d913f28f12 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
@@ -6,6 +6,8 @@
/dts-v1/;
#include "meson-g12a.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
/ {
compatible = "amediatech,x96-max", "amlogic,u200", "amlogic,g12a";
@@ -21,8 +23,146 @@
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
+
+ cvbs-connector {
+ compatible = "composite-video-connector";
+
+ port {
+ cvbs_connector_in: endpoint {
+ remote-endpoint = <&cvbs_vdac_out>;
+ };
+ };
+ };
+
+ hdmi-connector {
+ compatible = "hdmi-connector";
+ type = "a";
+
+ port {
+ hdmi_connector_in: endpoint {
+ remote-endpoint = <&hdmi_tx_tmds_out>;
+ };
+ };
+ };
+
+ flash_1v8: regulator-flash_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "FLASH_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ dc_in: regulator-dc_in {
+ compatible = "regulator-fixed";
+ regulator-name = "DC_IN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ vcc_1v8: regulator-vcc_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_3v3>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: regulator-vcc_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ /* FIXME: actually controlled by VDDCPU_B_EN */
+ };
+
+ vcc_5v: regulator-vcc_5v {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC_5V";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_in>;
+
+ gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+ enable-active-low;
+ };
+
+ vddao_1v8: regulator-vddao_1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vddao_3v3>;
+ regulator-always-on;
+ };
+
+ vddao_3v3: regulator-vddao_3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDAO_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&dc_in>;
+ regulator-always-on;
+ };
+};
+
+&cec_AO {
+ pinctrl-0 = <&cec_ao_a_h_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cecb_AO {
+ pinctrl-0 = <&cec_ao_b_h_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+ hdmi-phandle = <&hdmi_tx>;
+};
+
+&cvbs_vdac_port {
+ cvbs_vdac_out: endpoint {
+ remote-endpoint = <&cvbs_connector_in>;
+ };
+};
+
+&hdmi_tx {
+ status = "okay";
+ pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+ pinctrl-names = "default";
+ hdmi-supply = <&vcc_5v>;
+};
+
+&hdmi_tx_tmds_port {
+ hdmi_tx_tmds_out: endpoint {
+ remote-endpoint = <&hdmi_connector_in>;
+ };
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ };
};
&uart_AO {
status = "okay";
+ pinctrl-0 = <&uart_ao_a_pins>;
+ pinctrl-names = "default";
+};
+
+&usb {
+ status = "okay";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index 17c6217f8a84..9f72396ba710 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -3,9 +3,13 @@
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
*/
+#include <dt-bindings/phy/phy.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/g12a-clkc.h>
+#include <dt-bindings/clock/g12a-aoclkc.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/amlogic,meson-g12a-reset.h>
/ {
compatible = "amlogic,g12a";
@@ -55,6 +59,14 @@
};
};
+ efuse: efuse {
+ compatible = "amlogic,meson-gxbb-efuse";
+ clocks = <&clkc CLKID_EFUSE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ read-only;
+ };
+
psci {
compatible = "arm,psci-1.0";
method = "smc";
@@ -70,6 +82,18 @@
reg = <0x0 0x05000000 0x0 0x300000>;
no-map;
};
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ size = <0x0 0x10000000>;
+ alignment = <0x0 0x400000>;
+ linux,cma-default;
+ };
+ };
+
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
};
soc {
@@ -85,12 +109,177 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xff600000 0x0 0x200000>;
+ hdmi_tx: hdmi-tx@0 {
+ compatible = "amlogic,meson-g12a-dw-hdmi";
+ reg = <0x0 0x0 0x0 0x10000>;
+ interrupts = <GIC_SPI 57 IRQ_TYPE_EDGE_RISING>;
+ resets = <&reset RESET_HDMITX_CAPB3>,
+ <&reset RESET_HDMITX_PHY>,
+ <&reset RESET_HDMITX>;
+ reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
+ clocks = <&clkc CLKID_HDMI>,
+ <&clkc CLKID_HTX_PCLK>,
+ <&clkc CLKID_VPU_INTR>;
+ clock-names = "isfr", "iahb", "venci";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ /* VPU VENC Input */
+ hdmi_tx_venc_port: port@0 {
+ reg = <0>;
+
+ hdmi_tx_in: endpoint {
+ remote-endpoint = <&hdmi_tx_out>;
+ };
+ };
+
+ /* TMDS Output */
+ hdmi_tx_tmds_port: port@1 {
+ reg = <1>;
+ };
+ };
+
periphs: bus@34400 {
compatible = "simple-bus";
reg = <0x0 0x34400 0x0 0x400>;
#address-cells = <2>;
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0x34400 0x0 0x400>;
+
+ periphs_pinctrl: pinctrl@40 {
+ compatible = "amlogic,meson-g12a-periphs-pinctrl";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio: bank@40 {
+ reg = <0x0 0x40 0x0 0x4c>,
+ <0x0 0xe8 0x0 0x18>,
+ <0x0 0x120 0x0 0x18>,
+ <0x0 0x2c0 0x0 0x40>,
+ <0x0 0x340 0x0 0x1c>;
+ reg-names = "gpio",
+ "pull",
+ "pull-enable",
+ "mux",
+ "ds";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&periphs_pinctrl 0 0 86>;
+ };
+
+ cec_ao_a_h_pins: cec_ao_a_h {
+ mux {
+ groups = "cec_ao_a_h";
+ function = "cec_ao_a_h";
+ bias-disable;
+ };
+ };
+
+ cec_ao_b_h_pins: cec_ao_b_h {
+ mux {
+ groups = "cec_ao_b_h";
+ function = "cec_ao_b_h";
+ bias-disable;
+ };
+ };
+
+ hdmitx_ddc_pins: hdmitx_ddc {
+ mux {
+ groups = "hdmitx_sda",
+ "hdmitx_sck";
+ function = "hdmitx";
+ bias-disable;
+ };
+ };
+
+ hdmitx_hpd_pins: hdmitx_hpd {
+ mux {
+ groups = "hdmitx_hpd_in";
+ function = "hdmitx";
+ bias-disable;
+ };
+ };
+
+ uart_a_pins: uart-a {
+ mux {
+ groups = "uart_a_tx",
+ "uart_a_rx";
+ function = "uart_a";
+ bias-disable;
+ };
+ };
+
+ uart_a_cts_rts_pins: uart-a-cts-rts {
+ mux {
+ groups = "uart_a_cts",
+ "uart_a_rts";
+ function = "uart_a";
+ bias-disable;
+ };
+ };
+
+ uart_b_pins: uart-b {
+ mux {
+ groups = "uart_b_tx",
+ "uart_b_rx";
+ function = "uart_b";
+ bias-disable;
+ };
+ };
+
+ uart_c_pins: uart-c {
+ mux {
+ groups = "uart_c_tx",
+ "uart_c_rx";
+ function = "uart_c";
+ bias-disable;
+ };
+ };
+
+ uart_c_cts_rts_pins: uart-c-cts-rts {
+ mux {
+ groups = "uart_c_cts",
+ "uart_c_rts";
+ function = "uart_c";
+ bias-disable;
+ };
+ };
+ };
+ };
+
+ usb2_phy0: phy@36000 {
+ compatible = "amlogic,g12a-usb2-phy";
+ reg = <0x0 0x36000 0x0 0x2000>;
+ clocks = <&xtal>;
+ clock-names = "xtal";
+ resets = <&reset RESET_USB_PHY20>;
+ reset-names = "phy";
+ #phy-cells = <0>;
+ };
+
+ dmc: bus@38000 {
+ compatible = "simple-bus";
+ reg = <0x0 0x38000 0x0 0x400>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x38000 0x0 0x400>;
+
+ canvas: video-lut@48 {
+ compatible = "amlogic,canvas";
+ reg = <0x0 0x48 0x0 0x14>;
+ };
+ };
+
+ usb2_phy1: phy@3a000 {
+ compatible = "amlogic,g12a-usb2-phy";
+ reg = <0x0 0x3a000 0x0 0x2000>;
+ clocks = <&xtal>;
+ clock-names = "xtal";
+ resets = <&reset RESET_USB_PHY21>;
+ reset-names = "phy";
+ #phy-cells = <0>;
};
hiu: bus@3c000 {
@@ -113,6 +302,18 @@
};
};
};
+
+ usb3_pcie_phy: phy@46000 {
+ compatible = "amlogic,g12a-usb3-pcie-phy";
+ reg = <0x0 0x46000 0x0 0x2000>;
+ clocks = <&clkc CLKID_PCIE_PLL>;
+ clock-names = "ref_clk";
+ resets = <&reset RESET_PCIE_PHY>;
+ reset-names = "phy";
+ assigned-clocks = <&clkc CLKID_PCIE_PLL>;
+ assigned-clock-rates = <100000000>;
+ #phy-cells = <1>;
+ };
};
aobus: bus@ff800000 {
@@ -122,6 +323,128 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xff800000 0x0 0x100000>;
+ rti: sys-ctrl@0 {
+ compatible = "amlogic,meson-gx-ao-sysctrl",
+ "simple-mfd", "syscon";
+ reg = <0x0 0x0 0x0 0x100>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x0 0x0 0x0 0x0 0x0 0x100>;
+
+ clkc_AO: clock-controller {
+ compatible = "amlogic,meson-g12a-aoclkc";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ clocks = <&xtal>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "mpeg-clk";
+ };
+
+ pwrc_vpu: power-controller-vpu {
+ compatible = "amlogic,meson-g12a-pwrc-vpu";
+ #power-domain-cells = <0>;
+ amlogic,hhi-sysctrl = <&hhi>;
+ resets = <&reset RESET_VIU>,
+ <&reset RESET_VENC>,
+ <&reset RESET_VCBUS>,
+ <&reset RESET_BT656>,
+ <&reset RESET_RDMA>,
+ <&reset RESET_VENCI>,
+ <&reset RESET_VENCP>,
+ <&reset RESET_VDAC>,
+ <&reset RESET_VDI6>,
+ <&reset RESET_VENCL>,
+ <&reset RESET_VID_LOCK>;
+ clocks = <&clkc CLKID_VPU>,
+ <&clkc CLKID_VAPB>;
+ clock-names = "vpu", "vapb";
+ /*
+ * VPU clocking is provided by two identical clock paths
+ * VPU_0 and VPU_1 muxed to a single clock by a glitch
+ * free mux to safely change frequency while running.
+ * Same for VAPB but with a final gate after the glitch free mux.
+ */
+ assigned-clocks = <&clkc CLKID_VPU_0_SEL>,
+ <&clkc CLKID_VPU_0>,
+ <&clkc CLKID_VPU>, /* Glitch free mux */
+ <&clkc CLKID_VAPB_0_SEL>,
+ <&clkc CLKID_VAPB_0>,
+ <&clkc CLKID_VAPB_SEL>; /* Glitch free mux */
+ assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_VPU_0>,
+ <&clkc CLKID_FCLK_DIV4>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_VAPB_0>;
+ assigned-clock-rates = <0>, /* Do Nothing */
+ <666666666>,
+ <0>, /* Do Nothing */
+ <0>, /* Do Nothing */
+ <250000000>,
+ <0>; /* Do Nothing */
+ };
+
+ ao_pinctrl: pinctrl@14 {
+ compatible = "amlogic,meson-g12a-aobus-pinctrl";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ gpio_ao: bank@14 {
+ reg = <0x0 0x14 0x0 0x8>,
+ <0x0 0x1c 0x0 0x8>,
+ <0x0 0x24 0x0 0x14>;
+ reg-names = "mux",
+ "ds",
+ "gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&ao_pinctrl 0 0 15>;
+ };
+
+ uart_ao_a_pins: uart-a-ao {
+ mux {
+ groups = "uart_ao_a_tx",
+ "uart_ao_a_rx";
+ function = "uart_ao_a";
+ bias-disable;
+ };
+ };
+
+ uart_ao_a_cts_rts_pins: uart-ao-a-cts-rts {
+ mux {
+ groups = "uart_ao_a_cts",
+ "uart_ao_a_rts";
+ function = "uart_ao_a";
+ bias-disable;
+ };
+ };
+ };
+ };
+
+ cec_AO: cec@100 {
+ compatible = "amlogic,meson-gx-ao-cec";
+ reg = <0x0 0x00100 0x0 0x14>;
+ interrupts = <GIC_SPI 199 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_AO CLKID_AO_CEC>;
+ clock-names = "core";
+ status = "disabled";
+ };
+
+ sec_AO: ao-secure@140 {
+ compatible = "amlogic,meson-gx-ao-secure", "syscon";
+ reg = <0x0 0x140 0x0 0x140>;
+ amlogic,has-chip-id;
+ };
+
+ cecb_AO: cec@280 {
+ compatible = "amlogic,meson-g12a-ao-cec";
+ reg = <0x0 0x00280 0x0 0x1c>;
+ interrupts = <GIC_SPI 203 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_AO CLKID_AO_CTS_OSCIN>;
+ clock-names = "oscin";
+ status = "disabled";
+ };
+
uart_AO: serial@3000 {
compatible = "amlogic,meson-gx-uart",
"amlogic,meson-ao-uart";
@@ -141,6 +464,46 @@
clock-names = "xtal", "pclk", "baud";
status = "disabled";
};
+
+ saradc: adc@9000 {
+ compatible = "amlogic,meson-g12a-saradc",
+ "amlogic,meson-saradc";
+ reg = <0x0 0x9000 0x0 0x48>;
+ #io-channel-cells = <1>;
+ interrupts = <GIC_SPI 200 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>,
+ <&clkc_AO CLKID_AO_SAR_ADC>,
+ <&clkc_AO CLKID_AO_SAR_ADC_CLK>,
+ <&clkc_AO CLKID_AO_SAR_ADC_SEL>;
+ clock-names = "clkin", "core", "adc_clk", "adc_sel";
+ status = "disabled";
+ };
+ };
+
+ vpu: vpu@ff900000 {
+ compatible = "amlogic,meson-g12a-vpu";
+ reg = <0x0 0xff900000 0x0 0x100000>,
+ <0x0 0xff63c000 0x0 0x1000>;
+ reg-names = "vpu", "hhi";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ amlogic,canvas = <&canvas>;
+ power-domains = <&pwrc_vpu>;
+
+ /* CVBS VDAC output port */
+ cvbs_vdac_port: port@0 {
+ reg = <0>;
+ };
+
+ /* HDMI-TX output port */
+ hdmi_tx_port: port@1 {
+ reg = <1>;
+
+ hdmi_tx_out: endpoint {
+ remote-endpoint = <&hdmi_tx_in>;
+ };
+ };
};
gic: interrupt-controller@ffc01000 {
@@ -163,10 +526,112 @@
#size-cells = <2>;
ranges = <0x0 0x0 0x0 0xffd00000 0x0 0x100000>;
+ reset: reset-controller@1004 {
+ compatible = "amlogic,meson-g12a-reset",
+ "amlogic,meson-axg-reset";
+ reg = <0x0 0x1004 0x0 0x9c>;
+ #reset-cells = <1>;
+ };
+
clk_msr: clock-measure@18000 {
compatible = "amlogic,meson-g12a-clk-measure";
reg = <0x0 0x18000 0x0 0x10>;
};
+
+ uart_C: serial@22000 {
+ compatible = "amlogic,meson-gx-uart";
+ reg = <0x0 0x22000 0x0 0x18>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&clkc CLKID_UART2>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+
+ uart_B: serial@23000 {
+ compatible = "amlogic,meson-gx-uart";
+ reg = <0x0 0x23000 0x0 0x18>;
+ interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&clkc CLKID_UART1>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+
+ uart_A: serial@24000 {
+ compatible = "amlogic,meson-gx-uart";
+ reg = <0x0 0x24000 0x0 0x18>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&xtal>;
+ clock-names = "xtal", "pclk", "baud";
+ status = "disabled";
+ };
+ };
+
+ usb: usb@ffe09000 {
+ status = "disabled";
+ compatible = "amlogic,meson-g12a-usb-ctrl";
+ reg = <0x0 0xffe09000 0x0 0xa0>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&clkc CLKID_USB>;
+ resets = <&reset RESET_USB>;
+
+ dr_mode = "otg";
+
+ phys = <&usb2_phy0>, <&usb2_phy1>,
+ <&usb3_pcie_phy PHY_TYPE_USB3>;
+ phy-names = "usb2-phy0", "usb2-phy1", "usb3-phy0";
+
+ dwc2: usb@ff400000 {
+ compatible = "amlogic,meson-g12a-usb", "snps,dwc2";
+ reg = <0x0 0xff400000 0x0 0x40000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
+ clock-names = "ddr";
+ phys = <&usb2_phy1>;
+ dr_mode = "peripheral";
+ g-rx-fifo-size = <192>;
+ g-np-tx-fifo-size = <128>;
+ g-tx-fifo-size = <128 128 16 16 16>;
+ };
+
+ dwc3: usb@ff500000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xff500000 0x0 0x100000>;
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_u2_susphy_quirk;
+ snps,quirk-frame-length-adjustment;
+ };
+ };
+
+ mali: gpu@ffe40000 {
+ compatible = "amlogic,meson-g12a-mali", "arm,mali-bifrost";
+ reg = <0x0 0xffe40000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gpu", "mmu", "job";
+ clocks = <&clkc CLKID_MALI>;
+ resets = <&reset RESET_DVALIN_CAPB3>, <&reset RESET_DVALIN>;
+
+ /*
+ * Mali clocking is provided by two identical clock paths
+ * MALI_0 and MALI_1 muxed to a single clock by a glitch
+ * free mux to safely change frequency while running.
+ */
+ assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+ <&clkc CLKID_MALI_0>,
+ <&clkc CLKID_MALI>; /* Glitch free mux */
+ assigned-clock-parents = <&clkc CLKID_FCLK_DIV2P5>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_MALI_0>;
+ assigned-clock-rates = <0>, /* Do Nothing */
+ <800000000>,
+ <0>; /* Do Nothing */
};
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
index 9a8a8a7e4b53..b5667f1fb2c8 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-phicomm-n1.dts
@@ -14,6 +14,16 @@
cvbs-connector {
status = "disabled";
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ status {
+ label = "n1:white:status";
+ gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
};
&cvbs_vdac_port {
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
index 8acfd40090d2..25f3b6b14043 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
@@ -178,3 +178,7 @@
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
+
+&usb0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
index ed3a3d5adf31..7a85a82bf65d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
@@ -91,6 +91,33 @@
reset-names = "phy";
status = "okay";
};
+
+ mali: gpu@c0000 {
+ compatible = "amlogic,meson-gxm-mali", "arm,mali-t820";
+ reg = <0x0 0xc0000 0x0 0x40000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gpu", "mmu", "job";
+ clocks = <&clkc CLKID_MALI>;
+ resets = <&reset RESET_MALI_CAPB3>, <&reset RESET_MALI>;
+
+ /*
+ * Mali clocking is provided by two identical clock paths
+ * MALI_0 and MALI_1 muxed to a single clock by a glitch
+ * free mux to safely change frequency while running.
+ */
+ assigned-clocks = <&clkc CLKID_MALI_0_SEL>,
+ <&clkc CLKID_MALI_0>,
+ <&clkc CLKID_MALI>; /* Glitch free mux */
+ assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>,
+ <0>, /* Do Nothing */
+ <&clkc CLKID_MALI_0>;
+ assigned-clock-rates = <0>, /* Do Nothing */
+ <666666666>,
+ <0>; /* Do Nothing */
+ };
};
&clkc_AO {
diff --git a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
index 6a3255597138..3e8c70778e24 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
+++ b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts
@@ -8,6 +8,28 @@
#include "bm1880.dtsi"
+/*
+ * GPIO name legend: proper name = the GPIO line is used as GPIO
+ * NC = not connected (pin out but not routed from the chip to
+ * anything on the board)
+ * "[PER]" = pin is muxed for [peripheral] (not GPIO)
+ * LSEC = Low Speed External Connector
+ * HSEC = High Speed External Connector
+ *
+ * Line names are taken from the schematic "sophon-edge-schematics"
+ * version 1.0210.
+ *
+ * For the lines routed to the external connectors the
+ * lines are named after the 96Boards CE Specification 1.0,
+ * Appendix "Expansion Connector Signal Description".
+ *
+ * When the 96Board naming of a line and the schematic name of
+ * the same line are in conflict, the 96Board specification
+ * takes precedence. This is only for the informational
+ * lines, i.e. "[FOO]"; the GPIO-named lines "GPIO-A" through "GPIO-L"
+ * are the only ones actually used for GPIO.
+ */
+
/ {
compatible = "bitmain,sophon-edge", "bitmain,bm1880";
model = "Sophon Edge";
@@ -32,19 +54,140 @@
clock-frequency = <500000000>;
#clock-cells = <0>;
};
+
+ soc {
+ gpio0: gpio@50027000 {
+ porta: gpio-controller@0 {
+ gpio-line-names =
+ "GPIO-A", /* GPIO0, LSEC pin 23 */
+ "GPIO-C", /* GPIO1, LSEC pin 25 */
+ "[GPIO2_PHY0_RST]", /* GPIO2 */
+ "GPIO-E", /* GPIO3, LSEC pin 27 */
+ "[USB_DET]", /* GPIO4 */
+ "[EN_P5V]", /* GPIO5 */
+ "[VDDIO_MS1_SEL]", /* GPIO6 */
+ "GPIO-G", /* GPIO7, LSEC pin 29 */
+ "[BM_TUSB_RST_L]", /* GPIO8 */
+ "[EN_P5V_USBHUB]", /* GPIO9 */
+ "NC",
+ "LED_WIFI", /* GPIO11 */
+ "LED_BT", /* GPIO12 */
+ "[BM_BLM8221_EN_L]", /* GPIO13 */
+ "NC", /* GPIO14 */
+ "NC", /* GPIO15 */
+ "NC", /* GPIO16 */
+ "NC", /* GPIO17 */
+ "NC", /* GPIO18 */
+ "NC", /* GPIO19 */
+ "NC", /* GPIO20 */
+ "NC", /* GPIO21 */
+ "NC", /* GPIO22 */
+ "NC", /* GPIO23 */
+ "NC", /* GPIO24 */
+ "NC", /* GPIO25 */
+ "NC", /* GPIO26 */
+ "NC", /* GPIO27 */
+ "NC", /* GPIO28 */
+ "NC", /* GPIO29 */
+ "NC", /* GPIO30 */
+ "NC"; /* GPIO31 */
+ };
+ };
+
+ gpio1: gpio@50027400 {
+ portb: gpio-controller@0 {
+ gpio-line-names =
+ "NC", /* GPIO32 */
+ "NC", /* GPIO33 */
+ "[I2C0_SDA]", /* GPIO34, LSEC pin 17 */
+ "[I2C0_SCL]", /* GPIO35, LSEC pin 15 */
+ "[JTAG0_TDO]", /* GPIO36 */
+ "[JTAG0_TCK]", /* GPIO37 */
+ "[JTAG0_TDI]", /* GPIO38 */
+ "[JTAG0_TMS]", /* GPIO39 */
+ "[JTAG0_TRST_X]", /* GPIO40 */
+ "[JTAG1_TDO]", /* GPIO41 */
+ "[JTAG1_TCK]", /* GPIO42 */
+ "[JTAG1_TDI]", /* GPIO43 */
+ "[CPU_TX]", /* GPIO44 */
+ "[CPU_RX]", /* GPIO45 */
+ "[UART1_TXD]", /* GPIO46 */
+ "[UART1_RXD]", /* GPIO47 */
+ "[UART0_TXD]", /* GPIO48 */
+ "[UART0_RXD]", /* GPIO49 */
+ "GPIO-I", /* GPIO50, LSEC pin 31 */
+ "GPIO-K", /* GPIO51, LSEC pin 33 */
+ "USER_LED2", /* GPIO52 */
+ "USER_LED1", /* GPIO53 */
+ "[UART0_RTS]", /* GPIO54 */
+ "[UART0_CTS]", /* GPIO55 */
+ "USER_LED4", /* GPIO56, JTAG1_TRST_X */
+ "USER_LED3", /* GPIO57, JTAG1_TMS */
+ "[I2S0_SCLK]", /* GPIO58 */
+ "[I2S0_FS]", /* GPIO59 */
+ "[I2S0_SDI]", /* GPIO60 */
+ "[I2S0_SDO]", /* GPIO61 */
+ "GPIO-B", /* GPIO62, LSEC pin 24 */
+ "GPIO-F"; /* GPIO63, I2S1_SCLK, LSEC pin 28 */
+ };
+ };
+
+ gpio2: gpio@50027800 {
+ portc: gpio-controller@0 {
+ gpio-line-names =
+ "GPIO-D", /* GPIO64, I2S1_FS, LSEC pin 26 */
+ "GPIO-J", /* GPIO65, I2S1_SDI, LSEC pin 32 */
+ "GPIO-H", /* GPIO66, I2S1_SDO, LSEC pin 30 */
+ "GPIO-L", /* GPIO67, LSEC pin 34 */
+ "[SPI0_CS]", /* GPIO68, SPI1_CS, LSEC pin 12 */
+ "[SPI0_DIN]", /* GPIO69, SPI1_SDI, LSEC pin 10 */
+ "[SPI0_DOUT]", /* GPIO70, SPI1_SDO, LSEC pin 14 */
+ "[SPI0_SCLK]"; /* GPIO71, SPI1_SCK, LSEC pin 8 */
+ };
+ };
+ };
+};
+
+&pinctrl {
+ pinctrl_uart0_default: pinctrl-uart0-default {
+ pinmux {
+ groups = "uart0_grp";
+ function = "uart0";
+ };
+ };
+
+ pinctrl_uart1_default: pinctrl-uart1-default {
+ pinmux {
+ groups = "uart1_grp";
+ function = "uart1";
+ };
+ };
+
+ pinctrl_uart2_default: pinctrl-uart2-default {
+ pinmux {
+ groups = "uart2_grp";
+ function = "uart2";
+ };
+ };
};
&uart0 {
status = "okay";
clocks = <&uart_clk>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart0_default>;
};
&uart1 {
status = "okay";
clocks = <&uart_clk>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1_default>;
};
&uart2 {
status = "okay";
clocks = <&uart_clk>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2_default>;
};
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
index 55a4769e0de2..7726fd4c6be6 100644
--- a/arch/arm64/boot/dts/bitmain/bm1880.dtsi
+++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
@@ -80,6 +80,74 @@
#interrupt-cells = <3>;
};
+ sctrl: system-controller@50010000 {
+ compatible = "bitmain,bm1880-sctrl", "syscon",
+ "simple-mfd";
+ reg = <0x0 0x50010000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x50010000 0x1000>;
+
+ pinctrl: pinctrl@50 {
+ compatible = "bitmain,bm1880-pinctrl";
+ reg = <0x50 0x4B0>;
+ };
+ };
+
+ gpio0: gpio@50027000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x0 0x50027000 0x0 0x400>;
+
+ porta: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpio1: gpio@50027400 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x0 0x50027400 0x0 0x400>;
+
+ portb: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ gpio2: gpio@50027800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x0 0x50027800 0x0 0x400>;
+
+ portc: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <8>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
uart0: serial@58018000 {
compatible = "snps,dw-apb-uart";
reg = <0x0 0x58018000 0x0 0x2000>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index d88e2f0e179a..d2de16645e10 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -289,6 +289,12 @@
assigned-clock-parents = <&cmu_top CLK_ACLK_MFC_400>;
};
+&cmu_mif {
+ assigned-clocks = <&cmu_mif CLK_MOUT_SCLK_DSD_A>, <&cmu_mif CLK_DIV_SCLK_DSD>;
+ assigned-clock-parents = <&cmu_mif CLK_MOUT_MFC_PLL_DIV2>;
+ assigned-clock-rates = <0>, <333000000>;
+};
+
&cmu_mscl {
assigned-clocks = <&cmu_mscl CLK_MOUT_ACLK_MSCL_400_USER>,
<&cmu_mscl CLK_MOUT_SCLK_JPEG_USER>,
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
index 3d7e0a782243..dda5d2746a74 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2.dts
@@ -33,7 +33,8 @@
<&cmu_disp CLK_MOUT_DISP_PLL>,
<&cmu_mif CLK_MOUT_SCLK_DECON_TV_ECLK_A>,
<&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>,
- <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>;
+ <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK>,
+ <&cmu_disp CLK_MOUT_SCLK_DSD_USER>;
assigned-clock-parents = <0>, <0>,
<&cmu_mif CLK_ACLK_DISP_333>,
<&cmu_mif CLK_SCLK_DSIM0_DISP>,
@@ -45,7 +46,8 @@
<&cmu_disp CLK_FOUT_DISP_PLL>,
<&cmu_mif CLK_MOUT_BUS_PLL_DIV2>,
<&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>,
- <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>;
+ <&cmu_disp CLK_MOUT_SCLK_DECON_TV_ECLK_USER>,
+ <&cmu_mif CLK_SCLK_DSD_DISP>;
assigned-clock-rates = <250000000>, <400000000>;
};
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index a04e80327b6e..d29d13f4694f 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -23,6 +23,31 @@
interrupt-parent = <&gic>;
+ arm_a53_pmu {
+ compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
+ };
+
+ arm_a57_pmu {
+ compatible = "arm,cortex-a57-pmu", "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+ };
+
+ xxti: clock {
+ /* XXTI */
+ compatible = "fixed-clock";
+ clock-output-names = "oscclk";
+ #clock-cells = <0>;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -237,35 +262,11 @@
#size-cells = <1>;
ranges;
- arm_a53_pmu {
- compatible = "arm,cortex-a53-pmu", "arm,armv8-pmuv3";
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
- };
-
- arm_a57_pmu {
- compatible = "arm,cortex-a57-pmu", "arm,armv8-pmuv3";
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
- };
-
chipid@10000000 {
compatible = "samsung,exynos4210-chipid";
reg = <0x10000000 0x100>;
};
- xxti: xxti {
- compatible = "fixed-clock";
- clock-output-names = "oscclk";
- #clock-cells = <0>;
- };
-
cmu_top: clock-controller@10030000 {
compatible = "samsung,exynos5433-cmu-top";
reg = <0x10030000 0x1000>;
@@ -559,6 +560,15 @@
<&cmu_top CLK_DIV_ACLK_IMEM_200>;
};
+ slim_sss: slim-sss@11140000 {
+ compatible = "samsung,exynos5433-slim-sss";
+ reg = <0x11140000 0x1000>;
+ interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+ clock-names = "aclk", "pclk";
+ clocks = <&cmu_imem CLK_ACLK_SLIMSSS>,
+ <&cmu_imem CLK_PCLK_SLIMSSS>;
+ };
+
pd_gscl: power-domain@105c4000 {
compatible = "samsung,exynos5433-pd";
reg = <0x105c4000 0x20>;
@@ -848,12 +858,13 @@
<&cmu_disp CLK_ACLK_XIU_DECON1X>,
<&cmu_disp CLK_PCLK_SMMU_DECON1X>,
<&cmu_disp CLK_SCLK_DECON_VCLK>,
- <&cmu_disp CLK_SCLK_DECON_ECLK>;
+ <&cmu_disp CLK_SCLK_DECON_ECLK>,
+ <&cmu_disp CLK_SCLK_DSD>;
clock-names = "pclk", "aclk_decon", "aclk_smmu_decon0x",
"aclk_xiu_decon0x", "pclk_smmu_decon0x",
"aclk_smmu_decon1x", "aclk_xiu_decon1x",
"pclk_smmu_decon1x", "sclk_decon_vclk",
- "sclk_decon_eclk";
+ "sclk_decon_eclk", "dsd";
power-domains = <&pd_disp>;
interrupt-names = "fifo", "vsync", "lcd_sys";
interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>,
@@ -890,12 +901,13 @@
<&cmu_disp CLK_ACLK_XIU_TV1X>,
<&cmu_disp CLK_PCLK_SMMU_TV1X>,
<&cmu_disp CLK_SCLK_DECON_TV_VCLK>,
- <&cmu_disp CLK_SCLK_DECON_TV_ECLK>;
+ <&cmu_disp CLK_SCLK_DECON_TV_ECLK>,
+ <&cmu_disp CLK_SCLK_DSD>;
clock-names = "pclk", "aclk_decon", "aclk_smmu_decon0x",
"aclk_xiu_decon0x", "pclk_smmu_decon0x",
"aclk_smmu_decon1x", "aclk_xiu_decon1x",
"pclk_smmu_decon1x", "sclk_decon_vclk",
- "sclk_decon_eclk";
+ "sclk_decon_eclk", "dsd";
samsung,disp-sysreg = <&syscon_disp>;
power-domains = <&pd_disp>;
interrupt-names = "fifo", "vsync", "lcd_sys";
@@ -1022,11 +1034,12 @@
reg = <0x13c00000 0x1000>;
interrupts = <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "pclk", "aclk", "aclk_xiu",
- "aclk_gsclbend";
+ "aclk_gsclbend", "gsd";
clocks = <&cmu_gscl CLK_PCLK_GSCL0>,
<&cmu_gscl CLK_ACLK_GSCL0>,
<&cmu_gscl CLK_ACLK_XIU_GSCLX>,
- <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+ <&cmu_gscl CLK_ACLK_GSCLBEND_333>,
+ <&cmu_gscl CLK_ACLK_GSD>;
iommus = <&sysmmu_gscl0>;
power-domains = <&pd_gscl>;
};
@@ -1036,11 +1049,12 @@
reg = <0x13c10000 0x1000>;
interrupts = <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "pclk", "aclk", "aclk_xiu",
- "aclk_gsclbend";
+ "aclk_gsclbend", "gsd";
clocks = <&cmu_gscl CLK_PCLK_GSCL1>,
<&cmu_gscl CLK_ACLK_GSCL1>,
<&cmu_gscl CLK_ACLK_XIU_GSCLX>,
- <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+ <&cmu_gscl CLK_ACLK_GSCLBEND_333>,
+ <&cmu_gscl CLK_ACLK_GSD>;
iommus = <&sysmmu_gscl1>;
power-domains = <&pd_gscl>;
};
@@ -1050,11 +1064,12 @@
reg = <0x13c20000 0x1000>;
interrupts = <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "pclk", "aclk", "aclk_xiu",
- "aclk_gsclbend";
+ "aclk_gsclbend", "gsd";
clocks = <&cmu_gscl CLK_PCLK_GSCL2>,
<&cmu_gscl CLK_ACLK_GSCL2>,
<&cmu_gscl CLK_ACLK_XIU_GSCLX>,
- <&cmu_gscl CLK_ACLK_GSCLBEND_333>;
+ <&cmu_gscl CLK_ACLK_GSCLBEND_333>,
+ <&cmu_gscl CLK_ACLK_GSD>;
iommus = <&sysmmu_gscl2>;
power-domains = <&pd_gscl>;
};
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index 967558a93d82..077d23478901 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -28,6 +28,23 @@
tmuctrl0 = &tmuctrl_0;
};
+ arm-pmu {
+ compatible = "arm,cortex-a57-pmu", "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu_atlas0>, <&cpu_atlas1>,
+ <&cpu_atlas2>, <&cpu_atlas3>;
+ };
+
+ fin_pll: clock {
+ /* XXTI */
+ compatible = "fixed-clock";
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -77,12 +94,6 @@
reg = <0x10000000 0x100>;
};
- fin_pll: xxti {
- compatible = "fixed-clock";
- clock-output-names = "fin_pll";
- #clock-cells = <0>;
- };
-
gic: interrupt-controller@11001000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
@@ -469,28 +480,6 @@
status = "disabled";
};
- arm-pmu {
- compatible = "arm,cortex-a57-pmu", "arm,armv8-pmuv3";
- interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-affinity = <&cpu_atlas0>, <&cpu_atlas1>,
- <&cpu_atlas2>, <&cpu_atlas3>;
- };
-
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14
- (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11
- (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10
- (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
- };
-
pmu_system_controller: system-controller@105c0000 {
compatible = "samsung,exynos7-pmu", "syscon";
reg = <0x105c0000 0x5000>;
@@ -635,6 +624,18 @@
};
};
};
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
};
#include "exynos7-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 13604e558dc1..0bd122f60549 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -20,5 +20,8 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls2088a-rdb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-rmb3.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-zest.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts
index 7c726267ec8f..9927b096d343 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts
@@ -87,6 +87,10 @@
status = "okay";
};
+&pcie {
+ status = "okay";
+};
+
&sai2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 1ce0042b2a14..ec6257a5b251 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -475,7 +475,7 @@
interrupts = <0 126 IRQ_TYPE_LEVEL_HIGH>;
};
- pcie@3400000 {
+ pcie: pcie@3400000 {
compatible = "fsl,ls1012a-pcie";
reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */
0x40 0x00000000 0x0 0x00002000>; /* configuration space */
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index 14c79f4691ea..b359068d9605 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -32,6 +32,49 @@
device_type = "memory";
reg = <0x0 0x80000000 0x1 0x00000000>;
};
+
+ sys_mclk: clock-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Speaker Ext",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "Microphone Jack", "Mic Bias",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "Speaker Ext", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai1>;
+ frame-master;
+ bitclock-master;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&sgtl5000>;
+ frame-master;
+ bitclock-master;
+ system-clock-frequency = <25000000>;
+ };
+ };
};
&duart0 {
@@ -89,5 +132,24 @@
reg = <0x57>;
};
};
+
+ i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x5>;
+
+ sgtl5000: audio-codec@a {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ VDDA-supply = <&reg_1p8v>;
+ VDDIO-supply = <&reg_1p8v>;
+ clocks = <&sys_mclk>;
+ };
+ };
};
};
+
+&sai1 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index f86b054a74ae..f9c272fb0738 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -28,6 +28,49 @@
device_type = "memory";
reg = <0x0 0x80000000 0x1 0x0000000>;
};
+
+ sys_mclk: clock-mclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+ compatible = "regulator-fixed";
+ regulator-name = "1P8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sound {
+ compatible = "simple-audio-card";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,widgets =
+ "Microphone", "Microphone Jack",
+ "Headphone", "Headphone Jack",
+ "Speaker", "Speaker Ext",
+ "Line", "Line In Jack";
+ simple-audio-card,routing =
+ "MIC_IN", "Microphone Jack",
+ "Microphone Jack", "Mic Bias",
+ "LINE_IN", "Line In Jack",
+ "Headphone Jack", "HP_OUT",
+ "Speaker Ext", "LINE_OUT";
+
+ simple-audio-card,cpu {
+ sound-dai = <&sai4>;
+ frame-master;
+ bitclock-master;
+ };
+
+ simple-audio-card,codec {
+ sound-dai = <&sgtl5000>;
+ frame-master;
+ bitclock-master;
+ system-clock-frequency = <25000000>;
+ };
+ };
};
&i2c0 {
@@ -39,6 +82,22 @@
#address-cells = <1>;
#size-cells = <0>;
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+
+ sgtl5000: audio-codec@a {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,sgtl5000";
+ reg = <0xa>;
+ VDDA-supply = <&reg_1p8v>;
+ VDDIO-supply = <&reg_1p8v>;
+ clocks = <&sys_mclk>;
+ sclk-strength = <3>;
+ };
+ };
+
i2c@2 {
#address-cells = <1>;
#size-cells = <0>;
@@ -88,3 +147,7 @@
&enetc_port1 {
status = "disabled";
};
+
+&sai4 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 2896bbcfa3bb..b04581249f0b 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -89,6 +89,11 @@
IRQ_TYPE_LEVEL_LOW)>;
};
+ pmu {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
gic: interrupt-controller@6000000 {
compatible= "arm,gic-v3";
#address-cells = <2>;
@@ -235,6 +240,21 @@
status = "disabled";
};
+ edma0: dma-controller@22c0000 {
+ #dma-cells = <2>;
+ compatible = "fsl,vf610-edma";
+ reg = <0x0 0x22c0000 0x0 0x10000>,
+ <0x0 0x22d0000 0x0 0x10000>,
+ <0x0 0x22e0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "edma-tx", "edma-err";
+ dma-channels = <32>;
+ clock-names = "dmamux0", "dmamux1";
+ clocks = <&clockgen 4 1>,
+ <&clockgen 4 1>;
+ };
+
gpio1: gpio@2300000 {
compatible = "fsl,qoriq-gpio";
reg = <0x0 0x2300000 0x0 0x10000>;
@@ -277,7 +297,7 @@
sata: sata@3200000 {
compatible = "fsl,ls1028a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>,
- <0x0 0x20140520 0x0 0x4>;
+ <0x7 0x100520 0x0 0x4>;
reg-names = "ahci", "sata-ecc";
interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clockgen 4 1>;
@@ -336,6 +356,48 @@
<GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
};
+ sai1: audio-controller@f100000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,vf610-sai";
+ reg = <0x0 0xf100000 0x0 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+ <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx", "rx";
+ dmas = <&edma0 1 4>,
+ <&edma0 1 3>;
+ status = "disabled";
+ };
+
+ sai2: audio-controller@f110000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,vf610-sai";
+ reg = <0x0 0xf110000 0x0 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+ <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx", "rx";
+ dmas = <&edma0 1 6>,
+ <&edma0 1 5>;
+ status = "disabled";
+ };
+
+ sai4: audio-controller@f130000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,vf610-sai";
+ reg = <0x0 0xf130000 0x0 0x10000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 1>, <&clockgen 4 1>,
+ <&clockgen 4 1>, <&clockgen 4 1>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dma-names = "tx", "rx";
+ dmas = <&edma0 1 10>,
+ <&edma0 1 9>;
+ status = "disabled";
+ };
+
pcie@1f0000000 { /* Integrated Endpoint Root Complex */
compatible = "pci-host-ecam-generic";
reg = <0x01 0xf0000000 0x0 0x100000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
index 17ca357e854f..4223a2352d45 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
@@ -15,7 +15,6 @@
model = "LS1043A RDB Board";
aliases {
- crypto = &crypto;
serial0 = &duart0;
serial1 = &duart1;
serial2 = &duart2;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 6fd6116509cc..71d9ed9ff985 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -18,6 +18,7 @@
#size-cells = <2>;
aliases {
+ crypto = &crypto;
fman0 = &fman0;
ethernet0 = &enet0;
ethernet1 = &enet1;
@@ -296,7 +297,6 @@
interrupts = <0 99 0x4>;
clock-names = "qspi_en", "qspi";
clocks = <&clockgen 4 0>, <&clockgen 4 0>;
- big-endian;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index cb7185014d3a..b0ef08b090dd 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -215,8 +215,6 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
clocks = <&clockgen 4 1>, <&clockgen 4 1>;
- big-endian;
- fsl,qspi-has-second-chip;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
index 99a22abbe725..1a5acf62f23c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-qds.dts
@@ -95,6 +95,22 @@
};
};
+&sata0 {
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&sata2 {
+ status = "okay";
+};
+
+&sata3 {
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
index 9df37b159415..c2817b784232 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts
@@ -128,6 +128,22 @@
};
};
+&sata0 {
+ status = "okay";
+};
+
+&sata1 {
+ status = "okay";
+};
+
+&sata2 {
+ status = "okay";
+};
+
+&sata3 {
+ status = "okay";
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index fe87204850b5..125a8cc2c5b3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -33,6 +33,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@1 {
@@ -48,6 +49,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster0_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@100 {
@@ -63,6 +65,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@101 {
@@ -78,6 +81,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster1_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@200 {
@@ -93,6 +97,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@201 {
@@ -108,6 +113,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster2_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@300 {
@@ -123,6 +129,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@301 {
@@ -138,6 +145,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster3_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@400 {
@@ -153,6 +161,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@401 {
@@ -168,6 +177,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster4_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@500 {
@@ -183,6 +193,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@501 {
@@ -198,6 +209,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster5_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@600 {
@@ -213,6 +225,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@601 {
@@ -228,6 +241,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster6_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@700 {
@@ -243,6 +257,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cpu@701 {
@@ -258,6 +273,7 @@
i-cache-line-size = <64>;
i-cache-sets = <192>;
next-level-cache = <&cluster7_l2>;
+ cpu-idle-states = <&cpu_pw20>;
};
cluster0_l2: l2-cache0 {
@@ -323,6 +339,15 @@
cache-sets = <1024>;
cache-level = <2>;
};
+
+ cpu_pw20: cpu-pw20 {
+ compatible = "arm,idle-state";
+ idle-state-name = "PW20";
+ arm,psci-suspend-param = <0x0>;
+ entry-latency-us = <2000>;
+ exit-latency-us = <2000>;
+ min-residency-us = <6000>;
+ };
};
gic: interrupt-controller@6000000 {
@@ -687,6 +712,50 @@
status = "disabled";
};
+ sata0: sata@3200000 {
+ compatible = "fsl,lx2160a-ahci";
+ reg = <0x0 0x3200000 0x0 0x10000>,
+ <0x7 0x100520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ sata1: sata@3210000 {
+ compatible = "fsl,lx2160a-ahci";
+ reg = <0x0 0x3210000 0x0 0x10000>,
+ <0x7 0x100520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ sata2: sata@3220000 {
+ compatible = "fsl,lx2160a-ahci";
+ reg = <0x0 0x3220000 0x0 0x10000>,
+ <0x7 0x100520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ status = "disabled";
+ };
+
+ sata3: sata@3230000 {
+ compatible = "fsl,lx2160a-ahci";
+ reg = <0x0 0x3230000 0x0 0x10000>,
+ <0x7 0x100520 0x0 0x4>;
+ reg-names = "ahci", "sata-ecc";
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clockgen 4 3>;
+ dma-coherent;
+ status = "disabled";
+ };
+
smmu: iommu@5000000 {
compatible = "arm,mmu-500";
reg = <0 0x5000000 0 0x800000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dts b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
new file mode 100644
index 000000000000..2d5d89475b76
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dts
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2019 NXP
+ */
+
+/dts-v1/;
+
+#include "imx8mm.dtsi"
+
+/ {
+ model = "FSL i.MX8MM EVK board";
+ compatible = "fsl,imx8mm-evk", "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_led>;
+
+ status {
+ label = "status";
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2 {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>;
+ regulator-name = "VSD_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ at803x,led-act-blind-workaround;
+ at803x,eee-okay;
+ at803x,vddio-1p8v;
+ };
+ };
+};
+
+&uart2 { /* console */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ cd-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x19
+ >;
+ };
+
+ pinctrl_gpio_led: gpioledgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x19
+ >;
+ };
+
+ pinctrl_reg_usdhc2_vmmc: regusdhc2vmmc {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2grpgpio {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x1c4
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2grp100mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2grp200mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
+ MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
+ MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
+ MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
+ MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
new file mode 100644
index 000000000000..6b407a94c06e
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -0,0 +1,733 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright 2019 NXP
+ */
+
+#include <dt-bindings/clock/imx8mm-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
+
+#include "imx8mm-pinfunc.h"
+
+/ {
+ compatible = "fsl,imx8mm";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ ethernet0 = &fec1;
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ i2c3 = &i2c4;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &ecspi3;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ A53_0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x0>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MM_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ };
+
+ A53_1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x1>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MM_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ };
+
+ A53_2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x2>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MM_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ };
+
+ A53_3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ reg = <0x3>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MM_CLK_ARM>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ };
+
+ A53_L2: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ a53_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <850000>;
+ clock-latency-ns = <150000>;
+ };
+
+ opp-1600000000 {
+ opp-hz = /bits/ 64 <1600000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <150000>;
+ opp-suspend;
+ };
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x0 0x40000000 0 0x80000000>;
+ };
+
+ osc_32k: clock-osc-32k {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "osc_32k";
+ };
+
+ osc_24m: clock-osc-24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "osc_24m";
+ };
+
+ clk_ext1: clock-ext1 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext1";
+ };
+
+ clk_ext2: clock-ext2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext2";
+ };
+
+ clk_ext3: clock-ext3 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext3";
+ };
+
+ clk_ext4: clock-ext4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133000000>;
+ clock-output-names = "clk_ext4";
+ };
+
+ gic: interrupt-controller@38800000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x38800000 0 0x10000>, /* GIC Dist */
+ <0x0 0x38880000 0 0xC0000>; /* GICR (RD_base + SGI_base) */
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7
+ (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_HIGH)>;
+ interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>, /* Physical Secure */
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>, /* Physical Non-Secure */
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>, /* Virtual */
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(6) | IRQ_TYPE_LEVEL_LOW)>; /* Hypervisor */
+ clock-frequency = <8000000>;
+ arm,no-tick-in-suspend;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x0 0x0 0x3e000000>;
+
+ aips1: bus@30000000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ gpio1: gpio@30200000 {
+ compatible = "fsl,imx8mm-gpio", "fsl,imx35-gpio";
+ reg = <0x30200000 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@30210000 {
+ compatible = "fsl,imx8mm-gpio", "fsl,imx35-gpio";
+ reg = <0x30210000 0x10000>;
+ interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@30220000 {
+ compatible = "fsl,imx8mm-gpio", "fsl,imx35-gpio";
+ reg = <0x30220000 0x10000>;
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@30230000 {
+ compatible = "fsl,imx8mm-gpio", "fsl,imx35-gpio";
+ reg = <0x30230000 0x10000>;
+ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio5: gpio@30240000 {
+ compatible = "fsl,imx8mm-gpio", "fsl,imx35-gpio";
+ reg = <0x30240000 0x10000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wdog1: watchdog@30280000 {
+ compatible = "fsl,imx8mm-wdt", "fsl,imx21-wdt";
+ reg = <0x30280000 0x10000>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_WDOG1_ROOT>;
+ status = "disabled";
+ };
+
+ wdog2: watchdog@30290000 {
+ compatible = "fsl,imx8mm-wdt", "fsl,imx21-wdt";
+ reg = <0x30290000 0x10000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_WDOG2_ROOT>;
+ status = "disabled";
+ };
+
+ wdog3: watchdog@302a0000 {
+ compatible = "fsl,imx8mm-wdt", "fsl,imx21-wdt";
+ reg = <0x302a0000 0x10000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_WDOG3_ROOT>;
+ status = "disabled";
+ };
+
+ sdma2: dma-controller@302c0000 {
+ compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ reg = <0x302c0000 0x10000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SDMA2_ROOT>,
+ <&clk IMX8MM_CLK_SDMA2_ROOT>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
+ sdma3: dma-controller@302b0000 {
+ compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ reg = <0x302b0000 0x10000>;
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SDMA3_ROOT>,
+ <&clk IMX8MM_CLK_SDMA3_ROOT>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
+ iomuxc: pinctrl@30330000 {
+ compatible = "fsl,imx8mm-iomuxc";
+ reg = <0x30330000 0x10000>;
+ };
+
+ gpr: iomuxc-gpr@30340000 {
+ compatible = "fsl,imx8mm-iomuxc-gpr", "syscon";
+ reg = <0x30340000 0x10000>;
+ };
+
+ ocotp: ocotp-ctrl@30350000 {
+ compatible = "fsl,imx8mm-ocotp", "fsl,imx7d-ocotp", "syscon";
+ reg = <0x30350000 0x10000>;
+ clocks = <&clk IMX8MM_CLK_OCOTP_ROOT>;
+ /* For nvmem subnodes */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ anatop: anatop@30360000 {
+ compatible = "fsl,imx8mm-anatop", "syscon", "simple-bus";
+ reg = <0x30360000 0x10000>;
+ };
+
+ snvs: snvs@30370000 {
+ compatible = "fsl,sec-v4.0-mon","syscon", "simple-mfd";
+ reg = <0x30370000 0x10000>;
+
+ snvs_rtc: snvs-rtc-lp {
+ compatible = "fsl,sec-v4.0-mon-rtc-lp";
+ regmap = <&snvs>;
+ offset = <0x34>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ snvs_pwrkey: snvs-powerkey {
+ compatible = "fsl,sec-v4.0-pwrkey";
+ regmap = <&snvs>;
+ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+ linux,keycode = <KEY_POWER>;
+ wakeup-source;
+ };
+ };
+
+ clk: clock-controller@30380000 {
+ compatible = "fsl,imx8mm-ccm";
+ reg = <0x30380000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
+ <&clk_ext3>, <&clk_ext4>;
+ clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4";
+ };
+
+ src: reset-controller@30390000 {
+ compatible = "fsl,imx8mm-src", "syscon";
+ reg = <0x30390000 0x10000>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ #reset-cells = <1>;
+ };
+ };
+
+ aips2: bus@30400000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pwm1: pwm@30660000 {
+ compatible = "fsl,imx8mm-pwm", "fsl,imx27-pwm";
+ reg = <0x30660000 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PWM1_ROOT>,
+ <&clk IMX8MM_CLK_PWM1_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm2: pwm@30670000 {
+ compatible = "fsl,imx8mm-pwm", "fsl,imx27-pwm";
+ reg = <0x30670000 0x10000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PWM2_ROOT>,
+ <&clk IMX8MM_CLK_PWM2_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm3: pwm@30680000 {
+ compatible = "fsl,imx8mm-pwm", "fsl,imx27-pwm";
+ reg = <0x30680000 0x10000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PWM3_ROOT>,
+ <&clk IMX8MM_CLK_PWM3_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+
+ pwm4: pwm@30690000 {
+ compatible = "fsl,imx8mm-pwm", "fsl,imx27-pwm";
+ reg = <0x30690000 0x10000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_PWM4_ROOT>,
+ <&clk IMX8MM_CLK_PWM4_ROOT>;
+ clock-names = "ipg", "per";
+ #pwm-cells = <2>;
+ status = "disabled";
+ };
+ };
+
+ aips3: bus@30800000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ ecspi1: spi@30820000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30820000 0x10000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI1_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ecspi2: spi@30830000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30830000 0x10000>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI2_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI2_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ ecspi3: spi@30840000 {
+ compatible = "fsl,imx8mm-ecspi", "fsl,imx51-ecspi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30840000 0x10000>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ECSPI3_ROOT>,
+ <&clk IMX8MM_CLK_ECSPI3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart1: serial@30860000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30860000 0x10000>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART1_ROOT>,
+ <&clk IMX8MM_CLK_UART1_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart3: serial@30880000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30880000 0x10000>;
+ interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART3_ROOT>,
+ <&clk IMX8MM_CLK_UART3_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ uart2: serial@30890000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30890000 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART2_ROOT>,
+ <&clk IMX8MM_CLK_UART2_ROOT>;
+ clock-names = "ipg", "per";
+ status = "disabled";
+ };
+
+ i2c1: i2c@30a20000 {
+ compatible = "fsl,imx8mm-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a20000 0x10000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_I2C1_ROOT>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@30a30000 {
+ compatible = "fsl,imx8mm-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a30000 0x10000>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_I2C2_ROOT>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@30a40000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx8mm-i2c", "fsl,imx21-i2c";
+ reg = <0x30a40000 0x10000>;
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_I2C3_ROOT>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@30a50000 {
+ compatible = "fsl,imx8mm-i2c", "fsl,imx21-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x30a50000 0x10000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_I2C4_ROOT>;
+ status = "disabled";
+ };
+
+ uart4: serial@30a60000 {
+ compatible = "fsl,imx8mm-uart", "fsl,imx6q-uart";
+ reg = <0x30a60000 0x10000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_UART4_ROOT>,
+ <&clk IMX8MM_CLK_UART4_ROOT>;
+ clock-names = "ipg", "per";
+ dmas = <&sdma1 28 4 0>, <&sdma1 29 4 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
+ usdhc1: mmc@30b40000 {
+ compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b40000 0x10000>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MM_CLK_USDHC1_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX8MM_CLK_USDHC1>;
+ assigned-clock-rates = <400000000>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@30b50000 {
+ compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b50000 0x10000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MM_CLK_USDHC2_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@30b60000 {
+ compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
+ reg = <0x30b60000 0x10000>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_DUMMY>,
+ <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
+ <&clk IMX8MM_CLK_USDHC3_ROOT>;
+ clock-names = "ipg", "ahb", "per";
+ assigned-clocks = <&clk IMX8MM_CLK_USDHC3>;
+ assigned-clock-rates = <400000000>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ sdma1: dma-controller@30bd0000 {
+ compatible = "fsl,imx8mm-sdma", "fsl,imx7d-sdma";
+ reg = <0x30bd0000 0x10000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_SDMA1_ROOT>,
+ <&clk IMX8MM_CLK_SDMA1_ROOT>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
+ fec1: ethernet@30be0000 {
+ compatible = "fsl,imx8mm-fec", "fsl,imx6sx-fec";
+ reg = <0x30be0000 0x10000>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_ENET1_ROOT>,
+ <&clk IMX8MM_CLK_ENET1_ROOT>,
+ <&clk IMX8MM_CLK_ENET_TIMER>,
+ <&clk IMX8MM_CLK_ENET_REF>,
+ <&clk IMX8MM_CLK_ENET_PHY_REF>;
+ clock-names = "ipg", "ahb", "ptp",
+ "enet_clk_ref", "enet_out";
+ assigned-clocks = <&clk IMX8MM_CLK_ENET_AXI>,
+ <&clk IMX8MM_CLK_ENET_TIMER>,
+ <&clk IMX8MM_CLK_ENET_REF>,
+ <&clk IMX8MM_CLK_ENET_TIMER>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_266M>,
+ <&clk IMX8MM_SYS_PLL2_100M>,
+ <&clk IMX8MM_SYS_PLL2_125M>;
+ assigned-clock-rates = <0>, <0>, <125000000>, <100000000>;
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ status = "disabled";
+ };
+
+ };
+
+ aips4: bus@32c00000 {
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ usbotg1: usb@32e40000 {
+ compatible = "fsl,imx8mm-usb", "fsl,imx7d-usb";
+ reg = <0x32e40000 0x200>;
+ interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_USB1_CTRL_ROOT>;
+ clock-names = "usb1_ctrl_root_clk";
+ assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>,
+ <&clk IMX8MM_CLK_USB_CORE_REF>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>,
+ <&clk IMX8MM_SYS_PLL1_100M>;
+ fsl,usbphy = <&usbphynop1>;
+ fsl,usbmisc = <&usbmisc1 0>;
+ status = "disabled";
+ };
+
+ usbphynop1: usbphynop1 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clk IMX8MM_CLK_USB_PHY_REF>;
+ assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>;
+ clock-names = "main_clk";
+ };
+
+ usbmisc1: usbmisc@32e40200 {
+ compatible = "fsl,imx8mm-usbmisc", "fsl,imx7d-usbmisc";
+ #index-cells = <1>;
+ reg = <0x32e40200 0x200>;
+ };
+
+ usbotg2: usb@32e50000 {
+ compatible = "fsl,imx8mm-usb", "fsl,imx7d-usb";
+ reg = <0x32e50000 0x200>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MM_CLK_USB1_CTRL_ROOT>;
+ clock-names = "usb1_ctrl_root_clk";
+ assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>,
+ <&clk IMX8MM_CLK_USB_CORE_REF>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>,
+ <&clk IMX8MM_SYS_PLL1_100M>;
+ fsl,usbphy = <&usbphynop2>;
+ fsl,usbmisc = <&usbmisc2 0>;
+ status = "disabled";
+ };
+
+ usbphynop2: usbphynop2 {
+ compatible = "usb-nop-xceiv";
+ clocks = <&clk IMX8MM_CLK_USB_PHY_REF>;
+ assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>;
+ clock-names = "main_clk";
+ };
+
+ usbmisc2: usbmisc@32e50200 {
+ compatible = "fsl,imx8mm-usbmisc", "fsl,imx7d-usbmisc";
+ #index-cells = <1>;
+ reg = <0x32e50200 0x200>;
+ };
+
+ };
+
+ dma_apbh: dma-controller@33000000 {
+ compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x33000000 0x2000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "gpmi0", "gpmi1", "gpmi2", "gpmi3";
+ #dma-cells = <1>;
+ dma-channels = <4>;
+ clocks = <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
+ };
+
+ gpmi: nand-controller@33002000 {
+ compatible = "fsl,imx8mm-gpmi-nand", "fsl,imx7d-gpmi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "bch";
+ clocks = <&clk IMX8MM_CLK_NAND_ROOT>,
+ <&clk IMX8MM_CLK_NAND_USDHC_BUS_RAWNAND_CLK>;
+ clock-names = "gpmi_io", "gpmi_bch_apb";
+ dmas = <&dma_apbh 0>;
+ dma-names = "rx-tx";
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
index 54737bf1772f..b2038be8bbd7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts
@@ -21,6 +21,12 @@
reg = <0x00000000 0x40000000 0 0xc0000000>;
};
+ pcie0_refclk: pcie0-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -31,6 +37,63 @@
gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ buck2_reg: regulator-buck2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_buck2>;
+ compatible = "regulator-gpio";
+ regulator-name = "vdd_arm";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1000000>;
+ gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ states = <1000000 0x0
+ 900000 0x1>;
+ };
+
+ wm8524: audio-codec {
+ #sound-dai-cells = <0>;
+ compatible = "wlf,wm8524";
+ wlf,mute-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ };
+
+ sound-wm8524 {
+ compatible = "simple-audio-card";
+ simple-audio-card,name = "wm8524-audio";
+ simple-audio-card,format = "i2s";
+ simple-audio-card,frame-master = <&cpudai>;
+ simple-audio-card,bitclock-master = <&cpudai>;
+ simple-audio-card,widgets =
+ "Line", "Left Line Out Jack",
+ "Line", "Right Line Out Jack";
+ simple-audio-card,routing =
+ "Left Line Out Jack", "LINEVOUTL",
+ "Right Line Out Jack", "LINEVOUTR";
+
+ cpudai: simple-audio-card,cpu {
+ sound-dai = <&sai2>;
+ };
+
+ link_codec: simple-audio-card,codec {
+ sound-dai = <&wm8524>;
+ clocks = <&clk IMX8MQ_CLK_SAI2_ROOT>;
+ };
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_1 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_2 {
+ cpu-supply = <&buck2_reg>;
+};
+
+&A53_3 {
+ cpu-supply = <&buck2_reg>;
};
&fec1 {
@@ -52,6 +115,26 @@
};
};
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ assigned-clocks = <&clk IMX8MQ_CLK_SAI2>;
+ assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>;
+ assigned-clock-rates = <24576000>;
+ status = "okay";
+};
+
+&gpio5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wifi_reset>;
+
+ wl-reg-on {
+ gpio-hog;
+ gpios = <29 GPIO_ACTIVE_HIGH>;
+ output-high;
+ };
+};
+
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default";
@@ -143,6 +226,22 @@
};
};
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ reset-gpio = <&gpio5 28 GPIO_ACTIVE_LOW>;
+ clocks = <&clk IMX8MQ_CLK_PCIE1_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE1_AUX>,
+ <&clk IMX8MQ_CLK_PCIE1_PHY>,
+ <&pcie0_refclk>;
+ clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ status = "okay";
+};
+
+&pgc_gpu {
+ power-supply = <&sw1a_reg>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
@@ -203,6 +302,13 @@
};
&iomuxc {
+ pinctrl_buck2: vddarmgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x19
+ >;
+
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3
@@ -230,6 +336,13 @@
>;
};
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C4_SCL_PCIE1_CLKREQ_B 0x76
+ MX8MQ_IOMUXC_UART4_RXD_GPIO5_IO28 0x16
+ >;
+ };
+
pinctrl_qspi: qspigrp {
fsl,pins = <
MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x82
@@ -248,6 +361,16 @@
>;
};
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6
+ MX8MQ_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6
+ MX8MQ_IOMUXC_SAI2_MCLK_SAI2_MCLK 0xd6
+ MX8MQ_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6
+ MX8MQ_IOMUXC_GPIO1_IO08_GPIO1_IO8 0xd6
+ >;
+ };
+
pinctrl_uart1: uart1grp {
fsl,pins = <
MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
@@ -347,4 +470,10 @@
MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
>;
};
+
+ pinctrl_wifi_reset: wifiresetgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART4_TXD_GPIO5_IO29 0x16
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
new file mode 100644
index 000000000000..d2a6da479980
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+
+/dts-v1/;
+
+#include "imx8mq-zii-ultra.dtsi"
+
+/ {
+ model = "ZII i.MX8MQ Ultra RMB3 Board";
+ compatible = "zii,imx8mq-ultra-rmb3", "zii,imx8mq-ultra", "fsl,imx8mq";
+};
+
+&ecspi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1>;
+ cs-gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ nor_flash: flash@0 {
+ compatible = "st,n25q128a13", "jedec,spi-nor";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+};
+
+&i2c2 {
+ temp-sense@48 {
+ compatible = "national,lm75";
+ reg = <0x48>;
+ };
+};
+
+&i2c4 {
+ touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ts>;
+ reg = <0x20>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep-mode = <2>;
+ };
+
+ rmi4-f11@11 {
+ reg = <0x11>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ syna,sensor-type = <1>;
+ };
+
+ rmi4-f12@12 {
+ reg = <0x12>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ syna,sensor-type = <1>;
+ };
+ };
+
+ touchscreen@2a {
+ compatible = "eeti,exc3000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ts>;
+ reg = <0x2a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ touchscreen-inverted-x;
+ touchscreen-swapped-x-y;
+ status = "disabled";
+ };
+};
+
+&usbhub {
+ swap-dx-lanes = <0>;
+};
+
+&iomuxc {
+ pinctrl_ecspi1: ecspi1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x19
+ MX8MQ_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x82
+ MX8MQ_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x82
+ MX8MQ_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x82
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
new file mode 100644
index 000000000000..1084d9330403
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-zest.dts
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+
+/dts-v1/;
+
+#include "imx8mq-zii-ultra.dtsi"
+
+/ {
+ model = "ZII i.MX8MQ Ultra Zest Board";
+ compatible = "zii,imx8mq-ultra-zest", "zii,imx8mq-ultra", "fsl,imx8mq";
+};
+
+&i2c4 {
+ touchscreen@4a {
+ compatible = "atmel,maxtouch";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ts>;
+ reg = <0x4a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
new file mode 100644
index 000000000000..7a1706f969f0
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+
+#include "imx8mq.dtsi"
+
+/ {
+ aliases {
+ mdio-gpio0 = &mdio0;
+ rtc0 = &ds1341;
+ };
+
+ chosen {
+ stdout-path = &uart1;
+ };
+
+ mdio0: bitbang-mdio {
+ compatible = "virtual,mdio-gpio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_mdio_bitbang>, <&pinctrl_fec1_phy_reset>;
+ gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>, /* MDC */
+ <&gpio1 14 GPIO_ACTIVE_HIGH>; /* MDIO */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ reset-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ pcie0_refclk: clock-pcie0-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ pcie1_refclk: clock-pcie1-refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ reg_12p0_main: regulator-12p0-main {
+ compatible = "regulator-fixed";
+ regulator-name = "12V_MAIN";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ };
+
+ reg_5p0_main: regulator-5p0-main {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0_main>;
+ regulator-name = "5V_MAIN";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ reg_3p3_main: regulator-3p3-main {
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_12p0_main>;
+ regulator-name = "3V3V_MAIN";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ reg_5p0_user_usb: regulator-5p0-user-usb {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_user_usb>;
+ vin-supply = <&reg_5p0_main>;
+ regulator-name = "5V_USER_USB";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 12 GPIO_ACTIVE_LOW>;
+ startup-delay-us = <1000>;
+ };
+
+ reg_usdhc2_vmmc: regulator-vsd-3v3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2>;
+ compatible = "regulator-fixed";
+ vin-supply = <&reg_3p3_main>;
+ regulator-name = "3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ reg_arm: regulator-arm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_arm>;
+ compatible = "regulator-gpio";
+ vin-supply = <&reg_12p0_main>;
+ regulator-name = "0V9_ARM";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1000000>;
+ gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ states = <1000000 0x0
+ 900000 0x1>;
+ regulator-always-on;
+ };
+};
+
+&A53_0 {
+ cpu-supply = <&reg_arm>;
+};
+
+&A53_1 {
+ cpu-supply = <&reg_arm>;
+};
+
+&A53_2 {
+ cpu-supply = <&reg_arm>;
+};
+
+&A53_3 {
+ cpu-supply = <&reg_arm>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+
+ phy-handle = <&phy0>;
+ phy-mode = "rmii";
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+
+ switch: switch@0 {
+ compatible = "marvell,mv88e6085";
+ pinctrl-0 = <&pinctrl_switch_irq>;
+ pinctrl-names = "default";
+ reg = <0>;
+ dsa,member = <0 0>;
+ eeprom-length = <512>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "gigabit_proc";
+ phy-handle = <&switchphy0>;
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "netaux";
+ phy-handle = <&switchphy1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "cpu";
+ ethernet = <&fec1>;
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "netright";
+ phy-handle = <&switchphy3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "netleft";
+ phy-handle = <&switchphy4>;
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switchphy0: switchphy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switchphy1: switchphy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switchphy2: switchphy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switchphy3: switchphy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ switchphy4: switchphy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+ };
+};
+
+&gpio3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio3_hog>;
+
+ usb-emulation {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "usb-emulation";
+ };
+
+ usb-mode1 {
+ gpio-hog;
+ gpios = <11 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "usb-mode1";
+ };
+
+ usb-mode2 {
+ gpio-hog;
+ gpios = <13 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "usb-mode2";
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+
+ pmic@8 {
+ compatible = "fsl,pfuze100";
+ reg = <0x8>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3ab {
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <975000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1675000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1625000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3625000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+
+ ds1341: rtc@68 {
+ compatible = "dallas,ds1341";
+ reg = <0x68>;
+ };
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+
+ usbhub: usbhub@2c {
+ compatible ="microchip,usb2513b";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbhub>;
+ reg = <0x2c>;
+ reset-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+
+ rave-sp {
+ compatible = "zii,rave-sp-rdu2";
+ current-speed = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ };
+
+ backlight {
+ compatible = "zii,rave-sp-backlight";
+ };
+
+ pwrbutton {
+ compatible = "zii,rave-sp-pwrbutton";
+ };
+
+ eeprom@a3 {
+ compatible = "zii,rave-sp-eeprom";
+ reg = <0xa3 0x4000>;
+ zii,eeprom-name = "dds-eeprom";
+ };
+
+ eeprom@a4 {
+ compatible = "zii,rave-sp-eeprom";
+ reg = <0xa4 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ zii,eeprom-name = "main-eeprom";
+ };
+ };
+};
+
+&usb3_phy0 {
+ vbus-supply = <&reg_5p0_user_usb>;
+ status = "okay";
+};
+
+&usb_dwc3_0 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb3_phy1 {
+ vbus-supply = <&reg_5p0_main>;
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ reset-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ clocks = <&clk IMX8MQ_CLK_PCIE1_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE1_AUX>,
+ <&clk IMX8MQ_CLK_PCIE1_PHY>,
+ <&pcie0_refclk>;
+ clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ status = "okay";
+};
+
+&pcie1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie1>;
+ reset-gpio = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&pcie1_refclk>;
+ clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ status = "okay";
+};
+
+&pgc_gpu {
+ power-supply = <&sw1a_reg>;
+};
+
+&pgc_vpu {
+ power-supply = <&sw1c_reg>;
+};
+
+&usdhc1 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vqmmc-supply = <&sw4_reg>;
+ bus-width = <8>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&snvs_rtc {
+ status = "disabled";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x23
+ MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MQ_IOMUXC_ENET_TD2_ENET1_TX_CLK 0x1f
+ MX8MQ_IOMUXC_ENET_RXC_ENET1_RX_ER 0x91
+ MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ >;
+ };
+
+ pinctrl_fec1_phy_reset: fec1phyresetgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ENET_RD3_GPIO1_IO29 0x11
+ >;
+ };
+
+ pinctrl_gpio3_hog: gpio3hoggrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_DATA04_GPIO3_IO10 0x6
+ MX8MQ_IOMUXC_NAND_DATA05_GPIO3_IO11 0x6
+ MX8MQ_IOMUXC_NAND_DATA07_GPIO3_IO13 0x6
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C4_SCL_I2C4_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C4_SDA_I2C4_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_mdio_bitbang: bitbangmdiogrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x44
+ MX8MQ_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x64
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART4_RXD_PCIE1_CLKREQ_B 0x66
+ MX8MQ_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x6
+ >;
+ };
+
+ pinctrl_pcie1: pcie1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART4_TXD_PCIE2_CLKREQ_B 0x66
+ MX8MQ_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x6
+ >;
+ };
+
+ pinctrl_reg_arm: regarmgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_READY_B_GPIO3_IO16 0x19
+ >;
+ };
+
+ pinctrl_reg_usdhc2: regusdhc2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_reg_user_usb: reguserusbgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_DATA06_GPIO3_IO12 0x6
+ >;
+ };
+
+ pinctrl_switch_irq: switchgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x41
+ >;
+ };
+
+ pinctrl_ts: tsgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x96
+ MX8MQ_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x96
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
+ MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART2_RXD_UART2_DCE_RX 0x49
+ MX8MQ_IOMUXC_UART2_TXD_UART2_DCE_TX 0x49
+ >;
+ };
+
+ pinctrl_usbhub: usbhubgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x83
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc3
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc3
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc3
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc3
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc3
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc3
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc3
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc3
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc3
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x83
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x83
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc3
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc3
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc3
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc3
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc3
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x85
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc5
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc5
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc5
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc5
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc5
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x87
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc7
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc7
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc7
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc7
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc7
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 9155bd4784eb..6d635ba0904c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -6,8 +6,10 @@
#include <dt-bindings/clock/imx8mq-clock.h>
#include <dt-bindings/power/imx8mq-power.h>
+#include <dt-bindings/reset/imx8mq-reset.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
#include "imx8mq-pinfunc.h"
/ {
@@ -87,32 +89,48 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x0>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ #cooling-cells = <2>;
};
A53_1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x1>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ #cooling-cells = <2>;
};
A53_2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x2>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ #cooling-cells = <2>;
};
A53_3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x3>;
+ clock-latency = <61036>; /* two CLK32 periods */
+ clocks = <&clk IMX8MQ_CLK_ARM>;
enable-method = "psci";
next-level-cache = <&A53_L2>;
+ operating-points-v2 = <&a53_opp_table>;
+ #cooling-cells = <2>;
};
A53_L2: l2-cache0 {
@@ -120,6 +138,24 @@
};
};
+ a53_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-800000000 {
+ opp-hz = /bits/ 64 <800000000>;
+ opp-microvolt = <900000>;
+ clock-latency-ns = <150000>;
+ };
+
+ opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <150000>;
+ opp-suspend;
+ };
+ };
+
pmu {
compatible = "arm,cortex-a53-pmu";
interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
@@ -132,6 +168,67 @@
method = "smc";
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu 0>;
+
+ trips {
+ cpu_alert: cpu-alert {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert>;
+ cooling-device =
+ <&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ gpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu 1>;
+
+ trips {
+ gpu-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ vpu-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <2000>;
+ thermal-sensors = <&tmu 2>;
+
+ trips {
+ vpu-crit {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* Physical Secure */
@@ -160,6 +257,7 @@
reg = <0x30200000 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPIO1_ROOT>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -171,6 +269,7 @@
reg = <0x30210000 0x10000>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPIO2_ROOT>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -182,6 +281,7 @@
reg = <0x30220000 0x10000>;
interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPIO3_ROOT>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -193,6 +293,7 @@
reg = <0x30230000 0x10000>;
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPIO4_ROOT>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -204,12 +305,65 @@
reg = <0x30240000 0x10000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPIO5_ROOT>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
+ tmu: tmu@30260000 {
+ compatible = "fsl,imx8mq-tmu";
+ reg = <0x30260000 0x10000>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
+ fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
+ fsl,tmu-calibration = <0x00000000 0x00000023
+ 0x00000001 0x00000029
+ 0x00000002 0x0000002f
+ 0x00000003 0x00000035
+ 0x00000004 0x0000003d
+ 0x00000005 0x00000043
+ 0x00000006 0x0000004b
+ 0x00000007 0x00000051
+ 0x00000008 0x00000057
+ 0x00000009 0x0000005f
+ 0x0000000a 0x00000067
+ 0x0000000b 0x0000006f
+
+ 0x00010000 0x0000001b
+ 0x00010001 0x00000023
+ 0x00010002 0x0000002b
+ 0x00010003 0x00000033
+ 0x00010004 0x0000003b
+ 0x00010005 0x00000043
+ 0x00010006 0x0000004b
+ 0x00010007 0x00000055
+ 0x00010008 0x0000005d
+ 0x00010009 0x00000067
+ 0x0001000a 0x00000070
+
+ 0x00020000 0x00000017
+ 0x00020001 0x00000023
+ 0x00020002 0x0000002d
+ 0x00020003 0x00000037
+ 0x00020004 0x00000041
+ 0x00020005 0x0000004b
+ 0x00020006 0x00000057
+ 0x00020007 0x00000063
+ 0x00020008 0x0000006f
+
+ 0x00030000 0x00000015
+ 0x00030001 0x00000021
+ 0x00030002 0x0000002d
+ 0x00030003 0x00000039
+ 0x00030004 0x00000045
+ 0x00030005 0x00000053
+ 0x00030006 0x0000005f
+ 0x00030007 0x00000071>;
+ #thermal-sensor-cells = <1>;
+ };
+
wdog1: watchdog@30280000 {
compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt";
reg = <0x30280000 0x10000>;
@@ -234,16 +388,35 @@
status = "disabled";
};
+ sdma2: sdma@302c0000 {
+ compatible = "fsl,imx8mq-sdma","fsl,imx7d-sdma";
+ reg = <0x302c0000 0x10000>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SDMA2_ROOT>,
+ <&clk IMX8MQ_CLK_SDMA2_ROOT>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
iomuxc: iomuxc@30330000 {
compatible = "fsl,imx8mq-iomuxc";
reg = <0x30330000 0x10000>;
};
iomuxc_gpr: syscon@30340000 {
- compatible = "fsl,imx8mq-iomuxc-gpr", "syscon";
+ compatible = "fsl,imx8mq-iomuxc-gpr", "fsl,imx6q-iomuxc-gpr", "syscon";
reg = <0x30340000 0x10000>;
};
+ ocotp: ocotp-ctrl@30350000 {
+ compatible = "fsl,imx8mq-ocotp", "syscon";
+ reg = <0x30350000 0x10000>;
+ clocks = <&clk IMX8MQ_CLK_OCOTP_ROOT>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
anatop: syscon@30360000 {
compatible = "fsl,imx8mq-anatop", "syscon";
reg = <0x30360000 0x10000>;
@@ -278,6 +451,12 @@
"clk_ext3", "clk_ext4";
};
+ src: reset-controller@30390000 {
+ compatible = "fsl,imx8mq-src", "syscon";
+ reg = <0x30390000 0x10000>;
+ #reset-cells = <1>;
+ };
+
gpc: gpc@303a0000 {
compatible = "fsl,imx8mq-gpc";
reg = <0x303a0000 0x10000>;
@@ -294,9 +473,25 @@
reg = <IMX8M_POWER_DOMAIN_MIPI>;
};
- pgc_pcie1: power-domain@1 {
+ /*
+ * As per comment in ATF source code:
+ *
+ * PCIE1 and PCIE2 share the
+ * same reset signal, if we
+ * power down PCIE2, PCIE1
+ * will be held in reset too.
+ *
+ * So instead of creating two
+ * separate power domains for
+ * PCIE1 and PCIE2 we create a
+ * link between both and use
+ * it as a shared PCIE power
+ * domain.
+ */
+ pgc_pcie: power-domain@1 {
#power-domain-cells = <0>;
reg = <IMX8M_POWER_DOMAIN_PCIE1>;
+ power-domains = <&pgc_pcie2>;
};
pgc_otg1: power-domain@2 {
@@ -478,6 +673,21 @@
status = "disabled";
};
+ sai2: sai@308b0000 {
+ #sound-dai-cells = <0>;
+ compatible = "fsl,imx8mq-sai",
+ "fsl,imx6sx-sai";
+ reg = <0x308b0000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
+ <&clk IMX8MQ_CLK_SAI2_ROOT>,
+ <&clk IMX8MQ_CLK_DUMMY>, <&clk IMX8MQ_CLK_DUMMY>;
+ clock-names = "bus", "mclk1", "mclk2", "mclk3";
+ dmas = <&sdma1 10 24 0>, <&sdma1 11 24 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
i2c1: i2c@30a20000 {
compatible = "fsl,imx8mq-i2c", "fsl,imx21-i2c";
reg = <0x30a20000 0x10000>;
@@ -575,6 +785,17 @@
status = "disabled";
};
+ sdma1: sdma@30bd0000 {
+ compatible = "fsl,imx8mq-sdma","fsl,imx7d-sdma";
+ reg = <0x30bd0000 0x10000>;
+ interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SDMA1_ROOT>,
+ <&clk IMX8MQ_CLK_AHB>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
+ };
+
fec1: ethernet@30be0000 {
compatible = "fsl,imx8mq-fec", "fsl,imx6sx-fec";
reg = <0x30be0000 0x10000>;
@@ -594,6 +815,30 @@
};
};
+ gpu: gpu@38000000 {
+ compatible = "vivante,gc";
+ reg = <0x38000000 0x40000>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_GPU_ROOT>,
+ <&clk IMX8MQ_CLK_GPU_SHADER_DIV>,
+ <&clk IMX8MQ_CLK_GPU_AXI>,
+ <&clk IMX8MQ_CLK_GPU_AHB>;
+ clock-names = "core", "shader", "bus", "reg";
+ assigned-clocks = <&clk IMX8MQ_CLK_GPU_CORE_SRC>,
+ <&clk IMX8MQ_CLK_GPU_SHADER_SRC>,
+ <&clk IMX8MQ_CLK_GPU_AXI>,
+ <&clk IMX8MQ_CLK_GPU_AHB>,
+ <&clk IMX8MQ_GPU_PLL_BYPASS>;
+ assigned-clock-parents = <&clk IMX8MQ_GPU_PLL_OUT>,
+ <&clk IMX8MQ_GPU_PLL_OUT>,
+ <&clk IMX8MQ_GPU_PLL_OUT>,
+ <&clk IMX8MQ_GPU_PLL_OUT>,
+ <&clk IMX8MQ_GPU_PLL>;
+ assigned-clock-rates = <800000000>, <800000000>,
+ <800000000>, <800000000>, <0>;
+ power-domains = <&pgc_gpu>;
+ };
+
usb_dwc3_0: usb@38100000 {
compatible = "fsl,imx8mq-dwc3", "snps,dwc3";
reg = <0x38100000 0x10000>;
@@ -658,6 +903,66 @@
status = "disabled";
};
+
+ pcie0: pcie@33800000 {
+ compatible = "fsl,imx8mq-pcie";
+ reg = <0x33800000 0x400000>,
+ <0x1ff00000 0x80000>;
+ reg-names = "dbi", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0 0x00000000 0x1ff80000 0 0x00010000 /* downstream I/O 64KB */
+ 0x82000000 0 0x18000000 0x18000000 0 0x07f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+ num-viewport = <4>;
+ interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &gic GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,max-link-speed = <2>;
+ power-domains = <&pgc_pcie>;
+ resets = <&src IMX8MQ_RESET_PCIEPHY>,
+ <&src IMX8MQ_RESET_PCIE_CTRL_APPS_EN>,
+ <&src IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF>;
+ reset-names = "pciephy", "apps", "turnoff";
+ status = "disabled";
+ };
+
+ pcie1: pcie@33c00000 {
+ compatible = "fsl,imx8mq-pcie";
+ reg = <0x33c00000 0x400000>,
+ <0x27f00000 0x80000>;
+ reg-names = "dbi", "config";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0x00000000 0x27f80000 0 0x00010000 /* downstream I/O 64KB */
+ 0x82000000 0 0x20000000 0x20000000 0 0x07f00000>; /* non-prefetchable memory */
+ num-lanes = <1>;
+ num-viewport = <4>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &gic GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ fsl,max-link-speed = <2>;
+ power-domains = <&pgc_pcie>;
+ resets = <&src IMX8MQ_RESET_PCIEPHY2>,
+ <&src IMX8MQ_RESET_PCIE2_CTRL_APPS_EN>,
+ <&src IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF>;
+ reset-names = "pciephy", "apps", "turnoff";
+ status = "disabled";
+ };
+
gic: interrupt-controller@38800000 {
compatible = "arm,gic-v3";
reg = <0x38800000 0x10000>, /* GIC Dist */
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index 03aad66545c5..bfdada2db176 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -60,6 +60,82 @@
};
};
+&adma_i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>;
+ status = "okay";
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9646", "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ reset-gpios = <&lsio_gpio1 1 GPIO_ACTIVE_LOW>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ max7322: gpio@68 {
+ compatible = "maxim,max7322";
+ reg = <0x68>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ pressure-sensor@60 {
+ compatible = "fsl,mpl3115";
+ reg = <0x60>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ pca9557_a: gpio@1a {
+ compatible = "nxp,pca9557";
+ reg = <0x1a>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9557_b: gpio@1d {
+ compatible = "nxp,pca9557";
+ reg = <0x1d>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ light-sensor@44 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_isl29023>;
+ compatible = "isil,isl29023";
+ reg = <0x44>;
+ interrupt-parent = <&lsio_gpio1>;
+ interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
+ };
+};
+
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -100,6 +176,25 @@
>;
};
+ pinctrl_ioexp_rst: ioexp_rst_grp {
+ fsl,pins = <
+ IMX8QXP_SPI2_SDO_LSIO_GPIO1_IO01 0x06000021
+ >;
+ };
+
+ pinctrl_isl29023: isl29023grp {
+ fsl,pins = <
+ IMX8QXP_SPI2_SDI_LSIO_GPIO1_IO02 0x00000021
+ >;
+ };
+
+ pinctrl_lpi2c1: lpi2c1grp {
+ fsl,pins = <
+ IMX8QXP_USB_SS3_TC1_ADMA_I2C1_SCL 0x06000021
+ IMX8QXP_USB_SS3_TC3_ADMA_I2C1_SDA 0x06000021
+ >;
+ };
+
pinctrl_lpuart0: lpuart0grp {
fsl,pins = <
IMX8QXP_UART0_RX_ADMA_UART0_RX 0x06000020
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index 4c3dd95ed488..0683ee2a48ae 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -21,6 +21,7 @@
mmc1 = &usdhc2;
mmc2 = &usdhc3;
serial0 = &adma_lpuart0;
+ mu1 = &lsio_mu1;
};
cpus {
@@ -34,6 +35,9 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
+ clocks = <&clk IMX_A35_CLK>;
+ operating-points-v2 = <&a35_opp_table>;
+ #cooling-cells = <2>;
};
A35_1: cpu@1 {
@@ -42,6 +46,9 @@
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
+ clocks = <&clk IMX_A35_CLK>;
+ operating-points-v2 = <&a35_opp_table>;
+ #cooling-cells = <2>;
};
A35_2: cpu@2 {
@@ -50,6 +57,9 @@
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
+ clocks = <&clk IMX_A35_CLK>;
+ operating-points-v2 = <&a35_opp_table>;
+ #cooling-cells = <2>;
};
A35_3: cpu@3 {
@@ -58,6 +68,9 @@
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
+ clocks = <&clk IMX_A35_CLK>;
+ operating-points-v2 = <&a35_opp_table>;
+ #cooling-cells = <2>;
};
A35_L2: l2-cache0 {
@@ -65,6 +78,24 @@
};
};
+ a35_opp_table: opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-900000000 {
+ opp-hz = /bits/ 64 <900000000>;
+ opp-microvolt = <1000000>;
+ clock-latency-ns = <150000>;
+ };
+
+ opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <1100000>;
+ clock-latency-ns = <150000>;
+ opp-suspend;
+ };
+ };
+
gic: interrupt-controller@51a00000 {
compatible = "arm,gic-v3";
reg = <0x0 0x51a00000 0 0x10000>, /* GIC Dist */
@@ -87,7 +118,8 @@
scu {
compatible = "fsl,imx-scu";
mbox-names = "tx0", "tx1", "tx2", "tx3",
- "rx0", "rx1", "rx2", "rx3";
+ "rx0", "rx1", "rx2", "rx3",
+ "gip3";
mboxes = <&lsio_mu1 0 0
&lsio_mu1 0 1
&lsio_mu1 0 2
@@ -95,7 +127,8 @@
&lsio_mu1 1 0
&lsio_mu1 1 1
&lsio_mu1 1 2
- &lsio_mu1 1 3>;
+ &lsio_mu1 1 3
+ &lsio_mu1 3 3>;
clk: clock-controller {
compatible = "fsl,imx8qxp-clk";
@@ -163,6 +196,39 @@
status = "disabled";
};
+ adma_lpuart1: serial@5a070000 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+ reg = <0x5a070000 0x1000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_UART1_BAUD_CLK>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_UART_1>;
+ status = "disabled";
+ };
+
+ adma_lpuart2: serial@5a080000 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+ reg = <0x5a080000 0x1000>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_UART2_BAUD_CLK>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_UART_2>;
+ status = "disabled";
+ };
+
+ adma_lpuart3: serial@5a090000 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+ reg = <0x5a090000 0x1000>;
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ clocks = <&adma_lpcg IMX_ADMA_LPCG_UART3_BAUD_CLK>;
+ clock-names = "ipg";
+ power-domains = <&pd IMX_SC_R_UART_3>;
+ status = "disabled";
+ };
+
adma_i2c0: i2c@5a800000 {
compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
reg = <0x5a800000 0x4000>;
@@ -328,7 +394,7 @@
compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1b0000 0x10000>;
interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <0>;
+ #mbox-cells = <2>;
status = "disabled";
};
@@ -339,11 +405,19 @@
#mbox-cells = <2>;
};
+ lsio_mu2: mailbox@5d1d0000 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+ reg = <0x5d1d0000 0x10000>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
lsio_mu3: mailbox@5d1e0000 {
compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1e0000 0x10000>;
interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <0>;
+ #mbox-cells = <2>;
status = "disabled";
};
@@ -351,7 +425,7 @@
compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
reg = <0x5d1f0000 0x10000>;
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <0>;
+ #mbox-cells = <2>;
status = "disabled";
};
@@ -443,4 +517,9 @@
power-domains = <&pd IMX_SC_R_GPIO_7>;
};
};
+
+ watchdog {
+ compatible = "fsl,imx8qxp-sc-wdt", "fsl,imx-sc-wdt";
+ timeout-sec = <60>;
+ };
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
index 2f19e0e5b7cf..aa6a8ad31be2 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi
@@ -478,6 +478,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xfdf00000 0x0 0x1000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-names = "rx", "tx";
+ dmas = <&dma0 2 &dma0 3>;
clocks = <&crg_ctrl HI3660_CLK_GATE_UART1>,
<&crg_ctrl HI3660_CLK_GATE_UART1>;
clock-names = "uartclk", "apb_pclk";
@@ -490,6 +492,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xfdf03000 0x0 0x1000>;
interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ dma-names = "rx", "tx";
+ dmas = <&dma0 4 &dma0 5>;
clocks = <&crg_ctrl HI3660_CLK_GATE_UART2>,
<&crg_ctrl HI3660_PCLK>;
clock-names = "uartclk", "apb_pclk";
@@ -514,6 +518,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xfdf01000 0x0 0x1000>;
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ dma-names = "rx", "tx";
+ dmas = <&dma0 6 &dma0 7>;
clocks = <&crg_ctrl HI3660_CLK_GATE_UART4>,
<&crg_ctrl HI3660_CLK_GATE_UART4>;
clock-names = "uartclk", "apb_pclk";
@@ -526,6 +532,8 @@
compatible = "arm,pl011", "arm,primecell";
reg = <0x0 0xfdf05000 0x0 0x1000>;
interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+ dma-names = "rx", "tx";
+ dmas = <&dma0 8 &dma0 9>;
clocks = <&crg_ctrl HI3660_CLK_GATE_UART5>,
<&crg_ctrl HI3660_CLK_GATE_UART5>;
clock-names = "uartclk", "apb_pclk";
@@ -552,13 +560,23 @@
#dma-cells = <1>;
dma-channels = <16>;
dma-requests = <32>;
- dma-min-chan = <1>;
+ dma-channel-mask = <0xfffe>;
interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&crg_ctrl HI3660_CLK_GATE_DMAC>;
dma-no-cci;
dma-type = "hi3660_dma";
};
+ asp_dmac: dma-controller@e804b000 {
+ compatible = "hisilicon,hisi-pcm-asp-dma-1.0";
+ reg = <0x0 0xe804b000 0x0 0x1000>;
+ #dma-cells = <1>;
+ dma-channels = <16>;
+ dma-requests = <32>;
+ interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "asp_dma_irq";
+ };
+
rtc0: rtc@fff04000 {
compatible = "arm,pl031", "arm,primecell";
reg = <0x0 0Xfff04000 0x0 0x1000>;
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts b/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
index c9775b66629f..7dac33d4fd5c 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3670-hikey970.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "hi3670.dtsi"
#include "hikey970-pinctrl.dtsi"
@@ -17,6 +18,8 @@
compatible = "hisilicon,hi3670-hikey970", "hisilicon,hi3670";
aliases {
+ mshc1 = &dwmmc1;
+ mshc2 = &dwmmc2;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -35,6 +38,37 @@
/* expect bootloader to fill in this region */
reg = <0x0 0x0 0x0 0x0>;
};
+
+ sd_1v8: regulator-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-1.8V";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sd_3v3: regulator-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-3.3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ wlan_en: wlan-en-1-8v {
+ compatible = "regulator-fixed";
+ regulator-name = "wlan-en-regulator";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ /* GPIO_051_WIFI_EN */
+ gpio = <&gpio6 3 0>;
+
+ /* WLAN card specific delay */
+ startup-delay-us = <70000>;
+ enable-active-high;
+ };
};
/*
@@ -354,6 +388,47 @@
"GPIO_231_HDMI_INT";
};
+&dwmmc1 {
+ bus-width = <0x4>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ cap-sd-highspeed;
+ disable-wp;
+ cd-inverted;
+ cd-gpios = <&gpio25 5 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sd_pmx_func
+ &sd_clk_cfg_func
+ &sd_cfg_func>;
+ vmmc-supply = <&sd_3v3>;
+ vqmmc-supply = <&sd_1v8>;
+ status = "okay";
+};
+
+&dwmmc2 { /* WIFI */
+ bus-width = <0x4>;
+ non-removable;
+ broken-cd;
+ cap-power-off-card;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio_pmx_func
+ &sdio_clk_cfg_func
+ &sdio_cfg_func>;
+ /* WL_EN */
+ vmmc-supply = <&wlan_en>;
+ status = "ok";
+
+ wlcore: wlcore@2 {
+ compatible = "ti,wl1837";
+ reg = <2>; /* sdio func num */
+ /* WL_IRQ, GPIO_177_WL_WAKEUP_AP */
+ interrupt-parent = <&gpio22>;
+ interrupts = <1 IRQ_TYPE_EDGE_RISING>;
+ };
+};
+
&uart0 {
/* On High speed expansion header */
label = "HS-UART0";
diff --git a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
index 2ed06e4588b8..2dcffa3ed218 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi
@@ -151,6 +151,13 @@
#clock-cells = <1>;
};
+ crg_rst: crg_rst_controller {
+ compatible = "hisilicon,hi3670-reset",
+ "hisilicon,hi3660-reset";
+ #reset-cells = <2>;
+ hisi,rst-syscon = <&crg_ctrl>;
+ };
+
pctrl: pctrl@e8a09000 {
compatible = "hisilicon,hi3670-pctrl", "syscon";
reg = <0x0 0xe8a09000 0x0 0x1000>;
@@ -647,5 +654,60 @@
clocks = <&sctrl HI3670_PCLK_AO_GPIO6>;
clock-names = "apb_pclk";
};
+
+ /* UFS */
+ ufs: ufs@ff3c0000 {
+ compatible = "hisilicon,hi3670-ufs", "jedec,ufs-2.1";
+ /* 0: HCI standard */
+ /* 1: UFS SYS CTRL */
+ reg = <0x0 0xff3c0000 0x0 0x1000>,
+ <0x0 0xff3e0000 0x0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_UFSIO_REF>,
+ <&crg_ctrl HI3670_CLK_GATE_UFS_SUBSYS>;
+ clock-names = "ref_clk", "phy_clk";
+ freq-table-hz = <0 0>, <0 0>;
+ /* offset: 0x84; bit: 12 */
+ resets = <&crg_rst 0x84 12>;
+ reset-names = "rst";
+ };
+
+ /* SD */
+ dwmmc1: dwmmc1@ff37f000 {
+ compatible = "hisilicon,hi3670-dw-mshc",
+ "hisilicon,hi3660-dw-mshc";
+ reg = <0x0 0xff37f000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_SD>,
+ <&crg_ctrl HI3670_HCLK_GATE_SD>;
+ clock-names = "ciu", "biu";
+ clock-frequency = <3200000>;
+ resets = <&crg_rst 0x94 18>;
+ reset-names = "reset";
+ hisilicon,peripheral-syscon = <&sctrl>;
+ card-detect-delay = <200>;
+ status = "disabled";
+ };
+
+ /* SDIO */
+ dwmmc2: dwmmc2@fc183000 {
+ compatible = "hisilicon,hi3670-dw-mshc",
+ "hisilicon,hi3660-dw-mshc";
+ reg = <0x0 0xfc183000 0x0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&crg_ctrl HI3670_CLK_GATE_SDIO>,
+ <&crg_ctrl HI3670_HCLK_GATE_SDIO>;
+ clock-names = "ciu", "biu";
+ clock-frequency = <3200000>;
+ resets = <&crg_rst 0x94 20>;
+ reset-names = "reset";
+ card-detect-delay = <200>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
index 67bb52d43619..d456b0aa6f58 100644
--- a/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hikey970-pinctrl.dtsi
@@ -196,6 +196,16 @@
/* pin base, nr pins & gpio function */
pinctrl-single,gpio-range = <&range 0 10 0>;
+ sdio_pmx_func: sdio_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SDIO_CLK */
+ 0x004 MUX_M1 /* SDIO_CMD */
+ 0x008 MUX_M1 /* SDIO_DATA0 */
+ 0x00c MUX_M1 /* SDIO_DATA1 */
+ 0x010 MUX_M1 /* SDIO_DATA2 */
+ 0x014 MUX_M1 /* SDIO_DATA3 */
+ >;
+ };
};
pmx6: pinmux@fc182800 {
@@ -203,6 +213,52 @@
reg = <0x0 0xfc182800 0x0 0x028>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <0x20>;
+
+ sdio_clk_cfg_func: sdio_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SDIO_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA DRIVE6_MASK
+ >;
+ };
+
+ sdio_cfg_func: sdio_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SDIO_CMD */
+ 0x008 0x0 /* SDIO_DATA0 */
+ 0x00c 0x0 /* SDIO_DATA1 */
+ 0x010 0x0 /* SDIO_DATA2 */
+ 0x014 0x0 /* SDIO_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA DRIVE6_MASK
+ >;
+ };
};
pmx7: pinmux@ff37e000 {
@@ -214,6 +270,17 @@
pinctrl-single,function-mask = <7>;
/* pin base, nr pins & gpio function */
pinctrl-single,gpio-range = <&range 0 12 0>;
+
+ sd_pmx_func: sd_pmx_func {
+ pinctrl-single,pins = <
+ 0x000 MUX_M1 /* SD_CLK */
+ 0x004 MUX_M1 /* SD_CMD */
+ 0x008 MUX_M1 /* SD_DATA0 */
+ 0x00c MUX_M1 /* SD_DATA1 */
+ 0x010 MUX_M1 /* SD_DATA2 */
+ 0x014 MUX_M1 /* SD_DATA3 */
+ >;
+ };
};
pmx8: pinmux@ff37e800 {
@@ -221,6 +288,54 @@
reg = <0x0 0xff37e800 0x0 0x030>;
#pinctrl-cells = <1>;
pinctrl-single,register-width = <0x20>;
+
+ sd_clk_cfg_func: sd_clk_cfg_func {
+ pinctrl-single,pins = <
+ 0x000 0x0 /* SD_CLK */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_DIS
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_32MA
+ DRIVE6_MASK
+ >;
+ };
+
+ sd_cfg_func: sd_cfg_func {
+ pinctrl-single,pins = <
+ 0x004 0x0 /* SD_CMD */
+ 0x008 0x0 /* SD_DATA0 */
+ 0x00c 0x0 /* SD_DATA1 */
+ 0x010 0x0 /* SD_DATA2 */
+ 0x014 0x0 /* SD_DATA3 */
+ >;
+ pinctrl-single,bias-pulldown = <
+ PULL_DIS
+ PULL_DOWN
+ PULL_DIS
+ PULL_DOWN
+ >;
+ pinctrl-single,bias-pullup = <
+ PULL_UP
+ PULL_UP
+ PULL_DIS
+ PULL_UP
+ >;
+ pinctrl-single,drive-strength = <
+ DRIVE6_19MA
+ DRIVE6_MASK
+ >;
+ };
};
pmx1: pinmux@fff11000 {
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
new file mode 100644
index 000000000000..9606ac85ac70
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -0,0 +1 @@
+dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
new file mode 100644
index 000000000000..e4ceb3a73c81
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+/dts-v1/;
+#include <dt-bindings/reset/altr,rst-mgr-s10.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ compatible = "intel,socfpga-agilex";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x0>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x1>;
+ };
+
+ cpu2: cpu@2 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x2>;
+ };
+
+ cpu3: cpu@3 {
+ compatible = "arm,cortex-a53";
+ device_type = "cpu";
+ enable-method = "psci";
+ reg = <0x3>;
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <0 120 8>,
+ <0 121 8>,
+ <0 122 8>,
+ <0 123 8>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ interrupt-parent = <&intc>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ intc: intc@fffc1000 {
+ compatible = "arm,gic-400", "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0xfffc1000 0x0 0x1000>,
+ <0x0 0xfffc2000 0x0 0x2000>,
+ <0x0 0xfffc4000 0x0 0x2000>,
+ <0x0 0xfffc6000 0x0 0x2000>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ device_type = "soc";
+ interrupt-parent = <&intc>;
+ ranges = <0 0 0 0xffffffff>;
+
+ gmac0: ethernet@ff800000 {
+ compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff800000 0x2000>;
+ interrupts = <0 90 4>;
+ interrupt-names = "macirq";
+ mac-address = [00 00 00 00 00 00];
+ resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
+ reset-names = "stmmaceth", "stmmaceth-ocp";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 1>;
+ status = "disabled";
+ };
+
+ gmac1: ethernet@ff802000 {
+ compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff802000 0x2000>;
+ interrupts = <0 91 4>;
+ interrupt-names = "macirq";
+ mac-address = [00 00 00 00 00 00];
+ resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
+ reset-names = "stmmaceth", "stmmaceth-ocp";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 2>;
+ status = "disabled";
+ };
+
+ gmac2: ethernet@ff804000 {
+ compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ reg = <0xff804000 0x2000>;
+ interrupts = <0 92 4>;
+ interrupt-names = "macirq";
+ mac-address = [00 00 00 00 00 00];
+ resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
+ reset-names = "stmmaceth", "stmmaceth-ocp";
+ tx-fifo-depth = <16384>;
+ rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
+ iommus = <&smmu 3>;
+ status = "disabled";
+ };
+
+ gpio0: gpio@ffc03200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xffc03200 0x100>;
+ resets = <&rst GPIO0_RESET>;
+ status = "disabled";
+
+ porta: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <24>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <0 110 4>;
+ };
+ };
+
+ gpio1: gpio@ffc03300 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xffc03300 0x100>;
+ resets = <&rst GPIO1_RESET>;
+ status = "disabled";
+
+ portb: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <24>;
+ reg = <0>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <0 111 4>;
+ };
+ };
+
+ i2c0: i2c@ffc02800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xffc02800 0x100>;
+ interrupts = <0 103 4>;
+ resets = <&rst I2C0_RESET>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ffc02900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xffc02900 0x100>;
+ interrupts = <0 104 4>;
+ resets = <&rst I2C1_RESET>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ffc02a00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xffc02a00 0x100>;
+ interrupts = <0 105 4>;
+ resets = <&rst I2C2_RESET>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ffc02b00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xffc02b00 0x100>;
+ interrupts = <0 106 4>;
+ resets = <&rst I2C3_RESET>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ffc02c00 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xffc02c00 0x100>;
+ interrupts = <0 107 4>;
+ resets = <&rst I2C4_RESET>;
+ status = "disabled";
+ };
+
+ mmc: dwmmc0@ff808000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "altr,socfpga-dw-mshc";
+ reg = <0xff808000 0x1000>;
+ interrupts = <0 96 4>;
+ fifo-depth = <0x400>;
+ resets = <&rst SDMMC_RESET>;
+ reset-names = "reset";
+ iommus = <&smmu 5>;
+ status = "disabled";
+ };
+
+ ocram: sram@ffe00000 {
+ compatible = "mmio-sram";
+ reg = <0xffe00000 0x40000>;
+ };
+
+ pdma: pdma@ffda0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0xffda0000 0x1000>;
+ interrupts = <0 81 4>,
+ <0 82 4>,
+ <0 83 4>,
+ <0 84 4>,
+ <0 85 4>,
+ <0 86 4>,
+ <0 87 4>,
+ <0 88 4>,
+ <0 89 4>;
+ #dma-cells = <1>;
+ #dma-channels = <8>;
+ #dma-requests = <32>;
+ };
+
+ rst: rstmgr@ffd11000 {
+ #reset-cells = <1>;
+ compatible = "altr,stratix10-rst-mgr";
+ reg = <0xffd11000 0x100>;
+ };
+
+ smmu: iommu@fa000000 {
+ compatible = "arm,mmu-500", "arm,smmu-v2";
+ reg = <0xfa000000 0x40000>;
+ #global-interrupts = <2>;
+ #iommu-cells = <1>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 128 4>, /* Global Secure Fault */
+ <0 129 4>, /* Global Non-secure Fault */
+ /* Non-secure Context Interrupts (32) */
+ <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>,
+ <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>,
+ <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>,
+ <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>,
+ <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>,
+ <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>,
+ <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>,
+ <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>;
+ stream-match-mask = <0x7ff0>;
+ status = "disabled";
+ };
+
+ spi0: spi@ffda4000 {
+ compatible = "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xffda4000 0x1000>;
+ interrupts = <0 99 4>;
+ resets = <&rst SPIM0_RESET>;
+ reg-io-width = <4>;
+ num-cs = <4>;
+ status = "disabled";
+ };
+
+ spi1: spi@ffda5000 {
+ compatible = "snps,dw-apb-ssi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xffda5000 0x1000>;
+ interrupts = <0 100 4>;
+ resets = <&rst SPIM1_RESET>;
+ reg-io-width = <4>;
+ num-cs = <4>;
+ status = "disabled";
+ };
+
+ sysmgr: sysmgr@ffd12000 {
+ compatible = "altr,sys-mgr", "syscon";
+ reg = <0xffd12000 0x500>;
+ };
+
+ /* Local timer */
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ timer0: timer0@ffc03000 {
+ compatible = "snps,dw-apb-timer";
+ interrupts = <0 113 4>;
+ reg = <0xffc03000 0x100>;
+ };
+
+ timer1: timer1@ffc03100 {
+ compatible = "snps,dw-apb-timer";
+ interrupts = <0 114 4>;
+ reg = <0xffc03100 0x100>;
+ };
+
+ timer2: timer2@ffd00000 {
+ compatible = "snps,dw-apb-timer";
+ interrupts = <0 115 4>;
+ reg = <0xffd00000 0x100>;
+ };
+
+ timer3: timer3@ffd00100 {
+ compatible = "snps,dw-apb-timer";
+ interrupts = <0 116 4>;
+ reg = <0xffd00100 0x100>;
+ };
+
+ uart0: serial0@ffc02000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xffc02000 0x100>;
+ interrupts = <0 108 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst UART0_RESET>;
+ status = "disabled";
+ };
+
+ uart1: serial1@ffc02100 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xffc02100 0x100>;
+ interrupts = <0 109 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ resets = <&rst UART1_RESET>;
+ status = "disabled";
+ };
+
+ usbphy0: usbphy@0 {
+ #phy-cells = <0>;
+ compatible = "usb-nop-xceiv";
+ status = "okay";
+ };
+
+ usb0: usb@ffb00000 {
+ compatible = "snps,dwc2";
+ reg = <0xffb00000 0x40000>;
+ interrupts = <0 93 4>;
+ phys = <&usbphy0>;
+ phy-names = "usb2-phy";
+ resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>;
+ reset-names = "dwc2", "dwc2-ecc";
+ iommus = <&smmu 6>;
+ status = "disabled";
+ };
+
+ usb1: usb@ffb40000 {
+ compatible = "snps,dwc2";
+ reg = <0xffb40000 0x40000>;
+ interrupts = <0 94 4>;
+ phys = <&usbphy0>;
+ phy-names = "usb2-phy";
+ resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>;
+ reset-names = "dwc2", "dwc2-ecc";
+ iommus = <&smmu 7>;
+ status = "disabled";
+ };
+
+ watchdog0: watchdog@ffd00200 {
+ compatible = "snps,dw-wdt";
+ reg = <0xffd00200 0x100>;
+ interrupts = <0 117 4>;
+ resets = <&rst WATCHDOG0_RESET>;
+ status = "disabled";
+ };
+
+ watchdog1: watchdog@ffd00300 {
+ compatible = "snps,dw-wdt";
+ reg = <0xffd00300 0x100>;
+ interrupts = <0 118 4>;
+ resets = <&rst WATCHDOG1_RESET>;
+ status = "disabled";
+ };
+
+ watchdog2: watchdog@ffd00400 {
+ compatible = "snps,dw-wdt";
+ reg = <0xffd00400 0x100>;
+ interrupts = <0 125 4>;
+ resets = <&rst WATCHDOG2_RESET>;
+ status = "disabled";
+ };
+
+ watchdog3: watchdog@ffd00500 {
+ compatible = "snps,dw-wdt";
+ reg = <0xffd00500 0x100>;
+ interrupts = <0 126 4>;
+ resets = <&rst WATCHDOG3_RESET>;
+ status = "disabled";
+ };
+
+ sdr: sdr@f8011100 {
+ compatible = "altr,sdr-ctl", "syscon";
+ reg = <0xf8011100 0xc0>;
+ };
+
+ qspi: spi@ff8d2000 {
+ compatible = "cdns,qspi-nor";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xff8d2000 0x100>,
+ <0xff900000 0x100000>;
+ interrupts = <0 3 4>;
+ cdns,fifo-depth = <128>;
+ cdns,fifo-width = <4>;
+ cdns,trigger-address = <0x00000000>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
new file mode 100644
index 000000000000..7814a9e8eb08
--- /dev/null
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Intel Corporation
+ */
+#include "socfpga_agilex.dtsi"
+
+/ {
+ model = "SoCFPGA Agilex SoCDK";
+
+ aliases {
+ serial0 = &uart0;
+ ethernet0 = &gmac0;
+ ethernet1 = &gmac1;
+ ethernet2 = &gmac2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the reg */
+ reg = <0 0 0 0>;
+ };
+};
+
+&gpio1 {
+ status = "okay";
+};
+
+&gmac0 {
+ status = "okay";
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+
+ max-frame-size = <9000>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <4>;
+
+ txd0-skew-ps = <0>; /* -420ps */
+ txd1-skew-ps = <0>; /* -420ps */
+ txd2-skew-ps = <0>; /* -420ps */
+ txd3-skew-ps = <0>; /* -420ps */
+ rxd0-skew-ps = <420>; /* 0ps */
+ rxd1-skew-ps = <420>; /* 0ps */
+ rxd2-skew-ps = <420>; /* 0ps */
+ rxd3-skew-ps = <420>; /* 0ps */
+ txen-skew-ps = <0>; /* -420ps */
+ txc-skew-ps = <900>; /* 0ps */
+ rxdv-skew-ps = <420>; /* 0ps */
+ rxc-skew-ps = <1680>; /* 780ps */
+ };
+ };
+};
+
+&mmc {
+ status = "okay";
+ cap-sd-highspeed;
+ broken-cd;
+ bus-width = <4>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&watchdog0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index 2468762283a5..9143aa13ceb1 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -226,6 +226,11 @@
marvell,function = "gpio";
};
+ cp0_wlan_disable_pins: wlan-disable-pins {
+ marvell,pins = "mpp51";
+ marvell,function = "gpio";
+ };
+
cp0_sdhci_pins: sdhci-pins {
marvell,pins = "mpp55", "mpp56", "mpp57", "mpp58", "mpp59",
"mpp60", "mpp61";
@@ -235,7 +240,7 @@
&cp0_pcie0 {
pinctrl-names = "default";
- pinctrl-0 = <&cp0_pci0_reset_pins>;
+ pinctrl-0 = <&cp0_pci0_reset_pins &cp0_wlan_disable_pins>;
reset-gpios = <&cp0_gpio2 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -253,6 +258,12 @@
output-low;
};
+ wlan_disable {
+ gpio-hog;
+ gpios = <19 GPIO_ACTIVE_LOW>;
+ output-low;
+ };
+
lte_disable {
gpio-hog;
gpios = <21 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
index 1b4cb0c55744..385c455a7c98 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
+++ b/arch/arm64/boot/dts/mediatek/mt2712-pinfunc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2018 MediaTek Inc.
* Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 976d92a94738..43307bad3f0d 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -819,7 +819,6 @@
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc0 0>,
<0 0 0 2 &pcie_intc0 1>,
@@ -840,7 +839,6 @@
#size-cells = <2>;
#interrupt-cells = <1>;
ranges;
- num-lanes = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc1 0>,
<0 0 0 2 &pcie_intc1 1>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index c3c360161c5d..15f1842f6df3 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -178,12 +178,12 @@
cpu2: cpu@100 {
device_type = "cpu";
- compatible = "arm,cortex-a57";
+ compatible = "arm,cortex-a72";
reg = <0x100>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
#cooling-cells = <2>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ clocks = <&infracfg CLK_INFRA_CA72SEL>,
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster1_opp>;
@@ -191,12 +191,12 @@
cpu3: cpu@101 {
device_type = "cpu";
- compatible = "arm,cortex-a57";
+ compatible = "arm,cortex-a72";
reg = <0x101>;
enable-method = "psci";
cpu-idle-states = <&CPU_SLEEP_0>;
#cooling-cells = <2>;
- clocks = <&infracfg CLK_INFRA_CA57SEL>,
+ clocks = <&infracfg CLK_INFRA_CA72SEL>,
<&apmixedsys CLK_APMIXED_MAINPLL>;
clock-names = "cpu", "intermediate";
operating-points-v2 = <&cluster1_opp>;
@@ -216,6 +216,20 @@
};
};
+ pmu_a53 {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 9 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-affinity = <&cpu0>, <&cpu1>;
+ };
+
+ pmu_a72 {
+ compatible = "arm,cortex-a72-pmu";
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 13 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-affinity = <&cpu2>, <&cpu3>;
+ };
+
psci {
compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
method = "smc";
@@ -1307,6 +1321,15 @@
"vencpll",
"venc_lt_sel",
"vdec_bus_clk_src";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_LT_SEL>,
+ <&topckgen CLK_TOP_CCI400_SEL>,
+ <&topckgen CLK_TOP_VDEC_SEL>,
+ <&apmixedsys CLK_APMIXED_VCODECPLL>,
+ <&apmixedsys CLK_APMIXED_VENCPLL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL_370P5>,
+ <&topckgen CLK_TOP_UNIVPLL_D2>,
+ <&topckgen CLK_TOP_VCODECPLL>;
+ assigned-clock-rates = <0>, <0>, <0>, <1482000000>, <800000000>;
};
larb1: larb@16010000 {
@@ -1372,6 +1395,10 @@
"venc_sel",
"venc_lt_sel_src",
"venc_lt_sel";
+ assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
+ <&topckgen CLK_TOP_VENC_LT_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
+ <&topckgen CLK_TOP_UNIVPLL1_D2>;
};
vencltsys: clock-controller@19000000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h b/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h
new file mode 100644
index 000000000000..6221cd712718
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pinfunc.h
@@ -0,0 +1,1120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8183_PINFUNC_H
+#define __MT8183_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_MRG_SYNC (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_PCM0_SYNC (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_SRCLKENAI0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI2_CS (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_I2S3_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_SPI2_CSB (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_MRG_CLK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_PCM0_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM3 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI2_MO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_I2S3_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_SPI2_MO (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_MRG_DO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_PCM0_DO (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_SCL6 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI2_CK (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_I2S3_LRCK (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_SPI2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_MRG_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_PCM0_DI (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SDA6 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TDM_MCK (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_I2S3_DO (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_PWM_B (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_MD_URXD1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TDM_BCK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_DAP_MD32_SWD (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PWM_C (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SSPM_URXD_AO (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_MD_UTXD1 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TDM_LRCK (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PWM_A (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_IDDIG (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_MD_URXD0 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TDM_DATA0 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_CMFLASH (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI1_B_MI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_USB_DRVVBUS (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_MD_UTXD0 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TDM_DATA1 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SPI1_B_CSB (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_ANT_SEL3 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SCL7 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_TDM_DATA2 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_MD_INT0 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SPI1_B_MO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_ANT_SEL4 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_CMMCLK2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_B10 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SPI1_B_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_ANT_SEL5 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_CMMCLK3 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_TDM_DATA3 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_B11 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_TP_URXD1_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_IDDIG (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_SCL6 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_UCTS1 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_UCTS0 (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_SRCLKENAI1 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_I2S5_MCK (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_TP_UTXD1_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_USB_DRVVBUS (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_SDA6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_URTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_URTS0 (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I2S2_DI2 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I2S5_BCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_DBPI_D0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI5_MI (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_PCM0_SYNC (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_MD_URXD0 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL3 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I2S0_MCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_DBG_MON_B15 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_DBPI_D1 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_SPI5_CSB (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_PCM0_CLK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_UTXD0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL4 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S0_BCK (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_DBG_MON_B16 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DBPI_D2 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPI5_MO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_PCM0_DO (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_URXD1 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL5 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S0_LRCK (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_DBG_MON_B17 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_DBPI_D3 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SPI5_CLK (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_PCM0_DI (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_UTXD1 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_ANT_SEL6 (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I2S0_DI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_B23 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_DBPI_D4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SPI4_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_ANT_SEL7 (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_I2S3_MCK (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DBPI_D5 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_CSB (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_I2S3_BCK (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_DBPI_D6 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_CONN_MCU_TDO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I2S3_LRCK (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_DBPI_D7 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_UTXD1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A19 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_DBPI_D8 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SPI3_MI (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_TMS (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_DAP_MD32_SWD (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I2S2_MCK (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_DBPI_D9 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SPI3_CSB (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_TCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_I2S2_BCK (MTK_PIN_NO(22) | 6)
+#define PINMUX_GPIO22__FUNC_DBG_MON_B6 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_DBPI_D10 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_SPI3_MO (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CONN_MCU_TDI (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_UCTS1 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I2S2_LRCK (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_DBG_MON_B7 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_DBPI_D11 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI3_CLK (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI0 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_URTS1 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_IO_JTAG_TCK (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I2S2_DI (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_DBG_MON_B31 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_DBPI_HSYNC (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL6 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_KPCOL2 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_IO_JTAG_TMS (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_I2S1_MCK (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_B0 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_DBPI_VSYNC (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_ANT_SEL1 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA6 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_KPROW2 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_IO_JTAG_TDI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_I2S1_BCK (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_B1 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_DBPI_DE (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_ANT_SEL2 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_DMIC_CLK (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_IO_JTAG_TDO (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_I2S1_LRCK (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_B9 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_DBPI_CK (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_DMIC_DAT (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_I2S1_DO (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_B32 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_MSDC1_CLK (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_IO_JTAG_TCK (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_UDI_TCK (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_CONN_DSP_JCK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_PCM1_CLK (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A6 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_MSDC1_DAT3 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_DAP_MD32_SWD (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_PCM1_DI (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A7 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_MSDC1_CMD (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_IO_JTAG_TMS (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_UDI_TMS (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_CONN_DSP_JMS (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_PCM1_SYNC (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A8 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_MSDC1_DAT0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_IO_JTAG_TDI (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_UDI_TDI (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_CONN_DSP_JDI (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_PCM1_DO0 (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A9 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MSDC1_DAT2 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_UDI_NTRST (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_PCM1_DO2 (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_DBG_MON_A10 (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MSDC1_DAT1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_IO_JTAG_TDO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_UDI_TDO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_CONN_DSP_JDO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_PCM1_DO1 (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A11 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_DSP_JMS (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A28 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A29 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_CONN_DSP_JDO (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A30 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A20 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_CONN_DSP_JCK (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A31 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_CONN_DSP_JDI (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A32 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_IDDIG (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_URXD1 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_UCTS0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DMIC_CLK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_USB_DRVVBUS (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_UTXD1 (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_URTS0 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_SSPM_URXD_AO (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(42) | 5)
+#define PINMUX_GPIO42__FUNC_DMIC_DAT (MTK_PIN_NO(42) | 6)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DISP_PWM (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_DSI_TE (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_LCM_RST (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_URXD1 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_UCTS1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_CCU_UTXD_AO (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_TP_UCTS1_AO (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_IDDIG (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_I2S5_LRCK (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_UTXD1 (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_URTS1 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_CCU_URXD_AO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_TP_URTS1_AO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_USB_DRVVBUS (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_I2S5_DO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SCL5 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SDA5 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SCL3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SDA3 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_ANT2 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_ANT0 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_OLAT1 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCL_6306 (MTK_PIN_NO(56) | 2)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_SDA_6306 (MTK_PIN_NO(57) | 2)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_SPM_BSI_D2 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_PWM_B (MTK_PIN_NO(58) | 3)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_SPM_BSI_D1 (MTK_PIN_NO(59) | 2)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_SPM_BSI_D0 (MTK_PIN_NO(60) | 2)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MIPI1_SDATA (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_MIPI1_SCLK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_MIPI0_SDATA (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_MIPI0_SCLK (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_MIPI3_SDATA (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_BPI_OLAT2 (MTK_PIN_NO(65) | 2)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_MIPI3_SCLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_BPI_OLAT3 (MTK_PIN_NO(66) | 2)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MIPI2_SDATA (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MIPI2_SCLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS7 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS6 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS5 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS4 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS3 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS2 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS1 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS0 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_ANT1 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_OLAT0 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_PA_VM1 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_MIPI4_SDATA (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_PA_VM0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_MIPI4_SCLK (MTK_PIN_NO(80) | 2)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_SDA1 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_SDA0 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_SCL0 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_SCL1 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_SPI0_MI (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_SCP_SPI0_MI (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CLKM3 (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_I2S1_BCK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_DFD_TDO (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_JTDO_SEL1 (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_SPI0_CSB (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_SCP_SPI0_CS (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_CLKM0 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_I2S1_LRCK (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_DFD_TMS (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_JTMS_SEL1 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_SPI0_MO (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_SCP_SPI0_MO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_SDA1 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_I2S1_DO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_DFD_TDI (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_JTDI_SEL1 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_SPI0_CLK (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_SCP_SPI0_CK (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_SCL1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_I2S1_MCK (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_JTCK_SEL1 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SRCLKENAI0 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I2S5_BCK (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_ANT_SEL6 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_SDA8 (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_CMVREF0 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_DBG_MON_A21 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_PWM_A (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_CMMCLK2 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_I2S5_LRCK (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_SCL8 (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_PTA_RXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_DBG_MON_A22 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_KPROW1 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PWM_B (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I2S5_DO (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_ANT_SEL7 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_CMMCLK3 (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_PTA_TXD (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_KPROW0 (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_KPCOL0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DBG_MON_B27 (MTK_PIN_NO(93) | 7)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_KPCOL1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S2_DI2 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_I2S5_MCK (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_CMMCLK2 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_SCP_SPI2_MI (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_SRCLKENAI1 (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_SPI2_MI (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_URXD0 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD_URXD0 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_MD_URXD1 (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_SSPM_URXD_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU_URXD_AO (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_UTXD0 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_URXD0 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_MD_UTXD0 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_MD_UTXD1 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU_UTXD_AO (MTK_PIN_NO(96) | 6)
+#define PINMUX_GPIO96__FUNC_DBG_MON_B2 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UCTS0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S2_MCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_IDDIG (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_CONN_MCU_TDO (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TDO (MTK_PIN_NO(97) | 6)
+#define PINMUX_GPIO97__FUNC_DBG_MON_B3 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS0 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S2_BCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_USB_DRVVBUS (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_CONN_MCU_TMS (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TMS (MTK_PIN_NO(98) | 6)
+#define PINMUX_GPIO98__FUNC_DBG_MON_B4 (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_CMMCLK0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B28 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_CMMCLK1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_PWM_C (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B29 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_CLKM2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_I2S2_LRCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_CMVREF1 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_CONN_MCU_TCK (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_IO_JTAG_TCK (MTK_PIN_NO(101) | 6)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_CLKM1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_I2S2_DI (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_CONN_MCU_TDI (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_IO_JTAG_TDI (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B8 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SCL2 (MTK_PIN_NO(103) | 1)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SDA2 (MTK_PIN_NO(104) | 1)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SCL4 (MTK_PIN_NO(105) | 1)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SDA4 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_ANT_SEL0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SDA7 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_PWM_A (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_DBG_MON_B12 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_CMMCLK2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_ANT_SEL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM1 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_SCL8 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_DAP_MD32_SWD (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_PWM_B (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_DBG_MON_B13 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DMIC_DAT (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_ANT_SEL2 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_CLKM2 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_SDA8 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_PWM_C (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DBG_MON_B14 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_ANT_SEL0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TP_URXD1_AO (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_USB_DRVVBUS (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_SRCLKENAI1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_KPCOL2 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_URXD1 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_CMMCLK3 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_ANT_SEL1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_SRCLKENAI0 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SDA7 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_ANT_SEL2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TP_UTXD1_AO (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_IDDIG (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AGPS_SYNC (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_KPROW2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_UTXD1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_CONN_TOP_CLK (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_SCL6 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_AUXIF_CLK0 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TP_UCTS1_AO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CONN_TOP_DATA (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SDA6 (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_AUXIF_ST0 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TP_URTS1_AO (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CONN_BT_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UTXD1 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PTA_TXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_AUXIF_CLK1 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_DAP_MD32_SWD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_TP_UTXD1_AO (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CONN_BT_DATA (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_AUXIF_ST1 (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_TP_URXD2_AO (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_DBG_MON_A0 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CONN_WF_HB0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_TP_UTXD2_AO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_DBG_MON_A4 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CONN_WF_HB1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SSPM_URXD_AO (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_TP_UCTS2_AO (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_DBG_MON_A5 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CONN_WF_HB2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_TP_URTS2_AO (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CONN_WB_PTA (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_CCU_URXD_AO (MTK_PIN_NO(120) | 5)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CONN_HRST_B (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_URXD1 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_PTA_RXD (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_CCU_UTXD_AO (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_TP_URXD1_AO (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SSPM_URXD2_AO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_ANT_SEL1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_DBG_MON_A12 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT0 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_ANT_SEL0 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_DBG_MON_A13 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_CLK (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DBG_MON_A14 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT2 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_MRG_CLK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_A15 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_ANT_SEL5 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A16 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DAT6 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_ANT_SEL4 (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A17 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MSDC0_DAT1 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_ANT_SEL2 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A18 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MSDC0_DAT5 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_ANT_SEL3 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A23 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MSDC0_DAT7 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MRG_DO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A24 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MSDC0_DSL (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MRG_SYNC (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MSDC0_DAT3 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MRG_DI (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A26 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MSDC0_RSTB (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_AGPS_SYNC (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A27 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_RTC32K_CK (MTK_PIN_NO(134) | 1)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_WATCHDOG (MTK_PIN_NO(135) | 1)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MISO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I2S1_MCK (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_I2S1_BCK (MTK_PIN_NO(137) | 3)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I2S1_LRCK (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_DBG_MON_B24 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I2S1_DO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(139) | 6)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MISO (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_I2S0_MCK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I2S0_BCK (MTK_PIN_NO(141) | 3)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I2S0_LRCK (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_VOW_DAT_MISO (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_B25 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_I2S0_DI (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_VOW_CLK_MISO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B26 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(144) | 2)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(146) | 2)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(147) | 1)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_SRCLKENA0 (MTK_PIN_NO(148) | 1)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_SRCLKENA1 (MTK_PIN_NO(149) | 1)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_PWM_A (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CMFLASH (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_CLKM0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B30 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_PWM_B (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CMVREF0 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_CLKM1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B20 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_PWM_C (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_CMFLASH (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM2 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B21 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_PWM_A (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_CMVREF0 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_CLKM3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B22 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B18 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL0 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_ANT_SEL1 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SRCLKENAI0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_SCL6 (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_KPCOL2 (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_IDDIG (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_ANT_SEL2 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SRCLKENAI1 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_SDA6 (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_KPROW2 (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_USB_DRVVBUS (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_ANT_SEL3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_ANT_SEL4 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_ANT_SEL5 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SPI1_A_MI (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SPI1_MI (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_IDDIG (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_ANT_SEL6 (MTK_PIN_NO(161) | 4)
+#define PINMUX_GPIO161__FUNC_KPCOL2 (MTK_PIN_NO(161) | 5)
+#define PINMUX_GPIO161__FUNC_PTA_RXD (MTK_PIN_NO(161) | 6)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B19 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SPI1_A_CSB (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_SCP_SPI1_CS (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_USB_DRVVBUS (MTK_PIN_NO(162) | 3)
+#define PINMUX_GPIO162__FUNC_ANT_SEL5 (MTK_PIN_NO(162) | 4)
+#define PINMUX_GPIO162__FUNC_KPROW2 (MTK_PIN_NO(162) | 5)
+#define PINMUX_GPIO162__FUNC_PTA_TXD (MTK_PIN_NO(162) | 6)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SPI1_A_MO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_SCP_SPI1_MO (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_SDA1 (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_ANT_SEL4 (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_CMMCLK2 (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_DMIC_CLK (MTK_PIN_NO(163) | 6)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_SPI1_A_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_SCL1 (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_ANT_SEL3 (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_CMMCLK3 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DMIC_DAT (MTK_PIN_NO(164) | 6)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_PWM_B (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_CMMCLK2 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_TDM_MCK_2ND (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL6 (MTK_PIN_NO(166) | 1)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SPM_BSI_EN (MTK_PIN_NO(167) | 2)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPM_BSI_CK (MTK_PIN_NO(168) | 2)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_PWM_C (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_CMMCLK3 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_CMVREF1 (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_AGPS_SYNC (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_TDM_BCK_2ND (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_I2S1_BCK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_I2S3_BCK (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_SCL7 (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I2S5_BCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_ANT_SEL3 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_I2S1_LRCK (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I2S3_LRCK (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_SDA7 (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I2S5_LRCK (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_URXD1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_ANT_SEL4 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_I2S1_DO (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I2S3_DO (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_SCL8 (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_I2S5_DO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_UTXD1 (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_ANT_SEL5 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_I2S1_MCK (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I2S3_MCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_SDA8 (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I2S5_MCK (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_UCTS0 (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(173) | 6)
+#define PINMUX_GPIO173__FUNC_ANT_SEL6 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_I2S2_DI (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I2S0_DI (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_I2S2_DI2 (MTK_PIN_NO(174) | 4)
+#define PINMUX_GPIO174__FUNC_URTS0 (MTK_PIN_NO(174) | 5)
+#define PINMUX_GPIO174__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(174) | 6)
+#define PINMUX_GPIO174__FUNC_ANT_SEL7 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_ANT_SEL7 (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+
+#endif /* __MT8183_PINFUNC_H */
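
The macros above all follow one pattern: each value ORs the pin index (via MTK_PIN_NO()) with a mux mode in the low bits, so a board device tree can pick a function by name rather than by raw numbers. A minimal sketch of how such a macro might be consumed in a board pinctrl node follows; the &pio label, node names, and the choice of the I2C0 pins are illustrative assumptions, not part of this patch.

	/* Hypothetical board-level pinctrl node using the MT8183 pinfunc
	 * macros; the &pio controller label and node names are assumed.
	 */
	#include <dt-bindings/pinctrl/mt8183-pinfunc.h>

	&pio {
		i2c0_pins: i2c0-pins {
			pins-i2c {
				/* mux GPIO82/GPIO83 to SDA0/SCL0 (mode 1) */
				pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
					 <PINMUX_GPIO83__FUNC_SCL0>;
				bias-pull-up;
			};
		};
	};
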
diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile
index 6b8ab5568481..bcd018c3162b 100644
--- a/arch/arm64/boot/dts/nvidia/Makefile
+++ b/arch/arm64/boot/dts/nvidia/Makefile
@@ -3,6 +3,7 @@ dtb-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-norrin.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb
+dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p3450-0000.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb
dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2894-0050-a08.dtb
dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 31457f32e4d0..14d7fea82daf 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -58,6 +58,93 @@
status = "okay";
};
+ padctl@3520000 {
+ status = "disabled";
+
+ avdd-pll-erefeut-supply = <&vdd_1v8_pll>;
+ avdd-usb-supply = <&vdd_3v3_sys>;
+ dvdd-pex-supply = <&vdd_pex>;
+ dvdd-pex-pll-supply = <&vdd_pex>;
+ hvdd-pex-supply = <&vdd_1v8>;
+ hvdd-pex-pll-supply = <&vdd_1v8>;
+ vclamp-usb-supply = <&vdd_1v8>;
+ vddio-hsic-supply = <&gnd>;
+
+ pads {
+ usb2 {
+ status = "okay";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ usb3 {
+ status = "okay";
+
+ lanes {
+ usb3-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb3-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb3-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "okay";
+ mode = "otg";
+
+ vbus-supply = <&vdd_usb0>;
+ };
+
+ usb2-1 {
+ status = "okay";
+ mode = "host";
+
+ vbus-supply = <&vdd_usb1>;
+ };
+
+ usb3-0 {
+ nvidia,usb2-companion = <1>;
+ status = "okay";
+ };
+ };
+ };
+
+ usb@3530000 {
+ status = "disabled";
+
+ phys = <&{/padctl@3520000/pads/usb2/lanes/usb2-0}>,
+ <&{/padctl@3520000/pads/usb2/lanes/usb2-1}>,
+ <&{/padctl@3520000/pads/usb3/lanes/usb3-0}>;
+ phy-names = "usb2-0", "usb2-1", "usb3-0";
+ };
+
pcie@10003000 {
status = "okay";
@@ -182,5 +269,33 @@
vin-supply = <&vdd_5v0_sys>;
};
+
+ vdd_usb0: regulator@102 {
+ compatible = "regulator-fixed";
+ reg = <102>;
+
+ regulator-name = "VDD_USB0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio TEGRA_MAIN_GPIO(L, 4) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_usb1: regulator@103 {
+ compatible = "regulator-fixed";
+ reg = <103>;
+
+ regulator-name = "VDD_USB1";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ gpio = <&gpio TEGRA_MAIN_GPIO(L, 5) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
};
};
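
The XUSB pad controller and host controller added to this board file are fully wired up with supplies, lane functions and PHY references, but both are left disabled. As a hedged sketch (not something this patch does), a derived device tree or overlay could turn them on by overriding the status properties, using the same full-path references the patch itself uses:

	/* Hypothetical override in a .dts built on tegra186-p2771-0000.dts;
	 * nothing in this patch enables these nodes by itself.
	 */
	&{/padctl@3520000} {
		status = "okay";
	};

	&{/usb@3530000} {
		status = "okay";
	};
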
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index 89a2da46efae..64686b033c38 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -268,43 +268,30 @@
regulator-name = "AVDD_DSI_CSI_1V2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
vdd_1v8: sd2 {
regulator-name = "VDD_1V8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
vdd_3v3_sys: sd3 {
regulator-name = "VDD_3V3_SYS";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
- ldo0 {
+ vdd_1v8_pll: ldo0 {
regulator-name = "VDD_1V8_AP_PLL";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
ldo2 {
regulator-name = "VDDIO_3V3_AOHV";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- /* XXX */
regulator-always-on;
regulator-boot-on;
};
@@ -331,18 +318,12 @@
regulator-name = "VDD_HDMI_1V05";
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
vdd_pex: ldo8 {
regulator-name = "VDD_PEX_1V05";
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
};
};
};
@@ -360,10 +341,21 @@
#address-cells = <1>;
#size-cells = <0>;
- vdd_5v0_sys: regulator@0 {
+ gnd: regulator@0 {
compatible = "regulator-fixed";
reg = <0>;
+ regulator-name = "GND";
+ regulator-min-microvolt = <0>;
+ regulator-max-microvolt = <0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_5v0_sys: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+
regulator-name = "VDD_5V0_SYS";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -371,18 +363,14 @@
regulator-boot-on;
};
- vdd_1v8_ap: regulator@1 {
+ vdd_1v8_ap: regulator@2 {
compatible = "regulator-fixed";
- reg = <1>;
+ reg = <2>;
regulator-name = "VDD_1V8_AP";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- /* XXX */
- regulator-always-on;
- regulator-boot-on;
-
gpio = <&pmic 1 GPIO_ACTIVE_HIGH>;
enable-active-high;
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 97aeb946ed5e..426ac0bdf6a6 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -60,6 +60,7 @@
clock-names = "master_bus", "slave_bus", "rx", "tx", "ptp_ref";
resets = <&bpmp TEGRA186_RESET_EQOS>;
reset-names = "eqos";
+ iommus = <&smmu TEGRA186_SID_EQOS>;
status = "disabled";
snps,write-requests = <1>;
@@ -317,10 +318,11 @@
nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x0a>;
nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>;
nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x0a>;
- nvidia,default-tap = <0x5>;
- nvidia,default-trim = <0x9>;
+ nvidia,default-tap = <0x9>;
+ nvidia,default-trim = <0x5>;
nvidia,dqs-trim = <63>;
mmc-hs400-1_8v;
+ supports-cqe;
status = "disabled";
};
@@ -337,9 +339,145 @@
<&bpmp TEGRA186_RESET_HDA2CODEC_2X>;
reset-names = "hda", "hda2hdmi", "hda2codec_2x";
power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>;
+ iommus = <&smmu TEGRA186_SID_HDA>;
status = "disabled";
};
+ padctl: padctl@3520000 {
+ compatible = "nvidia,tegra186-xusb-padctl";
+ reg = <0x0 0x03520000 0x0 0x1000>,
+ <0x0 0x03540000 0x0 0x1000>;
+ reg-names = "padctl", "ao";
+
+ resets = <&bpmp TEGRA186_RESET_XUSB_PADCTL>;
+ reset-names = "padctl";
+
+ status = "disabled";
+
+ pads {
+ usb2 {
+ clocks = <&bpmp TEGRA186_CLK_USB2_TRK>;
+ clock-names = "trk";
+ status = "disabled";
+
+ lanes {
+ usb2-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-1 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb2-2 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ hsic {
+ clocks = <&bpmp TEGRA186_CLK_HSIC_TRK>;
+ clock-names = "trk";
+ status = "disabled";
+
+ lanes {
+ hsic-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+
+ usb3 {
+ status = "disabled";
+
+ lanes {
+ usb3-0 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb3-1 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+
+ usb3-2 {
+ status = "disabled";
+ #phy-cells = <0>;
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "disabled";
+ };
+
+ usb2-1 {
+ status = "disabled";
+ };
+
+ usb2-2 {
+ status = "disabled";
+ };
+
+ hsic-0 {
+ status = "disabled";
+ };
+
+ usb3-0 {
+ status = "disabled";
+ };
+
+ usb3-1 {
+ status = "disabled";
+ };
+
+ usb3-2 {
+ status = "disabled";
+ };
+ };
+ };
+
+ usb@3530000 {
+ compatible = "nvidia,tegra186-xusb";
+ reg = <0x0 0x03530000 0x0 0x8000>,
+ <0x0 0x03538000 0x0 0x1000>;
+ reg-names = "hcd", "fpci";
+
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&bpmp TEGRA186_CLK_XUSB_HOST>,
+ <&bpmp TEGRA186_CLK_XUSB_FALCON>,
+ <&bpmp TEGRA186_CLK_XUSB_SS>,
+ <&bpmp TEGRA186_CLK_XUSB_CORE_SS>,
+ <&bpmp TEGRA186_CLK_CLK_M>,
+ <&bpmp TEGRA186_CLK_XUSB_FS>,
+ <&bpmp TEGRA186_CLK_PLLU>,
+ <&bpmp TEGRA186_CLK_CLK_M>,
+ <&bpmp TEGRA186_CLK_PLLE>;
+ clock-names = "xusb_host", "xusb_falcon_src", "xusb_ss",
+ "xusb_ss_src", "xusb_hs_src", "xusb_fs_src",
+ "pll_u_480m", "clk_m", "pll_e";
+
+ power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBC>,
+ <&bpmp TEGRA186_POWER_DOMAIN_XUSBA>;
+ power-domain-names = "xusb_host", "xusb_ss";
+ nvidia,xusb-padctl = <&padctl>;
+
+ status = "disabled";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
fuse@3820000 {
compatible = "nvidia,tegra186-efuse";
reg = <0x0 0x03820000 0x0 0x10000>;
@@ -535,6 +673,10 @@
<&bpmp TEGRA186_RESET_PCIEXCLK>;
reset-names = "afi", "pex", "pcie_x";
+ iommus = <&smmu TEGRA186_SID_AFI>;
+ iommu-map = <0x0 &smmu TEGRA186_SID_AFI 0x1000>;
+ iommu-map-mask = <0x0>;
+
status = "disabled";
pci@1,0 {
@@ -1022,6 +1164,7 @@
bpmp: bpmp {
compatible = "nvidia,tegra186-bpmp";
+ iommus = <&smmu TEGRA186_SID_BPMP>;
mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB
TEGRA_HSP_DB_MASTER_BPMP>;
shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
index 246c1ebbd055..0fd5bd29fbf9 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
@@ -256,6 +256,7 @@
interrupt-parent = <&gpio>;
interrupts = <TEGRA194_MAIN_GPIO(H, 2)
IRQ_TYPE_LEVEL_LOW>;
+ vcc-supply = <&vdd_1v8ls>;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index b62e96945846..73801b48d1d8 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -57,8 +57,6 @@
pwms = <&pwm4 0 45334>;
cooling-levels = <0 64 128 255>;
- cooling-min-state = <0>;
- cooling-max-state = <3>;
#cooling-cells = <2>;
};
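
Dropping cooling-min-state and cooling-max-state leaves the fan described only by cooling-levels and #cooling-cells; the usable cooling range is then expressed where the generic thermal binding expects it, in a thermal zone's cooling map. A minimal sketch, assuming a &fan label and a &cpu_alert0 trip that are not part of this patch:

	/* Hypothetical thermal-zone wiring for the pwm-fan above; the
	 * indices 0..3 select among the four cooling-levels entries.
	 */
	thermal-zones {
		cpu-thermal {
			cooling-maps {
				map0 {
					trip = <&cpu_alert0>;
					cooling-device = <&fan 0 3>;
				};
			};
		};
	};
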
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
index 053458a5db55..4dcd0d36189a 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
@@ -305,6 +305,12 @@
cpu@3 {
enable-method = "psci";
};
+
+ idle-states {
+ cpu-sleep {
+ status = "okay";
+ };
+ };
};
psci {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
index 9fad0d27278e..5a57396b5948 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts
@@ -99,4 +99,16 @@
pinctrl-0 = <&dvfs_pwm_active_state>;
pinctrl-1 = <&dvfs_pwm_inactive_state>;
};
+
+ aconnect@702c0000 {
+ status = "okay";
+
+ dma@702e2000 {
+ status = "okay";
+ };
+
+ agic@702f9000 {
+ status = "okay";
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
index 95e890d8a119..a7dc319214a4 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
@@ -1352,6 +1352,11 @@
padctl@7009f000 {
status = "okay";
+ avdd-pll-utmip-supply = <&vdd_1v8>;
+ avdd-pll-uerefe-supply = <&avdd_1v05_pll>;
+ dvdd-pex-pll-supply = <&vdd_pex_1v05>;
+ hvdd-pex-pll-e-supply = <&vdd_1v8>;
+
pads {
usb2 {
status = "okay";
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
index 3ddf173ccc18..88a4b9333d84 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
@@ -1629,6 +1629,12 @@
cpu@3 {
enable-method = "psci";
};
+
+ idle-states {
+ cpu-sleep {
+ status = "okay";
+ };
+ };
};
psci {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
new file mode 100644
index 000000000000..5d0181908f45
--- /dev/null
+++ b/arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+/dts-v1/;
+
+#include <dt-bindings/input/gpio-keys.h>
+#include <dt-bindings/input/linux-event-codes.h>
+#include <dt-bindings/mfd/max77620.h>
+
+#include "tegra210.dtsi"
+
+/ {
+ model = "NVIDIA Jetson Nano Developer Kit";
+ compatible = "nvidia,p3450-0000", "nvidia,tegra210";
+
+ aliases {
+ ethernet = "/pcie@1003000/pci@2,0/ethernet@0,0";
+ rtc0 = "/i2c@7000d000/pmic@3c";
+ rtc1 = "/rtc@7000e000";
+ serial0 = &uarta;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x1 0x0>;
+ };
+
+ pcie@1003000 {
+ status = "okay";
+
+ avdd-pll-uerefe-supply = <&vdd_pex_1v05>;
+ hvddio-pex-supply = <&vdd_1v8>;
+ dvddio-pex-supply = <&vdd_pex_1v05>;
+ dvdd-pex-pll-supply = <&vdd_pex_1v05>;
+ hvdd-pex-pll-e-supply = <&vdd_1v8>;
+ vddio-pex-ctl-supply = <&vdd_1v8>;
+
+ pci@1,0 {
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-1}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-2}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-3}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-4}>;
+ phy-names = "pcie-0", "pcie-1", "pcie-2", "pcie-3";
+ nvidia,num-lanes = <4>;
+ status = "okay";
+ };
+
+ pci@2,0 {
+ phys = <&{/padctl@7009f000/pads/pcie/lanes/pcie-0}>;
+ phy-names = "pcie-0";
+ status = "okay";
+
+ ethernet@0,0 {
+ reg = <0x000000 0 0 0 0>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+ };
+
+ host1x@50000000 {
+ dpaux@54040000 {
+ status = "okay";
+ };
+
+ sor@54580000 {
+ status = "okay";
+
+ avdd-io-supply = <&avdd_1v05>;
+ vdd-pll-supply = <&vdd_1v8>;
+ hdmi-supply = <&vdd_hdmi>;
+
+ nvidia,ddc-i2c-bus = <&hdmi_ddc>;
+ nvidia,hpd-gpio = <&gpio TEGRA_GPIO(CC, 1)
+ GPIO_ACTIVE_LOW>;
+ nvidia,xbar-cfg = <0 1 2 3 4>;
+ };
+ };
+
+ gpu@57000000 {
+ vdd-supply = <&vdd_gpu>;
+ status = "okay";
+ };
+
+ /* debug port */
+ serial@70006000 {
+ status = "okay";
+ };
+
+ hdmi_ddc: i2c@7000c700 {
+ status = "okay";
+ clock-frequency = <100000>;
+ };
+
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ pmic: pmic@3c {
+ compatible = "maxim,max77620";
+ reg = <0x3c>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&max77620_default>;
+
+ max77620_default: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "gpio";
+ };
+
+ gpio1 {
+ pins = "gpio1";
+ function = "fps-out";
+ drive-push-pull = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ gpio2 {
+ pins = "gpio2";
+ function = "fps-out";
+ drive-open-drain = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ gpio3 {
+ pins = "gpio3";
+ function = "fps-out";
+ drive-open-drain = <1>;
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <4>;
+ maxim,active-fps-power-down-slot = <3>;
+ };
+
+ gpio4 {
+ pins = "gpio4";
+ function = "32k-out1";
+ };
+
+ gpio5_6_7 {
+ pins = "gpio5", "gpio6", "gpio7";
+ function = "gpio";
+ drive-push-pull = <1>;
+ };
+ };
+
+ fps {
+ fps0 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ maxim,suspend-fps-time-period-us = <5120>;
+ };
+
+ fps1 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN1>;
+ maxim,suspend-fps-time-period-us = <5120>;
+ };
+
+ fps2 {
+ maxim,fps-event-source = <MAX77620_FPS_EVENT_SRC_EN0>;
+ };
+ };
+
+ regulators {
+ in-ldo0-1-supply = <&vdd_pre>;
+ in-ldo2-supply = <&vdd_3v3_sys>;
+ in-ldo3-5-supply = <&vdd_1v8>;
+ in-ldo4-6-supply = <&vdd_5v0_sys>;
+ in-ldo7-8-supply = <&vdd_pre>;
+ in-sd0-supply = <&vdd_5v0_sys>;
+ in-sd1-supply = <&vdd_5v0_sys>;
+ in-sd2-supply = <&vdd_5v0_sys>;
+ in-sd3-supply = <&vdd_5v0_sys>;
+
+ vdd_soc: sd0 {
+ regulator-name = "VDD_SOC";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1170000>;
+ regulator-enable-ramp-delay = <146>;
+ regulator-disable-ramp-delay = <4080>;
+ regulator-ramp-delay = <27500>;
+ regulator-ramp-delay-scale = <300>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <1>;
+ maxim,active-fps-power-down-slot = <6>;
+ };
+
+ vdd_ddr: sd1 {
+ regulator-name = "VDD_DDR_1V1_PMIC";
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ regulator-enable-ramp-delay = <176>;
+ regulator-disable-ramp-delay = <145800>;
+ regulator-ramp-delay = <27500>;
+ regulator-ramp-delay-scale = <300>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <5>;
+ maxim,active-fps-power-down-slot = <2>;
+ };
+
+ vdd_pre: sd2 {
+ regulator-name = "VDD_PRE_REG_1V35";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-enable-ramp-delay = <176>;
+ regulator-disable-ramp-delay = <32000>;
+ regulator-ramp-delay = <27500>;
+ regulator-ramp-delay-scale = <350>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <2>;
+ maxim,active-fps-power-down-slot = <5>;
+ };
+
+ vdd_1v8: sd3 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-enable-ramp-delay = <242>;
+ regulator-disable-ramp-delay = <118000>;
+ regulator-ramp-delay = <27500>;
+ regulator-ramp-delay-scale = <360>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <3>;
+ maxim,active-fps-power-down-slot = <4>;
+ };
+
+ vdd_sys_1v2: ldo0 {
+ regulator-name = "AVDD_SYS_1V2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-enable-ramp-delay = <26>;
+ regulator-disable-ramp-delay = <626>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ vdd_pex_1v05: ldo1 {
+ regulator-name = "VDD_PEX_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-disable-ramp-delay = <650>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ vddio_sdmmc: ldo2 {
+ regulator-name = "VDDIO_SDMMC";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <62>;
+ regulator-disable-ramp-delay = <650>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
+ maxim,active-fps-power-up-slot = <0>;
+ maxim,active-fps-power-down-slot = <7>;
+ };
+
+ ldo3 {
+ status = "disabled";
+ };
+
+ vdd_rtc: ldo4 {
+ regulator-name = "VDD_RTC";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-disable-ramp-delay = <610>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+ regulator-disable-active-discharge;
+ regulator-always-on;
+ regulator-boot-on;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
+ maxim,active-fps-power-up-slot = <1>;
+ maxim,active-fps-power-down-slot = <6>;
+ };
+
+ ldo5 {
+ status = "disabled";
+ };
+
+ ldo6 {
+ status = "disabled";
+ };
+
+ avdd_1v05_pll: ldo7 {
+ regulator-name = "AVDD_1V05_PLL";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <24>;
+ regulator-disable-ramp-delay = <2768>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <3>;
+ maxim,active-fps-power-down-slot = <4>;
+ };
+
+ avdd_1v05: ldo8 {
+ regulator-name = "AVDD_SATA_HDMI_DP_1V05";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-enable-ramp-delay = <22>;
+ regulator-disable-ramp-delay = <1160>;
+ regulator-ramp-delay = <100000>;
+ regulator-ramp-delay-scale = <200>;
+
+ maxim,active-fps-source = <MAX77620_FPS_SRC_1>;
+ maxim,active-fps-power-up-slot = <6>;
+ maxim,active-fps-power-down-slot = <1>;
+ };
+ };
+ };
+ };
+
+ pmc@7000e400 {
+ nvidia,invert-interrupt;
+ };
+
+ hda@70030000 {
+ nvidia,model = "jetson-nano-hda";
+
+ status = "okay";
+ };
+
+ usb@70090000 {
+ phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>,
+ <&{/padctl@7009f000/pads/usb2/lanes/usb2-2}>,
+ <&{/padctl@7009f000/pads/pcie/lanes/pcie-6}>;
+ phy-names = "usb2-0", "usb2-1", "usb2-2", "usb3-0";
+
+ avdd-usb-supply = <&vdd_3v3_sys>;
+ dvddio-pex-supply = <&vdd_pex_1v05>;
+ hvddio-pex-supply = <&vdd_1v8>;
+ /* these really belong to the XUSB pad controller */
+ avdd-pll-utmip-supply = <&vdd_1v8>;
+ avdd-pll-uerefe-supply = <&vdd_pex_1v05>;
+ dvdd-usb-ss-pll-supply = <&vdd_pex_1v05>;
+ hvdd-usb-ss-pll-e-supply = <&vdd_1v8>;
+
+ status = "okay";
+ };
+
+ padctl@7009f000 {
+ status = "okay";
+
+ avdd-pll-utmip-supply = <&vdd_1v8>;
+ avdd-pll-uerefe-supply = <&vdd_pex_1v05>;
+ dvdd-pex-pll-supply = <&vdd_pex_1v05>;
+ hvdd-pex-pll-e-supply = <&vdd_1v8>;
+
+ pads {
+ usb2 {
+ status = "okay";
+
+ lanes {
+ usb2-0 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-1 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+
+ usb2-2 {
+ nvidia,function = "xusb";
+ status = "okay";
+ };
+ };
+ };
+
+ pcie {
+ status = "okay";
+
+ lanes {
+ pcie-0 {
+ nvidia,function = "pcie-x1";
+ status = "okay";
+ };
+
+ pcie-1 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-2 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-3 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-4 {
+ nvidia,function = "pcie-x4";
+ status = "okay";
+ };
+
+ pcie-5 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+
+ pcie-6 {
+ nvidia,function = "usb3-ss";
+ status = "okay";
+ };
+ };
+ };
+ };
+
+ ports {
+ usb2-0 {
+ status = "okay";
+ mode = "otg";
+ };
+
+ usb2-1 {
+ status = "okay";
+ mode = "host";
+ };
+
+ usb2-2 {
+ status = "okay";
+ mode = "host";
+ };
+
+ usb3-0 {
+ status = "okay";
+ nvidia,usb2-companion = <1>;
+ vbus-supply = <&vdd_hub_3v3>;
+ };
+ };
+ };
+
+ sdhci@700b0000 {
+ status = "okay";
+ bus-width = <4>;
+
+ cd-gpios = <&gpio TEGRA_GPIO(Z, 1) GPIO_ACTIVE_LOW>;
+
+ vqmmc-supply = <&vddio_sdmmc>;
+ vmmc-supply = <&vdd_3v3_sd>;
+ };
+
+ clocks {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ clk32k_in: clock@0 {
+ compatible = "fixed-clock";
+ reg = <0>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ cpus {
+ cpu@0 {
+ enable-method = "psci";
+ };
+
+ cpu@1 {
+ enable-method = "psci";
+ };
+
+ cpu@2 {
+ enable-method = "psci";
+ };
+
+ cpu@3 {
+ enable-method = "psci";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ label = "Power";
+ gpios = <&gpio TEGRA_GPIO(X, 5) GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <KEY_POWER>;
+ debounce-interval = <30>;
+ wakeup-event-action = <EV_ACT_ASSERTED>;
+ wakeup-source;
+ };
+
+ force-recovery {
+ label = "Force Recovery";
+ gpios = <&gpio TEGRA_GPIO(X, 6) GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_KEY>;
+ linux,code = <BTN_1>;
+ debounce-interval = <30>;
+ };
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd_5v0_sys: regulator@0 {
+ compatible = "regulator-fixed";
+ reg = <0>;
+
+ regulator-name = "VDD_5V0_SYS";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_3v3_sys: regulator@1 {
+ compatible = "regulator-fixed";
+ reg = <1>;
+ regulator-name = "VDD_3V3_SYS";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ regulator-disable-ramp-delay = <11340>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pmic 3 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_3v3_sd: regulator@2 {
+ compatible = "regulator-fixed";
+ reg = <2>;
+
+ regulator-name = "VDD_3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio TEGRA_GPIO(Z, 3) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_3v3_sys>;
+ };
+
+ vdd_hdmi: regulator@3 {
+ compatible = "regulator-fixed";
+ reg = <3>;
+
+ regulator-name = "VDD_HDMI_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_hub_3v3: regulator@4 {
+ compatible = "regulator-fixed";
+ reg = <4>;
+
+ regulator-name = "VDD_HUB_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpio = <&gpio TEGRA_GPIO(A, 6) GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_cpu: regulator@5 {
+ compatible = "regulator-fixed";
+ reg = <5>;
+
+ regulator-name = "VDD_CPU";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ gpio = <&pmic 5 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+
+ vdd_gpu: regulator@6 {
+ compatible = "regulator-fixed";
+ reg = <6>;
+
+ regulator-name = "VDD_GPU";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-enable-ramp-delay = <250>;
+
+ gpio = <&pmic 6 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+
+ vin-supply = <&vdd_5v0_sys>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
index a4b8f668a6d4..72c7a04ac1df 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
@@ -1654,6 +1654,11 @@
padctl@7009f000 {
status = "okay";
+ avdd-pll-utmip-supply = <&pp1800>;
+ avdd-pll-uerefe-supply = <&pp1050_avdd>;
+ dvdd-pex-pll-supply = <&avddio_1v05>;
+ hvdd-pex-pll-e-supply = <&pp1800>;
+
pads {
usb2 {
status = "okay";
@@ -1751,6 +1756,13 @@
cpu@3 {
enable-method = "psci";
};
+
+ idle-states {
+ cpu-sleep {
+ arm,psci-suspend-param = <0x00010007>;
+ status = "okay";
+ };
+ };
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 6574396d2257..a550c0a4d572 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -384,14 +384,22 @@
};
timer@60005000 {
- compatible = "nvidia,tegra210-timer", "nvidia,tegra20-timer";
+ compatible = "nvidia,tegra210-timer";
reg = <0x0 0x60005000 0x0 0x400>;
- interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA210_CLK_TIMER>;
clock-names = "timer";
};
@@ -1363,24 +1371,51 @@
<&dfll>;
clock-names = "cpu_g", "pll_x", "pll_p", "dfll";
clock-latency = <300000>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ next-level-cache = <&L2>;
};
cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <1>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ next-level-cache = <&L2>;
};
cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <2>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ next-level-cache = <&L2>;
};
cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a57";
reg = <3>;
+ cpu-idle-states = <&CPU_SLEEP>;
+ next-level-cache = <&L2>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP: cpu-sleep {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x40000007>;
+ entry-latency-us = <100>;
+ exit-latency-us = <30>;
+ min-residency-us = <1000>;
+ wakeup-latency-us = <130>;
+ idle-state-name = "cpu-sleep";
+ status = "disabled";
+ };
+ };
+
+ L2: l2-cache {
+ compatible = "cache";
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
index 6a573875d45a..1c0d06f59d00 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pins.dtsi
@@ -62,4 +62,56 @@
bias-disable;
};
};
+
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
index a6ad3d7fe655..31a3e3311ad5 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c-pmic-pins.dtsi
@@ -36,6 +36,14 @@
};
};
+ audio_mclk: clk_div1 {
+ pinconf {
+ pins = "gpio15";
+ function = "func1";
+ power-source = <PM8994_GPIO_S4>; // 1.8V
+ };
+ };
+
volume_up_gpio: pm8996_gpio2 {
pinconf {
pins = "gpio2";
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index 6d50449fbcdf..943f69912074 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -18,6 +18,8 @@
#include "apq8096-db820c-pmic-pins.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
/*
* GPIO name legend: proper name = the GPIO line is used as GPIO
@@ -63,6 +65,7 @@
};
clocks {
+ compatible = "simple-bus";
divclk4: divclk4 {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -72,6 +75,15 @@
pinctrl-names = "default";
pinctrl-0 = <&divclk4_pin_a>;
};
+
+ div1_mclk: divclk1 {
+ compatible = "gpio-gate-clock";
+ pinctrl-0 = <&audio_mclk>;
+ pinctrl-names = "default";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+ #clock-cells = <0>;
+ enable-gpios = <&pm8994_gpios 15 0>;
+ };
};
soc {
@@ -452,6 +464,43 @@
perst-gpio = <&msmgpio 114 GPIO_ACTIVE_LOW>;
};
};
+
+ slim_msm: slim@91c0000 {
+ ngd@1 {
+ wcd9335: codec@1{
+ clock-names = "mclk", "slimbus";
+ clocks = <&div1_mclk>,
+ <&rpmcc RPM_SMD_BB_CLK1>;
+ };
+ };
+ };
+
+ mdss@900000 {
+ status = "okay";
+
+ mdp@901000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ #phy-cells = <0>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ };
+ };
};
@@ -639,3 +688,75 @@
};
};
};
+
+&sound {
+ compatible = "qcom,apq8096-sndcard";
+ model = "DB820c";
+ audio-routing = "RX_BIAS", "MCLK";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ hdmi-dai-link {
+ link-name = "HDMI";
+ cpu {
+ sound-dai = <&q6afedai HDMI_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&hdmi 0>;
+ };
+ };
+
+ slim-dai-link {
+ link-name = "SLIM Playback";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_6_RX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9335 6>;
+ };
+ };
+
+ slimcap-dai-link {
+ link-name = "SLIM Capture";
+ cpu {
+ sound-dai = <&q6afedai SLIMBUS_0_TX>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&wcd9335 1>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 0803ca8c02da..423dda996b5d 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -180,19 +180,19 @@
};
thermal-zones {
- cpu-thermal0 {
+ cpu0_1-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens 4>;
trips {
- cpu_alert0: trip0 {
+ cpu0_1_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit0: trip1 {
+ cpu0_1_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -201,7 +201,7 @@
cooling-maps {
map0 {
- trip = <&cpu_alert0>;
+ trip = <&cpu0_1_alert0>;
cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -210,19 +210,19 @@
};
};
- cpu-thermal1 {
+ cpu2_3-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens 3>;
trips {
- cpu_alert1: trip0 {
+ cpu2_3_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit1: trip1 {
+ cpu2_3_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -231,7 +231,7 @@
cooling-maps {
map0 {
- trip = <&cpu_alert1>;
+ trip = <&cpu2_3_alert0>;
cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
@@ -247,12 +247,12 @@
thermal-sensors = <&tsens 2>;
trips {
- gpu_alert: trip0 {
+ gpu_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- gpu_crit: trip1 {
+ gpu_crit: gpu_crit {
temperature = <95000>;
hysteresis = <2000>;
type = "critical";
@@ -267,18 +267,27 @@
thermal-sensors = <&tsens 1>;
trips {
- cam_alert: trip0 {
+ cam_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
- type = "passive";
+ type = "hot";
};
- cam_crit: trip1 {
- temperature = <95000>;
+ };
+ };
+
+ modem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 0>;
+
+ trips {
+ modem_alert0: trip-point@0 {
+ temperature = <85000>;
hysteresis = <2000>;
- type = "critical";
+ type = "hot";
};
};
-
};
};
@@ -1015,8 +1024,9 @@
#clock-cells = <1>;
#phy-cells = <0>;
- clocks = <&gcc GCC_MDSS_AHB_CLK>;
- clock-names = "iface";
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&xo_board>;
+ clock-names = "iface", "ref";
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
index 131878db9852..fba2229b6236 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi
@@ -13,6 +13,49 @@
&msmgpio {
+ wcd9xxx_intr {
+ wcd_intr_default: wcd_intr_default{
+ mux {
+ pins = "gpio54";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio54";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-enable;
+ };
+ };
+ };
+
+ cdc_reset_ctrl {
+ cdc_reset_sleep: cdc_reset_sleep {
+ mux {
+ pins = "gpio64";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio64";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+ cdc_reset_active:cdc_reset_active {
+ mux {
+ pins = "gpio64";
+ function = "gpio";
+ };
+ config {
+ pins = "gpio64";
+ drive-strength = <16>;
+ bias-pull-down;
+ output-high;
+ };
+ };
+ };
+
blsp1_spi0_default: blsp1_spi0_default {
pinmux {
function = "blsp_spi1";
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index c761269caf80..c4e7fde9d88e 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/soc/qcom,apr.h>
/ {
interrupt-parent = <&intc>;
@@ -84,6 +85,12 @@
qcom,client-id = <1>;
qcom,vmid = <15>;
};
+
+ zap_shader_region: gpu@8f200000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0x90b00000 0x0 0xa00000>;
+ no-map;
+ };
};
cpus {
@@ -154,20 +161,20 @@
};
thermal-zones {
- cpu-thermal0 {
+ cpu0-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens0 3>;
trips {
- cpu_alert0: trip0 {
+ cpu0_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit0: trip1 {
+ cpu0_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -175,20 +182,20 @@
};
};
- cpu-thermal1 {
+ cpu1-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens0 5>;
trips {
- cpu_alert1: trip0 {
+ cpu1_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit1: trip1 {
+ cpu1_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -196,20 +203,20 @@
};
};
- cpu-thermal2 {
+ cpu2-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens0 8>;
trips {
- cpu_alert2: trip0 {
+ cpu2_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit2: trip1 {
+ cpu2_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -217,26 +224,176 @@
};
};
- cpu-thermal3 {
+ cpu3-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens0 10>;
trips {
- cpu_alert3: trip0 {
+ cpu3_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit3: trip1 {
+ cpu3_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
};
};
};
+
+ gpu-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ gpu1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ gpu-thermal-bottom {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 7>;
+
+ trips {
+ gpu2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ m4m-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 1>;
+
+ trips {
+ m4m_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ l3-or-venus-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 2>;
+
+ trips {
+ l3_or_venus_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cluster0-l2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 7>;
+
+ trips {
+ cluster0_l2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cluster1-l2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ cluster1_l2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ camera_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ q6-dsp-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ q6_dsp_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ mem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 3>;
+
+ trips {
+ mem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modemtx-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ modemtx_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
};
timer {
@@ -796,6 +953,11 @@
reg = <0x24f 0x1>;
bits = <1 4>;
};
+
+ gpu_speed_bin: gpu_speed_bin@133 {
+ reg = <0x133 0x1>;
+ bits = <5 3>;
+ };
};
phy@34000 {
@@ -1138,6 +1300,70 @@
};
};
+ adreno_smmu: arm,smmu@b40000 {
+ compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ reg = <0xb40000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc GPU_AHB_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+ clock-names = "iface", "bus";
+
+ power-domains = <&mmcc GPU_GDSC>;
+
+ status = "disabled";
+ };
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
+ #iommu-cells = <1>;
+ clocks = <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>;
+ clock-names = "iface", "bus";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ status = "disabled";
+ };
+
+ lpass_q6_smmu: arm,smmu-lpass_q6@1600000 {
+ compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ reg = <0x1600000 0x20000>;
+ #iommu-cells = <1>;
+ power-domains = <&gcc HLOS1_VOTE_LPASS_CORE_GDSC>;
+
+ #global-interrupts = <1>;
+ interrupts = <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&gcc GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CLK>,
+ <&gcc GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "iface", "bus";
+ status = "disabled";
+ };
+
agnoc@0 {
power-domains = <&gcc AGGRE0_NOC_GDSC>;
compatible = "simple-pm-bus";
@@ -1303,6 +1529,265 @@
"bus_slave";
};
};
+
+ slimbam:dma@9184000
+ {
+ compatible = "qcom,bam-v1.7.0";
+ qcom,controlled-remotely;
+ reg = <0x9184000 0x32000>;
+ num-channels = <31>;
+ interrupts = <0 164 IRQ_TYPE_LEVEL_HIGH>;
+ #dma-cells = <1>;
+ qcom,ee = <1>;
+ qcom,num-ees = <2>;
+ };
+
+ slim_msm: slim@91c0000 {
+ compatible = "qcom,slim-ngd-v1.5.0";
+ reg = <0x91c0000 0x2C000>;
+ reg-names = "ctrl";
+ interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&slimbam 3>, <&slimbam 4>,
+ <&slimbam 5>, <&slimbam 6>;
+ dma-names = "rx", "tx", "tx2", "rx2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ngd@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tasha_ifd: tas-ifd {
+ compatible = "slim217,1a0";
+ reg = <0 0>;
+ };
+
+ wcd9335: codec@1{
+ pinctrl-0 = <&cdc_reset_active &wcd_intr_default>;
+ pinctrl-names = "default";
+
+ compatible = "slim217,1a0";
+ reg = <1 0>;
+
+ interrupt-parent = <&msmgpio>;
+ interrupts = <54 IRQ_TYPE_LEVEL_HIGH>,
+ <53 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "intr1", "intr2";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reset-gpios = <&msmgpio 64 0>;
+
+ slim-ifc-dev = <&tasha_ifd>;
+
+ vdd-buck-supply = <&pm8994_s4>;
+ vdd-buck-sido-supply = <&pm8994_s4>;
+ vdd-tx-supply = <&pm8994_s4>;
+ vdd-rx-supply = <&pm8994_s4>;
+ vdd-io-supply = <&pm8994_s4>;
+
+ #sound-dai-cells = <1>;
+ };
+ };
+ };
+
+ gpu@b00000 {
+ compatible = "qcom,adreno-530.2", "qcom,adreno";
+ #stream-id-cells = <16>;
+
+ reg = <0xb00000 0x3f000>;
+ reg-names = "kgsl_3d0_reg_memory";
+
+ interrupts = <0 300 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc GPU_GX_GFX3D_CLK>,
+ <&mmcc GPU_AHB_CLK>,
+ <&mmcc GPU_GX_RBBMTIMER_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_MMSS_BIMC_GFX_CLK>;
+
+ clock-names = "core",
+ "iface",
+ "rbbmtimer",
+ "mem",
+ "mem_iface";
+
+ power-domains = <&mmcc GPU_GDSC>;
+ iommus = <&adreno_smmu 0>;
+
+ nvmem-cells = <&gpu_speed_bin>;
+ nvmem-cell-names = "speed_bin";
+
+ qcom,gpu-quirk-two-pass-use-wfi;
+ qcom,gpu-quirk-fault-detect-mask;
+
+ operating-points-v2 = <&gpu_opp_table>;
+
+ gpu_opp_table: opp-table {
+ compatible ="operating-points-v2";
+
+ /*
+ * 624Mhz and 560Mhz are only available on speed
+ * bin (1 << 0). All the rest are available on
+ * all bins of the hardware
+ */
+ opp-624000000 {
+ opp-hz = /bits/ 64 <624000000>;
+ opp-supported-hw = <0x01>;
+ };
+ opp-560000000 {
+ opp-hz = /bits/ 64 <560000000>;
+ opp-supported-hw = <0x01>;
+ };
+ opp-510000000 {
+ opp-hz = /bits/ 64 <510000000>;
+ opp-supported-hw = <0xFF>;
+ };
+ opp-401800000 {
+ opp-hz = /bits/ 64 <401800000>;
+ opp-supported-hw = <0xFF>;
+ };
+ opp-315000000 {
+ opp-hz = /bits/ 64 <315000000>;
+ opp-supported-hw = <0xFF>;
+ };
+ opp-214000000 {
+ opp-hz = /bits/ 64 <214000000>;
+ opp-supported-hw = <0xFF>;
+ };
+ opp-133000000 {
+ opp-hz = /bits/ 64 <133000000>;
+ opp-supported-hw = <0xFF>;
+ };
+ };
+
+ zap-shader {
+ memory-region = <&zap_shader_region>;
+ };
+ };
+
+ mdss: mdss@900000 {
+ compatible = "qcom,mdss";
+
+ reg = <0x900000 0x1000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdss_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mdp: mdp@901000 {
+ compatible = "qcom,mdp5";
+ reg = <0x901000 0x90000>;
+ reg-names = "mdp_phys";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk";
+
+ iommus = <&mdp_smmu 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ mdp5_intf3_out: endpoint {
+ remote-endpoint = <&hdmi_in>;
+ };
+ };
+ };
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdss>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ #sound-dai-cells = <1>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ hdmi_in: endpoint {
+ remote-endpoint = <&mdp5_intf3_out>;
+ };
+ };
+ };
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ #phy-cells = <0>;
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "ref_clk";
+ };
+ };
+ };
+
+ sound: sound {
};
adsp-pil {
@@ -1331,6 +1816,55 @@
mboxes = <&apcs_glb 8>;
qcom,smd-edge = <1>;
qcom,remote-pid = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ apr {
+ power-domains = <&gcc HLOS1_VOTE_LPASS_ADSP_GDSC>;
+ compatible = "qcom,apr-v2";
+ qcom,smd-channels = "apr_audio_svc";
+ reg = <APR_DOMAIN_ADSP>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ q6core {
+ reg = <APR_SVC_ADSP_CORE>;
+ compatible = "qcom,q6core";
+ };
+
+ q6afe: q6afe {
+ compatible = "qcom,q6afe";
+ reg = <APR_SVC_AFE>;
+ q6afedai: dais {
+ compatible = "qcom,q6afe-dais";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #sound-dai-cells = <1>;
+ hdmi@1 {
+ reg = <1>;
+ };
+ };
+ };
+
+ q6asm: q6asm {
+ compatible = "qcom,q6asm";
+ reg = <APR_SVC_ASM>;
+ q6asmdai: dais {
+ compatible = "qcom,q6asm-dais";
+ #sound-dai-cells = <1>;
+ iommus = <&lpass_q6_smmu 1>;
+ };
+ };
+
+ q6adm: q6adm {
+ compatible = "qcom,q6adm";
+ reg = <APR_SVC_ADM>;
+ q6routing: routing {
+ compatible = "qcom,q6adm-routing";
+ #sound-dai-cells = <0>;
+ };
+ };
+ };
+
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index f0901067b043..f09f3e03f708 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -15,44 +15,6 @@
stdout-path = "serial0:115200n8";
};
- thermal-zones {
- battery-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
-
- thermal-sensors = <&tsens0 0>;
-
- trips {
- battery_crit: trip0 {
- temperature = <60000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
- };
-
- skin-thermal {
- polling-delay-passive = <250>;
- polling-delay = <1000>;
-
- thermal-sensors = <&tsens1 5>;
-
- trips {
- skin_alert: trip0 {
- temperature = <44000>;
- hysteresis = <2000>;
- type = "passive";
- };
-
- skip_crit: trip1 {
- temperature = <70000>;
- hysteresis = <2000>;
- type = "critical";
- };
- };
- };
- };
-
vph_pwr: vph-pwr-regulator {
compatible = "regulator-fixed";
regulator-name = "vph_pwr";
@@ -111,6 +73,7 @@
vreg_s4a_1p8: s4 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
};
vreg_s5a_2p04: s5 {
regulator-min-microvolt = <1904000>;
@@ -195,6 +158,7 @@
vreg_l20a_2p95: l20 {
regulator-min-microvolt = <2960000>;
regulator-max-microvolt = <2960000>;
+ regulator-allow-set-load;
};
vreg_l21a_2p95: l21 {
regulator-min-microvolt = <2960000>;
@@ -221,6 +185,7 @@
vreg_l26a_1p2: l26 {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
};
vreg_l28_3p0: l28 {
regulator-min-microvolt = <3008000>;
@@ -267,6 +232,25 @@
pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
};
+&ufshc {
+ vcc-supply = <&vreg_l20a_2p95>;
+ vccq-supply = <&vreg_l26a_1p2>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vcc-max-microamp = <750000>;
+ vccq-max-microamp = <560000>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufsphy {
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l2a_1p2>;
+ vddp-ref-clk-supply = <&vreg_l26a_1p2>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14600>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+};
+
&usb3 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 3fd0769fe648..574be78a936e 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -78,7 +78,6 @@
compatible = "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
- efficiency = <1024>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
compatible = "arm,arch-cache";
@@ -97,7 +96,6 @@
compatible = "arm,armv8";
reg = <0x0 0x1>;
enable-method = "psci";
- efficiency = <1024>;
next-level-cache = <&L2_0>;
L1_I_1: l1-icache {
compatible = "arm,arch-cache";
@@ -112,7 +110,6 @@
compatible = "arm,armv8";
reg = <0x0 0x2>;
enable-method = "psci";
- efficiency = <1024>;
next-level-cache = <&L2_0>;
L1_I_2: l1-icache {
compatible = "arm,arch-cache";
@@ -127,7 +124,6 @@
compatible = "arm,armv8";
reg = <0x0 0x3>;
enable-method = "psci";
- efficiency = <1024>;
next-level-cache = <&L2_0>;
L1_I_3: l1-icache {
compatible = "arm,arch-cache";
@@ -142,7 +138,6 @@
compatible = "arm,armv8";
reg = <0x0 0x100>;
enable-method = "psci";
- efficiency = <1536>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
compatible = "arm,arch-cache";
@@ -161,7 +156,6 @@
compatible = "arm,armv8";
reg = <0x0 0x101>;
enable-method = "psci";
- efficiency = <1536>;
next-level-cache = <&L2_1>;
L1_I_101: l1-icache {
compatible = "arm,arch-cache";
@@ -176,7 +170,6 @@
compatible = "arm,armv8";
reg = <0x0 0x102>;
enable-method = "psci";
- efficiency = <1536>;
next-level-cache = <&L2_1>;
L1_I_102: l1-icache {
compatible = "arm,arch-cache";
@@ -191,7 +184,6 @@
compatible = "arm,armv8";
reg = <0x0 0x103>;
enable-method = "psci";
- efficiency = <1536>;
next-level-cache = <&L2_1>;
L1_I_103: l1-icache {
compatible = "arm,arch-cache";
@@ -346,20 +338,20 @@
};
thermal-zones {
- cpu-thermal0 {
+ cpu0-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 6>;
+ thermal-sensors = <&tsens0 1>;
trips {
- cpu_alert0: trip0 {
+ cpu0_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit0: trip1 {
+ cpu0_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -367,20 +359,20 @@
};
};
- cpu-thermal1 {
+ cpu1-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 7>;
+ thermal-sensors = <&tsens0 2>;
trips {
- cpu_alert1: trip0 {
+ cpu1_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit1: trip1 {
+ cpu1_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -388,20 +380,20 @@
};
};
- cpu-thermal2 {
+ cpu2-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 8>;
+ thermal-sensors = <&tsens0 3>;
trips {
- cpu_alert2: trip0 {
+ cpu2_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit2: trip1 {
+ cpu2_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -409,20 +401,20 @@
};
};
- cpu-thermal3 {
+ cpu3-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 9>;
+ thermal-sensors = <&tsens0 4>;
trips {
- cpu_alert3: trip0 {
+ cpu3_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit3: trip1 {
+ cpu3_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -430,20 +422,20 @@
};
};
- cpu-thermal4 {
+ cpu4-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 10>;
+ thermal-sensors = <&tsens0 7>;
trips {
- cpu_alert4: trip0 {
+ cpu4_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit4: trip1 {
+ cpu4_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -451,20 +443,20 @@
};
};
- cpu-thermal5 {
+ cpu5-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens0 11>;
+ thermal-sensors = <&tsens0 8>;
trips {
- cpu_alert5: trip0 {
+ cpu5_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit5: trip1 {
+ cpu5_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -472,20 +464,20 @@
};
};
- cpu-thermal6 {
+ cpu6-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens1 0>;
+ thermal-sensors = <&tsens0 9>;
trips {
- cpu_alert6: trip0 {
+ cpu6_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit6: trip1 {
+ cpu6_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -493,20 +485,20 @@
};
};
- cpu-thermal7 {
+ cpu7-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
- thermal-sensors = <&tsens1 1>;
+ thermal-sensors = <&tsens0 10>;
trips {
- cpu_alert7: trip0 {
+ cpu7_alert0: trip-point@0 {
temperature = <75000>;
hysteresis = <2000>;
type = "passive";
};
- cpu_crit7: trip1 {
+ cpu7_crit: cpu_crit {
temperature = <110000>;
hysteresis = <2000>;
type = "critical";
@@ -514,11 +506,169 @@
};
};
- gpu-thermal {
+ gpu-thermal-bottom {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ gpu1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ gpu-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 13>;
+
+ trips {
+ gpu2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ clust0-mhm-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cluster0_mhm_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ clust1-mhm-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ trips {
+ cluster1_mhm_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cluster1-l2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ cluster1_l2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ modem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ mem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ mem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ wlan-thermal {
polling-delay-passive = <250>;
polling-delay = <1000>;
thermal-sensors = <&tsens1 3>;
+
+ trips {
+ wlan_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ q6-dsp-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ q6_dsp_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 5>;
+
+ trips {
+ camera_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ multimedia-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ multimedia_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
};
};
@@ -590,17 +740,19 @@
cell-index = <0>;
};
- tsens0: thermal@10aa000 {
+ tsens0: thermal@10ab000 {
compatible = "qcom,msm8998-tsens", "qcom,tsens-v2";
- reg = <0x10aa000 0x2000>;
+ reg = <0x10ab000 0x1000>, /* TM */
+ <0x10aa000 0x1000>; /* SROT */
- #qcom,sensors = <12>;
+ #qcom,sensors = <14>;
#thermal-sensor-cells = <1>;
};
- tsens1: thermal@10ad000 {
+ tsens1: thermal@10ae000 {
compatible = "qcom,msm8998-tsens", "qcom,tsens-v2";
- reg = <0x10ad000 0x2000>;
+ reg = <0x10ae000 0x1000>, /* TM */
+ <0x10ad000 0x1000>; /* SROT */
#qcom,sensors = <8>;
#thermal-sensor-cells = <1>;
@@ -889,7 +1041,7 @@
blsp2_i2c5: i2c@c1ba000 {
compatible = "qcom,i2c-qup-v2.2.1";
- reg = <0x0c175000 0x600>;
+ reg = <0x0c1ba000 0x600>;
interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>,
@@ -983,6 +1135,75 @@
redistributor-stride = <0x0 0x20000>;
interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
};
+
+ ufshc: ufshc@1da4000 {
+ compatible = "qcom,msm8998-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
+ reg = <0x01da4000 0x2500>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufsphy_lanes>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ power-domains = <&gcc UFS_GDSC>;
+ #reset-cells = <1>;
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE1_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&rpmcc RPM_SMD_LN_BB_CLK1>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_1_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ resets = <&gcc GCC_UFS_BCR>;
+ reset-names = "rst";
+ };
+
+ ufsphy: phy@1da7000 {
+ compatible = "qcom,msm8998-qmp-ufs-phy";
+ reg = <0x01da7000 0x18c>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ clock-names =
+ "ref",
+ "ref_aux";
+ clocks =
+ <&gcc GCC_UFS_CLKREF_CLK>,
+ <&gcc GCC_UFS_PHY_AUX_CLK>;
+
+ reset-names = "ufsphy";
+ resets = <&ufshc 0>;
+
+ ufsphy_lanes: lanes@1da7400 {
+ reg = <0x01da7400 0x128>,
+ <0x01da7600 0x1fc>,
+ <0x01da7c00 0x1dc>,
+ <0x01da7800 0x128>,
+ <0x01da7a00 0x1fc>;
+ #phy-cells = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi
index c0ddf128136c..3f97607d8baa 100644
--- a/arch/arm64/boot/dts/qcom/pm8005.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi
@@ -15,6 +15,7 @@
compatible = "qcom,pm8005-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pm8005_gpio 0 0 4>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 43cb5ea14089..d3ca35a940fb 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -58,6 +58,8 @@
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400>;
interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_RISING>;
+ io-channels = <&pm8998_adc ADC5_DIE_TEMP>;
+ io-channel-names = "thermal";
#thermal-sensor-cells = <0>;
};
@@ -93,6 +95,7 @@
compatible = "qcom,pm8998-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pm8998_gpio 0 0 26>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
index 3aee10e3f921..21e05215abe4 100644
--- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
@@ -14,6 +14,7 @@
compatible = "qcom,pmi8994-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pmi8994_gpios 0 0 10>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 051f57e7d6ac..23f9146a161e 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -13,6 +13,7 @@
compatible = "qcom,pmi8998-gpio", "qcom,spmi-gpio";
reg = <0xc000>;
gpio-controller;
+ gpio-ranges = <&pmi8998_gpio 0 0 14>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi
index 1bb836d1e8aa..e8e186bc1ea7 100644
--- a/arch/arm64/boot/dts/qcom/pms405.dtsi
+++ b/arch/arm64/boot/dts/qcom/pms405.dtsi
@@ -131,4 +131,15 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
};
};
+
+ pms405_1: pms405@1 {
+ compatible = "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pms405_spmi_regulators: regulators {
+ compatible = "qcom,pms405-regulators";
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb-1000.dts b/arch/arm64/boot/dts/qcom/qcs404-evb-1000.dts
index 2c14903d808e..937eb4555ffe 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb-1000.dts
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb-1000.dts
@@ -7,5 +7,6 @@
/ {
model = "Qualcomm Technologies, Inc. QCS404 EVB 1000";
- compatible = "qcom,qcs404-evb";
+ compatible = "qcom,qcs404-evb-1000", "qcom,qcs404-evb",
+ "qcom,qcs404";
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb-4000.dts b/arch/arm64/boot/dts/qcom/qcs404-evb-4000.dts
index 11269ad3de0d..479ad3ac6c28 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb-4000.dts
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb-4000.dts
@@ -3,9 +3,92 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "qcs404-evb.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QCS404 EVB 4000";
- compatible = "qcom,qcs404-evb";
+ compatible = "qcom,qcs404-evb-4000", "qcom,qcs404-evb",
+ "qcom,qcs404";
+};
+
+&ethernet {
+ status = "ok";
+
+ snps,reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 10000>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ethernet_defaults>;
+
+ phy-handle = <&phy1>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: phy@4 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ device_type = "ethernet-phy";
+ reg = <0x4>;
+ };
+ };
+};
+
+&tlmm {
+ ethernet_defaults: ethernet-defaults {
+ int {
+ pins = "gpio61";
+ function = "rgmii_int";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ mdc {
+ pins = "gpio76";
+ function = "rgmii_mdc";
+ bias-pull-up;
+ };
+ mdio {
+ pins = "gpio75";
+ function = "rgmii_mdio";
+ bias-pull-up;
+ };
+ tx {
+ pins = "gpio67", "gpio66", "gpio65", "gpio64";
+ function = "rgmii_tx";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ rx {
+ pins = "gpio73", "gpio72", "gpio71", "gpio70";
+ function = "rgmii_rx";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ tx-ctl {
+ pins = "gpio68";
+ function = "rgmii_ctl";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ rx-ctl {
+ pins = "gpio74";
+ function = "rgmii_ctl";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ tx-ck {
+ pins = "gpio63";
+ function = "rgmii_ck";
+ bias-pull-up;
+ drive-strength = <16>;
+ };
+ rx-ck {
+ pins = "gpio69";
+ function = "rgmii_ck";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index 50b3589c7f15..2c3127167e3c 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -7,6 +7,7 @@
/ {
aliases {
serial0 = &blsp1_uart2;
+ serial1 = &blsp1_uart3;
};
chosen {
@@ -19,6 +20,52 @@
regulator-always-on;
regulator-boot-on;
};
+
+ vdd_ch0_3p3:
+ vdd_esmps3_3p3: vdd-esmps3-3p3-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "eSMPS3_3P3";
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+};
+
+&blsp1_uart3 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+ vddio-supply = <&vreg_l6_1p8>;
+ vddxo-supply = <&vreg_l5_1p8>;
+ vddrf-supply = <&vreg_l1_1p3>;
+ vddch0-supply = <&vdd_ch0_3p3>;
+
+ local-bd-address = [ 02 00 00 00 5a ad ];
+
+ max-speed = <3200000>;
+ };
+};
+
+&blsp1_dma {
+ qcom,controlled-remotely;
+};
+
+&blsp2_dma {
+ qcom,controlled-remotely;
+};
+
+&pms405_spmi_regulators {
+ vdd_s3-supply = <&pms405_s3>;
+
+ pms405_s3: s3 {
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-name = "vdd_apc";
+ regulator-min-microvolt = <1048000>;
+ regulator-max-microvolt = <1352000>;
+ };
};
&remoteproc_adsp {
@@ -37,18 +84,18 @@
pms405-regulators {
compatible = "qcom,rpm-pms405-regulators";
- vdd-s1-supply = <&vph_pwr>;
- vdd-s2-supply = <&vph_pwr>;
- vdd-s3-supply = <&vph_pwr>;
- vdd-s4-supply = <&vph_pwr>;
- vdd-s5-supply = <&vph_pwr>;
- vdd-l1-l2-supply = <&vreg_s5_1p35>;
- vdd-l3-l8-supply = <&vreg_s5_1p35>;
- vdd-l4-supply = <&vreg_s5_1p35>;
- vdd-l5-l6-supply = <&vreg_s4_1p8>;
- vdd-l7-supply = <&vph_pwr>;
- vdd-l9-supply = <&vreg_s5_1p35>;
- vdd-l10-l11-l12-l13-supply = <&vph_pwr>;
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_l1_l2-supply = <&vreg_s5_1p35>;
+ vdd_l3_l8-supply = <&vreg_s5_1p35>;
+ vdd_l4-supply = <&vreg_s5_1p35>;
+ vdd_l5_l6-supply = <&vreg_s4_1p8>;
+ vdd_l7-supply = <&vph_pwr>;
+ vdd_l9-supply = <&vreg_s5_1p35>;
+ vdd_l10_l11_l12_l13-supply = <&vph_pwr>;
vreg_s4_1p8: s4 {
regulator-min-microvolt = <1728000>;
@@ -56,8 +103,8 @@
};
vreg_s5_1p35: s5 {
- regulator-min-microvolt = <>;
- regulator-max-microvolt = <>;
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
};
vreg_l1_1p3: l1 {
@@ -71,7 +118,7 @@
};
vreg_l3_1p05: l3 {
- regulator-min-microvolt = <976000>;
+ regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1160000>;
};
@@ -205,3 +252,21 @@
bias-disable;
};
};
+
+&blsp1_uart3_default {
+ cts {
+ pins = "gpio84";
+ bias-disable;
+ };
+
+ rts-tx {
+ pins = "gpio85", "gpio82";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio83";
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi
index e8fd26633d57..ffedf9640af7 100644
--- a/arch/arm64/boot/dts/qcom/qcs404.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi
@@ -435,7 +435,6 @@
clocks = <&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "bam_clk";
#dma-cells = <1>;
- qcom,controlled-remotely = <1>;
qcom,ee = <0>;
status = "okay";
};
@@ -479,6 +478,27 @@
status = "okay";
};
+ ethernet: ethernet@7a80000 {
+ compatible = "qcom,qcs404-ethqos";
+ reg = <0x07a80000 0x10000>,
+ <0x07a96000 0x100>;
+ reg-names = "stmmaceth", "rgmii";
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "rgmii";
+ clocks = <&gcc GCC_ETH_AXI_CLK>,
+ <&gcc GCC_ETH_SLAVE_AHB_CLK>,
+ <&gcc GCC_ETH_PTP_CLK>,
+ <&gcc GCC_ETH_RGMII_CLK>;
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+
+ snps,tso;
+ rx-fifo-depth = <4096>;
+ tx-fifo-depth = <4096>;
+
+ status = "disabled";
+ };
+
wifi: wifi@a000000 {
compatible = "qcom,wcn3990-wifi";
reg = <0xa000000 0x800000>;
@@ -659,7 +679,6 @@
clocks = <&gcc GCC_BLSP2_AHB_CLK>;
clock-names = "bam_clk";
#dma-cells = <1>;
- qcom,controlled-remotely = <1>;
qcom,ee = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index af8c6a2445a2..02b8357c8ce8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -48,6 +48,10 @@
};
};
+&adsp_pas {
+ status = "okay";
+};
+
&apps_rsc {
pm8998-rpmh-regulators {
compatible = "qcom,pm8998-rpmh-regulators";
@@ -344,6 +348,10 @@
};
};
+&cdsp_pas {
+ status = "okay";
+};
+
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5308f1671824..fcb93300ca62 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -11,8 +11,10 @@
#include <dt-bindings/clock/qcom,lpass-sdm845.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/clock/qcom,videocc-sdm845.h>
+#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
#include <dt-bindings/reset/qcom,sdm845-pdc.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -73,29 +75,78 @@
#size-cells = <2>;
ranges;
- memory@85fc0000 {
+ hyp_mem: memory@85700000 {
+ reg = <0 0x85700000 0 0x600000>;
+ no-map;
+ };
+
+ xbl_mem: memory@85e00000 {
+ reg = <0 0x85e00000 0 0x100000>;
+ no-map;
+ };
+
+ aop_mem: memory@85fc0000 {
reg = <0 0x85fc0000 0 0x20000>;
no-map;
};
- memory@85fe0000 {
+ aop_cmd_db_mem: memory@85fe0000 {
compatible = "qcom,cmd-db";
- reg = <0x0 0x85fe0000 0x0 0x20000>;
+ reg = <0x0 0x85fe0000 0 0x20000>;
no-map;
};
smem_mem: memory@86000000 {
- reg = <0x0 0x86000000 0x0 0x200000>;
+ reg = <0x0 0x86000000 0 0x200000>;
no-map;
};
- memory@86200000 {
+ tz_mem: memory@86200000 {
reg = <0 0x86200000 0 0x2d00000>;
no-map;
};
- wlan_msa_mem: memory@96700000 {
- reg = <0 0x96700000 0 0x100000>;
+ rmtfs_mem: memory@88f00000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0 0x88f00000 0 0x200000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
+ qseecom_mem: memory@8ab00000 {
+ reg = <0 0x8ab00000 0 0x1400000>;
+ no-map;
+ };
+
+ camera_mem: memory@8bf00000 {
+ reg = <0 0x8bf00000 0 0x500000>;
+ no-map;
+ };
+
+ ipa_fw_mem: memory@8c400000 {
+ reg = <0 0x8c400000 0 0x10000>;
+ no-map;
+ };
+
+ ipa_gsi_mem: memory@8c410000 {
+ reg = <0 0x8c410000 0 0x5000>;
+ no-map;
+ };
+
+ gpu_mem: memory@8c415000 {
+ reg = <0 0x8c415000 0 0x2000>;
+ no-map;
+ };
+
+ adsp_mem: memory@8c500000 {
+ reg = <0 0x8c500000 0 0x1a00000>;
+ no-map;
+ };
+
+ wlan_msa_mem: memory@8df00000 {
+ reg = <0 0x8df00000 0 0x100000>;
no-map;
};
@@ -104,10 +155,30 @@
no-map;
};
+ venus_mem: memory@95800000 {
+ reg = <0 0x95800000 0 0x500000>;
+ no-map;
+ };
+
+ cdsp_mem: memory@95d00000 {
+ reg = <0 0x95d00000 0 0x800000>;
+ no-map;
+ };
+
mba_region: memory@96500000 {
reg = <0 0x96500000 0 0x200000>;
no-map;
};
+
+ slpi_mem: memory@96700000 {
+ reg = <0 0x96700000 0 0x1400000>;
+ no-map;
+ };
+
+ spss_mem: memory@97b00000 {
+ reg = <0 0x97b00000 0 0x100000>;
+ no-map;
+ };
};
cpus {
@@ -119,6 +190,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <607>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
next-level-cache = <&L2_0>;
@@ -136,6 +208,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <607>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
next-level-cache = <&L2_100>;
@@ -150,6 +223,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x200>;
enable-method = "psci";
+ capacity-dmips-mhz = <607>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
next-level-cache = <&L2_200>;
@@ -164,6 +238,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x300>;
enable-method = "psci";
+ capacity-dmips-mhz = <607>;
qcom,freq-domain = <&cpufreq_hw 0>;
#cooling-cells = <2>;
next-level-cache = <&L2_300>;
@@ -178,6 +253,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x400>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
next-level-cache = <&L2_400>;
@@ -192,6 +268,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x500>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
next-level-cache = <&L2_500>;
@@ -206,6 +283,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x600>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
next-level-cache = <&L2_600>;
@@ -220,6 +298,7 @@
compatible = "qcom,kryo385";
reg = <0x0 0x700>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
qcom,freq-domain = <&cpufreq_hw 1>;
#cooling-cells = <2>;
next-level-cache = <&L2_700>;
@@ -228,6 +307,44 @@
next-level-cache = <&L3_0>;
};
};
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&CPU0>;
+ };
+
+ core1 {
+ cpu = <&CPU1>;
+ };
+
+ core2 {
+ cpu = <&CPU2>;
+ };
+
+ core3 {
+ cpu = <&CPU3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&CPU4>;
+ };
+
+ core1 {
+ cpu = <&CPU5>;
+ };
+
+ core2 {
+ cpu = <&CPU6>;
+ };
+
+ core3 {
+ cpu = <&CPU7>;
+ };
+ };
+ };
};
pmu {
@@ -264,6 +381,64 @@
};
};
+ adsp_pas: remoteproc-adsp {
+ compatible = "qcom,sdm845-adsp-pas";
+
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&adsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ memory-region = <&adsp_mem>;
+
+ qcom,smem-states = <&adsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+ label = "lpass";
+ qcom,remote-pid = <2>;
+ mboxes = <&apss_shared 8>;
+ };
+ };
+
+ cdsp_pas: remoteproc-cdsp {
+ compatible = "qcom,sdm845-cdsp-pas";
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>,
+ <&cdsp_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&cdsp_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&cdsp_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&cdsp_smp2p_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ memory-region = <&cdsp_mem>;
+
+ qcom,smem-states = <&cdsp_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts = <GIC_SPI 574 IRQ_TYPE_EDGE_RISING>;
+ label = "turing";
+ qcom,remote-pid = <5>;
+ mboxes = <&apss_shared 4>;
+ };
+ };
+
tcsr_mutex: hwlock {
compatible = "qcom,tcsr-mutex";
syscon = <&tcsr_mutex_regs 0 0x1000>;
@@ -1033,6 +1208,7 @@
phy-names = "ufsphy";
lanes-per-direction = <2>;
power-domains = <&gcc UFS_PHY_GDSC>;
+ #reset-cells = <1>;
iommus = <&apps_smmu 0x100 0xf>;
@@ -1078,6 +1254,8 @@
clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>,
<&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
status = "disabled";
ufs_mem_phy_lanes: lanes@1d87400 {
@@ -1853,8 +2031,9 @@
#clock-cells = <1>;
#phy-cells = <0>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>;
- clock-names = "iface";
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
status = "disabled";
};
@@ -1919,8 +2098,9 @@
#clock-cells = <1>;
#phy-cells = <0>;
- clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>;
- clock-names = "iface";
+ clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "iface", "ref";
status = "disabled";
};
@@ -2098,43 +2278,43 @@
compatible = "operating-points-v2";
rpmhpd_opp_ret: opp1 {
- opp-level = <16>;
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
};
rpmhpd_opp_min_svs: opp2 {
- opp-level = <48>;
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
};
rpmhpd_opp_low_svs: opp3 {
- opp-level = <64>;
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
};
rpmhpd_opp_svs: opp4 {
- opp-level = <128>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
};
rpmhpd_opp_svs_l1: opp5 {
- opp-level = <192>;
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
};
rpmhpd_opp_nom: opp6 {
- opp-level = <256>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
};
rpmhpd_opp_nom_l1: opp7 {
- opp-level = <320>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
};
rpmhpd_opp_nom_l2: opp8 {
- opp-level = <336>;
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
};
rpmhpd_opp_turbo: opp9 {
- opp-level = <384>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
};
rpmhpd_opp_turbo_l1: opp10 {
- opp-level = <416>;
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
};
};
};
@@ -2611,5 +2791,210 @@
};
};
};
+
+ aoss0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 0>;
+
+ trips {
+ aoss0_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cluster0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cluster0_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cluster0_crit: cluster0_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cluster1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ trips {
+ cluster1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cluster1_crit: cluster1_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ gpu-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ gpu1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ gpu-thermal-bottom {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ gpu2_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ aoss1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 0>;
+
+ trips {
+ aoss1_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ q6-modem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ q6_modem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ mem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ mem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ wlan-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 3>;
+
+ trips {
+ wlan_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ q6-hvx-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ q6_hvx_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 5>;
+
+ trips {
+ camera_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ video-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ video_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 7>;
+
+ trips {
+ modem_alert0: trip-point@0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi
index 14db66755a89..aaefc3ae56d5 100644
--- a/arch/arm64/boot/dts/renesas/cat875.dtsi
+++ b/arch/arm64/boot/dts/renesas/cat875.dtsi
@@ -30,6 +30,18 @@
};
};
+&can0 {
+ pinctrl-0 = <&can0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-0 = <&can1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
&pciec0 {
status = "okay";
};
@@ -41,4 +53,14 @@
function = "avb";
};
};
+
+ can0_pins: can0 {
+ groups = "can0_data";
+ function = "can0";
+ };
+
+ can1_pins: can1 {
+ groups = "can1_data";
+ function = "can1";
+ };
};
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index ef3cff2dd1b6..de282c4794ed 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -879,8 +879,10 @@
"renesas,rcar-gen3-can";
reg = <0 0xe6c30000 0 0x1000>;
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 916>, <&can_clk>;
- clock-names = "clkp1", "can_clk";
+ clocks = <&cpg CPG_MOD 916>,
+ <&cpg CPG_CORE R8A774A1_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 916>;
status = "disabled";
@@ -891,8 +893,10 @@
"renesas,rcar-gen3-can";
reg = <0 0xe6c38000 0 0x1000>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 915>, <&can_clk>;
- clock-names = "clkp1", "can_clk";
+ clocks = <&cpg CPG_MOD 915>,
+ <&cpg CPG_CORE R8A774A1_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>;
resets = <&cpg 915>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
index 96ee0d2c6357..013a48c01211 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
@@ -22,6 +22,30 @@
stdout-path = "serial0:115200n8";
};
+ leds {
+ compatible = "gpio-leds";
+
+ led0 {
+ gpios = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+ label = "LED0";
+ };
+
+ led1 {
+ gpios = <&gpio3 14 GPIO_ACTIVE_HIGH>;
+ label = "LED1";
+ };
+
+ led2 {
+ gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>;
+ label = "LED2";
+ };
+
+ led3 {
+ gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>;
+ label = "LED3";
+ };
+ };
+
memory@48000000 {
device_type = "memory";
/* first 128MB is reserved for secure area. */
@@ -52,10 +76,33 @@
};
};
+&ehci0 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&extal_clk {
clock-frequency = <48000000>;
};
+&i2c1 {
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+
+ status = "okay";
+ clock-frequency = <400000>;
+
+ rtc@32 {
+ compatible = "epson,rx8571";
+ reg = <0x32>;
+ };
+};
+
+&ohci0 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&pcie_bus_clk {
clock-frequency = <100000000>;
};
@@ -66,6 +113,11 @@
};
&pfc {
+ i2c1_pins: i2c1 {
+ groups = "i2c1_b";
+ function = "i2c1";
+ };
+
scif2_pins: scif2 {
groups = "scif2_data_a";
function = "scif2";
@@ -84,6 +136,11 @@
};
};
+&rwdt {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
&scif2 {
pinctrl-0 = <&scif2_pins>;
pinctrl-names = "default";
@@ -104,3 +161,8 @@
sd-uhs-sdr104;
status = "okay";
};
+
+&usb2_phy0 {
+ renesas,no-otg-pins;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 1ea684af99c4..3f86db199dbf 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -76,7 +76,7 @@
power-domains = <&sysc R8A774C0_PD_CA53_CPU0>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
- clocks =<&cpg CPG_CORE R8A774C0_CLK_Z2>;
+ clocks = <&cpg CPG_CORE R8A774C0_CLK_Z2>;
operating-points-v2 = <&cluster1_opp>;
};
@@ -87,7 +87,7 @@
power-domains = <&sysc R8A774C0_PD_CA53_CPU1>;
next-level-cache = <&L2_CA53>;
enable-method = "psci";
- clocks =<&cpg CPG_CORE R8A774C0_CLK_Z2>;
+ clocks = <&cpg CPG_CORE R8A774C0_CLK_Z2>;
operating-points-v2 = <&cluster1_opp>;
};
@@ -969,8 +969,10 @@
"renesas,rcar-gen3-can";
reg = <0 0xe6c30000 0 0x1000>;
interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 916>, <&can_clk>;
- clock-names = "clkp1", "can_clk";
+ clocks = <&cpg CPG_MOD 916>,
+ <&cpg CPG_CORE R8A774C0_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 916>;
status = "disabled";
@@ -981,13 +983,40 @@
"renesas,rcar-gen3-can";
reg = <0 0xe6c38000 0 0x1000>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 915>, <&can_clk>;
- clock-names = "clkp1", "can_clk";
+ clocks = <&cpg CPG_MOD 915>,
+ <&cpg CPG_CORE R8A774C0_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
resets = <&cpg 915>;
status = "disabled";
};
+ canfd: can@e66c0000 {
+ compatible = "renesas,r8a774c0-canfd",
+ "renesas,rcar-gen3-canfd";
+ reg = <0 0xe66c0000 0 0x8000>;
+ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 914>,
+ <&cpg CPG_CORE R8A774C0_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "fck", "canfd", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A774C0_CLK_CANFD>;
+ assigned-clock-rates = <40000000>;
+ power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
+ resets = <&cpg 914>;
+ status = "disabled";
+
+ channel0 {
+ status = "disabled";
+ };
+
+ channel1 {
+ status = "disabled";
+ };
+ };
+
pwm0: pwm@e6e30000 {
compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar";
reg = <0 0xe6e30000 0 0x8>;
@@ -1740,8 +1769,7 @@
};
csi40: csi2@feaa0000 {
- compatible = "renesas,r8a774c0-csi2",
- "renesas,rcar-gen3-csi2";
+ compatible = "renesas,r8a774c0-csi2";
reg = <0 0xfeaa0000 0 0x10000>;
interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 716>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
index abeac3059383..097538cc4b1f 100644
--- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi
@@ -462,6 +462,76 @@
reg = <0 0xe6060000 0 0x50c>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a7795-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a7795-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a7795-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a7795-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7795-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -1836,7 +1906,7 @@
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
<0 0xec541000 0 0x280>, /* SSI */
- <0 0xec740000 0 0x200>; /* Audio DMAC peri peri*/
+ <0 0xec760000 0 0x200>; /* Audio DMAC peri peri*/
reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
clocks = <&cpg CPG_MOD 1005>,
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
index b4f9567cb9f8..2aefa53cb16b 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-x.dts
@@ -68,6 +68,7 @@
ports {
/* rsnd_port0 is on salvator-common */
rsnd_port1: port@1 {
+ reg = <1>;
rsnd_endpoint1: endpoint {
remote-endpoint = <&dw_hdmi0_snd_in>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
index 31f12059355e..d58ede18108d 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts
@@ -68,6 +68,7 @@
ports {
/* rsnd_port0 is on salvator-common */
rsnd_port1: port@1 {
+ reg = <1>;
rsnd_endpoint1: endpoint {
remote-endpoint = <&dw_hdmi0_snd_in>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index cdf784899cf8..d5e2f4af83a4 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -1775,7 +1775,7 @@
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
<0 0xec541000 0 0x280>, /* SSI */
- <0 0xec740000 0 0x200>; /* Audio DMAC peri peri*/
+ <0 0xec760000 0 0x200>; /* Audio DMAC peri peri*/
reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
clocks = <&cpg CPG_MOD 1005>,
@@ -2162,17 +2162,6 @@
dma-names = "rx", "tx";
};
};
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- port@0 {
- reg = <0>;
- };
- port@1 {
- reg = <1>;
- };
- };
};
audma0: dma-controller@ec700000 {
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 9763d108e183..2554b1742dbf 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -317,6 +317,76 @@
reg = <0 0xe6060000 0 0x50c>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a77965-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a77965-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a77965-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a77965-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a77965-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -1461,7 +1531,7 @@
<0 0xec5a0000 0 0x100>, /* ADG */
<0 0xec540000 0 0x1000>, /* SSIU */
<0 0xec541000 0 0x280>, /* SSI */
- <0 0xec740000 0 0x200>; /* Audio DMAC peri peri*/
+ <0 0xec760000 0 0x200>; /* Audio DMAC peri peri*/
reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
clocks = <&cpg CPG_MOD 1005>,
@@ -1585,56 +1655,267 @@
};
};
+ rcar_sound,ssiu {
+ ssiu00: ssiu-0 {
+ dmas = <&audma0 0x15>, <&audma1 0x16>;
+ dma-names = "rx", "tx";
+ };
+ ssiu01: ssiu-1 {
+ dmas = <&audma0 0x35>, <&audma1 0x36>;
+ dma-names = "rx", "tx";
+ };
+ ssiu02: ssiu-2 {
+ dmas = <&audma0 0x37>, <&audma1 0x38>;
+ dma-names = "rx", "tx";
+ };
+ ssiu03: ssiu-3 {
+ dmas = <&audma0 0x47>, <&audma1 0x48>;
+ dma-names = "rx", "tx";
+ };
+ ssiu04: ssiu-4 {
+ dmas = <&audma0 0x3F>, <&audma1 0x40>;
+ dma-names = "rx", "tx";
+ };
+ ssiu05: ssiu-5 {
+ dmas = <&audma0 0x43>, <&audma1 0x44>;
+ dma-names = "rx", "tx";
+ };
+ ssiu06: ssiu-6 {
+ dmas = <&audma0 0x4F>, <&audma1 0x50>;
+ dma-names = "rx", "tx";
+ };
+ ssiu07: ssiu-7 {
+ dmas = <&audma0 0x53>, <&audma1 0x54>;
+ dma-names = "rx", "tx";
+ };
+ ssiu10: ssiu-8 {
+ dmas = <&audma0 0x49>, <&audma1 0x4a>;
+ dma-names = "rx", "tx";
+ };
+ ssiu11: ssiu-9 {
+ dmas = <&audma0 0x4B>, <&audma1 0x4C>;
+ dma-names = "rx", "tx";
+ };
+ ssiu12: ssiu-10 {
+ dmas = <&audma0 0x57>, <&audma1 0x58>;
+ dma-names = "rx", "tx";
+ };
+ ssiu13: ssiu-11 {
+ dmas = <&audma0 0x59>, <&audma1 0x5A>;
+ dma-names = "rx", "tx";
+ };
+ ssiu14: ssiu-12 {
+ dmas = <&audma0 0x5F>, <&audma1 0x60>;
+ dma-names = "rx", "tx";
+ };
+ ssiu15: ssiu-13 {
+ dmas = <&audma0 0xC3>, <&audma1 0xC4>;
+ dma-names = "rx", "tx";
+ };
+ ssiu16: ssiu-14 {
+ dmas = <&audma0 0xC7>, <&audma1 0xC8>;
+ dma-names = "rx", "tx";
+ };
+ ssiu17: ssiu-15 {
+ dmas = <&audma0 0xCB>, <&audma1 0xCC>;
+ dma-names = "rx", "tx";
+ };
+ ssiu20: ssiu-16 {
+ dmas = <&audma0 0x63>, <&audma1 0x64>;
+ dma-names = "rx", "tx";
+ };
+ ssiu21: ssiu-17 {
+ dmas = <&audma0 0x67>, <&audma1 0x68>;
+ dma-names = "rx", "tx";
+ };
+ ssiu22: ssiu-18 {
+ dmas = <&audma0 0x6B>, <&audma1 0x6C>;
+ dma-names = "rx", "tx";
+ };
+ ssiu23: ssiu-19 {
+ dmas = <&audma0 0x6D>, <&audma1 0x6E>;
+ dma-names = "rx", "tx";
+ };
+ ssiu24: ssiu-20 {
+ dmas = <&audma0 0xCF>, <&audma1 0xCE>;
+ dma-names = "rx", "tx";
+ };
+ ssiu25: ssiu-21 {
+ dmas = <&audma0 0xEB>, <&audma1 0xEC>;
+ dma-names = "rx", "tx";
+ };
+ ssiu26: ssiu-22 {
+ dmas = <&audma0 0xED>, <&audma1 0xEE>;
+ dma-names = "rx", "tx";
+ };
+ ssiu27: ssiu-23 {
+ dmas = <&audma0 0xEF>, <&audma1 0xF0>;
+ dma-names = "rx", "tx";
+ };
+ ssiu30: ssiu-24 {
+ dmas = <&audma0 0x6f>, <&audma1 0x70>;
+ dma-names = "rx", "tx";
+ };
+ ssiu31: ssiu-25 {
+ dmas = <&audma0 0x21>, <&audma1 0x22>;
+ dma-names = "rx", "tx";
+ };
+ ssiu32: ssiu-26 {
+ dmas = <&audma0 0x23>, <&audma1 0x24>;
+ dma-names = "rx", "tx";
+ };
+ ssiu33: ssiu-27 {
+ dmas = <&audma0 0x25>, <&audma1 0x26>;
+ dma-names = "rx", "tx";
+ };
+ ssiu34: ssiu-28 {
+ dmas = <&audma0 0x27>, <&audma1 0x28>;
+ dma-names = "rx", "tx";
+ };
+ ssiu35: ssiu-29 {
+ dmas = <&audma0 0x29>, <&audma1 0x2A>;
+ dma-names = "rx", "tx";
+ };
+ ssiu36: ssiu-30 {
+ dmas = <&audma0 0x2B>, <&audma1 0x2C>;
+ dma-names = "rx", "tx";
+ };
+ ssiu37: ssiu-31 {
+ dmas = <&audma0 0x2D>, <&audma1 0x2E>;
+ dma-names = "rx", "tx";
+ };
+ ssiu40: ssiu-32 {
+ dmas = <&audma0 0x71>, <&audma1 0x72>;
+ dma-names = "rx", "tx";
+ };
+ ssiu41: ssiu-33 {
+ dmas = <&audma0 0x17>, <&audma1 0x18>;
+ dma-names = "rx", "tx";
+ };
+ ssiu42: ssiu-34 {
+ dmas = <&audma0 0x19>, <&audma1 0x1A>;
+ dma-names = "rx", "tx";
+ };
+ ssiu43: ssiu-35 {
+ dmas = <&audma0 0x1B>, <&audma1 0x1C>;
+ dma-names = "rx", "tx";
+ };
+ ssiu44: ssiu-36 {
+ dmas = <&audma0 0x1D>, <&audma1 0x1E>;
+ dma-names = "rx", "tx";
+ };
+ ssiu45: ssiu-37 {
+ dmas = <&audma0 0x1F>, <&audma1 0x20>;
+ dma-names = "rx", "tx";
+ };
+ ssiu46: ssiu-38 {
+ dmas = <&audma0 0x31>, <&audma1 0x32>;
+ dma-names = "rx", "tx";
+ };
+ ssiu47: ssiu-39 {
+ dmas = <&audma0 0x33>, <&audma1 0x34>;
+ dma-names = "rx", "tx";
+ };
+ ssiu50: ssiu-40 {
+ dmas = <&audma0 0x73>, <&audma1 0x74>;
+ dma-names = "rx", "tx";
+ };
+ ssiu60: ssiu-41 {
+ dmas = <&audma0 0x75>, <&audma1 0x76>;
+ dma-names = "rx", "tx";
+ };
+ ssiu70: ssiu-42 {
+ dmas = <&audma0 0x79>, <&audma1 0x7a>;
+ dma-names = "rx", "tx";
+ };
+ ssiu80: ssiu-43 {
+ dmas = <&audma0 0x7b>, <&audma1 0x7c>;
+ dma-names = "rx", "tx";
+ };
+ ssiu90: ssiu-44 {
+ dmas = <&audma0 0x7d>, <&audma1 0x7e>;
+ dma-names = "rx", "tx";
+ };
+ ssiu91: ssiu-45 {
+ dmas = <&audma0 0x7F>, <&audma1 0x80>;
+ dma-names = "rx", "tx";
+ };
+ ssiu92: ssiu-46 {
+ dmas = <&audma0 0x81>, <&audma1 0x82>;
+ dma-names = "rx", "tx";
+ };
+ ssiu93: ssiu-47 {
+ dmas = <&audma0 0x83>, <&audma1 0x84>;
+ dma-names = "rx", "tx";
+ };
+ ssiu94: ssiu-48 {
+ dmas = <&audma0 0xA3>, <&audma1 0xA4>;
+ dma-names = "rx", "tx";
+ };
+ ssiu95: ssiu-49 {
+ dmas = <&audma0 0xA5>, <&audma1 0xA6>;
+ dma-names = "rx", "tx";
+ };
+ ssiu96: ssiu-50 {
+ dmas = <&audma0 0xA7>, <&audma1 0xA8>;
+ dma-names = "rx", "tx";
+ };
+ ssiu97: ssiu-51 {
+ dmas = <&audma0 0xA9>, <&audma1 0xAA>;
+ dma-names = "rx", "tx";
+ };
+ };
+
rcar_sound,ssi {
ssi0: ssi-0 {
interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x01>, <&audma1 0x02>;
+ dma-names = "rx", "tx";
};
ssi1: ssi-1 {
interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x03>, <&audma1 0x04>;
+ dma-names = "rx", "tx";
};
ssi2: ssi-2 {
interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x05>, <&audma1 0x06>;
+ dma-names = "rx", "tx";
};
ssi3: ssi-3 {
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x07>, <&audma1 0x08>;
+ dma-names = "rx", "tx";
};
ssi4: ssi-4 {
interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x09>, <&audma1 0x0a>;
+ dma-names = "rx", "tx";
};
ssi5: ssi-5 {
interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0b>, <&audma1 0x0c>;
+ dma-names = "rx", "tx";
};
ssi6: ssi-6 {
interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0d>, <&audma1 0x0e>;
+ dma-names = "rx", "tx";
};
ssi7: ssi-7 {
interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x0f>, <&audma1 0x10>;
+ dma-names = "rx", "tx";
};
ssi8: ssi-8 {
interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x11>, <&audma1 0x12>;
+ dma-names = "rx", "tx";
};
ssi9: ssi-9 {
interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x13>, <&audma1 0x14>;
+ dma-names = "rx", "tx";
};
};
};
@@ -2166,7 +2447,6 @@
du: display@feb00000 {
compatible = "renesas,du-r8a77965";
reg = <0 0xfeb00000 0 0x80000>;
- reg-names = "du";
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index 4081622d548a..a901a341dcf7 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -865,6 +865,7 @@
clocks = <&cpg CPG_MOD 811>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 811>;
+ renesas,id = <0>;
status = "disabled";
ports {
@@ -892,6 +893,7 @@
clocks = <&cpg CPG_MOD 810>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
status = "disabled";
+ renesas,id = <1>;
resets = <&cpg 810>;
ports {
@@ -919,6 +921,7 @@
clocks = <&cpg CPG_MOD 809>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 809>;
+ renesas,id = <2>;
status = "disabled";
ports {
@@ -946,6 +949,7 @@
clocks = <&cpg CPG_MOD 808>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 808>;
+ renesas,id = <3>;
status = "disabled";
ports {
@@ -973,6 +977,7 @@
clocks = <&cpg CPG_MOD 807>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 807>;
+ renesas,id = <4>;
status = "disabled";
ports {
@@ -1000,6 +1005,7 @@
clocks = <&cpg CPG_MOD 806>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 806>;
+ renesas,id = <5>;
status = "disabled";
ports {
@@ -1027,6 +1033,7 @@
clocks = <&cpg CPG_MOD 805>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 805>;
+ renesas,id = <6>;
status = "disabled";
ports {
@@ -1054,6 +1061,7 @@
clocks = <&cpg CPG_MOD 804>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 804>;
+ renesas,id = <7>;
status = "disabled";
ports {
@@ -1081,6 +1089,7 @@
clocks = <&cpg CPG_MOD 628>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 628>;
+ renesas,id = <8>;
status = "disabled";
};
@@ -1091,6 +1100,7 @@
clocks = <&cpg CPG_MOD 627>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 627>;
+ renesas,id = <9>;
status = "disabled";
};
@@ -1101,6 +1111,7 @@
clocks = <&cpg CPG_MOD 625>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 625>;
+ renesas,id = <10>;
status = "disabled";
};
@@ -1111,6 +1122,7 @@
clocks = <&cpg CPG_MOD 618>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 618>;
+ renesas,id = <11>;
status = "disabled";
};
@@ -1121,6 +1133,7 @@
clocks = <&cpg CPG_MOD 612>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 612>;
+ renesas,id = <12>;
status = "disabled";
};
@@ -1131,6 +1144,7 @@
clocks = <&cpg CPG_MOD 608>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 608>;
+ renesas,id = <13>;
status = "disabled";
};
@@ -1141,6 +1155,7 @@
clocks = <&cpg CPG_MOD 605>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 605>;
+ renesas,id = <14>;
status = "disabled";
};
@@ -1151,6 +1166,7 @@
clocks = <&cpg CPG_MOD 604>;
power-domains = <&sysc R8A77980_PD_ALWAYS_ON>;
resets = <&cpg 604>;
+ renesas,id = <15>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index 144c0820cf60..c72772589953 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
* Device Tree Source for the ebisu board
*
@@ -19,7 +19,7 @@
};
chosen {
- bootargs = "ignore_loglevel";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial0:115200n8";
};
@@ -337,6 +337,15 @@
&i2c0 {
status = "okay";
+ io_expander: gpio@20 {
+ compatible = "onnn,pca9654";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <22 IRQ_TYPE_LEVEL_LOW>;
+ };
+
hdmi-encoder@39 {
compatible = "adi,adv7511w";
reg = <0x39>;
@@ -398,7 +407,7 @@
};
port@a {
- reg = <0xa>;
+ reg = <10>;
adv7482_txa: endpoint {
clock-lanes = <0>;
@@ -440,6 +449,28 @@
};
};
+&i2c_dvfs {
+ status = "okay";
+
+ clock-frequency = <400000>;
+
+ pmic: pmic@30 {
+ pinctrl-0 = <&irq0_pins>;
+ pinctrl-names = "default";
+
+ compatible = "rohm,bd9571mwv";
+ reg = <0x30>;
+ interrupt-parent = <&intc_ex>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ rohm,ddr-backup-power = <0x1>;
+ rohm,rstbmode-level;
+ };
+};
+
&lvds0 {
status = "okay";
@@ -458,6 +489,13 @@
};
&lvds1 {
+ /*
+ * Even though the LVDS1 output is not connected, the encoder must be
+ * enabled to supply a pixel clock to the DU for the DPAD output when
+ * LVDS0 is in use.
+ */
+ status = "okay";
+
clocks = <&cpg CPG_MOD 727>,
<&x13_clk>,
<&extal_clk>;
@@ -495,6 +533,11 @@
function = "du";
};
+ irq0_pins: irq0 {
+ groups = "intc_ex_irq0";
+ function = "intc_ex";
+ };
+
pwm3_pins: pwm3 {
groups = "pwm3_b";
function = "pwm3";
@@ -650,6 +693,10 @@
status = "okay";
};
+&vin5 {
+ status = "okay";
+};
+
&xhci0 {
pinctrl-0 = <&usb30_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index d2ad665fe2d9..56cb566ffa09 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
/*
* Device Tree Source for the R-Car E3 (R8A77990) SoC
*
@@ -284,6 +284,76 @@
status = "disabled";
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a77990-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 303>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 303>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a77990-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 302>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 302>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a77990-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 301>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 301>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a77990-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 300>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
+ resets = <&cpg 300>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a77990-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
@@ -1656,7 +1726,7 @@
};
csi40: csi2@feaa0000 {
- compatible = "renesas,r8a77990-csi2", "renesas,rcar-gen3-csi2";
+ compatible = "renesas,r8a77990-csi2";
reg = <0 0xfeaa0000 0 0x10000>;
interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 716>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
index db2bed1751b8..a7dc11e36fd9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
@@ -20,7 +20,7 @@
};
chosen {
- bootargs = "ignore_loglevel";
+ bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
stdout-path = "serial0:115200n8";
};
@@ -168,7 +168,6 @@
pinctrl-names = "default";
renesas,no-ether-link;
phy-handle = <&phy0>;
- phy-mode = "rgmii-txid";
status = "okay";
phy0: ethernet-phy@0 {
@@ -179,6 +178,18 @@
};
};
+&can0 {
+ pinctrl-0 = <&can0_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+&can1 {
+ pinctrl-0 = <&can1_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
&du {
pinctrl-0 = <&du_pins>;
pinctrl-names = "default";
@@ -356,6 +367,13 @@
};
&lvds1 {
+ /*
+ * Even though the LVDS1 output is not connected, the encoder must be
+ * enabled to supply a pixel clock to the DU for the DPAD output when
+ * LVDS0 is in use.
+ */
+ status = "okay";
+
clocks = <&cpg CPG_MOD 727>,
<&x12_clk>,
<&extal_clk>;
@@ -375,6 +393,16 @@
};
};
+ can0_pins: can0 {
+ groups = "can0_data_a";
+ function = "can0";
+ };
+
+ can1_pins: can1 {
+ groups = "can1_data_a";
+ function = "can1";
+ };
+
du_pins: du {
groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
function = "du";
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index a225c2457274..2dba1328acfa 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -29,6 +29,7 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
aliases {
@@ -86,6 +87,63 @@
};
};
+ keys {
+ compatible = "gpio-keys";
+
+ pinctrl-0 = <&keys_pins>;
+ pinctrl-names = "default";
+
+ key-1 {
+ gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_1>;
+ label = "SW4-1";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-2 {
+ gpios = <&gpio5 20 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_2>;
+ label = "SW4-2";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-3 {
+ gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_3>;
+ label = "SW4-3";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-4 {
+ gpios = <&gpio5 23 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_4>;
+ label = "SW4-4";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-a {
+ gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_A>;
+ label = "TSW0";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-b {
+ gpios = <&gpio6 12 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_B>;
+ label = "TSW1";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ key-c {
+ gpios = <&gpio6 13 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_C>;
+ label = "TSW2";
+ wakeup-source;
+ debounce-interval = <20>;
+ };
+ };
+
reg_1p8v: regulator0 {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
@@ -572,6 +630,11 @@
function = "intc_ex";
};
+ keys_pins: keys {
+ pins = "GP_5_17", "GP_5_20", "GP_5_22";
+ bias-pull-up;
+ };
+
pwm1_pins: pwm1 {
groups = "pwm1_a";
function = "pwm1";
@@ -719,6 +782,11 @@
};
};
+&rwdt {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
&scif1 {
pinctrl-0 = <&scif1_pins>;
pinctrl-names = "default";
@@ -857,11 +925,6 @@
status = "okay";
};
-&rwdt {
- timeout-sec = <60>;
- status = "okay";
-};
-
&xhci0 {
pinctrl-0 = <&usb30_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 1b28fa72ea0b..5f2687acbf94 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -18,6 +18,8 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-inx.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-kd.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-neo4.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock-pi-4.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30-evb.dts b/arch/arm64/boot/dts/rockchip/px30-evb.dts
index 263d7f3dbc44..6eb7407a84aa 100644
--- a/arch/arm64/boot/dts/rockchip/px30-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-evb.dts
@@ -145,12 +145,12 @@
soc_slppin_slp: soc_slppin_slp {
rockchip,pins =
- <0 RK_PA4 RK_FUNC_1 &pcfg_pull_none>;
+ <0 RK_PA4 1 &pcfg_pull_none>;
};
soc_slppin_rst: soc_slppin_rst {
rockchip,pins =
- <0 RK_PA4 RK_FUNC_2 &pcfg_pull_none>;
+ <0 RK_PA4 2 &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index 8302d86d35c4..49c4b96da3d4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -208,7 +208,7 @@
sdio-pwrseq {
wifi_enable_h: wifi-enable-h {
rockchip,pins =
- <1 18 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 0e34354b2092..5d499c9086fb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -81,18 +81,55 @@
regulator-always-on;
regulator-boot-on;
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ label = "firefly:blue:power";
+ linux,default-trigger = "heartbeat";
+ gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ mode = <0x23>;
+ };
+
+ user {
+ label = "firefly:yellow:user";
+ linux,default-trigger = "mmc1";
+ gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ mode = <0x05>;
+ };
+ };
};
&cpu0 {
cpu-supply = <&vdd_arm>;
};
+&cpu1 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu2 {
+ cpu-supply = <&vdd_arm>;
+};
+
+&cpu3 {
+ cpu-supply = <&vdd_arm>;
+};
+
&emmc {
bus-width = <8>;
cap-mmc-highspeed;
+ max-frequency = <150000000>;
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
non-removable;
pinctrl-names = "default";
pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vcc18_emmc>;
status = "okay";
};
@@ -113,6 +150,14 @@
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmiphy {
+ status = "okay";
+};
+
&i2c1 {
status = "okay";
@@ -296,3 +341,11 @@
&usb_host0_ohci {
status = "okay";
};
+
+&vop {
+ status = "okay";
+};
+
+&vop_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index 79b4d1d4b5d6..7cfd5ca6cc85 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -64,6 +64,27 @@
regulator-max-microvolt = <5000000>;
};
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio2 RK_PA2 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&ir_int>;
+ pinctrl-names = "default";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ gpios = <&rk805 1 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "mmc0";
+ };
+
+ standby {
+ gpios = <&rk805 0 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
sound {
compatible = "audio-graph-card";
label = "rockchip,rk3328";
@@ -156,6 +177,8 @@
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <1>;
clock-output-names = "xin32k", "rk805-clkout2";
+ gpio-controller;
+ #gpio-cells = <2>;
pinctrl-names = "default";
pinctrl-0 = <&pmic_int_l>;
rockchip,system-power-controller;
@@ -217,7 +240,7 @@
};
vcc_18: LDO_REG1 {
- regulator-name = "vdd_18";
+ regulator-name = "vcc_18";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
@@ -229,7 +252,7 @@
};
vcc18_emmc: LDO_REG2 {
- regulator-name = "vcc_18emmc";
+ regulator-name = "vcc18_emmc";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
@@ -280,6 +303,12 @@
};
&pinctrl {
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <2 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index dabef1a21649..994468671b19 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -657,14 +657,17 @@
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_HDMI>,
- <&cru SCLK_HDMI_SFC>;
+ <&cru SCLK_HDMI_SFC>,
+ <&cru SCLK_RTC32K>;
clock-names = "iahb",
- "isfr";
+ "isfr",
+ "cec";
phys = <&hdmiphy>;
phy-names = "hdmi";
pinctrl-names = "default";
pinctrl-0 = <&hdmi_cec &hdmii2c_xfer &hdmi_hpd>;
rockchip,grf = <&grf>;
+ #sound-dai-cells = <0>;
status = "disabled";
ports {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index e96eb62f362b..1c52f47c43a6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -154,60 +154,60 @@
backlight {
bl_en: bl-en {
- rockchip,pins = <0 20 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
emmc {
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC3 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC4 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC5 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC6 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC7 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD0 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD1 2 &pcfg_pull_up_drv_8ma>;
};
emmc-clk {
- rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none_drv_8ma>;
};
emmc-cmd {
- rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PD2 2 &pcfg_pull_up_drv_8ma>;
};
emmc_reset: emmc-reset {
- rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
pmic {
pmic_int: pmic-int {
- rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
sdio {
wifi_reg_on: wifi-reg-on {
- rockchip,pins = <3 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
bt_rst: bt-rst {
- rockchip,pins = <3 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 8fa550cbd1a4..1d0778ff217c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -233,23 +233,23 @@
&pinctrl {
ir {
ir_int: ir-int {
- rockchip,pins = <3 30 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_sleep: pmic-sleep {
- rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
};
pmic_int: pmic-int {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
index fca8e87d8f52..8251f3c0d0a8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
@@ -113,34 +113,34 @@
haikou_pin_hog: haikou-pin-hog {
rockchip,pins =
/* LID_BTN */
- <RK_GPIO3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>,
/* BATLOW# */
- <RK_GPIO0 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>,
+ <0 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>,
/* SLP_BTN# */
- <RK_GPIO3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <3 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>,
/* BIOS_DISABLE# */
- <RK_GPIO3 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
+ <3 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
led_sd_haikou: led-sd-gpio {
rockchip,pins =
- <RK_GPIO0 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <0 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_cd_gpio: sdmmc-cd-gpio {
rockchip,pins =
- <RK_GPIO2 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
+ <2 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb_otg {
otg_vbus_drv: otg-vbus-drv {
rockchip,pins =
- <RK_GPIO0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
+ <0 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
index 1b35d612b660..e17311e09082 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
@@ -56,8 +56,6 @@
fan: fan@18 {
compatible = "ti,amc6821";
reg = <0x18>;
- cooling-min-state = <0>;
- cooling-max-state = <9>;
#cooling-cells = <2>;
};
@@ -274,17 +272,17 @@
leds {
led_pins_module: led-module-gpio {
rockchip,pins =
- <RK_GPIO2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>,
- <RK_GPIO3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ <2 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int_l: pmic-int-l {
- rockchip,pins = <RK_GPIO0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
pmic_sleep: pmic-sleep {
- rockchip,pins = <RK_GPIO0 RK_PA0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index f5aa3cad67c5..6cc310255da8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -226,73 +226,73 @@
emmc {
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC3 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC4 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC5 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC6 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC7 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD0 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD1 2 &pcfg_pull_up_drv_8ma>;
};
emmc-clk {
- rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none_drv_8ma>;
};
emmc-cmd {
- rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PD2 2 &pcfg_pull_up_drv_8ma>;
};
emmc_reset: emmc-reset {
- rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
leds {
stby_pwren: stby-pwren {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
led_ctl: led-ctl {
- rockchip,pins = <3 29 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdmmc {
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <2 9 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <2 RK_PB1 1 &pcfg_pull_none_drv_8ma>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PB2 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_cd: sdmmc-cd {
- rockchip,pins = <2 11 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PB3 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PA5 1 &pcfg_pull_up_drv_8ma>;
};
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <2 6 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <2 7 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
- <2 8 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <2 RK_PA5 1 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA6 1 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PA7 1 &pcfg_pull_up_drv_8ma>,
+ <2 RK_PB0 1 &pcfg_pull_up_drv_8ma>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
index 41edcfd53184..231db0305a03 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -218,17 +218,17 @@
&pinctrl {
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_sleep: pmic-sleep {
- rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
};
pmic_int: pmic-int {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index d34064c65f10..006a1fb6a816 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -235,64 +235,64 @@
emmc {
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
- <1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC3 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC4 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC5 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC6 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PC7 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD0 2 &pcfg_pull_up_drv_8ma>,
+ <1 RK_PD1 2 &pcfg_pull_up_drv_8ma>;
};
emmc-clk {
- rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none_drv_8ma>;
};
emmc-cmd {
- rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ rockchip,pins = <1 RK_PD2 2 &pcfg_pull_up_drv_8ma>;
};
emmc_reset: emmc-reset {
- rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
ir {
ir_int: ir-int {
- rockchip,pins = <3 30 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
keys {
pwr_key: pwr-key {
- rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
stby_pwren: stby-pwren {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
led_ctl: led-ctl {
- rockchip,pins = <3 29 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
sdio {
wifi_reg_on: wifi-reg-on {
- rockchip,pins = <3 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
bt_rst: bt-rst {
- rockchip,pins = <3 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb {
host_vbus_drv: host-vbus-drv {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 06e7c31d7d07..fd86188010b2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -881,345 +881,345 @@
emmc {
emmc_clk: emmc-clk {
- rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA4 2 &pcfg_pull_none>;
};
emmc_cmd: emmc-cmd {
- rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD2 2 &pcfg_pull_up>;
};
emmc_pwr: emmc-pwr {
- rockchip,pins = <1 27 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD3 2 &pcfg_pull_up>;
};
emmc_bus1: emmc-bus1 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up>;
};
emmc_bus4: emmc-bus4 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
- <1 19 RK_FUNC_2 &pcfg_pull_up>,
- <1 20 RK_FUNC_2 &pcfg_pull_up>,
- <1 21 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up>,
+ <1 RK_PC3 2 &pcfg_pull_up>,
+ <1 RK_PC4 2 &pcfg_pull_up>,
+ <1 RK_PC5 2 &pcfg_pull_up>;
};
emmc_bus8: emmc-bus8 {
- rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
- <1 19 RK_FUNC_2 &pcfg_pull_up>,
- <1 20 RK_FUNC_2 &pcfg_pull_up>,
- <1 21 RK_FUNC_2 &pcfg_pull_up>,
- <1 22 RK_FUNC_2 &pcfg_pull_up>,
- <1 23 RK_FUNC_2 &pcfg_pull_up>,
- <1 24 RK_FUNC_2 &pcfg_pull_up>,
- <1 25 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 2 &pcfg_pull_up>,
+ <1 RK_PC3 2 &pcfg_pull_up>,
+ <1 RK_PC4 2 &pcfg_pull_up>,
+ <1 RK_PC5 2 &pcfg_pull_up>,
+ <1 RK_PC6 2 &pcfg_pull_up>,
+ <1 RK_PC7 2 &pcfg_pull_up>,
+ <1 RK_PD0 2 &pcfg_pull_up>,
+ <1 RK_PD1 2 &pcfg_pull_up>;
};
};
gmac {
rgmii_pins: rgmii-pins {
- rockchip,pins = <3 22 RK_FUNC_1 &pcfg_pull_none>,
- <3 24 RK_FUNC_1 &pcfg_pull_none>,
- <3 19 RK_FUNC_1 &pcfg_pull_none>,
- <3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 10 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 14 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 28 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 15 RK_FUNC_1 &pcfg_pull_none>,
- <3 16 RK_FUNC_1 &pcfg_pull_none>,
- <3 17 RK_FUNC_1 &pcfg_pull_none>,
- <3 18 RK_FUNC_1 &pcfg_pull_none>,
- <3 25 RK_FUNC_1 &pcfg_pull_none>,
- <3 20 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC6 1 &pcfg_pull_none>,
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PC3 1 &pcfg_pull_none>,
+ <3 RK_PB0 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB1 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB2 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB6 1 &pcfg_pull_none_12ma>,
+ <3 RK_PD4 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB5 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB7 1 &pcfg_pull_none>,
+ <3 RK_PC0 1 &pcfg_pull_none>,
+ <3 RK_PC1 1 &pcfg_pull_none>,
+ <3 RK_PC2 1 &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PC4 1 &pcfg_pull_none>;
};
rmii_pins: rmii-pins {
- rockchip,pins = <3 22 RK_FUNC_1 &pcfg_pull_none>,
- <3 24 RK_FUNC_1 &pcfg_pull_none>,
- <3 19 RK_FUNC_1 &pcfg_pull_none>,
- <3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
- <3 15 RK_FUNC_1 &pcfg_pull_none>,
- <3 16 RK_FUNC_1 &pcfg_pull_none>,
- <3 20 RK_FUNC_1 &pcfg_pull_none>,
- <3 21 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC6 1 &pcfg_pull_none>,
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PC3 1 &pcfg_pull_none>,
+ <3 RK_PB0 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB1 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB5 1 &pcfg_pull_none_12ma>,
+ <3 RK_PB7 1 &pcfg_pull_none>,
+ <3 RK_PC0 1 &pcfg_pull_none>,
+ <3 RK_PC4 1 &pcfg_pull_none>,
+ <3 RK_PC5 1 &pcfg_pull_none>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
- rockchip,pins = <0 6 RK_FUNC_1 &pcfg_pull_none>,
- <0 7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA6 1 &pcfg_pull_none>,
+ <0 RK_PA7 1 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
- rockchip,pins = <2 21 RK_FUNC_1 &pcfg_pull_none>,
- <2 22 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC5 1 &pcfg_pull_none>,
+ <2 RK_PC6 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
- rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_none>,
- <3 31 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB1 2 &pcfg_pull_none>,
+ <3 RK_PD7 2 &pcfg_pull_none>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
- rockchip,pins = <1 16 RK_FUNC_1 &pcfg_pull_none>,
- <1 17 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC0 1 &pcfg_pull_none>,
+ <1 RK_PC1 1 &pcfg_pull_none>;
};
};
i2c4 {
i2c4_xfer: i2c4-xfer {
- rockchip,pins = <3 24 RK_FUNC_2 &pcfg_pull_none>,
- <3 25 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD0 2 &pcfg_pull_none>,
+ <3 RK_PD1 2 &pcfg_pull_none>;
};
};
i2c5 {
i2c5_xfer: i2c5-xfer {
- rockchip,pins = <3 26 RK_FUNC_2 &pcfg_pull_none>,
- <3 27 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD2 2 &pcfg_pull_none>,
+ <3 RK_PD3 2 &pcfg_pull_none>;
};
};
i2s {
i2s_8ch_bus: i2s-8ch-bus {
- rockchip,pins = <2 12 RK_FUNC_1 &pcfg_pull_none>,
- <2 13 RK_FUNC_1 &pcfg_pull_none>,
- <2 14 RK_FUNC_1 &pcfg_pull_none>,
- <2 15 RK_FUNC_1 &pcfg_pull_none>,
- <2 16 RK_FUNC_1 &pcfg_pull_none>,
- <2 17 RK_FUNC_1 &pcfg_pull_none>,
- <2 18 RK_FUNC_1 &pcfg_pull_none>,
- <2 19 RK_FUNC_1 &pcfg_pull_none>,
- <2 20 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB4 1 &pcfg_pull_none>,
+ <2 RK_PB5 1 &pcfg_pull_none>,
+ <2 RK_PB6 1 &pcfg_pull_none>,
+ <2 RK_PB7 1 &pcfg_pull_none>,
+ <2 RK_PC0 1 &pcfg_pull_none>,
+ <2 RK_PC1 1 &pcfg_pull_none>,
+ <2 RK_PC2 1 &pcfg_pull_none>,
+ <2 RK_PC3 1 &pcfg_pull_none>,
+ <2 RK_PC4 1 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_pin: pwm0-pin {
- rockchip,pins = <3 8 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PB0 2 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
- rockchip,pins = <0 8 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB0 2 &pcfg_pull_none>;
};
};
pwm3 {
pwm3_pin: pwm3-pin {
- rockchip,pins = <3 29 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD5 3 &pcfg_pull_none>;
};
};
sdio0 {
sdio0_bus1: sdio0-bus1 {
- rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PD4 1 &pcfg_pull_up>;
};
sdio0_bus4: sdio0-bus4 {
- rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>,
- <2 29 RK_FUNC_1 &pcfg_pull_up>,
- <2 30 RK_FUNC_1 &pcfg_pull_up>,
- <2 31 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PD4 1 &pcfg_pull_up>,
+ <2 RK_PD5 1 &pcfg_pull_up>,
+ <2 RK_PD6 1 &pcfg_pull_up>,
+ <2 RK_PD7 1 &pcfg_pull_up>;
};
sdio0_cmd: sdio0-cmd {
- rockchip,pins = <3 0 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA0 1 &pcfg_pull_up>;
};
sdio0_clk: sdio0-clk {
- rockchip,pins = <3 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PA1 1 &pcfg_pull_none>;
};
sdio0_cd: sdio0-cd {
- rockchip,pins = <3 2 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA2 1 &pcfg_pull_up>;
};
sdio0_wp: sdio0-wp {
- rockchip,pins = <3 3 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA3 1 &pcfg_pull_up>;
};
sdio0_pwr: sdio0-pwr {
- rockchip,pins = <3 4 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA4 1 &pcfg_pull_up>;
};
sdio0_bkpwr: sdio0-bkpwr {
- rockchip,pins = <3 5 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA5 1 &pcfg_pull_up>;
};
sdio0_int: sdio0-int {
- rockchip,pins = <3 6 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PA6 1 &pcfg_pull_up>;
};
};
sdmmc {
sdmmc_clk: sdmmc-clk {
- rockchip,pins = <2 9 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PB1 1 &pcfg_pull_none>;
};
sdmmc_cmd: sdmmc-cmd {
- rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PB2 1 &pcfg_pull_up>;
};
sdmmc_cd: sdmmc-cd {
- rockchip,pins = <2 11 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PB3 1 &pcfg_pull_up>;
};
sdmmc_bus1: sdmmc-bus1 {
- rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA5 1 &pcfg_pull_up>;
};
sdmmc_bus4: sdmmc-bus4 {
- rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>,
- <2 6 RK_FUNC_1 &pcfg_pull_up>,
- <2 7 RK_FUNC_1 &pcfg_pull_up>,
- <2 8 RK_FUNC_1 &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PA5 1 &pcfg_pull_up>,
+ <2 RK_PA6 1 &pcfg_pull_up>,
+ <2 RK_PA7 1 &pcfg_pull_up>,
+ <2 RK_PB0 1 &pcfg_pull_up>;
};
};
spdif {
spdif_tx: spdif-tx {
- rockchip,pins = <2 RK_PC7 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PC7 1 &pcfg_pull_none>;
};
};
spi0 {
spi0_clk: spi0-clk {
- rockchip,pins = <1 29 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD5 2 &pcfg_pull_up>;
};
spi0_cs0: spi0-cs0 {
- rockchip,pins = <1 24 RK_FUNC_3 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD0 3 &pcfg_pull_up>;
};
spi0_cs1: spi0-cs1 {
- rockchip,pins = <1 25 RK_FUNC_3 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PD1 3 &pcfg_pull_up>;
};
spi0_tx: spi0-tx {
- rockchip,pins = <1 23 RK_FUNC_3 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC7 3 &pcfg_pull_up>;
};
spi0_rx: spi0-rx {
- rockchip,pins = <1 22 RK_FUNC_3 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC6 3 &pcfg_pull_up>;
};
};
spi1 {
spi1_clk: spi1-clk {
- rockchip,pins = <1 14 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB6 2 &pcfg_pull_up>;
};
spi1_cs0: spi1-cs0 {
- rockchip,pins = <1 15 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB7 2 &pcfg_pull_up>;
};
spi1_cs1: spi1-cs1 {
- rockchip,pins = <3 28 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PD4 2 &pcfg_pull_up>;
};
spi1_rx: spi1-rx {
- rockchip,pins = <1 16 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC0 2 &pcfg_pull_up>;
};
spi1_tx: spi1-tx {
- rockchip,pins = <1 17 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC1 2 &pcfg_pull_up>;
};
};
spi2 {
spi2_clk: spi2-clk {
- rockchip,pins = <0 12 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB4 2 &pcfg_pull_up>;
};
spi2_cs0: spi2-cs0 {
- rockchip,pins = <0 13 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB5 2 &pcfg_pull_up>;
};
spi2_rx: spi2-rx {
- rockchip,pins = <0 10 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB2 2 &pcfg_pull_up>;
};
spi2_tx: spi2-tx {
- rockchip,pins = <0 11 RK_FUNC_2 &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB3 2 &pcfg_pull_up>;
};
};
tsadc {
otp_gpio: otp-gpio {
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
otp_out: otp-out {
- rockchip,pins = <0 3 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 1 &pcfg_pull_none>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
- rockchip,pins = <2 24 RK_FUNC_1 &pcfg_pull_up>,
- <2 25 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD0 1 &pcfg_pull_up>,
+ <2 RK_PD1 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
- rockchip,pins = <2 26 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD2 1 &pcfg_pull_none>;
};
uart0_rts: uart0-rts {
- rockchip,pins = <2 27 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
- rockchip,pins = <0 20 RK_FUNC_3 &pcfg_pull_up>,
- <0 21 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC4 3 &pcfg_pull_up>,
+ <0 RK_PC5 3 &pcfg_pull_none>;
};
uart1_cts: uart1-cts {
- rockchip,pins = <0 22 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC6 3 &pcfg_pull_none>;
};
uart1_rts: uart1-rts {
- rockchip,pins = <0 23 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PC7 3 &pcfg_pull_none>;
};
};
uart2 {
uart2_xfer: uart2-xfer {
- rockchip,pins = <2 6 RK_FUNC_2 &pcfg_pull_up>,
- <2 5 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA6 2 &pcfg_pull_up>,
+ <2 RK_PA5 2 &pcfg_pull_none>;
};
/* no rts / cts for uart2 */
};
uart3 {
uart3_xfer: uart3-xfer {
- rockchip,pins = <3 29 RK_FUNC_2 &pcfg_pull_up>,
- <3 30 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PD5 2 &pcfg_pull_up>,
+ <3 RK_PD6 3 &pcfg_pull_none>;
};
uart3_cts: uart3-cts {
- rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC0 2 &pcfg_pull_none>;
};
uart3_rts: uart3-rts {
- rockchip,pins = <3 17 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <3 RK_PC1 2 &pcfg_pull_none>;
};
};
uart4 {
uart4_xfer: uart4-xfer {
- rockchip,pins = <0 27 RK_FUNC_3 &pcfg_pull_up>,
- <0 26 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD3 3 &pcfg_pull_up>,
+ <0 RK_PD2 3 &pcfg_pull_none>;
};
uart4_cts: uart4-cts {
- rockchip,pins = <0 24 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD0 3 &pcfg_pull_none>;
};
uart4_rts: uart4-rts {
- rockchip,pins = <0 25 RK_FUNC_3 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PD1 3 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 959ddc3c7df5..77008dca45bc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -208,19 +208,19 @@
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins =
- <1 21 RK_FUNC_GPIO &pcfg_pull_up>;
+ <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
};
pmic_dvs2: pmic-dvs2 {
rockchip,pins =
- <1 18 RK_FUNC_GPIO &pcfg_pull_down>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
usb2 {
vcc5v0_host_en: vcc5v0-host-en {
rockchip,pins =
- <4 25 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
index 027d428917b8..6b059bd7a04f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-ficus.dts
@@ -95,53 +95,53 @@
gmac {
rgmii_sleep_pins: rgmii-sleep-pins {
rockchip,pins =
- <3 15 RK_FUNC_GPIO &pcfg_output_low>;
+ <3 RK_PB7 RK_FUNC_GPIO &pcfg_output_low>;
};
};
pcie {
pcie_drv: pcie-drv {
rockchip,pins =
- <1 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb2 {
host_vbus_drv: host-vbus-drv {
rockchip,pins =
- <4 27 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
leds {
user_led1: user_led1 {
rockchip,pins =
- <4 25 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
};
user_led2: user_led2 {
rockchip,pins =
- <4 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
user_led3: user_led3 {
rockchip,pins =
- <4 30 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
user_led4: user_led4 {
rockchip,pins =
- <1 0 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
};
wlan_led: wlan_led {
rockchip,pins =
- <1 1 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
bt_led: bt_led {
rockchip,pins =
- <1 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index d1cf404b8708..a9f4d6d7d2b7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -73,7 +73,7 @@
&pinctrl {
tpm {
h1_int_od_l: h1-int-od-l {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
index 931640e9aed4..7cd6d470c1cb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi
@@ -365,27 +365,27 @@ ap_i2c_tp: &i2c5 {
&pinctrl {
discrete-regulators {
pp1500_en: pp1500-en {
- rockchip,pins = <RK_GPIO0 10 RK_FUNC_GPIO
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO
&pcfg_pull_none>;
};
pp1800_audio_en: pp1800-audio-en {
- rockchip,pins = <RK_GPIO0 2 RK_FUNC_GPIO
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO
&pcfg_pull_down>;
};
pp3000_en: pp3000-en {
- rockchip,pins = <RK_GPIO0 12 RK_FUNC_GPIO
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO
&pcfg_pull_none>;
};
pp3300_disp_en: pp3300-disp-en {
- rockchip,pins = <RK_GPIO4 27 RK_FUNC_GPIO
+ rockchip,pins = <4 RK_PD3 RK_FUNC_GPIO
&pcfg_pull_none>;
};
wlan_module_pd_l: wlan-module-pd-l {
- rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO
&pcfg_pull_down>;
};
};
@@ -393,10 +393,10 @@ ap_i2c_tp: &i2c5 {
&wifi {
wifi_perst_l: wifi-perst-l {
- rockchip,pins = <2 27 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
};
wlan_host_wake_l: wlan-host-wake-l {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
index 15e254a77391..3e2272b56eb7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
@@ -290,24 +290,24 @@ ap_i2c_dig: &i2c2 {
digitizer {
/* Has external pullup */
cpu1_dig_irq_l: cpu1-dig-irq-l {
- rockchip,pins = <2 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
/* Has external pullup */
cpu1_dig_pdct_l: cpu1-dig-pdct-l {
- rockchip,pins = <2 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
discrete-regulators {
cpu3_pen_pwr_en: cpu3-pen-pwr-en {
- rockchip,pins = <4 30 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pen {
cpu1_pen_eject: cpu1-pen-eject {
- rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index 62ea7d6a7d4a..50dfab51f175 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -455,58 +455,58 @@ camera: &i2c7 {
/* PINCTRL OVERRIDES */
&ec_ap_int_l {
- rockchip,pins = <1 18 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&ap_fw_wp {
- rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
};
&bl_en {
- rockchip,pins = <4 21 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PC5 RK_FUNC_GPIO &pcfg_pull_none>;
};
&bt_host_wake_l {
- rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
&ec_ap_int_l {
- rockchip,pins = <1 18 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&headset_int_l {
- rockchip,pins = <1 23 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC7 RK_FUNC_GPIO &pcfg_pull_up>;
};
&i2s0_8ch_bus {
rockchip,pins =
- <3 24 RK_FUNC_1 &pcfg_pull_none_6ma>,
- <3 25 RK_FUNC_1 &pcfg_pull_none_6ma>,
- <3 26 RK_FUNC_1 &pcfg_pull_none_6ma>,
- <3 27 RK_FUNC_1 &pcfg_pull_none_6ma>,
- <3 31 RK_FUNC_1 &pcfg_pull_none_6ma>,
- <4 0 RK_FUNC_1 &pcfg_pull_none_6ma>;
+ <3 RK_PD0 1 &pcfg_pull_none_6ma>,
+ <3 RK_PD1 1 &pcfg_pull_none_6ma>,
+ <3 RK_PD2 1 &pcfg_pull_none_6ma>,
+ <3 RK_PD3 1 &pcfg_pull_none_6ma>,
+ <3 RK_PD7 1 &pcfg_pull_none_6ma>,
+ <4 RK_PA0 1 &pcfg_pull_none_6ma>;
};
/* there is no external pull up, so need to set this pin pull up */
&sdmmc_cd_gpio {
- rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>;
};
&sd_pwr_1800_sel {
- rockchip,pins = <2 28 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <2 RK_PD4 RK_FUNC_GPIO &pcfg_pull_up>;
};
&sdmode_en {
- rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_down>;
};
&touch_reset_l {
- rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_down>;
};
&touch_int_l {
- rockchip,pins = <1 4 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_down>;
};
&pinctrl {
@@ -523,84 +523,84 @@ camera: &i2c7 {
camera {
pp1250_cam_en: pp1250-dvdd {
- rockchip,pins = <2 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
pp2800_cam_en: pp2800-avdd {
- rockchip,pins = <2 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
ucam_rst: ucam_rst {
- rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
wcam_rst: wcam_rst {
- rockchip,pins = <2 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PA5 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
digitizer {
pen_int_odl: pen-int-odl {
- rockchip,pins = <1 0 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_up>;
};
pen_reset_l: pen-reset-l {
- rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
discrete-regulators {
display_rst_l: display-rst-l {
- rockchip,pins = <4 25 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_down>;
};
ppvarp_lcd_en: ppvarp-lcd-en {
- rockchip,pins = <4 27 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
};
ppvarn_lcd_en: ppvarn-lcd-en {
- rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD4 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
dmic {
dmic_en: dmic-en {
- rockchip,pins = <4 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pen {
pen_eject_odl: pen-eject-odl {
- rockchip,pins = <1 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
tpm {
h1_int_od_l: h1-int-od-l {
- rockchip,pins = <1 17 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
&wifi {
bt_en_1v8_l: bt-en-1v8-l {
- rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>;
};
wlan_pd_1v8_l: wlan-pd-1v8-l {
- rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
};
/* Default pull-up, but just to be clear */
wlan_rf_kill_1v8_l: wlan-rf-kill-1v8-l {
- rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
};
wifi_perst_l: wifi-perst-l {
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
wlan_host_wake_l: wlan-host-wake-l {
- rockchip,pins = <1 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index da03fa9c5662..dd5624975c9b 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -676,29 +676,29 @@ ap_i2c_audio: &i2c8 {
backlight-enable {
bl_en: bl-en {
- rockchip,pins = <1 17 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
cros-ec {
ec_ap_int_l: ec-ap-int-l {
- rockchip,pins = <RK_GPIO0 1 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
discrete-regulators {
sd_io_pwr_en: sd-io-pwr-en {
- rockchip,pins = <RK_GPIO2 2 RK_FUNC_GPIO
+ rockchip,pins = <2 RK_PA2 RK_FUNC_GPIO
&pcfg_pull_none>;
};
sd_pwr_1800_sel: sd-pwr-1800-sel {
- rockchip,pins = <RK_GPIO2 28 RK_FUNC_GPIO
+ rockchip,pins = <2 RK_PD4 RK_FUNC_GPIO
&pcfg_pull_none>;
};
sd_slot_pwr_en: sd-slot-pwr-en {
- rockchip,pins = <RK_GPIO4 29 RK_FUNC_GPIO
+ rockchip,pins = <4 RK_PD5 RK_FUNC_GPIO
&pcfg_pull_none>;
};
};
@@ -706,17 +706,17 @@ ap_i2c_audio: &i2c8 {
codec {
/* Has external pullup */
headset_int_l: headset-int-l {
- rockchip,pins = <1 23 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
};
mic_int: mic-int {
- rockchip,pins = <1 13 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <1 RK_PB5 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
max98357a {
sdmode_en: sdmode-en {
- rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
@@ -727,7 +727,7 @@ ap_i2c_audio: &i2c8 {
* to hack this as gpio, so the EP could be able to
* de-assert it along and make ClockPM(CPM) work.
*/
- rockchip,pins = <2 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -738,20 +738,20 @@ ap_i2c_audio: &i2c8 {
*/
sdmmc_bus4: sdmmc-bus4 {
rockchip,pins =
- <4 8 RK_FUNC_1 &pcfg_pull_none_8ma>,
- <4 9 RK_FUNC_1 &pcfg_pull_none_8ma>,
- <4 10 RK_FUNC_1 &pcfg_pull_none_8ma>,
- <4 11 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ <4 RK_PB0 1 &pcfg_pull_none_8ma>,
+ <4 RK_PB1 1 &pcfg_pull_none_8ma>,
+ <4 RK_PB2 1 &pcfg_pull_none_8ma>,
+ <4 RK_PB3 1 &pcfg_pull_none_8ma>;
};
sdmmc_clk: sdmmc-clk {
rockchip,pins =
- <4 12 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ <4 RK_PB4 1 &pcfg_pull_none_8ma>;
};
sdmmc_cmd: sdmmc-cmd {
rockchip,pins =
- <4 13 RK_FUNC_1 &pcfg_pull_none_8ma>;
+ <4 RK_PB5 1 &pcfg_pull_none_8ma>;
};
/*
@@ -765,12 +765,12 @@ ap_i2c_audio: &i2c8 {
*/
sdmmc_cd: sdmmc-cd {
rockchip,pins =
- <0 7 RK_FUNC_1 &pcfg_pull_none>;
+ <0 RK_PA7 1 &pcfg_pull_none>;
};
/* This is where we actually hook up CD; has external pull */
sdmmc_cd_gpio: sdmmc-cd-gpio {
- rockchip,pins = <4 24 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -780,47 +780,47 @@ ap_i2c_audio: &i2c8 {
* Pull down SPI1 CLK/CS/RX/TX during suspend, to
* prevent leakage.
*/
- rockchip,pins = <1 9 RK_FUNC_GPIO &pcfg_pull_down>,
- <1 10 RK_FUNC_GPIO &pcfg_pull_down>,
- <1 7 RK_FUNC_GPIO &pcfg_pull_down>,
- <1 8 RK_FUNC_GPIO &pcfg_pull_down>;
+ rockchip,pins = <1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 RK_PA7 RK_FUNC_GPIO &pcfg_pull_down>,
+ <1 RK_PB0 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
touchscreen {
touch_int_l: touch-int-l {
- rockchip,pins = <3 13 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <3 RK_PB5 RK_FUNC_GPIO &pcfg_pull_up>;
};
touch_reset_l: touch-reset-l {
- rockchip,pins = <4 26 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <4 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
trackpad {
ap_i2c_tp_pu_en: ap-i2c-tp-pu-en {
- rockchip,pins = <3 12 RK_FUNC_GPIO &pcfg_output_high>;
+ rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_output_high>;
};
trackpad_int_l: trackpad-int-l {
- rockchip,pins = <1 4 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
wifi: wifi {
wlan_module_reset_l: wlan-module-reset-l {
- rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
};
bt_host_wake_l: bt-host-wake-l {
/* Kevin has an external pull up, but Gru does not */
- rockchip,pins = <0 3 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
write-protect {
ap_fw_wp: ap-fw-wp {
- rockchip,pins = <1 18 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
index 84433cf02be9..2a127985ab17 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts
@@ -52,13 +52,80 @@
pinctrl-names = "default";
pinctrl-0 = <&ir_rx>;
};
+
+ fan: pwm-fan {
+ compatible = "pwm-fan";
+ /*
+ * With 20KHz PWM and an EVERCOOL EC4007H12SA fan, these levels
+ * work out to 0, ~1200, ~3000, and 5000RPM respectively.
+ */
+ cooling-levels = <0 12 18 255>;
+ #cooling-cells = <2>;
+ fan-supply = <&vcc12v0_sys>;
+ pwms = <&pwm1 0 50000 0>;
+ };
+};
+
+&cpu_thermal {
+ trips {
+ cpu_warm: cpu_warm {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ cpu_hot: cpu_hot {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+
+ cooling-maps {
+ map2 {
+ trip = <&cpu_warm>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map3 {
+ trip = <&cpu_hot>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
+};
+
+&gpu_thermal {
+ trips {
+ gpu_warm: gpu_warm {
+ temperature = <55000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+
+ gpu_hot: gpu_hot {
+ temperature = <65000>;
+ hysteresis = <2000>;
+ type = "active";
+ };
+ };
+ cooling-maps {
+ map1 {
+ trip = <&gpu_warm>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 1>;
+ };
+
+ map2 {
+ trip = <&gpu_hot>;
+ cooling-device = <&fan 2 THERMAL_NO_LIMIT>;
+ };
+ };
};
&pinctrl {
ir {
ir_rx: ir-rx {
/* external pullup to VCC3V3_SYS, despite being 1.8V :/ */
- rockchip,pins = <0 RK_PA6 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA6 1 &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-neo4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-neo4.dts
new file mode 100644
index 000000000000..195410b089b9
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-neo4.dts
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2019 Amarula Solutions B.V.
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+/dts-v1/;
+
+#include "rk3399-nanopi4.dtsi"
+
+/ {
+ model = "FriendlyARM NanoPi NEO4";
+ compatible = "friendlyarm,nanopi-neo4", "rockchip,rk3399";
+
+ vdd_5v: vdd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc5v0_core: vcc5v0-core {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_core";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vdd_5v>;
+ };
+
+ vcc5v0_usb1: vcc5v0-usb1 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc5v0_usb1";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc5v0_sys>;
+ };
+};
+
+&vcc3v3_sys {
+ vin-supply = <&vcc5v0_core>;
+};
+
+&u2phy0_host {
+ phy-supply = <&vcc5v0_usb1>;
+};
+
+&vbus_typec {
+ regulator-always-on;
+ vin-supply = <&vdd_5v>;
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index d325e117287b..dd16c80d923e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -148,15 +148,28 @@
assigned-clocks = <&cru SCLK_RMII_SRC>;
clock_in_out = "input";
pinctrl-names = "default";
- pinctrl-0 = <&rgmii_pins>;
+ pinctrl-0 = <&rgmii_pins>, <&phy_intb>, <&phy_rstb>;
+ phy-handle = <&rtl8211e>;
phy-mode = "rgmii";
phy-supply = <&vcc3v3_s3>;
snps,reset-active-low;
- snps,reset-delays-us = <0 10000 50000>;
+ snps,reset-delays-us = <0 10000 30000>;
snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
tx_delay = <0x28>;
rx_delay = <0x11>;
status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtl8211e: phy@1 {
+ reg = <1>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <RK_PB2 IRQ_TYPE_LEVEL_LOW>;
+ };
+ };
};
&gpu {
@@ -481,6 +494,16 @@
};
};
+ phy {
+ phy_intb: phy-intb {
+ rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+
+ phy_rstb: phy-rstb {
+ rockchip,pins = <3 RK_PB7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
pmic {
cpu_b_sleep: cpu-b-sleep {
rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
new file mode 100644
index 000000000000..0541dfce924d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ */
+
+/dts-v1/;
+
+#include "dt-bindings/pwm/pwm.h"
+#include "dt-bindings/input/input.h"
+#include "rk3399.dtsi"
+#include "rk3399-opp.dtsi"
+
+/ {
+ model = "Orange Pi RK3399 Board";
+ compatible = "rockchip,rk3399-orangepi", "rockchip,rk3399";
+
+ chosen {
+ stdout-path = "serial2:1500000n8";
+ };
+
+ clkin_gmac: external-gmac-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ clock-output-names = "clkin_gmac";
+ #clock-cells = <0>;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 1>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1800000>;
+ poll-interval = <100>;
+
+ button-up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ press-threshold-microvolt = <100000>;
+ };
+
+ button-down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ press-threshold-microvolt = <300000>;
+ };
+
+ back {
+ label = "Back";
+ linux,code = <KEY_BACK>;
+ press-threshold-microvolt = <985000>;
+ };
+
+ menu {
+ label = "Menu";
+ linux,code = <KEY_MENU>;
+ press-threshold-microvolt = <1314000>;
+ };
+ };
+
+ dc_12v: dc-12v {
+ compatible = "regulator-fixed";
+ regulator-name = "dc_12v";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ };
+
+ keys: gpio-keys {
+ compatible = "gpio-keys";
+ autorepeat;
+
+ power {
+ debounce-interval = <100>;
+ gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>;
+ label = "GPIO Power";
+ linux,code = <KEY_POWER>;
+ linux,input-type = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_btn>;
+ wakeup-source;
+ };
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_reg_on_h>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+ };
+
+ /* switched by pmic_sleep */
+ vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v8_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_1v8>;
+ };
+
+ vcc3v0_sd: vcc3v0-sd {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc0_pwr_h>;
+ regulator-boot-on;
+ regulator-max-microvolt = <3000000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-name = "vcc3v0_sd";
+ vin-supply = <&vcc3v3_sys>;
+ };
+
+ vcc3v3_sys: vcc3v3-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc3v3_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc5v0_host: vcc5v0-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_host_en>;
+ regulator-name = "vcc5v0_host";
+ regulator-always-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vbus_typec: vbus-typec-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&vcc5v0_typec_en>;
+ regulator-name = "vbus_typec";
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_sys: vcc-sys {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&dc_12v>;
+ };
+
+ vdd_log: vdd-log {
+ compatible = "pwm-regulator";
+ pwms = <&pwm2 0 25000 1>;
+ regulator-name = "vdd_log";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1400000>;
+ vin-supply = <&vcc_sys>;
+ };
+};
+
+&cpu_l0 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l1 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l2 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_l3 {
+ cpu-supply = <&vdd_cpu_l>;
+};
+
+&cpu_b0 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&cpu_b1 {
+ cpu-supply = <&vdd_cpu_b>;
+};
+
+&emmc_phy {
+ status = "okay";
+};
+
+&gmac {
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ assigned-clock-parents = <&clkin_gmac>;
+ clock_in_out = "input";
+ phy-supply = <&vcc3v3_s3>;
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii_pins>;
+ snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 50000>;
+ tx_delay = <0x28>;
+ rx_delay = <0x11>;
+ status = "okay";
+};
+
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
+&hdmi {
+ ddc-i2c-bus = <&i2c3>;
+ status = "okay";
+};
+
+&hdmi_sound {
+ status = "okay";
+};
+
+&i2c0 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <168>;
+ i2c-scl-falling-time-ns = <4>;
+ status = "okay";
+
+ rk808: pmic@1b {
+ compatible = "rockchip,rk808";
+ reg = <0x1b>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <1>;
+ clock-output-names = "rtc_clko_soc", "rtc_clko_wifi";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
+ wakeup-source;
+
+ vcc1-supply = <&vcc3v3_sys>;
+ vcc2-supply = <&vcc3v3_sys>;
+ vcc3-supply = <&vcc3v3_sys>;
+ vcc4-supply = <&vcc3v3_sys>;
+ vcc6-supply = <&vcc3v3_sys>;
+ vcc7-supply = <&vcc3v3_sys>;
+ vcc8-supply = <&vcc3v3_sys>;
+ vcc9-supply = <&vcc3v3_sys>;
+ vcc10-supply = <&vcc3v3_sys>;
+ vcc11-supply = <&vcc3v3_sys>;
+ vcc12-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_3v0>;
+
+ regulators {
+ vdd_center: DCDC_REG1 {
+ regulator-name = "vdd_center";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_cpu_l: DCDC_REG2 {
+ regulator-name = "vdd_cpu_l";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <6001>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_ddr: DCDC_REG3 {
+ regulator-name = "vcc_ddr";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ };
+ };
+
+ vcc_1v8: DCDC_REG4 {
+ regulator-name = "vcc_1v8";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc1v8_dvp: LDO_REG1 {
+ regulator-name = "vcc1v8_dvp";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v0_tp: LDO_REG2 {
+ regulator-name = "vcc3v0_tp";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc1v8_pmupll: LDO_REG3 {
+ regulator-name = "vcc1v8_pmupll";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1800000>;
+ };
+ };
+
+ vcc_sdio: LDO_REG4 {
+ regulator-name = "vcc_sdio";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcca3v0_codec: LDO_REG5 {
+ regulator-name = "vcca3v0_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_1v5: LDO_REG6 {
+ regulator-name = "vcc_1v5";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <1500000>;
+ };
+ };
+
+ vcca1v8_codec: LDO_REG7 {
+ regulator-name = "vcca1v8_codec";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <2500000>;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc_3v0: LDO_REG8 {
+ regulator-name = "vcc_3v0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3400000>;
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-microvolt = <3000000>;
+ };
+ };
+
+ vcc3v3_s3: SWITCH_REG1 {
+ regulator-name = "vcc3v3_s3";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vcc3v3_s0: SWITCH_REG2 {
+ regulator-name = "vcc3v3_s0";
+ regulator-always-on;
+ regulator-boot-on;
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+ };
+ };
+
+ vdd_cpu_b: regulator@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu_b";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ vdd_gpu: regulator@41 {
+ compatible = "silergy,syr828";
+ reg = <0x41>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_gpu";
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <1000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc3v3_sys>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+};
+
+&i2c1 {
+ i2c-scl-rising-time-ns = <450>;
+ i2c-scl-falling-time-ns = <15>;
+ status = "okay";
+};
+
+&i2c3 {
+ i2c-scl-rising-time-ns = <450>;
+ i2c-scl-falling-time-ns = <15>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+ i2c-scl-rising-time-ns = <450>;
+ i2c-scl-falling-time-ns = <15>;
+ status = "okay";
+
+ ak09911@c {
+ compatible = "asahi-kasei,ak09911";
+ reg = <0x0c>;
+ vdd-supply = <&vcc3v3_s3>;
+ vid-supply = <&vcc3v3_s3>;
+ };
+
+ mpu6500@68 {
+ compatible = "invensense,mpu6500";
+ reg = <0x68>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PC6 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsensor_int_l>;
+ vddio-supply = <&vcc3v3_s3>;
+ };
+
+ lsm6ds3@6a {
+ compatible = "st,lsm6ds3";
+ reg = <0x6a>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PD0 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gyr_int_l>;
+ vdd-supply = <&vcc3v3_s3>;
+ vddio-supply = <&vcc3v3_s3>;
+ };
+
+ cm32181@10 {
+ compatible = "capella,cm32181";
+ reg = <0x10>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <RK_PD0 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&light_int_l>;
+ vdd-supply = <&vcc3v3_s3>;
+ };
+
+ fusb302@22 {
+ compatible = "fcs,fusb302";
+ reg = <0x22>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <RK_PA2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&chg_cc_int_l>;
+ vbus-supply = <&vbus_typec>;
+ };
+};
+
+&io_domains {
+ status = "okay";
+ bt656-supply = <&vcc_3v0>;
+ audio-supply = <&vcca1v8_codec>;
+ sdmmc-supply = <&vcc_sdio>;
+ gpio1830-supply = <&vcc_3v0>;
+};
+
+&pmu_io_domains {
+ status = "okay";
+ pmu1830-supply = <&vcc_3v0>;
+};
+
+&pinctrl {
+ buttons {
+ pwr_btn: pwr-btn {
+ rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+ rockchip,pins =
+ <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ sd {
+ sdmmc0_pwr_h: sdmmc0-pwr-h {
+ rockchip,pins =
+ <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb2 {
+ vcc5v0_host_en: vcc5v0-host-en {
+ rockchip,pins =
+ <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ vcc5v0_typec_en: vcc5v0-typec-en {
+ rockchip,pins =
+ <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio-pwrseq {
+ wifi_reg_on_h: wifi-reg-on-h {
+ rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ wifi {
+ wifi_host_wake_l: wifi-host-wake-l {
+ rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ bluetooth {
+ bt_reg_on_h: bt-enable-h {
+ rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_host_wake_l: bt-host-wake-l {
+ rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_wake_l: bt-wake-l {
+ rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ mpu6500 {
+ gsensor_int_l: gsensor-int-l {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ lsm6ds3 {
+ gyr_int_l: gyr-int-l {
+ rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ cm32181 {
+ light_int_l: light-int-l {
+ rockchip,pins = <4 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ fusb302 {
+ chg_cc_int_l: chg-cc-int-l {
+ rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm2 {
+ status = "okay";
+};
+
+&saradc {
+ vref-supply = <&vcca1v8_s3>;
+ status = "okay";
+};
+
+&sdhci {
+ bus-width = <8>;
+ mmc-hs400-1_8v;
+ mmc-hs400-enhanced-strobe;
+ non-removable;
+ status = "okay";
+};
+
+&sdio0 {
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ clock-frequency = <50000000>;
+ disable-wp;
+ keep-power-in-suspend;
+ max-frequency = <50000000>;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>;
+ sd-uhs-sdr104;
+ status = "okay";
+
+ brcmf: wifi@1 {
+ compatible = "brcm,bcm4329-fmac";
+ interrupt-parent = <&gpio0>;
+ interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>;
+ interrupt-names = "host-wake";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_host_wake_l>;
+ };
+};
+
+&sdmmc {
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ cap-sd-highspeed;
+ cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>;
+ clock-frequency = <150000000>;
+ disable-wp;
+ max-frequency = <150000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+ vmmc-supply = <&vcc3v0_sd>;
+ vqmmc-supply = <&vcc_sdio>;
+ status = "okay";
+};
+
+&tcphy0 {
+ status = "okay";
+};
+
+&tcphy1 {
+ status = "okay";
+};
+
+&tsadc {
+ rockchip,hw-tshut-mode = <1>;
+ rockchip,hw-tshut-polarity = <1>;
+ status = "okay";
+};
+
+&u2phy0 {
+ status = "okay";
+
+ u2phy0_otg: otg-port {
+ phy-supply = <&vbus_typec>;
+ status = "okay";
+ };
+
+ u2phy0_host: host-port {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+ };
+};
+
+&u2phy1 {
+ status = "okay";
+
+ u2phy1_otg: otg-port {
+ status = "okay";
+ };
+
+ u2phy1_host: host-port {
+ phy-supply = <&vcc5v0_host>;
+ status = "okay";
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ clocks = <&rk808 1>;
+ clock-names = "lpo";
+ device-wakeup-gpios = <&gpio2 RK_PD2 GPIO_ACTIVE_HIGH>;
+ host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_reg_on_h>;
+ vbat-supply = <&vcc3v3_sys>;
+ vddio-supply = <&vcc_1v8>;
+ };
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_host0_ohci {
+ status = "okay";
+};
+
+&usb_host1_ehci {
+ status = "okay";
+};
+
+&usb_host1_ohci {
+ status = "okay";
+};
+
+&usbdrd3_0 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_0 {
+ status = "okay";
+ dr_mode = "otg";
+};
+
+&usbdrd3_1 {
+ status = "okay";
+};
+
+&usbdrd_dwc3_1 {
+ status = "okay";
+ dr_mode = "host";
+};
+
+&vopb {
+ status = "okay";
+};
+
+&vopb_mmu {
+ status = "okay";
+};
+
+&vopl {
+ status = "okay";
+};
+
+&vopl_mmu {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 1e6a71066c16..d80d6b726820 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -168,27 +168,27 @@
haikou_pin_hog: haikou-pin-hog {
rockchip,pins =
/* LID_BTN */
- <RK_GPIO0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>,
+ <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_up>,
/* BATLOW# */
- <RK_GPIO0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>,
+ <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>,
/* SLP_BTN# */
- <RK_GPIO0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>,
+ <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_up>,
/* BIOS_DISABLE# */
- <RK_GPIO0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
+ <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
leds {
led_sd_haikou: led-sd-gpio {
rockchip,pins =
- <RK_GPIO1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
usb2 {
otg_vbus_drv: otg-vbus-drv {
rockchip,pins =
- <RK_GPIO0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 0130b9f98c9d..62ea288a1a70 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -146,6 +146,7 @@
&emmc_phy {
status = "okay";
+ drive-impedance-ohm = <33>;
};
&gmac {
@@ -369,8 +370,6 @@
fan: fan@18 {
compatible = "ti,amc6821";
reg = <0x18>;
- cooling-min-state = <0>;
- cooling-max-state = <9>;
#cooling-cells = <2>;
};
@@ -413,10 +412,10 @@
*/
&i2s0_2ch_bus {
rockchip,pins =
- <RK_GPIO3 RK_PD0 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 RK_PD2 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 RK_PD3 RK_FUNC_1 &pcfg_pull_none>,
- <RK_GPIO3 RK_PD7 RK_FUNC_1 &pcfg_pull_none>;
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>;
};
&io_domains {
@@ -440,29 +439,29 @@
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
- <RK_GPIO1 RK_PC4 RK_FUNC_1 &pcfg_pull_up>,
- <RK_GPIO1 RK_PC5 RK_FUNC_1 &pcfg_pull_up>;
+ <1 RK_PC4 1 &pcfg_pull_up>,
+ <1 RK_PC5 1 &pcfg_pull_up>;
};
};
leds {
led_pin_module: led-module-gpio {
rockchip,pins =
- <RK_GPIO2 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
+ <2 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins =
- <RK_GPIO1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
usb2 {
vcc5v0_host_en: vcc5v0-host-en {
rockchip,pins =
- <RK_GPIO4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
+ <4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
index 844eac939a97..e030627159c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
@@ -157,6 +157,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
&hdmi {
ddc-i2c-bus = <&i2c3>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 2927db4dda9d..c7d48d41e184 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -18,6 +18,15 @@
reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
};
+ vcc12v_dcin: vcc12v-dcin {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc12v_dcin";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
vcc1v8_s0: vcc1v8-s0 {
compatible = "regulator-fixed";
regulator-name = "vcc1v8_s0";
@@ -26,12 +35,13 @@
regulator-always-on;
};
- vcc_sys: vcc-sys {
+ vcc5v0_sys: vcc5v0-sys {
compatible = "regulator-fixed";
- regulator-name = "vcc_sys";
+ regulator-name = "vcc5v0_sys";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
+ vin-supply = <&vcc12v_dcin>;
};
vcc3v3_sys: vcc3v3-sys {
@@ -40,7 +50,7 @@
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
};
vcc3v3_pcie: vcc3v3-pcie-regulator {
@@ -64,7 +74,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
regulator-always-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
};
};
@@ -96,6 +106,11 @@
status = "okay";
};
+&gpu {
+ mali-supply = <&vdd_gpu>;
+ status = "okay";
+};
+
&hdmi {
ddc-i2c-bus = <&i2c3>;
pinctrl-names = "default";
@@ -123,7 +138,7 @@
regulator-ramp-delay = <1000>;
regulator-always-on;
regulator-boot-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
status = "okay";
regulator-state-mem {
@@ -141,7 +156,7 @@
regulator-ramp-delay = <1000>;
regulator-always-on;
regulator-boot-on;
- vin-supply = <&vcc_sys>;
+ vin-supply = <&vcc5v0_sys>;
regulator-state-mem {
regulator-off-in-suspend;
};
@@ -159,16 +174,16 @@
#clock-cells = <1>;
clock-output-names = "xin32k", "rk808-clkout2";
- vcc1-supply = <&vcc_sys>;
- vcc2-supply = <&vcc_sys>;
- vcc3-supply = <&vcc_sys>;
- vcc4-supply = <&vcc_sys>;
- vcc6-supply = <&vcc_sys>;
- vcc7-supply = <&vcc_sys>;
+ vcc1-supply = <&vcc5v0_sys>;
+ vcc2-supply = <&vcc5v0_sys>;
+ vcc3-supply = <&vcc5v0_sys>;
+ vcc4-supply = <&vcc5v0_sys>;
+ vcc6-supply = <&vcc5v0_sys>;
+ vcc7-supply = <&vcc5v0_sys>;
vcc8-supply = <&vcc3v3_sys>;
- vcc9-supply = <&vcc_sys>;
- vcc10-supply = <&vcc_sys>;
- vcc11-supply = <&vcc_sys>;
+ vcc9-supply = <&vcc5v0_sys>;
+ vcc10-supply = <&vcc5v0_sys>;
+ vcc11-supply = <&vcc5v0_sys>;
vcc12-supply = <&vcc3v3_sys>;
vddio-supply = <&vcc_1v8>;
@@ -396,62 +411,62 @@
sdmmc {
sdmmc_bus1: sdmmc-bus1 {
rockchip,pins =
- <4 8 RK_FUNC_1 &pcfg_pull_up_8ma>;
+ <4 RK_PB0 1 &pcfg_pull_up_8ma>;
};
sdmmc_bus4: sdmmc-bus4 {
rockchip,pins =
- <4 8 RK_FUNC_1 &pcfg_pull_up_8ma>,
- <4 9 RK_FUNC_1 &pcfg_pull_up_8ma>,
- <4 10 RK_FUNC_1 &pcfg_pull_up_8ma>,
- <4 11 RK_FUNC_1 &pcfg_pull_up_8ma>;
+ <4 RK_PB0 1 &pcfg_pull_up_8ma>,
+ <4 RK_PB1 1 &pcfg_pull_up_8ma>,
+ <4 RK_PB2 1 &pcfg_pull_up_8ma>,
+ <4 RK_PB3 1 &pcfg_pull_up_8ma>;
};
sdmmc_clk: sdmmc-clk {
rockchip,pins =
- <4 12 RK_FUNC_1 &pcfg_pull_none_18ma>;
+ <4 RK_PB4 1 &pcfg_pull_none_18ma>;
};
sdmmc_cmd: sdmmc-cmd {
rockchip,pins =
- <4 13 RK_FUNC_1 &pcfg_pull_up_8ma>;
+ <4 RK_PB5 1 &pcfg_pull_up_8ma>;
};
};
sdio0 {
sdio0_bus4: sdio0-bus4 {
rockchip,pins =
- <2 20 RK_FUNC_1 &pcfg_pull_up_20ma>,
- <2 21 RK_FUNC_1 &pcfg_pull_up_20ma>,
- <2 22 RK_FUNC_1 &pcfg_pull_up_20ma>,
- <2 23 RK_FUNC_1 &pcfg_pull_up_20ma>;
+ <2 RK_PC4 1 &pcfg_pull_up_20ma>,
+ <2 RK_PC5 1 &pcfg_pull_up_20ma>,
+ <2 RK_PC6 1 &pcfg_pull_up_20ma>,
+ <2 RK_PC7 1 &pcfg_pull_up_20ma>;
};
sdio0_cmd: sdio0-cmd {
rockchip,pins =
- <2 24 RK_FUNC_1 &pcfg_pull_up_20ma>;
+ <2 RK_PD0 1 &pcfg_pull_up_20ma>;
};
sdio0_clk: sdio0-clk {
rockchip,pins =
- <2 25 RK_FUNC_1 &pcfg_pull_none_20ma>;
+ <2 RK_PD1 1 &pcfg_pull_none_20ma>;
};
};
pmic {
pmic_int_l: pmic-int-l {
rockchip,pins =
- <1 21 RK_FUNC_GPIO &pcfg_pull_up>;
+ <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
};
vsel1_gpio: vsel1-gpio {
rockchip,pins =
- <1 17 RK_FUNC_GPIO &pcfg_pull_down>;
+ <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>;
};
vsel2_gpio: vsel2-gpio {
rockchip,pins =
- <1 14 RK_FUNC_GPIO &pcfg_pull_down>;
+ <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
index 1f2394e0587d..20ec7d1c25d7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
@@ -222,6 +222,10 @@
status = "okay";
};
+&hdmi_sound {
+ status = "okay";
+};
+
&gpu {
mali-supply = <&vdd_gpu>;
status = "okay";
@@ -504,7 +508,7 @@
status = "okay";
bt656-supply = <&vcc1v8_dvp>;
- audio-supply = <&vcca1v8_codec>;
+ audio-supply = <&vcc_3v0>;
sdmmc-supply = <&vcc_sdio>;
gpio1830-supply = <&vcc_3v0>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 946d3589575a..04623e52ac5d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -471,7 +471,7 @@
fan {
motor_pwr: motor-pwr {
rockchip,pins =
- <RK_GPIO1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
@@ -493,7 +493,7 @@
sd {
sdmmc0_pwr_h: sdmmc0-pwr-h {
rockchip,pins =
- <RK_GPIO0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index db9d948c0b03..196ac9b78076 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -71,6 +71,7 @@
compatible = "arm,cortex-a53";
reg = <0x0 0x0>;
enable-method = "psci";
+ capacity-dmips-mhz = <485>;
clocks = <&cru ARMCLKL>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <100>;
@@ -82,6 +83,7 @@
compatible = "arm,cortex-a53";
reg = <0x0 0x1>;
enable-method = "psci";
+ capacity-dmips-mhz = <485>;
clocks = <&cru ARMCLKL>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <100>;
@@ -93,6 +95,7 @@
compatible = "arm,cortex-a53";
reg = <0x0 0x2>;
enable-method = "psci";
+ capacity-dmips-mhz = <485>;
clocks = <&cru ARMCLKL>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <100>;
@@ -104,6 +107,7 @@
compatible = "arm,cortex-a53";
reg = <0x0 0x3>;
enable-method = "psci";
+ capacity-dmips-mhz = <485>;
clocks = <&cru ARMCLKL>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <100>;
@@ -115,6 +119,7 @@
compatible = "arm,cortex-a72";
reg = <0x0 0x100>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
clocks = <&cru ARMCLKB>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <436>;
@@ -126,6 +131,7 @@
compatible = "arm,cortex-a72";
reg = <0x0 0x101>;
enable-method = "psci";
+ capacity-dmips-mhz = <1024>;
clocks = <&cru ARMCLKB>;
#cooling-cells = <2>; /* min followed by max */
dynamic-power-coefficient = <436>;
@@ -333,6 +339,7 @@
phys = <&emmc_phy>;
phy-names = "phy_arasan";
power-domains = <&power RK3399_PD_EMMC>;
+ disable-cqe-dcmd;
status = "disabled";
};
@@ -1450,6 +1457,7 @@
clock-names = "refclk";
#phy-cells = <1>;
resets = <&cru SRST_PCIEPHY>;
+ drive-impedance-ohm = <50>;
reset-names = "phy";
status = "disabled";
};
@@ -2045,14 +2053,14 @@
clock {
clk_32k: clk-32k {
- rockchip,pins = <0 0 RK_FUNC_2 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA0 2 &pcfg_pull_none>;
};
};
edp {
edp_hpd: edp-hpd {
rockchip,pins =
- <4 23 RK_FUNC_2 &pcfg_pull_none>;
+ <4 RK_PC7 2 &pcfg_pull_none>;
};
};
@@ -2060,576 +2068,576 @@
rgmii_pins: rgmii-pins {
rockchip,pins =
/* mac_txclk */
- <3 17 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PC1 1 &pcfg_pull_none_13ma>,
/* mac_rxclk */
- <3 14 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB6 1 &pcfg_pull_none>,
/* mac_mdio */
- <3 13 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB5 1 &pcfg_pull_none>,
/* mac_txen */
- <3 12 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PB4 1 &pcfg_pull_none_13ma>,
/* mac_clk */
- <3 11 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB3 1 &pcfg_pull_none>,
/* mac_rxdv */
- <3 9 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB1 1 &pcfg_pull_none>,
/* mac_mdc */
- <3 8 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB0 1 &pcfg_pull_none>,
/* mac_rxd1 */
- <3 7 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA7 1 &pcfg_pull_none>,
/* mac_rxd0 */
- <3 6 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA6 1 &pcfg_pull_none>,
/* mac_txd1 */
- <3 5 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PA5 1 &pcfg_pull_none_13ma>,
/* mac_txd0 */
- <3 4 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PA4 1 &pcfg_pull_none_13ma>,
/* mac_rxd3 */
- <3 3 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA3 1 &pcfg_pull_none>,
/* mac_rxd2 */
- <3 2 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA2 1 &pcfg_pull_none>,
/* mac_txd3 */
- <3 1 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PA1 1 &pcfg_pull_none_13ma>,
/* mac_txd2 */
- <3 0 RK_FUNC_1 &pcfg_pull_none_13ma>;
+ <3 RK_PA0 1 &pcfg_pull_none_13ma>;
};
rmii_pins: rmii-pins {
rockchip,pins =
/* mac_mdio */
- <3 13 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB5 1 &pcfg_pull_none>,
/* mac_txen */
- <3 12 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PB4 1 &pcfg_pull_none_13ma>,
/* mac_clk */
- <3 11 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB3 1 &pcfg_pull_none>,
/* mac_rxer */
- <3 10 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB2 1 &pcfg_pull_none>,
/* mac_rxdv */
- <3 9 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB1 1 &pcfg_pull_none>,
/* mac_mdc */
- <3 8 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PB0 1 &pcfg_pull_none>,
/* mac_rxd1 */
- <3 7 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA7 1 &pcfg_pull_none>,
/* mac_rxd0 */
- <3 6 RK_FUNC_1 &pcfg_pull_none>,
+ <3 RK_PA6 1 &pcfg_pull_none>,
/* mac_txd1 */
- <3 5 RK_FUNC_1 &pcfg_pull_none_13ma>,
+ <3 RK_PA5 1 &pcfg_pull_none_13ma>,
/* mac_txd0 */
- <3 4 RK_FUNC_1 &pcfg_pull_none_13ma>;
+ <3 RK_PA4 1 &pcfg_pull_none_13ma>;
};
};
i2c0 {
i2c0_xfer: i2c0-xfer {
rockchip,pins =
- <1 15 RK_FUNC_2 &pcfg_pull_none>,
- <1 16 RK_FUNC_2 &pcfg_pull_none>;
+ <1 RK_PB7 2 &pcfg_pull_none>,
+ <1 RK_PC0 2 &pcfg_pull_none>;
};
};
i2c1 {
i2c1_xfer: i2c1-xfer {
rockchip,pins =
- <4 2 RK_FUNC_1 &pcfg_pull_none>,
- <4 1 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PA2 1 &pcfg_pull_none>,
+ <4 RK_PA1 1 &pcfg_pull_none>;
};
};
i2c2 {
i2c2_xfer: i2c2-xfer {
rockchip,pins =
- <2 1 RK_FUNC_2 &pcfg_pull_none_12ma>,
- <2 0 RK_FUNC_2 &pcfg_pull_none_12ma>;
+ <2 RK_PA1 2 &pcfg_pull_none_12ma>,
+ <2 RK_PA0 2 &pcfg_pull_none_12ma>;
};
};
i2c3 {
i2c3_xfer: i2c3-xfer {
rockchip,pins =
- <4 17 RK_FUNC_1 &pcfg_pull_none>,
- <4 16 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC1 1 &pcfg_pull_none>,
+ <4 RK_PC0 1 &pcfg_pull_none>;
};
};
i2c4 {
i2c4_xfer: i2c4-xfer {
rockchip,pins =
- <1 12 RK_FUNC_1 &pcfg_pull_none>,
- <1 11 RK_FUNC_1 &pcfg_pull_none>;
+ <1 RK_PB4 1 &pcfg_pull_none>,
+ <1 RK_PB3 1 &pcfg_pull_none>;
};
};
i2c5 {
i2c5_xfer: i2c5-xfer {
rockchip,pins =
- <3 11 RK_FUNC_2 &pcfg_pull_none>,
- <3 10 RK_FUNC_2 &pcfg_pull_none>;
+ <3 RK_PB3 2 &pcfg_pull_none>,
+ <3 RK_PB2 2 &pcfg_pull_none>;
};
};
i2c6 {
i2c6_xfer: i2c6-xfer {
rockchip,pins =
- <2 10 RK_FUNC_2 &pcfg_pull_none>,
- <2 9 RK_FUNC_2 &pcfg_pull_none>;
+ <2 RK_PB2 2 &pcfg_pull_none>,
+ <2 RK_PB1 2 &pcfg_pull_none>;
};
};
i2c7 {
i2c7_xfer: i2c7-xfer {
rockchip,pins =
- <2 8 RK_FUNC_2 &pcfg_pull_none>,
- <2 7 RK_FUNC_2 &pcfg_pull_none>;
+ <2 RK_PB0 2 &pcfg_pull_none>,
+ <2 RK_PA7 2 &pcfg_pull_none>;
};
};
i2c8 {
i2c8_xfer: i2c8-xfer {
rockchip,pins =
- <1 21 RK_FUNC_1 &pcfg_pull_none>,
- <1 20 RK_FUNC_1 &pcfg_pull_none>;
+ <1 RK_PC5 1 &pcfg_pull_none>,
+ <1 RK_PC4 1 &pcfg_pull_none>;
};
};
i2s0 {
i2s0_2ch_bus: i2s0-2ch-bus {
rockchip,pins =
- <3 24 RK_FUNC_1 &pcfg_pull_none>,
- <3 25 RK_FUNC_1 &pcfg_pull_none>,
- <3 26 RK_FUNC_1 &pcfg_pull_none>,
- <3 27 RK_FUNC_1 &pcfg_pull_none>,
- <3 31 RK_FUNC_1 &pcfg_pull_none>,
- <4 0 RK_FUNC_1 &pcfg_pull_none>;
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
};
i2s0_8ch_bus: i2s0-8ch-bus {
rockchip,pins =
- <3 24 RK_FUNC_1 &pcfg_pull_none>,
- <3 25 RK_FUNC_1 &pcfg_pull_none>,
- <3 26 RK_FUNC_1 &pcfg_pull_none>,
- <3 27 RK_FUNC_1 &pcfg_pull_none>,
- <3 28 RK_FUNC_1 &pcfg_pull_none>,
- <3 29 RK_FUNC_1 &pcfg_pull_none>,
- <3 30 RK_FUNC_1 &pcfg_pull_none>,
- <3 31 RK_FUNC_1 &pcfg_pull_none>,
- <4 0 RK_FUNC_1 &pcfg_pull_none>;
+ <3 RK_PD0 1 &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD4 1 &pcfg_pull_none>,
+ <3 RK_PD5 1 &pcfg_pull_none>,
+ <3 RK_PD6 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
};
};
i2s1 {
i2s1_2ch_bus: i2s1-2ch-bus {
rockchip,pins =
- <4 3 RK_FUNC_1 &pcfg_pull_none>,
- <4 4 RK_FUNC_1 &pcfg_pull_none>,
- <4 5 RK_FUNC_1 &pcfg_pull_none>,
- <4 6 RK_FUNC_1 &pcfg_pull_none>,
- <4 7 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PA3 1 &pcfg_pull_none>,
+ <4 RK_PA4 1 &pcfg_pull_none>,
+ <4 RK_PA5 1 &pcfg_pull_none>,
+ <4 RK_PA6 1 &pcfg_pull_none>,
+ <4 RK_PA7 1 &pcfg_pull_none>;
};
};
sdio0 {
sdio0_bus1: sdio0-bus1 {
rockchip,pins =
- <2 RK_PC4 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PC4 1 &pcfg_pull_up>;
};
sdio0_bus4: sdio0-bus4 {
rockchip,pins =
- <2 RK_PC4 RK_FUNC_1 &pcfg_pull_up>,
- <2 RK_PC5 RK_FUNC_1 &pcfg_pull_up>,
- <2 RK_PC6 RK_FUNC_1 &pcfg_pull_up>,
- <2 RK_PC7 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PC4 1 &pcfg_pull_up>,
+ <2 RK_PC5 1 &pcfg_pull_up>,
+ <2 RK_PC6 1 &pcfg_pull_up>,
+ <2 RK_PC7 1 &pcfg_pull_up>;
};
sdio0_cmd: sdio0-cmd {
rockchip,pins =
- <2 RK_PD0 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PD0 1 &pcfg_pull_up>;
};
sdio0_clk: sdio0-clk {
rockchip,pins =
- <2 RK_PD1 RK_FUNC_1 &pcfg_pull_none>;
+ <2 RK_PD1 1 &pcfg_pull_none>;
};
sdio0_cd: sdio0-cd {
rockchip,pins =
- <2 RK_PD2 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PD2 1 &pcfg_pull_up>;
};
sdio0_pwr: sdio0-pwr {
rockchip,pins =
- <2 RK_PD3 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PD3 1 &pcfg_pull_up>;
};
sdio0_bkpwr: sdio0-bkpwr {
rockchip,pins =
- <2 RK_PD4 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PD4 1 &pcfg_pull_up>;
};
sdio0_wp: sdio0-wp {
rockchip,pins =
- <0 RK_PA3 RK_FUNC_1 &pcfg_pull_up>;
+ <0 RK_PA3 1 &pcfg_pull_up>;
};
sdio0_int: sdio0-int {
rockchip,pins =
- <0 RK_PA4 RK_FUNC_1 &pcfg_pull_up>;
+ <0 RK_PA4 1 &pcfg_pull_up>;
};
};
sdmmc {
sdmmc_bus1: sdmmc-bus1 {
rockchip,pins =
- <4 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+ <4 RK_PB0 1 &pcfg_pull_up>;
};
sdmmc_bus4: sdmmc-bus4 {
rockchip,pins =
- <4 RK_PB0 RK_FUNC_1 &pcfg_pull_up>,
- <4 RK_PB1 RK_FUNC_1 &pcfg_pull_up>,
- <4 RK_PB2 RK_FUNC_1 &pcfg_pull_up>,
- <4 RK_PB3 RK_FUNC_1 &pcfg_pull_up>;
+ <4 RK_PB0 1 &pcfg_pull_up>,
+ <4 RK_PB1 1 &pcfg_pull_up>,
+ <4 RK_PB2 1 &pcfg_pull_up>,
+ <4 RK_PB3 1 &pcfg_pull_up>;
};
sdmmc_clk: sdmmc-clk {
rockchip,pins =
- <4 RK_PB4 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PB4 1 &pcfg_pull_none>;
};
sdmmc_cmd: sdmmc-cmd {
rockchip,pins =
- <4 RK_PB5 RK_FUNC_1 &pcfg_pull_up>;
+ <4 RK_PB5 1 &pcfg_pull_up>;
};
sdmmc_cd: sdmmc-cd {
rockchip,pins =
- <0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>;
+ <0 RK_PA7 1 &pcfg_pull_up>;
};
sdmmc_wp: sdmmc-wp {
rockchip,pins =
- <0 RK_PB0 RK_FUNC_1 &pcfg_pull_up>;
+ <0 RK_PB0 1 &pcfg_pull_up>;
};
};
sleep {
ap_pwroff: ap-pwroff {
- rockchip,pins = <1 5 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA5 1 &pcfg_pull_none>;
};
ddrio_pwroff: ddrio-pwroff {
- rockchip,pins = <0 1 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <0 RK_PA1 1 &pcfg_pull_none>;
};
};
spdif {
spdif_bus: spdif-bus {
rockchip,pins =
- <4 21 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC5 1 &pcfg_pull_none>;
};
spdif_bus_1: spdif-bus-1 {
rockchip,pins =
- <3 RK_PC0 RK_FUNC_3 &pcfg_pull_none>;
+ <3 RK_PC0 3 &pcfg_pull_none>;
};
};
spi0 {
spi0_clk: spi0-clk {
rockchip,pins =
- <3 6 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA6 2 &pcfg_pull_up>;
};
spi0_cs0: spi0-cs0 {
rockchip,pins =
- <3 7 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA7 2 &pcfg_pull_up>;
};
spi0_cs1: spi0-cs1 {
rockchip,pins =
- <3 8 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PB0 2 &pcfg_pull_up>;
};
spi0_tx: spi0-tx {
rockchip,pins =
- <3 5 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA5 2 &pcfg_pull_up>;
};
spi0_rx: spi0-rx {
rockchip,pins =
- <3 4 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA4 2 &pcfg_pull_up>;
};
};
spi1 {
spi1_clk: spi1-clk {
rockchip,pins =
- <1 9 RK_FUNC_2 &pcfg_pull_up>;
+ <1 RK_PB1 2 &pcfg_pull_up>;
};
spi1_cs0: spi1-cs0 {
rockchip,pins =
- <1 10 RK_FUNC_2 &pcfg_pull_up>;
+ <1 RK_PB2 2 &pcfg_pull_up>;
};
spi1_rx: spi1-rx {
rockchip,pins =
- <1 7 RK_FUNC_2 &pcfg_pull_up>;
+ <1 RK_PA7 2 &pcfg_pull_up>;
};
spi1_tx: spi1-tx {
rockchip,pins =
- <1 8 RK_FUNC_2 &pcfg_pull_up>;
+ <1 RK_PB0 2 &pcfg_pull_up>;
};
};
spi2 {
spi2_clk: spi2-clk {
rockchip,pins =
- <2 11 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PB3 1 &pcfg_pull_up>;
};
spi2_cs0: spi2-cs0 {
rockchip,pins =
- <2 12 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PB4 1 &pcfg_pull_up>;
};
spi2_rx: spi2-rx {
rockchip,pins =
- <2 9 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PB1 1 &pcfg_pull_up>;
};
spi2_tx: spi2-tx {
rockchip,pins =
- <2 10 RK_FUNC_1 &pcfg_pull_up>;
+ <2 RK_PB2 1 &pcfg_pull_up>;
};
};
spi3 {
spi3_clk: spi3-clk {
rockchip,pins =
- <1 17 RK_FUNC_1 &pcfg_pull_up>;
+ <1 RK_PC1 1 &pcfg_pull_up>;
};
spi3_cs0: spi3-cs0 {
rockchip,pins =
- <1 18 RK_FUNC_1 &pcfg_pull_up>;
+ <1 RK_PC2 1 &pcfg_pull_up>;
};
spi3_rx: spi3-rx {
rockchip,pins =
- <1 15 RK_FUNC_1 &pcfg_pull_up>;
+ <1 RK_PB7 1 &pcfg_pull_up>;
};
spi3_tx: spi3-tx {
rockchip,pins =
- <1 16 RK_FUNC_1 &pcfg_pull_up>;
+ <1 RK_PC0 1 &pcfg_pull_up>;
};
};
spi4 {
spi4_clk: spi4-clk {
rockchip,pins =
- <3 2 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA2 2 &pcfg_pull_up>;
};
spi4_cs0: spi4-cs0 {
rockchip,pins =
- <3 3 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA3 2 &pcfg_pull_up>;
};
spi4_rx: spi4-rx {
rockchip,pins =
- <3 0 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA0 2 &pcfg_pull_up>;
};
spi4_tx: spi4-tx {
rockchip,pins =
- <3 1 RK_FUNC_2 &pcfg_pull_up>;
+ <3 RK_PA1 2 &pcfg_pull_up>;
};
};
spi5 {
spi5_clk: spi5-clk {
rockchip,pins =
- <2 22 RK_FUNC_2 &pcfg_pull_up>;
+ <2 RK_PC6 2 &pcfg_pull_up>;
};
spi5_cs0: spi5-cs0 {
rockchip,pins =
- <2 23 RK_FUNC_2 &pcfg_pull_up>;
+ <2 RK_PC7 2 &pcfg_pull_up>;
};
spi5_rx: spi5-rx {
rockchip,pins =
- <2 20 RK_FUNC_2 &pcfg_pull_up>;
+ <2 RK_PC4 2 &pcfg_pull_up>;
};
spi5_tx: spi5-tx {
rockchip,pins =
- <2 21 RK_FUNC_2 &pcfg_pull_up>;
+ <2 RK_PC5 2 &pcfg_pull_up>;
};
};
testclk {
test_clkout0: test-clkout0 {
rockchip,pins =
- <0 0 RK_FUNC_1 &pcfg_pull_none>;
+ <0 RK_PA0 1 &pcfg_pull_none>;
};
test_clkout1: test-clkout1 {
rockchip,pins =
- <2 25 RK_FUNC_2 &pcfg_pull_none>;
+ <2 RK_PD1 2 &pcfg_pull_none>;
};
test_clkout2: test-clkout2 {
rockchip,pins =
- <0 8 RK_FUNC_3 &pcfg_pull_none>;
+ <0 RK_PB0 3 &pcfg_pull_none>;
};
};
tsadc {
otp_gpio: otp-gpio {
- rockchip,pins = <1 6 RK_FUNC_GPIO &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
};
otp_out: otp-out {
- rockchip,pins = <1 6 RK_FUNC_1 &pcfg_pull_none>;
+ rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none>;
};
};
uart0 {
uart0_xfer: uart0-xfer {
rockchip,pins =
- <2 16 RK_FUNC_1 &pcfg_pull_up>,
- <2 17 RK_FUNC_1 &pcfg_pull_none>;
+ <2 RK_PC0 1 &pcfg_pull_up>,
+ <2 RK_PC1 1 &pcfg_pull_none>;
};
uart0_cts: uart0-cts {
rockchip,pins =
- <2 18 RK_FUNC_1 &pcfg_pull_none>;
+ <2 RK_PC2 1 &pcfg_pull_none>;
};
uart0_rts: uart0-rts {
rockchip,pins =
- <2 19 RK_FUNC_1 &pcfg_pull_none>;
+ <2 RK_PC3 1 &pcfg_pull_none>;
};
};
uart1 {
uart1_xfer: uart1-xfer {
rockchip,pins =
- <3 12 RK_FUNC_2 &pcfg_pull_up>,
- <3 13 RK_FUNC_2 &pcfg_pull_none>;
+ <3 RK_PB4 2 &pcfg_pull_up>,
+ <3 RK_PB5 2 &pcfg_pull_none>;
};
};
uart2a {
uart2a_xfer: uart2a-xfer {
rockchip,pins =
- <4 8 RK_FUNC_2 &pcfg_pull_up>,
- <4 9 RK_FUNC_2 &pcfg_pull_none>;
+ <4 RK_PB0 2 &pcfg_pull_up>,
+ <4 RK_PB1 2 &pcfg_pull_none>;
};
};
uart2b {
uart2b_xfer: uart2b-xfer {
rockchip,pins =
- <4 16 RK_FUNC_2 &pcfg_pull_up>,
- <4 17 RK_FUNC_2 &pcfg_pull_none>;
+ <4 RK_PC0 2 &pcfg_pull_up>,
+ <4 RK_PC1 2 &pcfg_pull_none>;
};
};
uart2c {
uart2c_xfer: uart2c-xfer {
rockchip,pins =
- <4 19 RK_FUNC_1 &pcfg_pull_up>,
- <4 20 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC3 1 &pcfg_pull_up>,
+ <4 RK_PC4 1 &pcfg_pull_none>;
};
};
uart3 {
uart3_xfer: uart3-xfer {
rockchip,pins =
- <3 14 RK_FUNC_2 &pcfg_pull_up>,
- <3 15 RK_FUNC_2 &pcfg_pull_none>;
+ <3 RK_PB6 2 &pcfg_pull_up>,
+ <3 RK_PB7 2 &pcfg_pull_none>;
};
uart3_cts: uart3-cts {
rockchip,pins =
- <3 18 RK_FUNC_2 &pcfg_pull_none>;
+ <3 RK_PC0 2 &pcfg_pull_none>;
};
uart3_rts: uart3-rts {
rockchip,pins =
- <3 19 RK_FUNC_2 &pcfg_pull_none>;
+ <3 RK_PC1 2 &pcfg_pull_none>;
};
};
uart4 {
uart4_xfer: uart4-xfer {
rockchip,pins =
- <1 7 RK_FUNC_1 &pcfg_pull_up>,
- <1 8 RK_FUNC_1 &pcfg_pull_none>;
+ <1 RK_PA7 1 &pcfg_pull_up>,
+ <1 RK_PB0 1 &pcfg_pull_none>;
};
};
uarthdcp {
uarthdcp_xfer: uarthdcp-xfer {
rockchip,pins =
- <4 21 RK_FUNC_2 &pcfg_pull_up>,
- <4 22 RK_FUNC_2 &pcfg_pull_none>;
+ <4 RK_PC5 2 &pcfg_pull_up>,
+ <4 RK_PC6 2 &pcfg_pull_none>;
};
};
pwm0 {
pwm0_pin: pwm0-pin {
rockchip,pins =
- <4 RK_PC2 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC2 1 &pcfg_pull_none>;
};
pwm0_pin_pull_down: pwm0-pin-pull-down {
rockchip,pins =
- <4 RK_PC2 RK_FUNC_1 &pcfg_pull_down>;
+ <4 RK_PC2 1 &pcfg_pull_down>;
};
vop0_pwm_pin: vop0-pwm-pin {
rockchip,pins =
- <4 RK_PC2 RK_FUNC_2 &pcfg_pull_none>;
+ <4 RK_PC2 2 &pcfg_pull_none>;
};
vop1_pwm_pin: vop1-pwm-pin {
rockchip,pins =
- <4 RK_PC2 RK_FUNC_3 &pcfg_pull_none>;
+ <4 RK_PC2 3 &pcfg_pull_none>;
};
};
pwm1 {
pwm1_pin: pwm1-pin {
rockchip,pins =
- <4 RK_PC6 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC6 1 &pcfg_pull_none>;
};
pwm1_pin_pull_down: pwm1-pin-pull-down {
rockchip,pins =
- <4 RK_PC6 RK_FUNC_1 &pcfg_pull_down>;
+ <4 RK_PC6 1 &pcfg_pull_down>;
};
};
pwm2 {
pwm2_pin: pwm2-pin {
rockchip,pins =
- <1 RK_PC3 RK_FUNC_1 &pcfg_pull_none>;
+ <1 RK_PC3 1 &pcfg_pull_none>;
};
pwm2_pin_pull_down: pwm2-pin-pull-down {
rockchip,pins =
- <1 RK_PC3 RK_FUNC_1 &pcfg_pull_down>;
+ <1 RK_PC3 1 &pcfg_pull_down>;
};
};
pwm3a {
pwm3a_pin: pwm3a-pin {
rockchip,pins =
- <0 RK_PA6 RK_FUNC_1 &pcfg_pull_none>;
+ <0 RK_PA6 1 &pcfg_pull_none>;
};
};
pwm3b {
pwm3b_pin: pwm3b-pin {
rockchip,pins =
- <1 RK_PB6 RK_FUNC_1 &pcfg_pull_none>;
+ <1 RK_PB6 1 &pcfg_pull_none>;
};
};
hdmi {
hdmi_i2c_xfer: hdmi-i2c-xfer {
rockchip,pins =
- <4 RK_PC1 RK_FUNC_3 &pcfg_pull_none>,
- <4 RK_PC0 RK_FUNC_3 &pcfg_pull_none>;
+ <4 RK_PC1 3 &pcfg_pull_none>,
+ <4 RK_PC0 3 &pcfg_pull_none>;
};
hdmi_cec: hdmi-cec {
rockchip,pins =
- <4 RK_PC7 RK_FUNC_1 &pcfg_pull_none>;
+ <4 RK_PC7 1 &pcfg_pull_none>;
};
};
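
Note on the pinctrl hunks above: the conversion is purely mechanical. The RK_Pxy constants from include/dt-bindings/pinctrl/rockchip.h number the pins of a bank in four groups of eight (A = 0-7, B = 8-15, C = 16-23, D = 24-31), so a raw index converts as pin = 8 * group + offset; bank 3 pin 24 therefore becomes RK_PD0, pin 31 becomes RK_PD7, and bank 4 pin 0 becomes RK_PA0. Likewise the RK_FUNC_1/RK_FUNC_2/RK_FUNC_3 wrappers are simply the bare mux numbers 1/2/3, so the selected function does not change, only the notation.
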
diff --git a/arch/arm64/boot/dts/sprd/whale2.dtsi b/arch/arm64/boot/dts/sprd/whale2.dtsi
index eb6be5675f79..4bb862c6b083 100644
--- a/arch/arm64/boot/dts/sprd/whale2.dtsi
+++ b/arch/arm64/boot/dts/sprd/whale2.dtsi
@@ -75,7 +75,9 @@
"sprd,sc9836-uart";
reg = <0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ext_26m>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&apapb_gate CLK_UART0_EB>,
+ <&ap_clk CLK_UART0>, <&ext_26m>;
status = "disabled";
};
@@ -84,7 +86,9 @@
"sprd,sc9836-uart";
reg = <0x100000 0x100>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ext_26m>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&apapb_gate CLK_UART1_EB>,
+ <&ap_clk CLK_UART1>, <&ext_26m>;
status = "disabled";
};
@@ -93,7 +97,9 @@
"sprd,sc9836-uart";
reg = <0x200000 0x100>;
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ext_26m>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&apapb_gate CLK_UART2_EB>,
+ <&ap_clk CLK_UART2>, <&ext_26m>;
status = "disabled";
};
@@ -102,7 +108,9 @@
"sprd,sc9836-uart";
reg = <0x300000 0x100>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&ext_26m>;
+ clock-names = "enable", "uart", "source";
+ clocks = <&apapb_gate CLK_UART3_EB>,
+ <&ap_clk CLK_UART3>, <&ext_26m>;
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
index 11cc67184fa9..2421ec71a201 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zc1751-xm016-dc2.dts
@@ -89,6 +89,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index cef81671f3ab..2a3b66547c6d 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -110,6 +110,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
index af4d86882a5c..1780ed237daf 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revB.dts
@@ -21,6 +21,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
/* Cleanup from RevA */
/delete-node/ phy@21;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
index d4ad19a38c93..8f456146409f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu104-revA.dts
@@ -55,6 +55,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 94cf5094df64..93ce7eb81498 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -111,6 +111,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
index 460adc378295..8bb0001a026f 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu111-revA.dts
@@ -106,6 +106,7 @@
ti,rx-internal-delay = <0x8>;
ti,tx-internal-delay = <0xa>;
ti,fifo-depth = <0x1>;
+ ti,dp83867-rxctrl-strap-quirk;
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2d9c39033c1a..4d583514258c 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -29,6 +29,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_ARCH_AGILEX=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_BCM2835=y
@@ -46,15 +47,6 @@ CONFIG_ARCH_MVEBU=y
CONFIG_ARCH_MXC=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_RENESAS=y
-CONFIG_ARCH_R8A774A1=y
-CONFIG_ARCH_R8A774C0=y
-CONFIG_ARCH_R8A7795=y
-CONFIG_ARCH_R8A7796=y
-CONFIG_ARCH_R8A77965=y
-CONFIG_ARCH_R8A77970=y
-CONFIG_ARCH_R8A77980=y
-CONFIG_ARCH_R8A77990=y
-CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_STRATIX10=y
@@ -68,25 +60,6 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
CONFIG_ARCH_ZX=y
CONFIG_ARCH_ZYNQMP=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_IOV=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_AARDVARK=y
-CONFIG_PCI_TEGRA=y
-CONFIG_PCIE_RCAR=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_XGENE=y
-CONFIG_PCI_HOST_THUNDER_PEM=y
-CONFIG_PCI_HOST_THUNDER_ECAM=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_HISI_STB=y
CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_NUMA=y
@@ -112,6 +85,7 @@ CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_TEGRA186_CPUFREQ=y
CONFIG_ARM_SCPI_PROTOCOL=y
CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_INTEL_STRATIX10_SERVICE=y
CONFIG_TI_SCI_PROTOCOL=y
CONFIG_EFI_CAPSULE_LOADER=y
CONFIG_IMX_SCU=y
@@ -196,17 +170,36 @@ CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=m
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCIE_ALTERA=y
+CONFIG_PCIE_ALTERA_MSI=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=32
CONFIG_HISILICON_LPC=y
CONFIG_SIMPLE_PM_BUS=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_NAND_QCOM=y
@@ -222,10 +215,10 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SAS_ATA=y
CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
-CONFIG_SCSI_UFSHCD=m
-CONFIG_SCSI_UFSHCD_PLATFORM=m
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=m
-CONFIG_SCSI_UFS_HISI=m
+CONFIG_SCSI_UFS_HISI=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -364,6 +357,7 @@ CONFIG_SPI=y
CONFIG_SPI_ARMADA_3700=y
CONFIG_SPI_BCM2835=m
CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_NXP_FLEXSPI=y
CONFIG_SPI_MESON_SPICC=m
CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_ORION=y
@@ -372,7 +366,7 @@ CONFIG_SPI_ROCKCHIP=y
CONFIG_SPI_QUP=y
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
-CONFIG_SPI_NXP_FLEXSPI=y
+CONFIG_SPI_SUN6I=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
@@ -387,7 +381,6 @@ CONFIG_PINCTRL_QCS404=y
CONFIG_PINCTRL_QDF2XXX=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_SDM845=y
-CONFIG_PINCTRL_MTK_MOORE=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_MB86S7X=y
CONFIG_GPIO_PL061=y
@@ -408,6 +401,7 @@ CONFIG_BATTERY_SBS=m
CONFIG_BATTERY_BQ27XXX=y
CONFIG_SENSORS_ARM_SCPI=y
CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_PWM_FAN=m
CONFIG_SENSORS_RASPBERRYPI_HWMON=m
CONFIG_SENSORS_INA2XX=m
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
@@ -432,6 +426,7 @@ CONFIG_MESON_WATCHDOG=m
CONFIG_RENESAS_WDT=y
CONFIG_UNIPHIER_WATCHDOG=y
CONFIG_BCM2835_WDT=y
+CONFIG_MFD_ALTERA_SYSMGR=y
CONFIG_MFD_BD9571MWV=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_MFD_AXP20X_RSB=y
@@ -472,14 +467,14 @@ CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_DVB_NET is not set
-CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_SUN6I_CSI=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
-CONFIG_VIDEO_SUN6I_CSI=m
CONFIG_VIDEO_RENESAS_FCP=m
CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_DRM=m
@@ -498,7 +493,6 @@ CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=m
CONFIG_DRM_SUN4I=m
CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
@@ -513,7 +507,6 @@ CONFIG_DRM_MESON=m
CONFIG_DRM_PL111=m
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_LP855X=m
@@ -522,22 +515,24 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_HDA_TEGRA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
CONFIG_SND_SOC=y
CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
CONFIG_SND_SOC_RK3399_GRU_SOUND=m
-CONFIG_SND_MESON_AXG_SOUND_CARD=m
CONFIG_SND_SOC_SAMSUNG=y
CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_AK4613=m
-CONFIG_SND_SOC_PCM3168A_I2C=m
-CONFIG_SND_SIMPLE_CARD=m
-CONFIG_SND_AUDIO_GRAPH_CARD=m
CONFIG_SND_SOC_ES7134=m
CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
CONFIG_I2C_HID=m
CONFIG_USB=y
CONFIG_USB_OTG=y
@@ -605,6 +600,7 @@ CONFIG_EDAC_GHES=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_RX8581=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_EFI=y
@@ -619,6 +615,7 @@ CONFIG_RTC_DRV_XGENE=y
CONFIG_DMADEVICES=y
CONFIG_DMA_BCM2835=m
CONFIG_K3_DMA=y
+CONFIG_MV_XOR=y
CONFIG_MV_XOR_V2=y
CONFIG_PL330_DMA=y
CONFIG_TEGRA20_APB_DMA=y
@@ -676,7 +673,6 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_RPMSG_QCOM_SMD=y
CONFIG_RASPBERRYPI_POWER=y
-CONFIG_IMX_GPCV2_PM_DOMAINS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GENI_SE=y
CONFIG_QCOM_GLINK_SSR=m
@@ -685,6 +681,15 @@ CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD_RPM=y
CONFIG_QCOM_SMP2P=y
CONFIG_QCOM_SMSM=y
+CONFIG_ARCH_R8A774A1=y
+CONFIG_ARCH_R8A774C0=y
+CONFIG_ARCH_R8A7795=y
+CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
@@ -740,6 +745,12 @@ CONFIG_QCOM_QFPROM=y
CONFIG_ROCKCHIP_EFUSE=y
CONFIG_UNIPHIER_EFUSE=y
CONFIG_MESON_EFUSE=m
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_STRATIX10_SOC=m
+CONFIG_FPGA_BRIDGE=m
+CONFIG_ALTERA_FREEZE_BRIDGE=m
+CONFIG_FPGA_REGION=m
+CONFIG_OF_FPGA_REGION=m
CONFIG_TEE=y
CONFIG_OPTEE=y
CONFIG_EXT2_FS=y
@@ -770,6 +781,8 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 5fc6f51908fd..cb89c80800b5 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -14,6 +14,7 @@
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
u32 abytes, u32 *macp)
{
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
kernel_neon_begin();
ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
while (walk.nbytes) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -372,7 +373,7 @@ static struct aead_alg ccm_aes_alg = {
static int __init aes_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_AES))
+ if (!cpu_have_named_feature(AES))
return -ENODEV;
return crypto_register_aead(&ccm_aes_alg);
}
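
The may_use_simd() -> crypto_simd_usable() conversions in this and the following crypto glue files all implement the same shape: check whether kernel-mode NEON may be used in the current context, bracket the NEON work with kernel_neon_begin()/kernel_neon_end() if so, and fall back to the scalar C implementation otherwise. A minimal kernel-style sketch of that pattern; do_neon() and do_scalar() are hypothetical stand-ins, not part of this patch:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <crypto/internal/simd.h>	/* crypto_simd_usable() */
	#include <asm/neon.h>			/* kernel_neon_begin()/kernel_neon_end() */

	/* Hypothetical stand-ins for the real NEON and scalar C routines. */
	static void do_neon(u8 *dst, const u8 *src, int len)
	{
		memcpy(dst, src, len);		/* placeholder for the NEON implementation */
	}

	static void do_scalar(u8 *dst, const u8 *src, int len)
	{
		memcpy(dst, src, len);		/* placeholder for the C fallback */
	}

	static void crypt_block(u8 *dst, const u8 *src, int len)
	{
		if (!crypto_simd_usable()) {
			/* e.g. hard IRQ context, or the NEON unit is already in use */
			do_scalar(dst, src, len);
			return;
		}

		kernel_neon_begin();
		do_neon(dst, src, len);
		kernel_neon_end();
	}
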
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index e6b3227bbf57..3213843fcb46 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
return;
}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
return;
}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 1e676625ef33..f0ceb545bd1e 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(ctx, req);
return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
{
int rounds = 6 + ctx->key_length / 4;
- if (may_use_simd()) {
+ if (crypto_simd_usable()) {
kernel_neon_begin();
aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
enc_after);
@@ -707,7 +707,7 @@ static int cbcmac_final(struct shash_desc *desc, u8 *out)
struct mac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
struct mac_desc_ctx *ctx = shash_desc_ctx(desc);
- mac_do_update(&tctx->key, NULL, 0, ctx->dg, 1, 0);
+ mac_do_update(&tctx->key, NULL, 0, ctx->dg, (ctx->len != 0), 0);
memcpy(out, ctx->dg, AES_BLOCK_SIZE);
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index e7a95a566462..02b65d9eb947 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return aes_ctr_encrypt_fallback(&ctx->fallback, req);
return ctr_encrypt(req);
@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, false);
+ if (err)
+ return err;
kernel_neon_begin();
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);
@@ -440,7 +442,7 @@ static int __init aes_init(void)
int err;
int i;
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
index bece1d85bd81..82029cda2e77 100644
--- a/arch/arm64/crypto/chacha-neon-glue.c
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -21,6 +21,7 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16];
u8 real_iv[16];
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
crypto_chacha_init(state, ctx, req->iv);
@@ -173,7 +174,7 @@ static struct skcipher_alg algs[] = {
static int __init chacha_simd_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index dd325829ee44..2e0a7d2eee24 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/neon.h>
#include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull_p8(*crc, data, length);
kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
{
u16 *crc = shash_desc_ctx(desc);
- if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
kernel_neon_begin();
*crc = crc_t10dif_pmull_p64(*crc, data, length);
kernel_neon_end();
@@ -101,7 +102,7 @@ static struct shash_alg crc_t10dif_alg[] = {{
static int __init crc_t10dif_mod_init(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
return crypto_register_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
@@ -111,7 +112,7 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_exit(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
crypto_unregister_shashes(crc_t10dif_alg,
ARRAY_SIZE(crc_t10dif_alg));
else
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 791ad422c427..b39ed99b06fb 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -17,6 +17,7 @@
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head))
{
- if (likely(may_use_simd())) {
+ if (likely(crypto_simd_usable())) {
kernel_neon_begin();
simd_update(blocks, dg, src, key, head);
kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)
err = skcipher_walk_aead_encrypt(&walk, req, false);
- if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
@@ -473,9 +474,11 @@ static int gcm_encrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ const int blocks =
+ walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
+ int remaining = blocks;
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +488,9 @@ static int gcm_encrypt(struct aead_request *req)
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
- } while (--blocks > 0);
+ } while (--remaining > 0);
- ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+ ghash_do_update(blocks, dg,
walk.dst.virt.addr, &ctx->ghash_key,
NULL, pmull_ghash_update_p64);
@@ -563,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)
err = skcipher_walk_aead_decrypt(&walk, req, false);
- if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+ if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
u32 const *rk = NULL;
kernel_neon_begin();
@@ -609,7 +612,7 @@ static int gcm_decrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
- int blocks = walk.nbytes / AES_BLOCK_SIZE;
+ int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
@@ -704,10 +707,10 @@ static int __init ghash_ce_mod_init(void)
{
int ret;
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
ret = crypto_register_shashes(ghash_alg,
ARRAY_SIZE(ghash_alg));
else
@@ -717,7 +720,7 @@ static int __init ghash_ce_mod_init(void)
if (ret)
return ret;
- if (elf_hwcap & HWCAP_PMULL) {
+ if (cpu_have_named_feature(PMULL)) {
ret = crypto_register_aead(&gcm_aes_alg);
if (ret)
crypto_unregister_shashes(ghash_alg,
@@ -728,7 +731,7 @@ static int __init ghash_ce_mod_init(void)
static void __exit ghash_ce_mod_exit(void)
{
- if (elf_hwcap & HWCAP_PMULL)
+ if (cpu_have_named_feature(PMULL))
crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
else
crypto_unregister_shash(ghash_alg);
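
A small arithmetic note on the non-SIMD gcm_encrypt()/gcm_decrypt() hunks above: the per-iteration block count is now rounded down to an even number. With AES_BLOCK_SIZE = 16 and, say, walk.nbytes = 80, the old walk.nbytes / AES_BLOCK_SIZE gave 5 blocks, while walk.nbytes / (2 * AES_BLOCK_SIZE) * 2 gives 4. Since the surrounding loop and the walk advance in units of 2 * AES_BLOCK_SIZE, this keeps the bytes actually consumed in step with the walk accounting and leaves the odd tail block to the dedicated tail path.
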
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
index 22cc32ac9448..895d3727c1fb 100644
--- a/arch/arm64/crypto/nhpoly1305-neon-glue.c
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -9,6 +9,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_neon_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !may_use_simd())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
@@ -56,7 +57,7 @@ static struct shash_alg nhpoly1305_alg = {
static int __init nhpoly1305_mod_init(void)
{
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
return -ENODEV;
return crypto_register_shash(&nhpoly1305_alg);
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
index 17fac2889f56..eaa7a8258f1c 100644
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
#include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_update(desc, data, len);
sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha1_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, NULL, 0, out);
sctx->finalize = 0;
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 261f5195cab7..a725997e55f2 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
{
struct sha256_ce_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
sha256_base_do_finalize(desc,
(sha256_block_fn *)sha256_block_data_order);
return sha256_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 4aedeaefd61f..e62298740e31 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -14,6 +14,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
#include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha256_base_do_update(desc, data, len,
(sha256_block_fn *)sha256_block_data_order);
@@ -173,7 +174,7 @@ static int __init sha256_mod_init(void)
if (ret)
return ret;
- if (elf_hwcap & HWCAP_ASIMD) {
+ if (cpu_have_named_feature(ASIMD)) {
ret = crypto_register_shashes(neon_algs, ARRAY_SIZE(neon_algs));
if (ret)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
@@ -183,7 +184,7 @@ static int __init sha256_mod_init(void)
static void __exit sha256_mod_fini(void)
{
- if (elf_hwcap & HWCAP_ASIMD)
+ if (cpu_have_named_feature(ASIMD))
crypto_unregister_shashes(neon_algs, ARRAY_SIZE(neon_algs));
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
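
In these module init/exit paths the raw elf_hwcap bit tests become cpu_have_named_feature() checks. Per the cpufeature.h hunk later in this merge, that macro routes through the kernel-internal feature numbering rather than the user-visible hwcap bit masks, for example:

	cpu_have_named_feature(ASIMD)
		=> cpu_have_feature(cpu_feature(ASIMD))
		=> cpu_have_feature(KERNEL_HWCAP_ASIMD)

which keeps the checks uniform now that the ELF hwcaps are reported through both cpu_get_elf_hwcap() and cpu_get_elf_hwcap2() and no longer fit a single bit-mask test.
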
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index a336feac0f59..9a4bbfc45f40 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -14,6 +14,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha3.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha3_update(desc, data, len);
if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
__le64 *digest = (__le64 *)out;
int i;
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sha3_final(desc, out);
sctx->buf[sctx->partial++] = 0x06;
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
index f2c5f28c622a..2369540040aa 100644
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -13,6 +13,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
#include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return sha512_base_do_update(desc, data, len,
(sha512_block_fn *)sha512_block_data_order);
@@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
if (len)
sha512_base_do_update(desc, data, len,
(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
static int sha512_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
sha512_base_do_finalize(desc,
(sha512_block_fn *)sha512_block_data_order);
return sha512_base_finish(desc, out);
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
index 88938a20d9b2..5d15533799a2 100644
--- a/arch/arm64/crypto/sm3-ce-glue.c
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -12,6 +12,7 @@
#include <asm/simd.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_update(desc, data, len);
kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
static int sm3_ce_final(struct shash_desc *desc, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_finup(desc, NULL, 0, out);
kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- if (!may_use_simd())
+ if (!crypto_simd_usable())
return crypto_sm3_finup(desc, data, len, out);
kernel_neon_begin();
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 0c4fc223f225..2754c875d39c 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -3,6 +3,7 @@
#include <asm/neon.h>
#include <asm/simd.h>
#include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
crypto_sm4_encrypt(tfm, out, in);
} else {
kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!may_use_simd()) {
+ if (!crypto_simd_usable()) {
crypto_sm4_decrypt(tfm, out, in);
} else {
kernel_neon_begin();
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 1e17ea5c372b..1de6e05ce48b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -13,14 +13,12 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += msi.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
-generic-y += segment.h
generic-y += serial.h
generic-y += set_memory.h
-generic-y += sizes.h
generic-y += switch_to.h
generic-y += trace_clock.h
generic-y += unaligned.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index f2a234d6516c..b7bca1ae09e6 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -31,11 +31,23 @@
#include <clocksource/arm_arch_timer.h>
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
-extern struct static_key_false arch_timer_read_ool_enabled;
-#define needs_unstable_timer_counter_workaround() \
- static_branch_unlikely(&arch_timer_read_ool_enabled)
+#define has_erratum_handler(h) \
+ ({ \
+ const struct arch_timer_erratum_workaround *__wa; \
+ __wa = __this_cpu_read(timer_unstable_counter_workaround); \
+ (__wa && __wa->h); \
+ })
+
+#define erratum_handler(h) \
+ ({ \
+ const struct arch_timer_erratum_workaround *__wa; \
+ __wa = __this_cpu_read(timer_unstable_counter_workaround); \
+ (__wa && __wa->h) ? __wa->h : arch_timer_##h; \
+ })
+
#else
-#define needs_unstable_timer_counter_workaround() false
+#define has_erratum_handler(h) false
+#define erratum_handler(h) (arch_timer_##h)
#endif
enum arch_timer_erratum_match_type {
@@ -61,23 +73,37 @@ struct arch_timer_erratum_workaround {
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
timer_unstable_counter_workaround);
+/* inline sysreg accessors that make erratum_handler() work */
+static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
+{
+ return read_sysreg(cntp_tval_el0);
+}
+
+static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
+{
+ return read_sysreg(cntv_tval_el0);
+}
+
+static inline notrace u64 arch_timer_read_cntpct_el0(void)
+{
+ return read_sysreg(cntpct_el0);
+}
+
+static inline notrace u64 arch_timer_read_cntvct_el0(void)
+{
+ return read_sysreg(cntvct_el0);
+}
+
#define arch_timer_reg_read_stable(reg) \
-({ \
- u64 _val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *wa; \
+ ({ \
+ u64 _val; \
+ \
preempt_disable_notrace(); \
- wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (wa && wa->read_##reg) \
- _val = wa->read_##reg(); \
- else \
- _val = read_sysreg(reg); \
+ _val = erratum_handler(read_ ## reg)(); \
preempt_enable_notrace(); \
- } else { \
- _val = read_sysreg(reg); \
- } \
- _val; \
-})
+ \
+ _val; \
+ })
/*
* These register accessors are marked inline so the compiler can
@@ -148,18 +174,67 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
-static inline u64 arch_counter_get_cntpct(void)
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
+static inline u64 __arch_counter_get_cntpct_stable(void)
+{
+ u64 cnt;
+
+ isb();
+ cnt = arch_timer_reg_read_stable(cntpct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
+}
+
+static inline u64 __arch_counter_get_cntpct(void)
{
+ u64 cnt;
+
isb();
- return arch_timer_reg_read_stable(cntpct_el0);
+ cnt = read_sysreg(cntpct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
}
-static inline u64 arch_counter_get_cntvct(void)
+static inline u64 __arch_counter_get_cntvct_stable(void)
{
+ u64 cnt;
+
isb();
- return arch_timer_reg_read_stable(cntvct_el0);
+ cnt = arch_timer_reg_read_stable(cntvct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
}
+static inline u64 __arch_counter_get_cntvct(void)
+{
+ u64 cnt;
+
+ isb();
+ cnt = read_sysreg(cntvct_el0);
+ arch_counter_enforce_ordering(cnt);
+ return cnt;
+}
+
+#undef arch_counter_enforce_ordering
+
static inline int arch_timer_arch_init(void)
{
return 0;
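
The arch_timer.h rework above replaces the static-key test plus open-coded per-CPU lookup with two macros: has_erratum_handler() and erratum_handler(), where the latter resolves to either the per-CPU workaround's handler or the default arch_timer_* sysreg accessor, so arch_timer_reg_read_stable() becomes a single indirect call between preempt_disable_notrace()/preempt_enable_notrace(). A stand-alone C model of that dispatch (plain userspace code; the struct, the per-CPU pointer and both readers are simplified stand-ins, not the kernel API):

	#include <stdio.h>

	/* Stand-in for struct arch_timer_erratum_workaround. */
	struct erratum_workaround {
		unsigned long (*read_counter)(void);
	};

	static unsigned long default_read_counter(void)
	{
		return 1000;	/* pretend this is the plain sysreg read */
	}

	static unsigned long fixed_read_counter(void)
	{
		return 2000;	/* pretend this is the erratum-safe read */
	}

	/* Stand-in for the per-CPU timer_unstable_counter_workaround pointer. */
	static const struct erratum_workaround *cpu_workaround;

	/* Mirrors erratum_handler(h): the workaround's handler if present, else the default. */
	#define erratum_handler(h) \
		((cpu_workaround && cpu_workaround->h) ? cpu_workaround->h : default_##h)

	int main(void)
	{
		static const struct erratum_workaround wa = {
			.read_counter = fixed_read_counter,
		};

		printf("%lu\n", erratum_handler(read_counter)());	/* 1000: no workaround */
		cpu_workaround = &wa;
		printf("%lu\n", erratum_handler(read_counter)());	/* 2000: workaround in use */
		return 0;
	}
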
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c5308d01e228..039fbd822ec6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -407,10 +407,14 @@ alternative_endif
.ifc \op, cvap
sys 3, c7, c12, 1, \kaddr // dc cvap
.else
+ .ifc \op, cvadp
+ sys 3, c7, c13, 1, \kaddr // dc cvadp
+ .else
dc \op, \kaddr
.endif
.endif
.endif
+ .endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b
@@ -442,8 +446,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
.macro reset_pmuserenr_el0, tmpreg
- mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx \tmpreg, \tmpreg, #8, #4
+ mrs \tmpreg, id_aa64dfr0_el1
+ sbfx \tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp \tmpreg, #1 // Skip if no PMU present
b.lt 9000f
msr pmuserenr_el0, xzr // Disable PMU access from EL0
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index f66bb04fdf2d..85b6bedbcc68 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -20,6 +20,8 @@
#ifndef __ASSEMBLY__
+#include <linux/kasan-checks.h>
+
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) asm volatile(__nops(n))
@@ -72,31 +74,33 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
#define __smp_store_release(p, v) \
do { \
+ typeof(p) __p = (p); \
union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
+ kasan_check_write(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u8 *)__u.__c) \
: "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u16 *)__u.__c) \
: "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u32 *)__u.__c) \
: "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
+ : "=Q" (*__p) \
: "r" (*(__u64 *)__u.__c) \
: "memory"); \
break; \
@@ -106,27 +110,29 @@ do { \
#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
+ typeof(p) __p = (p); \
compiletime_assert_atomic_type(*p); \
+ kasan_check_read(__p, sizeof(*p)); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
: "=r" (*(__u8 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
: "=r" (*(__u16 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (*(__u32 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
: "=r" (*(__u64 *)__u.__c) \
- : "Q" (*p) : "memory"); \
+ : "Q" (*__p) : "memory"); \
break; \
} \
__u.__val; \
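
For reference, the contract that __smp_store_release()/__smp_load_acquire() provide via stlr/ldar is ordinary release/acquire pairing; the kasan_check_write()/kasan_check_read() calls added above only instrument the access for KASAN, and the new local __p merely avoids re-evaluating the macro argument. A minimal, non-arm64-specific illustration of the pairing with C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static int payload;		/* plain data published by the writer */
	static _Atomic int ready;	/* flag: written with release, read with acquire */

	static void writer(void)
	{
		payload = 42;						 /* A: ordinary store */
		atomic_store_explicit(&ready, 1, memory_order_release); /* B: cannot move before A */
	}

	static void reader(void)
	{
		if (atomic_load_explicit(&ready, memory_order_acquire)) /* C: pairs with B */
			printf("%d\n", payload);			 /* if C saw 1, this sees 42 */
	}

	int main(void)
	{
		writer();	/* single-threaded here; the guarantee matters across CPUs/threads */
		reader();
		return 0;
	}
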
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
index 355e552a9175..c7f67da13cd9 100644
--- a/arch/arm64/include/asm/boot.h
+++ b/arch/arm64/include/asm/boot.h
@@ -3,7 +3,7 @@
#ifndef __ASM_BOOT_H
#define __ASM_BOOT_H
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/*
* arm64 requires the DTB to be 8 byte aligned and
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index 2945fe6cd863..d84294064e6a 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -11,6 +11,8 @@
/*
* #imm16 values used for BRK instruction generation
+ * 0x004: for installing kprobes
+ * 0x005: for installing uprobes
* Allowed values for kgdb are 0x400 - 0x7ff
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
@@ -18,10 +20,13 @@
* 0x800: kernel-mode BUG() and WARN() traps
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
+#define KPROBES_BRK_IMM 0x004
+#define UPROBES_BRK_IMM 0x005
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
#define KASAN_BRK_IMM 0x900
+#define KASAN_BRK_MASK 0x0ff
#endif
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f6a76e43f39e..defdc67d9ab4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -61,7 +61,8 @@
#define ARM64_HAS_GENERIC_AUTH_ARCH 40
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
#define ARM64_HAS_IRQ_PRIO_MASKING 42
+#define ARM64_HAS_DCPODP 43
-#define ARM64_NCAPS 43
+#define ARM64_NCAPS 44
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e505e1fbd2b9..bc895c869892 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -14,15 +14,8 @@
#include <asm/hwcap.h>
#include <asm/sysreg.h>
-/*
- * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
- * in the kernel and for user space to keep track of which optional features
- * are supported by the current system. So let's map feature 'x' to HWCAP_x.
- * Note that HWCAP_x constants are bit fields so we need to take the log.
- */
-
-#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
-#define cpu_feature(x) ilog2(HWCAP_ ## x)
+#define MAX_CPU_FEATURES 64
+#define cpu_feature(x) KERNEL_HWCAP_ ## x
#ifndef __ASSEMBLY__
@@ -399,14 +392,16 @@ extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
bool this_cpu_has_cap(unsigned int cap);
+void cpu_set_feature(unsigned int num);
+bool cpu_have_feature(unsigned int num);
+unsigned long cpu_get_elf_hwcap(void);
+unsigned long cpu_get_elf_hwcap2(void);
-static inline bool cpu_have_feature(unsigned int num)
-{
- return elf_hwcap & (1UL << num);
-}
+#define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
+#define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
/* System capability check for constant caps */
-static inline bool __cpus_have_const_cap(int num)
+static __always_inline bool __cpus_have_const_cap(int num)
{
if (num >= ARM64_NCAPS)
return false;
@@ -420,7 +415,7 @@ static inline bool cpus_have_cap(unsigned int num)
return test_bit(num, cpu_hwcaps);
}
-static inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)
{
if (static_branch_likely(&arm64_const_caps_ready))
return __cpus_have_const_cap(num);
@@ -638,11 +633,7 @@ static inline int arm64_get_ssbd_state(void)
#endif
}
-#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 5f1437099b99..2602bae334fb 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -89,6 +89,7 @@
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
+#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define APM_CPU_PART_POTENZA 0x000
@@ -118,6 +119,7 @@
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index a44cf5225429..0679f781696d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -65,12 +65,9 @@
#define CACHE_FLUSH_IS_SAFE 1
/* kprobes BRK opcodes with ESR encoding */
-#define BRK64_ESR_MASK 0xFFFF
-#define BRK64_ESR_KPROBES 0x0004
-#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
/* uprobes BRK opcodes with ESR encoding */
-#define BRK64_ESR_UPROBES 0x0005
-#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (BRK64_ESR_UPROBES << 5))
+#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
/* AArch32 */
#define DBG_ESR_EVT_BKPT 0x4
@@ -94,18 +91,24 @@ struct step_hook {
int (*fn)(struct pt_regs *regs, unsigned int esr);
};
-void register_step_hook(struct step_hook *hook);
-void unregister_step_hook(struct step_hook *hook);
+void register_user_step_hook(struct step_hook *hook);
+void unregister_user_step_hook(struct step_hook *hook);
+
+void register_kernel_step_hook(struct step_hook *hook);
+void unregister_kernel_step_hook(struct step_hook *hook);
struct break_hook {
struct list_head node;
- u32 esr_val;
- u32 esr_mask;
int (*fn)(struct pt_regs *regs, unsigned int esr);
+ u16 imm;
+ u16 mask; /* These bits are ignored when comparing with imm */
};
-void register_break_hook(struct break_hook *hook);
-void unregister_break_hook(struct break_hook *hook);
+void register_user_break_hook(struct break_hook *hook);
+void unregister_user_break_hook(struct break_hook *hook);
+
+void register_kernel_break_hook(struct break_hook *hook);
+void unregister_kernel_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
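
With this change break hooks are split into user and kernel variants and match on the 16-bit BRK immediate (imm) with an optional ignore mask, instead of carrying a full ESR value/mask pair. A sketch of a kernel-side hook using the new interface; the immediate value and the handler are purely illustrative assumptions, nothing in this patch defines them:

	#include <linux/ptrace.h>
	#include <asm/debug-monitors.h>

	#define DEMO_BRK_IMM	0x123	/* hypothetical immediate, chosen only for this example */

	static int demo_brk_handler(struct pt_regs *regs, unsigned int esr)
	{
		/* Step over the 4-byte BRK instruction and resume. */
		instruction_pointer_set(regs, instruction_pointer(regs) + 4);
		return DBG_HOOK_HANDLED;
	}

	static struct break_hook demo_brk_hook = {
		.fn	= demo_brk_handler,
		.imm	= DEMO_BRK_IMM,
		/* .mask left at 0: every bit of the immediate must match */
	};

	static void demo_register_hook(void)
	{
		register_kernel_break_hook(&demo_brk_hook);
	}
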
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 6adc1a90e7e6..355d120b78cb 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -214,10 +214,10 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
set_thread_flag(TIF_32BIT); \
})
#define COMPAT_ARCH_DLINFO
-extern int aarch32_setup_vectors_page(struct linux_binprm *bprm,
- int uses_interp);
+extern int aarch32_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
#define compat_arch_setup_additional_pages \
- aarch32_setup_vectors_page
+ aarch32_setup_additional_pages
#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 52233f00d53d..0e27fe91d5ea 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -156,9 +156,7 @@
ESR_ELx_WFx_ISS_WFI)
/* BRK instruction trap from AArch64 state */
-#define ESR_ELx_VAL_BRK64(imm) \
- ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
- ((imm) & 0xffff))
+#define ESR_ELx_BRK64_ISS_COMMENT_MASK 0xffff
/* ISS field definitions for System instruction traps */
#define ESR_ELx_SYS64_ISS_RES0_SHIFT 22
@@ -198,9 +196,10 @@
/*
* User space cache operations have the following sysreg encoding
* in System instructions.
- * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 14 }, WRITE (L=0)
+ * op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 12, 13, 14 }, WRITE (L=0)
*/
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
+#define ESR_ELx_SYS64_ISS_CRM_DC_CVADP 13
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAP 12
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index dd1ad3950ef5..df62bbd33a9a 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -24,10 +24,13 @@
#ifndef __ASSEMBLY__
+#include <linux/bitmap.h>
#include <linux/build_bug.h>
+#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
+#include <linux/types.h>
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/* Masks for extracting the FPSR and FPCR from the FPSCR */
@@ -56,7 +59,8 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_task_to_cpu(void);
-extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state);
+extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
+ void *sve_state, unsigned int sve_vl);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_flush_cpu_state(void);
@@ -87,6 +91,29 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
extern int __ro_after_init sve_max_vl;
+extern int __ro_after_init sve_max_virtualisable_vl;
+extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+
+/*
+ * Helpers to translate bit indices in sve_vq_map to VQ values (and
+ * vice versa). This allows find_next_bit() to be used to find the
+ * _maximum_ VQ not exceeding a certain value.
+ */
+static inline unsigned int __vq_to_bit(unsigned int vq)
+{
+ return SVE_VQ_MAX - vq;
+}
+
+static inline unsigned int __bit_to_vq(unsigned int bit)
+{
+ return SVE_VQ_MAX - bit;
+}
+
+/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
+static inline bool sve_vq_available(unsigned int vq)
+{
+ return test_bit(__vq_to_bit(vq), sve_vq_map);
+}
#ifdef CONFIG_ARM64_SVE
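
The new __vq_to_bit()/__bit_to_vq() helpers store the supported vector quanta in sve_vq_map with the order inverted (bit = SVE_VQ_MAX - vq), so larger VQs sit at smaller bit indices. That inversion is what makes the find_next_bit() trick mentioned in the comment work: starting a search at __vq_to_bit(bound) and taking the first set bit gives, via __bit_to_vq(), the largest supported VQ that does not exceed bound, with SVE_VQ_MAX mapping to bit 0 and SVE_VQ_MIN (1) to bit SVE_VQ_MAX - 1.
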
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c7e1a7837706..a56efb5626fa 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -23,26 +23,34 @@
#include <asm/errno.h>
+#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
+
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \
+ unsigned int loops = FUTEX_MAX_LOOPS; \
+ \
uaccess_enable(); \
asm volatile( \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w0, %w3, %2\n" \
-" cbnz %w0, 1b\n" \
-" dmb ish\n" \
+" cbz %w0, 3f\n" \
+" sub %w4, %w4, %w0\n" \
+" cbnz %w4, 1b\n" \
+" mov %w0, %w7\n" \
"3:\n" \
+" dmb ish\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
-"4: mov %w0, %w5\n" \
+"4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
- : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
- : "r" (oparg), "Ir" (-EFAULT) \
+ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
+ "+r" (loops) \
+ : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable(); \
} while (0)
@@ -57,23 +65,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %w3, %w4",
+ __futex_atomic_op("mov %w3, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %w3, %w1, %w4",
+ __futex_atomic_op("add %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %w3, %w1, %w4",
+ __futex_atomic_op("orr %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %w3, %w1, %w4",
+ __futex_atomic_op("and %w3, %w1, %w5",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %w3, %w1, %w4",
+ __futex_atomic_op("eor %w3, %w1, %w5",
ret, oldval, uaddr, tmp, oparg);
break;
default:
@@ -93,6 +101,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
+ unsigned int loops = FUTEX_MAX_LOOPS;
u32 val, tmp;
u32 __user *uaddr;
@@ -104,24 +113,30 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
-" sub %w3, %w1, %w4\n"
-" cbnz %w3, 3f\n"
-"2: stlxr %w3, %w5, %2\n"
-" cbnz %w3, 1b\n"
-" dmb ish\n"
+" sub %w3, %w1, %w5\n"
+" cbnz %w3, 4f\n"
+"2: stlxr %w3, %w6, %2\n"
+" cbz %w3, 3f\n"
+" sub %w4, %w4, %w3\n"
+" cbnz %w4, 1b\n"
+" mov %w0, %w8\n"
"3:\n"
+" dmb ish\n"
+"4:\n"
" .pushsection .fixup,\"ax\"\n"
-"4: mov %w0, %w6\n"
-" b 3b\n"
+"5: mov %w0, %w7\n"
+" b 4b\n"
" .popsection\n"
- _ASM_EXTABLE(1b, 4b)
- _ASM_EXTABLE(2b, 4b)
- : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
- : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+ _ASM_EXTABLE(1b, 5b)
+ _ASM_EXTABLE(2b, 5b)
+ : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+ : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
: "memory");
uaccess_disable();
- *uval = val;
+ if (!ret)
+ *uval = val;
+
return ret;
}
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6a07a3b433e..4aad6382f631 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -70,8 +70,4 @@ extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
#include <asm-generic/hugetlb.h>
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 400b80b49595..b4bfb6672168 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -17,6 +17,7 @@
#define __ASM_HWCAP_H
#include <uapi/asm/hwcap.h>
+#include <asm/cpufeature.h>
#define COMPAT_HWCAP_HALF (1 << 1)
#define COMPAT_HWCAP_THUMB (1 << 2)
@@ -40,11 +41,67 @@
#define COMPAT_HWCAP2_CRC32 (1 << 4)
#ifndef __ASSEMBLY__
+#include <linux/log2.h>
+
+/*
+ * For userspace we represent hwcaps as a collection of HWCAP{,2}_x bitfields
+ * as described in uapi/asm/hwcap.h. For the kernel we represent hwcaps as
+ * natural numbers (in a single range of size MAX_CPU_FEATURES) defined here
+ * with prefix KERNEL_HWCAP_ mapped to their HWCAP{,2}_x counterpart.
+ *
+ * Hwcaps should be set and tested within the kernel via
+ * cpu_{set,have}_named_feature(feature), where feature is the unique suffix
+ * of KERNEL_HWCAP_{feature}.
+ */
+#define __khwcap_feature(x) const_ilog2(HWCAP_ ## x)
+#define KERNEL_HWCAP_FP __khwcap_feature(FP)
+#define KERNEL_HWCAP_ASIMD __khwcap_feature(ASIMD)
+#define KERNEL_HWCAP_EVTSTRM __khwcap_feature(EVTSTRM)
+#define KERNEL_HWCAP_AES __khwcap_feature(AES)
+#define KERNEL_HWCAP_PMULL __khwcap_feature(PMULL)
+#define KERNEL_HWCAP_SHA1 __khwcap_feature(SHA1)
+#define KERNEL_HWCAP_SHA2 __khwcap_feature(SHA2)
+#define KERNEL_HWCAP_CRC32 __khwcap_feature(CRC32)
+#define KERNEL_HWCAP_ATOMICS __khwcap_feature(ATOMICS)
+#define KERNEL_HWCAP_FPHP __khwcap_feature(FPHP)
+#define KERNEL_HWCAP_ASIMDHP __khwcap_feature(ASIMDHP)
+#define KERNEL_HWCAP_CPUID __khwcap_feature(CPUID)
+#define KERNEL_HWCAP_ASIMDRDM __khwcap_feature(ASIMDRDM)
+#define KERNEL_HWCAP_JSCVT __khwcap_feature(JSCVT)
+#define KERNEL_HWCAP_FCMA __khwcap_feature(FCMA)
+#define KERNEL_HWCAP_LRCPC __khwcap_feature(LRCPC)
+#define KERNEL_HWCAP_DCPOP __khwcap_feature(DCPOP)
+#define KERNEL_HWCAP_SHA3 __khwcap_feature(SHA3)
+#define KERNEL_HWCAP_SM3 __khwcap_feature(SM3)
+#define KERNEL_HWCAP_SM4 __khwcap_feature(SM4)
+#define KERNEL_HWCAP_ASIMDDP __khwcap_feature(ASIMDDP)
+#define KERNEL_HWCAP_SHA512 __khwcap_feature(SHA512)
+#define KERNEL_HWCAP_SVE __khwcap_feature(SVE)
+#define KERNEL_HWCAP_ASIMDFHM __khwcap_feature(ASIMDFHM)
+#define KERNEL_HWCAP_DIT __khwcap_feature(DIT)
+#define KERNEL_HWCAP_USCAT __khwcap_feature(USCAT)
+#define KERNEL_HWCAP_ILRCPC __khwcap_feature(ILRCPC)
+#define KERNEL_HWCAP_FLAGM __khwcap_feature(FLAGM)
+#define KERNEL_HWCAP_SSBS __khwcap_feature(SSBS)
+#define KERNEL_HWCAP_SB __khwcap_feature(SB)
+#define KERNEL_HWCAP_PACA __khwcap_feature(PACA)
+#define KERNEL_HWCAP_PACG __khwcap_feature(PACG)
+
+#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 32)
+#define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP)
+#define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2)
+#define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES)
+#define KERNEL_HWCAP_SVEPMULL __khwcap2_feature(SVEPMULL)
+#define KERNEL_HWCAP_SVEBITPERM __khwcap2_feature(SVEBITPERM)
+#define KERNEL_HWCAP_SVESHA3 __khwcap2_feature(SVESHA3)
+#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
+
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
*/
-#define ELF_HWCAP (elf_hwcap)
+#define ELF_HWCAP cpu_get_elf_hwcap()
+#define ELF_HWCAP2 cpu_get_elf_hwcap2()
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
@@ -60,6 +117,5 @@ enum {
#endif
};
-extern unsigned long elf_hwcap;
#endif
#endif
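
As a usage sketch (not part of this patch), assuming the cpu_{set,have}_named_feature() helpers named in the comment above and the usual printk facilities:

static void example_report_sve(void)
{
	/* KERNEL_HWCAP_SVE == const_ilog2(HWCAP_SVE), i.e. the bit index of the user-visible flag */
	if (cpu_have_named_feature(SVE))
		pr_info("SVE advertised to userspace via AT_HWCAP bit %d\n",
			KERNEL_HWCAP_SVE);
}
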
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 9c01f04db64d..ec894de0ed4e 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -277,6 +277,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0xB8200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@@ -394,6 +395,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 8bb7210ac286..b807cb9b517d 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -124,8 +124,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_par(v) __iormb(v)
#define __iowmb() wmb()
-#define mmiowb() do { } while (0)
-
/*
* Relaxed I/O memory access primitives. These follow the Device memory
* ordering rules but do not guarantee any ordering relative to Normal memory
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 43d8366c1e87..629963189085 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -43,7 +43,7 @@ static inline void arch_local_irq_enable(void)
asm volatile(ALTERNATIVE(
"msr daifclr, #2 // arch_local_irq_enable\n"
"nop",
- "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+ __msr_s(SYS_ICC_PMR_EL1, "%0")
"dsb sy",
ARM64_HAS_IRQ_PRIO_MASKING)
:
@@ -55,7 +55,7 @@ static inline void arch_local_irq_disable(void)
{
asm volatile(ALTERNATIVE(
"msr daifset, #2 // arch_local_irq_disable",
- "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0",
+ __msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
: "r" ((unsigned long) GIC_PRIO_IRQOFF)
@@ -86,7 +86,7 @@ static inline unsigned long arch_local_save_flags(void)
"mov %0, %1\n"
"nop\n"
"nop",
- "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n"
+ __mrs_s("%0", SYS_ICC_PMR_EL1)
"ands %1, %1, " __stringify(PSR_I_BIT) "\n"
"csel %0, %0, %2, eq",
ARM64_HAS_IRQ_PRIO_MASKING)
@@ -116,7 +116,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
asm volatile(ALTERNATIVE(
"msr daif, %0\n"
"nop",
- "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+ __msr_s(SYS_ICC_PMR_EL1, "%0")
"dsb sy",
ARM64_HAS_IRQ_PRIO_MASKING)
: "+r" (flags)
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index d5a44cf859e9..21721fbf44e7 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -54,8 +54,6 @@ void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
-int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
-int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index f5b79e995f40..ff73f5462aca 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void);
.endm
.macro get_host_ctxt reg, tmp
- hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+ hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+ add \reg, \reg, #HOST_DATA_CONTEXT
.endm
.macro get_vcpu_ptr vcpu, ctxt
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index d3842791e1c4..613427fafff9 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -98,6 +98,22 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 |= HCR_TWE;
}
+static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
+}
+
+static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_ptrauth(vcpu))
+ vcpu_ptrauth_disable(vcpu);
+}
+
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a01fe087e022..2a8d3f8ca22c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -22,9 +22,13 @@
#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__
+#include <linux/bitmap.h>
#include <linux/types.h>
+#include <linux/jump_label.h>
#include <linux/kvm_types.h>
+#include <linux/percpu.h>
#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
@@ -45,7 +49,7 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 4
+#define KVM_VCPU_MAX_FEATURES 7
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
@@ -54,8 +58,12 @@
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+extern unsigned int kvm_sve_max_vl;
+int kvm_arm_init_sve(void);
+
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
@@ -117,6 +125,7 @@ enum vcpu_sysreg {
SCTLR_EL1, /* System Control Register */
ACTLR_EL1, /* Auxiliary Control Register */
CPACR_EL1, /* Coprocessor Access Control */
+ ZCR_EL1, /* SVE Control */
TTBR0_EL1, /* Translation Table Base Register 0 */
TTBR1_EL1, /* Translation Table Base Register 1 */
TCR_EL1, /* Translation Control Register */
@@ -152,6 +161,18 @@ enum vcpu_sysreg {
PMSWINC_EL0, /* Software Increment Register */
PMUSERENR_EL0, /* User Enable Register */
+ /* Pointer Authentication Registers in a strict increasing order. */
+ APIAKEYLO_EL1,
+ APIAKEYHI_EL1,
+ APIBKEYLO_EL1,
+ APIBKEYHI_EL1,
+ APDAKEYLO_EL1,
+ APDAKEYHI_EL1,
+ APDBKEYLO_EL1,
+ APDBKEYHI_EL1,
+ APGAKEYLO_EL1,
+ APGAKEYHI_EL1,
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -212,7 +233,17 @@ struct kvm_cpu_context {
struct kvm_vcpu *__hyp_running_vcpu;
};
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_pmu_events {
+ u32 events_host;
+ u32 events_guest;
+};
+
+struct kvm_host_data {
+ struct kvm_cpu_context host_ctxt;
+ struct kvm_pmu_events pmu_events;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
struct vcpu_reset_state {
unsigned long pc;
@@ -223,6 +254,8 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
+ void *sve_state;
+ unsigned int sve_max_vl;
/* HYP configuration */
u64 hcr_el2;
@@ -255,7 +288,7 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch external_debug_state;
/* Pointer to host CPU context */
- kvm_cpu_context_t *host_cpu_context;
+ struct kvm_cpu_context *host_cpu_context;
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
@@ -318,12 +351,40 @@ struct kvm_vcpu_arch {
bool sysregs_loaded_on_cpu;
};
+/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
+#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
+ sve_ffr_offset((vcpu)->arch.sve_max_vl)))
+
+#define vcpu_sve_state_size(vcpu) ({ \
+ size_t __size_ret; \
+ unsigned int __vcpu_vq; \
+ \
+ if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ __size_ret = 0; \
+ } else { \
+ __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ } \
+ \
+ __size_ret; \
+})
+
/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
+#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
+#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
+#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
+
+#define vcpu_has_sve(vcpu) (system_supports_sve() && \
+ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+
+#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
+ system_supports_generic_auth()) && \
+ ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
@@ -432,9 +493,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
int cpu)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -452,8 +513,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
*/
- u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
- (u64)kvm_ksym_ref(kvm_host_cpu_state));
+ u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
+ (u64)kvm_ksym_ref(kvm_host_data));
/*
* Call initialization code, and switch to the full blown HYP code.
@@ -491,9 +552,10 @@ static inline bool kvm_arch_requires_vhe(void)
return false;
}
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
+
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
@@ -516,11 +578,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
+{
+ return (!has_vhe() && attr->exclude_host);
+}
+
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_run_map_fp(vcpu);
}
+
+void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
+void kvm_clr_pmu_events(u32 clr);
+
+void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
+bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
+
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
+static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
static inline void kvm_arm_vhe_guest_enter(void)
@@ -594,4 +673,10 @@ void kvm_arch_free_vm(struct kvm *kvm);
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
+
+#define kvm_arm_vcpu_sve_finalized(vcpu) \
+ ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+
#endif /* __ARM64_KVM_HOST_H__ */
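
vcpu_sve_state_size() derives the per-vcpu SVE register storage from sve_max_vl. A hedged sketch of how a finalization path might size and allocate sve_state (the real code lives in the KVM arm64 reset/finalize path, which is not shown in this hunk):

/* Sketch only, not part of this patch. */
static int example_alloc_sve_state(struct kvm_vcpu *vcpu)
{
	size_t size;

	if (!vcpu_has_sve(vcpu))
		return 0;

	size = vcpu_sve_state_size(vcpu);
	if (!size)
		return -EINVAL;	/* sve_max_vl was never set up properly */

	vcpu->arch.sve_state = kzalloc(size, GFP_KERNEL);
	if (!vcpu->arch.sve_state)
		return -ENOMEM;
	return 0;
}
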
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 4da765f2cca5..09fe8bd15f6e 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -30,7 +30,7 @@
({ \
u64 reg; \
asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
- "mrs_s %0, " __stringify(r##vh),\
+ __mrs_s("%0", r##vh), \
ARM64_HAS_VIRT_HOST_EXTN) \
: "=r" (reg)); \
reg; \
@@ -40,7 +40,7 @@
do { \
u64 __val = (u64)(v); \
asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
- "msr_s " __stringify(r##vh) ", %x0",\
+ __msr_s(r##vh, "%x0"), \
ARM64_HAS_VIRT_HOST_EXTN) \
: : "rZ" (__val)); \
} while (0)
@@ -149,7 +149,6 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-bool __fpsimd_enabled(void);
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h
new file mode 100644
index 000000000000..6301813dcace
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_ptrauth.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore
+ * Copyright 2019 Arm Limited
+ * Authors: Mark Rutland <mark.rutland@arm.com>
+ * Amit Daniel Kachhap <amit.kachhap@arm.com>
+ */
+
+#ifndef __ASM_KVM_PTRAUTH_H
+#define __ASM_KVM_PTRAUTH_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+
+#define PTRAUTH_REG_OFFSET(x) (x - CPU_APIAKEYLO_EL1)
+
+/*
+ * The CPU_AP*_EL1 values exceed the immediate offset range (512) of the
+ * stp instruction, so the macros below take CPU_APIAKEYLO_EL1 as a base
+ * and compute the offset of each key from that base, avoiding an extra
+ * add instruction. These macros assume the key offsets follow the order
+ * of the sysreg enum in kvm_host.h.
+ */
+.macro ptrauth_save_state base, reg1, reg2
+ mrs_s \reg1, SYS_APIAKEYLO_EL1
+ mrs_s \reg2, SYS_APIAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
+ mrs_s \reg1, SYS_APIBKEYLO_EL1
+ mrs_s \reg2, SYS_APIBKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
+ mrs_s \reg1, SYS_APDAKEYLO_EL1
+ mrs_s \reg2, SYS_APDAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
+ mrs_s \reg1, SYS_APDBKEYLO_EL1
+ mrs_s \reg2, SYS_APDBKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
+ mrs_s \reg1, SYS_APGAKEYLO_EL1
+ mrs_s \reg2, SYS_APGAKEYHI_EL1
+ stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
+.endm
+
+.macro ptrauth_restore_state base, reg1, reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)]
+ msr_s SYS_APIAKEYLO_EL1, \reg1
+ msr_s SYS_APIAKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)]
+ msr_s SYS_APIBKEYLO_EL1, \reg1
+ msr_s SYS_APIBKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)]
+ msr_s SYS_APDAKEYLO_EL1, \reg1
+ msr_s SYS_APDAKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)]
+ msr_s SYS_APDBKEYLO_EL1, \reg1
+ msr_s SYS_APDBKEYHI_EL1, \reg2
+ ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)]
+ msr_s SYS_APGAKEYLO_EL1, \reg1
+ msr_s SYS_APGAKEYHI_EL1, \reg2
+.endm
+
+/*
+ * Both the ptrauth_switch_to_guest and ptrauth_switch_to_host macros
+ * check for the presence of one of the cpufeature flags
+ * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and only
+ * then proceed with the save/restore of the Pointer Authentication
+ * key registers.
+ */
+.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
+alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
+ b 1000f
+alternative_else_nop_endif
+alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
+ b 1001f
+alternative_else_nop_endif
+1000:
+ ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+ and \reg1, \reg1, #(HCR_API | HCR_APK)
+ cbz \reg1, 1001f
+ add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state \reg1, \reg2, \reg3
+1001:
+.endm
+
+.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH
+ b 2000f
+alternative_else_nop_endif
+alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF
+ b 2001f
+alternative_else_nop_endif
+2000:
+ ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)]
+ and \reg1, \reg1, #(HCR_API | HCR_APK)
+ cbz \reg1, 2001f
+ add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_save_state \reg1, \reg2, \reg3
+ add \reg1, \h_ctxt, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state \reg1, \reg2, \reg3
+ isb
+2001:
+.endm
+
+#else /* !CONFIG_ARM64_PTR_AUTH */
+.macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3
+.endm
+.macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3
+.endm
+#endif /* CONFIG_ARM64_PTR_AUTH */
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_KVM_PTRAUTH_H */
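
To make the offset-range argument in the comment above concrete, the illustrative arithmetic (not part of this patch) is: each key is a pair of consecutive u64 slots in sys_regs[], so the bases used by the stp/ldp pairs are

/*
 *   PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1) =  0
 *   PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1) = 16
 *   PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1) = 32
 *   PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1) = 48
 *   PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1) = 64
 *
 * all well inside the 512-byte immediate limit of stp/ldp, so no extra
 * add is needed once the base register points at CPU_APIAKEYLO_EL1.
 */
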
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 290195168bb3..8ffcf5a512bb 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -26,7 +26,7 @@
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/*
* Size of the PCI I/O space. This must remain a power of two so that
@@ -302,7 +302,7 @@ static inline void *phys_to_virt(phys_addr_t x)
*/
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
+#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 52fa47c73bf0..dabba4b2c61f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -33,12 +33,22 @@
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return (pmd_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page;
+
+ page = alloc_page(PGALLOC_GFP);
+ if (!page)
+ return NULL;
+ if (!pgtable_pmd_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ return page_address(page);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
{
BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+ pgtable_pmd_page_dtor(virt_to_page(pmdp));
free_page((unsigned long)pmdp);
}
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index de70c1eabf33..2c41b04708fe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -478,6 +478,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
return __pmd_to_phys(pmd);
}
+static inline void pte_unmap(pte_t *pte) { }
+
/* Find an entry in the third-level page table. */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -485,9 +487,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr))))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while (0)
-#define pte_unmap_nested(pte) do { } while (0)
#define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 15d49515efdd..d328540cb85e 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POINTER_AUTH_H
#define __ASM_POINTER_AUTH_H
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5d9ce62bdebd..fcd0e691b1ea 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -57,7 +57,15 @@
#define TASK_SIZE_64 (UL(1) << vabits_user)
#ifdef CONFIG_COMPAT
+#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
#define TASK_SIZE_32 UL(0x100000000)
+#else
+#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ec60174c8c18..b2de32939ada 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -305,6 +305,28 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->regs[0];
}
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (starting from 0)
+ *
+ * regs_get_kernel_argument() returns the @n'th argument of the function call.
+ *
+ * Note that this chooses the most likely register mapping. In very rare
+ * cases this may not return correct data, for example, if one of the
+ * function parameters is 16 bytes or bigger. In such cases, we cannot
+ * access the parameter correctly and the register assignment of
+ * subsequent parameters will be shifted.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+#define NR_REG_ARGUMENTS 8
+ if (n < NR_REG_ARGUMENTS)
+ return pt_regs_read_reg(regs, n);
+ return 0;
+}
+
/* We must avoid circular header include via sched.h */
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
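
As a usage sketch (not part of this patch), a hypothetical kprobes pre-handler reading the first argument of a probed function; assumes <linux/kprobes.h>:

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long arg0 = regs_get_kernel_argument(regs, 0);

	pr_debug("%s: arg0=0x%lx\n", p->symbol_name, arg0);
	return 0;	/* let the probed instruction execute normally */
}
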
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index ffe47d766c25..63e0b92a5fbb 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2017 Arm Ltd.
#ifndef __ASM_SDEI_H
#define __ASM_SDEI_H
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650..58e288aaf0ba 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,8 +20,6 @@
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
-#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
-
int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs);
int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 5412fa40825e..915809e4ac32 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -119,7 +119,7 @@ static inline pud_t *stage2_pud_offset(struct kvm *kvm,
static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
{
if (kvm_stage2_has_pud(kvm))
- pud_free(NULL, pud);
+ free_page((unsigned long)pud);
}
static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
@@ -192,7 +192,7 @@ static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
{
if (kvm_stage2_has_pmd(kvm))
- pmd_free(NULL, pmd);
+ free_page((unsigned long)pmd);
}
static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index a179df3674a1..a65167f5cded 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -87,9 +87,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
* We don't care about endianness (__AUDIT_ARCH_LE bit) here because
* AArch64 has the same system calls both on little- and big- endian.
*/
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
- if (is_compat_task())
+ if (is_compat_thread(task_thread_info(task)))
return AUDIT_ARCH_ARM;
return AUDIT_ARCH_AARCH64;
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5b267dec6194..902d75b60914 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -454,6 +454,9 @@
#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_DSSBS (_BITUL(44))
#define SCTLR_ELx_ENIA (_BITUL(31))
@@ -606,6 +609,20 @@
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+/* id_aa64zfr0 */
+#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_BITPERM_SHIFT 16
+#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_SVEVER_SHIFT 0
+
+#define ID_AA64ZFR0_SM4 0x1
+#define ID_AA64ZFR0_SHA3 0x1
+#define ID_AA64ZFR0_BITPERM 0x1
+#define ID_AA64ZFR0_AES 0x1
+#define ID_AA64ZFR0_AES_PMULL 0x2
+#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
@@ -746,20 +763,39 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-asm(
-" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
-" .equ .L__reg_num_x\\num, \\num\n"
-" .endr\n"
+#define __DEFINE_MRS_MSR_S_REGNUM \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+" .equ .L__reg_num_x\\num, \\num\n" \
+" .endr\n" \
" .equ .L__reg_num_xzr, 31\n"
-"\n"
-" .macro mrs_s, rt, sreg\n"
- __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MRS_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro mrs_s, rt, sreg\n" \
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
-"\n"
-" .macro msr_s, sreg, rt\n"
- __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
+
+#define DEFINE_MSR_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro msr_s, sreg, rt\n" \
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
-);
+
+#define UNDEFINE_MRS_S \
+" .purgem mrs_s\n"
+
+#define UNDEFINE_MSR_S \
+" .purgem msr_s\n"
+
+#define __mrs_s(v, r) \
+ DEFINE_MRS_S \
+" mrs_s " v ", " __stringify(r) "\n" \
+ UNDEFINE_MRS_S
+
+#define __msr_s(r, v) \
+ DEFINE_MSR_S \
+" msr_s " __stringify(r) ", " v "\n" \
+ UNDEFINE_MSR_S
/*
* Unlike read_cpuid, calls to read_sysreg are never expected to be
@@ -787,13 +823,13 @@ asm(
*/
#define read_sysreg_s(r) ({ \
u64 __val; \
- asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
+ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
__val; \
})
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
- asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
+ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
} while (0)
/*
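
The new __mrs_s()/__msr_s() wrappers define the assembler macro, emit a single use of it and then .purgem it, so each expansion is self-contained and can sit inside ALTERNATIVE sequences (as in irqflags.h and kvm_hyp.h above) instead of relying on a macro installed once per translation unit by a bare top-level asm(). The callers are unchanged; for instance (sketch, not part of this patch):

	u64 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);	/* expands via __mrs_s */
	write_sysreg_s(pmr, SYS_ICC_PMR_EL1);		/* expands via __msr_s */
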
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 32693f34f431..fca95424e873 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -41,7 +41,6 @@ void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
int sig, int code, const char *name);
struct mm_struct;
-extern void show_pte(unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 106fdc951b6e..a287189ca8b4 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -27,6 +27,7 @@ static inline void __tlb_remove_table(void *_table)
free_page_and_swap_cache((struct page *)_table);
}
+#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
@@ -62,7 +63,10 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- tlb_remove_table(tlb, virt_to_page(pmdp));
+ struct page *page = virt_to_page(pmdp);
+
+ pgtable_pmd_page_dtor(page);
+ tlb_remove_table(tlb, page);
}
#endif
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index f2a83ff6b73c..70e6882853c0 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 428
+#define __NR_compat_syscalls 434
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 23f1a44acada..c39e90600bb3 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -874,6 +874,18 @@ __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
#define __NR_io_uring_register 427
__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+#define __NR_open_tree 428
+__SYSCALL(__NR_open_tree, sys_open_tree)
+#define __NR_move_mount 429
+__SYSCALL(__NR_move_mount, sys_move_mount)
+#define __NR_fsopen 430
+__SYSCALL(__NR_fsopen, sys_fsopen)
+#define __NR_fsconfig 431
+__SYSCALL(__NR_fsconfig, sys_fsconfig)
+#define __NR_fsmount 432
+__SYSCALL(__NR_fsmount, sys_fsmount)
+#define __NR_fspick 433
+__SYSCALL(__NR_fspick, sys_fspick)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index 2b9a63771eda..f89263c8e11a 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -38,6 +38,7 @@ struct vdso_data {
__u32 tz_minuteswest; /* Whacky timezone stuff */
__u32 tz_dsttime;
__u32 use_syscall;
+ __u32 hrtimer_res;
};
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
index 0b5ec6e08c10..0a12115d9638 100644
--- a/arch/arm64/include/asm/vmap_stack.h
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2017 Arm Ltd.
#ifndef __ASM_VMAP_STACK_H
#define __ASM_VMAP_STACK_H
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 5f0750c2199c..1a772b162191 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -18,7 +18,7 @@
#define _UAPI__ASM_HWCAP_H
/*
- * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ * HWCAP flags - for AT_HWCAP
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
@@ -53,4 +53,15 @@
#define HWCAP_PACA (1 << 30)
#define HWCAP_PACG (1UL << 31)
+/*
+ * HWCAP2 flags - for AT_HWCAP2
+ */
+#define HWCAP2_DCPODP (1 << 0)
+#define HWCAP2_SVE2 (1 << 1)
+#define HWCAP2_SVEAES (1 << 2)
+#define HWCAP2_SVEPMULL (1 << 3)
+#define HWCAP2_SVEBITPERM (1 << 4)
+#define HWCAP2_SVESHA3 (1 << 5)
+#define HWCAP2_SVESM4 (1 << 6)
+
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 97c3478ee6e7..7b7ac0f6cec9 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -35,6 +35,7 @@
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
+#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
@@ -102,6 +103,9 @@ struct kvm_regs {
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
+#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
+#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
struct kvm_vcpu_init {
__u32 target;
@@ -226,6 +230,45 @@ struct kvm_vcpu_events {
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+/* SVE registers */
+#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+/* Z- and P-regs occupy blocks at the following offsets within this range: */
+#define KVM_REG_ARM64_SVE_ZREG_BASE 0
+#define KVM_REG_ARM64_SVE_PREG_BASE 0x400
+#define KVM_REG_ARM64_SVE_FFR_BASE 0x600
+
+#define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS
+#define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS
+
+#define KVM_ARM64_SVE_MAX_SLICES 32
+
+#define KVM_REG_ARM64_SVE_ZREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
+ KVM_REG_SIZE_U2048 | \
+ (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_PREG(n, i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \
+ KVM_REG_SIZE_U256 | \
+ (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_FFR(i) \
+ (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \
+ KVM_REG_SIZE_U256 | \
+ ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
+#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
+
+/* Vector lengths pseudo-register: */
+#define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
+ KVM_REG_SIZE_U512 | 0xffff)
+#define KVM_ARM64_SVE_VLS_WORDS \
+ ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
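
The SVE register IDs defined above are consumed through the standard KVM_{GET,SET}_ONE_REG interface. An illustrative userspace sketch (not part of this patch), reading slice 0 of Z0 from a vcpu whose SVE configuration has been finalized; vcpu_fd and the 256-byte buffer are caller-supplied:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

static int example_read_z0(int vcpu_fd, void *buf /* KVM_REG_SIZE_U2048 = 256 bytes */)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),
		.addr = (uint64_t)(unsigned long)buf,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
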
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index cd434d0719c1..9e7dcb2c31c7 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -7,9 +7,9 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_insn.o = -pg
-CFLAGS_REMOVE_return_address.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
@@ -27,8 +27,9 @@ OBJCOPYFLAGS := --prefix-symbols=__efistub_
$(obj)/%.stub.o: $(obj)/%.o FORCE
$(call if_changed,objcopy)
-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
- sys_compat.o
+obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
+ sigreturn32.o sys_compat.o
+obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
diff --git a/arch/arm64/kernel/acpi_numa.c b/arch/arm64/kernel/acpi_numa.c
index eac1d0cc595c..7ff800045434 100644
--- a/arch/arm64/kernel/acpi_numa.c
+++ b/arch/arm64/kernel/acpi_numa.c
@@ -45,7 +45,7 @@ static inline int get_cpu_for_acpi_id(u32 uid)
return -EINVAL;
}
-static int __init acpi_parse_gicc_pxm(struct acpi_subtable_header *header,
+static int __init acpi_parse_gicc_pxm(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_gicc_affinity *pa;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 7f40dcbdd51d..947e39896e28 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -94,7 +94,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
- DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
@@ -125,9 +125,16 @@ int main(void)
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
+ DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
+ DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
+ DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
+ DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+ DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9950bb0cbd52..e88d4e7bdfc7 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -19,6 +19,7 @@
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
@@ -109,7 +110,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -131,9 +131,9 @@ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
{
static DEFINE_RAW_SPINLOCK(bp_lock);
int cpu, slot = -1;
@@ -169,7 +169,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
#define __smccc_workaround_1_smc_start NULL
#define __smccc_workaround_1_smc_end NULL
-static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+static void install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
@@ -177,23 +177,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
}
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
-static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
- bp_hardening_cb_t fn,
- const char *hyp_vecs_start,
- const char *hyp_vecs_end)
-{
- u64 pfr0;
-
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
- pfr0 = read_cpuid(ID_AA64PFR0_EL1);
- if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
- return;
-
- __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
-}
-
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
@@ -220,60 +203,83 @@ static void qcom_link_stack_sanitization(void)
: "=&r" (tmp));
}
-static void
-enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
+static bool __nospectre_v2;
+static int __init parse_nospectre_v2(char *str)
+{
+ __nospectre_v2 = true;
+ return 0;
+}
+early_param("nospectre_v2", parse_nospectre_v2);
+
+/*
+ * -1: No workaround
+ * 0: No workaround required
+ * 1: Workaround installed
+ */
+static int detect_harden_bp_fw(void)
{
bp_hardening_cb_t cb;
void *smccc_start, *smccc_end;
struct arm_smccc_res res;
u32 midr = read_cpuid_id();
- if (!entry->matches(entry, SCOPE_LOCAL_CPU))
- return;
-
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
- return;
+ return -1;
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_hvc_arch_workaround_1;
- /* This is a guest, no need to patch KVM vectors */
- smccc_start = NULL;
- smccc_end = NULL;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_hvc_arch_workaround_1;
+ /* This is a guest, no need to patch KVM vectors */
+ smccc_start = NULL;
+ smccc_end = NULL;
+ break;
+ default:
+ return -1;
+ }
break;
case PSCI_CONDUIT_SMC:
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
- if ((int)res.a0 < 0)
- return;
- cb = call_smc_arch_workaround_1;
- smccc_start = __smccc_workaround_1_smc_start;
- smccc_end = __smccc_workaround_1_smc_end;
+ switch ((int)res.a0) {
+ case 1:
+ /* Firmware says we're just fine */
+ return 0;
+ case 0:
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+ default:
+ return -1;
+ }
break;
default:
- return;
+ return -1;
}
if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
cb = qcom_link_stack_sanitization;
- install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+ if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
+ install_bp_hardening_cb(cb, smccc_start, smccc_end);
- return;
+ return 1;
}
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
+static bool __ssb_safe = true;
static const struct ssbd_options {
const char *str;
@@ -343,6 +349,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
void arm64_set_ssbd_mitigation(bool state)
{
+ if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+ pr_info_once("SSBD disabled by kernel configuration\n");
+ return;
+ }
+
if (this_cpu_has_cap(ARM64_SSBS)) {
if (state)
asm volatile(SET_PSTATE_SSBS(0));
@@ -372,16 +383,28 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
struct arm_smccc_res res;
bool required = true;
s32 val;
+ bool this_cpu_safe = false;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (cpu_mitigations_off())
+ ssbd_state = ARM64_SSBD_FORCE_DISABLE;
+
+ /* delay setting __ssb_safe until we get a firmware response */
+ if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
+ this_cpu_safe = true;
+
if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (!this_cpu_safe)
+ __ssb_safe = false;
required = false;
goto out_printmsg;
}
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -398,6 +421,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
default:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -406,14 +431,18 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
switch (val) {
case SMCCC_RET_NOT_SUPPORTED:
ssbd_state = ARM64_SSBD_UNKNOWN;
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
+ /* machines with mixed mitigation requirements must not return this */
case SMCCC_RET_NOT_REQUIRED:
pr_info_once("%s mitigation not required\n", entry->desc);
ssbd_state = ARM64_SSBD_MITIGATED;
return false;
case SMCCC_RET_SUCCESS:
+ __ssb_safe = false;
required = true;
break;
@@ -423,6 +452,8 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
default:
WARN_ON(1);
+ if (!this_cpu_safe)
+ __ssb_safe = false;
return false;
}
@@ -462,7 +493,14 @@ out_printmsg:
return required;
}
-#endif /* CONFIG_ARM64_SSBD */
+
+/* known invulnerable cores */
+static const struct midr_range arm64_ssb_cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ {},
+};
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
@@ -507,26 +545,67 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/* Track overall mitigation state. We are only mitigated if all cores are ok */
+static bool __hardenbp_enab = true;
+static bool __spectrev2_safe = true;
/*
- * List of CPUs where we need to issue a psci call to
- * harden the branch predictor.
+ * List of CPUs that do not need any Spectre-v2 mitigation at all.
*/
-static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
- MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
- MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
- {},
+static const struct midr_range spectre_v2_safe_list[] = {
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ { /* sentinel */ }
};
-#endif
+/*
+ * Track overall bp hardening for all heterogeneous cores in the machine.
+ * We are only considered "safe" if all booted cores are known safe.
+ */
+static bool __maybe_unused
+check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ int need_wa;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ /* If the CPU has CSV2 set, we're safe */
+ if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
+ ID_AA64PFR0_CSV2_SHIFT))
+ return false;
+
+ /* Alternatively, we have a list of unaffected CPUs */
+ if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
+ return false;
+
+ /* Fallback to firmware detection */
+ need_wa = detect_harden_bp_fw();
+ if (!need_wa)
+ return false;
+
+ __spectrev2_safe = false;
+
+ if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
+ pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ /* forced off */
+ if (__nospectre_v2 || cpu_mitigations_off()) {
+ pr_info_once("spectrev2 mitigation disabled by command line option\n");
+ __hardenbp_enab = false;
+ return false;
+ }
+
+ if (need_wa < 0) {
+ pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
+ __hardenbp_enab = false;
+ }
+
+ return (need_wa > 0);
+}
#ifdef CONFIG_HARDEN_EL2_VECTORS
@@ -603,6 +682,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+static const struct midr_range erratum_1188873_list[] = {
+ /* Cortex-A76 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ /* Neoverse-N1 r0p0 to r2p0 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -701,13 +790,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
},
#endif
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
- .cpu_enable = enable_smccc_arch_workaround_1,
- ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = check_branch_predictor,
},
-#endif
#ifdef CONFIG_HARDEN_EL2_VECTORS
{
.desc = "EL2 vector hardening",
@@ -715,20 +802,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
},
#endif
-#ifdef CONFIG_ARM64_SSBD
{
.desc = "Speculative Store Bypass Disable",
.capability = ARM64_SSBD,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_ssbd_mitigation,
+ .midr_range_list = arm64_ssb_cpus,
},
-#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
{
- /* Cortex-A76 r0p0 to r2p0 */
.desc = "ARM erratum 1188873",
.capability = ARM64_WORKAROUND_1188873,
- ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -742,3 +827,38 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
}
};
+
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__spectrev2_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (__hardenbp_enab)
+ return sprintf(buf, "Mitigation: Branch predictor hardening\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (__ssb_safe)
+ return sprintf(buf, "Not affected\n");
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_KERNEL:
+ case ARM64_SSBD_FORCE_ENABLE:
+ if (IS_ENABLED(CONFIG_ARM64_SSBD))
+ return sprintf(buf,
+ "Mitigation: Speculative Store Bypass disabled via prctl\n");
+ }
+
+ return sprintf(buf, "Vulnerable\n");
+}
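
For reference (not part of this patch), the net effect of the state tracked above is what userspace sees when reading the standard vulnerability files, e.g. /sys/devices/system/cpu/vulnerabilities/spectre_v2, which reports one of the strings emitted by cpu_show_spectre_v2():

/*
 *   Not affected
 *   Mitigation: Branch predictor hardening
 *   Vulnerable
 */
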
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index ea001241bdd4..00f8b8612b69 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
pr_err("%pOF: missing enable-method property\n",
dn);
}
+ of_node_put(dn);
} else {
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4061de10cea6..ca27e08e3d8a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -25,6 +25,7 @@
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
@@ -35,8 +36,8 @@
#include <asm/traps.h>
#include <asm/virt.h>
-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
+/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
+static unsigned long elf_hwcap __read_mostly;
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT \
@@ -184,6 +185,15 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
@@ -392,7 +402,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
- ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -947,7 +957,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -966,7 +976,17 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};
- char const *str = "command line option";
+ char const *str = "kpti command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+ /* Defer to CPU feature registers */
+ if (has_cpuid_feature(entry, scope))
+ meltdown_safe = true;
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -978,6 +998,24 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
__kpti_forced = -1;
}
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ if (!__kpti_forced) {
+ str = "KASLR";
+ __kpti_forced = 1;
+ }
+ }
+
+ if (cpu_mitigations_off() && !__kpti_forced) {
+ str = "mitigations=off";
+ __kpti_forced = -1;
+ }
+
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+ return false;
+ }
+
/* Forced? */
if (__kpti_forced) {
pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -985,18 +1023,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return __kpti_forced > 0;
}
- /* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return kaslr_offset() > 0;
-
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
- return false;
-
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return !meltdown_safe;
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
@@ -1026,6 +1056,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
return;
}
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
static int __init parse_kpti(char *str)
{
@@ -1039,7 +1075,6 @@ static int __init parse_kpti(char *str)
return 0;
}
early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
@@ -1306,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1322,7 +1356,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -1340,6 +1373,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64ISAR1_DPB_SHIFT,
.min_field_value = 1,
},
+ {
+ .desc = "Data cache clean to Point of Deep Persistence",
+ .capability = ARM64_HAS_DCPODP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_DPB_SHIFT,
+ .min_field_value = 2,
+ },
#endif
#ifdef CONFIG_ARM64_SVE
{
@@ -1571,39 +1614,46 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
#endif
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
- HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
- HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+ HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
+ HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
#endif
- HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
#ifdef CONFIG_ARM64_PTR_AUTH
- HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
- HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
#endif
{},
};
@@ -1623,7 +1673,7 @@ static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
- elf_hwcap |= cap->hwcap;
+ cpu_set_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1646,7 +1696,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
switch (cap->hwcap_type) {
case CAP_HWCAP:
- rc = (elf_hwcap & cap->hwcap) != 0;
+ rc = cpu_have_feature(cap->hwcap);
break;
#ifdef CONFIG_COMPAT
case CAP_COMPAT_HWCAP:
@@ -1667,7 +1717,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
- elf_hwcap |= HWCAP_CPUID;
+ cpu_set_named_feature(CPUID);
for (; hwcaps->matches; hwcaps++)
if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
cap_set_elf_hwcap(hwcaps);
@@ -1863,7 +1913,7 @@ static void verify_sve_features(void)
unsigned int len = zcr & ZCR_ELx_LEN_MASK;
if (len < safe_len || sve_verify_vq_map()) {
- pr_crit("CPU%d: SVE: required vector length(s) missing\n",
+ pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
@@ -1947,6 +1997,35 @@ bool this_cpu_has_cap(unsigned int n)
return false;
}
+void cpu_set_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ elf_hwcap |= BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_set_feature);
+
+bool cpu_have_feature(unsigned int num)
+{
+ WARN_ON(num >= MAX_CPU_FEATURES);
+ return elf_hwcap & BIT(num);
+}
+EXPORT_SYMBOL_GPL(cpu_have_feature);
+
+unsigned long cpu_get_elf_hwcap(void)
+{
+ /*
+ * We currently only populate the first 32 bits of AT_HWCAP. Please
+ * note that for userspace compatibility we guarantee that bits 62
+ * and 63 will always be returned as 0.
+ */
+ return lower_32_bits(elf_hwcap);
+}
+
+unsigned long cpu_get_elf_hwcap2(void)
+{
+ return upper_32_bits(elf_hwcap);
+}
+
static void __init setup_system_capabilities(void)
{
/*
@@ -2101,3 +2180,15 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
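
The new cpu_get_elf_hwcap() and cpu_get_elf_hwcap2() accessors split the internal feature bitmap into the AT_HWCAP and AT_HWCAP2 auxv words that userspace consumes. A minimal sketch of the userspace side (not from the patch; HWCAP_SVE and HWCAP2_SVE2 are assumed to come from the arm64 uapi <asm/hwcap.h>):

/* Query the two hwcap words populated by cpu_get_elf_hwcap()/_hwcap2(). */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>	/* arm64 uapi: HWCAP_SVE, HWCAP2_SVE2, ... */

int main(void)
{
	unsigned long hwcap  = getauxval(AT_HWCAP);
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("AT_HWCAP  = 0x%lx\n", hwcap);
	printf("AT_HWCAP2 = 0x%lx\n", hwcap2);
	printf("SVE:  %s\n", (hwcap & HWCAP_SVE) ? "yes" : "no");
	printf("SVE2: %s\n", (hwcap2 & HWCAP2_SVE2) ? "yes" : "no");
	return 0;
}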
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index ca0685f33900..f6f7936be6e7 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -85,6 +85,13 @@ static const char *const hwcap_str[] = {
"sb",
"paca",
"pacg",
+ "dcpodp",
+ "sve2",
+ "sveaes",
+ "svepmull",
+ "svebitperm",
+ "svesha3",
+ "svesm4",
NULL
};
@@ -167,7 +174,7 @@ static int c_show(struct seq_file *m, void *v)
#endif /* CONFIG_COMPAT */
} else {
for (j = 0; hwcap_str[j]; j++)
- if (elf_hwcap & (1 << j))
+ if (cpu_have_feature(j))
seq_printf(m, " %s", hwcap_str[j]);
}
seq_puts(m, "\n");
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d7bb6aefae0a..555b6bd2f3d6 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
*/
static int clear_os_lock(unsigned int cpu)
{
+ write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;
@@ -163,25 +164,46 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(clear_regs_spsr_ss);
-/* EL1 Single Step Handler hooks */
-static LIST_HEAD(step_hook);
-static DEFINE_SPINLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(debug_hook_lock);
+static LIST_HEAD(user_step_hook);
+static LIST_HEAD(kernel_step_hook);
-void register_step_hook(struct step_hook *hook)
+static void register_debug_hook(struct list_head *node, struct list_head *list)
{
- spin_lock(&step_hook_lock);
- list_add_rcu(&hook->node, &step_hook);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_add_rcu(node, list);
+ spin_unlock(&debug_hook_lock);
+
}
-void unregister_step_hook(struct step_hook *hook)
+static void unregister_debug_hook(struct list_head *node)
{
- spin_lock(&step_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&step_hook_lock);
+ spin_lock(&debug_hook_lock);
+ list_del_rcu(node);
+ spin_unlock(&debug_hook_lock);
synchronize_rcu();
}
+void register_user_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &user_step_hook);
+}
+
+void unregister_user_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_step_hook(struct step_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_step_hook);
+}
+
+void unregister_kernel_step_hook(struct step_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
+}
+
/*
* Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
@@ -191,11 +213,14 @@ void unregister_step_hook(struct step_hook *hook)
static int call_step_hook(struct pt_regs *regs, unsigned int esr)
{
struct step_hook *hook;
+ struct list_head *list;
int retval = DBG_HOOK_ERROR;
+ list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
+
rcu_read_lock();
- list_for_each_entry_rcu(hook, &step_hook, node) {
+ list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
@@ -222,7 +247,7 @@ static void send_user_sigtrap(int si_code)
"User debug trap");
}
-static int single_step_handler(unsigned long addr, unsigned int esr,
+static int single_step_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
bool handler_found = false;
@@ -234,10 +259,6 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
if (!reinstall_suspended_bps(regs))
return 0;
-#ifdef CONFIG_KPROBES
- if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
-#endif
if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
handler_found = true;
@@ -264,61 +285,59 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
}
NOKPROBE_SYMBOL(single_step_handler);
-/*
- * Breakpoint handler is re-entrant as another breakpoint can
- * hit within breakpoint handler, especically in kprobes.
- * Use reader/writer locks instead of plain spinlock.
- */
-static LIST_HEAD(break_hook);
-static DEFINE_SPINLOCK(break_hook_lock);
+static LIST_HEAD(user_break_hook);
+static LIST_HEAD(kernel_break_hook);
-void register_break_hook(struct break_hook *hook)
+void register_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_add_rcu(&hook->node, &break_hook);
- spin_unlock(&break_hook_lock);
+ register_debug_hook(&hook->node, &user_break_hook);
}
-void unregister_break_hook(struct break_hook *hook)
+void unregister_user_break_hook(struct break_hook *hook)
{
- spin_lock(&break_hook_lock);
- list_del_rcu(&hook->node);
- spin_unlock(&break_hook_lock);
- synchronize_rcu();
+ unregister_debug_hook(&hook->node);
+}
+
+void register_kernel_break_hook(struct break_hook *hook)
+{
+ register_debug_hook(&hook->node, &kernel_break_hook);
+}
+
+void unregister_kernel_break_hook(struct break_hook *hook)
+{
+ unregister_debug_hook(&hook->node);
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{
struct break_hook *hook;
+ struct list_head *list;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+ list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
+
rcu_read_lock();
- list_for_each_entry_rcu(hook, &break_hook, node)
- if ((esr & hook->esr_mask) == hook->esr_val)
+ list_for_each_entry_rcu(hook, list, node) {
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
+ }
rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);
-static int brk_handler(unsigned long addr, unsigned int esr,
+static int brk_handler(unsigned long unused, unsigned int esr,
struct pt_regs *regs)
{
- bool handler_found = false;
-
-#ifdef CONFIG_KPROBES
- if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
- if (kprobe_breakpoint_handler(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
- }
-#endif
- if (!handler_found && call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
- handler_found = true;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
- if (!handler_found && user_mode(regs)) {
+ if (user_mode(regs)) {
send_user_sigtrap(TRAP_BRKPT);
- } else if (!handler_found) {
+ } else {
pr_warn("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c50a7a75f2e0..1a7811b7e3c4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -336,6 +336,21 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+alternative_if_not ARM64_WORKAROUND_1188873
+ b 4f
+alternative_else_nop_endif
+ /*
+ * if (x22.mode32 == cntkctl_el1.el0vcten)
+ * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
+ */
+ mrs x1, cntkctl_el1
+ eon x0, x1, x22, lsr #3
+ tbz x0, #1, 4f
+ eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x1
+4:
+#endif
apply_ssbd 0, x0, x1
.endif
@@ -362,11 +377,11 @@ alternative_else_nop_endif
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 4f
+ bne 5f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
-4:
+5:
tramp_alias x30, tramp_exit_compat
br x30
#endif
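
The EON/TBZ sequence added to kernel_exit implements the pseudo-code comment in the hunk: CNTKCTL_EL1.EL0VCTEN is flipped whenever it currently equals the saved SPSR M[4] (AArch32) bit, so on return to EL0 an AArch32 task has virtual counter access disabled (reads trap and can be emulated, cf. the cntvct handlers later in this diff) while an AArch64 task keeps direct access. A standalone C model of that bit trick (a sketch only; the bit positions are taken from the comments in the hunk):

/* Model of the erratum 1188873 fixup: eon x0, x1, x22, lsr #3 sets bit 1
 * of x0 exactly when CNTKCTL_EL1.EL0VCTEN equals SPSR.M[4], and the eor
 * then toggles EL0VCTEN so the two always end up different. */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define EL0VCTEN	(1UL << 1)	/* CNTKCTL_EL1.EL0VCTEN */
#define SPSR_MODE32	(1UL << 4)	/* SPSR_ELx M[4]: exception taken from AArch32 */

static uint64_t fixup_cntkctl(uint64_t cntkctl, uint64_t spsr)
{
	/* eon x0, x1, x22, lsr #3  ->  x0 = cntkctl ^ ~(spsr >> 3) */
	uint64_t eon = cntkctl ^ ~(spsr >> 3);

	/* tbz x0, #1, skip  ->  only toggle when bit 1 is set (fields equal) */
	if (eon & (1UL << 1))
		cntkctl ^= EL0VCTEN;	/* eor x1, x1, #2 */
	return cntkctl;
}

int main(void)
{
	for (int vcten = 0; vcten <= 1; vcten++)
		for (int m32 = 0; m32 <= 1; m32++) {
			uint64_t out = fixup_cntkctl(vcten ? EL0VCTEN : 0,
						     m32 ? SPSR_MODE32 : 0);

			/* Invariant on return to EL0: EL0VCTEN != mode32 */
			assert(!!(out & EL0VCTEN) != m32);
			printf("vcten=%d m32=%d -> vcten=%d\n",
			       vcten, m32, !!(out & EL0VCTEN));
		}
	return 0;
}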
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 5ebe73b69961..a38bf74bcca8 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -18,6 +18,7 @@
*/
#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
@@ -48,6 +49,7 @@
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
+#include <asm/virt.h>
#define FPEXC_IOF (1 << 0)
#define FPEXC_DZF (1 << 1)
@@ -119,6 +121,8 @@
*/
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
+ void *sve_state;
+ unsigned int sve_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -130,14 +134,23 @@ static int sve_default_vl = -1;
/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
static void __percpu *efi_sve_state;
#else /* ! CONFIG_ARM64_SVE */
/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
@@ -235,14 +248,15 @@ static void task_fpsimd_load(void)
*/
void fpsimd_save(void)
{
- struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+ struct fpsimd_last_state_struct const *last =
+ this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!in_softirq() && !irqs_disabled());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+ if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -252,32 +266,15 @@ void fpsimd_save(void)
return;
}
- sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+ sve_save_state((char *)last->sve_state +
+ sve_ffr_offset(last->sve_vl),
+ &last->st->fpsr);
} else
- fpsimd_save_state(st);
+ fpsimd_save_state(last->st);
}
}
/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa). This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
- return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
- if (WARN_ON(bit >= SVE_VQ_MAX))
- bit = SVE_VQ_MAX - 1;
-
- return SVE_VQ_MAX - bit;
-}
-
-/*
* All vector length selection from userspace comes through here.
* We're on a slow path, so some sanity-checks are included.
* If things go wrong there's a bug somewhere, but try to fall back to a
@@ -298,8 +295,8 @@ static unsigned int find_supported_vector_length(unsigned int vl)
vl = max_vl;
bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
- vq_to_bit(sve_vq_from_vl(vl)));
- return sve_vl_from_vq(bit_to_vq(bit));
+ __vq_to_bit(sve_vq_from_vl(vl)));
+ return sve_vl_from_vq(__bit_to_vq(bit));
}
#ifdef CONFIG_SYSCTL
@@ -550,7 +547,6 @@ int sve_set_vector_length(struct task_struct *task,
local_bh_disable();
fpsimd_save();
- set_thread_flag(TIF_FOREIGN_FPSTATE);
}
fpsimd_flush_task_state(task);
@@ -624,12 +620,6 @@ int sve_get_current_vl(void)
return sve_prctl_status(0);
}
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
-
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
unsigned int vq, vl;
@@ -644,40 +634,82 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
vl = sve_get_vl();
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
- set_bit(vq_to_bit(vq), map);
+ set_bit(__vq_to_bit(vq), map);
}
}
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
void __init sve_init_vq_map(void)
{
sve_probe_vqs(sve_vq_map);
+ bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}
/*
* If we haven't committed to the set of supported VQs yet, filter out
* those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
*/
void sve_update_vq_map(void)
{
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+ sve_probe_vqs(tmp_map);
+ bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+ bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
int sve_verify_vq_map(void)
{
- int ret = 0;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
- sve_probe_vqs(sve_secondary_vq_map);
- bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
- SVE_VQ_MAX);
- if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+ sve_probe_vqs(tmp_map);
+
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
smp_processor_id());
- ret = -EINVAL;
+ return -EINVAL;
}
- return ret;
+ if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+ return 0;
+
+ /*
+ * For KVM, it is necessary to ensure that this CPU doesn't
+ * support any vector length that guests may have probed as
+ * unsupported.
+ */
+
+ /* Recover the set of supported VQs: */
+ bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+ /* Find VQs supported that are not globally supported: */
+ bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+ /* Find the lowest such VQ, if any: */
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ return 0; /* no mismatches */
+
+ /*
+ * Mismatches above sve_max_virtualisable_vl are fine, since
+ * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+ */
+ if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+ pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+ smp_processor_id());
+ return -EINVAL;
+ }
+
+ return 0;
}
static void __init sve_efi_setup(void)
@@ -744,6 +776,8 @@ u64 read_zcr_features(void)
void __init sve_setup(void)
{
u64 zcr;
+ DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+ unsigned long b;
if (!system_supports_sve())
return;
@@ -753,8 +787,8 @@ void __init sve_setup(void)
* so sve_vq_map must have at least SVE_VQ_MIN set.
* If something went wrong, at least try to patch it up:
*/
- if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
- set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+ if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+ set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -772,11 +806,31 @@ void __init sve_setup(void)
*/
sve_default_vl = find_supported_vector_length(64);
+ bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+ SVE_VQ_MAX);
+
+ b = find_last_bit(tmp_map, SVE_VQ_MAX);
+ if (b >= SVE_VQ_MAX)
+ /* No non-virtualisable VLs found */
+ sve_max_virtualisable_vl = SVE_VQ_MAX;
+ else if (WARN_ON(b == SVE_VQ_MAX - 1))
+ /* No virtualisable VLs? This is architecturally forbidden. */
+ sve_max_virtualisable_vl = SVE_VQ_MIN;
+ else /* b + 1 < SVE_VQ_MAX */
+ sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+ if (sve_max_virtualisable_vl > sve_max_vl)
+ sve_max_virtualisable_vl = sve_max_vl;
+
pr_info("SVE: maximum available vector length %u bytes per vector\n",
sve_max_vl);
pr_info("SVE: default vector length %u bytes per vector\n",
sve_default_vl);
+ /* KVM decides whether to support mismatched systems. Just warn here: */
+ if (sve_max_virtualisable_vl < sve_max_vl)
+ pr_warn("SVE: unvirtualisable vector lengths present\n");
+
sve_efi_setup();
}
@@ -816,12 +870,11 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
local_bh_disable();
fpsimd_save();
- fpsimd_to_sve(current);
/* Force ret_to_user to reload the registers: */
fpsimd_flush_task_state(current);
- set_thread_flag(TIF_FOREIGN_FPSTATE);
+ fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
@@ -894,9 +947,9 @@ void fpsimd_flush_thread(void)
local_bh_disable();
+ fpsimd_flush_task_state(current);
memset(&current->thread.uw.fpsimd_state, 0,
sizeof(current->thread.uw.fpsimd_state));
- fpsimd_flush_task_state(current);
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
@@ -933,8 +986,6 @@ void fpsimd_flush_thread(void)
current->thread.sve_vl_onexec = 0;
}
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-
local_bh_enable();
}
@@ -974,6 +1025,8 @@ void fpsimd_bind_task_to_cpu(void)
this_cpu_ptr(&fpsimd_last_state);
last->st = &current->thread.uw.fpsimd_state;
+ last->sve_state = current->thread.sve_state;
+ last->sve_vl = current->thread.sve_vl;
current->thread.fpsimd_cpu = smp_processor_id();
if (system_supports_sve()) {
@@ -987,7 +1040,8 @@ void fpsimd_bind_task_to_cpu(void)
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -995,6 +1049,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->sve_state = sve_state;
+ last->sve_vl = sve_vl;
}
/*
@@ -1043,12 +1099,29 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
/*
* Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled. The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
*/
void fpsimd_flush_task_state(struct task_struct *t)
{
t->thread.fpsimd_cpu = NR_CPUS;
+
+ barrier();
+ set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+ barrier();
}
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * This function must be called with softirqs disabled.
+ */
void fpsimd_flush_cpu_state(void)
{
__this_cpu_write(fpsimd_last_state.st, NULL);
@@ -1258,14 +1331,14 @@ static inline void fpsimd_hotplug_init(void) { }
*/
static int __init fpsimd_init(void)
{
- if (elf_hwcap & HWCAP_FP) {
+ if (cpu_have_named_feature(FP)) {
fpsimd_pm_init();
fpsimd_hotplug_init();
} else {
pr_notice("Floating-point is not implemented\n");
}
- if (!(elf_hwcap & HWCAP_ASIMD))
+ if (!cpu_have_named_feature(ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
return sve_sysctl_init();
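
The renamed __vq_to_bit()/__bit_to_vq() helpers keep the reversed encoding that the deleted comment described: storing VQ as bit (SVE_VQ_MAX - vq) lets a forward find_next_bit() return the largest supported VQ not exceeding a request. A standalone sketch of that lookup (the scan loop stands in for the kernel's find_next_bit(); the SVE_VQ_MIN/SVE_VQ_MAX values are assumed from the uapi headers):

/* Reversed VQ bitmap encoding: a forward scan from __vq_to_bit(vq) yields
 * the largest supported VQ <= vq (cf. find_supported_vector_length()). */
#include <stdbool.h>
#include <stdio.h>

#define SVE_VQ_MIN	1
#define SVE_VQ_MAX	512

static bool vq_map[SVE_VQ_MAX + 1];	/* indexed by __vq_to_bit(vq) */

static unsigned int __vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int __bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

static unsigned int max_supported_vq(unsigned int vq)
{
	if (vq > SVE_VQ_MAX)	/* clamp, as the kernel does with the VL */
		vq = SVE_VQ_MAX;
	for (unsigned int bit = __vq_to_bit(vq); bit <= __vq_to_bit(SVE_VQ_MIN); bit++)
		if (vq_map[bit])
			return __bit_to_vq(bit);
	return SVE_VQ_MIN;
}

int main(void)
{
	/* Pretend the hardware supports VQ 1, 2 and 4 (VL 16, 32, 64 bytes) */
	vq_map[__vq_to_bit(1)] = vq_map[__vq_to_bit(2)] = vq_map[__vq_to_bit(4)] = true;

	printf("request VQ 3 -> VQ %u\n", max_supported_vq(3));	/* prints 2 */
	printf("request VQ 8 -> VQ %u\n", max_supported_vq(8));	/* prints 4 */
	return 0;
}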
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index eecf7927dab0..fcae3f85c6cd 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -505,7 +505,7 @@ ENTRY(el2_setup)
* kernel is intended to run at EL2.
*/
mrs x2, id_aa64mmfr1_el1
- ubfx x2, x2, #8, #4
+ ubfx x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
mov x2, xzr
#endif
@@ -538,7 +538,7 @@ set_hcr:
#ifdef CONFIG_ARM_GIC_V3
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
- ubfx x0, x0, #24, #4
+ ubfx x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
cbz x0, 3f
mrs_s x0, SYS_ICC_SRE_EL2
@@ -564,8 +564,8 @@ set_hcr:
#endif
/* EL2 debug */
- mrs x1, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
- sbfx x0, x1, #8, #4
+ mrs x1, id_aa64dfr0_el1
+ sbfx x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
cmp x0, #1
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
@@ -574,7 +574,7 @@ set_hcr:
csel x3, xzr, x0, lt // all PMU counters from EL1
/* Statistical profiling */
- ubfx x0, x1, #32, #4 // Check ID_AA64DFR0_EL1 PMSVer
+ ubfx x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
cbz x0, 7f // Skip if SPE not present
cbnz x2, 6f // VHE?
mrs_s x4, SYS_PMBIDR_EL1 // If SPE available at EL2,
@@ -684,7 +684,7 @@ ENTRY(__boot_cpu_mode)
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
- .long 0
+ .quad 0
.popsection
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7820a4a688fa..9e2b5882cdeb 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -734,6 +734,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+ enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ u32 insn = aarch64_insn_get_ldadd_value();
+
+ switch (size) {
+ case AARCH64_INSN_SIZE_32:
+ case AARCH64_INSN_SIZE_64:
+ break;
+ default:
+ pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ insn = aarch64_insn_encode_ldst_size(size, insn);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+ result);
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+ address);
+
+ return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+ value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+ enum aarch64_insn_register value,
+ enum aarch64_insn_size_type size)
+{
+ /*
+ * STADD is simply encoded as an alias for LDADD with XZR as
+ * the destination register.
+ */
+ return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+ value, size);
+}
+
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy,
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 691854b77c7f..30853d5b7859 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -244,9 +244,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
kgdb_handle_exception(1, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
@@ -254,9 +251,6 @@ NOKPROBE_SYMBOL(kgdb_brk_fn)
static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
compiled_break = 1;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
@@ -266,7 +260,7 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs) || !kgdb_single_step)
+ if (!kgdb_single_step)
return DBG_HOOK_ERROR;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
@@ -275,15 +269,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
static struct break_hook kgdb_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
- .fn = kgdb_brk_fn
+ .fn = kgdb_brk_fn,
+ .imm = KGDB_DYN_DBG_BRK_IMM,
};
static struct break_hook kgdb_compiled_brkpt_hook = {
- .esr_mask = 0xffffffff,
- .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
- .fn = kgdb_compiled_brk_fn
+ .fn = kgdb_compiled_brk_fn,
+ .imm = KGDB_COMPILED_DBG_BRK_IMM,
};
static struct step_hook kgdb_step_hook = {
@@ -332,9 +324,9 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
- register_break_hook(&kgdb_brkpt_hook);
- register_break_hook(&kgdb_compiled_brkpt_hook);
- register_step_hook(&kgdb_step_hook);
+ register_kernel_break_hook(&kgdb_brkpt_hook);
+ register_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ register_kernel_step_hook(&kgdb_step_hook);
return 0;
}
@@ -345,9 +337,9 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_break_hook(&kgdb_brkpt_hook);
- unregister_break_hook(&kgdb_compiled_brkpt_hook);
- unregister_step_hook(&kgdb_step_hook);
+ unregister_kernel_break_hook(&kgdb_brkpt_hook);
+ unregister_kernel_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_kernel_step_hook(&kgdb_step_hook);
unregister_die_notifier(&kgdb_notifier);
}
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 997e6b27ff6a..49825e9e421e 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -1,29 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Low-level user helpers placed in the vectors page for AArch32.
+ * AArch32 user helpers.
* Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
*
* Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2012-2018 ARM Ltd.
*
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * AArch32 user helpers.
- *
- * Each segment is 32-byte aligned and will be moved to the top of the high
- * vector page. New segments (if ever needed) must be added in front of
- * existing ones. This mechanism should be used only for things that are
- * really small and justified, and not be abused freely.
+ * The kuser helpers below are mapped at a fixed address by
+ * aarch32_setup_additional_pages() and are provided for compatibility
+ * reasons with 32 bit (aarch32) applications that need them.
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
@@ -77,42 +62,3 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
-
-/*
- * AArch32 sigreturn code
- *
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- *
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
- .globl __aarch32_sigret_code_start
-__aarch32_sigret_code_start:
-
- /*
- * ARM Code
- */
- .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
- .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
-
- /*
- * ARM code
- */
- .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
-
- /*
- * Thumb code
- */
- .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
- .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
-
- .globl __aarch32_sigret_code_end
-__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 4addb38bc250..348d12eec566 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/clocksource.h>
+#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
@@ -431,7 +432,7 @@ static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
return val;
}
-static inline u64 armv8pmu_read_counter(struct perf_event *event)
+static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -468,7 +469,7 @@ static inline void armv8pmu_write_hw_counter(struct perf_event *event,
}
}
-static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
+static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -528,12 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
+ struct perf_event_attr *attr = &event->attr;
int idx = event->hw.idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
- armv8pmu_enable_counter(idx);
if (armv8pmu_event_is_chained(event))
- armv8pmu_enable_counter(idx - 1);
- isb();
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_set_pmu_events(counter_bits, attr);
+
+ /* We rely on the hypervisor switch code to enable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ armv8pmu_enable_counter(idx);
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_enable_counter(idx - 1);
+ }
}
static inline int armv8pmu_disable_counter(int idx)
@@ -546,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
int idx = hwc->idx;
+ u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
if (armv8pmu_event_is_chained(event))
- armv8pmu_disable_counter(idx - 1);
- armv8pmu_disable_counter(idx);
+ counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+ kvm_clr_pmu_events(counter_bits);
+
+ /* We rely on the hypervisor switch code to disable guest counters */
+ if (!kvm_pmu_counter_deferred(attr)) {
+ if (armv8pmu_event_is_chained(event))
+ armv8pmu_disable_counter(idx - 1);
+ armv8pmu_disable_counter(idx);
+ }
}
static inline int armv8pmu_enable_intens(int idx)
@@ -827,14 +847,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
* with other architectures (x86 and Power).
*/
if (is_kernel_in_hyp_mode()) {
- if (!attr->exclude_kernel)
+ if (!attr->exclude_kernel && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
- } else {
- if (attr->exclude_kernel)
+ if (attr->exclude_guest)
config_base |= ARMV8_PMU_EXCLUDE_EL1;
- if (!attr->exclude_hv)
+ if (attr->exclude_host)
+ config_base |= ARMV8_PMU_EXCLUDE_EL0;
+ } else {
+ if (!attr->exclude_hv && !attr->exclude_host)
config_base |= ARMV8_PMU_INCLUDE_EL2;
}
+
+ /*
+ * Filter out !VHE kernels and guest kernels
+ */
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
if (attr->exclude_user)
config_base |= ARMV8_PMU_EXCLUDE_EL0;
@@ -864,6 +893,9 @@ static void armv8pmu_reset(void *info)
armv8pmu_disable_intens(idx);
}
+ /* Clear the counters we flip at guest entry/exit */
+ kvm_clr_pmu_events(U32_MAX);
+
/*
* Initialize & Reset PMNC. Request overflow interrupt for
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 7a679caf4585..2509fcb6d404 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -439,15 +439,12 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
return DBG_HOOK_ERROR;
}
-int __kprobes
+static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
int retval;
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
/* return error if this is not our step */
retval = kprobe_ss_hit(kcb, instruction_pointer(regs));
@@ -461,16 +458,22 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
return retval;
}
-int __kprobes
+static struct step_hook kprobes_step_hook = {
+ .fn = kprobe_single_step_handler,
+};
+
+static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
kprobe_handler(regs);
return DBG_HOOK_HANDLED;
}
+static struct break_hook kprobes_break_hook = {
+ .imm = KPROBES_BRK_IMM,
+ .fn = kprobe_breakpoint_handler,
+};
+
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
@@ -599,5 +602,8 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
int __init arch_init_kprobes(void)
{
+ register_kernel_break_hook(&kprobes_break_hook);
+ register_kernel_step_hook(&kprobes_step_hook);
+
return 0;
}
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 636ca0119c0e..605945eac1f8 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -171,7 +171,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
static int uprobe_breakpoint_handler(struct pt_regs *regs,
unsigned int esr)
{
- if (user_mode(regs) && uprobe_pre_sstep_notifier(regs))
+ if (uprobe_pre_sstep_notifier(regs))
return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
@@ -182,21 +182,16 @@ static int uprobe_single_step_handler(struct pt_regs *regs,
{
struct uprobe_task *utask = current->utask;
- if (user_mode(regs)) {
- WARN_ON(utask &&
- (instruction_pointer(regs) != utask->xol_vaddr + 4));
-
- if (uprobe_post_sstep_notifier(regs))
- return DBG_HOOK_HANDLED;
- }
+ WARN_ON(utask && (instruction_pointer(regs) != utask->xol_vaddr + 4));
+ if (uprobe_post_sstep_notifier(regs))
+ return DBG_HOOK_HANDLED;
return DBG_HOOK_ERROR;
}
/* uprobe breakpoint handler hook */
static struct break_hook uprobes_break_hook = {
- .esr_mask = BRK64_ESR_MASK,
- .esr_val = BRK64_ESR_UPROBES,
+ .imm = UPROBES_BRK_IMM,
.fn = uprobe_breakpoint_handler,
};
@@ -207,8 +202,8 @@ static struct step_hook uprobes_step_hook = {
static int __init arch_init_uprobes(void)
{
- register_break_hook(&uprobes_break_hook);
- register_step_hook(&uprobes_step_hook);
+ register_user_break_hook(&uprobes_break_hook);
+ register_user_step_hook(&uprobes_step_hook);
return 0;
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 867a7cea70e5..a9b0485df074 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -296,11 +296,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
*/
fpsimd_flush_task_state(current);
- barrier();
- /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
- barrier();
/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
sve_alloc(current);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index cb7800acd19f..caea6e25db2a 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -403,8 +403,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = AARCH32_VECTORS_BASE +
- AARCH32_KERN_SIGRET_CODE_OFFSET +
+ retcode = (unsigned long)current->mm->context.vdso +
(idx << 2) + thumb;
}
diff --git a/arch/arm64/kernel/sigreturn32.S b/arch/arm64/kernel/sigreturn32.S
new file mode 100644
index 000000000000..475d30d471ac
--- /dev/null
+++ b/arch/arm64/kernel/sigreturn32.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch32 sigreturn code.
+ * Based on the kuser helpers in arch/arm/kernel/entry-armv.S.
+ *
+ * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright (C) 2012-2018 ARM Ltd.
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+
+#include <asm/unistd.h>
+
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // svc #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // mov r7, #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // svc #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // mov r7, #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 824de7038967..bb4b3f07761a 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -586,7 +586,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
}
static int __init
-acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
+acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_generic_interrupt *processor;
@@ -595,7 +595,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
acpi_map_gic_cpu_interface(processor);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d908b5e9e949..b00ec7d483d1 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -140,8 +140,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
#endif
walk_stackframe(current, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -172,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
#endif
walk_stackframe(tsk, &frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
put_task_stack(tsk);
}
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index b44065fb1616..6f91e8116514 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -31,7 +31,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
- unsigned long, fd, off_t, off)
+ unsigned long, fd, unsigned long, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 29755989f616..ade32046f3fe 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -462,6 +462,9 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */
__user_cache_maint("dc civac", address, ret);
break;
+ case ESR_ELx_SYS64_ISS_CRM_DC_CVADP: /* DC CVADP */
+ __user_cache_maint("sys 3, c7, c13, 1", address, ret);
+ break;
case ESR_ELx_SYS64_ISS_CRM_DC_CVAP: /* DC CVAP */
__user_cache_maint("sys 3, c7, c12, 1", address, ret);
break;
@@ -496,7 +499,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = ESR_ELx_SYS64_ISS_RT(esr);
- pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+ pt_regs_write_reg(regs, rt, arch_timer_read_counter());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
@@ -668,7 +671,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
- u64 val = arch_counter_get_cntvct();
+ u64 val = arch_timer_read_counter();
pt_regs_write_reg(regs, rt, lower_32_bits(val));
pt_regs_write_reg(regs, rt2, upper_32_bits(val));
@@ -950,9 +953,6 @@ int is_valid_bugaddr(unsigned long addr)
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
switch (report_bug(regs->pc, regs)) {
case BUG_TRAP_TYPE_BUG:
die("Oops - BUG", regs, 0);
@@ -972,9 +972,8 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
static struct break_hook bug_break_hook = {
- .esr_val = 0xf2000000 | BUG_BRK_IMM,
- .esr_mask = 0xffffffff,
.fn = bug_handler,
+ .imm = BUG_BRK_IMM,
};
#ifdef CONFIG_KASAN_SW_TAGS
@@ -992,9 +991,6 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr)
u64 addr = regs->regs[0];
u64 pc = regs->pc;
- if (user_mode(regs))
- return DBG_HOOK_ERROR;
-
kasan_report(addr, size, write, pc);
/*
@@ -1019,13 +1015,10 @@ static int kasan_handler(struct pt_regs *regs, unsigned int esr)
return DBG_HOOK_HANDLED;
}
-#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
-#define KASAN_ESR_MASK 0xffffff00
-
static struct break_hook kasan_break_hook = {
- .esr_val = KASAN_ESR_VAL,
- .esr_mask = KASAN_ESR_MASK,
- .fn = kasan_handler,
+ .fn = kasan_handler,
+ .imm = KASAN_BRK_IMM,
+ .mask = KASAN_BRK_MASK,
};
#endif
@@ -1037,7 +1030,9 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
- if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+ unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+
+ if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
@@ -1046,8 +1041,8 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
- register_break_hook(&bug_break_hook);
+ register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
- register_break_hook(&kasan_break_hook);
+ register_kernel_break_hook(&kasan_break_hook);
#endif
}
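
With the reworked break_hook, handlers match on the 16-bit BRK comment field via ->imm and ->mask instead of a full ESR value/mask pair, which is what lets the KASAN hook ignore the tag byte. A standalone model of the matching rule used by call_break_hook() and early_brk64() (a sketch; the BUG/KASAN immediates are assumptions mirroring asm/brk-imm.h, and the 0xf2000000 ESR base is the BRK64 encoding shown in the removed lines):

/* Model of break_hook matching: compare the ESR comment field against
 * hook->imm, with hook->mask selecting bits to ignore. */
#include <stdbool.h>
#include <stdio.h>

#define ESR_ELx_BRK64_ISS_COMMENT_MASK	0xffff
#define BUG_BRK_IMM			0x800	/* assumption: see asm/brk-imm.h */
#define KASAN_BRK_IMM			0x900	/* assumption */
#define KASAN_BRK_MASK			0x0ff	/* low byte carries the KASAN tag */

struct break_hook {
	unsigned int imm;
	unsigned int mask;	/* comment bits to ignore, 0 for exact match */
};

static bool hook_matches(const struct break_hook *hook, unsigned int esr)
{
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	return (comment & ~hook->mask) == hook->imm;
}

int main(void)
{
	struct break_hook bug_hook   = { .imm = BUG_BRK_IMM };
	struct break_hook kasan_hook = { .imm = KASAN_BRK_IMM, .mask = KASAN_BRK_MASK };

	printf("brk #0x800 -> bug: %d kasan: %d\n",
	       hook_matches(&bug_hook, 0xf2000000 | 0x800),
	       hook_matches(&kasan_hook, 0xf2000000 | 0x800));
	printf("brk #0x93a -> bug: %d kasan: %d\n",
	       hook_matches(&bug_hook, 0xf2000000 | 0x93a),
	       hook_matches(&kasan_hook, 0xf2000000 | 0x93a));
	return 0;
}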
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 2d419006ad43..8074cbd3a3a8 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,5 +1,5 @@
/*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * VDSO implementations.
*
* Copyright (C) 2012 ARM Limited
*
@@ -53,61 +53,129 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
/*
* Create and map the vectors page for AArch32 tasks.
*/
-static struct page *vectors_page[1] __ro_after_init;
+#define C_VECTORS 0
+#define C_SIGPAGE 1
+#define C_PAGES (C_SIGPAGE + 1)
+static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
+static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
+ {
+ .name = "[vectors]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_VECTORS],
+ },
+ {
+ .name = "[sigpage]", /* ABI */
+ .pages = &aarch32_vdso_pages[C_SIGPAGE],
+ },
+};
-static int __init alloc_vectors_page(void)
+static int aarch32_alloc_kuser_vdso_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
- extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
- int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long vpage;
+ unsigned long vdso_page;
- vpage = get_zeroed_page(GFP_ATOMIC);
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
- if (!vpage)
+ vdso_page = get_zeroed_page(GFP_ATOMIC);
+ if (!vdso_page)
return -ENOMEM;
- /* kuser helpers */
- memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
- kuser_sz);
+ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
+ kuser_sz);
+ aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
+ flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
+ return 0;
+}
- /* sigreturn code */
- memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- __aarch32_sigret_code_start, sigret_sz);
+static int __init aarch32_alloc_vdso_pages(void)
+{
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+ unsigned long sigpage;
+ int ret;
- flush_icache_range(vpage, vpage + PAGE_SIZE);
- vectors_page[0] = virt_to_page(vpage);
+ sigpage = get_zeroed_page(GFP_ATOMIC);
+ if (!sigpage)
+ return -ENOMEM;
- return 0;
+ memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
+ flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);
+
+ ret = aarch32_alloc_kuser_vdso_page();
+ if (ret)
+ free_page(sigpage);
+
+ return ret;
}
-arch_initcall(alloc_vectors_page);
+arch_initcall(aarch32_alloc_vdso_pages);
-int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
- struct mm_struct *mm = current->mm;
- unsigned long addr = AARCH32_VECTORS_BASE;
- static const struct vm_special_mapping spec = {
- .name = "[vectors]",
- .pages = vectors_page,
+ void *ret;
+
+ if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
+ return 0;
+
+ /*
+ * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
+ * not safe to CoW the page containing the CPU exception vectors.
+ */
+ ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_VECTORS]);
- };
+ return PTR_ERR_OR_ZERO(ret);
+}
+
+static int aarch32_sigreturn_setup(struct mm_struct *mm)
+{
+ unsigned long addr;
void *ret;
- if (down_write_killable(&mm->mmap_sem))
- return -EINTR;
- current->mm->context.vdso = (void *)addr;
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = ERR_PTR(addr);
+ goto out;
+ }
- /* Map vectors page at the high address. */
+ /*
+	 * VM_MAYWRITE is required so that gdb can copy-on-write the page
+	 * and set breakpoints.
+ */
ret = _install_special_mapping(mm, addr, PAGE_SIZE,
- VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
- &spec);
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC,
+ &aarch32_vdso_spec[C_SIGPAGE]);
+ if (IS_ERR(ret))
+ goto out;
- up_write(&mm->mmap_sem);
+ mm->context.vdso = (void *)addr;
+out:
return PTR_ERR_OR_ZERO(ret);
}
+
+int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ int ret;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ ret = aarch32_kuser_helpers_setup(mm);
+ if (ret)
+ goto out;
+
+ ret = aarch32_sigreturn_setup(mm);
+
+out:
+ up_write(&mm->mmap_sem);
+ return ret;
+}
#endif /* CONFIG_COMPAT */
static int vdso_mremap(const struct vm_special_mapping *sm,
@@ -146,8 +214,6 @@ static int __init vdso_init(void)
}
vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -232,6 +298,9 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ /* Read without the seqlock held by clock_getres() */
+ WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
+
if (!use_syscall) {
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
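
Not part of the patch: a C model of the lock-free consumer that the WRITE_ONCE() above pairs with. The real consumer is the assembly fast path in __kernel_clock_getres further down; struct getres_data and the helper name below are illustrative only. The point is that a single aligned 32-bit resolution value needs no seqlock, only the ONCE accessors to avoid torn loads and stores.

#include <linux/compiler.h>
#include <linux/types.h>

/* Pared-down stand-in for the data-page field involved (illustrative). */
struct getres_data {
        u32 hrtimer_res;	/* nanoseconds, published from update_vsyscall() */
};

static inline u32 getres_read_ns(const struct getres_data *gd)
{
        /* Pairs with the WRITE_ONCE() writer; no seqcount retry needed. */
        return READ_ONCE(gd->hrtimer_res);
}
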
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index b215c712d897..744b9dbaba03 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -12,17 +12,12 @@ obj-vdso := gettimeofday.o note.o sigreturn.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 \
+ $(call ld-option, --hash-style=sysv) -n -T
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
-# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
-# down to collect2, resulting in silent corruption of the vDSO image.
-ccflags-y += -Wl,-shared
-
obj-y += vdso.o
extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
@@ -31,8 +26,8 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
- $(call if_changed,vdsold)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,ld)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -42,9 +37,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
@@ -54,8 +47,6 @@ $(obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index c39872a7b03c..856fee6d3512 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -73,6 +73,13 @@ x_tmp .req x8
movn x_tmp, #0xff00, lsl #48
and \res, x_tmp, \res
mul \res, \res, \mult
+ /*
+ * Fake address dependency from the value computed from the counter
+ * register to subsequent data page accesses so that the sequence
+ * locking also orders the read of the counter.
+ */
+ and x_tmp, \res, xzr
+ add vdso_data, vdso_data, x_tmp
.endm
/*
@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=1b
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=1b
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@@ -211,13 +218,13 @@ realtime:
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
- seqcnt_check fail=realtime
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=realtime
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
clock_gettime_return, shift=1
@@ -231,7 +238,6 @@ monotonic:
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
- seqcnt_check fail=monotonic
/* All computations are done with left-shifted nsecs. */
lsl x4, x4, x12
@@ -239,6 +245,7 @@ monotonic:
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=monotonic
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@@ -253,13 +260,13 @@ monotonic_raw:
/* w11 = cs_raw_mult, w12 = cs_shift */
ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
- seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+ seqcnt_check fail=monotonic_raw
get_ts_clock_raw res_sec=x10, res_nsec=x11, \
clock_nsec=x15, nsec_to_sec=x9
@@ -301,13 +308,14 @@ ENTRY(__kernel_clock_getres)
ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
b.ne 1f
- ldr x2, 5f
+ adr vdso_data, _vdso_data
+ ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
b 2f
1:
cmp w0, #CLOCK_REALTIME_COARSE
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 4f
- ldr x2, 6f
+ ldr x2, 5f
2:
cbz x1, 3f
stp xzr, x2, [x1]
@@ -321,8 +329,6 @@ ENTRY(__kernel_clock_getres)
svc #0
ret
5:
- .quad CLOCK_REALTIME_RES
-6:
.quad CLOCK_COARSE_RES
.cfi_endproc
ENDPROC(__kernel_clock_getres)
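
Not part of the patch: a simplified C model of why the seqcnt_check lines above are moved after get_clock_shifted_nsec. The counter has to be read inside the seqlock retry window (the fake address dependency added earlier makes the hardware respect that ordering); otherwise a counter value could be combined with stale mult/shift data from a concurrent update. The structure and function names below are illustrative.

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct clock_data_model {
        u32 seq;		/* even = stable, odd = update in progress */
        u64 cycle_last;
        u32 mult;
        u32 shift;
};

static u64 model_read_ns(const struct clock_data_model *d,
                         u64 (*read_counter)(void))
{
        u32 seq;
        u64 cycles, ns;

        do {
                seq = READ_ONCE(d->seq);
                smp_rmb();
                /* The counter read sits inside the retry window ... */
                cycles = read_counter();
                ns = (cycles - d->cycle_last) * d->mult >> d->shift;
                smp_rmb();
                /* ... so a racing update forces the whole block to retry. */
        } while ((seq & 1) || READ_ONCE(d->seq) != seq);

        return ns;
}
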
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a3f85624313e..a67121d419a2 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -23,7 +23,6 @@ config KVM
depends on OF
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
- select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 690e033a91c0..3ac1a64d2fb9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index aac7808ce216..6e3c9c8b2df9 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
+#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
@@ -85,9 +86,12 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
- fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
+ vcpu->arch.sve_state,
+ vcpu->arch.sve_max_vl);
+
clear_thread_flag(TIF_FOREIGN_FPSTATE);
- clear_thread_flag(TIF_SVE);
+ update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
}
}
@@ -100,14 +104,21 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
unsigned long flags;
+ bool host_has_sve = system_supports_sve();
+ bool guest_has_sve = vcpu_has_sve(vcpu);
local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
+
/* Clean guest FP state to memory and invalidate cpu view */
fpsimd_save();
fpsimd_flush_cpu_state();
- } else if (system_supports_sve()) {
+
+ if (guest_has_sve)
+ *guest_zcr = read_sysreg_s(SYS_ZCR_EL12);
+ } else if (host_has_sve) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dd436a50fce7..3ae2f82fca46 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -19,18 +19,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_host.h>
+#include <asm/sigcontext.h>
#include "trace.h"
@@ -52,12 +59,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+static bool core_reg_offset_is_vreg(u64 off)
+{
+ return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+ off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
@@ -89,11 +103,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
return -EINVAL;
}
- if (KVM_REG_SIZE(reg->id) == size &&
- IS_ALIGNED(off, size / sizeof(__u32)))
- return 0;
+ if (KVM_REG_SIZE(reg->id) != size ||
+ !IS_ALIGNED(off, size / sizeof(__u32)))
+ return -EINVAL;
- return -EINVAL;
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+ return -EINVAL;
+
+ return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -115,7 +137,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -140,7 +162,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -183,6 +205,239 @@ out:
return err;
}
+#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
+#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
+
+static bool vq_present(
+ const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
+ unsigned int vq)
+{
+ return (*vqs)[vq_word(vq)] & vq_mask(vq);
+}
+
+static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+ return -EINVAL;
+
+ memset(vqs, 0, sizeof(vqs));
+
+ max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (sve_vq_available(vq))
+ vqs[vq_word(vq)] |= vq_mask(vq);
+
+ if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM; /* too late! */
+
+ if (WARN_ON(vcpu->arch.sve_state))
+ return -EINVAL;
+
+ if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
+ return -EFAULT;
+
+ max_vq = 0;
+ for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
+ if (vq_present(&vqs, vq))
+ max_vq = vq;
+
+ if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+ return -EINVAL;
+
+ /*
+ * Vector lengths supported by the host can't currently be
+ * hidden from the guest individually: instead we can only set a
+	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
+	 * lengths match the requested set exactly, up to the requested
+ * maximum:
+ */
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (vq_present(&vqs, vq) != sve_vq_available(vq))
+ return -EINVAL;
+
+ /* Can't run with no vector lengths at all: */
+ if (max_vq < SVE_VQ_MIN)
+ return -EINVAL;
+
+ /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
+ vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+
+ return 0;
+}
+
+#define SVE_REG_SLICE_SHIFT 0
+#define SVE_REG_SLICE_BITS 5
+#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
+#define SVE_REG_ID_BITS 5
+
+#define SVE_REG_SLICE_MASK \
+ GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
+ SVE_REG_SLICE_SHIFT)
+#define SVE_REG_ID_MASK \
+ GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
+
+#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
+
+#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
+#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
+
+/*
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
+ */
+#define vcpu_sve_slices(vcpu) 1
+
+/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
+struct sve_state_reg_region {
+ unsigned int koffset; /* offset into sve_state in kernel memory */
+ unsigned int klen; /* length in kernel memory */
+ unsigned int upad; /* extra trailing padding in user memory */
+};
+
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
+static int sve_reg_to_region(struct sve_state_reg_region *region,
+ struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ /* reg ID ranges for Z- registers */
+ const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
+ const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* reg ID ranges for P- registers and FFR (which are contiguous) */
+ const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
+ const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
+
+ unsigned int vq;
+ unsigned int reg_num;
+
+ unsigned int reqoffset, reqlen; /* User-requested offset and length */
+	unsigned int maxlen; /* Maximum permitted length */
+
+ size_t sve_state_size;
+
+ const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* Verify that the P-regs and FFR really do have contiguous IDs: */
+ BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
+
+ /* Verify that we match the UAPI header: */
+ BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
+
+ reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
+
+ if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_ZREG_SIZE;
+ maxlen = SVE_SIG_ZREG_SIZE(vq);
+ } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_PREG_SIZE;
+ maxlen = SVE_SIG_PREG_SIZE(vq);
+ } else {
+ return -EINVAL;
+ }
+
+ sve_state_size = vcpu_sve_state_size(vcpu);
+ if (WARN_ON(!sve_state_size))
+ return -EINVAL;
+
+ region->koffset = array_index_nospec(reqoffset, sve_state_size);
+ region->klen = min(maxlen, reqlen);
+ region->upad = reqlen - region->klen;
+
+ return 0;
+}
+
+static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ char __user *uptr = (char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return get_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
+ region.klen) ||
+ clear_user(uptr + region.klen, region.upad))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ const char __user *uptr = (const char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return set_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
+ region.klen))
+ return -EFAULT;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
@@ -193,9 +448,37 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-static unsigned long num_core_regs(void)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = 0;
+ const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+ continue;
+
+ if (uindices) {
+ if (put_user(core_reg | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
- return sizeof(struct kvm_regs) / sizeof(__u32);
+ return copy_core_reg_indices(vcpu, NULL);
}
/**
@@ -251,6 +534,67 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
+ + 1; /* KVM_REG_ARM64_SVE_VLS */
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+ u64 reg;
+ unsigned int i, n;
+ int num_regs = 0;
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ /*
+ * Enumerate this first, so that userspace can save/restore in
+ * the order reported by KVM_GET_REG_LIST:
+ */
+ reg = KVM_REG_ARM64_SVE_VLS;
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ ++num_regs;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < SVE_NUM_ZREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ for (n = 0; n < SVE_NUM_PREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_PREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ reg = KVM_REG_ARM64_SVE_FFR(i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ return num_regs;
+}
+
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
@@ -258,8 +602,15 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
+ unsigned long res = 0;
+
+ res += num_core_regs(vcpu);
+ res += num_sve_regs(vcpu);
+ res += kvm_arm_num_sys_reg_descs(vcpu);
+ res += kvm_arm_get_fw_num_regs(vcpu);
+ res += NUM_TIMER_REGS;
+
+ return res;
}
/**
@@ -269,23 +620,25 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
- const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
int ret;
- for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
- if (put_user(core_reg | i, uindices))
- return -EFAULT;
- uindices++;
- }
+ ret = copy_core_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sve_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += NUM_TIMER_REGS;
@@ -298,12 +651,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we want a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return get_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_get_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -317,12 +669,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we set a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return set_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_set_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
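
Not part of the patch: a hedged userspace sketch of driving the new SVE register interface added above. It assumes vcpu_fd refers to a vcpu created with the SVE feature requested and not yet finalized; error handling is minimal and the helper name is made up. KVM_REG_ARM64_SVE_VLS is read first, optionally trimmed, and written back before finalization, after which the Z/P/FFR registers become accessible through the usual KVM_GET/SET_ONE_REG path.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int sve_query_and_set_vls(int vcpu_fd,
                                 uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS])
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM64_SVE_VLS,
                .addr = (uint64_t)(uintptr_t)vqs,
        };

        /* Read the supported vector-length bitmap... */
        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                return -1;

        /* ...and (optionally after clearing bits) write it back. */
        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
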
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 0b7983442071..516aead3c2a9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -173,20 +173,40 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+#define __ptrauth_save_key(regs, key) \
+({ \
+ regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+ regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+/*
+ * Handle the guest trying to use a ptrauth instruction, or trying to access a
+ * ptrauth register.
+ */
+void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *ctxt;
+
+ if (vcpu_has_ptrauth(vcpu)) {
+ vcpu_ptrauth_enable(vcpu);
+ ctxt = vcpu->arch.host_cpu_context;
+ __ptrauth_save_key(ctxt->sys_regs, APIA);
+ __ptrauth_save_key(ctxt->sys_regs, APIB);
+ __ptrauth_save_key(ctxt->sys_regs, APDA);
+ __ptrauth_save_key(ctxt->sys_regs, APDB);
+ __ptrauth_save_key(ctxt->sys_regs, APGA);
+ } else {
+ kvm_inject_undefined(vcpu);
+ }
+}
+
/*
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
* a NOP).
*/
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- /*
- * We don't currently support ptrauth in a guest, and we mask the ID
- * registers to prevent well-behaved guests from trying to make use of
- * it.
- *
- * Inject an UNDEF, as if the feature really isn't present.
- */
- kvm_inject_undefined(vcpu);
+ kvm_arm_vcpu_ptrauth_trap(vcpu);
return 1;
}
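
Not part of the patch: a hedged userspace sketch of opting a vcpu in to pointer authentication so that the lazy trap handler above ever fires. It uses only the capability and feature names added by this series; the helper name is made up and error handling is minimal. The address and generic features currently have to be requested together.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_init_with_ptrauth(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        if (!ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_ADDRESS) ||
            !ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PTRAUTH_GENERIC))
                return -1;	/* not supported on this host */

        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                return -1;

        /* Request both features; KVM rejects one without the other. */
        init.features[0] |= 1u << KVM_ARM_VCPU_PTRAUTH_ADDRESS;
        init.features[0] |= 1u << KVM_ARM_VCPU_PTRAUTH_GENERIC;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
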
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 675fdc186e3b..93ba3d7ef027 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -24,6 +24,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -64,6 +65,13 @@ ENTRY(__guest_enter)
add x18, x0, #VCPU_CONTEXT
+ // Macro ptrauth_switch_to_guest format:
+ // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
+ // The below macro to restore guest keys is not implemented in C code
+ // as it may cause Pointer Authentication key signing mismatch errors
+ // when this feature is enabled for kernel code.
+ ptrauth_switch_to_guest x18, x0, x1, x2
+
// Restore guest regs x0-x17
ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
@@ -118,6 +126,13 @@ ENTRY(__guest_exit)
get_host_ctxt x2, x3
+	// Macro ptrauth_switch_to_host format:
+ // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
+ // The below macro to save/restore keys is not implemented in C code
+ // as it may cause Pointer Authentication key signing mismatch errors
+ // when this feature is enabled for kernel code.
+ ptrauth_switch_to_host x1, x2, x3, x4, x5
+
// Now restore the host regs
restore_callee_saved_regs x2
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 3563fe655cd5..22b4c335e0b2 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -100,7 +100,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu)) {
+ if (update_fp_enabled(vcpu)) {
+ if (vcpu_has_sve(vcpu))
+ val |= CPACR_EL1_ZEN;
+ } else {
val &= ~CPACR_EL1_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@@ -317,16 +320,48 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
+/* Check for an FPSIMD/SVE trap and handle as appropriate */
+static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
- struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+ bool vhe, sve_guest, sve_host;
+ u8 hsr_ec;
- if (has_vhe())
- write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
- cpacr_el1);
- else
+ if (!system_supports_fpsimd())
+ return false;
+
+ if (system_supports_sve()) {
+ sve_guest = vcpu_has_sve(vcpu);
+ sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
+ vhe = true;
+ } else {
+ sve_guest = false;
+ sve_host = false;
+ vhe = has_vhe();
+ }
+
+ hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
+ hsr_ec != ESR_ELx_EC_SVE)
+ return false;
+
+ /* Don't handle SVE traps for non-SVE vcpus here: */
+ if (!sve_guest)
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+ return false;
+
+ /* Valid trap. Switch the context: */
+
+ if (vhe) {
+ u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
+
+ if (sve_guest)
+ reg |= CPACR_EL1_ZEN;
+
+ write_sysreg(reg, cpacr_el1);
+ } else {
write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
cptr_el2);
+ }
isb();
@@ -335,21 +370,28 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
* In the SVE case, VHE is assumed: it is enforced by
* Kconfig and kvm_arch_init().
*/
- if (system_supports_sve() &&
- (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+ if (sve_host) {
struct thread_struct *thread = container_of(
- host_fpsimd,
+ vcpu->arch.host_fpsimd_state,
struct thread_struct, uw.fpsimd_state);
- sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+ sve_save_state(sve_pffr(thread),
+ &vcpu->arch.host_fpsimd_state->fpsr);
} else {
- __fpsimd_save_state(host_fpsimd);
+ __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
}
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
- __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ if (sve_guest) {
+ sve_load_state(vcpu_sve_pffr(vcpu),
+ &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+ sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+ write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+ } else {
+ __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ }
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
@@ -385,10 +427,10 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* and restore the guest context lazily.
* If FP/SIMD is not implemented, handle the trap and inject an
* undefined instruction exception to the guest.
+ * Similarly for trapped SVE accesses.
*/
- if (system_supports_fpsimd() &&
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
- return __hyp_switch_fpsimd(vcpu);
+ if (__hyp_handle_fpsimd(vcpu))
+ return true;
if (!__populate_fault_info(vcpu))
return true;
@@ -524,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+ bool pmu_switch_needed;
u64 exit_code;
/*
@@ -543,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
+ pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+
__sysreg_save_state_nvhe(host_ctxt);
__activate_vm(kern_hyp_va(vcpu->kvm));
@@ -589,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
*/
__debug_switch_to_host(vcpu);
+ if (pmu_switch_needed)
+ __pmu_switch_to_host(host_ctxt);
+
/* Returning to host will clear PSR.I, remask PMR if needed */
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQOFF);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
new file mode 100644
index 000000000000..3da94a5bb6b7
--- /dev/null
+++ b/arch/arm64/kvm/pmu.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Arm Limited
+ * Author: Andrew Murray <Andrew.Murray@arm.com>
+ */
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/kvm_hyp.h>
+
+/*
+ * Given the perf event attributes and system type, determine
+ * if we are going to need to switch counters at guest entry/exit.
+ */
+static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
+{
+ /**
+	 * With VHE the guest kernel runs at EL1 and the host at EL2; if
+	 * user (EL0) is excluded there is no reason to switch
+	 * counters.
+ */
+ if (has_vhe() && attr->exclude_user)
+ return false;
+
+ /* Only switch if attributes are different */
+ return (attr->exclude_host != attr->exclude_guest);
+}
+
+/*
+ * Add events to track that we may want to switch at guest entry/exit
+ * time.
+ */
+void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ if (!kvm_pmu_switch_needed(attr))
+ return;
+
+ if (!attr->exclude_host)
+ ctx->pmu_events.events_host |= set;
+ if (!attr->exclude_guest)
+ ctx->pmu_events.events_guest |= set;
+}
+
+/*
+ * Stop tracking events
+ */
+void kvm_clr_pmu_events(u32 clr)
+{
+ struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data);
+
+ ctx->pmu_events.events_host &= ~clr;
+ ctx->pmu_events.events_guest &= ~clr;
+}
+
+/**
+ * Disable host events, enable guest events
+ */
+bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenclr_el0);
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenset_el0);
+
+ return (pmu->events_host || pmu->events_guest);
+}
+
+/**
+ * Disable guest events, enable host events
+ */
+void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenclr_el0);
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenset_el0);
+}
+
+#define PMEVTYPER_READ_CASE(idx) \
+ case idx: \
+ return read_sysreg(pmevtyper##idx##_el0)
+
+#define PMEVTYPER_WRITE_CASE(idx) \
+ case idx: \
+ write_sysreg(val, pmevtyper##idx##_el0); \
+ break
+
+#define PMEVTYPER_CASES(readwrite) \
+ PMEVTYPER_##readwrite##_CASE(0); \
+ PMEVTYPER_##readwrite##_CASE(1); \
+ PMEVTYPER_##readwrite##_CASE(2); \
+ PMEVTYPER_##readwrite##_CASE(3); \
+ PMEVTYPER_##readwrite##_CASE(4); \
+ PMEVTYPER_##readwrite##_CASE(5); \
+ PMEVTYPER_##readwrite##_CASE(6); \
+ PMEVTYPER_##readwrite##_CASE(7); \
+ PMEVTYPER_##readwrite##_CASE(8); \
+ PMEVTYPER_##readwrite##_CASE(9); \
+ PMEVTYPER_##readwrite##_CASE(10); \
+ PMEVTYPER_##readwrite##_CASE(11); \
+ PMEVTYPER_##readwrite##_CASE(12); \
+ PMEVTYPER_##readwrite##_CASE(13); \
+ PMEVTYPER_##readwrite##_CASE(14); \
+ PMEVTYPER_##readwrite##_CASE(15); \
+ PMEVTYPER_##readwrite##_CASE(16); \
+ PMEVTYPER_##readwrite##_CASE(17); \
+ PMEVTYPER_##readwrite##_CASE(18); \
+ PMEVTYPER_##readwrite##_CASE(19); \
+ PMEVTYPER_##readwrite##_CASE(20); \
+ PMEVTYPER_##readwrite##_CASE(21); \
+ PMEVTYPER_##readwrite##_CASE(22); \
+ PMEVTYPER_##readwrite##_CASE(23); \
+ PMEVTYPER_##readwrite##_CASE(24); \
+ PMEVTYPER_##readwrite##_CASE(25); \
+ PMEVTYPER_##readwrite##_CASE(26); \
+ PMEVTYPER_##readwrite##_CASE(27); \
+ PMEVTYPER_##readwrite##_CASE(28); \
+ PMEVTYPER_##readwrite##_CASE(29); \
+ PMEVTYPER_##readwrite##_CASE(30)
+
+/*
+ * Read a value directly from PMEVTYPER<idx> where idx is 0-30
+ * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ */
+static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
+{
+ switch (idx) {
+ PMEVTYPER_CASES(READ);
+ case ARMV8_PMU_CYCLE_IDX:
+ return read_sysreg(pmccfiltr_el0);
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
+/*
+ * Write a value directly to PMEVTYPER<idx> where idx is 0-30
+ * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
+ */
+static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
+{
+ switch (idx) {
+ PMEVTYPER_CASES(WRITE);
+ case ARMV8_PMU_CYCLE_IDX:
+ write_sysreg(val, pmccfiltr_el0);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Modify ARMv8 PMU events to include EL0 counting
+ */
+static void kvm_vcpu_pmu_enable_el0(unsigned long events)
+{
+ u64 typer;
+ u32 counter;
+
+ for_each_set_bit(counter, &events, 32) {
+ typer = kvm_vcpu_pmu_read_evtype_direct(counter);
+ typer &= ~ARMV8_PMU_EXCLUDE_EL0;
+ kvm_vcpu_pmu_write_evtype_direct(counter, typer);
+ }
+}
+
+/*
+ * Modify ARMv8 PMU events to exclude EL0 counting
+ */
+static void kvm_vcpu_pmu_disable_el0(unsigned long events)
+{
+ u64 typer;
+ u32 counter;
+
+ for_each_set_bit(counter, &events, 32) {
+ typer = kvm_vcpu_pmu_read_evtype_direct(counter);
+ typer |= ARMV8_PMU_EXCLUDE_EL0;
+ kvm_vcpu_pmu_write_evtype_direct(counter, typer);
+ }
+}
+
+/*
+ * On VHE ensure that only guest events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_host_data *host;
+ u32 events_guest, events_host;
+
+ if (!has_vhe())
+ return;
+
+ host_ctxt = vcpu->arch.host_cpu_context;
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ events_guest = host->pmu_events.events_guest;
+ events_host = host->pmu_events.events_host;
+
+ kvm_vcpu_pmu_enable_el0(events_guest);
+ kvm_vcpu_pmu_disable_el0(events_host);
+}
+
+/*
+ * On VHE ensure that only host events have EL0 counting enabled
+ */
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_host_data *host;
+ u32 events_guest, events_host;
+
+ if (!has_vhe())
+ return;
+
+ host_ctxt = vcpu->arch.host_cpu_context;
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ events_guest = host->pmu_events.events_guest;
+ events_host = host->pmu_events.events_host;
+
+ kvm_vcpu_pmu_enable_el0(events_host);
+ kvm_vcpu_pmu_disable_el0(events_guest);
+}
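
Not part of the patch: a hedged userspace sketch of the kind of perf event the switching code above acts on. Opening a counter with exclude_guest set lands it in events_host only, so __pmu_switch_to_guest()/__pmu_switch_to_host() disable it across guest entry and exit; the helper name is made up.

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_host_only_cycles(pid_t pid, int cpu)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1;	/* count only while the host is running */

        return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}
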
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e2a0500cd7a2..1140b4485575 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -20,20 +20,26 @@
*/
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include <kvm/arm_arch_timer.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
+#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
+#include <asm/virt.h>
/* Maximum phys_shift supported for any VM on this host */
static u32 kvm_ipa_limit;
@@ -92,6 +98,14 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_VM_IPA_SIZE:
r = kvm_ipa_limit;
break;
+ case KVM_CAP_ARM_SVE:
+ r = system_supports_sve();
+ break;
+ case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+ case KVM_CAP_ARM_PTRAUTH_GENERIC:
+ r = has_vhe() && system_supports_address_auth() &&
+ system_supports_generic_auth();
+ break;
default:
r = 0;
}
@@ -99,13 +113,148 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
return r;
}
+unsigned int kvm_sve_max_vl;
+
+int kvm_arm_init_sve(void)
+{
+ if (system_supports_sve()) {
+ kvm_sve_max_vl = sve_max_virtualisable_vl;
+
+ /*
+ * The get_sve_reg()/set_sve_reg() ioctl interface will need
+ * to be extended with multiple register slice support in
+ * order to support vector lengths greater than
+ * SVE_VL_ARCH_MAX:
+ */
+ if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX))
+ kvm_sve_max_vl = SVE_VL_ARCH_MAX;
+
+ /*
+ * Don't even try to make use of vector lengths that
+ * aren't available on all CPUs, for now:
+ */
+ if (kvm_sve_max_vl < sve_max_vl)
+ pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
+ kvm_sve_max_vl);
+ }
+
+ return 0;
+}
+
+static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ /* Verify that KVM startup enforced this when SVE was detected: */
+ if (WARN_ON(!has_vhe()))
+ return -EINVAL;
+
+ vcpu->arch.sve_max_vl = kvm_sve_max_vl;
+
+ /*
+ * Userspace can still customize the vector lengths by writing
+ * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
+ * kvm_arm_vcpu_finalize(), which freezes the configuration.
+ */
+ vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+
+ return 0;
+}
+
+/*
+ * Finalize vcpu's maximum SVE vector length, allocating
+ * vcpu->arch.sve_state as necessary.
+ */
+static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+{
+ void *buf;
+ unsigned int vl;
+
+ vl = vcpu->arch.sve_max_vl;
+
+ /*
+	 * Responsibility for these properties is shared between
+ * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
+ * set_sve_vls(). Double-check here just to be sure:
+ */
+ if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl ||
+ vl > SVE_VL_ARCH_MAX))
+ return -EIO;
+
+ buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ vcpu->arch.sve_state = buf;
+ vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+ return 0;
+}
+
+int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
+{
+ switch (feature) {
+ case KVM_ARM_VCPU_SVE:
+ if (!vcpu_has_sve(vcpu))
+ return -EINVAL;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ return kvm_vcpu_finalize_sve(vcpu);
+ }
+
+ return -EINVAL;
+}
+
+bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+ return false;
+
+ return true;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+ kfree(vcpu->arch.sve_state);
+}
+
+static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+{
+ if (vcpu_has_sve(vcpu))
+ memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
+}
+
+static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+{
+ /* Support ptrauth only if the system supports these capabilities. */
+ if (!has_vhe())
+ return -EINVAL;
+
+ if (!system_supports_address_auth() ||
+ !system_supports_generic_auth())
+ return -EINVAL;
+ /*
+ * For now make sure that both address/generic pointer authentication
+ * features are requested by the userspace together.
+ */
+ if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+ !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features))
+ return -EINVAL;
+
+ vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+ return 0;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
- * values.
+ * values, except for registers whose reset is deferred until
+ * kvm_arm_vcpu_finalize().
*
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI
@@ -131,6 +280,22 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (loaded)
kvm_arch_vcpu_put(vcpu);
+ if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+ if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) {
+ ret = kvm_vcpu_enable_sve(vcpu);
+ if (ret)
+ goto out;
+ }
+ } else {
+ kvm_vcpu_reset_sve(vcpu);
+ }
+
+ if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
+ test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
+ if (kvm_vcpu_enable_ptrauth(vcpu))
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
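
Not part of the patch: a hedged userspace sketch of the vcpu SVE lifecycle implied by the reset code above: request the feature at KVM_ARM_VCPU_INIT, optionally adjust KVM_REG_ARM64_SVE_VLS, then issue KVM_ARM_VCPU_FINALIZE before touching other SVE registers or calling KVM_RUN. The helper name is made up and error handling is minimal.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vcpu_setup_sve(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;
        int feature = KVM_ARM_VCPU_SVE;

        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
                return -1;

        init.features[0] |= 1u << KVM_ARM_VCPU_SVE;
        if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
                return -1;

        /* (Optionally read/restrict KVM_REG_ARM64_SVE_VLS here.) */

        /* Freeze the SVE configuration; sve_state is allocated here. */
        return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}
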
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 539feecda5b8..857b226bcdde 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val |= p->regval & ARMV8_PMU_PMCR_MASK;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = __vcpu_sys_reg(vcpu, PMCR_EL0)
@@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
if (p->is_write) {
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
}
@@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* accessing PMCNTENSET_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
kvm_pmu_enable_counter(vcpu, val);
+ kvm_vcpu_pmu_restore_guest(vcpu);
} else {
/* accessing PMCNTENCLR_EL0 */
__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
@@ -1007,6 +1010,37 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+static bool trap_ptrauth(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ kvm_arm_vcpu_ptrauth_trap(vcpu);
+
+ /*
+ * Return false for both cases as we never skip the trapped
+ * instruction:
+ *
+ * - Either we re-execute the same key register access instruction
+ * after enabling ptrauth.
+ * - Or an UNDEF is injected as ptrauth is not supported/enabled.
+ */
+ return false;
+}
+
+static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+}
+
+#define __PTRAUTH_KEY(k) \
+ { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \
+ .visibility = ptrauth_visibility}
+
+#define PTRAUTH_KEY(k) \
+ __PTRAUTH_KEY(k ## KEYLO_EL1), \
+ __PTRAUTH_KEY(k ## KEYHI_EL1)
+
static bool access_arch_timer(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -1044,25 +1078,20 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
-static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
+static u64 read_id_reg(const struct kvm_vcpu *vcpu,
+ struct sys_reg_desc const *r, bool raz)
{
u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
(u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
- if (id == SYS_ID_AA64PFR0_EL1) {
- if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
- kvm_debug("SVE unsupported for guests, suppressing\n");
-
+ if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) {
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
- } else if (id == SYS_ID_AA64ISAR1_EL1) {
- const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_API_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
- (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
- if (val & ptrauth_mask)
- kvm_debug("ptrauth unsupported for guests, suppressing\n");
- val &= ~ptrauth_mask;
+ } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
+ val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_API_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
}
return val;
@@ -1078,7 +1107,7 @@ static bool __access_id_reg(struct kvm_vcpu *vcpu,
if (p->is_write)
return write_to_read_only(vcpu, p, r);
- p->regval = read_id_reg(r, raz);
+ p->regval = read_id_reg(vcpu, r, raz);
return true;
}
@@ -1100,6 +1129,81 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+/* Visibility overrides for SVE-specific control registers */
+static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (vcpu_has_sve(vcpu))
+ return 0;
+
+ return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
+}
+
+/* Visibility overrides for SVE-specific ID registers */
+static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (vcpu_has_sve(vcpu))
+ return 0;
+
+ return REG_HIDDEN_USER;
+}
+
+/* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
+static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1);
+}
+
+static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, rd);
+
+ p->regval = guest_id_aa64zfr0_el1(vcpu);
+ return true;
+}
+
+static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ u64 val;
+
+ if (WARN_ON(!vcpu_has_sve(vcpu)))
+ return -ENOENT;
+
+ val = guest_id_aa64zfr0_el1(vcpu);
+ return reg_to_user(uaddr, &val, reg->id);
+}
+
+static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ const u64 id = sys_reg_to_index(rd);
+ int err;
+ u64 val;
+
+ if (WARN_ON(!vcpu_has_sve(vcpu)))
+ return -ENOENT;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (val != guest_id_aa64zfr0_el1(vcpu))
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* cpufeature ID register user accessors
*
@@ -1107,16 +1211,18 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
* are stored, and for set_id_reg() we don't allow the effective value
* to be changed.
*/
-static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+static int __get_id_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, void __user *uaddr,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
- const u64 val = read_id_reg(rd, raz);
+ const u64 val = read_id_reg(vcpu, rd, raz);
return reg_to_user(uaddr, &val, id);
}
-static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+static int __set_id_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd, void __user *uaddr,
bool raz)
{
const u64 id = sys_reg_to_index(rd);
@@ -1128,7 +1234,7 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
return err;
/* This is what we mean by invariant: you can't change it. */
- if (val != read_id_reg(rd, raz))
+ if (val != read_id_reg(vcpu, rd, raz))
return -EINVAL;
return 0;
@@ -1137,25 +1243,25 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(rd, uaddr, false);
+ return __get_id_reg(vcpu, rd, uaddr, false);
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(rd, uaddr, false);
+ return __set_id_reg(vcpu, rd, uaddr, false);
}
static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(rd, uaddr, true);
+ return __get_id_reg(vcpu, rd, uaddr, true);
}
static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(rd, uaddr, true);
+ return __set_id_reg(vcpu, rd, uaddr, true);
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -1343,7 +1449,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_SANITISED(ID_AA64PFR1_EL1),
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
- ID_UNALLOCATED(4,4),
+ { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
ID_UNALLOCATED(4,5),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
@@ -1380,10 +1486,17 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+ { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
+ PTRAUTH_KEY(APIA),
+ PTRAUTH_KEY(APIB),
+ PTRAUTH_KEY(APDA),
+ PTRAUTH_KEY(APDB),
+ PTRAUTH_KEY(APGA),
+
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
@@ -1924,6 +2037,12 @@ static void perform_access(struct kvm_vcpu *vcpu,
{
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_guest(vcpu, r)) {
+ kvm_inject_undefined(vcpu);
+ return;
+ }
+
/*
* Not having an accessor means that we have configured a trap
* that we don't know how to handle. This certainly qualifies
@@ -2435,6 +2554,10 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_user(vcpu, r))
+ return -ENOENT;
+
if (r->get_user)
return (r->get_user)(vcpu, r, reg, uaddr);
@@ -2456,6 +2579,10 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
+ /* Check for regs disabled by runtime config */
+ if (sysreg_hidden_from_user(vcpu, r))
+ return -ENOENT;
+
if (r->set_user)
return (r->set_user)(vcpu, r, reg, uaddr);
@@ -2512,7 +2639,8 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
return true;
}
-static int walk_one_sys_reg(const struct sys_reg_desc *rd,
+static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd,
u64 __user **uind,
unsigned int *total)
{
@@ -2523,6 +2651,9 @@ static int walk_one_sys_reg(const struct sys_reg_desc *rd,
if (!(rd->reg || rd->get_user))
return 0;
+ if (sysreg_hidden_from_user(vcpu, rd))
+ return 0;
+
if (!copy_reg_to_user(rd, uind))
return -EFAULT;
@@ -2551,9 +2682,9 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
if (cmp <= 0)
- err = walk_one_sys_reg(i1, &uind, &total);
+ err = walk_one_sys_reg(vcpu, i1, &uind, &total);
else
- err = walk_one_sys_reg(i2, &uind, &total);
+ err = walk_one_sys_reg(vcpu, i2, &uind, &total);
if (err)
return err;
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index 3b1bc7f01d0b..2be99508dcb9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -64,8 +64,15 @@ struct sys_reg_desc {
const struct kvm_one_reg *reg, void __user *uaddr);
int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr);
+
+ /* Return mask of REG_* runtime visibility overrides */
+ unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd);
};
+#define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */
+#define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */
+
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
@@ -102,6 +109,24 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r
__vcpu_sys_reg(vcpu, r->reg) = r->val;
}
+static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (likely(!r->visibility))
+ return false;
+
+ return r->visibility(vcpu, r) & REG_HIDDEN_GUEST;
+}
+
+static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (likely(!r->visibility))
+ return false;
+
+ return r->visibility(vcpu, r) & REG_HIDDEN_USER;
+}
+
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
const struct sys_reg_desc *i2)
{
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 5540a1638baf..33c2a4abda04 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -24,7 +24,7 @@ CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18 -fomit-frame-pointer
-CFLAGS_REMOVE_atomic_ll_sc.o := -pg
+CFLAGS_REMOVE_atomic_ll_sc.o := $(CC_FLAGS_FTRACE)
GCOV_PROFILE_atomic_ll_sc.o := n
KASAN_SANITIZE_atomic_ll_sc.o := n
KCOV_INSTRUMENT_atomic_ll_sc.o := n
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1a7e92ab69eb..0cb0e09995e1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,7 +148,7 @@ static inline bool is_ttbr1_addr(unsigned long addr)
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
-void show_pte(unsigned long addr)
+static void show_pte(unsigned long addr)
{
struct mm_struct *mm;
pgd_t *pgdp;
@@ -810,13 +810,12 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
-asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
- unsigned int esr,
- struct pt_regs *regs)
+asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
+ unsigned int esr,
+ struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
- int rv;
/*
* Tell lockdep we disabled irqs in entry.S. Do nothing if they were
@@ -828,17 +827,12 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
- if (!inf->fn(addr_if_watchpoint, esr, regs)) {
- rv = 1;
- } else {
+ if (inf->fn(addr_if_watchpoint, esr, regs)) {
arm64_notify_die(inf->name, regs,
inf->sig, inf->code, (void __user *)pc, esr);
- rv = 0;
}
if (interrupts_enabled(regs))
trace_hardirqs_on();
-
- return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 7cae155e81a5..d2adffb81b5d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -48,7 +48,7 @@
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
@@ -377,7 +377,7 @@ void __init arm64_memblock_init(void)
base + size > memblock_start_of_DRAM() +
linear_region_size,
"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
- initrd_start = 0;
+ phys_initrd_size = 0;
} else {
memblock_remove(base, size); /* clear MEMBLOCK_ flags */
memblock_add(base, size);
@@ -440,6 +440,7 @@ void __init bootmem_init(void)
early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
max_pfn = max_low_pfn = max;
+ min_low_pfn = min;
arm64_numa_init();
/*
@@ -535,7 +536,7 @@ void __init mem_init(void)
else
swiotlb_force = SWIOTLB_NO_FORCE;
- set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+ set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
#ifndef CONFIG_SPARSEMEM_VMEMMAP
free_unused_memmap();
@@ -577,24 +578,11 @@ void free_initmem(void)
}
#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd __initdata;
-
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- free_reserved_area((void *)start, (void *)end, 0, "initrd");
- memblock_free(__virt_to_phys(start), end - start);
- }
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
- keep_initrd = 1;
- return 1;
+ free_reserved_area((void *)start, (void *)end, 0, "initrd");
+ memblock_free(__virt_to_phys(start), end - start);
}
-
-__setup("keepinitrd", keepinitrd_setup);
#endif
/*
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e97f018ff740..a170c6369a68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
@@ -97,7 +97,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
}
EXPORT_SYMBOL(phys_mem_access_prot);
-static phys_addr_t __init early_pgtable_alloc(void)
+static phys_addr_t __init early_pgtable_alloc(int shift)
{
phys_addr_t phys;
void *ptr;
@@ -174,7 +174,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long next;
@@ -184,7 +184,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
if (pmd_none(pmd)) {
phys_addr_t pte_phys;
BUG_ON(!pgtable_alloc);
- pte_phys = pgtable_alloc();
+ pte_phys = pgtable_alloc(PAGE_SHIFT);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
pmd = READ_ONCE(*pmdp);
}
@@ -208,7 +208,7 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void), int flags)
+ phys_addr_t (*pgtable_alloc)(int), int flags)
{
unsigned long next;
pmd_t *pmdp;
@@ -246,7 +246,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
unsigned long end, phys_addr_t phys,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void), int flags)
+ phys_addr_t (*pgtable_alloc)(int), int flags)
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
@@ -258,7 +258,7 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
if (pud_none(pud)) {
phys_addr_t pmd_phys;
BUG_ON(!pgtable_alloc);
- pmd_phys = pgtable_alloc();
+ pmd_phys = pgtable_alloc(PMD_SHIFT);
__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
pud = READ_ONCE(*pudp);
}
@@ -294,7 +294,7 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
phys_addr_t phys, pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long next;
@@ -304,7 +304,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
if (pgd_none(pgd)) {
phys_addr_t pud_phys;
BUG_ON(!pgtable_alloc);
- pud_phys = pgtable_alloc();
+ pud_phys = pgtable_alloc(PUD_SHIFT);
__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
pgd = READ_ONCE(*pgdp);
}
@@ -345,7 +345,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
- phys_addr_t (*pgtable_alloc)(void),
+ phys_addr_t (*pgtable_alloc)(int),
int flags)
{
unsigned long addr, length, end, next;
@@ -371,17 +371,36 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
} while (pgdp++, addr = next, addr != end);
}
-static phys_addr_t pgd_pgtable_alloc(void)
+static phys_addr_t __pgd_pgtable_alloc(int shift)
{
void *ptr = (void *)__get_free_page(PGALLOC_GFP);
- if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
- BUG();
+ BUG_ON(!ptr);
/* Ensure the zeroed page is visible to the page table walker */
dsb(ishst);
return __pa(ptr);
}
+static phys_addr_t pgd_pgtable_alloc(int shift)
+{
+ phys_addr_t pa = __pgd_pgtable_alloc(shift);
+
+ /*
+ * Call the proper page table ctor in case we later need to
+ * call core mm functions like apply_to_page_range() on
+ * this pre-allocated page table.
+ *
+ * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if the pmd is
+ * folded, in which case pgtable_pmd_page_ctor() is a no-op.
+ */
+ if (shift == PAGE_SHIFT)
+ BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
+ else if (shift == PMD_SHIFT)
+ BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));
+
+ return pa;
+}
+
/*
* This function can only be used to modify existing table entries,
* without allocating new levels of table. Note that this permits the
@@ -583,7 +602,7 @@ static int __init map_entry_trampoline(void)
/* Map only the text into the trampoline page table */
memset(tramp_pg_dir, 0, PGD_SIZE);
__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
- prot, pgd_pgtable_alloc, 0);
+ prot, __pgd_pgtable_alloc, 0);
/* Map both the text and data into the kernel page table */
__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
@@ -1046,8 +1065,8 @@ int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
int flags = 0;
@@ -1055,9 +1074,9 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
- size, PAGE_KERNEL, pgd_pgtable_alloc, flags);
+ size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
- altmap, want_memblock);
+ restrictions);
}
#endif
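
The mmu.c hunks above change the pgtable_alloc callback to take the shift of the level being allocated, so the allocator can run the constructor that matches that level (PAGE_SHIFT for PTE tables, PMD_SHIFT for PMD tables), while __pgd_pgtable_alloc skips the ctors entirely. A rough userspace sketch of that callback shape, illustrative only, with printf standing in for the ctor calls:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21

/* The allocator callback now receives the level it is allocating for. */
typedef void *(*pgtable_alloc_t)(int shift);

static void *pgd_pgtable_alloc(int shift)
{
	void *table = calloc(1, 1 << PAGE_SHIFT);

	assert(table);

	/* Run the ctor that matches the table level, as in the patch. */
	if (shift == PAGE_SHIFT)
		printf("init PTE-table lock state\n");
	else if (shift == PMD_SHIFT)
		printf("init PMD-table lock state\n");

	return table;
}

static void map_range(pgtable_alloc_t alloc)
{
	void *pte_table = alloc(PAGE_SHIFT);
	void *pmd_table = alloc(PMD_SHIFT);

	free(pte_table);
	free(pmd_table);
}

int main(void)
{
	map_range(pgd_pgtable_alloc);
	return 0;
}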
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 06a6f264f2dd..5202f63c29c9 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -124,7 +124,7 @@ static void __init setup_node_to_cpumask_map(void)
}
/*
- * Set the cpu to node and mem mapping
+ * Set the cpu to node and mem mapping
*/
void numa_store_cpu_info(unsigned int cpu)
{
@@ -200,7 +200,7 @@ void __init setup_per_cpu_areas(void)
#endif
/**
- * numa_add_memblk - Set node id to memblk
+ * numa_add_memblk() - Set node id to memblk
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
* @end: End address of the new memblk
@@ -223,7 +223,7 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
return ret;
}
-/**
+/*
* Initialize NODE_DATA for a node on the local memory
*/
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
@@ -257,7 +257,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
-/**
+/*
* numa_free_distance
*
* The current table is freed.
@@ -277,10 +277,8 @@ void __init numa_free_distance(void)
numa_distance = NULL;
}
-/**
- *
+/*
* Create a new NUMA distance table.
- *
*/
static int __init numa_alloc_distance(void)
{
@@ -311,7 +309,7 @@ static int __init numa_alloc_distance(void)
}
/**
- * numa_set_distance - Set inter node NUMA distance from node to node.
+ * numa_set_distance() - Set inter node NUMA distance from node to node.
* @from: the 'from' node to set distance
* @to: the 'to' node to set distance
* @distance: NUMA distance
@@ -321,7 +319,6 @@ static int __init numa_alloc_distance(void)
*
* If @from or @to is higher than the highest known node or lower than zero
* or @distance doesn't make sense, the call is ignored.
- *
*/
void __init numa_set_distance(int from, int to, int distance)
{
@@ -347,7 +344,7 @@ void __init numa_set_distance(int from, int to, int distance)
numa_distance[from * numa_distance_cnt + to] = distance;
}
-/**
+/*
* Return NUMA distance @from to @to
*/
int __node_distance(int from, int to)
@@ -422,13 +419,15 @@ out_free_distance:
}
/**
- * dummy_numa_init - Fallback dummy NUMA init
+ * dummy_numa_init() - Fallback dummy NUMA init
*
* Used if there's no underlying NUMA architecture, NUMA initialization
* fails, or NUMA is disabled on the command line.
*
* Must online at least one node (node 0) and add memory blocks that cover all
* allowed memory. It is unlikely that this function fails.
+ *
+ * Return: 0 on success, -errno on failure.
*/
static int __init dummy_numa_init(void)
{
@@ -454,9 +453,9 @@ static int __init dummy_numa_init(void)
}
/**
- * arm64_numa_init - Initialize NUMA
+ * arm64_numa_init() - Initialize NUMA
*
- * Try each configured NUMA initialization method until one succeeds. The
+ * Try each configured NUMA initialization method until one succeeds. The
* last fallback is a dummy single-node config encompassing the whole memory.
*/
void __init arm64_numa_init(void)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index aa0817c9c4c3..fdd626d34274 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -65,24 +65,25 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
- mrs x5, cpacr_el1
- mrs x6, tcr_el1
- mrs x7, vbar_el1
- mrs x8, mdscr_el1
- mrs x9, oslsr_el1
- mrs x10, sctlr_el1
+ mrs x5, osdlr_el1
+ mrs x6, cpacr_el1
+ mrs x7, tcr_el1
+ mrs x8, vbar_el1
+ mrs x9, mdscr_el1
+ mrs x10, oslsr_el1
+ mrs x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x11, tpidr_el1
+ mrs x12, tpidr_el1
alternative_else
- mrs x11, tpidr_el2
+ mrs x12, tpidr_el2
alternative_endif
- mrs x12, sp_el0
+ mrs x13, sp_el0
stp x2, x3, [x0]
- stp x4, xzr, [x0, #16]
- stp x5, x6, [x0, #32]
- stp x7, x8, [x0, #48]
- stp x9, x10, [x0, #64]
- stp x11, x12, [x0, #80]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ stp x12, x13, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -105,8 +106,8 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
- mrs x5, tcr_el1
- bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+ mrs x7, tcr_el1
+ bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
@@ -130,6 +131,7 @@ alternative_endif
/*
* Restore oslsr_el1 by writing oslar_el1
*/
+ msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 783de51a6c4e..76606e87233f 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,11 +100,9 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
- aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
- AARCH64_INSN_PRFM_TARGET_##target, \
- AARCH64_INSN_PRFM_POLICY_##policy)
+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+ aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index aaddc0217e73..df845cee438e 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -365,7 +365,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
BPF_CLASS(code) == BPF_JMP;
const bool isdw = BPF_SIZE(code) == BPF_DW;
- u8 jmp_cond;
+ u8 jmp_cond, reg;
s32 jmp_offset;
#define check_imm(bits, imm) do { \
@@ -756,19 +756,28 @@ emit_cond_jmp:
break;
}
break;
+
/* STX XADD: lock *(u32 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_W:
/* STX XADD: lock *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
- emit_a64_mov_i(1, tmp, off, ctx);
- emit(A64_ADD(1, tmp, tmp, dst), ctx);
- emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
- emit(A64_LDXR(isdw, tmp2, tmp), ctx);
- emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
- emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
- jmp_offset = -3;
- check_imm19(jmp_offset);
- emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ if (!off) {
+ reg = dst;
+ } else {
+ emit_a64_mov_i(1, tmp, off, ctx);
+ emit(A64_ADD(1, tmp, tmp, dst), ctx);
+ reg = tmp;
+ }
+ if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+ emit(A64_STADD(isdw, reg, src), ctx);
+ } else {
+ emit(A64_LDXR(isdw, tmp2, reg), ctx);
+ emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+ emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+ jmp_offset = -3;
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+ }
break;
default:
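
The XADD rework above keeps the same semantics but emits a single STADD when the CPU advertises LSE atomics, and falls back to the LDXR/ADD/STXR retry loop otherwise. Both forms behave like a relaxed atomic fetch-and-add whose old value is discarded; a userspace C11 equivalent of that semantic (not the JIT itself):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Semantics of BPF_STX | BPF_XADD: *(u64 *)(dst + off) += src, atomically.
 * The arm64 JIT now lowers this to one STADD with LSE atomics, or to an
 * LDXR/ADD/STXR loop without them; both match this relaxed fetch-add. */
static void xadd64(_Atomic uint64_t *dst, uint64_t src)
{
	atomic_fetch_add_explicit(dst, src, memory_order_relaxed);
}

int main(void)
{
	_Atomic uint64_t counter = 0;

	xadd64(&counter, 5);
	xadd64(&counter, 7);
	printf("%llu\n", (unsigned long long)atomic_load(&counter));
	return 0;
}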
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index e5cd3c5f8399..eeb0471268a0 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -20,6 +20,7 @@ config C6X
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
select ARCH_NO_COHERENT_DMA_MMAP
+ select MMU_GATHER_NO_RANGE if MMU
config MMU
def_bool n
@@ -27,9 +28,6 @@ config MMU
config FPU
def_bool n
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 249c9f6f26dc..2162eb32dcec 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -23,13 +23,13 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += mmu.h
generic-y += mmu_context.h
generic-y += pci.h
generic-y += percpu.h
generic-y += pgalloc.h
generic-y += preempt.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
generic-y += tlbflush.h
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
index 15ba8599858e..5bcdcb651b19 100644
--- a/arch/c6x/include/asm/syscall.h
+++ b/arch/c6x/include/asm/syscall.h
@@ -11,6 +11,7 @@
#ifndef __ASM_C6X_SYSCALL_H
#define __ASM_C6X_SYSCALL_H
+#include <uapi/linux/audit.h>
#include <linux/err.h>
#include <linux/sched.h>
@@ -69,4 +70,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->a9 = *args;
}
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_C6XBE : AUDIT_ARCH_C6X;
+}
+
#endif /* __ASM_C6X_SYSCALLS_H */
diff --git a/arch/c6x/include/asm/tlb.h b/arch/c6x/include/asm/tlb.h
index 34525dea1356..240ba0febb57 100644
--- a/arch/c6x/include/asm/tlb.h
+++ b/arch/c6x/include/asm/tlb.h
@@ -2,8 +2,6 @@
#ifndef _ASM_C6X_TLB_H
#define _ASM_C6X_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif /* _ASM_C6X_TLB_H */
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index fe582c3a1794..573242b160e1 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -68,15 +68,3 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void __init free_initmem(void)
-{
- free_initmem_default(-1);
-}
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 725a115759c9..ce0799077f3b 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -1,6 +1,7 @@
config CSKY
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
@@ -29,15 +30,20 @@ config CSKY
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select HAVE_PERF_EVENTS
- select HAVE_C_RECORDMCOUNT
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
+ select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
select OF
@@ -92,9 +98,6 @@ config GENERIC_HWEIGHT
config MMU
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
diff --git a/arch/csky/Makefile b/arch/csky/Makefile
index 3607a6e8f66c..6b87f6c22ad6 100644
--- a/arch/csky/Makefile
+++ b/arch/csky/Makefile
@@ -36,7 +36,7 @@ endif
ifneq ($(CSKYABI),)
MCPU_STR = $(CPUTYPE)$(FPUEXT)$(VDSPEXT)$(TEEEXT)
-KBUILD_CFLAGS += -mcpu=$(MCPU_STR)
+KBUILD_CFLAGS += -mcpu=$(CPUTYPE) -Wa,-mcpu=$(MCPU_STR)
KBUILD_CFLAGS += -DCSKYCPU_DEF_NAME=\"$(MCPU_STR)\"
KBUILD_CFLAGS += -msoft-float -mdiv
KBUILD_CFLAGS += -fno-tree-vectorize
diff --git a/arch/csky/abiv1/inc/abi/ckmmu.h b/arch/csky/abiv1/inc/abi/ckmmu.h
index 3a002017bebe..81f37715c0d2 100644
--- a/arch/csky/abiv1/inc/abi/ckmmu.h
+++ b/arch/csky/abiv1/inc/abi/ckmmu.h
@@ -40,6 +40,26 @@ static inline void write_mmu_entryhi(int value)
cpwcr("cpcr4", value);
}
+static inline unsigned long read_mmu_msa0(void)
+{
+ return cprcr("cpcr30");
+}
+
+static inline void write_mmu_msa0(unsigned long value)
+{
+ cpwcr("cpcr30", value);
+}
+
+static inline unsigned long read_mmu_msa1(void)
+{
+ return cprcr("cpcr31");
+}
+
+static inline void write_mmu_msa1(unsigned long value)
+{
+ cpwcr("cpcr31", value);
+}
+
/*
* TLB operations.
*/
@@ -65,11 +85,11 @@ static inline void tlb_invalid_indexed(void)
static inline void setup_pgd(unsigned long pgd, bool kernel)
{
- cpwcr("cpcr29", pgd);
+ cpwcr("cpcr29", pgd | BIT(0));
}
static inline unsigned long get_pgd(void)
{
- return cprcr("cpcr29");
+ return cprcr("cpcr29") & ~BIT(0);
}
#endif /* __ASM_CSKY_CKMMUV1_H */
diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
index 3f3faab3d747..7ab78bd0f3b1 100644
--- a/arch/csky/abiv1/inc/abi/entry.h
+++ b/arch/csky/abiv1/inc/abi/entry.h
@@ -16,9 +16,6 @@
#define LSAVE_A4 40
#define LSAVE_A5 44
-#define EPC_INCREASE 2
-#define EPC_KEEP 0
-
.macro USPTOKSP
mtcr sp, ss1
mfcr sp, ss0
@@ -29,10 +26,6 @@
mfcr sp, ss1
.endm
-.macro INCTRAP rx
- addi \rx, EPC_INCREASE
-.endm
-
.macro SAVE_ALL epc_inc
mtcr r13, ss2
mfcr r13, epsr
@@ -150,11 +143,35 @@
cpwcr \rx, cpcr8
.endm
-.macro SETUP_MMU rx
- lrw \rx, PHYS_OFFSET | 0xe
- cpwcr \rx, cpcr30
- lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe
- cpwcr \rx, cpcr31
+.macro SETUP_MMU
+ /* Init psr and enable ee */
+ lrw r6, DEFAULT_PSR_VALUE
+ mtcr r6, psr
+ psrset ee
+
+ /* Select MMU as co-processor */
+ cpseti cp15
+
+ /*
+ * cpcr30 format:
+ * 31 - 29 | 28 - 4 | 3 | 2 | 1 | 0
+ * BA Reserved C D V
+ */
+ cprcr r6, cpcr30
+ lsri r6, 28
+ lsli r6, 28
+ addi r6, 0xe
+ cpwcr r6, cpcr30
+
+ lsri r6, 28
+ addi r6, 2
+ lsli r6, 28
+ addi r6, 0xe
+ cpwcr r6, cpcr31
.endm
+.macro ANDI_R3 rx, imm
+ lsri \rx, 3
+ andi \rx, (\imm >> 3)
+.endm
#endif /* __ASM_CSKY_ENTRY_H */
diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h
index 876689291b71..104707fbdcc1 100644
--- a/arch/csky/abiv1/inc/abi/regdef.h
+++ b/arch/csky/abiv1/inc/abi/regdef.h
@@ -5,9 +5,8 @@
#define __ASM_CSKY_REGDEF_H
#define syscallid r1
-#define r11_sig r11
-
#define regs_syscallid(regs) regs->regs[9]
+#define regs_fp(regs) regs->regs[2]
/*
* PSR format:
@@ -23,4 +22,6 @@
#define SYSTRACE_SAVENUM 2
+#define TRAP0_SIZE 2
+
#endif /* __ASM_CSKY_REGDEF_H */
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index d22c95ffc74d..5bb887b275e1 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -34,10 +34,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
{
unsigned long addr, pfn;
struct page *page;
- void *va;
-
- if (!(vma->vm_flags & VM_EXEC))
- return;
pfn = pte_pfn(*pte);
if (unlikely(!pfn_valid(pfn)))
@@ -47,14 +43,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
if (page == ZERO_PAGE(0))
return;
- va = page_address(page);
- addr = (unsigned long) va;
-
- if (va == NULL && PageHighMem(page))
- addr = (unsigned long) kmap_atomic(page);
+ addr = (unsigned long) kmap_atomic(page);
cache_wbinv_range(addr, addr + PAGE_SIZE);
- if (va == NULL && PageHighMem(page))
- kunmap_atomic((void *) addr);
+ kunmap_atomic((void *) addr);
}
diff --git a/arch/csky/abiv2/inc/abi/ckmmu.h b/arch/csky/abiv2/inc/abi/ckmmu.h
index 97230ad9427c..e4480e6bc3b3 100644
--- a/arch/csky/abiv2/inc/abi/ckmmu.h
+++ b/arch/csky/abiv2/inc/abi/ckmmu.h
@@ -42,6 +42,26 @@ static inline void write_mmu_entryhi(int value)
mtcr("cr<4, 15>", value);
}
+static inline unsigned long read_mmu_msa0(void)
+{
+ return mfcr("cr<30, 15>");
+}
+
+static inline void write_mmu_msa0(unsigned long value)
+{
+ mtcr("cr<30, 15>", value);
+}
+
+static inline unsigned long read_mmu_msa1(void)
+{
+ return mfcr("cr<31, 15>");
+}
+
+static inline void write_mmu_msa1(unsigned long value)
+{
+ mtcr("cr<31, 15>", value);
+}
+
/*
* TLB operations.
*/
@@ -70,18 +90,16 @@ static inline void tlb_invalid_indexed(void)
mtcr("cr<8, 15>", 0x02000000);
}
-/* setup hardrefil pgd */
-static inline unsigned long get_pgd(void)
-{
- return mfcr("cr<29, 15>");
-}
-
static inline void setup_pgd(unsigned long pgd, bool kernel)
{
if (kernel)
- mtcr("cr<28, 15>", pgd);
+ mtcr("cr<28, 15>", pgd | BIT(0));
else
- mtcr("cr<29, 15>", pgd);
+ mtcr("cr<29, 15>", pgd | BIT(0));
}
+static inline unsigned long get_pgd(void)
+{
+ return mfcr("cr<29, 15>") & ~BIT(0);
+}
#endif /* __ASM_CSKY_CKMMUV2_H */
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
index edc5cc04c4de..9897a16b45e5 100644
--- a/arch/csky/abiv2/inc/abi/entry.h
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -14,18 +14,11 @@
#define LSAVE_A2 32
#define LSAVE_A3 36
-#define EPC_INCREASE 4
-#define EPC_KEEP 0
-
#define KSPTOUSP
#define USPTOKSP
#define usp cr<14, 1>
-.macro INCTRAP rx
- addi \rx, EPC_INCREASE
-.endm
-
.macro SAVE_ALL epc_inc
subi sp, 152
stw tls, (sp, 0)
@@ -169,10 +162,80 @@
mtcr \rx, cr<8, 15>
.endm
-.macro SETUP_MMU rx
- lrw \rx, PHYS_OFFSET | 0xe
- mtcr \rx, cr<30, 15>
- lrw \rx, (PHYS_OFFSET + 0x20000000) | 0xe
- mtcr \rx, cr<31, 15>
+.macro SETUP_MMU
+ /* Init psr and enable ee */
+ lrw r6, DEFAULT_PSR_VALUE
+ mtcr r6, psr
+ psrset ee
+
+ /* Invalidate I/Dcache, BTB, BHT */
+ movi r6, 7
+ lsli r6, 16
+ addi r6, (1<<4) | 3
+ mtcr r6, cr17
+
+ /* Invalidate all TLB entries */
+ bgeni r6, 26
+ mtcr r6, cr<8, 15> /* Set MCIR */
+
+ /* Check MMU on/off */
+ mfcr r6, cr18
+ btsti r6, 0
+ bt 1f
+
+ /* MMU off: set up a mapping TLB entry */
+ movi r6, 0
+ mtcr r6, cr<6, 15> /* Set MPR with 4K page size */
+
+ grs r6, 1f /* Get current pa by PC */
+ bmaski r7, (PAGE_SHIFT + 1) /* r7 = 0x1fff */
+ andn r6, r7
+ mtcr r6, cr<4, 15> /* Set MEH */
+
+ mov r8, r6
+ movi r7, 0x00000006
+ or r8, r7
+ mtcr r8, cr<2, 15> /* Set MEL0 */
+ movi r7, 0x00001006
+ or r8, r7
+ mtcr r8, cr<3, 15> /* Set MEL1 */
+
+ bgeni r8, 28
+ mtcr r8, cr<8, 15> /* Set MCIR to write TLB */
+
+ br 2f
+1:
+ /*
+ * MMU on: use the original MSA value from the bootloader
+ *
+ * cr<30/31, 15> MSA register format:
+ * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * BA Reserved SH WA B SO SEC C D V
+ */
+ mfcr r6, cr<30, 15> /* Get MSA0 */
+2:
+ lsri r6, 28
+ lsli r6, 28
+ addi r6, 0x1ce
+ mtcr r6, cr<30, 15> /* Set MSA0 */
+
+ lsri r6, 28
+ addi r6, 2
+ lsli r6, 28
+ addi r6, 0x1ce
+ mtcr r6, cr<31, 15> /* Set MSA1 */
+
+ /* enable MMU */
+ mfcr r6, cr18
+ bseti r6, 0
+ mtcr r6, cr18
+
+ jmpi 3f /* jump to va */
+3:
+.endm
+
+.macro ANDI_R3 rx, imm
+ lsri \rx, 3
+ andi \rx, (\imm >> 3)
.endm
#endif /* __ASM_CSKY_ENTRY_H */
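
The ANDI_R3 macro added above tests a TIF mask by shifting the flags right by three and and-ing with the mask shifted the same way, presumably so the immediate fits the andi encodings of both ABIs. The trick is only an identity when the mask has no bits below bit 3, which holds for the syscall-work bits it is used with in entry.S; a small host-side check of that identity using the TIF values from thread_info.h:

#include <assert.h>
#include <stdio.h>

#define TIF_SYSCALL_TRACE      3
#define TIF_SYSCALL_TRACEPOINT 4
#define TIF_SYSCALL_AUDIT      5

#define _TIF_WORK ((1 << TIF_SYSCALL_TRACE) | \
		   (1 << TIF_SYSCALL_TRACEPOINT) | \
		   (1 << TIF_SYSCALL_AUDIT))

/* What ANDI_R3 computes: shift value and mask right by 3 before the and.
 * The zero/non-zero outcome is unchanged because the mask has no bits
 * below bit 3. */
static unsigned int andi_r3(unsigned int flags, unsigned int mask)
{
	return (flags >> 3) & (mask >> 3);
}

int main(void)
{
	unsigned int traced = 1 << TIF_SYSCALL_AUDIT;
	unsigned int idle = 1 << 0;	/* e.g. only TIF_SIGPENDING set */

	assert(!!andi_r3(traced, _TIF_WORK) == !!(traced & _TIF_WORK));
	assert(!!andi_r3(idle, _TIF_WORK) == !!(idle & _TIF_WORK));

	printf("traced=%u idle=%u\n",
	       andi_r3(traced, _TIF_WORK), andi_r3(idle, _TIF_WORK));
	return 0;
}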
diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h
index c72abb781bdc..d7328bbc1ce7 100644
--- a/arch/csky/abiv2/inc/abi/regdef.h
+++ b/arch/csky/abiv2/inc/abi/regdef.h
@@ -5,9 +5,8 @@
#define __ASM_CSKY_REGDEF_H
#define syscallid r7
-#define r11_sig r11
-
#define regs_syscallid(regs) regs->regs[3]
+#define regs_fp(regs) regs->regs[4]
/*
* PSR format:
@@ -23,4 +22,6 @@
#define SYSTRACE_SAVENUM 5
+#define TRAP0_SIZE 4
+
#endif /* __ASM_CSKY_REGDEF_H */
diff --git a/arch/csky/abiv2/mcount.S b/arch/csky/abiv2/mcount.S
index c633379956f5..326402e65f9e 100644
--- a/arch/csky/abiv2/mcount.S
+++ b/arch/csky/abiv2/mcount.S
@@ -61,10 +61,17 @@
addi sp, 16
.endm
+.macro nop32_stub
+ nop32
+ nop32
+ nop32
+.endm
+
ENTRY(ftrace_stub)
jmp lr
END(ftrace_stub)
+#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount)
mcount_enter
@@ -76,7 +83,7 @@ ENTRY(_mcount)
bf skip_ftrace
mov a0, lr
- subi a0, MCOUNT_INSN_SIZE
+ subi a0, 4
ldw a1, (sp, 24)
jsr r26
@@ -101,13 +108,41 @@ skip_ftrace:
mcount_exit
#endif
END(_mcount)
+#else /* CONFIG_DYNAMIC_FTRACE */
+ENTRY(_mcount)
+ mov t1, lr
+ ldw lr, (sp, 0)
+ addi sp, 4
+ jmp t1
+ENDPROC(_mcount)
+
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ ldw a0, (sp, 16)
+ subi a0, 4
+ ldw a1, (sp, 24)
+
+ nop
+GLOBAL(ftrace_call)
+ nop32_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ nop
+GLOBAL(ftrace_graph_call)
+ nop32_stub
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
mov a0, sp
addi a0, 24
ldw a1, (sp, 16)
- subi a1, MCOUNT_INSN_SIZE
+ subi a1, 4
mov a2, r8
lrw r26, prepare_ftrace_return
jsr r26
diff --git a/arch/csky/abiv2/memmove.S b/arch/csky/abiv2/memmove.S
index b0c42ecf1889..5721e73ad3d8 100644
--- a/arch/csky/abiv2/memmove.S
+++ b/arch/csky/abiv2/memmove.S
@@ -35,11 +35,7 @@ ENTRY(memmove)
.L_len_larger_16bytes:
subi r1, 16
subi r0, 16
-#if defined(__CSKY_VDSPV2__)
- vldx.8 vr0, (r1), r19
- PRE_BNEZAD (r18)
- vstx.8 vr0, (r0), r19
-#elif defined(__CK860__)
+#if defined(__CK860__)
ldw r3, (r1, 12)
stw r3, (r0, 12)
ldw r3, (r1, 8)
diff --git a/arch/csky/boot/dts/include/dt-bindings b/arch/csky/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/csky/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index 2a0abe8f2a35..c1a6c0f31150 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,6 +1,5 @@
generic-y += asm-offsets.h
generic-y += bugs.h
-generic-y += clkdev.h
generic-y += compat.h
generic-y += current.h
generic-y += delay.h
@@ -12,7 +11,6 @@ generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += fb.h
-generic-y += ftrace.h
generic-y += futex.h
generic-y += gpio.h
generic-y += hardirq.h
@@ -28,16 +26,14 @@ generic-y += linkage.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
-generic-y += mutex.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += serial.h
-generic-y += shm.h
generic-y += timex.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h
index 7547c45312a8..ba35d93ecda2 100644
--- a/arch/csky/include/asm/ftrace.h
+++ b/arch/csky/include/asm/ftrace.h
@@ -4,10 +4,26 @@
#ifndef __ASM_CSKY_FTRACE_H
#define __ASM_CSKY_FTRACE_H
-#define MCOUNT_INSN_SIZE 4
+#define MCOUNT_INSN_SIZE 14
#define HAVE_FUNCTION_GRAPH_FP_TEST
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+
+#ifndef __ASSEMBLY__
+
+extern void _mcount(unsigned long);
+
+extern void ftrace_graph_call(void);
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_FTRACE_H */
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index b2905c0485a7..734db3a122e1 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -14,23 +14,10 @@
#include <linux/sched.h>
#include <abi/ckmmu.h>
-static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
-{
- pgd -= PAGE_OFFSET;
- pgd += PHYS_OFFSET;
- pgd |= 1;
- setup_pgd(pgd, kernel);
-}
-
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
- tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)
+ setup_pgd(__pa(pgd), false)
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
- tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)
-
-static inline unsigned long tlb_get_pgd(void)
-{
- return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
-}
+ setup_pgd(__pa(pgd), true)
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
index 73cf2bd66a13..9738eacefdc7 100644
--- a/arch/csky/include/asm/page.h
+++ b/arch/csky/include/asm/page.h
@@ -8,7 +8,7 @@
#include <linux/const.h>
/*
- * PAGE_SHIFT determines the page size
+ * PAGE_SHIFT determines the page size: 4KB
*/
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
@@ -17,12 +17,18 @@
#define THREAD_MASK (~(THREAD_SIZE - 1))
#define THREAD_SHIFT (PAGE_SHIFT + 1)
+
/*
- * NOTE: virtual isn't really correct, actually it should be the offset into the
- * memory node, but we have no highmem, so that works for now.
- * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
- * of the shifts unnecessary.
+ * For C-SKY, the "User-space:Kernel-space" split is fixed at "2GB:2GB" by
+ * hardware, and two segment registers (MSA0 + MSA1) map a 512MB + 512MB
+ * physical address region. We use them to map the kernel's 1GB direct-map
+ * address area; for more than 1GB of memory we use highmem.
*/
+#define PAGE_OFFSET 0x80000000
+#define SSEG_SIZE 0x20000000
+#define LOWMEM_LIMIT (SSEG_SIZE * 2)
+
+#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
#ifndef __ASSEMBLY__
@@ -50,9 +56,6 @@ struct page;
struct vm_area_struct;
-/*
- * These are used to make use of C type-checking..
- */
typedef struct { unsigned long pte_low; } pte_t;
#define pte_val(x) ((x).pte_low)
@@ -69,18 +72,13 @@ typedef struct page *pgtable_t;
#define __pgd(x) ((pgd_t) { (x) })
#define __pgprot(x) ((pgprot_t) { (x) })
-#endif /* !__ASSEMBLY__ */
+extern unsigned long va_pa_offset;
-#define PHYS_OFFSET (CONFIG_RAM_BASE & ~(LOWMEM_LIMIT - 1))
-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (LOWMEM_LIMIT - 1))
-#define ARCH_PFN_OFFSET PFN_DOWN(CONFIG_RAM_BASE)
+#define ARCH_PFN_OFFSET PFN_DOWN(va_pa_offset + PHYS_OFFSET_OFFSET)
-#define PAGE_OFFSET 0x80000000
-#define LOWMEM_LIMIT 0x40000000
+#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + va_pa_offset)
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - va_pa_offset))
-#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - \
- PHYS_OFFSET))
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#define MAP_NR(x) PFN_DOWN((unsigned long)(x) - PAGE_OFFSET - \
@@ -90,15 +88,10 @@ typedef struct page *pgtable_t;
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-/*
- * main RAM and kernel working space are coincident at 0x80000000, but to make
- * life more interesting, there's also an uncached virtual shadow at 0xb0000000
- * - these mappings are fixed in the MMU
- */
-
#define pfn_to_kaddr(x) __va(PFN_PHYS(x))
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
+#endif /* !__ASSEMBLY__ */
#endif /* __ASM_CSKY_PAGE_H */
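
With this page.h change the virtual/physical delta is no longer the compile-time PHYS_OFFSET but a boot-time variable, va_pa_offset, so __pa()/__va() become linear-map conversions around a runtime value. A tiny standalone model of the two macros; the RAM base used below is an arbitrary demo value, not a real platform constant:

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0x80000000UL

/* Filled in at boot from the actual load address; being a variable rather
 * than a compile-time PHYS_OFFSET is the point of the patch. */
static unsigned long va_pa_offset;

static unsigned long __pa(unsigned long va)
{
	return va - PAGE_OFFSET + va_pa_offset;
}

static unsigned long __va(unsigned long pa)
{
	return pa + PAGE_OFFSET - va_pa_offset;
}

int main(void)
{
	va_pa_offset = 0x20000000UL;	/* assumed RAM base for the demo */

	unsigned long va = 0x80001000UL;
	unsigned long pa = __pa(va);

	printf("va 0x%lx -> pa 0x%lx -> va 0x%lx\n", va, pa, __va(pa));
	return 0;
}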
diff --git a/arch/csky/include/asm/perf_event.h b/arch/csky/include/asm/perf_event.h
index ea8193122294..572093e11001 100644
--- a/arch/csky/include/asm/perf_event.h
+++ b/arch/csky/include/asm/perf_event.h
@@ -4,4 +4,12 @@
#ifndef __ASM_CSKY_PERF_EVENT_H
#define __ASM_CSKY_PERF_EVENT_H
+#include <abi/regdef.h>
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->pc = (__ip); \
+ regs_fp(regs) = (unsigned long) __builtin_frame_address(0); \
+ asm volatile("mov %0, sp\n":"=r"((regs)->usp)); \
+}
+
#endif /* __ASM_PERF_EVENT_ELF_H */
diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h
new file mode 100644
index 000000000000..d0aba7b32417
--- /dev/null
+++ b/arch/csky/include/asm/ptrace.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef __ASM_CSKY_PTRACE_H
+#define __ASM_CSKY_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/traps.h>
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+#define PS_S 0x80000000 /* Supervisor Mode */
+
+#define arch_has_single_step() (1)
+#define current_pt_regs() \
+({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
+
+#define user_stack_pointer(regs) ((regs)->usp)
+
+#define user_mode(regs) (!((regs)->sr & PS_S))
+#define instruction_pointer(regs) ((regs)->pc)
+#define profile_pc(regs) instruction_pointer(regs)
+
+static inline bool in_syscall(struct pt_regs const *regs)
+{
+ return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
+}
+
+static inline void forget_syscall(struct pt_regs *regs)
+{
+ regs->sr &= ~(0xff << 16);
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_CSKY_PTRACE_H */
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index bda0a446c63e..f624fa3bbc22 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -8,6 +8,8 @@
#include <abi/regdef.h>
#include <uapi/linux/audit.h>
+extern void *sys_call_table[];
+
static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
@@ -15,6 +17,13 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
}
static inline void
+syscall_set_nr(struct task_struct *task, struct pt_regs *regs,
+ int sysno)
+{
+ regs_syscallid(regs) = sysno;
+}
+
+static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
regs->a0 = regs->orig_a0;
@@ -60,7 +69,7 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
}
static inline int
-syscall_get_arch(void)
+syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_CSKY;
}
diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h
index 0e9d035d712b..0b546a55a8bf 100644
--- a/arch/csky/include/asm/thread_info.h
+++ b/arch/csky/include/asm/thread_info.h
@@ -51,29 +51,26 @@ static inline struct thread_info *current_thread_info(void)
#endif /* !__ASSEMBLY__ */
-/* entry.S relies on these definitions!
- * bits 0-5 are tested at every exception exit
- */
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_SYSCALL_TRACE 5 /* syscall trace active */
-#define TIF_DELAYED_TRACE 14 /* single step a syscall */
+#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
+#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 5 /* syscall auditing */
#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
-#define TIF_FREEZE 19 /* thread is freezing for suspend */
#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
#define TIF_SECCOMP 21 /* secure computing */
-#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
-#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
-#define _TIF_DELAYED_TRACE (1 << TIF_DELAYED_TRACE)
-#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
-#define _TIF_FREEZE (1 << TIF_FREEZE)
-#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
-#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#endif /* _ASM_CSKY_THREAD_INFO_H */
diff --git a/arch/csky/include/asm/unistd.h b/arch/csky/include/asm/unistd.h
index 284487477a61..da7a18295615 100644
--- a/arch/csky/include/asm/unistd.h
+++ b/arch/csky/include/asm/unistd.h
@@ -2,3 +2,5 @@
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..ee323d818592
--- /dev/null
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#ifndef _ASM_CSKY_PERF_REGS_H
+#define _ASM_CSKY_PERF_REGS_H
+
+/* Index of struct pt_regs */
+enum perf_event_csky_regs {
+ PERF_REG_CSKY_TLS,
+ PERF_REG_CSKY_LR,
+ PERF_REG_CSKY_PC,
+ PERF_REG_CSKY_SR,
+ PERF_REG_CSKY_SP,
+ PERF_REG_CSKY_ORIG_A0,
+ PERF_REG_CSKY_A0,
+ PERF_REG_CSKY_A1,
+ PERF_REG_CSKY_A2,
+ PERF_REG_CSKY_A3,
+ PERF_REG_CSKY_REGS0,
+ PERF_REG_CSKY_REGS1,
+ PERF_REG_CSKY_REGS2,
+ PERF_REG_CSKY_REGS3,
+ PERF_REG_CSKY_REGS4,
+ PERF_REG_CSKY_REGS5,
+ PERF_REG_CSKY_REGS6,
+ PERF_REG_CSKY_REGS7,
+ PERF_REG_CSKY_REGS8,
+ PERF_REG_CSKY_REGS9,
+#if defined(__CSKYABIV2__)
+ PERF_REG_CSKY_EXREGS0,
+ PERF_REG_CSKY_EXREGS1,
+ PERF_REG_CSKY_EXREGS2,
+ PERF_REG_CSKY_EXREGS3,
+ PERF_REG_CSKY_EXREGS4,
+ PERF_REG_CSKY_EXREGS5,
+ PERF_REG_CSKY_EXREGS6,
+ PERF_REG_CSKY_EXREGS7,
+ PERF_REG_CSKY_EXREGS8,
+ PERF_REG_CSKY_EXREGS9,
+ PERF_REG_CSKY_EXREGS10,
+ PERF_REG_CSKY_EXREGS11,
+ PERF_REG_CSKY_EXREGS12,
+ PERF_REG_CSKY_EXREGS13,
+ PERF_REG_CSKY_EXREGS14,
+ PERF_REG_CSKY_HI,
+ PERF_REG_CSKY_LO,
+ PERF_REG_CSKY_DCSR,
+#endif
+ PERF_REG_CSKY_MAX,
+};
+#endif /* _ASM_CSKY_PERF_REGS_H */
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
index a4eaa8ddf0b1..4e248d5b86ef 100644
--- a/arch/csky/include/uapi/asm/ptrace.h
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -48,20 +48,5 @@ struct user_fp {
unsigned long reserved;
};
-#ifdef __KERNEL__
-
-#define PS_S 0x80000000 /* Supervisor Mode */
-
-#define arch_has_single_step() (1)
-#define current_pt_regs() \
-({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
-
-#define user_stack_pointer(regs) ((regs)->usp)
-
-#define user_mode(regs) (!((regs)->sr & PS_S))
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _CSKY_PTRACE_H */
diff --git a/arch/csky/kernel/Makefile b/arch/csky/kernel/Makefile
index 484e6d3a3647..1624b04bffb5 100644
--- a/arch/csky/kernel/Makefile
+++ b/arch/csky/kernel/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
+obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
+obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
index d2357c8f85bd..5b84f11485ae 100644
--- a/arch/csky/kernel/atomic.S
+++ b/arch/csky/kernel/atomic.S
@@ -12,11 +12,10 @@
* If *ptr != oldval && return 1,
* else *ptr = newval return 0.
*/
-#ifdef CONFIG_CPU_HAS_LDSTEX
ENTRY(csky_cmpxchg)
USPTOKSP
mfcr a3, epc
- INCTRAP a3
+ addi a3, TRAP0_SIZE
subi sp, 8
stw a3, (sp, 0)
@@ -24,6 +23,7 @@ ENTRY(csky_cmpxchg)
stw a3, (sp, 4)
psrset ee
+#ifdef CONFIG_CPU_HAS_LDSTEX
1:
ldex a3, (a2)
cmpne a0, a3
@@ -33,27 +33,7 @@ ENTRY(csky_cmpxchg)
bez a3, 1b
2:
sync.is
- mvc a0
- ldw a3, (sp, 0)
- mtcr a3, epc
- ldw a3, (sp, 4)
- mtcr a3, epsr
- addi sp, 8
- KSPTOUSP
- rte
-END(csky_cmpxchg)
#else
-ENTRY(csky_cmpxchg)
- USPTOKSP
- mfcr a3, epc
- INCTRAP a3
-
- subi sp, 8
- stw a3, (sp, 0)
- mfcr a3, epsr
- stw a3, (sp, 4)
-
- psrset ee
1:
ldw a3, (a2)
cmpne a0, a3
@@ -61,6 +41,7 @@ ENTRY(csky_cmpxchg)
2:
stw a1, (a2)
3:
+#endif
mvc a0
ldw a3, (sp, 0)
mtcr a3, epc
@@ -71,6 +52,7 @@ ENTRY(csky_cmpxchg)
rte
END(csky_cmpxchg)
+#ifndef CONFIG_CPU_HAS_LDSTEX
/*
* Called from tlbmodified exception
*/
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index 5137ed9062bd..a7e84ccccbd8 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -40,7 +40,8 @@ ENTRY(csky_\name)
WR_MCIR a2
#endif
bclri r6, 0
- lrw a2, PHYS_OFFSET
+ lrw a2, va_pa_offset
+ ld.w a2, (a2, 0)
subu r6, a2
bseti r6, 31
@@ -50,7 +51,8 @@ ENTRY(csky_\name)
addu r6, a2
ldw r6, (r6)
- lrw a2, PHYS_OFFSET
+ lrw a2, va_pa_offset
+ ld.w a2, (a2, 0)
subu r6, a2
bseti r6, 31
@@ -91,7 +93,7 @@ ENTRY(csky_\name)
mfcr a3, ss2
mfcr r6, ss3
mfcr a2, ss4
- SAVE_ALL EPC_KEEP
+ SAVE_ALL 0
.endm
.macro tlbop_end is_write
RD_MEH a2
@@ -99,7 +101,6 @@ ENTRY(csky_\name)
mov a0, sp
movi a1, \is_write
jbsr do_page_fault
- movi r11_sig, 0 /* r11 = 0, Not a syscall. */
jmpi ret_from_exception
.endm
@@ -118,7 +119,7 @@ jbsr csky_cmpxchg_fixup
tlbop_end 1
ENTRY(csky_systemcall)
- SAVE_ALL EPC_INCREASE
+ SAVE_ALL TRAP0_SIZE
psrset ee, ie
@@ -136,8 +137,9 @@ ENTRY(csky_systemcall)
bmaski r10, THREAD_SHIFT
andn r9, r10
ldw r8, (r9, TINFO_FLAGS)
- btsti r8, TIF_SYSCALL_TRACE
- bt 1f
+ ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+ cmpnei r8, 0
+ bt csky_syscall_trace
#if defined(__CSKYABIV2__)
subi sp, 8
stw r5, (sp, 0x4)
@@ -150,10 +152,9 @@ ENTRY(csky_systemcall)
stw a0, (sp, LSAVE_A0) /* Save return value */
jmpi ret_from_exception
-1:
- movi a0, 0 /* enter system call */
- mov a1, sp /* sp = pt_regs pointer */
- jbsr syscall_trace
+csky_syscall_trace:
+ mov a0, sp /* sp = pt_regs pointer */
+ jbsr syscall_trace_enter
/* Prepare args before do system call */
ldw a0, (sp, LSAVE_A0)
ldw a1, (sp, LSAVE_A1)
@@ -173,9 +174,8 @@ ENTRY(csky_systemcall)
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
- movi a0, 1 /* leave system call */
- mov a1, sp /* right now, sp --> pt_regs */
- jbsr syscall_trace
+ mov a0, sp /* right now, sp --> pt_regs */
+ jbsr syscall_trace_exit
br ret_from_exception
ENTRY(ret_from_kernel_thread)
@@ -190,14 +190,11 @@ ENTRY(ret_from_fork)
bmaski r10, THREAD_SHIFT
andn r9, r10
ldw r8, (r9, TINFO_FLAGS)
- movi r11_sig, 1
- btsti r8, TIF_SYSCALL_TRACE
- bf 3f
- movi a0, 1
- mov a1, sp /* sp = pt_regs pointer */
- jbsr syscall_trace
-3:
- jbsr ret_from_exception
+ ANDI_R3 r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+ cmpnei r8, 0
+ bf ret_from_exception
+ mov a0, sp /* sp = pt_regs pointer */
+ jbsr syscall_trace_exit
ret_from_exception:
ld syscallid, (sp, LSAVE_PSR)
@@ -212,41 +209,30 @@ ret_from_exception:
bmaski r10, THREAD_SHIFT
andn r9, r10
-resume_userspace:
ldw r8, (r9, TINFO_FLAGS)
andi r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
cmpnei r8, 0
bt exit_work
-1: RESTORE_ALL
+1:
+ RESTORE_ALL
exit_work:
+ lrw syscallid, ret_from_exception
+ mov lr, syscallid
+
btsti r8, TIF_NEED_RESCHED
bt work_resched
- /* If thread_info->flag is empty, RESTORE_ALL */
- cmpnei r8, 0
- bf 1b
- mov a1, sp
- mov a0, r8
- mov a2, r11_sig /* syscall? */
- btsti r8, TIF_SIGPENDING /* delivering a signal? */
- /* prevent further restarts(set r11 = 0) */
- clrt r11_sig
- jbsr do_notify_resume /* do signals */
- br resume_userspace
+
+ mov a0, sp
+ mov a1, r8
+ jmpi do_notify_resume
work_resched:
- lrw syscallid, ret_from_exception
- mov r15, syscallid /* Return address in link */
jmpi schedule
-ENTRY(sys_rt_sigreturn)
- movi r11_sig, 0
- jmpi do_rt_sigreturn
-
ENTRY(csky_trap)
- SAVE_ALL EPC_KEEP
+ SAVE_ALL 0
psrset ee
- movi r11_sig, 0 /* r11 = 0, Not a syscall. */
mov a0, sp /* Push Stack pointer arg */
jbsr trap_c /* Call C-level trap handler */
jmpi ret_from_exception
@@ -261,7 +247,7 @@ ENTRY(csky_get_tls)
/* increase epc for continue */
mfcr a0, epc
- INCTRAP a0
+ addi a0, TRAP0_SIZE
mtcr a0, epc
/* get current task thread_info with kernel 8K stack */
@@ -278,9 +264,8 @@ ENTRY(csky_get_tls)
rte
ENTRY(csky_irq)
- SAVE_ALL EPC_KEEP
+ SAVE_ALL 0
psrset ee
- movi r11_sig, 0 /* r11 = 0, Not a syscall. */
#ifdef CONFIG_PREEMPT
mov r9, sp /* Get current stack pointer */
diff --git a/arch/csky/kernel/ftrace.c b/arch/csky/kernel/ftrace.c
index 274c431f1810..44f4880179b7 100644
--- a/arch/csky/kernel/ftrace.c
+++ b/arch/csky/kernel/ftrace.c
@@ -3,6 +3,137 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define NOP 0x4000
+#define NOP32_HI 0xc400
+#define NOP32_LO 0x4820
+#define PUSH_LR 0x14d0
+#define MOVIH_LINK 0xea3a
+#define ORI_LINK 0xef5a
+#define JSR_LINK 0xe8fa
+#define BSR_LINK 0xe000
+
+/*
+ * Gcc-csky with -pg will insert a stub in the function prologue:
+ * push lr
+ * jbsr _mcount
+ * nop32
+ * nop32
+ *
+ * If the (callee - current_pc) is less than 64MB, we'll use bsr:
+ * push lr
+ * bsr _mcount
+ * nop32
+ * nop32
+ * else we'll use (movih + ori + jsr):
+ * push lr
+ * movih r26, ...
+ * ori r26, ...
+ * jsr r26
+ *
+ * (r26 is our reserved link-reg)
+ *
+ */
+static inline void make_jbsr(unsigned long callee, unsigned long pc,
+ uint16_t *call, bool nolr)
+{
+ long offset;
+
+ call[0] = nolr ? NOP : PUSH_LR;
+
+ offset = (long) callee - (long) pc;
+
+ if (unlikely(offset < -67108864 || offset > 67108864)) {
+ call[1] = MOVIH_LINK;
+ call[2] = callee >> 16;
+ call[3] = ORI_LINK;
+ call[4] = callee & 0xffff;
+ call[5] = JSR_LINK;
+ call[6] = 0;
+ } else {
+ offset = offset >> 1;
+
+ call[1] = BSR_LINK |
+ ((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
+ call[2] = (uint16_t)((unsigned long) offset & 0xffff);
+ call[3] = call[5] = NOP32_HI;
+ call[4] = call[6] = NOP32_LO;
+ }
+}
+
+static uint16_t nops[7] = {NOP, NOP32_HI, NOP32_LO, NOP32_HI, NOP32_LO,
+ NOP32_HI, NOP32_LO};
+static int ftrace_check_current_nop(unsigned long hook)
+{
+ uint16_t olds[7];
+ unsigned long hook_pos = hook - 2;
+
+ if (probe_kernel_read((void *)olds, (void *)hook_pos, sizeof(nops)))
+ return -EFAULT;
+
+ if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
+ pr_err("%p: nop but get (%04x %04x %04x %04x %04x %04x %04x)\n",
+ (void *)hook_pos,
+ olds[0], olds[1], olds[2], olds[3], olds[4], olds[5],
+ olds[6]);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ftrace_modify_code(unsigned long hook, unsigned long target,
+ bool enable, bool nolr)
+{
+ uint16_t call[7];
+
+ unsigned long hook_pos = hook - 2;
+ int ret = 0;
+
+ make_jbsr(target, hook, call, nolr);
+
+ ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
+ sizeof(nops));
+ if (ret)
+ return -EPERM;
+
+ flush_icache_range(hook_pos, hook_pos + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret = ftrace_check_current_nop(rec->ip);
+
+ if (ret)
+ return ret;
+
+ return ftrace_modify_code(rec->ip, addr, true, false);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ return ftrace_modify_code(rec->ip, addr, false, false);
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ int ret = ftrace_modify_code((unsigned long)&ftrace_call,
+ (unsigned long)func, true, true);
+ return ret;
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
@@ -43,8 +174,21 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*(unsigned long *)frame_pointer = return_hooker;
}
}
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code((unsigned long)&ftrace_graph_call,
+ (unsigned long)&ftrace_graph_caller, true, true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code((unsigned long)&ftrace_graph_call,
+ (unsigned long)&ftrace_graph_caller, false, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/* _mcount is defined in abi's mcount.S */
-extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
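
make_jbsr() above picks between a movih/ori/jsr sequence through r26 and, when the callee is within roughly +/-64MB, a bsr whose halfword-scaled displacement is split across two 16-bit words. The snippet below only replays that near-branch arithmetic on the host so the split is easy to see; the addresses are hypothetical and this is not a full encoder:

#include <stdint.h>
#include <stdio.h>

#define BSR_LINK 0xe000

/* Mirrors the near-call branch of make_jbsr(): scale the byte offset to
 * halfwords, then place the top 10 bits in the first instruction halfword
 * and the low 16 bits in the second. */
static void encode_bsr(unsigned long callee, unsigned long pc, uint16_t out[2])
{
	long offset = (long)callee - (long)pc;

	offset >>= 1;
	out[0] = BSR_LINK | ((uint16_t)((unsigned long)offset >> 16) & 0x3ff);
	out[1] = (uint16_t)((unsigned long)offset & 0xffff);
}

int main(void)
{
	uint16_t insn[2];

	/* Hypothetical addresses, just to show the split. */
	encode_bsr(0x80201000UL, 0x80200000UL, insn);
	printf("%04x %04x\n", insn[0], insn[1]);
	return 0;
}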
diff --git a/arch/csky/kernel/head.S b/arch/csky/kernel/head.S
index 9c4ec473b76b..61989f9241c0 100644
--- a/arch/csky/kernel/head.S
+++ b/arch/csky/kernel/head.S
@@ -7,16 +7,11 @@
__HEAD
ENTRY(_start)
- /* set super user mode */
- lrw a3, DEFAULT_PSR_VALUE
- mtcr a3, psr
- psrset ee
-
- SETUP_MMU a3
+ SETUP_MMU
/* set stack point */
- lrw a3, init_thread_union + THREAD_SIZE
- mov sp, a3
+ lrw r6, init_thread_union + THREAD_SIZE
+ mov sp, r6
jmpi csky_start
END(_start)
@@ -24,53 +19,12 @@ END(_start)
#ifdef CONFIG_SMP
.align 10
ENTRY(_start_smp_secondary)
- /* Invalid I/Dcache BTB BHT */
- movi a3, 7
- lsli a3, 16
- addi a3, (1<<4) | 3
- mtcr a3, cr17
-
- tlbi.alls
-
- /* setup PAGEMASK */
- movi a3, 0
- mtcr a3, cr<6, 15>
-
- /* setup MEL0/MEL1 */
- grs a0, _start_smp_pc
-_start_smp_pc:
- bmaski a1, 13
- andn a0, a1
- movi a1, 0x00000006
- movi a2, 0x00001006
- or a1, a0
- or a2, a0
- mtcr a1, cr<2, 15>
- mtcr a2, cr<3, 15>
-
- /* setup MEH */
- mtcr a0, cr<4, 15>
-
- /* write TLB */
- bgeni a3, 28
- mtcr a3, cr<8, 15>
-
- SETUP_MMU a3
-
- /* enable MMU */
- movi a3, 1
- mtcr a3, cr18
-
- jmpi _goto_mmu_on
-_goto_mmu_on:
- lrw a3, DEFAULT_PSR_VALUE
- mtcr a3, psr
- psrset ee
+ SETUP_MMU
/* set stack point */
- lrw a3, secondary_stack
- ld.w a3, (a3, 0)
- mov sp, a3
+ lrw r6, secondary_stack
+ ld.w r6, (r6, 0)
+ mov sp, r6
jmpi csky_start_secondary
END(_start_smp_secondary)
diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
new file mode 100644
index 000000000000..e68ff375c8f8
--- /dev/null
+++ b/arch/csky/kernel/perf_callchain.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+/* Kernel callchain */
+struct stackframe {
+ unsigned long fp;
+ unsigned long lr;
+};
+
+static int unwind_frame_kernel(struct stackframe *frame)
+{
+ if (kstack_end((void *)frame->fp))
+ return -EPERM;
+ if (frame->fp & 0x3 || frame->fp < TASK_SIZE)
+ return -EPERM;
+
+ *frame = *(struct stackframe *)frame->fp;
+ if (__kernel_text_address(frame->lr)) {
+ int graph = 0;
+
+ frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
+ NULL);
+ }
+ return 0;
+}
+
+static void notrace walk_stackframe(struct stackframe *fr,
+ struct perf_callchain_entry_ctx *entry)
+{
+ do {
+ perf_callchain_store(entry, fr->lr);
+ } while (unwind_frame_kernel(fr) >= 0);
+}
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ unsigned long fp, unsigned long reg_lr)
+{
+ struct stackframe buftail;
+ unsigned long lr = 0;
+ unsigned long *user_frame_tail = (unsigned long *)fp;
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ if (reg_lr != 0)
+ lr = reg_lr;
+ else
+ lr = buftail.lr;
+
+ fp = buftail.fp;
+ perf_callchain_store(entry, lr);
+
+ return fp;
+}
+
+/*
+ * This will be called when the target is in user mode
+ * This function will only be called when we use
+ * "PERF_SAMPLE_CALLCHAIN" in
+ * kernel/events/core.c:perf_prepare_sample()
+ *
+ * How to trigger perf_callchain_[user/kernel] :
+ * $ perf record -e cpu-clock --call-graph fp ./program
+ * $ perf report --call-graph
+ *
+ * On the C-SKY platform, the program being sampled and the C library
+ * need to be compiled with -mbacktrace, otherwise the user
+ * stack will not contain function frames.
+ */
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long fp = 0;
+
+ /* C-SKY does not support virtualization. */
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return;
+
+ fp = regs->regs[4];
+ perf_callchain_store(entry, regs->pc);
+
+ /*
+ * When backtracing from a leaf function, lr is normally
+ * not saved inside the frame on C-SKY, so take lr from pt_regs
+ * at the sample point. However, the lr value can be incorrect
+ * if lr is being used as a temporary register.
+ */
+ fp = user_backtrace(entry, fp, regs->lr);
+
+ while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+ fp = user_backtrace(entry, fp, 0);
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct stackframe fr;
+
+ /* C-SKY does not support virtualization. */
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ pr_warn("C-SKY does not support perf in guest mode!");
+ return;
+ }
+
+ fr.fp = regs->regs[4];
+ fr.lr = regs->lr;
+ walk_stackframe(&fr, entry);
+}
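
The new perf_callchain.c above walks user and kernel stacks by following a chain of {fp, lr} records, which is exactly what user_backtrace() copies out of user memory one frame at a time. The sketch below shows the same walk in ordinary user-space C; it is a minimal illustration, not the kernel code: it assumes a frame layout where the saved frame pointer and return address sit back to back at the address held in the frame pointer (true for x86-64 with -fno-omit-frame-pointer and for C-SKY built with -mbacktrace), and it has no equivalent of the __copy_from_user_inatomic() safety net.

/* fpwalk.c - build with: gcc -O0 -fno-omit-frame-pointer fpwalk.c */
#include <stdio.h>
#include <stdint.h>

struct stackframe {
	uintptr_t fp;	/* caller's frame pointer */
	uintptr_t lr;	/* return address into the caller */
};

/* Mirror of walk_stackframe()/user_backtrace(): follow the fp chain. */
static void walk(uintptr_t fp, int max_depth)
{
	while (fp && !(fp & 0x3) && max_depth--) {
		struct stackframe frame = *(struct stackframe *)fp;

		printf("  return address: %#lx\n", (unsigned long)frame.lr);
		fp = frame.fp;		/* hop to the caller's frame */
	}
}

static void leaf(void)
{
	/* frame address 0 is this function's own frame record */
	walk((uintptr_t)__builtin_frame_address(0), 16);
}

static void middle(void)
{
	leaf();
}

int main(void)
{
	middle();
	return 0;
}
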
diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c
new file mode 100644
index 000000000000..eb32838b8210
--- /dev/null
+++ b/arch/csky/kernel/perf_regs.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_CSKY_MAX))
+ return 0;
+
+ return (u64)*((u32 *)regs + idx);
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
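
perf_reg_value() above works by treating struct pt_regs as a flat array of 32-bit slots, so the PERF_REG_CSKY_* indices must match the field order of the register structure exactly. A toy user-space sketch of that indexing trick follows; the struct and enum are invented for the example and are not the real C-SKY layout.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy register file; the real layout lives in asm/ptrace.h. */
struct toy_regs {
	uint32_t pc;
	uint32_t usp;
	uint32_t a0;
	uint32_t a1;
};

/* Index names must follow the struct's field order, as in perf_regs.h. */
enum { TOY_REG_PC, TOY_REG_USP, TOY_REG_A0, TOY_REG_A1, TOY_REG_MAX };

static uint64_t toy_reg_value(struct toy_regs *regs, int idx)
{
	assert((unsigned int)idx < TOY_REG_MAX);
	return (uint64_t)*((uint32_t *)regs + idx);	/* same cast as perf_reg_value() */
}

int main(void)
{
	struct toy_regs regs = { .pc = 0x1000, .usp = 0x7ffc, .a0 = 42, .a1 = 7 };

	printf("pc=%#llx a0=%llu\n",
	       (unsigned long long)toy_reg_value(&regs, TOY_REG_PC),
	       (unsigned long long)toy_reg_value(&regs, TOY_REG_A0));
	return 0;
}
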
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c
index f2f12fff36f7..313623a19ecb 100644
--- a/arch/csky/kernel/ptrace.c
+++ b/arch/csky/kernel/ptrace.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+#include <linux/audit.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -11,6 +12,7 @@
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/smp.h>
+#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <linux/user.h>
@@ -22,6 +24,9 @@
#include <abi/regdef.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/* sets the trace bits. */
#define TRACE_MODE_SI (1 << 14)
#define TRACE_MODE_RUN 0
@@ -207,35 +212,27 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-/*
- * If process's system calls is traces, do some corresponding handles in this
- * function before entering system call function and after exiting system call
- * function.
- */
-asmlinkage void syscall_trace(int why, struct pt_regs *regs)
+asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
- long saved_why;
- /*
- * Save saved_why, why is used to denote syscall entry/exit;
- * why = 0:entry, why = 1: exit
- */
- saved_why = regs->regs[SYSTRACE_SAVENUM];
- regs->regs[SYSTRACE_SAVENUM] = why;
-
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (tracehook_report_syscall_entry(regs))
+ syscall_set_nr(current, regs, -1);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+
+ audit_syscall_entry(regs_syscallid(regs), regs->a0, regs->a1, regs->a2, regs->a3);
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ audit_syscall_exit(regs);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
- regs->regs[SYSTRACE_SAVENUM] = saved_why;
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, syscall_get_return_value(current, regs));
}
extern void show_stack(struct task_struct *task, unsigned long *stack);
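
Replacing syscall_trace() with separate syscall_trace_enter()/syscall_trace_exit() hooks is what gives a PTRACE_SYSCALL tracer its two stops per system call, with the 0x80 bit set on the stop signal when PTRACE_O_TRACESYSGOOD is requested. A minimal user-space tracer showing that pattern is sketched below; it uses only the generic ptrace(2) API, and decoding the actual syscall number and arguments (e.g. via PTRACE_GETREGSET) is left out.

/* mini_strace.c - observe the per-syscall entry/exit stops of a child */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the parent set options */
		execlp("true", "true", (char *)NULL);
		_exit(127);
	}

	int status, stops = 0;

	waitpid(child, &status, 0);		/* initial SIGSTOP */
	/* flag syscall stops as SIGTRAP|0x80, like PT_TRACESYSGOOD in the old code */
	ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESYSGOOD);

	for (;;) {
		ptrace(PTRACE_SYSCALL, child, NULL, NULL);	/* run to the next stop */
		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			stops++;		/* one for entry, one for exit */
	}
	printf("%d syscall stops (roughly 2 per system call)\n", stops);
	return 0;
}
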
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index dff8b89444ec..23ee604aafdb 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -142,18 +142,24 @@ void __init setup_arch(char **cmdline_p)
#endif
}
-asmlinkage __visible void __init csky_start(unsigned int unused, void *param)
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+
+asmlinkage __visible void __init csky_start(unsigned int unused,
+ void *dtb_start)
{
/* Clean up bss section */
memset(__bss_start, 0, __bss_stop - __bss_start);
+ va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1);
+
pre_trap_init();
pre_mmu_init();
- if (param == NULL)
+ if (dtb_start == NULL)
early_init_dt_scan(__dtb_start);
else
- early_init_dt_scan(param);
+ early_init_dt_scan(dtb_start);
start_kernel();
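
va_pa_offset is computed by masking MSA0 down to its segment base: SSEG_SIZE is a power of two, so AND-ing with ~(SSEG_SIZE - 1) clears the attribute bits in the low part of the register and leaves the physical base of the kernel segment. A tiny stand-alone illustration of the arithmetic, with an assumed 512 MiB SSEG_SIZE and an invented MSA0 value:

#include <stdio.h>

#define SSEG_SIZE	0x20000000UL	/* 512 MiB segment, assumed for the example */

int main(void)
{
	unsigned long msa0 = 0x8000002eUL;		/* base | attribute bits */
	unsigned long va_pa_offset = msa0 & ~(SSEG_SIZE - 1);

	printf("va_pa_offset = %#lx\n", va_pa_offset);	/* prints 0x80000000 */
	return 0;
}
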
diff --git a/arch/csky/kernel/signal.c b/arch/csky/kernel/signal.c
index 207a891479d2..04a43cfd4e09 100644
--- a/arch/csky/kernel/signal.c
+++ b/arch/csky/kernel/signal.c
@@ -1,26 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
#include <linux/signal.h>
+#include <linux/uaccess.h>
#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/personality.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
#include <linux/tracehook.h>
-#include <linux/freezer.h>
-#include <linux/uaccess.h>
-#include <asm/setup.h>
-#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
@@ -29,110 +13,117 @@
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
-
-static int restore_fpu_state(struct sigcontext *sc)
+static int restore_fpu_state(struct sigcontext __user *sc)
{
int err = 0;
struct user_fp user_fp;
- err = copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp));
+ err = __copy_from_user(&user_fp, &sc->sc_user_fp, sizeof(user_fp));
restore_from_user_fp(&user_fp);
return err;
}
-static int save_fpu_state(struct sigcontext *sc)
+static int save_fpu_state(struct sigcontext __user *sc)
{
struct user_fp user_fp;
save_to_user_fp(&user_fp);
- return copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp));
+ return __copy_to_user(&sc->sc_user_fp, &user_fp, sizeof(user_fp));
}
#else
-static inline int restore_fpu_state(struct sigcontext *sc) { return 0; }
-static inline int save_fpu_state(struct sigcontext *sc) { return 0; }
+#define restore_fpu_state(sigcontext) (0)
+#define save_fpu_state(sigcontext) (0)
#endif
struct rt_sigframe {
- int sig;
- struct siginfo *pinfo;
- void *puc;
struct siginfo info;
struct ucontext uc;
};
-static int
-restore_sigframe(struct pt_regs *regs,
- struct sigcontext *sc, int *pr2)
+static long restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
{
int err = 0;
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->task->restart_block.fn = do_no_restart_syscall;
-
- err |= copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
+ /* sc_pt_regs is structured the same as the start of pt_regs */
+ err |= __copy_from_user(regs, &sc->sc_pt_regs, sizeof(struct pt_regs));
+ /* Restore the floating-point state. */
err |= restore_fpu_state(sc);
- *pr2 = regs->a0;
return err;
}
-asmlinkage int
-do_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
- sigset_t set;
- int a0;
struct pt_regs *regs = current_pt_regs();
- struct rt_sigframe *frame = (struct rt_sigframe *)(regs->usp);
+ struct rt_sigframe __user *frame;
+ struct task_struct *task;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct rt_sigframe __user *)regs->usp;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
+
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
- sigdelsetmask(&set, (sigmask(SIGKILL) | sigmask(SIGSTOP)));
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
- if (restore_sigframe(regs, &frame->uc.uc_mcontext, &a0))
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
- return a0;
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return regs->a0;
badframe:
- force_sig(SIGSEGV, current);
+ task = current;
+ force_sig(SIGSEGV, task);
return 0;
}
-static int setup_sigframe(struct sigcontext *sc, struct pt_regs *regs)
+static int setup_sigcontext(struct rt_sigframe __user *frame,
+ struct pt_regs *regs)
{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
int err = 0;
- err |= copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs));
+ err |= __copy_to_user(&sc->sc_pt_regs, regs, sizeof(struct pt_regs));
err |= save_fpu_state(sc);
return err;
}
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, size_t framesize)
{
- unsigned long usp;
+ unsigned long sp;
+ /* Default to using normal stack */
+ sp = regs->usp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
- /* Default to using normal stack. */
- usp = regs->usp;
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
- /* This is the X/Open sanctioned signal stack switching. */
- if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(usp)) {
- if (!on_sig_stack(usp))
- usp = current->sas_ss_sp + current->sas_ss_size;
- }
- return (void *)((usp - frame_size) & -8UL);
+ /* Align the stack frame. */
+ sp &= -8UL;
+
+ return (void __user *)sp;
}
static int
@@ -140,205 +131,128 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
-
struct csky_vdso *vdso = current->mm->context.vdso;
- frame = get_sigframe(&ksig->ka, regs, sizeof(*frame));
- if (!frame)
- return 1;
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof(*frame)))
+ return -EFAULT;
- err |= __put_user(ksig->sig, &frame->sig);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
- err |= __put_user((void *)current->sas_ss_sp,
- &frame->uc.uc_stack.ss_sp);
- err |= __put_user(sas_ss_flags(regs->usp),
- &frame->uc.uc_stack.ss_flags);
- err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
- err |= setup_sigframe(&frame->uc.uc_mcontext, regs);
- err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->usp);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
- goto give_sigsegv;
+ return -EFAULT;
- /* Set up registers for signal handler */
- regs->usp = (unsigned long)frame;
- regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
- regs->lr = (unsigned long)vdso->rt_signal_retcode;
+ /* Set up to return from userspace. */
+ regs->lr = (unsigned long)(vdso->rt_signal_retcode);
-adjust_stack:
- regs->a0 = ksig->sig; /* first arg is signo */
- regs->a1 = (unsigned long)(&(frame->info));
- regs->a2 = (unsigned long)(&(frame->uc));
- return err;
+ /*
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->usp = (unsigned long)frame;
+ regs->a0 = ksig->sig; /* a0: signal number */
+ regs->a1 = (unsigned long)(&(frame->info)); /* a1: siginfo pointer */
+ regs->a2 = (unsigned long)(&(frame->uc)); /* a2: ucontext pointer */
-give_sigsegv:
- if (ksig->sig == SIGSEGV)
- ksig->ka.sa.sa_handler = SIG_DFL;
- force_sig(SIGSEGV, current);
- goto adjust_stack;
+ return 0;
}
-/*
- * OK, we're invoking a handler
- */
-static int
-handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- int ret;
sigset_t *oldset = sigmask_to_save();
+ int ret;
- /*
- * set up the stack frame, regardless of SA_SIGINFO,
- * and pass info anyway.
- */
- ret = setup_rt_frame(ksig, oldset, regs);
+ /* Are we from a system call? */
+ if (in_syscall(regs)) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ forget_syscall(regs);
+
+ /* If so, check system call restarting.. */
+ switch (regs->a0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->a0 = -EINTR;
+ break;
- if (ret != 0) {
- force_sigsegv(ksig->sig, current);
- return ret;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->a0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->pc -= TRAP0_SIZE;
+ break;
+ }
}
- /* Block the signal if we were successful. */
- spin_lock_irq(&current->sighand->siglock);
- sigorsets(&current->blocked, &current->blocked, &ksig->ka.sa.sa_mask);
- if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
- sigaddset(&current->blocked, ksig->sig);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ /* Set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
- return 0;
+ signal_setup_done(ret, ksig, 0);
}
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- *
- * Note that we go through the signals twice: once to check the signals
- * that the kernel can handle, and then we build all the user-level signal
- * handling stack-frames in one go after that.
- */
-static void do_signal(struct pt_regs *regs, int syscall)
+static void do_signal(struct pt_regs *regs)
{
- unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
struct ksignal ksig;
- /*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
+ if (get_signal(&ksig)) {
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
return;
+ }
- /*
- * If we were from a system call, check for system call restarting...
- */
- if (syscall) {
- continue_addr = regs->pc;
-#if defined(__CSKYABIV2__)
- restart_addr = continue_addr - 4;
-#else
- restart_addr = continue_addr - 2;
-#endif
- retval = regs->a0;
+ /* Did we come from a system call? */
+ if (in_syscall(regs)) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ forget_syscall(regs);
- /*
- * Prepare for system call restart. We do this here so that a
- * debugger will see the already changed.
- */
- switch (retval) {
+ /* Restart the system call - no handlers present */
+ switch (regs->a0) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0;
- regs->pc = restart_addr;
+ regs->pc -= TRAP0_SIZE;
break;
case -ERESTART_RESTARTBLOCK:
- regs->a0 = -EINTR;
+ regs->a0 = regs->orig_a0;
+ regs_syscallid(regs) = __NR_restart_syscall;
+ regs->pc -= TRAP0_SIZE;
break;
}
}
- if (try_to_freeze())
- goto no_signal;
-
/*
- * Get the signal to deliver. When running under ptrace, at this
- * point the debugger may change all our registers ...
+ * If there is no signal to deliver, we just put the saved
+ * sigmask back.
*/
- if (get_signal(&ksig)) {
- /*
- * Depending on the signal settings we may need to revert the
- * decision to restart the system call. But skip this if a
- * debugger has chosen to restart at a different PC.
- */
- if (regs->pc == restart_addr) {
- if (retval == -ERESTARTNOHAND ||
- (retval == -ERESTARTSYS &&
- !(ksig.ka.sa.sa_flags & SA_RESTART))) {
- regs->a0 = -EINTR;
- regs->pc = continue_addr;
- }
- }
-
- /* Whee! Actually deliver the signal. */
- if (handle_signal(&ksig, regs) == 0) {
- /*
- * A signal was successfully delivered; the saved
- * sigmask will have been stored in the signal frame,
- * and will be restored by sigreturn, so we can simply
- * clear the TIF_RESTORE_SIGMASK flag.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK))
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- }
- return;
- }
-
-no_signal:
- if (syscall) {
- /*
- * Handle restarting a different system call. As above,
- * if a debugger has chosen to restart at a different PC,
- * ignore the restart.
- */
- if (retval == -ERESTART_RESTARTBLOCK
- && regs->pc == continue_addr) {
-#if defined(__CSKYABIV2__)
- regs->regs[3] = __NR_restart_syscall;
- regs->pc -= 4;
-#else
- regs->regs[9] = __NR_restart_syscall;
- regs->pc -= 2;
-#endif
- }
-
- /*
- * If there's no signal to deliver, we just put the saved
- * sigmask back.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
- }
+ restore_saved_sigmask();
}
-asmlinkage void
-do_notify_resume(unsigned int thread_flags, struct pt_regs *regs, int syscall)
+/*
+ * notification of userspace execution resumption
+ * - triggered by the _TIF_WORK_MASK flags
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags)
{
- if (thread_flags & _TIF_SIGPENDING)
- do_signal(regs, syscall);
+ /* Handle pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
+ do_signal(regs);
- if (thread_flags & _TIF_NOTIFY_RESUME) {
+ if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
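
The -ERESTARTSYS/-EINTR handling in handle_signal() above is what user space observes as the difference between a blocking call failing with EINTR and being transparently restarted after a handler installed with SA_RESTART returns. A small portable demo of that visible difference follows; the handler closes the pipe's write end so the restarted read() terminates with EOF instead of blocking forever.

/* restart_demo.c - observe SA_RESTART vs. EINTR for a blocking read() */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int wr_fd;

static void on_alarm(int sig)
{
	(void)sig;
	close(wr_fd);	/* after this, a restarted read() sees EOF */
}

static void try_read(int flags, const char *label)
{
	struct sigaction sa = { .sa_handler = on_alarm, .sa_flags = flags };
	int pipefd[2];
	char c;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);
	pipe(pipefd);
	wr_fd = pipefd[1];
	alarm(1);					/* fires while read() blocks */

	ssize_t n = read(pipefd[0], &c, 1);		/* nothing is ever written */
	if (n < 0)
		printf("%-12s read -> %s\n", label, strerror(errno));
	else
		printf("%-12s read -> %zd (EOF after restart)\n", label, n);

	close(pipefd[0]);
}

int main(void)
{
	try_read(0, "default:");		/* -ERESTARTSYS becomes EINTR */
	try_read(SA_RESTART, "SA_RESTART:");	/* pc is rewound, read() is retried */
	return 0;
}
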
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index d6f4b66b93e2..18041f46ded1 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -15,9 +15,9 @@
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
-#include <linux/kernel.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
+#include <linux/perf_event.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
@@ -82,7 +82,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long pgd_base;
- pgd_base = tlb_get_pgd();
+ pgd_base = (unsigned long)__va(get_pgd());
pgd = (pgd_t *)pgd_base + offset;
pgd_k = init_mm.pgd + offset;
@@ -107,6 +107,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
return;
}
#endif
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -154,10 +156,15 @@ good_area:
goto bad_area;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
- else
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
+ address);
+ } else {
tsk->min_flt++;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
+ address);
+ }
up_read(&mm->mmap_sem);
return;
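
The perf_sw_event() calls added to do_page_fault() feed the PERF_COUNT_SW_PAGE_FAULTS{,_MAJ,_MIN} software events, so `perf stat -e page-faults` and raw perf_event_open() users now see fault counts on csky. A minimal self-counting sketch using the standard perf_event_open() interface (error handling trimmed; the counter value is whatever the kernel reports):

/* count_faults.c - count this process's own page faults via perf */
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;	/* the event fed by do_page_fault() */
	attr.disabled = 1;

	/* no glibc wrapper, call the syscall directly: this process, any cpu */
	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	volatile char *p = malloc(1 << 20);		/* touch freshly mapped pages */
	for (size_t i = 0; i < (1 << 20); i += 4096)
		p[i] = 1;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("page faults: %lld\n", count);
	return 0;
}
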
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index c071da34e081..ecfc4b4b6373 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -23,13 +23,11 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
+ select UACCESS_MEMCPY
config CPU_BIG_ENDIAN
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index e3dead402e5f..789214ac99bf 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -29,6 +29,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += mmu.h
generic-y += mmu_context.h
generic-y += module.h
@@ -37,16 +38,15 @@ generic-y += pci.h
generic-y += percpu.h
generic-y += pgalloc.h
generic-y += preempt.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += serial.h
generic-y += shmparam.h
-generic-y += sizes.h
generic-y += spinlock.h
generic-y += timex.h
generic-y += tlbflush.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += uaccess.h
generic-y += unaligned.h
generic-y += vga.h
generic-y += word-at-a-time.h
diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h
index ddd483c6ca95..01666b8bb263 100644
--- a/arch/h8300/include/asm/syscall.h
+++ b/arch/h8300/include/asm/syscall.h
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/ptrace.h>
+#include <uapi/linux/audit.h>
static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
@@ -27,6 +28,11 @@ syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
*args = regs->er6;
}
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_H8300;
+}
/* Misc syscall related bits */
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
index 98f344279904..d8201ca31206 100644
--- a/arch/h8300/include/asm/tlb.h
+++ b/arch/h8300/include/asm/tlb.h
@@ -2,8 +2,6 @@
#ifndef __H8300_TLB_H__
#define __H8300_TLB_H__
-#define tlb_flush(tlb) do { } while (0)
-
#include <asm-generic/tlb.h>
#endif
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
deleted file mode 100644
index bc8031949d07..000000000000
--- a/arch/h8300/include/asm/uaccess.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_UACCESS_H
-#define _ASM_UACCESS_H
-
-#include <linux/string.h>
-
-static inline __must_check unsigned long
-raw_copy_from_user(void *to, const void __user * from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
- }
- }
-
- memcpy(to, (const void __force *)from, n);
- return 0;
-}
-
-static inline __must_check unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
- default:
- break;
- }
- }
-
- memcpy((void __force *)to, from, n);
- return 0;
-}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#include <asm-generic/uaccess.h>
-
-#endif
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index b32bfa1fe99e..23a979a85f14 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/console.h>
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 0f04a5e9aa4f..1eab16b1a0bc 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -102,17 +102,3 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void
-free_initmem(void)
-{
- free_initmem_default(-1);
-}
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index ac441680dcc0..b7d404bbaa0f 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -22,7 +22,6 @@ config HEXAGON
select GENERIC_IRQ_SHOW
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
- select ARCH_DISCARD_MEMBLOCK
select NEED_SG_DMA_LENGTH
select NO_IOPORT_MAP
select GENERIC_IOMAP
@@ -65,12 +64,6 @@ config GENERIC_CSUM
config GENERIC_IRQ_PROBE
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool n
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index d046e8ccdf78..84bb1ed1b931 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -24,15 +24,13 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
-generic-y += sizes.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += unaligned.h
diff --git a/arch/hexagon/include/asm/elf.h b/arch/hexagon/include/asm/elf.h
index 80311e7b8ca6..d10fbd54ae51 100644
--- a/arch/hexagon/include/asm/elf.h
+++ b/arch/hexagon/include/asm/elf.h
@@ -23,11 +23,7 @@
#include <asm/ptrace.h>
#include <asm/user.h>
-
-/*
- * This should really be in linux/elf-em.h.
- */
-#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
+#include <linux/elf-em.h>
struct elf32_hdr;
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index e17262ad125e..3d0ae09c2b8e 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -184,8 +184,6 @@ static inline void writel(u32 data, volatile void __iomem *addr)
#define writew_relaxed __raw_writew
#define writel_relaxed __raw_writel
-#define mmiowb()
-
/*
* Need an mtype somewhere in here, for cache type deals?
* This is probably too long for an inline.
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
index ae3a1e24fabd..dab26a71f577 100644
--- a/arch/hexagon/include/asm/syscall.h
+++ b/arch/hexagon/include/asm/syscall.h
@@ -21,6 +21,8 @@
#ifndef _ASM_HEXAGON_SYSCALL_H
#define _ASM_HEXAGON_SYSCALL_H
+#include <uapi/linux/audit.h>
+
typedef long (*syscall_fn)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -41,4 +43,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_HEXAGON;
+}
+
#endif
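
syscall_get_arch() is consumed by the audit and seccomp code, and the AUDIT_ARCH_* value it returns is what user space sees in seccomp_data.arch. The classic filter prologue that rejects syscalls from an unexpected ABI is sketched below; AUDIT_ARCH_X86_64 is used purely as a stand-in, and a Hexagon or H8/300 filter would compare against its own constant.

/* arch_check.c - seccomp filter that only allows the expected ABI */
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* load seccomp_data.arch, the value from syscall_get_arch() */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, arch)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),	/* wrong ABI */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);	/* required for unprivileged use */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("SECCOMP_MODE_FILTER");
		return 1;
	}
	printf("filter installed\n");
	return 0;
}
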
diff --git a/arch/hexagon/include/asm/tlb.h b/arch/hexagon/include/asm/tlb.h
index 2f00772cc08a..f71c4ba83614 100644
--- a/arch/hexagon/include/asm/tlb.h
+++ b/arch/hexagon/include/asm/tlb.h
@@ -22,18 +22,6 @@
#include <linux/pagemap.h>
#include <asm/tlbflush.h>
-/*
- * We don't need any special per-pte or per-vma handling...
- */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index a30e58d5f351..7a34092e8b58 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -24,7 +24,6 @@
* User space memory access functions
*/
#include <linux/mm.h>
-#include <asm/segment.h>
#include <asm/sections.h>
/*
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 1719ede9e9bd..41cf34243ea1 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -85,16 +85,6 @@ void __init mem_init(void)
}
/*
- * free_initmem - frees memory used by stuff declared with __init
- *
- * Todo: free pages between __init_begin and __init_end; possibly
- * some devtree related stuff as well.
- */
-void __ref free_initmem(void)
-{
-}
-
-/*
* free_initrd_mem - frees... initrd memory.
* @start - start of init memory
* @end - end of init memory
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8d7396bd1790..7468d8e50467 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -33,7 +33,6 @@ config IA64
select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
select VIRT_TO_BUS
- select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW
@@ -83,10 +82,6 @@ config STACKTRACE_SUPPORT
config GENERIC_LOCKBREAK
def_bool n
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config HUGETLB_PAGE_SIZE_VARIABLE
bool
depends on HUGETLB_PAGE
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 1e6fef69bb01..a511d62d447a 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -113,20 +113,6 @@ extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
*/
#define __ia64_mf_a() ia64_mfa()
-/**
- * ___ia64_mmiowb - I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes. For most
- * ia64 platforms, this is a simple 'mf.a' instruction.
- *
- * See Documentation/driver-api/device-io.rst for more information.
- */
-static inline void ___ia64_mmiowb(void)
-{
- ia64_mfa();
-}
-
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
@@ -161,7 +147,6 @@ __ia64_mk_io_addr (unsigned long port)
#define __ia64_writew ___ia64_writew
#define __ia64_writel ___ia64_writel
#define __ia64_writeq ___ia64_writeq
-#define __ia64_mmiowb ___ia64_mmiowb
/*
* For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
@@ -296,7 +281,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
#define __outb platform_outb
#define __outw platform_outw
#define __outl platform_outl
-#define __mmiowb platform_mmiowb
#define inb(p) __inb(p)
#define inw(p) __inw(p)
@@ -310,7 +294,6 @@ __outsl (unsigned long port, const void *src, unsigned long count)
#define outsb(p,s,c) __outsb(p,s,c)
#define outsw(p,s,c) __outsw(p,s,c)
#define outsl(p,s,c) __outsl(p,s,c)
-#define mmiowb() __mmiowb()
/*
* The address passed to these functions are ioremap()ped already.
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 5133739966bc..beae261fbcb4 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -30,7 +30,6 @@ typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
-typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
@@ -80,11 +79,6 @@ machvec_noop (void)
}
static inline void
-machvec_noop_mm (struct mm_struct *mm)
-{
-}
-
-static inline void
machvec_noop_task (struct task_struct *task)
{
}
@@ -96,7 +90,6 @@ machvec_noop_bus (struct pci_bus *bus)
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
-extern void machvec_tlb_migrate_finish (struct mm_struct *);
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
@@ -124,7 +117,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# define platform_send_ipi ia64_mv.send_ipi
# define platform_timer_interrupt ia64_mv.timer_interrupt
# define platform_global_tlb_purge ia64_mv.global_tlb_purge
-# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish
# define platform_dma_init ia64_mv.dma_init
# define platform_dma_get_ops ia64_mv.dma_get_ops
# define platform_irq_to_vector ia64_mv.irq_to_vector
@@ -167,7 +159,6 @@ struct ia64_machine_vector {
ia64_mv_send_ipi_t *send_ipi;
ia64_mv_timer_interrupt_t *timer_interrupt;
ia64_mv_global_tlb_purge_t *global_tlb_purge;
- ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
ia64_mv_dma_init *dma_init;
ia64_mv_dma_get_ops *dma_get_ops;
ia64_mv_irq_to_vector *irq_to_vector;
@@ -206,7 +197,6 @@ struct ia64_machine_vector {
platform_send_ipi, \
platform_timer_interrupt, \
platform_global_tlb_purge, \
- platform_tlb_migrate_finish, \
platform_dma_init, \
platform_dma_get_ops, \
platform_irq_to_vector, \
@@ -270,9 +260,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
#ifndef platform_global_tlb_purge
# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */
#endif
-#ifndef platform_tlb_migrate_finish
-# define platform_tlb_migrate_finish machvec_noop_mm
-#endif
#ifndef platform_kernel_launch_event
# define platform_kernel_launch_event machvec_noop
#endif
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index b5153d300289..a243e4fb4877 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -34,7 +34,6 @@ extern ia64_mv_irq_init_t sn_irq_init;
extern ia64_mv_send_ipi_t sn2_send_IPI;
extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
-extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
extern ia64_mv_irq_to_vector sn_irq_to_vector;
extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
@@ -77,7 +76,6 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
#define platform_send_ipi sn2_send_IPI
#define platform_timer_interrupt sn_timer_interrupt
#define platform_global_tlb_purge sn2_global_tlb_purge
-#define platform_tlb_migrate_finish sn_tlb_migrate_finish
#define platform_pci_fixup sn_pci_fixup
#define platform_inb __sn_inb
#define platform_inw __sn_inw
diff --git a/arch/ia64/include/asm/mmiowb.h b/arch/ia64/include/asm/mmiowb.h
new file mode 100644
index 000000000000..297b85ac84a0
--- /dev/null
+++ b/arch/ia64/include/asm/mmiowb.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_IA64_MMIOWB_H
+#define _ASM_IA64_MMIOWB_H
+
+#include <asm/machvec.h>
+
+/**
+ * ___ia64_mmiowb - I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes. For most
+ * ia64 platforms, this is a simple 'mf.a' instruction.
+ */
+static inline void ___ia64_mmiowb(void)
+{
+ ia64_mfa();
+}
+
+#define __ia64_mmiowb ___ia64_mmiowb
+#define mmiowb() platform_mmiowb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_IA64_MMIOWB_H */
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
deleted file mode 100644
index 917910607e0e..000000000000
--- a/arch/ia64/include/asm/rwsem.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * R/W semaphores for ia64
- *
- * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
- * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
- *
- * Based on asm-i386/rwsem.h and other architecture implementation.
- *
- * The MSW of the count is the negated number of active writers and
- * waiting lockers, and the LSW is the total number of active locks.
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
- * the case of an uncontended lock. Readers increment by 1 and see a positive
- * value when uncontended, negative if there are writers (and maybe) readers
- * waiting (in which case it goes to sleep).
- */
-
-#ifndef _ASM_IA64_RWSEM_H
-#define _ASM_IA64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#include <asm/intrinsics.h>
-
-#define RWSEM_UNLOCKED_VALUE __IA64_UL_CONST(0x0000000000000000)
-#define RWSEM_ACTIVE_BIAS (1L)
-#define RWSEM_ACTIVE_MASK (0xffffffffL)
-#define RWSEM_WAITING_BIAS (-0x100000000L)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline int
-___down_read (struct rw_semaphore *sem)
-{
- long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
-
- return (result < 0);
-}
-
-static inline void
-__down_read (struct rw_semaphore *sem)
-{
- if (___down_read(sem))
- rwsem_down_read_failed(sem);
-}
-
-static inline int
-__down_read_killable (struct rw_semaphore *sem)
-{
- if (___down_read(sem))
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline long
-___down_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old + RWSEM_ACTIVE_WRITE_BIAS;
- } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
-
- return old;
-}
-
-static inline void
-__down_write (struct rw_semaphore *sem)
-{
- if (___down_write(sem))
- rwsem_down_write_failed(sem);
-}
-
-static inline int
-__down_write_killable (struct rw_semaphore *sem)
-{
- if (___down_write(sem)) {
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void
-__up_read (struct rw_semaphore *sem)
-{
- long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
-
- if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void
-__up_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old - RWSEM_ACTIVE_WRITE_BIAS;
- } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
- if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
- rwsem_wake(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_read_trylock (struct rw_semaphore *sem)
-{
- long tmp;
- while ((tmp = atomic_long_read(&sem->count)) >= 0) {
- if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_write_trylock (struct rw_semaphore *sem)
-{
- long tmp = atomic_long_cmpxchg_acquire(&sem->count,
- RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void
-__downgrade_write (struct rw_semaphore *sem)
-{
- long old, new;
-
- do {
- old = atomic_long_read(&sem->count);
- new = old - RWSEM_WAITING_BIAS;
- } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
- if (old < 0)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* _ASM_IA64_RWSEM_H */
diff --git a/arch/ia64/include/asm/segment.h b/arch/ia64/include/asm/segment.h
deleted file mode 100644
index b89e2b3d648f..000000000000
--- a/arch/ia64/include/asm/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_IA64_SEGMENT_H
-#define _ASM_IA64_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif /* _ASM_IA64_SEGMENT_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index afd0b3121b4c..5f620e66384e 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -73,6 +73,8 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
WRITE_ONCE(*p, (tmp + 2) & ~1);
}
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 0d9e7fab4a79..da108cd45174 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -74,7 +74,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
ia64_syscall_get_set_arguments(task, regs, args, 1);
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_IA64;
}
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 516355a774bf..86ec034ba499 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -47,263 +47,6 @@
#include <asm/tlbflush.h>
#include <asm/machvec.h>
-/*
- * If we can't allocate a page to make a big batch of page pointers
- * to work on, then just handle a few from the on-stack structure.
- */
-#define IA64_GATHER_BUNDLE 8
-
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int nr;
- unsigned int max;
- unsigned char fullmm; /* non-zero means full mm flush */
- unsigned char need_flush; /* really unmapped some PTEs? */
- unsigned long start, end;
- unsigned long start_addr;
- unsigned long end_addr;
- struct page **pages;
- struct page *local[IA64_GATHER_BUNDLE];
-};
-
-struct ia64_tr_entry {
- u64 ifa;
- u64 itir;
- u64 pte;
- u64 rr;
-}; /*Record for tr entry!*/
-
-extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
-extern void ia64_ptr_entry(u64 target_mask, int slot);
-
-extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
-
-/*
- region register macros
-*/
-#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
-#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
-#define RR_VE_MASK 0x0000000000000001L
-#define RR_VE_SHIFT 0
-#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
-#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
-#define RR_PS_MASK 0x00000000000000fcL
-#define RR_PS_SHIFT 2
-#define RR_RID_MASK 0x00000000ffffff00L
-#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
-
-static inline void
-ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- tlb->need_flush = 0;
-
- if (tlb->fullmm) {
- /*
- * Tearing down the entire address space. This happens both as a result
- * of exit() and execve(). The latter case necessitates the call to
- * flush_tlb_mm() here.
- */
- flush_tlb_mm(tlb->mm);
- } else if (unlikely (end - start >= 1024*1024*1024*1024UL
- || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
- {
- /*
- * If we flush more than a tera-byte or across regions, we're probably
- * better off just flushing the entire TLB(s). This should be very rare
- * and is not worth optimizing for.
- */
- flush_tlb_all();
- } else {
- /*
- * flush_tlb_range() takes a vma instead of a mm pointer because
- * some architectures want the vm_flags for ITLB/DTLB flush.
- */
- struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
-
- /* flush the address range from the tlb: */
- flush_tlb_range(&vma, start, end);
- /* now flush the virt. page-table area mapping the address range: */
- flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
- }
-
-}
-
-static inline void
-ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- unsigned long i;
- unsigned int nr;
-
- /* lastly, release the freed pages */
- nr = tlb->nr;
-
- tlb->nr = 0;
- tlb->start_addr = ~0UL;
- for (i = 0; i < nr; ++i)
- free_page_and_swap_cache(tlb->pages[i]);
-}
-
-/*
- * Flush the TLB for address range START to END and, if not in fast mode, release the
- * freed pages that where gathered up to this point.
- */
-static inline void
-ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
- if (!tlb->need_flush)
- return;
- ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
- ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
- unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
- if (addr) {
- tlb->pages = (void *)addr;
- tlb->max = PAGE_SIZE / sizeof(void *);
- }
-}
-
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->max = ARRAY_SIZE(tlb->local);
- tlb->pages = tlb->local;
- tlb->nr = 0;
- tlb->fullmm = !(start | (end+1));
- tlb->start = start;
- tlb->end = end;
- tlb->start_addr = ~0UL;
-}
-
-/*
- * Called at the end of the shootdown operation to free up any resources that were
- * collected.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force)
- tlb->need_flush = 1;
- /*
- * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
- * tlb->end_addr.
- */
- ia64_tlb_flush_mmu(tlb, start, end);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- if (tlb->pages != tlb->local)
- free_pages((unsigned long)tlb->pages, 0);
-}
-
-/*
- * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
- * must be delayed until after the TLB has been flushed (see comments at the beginning of
- * this file).
- */
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
-
- if (!tlb->nr && tlb->pages == tlb->local)
- __tlb_alloc_page(tlb);
-
- tlb->pages[tlb->nr++] = page;
- VM_WARN_ON(tlb->nr > tlb->max);
- if (tlb->nr == tlb->max)
- return true;
- return false;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu_free(tlb);
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- if (__tlb_remove_page(tlb, page))
- tlb_flush_mmu(tlb);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-/*
- * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
- * PTE, not just those pointing to (normal) physical memory.
- */
-static inline void
-__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
- if (tlb->start_addr == ~0UL)
- tlb->start_addr = address;
- tlb->end_addr = address + PAGE_SIZE;
-}
-
-#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-
-#define tlb_remove_tlb_entry(tlb, ptep, addr) \
-do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, addr); \
-} while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, address) \
-do { \
- tlb->need_flush = 1; \
- __pte_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pmd_free_tlb(tlb, ptep, address) \
-do { \
- tlb->need_flush = 1; \
- __pmd_free_tlb(tlb, ptep, address); \
-} while (0)
-
-#define pud_free_tlb(tlb, pudp, address) \
-do { \
- tlb->need_flush = 1; \
- __pud_free_tlb(tlb, pudp, address); \
-} while (0)
+#include <asm-generic/tlb.h>
#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/ia64/include/asm/tlbflush.h b/arch/ia64/include/asm/tlbflush.h
index 25e280810f6c..ceac10c4d6e2 100644
--- a/arch/ia64/include/asm/tlbflush.h
+++ b/arch/ia64/include/asm/tlbflush.h
@@ -14,6 +14,31 @@
#include <asm/mmu_context.h>
#include <asm/page.h>
+struct ia64_tr_entry {
+ u64 ifa;
+ u64 itir;
+ u64 pte;
+ u64 rr;
+}; /*Record for tr entry!*/
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
+
+/*
+ region register macros
+*/
+#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK 0x0000000000000001L
+#define RR_VE_SHIFT 0
+#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK 0x00000000000000fcL
+#define RR_PS_SHIFT 2
+#define RR_RID_MASK 0x00000000ffffff00L
+#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
+
/*
* Now for some TLB flushing routines. This is the kind of stuff that
* can be very expensive, so try to avoid them whenever possible.
diff --git a/arch/ia64/include/uapi/asm/sockios.h b/arch/ia64/include/uapi/asm/sockios.h
deleted file mode 100644
index f27a12f95d20..000000000000
--- a/arch/ia64/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_SOCKIOS_H
-#define _ASM_IA64_SOCKIOS_H
-
-/*
- * Socket-level I/O control calls.
- *
- * Based on <asm-i386/sockios.h>.
- *
- * Modified 1998, 1999
- * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_IA64_SOCKIOS_H */
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index f53faf48b7ce..846867bff6d6 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -11,7 +11,7 @@ quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ -Wl,--hash-style=sysv
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 41eb281709da..1435e7a1a8cd 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -177,7 +177,7 @@ struct acpi_table_madt *acpi_madt __initdata;
static u8 has_8259;
static int __init
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
+acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_local_apic_override *lapic;
@@ -195,7 +195,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
}
static int __init
-acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lsapic(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_local_sapic *lsapic;
@@ -216,7 +216,7 @@ acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
}
static int __init
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic_nmi *lacpi_nmi;
@@ -230,7 +230,7 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
}
static int __init
-acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_iosapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_io_sapic *iosapic;
@@ -245,7 +245,7 @@ acpi_parse_iosapic(struct acpi_subtable_header * header, const unsigned long end
static unsigned int __initdata acpi_madt_rev;
static int __init
-acpi_parse_plat_int_src(struct acpi_subtable_header * header,
+acpi_parse_plat_int_src(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_interrupt_source *plintsrc;
@@ -329,7 +329,7 @@ unsigned int get_cpei_target_cpu(void)
}
static int __init
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_interrupt_override *p;
@@ -350,7 +350,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
}
static int __init
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_nmi_source *nmi_src;
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 1b604d02250b..ebd82535f51b 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -10,7 +10,9 @@
#include <asm/page.h>
-struct ia64_machine_vector ia64_mv;
+struct ia64_machine_vector ia64_mv = {
+ .mmiowb = ___ia64_mmiowb
+};
EXPORT_SYMBOL(ia64_mv);
static struct ia64_machine_vector * __init
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 583a3746d70b..c9cfa760cd57 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1058,9 +1058,7 @@ check_bugs (void)
static int __init run_dmi_scan(void)
{
- dmi_scan_machine();
- dmi_memdev_walk();
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
return 0;
}
core_initcall(run_dmi_scan);
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 56e3d0b685e1..e01df3f2f80d 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -348,3 +348,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
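
Entries 428-433 hook up the new mount API syscalls added in this cycle. The sketch below strings them together to create and attach a tmpfs instance; it is a hedged example, not ia64-specific: glibc had no wrappers at the time, so raw syscall(2) is used, the FSCONFIG_*/MOVE_MOUNT_* constants come from <linux/mount.h> on 5.2+ kernels, and the program needs CAP_SYS_ADMIN plus an existing /mnt to succeed.

/* newmount.c - fsopen/fsconfig/fsmount/move_mount in sequence */
#include <fcntl.h>
#include <linux/mount.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(__NR_fsopen, "tmpfs", 0);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}
	syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
	syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mntfd = syscall(__NR_fsmount, fsfd, 0, 0);
	if (mntfd < 0) {
		perror("fsmount");
		return 1;
	}
	/* attach the detached mount at /mnt */
	if (syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0)
		perror("move_mount");

	close(mntfd);
	close(fsfd);
	return 0;
}
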
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index e49200e31750..d28e29103bdb 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -666,14 +666,14 @@ mem_init (void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
@@ -682,20 +682,15 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
- int ret;
zone = page_zone(pfn_to_page(start_pfn));
- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
- if (ret)
- pr_warn("%s: Problem encountered in __remove_pages() as"
- " ret=%d\n", __func__, ret);
-
- return ret;
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 5fc89aabdce1..5158bd28de05 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -305,8 +305,8 @@ local_flush_tlb_all (void)
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
-void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+static void
+__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
@@ -343,6 +343,25 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_enable();
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
+
+void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (unlikely(end - start >= 1024*1024*1024*1024UL
+ || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
+ /*
+ * If we flush more than a tera-byte or across regions, we're
+ * probably better off just flushing the entire TLB(s). This
+ * should be very rare and is not worth optimizing for.
+ */
+ flush_tlb_all();
+ } else {
+ /* flush the address range from the tlb */
+ __flush_tlb_range(vma, start, end);
+ /* flush the virt. page-table area mapping the addr range */
+ __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
+ }
+}
EXPORT_SYMBOL(flush_tlb_range);
void ia64_tlb_init(void)
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index b73b0ebf8214..b510f4f17fd4 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -120,13 +120,6 @@ void sn_migrate(struct task_struct *task)
cpu_relax();
}
-void sn_tlb_migrate_finish(struct mm_struct *mm)
-{
- /* flush_tlb_mm is inefficient if more than 1 users of mm */
- if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
- flush_tlb_mm(mm);
-}
-
static void
sn2_ipi_flush_all_tlb(struct mm_struct *mm)
{
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b54206408f91..218e037ef901 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -20,25 +20,17 @@ config M68K
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
select ARCH_WANT_IPC_PARSE_VERSION
- select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_REL
select MODULES_USE_ELF_RELA
select OLD_SIGSUSPEND3
select OLD_SIGACTION
- select ARCH_DISCARD_MEMBLOCK
+ select MMU_GATHER_NO_RANGE if MMU
config CPU_BIG_ENDIAN
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 2081b8cd5591..b9aee983e6f4 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -88,10 +88,19 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
struct ciabase *base = dev_id;
int mach_irq;
unsigned char ints;
+ unsigned long flags;
+ /* Interrupts get disabled while the timer irq flag is cleared and
+ * the timer interrupt serviced.
+ */
mach_irq = base->cia_irq;
+ local_irq_save(flags);
ints = cia_set_irq(base, CIA_ICR_ALL);
amiga_custom.intreq = base->int_mask;
+ if (ints & 1)
+ generic_handle_irq(mach_irq);
+ local_irq_restore(flags);
+ mach_irq++, ints >>= 1;
for (; ints; mach_irq++, ints >>= 1) {
if (ints & 1)
generic_handle_irq(mach_irq);
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 65f63a457130..c32ab8041cf6 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/rtc.h>
#include <linux/init.h>
@@ -95,8 +96,6 @@ static char amiga_model_name[13] = "Amiga ";
static void amiga_sched_init(irq_handler_t handler);
static void amiga_get_model(char *model);
static void amiga_get_hardware_list(struct seq_file *m);
-/* amiga specific timer functions */
-static u32 amiga_gettimeoffset(void);
extern void amiga_mksound(unsigned int count, unsigned int ticks);
static void amiga_reset(void);
extern void amiga_init_sound(void);
@@ -386,7 +385,6 @@ void __init config_amiga(void)
mach_init_IRQ = amiga_init_IRQ;
mach_get_model = amiga_get_model;
mach_get_hardware_list = amiga_get_hardware_list;
- arch_gettimeoffset = amiga_gettimeoffset;
/*
* default MAX_DMA=0xffffffff on all machines. If we don't do so, the SCSI
@@ -464,7 +462,29 @@ void __init config_amiga(void)
*(unsigned char *)ZTWO_VADDR(0xde0002) |= 0x80;
}
+static u64 amiga_read_clk(struct clocksource *cs);
+
+static struct clocksource amiga_clk = {
+ .name = "ciab",
+ .rating = 250,
+ .read = amiga_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
static unsigned short jiffy_ticks;
+static u32 clk_total, clk_offset;
+
+static irqreturn_t ciab_timer_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+
+ clk_total += jiffy_ticks;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+
+ return IRQ_HANDLED;
+}
static void __init amiga_sched_init(irq_handler_t timer_routine)
{
@@ -484,19 +504,22 @@ static void __init amiga_sched_init(irq_handler_t timer_routine)
* Please don't change this to use ciaa, as it interferes with the
* SCSI code. We'll have to take a look at this later
*/
- if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
+ if (request_irq(IRQ_AMIGA_CIAB_TA, ciab_timer_handler, IRQF_TIMER,
+ "timer", timer_routine))
pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
-}
-#define TICK_SIZE 10000
+ clocksource_register_hz(&amiga_clk, amiga_eclock);
+}
-/* This is always executed with interrupts disabled. */
-static u32 amiga_gettimeoffset(void)
+static u64 amiga_read_clk(struct clocksource *cs)
{
unsigned short hi, lo, hi2;
- u32 ticks, offset = 0;
+ unsigned long flags;
+ u32 ticks;
+
+ local_irq_save(flags);
/* read CIA B timer A current value */
hi = ciab.tahi;
@@ -513,12 +536,14 @@ static u32 amiga_gettimeoffset(void)
if (ticks > jiffy_ticks / 2)
/* check for pending interrupt */
if (cia_set_irq(&ciab_base, 0) & CIA_ICR_TA)
- offset = 10000;
+ clk_offset = jiffy_ticks;
ticks = jiffy_ticks - ticks;
- ticks = (10000 * ticks) / jiffy_ticks;
+ ticks += clk_offset + clk_total;
+
+ local_irq_restore(flags);
- return (ticks + offset) * 1000;
+ return ticks;
}
static void amiga_reset(void) __noreturn;
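
The Amiga conversion above is the template for the rest of this series: the same pattern recurs for Atari, BVME6000, HP300, the Mac VIA and the MVME boards further down, each dropping its arch_gettimeoffset hook and exposing a free-running cycle count through the clocksource API, with the platform timer interrupt accumulating whole jiffies worth of cycles. A stripped-down sketch of the shared skeleton, with the platform name "foo", its IRQ number, clock frequency and counter-read helper all invented for illustration:

#include <linux/clocksource.h>
#include <linux/interrupt.h>

static u32 clk_total;			/* cycles of completed jiffies */

static u32 foo_read_hw_counter(void);	/* platform-specific: cycles elapsed
					 * in the current jiffy (stand-in) */

static u64 foo_read_clk(struct clocksource *cs)
{
	unsigned long flags;
	u32 ticks;

	local_irq_save(flags);
	ticks = foo_read_hw_counter();	/* partial jiffy from the hardware */
	ticks += clk_total;		/* plus completed jiffies */
	local_irq_restore(flags);

	return ticks;
}

static struct clocksource foo_clk = {
	.name	= "foo",
	.rating	= 250,
	.read	= foo_read_clk,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static irqreturn_t foo_timer_handler(int irq, void *dev_id)
{
	irq_handler_t timer_routine = dev_id;

	clk_total += FOO_TIMER_CYCLES;	/* one jiffy worth of timer cycles */
	timer_routine(0, NULL);		/* legacy m68k tick callback */

	return IRQ_HANDLED;
}

void __init foo_sched_init(irq_handler_t timer_routine)
{
	if (request_irq(FOO_TIMER_IRQ, foo_timer_handler, IRQF_TIMER,
			"timer", timer_routine))
		pr_err("Couldn't register timer interrupt\n");

	clocksource_register_hz(&foo_clk, FOO_TIMER_CLOCK_FREQ);
}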
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index aef8d42e078d..7d168e6dfb01 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -29,7 +29,6 @@ u_long apollo_model;
extern void dn_sched_init(irq_handler_t handler);
extern void dn_init_IRQ(void);
-extern u32 dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern void dn_dummy_reset(void);
#ifdef CONFIG_HEARTBEAT
@@ -152,7 +151,6 @@ void __init config_apollo(void)
mach_sched_init=dn_sched_init; /* */
mach_init_IRQ=dn_init_IRQ;
- arch_gettimeoffset = dn_gettimeoffset;
mach_max_dma_address = 0xffffffff;
mach_hwclk = dn_dummy_hwclk; /* */
mach_reset = dn_dummy_reset; /* */
@@ -205,11 +203,6 @@ void dn_sched_init(irq_handler_t timer_routine)
pr_err("Couldn't register timer interrupt\n");
}
-u32 dn_gettimeoffset(void)
-{
- return 0xdeadbeef;
-}
-
int dn_dummy_hwclk(int op, struct rtc_time *t) {
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 3d2b63bedf05..56f02ea2c248 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -142,7 +142,7 @@ struct mfptimerbase {
.name = "MFP Timer D"
};
-static irqreturn_t mfptimer_handler(int irq, void *dev_id)
+static irqreturn_t mfp_timer_d_handler(int irq, void *dev_id)
{
struct mfptimerbase *base = dev_id;
int mach_irq;
@@ -344,7 +344,7 @@ void __init atari_init_IRQ(void)
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
/* request timer D dispatch handler */
- if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
+ if (request_irq(IRQ_MFP_TIMD, mfp_timer_d_handler, IRQF_SHARED,
stmfp_base.name, &stmfp_base))
pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 4fcc4b1df1c0..902255e7b5b2 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -78,7 +78,6 @@ static void atari_heartbeat(int on);
/* atari specific timer functions (in time.c) */
extern void atari_sched_init(irq_handler_t);
-extern u32 atari_gettimeoffset(void);
extern int atari_mste_hwclk (int, struct rtc_time *);
extern int atari_tt_hwclk (int, struct rtc_time *);
@@ -205,7 +204,6 @@ void __init config_atari(void)
mach_init_IRQ = atari_init_IRQ;
mach_get_model = atari_get_model;
mach_get_hardware_list = atari_get_hardware_list;
- arch_gettimeoffset = atari_gettimeoffset;
mach_reset = atari_reset;
mach_max_dma_address = 0xffffff;
#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 9cca64286464..ce923a523695 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -24,6 +25,35 @@
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
+static u64 atari_read_clk(struct clocksource *cs);
+
+static struct clocksource atari_clk = {
+ .name = "mfp",
+ .rating = 100,
+ .read = atari_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+static u8 last_timer_count;
+
+static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ last_timer_count = st_mfp.tim_dt_c;
+ } while (last_timer_count == 1);
+ clk_total += INT_TICKS;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
+}
+
void __init
atari_sched_init(irq_handler_t timer_routine)
{
@@ -32,31 +62,33 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
- if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
+ if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, IRQF_TIMER, "timer",
+ timer_routine))
pr_err("Couldn't register timer interrupt\n");
+
+ clocksource_register_hz(&atari_clk, INT_CLK);
}
/* ++andreas: gettimeoffset fixed to check for pending interrupt */
-#define TICK_SIZE 10000
-
-/* This is always executed with interrupts disabled. */
-u32 atari_gettimeoffset(void)
+static u64 atari_read_clk(struct clocksource *cs)
{
- u32 ticks, offset = 0;
-
- /* read MFP timer C current value */
- ticks = st_mfp.tim_dt_c;
- /* The probability of underflow is less than 2% */
- if (ticks > INT_TICKS - INT_TICKS / 50)
- /* Check for pending timer interrupt */
- if (st_mfp.int_pn_b & (1 << 5))
- offset = TICK_SIZE;
-
- ticks = INT_TICKS - ticks;
- ticks = ticks * 10000L / INT_TICKS;
-
- return (ticks + offset) * 1000;
+ unsigned long flags;
+ u8 count;
+ u32 ticks;
+
+ local_irq_save(flags);
+ /* Ensure that the count is monotonically decreasing, even though
+ * the result may briefly stop changing after counter wrap-around.
+ */
+ count = min(st_mfp.tim_dt_c, last_timer_count);
+ last_timer_count = count;
+
+ ticks = INT_TICKS - count;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index 143ee9fa3893..8ebaabc931cd 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -39,16 +40,10 @@
static void bvme6000_get_model(char *model);
extern void bvme6000_sched_init(irq_handler_t handler);
-extern u32 bvme6000_gettimeoffset(void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern void bvme6000_reset (void);
void bvme6000_set_vectors (void);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
-
-static irq_handler_t tick_handler;
-
int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
{
@@ -110,7 +105,6 @@ void __init config_bvme6000(void)
mach_max_dma_address = 0xffffffff;
mach_sched_init = bvme6000_sched_init;
mach_init_IRQ = bvme6000_init_IRQ;
- arch_gettimeoffset = bvme6000_gettimeoffset;
mach_hwclk = bvme6000_hwclk;
mach_reset = bvme6000_reset;
mach_get_model = bvme6000_get_model;
@@ -154,15 +148,38 @@ irqreturn_t bvme6000_abort_int (int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u64 bvme6000_read_clk(struct clocksource *cs);
+
+static struct clocksource bvme6000_clk = {
+ .name = "rtc",
+ .rating = 250,
+ .read = bvme6000_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
+#define RTC_TIMER_CLOCK_FREQ 8000000
+#define RTC_TIMER_CYCLES (RTC_TIMER_CLOCK_FREQ / HZ)
+#define RTC_TIMER_COUNT ((RTC_TIMER_CYCLES / 2) - 1)
static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
- unsigned char msr = rtc->msr & 0xc0;
+ unsigned char msr;
+ local_irq_save(flags);
+ msr = rtc->msr & 0xc0;
rtc->msr = msr | 0x20; /* Ack the interrupt */
+ clk_total += RTC_TIMER_CYCLES;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- return tick_handler(irq, dev_id);
+ return IRQ_HANDLED;
}
/*
@@ -181,14 +198,13 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = 0; /* Ensure timer registers accessible */
- tick_handler = timer_routine;
- if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0,
- "timer", bvme6000_timer_int))
+ if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, IRQF_TIMER, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */
- rtc->t1msb = 39999 >> 8;
- rtc->t1lsb = 39999 & 0xff;
+ rtc->t1msb = RTC_TIMER_COUNT >> 8;
+ rtc->t1lsb = RTC_TIMER_COUNT & 0xff;
rtc->irr_icr1 &= 0xef; /* Route timer 1 to INTR pin */
rtc->msr = 0x40; /* Access int.cntrl, etc */
rtc->pfr_icr0 = 0x80; /* Just timer 1 ints enabled */
@@ -200,14 +216,14 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
rtc->msr = msr;
+ clocksource_register_hz(&bvme6000_clk, RTC_TIMER_CLOCK_FREQ);
+
if (request_irq(BVME_IRQ_ABORT, bvme6000_abort_int, 0,
"abort", bvme6000_abort_int))
panic ("Couldn't register abort int");
}
-/* This is always executed with interrupts disabled. */
-
/*
* NOTE: Don't accept any readings within 5us of rollover, as
* the T1INT bit may be a little slow getting set. There is also
@@ -215,14 +231,18 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
* results...
*/
-u32 bvme6000_gettimeoffset(void)
+static u64 bvme6000_read_clk(struct clocksource *cs)
{
+ unsigned long flags;
volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
volatile PitRegsPtr pit = (PitRegsPtr)BVME_PIT_BASE;
- unsigned char msr = rtc->msr & 0xc0;
+ unsigned char msr, msb;
unsigned char t1int, t1op;
u32 v = 800000, ov;
+ local_irq_save(flags);
+
+ msr = rtc->msr & 0xc0;
rtc->msr = 0; /* Ensure timer registers accessible */
do {
@@ -230,22 +250,25 @@ u32 bvme6000_gettimeoffset(void)
t1int = rtc->msr & 0x20;
t1op = pit->pcdr & 0x04;
rtc->t1cr_omr |= 0x40; /* Latch timer1 */
- v = rtc->t1msb << 8; /* Read timer1 */
- v |= rtc->t1lsb; /* Read timer1 */
+ msb = rtc->t1msb; /* Read timer1 */
+ v = (msb << 8) | rtc->t1lsb; /* Read timer1 */
} while (t1int != (rtc->msr & 0x20) ||
t1op != (pit->pcdr & 0x04) ||
abs(ov-v) > 80 ||
- v > 39960);
+ v > RTC_TIMER_COUNT - (RTC_TIMER_COUNT / 100));
- v = 39999 - v;
+ v = RTC_TIMER_COUNT - v;
if (!t1op) /* If in second half cycle.. */
- v += 40000;
- v /= 8; /* Convert ticks to microseconds */
- if (t1int)
- v += 10000; /* Int pending, + 10ms */
+ v += RTC_TIMER_CYCLES / 2;
+ if (msb > 0 && t1int)
+ clk_offset = RTC_TIMER_CYCLES;
rtc->msr = msr;
- return v * 1000;
+ v += clk_offset + clk_total;
+
+ local_irq_restore(flags);
+
+ return v;
}
/*
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 0857cdbfde0c..d5e683dd885d 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -12,7 +12,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 525421ae277d..fea392cfcf1b 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -56,6 +56,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -210,9 +211,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -234,9 +232,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -313,7 +308,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -460,12 +454,12 @@ CONFIG_RTC_DRV_RP5C01=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -573,9 +567,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -640,6 +636,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -649,4 +646,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index db0e654a88d5..2474d267460e 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -52,6 +52,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -206,9 +207,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -230,9 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -309,7 +304,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -420,12 +414,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -533,9 +527,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -600,6 +596,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -609,4 +606,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 1451168eb789..0fc7d2992fe0 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -59,6 +59,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -213,9 +214,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -237,9 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -316,7 +311,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -442,12 +436,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -555,9 +549,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -622,6 +618,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -631,4 +628,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index b0d3609f5bb3..699df9fdf866 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -49,6 +49,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -203,9 +204,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -227,9 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -306,7 +301,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -413,12 +407,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -526,9 +520,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -593,6 +589,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -602,4 +599,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 4ed7c151347c..b50802255324 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -51,6 +51,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -205,9 +206,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -229,9 +227,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -308,7 +303,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -422,12 +416,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -535,9 +529,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -602,6 +598,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -611,4 +608,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index 4f4ccd13c11b..434bd3750966 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -11,7 +11,6 @@ CONFIG_SYSCTL_SYSCALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 0dc544e1ce1f..04e7d70f6030 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -50,6 +50,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -204,9 +205,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -228,9 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -310,7 +305,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -444,12 +438,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -557,9 +551,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -624,6 +620,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -633,4 +630,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 5a7b7b0d6e72..5e1cc4c17852 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -70,6 +70,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -224,9 +225,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,9 +246,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -330,7 +325,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -526,12 +520,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -639,9 +633,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -706,6 +702,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -715,4 +712,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 71eb9be1803b..170ac8792c2d 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -48,6 +48,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -202,9 +203,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -226,9 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -305,7 +300,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -412,12 +406,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -525,9 +519,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -592,6 +588,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -601,4 +598,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index ea2ebd4241c0..d865592a423e 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -49,6 +49,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -203,9 +204,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -227,9 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -306,7 +301,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -413,12 +407,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -526,9 +520,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -593,6 +589,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -602,4 +599,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index cef6dc47c725..034a9de90484 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -50,6 +50,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -204,9 +205,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -228,9 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -307,7 +302,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -431,12 +425,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -544,9 +538,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -611,6 +607,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -620,4 +617,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 69f23c7b0497..27fa9465d19d 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -17,7 +17,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_CMDLINE_PARSER=y
# CONFIG_MMU is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 69f2282dc4e9..49be0f9fcd8d 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -46,6 +46,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -200,9 +201,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -224,9 +222,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -303,7 +298,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -415,12 +409,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -528,9 +522,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -595,6 +591,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -604,3 +601,4 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index e91267e868b2..a71acf4a6004 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -46,6 +46,7 @@ CONFIG_TLS=m
CONFIG_XFRM_MIGRATE=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
@@ -200,9 +201,6 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
-CONFIG_NFT_MASQ_IPV4=m
-CONFIG_NFT_REDIR_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -224,9 +222,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_MASQ_IPV6=m
-CONFIG_NFT_REDIR_IPV6=m
CONFIG_NFT_DUP_IPV6=m
CONFIG_NFT_FIB_IPV6=m
CONFIG_NF_FLOW_TABLE_IPV6=m
@@ -303,7 +298,6 @@ CONFIG_AF_KCM=m
# CONFIG_WIRELESS is not set
CONFIG_PSAMPLE=m
CONFIG_NET_IFE=m
-CONFIG_NET_DEVLINK=m
# CONFIG_UEVENT_HELPER is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -414,12 +408,12 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
+# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
CONFIG_OCFS2_FS=m
# CONFIG_OCFS2_DEBUG_MASKLOG is not set
-CONFIG_FS_ENCRYPTION=m
CONFIG_FANOTIFY=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -527,9 +521,11 @@ CONFIG_CRYPTO_AEGIS256=m
CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
@@ -594,6 +590,7 @@ CONFIG_TEST_OVERFLOW=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_IDA=m
+CONFIG_TEST_VMALLOC=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
CONFIG_FIND_BIT_BENCHMARK=m
@@ -603,4 +600,5 @@ CONFIG_TEST_UDELAY=m
CONFIG_TEST_STATIC_KEYS=m
CONFIG_TEST_KMOD=m
CONFIG_TEST_MEMCAT_P=m
+CONFIG_TEST_STACKINIT=m
CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/hp300/config.c b/arch/m68k/hp300/config.c
index a19bcd23f80b..a161d44fd20b 100644
--- a/arch/m68k/hp300/config.c
+++ b/arch/m68k/hp300/config.c
@@ -254,7 +254,6 @@ void __init config_hp300(void)
mach_sched_init = hp300_sched_init;
mach_init_IRQ = hp300_init_IRQ;
mach_get_model = hp300_get_model;
- arch_gettimeoffset = hp300_gettimeoffset;
mach_hwclk = hp300_hwclk;
mach_get_ss = hp300_get_ss;
mach_reset = hp300_reset;
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index 289d928a46cb..bfee13e1d0fe 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -8,6 +8,7 @@
*/
#include <asm/ptrace.h>
+#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -19,6 +20,18 @@
#include <asm/traps.h>
#include <asm/blinken.h>
+static u64 hp300_read_clk(struct clocksource *cs);
+
+static struct clocksource hp300_clk = {
+ .name = "timer",
+ .rating = 250,
+ .read = hp300_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
/* Clock hardware definitions */
#define CLOCKBASE 0xf05f8000
@@ -28,39 +41,61 @@
#define CLKCR3 CLKCR1
#define CLKSR CLKCR2
#define CLKMSB1 0x5
+#define CLKLSB1 0x7
#define CLKMSB2 0x9
#define CLKMSB3 0xD
+#define CLKSR_INT1 BIT(0)
+
/* This is for machines which generate the exact clock. */
-#define USECS_PER_JIFFY (1000000/HZ)
-#define INTVAL ((10000 / 4) - 1)
+#define HP300_TIMER_CLOCK_FREQ 250000
+#define HP300_TIMER_CYCLES (HP300_TIMER_CLOCK_FREQ / HZ)
+#define INTVAL (HP300_TIMER_CYCLES - 1)
static irqreturn_t hp300_tick(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
unsigned long tmp;
- irq_handler_t vector = dev_id;
+
+ local_irq_save(flags);
in_8(CLOCKBASE + CLKSR);
asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
+ clk_total += INTVAL;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
/* Turn off the network and SCSI leds */
blinken_leds(0, 0xe0);
- return vector(irq, NULL);
+ return IRQ_HANDLED;
}
-u32 hp300_gettimeoffset(void)
+static u64 hp300_read_clk(struct clocksource *cs)
{
- /* Read current timer 1 value */
- unsigned char lsb, msb1, msb2;
- unsigned short ticks;
-
- msb1 = in_8(CLOCKBASE + 5);
- lsb = in_8(CLOCKBASE + 7);
- msb2 = in_8(CLOCKBASE + 5);
- if (msb1 != msb2)
- /* A carry happened while we were reading. Read it again */
- lsb = in_8(CLOCKBASE + 7);
- ticks = INTVAL - ((msb2 << 8) | lsb);
- return ((USECS_PER_JIFFY * ticks) / INTVAL) * 1000;
+ unsigned long flags;
+ unsigned char lsb, msb, msb_new;
+ u32 ticks;
+
+ local_irq_save(flags);
+ /* Read current timer 1 value */
+ msb = in_8(CLOCKBASE + CLKMSB1);
+again:
+ if ((in_8(CLOCKBASE + CLKSR) & CLKSR_INT1) && msb > 0)
+ clk_offset = INTVAL;
+ lsb = in_8(CLOCKBASE + CLKLSB1);
+ msb_new = in_8(CLOCKBASE + CLKMSB1);
+ if (msb_new != msb) {
+ msb = msb_new;
+ goto again;
+ }
+
+ ticks = INTVAL - ((msb << 8) | lsb);
+ ticks += clk_offset + clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
void __init hp300_sched_init(irq_handler_t vector)
@@ -70,9 +105,11 @@ void __init hp300_sched_init(irq_handler_t vector)
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
- if (request_irq(IRQ_AUTO_6, hp300_tick, 0, "timer tick", vector))
+ if (request_irq(IRQ_AUTO_6, hp300_tick, IRQF_TIMER, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
+
+ clocksource_register_hz(&hp300_clk, HP300_TIMER_CLOCK_FREQ);
}
diff --git a/arch/m68k/hp300/time.h b/arch/m68k/hp300/time.h
index f5583ec4033d..1d77b55cc72a 100644
--- a/arch/m68k/hp300/time.h
+++ b/arch/m68k/hp300/time.h
@@ -1,2 +1 @@
extern void hp300_sched_init(irq_handler_t vector);
-extern u32 hp300_gettimeoffset(void);
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 2c359d9e80f6..0ddae4a74adb 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 782b78f8a048..6c03ca5bc436 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -377,8 +377,6 @@ static inline void isa_delay(void)
#define writesw(port, buf, nr) raw_outsw((port), (u16 *)(buf), (nr))
#define writesl(port, buf, nr) raw_outsl((port), (u32 *)(buf), (nr))
-#define mmiowb()
-
#ifndef CONFIG_SUN3
#define IO_SPACE_LIMIT 0xffff
#else
diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
index 9c7ff67c5ffd..257b29184af9 100644
--- a/arch/m68k/include/asm/mvme147hw.h
+++ b/arch/m68k/include/asm/mvme147hw.h
@@ -66,7 +66,7 @@ struct pcc_regs {
#define PCC_INT_ENAB 0x08
#define PCC_TIMER_INT_CLR 0x80
-#define PCC_TIMER_PRELOAD 63936l
+#define PCC_TIMER_CLR_OVF 0x04
#define PCC_LEVEL_ABORT 0x07
#define PCC_LEVEL_SERIAL 0x04
diff --git a/arch/m68k/include/asm/syscall.h b/arch/m68k/include/asm/syscall.h
new file mode 100644
index 000000000000..465ac039be09
--- /dev/null
+++ b/arch/m68k/include/asm/syscall.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_M68K_SYSCALL_H
+#define _ASM_M68K_SYSCALL_H
+
+#include <uapi/linux/audit.h>
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_M68K;
+}
+
+#endif /* _ASM_M68K_SYSCALL_H */
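
With this header in place, generic code (audit, for example) can ask which syscall ABI an m68k task is using through the common syscall helpers rather than special-casing the architecture; classic m68k has a single ABI, so the answer is a constant. Purely illustrative use, not taken from the audit code itself:

	int arch = syscall_get_arch(current);	/* always AUDIT_ARCH_M68K here */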
diff --git a/arch/m68k/include/asm/tlb.h b/arch/m68k/include/asm/tlb.h
index b4b9efb6f963..3c81f6adfc8b 100644
--- a/arch/m68k/include/asm/tlb.h
+++ b/arch/m68k/include/asm/tlb.h
@@ -2,20 +2,6 @@
#ifndef _M68K_TLB_H
#define _M68K_TLB_H
-/*
- * m68k doesn't need any special per-pte or
- * per-vma handling..
- */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it
- * fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#endif /* _M68K_TLB_H */
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index df4ec3ec71d1..7e3d0734b2f3 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -427,3 +427,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index cd9317d53276..11be08f4f750 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -54,8 +54,6 @@ struct mac_booter_data mac_bi_data;
/* The phys. video addr. - might be bogus on some machines */
static unsigned long mac_orig_videoaddr;
-/* Mac specific timer functions */
-extern u32 mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
extern void iop_preinit(void);
extern void iop_init(void);
@@ -155,7 +153,6 @@ void __init config_mac(void)
mach_sched_init = mac_sched_init;
mach_init_IRQ = mac_init_IRQ;
mach_get_model = mac_get_model;
- arch_gettimeoffset = mac_gettimeoffset;
mach_hwclk = mac_hwclk;
mach_reset = mac_reset;
mach_halt = mac_poweroff;
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 0b0289459173..3c2cfcb74982 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -55,16 +56,6 @@ static __u8 rbv_clear;
static int gIER,gIFR,gBufA,gBufB;
/*
- * Timer defs.
- */
-
-#define TICK_SIZE 10000
-#define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */
-#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
-#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
-
-
-/*
* On Macs with a genuine VIA chip there is no way to mask an individual slot
* interrupt. This limitation also seems to apply to VIA clone logic cores in
* Quadra-like ASICs. (RBV and OSS machines don't have this limitation.)
@@ -272,22 +263,6 @@ void __init via_init(void)
}
/*
- * Start the 100 Hz clock
- */
-
-void __init via_init_clock(irq_handler_t func)
-{
- via1[vACR] |= 0x40;
- via1[vT1LL] = MAC_CLOCK_LOW;
- via1[vT1LH] = MAC_CLOCK_HIGH;
- via1[vT1CL] = MAC_CLOCK_LOW;
- via1[vT1CH] = MAC_CLOCK_HIGH;
-
- if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
- pr_err("Couldn't register %s interrupt\n", "timer");
-}
-
-/*
* Debugging dump, used in various places to see what's going on.
*/
@@ -315,29 +290,6 @@ void via_debug_dump(void)
}
/*
- * This is always executed with interrupts disabled.
- *
- * TBI: get time offset between scheduling timer ticks
- */
-
-u32 mac_gettimeoffset(void)
-{
- unsigned long ticks, offset = 0;
-
- /* read VIA1 timer 2 current value */
- ticks = via1[vT1CL] | (via1[vT1CH] << 8);
- /* The probability of underflow is less than 2% */
- if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
- /* Check for pending timer interrupt in VIA1 IFR */
- if (via1[vIFR] & 0x40) offset = TICK_SIZE;
-
- ticks = MAC_CLOCK_TICK - ticks;
- ticks = ticks * 10000L / MAC_CLOCK_TICK;
-
- return (ticks + offset) * 1000;
-}
-
-/*
* Flush the L2 cache on Macs that have it by flipping
* the system into 24-bit mode for an instant.
*/
@@ -440,6 +392,8 @@ void via_nubus_irq_shutdown(int irq)
* via6522.c :-), disable/pending masks added.
*/
+#define VIA_TIMER_1_INT BIT(6)
+
void via1_irq(struct irq_desc *desc)
{
int irq_num;
@@ -449,6 +403,21 @@ void via1_irq(struct irq_desc *desc)
if (!events)
return;
+ irq_num = IRQ_MAC_TIMER_1;
+ irq_bit = VIA_TIMER_1_INT;
+ if (events & irq_bit) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ via1[vIFR] = irq_bit;
+ generic_handle_irq(irq_num);
+ local_irq_restore(flags);
+
+ events &= ~irq_bit;
+ if (!events)
+ return;
+ }
+
irq_num = VIA1_SOURCE_BASE;
irq_bit = 1;
do {
@@ -605,3 +574,82 @@ int via2_scsi_drq_pending(void)
return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
}
EXPORT_SYMBOL(via2_scsi_drq_pending);
+
+/* timer and clock source */
+
+#define VIA_CLOCK_FREQ 783360 /* VIA "phase 2" clock in Hz */
+#define VIA_TIMER_CYCLES (VIA_CLOCK_FREQ / HZ) /* clock cycles per jiffy */
+
+#define VIA_TC (VIA_TIMER_CYCLES - 2) /* including 0 and -1 */
+#define VIA_TC_LOW (VIA_TC & 0xFF)
+#define VIA_TC_HIGH (VIA_TC >> 8)
+
+static u64 mac_read_clk(struct clocksource *cs);
+
+static struct clocksource mac_clk = {
+ .name = "via1",
+ .rating = 250,
+ .read = mac_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total, clk_offset;
+
+static irqreturn_t via_timer_handler(int irq, void *dev_id)
+{
+ irq_handler_t timer_routine = dev_id;
+
+ clk_total += VIA_TIMER_CYCLES;
+ clk_offset = 0;
+ timer_routine(0, NULL);
+
+ return IRQ_HANDLED;
+}
+
+void __init via_init_clock(irq_handler_t timer_routine)
+{
+ if (request_irq(IRQ_MAC_TIMER_1, via_timer_handler, IRQF_TIMER, "timer",
+ timer_routine)) {
+ pr_err("Couldn't register %s interrupt\n", "timer");
+ return;
+ }
+
+ via1[vT1LL] = VIA_TC_LOW;
+ via1[vT1LH] = VIA_TC_HIGH;
+ via1[vT1CL] = VIA_TC_LOW;
+ via1[vT1CH] = VIA_TC_HIGH;
+ via1[vACR] |= 0x40;
+
+ clocksource_register_hz(&mac_clk, VIA_CLOCK_FREQ);
+}
+
+static u64 mac_read_clk(struct clocksource *cs)
+{
+ unsigned long flags;
+ u8 count_high;
+ u16 count;
+ u32 ticks;
+
+ /*
+ * Timer counter wrap-around is detected with the timer interrupt flag
+ * but reading the counter low byte (vT1CL) would reset the flag.
+ * Also, accessing both counter registers is essentially a data race.
+ * These problems are avoided by ignoring the low byte. Clock accuracy
+ * is 256 times worse (error can reach 0.327 ms) but CPU overhead is
+ * reduced by avoiding slow VIA register accesses.
+ */
+
+ local_irq_save(flags);
+ count_high = via1[vT1CH];
+ if (count_high == 0xFF)
+ count_high = 0;
+ if (count_high > 0 && (via1[vIFR] & VIA_TIMER_1_INT))
+ clk_offset = VIA_TIMER_CYCLES;
+ count = count_high << 8;
+ ticks = VIA_TIMER_CYCLES - count;
+ ticks += clk_offset + clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
+}
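
The accuracy figure quoted in the mac_read_clk() comment follows directly from the quantization: ignoring vT1CL means readings advance in steps of 256 VIA clock cycles, and at the 783360 Hz "phase 2" clock that is

	256 cycles / 783360 Hz ≈ 326.8 µs ≈ 0.327 ms

of worst-case error, traded for fewer slow VIA register accesses on every clocksource read.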
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 8868a4c9adae..778cacb7d57b 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -147,10 +147,3 @@ void __init mem_init(void)
init_pointer_tables();
mem_init_print_info(NULL);
}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index adea549d240e..545a1fe0e119 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -38,18 +39,12 @@
static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);
-extern u32 mvme147_gettimeoffset(void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern void mvme147_reset (void);
static int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme147_process_int() */
-
-irq_handler_t tick_handler;
-
int __init mvme147_parse_bootinfo(const struct bi_record *bi)
{
@@ -89,7 +84,6 @@ void __init config_mvme147(void)
mach_max_dma_address = 0x01000000;
mach_sched_init = mvme147_sched_init;
mach_init_IRQ = mvme147_init_IRQ;
- arch_gettimeoffset = mvme147_gettimeoffset;
mach_hwclk = mvme147_hwclk;
mach_reset = mvme147_reset;
mach_get_model = mvme147_get_model;
@@ -99,45 +93,76 @@ void __init config_mvme147(void)
vme_brdtype = VME_TYPE_MVME147;
}
+static u64 mvme147_read_clk(struct clocksource *cs);
+
+static struct clocksource mvme147_clk = {
+ .name = "pcc",
+ .rating = 250,
+ .read = mvme147_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+
+#define PCC_TIMER_CLOCK_FREQ 160000
+#define PCC_TIMER_CYCLES (PCC_TIMER_CLOCK_FREQ / HZ)
+#define PCC_TIMER_PRELOAD (0x10000 - PCC_TIMER_CYCLES)
/* Using pcc tick timer 1 */
static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
- m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
- return tick_handler(irq, dev_id);
+ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
+ clk_total += PCC_TIMER_CYCLES;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme147_sched_init (irq_handler_t timer_routine)
{
- tick_handler = timer_routine;
- if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
+ if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQF_TIMER,
+ "timer 1", timer_routine))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
- /* our clock goes off every 6.25us */
+ /* The clock counter increments until 0xFFFF then reloads */
m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
m147_pcc->t1_cntrl = 0x0; /* clear timer */
m147_pcc->t1_cntrl = 0x3; /* start timer */
m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+
+ clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
}
-/* This is always executed with interrupts disabled. */
-/* XXX There are race hazards in this code XXX */
-u32 mvme147_gettimeoffset(void)
+static u64 mvme147_read_clk(struct clocksource *cs)
{
- volatile unsigned short *cp = (volatile unsigned short *)0xfffe1012;
- unsigned short n;
-
- n = *cp;
- while (n != *cp)
- n = *cp;
-
- n -= PCC_TIMER_PRELOAD;
- return ((unsigned long)n * 25 / 4) * 1000;
+ unsigned long flags;
+ u8 overflow, tmp;
+ u16 count;
+ u32 ticks;
+
+ local_irq_save(flags);
+ tmp = m147_pcc->t1_cntrl >> 4;
+ count = m147_pcc->t1_count;
+ overflow = m147_pcc->t1_cntrl >> 4;
+ if (overflow != tmp)
+ count = m147_pcc->t1_count;
+ count -= PCC_TIMER_PRELOAD;
+ ticks = count + overflow * PCC_TIMER_CYCLES;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
static int bcd2int (unsigned char b)
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 6ee36a5b528d..9bc2da69f80c 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/tty.h>
+#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/linkage.h>
#include <linux/init.h>
@@ -44,17 +45,11 @@ static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;
static void mvme16x_get_model(char *model);
extern void mvme16x_sched_init(irq_handler_t handler);
-extern u32 mvme16x_gettimeoffset(void);
extern int mvme16x_hwclk (int, struct rtc_time *);
extern void mvme16x_reset (void);
int bcd2int (unsigned char b);
-/* Save tick handler routine pointer, will point to xtime_update() in
- * kernel/time/timekeeping.c, called via mvme16x_process_int() */
-
-static irq_handler_t tick_handler;
-
unsigned short mvme16x_config;
EXPORT_SYMBOL(mvme16x_config);
@@ -120,11 +115,11 @@ static void __init mvme16x_init_IRQ (void)
m68k_setup_user_interrupt(VEC_USER, 192);
}
-#define pcc2chip ((volatile u_char *)0xfff42000)
-#define PccSCCMICR 0x1d
-#define PccSCCTICR 0x1e
-#define PccSCCRICR 0x1f
-#define PccTPIACKR 0x25
+#define PCC2CHIP (0xfff42000)
+#define PCCSCCMICR (PCC2CHIP + 0x1d)
+#define PCCSCCTICR (PCC2CHIP + 0x1e)
+#define PCCSCCRICR (PCC2CHIP + 0x1f)
+#define PCCTPIACKR (PCC2CHIP + 0x25)
#ifdef CONFIG_EARLY_PRINTK
@@ -232,10 +227,10 @@ void mvme16x_cons_write(struct console *co, const char *str, unsigned count)
base_addr[CyIER] = CyTxMpty;
while (1) {
- if (pcc2chip[PccSCCTICR] & 0x20)
+ if (in_8(PCCSCCTICR) & 0x20)
{
/* We have a Tx int. Acknowledge it */
- sink = pcc2chip[PccTPIACKR];
+ sink = in_8(PCCTPIACKR);
if ((base_addr[CyLICR] >> 2) == port) {
if (i == count) {
/* Last char of string is now output */
@@ -277,7 +272,6 @@ void __init config_mvme16x(void)
mach_max_dma_address = 0xffffffff;
mach_sched_init = mvme16x_sched_init;
mach_init_IRQ = mvme16x_init_IRQ;
- arch_gettimeoffset = mvme16x_gettimeoffset;
mach_hwclk = mvme16x_hwclk;
mach_reset = mvme16x_reset;
mach_get_model = mvme16x_get_model;
@@ -350,10 +344,46 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
return IRQ_HANDLED;
}
+static u64 mvme16x_read_clk(struct clocksource *cs);
+
+static struct clocksource mvme16x_clk = {
+ .name = "pcc",
+ .rating = 250,
+ .read = mvme16x_read_clk,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u32 clk_total;
+
+#define PCC_TIMER_CLOCK_FREQ 1000000
+#define PCC_TIMER_CYCLES (PCC_TIMER_CLOCK_FREQ / HZ)
+
+#define PCCTCMP1 (PCC2CHIP + 0x04)
+#define PCCTCNT1 (PCC2CHIP + 0x08)
+#define PCCTOVR1 (PCC2CHIP + 0x17)
+#define PCCTIC1 (PCC2CHIP + 0x1b)
+
+#define PCCTOVR1_TIC_EN 0x01
+#define PCCTOVR1_COC_EN 0x02
+#define PCCTOVR1_OVR_CLR 0x04
+
+#define PCCTIC1_INT_CLR 0x08
+#define PCCTIC1_INT_EN 0x10
+
static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
{
- *(volatile unsigned char *)0xfff4201b |= 8;
- return tick_handler(irq, dev_id);
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
+ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
+ clk_total += PCC_TIMER_CYCLES;
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+
+ return IRQ_HANDLED;
}
void mvme16x_sched_init (irq_handler_t timer_routine)
@@ -361,16 +391,17 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
int irq;
- tick_handler = timer_routine;
/* Using PCCchip2 or MC2 chip tick timer 1 */
- *(volatile unsigned long *)0xfff42008 = 0;
- *(volatile unsigned long *)0xfff42004 = 10000; /* 10ms */
- *(volatile unsigned char *)0xfff42017 |= 3;
- *(volatile unsigned char *)0xfff4201b = 0x16;
- if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0,
- "timer", mvme16x_timer_int))
+ out_be32(PCCTCNT1, 0);
+ out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+ out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+ out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
+ if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
+ timer_routine))
panic ("Couldn't register timer int");
+ clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
+
if (brdno == 0x0162 || brdno == 0x172)
irq = MVME162_IRQ_ABORT;
else
@@ -380,11 +411,23 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
panic ("Couldn't register abort int");
}
-
-/* This is always executed with interrupts disabled. */
-u32 mvme16x_gettimeoffset(void)
+static u64 mvme16x_read_clk(struct clocksource *cs)
{
- return (*(volatile u32 *)0xfff42008) * 1000;
+ unsigned long flags;
+ u8 overflow, tmp;
+ u32 ticks;
+
+ local_irq_save(flags);
+ tmp = in_8(PCCTOVR1) >> 4;
+ ticks = in_be32(PCCTCNT1);
+ overflow = in_8(PCCTOVR1) >> 4;
+ if (overflow != tmp)
+ ticks = in_be32(PCCTCNT1);
+ ticks += overflow * PCC_TIMER_CYCLES;
+ ticks += clk_total;
+ local_irq_restore(flags);
+
+ return ticks;
}
int bcd2int (unsigned char b)
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 96810d91da2b..e63eb5f06999 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -40,7 +40,6 @@ extern void q40_init_IRQ(void);
static void q40_get_model(char *model);
extern void q40_sched_init(irq_handler_t handler);
-static u32 q40_gettimeoffset(void);
static int q40_hwclk(int, struct rtc_time *);
static unsigned int q40_get_ss(void);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
@@ -169,7 +168,6 @@ void __init config_q40(void)
mach_sched_init = q40_sched_init;
mach_init_IRQ = q40_init_IRQ;
- arch_gettimeoffset = q40_gettimeoffset;
mach_hwclk = q40_hwclk;
mach_get_ss = q40_get_ss;
mach_get_rtc_pll = q40_get_rtc_pll;
@@ -201,13 +199,6 @@ int __init q40_parse_bootinfo(const struct bi_record *rec)
return 1;
}
-
-static u32 q40_gettimeoffset(void)
-{
- return 5000 * (ql_ticks != 0) * 1000;
-}
-
-
/*
* Looks like op is non-zero for setting the clock, and zero for
* reading the clock.
diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
index 3e7603202977..1c696906c159 100644
--- a/arch/m68k/q40/q40ints.c
+++ b/arch/m68k/q40/q40ints.c
@@ -127,10 +127,10 @@ void q40_mksound(unsigned int hz, unsigned int ticks)
sound_ticks = ticks << 1;
}
-static irq_handler_t q40_timer_routine;
-
-static irqreturn_t q40_timer_int (int irq, void * dev)
+static irqreturn_t q40_timer_int(int irq, void *dev_id)
{
+ irq_handler_t timer_routine = dev_id;
+
ql_ticks = ql_ticks ? 0 : 1;
if (sound_ticks) {
unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
@@ -139,8 +139,13 @@ static irqreturn_t q40_timer_int (int irq, void * dev)
*DAC_RIGHT=sval;
}
- if (!ql_ticks)
- q40_timer_routine(irq, dev);
+ if (!ql_ticks) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
+ }
return IRQ_HANDLED;
}
@@ -148,11 +153,9 @@ void q40_sched_init (irq_handler_t timer_routine)
{
int timer_irq;
- q40_timer_routine = timer_routine;
timer_irq = Q40_IRQ_FRAME;
- if (request_irq(timer_irq, q40_timer_int, 0,
- "timer", q40_timer_int))
+ if (request_irq(timer_irq, q40_timer_int, 0, "timer", timer_routine))
panic("Couldn't register timer int");
master_outb(-1, FRAME_CLEAR_REG);
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 542c4404861c..229ea37dfe1b 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -37,7 +37,6 @@
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
-extern u32 sun3_gettimeoffset(void);
static void sun3_sched_init(irq_handler_t handler);
extern void sun3_get_model (char* model);
extern int sun3_hwclk(int set, struct rtc_time *t);
@@ -138,7 +137,6 @@ void __init config_sun3(void)
mach_sched_init = sun3_sched_init;
mach_init_IRQ = sun3_init_IRQ;
mach_reset = sun3_reboot;
- arch_gettimeoffset = sun3_gettimeoffset;
mach_get_model = sun3_get_model;
mach_hwclk = sun3_hwclk;
mach_halt = sun3_halt;
diff --git a/arch/m68k/sun3/intersil.c b/arch/m68k/sun3/intersil.c
index d911070af02a..8fc74864de81 100644
--- a/arch/m68k/sun3/intersil.c
+++ b/arch/m68k/sun3/intersil.c
@@ -22,13 +22,6 @@
#define STOP_VAL (INTERSIL_STOP | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
#define START_VAL (INTERSIL_RUN | INTERSIL_INT_ENABLE | INTERSIL_24H_MODE)
-/* does this need to be implemented? */
-u32 sun3_gettimeoffset(void)
-{
- return 1000;
-}
-
-
/* get/set hwclock */
int sun3_hwclk(int set, struct rtc_time *t)
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 6bbca30c9188..a5824abb4a39 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -61,8 +61,10 @@ static irqreturn_t sun3_int7(int irq, void *dev_id)
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
+ unsigned long flags;
unsigned int cnt;
+ local_irq_save(flags);
#ifdef CONFIG_SUN3
intersil_clear();
#endif
@@ -76,6 +78,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
cnt = kstat_irqs_cpu(irq, 0);
if (!(cnt % 20))
sun3_leds(led_pattern[cnt % 160 / 20]);
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 33d3a1c6fba0..03ce7f9facfe 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -49,7 +49,6 @@ void __init config_sun3x(void)
mach_sched_init = sun3x_sched_init;
mach_init_IRQ = sun3_init_IRQ;
- arch_gettimeoffset = sun3x_gettimeoffset;
mach_reset = sun3x_reboot;
mach_hwclk = sun3x_hwclk;
diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
index 047e2bcee3d7..9163294b0fb6 100644
--- a/arch/m68k/sun3x/time.c
+++ b/arch/m68k/sun3x/time.c
@@ -73,22 +73,21 @@ int sun3x_hwclk(int set, struct rtc_time *t)
return 0;
}
-/* Not much we can do here */
-u32 sun3x_gettimeoffset(void)
-{
- return 0L;
-}
#if 0
-static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t sun3x_timer_tick(int irq, void *dev_id)
{
- void (*vector)(int, void *, struct pt_regs *) = dev_id;
+ irq_handler_t timer_routine = dev_id;
+ unsigned long flags;
- /* Clear the pending interrupt - pulse the enable line low */
- disable_irq(5);
- enable_irq(5);
+ local_irq_save(flags);
+ /* Clear the pending interrupt - pulse the enable line low */
+ disable_irq(5);
+ enable_irq(5);
+ timer_routine(0, NULL);
+ local_irq_restore(flags);
- vector(irq, NULL, regs);
+ return IRQ_HANDLED;
}
#endif
diff --git a/arch/m68k/sun3x/time.h b/arch/m68k/sun3x/time.h
index 496f406412ad..86ce78bb3c28 100644
--- a/arch/m68k/sun3x/time.h
+++ b/arch/m68k/sun3x/time.h
@@ -3,7 +3,6 @@
#define SUN3X_TIME_H
extern int sun3x_hwclk(int set, struct rtc_time *t);
-u32 sun3x_gettimeoffset(void);
void sun3x_sched_init(irq_handler_t vector);
struct mostek_dt {
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index a51b965b3b82..adb179f519f9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -41,6 +41,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select VIRT_TO_BUS
select CPU_NO_EFFICIENT_FFS
+ select MMU_GATHER_NO_RANGE if MMU
# Endianness selection
choice
@@ -58,15 +59,9 @@ config CPU_LITTLE_ENDIAN
endchoice
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config ZONE_DMA
def_bool y
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
def_bool n
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 1a8285c3f693..17a8d0a62038 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 833d3a53dab3..3a6924f3cbde 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -105,7 +105,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_MICROBLAZE;
}
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index 99b6ded54849..628a78ee0a72 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,16 +11,7 @@
#ifndef _ASM_MICROBLAZE_TLB_H
#define _ASM_MICROBLAZE_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <linux/pagemap.h>
-
-#ifdef CONFIG_MMU
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#endif
-
#include <asm-generic/tlb.h>
#endif /* _ASM_MICROBLAZE_TLB_H */
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 4964947732af..26339e417695 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -433,3 +433,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 7e97d44f6538..a015a951c8b7 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -186,18 +186,6 @@ void __init setup_memory(void)
paging_init();
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
void __init mem_init(void)
{
high_memory = (void *)__va(memory_start + lowmem_size - 1);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index c2ce1e42b888..8fe54fda31dc 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -75,7 +75,7 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
p >= memory_start && p < virt_to_phys(high_memory) &&
!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
p < __virt_to_phys((phys_addr_t)__bss_stop))) {
- pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
+ pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
(unsigned long)p, __builtin_return_address(0));
return NULL;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4a5f5b0ee9a9..70d3200476bf 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -5,7 +5,6 @@ config MIPS
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
- select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
@@ -44,8 +43,7 @@ config MIPS
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
- select HAVE_CBPF_JIT if (!64BIT && !CPU_MICROMIPS)
- select HAVE_EBPF_JIT if (64BIT && !CPU_MICROMIPS)
+ select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
select HAVE_CONTEXT_TRACKING
select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
@@ -276,7 +274,7 @@ config BCM47XX
select BCM47XX_SPROM
select BCM47XX_SSB if !BCM47XX_BCMA
help
- Support for BCM47XX based boards
+ Support for BCM47XX based boards
config BCM63XX
bool "Broadcom BCM63XX based boards"
@@ -295,7 +293,7 @@ config BCM63XX
select MIPS_L1_CACHE_SHIFT_4
select CLKDEV_LOOKUP
help
- Support for BCM63XX based boards
+ Support for BCM63XX based boards
config MIPS_COBALT
bool "Cobalt Server"
@@ -374,10 +372,10 @@ config MACH_JAZZ
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_100HZ
help
- This a family of machines based on the MIPS R4030 chipset which was
- used by several vendors to build RISC/os and Windows NT workstations.
- Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
- Olivetti M700-10 workstations.
+ This is a family of machines based on the MIPS R4030 chipset which was
+ used by several vendors to build RISC/os and Windows NT workstations.
+ Members include the Acer PICA, MIPS Magnum 4000, MIPS Millennium and
+ Olivetti M700-10 workstations.
config MACH_INGENIC
bool "Ingenic SoC based machines"
@@ -573,14 +571,14 @@ config NXP_STB220
bool "NXP STB220 board"
select SOC_PNX833X
help
- Support for NXP Semiconductors STB220 Development Board.
+ Support for NXP Semiconductors STB220 Development Board.
config NXP_STB225
bool "NXP 225 board"
select SOC_PNX833X
select SOC_PNX8335
help
- Support for NXP Semiconductors STB225 Development Board.
+ Support for NXP Semiconductors STB225 Development Board.
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
@@ -676,7 +674,10 @@ config SGI_IP27
select SYS_HAS_EARLY_PRINTK
select HAVE_PCI
select IRQ_MIPS_CPU
+ select IRQ_DOMAIN_HIERARCHY
select NR_CPUS_DEFAULT_64
+ select PCI_DRIVERS_GENERIC
+ select PCI_XTALK_BRIDGE
select SYS_HAS_CPU_R10000
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
@@ -722,9 +723,9 @@ config SGI_IP28
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select MIPS_L1_CACHE_SHIFT_7
- help
- This is the SGI Indigo2 with R10000 processor. To compile a Linux
- kernel that runs on these, say Y here.
+ help
+ This is the SGI Indigo2 with R10000 processor. To compile a Linux
+ kernel that runs on these, say Y here.
config SGI_IP32
bool "SGI IP32 (O2)"
@@ -1037,13 +1038,6 @@ source "arch/mips/paravirt/Kconfig"
endmenu
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config GENERIC_HWEIGHT
bool
default y
@@ -1175,9 +1169,9 @@ config HOLES_IN_ZONE
config SYS_SUPPORTS_RELOCATABLE
bool
help
- Selected if the platform supports relocating the kernel.
- The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
- to allow access to command line and entropy sources.
+ Selected if the platform supports relocating the kernel.
+ The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
+ to allow access to command line and entropy sources.
config MIPS_CBPF_JIT
def_bool y
@@ -1250,6 +1244,9 @@ config IRQ_GT641XX
config PCI_GT64XXX_PCI0
bool
+config PCI_XTALK_BRIDGE
+ bool
+
config NO_EXCEPT_FILL
bool
@@ -2120,8 +2117,8 @@ config MIPS_PGD_C0_CONTEXT
# Set to y for ptrace access to watch registers.
#
config HARDWARE_WATCHPOINTS
- bool
- default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
+ bool
+ default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
menu "Kernel type"
@@ -2185,10 +2182,10 @@ config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
help
+ This option selects the standard 4kB Linux page size. On some
- R3000-family processors this is the only available page size. Using
- 4kB page size will minimize memory consumption and is therefore
- recommended for low memory systems.
+ This option select the standard 4kB Linux page size. On some
+ R3000-family processors this is the only available page size. Using
+ 4kB page size will minimize memory consumption and is therefore
+ recommended for low memory systems.
config PAGE_SIZE_8KB
bool "8kB"
@@ -2481,7 +2478,6 @@ config SB1_PASS_2_1_WORKAROUNDS
depends on CPU_SB1 && CPU_SB1_PASS_2
default y
-
choice
prompt "SmartMIPS or microMIPS ASE support"
@@ -2689,16 +2685,16 @@ config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
depends on RELOCATABLE
---help---
- Randomizes the physical and virtual address at which the
- kernel image is loaded, as a security feature that
- deters exploit attempts relying on knowledge of the location
- of kernel internals.
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
- Entropy is generated using any coprocessor 0 registers available.
+ Entropy is generated using any coprocessor 0 registers available.
- The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
- If unsure, say N.
+ If unsure, say N.
config RANDOMIZE_BASE_MAX_OFFSET
hex "Maximum kASLR offset" if EXPERT
@@ -2828,7 +2824,7 @@ choice
prompt "Timer frequency"
default HZ_250
help
- Allows the configuration of the timer frequency.
+ Allows the configuration of the timer frequency.
config HZ_24
bool "24 HZ" if SYS_SUPPORTS_24HZ || SYS_SUPPORTS_ARBIT_HZ
@@ -3128,10 +3124,10 @@ config ARCH_MMAP_RND_BITS_MAX
default 15
config ARCH_MMAP_RND_COMPAT_BITS_MIN
- default 8
+ default 8
config ARCH_MMAP_RND_COMPAT_BITS_MAX
- default 15
+ default 15
config I8253
bool
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index d129475fd40d..a95a894aceaf 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -160,7 +160,7 @@ static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
id.name = ALCHEMY_CPU_CLK;
id.parent_names = &parent_name;
id.num_parents = 1;
- id.flags = CLK_IS_BASIC;
+ id.flags = 0;
id.ops = &alchemy_clkops_cpu;
h->init = &id;
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 1454d9f6ab2d..b8f3397c59c9 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -131,9 +131,7 @@ static void __init alchemy_setup_uarts(int ctype)
}
-/* The dmamask must be set for OHCI/EHCI to work */
-static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32);
-static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32);
+static u64 alchemy_all_dmamask = DMA_BIT_MASK(32);
/* Power on callback for the ehci platform driver */
static int alchemy_ehci_power_on(struct platform_device *pdev)
@@ -231,7 +229,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 0;
- pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
@@ -251,7 +249,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ehci-platform";
pdev->id = 0;
- pdev->dev.dma_mask = &alchemy_ehci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ehci_pdata;
if (platform_device_register(pdev))
@@ -271,7 +269,7 @@ static void __init alchemy_setup_usb(int ctype)
res[1].flags = IORESOURCE_IRQ;
pdev->name = "ohci-platform";
pdev->id = 1;
- pdev->dev.dma_mask = &alchemy_ohci_dmamask;
+ pdev->dev.dma_mask = &alchemy_all_dmamask;
pdev->dev.platform_data = &alchemy_ohci_pdata;
if (platform_device_register(pdev))
@@ -338,7 +336,11 @@ static struct platform_device au1xxx_eth0_device = {
.name = "au1000-eth",
.id = 0,
.num_resources = MAC_RES_COUNT,
- .dev.platform_data = &au1xxx_eth0_platform_data,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth0_platform_data,
+ },
};
static struct resource au1xxx_eth1_resources[][MAC_RES_COUNT] __initdata = {
@@ -370,7 +372,11 @@ static struct platform_device au1xxx_eth1_device = {
.name = "au1000-eth",
.id = 1,
.num_resources = MAC_RES_COUNT,
- .dev.platform_data = &au1xxx_eth1_platform_data,
+ .dev = {
+ .dma_mask = &alchemy_all_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &au1xxx_eth1_platform_data,
+ },
};
void __init au1xxx_override_eth_cfg(unsigned int port,
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index d4ca97e2ec6c..228cdc736db7 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 25a57895a3a3..298b46b4e9cb 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/clk.h>
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 29471038d817..6889f74e06f5 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -15,9 +15,9 @@ config BCM47XX_SSB
select SSB_DRIVER_GPIO
default y
help
- Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
+ Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.
- This will generate an image with support for SSB and MIPS32 R1 instruction set.
+ This will generate an image with support for SSB and MIPS32 R1 instruction set.
config BCM47XX_BCMA
bool "BCMA Support for Broadcom BCM47XX"
@@ -31,8 +31,8 @@ config BCM47XX_BCMA
select BCMA_DRIVER_GPIO
default y
help
- Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
+ Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.
- This will generate an image with support for BCMA and MIPS32 R2 instruction set.
+ This will generate an image with support for BCMA and MIPS32 R2 instruction set.
endif
diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig
index f60d96610ace..492c3bd005d5 100644
--- a/arch/mips/bcm63xx/boards/Kconfig
+++ b/arch/mips/bcm63xx/boards/Kconfig
@@ -5,7 +5,7 @@ choice
default BOARD_BCM963XX
config BOARD_BCM963XX
- bool "Generic Broadcom 963xx boards"
+ bool "Generic Broadcom 963xx boards"
select SSB
endchoice
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 9fbfb6e5c7d2..c83fdf649327 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -18,7 +18,6 @@ CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 249f5285e343..91ce75edbfb4 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -41,7 +41,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_BCM47XXSFLASH=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_BCM47XXNFLASH=y
CONFIG_NETDEVICES=y
CONFIG_B44=y
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 412800d5d7e0..50bebce28500 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -51,7 +51,7 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_MTD=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_JZ4780=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_FASTMAP=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 34633b7611cb..bc9b6ae046b2 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -95,8 +95,8 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SST25L=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC_BCH=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_NAND_AU1550=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_SPI_NOR=y
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 0c86ed86266a..30a6eafdb1d0 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -17,7 +17,6 @@ CONFIG_TC=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_OSF_PARTITION=y
# CONFIG_EFI_PARTITION is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 0e54ab2680ce..e2b58dbf4aa9 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -16,7 +16,6 @@ CONFIG_TC=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_OSF_PARTITION=y
# CONFIG_EFI_PARTITION is not set
diff --git a/arch/mips/configs/generic/board-ni169445.config b/arch/mips/configs/generic/board-ni169445.config
index f72223b366ca..1ed0d3e8715e 100644
--- a/arch/mips/configs/generic/board-ni169445.config
+++ b/arch/mips/configs/generic/board-ni169445.config
@@ -15,9 +15,9 @@ CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_ECC_BCH=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_GPIO=y
CONFIG_MTD_NAND_IDS=y
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index 184eb65a6ba7..1134fbb99fc2 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -10,7 +10,7 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 5d80521e5d5a..714169e411cf 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -26,6 +26,7 @@ CONFIG_MIPS_CPS=y
CONFIG_HIGHMEM=y
CONFIG_NR_CPUS=16
CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_TRIM_UNUSED_KSYMS=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index ff40fbc2f439..21a1168ae301 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -228,7 +228,7 @@ CONFIG_SERIAL_IP22_ZILOG=m
# CONFIG_HW_RANDOM is not set
CONFIG_RAW_DRIVER=m
# CONFIG_HWMON is not set
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_INDYDOG=m
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 81c47e18131b..54db5dedf776 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -271,7 +271,7 @@ CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TAOS_EVM=m
CONFIG_I2C_STUB=m
# CONFIG_HWMON is not set
-CONFIG_THERMAL=m
+CONFIG_THERMAL=y
CONFIG_MFD_PCF50633=m
CONFIG_PCF50633_ADC=m
CONFIG_PCF50633_GPIO=m
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
index b064d68a5424..3d390a7494d6 100644
--- a/arch/mips/configs/loongson1b_defconfig
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -19,7 +19,6 @@ CONFIG_MACH_LOONGSON32=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
@@ -42,7 +41,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=m
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
index 5d76559b56cd..247d56e94c0a 100644
--- a/arch/mips/configs/loongson1c_defconfig
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -20,7 +20,6 @@ CONFIG_LOONGSON1_LS1C=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
@@ -43,7 +42,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=m
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index 7671fe6a8042..1a0677d04982 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -44,7 +44,7 @@ CONFIG_TCP_CONG_WESTWOOD=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_JZ4740=y
CONFIG_MTD_UBI=y
CONFIG_NETDEVICES=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 7befe05fd813..50632a3103dd 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -19,7 +19,6 @@ CONFIG_PCI=y
# CONFIG_PCI_QUIRKS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
@@ -110,7 +109,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_BLOCK2MTD=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_ATA=y
# CONFIG_ATA_VERBOSE_ERROR is not set
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 50a2c9ad583f..5e389db35fa7 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -17,7 +17,6 @@ CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -40,7 +39,7 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_RBTX4939=y
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_MTD_NAND_TXX9NDFMC=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index 2bb02ea9fb4e..203db83c3ee9 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -81,7 +81,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_LANTIQ=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_XWAY=y
CONFIG_EEPROM_93CX6=m
CONFIG_SCSI=y
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index a106f8113842..a84475f1924f 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -43,14 +43,14 @@ void __init *plat_get_fdt(void)
/* Already set up */
return (void *)fdt;
- if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
+ if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_passed_dtb)) {
/*
* We booted using the UHI boot protocol, so we have been
* provided with the appropriate device tree for the board.
* Make use of it & search for any machine struct based upon
* the root compatible string.
*/
- fdt = (void *)fw_arg1;
+ fdt = (void *)fw_passed_dtb;
for_each_mips_machine(check_mach) {
match = mips_machine_is_compatible(check_mach, fdt);
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 87b86cdf126a..a03cd4e24f37 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -19,7 +19,6 @@ generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += sections.h
-generic-y += segment.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += user.h
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 830c93a010c3..9a466dde9b96 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -482,7 +482,7 @@ static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *
* Return the bit position (0..63) of the most significant 1 bit in a word
* Returns -1 if no 1 bit exists
*/
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
{
int num;
@@ -548,7 +548,7 @@ static inline unsigned long __fls(unsigned long word)
* Returns 0..SZLONG-1
* Undefined if no bit exists, so code should check against 0 first.
*/
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
{
return __fls(word & -word);
}
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index a301a8f4bc66..235bc2f52113 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -92,6 +92,7 @@ extern unsigned long mips_machtype;
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
+#define BOOT_MEM_NOMAP 5
/*
* A memory map that's built upon what was determined
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 845fbbc7a2e3..29997e42480e 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -102,9 +102,6 @@ static inline void set_io_port_base(unsigned long base)
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
-/* Some callers use this older API instead. */
-#define mmiowb() iobarrier_w()
-
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e4456e450f94..3185fd3220ec 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -11,6 +11,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <asm/isa-rev.h>
#define JUMP_LABEL_NOP_SIZE 4
@@ -21,15 +22,20 @@
#endif
#ifdef CONFIG_CPU_MICROMIPS
-#define B_INSN "b32"
+# define B_INSN "b32"
+# define J_INSN "j32"
+#elif MIPS_ISA_REV >= 6
+# define B_INSN "bc"
+# define J_INSN "bc"
#else
-#define B_INSN "b"
+# define B_INSN "b"
+# define J_INSN "j"
#endif
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
- "2:\tnop\n\t"
+ "2:\t.insn\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
@@ -42,8 +48,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\tj %l[l_yes]\n\t"
- "nop\n\t"
+ asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 42ea1313626c..965f0793a5f9 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -7,18 +7,9 @@
#include <asm/mmzone.h>
struct cpuinfo_ip27 {
-// cpuid_t p_cpuid; /* PROM assigned cpuid */
cnodeid_t p_nodeid; /* my node ID in compact-id-space */
nasid_t p_nasid; /* my node ID in numa-as-id-space */
unsigned char p_slice; /* Physical position on node board */
-#if 0
- unsigned long loops_per_sec;
- unsigned long ipi_count;
- unsigned long irq_attempt[NR_IRQS];
- unsigned long smp_local_irq_count;
- unsigned long prof_multiplier;
- unsigned long prof_counter;
-#endif
};
extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
@@ -30,7 +21,7 @@ extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
+#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
diff --git a/arch/mips/include/asm/mmiowb.h b/arch/mips/include/asm/mmiowb.h
new file mode 100644
index 000000000000..a40824e3ef8e
--- /dev/null
+++ b/arch/mips/include/asm/mmiowb.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_MMIOWB_H
+#define _ASM_MMIOWB_H
+
+#include <asm/io.h>
+
+#define mmiowb() iobarrier_w()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_MMIOWB_H */
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 23574c27eb40..a92cd30b48c9 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -801,15 +801,13 @@ struct bridge_err_cmdword {
#define PCI64_ATTR_RMF_SHFT 48
struct bridge_controller {
- struct pci_controller pc;
- struct resource mem;
- struct resource io;
struct resource busn;
struct bridge_regs *base;
- nasid_t nasid;
- unsigned int widget_id;
- u64 baddr;
+ unsigned long baddr;
+ unsigned long intr_addr;
+ struct irq_domain *domain;
unsigned int pci_int[8];
+ nasid_t nasid;
};
#define BRIDGE_CONTROLLER(bus) \
@@ -822,8 +820,4 @@ struct bridge_controller {
#define bridge_clr(bc, reg, val) \
__raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg)
-extern int request_bridge_irq(struct bridge_controller *bc, int pin);
-
-extern struct pci_ops bridge_pci_ops;
-
#endif /* _ASM_PCI_BRIDGE_H */
diff --git a/arch/mips/include/asm/sn/irq_alloc.h b/arch/mips/include/asm/sn/irq_alloc.h
new file mode 100644
index 000000000000..09b89cecff56
--- /dev/null
+++ b/arch/mips/include/asm/sn/irq_alloc.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SN_IRQ_ALLOC_H
+#define __ASM_SN_IRQ_ALLOC_H
+
+struct irq_alloc_info {
+ void *ctrl;
+ nasid_t nasid;
+ int pin;
+};
+
+#endif /* __ASM_SN_IRQ_ALLOC_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ee81297d9117..8a88eb265516 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -11,6 +11,21 @@
#include <asm/processor.h>
#include <asm/qrwlock.h>
+
+#include <asm-generic/qspinlock_types.h>
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
+ smp_store_release(&lock->locked, 0);
+}
+
#include <asm/qspinlock.h>
#endif /* _ASM_SPINLOCK_H */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index a2b4748655df..acf80ae0a430 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -141,14 +141,14 @@ extern const unsigned long sys_call_table[];
extern const unsigned long sys32_call_table[];
extern const unsigned long sysn32_call_table[];
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_MIPS;
#ifdef CONFIG_64BIT
- if (!test_thread_flag(TIF_32BIT_REGS)) {
+ if (!test_tsk_thread_flag(task, TIF_32BIT_REGS)) {
arch |= __AUDIT_ARCH_64BIT;
/* N32 sets only TIF_32BIT_ADDR */
- if (test_thread_flag(TIF_32BIT_ADDR))
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
}
#endif
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index b6823b9e94da..90f3ad76d9e0 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,23 +5,6 @@
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
-/*
- * MIPS doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
-/*
- * .. because we flush the whole mm when it fills up.
- */
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#define _UNIQUE_ENTRYHI(base, idx) \
(((base) + ((idx) << (PAGE_SHIFT + 1))) | \
(cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b1990dd75f27..f7effca791a5 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -86,14 +86,18 @@ Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
Ip_u1u2(_ddivu);
+Ip_u3u1u2(_ddivu_r6);
Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u2u1msbu3(_dinsu);
Ip_u1u2(_divu);
+Ip_u3u1u2(_divu_r6);
Ip_u1u2u3(_dmfc0);
+Ip_u3u1u2(_dmodu);
Ip_u1u2u3(_dmtc0);
Ip_u1u2(_dmultu);
+Ip_u3u1u2(_dmulu);
Ip_u2u1u3(_drotr);
Ip_u2u1u3(_drotr32);
Ip_u2u1(_dsbh);
@@ -131,6 +135,7 @@ Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mfhc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
+Ip_u3u1u2(_modu);
Ip_u3u1u2(_movn);
Ip_u3u1u2(_movz);
Ip_u1u2u3(_mtc0);
@@ -139,6 +144,7 @@ Ip_u1(_mthi);
Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u1u2(_multu);
+Ip_u3u1u2(_mulu);
Ip_u3u1u2(_nor);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
@@ -149,6 +155,8 @@ Ip_u2s3u1(_sb);
Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
+Ip_u3u1u2(_seleqz);
+Ip_u3u1u2(_selnez);
Ip_u2s3u1(_sh);
Ip_u2u1u3(_sll);
Ip_u3u2u1(_sllv);
diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h
index 26d2ed1fa917..680e7efebbaf 100644
--- a/arch/mips/include/asm/xtalk/xtalk.h
+++ b/arch/mips/include/asm/xtalk/xtalk.h
@@ -47,15 +47,6 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
-#ifdef CONFIG_PCI
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
-#else
-static inline int bridge_probe(nasid_t nasid, int widget, int masterwid)
-{
- return 0;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_XTALK_XTALK_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 40fbb5dd66df..eaa3a80affdf 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -55,9 +55,9 @@ enum spec_op {
spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
dadd_op, daddu_op, dsub_op, dsubu_op,
tge_op, tgeu_op, tlt_op, tltu_op,
- teq_op, spec5_unused_op, tne_op, spec6_unused_op,
- dsll_op, spec7_unused_op, dsrl_op, dsra_op,
- dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+ teq_op, seleqz_op, tne_op, selnez_op,
+ dsll_op, spec5_unused_op, dsrl_op, dsra_op,
+ dsll32_op, spec6_unused_op, dsrl32_op, dsra32_op
};
/*
diff --git a/arch/mips/include/uapi/asm/sockios.h b/arch/mips/include/uapi/asm/sockios.h
index 5b40a88593fa..66f60234f290 100644
--- a/arch/mips/include/uapi/asm/sockios.h
+++ b/arch/mips/include/uapi/asm/sockios.h
@@ -21,7 +21,7 @@
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
#endif /* _ASM_SOCKIOS_H */
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index bada74af7641..c04b97aace4a 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -42,8 +42,8 @@ static inline void align_mod(const int align, const int mod)
: "n"(align), "n"(mod));
}
-static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
- const int align, const int mod)
+static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
+ const int align, const int mod)
{
unsigned long flags;
int m1, m2;
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d5e335e6846a..6126b77d5a62 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1973,6 +1973,14 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
panic("Unknown Ingenic Processor ID!");
break;
}
+
+ /*
+ * The config0 register in Xburst CPUs with a processor ID of
+ * PRID_COMP_INGENIC_D0 reports MIPS32r2 compatibility, but these
+ * CPUs don't actually support this ISA.
+ */
+ if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0)
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
}
static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index d7de8adcfcc8..5469d43b6966 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -58,15 +58,14 @@ resume_kernel:
local_irq_disable
lw t0, TI_PRE_COUNT($28)
bnez t0, restore_all
-need_resched:
LONG_L t0, TI_FLAGS($28)
andi t1, t0, _TIF_NEED_RESCHED
beqz t1, restore_all
LONG_L t0, PT_STATUS(sp) # Interrupts off?
andi t0, 1
beqz t0, restore_all
- jal preempt_schedule_irq
- b need_resched
+ PTR_LA ra, restore_all
+ j preempt_schedule_irq
#endif
FEXPORT(ret_from_kernel_thread)
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index ab943927f97a..662c8db9f45b 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -40,18 +40,38 @@ void arch_jump_label_transform(struct jump_entry *e,
{
union mips_instruction *insn_p;
union mips_instruction insn;
+ long offset;
insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
- /* Jump only works within an aligned region its delay slot is in. */
- BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
-
/* Target must have the right alignment and ISA must be preserved. */
BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
if (type == JUMP_LABEL_JMP) {
- insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
- insn.j_format.target = e->target >> J_RANGE_SHIFT;
+ if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
+ offset = e->target - ((unsigned long)insn_p + 4);
+ offset >>= 2;
+
+ /*
+ * The branch offset must fit in the instruction's 26
+ * bit field.
+ */
+ WARN_ON((offset >= BIT(25)) ||
+ (offset < -(long)BIT(25)));
+
+ insn.j_format.opcode = bc6_op;
+ insn.j_format.target = offset;
+ } else {
+ /*
+ * Jump only works within the aligned region that its
+ * delay slot is in.
+ */
+ WARN_ON((e->target & ~J_RANGE_MASK) !=
+ ((e->code + 4) & ~J_RANGE_MASK));
+
+ insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+ insn.j_format.target = e->target >> J_RANGE_SHIFT;
+ }
} else {
insn.word = 0; /* nop */
}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 413863508f6f..d67fb64e908c 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -64,17 +64,11 @@ struct mips_perf_event {
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#define CNTR_ALL 0xffffffff
-#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
-#else
- #define T
- #define V
- #define P
-#endif
};
static struct mips_perf_event raw_event;
@@ -325,9 +319,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
struct perf_event *event = container_of(evt, struct perf_event, hw);
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-#ifdef CONFIG_MIPS_MT_SMP
unsigned int range = evt->event_base >> 24;
-#endif /* CONFIG_MIPS_MT_SMP */
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
@@ -336,21 +328,15 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
/* Make sure interrupt enabled. */
MIPS_PERFCTRL_IE;
-#ifdef CONFIG_CPU_BMIPS5000
- {
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
/* enable the counter for the calling thread */
cpuc->saved_ctrl[idx] |=
(1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
- }
-#else
-#ifdef CONFIG_MIPS_MT_SMP
- if (range > V) {
+ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
/* The counter is processor wide. Set it up to count all TCs. */
pr_debug("Enabling perf counter for all TCs\n");
cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
- } else
-#endif /* CONFIG_MIPS_MT_SMP */
- {
+ } else {
unsigned int cpu, ctrl;
/*
@@ -365,7 +351,6 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
cpuc->saved_ctrl[idx] |= ctrl;
pr_debug("Enabling perf counter for CPU%d\n", cpu);
}
-#endif /* CONFIG_CPU_BMIPS5000 */
/*
* We do not actually let the counter run. Leave it until start().
*/
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 93b8e0b4332f..28bf01961bb2 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -41,13 +41,27 @@ char *mips_get_machine_name(void)
#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
- return add_memory_region(base, size, BOOT_MEM_RAM);
+ if (base >= PHYS_ADDR_MAX) {
+ pr_warn("Trying to add an invalid memory region, skipped\n");
+ return;
+ }
+
+ /* Truncate the passed memory region instead of type casting */
+ if (base + size - 1 >= PHYS_ADDR_MAX || base + size < base) {
+ pr_warn("Truncate memory region %llx @ %llx to size %llx\n",
+ size, base, PHYS_ADDR_MAX - base);
+ size = PHYS_ADDR_MAX - base;
+ }
+
+ add_memory_region(base, size, BOOT_MEM_RAM);
}
int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
- add_memory_region(base, size, BOOT_MEM_RESERVED);
+ add_memory_region(base, size,
+ nomap ? BOOT_MEM_NOMAP : BOOT_MEM_RESERVED);
+
return 0;
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 3a62f80958e1..414b6e9c900b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1418,7 +1418,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
unsigned long args[6];
sd.nr = syscall;
- sd.arch = syscall_get_arch();
+ sd.arch = syscall_get_arch(current);
syscall_get_arguments(current, regs, args);
for (i = 0; i < 6; i++)
sd.args[i] = args[i];
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8d1dc6c71173..ab349d2381c3 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -27,6 +27,7 @@
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -178,6 +179,7 @@ static bool __init __maybe_unused memory_region_available(phys_addr_t start,
in_ram = true;
break;
case BOOT_MEM_RESERVED:
+ case BOOT_MEM_NOMAP:
if ((start >= start_ && start < end_) ||
(start < start_ && start + size >= start_))
free = false;
@@ -213,6 +215,9 @@ static void __init print_memory_map(void)
case BOOT_MEM_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
+ case BOOT_MEM_NOMAP:
+ printk(KERN_CONT "(nomap)\n");
+ break;
default:
printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
@@ -371,7 +376,6 @@ static void __init bootmem_init(void)
static void __init bootmem_init(void)
{
- unsigned long reserved_end;
phys_addr_t ramstart = PHYS_ADDR_MAX;
int i;
@@ -382,10 +386,10 @@ static void __init bootmem_init(void)
* will reserve the area used for the initrd.
*/
init_initrd();
- reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
- memblock_reserve(PHYS_OFFSET,
- (reserved_end << PAGE_SHIFT) - PHYS_OFFSET);
+ /* Reserve memory occupied by kernel. */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
/*
* max_low_pfn is not a number of pages. The number of pages
@@ -394,10 +398,7 @@ static void __init bootmem_init(void)
min_low_pfn = ~0UL;
max_low_pfn = 0;
- /*
- * Find the highest page frame number we have available
- * and the lowest used RAM address
- */
+ /* Find the highest and lowest page frame numbers we have available. */
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -427,13 +428,6 @@ static void __init bootmem_init(void)
max_low_pfn = end;
if (start < min_low_pfn)
min_low_pfn = start;
- if (end <= reserved_end)
- continue;
-#ifdef CONFIG_BLK_DEV_INITRD
- /* Skip zones before initrd and initrd itself */
- if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
- continue;
-#endif
}
if (min_low_pfn >= max_low_pfn)
@@ -474,6 +468,7 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
+ /* Install all valid RAM ranges to the memblock memory region */
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -481,98 +476,38 @@ static void __init bootmem_init(void)
end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
- if (start <= min_low_pfn)
+ if (start < min_low_pfn)
start = min_low_pfn;
- if (start >= end)
- continue;
-
#ifndef CONFIG_HIGHMEM
+ /* Ignore highmem regions if highmem is unsupported */
if (end > max_low_pfn)
end = max_low_pfn;
-
- /*
- * ... finally, is the area going away?
- */
+#endif
if (end <= start)
continue;
-#endif
memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
- }
-
- /*
- * Register fully available low RAM pages with the bootmem allocator.
- */
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- unsigned long start, end, size;
- start = PFN_UP(boot_mem_map.map[i].addr);
- end = PFN_DOWN(boot_mem_map.map[i].addr
- + boot_mem_map.map[i].size);
-
- /*
- * Reserve usable memory.
- */
+ /* Reserve any memory except the ordinary RAM ranges. */
switch (boot_mem_map.map[i].type) {
case BOOT_MEM_RAM:
break;
- case BOOT_MEM_INIT_RAM:
- memory_present(0, start, end);
- continue;
- default:
- /* Not usable memory */
- if (start > min_low_pfn && end < max_low_pfn)
- memblock_reserve(boot_mem_map.map[i].addr,
- boot_mem_map.map[i].size);
-
+ case BOOT_MEM_NOMAP: /* Discard the range from the system. */
+ memblock_remove(PFN_PHYS(start), PFN_PHYS(end - start));
continue;
+ default: /* Reserve the rest of the memory types at boot time */
+ memblock_reserve(PFN_PHYS(start), PFN_PHYS(end - start));
+ break;
}
/*
- * We are rounding up the start address of usable memory
- * and at the end of the usable range downwards.
+ * In any case the regions added to memblock (highmem/lowmem,
+ * available/reserved, etc.) are considered present, so inform
+ * sparsemem about them.
*/
- if (start >= max_low_pfn)
- continue;
- if (start < reserved_end)
- start = reserved_end;
- if (end > max_low_pfn)
- end = max_low_pfn;
-
- /*
- * ... finally, is the area going away?
- */
- if (end <= start)
- continue;
- size = end - start;
-
- /* Register lowmem ranges */
memory_present(0, start, end);
}
-#ifdef CONFIG_RELOCATABLE
- /*
- * The kernel reserves all memory below its _end symbol as bootmem,
- * but the kernel may now be at a much higher address. The memory
- * between the original and new locations may be returned to the system.
- */
- if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
- unsigned long offset;
- extern void show_kernel_relocation(const char *level);
-
- offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
- memblock_free(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
-
-#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
- /*
- * This information is necessary when debugging the kernel
- * But is a security vulnerability otherwise!
- */
- show_kernel_relocation(KERN_INFO);
-#endif
- }
-#endif
-
/*
* Reserve initrd memory if needed.
*/
@@ -781,7 +716,6 @@ static void __init request_crashkernel(struct resource *res)
*/
static void __init arch_mem_init(char **cmdline_p)
{
- struct memblock_region *reg;
extern void plat_mem_setup(void);
/*
@@ -809,6 +743,9 @@ static void __init arch_mem_init(char **cmdline_p)
arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
BOOT_MEM_INIT_RAM);
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&__bss_start)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&__bss_stop)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
@@ -884,13 +821,16 @@ static void __init arch_mem_init(char **cmdline_p)
plat_swiotlb_setup();
dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
- /* Tell bootmem about cma reserved memblock section */
- for_each_memblock(reserved, reg)
- if (reg->size != 0)
- memblock_reserve(reg->base, reg->size);
- reserve_bootmem_region(__pa_symbol(&__nosave_begin),
- __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
+ /* Reserve for hibernation. */
+ memblock_reserve(__pa_symbol(&__nosave_begin),
+ __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
+
+ fdt_init_reserved_mem();
+
+ memblock_dump_all();
+
+ early_memtest(PFN_PHYS(min_low_pfn), PFN_PHYS(max_low_pfn));
}
static void __init resource_init(void)
@@ -935,6 +875,7 @@ static void __init resource_init(void)
res->flags |= IORESOURCE_SYSRAM;
break;
case BOOT_MEM_RESERVED:
+ case BOOT_MEM_NOMAP:
default:
res->name = "reserved";
}
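The setup.c rework above drops the old bootmem bookkeeping and leans entirely on memblock: every range in boot_mem_map is added with memblock_add_node(), BOOT_MEM_NOMAP ranges are then pulled back out with memblock_remove(), and every other non-RAM type is memblock_reserve()d before sparsemem is told about it. A small userspace model of that per-type dispatch; the memblock calls are only named in the output, nothing kernel-side is exercised:

#include <stdio.h>

enum { BOOT_MEM_RAM, BOOT_MEM_INIT_RAM, BOOT_MEM_RESERVED, BOOT_MEM_NOMAP };

static const char *memblock_action(int type)
{
	switch (type) {
	case BOOT_MEM_RAM:
		return "memblock_add_node() + memory_present()";
	case BOOT_MEM_NOMAP:
		return "memblock_add_node(), then memblock_remove()";
	default:
		return "memblock_add_node() + memblock_reserve() + memory_present()";
	}
}

int main(void)
{
	int types[] = { BOOT_MEM_RAM, BOOT_MEM_INIT_RAM,
			BOOT_MEM_RESERVED, BOOT_MEM_NOMAP };

	for (unsigned int i = 0; i < sizeof(types) / sizeof(types[0]); i++)
		printf("type %d -> %s\n", types[i], memblock_action(types[i]));
	return 0;
}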
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 9392dfe33f97..0e2dd68ade57 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -366,3 +366,9 @@
425 n32 io_uring_setup sys_io_uring_setup
426 n32 io_uring_enter sys_io_uring_enter
427 n32 io_uring_register sys_io_uring_register
+428 n32 open_tree sys_open_tree
+429 n32 move_mount sys_move_mount
+430 n32 fsopen sys_fsopen
+431 n32 fsconfig sys_fsconfig
+432 n32 fsmount sys_fsmount
+433 n32 fspick sys_fspick
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index cd0c8aa21fba..5eebfa0d155c 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -342,3 +342,9 @@
425 n64 io_uring_setup sys_io_uring_setup
426 n64 io_uring_enter sys_io_uring_enter
427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index e849e8ffe4a2..3cc1374e02d0 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -415,3 +415,9 @@
425 o32 io_uring_setup sys_io_uring_setup
426 o32 io_uring_enter sys_io_uring_enter
427 o32 io_uring_register sys_io_uring_register
+428 o32 open_tree sys_open_tree
+429 o32 move_mount sys_move_mount
+430 o32 fsopen sys_fsopen
+431 o32 fsconfig sys_fsconfig
+432 o32 fsmount sys_fsmount
+433 o32 fspick sys_fspick
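The six entries added to all three tables above (428 through 433) are the new mount API syscalls that landed in this merge window. Glibc shipped no wrappers for them at the time, so userspace reaches them through syscall(2). A hedged sketch of the intended tmpfs mounting sequence follows; the FSCONFIG_* and MOVE_MOUNT_F_EMPTY_PATH values are copied from the uapi <linux/mount.h> of this era, error handling is elided, and the SYS_* constants require kernel headers that already know these numbers:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

#define FSCONFIG_SET_STRING	1	/* values from uapi <linux/mount.h> */
#define FSCONFIG_CMD_CREATE	6
#define MOVE_MOUNT_F_EMPTY_PATH	0x00000004

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "tmpfs", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
		MOVE_MOUNT_F_EMPTY_PATH);
	close(mntfd);
	close(fsfd);
	return 0;
}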
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 98ca55d62201..c52766a5b85f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2151,7 +2151,7 @@ static void configure_hwrena(void)
static void configure_exception_vector(void)
{
- if (cpu_has_veic || cpu_has_vint) {
+ if (cpu_has_mips_r2_r6) {
unsigned long sr = set_c0_status(ST0_BEV);
/* If available, use WG to set top bits of EBASE */
if (cpu_has_ebase_wg) {
@@ -2163,6 +2163,8 @@ static void configure_exception_vector(void)
}
write_c0_ebase(ebase);
write_c0_status(sr);
+ }
+ if (cpu_has_veic || cpu_has_vint) {
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
@@ -2193,22 +2195,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
* o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/
if (cpu_has_mips_r2_r6) {
- /*
- * We shouldn't trust a secondary core has a sane EBASE register
- * so use the one calculated by the boot CPU.
- */
- if (!is_boot_cpu) {
- /* If available, use WG to set top bits of EBASE */
- if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
- write_c0_ebase_64(ebase | MIPS_EBASE_WG);
-#else
- write_c0_ebase(ebase | MIPS_EBASE_WG);
-#endif
- }
- write_c0_ebase(ebase);
- }
-
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2284,19 +2270,27 @@ void __init trap_init(void)
extern char except_vec3_generic;
extern char except_vec4;
extern char except_vec3_r4000;
- unsigned long i;
+ unsigned long i, vec_size;
+ phys_addr_t ebase_pa;
check_wait();
- if (cpu_has_veic || cpu_has_vint) {
- unsigned long size = 0x200 + VECTORSPACING*64;
- phys_addr_t ebase_pa;
+ if (!cpu_has_mips_r2_r6) {
+ ebase = CAC_BASE;
+ ebase_pa = virt_to_phys((void *)ebase);
+ vec_size = 0x400;
- ebase = (unsigned long)
- memblock_alloc(size, 1 << fls(size));
- if (!ebase)
+ memblock_reserve(ebase_pa, vec_size);
+ } else {
+ if (cpu_has_veic || cpu_has_vint)
+ vec_size = 0x200 + VECTORSPACING*64;
+ else
+ vec_size = PAGE_SIZE;
+
+ ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+ if (!ebase_pa)
panic("%s: Failed to allocate %lu bytes align=0x%x\n",
- __func__, size, 1 << fls(size));
+ __func__, vec_size, 1 << fls(vec_size));
/*
* Try to ensure ebase resides in KSeg0 if possible.
@@ -2309,23 +2303,10 @@ void __init trap_init(void)
* EVA is special though as it allows segments to be rearranged
* and to become uncached during cache error handling.
*/
- ebase_pa = __pa(ebase);
if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
ebase = CKSEG0ADDR(ebase_pa);
- } else {
- ebase = CAC_BASE;
-
- if (cpu_has_mips_r2_r6) {
- if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
- ebase = (read_c0_ebase_64() & ~0xfff);
-#else
- ebase = (read_c0_ebase() & ~0xfff);
-#endif
- } else {
- ebase += (read_c0_ebase() & 0x3ffff000);
- }
- }
+ else
+ ebase = (unsigned long)phys_to_virt(ebase_pa);
}
if (cpu_has_mmips) {
@@ -2459,7 +2440,7 @@ void __init trap_init(void)
else
set_handler(0x080, &except_vec3_generic, 0x80);
- local_flush_icache_range(ebase, ebase + 0x400);
+ local_flush_icache_range(ebase, ebase + vec_size);
sort_extable(__start___dbe_table, __stop___dbe_table);
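In the trap_init() rework above, CPUs without a programmable EBASE keep their vectors at CAC_BASE and simply memblock_reserve() the 0x400 bytes they occupy, while MIPSr2/r6 CPUs get a fresh block from memblock_phys_alloc(vec_size, 1 << fls(vec_size)), that is, aligned to the next power of two above the size, so the vector offsets never carry into the bits programmed into EBASE. A quick userspace model of the alignment term; fls() here is a plain C stand-in for the kernel helper and 0x100 is only an assumed VECTORSPACING value:

#include <stdio.h>

static int fls(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long sizes[] = { 0x400, 0x1000, 0x200 + 0x100 * 64 };

	/* e.g. vec_size 0x400 -> align 0x800 */
	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("vec_size 0x%lx -> align 0x%lx\n",
		       sizes[i], 1UL << fls(sizes[i]));
	return 0;
}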
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 4528bc9c3cb1..eac25aef21e0 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -21,7 +21,6 @@ config KVM
depends on MIPS_FP_SUPPORT
select EXPORT_UASM
select PREEMPT_NOTIFIERS
- select ANON_INODES
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 0074427b04fb..e5de6bac8197 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1141,9 +1141,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
unsigned long pc = vcpu->arch.pc;
int index;
- get_random_bytes(&index, sizeof(index));
- index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-
+ index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
tlb = &vcpu->arch.guest_tlb[index];
kvm_mips_invalidate_guest_tlb(vcpu, tlb);
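prandom_u32_max(n) returns a pseudo-random index in [0, n) and, unlike the mask it replaces here, does not require the bound to be a power of two; it is a multiply-and-shift rather than a modulo. A userspace analogue of the idea, with rand32() as a crude stand-in source of 32-bit randomness:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t rand32(void)
{
	/* crude stand-in: combine two libc rand() calls */
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Map a full 32-bit random value into [0, bound) without a division;
 * the bias is negligible for small bounds such as a guest TLB size. */
static uint32_t bounded_rand(uint32_t bound)
{
	return (uint32_t)(((uint64_t)rand32() * bound) >> 32);
}

int main(void)
{
	printf("index in [0, 64): %u\n", bounded_rand(64));
	return 0;
}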
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 0d14e0d8eacf..4c2b4483683c 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -235,7 +235,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
@@ -247,8 +247,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
@@ -273,7 +273,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+ pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
local_irq_enable();
@@ -289,7 +290,7 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
- pages, write ? FOLL_WRITE : 0);
+ pages, gup_flags);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index bbb196ad5f26..8a038b30d3c4 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -504,14 +504,6 @@ void free_init_pages(const char *what, unsigned long begin, unsigned long end)
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
-}
-#endif
-
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
void __ref free_initmem(void)
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 6abe40fc413d..7154a1d99aad 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -76,14 +76,22 @@ static const struct insn insn_table[insn_invalid] = {
[insn_daddiu] = {M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_daddu] = {M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD},
[insn_ddivu] = {M(spec_op, 0, 0, 0, 0, ddivu_op), RS | RT},
+ [insn_ddivu_r6] = {M(spec_op, 0, 0, 0, ddivu_ddivu6_op, ddivu_op),
+ RS | RT | RD},
[insn_di] = {M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT},
[insn_dins] = {M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE},
[insn_dinsm] = {M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE},
[insn_dinsu] = {M(spec3_op, 0, 0, 0, 0, dinsu_op), RS | RT | RD | RE},
[insn_divu] = {M(spec_op, 0, 0, 0, 0, divu_op), RS | RT},
+ [insn_divu_r6] = {M(spec_op, 0, 0, 0, divu_divu6_op, divu_op),
+ RS | RT | RD},
[insn_dmfc0] = {M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
+ [insn_dmodu] = {M(spec_op, 0, 0, 0, ddivu_dmodu_op, ddivu_op),
+ RS | RT | RD},
[insn_dmtc0] = {M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_dmultu] = {M(spec_op, 0, 0, 0, 0, dmultu_op), RS | RT},
+ [insn_dmulu] = {M(spec_op, 0, 0, 0, dmult_dmul_op, dmultu_op),
+ RS | RT | RD},
[insn_drotr] = {M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE},
[insn_drotr32] = {M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE},
[insn_dsbh] = {M(spec3_op, 0, 0, 0, dsbh_op, dbshfl_op), RT | RD},
@@ -132,12 +140,16 @@ static const struct insn insn_table[insn_invalid] = {
[insn_mfhc0] = {M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mfhi] = {M(spec_op, 0, 0, 0, 0, mfhi_op), RD},
[insn_mflo] = {M(spec_op, 0, 0, 0, 0, mflo_op), RD},
+ [insn_modu] = {M(spec_op, 0, 0, 0, divu_modu_op, divu_op),
+ RS | RT | RD},
[insn_movn] = {M(spec_op, 0, 0, 0, 0, movn_op), RS | RT | RD},
[insn_movz] = {M(spec_op, 0, 0, 0, 0, movz_op), RS | RT | RD},
[insn_mtc0] = {M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mthc0] = {M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
[insn_mthi] = {M(spec_op, 0, 0, 0, 0, mthi_op), RS},
[insn_mtlo] = {M(spec_op, 0, 0, 0, 0, mtlo_op), RS},
+ [insn_mulu] = {M(spec_op, 0, 0, 0, multu_mulu_op, multu_op),
+ RS | RT | RD},
#ifndef CONFIG_CPU_MIPSR6
[insn_mul] = {M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
#else
@@ -163,6 +175,8 @@ static const struct insn insn_table[insn_invalid] = {
[insn_scd] = {M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9},
#endif
[insn_sd] = {M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
+ [insn_seleqz] = {M(spec_op, 0, 0, 0, 0, seleqz_op), RS | RT | RD},
+ [insn_selnez] = {M(spec_op, 0, 0, 0, 0, selnez_op), RS | RT | RD},
[insn_sh] = {M(sh_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sll] = {M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE},
[insn_sllv] = {M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD},
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 45b6264ff308..c56f129c9a4b 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -50,21 +50,22 @@ enum opcode {
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bgtz, insn_blez,
insn_bltz, insn_bltzl, insn_bne, insn_break, insn_cache, insn_cfc1,
insn_cfcmsa, insn_ctc1, insn_ctcmsa, insn_daddiu, insn_daddu, insn_ddivu,
- insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu, insn_dmfc0,
- insn_dmtc0, insn_dmultu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd,
- insn_dsll, insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav,
- insn_dsrl, insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext,
- insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu,
- insn_ld, insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu,
- insn_ll, insn_lld, insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0,
- insn_mfhc0, insn_mfhi, insn_mflo, insn_movn, insn_movz, insn_mtc0,
- insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
- insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
- insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
- insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav,
- insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
- insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
- insn_xor, insn_xori, insn_yield,
+ insn_ddivu_r6, insn_di, insn_dins, insn_dinsm, insn_dinsu, insn_divu,
+ insn_divu_r6, insn_dmfc0, insn_dmodu, insn_dmtc0, insn_dmultu,
+ insn_dmulu, insn_drotr, insn_drotr32, insn_dsbh, insn_dshd, insn_dsll,
+ insn_dsll32, insn_dsllv, insn_dsra, insn_dsra32, insn_dsrav, insn_dsrl,
+ insn_dsrl32, insn_dsrlv, insn_dsubu, insn_eret, insn_ext, insn_ins,
+ insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, insn_lbu, insn_ld,
+ insn_lddir, insn_ldpte, insn_ldx, insn_lh, insn_lhu, insn_ll, insn_lld,
+ insn_lui, insn_lw, insn_lwu, insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi,
+ insn_mflo, insn_modu, insn_movn, insn_movz, insn_mtc0, insn_mthc0,
+ insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_mulu, insn_nor,
+ insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb, insn_sc,
+ insn_scd, insn_seleqz, insn_selnez, insn_sd, insn_sh, insn_sll,
+ insn_sllv, insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra,
+ insn_srav, insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync,
+ insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait,
+ insn_wsbh, insn_xor, insn_xori, insn_yield,
insn_invalid /* insn_invalid must be last */
};
@@ -287,13 +288,17 @@ I_u2u1(_cfcmsa)
I_u1u2(_ctc1)
I_u2u1(_ctcmsa)
I_u1u2(_ddivu)
+I_u3u1u2(_ddivu_r6)
I_u1u2u3(_dmfc0)
+I_u3u1u2(_dmodu)
I_u1u2u3(_dmtc0)
I_u1u2(_dmultu)
+I_u3u1u2(_dmulu)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u1(_di);
I_u1u2(_divu)
+I_u3u1u2(_divu_r6)
I_u2u1(_dsbh);
I_u2u1(_dshd);
I_u2u1u3(_dsll)
@@ -327,6 +332,7 @@ I_u2s3u1(_lw)
I_u2s3u1(_lwu)
I_u1u2u3(_mfc0)
I_u1u2u3(_mfhc0)
+I_u3u1u2(_modu)
I_u3u1u2(_movn)
I_u3u1u2(_movz)
I_u1(_mfhi)
@@ -337,6 +343,7 @@ I_u1(_mthi)
I_u1(_mtlo)
I_u3u1u2(_mul)
I_u1u2(_multu)
+I_u3u1u2(_mulu)
I_u3u1u2(_nor)
I_u3u1u2(_or)
I_u2u1u3(_ori)
@@ -345,6 +352,8 @@ I_u2s3u1(_sb)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
+I_u3u1u2(_seleqz)
+I_u3u1u2(_selnez)
I_u2s3u1(_sh)
I_u2u1u3(_sll)
I_u3u2u1(_sllv)
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
index 47d678416715..72a78462f872 100644
--- a/arch/mips/net/Makefile
+++ b/arch/mips/net/Makefile
@@ -1,4 +1,3 @@
# MIPS networking code
-obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
deleted file mode 100644
index 3a0e34f4e615..000000000000
--- a/arch/mips/net/bpf_jit.c
+++ /dev/null
@@ -1,1270 +0,0 @@
-/*
- * Just-In-Time compiler for BPF filters on MIPS
- *
- * Copyright (c) 2014 Imagination Technologies Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include <linux/bitops.h>
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/filter.h>
-#include <linux/if_vlan.h>
-#include <linux/moduleloader.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <asm/asm.h>
-#include <asm/bitops.h>
-#include <asm/cacheflush.h>
-#include <asm/cpu-features.h>
-#include <asm/uasm.h>
-
-#include "bpf_jit.h"
-
-/* ABI
- * r_skb_hl SKB header length
- * r_data SKB data pointer
- * r_off Offset
- * r_A BPF register A
- * r_X BPF register X
- * r_skb *skb
- * r_M *scratch memory
- * r_skb_len SKB length
- *
- * On entry (*bpf_func)(*skb, *filter)
- * a0 = MIPS_R_A0 = skb;
- * a1 = MIPS_R_A1 = filter;
- *
- * Stack
- * ...
- * M[15]
- * M[14]
- * M[13]
- * ...
- * M[0] <-- r_M
- * saved reg k-1
- * saved reg k-2
- * ...
- * saved reg 0 <-- r_sp
- * <no argument area>
- *
- * Packet layout
- *
- * <--------------------- len ------------------------>
- * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
- * ----------------------------------------------------
- * | skb->data |
- * ----------------------------------------------------
- */
-
-#define ptr typeof(unsigned long)
-
-#define SCRATCH_OFF(k) (4 * (k))
-
-/* JIT flags */
-#define SEEN_CALL (1 << BPF_MEMWORDS)
-#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
-#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
-#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
-#define SEEN_OFF SEEN_SREG(2)
-#define SEEN_A SEEN_SREG(3)
-#define SEEN_X SEEN_SREG(4)
-#define SEEN_SKB SEEN_SREG(5)
-#define SEEN_MEM SEEN_SREG(6)
-/* SEEN_SKB_DATA also implies skb_hl and skb_len */
-#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
-
-/* Arguments used by JIT */
-#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
-
-#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
-
-/**
- * struct jit_ctx - JIT context
- * @skf: The sk_filter
- * @prologue_bytes: Number of bytes for prologue
- * @idx: Instruction index
- * @flags: JIT flags
- * @offsets: Instruction offsets
- * @target: Memory location for the compiled filter
- */
-struct jit_ctx {
- const struct bpf_prog *skf;
- unsigned int prologue_bytes;
- u32 idx;
- u32 flags;
- u32 *offsets;
- u32 *target;
-};
-
-
-static inline int optimize_div(u32 *k)
-{
- /* power of 2 divides can be implemented with right shift */
- if (!(*k & (*k-1))) {
- *k = ilog2(*k);
- return 1;
- }
-
- return 0;
-}
-
-static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
-
-/* Simply emit the instruction if the JIT memory space has been allocated */
-#define emit_instr(ctx, func, ...) \
-do { \
- if ((ctx)->target != NULL) { \
- u32 *p = &(ctx)->target[ctx->idx]; \
- uasm_i_##func(&p, ##__VA_ARGS__); \
- } \
- (ctx)->idx++; \
-} while (0)
-
-/*
- * Similar to emit_instr but it must be used when we need to emit
- * 32-bit or 64-bit instructions
- */
-#define emit_long_instr(ctx, func, ...) \
-do { \
- if ((ctx)->target != NULL) { \
- u32 *p = &(ctx)->target[ctx->idx]; \
- UASM_i_##func(&p, ##__VA_ARGS__); \
- } \
- (ctx)->idx++; \
-} while (0)
-
-/* Determine if immediate is within the 16-bit signed range */
-static inline bool is_range16(s32 imm)
-{
- return !(imm >= SBIT(15) || imm < -SBIT(15));
-}
-
-static inline void emit_addu(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, addu, dst, src1, src2);
-}
-
-static inline void emit_nop(struct jit_ctx *ctx)
-{
- emit_instr(ctx, nop);
-}
-
-/* Load a u32 immediate to a register */
-static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
-{
- if (ctx->target != NULL) {
- /* addiu can only handle s16 */
- if (!is_range16(imm)) {
- u32 *p = &ctx->target[ctx->idx];
- uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
- p = &ctx->target[ctx->idx + 1];
- uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
- } else {
- u32 *p = &ctx->target[ctx->idx];
- uasm_i_addiu(&p, dst, r_zero, imm);
- }
- }
- ctx->idx++;
-
- if (!is_range16(imm))
- ctx->idx++;
-}
-
-static inline void emit_or(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, or, dst, src1, src2);
-}
-
-static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
- struct jit_ctx *ctx)
-{
- if (imm >= BIT(16)) {
- emit_load_imm(r_tmp, imm, ctx);
- emit_or(dst, src, r_tmp, ctx);
- } else {
- emit_instr(ctx, ori, dst, src, imm);
- }
-}
-
-static inline void emit_daddiu(unsigned int dst, unsigned int src,
- int imm, struct jit_ctx *ctx)
-{
- /*
- * Only used for stack, so the imm is relatively small
- * and it fits in 15-bits
- */
- emit_instr(ctx, daddiu, dst, src, imm);
-}
-
-static inline void emit_addiu(unsigned int dst, unsigned int src,
- u32 imm, struct jit_ctx *ctx)
-{
- if (!is_range16(imm)) {
- emit_load_imm(r_tmp, imm, ctx);
- emit_addu(dst, r_tmp, src, ctx);
- } else {
- emit_instr(ctx, addiu, dst, src, imm);
- }
-}
-
-static inline void emit_and(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, and, dst, src1, src2);
-}
-
-static inline void emit_andi(unsigned int dst, unsigned int src,
- u32 imm, struct jit_ctx *ctx)
-{
- /* If imm does not fit in u16 then load it to register */
- if (imm >= BIT(16)) {
- emit_load_imm(r_tmp, imm, ctx);
- emit_and(dst, src, r_tmp, ctx);
- } else {
- emit_instr(ctx, andi, dst, src, imm);
- }
-}
-
-static inline void emit_xor(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, xor, dst, src1, src2);
-}
-
-static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
-{
- /* If imm does not fit in u16 then load it to register */
- if (imm >= BIT(16)) {
- emit_load_imm(r_tmp, imm, ctx);
- emit_xor(dst, src, r_tmp, ctx);
- } else {
- emit_instr(ctx, xori, dst, src, imm);
- }
-}
-
-static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
-{
- emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
-}
-
-static inline void emit_subu(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, subu, dst, src1, src2);
-}
-
-static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
-{
- emit_subu(reg, r_zero, reg, ctx);
-}
-
-static inline void emit_sllv(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- emit_instr(ctx, sllv, dst, src, sa);
-}
-
-static inline void emit_sll(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- /* sa is 5-bits long */
- if (sa >= BIT(5))
- /* Shifting >= 32 results in zero */
- emit_jit_reg_move(dst, r_zero, ctx);
- else
- emit_instr(ctx, sll, dst, src, sa);
-}
-
-static inline void emit_srlv(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- emit_instr(ctx, srlv, dst, src, sa);
-}
-
-static inline void emit_srl(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- /* sa is 5-bits long */
- if (sa >= BIT(5))
- /* Shifting >= 32 results in zero */
- emit_jit_reg_move(dst, r_zero, ctx);
- else
- emit_instr(ctx, srl, dst, src, sa);
-}
-
-static inline void emit_slt(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, slt, dst, src1, src2);
-}
-
-static inline void emit_sltu(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, sltu, dst, src1, src2);
-}
-
-static inline void emit_sltiu(unsigned dst, unsigned int src,
- unsigned int imm, struct jit_ctx *ctx)
-{
- /* 16 bit immediate */
- if (!is_range16((s32)imm)) {
- emit_load_imm(r_tmp, imm, ctx);
- emit_sltu(dst, src, r_tmp, ctx);
- } else {
- emit_instr(ctx, sltiu, dst, src, imm);
- }
-
-}
-
-/* Store register on the stack */
-static inline void emit_store_stack_reg(ptr reg, ptr base,
- unsigned int offset,
- struct jit_ctx *ctx)
-{
- emit_long_instr(ctx, SW, reg, offset, base);
-}
-
-static inline void emit_store(ptr reg, ptr base, unsigned int offset,
- struct jit_ctx *ctx)
-{
- emit_instr(ctx, sw, reg, offset, base);
-}
-
-static inline void emit_load_stack_reg(ptr reg, ptr base,
- unsigned int offset,
- struct jit_ctx *ctx)
-{
- emit_long_instr(ctx, LW, reg, offset, base);
-}
-
-static inline void emit_load(unsigned int reg, unsigned int base,
- unsigned int offset, struct jit_ctx *ctx)
-{
- emit_instr(ctx, lw, reg, offset, base);
-}
-
-static inline void emit_load_byte(unsigned int reg, unsigned int base,
- unsigned int offset, struct jit_ctx *ctx)
-{
- emit_instr(ctx, lb, reg, offset, base);
-}
-
-static inline void emit_half_load(unsigned int reg, unsigned int base,
- unsigned int offset, struct jit_ctx *ctx)
-{
- emit_instr(ctx, lh, reg, offset, base);
-}
-
-static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
- unsigned int offset, struct jit_ctx *ctx)
-{
- emit_instr(ctx, lhu, reg, offset, base);
-}
-
-static inline void emit_mul(unsigned int dst, unsigned int src1,
- unsigned int src2, struct jit_ctx *ctx)
-{
- emit_instr(ctx, mul, dst, src1, src2);
-}
-
-static inline void emit_div(unsigned int dst, unsigned int src,
- struct jit_ctx *ctx)
-{
- if (ctx->target != NULL) {
- u32 *p = &ctx->target[ctx->idx];
- uasm_i_divu(&p, dst, src);
- p = &ctx->target[ctx->idx + 1];
- uasm_i_mflo(&p, dst);
- }
- ctx->idx += 2; /* 2 insts */
-}
-
-static inline void emit_mod(unsigned int dst, unsigned int src,
- struct jit_ctx *ctx)
-{
- if (ctx->target != NULL) {
- u32 *p = &ctx->target[ctx->idx];
- uasm_i_divu(&p, dst, src);
- p = &ctx->target[ctx->idx + 1];
- uasm_i_mfhi(&p, dst);
- }
- ctx->idx += 2; /* 2 insts */
-}
-
-static inline void emit_dsll(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- emit_instr(ctx, dsll, dst, src, sa);
-}
-
-static inline void emit_dsrl32(unsigned int dst, unsigned int src,
- unsigned int sa, struct jit_ctx *ctx)
-{
- emit_instr(ctx, dsrl32, dst, src, sa);
-}
-
-static inline void emit_wsbh(unsigned int dst, unsigned int src,
- struct jit_ctx *ctx)
-{
- emit_instr(ctx, wsbh, dst, src);
-}
-
-/* load pointer to register */
-static inline void emit_load_ptr(unsigned int dst, unsigned int src,
- int imm, struct jit_ctx *ctx)
-{
- /* src contains the base addr of the 32/64-pointer */
- emit_long_instr(ctx, LW, dst, imm, src);
-}
-
-/* load a function pointer to register */
-static inline void emit_load_func(unsigned int reg, ptr imm,
- struct jit_ctx *ctx)
-{
- if (IS_ENABLED(CONFIG_64BIT)) {
- /* At this point imm is always 64-bit */
- emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
- emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
- emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
- emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
- emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
- } else {
- emit_load_imm(reg, imm, ctx);
- }
-}
-
-/* Move to real MIPS register */
-static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
-{
- emit_long_instr(ctx, ADDU, dst, src, r_zero);
-}
-
-/* Move to JIT (32-bit) register */
-static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
-{
- emit_addu(dst, src, r_zero, ctx);
-}
-
-/* Compute the immediate value for PC-relative branches. */
-static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
-{
- if (ctx->target == NULL)
- return 0;
-
- /*
- * We want a pc-relative branch. We only do forward branches
- * so tgt is always after pc. tgt is the instruction offset
- * we want to jump to.
-
- * Branch on MIPS:
- * I: target_offset <- sign_extend(offset)
- * I+1: PC += target_offset (delay slot)
- *
- * ctx->idx currently points to the branch instruction
- * but the offset is added to the delay slot so we need
- * to subtract 4.
- */
- return ctx->offsets[tgt] -
- (ctx->idx * 4 - ctx->prologue_bytes) - 4;
-}
-
-static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
- unsigned int imm, struct jit_ctx *ctx)
-{
- if (ctx->target != NULL) {
- u32 *p = &ctx->target[ctx->idx];
-
- switch (cond) {
- case MIPS_COND_EQ:
- uasm_i_beq(&p, reg1, reg2, imm);
- break;
- case MIPS_COND_NE:
- uasm_i_bne(&p, reg1, reg2, imm);
- break;
- case MIPS_COND_ALL:
- uasm_i_b(&p, imm);
- break;
- default:
- pr_warn("%s: Unhandled branch conditional: %d\n",
- __func__, cond);
- }
- }
- ctx->idx++;
-}
-
-static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
-{
- emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
-}
-
-static inline void emit_jalr(unsigned int link, unsigned int reg,
- struct jit_ctx *ctx)
-{
- emit_instr(ctx, jalr, link, reg);
-}
-
-static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
-{
- emit_instr(ctx, jr, reg);
-}
-
-static inline u16 align_sp(unsigned int num)
-{
- /* Double word alignment for 32-bit, quadword for 64-bit */
- unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
- num = (num + (align - 1)) & -align;
- return num;
-}
-
-static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
-{
- int i = 0, real_off = 0;
- u32 sflags, tmp_flags;
-
- /* Adjust the stack pointer */
- if (offset)
- emit_stack_offset(-align_sp(offset), ctx);
-
- tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
- /* sflags is essentially a bitmap */
- while (tmp_flags) {
- if ((sflags >> i) & 0x1) {
- emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
- ctx);
- real_off += SZREG;
- }
- i++;
- tmp_flags >>= 1;
- }
-
- /* save return address */
- if (ctx->flags & SEEN_CALL) {
- emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
- real_off += SZREG;
- }
-
- /* Setup r_M leaving the alignment gap if necessary */
- if (ctx->flags & SEEN_MEM) {
- if (real_off % (SZREG * 2))
- real_off += SZREG;
- emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
- }
-}
-
-static void restore_bpf_jit_regs(struct jit_ctx *ctx,
- unsigned int offset)
-{
- int i, real_off = 0;
- u32 sflags, tmp_flags;
-
- tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
- /* sflags is a bitmap */
- i = 0;
- while (tmp_flags) {
- if ((sflags >> i) & 0x1) {
- emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
- ctx);
- real_off += SZREG;
- }
- i++;
- tmp_flags >>= 1;
- }
-
- /* restore return address */
- if (ctx->flags & SEEN_CALL)
- emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
-
- /* Restore the sp and discard the scratch memory */
- if (offset)
- emit_stack_offset(align_sp(offset), ctx);
-}
-
-static unsigned int get_stack_depth(struct jit_ctx *ctx)
-{
- int sp_off = 0;
-
-
- /* How many s* regs do we need to preserve? */
- sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
-
- if (ctx->flags & SEEN_MEM)
- sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
-
- if (ctx->flags & SEEN_CALL)
- sp_off += SZREG; /* Space for our ra register */
-
- return sp_off;
-}
-
-static void build_prologue(struct jit_ctx *ctx)
-{
- int sp_off;
-
- /* Calculate the total offset for the stack pointer */
- sp_off = get_stack_depth(ctx);
- save_bpf_jit_regs(ctx, sp_off);
-
- if (ctx->flags & SEEN_SKB)
- emit_reg_move(r_skb, MIPS_R_A0, ctx);
-
- if (ctx->flags & SEEN_SKB_DATA) {
- /* Load packet length */
- emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
- ctx);
- emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
- ctx);
- /* Load the data pointer */
- emit_load_ptr(r_skb_data, r_skb,
- offsetof(struct sk_buff, data), ctx);
- /* Load the header length */
- emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
- }
-
- if (ctx->flags & SEEN_X)
- emit_jit_reg_move(r_X, r_zero, ctx);
-
- /*
- * Do not leak kernel data to userspace, we only need to clear
- * r_A if it is ever used. In fact if it is never used, we
- * will not save/restore it, so clearing it in this case would
- * corrupt the state of the caller.
- */
- if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
- (ctx->flags & SEEN_A))
- emit_jit_reg_move(r_A, r_zero, ctx);
-}
-
-static void build_epilogue(struct jit_ctx *ctx)
-{
- unsigned int sp_off;
-
- /* Calculate the total offset for the stack pointer */
-
- sp_off = get_stack_depth(ctx);
- restore_bpf_jit_regs(ctx, sp_off);
-
- /* Return */
- emit_jr(r_ra, ctx);
- emit_nop(ctx);
-}
-
-#define CHOOSE_LOAD_FUNC(K, func) \
- ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
- func##_positive)
-
-static int build_body(struct jit_ctx *ctx)
-{
- const struct bpf_prog *prog = ctx->skf;
- const struct sock_filter *inst;
- unsigned int i, off, condt;
- u32 k, b_off __maybe_unused;
- u8 (*sk_load_func)(unsigned long *skb, int offset);
-
- for (i = 0; i < prog->len; i++) {
- u16 code;
-
- inst = &(prog->insns[i]);
- pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
- __func__, inst->code, inst->jt, inst->jf, inst->k);
- k = inst->k;
- code = bpf_anc_helper(inst);
-
- if (ctx->target == NULL)
- ctx->offsets[i] = ctx->idx * 4;
-
- switch (code) {
- case BPF_LD | BPF_IMM:
- /* A <- k ==> li r_A, k */
- ctx->flags |= SEEN_A;
- emit_load_imm(r_A, k, ctx);
- break;
- case BPF_LD | BPF_W | BPF_LEN:
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- /* A <- len ==> lw r_A, offset(skb) */
- ctx->flags |= SEEN_SKB | SEEN_A;
- off = offsetof(struct sk_buff, len);
- emit_load(r_A, r_skb, off, ctx);
- break;
- case BPF_LD | BPF_MEM:
- /* A <- M[k] ==> lw r_A, offset(M) */
- ctx->flags |= SEEN_MEM | SEEN_A;
- emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
- break;
- case BPF_LD | BPF_W | BPF_ABS:
- /* A <- P[k:4] */
- sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
- goto load;
- case BPF_LD | BPF_H | BPF_ABS:
- /* A <- P[k:2] */
- sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
- goto load;
- case BPF_LD | BPF_B | BPF_ABS:
- /* A <- P[k:1] */
- sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
-load:
- emit_load_imm(r_off, k, ctx);
-load_common:
- ctx->flags |= SEEN_CALL | SEEN_OFF |
- SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
-
- emit_load_func(r_s0, (ptr)sk_load_func, ctx);
- emit_reg_move(MIPS_R_A0, r_skb, ctx);
- emit_jalr(MIPS_R_RA, r_s0, ctx);
- /* Load second argument to delay slot */
- emit_reg_move(MIPS_R_A1, r_off, ctx);
- /* Check the error value */
- emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
- ctx);
- /* Load return register on DS for failures */
- emit_reg_move(r_ret, r_zero, ctx);
- /* Return with error */
- emit_b(b_imm(prog->len, ctx), ctx);
- emit_nop(ctx);
- break;
- case BPF_LD | BPF_W | BPF_IND:
- /* A <- P[X + k:4] */
- sk_load_func = sk_load_word;
- goto load_ind;
- case BPF_LD | BPF_H | BPF_IND:
- /* A <- P[X + k:2] */
- sk_load_func = sk_load_half;
- goto load_ind;
- case BPF_LD | BPF_B | BPF_IND:
- /* A <- P[X + k:1] */
- sk_load_func = sk_load_byte;
-load_ind:
- ctx->flags |= SEEN_OFF | SEEN_X;
- emit_addiu(r_off, r_X, k, ctx);
- goto load_common;
- case BPF_LDX | BPF_IMM:
- /* X <- k */
- ctx->flags |= SEEN_X;
- emit_load_imm(r_X, k, ctx);
- break;
- case BPF_LDX | BPF_MEM:
- /* X <- M[k] */
- ctx->flags |= SEEN_X | SEEN_MEM;
- emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
- break;
- case BPF_LDX | BPF_W | BPF_LEN:
- /* X <- len */
- ctx->flags |= SEEN_X | SEEN_SKB;
- off = offsetof(struct sk_buff, len);
- emit_load(r_X, r_skb, off, ctx);
- break;
- case BPF_LDX | BPF_B | BPF_MSH:
- /* X <- 4 * (P[k:1] & 0xf) */
- ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
- /* Load offset to a1 */
- emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
- /*
- * This may emit two instructions so it may not fit
- * in the delay slot. So use a0 in the delay slot.
- */
- emit_load_imm(MIPS_R_A1, k, ctx);
- emit_jalr(MIPS_R_RA, r_s0, ctx);
- emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
- /* Check the error value */
- emit_bcond(MIPS_COND_NE, r_ret, 0,
- b_imm(prog->len, ctx), ctx);
- emit_reg_move(r_ret, r_zero, ctx);
- /* We are good */
- /* X <- P[k:1] & 0xf */
- emit_andi(r_X, r_A, 0xf, ctx);
- /* X << 2 */
- emit_b(b_imm(i + 1, ctx), ctx);
- emit_sll(r_X, r_X, 2, ctx); /* delay slot */
- break;
- case BPF_ST:
- /* M[k] <- A */
- ctx->flags |= SEEN_MEM | SEEN_A;
- emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
- break;
- case BPF_STX:
- /* M[k] <- X */
- ctx->flags |= SEEN_MEM | SEEN_X;
- emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
- break;
- case BPF_ALU | BPF_ADD | BPF_K:
- /* A += K */
- ctx->flags |= SEEN_A;
- emit_addiu(r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_ADD | BPF_X:
- /* A += X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_addu(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_SUB | BPF_K:
- /* A -= K */
- ctx->flags |= SEEN_A;
- emit_addiu(r_A, r_A, -k, ctx);
- break;
- case BPF_ALU | BPF_SUB | BPF_X:
- /* A -= X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_subu(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_MUL | BPF_K:
- /* A *= K */
- /* Load K to scratch register before MUL */
- ctx->flags |= SEEN_A;
- emit_load_imm(r_s0, k, ctx);
- emit_mul(r_A, r_A, r_s0, ctx);
- break;
- case BPF_ALU | BPF_MUL | BPF_X:
- /* A *= X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_mul(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_DIV | BPF_K:
- /* A /= k */
- if (k == 1)
- break;
- if (optimize_div(&k)) {
- ctx->flags |= SEEN_A;
- emit_srl(r_A, r_A, k, ctx);
- break;
- }
- ctx->flags |= SEEN_A;
- emit_load_imm(r_s0, k, ctx);
- emit_div(r_A, r_s0, ctx);
- break;
- case BPF_ALU | BPF_MOD | BPF_K:
- /* A %= k */
- if (k == 1) {
- ctx->flags |= SEEN_A;
- emit_jit_reg_move(r_A, r_zero, ctx);
- } else {
- ctx->flags |= SEEN_A;
- emit_load_imm(r_s0, k, ctx);
- emit_mod(r_A, r_s0, ctx);
- }
- break;
- case BPF_ALU | BPF_DIV | BPF_X:
- /* A /= X */
- ctx->flags |= SEEN_X | SEEN_A;
- /* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
- emit_load_imm(r_ret, 0, ctx); /* delay slot */
- emit_div(r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_MOD | BPF_X:
- /* A %= X */
- ctx->flags |= SEEN_X | SEEN_A;
- /* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
- emit_load_imm(r_ret, 0, ctx); /* delay slot */
- emit_mod(r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_OR | BPF_K:
- /* A |= K */
- ctx->flags |= SEEN_A;
- emit_ori(r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_OR | BPF_X:
- /* A |= X */
- ctx->flags |= SEEN_A;
- emit_ori(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_XOR | BPF_K:
- /* A ^= k */
- ctx->flags |= SEEN_A;
- emit_xori(r_A, r_A, k, ctx);
- break;
- case BPF_ANC | SKF_AD_ALU_XOR_X:
- case BPF_ALU | BPF_XOR | BPF_X:
- /* A ^= X */
- ctx->flags |= SEEN_A;
- emit_xor(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_AND | BPF_K:
- /* A &= K */
- ctx->flags |= SEEN_A;
- emit_andi(r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_AND | BPF_X:
- /* A &= X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_and(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_LSH | BPF_K:
- /* A <<= K */
- ctx->flags |= SEEN_A;
- emit_sll(r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_LSH | BPF_X:
- /* A <<= X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_sllv(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_RSH | BPF_K:
- /* A >>= K */
- ctx->flags |= SEEN_A;
- emit_srl(r_A, r_A, k, ctx);
- break;
- case BPF_ALU | BPF_RSH | BPF_X:
- ctx->flags |= SEEN_A | SEEN_X;
- emit_srlv(r_A, r_A, r_X, ctx);
- break;
- case BPF_ALU | BPF_NEG:
- /* A = -A */
- ctx->flags |= SEEN_A;
- emit_neg(r_A, ctx);
- break;
- case BPF_JMP | BPF_JA:
- /* pc += K */
- emit_b(b_imm(i + k + 1, ctx), ctx);
- emit_nop(ctx);
- break;
- case BPF_JMP | BPF_JEQ | BPF_K:
- /* pc += ( A == K ) ? pc->jt : pc->jf */
- condt = MIPS_COND_EQ | MIPS_COND_K;
- goto jmp_cmp;
- case BPF_JMP | BPF_JEQ | BPF_X:
- ctx->flags |= SEEN_X;
- /* pc += ( A == X ) ? pc->jt : pc->jf */
- condt = MIPS_COND_EQ | MIPS_COND_X;
- goto jmp_cmp;
- case BPF_JMP | BPF_JGE | BPF_K:
- /* pc += ( A >= K ) ? pc->jt : pc->jf */
- condt = MIPS_COND_GE | MIPS_COND_K;
- goto jmp_cmp;
- case BPF_JMP | BPF_JGE | BPF_X:
- ctx->flags |= SEEN_X;
- /* pc += ( A >= X ) ? pc->jt : pc->jf */
- condt = MIPS_COND_GE | MIPS_COND_X;
- goto jmp_cmp;
- case BPF_JMP | BPF_JGT | BPF_K:
- /* pc += ( A > K ) ? pc->jt : pc->jf */
- condt = MIPS_COND_GT | MIPS_COND_K;
- goto jmp_cmp;
- case BPF_JMP | BPF_JGT | BPF_X:
- ctx->flags |= SEEN_X;
- /* pc += ( A > X ) ? pc->jt : pc->jf */
- condt = MIPS_COND_GT | MIPS_COND_X;
-jmp_cmp:
- /* Greater or Equal */
- if ((condt & MIPS_COND_GE) ||
- (condt & MIPS_COND_GT)) {
- if (condt & MIPS_COND_K) { /* K */
- ctx->flags |= SEEN_A;
- emit_sltiu(r_s0, r_A, k, ctx);
- } else { /* X */
- ctx->flags |= SEEN_A |
- SEEN_X;
- emit_sltu(r_s0, r_A, r_X, ctx);
- }
- /* A < (K|X) ? r_scratch = 1 */
- b_off = b_imm(i + inst->jf + 1, ctx);
- emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
- ctx);
- emit_nop(ctx);
- /* A > (K|X) ? scratch = 0 */
- if (condt & MIPS_COND_GT) {
- /* Checking for equality */
- ctx->flags |= SEEN_A | SEEN_X;
- if (condt & MIPS_COND_K)
- emit_load_imm(r_s0, k, ctx);
- else
- emit_jit_reg_move(r_s0, r_X,
- ctx);
- b_off = b_imm(i + inst->jf + 1, ctx);
- emit_bcond(MIPS_COND_EQ, r_A, r_s0,
- b_off, ctx);
- emit_nop(ctx);
- /* Finally, A > K|X */
- b_off = b_imm(i + inst->jt + 1, ctx);
- emit_b(b_off, ctx);
- emit_nop(ctx);
- } else {
- /* A >= (K|X) so jump */
- b_off = b_imm(i + inst->jt + 1, ctx);
- emit_b(b_off, ctx);
- emit_nop(ctx);
- }
- } else {
- /* A == K|X */
- if (condt & MIPS_COND_K) { /* K */
- ctx->flags |= SEEN_A;
- emit_load_imm(r_s0, k, ctx);
- /* jump true */
- b_off = b_imm(i + inst->jt + 1, ctx);
- emit_bcond(MIPS_COND_EQ, r_A, r_s0,
- b_off, ctx);
- emit_nop(ctx);
- /* jump false */
- b_off = b_imm(i + inst->jf + 1,
- ctx);
- emit_bcond(MIPS_COND_NE, r_A, r_s0,
- b_off, ctx);
- emit_nop(ctx);
- } else { /* X */
- /* jump true */
- ctx->flags |= SEEN_A | SEEN_X;
- b_off = b_imm(i + inst->jt + 1,
- ctx);
- emit_bcond(MIPS_COND_EQ, r_A, r_X,
- b_off, ctx);
- emit_nop(ctx);
- /* jump false */
- b_off = b_imm(i + inst->jf + 1, ctx);
- emit_bcond(MIPS_COND_NE, r_A, r_X,
- b_off, ctx);
- emit_nop(ctx);
- }
- }
- break;
- case BPF_JMP | BPF_JSET | BPF_K:
- ctx->flags |= SEEN_A;
- /* pc += (A & K) ? pc -> jt : pc -> jf */
- emit_load_imm(r_s1, k, ctx);
- emit_and(r_s0, r_A, r_s1, ctx);
- /* jump true */
- b_off = b_imm(i + inst->jt + 1, ctx);
- emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
- emit_nop(ctx);
- /* jump false */
- b_off = b_imm(i + inst->jf + 1, ctx);
- emit_b(b_off, ctx);
- emit_nop(ctx);
- break;
- case BPF_JMP | BPF_JSET | BPF_X:
- ctx->flags |= SEEN_X | SEEN_A;
- /* pc += (A & X) ? pc -> jt : pc -> jf */
- emit_and(r_s0, r_A, r_X, ctx);
- /* jump true */
- b_off = b_imm(i + inst->jt + 1, ctx);
- emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
- emit_nop(ctx);
- /* jump false */
- b_off = b_imm(i + inst->jf + 1, ctx);
- emit_b(b_off, ctx);
- emit_nop(ctx);
- break;
- case BPF_RET | BPF_A:
- ctx->flags |= SEEN_A;
- if (i != prog->len - 1)
- /*
- * If this is not the last instruction
- * then jump to the epilogue
- */
- emit_b(b_imm(prog->len, ctx), ctx);
- emit_reg_move(r_ret, r_A, ctx); /* delay slot */
- break;
- case BPF_RET | BPF_K:
- /*
- * It can emit two instructions so it does not fit on
- * the delay slot.
- */
- emit_load_imm(r_ret, k, ctx);
- if (i != prog->len - 1) {
- /*
- * If this is not the last instruction
- * then jump to the epilogue
- */
- emit_b(b_imm(prog->len, ctx), ctx);
- emit_nop(ctx);
- }
- break;
- case BPF_MISC | BPF_TAX:
- /* X = A */
- ctx->flags |= SEEN_X | SEEN_A;
- emit_jit_reg_move(r_X, r_A, ctx);
- break;
- case BPF_MISC | BPF_TXA:
- /* A = X */
- ctx->flags |= SEEN_A | SEEN_X;
- emit_jit_reg_move(r_A, r_X, ctx);
- break;
- /* AUX */
- case BPF_ANC | SKF_AD_PROTOCOL:
- /* A = ntohs(skb->protocol) */
- ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- protocol) != 2);
- off = offsetof(struct sk_buff, protocol);
- emit_half_load(r_A, r_skb, off, ctx);
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- /* This needs little endian fixup */
- if (cpu_has_wsbh) {
- /* R2 and later have the wsbh instruction */
- emit_wsbh(r_A, r_A, ctx);
- } else {
- /* Get first byte */
- emit_andi(r_tmp_imm, r_A, 0xff, ctx);
- /* Shift it */
- emit_sll(r_tmp, r_tmp_imm, 8, ctx);
- /* Get second byte */
- emit_srl(r_tmp_imm, r_A, 8, ctx);
- emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
- /* Put everything together in r_A */
- emit_or(r_A, r_tmp, r_tmp_imm, ctx);
- }
-#endif
- break;
- case BPF_ANC | SKF_AD_CPU:
- ctx->flags |= SEEN_A | SEEN_OFF;
- /* A = current_thread_info()->cpu */
- BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
- cpu) != 4);
- off = offsetof(struct thread_info, cpu);
- /* $28/gp points to the thread_info struct */
- emit_load(r_A, 28, off, ctx);
- break;
- case BPF_ANC | SKF_AD_IFINDEX:
- /* A = skb->dev->ifindex */
- case BPF_ANC | SKF_AD_HATYPE:
- /* A = skb->dev->type */
- ctx->flags |= SEEN_SKB | SEEN_A;
- off = offsetof(struct sk_buff, dev);
- /* Load *dev pointer */
- emit_load_ptr(r_s0, r_skb, off, ctx);
- /* error (0) in the delay slot */
- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
- b_imm(prog->len, ctx), ctx);
- emit_reg_move(r_ret, r_zero, ctx);
- if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- off = offsetof(struct net_device, ifindex);
- emit_load(r_A, r_s0, off, ctx);
- } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */
- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
- off = offsetof(struct net_device, type);
- emit_half_load_unsigned(r_A, r_s0, off, ctx);
- }
- break;
- case BPF_ANC | SKF_AD_MARK:
- ctx->flags |= SEEN_SKB | SEEN_A;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- off = offsetof(struct sk_buff, mark);
- emit_load(r_A, r_skb, off, ctx);
- break;
- case BPF_ANC | SKF_AD_RXHASH:
- ctx->flags |= SEEN_SKB | SEEN_A;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- off = offsetof(struct sk_buff, hash);
- emit_load(r_A, r_skb, off, ctx);
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG:
- ctx->flags |= SEEN_SKB | SEEN_A;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- vlan_tci) != 2);
- off = offsetof(struct sk_buff, vlan_tci);
- emit_half_load_unsigned(r_A, r_skb, off, ctx);
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
- ctx->flags |= SEEN_SKB | SEEN_A;
- emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
- if (PKT_VLAN_PRESENT_BIT)
- emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
- if (PKT_VLAN_PRESENT_BIT < 7)
- emit_andi(r_A, r_A, 1, ctx);
- break;
- case BPF_ANC | SKF_AD_PKTTYPE:
- ctx->flags |= SEEN_SKB;
-
- emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
- /* Keep only the last 3 bits */
- emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
-#ifdef __BIG_ENDIAN_BITFIELD
- /* Get the actual packet type to the lower 3 bits */
- emit_srl(r_A, r_A, 5, ctx);
-#endif
- break;
- case BPF_ANC | SKF_AD_QUEUE:
- ctx->flags |= SEEN_SKB | SEEN_A;
- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
- queue_mapping) != 2);
- BUILD_BUG_ON(offsetof(struct sk_buff,
- queue_mapping) > 0xff);
- off = offsetof(struct sk_buff, queue_mapping);
- emit_half_load_unsigned(r_A, r_skb, off, ctx);
- break;
- default:
- pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
- inst->code);
- return -1;
- }
- }
-
- /* compute offsets only during the first pass */
- if (ctx->target == NULL)
- ctx->offsets[i] = ctx->idx * 4;
-
- return 0;
-}
-
-void bpf_jit_compile(struct bpf_prog *fp)
-{
- struct jit_ctx ctx;
- unsigned int alloc_size, tmp_idx;
-
- if (!bpf_jit_enable)
- return;
-
- memset(&ctx, 0, sizeof(ctx));
-
- ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
- if (ctx.offsets == NULL)
- return;
-
- ctx.skf = fp;
-
- if (build_body(&ctx))
- goto out;
-
- tmp_idx = ctx.idx;
- build_prologue(&ctx);
- ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
- /* just to complete the ctx.idx count */
- build_epilogue(&ctx);
-
- alloc_size = 4 * ctx.idx;
- ctx.target = module_alloc(alloc_size);
- if (ctx.target == NULL)
- goto out;
-
- /* Clean it */
- memset(ctx.target, 0, alloc_size);
-
- ctx.idx = 0;
-
- /* Generate the actual JIT code */
- build_prologue(&ctx);
- build_body(&ctx);
- build_epilogue(&ctx);
-
- /* Update the icache */
- flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
-
- if (bpf_jit_enable > 1)
- /* Dump JIT code */
- bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
-
- fp->bpf_func = (void *)ctx.target;
- fp->jited = 1;
-
-out:
- kfree(ctx.offsets);
-}
-
-void bpf_jit_free(struct bpf_prog *fp)
-{
- if (fp->jited)
- module_memfree(fp->bpf_func);
-
- bpf_prog_unlock_free(fp);
-}
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
deleted file mode 100644
index 57154c5883b6..000000000000
--- a/arch/mips/net/bpf_jit_asm.S
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
- * compiler.
- *
- * Copyright (C) 2015 Imagination Technologies Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include <asm/asm.h>
-#include <asm/isa-rev.h>
-#include <asm/regdef.h>
-#include "bpf_jit.h"
-
-/* ABI
- *
- * r_skb_hl skb header length
- * r_skb_data skb data
- * r_off(a1) offset register
- * r_A BPF register A
- * r_X BPF register X
- * r_skb(a0) *skb
- * r_M *scratch memory
- * r_skb_len skb length
- * r_s0 Scratch register 0
- * r_s1 Scratch register 1
- *
- * On entry:
- * a0: *skb
- * a1: offset (imm or imm + X)
- *
- * All non-BPF-ABI registers are free for use. On return, we only
- * care about r_ret. The BPF-ABI registers are assumed to remain
- * unmodified during the entire filter operation.
- */
-
-#define skb a0
-#define offset a1
-#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
-
- /* We know better :) so prevent assembler reordering etc */
- .set noreorder
-
-#define is_offset_negative(TYPE) \
- /* If offset is negative we have more work to do */ \
- slti t0, offset, 0; \
- bgtz t0, bpf_slow_path_##TYPE##_neg; \
- /* Be careful what follows in DS. */
-
-#define is_offset_in_header(SIZE, TYPE) \
- /* Reading from header? */ \
- addiu $r_s0, $r_skb_hl, -SIZE; \
- slt t0, $r_s0, offset; \
- bgtz t0, bpf_slow_path_##TYPE; \
-
-LEAF(sk_load_word)
- is_offset_negative(word)
-FEXPORT(sk_load_word_positive)
- is_offset_in_header(4, word)
- /* Offset within header boundaries */
- PTR_ADDU t1, $r_skb_data, offset
- .set reorder
- lw $r_A, 0(t1)
- .set noreorder
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-# if MIPS_ISA_REV >= 2
- wsbh t0, $r_A
- rotr $r_A, t0, 16
-# else
- sll t0, $r_A, 24
- srl t1, $r_A, 24
- srl t2, $r_A, 8
- or t0, t0, t1
- andi t2, t2, 0xff00
- andi t1, $r_A, 0xff00
- or t0, t0, t2
- sll t1, t1, 8
- or $r_A, t0, t1
-# endif
-#endif
- jr $r_ra
- move $r_ret, zero
- END(sk_load_word)
-
-LEAF(sk_load_half)
- is_offset_negative(half)
-FEXPORT(sk_load_half_positive)
- is_offset_in_header(2, half)
- /* Offset within header boundaries */
- PTR_ADDU t1, $r_skb_data, offset
- lhu $r_A, 0(t1)
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-# if MIPS_ISA_REV >= 2
- wsbh $r_A, $r_A
-# else
- sll t0, $r_A, 8
- srl t1, $r_A, 8
- andi t0, t0, 0xff00
- or $r_A, t0, t1
-# endif
-#endif
- jr $r_ra
- move $r_ret, zero
- END(sk_load_half)
-
-LEAF(sk_load_byte)
- is_offset_negative(byte)
-FEXPORT(sk_load_byte_positive)
- is_offset_in_header(1, byte)
- /* Offset within header boundaries */
- PTR_ADDU t1, $r_skb_data, offset
- lbu $r_A, 0(t1)
- jr $r_ra
- move $r_ret, zero
- END(sk_load_byte)
-
-/*
- * call skb_copy_bits:
- * (prototype in linux/skbuff.h)
- *
- * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
- *
- * o32 mandates we leave 4 spaces for argument registers in case
- * the callee needs to use them. Even though we don't care about
- * the argument registers ourselves, we need to allocate that space
- * to remain ABI compliant since the callee may want to use that space.
- * We also allocate 2 more spaces for $r_ra and our return register (*to).
- *
- * n64 is a bit different. The *caller* will allocate the space to preserve
- * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
- * good reason but it does not matter that much really.
- *
- * (void *to) is returned in r_s0
- *
- */
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define DS_OFFSET(SIZE) (4 * SZREG)
-#else
-#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
-#endif
-#define bpf_slow_path_common(SIZE) \
- /* Quick check. Are we within reasonable boundaries? */ \
- LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
- sltu $r_s0, offset, $r_s1; \
- beqz $r_s0, fault; \
- /* Load 4th argument in DS */ \
- LONG_ADDIU a3, zero, SIZE; \
- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
- PTR_LA t0, skb_copy_bits; \
- PTR_S $r_ra, (5 * SZREG)($r_sp); \
- /* Assign low slot to a2 */ \
- PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
- jalr t0; \
- /* Reset our destination slot (DS but it's ok) */ \
- INT_S zero, (4 * SZREG)($r_sp); \
- /* \
- * skb_copy_bits returns 0 on success and -EFAULT \
- * on error. Our data live in a2. Do not bother with \
- * our data if an error has been returned. \
- */ \
- /* Restore our frame */ \
- PTR_L $r_ra, (5 * SZREG)($r_sp); \
- INT_L $r_s0, (4 * SZREG)($r_sp); \
- bltz v0, fault; \
- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
- move $r_ret, zero; \
-
-NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
- bpf_slow_path_common(4)
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-# if MIPS_ISA_REV >= 2
- wsbh t0, $r_s0
- jr $r_ra
- rotr $r_A, t0, 16
-# else
- sll t0, $r_s0, 24
- srl t1, $r_s0, 24
- srl t2, $r_s0, 8
- or t0, t0, t1
- andi t2, t2, 0xff00
- andi t1, $r_s0, 0xff00
- or t0, t0, t2
- sll t1, t1, 8
- jr $r_ra
- or $r_A, t0, t1
-# endif
-#else
- jr $r_ra
- move $r_A, $r_s0
-#endif
-
- END(bpf_slow_path_word)
-
-NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
- bpf_slow_path_common(2)
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-# if MIPS_ISA_REV >= 2
- jr $r_ra
- wsbh $r_A, $r_s0
-# else
- sll t0, $r_s0, 8
- andi t1, $r_s0, 0xff00
- andi t0, t0, 0xff00
- srl t1, t1, 8
- jr $r_ra
- or $r_A, t0, t1
-# endif
-#else
- jr $r_ra
- move $r_A, $r_s0
-#endif
-
- END(bpf_slow_path_half)
-
-NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
- bpf_slow_path_common(1)
- jr $r_ra
- move $r_A, $r_s0
-
- END(bpf_slow_path_byte)
-
-/*
- * Negative entry points
- */
- .macro bpf_is_end_of_data
- li t0, SKF_LL_OFF
- /* Reading link layer data? */
- slt t1, offset, t0
- bgtz t1, fault
- /* Be careful what follows in DS. */
- .endm
-/*
- * call skb_copy_bits:
- * (prototype in linux/filter.h)
- *
- * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
- * int k, unsigned int size)
- *
- * see above (bpf_slow_path_common) for ABI restrictions
- */
-#define bpf_negative_common(SIZE) \
- PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
- PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
- PTR_S $r_ra, (5 * SZREG)($r_sp); \
- jalr t0; \
- li a2, SIZE; \
- PTR_L $r_ra, (5 * SZREG)($r_sp); \
- /* Check return pointer */ \
- beqz v0, fault; \
- PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
- /* Preserve our pointer */ \
- move $r_s0, v0; \
- /* Set return value */ \
- move $r_ret, zero; \
-
-bpf_slow_path_word_neg:
- bpf_is_end_of_data
-NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
- bpf_negative_common(4)
- jr $r_ra
- lw $r_A, 0($r_s0)
- END(sk_load_word_negative)
-
-bpf_slow_path_half_neg:
- bpf_is_end_of_data
-NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
- bpf_negative_common(2)
- jr $r_ra
- lhu $r_A, 0($r_s0)
- END(sk_load_half_negative)
-
-bpf_slow_path_byte_neg:
- bpf_is_end_of_data
-NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
- bpf_negative_common(1)
- jr $r_ra
- lbu $r_A, 0($r_s0)
- END(sk_load_byte_negative)
-
-fault:
- jr $r_ra
- addiu $r_ret, zero, 1
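
The fast and slow word-load paths deleted above both open-code a 32-bit byte swap for little-endian cores older than MIPS32 R2, while R2 and newer cores get the two-instruction wsbh/rotr form. As a stand-alone reference for what that sll/srl/andi/or sequence computes (a sketch for illustration, not code from this patch):

#include <assert.h>
#include <stdint.h>

/* C equivalent of the pre-R2 shift/mask swap used by sk_load_word above. */
static uint32_t swap32_by_hand(uint32_t a)
{
        return (a << 24) | (a >> 24) |
               ((a >> 8) & 0x0000ff00) |
               ((a & 0x0000ff00) << 8);
}

int main(void)
{
        assert(swap32_by_hand(0x11223344) == 0x44332211);
        return 0;
}

On R2 and later the removed assembler reaches the same result with wsbh (swap bytes within halfwords) followed by rotr by 16 (swap the halfwords).
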
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 0effd3cba9a7..dfd5a4b1b779 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -22,6 +22,7 @@
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
#include <asm/uasm.h>
/* Registers used by JIT */
@@ -125,15 +126,21 @@ static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
}
/* Simply emit the instruction if the JIT memory space has been allocated */
-#define emit_instr(ctx, func, ...) \
-do { \
- if ((ctx)->target != NULL) { \
- u32 *p = &(ctx)->target[ctx->idx]; \
- uasm_i_##func(&p, ##__VA_ARGS__); \
- } \
- (ctx)->idx++; \
+#define emit_instr_long(ctx, func64, func32, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ if (IS_ENABLED(CONFIG_64BIT)) \
+ uasm_i_##func64(&p, ##__VA_ARGS__); \
+ else \
+ uasm_i_##func32(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
} while (0)
+#define emit_instr(ctx, func, ...) \
+ emit_instr_long(ctx, func, func, ##__VA_ARGS__)
+
static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
{
unsigned long target_va, base_va;
@@ -186,8 +193,9 @@ enum which_ebpf_reg {
* separate frame pointer, so BPF_REG_10 relative accesses are
* adjusted to be $sp relative.
*/
-int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
- enum which_ebpf_reg w)
+static int ebpf_to_mips_reg(struct jit_ctx *ctx,
+ const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
{
int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
insn->src_reg : insn->dst_reg;
@@ -273,17 +281,17 @@ static int gen_int_prologue(struct jit_ctx *ctx)
* If RA we are doing a function call and may need
* extra 8-byte tmp area.
*/
- stack_adjust += 16;
+ stack_adjust += 2 * sizeof(long);
if (ctx->flags & EBPF_SAVE_S0)
- stack_adjust += 8;
+ stack_adjust += sizeof(long);
if (ctx->flags & EBPF_SAVE_S1)
- stack_adjust += 8;
+ stack_adjust += sizeof(long);
if (ctx->flags & EBPF_SAVE_S2)
- stack_adjust += 8;
+ stack_adjust += sizeof(long);
if (ctx->flags & EBPF_SAVE_S3)
- stack_adjust += 8;
+ stack_adjust += sizeof(long);
if (ctx->flags & EBPF_SAVE_S4)
- stack_adjust += 8;
+ stack_adjust += sizeof(long);
BUILD_BUG_ON(MAX_BPF_STACK & 7);
locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
@@ -297,41 +305,49 @@ static int gen_int_prologue(struct jit_ctx *ctx)
* On tail call we skip this instruction, and the TCC is
* passed in $v1 from the caller.
*/
- emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+ emit_instr(ctx, addiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
if (stack_adjust)
- emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+ emit_instr_long(ctx, daddiu, addiu,
+ MIPS_R_SP, MIPS_R_SP, -stack_adjust);
else
return 0;
- store_offset = stack_adjust - 8;
+ store_offset = stack_adjust - sizeof(long);
if (ctx->flags & EBPF_SAVE_RA) {
- emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S0) {
- emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S1) {
- emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S2) {
- emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S3) {
- emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S4) {
- emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, sd, sw,
+ MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
- emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+ emit_instr_long(ctx, daddu, addu,
+ MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
return 0;
}
@@ -340,7 +356,7 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
{
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
- int store_offset = stack_adjust - 8;
+ int store_offset = stack_adjust - sizeof(long);
enum reg_val_type td;
int r0 = MIPS_R_V0;
@@ -352,33 +368,40 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
}
if (ctx->flags & EBPF_SAVE_RA) {
- emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S0) {
- emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S1) {
- emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S2) {
- emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S3) {
- emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
if (ctx->flags & EBPF_SAVE_S4) {
- emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
- store_offset -= 8;
+ emit_instr_long(ctx, ld, lw,
+ MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= sizeof(long);
}
emit_instr(ctx, jr, dest_reg);
if (stack_adjust)
- emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+ emit_instr_long(ctx, daddiu, addiu,
+ MIPS_R_SP, MIPS_R_SP, stack_adjust);
else
emit_instr(ctx, nop);
@@ -645,6 +668,10 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
s64 t64s;
int bpf_op = BPF_OP(insn->code);
+ if (IS_ENABLED(CONFIG_32BIT) && ((BPF_CLASS(insn->code) == BPF_ALU64)
+ || (bpf_op == BPF_DW)))
+ return -EINVAL;
+
switch (insn->code) {
case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
@@ -677,8 +704,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (insn->imm == 1) /* Mult by 1 is a nop */
break;
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
- emit_instr(ctx, dmultu, MIPS_R_AT, dst);
- emit_instr(ctx, mflo, dst);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, dmulu, dst, dst, MIPS_R_AT);
+ } else {
+ emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+ emit_instr(ctx, mflo, dst);
+ }
break;
case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
@@ -700,8 +731,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
if (insn->imm == 1) /* Mult by 1 is a nop */
break;
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
- emit_instr(ctx, multu, dst, MIPS_R_AT);
- emit_instr(ctx, mflo, dst);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, mulu, dst, dst, MIPS_R_AT);
+ } else {
+ emit_instr(ctx, multu, dst, MIPS_R_AT);
+ emit_instr(ctx, mflo, dst);
+ }
break;
case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
@@ -732,6 +767,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
}
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, divu_r6, dst, dst, MIPS_R_AT);
+ else
+ emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
+ break;
+ }
emit_instr(ctx, divu, dst, MIPS_R_AT);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -754,6 +796,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
}
gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, ddivu_r6, dst, dst, MIPS_R_AT);
+ else
+ emit_instr(ctx, modu, dst, dst, MIPS_R_AT);
+ break;
+ }
emit_instr(ctx, ddivu, dst, MIPS_R_AT);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -819,11 +868,23 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, and, dst, dst, src);
break;
case BPF_MUL:
- emit_instr(ctx, dmultu, dst, src);
- emit_instr(ctx, mflo, dst);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, dmulu, dst, dst, src);
+ } else {
+ emit_instr(ctx, dmultu, dst, src);
+ emit_instr(ctx, mflo, dst);
+ }
break;
case BPF_DIV:
case BPF_MOD:
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, ddivu_r6,
+ dst, dst, src);
+ else
+ emit_instr(ctx, modu, dst, dst, src);
+ break;
+ }
emit_instr(ctx, ddivu, dst, src);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -903,6 +964,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_DIV:
case BPF_MOD:
+ if (MIPS_ISA_REV >= 6) {
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, divu_r6, dst, dst, src);
+ else
+ emit_instr(ctx, modu, dst, dst, src);
+ break;
+ }
emit_instr(ctx, divu, dst, src);
if (bpf_op == BPF_DIV)
emit_instr(ctx, mflo, dst);
@@ -1006,8 +1074,15 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
/* SP known to be non-zero, movz becomes boolean not */
- emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
- emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+ if (MIPS_ISA_REV >= 6) {
+ emit_instr(ctx, seleqz, MIPS_R_T9,
+ MIPS_R_SP, MIPS_R_T8);
+ } else {
+ emit_instr(ctx, movz, MIPS_R_T9,
+ MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9,
+ MIPS_R_ZERO, MIPS_R_T8);
+ }
emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
cmp_eq = bpf_op == BPF_JGT;
dst = MIPS_R_AT;
@@ -1234,7 +1309,7 @@ jeq_common:
case BPF_JMP | BPF_CALL:
ctx->flags |= EBPF_SAVE_RA;
- t64s = (s64)insn->imm + (s64)__bpf_call_base;
+ t64s = (s64)insn->imm + (long)__bpf_call_base;
emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
/* delay slot */
@@ -1366,6 +1441,17 @@ jeq_common:
if (src < 0)
return src;
if (BPF_MODE(insn->code) == BPF_XADD) {
+ /*
+ * If mem_off does not fit within the 9 bit ll/sc
+ * instruction immediate field, use a temp reg.
+ */
+ if (MIPS_ISA_REV >= 6 &&
+ (mem_off >= BIT(8) || mem_off < -BIT(8))) {
+ emit_instr(ctx, daddiu, MIPS_R_T6,
+ dst, mem_off);
+ mem_off = 0;
+ dst = MIPS_R_T6;
+ }
switch (BPF_SIZE(insn->code)) {
case BPF_W:
if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
@@ -1720,7 +1806,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
unsigned int image_size;
u8 *image_ptr;
- if (!prog->jit_requested || !cpu_has_mips64r2)
+ if (!prog->jit_requested || MIPS_ISA_REV < 2)
return prog;
tmp = bpf_jit_blind_constants(prog);
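
One detail in the BPF_XADD hunk above deserves a worked check: MIPS R6 gives ll/sc a 9-bit signed offset, so mem_off can only be folded into the instruction when it lies in [-256, 255]; anything outside that range is first added into a scratch register with daddiu. A stand-alone sketch of the same range test (the helper name is illustrative, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Inverse of the JIT's rejection test: mem_off >= BIT(8) || mem_off < -BIT(8). */
static bool fits_s9(long mem_off)
{
        return mem_off >= -256 && mem_off <= 255;
}

int main(void)
{
        printf("%d %d %d\n", fits_s9(255), fits_s9(-256), fits_s9(256));
        /* prints: 1 1 0 -- only the last case needs the temp-register fallback */
        return 0;
}
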
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c4f976593061..d6de4cb2e31c 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o
obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
+obj-$(CONFIG_PCI_XTALK_BRIDGE) += pci-xtalk-bridge.o
#
# These are still pretty much in the old state, watch, go blind.
#
@@ -39,7 +40,7 @@ obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o
obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
obj-$(CONFIG_PMC_MSP7120_FPGA) += fixup-pmcmsp.o ops-pmcmsp.o
-obj-$(CONFIG_SGI_IP27) += ops-bridge.o pci-ip27.o
+obj-$(CONFIG_SGI_IP27) += pci-ip27.o
obj-$(CONFIG_SGI_IP32) += fixup-ip32.o ops-mace.o pci-ip32.o
obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
deleted file mode 100644
index df95b0da08f2..000000000000
--- a/arch/mips/pci/ops-bridge.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1999, 2000, 04, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- */
-#include <linux/pci.h>
-#include <asm/paccess.h>
-#include <asm/pci/bridge.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Most of the IOC3 PCI config register aren't present
- * we emulate what is needed for a normal PCI enumeration
- */
-static u32 emulate_ioc3_cfg(int where, int size)
-{
- if (size == 1 && where == 0x3d)
- return 0x01;
- else if (size == 2 && where == 0x3c)
- return 0x0100;
- else if (size == 4 && where == 0x3c)
- return 0x00000100;
-
- return 0;
-}
-
-/*
- * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
- * not really documented, so right now I can't write code which uses it.
- * Therefore we use type 0 accesses for now even though they won't work
- * correctly for PCI-to-PCI bridges.
- *
- * The function is complicated by the ultimate brokenness of the IOC3 chip
- * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
- * accesses and does only decode parts of it's address space.
- */
-
-static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask;
- int res;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
- if (size == 1)
- res = get_dbe(*value, (u8 *) addr);
- else if (size == 2)
- res = get_dbe(*value, (u16 *) addr);
- else
- res = get_dbe(*value, (u32 *) addr);
-
- return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
- *value = emulate_ioc3_cfg(where, size);
- return PCIBIOS_SUCCESSFUL;
- }
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- *value = (cf >> shift) & mask;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int busno = bus->number;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask;
- int res;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
- if (size == 1)
- res = get_dbe(*value, (u8 *) addr);
- else if (size == 2)
- res = get_dbe(*value, (u16 *) addr);
- else
- res = get_dbe(*value, (u32 *) addr);
-
- return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
- *value = emulate_ioc3_cfg(where, size);
- return PCIBIOS_SUCCESSFUL;
- }
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- *value = (cf >> shift) & mask;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 * value)
-{
- if (!pci_is_root_bus(bus))
- return pci_conf1_read_config(bus, devfn, where, size, value);
-
- return pci_conf0_read_config(bus, devfn, where, size, value);
-}
-
-static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- volatile void *addr;
- u32 cf, shift, mask, smask;
- int res;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
-
- if (size == 1) {
- res = put_dbe(value, (u8 *) addr);
- } else if (size == 2) {
- res = put_dbe(value, (u16 *) addr);
- } else {
- res = put_dbe(value, (u32 *) addr);
- }
-
- if (res)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
- return PCIBIOS_SUCCESSFUL;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- smask = mask << shift;
-
- cf = (cf & ~smask) | ((value & mask) << shift);
- if (put_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(devfn);
- int fn = PCI_FUNC(devfn);
- int busno = bus->number;
- volatile void *addr;
- u32 cf, shift, mask, smask;
- int res;
-
- bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
- addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- /*
- * IOC3 is broken beyond belief ... Don't even give the
- * generic PCI code a chance to look at it for real ...
- */
- if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
- goto is_ioc3;
-
- addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
-
- if (size == 1) {
- res = put_dbe(value, (u8 *) addr);
- } else if (size == 2) {
- res = put_dbe(value, (u16 *) addr);
- } else {
- res = put_dbe(value, (u32 *) addr);
- }
-
- if (res)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-
-is_ioc3:
-
- /*
- * IOC3 special handling
- */
- if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
- return PCIBIOS_SUCCESSFUL;
-
- addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
-
- if (get_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- shift = ((where & 3) << 3);
- mask = (0xffffffffU >> ((4 - size) << 3));
- smask = mask << shift;
-
- cf = (cf & ~smask) | ((value & mask) << shift);
- if (put_dbe(cf, (u32 *) addr))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
-{
- if (!pci_is_root_bus(bus))
- return pci_conf1_write_config(bus, devfn, where, size, value);
-
- return pci_conf0_write_config(bus, devfn, where, size, value);
-}
-
-struct pci_ops bridge_pci_ops = {
- .read = pci_read_config,
- .write = pci_write_config,
-};
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 3c177b4d0609..441eb9383b20 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -7,162 +7,7 @@
* Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/smp.h>
-#include <linux/dma-direct.h>
-#include <asm/sn/arch.h>
#include <asm/pci/bridge.h>
-#include <asm/paccess.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn0/hub.h>
-
-/*
- * Max #PCI busses we can handle; ie, max #PCI bridges.
- */
-#define MAX_PCI_BUSSES 40
-
-/*
- * XXX: No kmalloc available when we do our crosstalk scan,
- * we should try to move it later in the boot process.
- */
-static struct bridge_controller bridges[MAX_PCI_BUSSES];
-
-extern struct pci_ops bridge_pci_ops;
-
-int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
-{
- unsigned long offset = NODE_OFFSET(nasid);
- struct bridge_controller *bc;
- static int num_bridges = 0;
- int slot;
-
- pci_set_flags(PCI_PROBE_ONLY);
-
- printk("a bridge\n");
-
- /* XXX: kludge alert.. */
- if (!num_bridges)
- ioport_resource.end = ~0UL;
-
- bc = &bridges[num_bridges];
-
- bc->pc.pci_ops = &bridge_pci_ops;
- bc->pc.mem_resource = &bc->mem;
- bc->pc.io_resource = &bc->io;
-
- bc->pc.index = num_bridges;
-
- bc->mem.name = "Bridge PCI MEM";
- bc->pc.mem_offset = offset;
- bc->mem.start = 0;
- bc->mem.end = ~0UL;
- bc->mem.flags = IORESOURCE_MEM;
-
- bc->io.name = "Bridge IO MEM";
- bc->pc.io_offset = offset;
- bc->io.start = 0UL;
- bc->io.end = ~0UL;
- bc->io.flags = IORESOURCE_IO;
-
- bc->widget_id = widget_id;
- bc->nasid = nasid;
-
- bc->baddr = (u64)masterwid << 60 | PCI64_ATTR_BAR;
-
- /*
- * point to this bridge
- */
- bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id);
-
- /*
- * Clear all pending interrupts.
- */
- bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
-
- /*
- * Until otherwise set up, assume all interrupts are from slot 0
- */
- bridge_write(bc, b_int_device, 0x0);
-
- /*
- * swap pio's to pci mem and io space (big windows)
- */
- bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP |
- BRIDGE_CTRL_MEM_SWAP);
-#ifdef CONFIG_PAGE_SIZE_4KB
- bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#else /* 16kB or larger */
- bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
-#endif
-
- /*
- * Hmm... IRIX sets additional bits in the address which
- * are documented as reserved in the bridge docs.
- */
- bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16));
- bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/
- bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */
- bridge_write(bc, b_int_enable, 0);
-
- for (slot = 0; slot < 8; slot ++) {
- bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
- bc->pci_int[slot] = -1;
- }
- bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
-
- register_pci_controller(&bc->pc);
-
- num_bridges++;
-
- return 0;
-}
-
-/*
- * All observed requests have pin == 1. We could have a global here, that
- * gets incremented and returned every time - unfortunately, pci_map_irq
- * may be called on the same device over and over, and need to return the
- * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
- *
- * A given PCI device, in general, should be able to intr any of the cpus
- * on any one of the hubs connected to its xbow.
- */
-int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return 0;
-}
-
-static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
-{
- while (dev->bus->parent) {
- /* Move up the chain of bridges. */
- dev = dev->bus->self;
- }
-
- return dev;
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- struct pci_dev *rdev = bridge_root_dev(dev);
- int slot = PCI_SLOT(rdev->devfn);
- int irq;
-
- irq = bc->pci_int[slot];
- if (irq == -1) {
- irq = request_bridge_irq(bc, slot);
- if (irq < 0)
- return irq;
-
- bc->pci_int[slot] = irq;
- }
- dev->irq = irq;
-
- return 0;
-}
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
@@ -177,29 +22,6 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
return dma_addr & ~(0xffUL << 56);
}
-/*
- * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
- * to find the slot number in sense of the bridge device register.
- * XXX This also means multiple devices might rely on conflicting bridge
- * settings.
- */
-
-static inline void pci_disable_swapping(struct pci_dev *dev)
-{
- struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
- struct bridge_regs *bridge = bc->base;
- int slot = PCI_SLOT(dev->devfn);
-
- /* Turn off byte swapping */
- bridge->b_device[slot].reg &= ~BRIDGE_DEV_SWAP_DIR;
- bridge->b_widget.w_tflush; /* Flush */
-}
-
-static void pci_fixup_ioc3(struct pci_dev *d)
-{
- pci_disable_swapping(d);
-}
-
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
{
@@ -209,6 +31,3 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif /* CONFIG_NUMA */
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
- pci_fixup_ioc3);
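
The piece of pci-ip27.c that survives is the DMA address translation: the new bridge driver (next hunk) folds the master widget number and PCI64_ATTR_BAR into the upper bits of bc->baddr, and __dma_to_phys() above recovers the CPU physical address by clearing bits 56-63. A quick arithmetic check of that masking (the attribute byte below is purely illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t paddr = 0x0000001234567000ULL; /* example physical address */
        uint64_t attr  = 0xa6ULL << 56;         /* illustrative widget/attribute byte */
        uint64_t dma   = attr | paddr;          /* bus address with attribute bits set */

        /* __dma_to_phys(): dma_addr & ~(0xffUL << 56) */
        assert((dma & ~(0xffULL << 56)) == paddr);
        return 0;
}
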
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
new file mode 100644
index 000000000000..bcf7f559789a
--- /dev/null
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2003 Christoph Hellwig (hch@lst.de)
+ * Copyright (C) 1999, 2000, 04 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/dma-direct.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+
+#include <asm/pci/bridge.h>
+#include <asm/paccess.h>
+#include <asm/sn/irq_alloc.h>
+
+/*
+ * Most of the IOC3 PCI config registers aren't present;
+ * we emulate what is needed for a normal PCI enumeration
+ */
+static u32 emulate_ioc3_cfg(int where, int size)
+{
+ if (size == 1 && where == 0x3d)
+ return 0x01;
+ else if (size == 2 && where == 0x3c)
+ return 0x0100;
+ else if (size == 4 && where == 0x3c)
+ return 0x00000100;
+
+ return 0;
+}
+
+static void bridge_disable_swapping(struct pci_dev *dev)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ int slot = PCI_SLOT(dev->devfn);
+
+ /* Turn off byte swapping */
+ bridge_clr(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bridge_read(bc, b_widget.w_tflush); /* Flush */
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3,
+ bridge_disable_swapping);
+
+
+/*
+ * The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
+ * not really documented, so right now I can't write code which uses it.
+ * Therefore we use type 0 accesses for now even though they won't work
+ * correctly for PCI-to-PCI bridges.
+ *
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
+ * which is used in SGI systems. The IOC3 can only handle 32-bit PCI
+ * accesses and only decodes parts of its address space.
+ */
+static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+ *value = emulate_ioc3_cfg(where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ *value = (cf >> shift) & mask;
+
+ return PCIBIOS_SUCCESSFUL;
+}
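
The IOC3 branch above synthesises 1- and 2-byte reads from a full 32-bit register read; the shift/mask arithmetic is compact enough that a worked example helps (a stand-alone sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Pull a 1/2/4-byte config value at offset 'where' out of the aligned
 * 32-bit word 'cf' it lives in, exactly as the IOC3 path does. */
static uint32_t extract(uint32_t cf, int where, int size)
{
        uint32_t shift = (where & 3) << 3;                  /* byte offset -> bit offset */
        uint32_t mask  = 0xffffffffU >> ((4 - size) << 3);  /* keep the low 'size' bytes */

        return (cf >> shift) & mask;
}

int main(void)
{
        /* A 2-byte read at offset 0x3e of cf == 0xdeadbeef:
         * shift = 16, mask = 0xffff -> 0xdead. */
        printf("0x%x\n", extract(0xdeadbeef, 0x3e, 2));
        return 0;
}
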
+
+static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int busno = bus->number;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = get_dbe(*value, (u8 *)addr);
+ else if (size == 2)
+ res = get_dbe(*value, (u16 *)addr);
+ else
+ res = get_dbe(*value, (u32 *)addr);
+
+ return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
+ *value = emulate_ioc3_cfg(where, size);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ *value = (cf >> shift) & mask;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_read_config(bus, devfn, where, size, value);
+
+ return pci_conf0_read_config(bus, devfn, where, size, value);
+}
+
+static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ void *addr;
+ u32 cf, shift, mask, smask;
+ int res;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+ return PCIBIOS_SUCCESSFUL;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ smask = mask << shift;
+
+ cf = (cf & ~smask) | ((value & mask) << shift);
+ if (put_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+ struct bridge_regs *bridge = bc->base;
+ int slot = PCI_SLOT(devfn);
+ int fn = PCI_FUNC(devfn);
+ int busno = bus->number;
+ void *addr;
+ u32 cf, shift, mask, smask;
+ int res;
+
+ bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11));
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+ * IOC3 is broken beyond belief ... Don't even give the
+ * generic PCI code a chance to look at it for real ...
+ */
+ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
+ goto is_ioc3;
+
+ addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];
+
+ if (size == 1)
+ res = put_dbe(value, (u8 *)addr);
+ else if (size == 2)
+ res = put_dbe(value, (u16 *)addr);
+ else
+ res = put_dbe(value, (u32 *)addr);
+
+ if (res)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+
+is_ioc3:
+
+ /*
+ * IOC3 special handling
+ */
+ if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
+ return PCIBIOS_SUCCESSFUL;
+
+ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
+ if (get_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ shift = ((where & 3) << 3);
+ mask = (0xffffffffU >> ((4 - size) << 3));
+ smask = mask << shift;
+
+ cf = (cf & ~smask) | ((value & mask) << shift);
+ if (put_dbe(cf, (u32 *)addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (!pci_is_root_bus(bus))
+ return pci_conf1_write_config(bus, devfn, where, size, value);
+
+ return pci_conf0_write_config(bus, devfn, where, size, value);
+}
+
+static struct pci_ops bridge_pci_ops = {
+ .read = pci_read_config,
+ .write = pci_write_config,
+};
+
+struct bridge_irq_chip_data {
+ struct bridge_controller *bc;
+ nasid_t nasid;
+};
+
+static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ bool force)
+{
+#ifdef CONFIG_NUMA
+ struct bridge_irq_chip_data *data = d->chip_data;
+ int bit = d->parent_data->hwirq;
+ int pin = d->hwirq;
+ nasid_t nasid;
+ int ret, cpu;
+
+ ret = irq_chip_set_affinity_parent(d, mask, force);
+ if (ret >= 0) {
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ bridge_write(data->bc, b_int_addr[pin].addr,
+ (((data->bc->intr_addr >> 30) & 0x30000) |
+ bit | (nasid << 8)));
+ bridge_read(data->bc, b_wid_tflush);
+ }
+ return ret;
+#else
+ return irq_chip_set_affinity_parent(d, mask, force);
+#endif
+}
+
+struct irq_chip bridge_irq_chip = {
+ .name = "BRIDGE",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = bridge_set_affinity
+};
+
+static int bridge_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct bridge_irq_chip_data *data;
+ struct irq_alloc_info *info = arg;
+ int ret;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret >= 0) {
+ data->bc = info->ctrl;
+ data->nasid = info->nasid;
+ irq_domain_set_info(domain, virq, info->pin, &bridge_irq_chip,
+ data, handle_level_irq, NULL, NULL);
+ } else {
+ kfree(data);
+ }
+
+ return ret;
+}
+
+static void bridge_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs)
+ return;
+
+ kfree(irqd->chip_data);
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+static int bridge_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+ struct bridge_controller *bc = data->bc;
+ int bit = irqd->parent_data->hwirq;
+ int pin = irqd->hwirq;
+ u32 device;
+
+ bridge_write(bc, b_int_addr[pin].addr,
+ (((bc->intr_addr >> 30) & 0x30000) |
+ bit | (data->nasid << 8)));
+ bridge_set(bc, b_int_enable, (1 << pin));
+ bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
+
+ /*
+ * Enable sending of an interrupt clear packet to the hub on a high to
+ * low transition of the interrupt pin.
+ *
+ * IRIX sets additional bits in the address which are documented as
+ * reserved in the bridge docs.
+ */
+ bridge_set(bc, b_int_mode, (1UL << pin));
+
+ /*
+ * We assume the bridge to have a 1:1 mapping between devices
+ * (slots) and intr pins.
+ */
+ device = bridge_read(bc, b_int_device);
+ device &= ~(7 << (pin*3));
+ device |= (pin << (pin*3));
+ bridge_write(bc, b_int_device, device);
+
+ bridge_read(bc, b_wid_tflush);
+ return 0;
+}
+
+static void bridge_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *irqd)
+{
+ struct bridge_irq_chip_data *data = irqd->chip_data;
+
+ bridge_clr(data->bc, b_int_enable, (1 << irqd->hwirq));
+ bridge_read(data->bc, b_wid_tflush);
+}
+
+static const struct irq_domain_ops bridge_domain_ops = {
+ .alloc = bridge_domain_alloc,
+ .free = bridge_domain_free,
+ .activate = bridge_domain_activate,
+ .deactivate = bridge_domain_deactivate
+};
+
+/*
+ * All observed requests have pin == 1. We could have a global here, that
+ * gets incremented and returned every time - unfortunately, pci_map_irq
+ * may be called on the same device over and over, and need to return the
+ * same value. On O2000, pin can be 0 or 1, and PCI slots can be [0..7].
+ *
+ * A given PCI device, in general, should be able to intr any of the cpus
+ * on any one of the hubs connected to its xbow.
+ */
+static int bridge_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
+ struct irq_alloc_info info;
+ int irq;
+
+ irq = bc->pci_int[slot];
+ if (irq == -1) {
+ info.ctrl = bc;
+ info.nasid = bc->nasid;
+ info.pin = slot;
+
+ irq = irq_domain_alloc_irqs(bc->domain, 1, bc->nasid, &info);
+ if (irq < 0)
+ return irq;
+
+ bc->pci_int[slot] = irq;
+ }
+ return irq;
+}
+
+static int bridge_probe(struct platform_device *pdev)
+{
+ struct xtalk_bridge_platform_data *bd = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bridge_controller *bc;
+ struct pci_host_bridge *host;
+ struct irq_domain *domain, *parent;
+ struct fwnode_handle *fn;
+ int slot;
+ int err;
+
+ parent = irq_get_default_host();
+ if (!parent)
+ return -ENODEV;
+ fn = irq_domain_alloc_named_fwnode("BRIDGE");
+ if (!fn)
+ return -ENOMEM;
+ domain = irq_domain_create_hierarchy(parent, 0, 8, fn,
+ &bridge_domain_ops, NULL);
+ irq_domain_free_fwnode(fn);
+ if (!domain)
+ return -ENOMEM;
+
+ pci_set_flags(PCI_PROBE_ONLY);
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*bc));
+ if (!host) {
+ err = -ENOMEM;
+ goto err_remove_domain;
+ }
+
+ bc = pci_host_bridge_priv(host);
+
+ bc->busn.name = "Bridge PCI busn";
+ bc->busn.start = 0;
+ bc->busn.end = 0xff;
+ bc->busn.flags = IORESOURCE_BUS;
+
+ bc->domain = domain;
+
+ pci_add_resource_offset(&host->windows, &bd->mem, bd->mem_offset);
+ pci_add_resource_offset(&host->windows, &bd->io, bd->io_offset);
+ pci_add_resource(&host->windows, &bc->busn);
+
+ err = devm_request_pci_bus_resources(dev, &host->windows);
+ if (err < 0)
+ goto err_free_resource;
+
+ bc->nasid = bd->nasid;
+
+ bc->baddr = (u64)bd->masterwid << 60 | PCI64_ATTR_BAR;
+ bc->base = (struct bridge_regs *)bd->bridge_addr;
+ bc->intr_addr = bd->intr_addr;
+
+ /*
+ * Clear all pending interrupts.
+ */
+ bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR);
+
+ /*
+ * Until otherwise set up, assume all interrupts are from slot 0
+ */
+ bridge_write(bc, b_int_device, 0x0);
+
+ /*
+ * disable swapping for big windows
+ */
+ bridge_clr(bc, b_wid_control,
+ BRIDGE_CTRL_IO_SWAP | BRIDGE_CTRL_MEM_SWAP);
+#ifdef CONFIG_PAGE_SIZE_4KB
+ bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#else /* 16kB or larger */
+ bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE);
+#endif
+
+ /*
+ * Hmm... IRIX sets additional bits in the address which
+ * are documented as reserved in the bridge docs.
+ */
+ bridge_write(bc, b_wid_int_upper,
+ ((bc->intr_addr >> 32) & 0xffff) | (bd->masterwid << 16));
+ bridge_write(bc, b_wid_int_lower, bc->intr_addr & 0xffffffff);
+ bridge_write(bc, b_dir_map, (bd->masterwid << 20)); /* DMA */
+ bridge_write(bc, b_int_enable, 0);
+
+ for (slot = 0; slot < 8; slot++) {
+ bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR);
+ bc->pci_int[slot] = -1;
+ }
+ bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */
+
+ host->dev.parent = dev;
+ host->sysdata = bc;
+ host->busnr = 0;
+ host->ops = &bridge_pci_ops;
+ host->map_irq = bridge_map_irq;
+ host->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(host);
+ if (err < 0)
+ goto err_free_resource;
+
+ pci_bus_claim_resources(host->bus);
+ pci_bus_add_devices(host->bus);
+
+ platform_set_drvdata(pdev, host->bus);
+
+ return 0;
+
+err_free_resource:
+ pci_free_resource_list(&host->windows);
+err_remove_domain:
+ irq_domain_remove(domain);
+ return err;
+}
+
+static int bridge_remove(struct platform_device *pdev)
+{
+ struct pci_bus *bus = platform_get_drvdata(pdev);
+ struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
+
+ irq_domain_remove(bc->domain);
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static struct platform_driver bridge_driver = {
+ .probe = bridge_probe,
+ .remove = bridge_remove,
+ .driver = {
+ .name = "xtalk-bridge",
+ }
+};
+
+builtin_platform_driver(bridge_driver);
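
The xtalk_bridge_platform_data definition itself is not shown in this excerpt (it comes from include/linux/platform_data/xtalk-bridge.h). From the two users visible here it carries roughly the following fields -- the types are inferred from usage, so treat this as a sketch rather than the authoritative header:

#include <linux/ioport.h>       /* struct resource */
#include <asm/sn/types.h>       /* nasid_t */

struct xtalk_bridge_platform_data {
        struct resource mem;            /* PCI MEM window, paired with mem_offset */
        struct resource io;             /* PCI IO window, paired with io_offset */
        unsigned long   bridge_addr;    /* widget register base (RAW_NODE_SWIN_BASE) */
        unsigned long   intr_addr;      /* hub interrupt-pending register address */
        unsigned long   mem_offset;
        unsigned long   io_offset;
        nasid_t         nasid;
        int             masterwid;
};
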
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index e284e89183cc..7acbb50c1dcd 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -39,12 +39,12 @@ choice
Select the devicetree.
config DTB_PIC32_NONE
- bool "None"
+ bool "None"
config DTB_PIC32_MZDA_SK
- bool "PIC32MZDA Starter Kit"
- depends on PIC32MZDA
- select BUILTIN_DTB
+ bool "PIC32MZDA Starter Kit"
+ depends on PIC32MZDA
+ select BUILTIN_DTB
endchoice
diff --git a/arch/mips/pnx833x/Platform b/arch/mips/pnx833x/Platform
index 794526caab12..6b1a847d593f 100644
--- a/arch/mips/pnx833x/Platform
+++ b/arch/mips/pnx833x/Platform
@@ -1,5 +1,5 @@
# NXP STB225
platform-$(CONFIG_SOC_PNX833X) += pnx833x/
-cflags-$(CONFIG_SOC_PNX833X) += -Iarch/mips/include/asm/mach-pnx833x
+cflags-$(CONFIG_SOC_PNX833X) += -I $(srctree)/arch/mips/include/asm/mach-pnx833x
load-$(CONFIG_NXP_STB220) += 0xffffffff80001000
load-$(CONFIG_NXP_STB225) += 0xffffffff80001000
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 37ad26716579..0b2002e02a47 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -3,6 +3,7 @@
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <asm/paccess.h>
#include <asm/sgi/ip22.h>
@@ -25,6 +26,8 @@ static struct sgiwd93_platform_data sgiwd93_0_pd = {
.irq = SGI_WD93_0_IRQ,
};
+static u64 sgiwd93_0_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device sgiwd93_0_device = {
.name = "sgiwd93",
.id = 0,
@@ -32,6 +35,8 @@ static struct platform_device sgiwd93_0_device = {
.resource = sgiwd93_0_resources,
.dev = {
.platform_data = &sgiwd93_0_pd,
+ .dma_mask = &sgiwd93_0_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -49,6 +54,8 @@ static struct sgiwd93_platform_data sgiwd93_1_pd = {
.irq = SGI_WD93_1_IRQ,
};
+static u64 sgiwd93_1_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device sgiwd93_1_device = {
.name = "sgiwd93",
.id = 1,
@@ -56,6 +63,8 @@ static struct platform_device sgiwd93_1_device = {
.resource = sgiwd93_1_resources,
.dev = {
.platform_data = &sgiwd93_1_pd,
+ .dma_mask = &sgiwd93_1_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -96,6 +105,8 @@ static struct resource sgiseeq_0_resources[] = {
static struct sgiseeq_platform_data eth0_pd;
+static u64 sgiseeq_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device eth0_device = {
.name = "sgiseeq",
.id = 0,
@@ -103,6 +114,8 @@ static struct platform_device eth0_device = {
.resource = sgiseeq_0_resources,
.dev = {
.platform_data = &eth0_pd,
+ .dma_mask = &sgiseeq_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
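
The ip22-platform.c hunks give each static platform device an explicit 32-bit DMA mask, presumably so the DMA mapping core sees a valid dev->dma_mask once the wd93/seeq drivers start mapping buffers. The usual driver-side counterpart looks like the sketch below (a hypothetical probe function, not code from this patch):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: confirm 32-bit addressing before any mapping. */
static int example_probe(struct platform_device *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return err;     /* no usable DMA addressing on this bus */

        /* dma_alloc_coherent()/dma_map_single() are safe to call from here on */
        return 0;
}
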
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 6074efeff894..066b33f50bcc 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -184,5 +184,7 @@ void __init plat_mem_setup(void)
ioc3_eth_init();
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0UL;
set_io_port_base(IO_BASE);
}
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index a32f843cdbe0..37be04975831 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -12,22 +12,20 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/sched.h>
#include <asm/io.h>
#include <asm/irq_cpu.h>
-#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>
+#include <asm/sn/irq_alloc.h>
struct hub_irq_data {
- struct bridge_controller *bc;
u64 *irq_mask[2];
cpuid_t cpu;
- int bit;
- int pin;
};
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);
@@ -54,7 +52,7 @@ static void enable_hub_irq(struct irq_data *d)
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
- set_bit(hd->bit, mask);
+ set_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
@@ -64,71 +62,11 @@ static void disable_hub_irq(struct irq_data *d)
struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);
- clear_bit(hd->bit, mask);
+ clear_bit(d->hwirq, mask);
__raw_writeq(mask[0], hd->irq_mask[0]);
__raw_writeq(mask[1], hd->irq_mask[1]);
}
-static unsigned int startup_bridge_irq(struct irq_data *d)
-{
- struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
- struct bridge_controller *bc;
- nasid_t nasid;
- u32 device;
- int pin;
-
- if (!hd)
- return -EINVAL;
-
- pin = hd->pin;
- bc = hd->bc;
-
- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu));
- bridge_write(bc, b_int_addr[pin].addr,
- (0x20000 | hd->bit | (nasid << 8)));
- bridge_set(bc, b_int_enable, (1 << pin));
- bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
-
- /*
- * Enable sending of an interrupt clear packt to the hub on a high to
- * low transition of the interrupt pin.
- *
- * IRIX sets additional bits in the address which are documented as
- * reserved in the bridge docs.
- */
- bridge_set(bc, b_int_mode, (1UL << pin));
-
- /*
- * We assume the bridge to have a 1:1 mapping between devices
- * (slots) and intr pins.
- */
- device = bridge_read(bc, b_int_device);
- device &= ~(7 << (pin*3));
- device |= (pin << (pin*3));
- bridge_write(bc, b_int_device, device);
-
- bridge_read(bc, b_wid_tflush);
-
- enable_hub_irq(d);
-
- return 0; /* Never anything pending. */
-}
-
-static void shutdown_bridge_irq(struct irq_data *d)
-{
- struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
- struct bridge_controller *bc;
-
- if (!hd)
- return;
-
- disable_hub_irq(d);
-
- bc = hd->bc;
- bridge_clr(bc, b_int_enable, (1 << hd->pin));
- bridge_read(bc, b_wid_tflush);
-}
-
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
nasid_t nasid;
@@ -144,9 +82,6 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
}
-
- /* Make sure it's not already pending when we connect it. */
- REMOTE_HUB_CLR_INTR(nasid, hd->bit);
}
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
@@ -163,7 +98,7 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
setup_hub_mask(hd, mask);
if (irqd_is_started(d))
- startup_bridge_irq(d);
+ enable_hub_irq(d);
irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));
@@ -172,20 +107,22 @@ static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
static struct irq_chip hub_irq_type = {
.name = "HUB",
- .irq_startup = startup_bridge_irq,
- .irq_shutdown = shutdown_bridge_irq,
.irq_mask = disable_hub_irq,
.irq_unmask = enable_hub_irq,
.irq_set_affinity = set_affinity_hub_irq,
};
-int request_bridge_irq(struct bridge_controller *bc, int pin)
+static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
+ struct irq_alloc_info *info = arg;
struct hub_irq_data *hd;
struct hub_data *hub;
struct irq_desc *desc;
int swlevel;
- int irq;
+
+ if (nr_irqs > 1 || !info)
+ return -EINVAL;
hd = kzalloc(sizeof(*hd), GFP_KERNEL);
if (!hd)
@@ -196,46 +133,41 @@ int request_bridge_irq(struct bridge_controller *bc, int pin)
kfree(hd);
return -EAGAIN;
}
- irq = swlevel + IP27_HUB_IRQ_BASE;
-
- hd->bc = bc;
- hd->bit = swlevel;
- hd->pin = pin;
- irq_set_chip_data(irq, hd);
+ irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
+ handle_level_irq, NULL, NULL);
/* use CPU connected to nearest hub */
- hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid));
+ hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
setup_hub_mask(hd, &hub->h_cpus);
- desc = irq_to_desc(irq);
- desc->irq_common_data.node = bc->nasid;
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+
+ desc = irq_to_desc(virq);
+ desc->irq_common_data.node = info->nasid;
cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);
- return irq;
+ return 0;
}
-void ip27_hub_irq_init(void)
+static void hub_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
{
- int i;
+ struct irq_data *irqd;
- for (i = IP27_HUB_IRQ_BASE;
- i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++)
- irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq);
-
- /*
- * Some interrupts are reserved by hardware or by software convention.
- * Mark these as reserved right away so they won't be used accidentally
- * later.
- */
- for (i = 0; i <= BASE_PCI_IRQ; i++)
- set_bit(i, hub_irq_map);
-
- set_bit(IP_PEND0_6_63, hub_irq_map);
+ if (nr_irqs > 1)
+ return;
- for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
- set_bit(i, hub_irq_map);
+ irqd = irq_domain_get_irq_data(domain, virq);
+ if (irqd && irqd->chip_data)
+ kfree(irqd->chip_data);
}
+static const struct irq_domain_ops hub_domain_ops = {
+ .alloc = hub_domain_alloc,
+ .free = hub_domain_free,
+};
+
/*
* This code is unnecessarily complex, because we do
* intr enabling. Basically, once we grab the set of intrs we need
@@ -252,7 +184,9 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
u64 pend0;
+ int irq;
/* copied from Irix intpend0() */
pend0 = LOCAL_HUB_L(PI_INT_PEND0);
@@ -276,7 +210,14 @@ static void ip27_do_irq_mask0(struct irq_desc *desc)
generic_smp_call_function_interrupt();
} else
#endif
- generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE);
+ {
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend0));
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
+ }
LOCAL_HUB_L(PI_INT_PEND0);
}
@@ -285,7 +226,9 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
{
cpuid_t cpu = smp_processor_id();
unsigned long *mask = per_cpu(irq_enable_mask, cpu);
+ struct irq_domain *domain;
u64 pend1;
+ int irq;
/* copied from Irix intpend0() */
pend1 = LOCAL_HUB_L(PI_INT_PEND1);
@@ -294,7 +237,12 @@ static void ip27_do_irq_mask1(struct irq_desc *desc)
if (!pend1)
return;
- generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64);
+ domain = irq_desc_get_handler_data(desc);
+ irq = irq_linear_revmap(domain, __ffs(pend1) + 64);
+ if (irq)
+ generic_handle_irq(irq);
+ else
+ spurious_interrupt();
LOCAL_HUB_L(PI_INT_PEND1);
}
@@ -325,11 +273,41 @@ void install_ipi(void)
void __init arch_init_irq(void)
{
+ struct irq_domain *domain;
+ struct fwnode_handle *fn;
+ int i;
+
mips_cpu_irq_init();
- ip27_hub_irq_init();
+
+ /*
+ * Some interrupts are reserved by hardware or by software convention.
+ * Mark these as reserved right away so they won't be used accidentally
+ * later.
+ */
+ for (i = 0; i <= BASE_PCI_IRQ; i++)
+ set_bit(i, hub_irq_map);
+
+ set_bit(IP_PEND0_6_63, hub_irq_map);
+
+ for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
+ set_bit(i, hub_irq_map);
+
+ fn = irq_domain_alloc_named_fwnode("HUB");
+ WARN_ON(fn == NULL);
+ if (!fn)
+ return;
+ domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
+ &hub_domain_ops, NULL);
+ WARN_ON(domain == NULL);
+ if (!domain)
+ return;
+
+ irq_set_default_host(domain);
irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
- irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
+ domain);
irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
- irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1);
+ irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
+ domain);
}
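
In the reworked dispatch above, each invocation services the lowest-numbered pending source: __ffs() of the masked PI_INT_PEND0/1 word gives the hardware interrupt number, and irq_linear_revmap() turns it into the Linux irq. A tiny stand-alone illustration of that bit arithmetic (a userspace stand-in, not kernel code):

#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(): index of the lowest set bit. */
static unsigned long lowest_set_bit(unsigned long x)
{
        return __builtin_ctzl(x);
}

int main(void)
{
        unsigned long pend0 = 0x28;     /* bits 3 and 5 pending */

        printf("%lu\n", lowest_set_bit(pend0));         /* prints 3 */
        return 0;
}
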
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index ce06aaa115ae..bd5cb855c6e5 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -9,6 +9,9 @@
#include <linux/kernel.h>
#include <linux/smp.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/xtalk-bridge.h>
+#include <asm/sn/addrs.h>
#include <asm/sn/types.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/hub.h>
@@ -20,7 +23,48 @@
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
#define BASE_XBOW_PORT 8 /* Lowest external port */
-extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
+static void bridge_platform_create(nasid_t nasid, int widget, int masterwid)
+{
+ struct xtalk_bridge_platform_data *bd;
+ struct platform_device *pdev;
+ unsigned long offset;
+
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ goto no_mem;
+ pdev = platform_device_alloc("xtalk-bridge", PLATFORM_DEVID_AUTO);
+ if (!pdev) {
+ kfree(bd);
+ goto no_mem;
+ }
+
+ offset = NODE_OFFSET(nasid);
+
+ bd->bridge_addr = RAW_NODE_SWIN_BASE(nasid, widget);
+ bd->intr_addr = BIT_ULL(47) + 0x01800000 + PI_INT_PEND_MOD;
+ bd->nasid = nasid;
+ bd->masterwid = masterwid;
+
+ bd->mem.name = "Bridge PCI MEM";
+ bd->mem.start = offset + (widget << SWIN_SIZE_BITS);
+ bd->mem.end = bd->mem.start + SWIN_SIZE - 1;
+ bd->mem.flags = IORESOURCE_MEM;
+ bd->mem_offset = offset;
+
+ bd->io.name = "Bridge PCI IO";
+ bd->io.start = offset + (widget << SWIN_SIZE_BITS);
+ bd->io.end = bd->io.start + SWIN_SIZE - 1;
+ bd->io.flags = IORESOURCE_IO;
+ bd->io_offset = offset;
+
+ platform_device_add_data(pdev, bd, sizeof(*bd));
+ platform_device_add(pdev);
+ pr_info("xtalk:n%d/%x bridge widget\n", nasid, widget);
+ return;
+
+no_mem:
+ pr_warn("xtalk:n%d/%x bridge create out of memory\n", nasid, widget);
+}
static int probe_one_port(nasid_t nasid, int widget, int masterwid)
{
@@ -31,13 +75,10 @@ static int probe_one_port(nasid_t nasid, int widget, int masterwid)
(RAW_NODE_SWIN_BASE(nasid, widget) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
- printk(KERN_INFO "Cpu %d, Nasid 0x%x, widget 0x%x (partnum 0x%x) is ",
- smp_processor_id(), nasid, widget, partnum);
-
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
case XBRIDGE_WIDGET_PART_NUM:
- bridge_probe(nasid, widget, masterwid);
+ bridge_platform_create(nasid, widget, masterwid);
break;
default:
break;
@@ -52,8 +93,6 @@ static int xbow_probe(nasid_t nasid)
klxbow_t *xbow_p;
unsigned masterwid, i;
- printk("is xbow\n");
-
/*
* found xbow, so may have multiple bridges
* need to probe xbow
@@ -117,19 +156,17 @@ static void xtalk_probe_node(cnodeid_t nid)
(RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
- printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
- smp_processor_id(), nasid, partnum);
-
switch (partnum) {
case BRIDGE_WIDGET_PART_NUM:
- bridge_probe(nasid, 0x8, 0xa);
+ bridge_platform_create(nasid, 0x8, 0xa);
break;
case XBOW_WIDGET_PART_NUM:
case XXBOW_WIDGET_PART_NUM:
+ pr_info("xtalk:n%d/0 xbow widget\n", nasid);
xbow_probe(nasid);
break;
default:
- printk(" unknown widget??\n");
+ pr_info("xtalk:n%d/0 unknown widget (0x%x)\n", nasid, partnum);
break;
}
}
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 70a1ab66d252..46537c2ca86a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -26,6 +26,7 @@
#include <linux/leds.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <asm/bootinfo.h>
#include <asm/idle.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 0ede4deb8181..7221df24cb23 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -46,9 +46,7 @@ endif
VDSO_LDFLAGS := \
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
$(addprefix -Wl$(comma),$(filter -E%,$(KBUILD_CFLAGS))) \
- -nostdlib -shared \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
- $(call cc-ldoption, -Wl$(comma)--build-id)
+ -nostdlib -shared -Wl,--hash-style=sysv -Wl,--build-id
GCOV_PROFILE := n
UBSAN_SANITIZE := n
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index addb7f5f5264..2245169c72af 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -4,7 +4,7 @@
#
config NDS32
- def_bool y
+ def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -51,23 +51,20 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_CSUM
- def_bool y
+ def_bool y
config GENERIC_HWEIGHT
- def_bool y
+ def_bool y
config GENERIC_LOCKBREAK
- def_bool y
- depends on PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
def_bool y
+ depends on PREEMPT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
config STACKTRACE_SUPPORT
- def_bool y
+ def_bool y
config FIX_EARLYCON_MEM
def_bool y
@@ -82,11 +79,11 @@ config NR_CPUS
default 1
config MMU
- def_bool y
+ def_bool y
config NDS32_BUILTIN_DTB
- string "Builtin DTB"
- default ""
+ string "Builtin DTB"
+ default ""
help
User can use it to specify the dts of the SoC
endmenu
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 64ceff7ab99b..f37d5004a01c 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -4,11 +4,8 @@ generic-y += bitops.h
generic-y += bug.h
generic-y += bugs.h
generic-y += checksum.h
-generic-y += clkdev.h
generic-y += cmpxchg.h
-generic-y += cmpxchg-local.h
generic-y += compat.h
-generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
@@ -27,18 +24,16 @@ generic-y += kdebug.h
generic-y += kmap_types.h
generic-y += kprobes.h
generic-y += kvm_para.h
-generic-y += limits.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += parport.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
-generic-y += sizes.h
generic-y += switch_to.h
generic-y += timex.h
generic-y += topology.h
diff --git a/arch/nds32/include/asm/assembler.h b/arch/nds32/include/asm/assembler.h
index c3855782a541..5e7c56926049 100644
--- a/arch/nds32/include/asm/assembler.h
+++ b/arch/nds32/include/asm/assembler.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_ASSEMBLER_H__
diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h
index faafc373ea6c..16413172fd50 100644
--- a/arch/nds32/include/asm/barrier.h
+++ b/arch/nds32/include/asm/barrier.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_ASM_BARRIER_H
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index 7414fcbbab4e..e75212c76b20 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_BITFIELD_H__
diff --git a/arch/nds32/include/asm/cache.h b/arch/nds32/include/asm/cache.h
index 347db4881c5f..fc3c41b59169 100644
--- a/arch/nds32/include/asm/cache.h
+++ b/arch/nds32/include/asm/cache.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHE_H__
diff --git a/arch/nds32/include/asm/cache_info.h b/arch/nds32/include/asm/cache_info.h
index 38ec458ba543..e89d8078f3a6 100644
--- a/arch/nds32/include/asm/cache_info.h
+++ b/arch/nds32/include/asm/cache_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
struct cache_info {
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 8b26198d51bb..d9ac7e6408ef 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_CACHEFLUSH_H__
diff --git a/arch/nds32/include/asm/current.h b/arch/nds32/include/asm/current.h
index b4dcd22b7bcb..65d30096142b 100644
--- a/arch/nds32/include/asm/current.h
+++ b/arch/nds32/include/asm/current.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_CURRENT_H
diff --git a/arch/nds32/include/asm/delay.h b/arch/nds32/include/asm/delay.h
index 519ba97acb6e..56ea3894f8f8 100644
--- a/arch/nds32/include/asm/delay.h
+++ b/arch/nds32/include/asm/delay.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_DELAY_H__
diff --git a/arch/nds32/include/asm/elf.h b/arch/nds32/include/asm/elf.h
index 95f3ea253e4c..1c8e56d7013d 100644
--- a/arch/nds32/include/asm/elf.h
+++ b/arch/nds32/include/asm/elf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASMNDS32_ELF_H
@@ -10,14 +10,13 @@
#include <asm/ptrace.h>
#include <asm/fpu.h>
+#include <linux/elf-em.h>
typedef unsigned long elf_greg_t;
typedef unsigned long elf_freg_t[3];
extern unsigned int elf_hwcap;
-#define EM_NDS32 167
-
#define R_NDS32_NONE 0
#define R_NDS32_16_RELA 19
#define R_NDS32_32_RELA 20
diff --git a/arch/nds32/include/asm/fixmap.h b/arch/nds32/include/asm/fixmap.h
index 0e60e153a71a..5a4bf11e5800 100644
--- a/arch/nds32/include/asm/fixmap.h
+++ b/arch/nds32/include/asm/fixmap.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_FIXMAP_H
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h
index baf178bf1d0b..5213c65c2e0b 100644
--- a/arch/nds32/include/asm/futex.h
+++ b/arch/nds32/include/asm/futex.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_FUTEX_H__
diff --git a/arch/nds32/include/asm/highmem.h b/arch/nds32/include/asm/highmem.h
index 425d546cb059..b3a82c97ded3 100644
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_HIGHMEM_H
diff --git a/arch/nds32/include/asm/io.h b/arch/nds32/include/asm/io.h
index 71cd226d6863..16f262322b8f 100644
--- a/arch/nds32/include/asm/io.h
+++ b/arch/nds32/include/asm/io.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_IO_H
@@ -55,8 +55,6 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
#define __iormb() rmb()
#define __iowmb() wmb()
-#define mmiowb() __asm__ __volatile__ ("msync all" : : : "memory");
-
/*
* {read,write}{b,w,l,q}_relaxed() are like the regular version, but
* are not guaranteed to provide ordering against spinlocks or memory
diff --git a/arch/nds32/include/asm/irqflags.h b/arch/nds32/include/asm/irqflags.h
index 2bfd00f8bc48..fb45ec46bb1b 100644
--- a/arch/nds32/include/asm/irqflags.h
+++ b/arch/nds32/include/asm/irqflags.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <asm/nds32.h>
diff --git a/arch/nds32/include/asm/l2_cache.h b/arch/nds32/include/asm/l2_cache.h
index 37dd5ef61de8..3ea48e19e6de 100644
--- a/arch/nds32/include/asm/l2_cache.h
+++ b/arch/nds32/include/asm/l2_cache.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef L2_CACHE_H
diff --git a/arch/nds32/include/asm/linkage.h b/arch/nds32/include/asm/linkage.h
index e708c8bdb926..a696469abb70 100644
--- a/arch/nds32/include/asm/linkage.h
+++ b/arch/nds32/include/asm/linkage.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_LINKAGE_H
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 60efc726b56e..940d32842793 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_MEMORY_H
@@ -15,14 +15,6 @@
#define PHYS_OFFSET (0x0)
#endif
-#ifndef __virt_to_bus
-#define __virt_to_bus __virt_to_phys
-#endif
-
-#ifndef __bus_to_virt
-#define __bus_to_virt __phys_to_virt
-#endif
-
/*
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
diff --git a/arch/nds32/include/asm/mmu.h b/arch/nds32/include/asm/mmu.h
index 88b9ee8c1064..89d63afee455 100644
--- a/arch/nds32/include/asm/mmu.h
+++ b/arch/nds32/include/asm/mmu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_MMU_H
diff --git a/arch/nds32/include/asm/mmu_context.h b/arch/nds32/include/asm/mmu_context.h
index fd7d13cefccc..b8fd3d189fdc 100644
--- a/arch/nds32/include/asm/mmu_context.h
+++ b/arch/nds32/include/asm/mmu_context.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_MMU_CONTEXT_H
diff --git a/arch/nds32/include/asm/module.h b/arch/nds32/include/asm/module.h
index 16cf9c7237ad..a3a08e993c65 100644
--- a/arch/nds32/include/asm/module.h
+++ b/arch/nds32/include/asm/module.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_MODULE_H
diff --git a/arch/nds32/include/asm/nds32.h b/arch/nds32/include/asm/nds32.h
index 68c38151c3e4..4994f6a9e0a0 100644
--- a/arch/nds32/include/asm/nds32.h
+++ b/arch/nds32/include/asm/nds32.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_NDS32_H_
diff --git a/arch/nds32/include/asm/page.h b/arch/nds32/include/asm/page.h
index 947f0491c9a7..8feb1fa12f01 100644
--- a/arch/nds32/include/asm/page.h
+++ b/arch/nds32/include/asm/page.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Copyright (C) 2005-2017 Andes Technology Corporation
*/
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 3c5fee5b5759..3cbc749c79aa 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_PGALLOC_H
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h
index 9f52db930c00..c70cc56bec09 100644
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_PGTABLE_H
@@ -6,7 +6,7 @@
#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
-#include <asm-generic/sizes.h>
+#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/nds32.h>
diff --git a/arch/nds32/include/asm/proc-fns.h b/arch/nds32/include/asm/proc-fns.h
index bedc4f59e064..27c617fa77af 100644
--- a/arch/nds32/include/asm/proc-fns.h
+++ b/arch/nds32/include/asm/proc-fns.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_PROCFNS_H__
diff --git a/arch/nds32/include/asm/processor.h b/arch/nds32/include/asm/processor.h
index 72024f8bc129..b82369c7659d 100644
--- a/arch/nds32/include/asm/processor.h
+++ b/arch/nds32/include/asm/processor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PROCESSOR_H
diff --git a/arch/nds32/include/asm/ptrace.h b/arch/nds32/include/asm/ptrace.h
index c4538839055c..919ee223620c 100644
--- a/arch/nds32/include/asm/ptrace.h
+++ b/arch/nds32/include/asm/ptrace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/asm/shmparam.h b/arch/nds32/include/asm/shmparam.h
index fd1cff64b68e..3aeee946973d 100644
--- a/arch/nds32/include/asm/shmparam.h
+++ b/arch/nds32/include/asm/shmparam.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_SHMPARAM_H
diff --git a/arch/nds32/include/asm/string.h b/arch/nds32/include/asm/string.h
index 179272caa540..cae8fe16de98 100644
--- a/arch/nds32/include/asm/string.h
+++ b/arch/nds32/include/asm/string.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_STRING_H
diff --git a/arch/nds32/include/asm/swab.h b/arch/nds32/include/asm/swab.h
index e01a755a37d2..362a466f2976 100644
--- a/arch/nds32/include/asm/swab.h
+++ b/arch/nds32/include/asm/swab.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_SWAB_H__
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
index 671ebd357496..899b2fb4b52f 100644
--- a/arch/nds32/include/asm/syscall.h
+++ b/arch/nds32/include/asm/syscall.h
@@ -1,10 +1,11 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_NDS32_SYSCALL_H
#define _ASM_NDS32_SYSCALL_H 1
+#include <uapi/linux/audit.h>
#include <linux/err.h>
struct task_struct;
struct pt_regs;
@@ -145,4 +146,12 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
}
+
+static inline int
+syscall_get_arch(struct task_struct *task)
+{
+ return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+ ? AUDIT_ARCH_NDS32BE : AUDIT_ARCH_NDS32;
+}
+
#endif /* _ASM_NDS32_SYSCALL_H */
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h
index da32101b455d..f3b16f602cb5 100644
--- a/arch/nds32/include/asm/syscalls.h
+++ b/arch/nds32/include/asm/syscalls.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_SYSCALLS_H
diff --git a/arch/nds32/include/asm/thread_info.h b/arch/nds32/include/asm/thread_info.h
index bff741ff337b..c135111ec44e 100644
--- a/arch/nds32/include/asm/thread_info.h
+++ b/arch/nds32/include/asm/thread_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_THREAD_INFO_H
@@ -42,7 +42,6 @@ struct thread_info {
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
- * TIF_USEDFPU - FPU was used by this task this quantum (SMP)
* TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
*/
#define TIF_SIGPENDING 1
@@ -50,7 +49,6 @@ struct thread_info {
#define TIF_SINGLESTEP 3
#define TIF_NOTIFY_RESUME 4 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
-#define TIF_USEDFPU 16
#define TIF_POLLING_NRFLAG 17
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
diff --git a/arch/nds32/include/asm/tlb.h b/arch/nds32/include/asm/tlb.h
index b35ae5eae3ab..a8aff1c8b4f4 100644
--- a/arch/nds32/include/asm/tlb.h
+++ b/arch/nds32/include/asm/tlb.h
@@ -1,25 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASMNDS32_TLB_H
#define __ASMNDS32_TLB_H
-#define tlb_start_vma(tlb,vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define tlb_end_vma(tlb,vma) \
- do { \
- if(!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, addr) do { } while (0)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
diff --git a/arch/nds32/include/asm/tlbflush.h b/arch/nds32/include/asm/tlbflush.h
index 9b411f401903..97155366ea01 100644
--- a/arch/nds32/include/asm/tlbflush.h
+++ b/arch/nds32/include/asm/tlbflush.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_TLBFLUSH_H
@@ -42,6 +42,5 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t * pte);
-void tlb_migrate_finish(struct mm_struct *mm);
#endif
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 116598b47c4d..8916ad9f9f13 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMANDES_UACCESS_H
diff --git a/arch/nds32/include/asm/unistd.h b/arch/nds32/include/asm/unistd.h
index b586a2862beb..bf5e2d440913 100644
--- a/arch/nds32/include/asm/unistd.h
+++ b/arch/nds32/include/asm/unistd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/nds32/include/asm/vdso.h b/arch/nds32/include/asm/vdso.h
index af2c6afc2469..89b113ffc3dc 100644
--- a/arch/nds32/include/asm/vdso.h
+++ b/arch/nds32/include/asm/vdso.h
@@ -1,5 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * SPDX-License-Identifier: GPL-2.0
* Copyright (C) 2005-2017 Andes Technology Corporation
*/
diff --git a/arch/nds32/include/asm/vdso_datapage.h b/arch/nds32/include/asm/vdso_datapage.h
index 79db5a12ca5e..74c68802021e 100644
--- a/arch/nds32/include/asm/vdso_datapage.h
+++ b/arch/nds32/include/asm/vdso_datapage.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2012 ARM Limited
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_VDSO_DATAPAGE_H
@@ -20,6 +20,7 @@ struct vdso_data {
u32 xtime_clock_sec; /* CLOCK_REALTIME - seconds */
u32 cs_mult; /* clocksource multiplier */
u32 cs_shift; /* Cycle to nanosecond divisor (power of two) */
+ u32 hrtimer_res; /* hrtimer resolution */
u64 cs_cycle_last; /* last cycle value */
u64 cs_mask; /* clocksource mask */
diff --git a/arch/nds32/include/asm/vdso_timer_info.h b/arch/nds32/include/asm/vdso_timer_info.h
index 50ba117cff12..328439ce37db 100644
--- a/arch/nds32/include/asm/vdso_timer_info.h
+++ b/arch/nds32/include/asm/vdso_timer_info.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
extern struct timer_info_t timer_info;
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h
index 2d3213f5e595..b5d58ea8decb 100644
--- a/arch/nds32/include/uapi/asm/auxvec.h
+++ b/arch/nds32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_AUXVEC_H
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h
index a23f6f3a2468..511e653c709d 100644
--- a/arch/nds32/include/uapi/asm/byteorder.h
+++ b/arch/nds32/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_BYTEORDER_H__
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
index 4cdca9b23974..73793662815c 100644
--- a/arch/nds32/include/uapi/asm/cachectl.h
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_CACHECTL
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h
index e3fb723ee362..2977534a6bd3 100644
--- a/arch/nds32/include/uapi/asm/param.h
+++ b/arch/nds32/include/uapi/asm/param.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PARAM_H
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h
index 358c99e399d0..1a6e01c00e6f 100644
--- a/arch/nds32/include/uapi/asm/ptrace.h
+++ b/arch/nds32/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __UAPI_ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index 58afc416473e..628ff6b75825 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_SIGCONTEXT_H
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index 4ec8f543103f..c691735017ed 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_STAT64
diff --git a/arch/nds32/kernel/.gitignore b/arch/nds32/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/nds32/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
index 0a7bc696dd55..aab98e447feb 100644
--- a/arch/nds32/kernel/cacheinfo.c
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -13,7 +13,7 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
this_leaf->level = level;
this_leaf->type = type;
this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
- this_leaf->number_of_sets = CACHE_SET(cache_type);;
+ this_leaf->number_of_sets = CACHE_SET(cache_type);
this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
this_leaf->size = this_leaf->number_of_sets *
this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
index 97ba15cd4180..1df02a793364 100644
--- a/arch/nds32/kernel/ex-exit.S
+++ b/arch/nds32/kernel/ex-exit.S
@@ -163,7 +163,7 @@ resume_kernel:
gie_disable
lwi $t0, [tsk+#TSK_TI_PREEMPT]
bnez $t0, no_work_pending
-need_resched:
+
lwi $t0, [tsk+#TSK_TI_FLAGS]
andi $p1, $t0, #_TIF_NEED_RESCHED
beqz $p1, no_work_pending
@@ -173,7 +173,7 @@ need_resched:
beqz $t0, no_work_pending
jal preempt_schedule_irq
- b need_resched
+ b no_work_pending
#endif
/*
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index 8a41372551ff..fd2a54b8cd57 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -7,7 +7,6 @@
#ifndef CONFIG_DYNAMIC_FTRACE
extern void (*ftrace_trace_function)(unsigned long, unsigned long,
struct ftrace_ops*, struct pt_regs*);
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
extern void ftrace_graph_caller(void);
noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
index db64b78b1232..fcefb62606ca 100644
--- a/arch/nds32/kernel/head.S
+++ b/arch/nds32/kernel/head.S
@@ -7,7 +7,7 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/thread_info.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c
index 5ecebd0e60cb..20719e42ae36 100644
--- a/arch/nds32/kernel/nds32_ksyms.c
+++ b/arch/nds32/kernel/nds32_ksyms.c
@@ -23,9 +23,3 @@ EXPORT_SYMBOL(memzero);
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__arch_clear_user);
-
-/* cache handling */
-EXPORT_SYMBOL(cpu_icache_inval_all);
-EXPORT_SYMBOL(cpu_dcache_wbinval_all);
-EXPORT_SYMBOL(cpu_dma_inval_range);
-EXPORT_SYMBOL(cpu_dma_wb_range);
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
index 016f15891f6d..90bcae6f8554 100644
--- a/arch/nds32/kernel/vdso.c
+++ b/arch/nds32/kernel/vdso.c
@@ -220,6 +220,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift;
+ vdso_data->hrtimer_res = hrtimer_resolution;
vdso_write_end(vdso_data);
}
diff --git a/arch/nds32/kernel/vdso/.gitignore b/arch/nds32/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..f8b69d84238e
--- /dev/null
+++ b/arch/nds32/kernel/vdso/.gitignore
@@ -0,0 +1 @@
+vdso.lds
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
index e6c50a701313..8792fda19a64 100644
--- a/arch/nds32/kernel/vdso/Makefile
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -11,10 +11,8 @@ obj-vdso := note.o datapage.o sigreturn.o gettimeofday.o
targets := $(obj-vdso) vdso.so vdso.so.dbg
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
-ccflags-y += -fPIC -Wl,-shared -g
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib -fPIC -Wl,-shared -g \
+ -Wl,-soname=linux-vdso.so.1 -Wl,--hash-style=sysv
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n
@@ -28,7 +26,7 @@ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
$(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
@@ -40,9 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Generate VDSO offsets using helper script
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
-define cmd_vdsosym
- $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
-endef
+ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
@@ -65,7 +61,7 @@ gettimeofday.o : gettimeofday.c FORCE
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $(real-prereqs) -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdsocc = VDSOA $@
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
index 038721af40e3..b02581891c33 100644
--- a/arch/nds32/kernel/vdso/gettimeofday.c
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -208,6 +208,8 @@ static notrace int clock_getres_fallback(clockid_t _clk_id,
notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
{
+ struct vdso_data *vdata = __get_datapage();
+
if (res == NULL)
return 0;
switch (clk_id) {
@@ -215,7 +217,7 @@ notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
case CLOCK_MONOTONIC:
case CLOCK_MONOTONIC_RAW:
res->tv_sec = 0;
- res->tv_nsec = CLOCK_REALTIME_RES;
+ res->tv_nsec = vdata->hrtimer_res;
break;
case CLOCK_REALTIME_COARSE:
case CLOCK_MONOTONIC_COARSE:
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index 1d03633f89a9..55703b03d172 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -252,18 +252,6 @@ void __init mem_init(void)
return;
}
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
@@ -272,7 +260,7 @@ void __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
- pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];;
+ pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];
if (pgprot_val(flags)) {
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 4ef15a61b7bc..26a9c760a98b 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -23,7 +23,7 @@ config NIOS2
select SPARSE_IRQ
select USB_ARCH_HAS_HCD if USB_SUPPORT
select CPU_NO_EFFICIENT_FFS
- select ARCH_DISCARD_MEMBLOCK
+ select MMU_GATHER_NO_RANGE if MMU
config GENERIC_CSUM
def_bool y
@@ -40,9 +40,6 @@ config NO_IOPORT_MAP
config FPU
def_bool n
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool n
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 88a667d12aaa..a8ffdd007f6c 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -27,12 +27,12 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += spinlock.h
generic-y += topology.h
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h
index d7624ed06efb..c4f3f8b86f28 100644
--- a/arch/nios2/include/asm/syscall.h
+++ b/arch/nios2/include/asm/syscall.h
@@ -17,6 +17,7 @@
#ifndef __ASM_NIOS2_SYSCALL_H__
#define __ASM_NIOS2_SYSCALL_H__
+#include <uapi/linux/audit.h>
#include <linux/err.h>
#include <linux/sched.h>
@@ -79,4 +80,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->r9 = *args;
}
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_NIOS2;
+}
+
#endif
diff --git a/arch/nios2/include/asm/tlb.h b/arch/nios2/include/asm/tlb.h
index d3bc648e08b5..f9f2e27e32dd 100644
--- a/arch/nios2/include/asm/tlb.h
+++ b/arch/nios2/include/asm/tlb.h
@@ -11,22 +11,12 @@
#ifndef _ASM_NIOS2_TLB_H
#define _ASM_NIOS2_TLB_H
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
extern void set_mmu_pid(unsigned long pid);
/*
- * NiosII doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for the area to be unmapped.
+ * NIOS32 does have flush_tlb_range(), but it lacks a limit and fallback to
+ * full mm invalidation. So use flush_tlb_mm() for everything.
*/
-#define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
-
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 16cea5776b87..2c609c2516b2 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -82,18 +82,6 @@ void __init mmu_init(void)
flush_tlb_all();
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void __ref free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index a5e361fbb75a..7cfb20555b10 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -36,6 +36,7 @@ config OPENRISC
select OMPIC if SMP
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IRQ_MULTI_HANDLER
+ select MMU_GATHER_NO_RANGE if MMU
config CPU_BIG_ENDIAN
def_bool y
@@ -43,12 +44,6 @@ config CPU_BIG_ENDIAN
config MMU
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool n
-
config GENERIC_HWEIGHT
def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 22aa97136c01..164be10062bc 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += pci.h
generic-y += percpu.h
@@ -33,7 +34,6 @@ generic-y += qspinlock.h
generic-y += qrwlock_types.h
generic-y += qrwlock.h
generic-y += sections.h
-generic-y += segment.h
generic-y += shmparam.h
generic-y += switch_to.h
generic-y += topology.h
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
index b4ff07c1baed..61de227f53a1 100644
--- a/arch/openrisc/include/asm/syscall.h
+++ b/arch/openrisc/include/asm/syscall.h
@@ -68,7 +68,7 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_OPENRISC;
}
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
index fa4376a4515d..92d8a4209884 100644
--- a/arch/openrisc/include/asm/tlb.h
+++ b/arch/openrisc/include/asm/tlb.h
@@ -20,14 +20,10 @@
#define __ASM_OPENRISC_TLB_H__
/*
- * or32 doesn't need any special per-pte or
- * per-vma handling..
+ * OpenRISC doesn't have an efficient flush_tlb_range() so use flush_tlb_mm()
+ * for everything.
*/
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <linux/pagemap.h>
#include <asm-generic/tlb.h>
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index eb97a8e7c8aa..e8fb2a764f46 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -30,7 +30,6 @@
#include <linux/elf.h>
#include <asm/thread_info.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index c605bdad1746..17c00d06d91b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -39,7 +39,6 @@
#include <linux/device.h>
#include <asm/sections.h>
-#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/types.h>
#include <asm/setup.h>
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index d8981cbb852a..6ed7293ef007 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -35,7 +35,6 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
-#include <asm/segment.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/unwinder.h>
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index caeb4184e8a6..e63cb4a91a3e 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -32,7 +32,6 @@
#include <linux/blkdev.h> /* for initrd_* */
#include <linux/pagemap.h>
-#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
@@ -223,15 +222,3 @@ void __init mem_init(void)
mem_init_done = 1;
return;
}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 6c253a2e86bc..7f9f50161dfe 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -26,7 +26,6 @@
#include <linux/mm.h>
#include <linux/init.h>
-#include <asm/segment.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c8e621296092..09407ed1aacd 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -36,6 +36,7 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
+ select ARCH_DISCARD_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
@@ -44,6 +45,8 @@ config PARISC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HASH
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
@@ -54,6 +57,9 @@ config PARISC
select CPU_NO_EFFICIENT_FFS
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
+ select HAVE_ARCH_KGDB
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -75,12 +81,6 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
default n
@@ -311,21 +311,16 @@ config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on 64BIT
-config ARCH_DISCONTIGMEM_ENABLE
+config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on 64BIT
config ARCH_FLATMEM_ENABLE
def_bool y
-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
def_bool y
- depends on ARCH_DISCONTIGMEM_ENABLE
-
-config NODES_SHIFT
- int
- default "3"
- depends on NEED_MULTIPLE_NODES
+ depends on ARCH_SPARSEMEM_ENABLE
source "kernel/Kconfig.hz"
diff --git a/arch/parisc/boot/compressed/head.S b/arch/parisc/boot/compressed/head.S
index 5aba20fa48aa..e8b798fd0cf0 100644
--- a/arch/parisc/boot/compressed/head.S
+++ b/arch/parisc/boot/compressed/head.S
@@ -22,7 +22,7 @@
__HEAD
ENTRY(startup)
- .level LEVEL
+ .level PA_ASM_LEVEL
#define PSW_W_SM 0x200
#define PSW_W_BIT 36
@@ -63,7 +63,7 @@ $bss_loop:
load32 BOOTADDR(decompress_kernel),%r3
#ifdef CONFIG_64BIT
- .level LEVEL
+ .level PA_ASM_LEVEL
ssm PSW_W_SM, %r0 /* set W-bit */
depdi 0, 31, 32, %r3
#endif
@@ -72,7 +72,7 @@ $bss_loop:
startup_continue:
#ifdef CONFIG_64BIT
- .level LEVEL
+ .level PA_ASM_LEVEL
rsm PSW_W_SM, %r0 /* clear W-bit */
#endif
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index 2556bb181813..2d395998f524 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -145,14 +145,13 @@ static int putchar(int c)
void __noreturn error(char *x)
{
- puts("\n\n");
- puts(x);
- puts("\n\n -- System halted");
+ if (x) puts(x);
+ puts("\n -- System halted\n");
while (1) /* wait forever */
;
}
-static int print_hex(unsigned long num)
+static int print_num(unsigned long num, int base)
{
const char hex[] = "0123456789abcdef";
char str[40];
@@ -160,12 +159,14 @@ static int print_hex(unsigned long num)
str[i--] = '\0';
do {
- str[i--] = hex[num & 0x0f];
- num >>= 4;
+ str[i--] = hex[num % base];
+ num = num / base;
} while (num);
- str[i--] = 'x';
- str[i] = '0';
+ if (base == 16) {
+ str[i--] = 'x';
+ str[i] = '0';
+ } else i++;
puts(&str[i]);
return 0;
@@ -187,8 +188,9 @@ put:
if (fmt[++i] == '%')
goto put;
+ print_num(va_arg(args, unsigned long),
+ fmt[i] == 'x' ? 16:10);
++i;
- print_hex(va_arg(args, unsigned long));
}
va_end(args);
@@ -327,8 +329,15 @@ unsigned long decompress_kernel(unsigned int started_wide,
free_mem_end_ptr = rd_start;
#endif
- if (free_mem_ptr >= free_mem_end_ptr)
- error("Kernel too big for machine.");
+ if (free_mem_ptr >= free_mem_end_ptr) {
+ int free_ram;
+ free_ram = (free_mem_ptr >> 20) + 1;
+ if (free_ram < 32)
+ free_ram = 32;
+ printf("\nKernel requires at least %d MB RAM.\n",
+ free_ram);
+ error(NULL);
+ }
#ifdef DEBUG
printf("\n");
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 37ae4b57c001..a8f9bbef0975 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -14,7 +14,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA7100LC=y
CONFIG_SMP=y
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 9bcd0c903dbb..005ee8ad0446 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -10,16 +10,15 @@ generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += kdebug.h
-generic-y += kprobes.h
generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += seccomp.h
-generic-y += segment.h
generic-y += trace_clock.h
generic-y += user.h
generic-y += vga.h
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index c17ec0ee6e7c..d85738a7bbe6 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -61,14 +61,14 @@
#define LDCW ldcw,co
#define BL b,l
# ifdef CONFIG_64BIT
-# define LEVEL 2.0w
+# define PA_ASM_LEVEL 2.0w
# else
-# define LEVEL 2.0
+# define PA_ASM_LEVEL 2.0
# endif
#else
#define LDCW ldcw
#define BL bl
-#define LEVEL 1.1
+#define PA_ASM_LEVEL 1.1
#endif
#ifdef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 006fb939cac8..73ca89a47f49 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,9 +24,6 @@
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-/* Read-only memory is marked before mark_rodata_ro() is called. */
-#define __ro_after_init __read_mostly
-
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
void disable_sr_hashing(void); /* turns off space register hashing */
@@ -44,22 +41,22 @@ void parisc_setup_cache_timing(void);
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
- : : "r" (addr))
+ : : "r" (addr) : "memory")
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
- : : "r" (addr))
+ : : "r" (addr) : "memory")
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" \
ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
- : : "r" (addr))
+ : : "r" (addr) : "memory")
#define asm_io_fdc(addr) asm volatile("fdc %%r0(%0)" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
- : : "r" (addr))
+ : : "r" (addr) : "memory")
#define asm_io_sync() asm volatile("sync" \
ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
- ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
+ ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")
#endif /* ! __ASSEMBLY__ */
diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h
index f7c3a0905de4..288da73d4cc0 100644
--- a/arch/parisc/include/asm/fixmap.h
+++ b/arch/parisc/include/asm/fixmap.h
@@ -15,17 +15,34 @@
* from areas congruently mapped with user space. It is 8MB large
* and must be 16MB aligned */
#define TMPALIAS_MAP_START ((__PAGE_OFFSET) - 16*1024*1024)
+
+#define FIXMAP_SIZE (FIX_BITMAP_COUNT << PAGE_SHIFT)
+#define FIXMAP_START (TMPALIAS_MAP_START - FIXMAP_SIZE)
/* This is the kernel area for all maps (vmalloc, dma etc.) most
* usually, it extends up to TMPALIAS_MAP_START. Virtual addresses
* 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
#define KERNEL_MAP_START (GATEWAY_PAGE_SIZE)
-#define KERNEL_MAP_END (TMPALIAS_MAP_START)
+#define KERNEL_MAP_END (FIXMAP_START)
#ifndef __ASSEMBLY__
+
+
+enum fixed_addresses {
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
+ FIX_BITMAP_COUNT
+};
+
extern void *parisc_vmalloc_start;
#define PCXL_DMA_MAP_SIZE (8*1024*1024)
#define VMALLOC_START ((unsigned long)parisc_vmalloc_start)
#define VMALLOC_END (KERNEL_MAP_END)
+
+#define __fix_to_virt(_x) (FIXMAP_START + ((_x) << PAGE_SHIFT))
+
+void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
+void clear_fixmap(enum fixed_addresses idx);
+
#endif /*__ASSEMBLY__*/
#endif /*_ASM_FIXMAP_H*/
diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h
index d6e1ed145031..9d3d7737c58b 100644
--- a/arch/parisc/include/asm/hardware.h
+++ b/arch/parisc/include/asm/hardware.h
@@ -120,7 +120,7 @@ extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
extern void init_parisc_bus(void);
extern struct device *hwpath_to_device(struct hardware_path *modpath);
extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
-
+extern int machine_has_merced_bus(void);
/* inventory.c: */
extern void do_memory_inventory(void);
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 30a8315d5c07..93d37010b375 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -229,8 +229,6 @@ static inline void writeq(unsigned long long q, volatile void __iomem *addr)
#define writel_relaxed(l, addr) writel(l, addr)
#define writeq_relaxed(q, addr) writeq(q, addr)
-#define mmiowb() do { } while (0)
-
void memset_io(volatile void __iomem *addr, unsigned char val, int count);
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count);
void memcpy_toio(volatile void __iomem *dst, const void *src, int count);
diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h
new file mode 100644
index 000000000000..7efb1aa2f7f8
--- /dev/null
+++ b/arch/parisc/include/asm/jump_label.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PARISC_JUMP_LABEL_H
+#define _ASM_PARISC_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/assembly.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm_volatile_goto("1:\n\t"
+ "b,n %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".word 1b - ., %l[l_yes] - .\n\t"
+ __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
+ ".popsection\n\t"
+ : : "i" (&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/arch/parisc/include/asm/kgdb.h b/arch/parisc/include/asm/kgdb.h
new file mode 100644
index 000000000000..f23e7f8f13a5
--- /dev/null
+++ b/arch/parisc/include/asm/kgdb.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PA-RISC KGDB support
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ *
+ */
+
+#ifndef __PARISC_KGDB_H__
+#define __PARISC_KGDB_H__
+
+#define BREAK_INSTR_SIZE 4
+#define PARISC_KGDB_COMPILED_BREAK_INSN 0x3ffc01f
+#define PARISC_KGDB_BREAK_INSN 0x3ffa01f
+
+
+#define NUMREGBYTES sizeof(struct parisc_gdb_regs)
+#define BUFMAX 4096
+
+#define CACHE_FLUSH_IS_SAFE 1
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".word %0" : : "i"(PARISC_KGDB_COMPILED_BREAK_INSN) : "memory");
+}
+
+struct parisc_gdb_regs {
+ unsigned long gpr[32];
+ unsigned long sar;
+ unsigned long iaoq_f;
+ unsigned long iasq_f;
+ unsigned long iaoq_b;
+ unsigned long iasq_b;
+ unsigned long eiem;
+ unsigned long iir;
+ unsigned long isr;
+ unsigned long ior;
+ unsigned long ipsw;
+ unsigned long __unused0;
+ unsigned long sr4;
+ unsigned long sr0;
+ unsigned long sr1;
+ unsigned long sr2;
+ unsigned long sr3;
+ unsigned long sr5;
+ unsigned long sr6;
+ unsigned long sr7;
+ unsigned long cr0;
+ unsigned long pid1;
+ unsigned long pid2;
+ unsigned long scrccr;
+ unsigned long pid3;
+ unsigned long pid4;
+ unsigned long cr24;
+ unsigned long cr25;
+ unsigned long cr26;
+ unsigned long cr27;
+ unsigned long cr28;
+ unsigned long cr29;
+ unsigned long cr30;
+
+ u64 fr[32];
+};
+
+#endif
+#endif
diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h
new file mode 100644
index 000000000000..e09cf2deeafe
--- /dev/null
+++ b/arch/parisc/include/asm/kprobes.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * arch/parisc/include/asm/kprobes.h
+ *
+ * PA-RISC kprobes implementation
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ */
+
+#ifndef _PARISC_KPROBES_H
+#define _PARISC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+#include <asm-generic/kprobes.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/notifier.h>
+
+#define PARISC_KPROBES_BREAK_INSN 0x3ff801f
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 1
+
+typedef u32 kprobe_opcode_t;
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+#define flush_insn_slot(p) \
+ flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \
+ (unsigned long)&(p)->ainsn.insn[0] + \
+ sizeof(kprobe_opcode_t))
+
+#define kretprobe_blacklist_size 0
+
+struct arch_specific_insn {
+ kprobe_opcode_t *insn;
+};
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct prev_kprobe prev_kprobe;
+ unsigned long iaoq[2];
+};
+
+int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
+int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
+
+#endif /* CONFIG_KPROBES */
+#endif /* _PARISC_KPROBES_H */
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index fafa3893fd70..8d390406d862 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -2,62 +2,6 @@
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
+#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */
-#ifdef CONFIG_DISCONTIGMEM
-
-extern int npmem_ranges;
-
-struct node_map_data {
- pg_data_t pg_data;
-};
-
-extern struct node_map_data node_data[];
-
-#define NODE_DATA(nid) (&node_data[nid].pg_data)
-
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX 512 /* support 512GB */
-extern signed char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef CONFIG_64BIT
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- unsigned int i;
-
- if (unlikely(pfn_is_io(pfn)))
- return 0;
-
- i = pfn >> PFNNID_SHIFT;
- BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-
- return pfnnid_map[i];
-}
-
-static inline int pfn_valid(int pfn)
-{
- int nid = pfn_to_nid(pfn);
-
- if (nid >= 0)
- return (pfn < node_end_pfn(nid));
- return 0;
-}
-
-#endif
#endif /* _PARISC_MMZONE_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b77f49ce6220..93caf17ac5e2 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -147,9 +147,9 @@ extern int npmem_ranges;
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
diff --git a/arch/parisc/include/asm/patch.h b/arch/parisc/include/asm/patch.h
new file mode 100644
index 000000000000..685b58a13968
--- /dev/null
+++ b/arch/parisc/include/asm/patch.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _PARISC_KERNEL_PATCH_H
+#define _PARISC_KERNEL_PATCH_H
+
+/* stop machine and patch kernel text */
+void patch_text(void *addr, unsigned int insn);
+
+/* patch kernel text with machine already stopped (e.g. in kgdb) */
+void __patch_text(void *addr, unsigned int insn);
+
+#endif
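The two entry points differ only in who quiesces the other CPUs: patch_text() wraps the write in stop_machine(), while __patch_text() assumes the caller already did (as kgdb does). A hedged usage sketch; INSN_NOP is the parisc NOP encoding from asm/alternative.h, as used by the jump label code added later in this series:

#include <asm/patch.h>
#include <asm/alternative.h>	/* INSN_NOP */

/* Normal context: stop_machine() serializes against all other CPUs. */
static void nop_out(u32 *insn)
{
	patch_text(insn, INSN_NOP);
}

/* Machine already stopped (e.g. inside the kgdb exception handler). */
static void nop_out_stopped(u32 *insn)
{
	__patch_text(insn, INSN_NOP);
}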
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index d05c678c77c4..ea75cc966dae 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -41,6 +41,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
}
+ spin_lock_init(pgd_spinlock(actual_pgd));
return actual_pgd;
}
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c7bb74e22436..a39b079e73f2 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
*/
#define kern_addr_valid(addr) (1)
-/* Purge data and instruction TLB entries. Must be called holding
- * the pa_tlb_lock. The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter processor broadcast can be active at any one
+ * time on the Merced bus.
+ *
+ * PTE updates are protected by locks in the PMD.
+ */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes (0)
+#endif
+
+#define purge_tlb_start(flags) do { \
+ if (pa_serialize_tlb_flushes) \
+ spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+ else \
+ local_irq_save(flags); \
+ } while (0)
+#define purge_tlb_end(flags) do { \
+ if (pa_serialize_tlb_flushes) \
+ spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+ else \
+ local_irq_restore(flags); \
+ } while (0)
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
+ unsigned long flags;
+
+ purge_tlb_start(flags);
mtsp(mm->context, 1);
pdtlb(addr);
pitlb(addr);
+ purge_tlb_end(flags);
}
/* Certain architectures need to do special things when PTEs
@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
do { \
pte_t old_pte; \
unsigned long flags; \
- spin_lock_irqsave(&pa_tlb_lock, flags); \
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
old_pte = *ptep; \
set_pte(ptep, pteval); \
purge_tlb_entries(mm, addr); \
- spin_unlock_irqrestore(&pa_tlb_lock, flags); \
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#if CONFIG_PGTABLE_LEVELS == 3
#define PGD_ORDER 1 /* Number of pages per pgd */
#define PMD_ORDER 1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER 2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */
#else
#define PGD_ORDER 1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER PGD_ORDER
+#define PGD_ALLOC_ORDER (PGD_ORDER + 1)
#endif
/* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+ if (unlikely(pgd == swapper_pg_dir))
+ return &pa_swapper_pg_lock;
+ return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(*ptep))
return 0;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
pte = *ptep;
if (!pte_young(pte)) {
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
return 1;
}
@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t old_pte;
unsigned long flags;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
old_pte = *ptep;
set_pte(ptep, __pte(0));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
return old_pte;
}
@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long flags;
- spin_lock_irqsave(&pa_tlb_lock, flags);
+ spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
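The page-table lock that replaces pa_tlb_lock is not a separate allocation: pgd_alloc() requests extra space (PGD_ALLOC_ORDER gains a +1 above) and pgd_spinlock() points at a fixed offset into that block, with swapper_pg_dir falling back to the global pa_swapper_pg_lock. A rough sketch of the layout this implies; the exact page split depends on the configuration and is given only for orientation:

/*
 *   pgd_alloc() block (order PGD_ALLOC_ORDER):
 *
 *   +------------------------------+ <- pgd (what %cr25 holds at run time)
 *   | pgd pages (and attached pmd  |
 *   | on 64-bit, per PGD_ORDER)    |
 *   +------------------------------+ <- pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))
 *   | spinlock_t returned by       |
 *   | pgd_spinlock(pgd)            |
 *   +------------------------------+
 */

The TLB miss handlers reach the same word without a C call: the reworked load_pa_tlb_lock macro in entry.S reads the pgd from %cr25 and adds PAGE_SIZE << (PGD_ALLOC_ORDER - 1), so that offset must stay in sync with pgd_spinlock().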
diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h
index 9ff033d261ab..143fb2a89dd8 100644
--- a/arch/parisc/include/asm/ptrace.h
+++ b/arch/parisc/include/asm/ptrace.h
@@ -37,4 +37,17 @@ extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ipsw))
+#define kernel_stack_pointer(regs) ((regs)->gr[30])
+
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
+int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
+
#endif
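regs_get_register() pairs with the regs_query_register_offset()/regs_query_register_name() helpers visible in the context above, giving kprobes and the tracing code a uniform way to read a saved register by offset. A small sketch; the choice of gr[26] follows the parisc convention of passing the first argument in %r26, so adjust it for the register actually needed:

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>

/* Fetch the first argument register of the interrupted context. */
static unsigned long first_arg_reg(struct pt_regs *regs)
{
	return regs_get_register(regs, offsetof(struct pt_regs, gr[26]));
}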
diff --git a/arch/parisc/include/asm/sparsemem.h b/arch/parisc/include/asm/sparsemem.h
new file mode 100644
index 000000000000..b5c3a79045b4
--- /dev/null
+++ b/arch/parisc/include/asm/sparsemem.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_PARISC_SPARSEMEM_H
+#define ASM_PARISC_SPARSEMEM_H
+
+/* We have these possible memory map layouts:
+ * Astro: 0-3.75, 67.75-68, 4-64
+ * zx1: 0-1, 257-260, 4-256
+ * Stretch (N-class): 0-2, 4-32, 34-xxx
+ */
+
+#define MAX_PHYSMEM_BITS 39 /* 512 GB */
+#define SECTION_SIZE_BITS 27 /* 128 MB */
+
+#endif
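The two constants fix the size of the sparse section map: 39 physical address bits with 128 MB sections gives 2^(39 - 27) = 4096 sections, the same 512 GB ceiling that the removed pfnnid_map (PFNNID_MAP_MAX of 512 entries of 1 GB each) enforced under DISCONTIGMEM. Expressed as a compile-time check, purely as a sketch:

#include <linux/build_bug.h>
#include <asm/sparsemem.h>

#define EXAMPLE_NR_SECTIONS	(1ULL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))

static_assert(EXAMPLE_NR_SECTIONS == 4096);			/* 4096 sections of 128 MB */
static_assert((1ULL << MAX_PHYSMEM_BITS) == 512ULL << 30);	/* 512 GB total */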
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 8a63515f03bf..197d2247e4db 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -37,7 +37,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
volatile unsigned int *a;
a = __ldcw_align(x);
+#ifdef CONFIG_SMP
+ (void) __ldcw(a);
+#else
mb();
+#endif
*a = 1;
}
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 62a6d477fae0..80757e43cf2c 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -48,11 +48,11 @@ static inline void syscall_rollback(struct task_struct *task,
/* do nothing */
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_PARISC;
#ifdef CONFIG_64BIT
- if (!is_compat_task())
+ if (!__is_compat_task(task))
arch = AUDIT_ARCH_PARISC64;
#endif
return arch;
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
index 0c881e74d8a6..8c0446b04c9e 100644
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -2,24 +2,6 @@
#ifndef _PARISC_TLB_H
#define _PARISC_TLB_H
-#define tlb_flush(tlb) \
-do { if ((tlb)->fullmm) \
- flush_tlb_mm((tlb)->mm);\
-} while (0)
-
-#define tlb_start_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define tlb_end_vma(tlb, vma) \
-do { if (!(tlb)->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
- do { } while (0)
-
#include <asm-generic/tlb.h>
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 6804374efa66..c5ded01d45be 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -8,21 +8,6 @@
#include <linux/sched.h>
#include <asm/mmu_context.h>
-
-/* This is for the serialisation of PxTLB broadcasts. At least on the
- * N class systems, only one PxTLB inter processor broadcast can be
- * active at any one time on the Merced bus. This tlb purge
- * synchronisation is fairly lightweight and harmless so we activate
- * it on all systems not just the N class.
-
- * It is also used to ensure PTE updates are atomic and consistent
- * with the TLB.
- */
-extern spinlock_t pa_tlb_lock;
-
-#define purge_tlb_start(flags) spin_lock_irqsave(&pa_tlb_lock, flags)
-#define purge_tlb_end(flags) spin_unlock_irqrestore(&pa_tlb_lock, flags)
-
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
@@ -79,13 +64,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
- unsigned long flags, sid;
-
- sid = vma->vm_mm->context;
- purge_tlb_start(flags);
- mtsp(sid, 1);
- pdtlb(addr);
- pitlb(addr);
- purge_tlb_end(flags);
+ purge_tlb_entries(vma->vm_mm, addr);
}
#endif
diff --git a/arch/parisc/include/uapi/asm/sockios.h b/arch/parisc/include/uapi/asm/sockios.h
deleted file mode 100644
index 66a3ba64d53f..000000000000
--- a/arch/parisc/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef __ARCH_PARISC_SOCKIOS__
-#define __ARCH_PARISC_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 8e5f1ab65c68..fc0df5c44468 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -9,7 +9,8 @@ obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
- process.o processor.o pdc_cons.o pdc_chassis.o unwind.o
+ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
+ patch.o
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -32,3 +33,6 @@ obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 804880efa11e..a82b3eaa5398 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -29,9 +29,9 @@
#include <asm/sections.h>
#include <asm/shmparam.h>
-int split_tlb __read_mostly;
-int dcache_stride __read_mostly;
-int icache_stride __read_mostly;
+int split_tlb __ro_after_init;
+int dcache_stride __ro_after_init;
+int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
@@ -40,16 +40,23 @@ void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-/* On some machines (e.g. ones with the Merced bus), there can be
+/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
- * by software. We put a spinlock around all TLB flushes to
- * ensure this.
+ * by software. We need a spinlock around all TLB flushes to ensure
+ * this.
*/
-DEFINE_SPINLOCK(pa_tlb_lock);
+DEFINE_SPINLOCK(pa_tlb_flush_lock);
-struct pdc_cache_info cache_info __read_mostly;
+/* Swapper page setup lock. */
+DEFINE_SPINLOCK(pa_swapper_pg_lock);
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+int pa_serialize_tlb_flushes __ro_after_init;
+#endif
+
+struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
-static struct pdc_btlb_info btlb_info __read_mostly;
+static struct pdc_btlb_info btlb_info __ro_after_init;
#endif
#ifdef CONFIG_SMP
@@ -374,10 +381,10 @@ EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;
#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
-static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD;
void __init parisc_setup_cache_timing(void)
{
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 5eb979d04b90..00a181f1ecc6 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -38,9 +38,10 @@
#include <asm/io.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
+#include <asm/ropes.h>
/* See comments in include/asm-parisc/pci.h */
-const struct dma_map_ops *hppa_dma_ops __read_mostly;
+const struct dma_map_ops *hppa_dma_ops __ro_after_init;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
@@ -257,6 +258,30 @@ static struct parisc_device *find_device_by_addr(unsigned long hpa)
return ret ? d.dev : NULL;
}
+static int __init is_IKE_device(struct device *dev, void *data)
+{
+ struct parisc_device *pdev = to_parisc_device(dev);
+
+ if (!check_dev(dev))
+ return 0;
+ if (pdev->id.hw_type != HPHW_BCPORT)
+ return 0;
+ if (IS_IKE(pdev) ||
+ (pdev->id.hversion == REO_MERCED_PORT) ||
+ (pdev->id.hversion == REOG_MERCED_PORT)) {
+ return 1;
+ }
+ return 0;
+}
+
+int __init machine_has_merced_bus(void)
+{
+ int ret;
+
+ ret = for_each_padev(is_IKE_device, NULL);
+ return ret ? 1 : 0;
+}
+
/**
* find_pa_parent_type - Find a parent of a specific type
* @dev: The device to start searching from
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index d5eb19efa65b..a1fc04570ade 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -50,12 +50,8 @@
.import pa_tlb_lock,data
.macro load_pa_tlb_lock reg
-#if __PA_LDCW_ALIGNMENT > 4
- load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
- depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg
-#else
- load32 PA(pa_tlb_lock), \reg
-#endif
+ mfctl %cr25,\reg
+ addil L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
.endm
/* space_to_prot macro creates a prot id from a space id */
@@ -471,8 +467,9 @@
nop
LDREG 0(\ptp),\pte
bb,<,n \pte,_PAGE_PRESENT_BIT,3f
+ LDCW 0(\tmp),\tmp1
b \fault
- stw,ma \spc,0(\tmp)
+ stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2: LDREG 0(\ptp),\pte
@@ -481,20 +478,22 @@
.endm
/* Release pa_tlb_lock lock without reloading lock address. */
- .macro tlb_unlock0 spc,tmp
+ .macro tlb_unlock0 spc,tmp,tmp1
#ifdef CONFIG_SMP
98: or,COND(=) %r0,\spc,%r0
- stw,ma \spc,0(\tmp)
+ LDCW 0(\tmp),\tmp1
+ or,COND(=) %r0,\spc,%r0
+ stw \spc,0(\tmp)
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
.endm
/* Release pa_tlb_lock lock. */
- .macro tlb_unlock1 spc,tmp
+ .macro tlb_unlock1 spc,tmp,tmp1
#ifdef CONFIG_SMP
98: load_pa_tlb_lock \tmp
99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
- tlb_unlock0 \spc,\tmp
+ tlb_unlock0 \spc,\tmp,\tmp1
#endif
.endm
@@ -1177,7 +1176,7 @@ dtlb_miss_20w:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1203,7 +1202,7 @@ nadtlb_miss_20w:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1237,7 +1236,7 @@ dtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1270,7 +1269,7 @@ nadtlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1299,7 +1298,7 @@ dtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1327,7 +1326,7 @@ nadtlb_miss_20:
idtlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1434,7 +1433,7 @@ itlb_miss_20w:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1458,7 +1457,7 @@ naitlb_miss_20w:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1492,7 +1491,7 @@ itlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1516,7 +1515,7 @@ naitlb_miss_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1546,7 +1545,7 @@ itlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1566,7 +1565,7 @@ naitlb_miss_20:
iitlbt pte,prot
- tlb_unlock1 spc,t0
+ tlb_unlock1 spc,t0,t1
rfir
nop
@@ -1596,7 +1595,7 @@ dbit_trap_20w:
idtlbt pte,prot
- tlb_unlock0 spc,t0
+ tlb_unlock0 spc,t0,t1
rfir
nop
#else
@@ -1622,7 +1621,7 @@ dbit_trap_11:
mtsp t1, %sr1 /* Restore sr1 */
- tlb_unlock0 spc,t0
+ tlb_unlock0 spc,t0,t1
rfir
nop
@@ -1642,7 +1641,7 @@ dbit_trap_20:
idtlbt pte,prot
- tlb_unlock0 spc,t0
+ tlb_unlock0 spc,t0,t1
rfir
nop
#endif
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index 7a17551ea31e..f01e102bbfa2 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -87,7 +87,7 @@ extern unsigned long pdc_result2[NUM_PDC_RESULT];
/* Firmware needs to be initially set to narrow to determine the
* actual firmware width. */
-int parisc_narrow_firmware __read_mostly = 1;
+int parisc_narrow_firmware __ro_after_init = 1;
#endif
/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index e46a4157a894..a28f915993b1 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -51,7 +51,6 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
unsigned long org_sp_gr3)
{
extern ftrace_func_t ftrace_trace_function; /* depends on CONFIG_DYNAMIC_FTRACE */
- extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
if (ftrace_trace_function != ftrace_stub) {
/* struct ftrace_ops *op, struct pt_regs *regs); */
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index fbb4e43fda05..951a339369dd 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -22,7 +22,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
- .level LEVEL
+ .level PA_ASM_LEVEL
__INITDATA
ENTRY(boot_args)
@@ -258,7 +258,7 @@ stext_pdc_ret:
ldo R%PA(fault_vector_11)(%r10),%r10
$is_pa20:
- .level LEVEL /* restore 1.1 || 2.0w */
+ .level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
load32 PA(fault_vector_20),%r10
@@ -329,6 +329,19 @@ smp_slave_stext:
mtsp %r0,%sr6
mtsp %r0,%sr7
+#ifdef CONFIG_64BIT
+ /*
+ * Enable Wide mode early, in case the task_struct for the idle
+ * task in smp_init_current_idle_task was allocated above 4GB.
+ */
+1: mfia %rp /* clear upper part of pcoq */
+ ldo 2f-1b(%rp),%rp
+ depdi 0,31,32,%rp
+ bv (%rp)
+ ssm PSW_SM_W,%r0
+2:
+#endif
+
/* Initialize the SP - monarch sets up smp_init_current_idle_task */
load32 PA(smp_init_current_idle_task),%sp
LDREG 0(%sp),%sp /* load task address */
@@ -363,7 +376,7 @@ smp_slave_stext:
ENDPROC(parisc_kernel_start)
#ifndef CONFIG_64BIT
- .section .data..read_mostly
+ .section .data..ro_after_init
.align 4
.export $global$,data
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 35d05fdd7483..3f4a91c0b805 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -31,6 +31,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
+#include <asm/tlbflush.h>
/*
** Debug options
@@ -38,12 +39,12 @@
*/
#undef DEBUG_PAT
-int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
+int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;
/* cell number and location (PAT firmware only) */
-unsigned long parisc_cell_num __read_mostly;
-unsigned long parisc_cell_loc __read_mostly;
-unsigned long parisc_pat_pdc_cap __read_mostly;
+unsigned long parisc_cell_num __ro_after_init;
+unsigned long parisc_cell_loc __ro_after_init;
+unsigned long parisc_pat_pdc_cap __ro_after_init;
void __init setup_pdc(void)
@@ -638,4 +639,10 @@ void __init do_device_inventory(void)
}
printk(KERN_INFO "Found devices:\n");
print_parisc_devices();
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+ pa_serialize_tlb_flushes = machine_has_merced_bus();
+ if (pa_serialize_tlb_flushes)
+ pr_info("Merced bus found: Enable PxTLB serialization.\n");
+#endif
}
diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c
new file mode 100644
index 000000000000..d2f3cb12e282
--- /dev/null
+++ b/arch/parisc/kernel/jump_label.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Helge Deller <deller@gmx.de>
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/patch.h>
+
+static inline int reassemble_17(int as17)
+{
+ return (((as17 & 0x10000) >> 16) |
+ ((as17 & 0x0f800) << 5) |
+ ((as17 & 0x00400) >> 8) |
+ ((as17 & 0x003ff) << 3));
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ void *addr = (void *)jump_entry_code(entry);
+ u32 insn;
+
+ if (type == JUMP_LABEL_JMP) {
+ void *target = (void *)jump_entry_target(entry);
+ int distance = target - addr;
+ /*
+ * Encode the PA1.1 "b,n" instruction with a 17-bit
+ * displacement. In case we hit the BUG(), we could use
+ * another branch instruction with a 22-bit displacement on
+ * 64-bit CPUs instead. But this seems sufficient for now.
+ */
+ distance -= 8;
+ BUG_ON(distance > 262143 || distance < -262144);
+ insn = 0xe8000002 | reassemble_17(distance >> 2);
+ } else {
+ insn = INSN_NOP;
+ }
+
+ patch_text(addr, insn);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the architected NOP in arch_static_branch, so there's no
+ * need to patch an identical NOP over the top of it here. The core
+ * will call arch_jump_label_transform from a module notifier if the
+ * NOP needs to be replaced by a branch.
+ */
+}
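reassemble_17() scatters the 17-bit word displacement into the non-contiguous fields of the branch word; the 0xe8000002 base opcode and the -8 adjustment come straight from arch_jump_label_transform() above. Worked through for a target 1024 bytes beyond the nop site, the displacement becomes (1024 - 8) >> 2 = 254 words and the patched instruction is 0xe8000002 | reassemble_17(254) = 0xe80007f2. A self-contained check of that arithmetic (user-space sketch using the same helper):

#include <assert.h>
#include <stdint.h>

static int reassemble_17(int as17)
{
	return (((as17 & 0x10000) >> 16) |
		((as17 & 0x0f800) << 5) |
		((as17 & 0x00400) >> 8) |
		((as17 & 0x003ff) << 3));
}

int main(void)
{
	int distance = 1024;			/* target minus nop address */
	uint32_t insn;

	distance -= 8;				/* as in arch_jump_label_transform() */
	assert(distance <= 262143 && distance >= -262144);
	insn = 0xe8000002 | reassemble_17(distance >> 2);
	assert(insn == 0xe80007f2);		/* encoded "b,n" to the target */
	return 0;
}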
diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c
new file mode 100644
index 000000000000..664278db9b97
--- /dev/null
+++ b/arch/parisc/kernel/kgdb.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PA-RISC KGDB support
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ *
+ */
+
+#include <linux/kgdb.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/traps.h>
+#include <asm/processor.h>
+#include <asm/patch.h>
+#include <asm/cacheflush.h>
+
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = { 0x03, 0xff, 0xa0, 0x1f }
+};
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ struct pt_regs *regs = args->regs;
+
+ if (kgdb_handle_exception(1, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
+static int kgdb_notify(struct notifier_block *self,
+ unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+ .priority = -INT_MAX,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
+
+ memset(gr, 0, sizeof(struct parisc_gdb_regs));
+
+ memcpy(gr->gpr, regs->gr, sizeof(gr->gpr));
+ memcpy(gr->fr, regs->fr, sizeof(gr->fr));
+
+ gr->sr0 = regs->sr[0];
+ gr->sr1 = regs->sr[1];
+ gr->sr2 = regs->sr[2];
+ gr->sr3 = regs->sr[3];
+ gr->sr4 = regs->sr[4];
+ gr->sr5 = regs->sr[5];
+ gr->sr6 = regs->sr[6];
+ gr->sr7 = regs->sr[7];
+
+ gr->sar = regs->sar;
+ gr->iir = regs->iir;
+ gr->isr = regs->isr;
+ gr->ior = regs->ior;
+ gr->ipsw = regs->ipsw;
+ gr->cr27 = regs->cr27;
+
+ gr->iaoq_f = regs->iaoq[0];
+ gr->iasq_f = regs->iasq[0];
+
+ gr->iaoq_b = regs->iaoq[1];
+ gr->iasq_b = regs->iasq[1];
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ struct parisc_gdb_regs *gr = (struct parisc_gdb_regs *)gdb_regs;
+
+ memcpy(regs->gr, gr->gpr, sizeof(regs->gr));
+ memcpy(regs->fr, gr->fr, sizeof(regs->fr));
+
+ regs->sr[0] = gr->sr0;
+ regs->sr[1] = gr->sr1;
+ regs->sr[2] = gr->sr2;
+ regs->sr[3] = gr->sr3;
+ regs->sr[4] = gr->sr4;
+ regs->sr[5] = gr->sr5;
+ regs->sr[6] = gr->sr6;
+ regs->sr[7] = gr->sr7;
+
+ regs->sar = gr->sar;
+ regs->iir = gr->iir;
+ regs->isr = gr->isr;
+ regs->ior = gr->ior;
+ regs->ipsw = gr->ipsw;
+ regs->cr27 = gr->cr27;
+
+ regs->iaoq[0] = gr->iaoq_f;
+ regs->iasq[0] = gr->iasq_f;
+
+ regs->iaoq[1] = gr->iaoq_b;
+ regs->iasq[1] = gr->iasq_b;
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+ unsigned long gr30, iaoq;
+
+ gr30 = regs->gr[30];
+ iaoq = regs->iaoq[0];
+
+ regs->gr[30] = regs->ksp;
+ regs->iaoq[0] = regs->kpc;
+ pt_regs_to_gdb_regs(gdb_regs, regs);
+
+ regs->gr[30] = gr30;
+ regs->iaoq[0] = iaoq;
+}
+
+static void step_instruction_queue(struct pt_regs *regs)
+{
+ regs->iaoq[0] = regs->iaoq[1];
+ regs->iaoq[1] += 4;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->iaoq[0] = ip;
+ regs->iaoq[1] = ip + 4;
+}
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
+ if (ret)
+ return ret;
+
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)&arch_kgdb_ops.gdb_bpt_instr);
+ return ret;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)&bpt->saved_instr);
+ return 0;
+}
+
+int kgdb_arch_handle_exception(int trap, int signo,
+ int err_code, char *inbuf, char *outbuf,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *p = inbuf + 1;
+
+ switch (inbuf[0]) {
+ case 'D':
+ case 'c':
+ case 'k':
+ kgdb_contthread = NULL;
+ kgdb_single_step = 0;
+
+ if (kgdb_hex2long(&p, &addr))
+ kgdb_arch_set_pc(regs, addr);
+ else if (trap == 9 && regs->iir ==
+ PARISC_KGDB_COMPILED_BREAK_INSN)
+ step_instruction_queue(regs);
+ return 0;
+ case 's':
+ kgdb_single_step = 1;
+ if (kgdb_hex2long(&p, &addr)) {
+ kgdb_arch_set_pc(regs, addr);
+ } else if (trap == 9 && regs->iir ==
+ PARISC_KGDB_COMPILED_BREAK_INSN) {
+ step_instruction_queue(regs);
+ mtctl(-1, 0);
+ } else {
+ mtctl(0, 0);
+ }
+ regs->gr[0] |= PSW_R;
+ return 0;
+
+ }
+ return -1;
+}
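Everything above is the generic kgdb arch contract: register marshalling, breakpoint patching via __patch_text(), and single stepping through the recovery counter (see the PSW_R handling). For completeness, a hedged sketch of how kernel code drops into an attached debugger through the compiled-in break instruction; the helpers are the generic ones from linux/kgdb.h, not something added by this patch:

#include <linux/kgdb.h>

static void enter_debugger_if_attached(void)
{
	if (kgdb_connected)		/* a frontend (e.g. kgdboc) is attached */
		kgdb_breakpoint();	/* traps via the compiled-in break insn */
}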
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
new file mode 100644
index 000000000000..d58960b33bda
--- /dev/null
+++ b/arch/parisc/kernel/kprobes.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/parisc/kernel/kprobes.c
+ *
+ * PA-RISC kprobes implementation
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/patch.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ if ((unsigned long)p->addr & 3UL)
+ return -EINVAL;
+
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+
+ memcpy(p->ainsn.insn, p->addr,
+ MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+ flush_insn_slot(p);
+ return 0;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (!p->ainsn.insn)
+ return;
+
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, PARISC_KPROBES_BREAK_INSN);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ patch_text(p->addr, p->opcode);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+ struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ kcb->iaoq[0] = regs->iaoq[0];
+ kcb->iaoq[1] = regs->iaoq[1];
+ regs->iaoq[0] = (unsigned long)p->ainsn.insn;
+ mtctl(0, 0);
+ regs->gr[0] |= PSW_R;
+}
+
+int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)regs->iaoq[0]);
+
+ if (!p) {
+ preempt_enable_no_resched();
+ return 0;
+ }
+
+ if (kprobe_running()) {
+ /*
+ * We have re-entered the kprobe_handler: another kprobe was
+ * hit while within the handler. Save the original kprobe and
+ * single-step the instruction of the new probe without calling
+ * any user handlers, to avoid recursive kprobes.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, kcb, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /* If we have no pre-handler or it returned 0, we continue with
+ * normal processing. If we have a pre-handler and it returned
+ * non-zero (which means the user handler set up the registers to
+ * resume at another instruction), we must skip the single stepping.
+ */
+
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, kcb, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 1;
+}
+
+int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ struct kprobe *p = kprobe_running();
+
+ if (regs->iaoq[0] != (unsigned long)p->ainsn.insn+4)
+ return 0;
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return 1;
+ }
+
+ /* For absolute branch instructions we can copy iaoq_b. For relative
+ * branch instructions we need to calculate the new address based on the
+ * difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
+ * modification because it's based on our ainsn.insn address.
+ */
+
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+
+ switch (regs->iir >> 26) {
+ case 0x38: /* BE */
+ case 0x39: /* BE,L */
+ case 0x3a: /* BV */
+ case 0x3b: /* BVE */
+ /* For absolute branches, regs->iaoq[1] already has the right
+ * address.
+ */
+ regs->iaoq[0] = kcb->iaoq[1];
+ break;
+ default:
+ regs->iaoq[1] = kcb->iaoq[0];
+ regs->iaoq[1] += (regs->iaoq[1] - regs->iaoq[0]) + 4;
+ regs->iaoq[0] = kcb->iaoq[1];
+ break;
+ }
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ reset_current_kprobe();
+ return 1;
+}
+
+static inline void kretprobe_trampoline(void)
+{
+ asm volatile("nop");
+ asm volatile("nop");
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs);
+
+static struct kprobe trampoline_p = {
+ .pre_handler = trampoline_probe_handler
+};
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)trampoline_p.addr;
+ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * a return probe installed on them, and/or more than one return
+ * probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+ instruction_pointer_set(regs, orig_ret_address);
+ return 1;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->gr[2];
+
+ /* Replace the return addr with trampoline addr. */
+ regs->gr[2] = (unsigned long)trampoline_p.addr;
+}
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return p->addr == trampoline_p.addr;
+}
+
+bool arch_kprobe_on_func_entry(unsigned long offset)
+{
+ return !offset;
+}
+
+int __init arch_init_kprobes(void)
+{
+ trampoline_p.addr = (kprobe_opcode_t *)
+ dereference_function_descriptor(kretprobe_trampoline);
+ return register_kprobe(&trampoline_p);
+}
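arch_prepare_kretprobe() above swaps the saved return pointer in gr[2] for the trampoline, and trampoline_probe_handler() later recovers the real return address from the kretprobe instance list. Users never see that plumbing; a minimal return-probe sketch, again patterned on samples/kprobes (probed symbol and maxactive are illustrative):

#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* parisc returns values in %r28 (ret0). */
	pr_info("%s returned %ld\n", ri->rp->kp.symbol_name, (long)regs->gr[28]);
	return 0;
}

static struct kretprobe rp = {
	.kp.symbol_name	= "do_sys_open",	/* illustrative target */
	.handler	= sample_ret,
	.maxactive	= 16,			/* concurrent instances to track */
};

static int __init krp_sample_init(void)
{
	return register_kretprobe(&rp);
}

static void __exit krp_sample_exit(void)
{
	unregister_kretprobe(&rp);
}

module_init(krp_sample_init);
module_exit(krp_sample_exit);
MODULE_LICENSE("GPL");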
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 187f032c9dd8..4e4e8eb25874 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -311,39 +311,6 @@ fdsync:
nop
ENDPROC_CFI(flush_data_cache_local)
-/* Macros to serialize TLB purge operations on SMP. */
-
- .macro tlb_lock la,flags,tmp
-#ifdef CONFIG_SMP
-98:
-#if __PA_LDCW_ALIGNMENT > 4
- load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
- depi 0,31,__PA_LDCW_ALIGN_ORDER, \la
-#else
- load32 pa_tlb_lock, \la
-#endif
- rsm PSW_SM_I,\flags
-1: LDCW 0(\la),\tmp
- cmpib,<>,n 0,\tmp,3f
-2: ldw 0(\la),\tmp
- cmpb,<> %r0,\tmp,1b
- nop
- b,n 2b
-3:
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- .endm
-
- .macro tlb_unlock la,flags,tmp
-#ifdef CONFIG_SMP
-98: ldi 1,\tmp
- sync
- stw \tmp,0(\la)
- mtsm \flags
-99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-#endif
- .endm
-
/* Clear page using kernel mapping. */
ENTRY_CFI(clear_page_asm)
@@ -601,10 +568,8 @@ ENTRY_CFI(copy_user_page_asm)
pdtlb,l %r0(%r28)
pdtlb,l %r0(%r29)
#else
- tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
1: pdtlb %r0(%r29)
- tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -743,9 +708,7 @@ ENTRY_CFI(clear_user_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
- tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
- tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -821,9 +784,7 @@ ENTRY_CFI(flush_dcache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
- tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
- tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -882,9 +843,7 @@ ENTRY_CFI(purge_dcache_page_asm)
#ifdef CONFIG_PA20
pdtlb,l %r0(%r28)
#else
- tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
- tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
#endif
@@ -948,10 +907,8 @@ ENTRY_CFI(flush_icache_page_asm)
1: pitlb,l %r0(%sr4,%r28)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
#else
- tlb_lock %r20,%r21,%r22
0: pdtlb %r0(%r28)
1: pitlb %r0(%sr4,%r28)
- tlb_unlock %r20,%r21,%r22
ALTERNATIVE(0b, 0b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SMP, INSN_PxTLB)
ALTERNATIVE(1b, 1b+4, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7baa2265d439..174213b1716e 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -138,12 +138,6 @@ extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
#endif
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(pfnnid_map);
-#endif
-
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
new file mode 100644
index 000000000000..cdcd981278b3
--- /dev/null
+++ b/arch/parisc/kernel/patch.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions to patch read-only kernel text at runtime
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <linux/stop_machine.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/patch.h>
+
+struct patch {
+ void *addr;
+ unsigned int insn;
+};
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool module = !core_kernel_text(uintaddr);
+ struct page *page;
+
+ if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else if (!module && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ page = virt_to_page(addr);
+ else
+ return addr;
+
+ set_fixmap(fixmap, page_to_phys(page));
+
+ return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+
+void __kprobes __patch_text(void *addr, unsigned int insn)
+{
+ void *waddr = addr;
+ int size;
+
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+ *(u32 *)waddr = insn;
+ size = sizeof(u32);
+ flush_kernel_vmap_range(waddr, size);
+ patch_unmap(FIX_TEXT_POKE0);
+ flush_icache_range((uintptr_t)(addr),
+ (uintptr_t)(addr) + size);
+}
+
+static int __kprobes patch_text_stop_machine(void *data)
+{
+ struct patch *patch = data;
+
+ __patch_text(patch->addr, patch->insn);
+
+ return 0;
+}
+
+void __kprobes patch_text(void *addr, unsigned int insn)
+{
+ struct patch patch = {
+ .addr = addr,
+ .insn = insn,
+ };
+
+ stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
+}
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index ae684ac6efb6..bc41ca243cfe 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -45,14 +45,14 @@
* #define pci_post_reset_delay 50
*/
-struct pci_port_ops *pci_port __read_mostly;
-struct pci_bios_ops *pci_bios __read_mostly;
+struct pci_port_ops *pci_port __ro_after_init;
+struct pci_bios_ops *pci_bios __ro_after_init;
-static int pci_hba_count __read_mostly;
+static int pci_hba_count __ro_after_init;
/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
#define PCI_HBA_MAX 32
-static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;
+static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __ro_after_init;
/********************************************************************
diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h
index 7fef9644df47..c108fee989d9 100644
--- a/arch/parisc/kernel/perf_images.h
+++ b/arch/parisc/kernel/perf_images.h
@@ -25,7 +25,7 @@
#define PCXU_IMAGE_SIZE 584
-static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = {
/*
* CPI:
*
@@ -2093,7 +2093,7 @@ static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly =
};
#define PCXW_IMAGE_SIZE 576
-static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __read_mostly = {
+static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] __ro_after_init = {
/*
* CPI: FROM CPI.IDF (Image 0)
*
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 841db71958cd..89e4f4497ffb 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -192,7 +192,8 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
* QEMU idle the host too.
*/
-int running_on_qemu __read_mostly;
+int running_on_qemu __ro_after_init;
+EXPORT_SYMBOL(running_on_qemu);
void __cpuidle arch_cpu_idle_dead(void)
{
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 7f4d042856b5..e715871cd4ac 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -43,10 +43,10 @@
#include <asm/irq.h> /* for struct irq_region */
#include <asm/parisc-device.h>
-struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
+struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
EXPORT_SYMBOL(boot_cpu_data);
#ifdef CONFIG_PA8X00
-int _parisc_requires_coherency __read_mostly;
+int _parisc_requires_coherency __ro_after_init;
EXPORT_SYMBOL(_parisc_requires_coherency);
#endif
@@ -305,7 +305,8 @@ void __init collect_boot_cpu_data(void)
if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
- current_prod_num, orig_prod_num, serial_no);
+ current_prod_num[0] ? current_prod_num : "n/a",
+ orig_prod_num, serial_no);
add_device_randomness(orig_prod_num, strlen(orig_prod_num));
add_device_randomness(current_prod_num, strlen(current_prod_num));
add_device_randomness(serial_no, strlen(serial_no));
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 0964c236e3e5..a3d2fb4e6dd2 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -789,3 +789,38 @@ const char *regs_query_register_name(unsigned int offset)
return roff->name;
return NULL;
}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @addr: address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr lies within the kernel
+ * stack page(s) of @regs; it returns true if so, false otherwise.
+ */
+int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+ return ((addr & ~(THREAD_SIZE - 1)) ==
+ (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
+ * specified by @regs. If the @n th entry is not within the kernel stack,
+ * it returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+ unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+ addr -= n;
+
+ if (!regs_within_kernel_stack(regs, (unsigned long)addr))
+ return 0;
+
+ return *addr;
+}
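These two helpers complete the regs-and-stack access interface that the kprobe fetch methods rely on: regs_within_kernel_stack() bounds-checks an address against the THREAD_SIZE-aligned stack of the interrupted task, and regs_get_kernel_stack_nth() reads the n-th word relative to the saved stack pointer, returning 0 when the slot falls outside the stack. Which slot carries which value is frame and ABI specific, so the sketch below only peeks for debugging purposes:

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>

/* Dump a few words around the saved stack pointer of a trapped context. */
static void dump_stack_slots(struct pt_regs *regs)
{
	unsigned int n;

	for (n = 0; n < 4; n++)
		pr_info("stack slot %u: %#lx\n", n,
			regs_get_kernel_stack_nth(regs, n));
}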
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index d908058d05c1..e05cb2a5c16d 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -343,6 +343,12 @@ static int __init parisc_init(void)
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+ /* Don't serialize TLB flushes if we run on one CPU only. */
+ if (num_online_cpus() == 1)
+ pa_serialize_tlb_flushes = 0;
+#endif
+
apply_alternatives_all();
parisc_setup_cache_timing();
diff --git a/arch/parisc/kernel/stacktrace.c b/arch/parisc/kernel/stacktrace.c
index ec5835e83a7a..6f0b9c8d8052 100644
--- a/arch/parisc/kernel/stacktrace.c
+++ b/arch/parisc/kernel/stacktrace.c
@@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
}
}
-
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
dump_trace(current, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 376ea0d1b275..4407ac4c1d84 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -86,7 +86,8 @@ static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
stack_base = STACK_SIZE_MAX;
/* Add space for stack randomization. */
- stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+ if (current->flags & PF_RANDOMIZE)
+ stack_base += (STACK_RND_MASK << PAGE_SHIFT);
return PAGE_ALIGN(STACK_TOP - stack_base);
}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 4f77bd9be66b..97ac707c6bff 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -48,7 +48,7 @@ registers).
*/
#define KILL_INSN break 0,0
- .level LEVEL
+ .level PA_ASM_LEVEL
.text
@@ -640,7 +640,10 @@ cas_action:
sub,<> %r28, %r25, %r0
2: stw %r24, 0(%r26)
/* Free lock */
- sync
+#ifdef CONFIG_SMP
+98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+#endif
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
@@ -655,7 +658,10 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
- sync
+#ifdef CONFIG_SMP
+98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+#endif
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@@ -857,7 +863,10 @@ cas2_action:
cas2_end:
/* Free lock */
- sync
+#ifdef CONFIG_SMP
+98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+#endif
stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
@@ -868,7 +877,10 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
- sync
+#ifdef CONFIG_SMP
+98: LDCW 0(%sr2,%r20), %r1 /* Barrier */
+99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+#endif
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index fe8ca623add8..c9e377d59232 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -424,3 +424,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index a1e772f909cb..04508158815c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -40,7 +40,7 @@
#include <linux/timex.h>
-static unsigned long clocktick __read_mostly; /* timer cycles per tick */
+static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
/*
* We keep time on PA-RISC Linux by using the Interval Timer which is
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 7e1ccafadf57..096e319adeb3 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -42,6 +42,8 @@
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
+#include <linux/kgdb.h>
+#include <linux/kprobes.h>
#include "../math-emu/math-emu.h" /* for handle_fpe() */
@@ -293,6 +295,22 @@ static void handle_break(struct pt_regs *regs)
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
+#ifdef CONFIG_KPROBES
+ if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
+ parisc_kprobe_break_handler(regs);
+ return;
+ }
+
+#endif
+
+#ifdef CONFIG_KGDB
+ if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+ iir == PARISC_KGDB_BREAK_INSN)) {
+ kgdb_handle_exception(9, SIGTRAP, 0, regs);
+ return;
+ }
+#endif
+
if (unlikely(iir != GDB_BREAK_INSN))
parisc_printk_ratelimited(0, regs,
KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
@@ -518,6 +536,19 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
case 3:
/* Recovery counter trap */
regs->gr[0] &= ~PSW_R;
+
+#ifdef CONFIG_KPROBES
+ if (parisc_kprobe_ss_handler(regs))
+ return;
+#endif
+
+#ifdef CONFIG_KGDB
+ if (kgdb_single_step) {
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ return;
+ }
+#endif
+
if (user_space(regs))
handle_gdb_break(regs, TRAP_TRACE);
/* else this must be the start of a syscall - just let it run */
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 2d14f17838d2..87ae476d1c4f 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -40,7 +40,7 @@ static DEFINE_SPINLOCK(unwind_lock);
* we can call unwind_init as early in the bootup process as
* possible (before the slab allocator is initialized)
*/
-static struct unwind_table kernel_unwind_table __read_mostly;
+static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);
static inline const struct unwind_table_entry *
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 134393de69d2..20e39b043a60 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -2,5 +2,5 @@
# Makefile for arch/parisc/mm
#
-obj-y := init.o fault.o ioremap.o
+obj-y := init.o fault.o ioremap.o fixmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/fixmap.c b/arch/parisc/mm/fixmap.c
new file mode 100644
index 000000000000..c8d41b54fb19
--- /dev/null
+++ b/arch/parisc/mm/fixmap.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fixmaps for parisc
+ *
+ * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ */
+
+#include <linux/kprobes.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+
+void set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
+{
+ unsigned long vaddr = __fix_to_virt(idx);
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte;
+
+ if (pmd_none(*pmd))
+ pmd = pmd_alloc(NULL, pgd, vaddr);
+
+ pte = pte_offset_kernel(pmd, vaddr);
+ if (pte_none(*pte))
+ pte = pte_alloc_kernel(pmd, vaddr);
+
+ set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX));
+ flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
+
+void clear_fixmap(enum fixed_addresses idx)
+{
+ unsigned long vaddr = __fix_to_virt(idx);
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte = pte_offset_kernel(pmd, vaddr);
+
+ pte_clear(&init_mm, vaddr, pte);
+
+ flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
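set_fixmap()/clear_fixmap() give patch_map() in kernel/patch.c a short-lived writable alias for a page whose regular kernel mapping may be read-only under STRICT_KERNEL_RWX. Reduced to its essentials, the poking pattern looks like the sketch below; this restates what __patch_text() already does and is not an additional interface:

#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>

/* Write one instruction word through a temporary FIX_TEXT_POKE0 alias. */
static void poke_word(struct page *page, unsigned long offset, u32 insn)
{
	void *alias;

	set_fixmap(FIX_TEXT_POKE0, page_to_phys(page));
	alias = (void *)(__fix_to_virt(FIX_TEXT_POKE0) + offset);
	*(u32 *)alias = insn;
	flush_kernel_vmap_range(alias, sizeof(u32));
	clear_fixmap(FIX_TEXT_POKE0);
}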
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index d77479ae3af2..d578809e55cf 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -139,9 +139,9 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
{
unsigned long flags;
- purge_tlb_start(flags);
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
__set_huge_pte_at(mm, addr, ptep, entry);
- purge_tlb_end(flags);
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
@@ -151,10 +151,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
unsigned long flags;
pte_t entry;
- purge_tlb_start(flags);
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
entry = *ptep;
__set_huge_pte_at(mm, addr, ptep, __pte(0));
- purge_tlb_end(flags);
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return entry;
}
@@ -166,10 +166,10 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long flags;
pte_t old_pte;
- purge_tlb_start(flags);
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
old_pte = *ptep;
__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
- purge_tlb_end(flags);
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -178,13 +178,14 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
{
unsigned long flags;
int changed;
+ struct mm_struct *mm = vma->vm_mm;
- purge_tlb_start(flags);
+ spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);
changed = !pte_same(*ptep, pte);
if (changed) {
- __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(mm, addr, ptep, pte);
}
- purge_tlb_end(flags);
+ spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);
return changed;
}
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index d0b166256f1a..ddca8287d43b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -32,6 +32,7 @@
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
+#include <asm/sparsemem.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@@ -48,11 +49,6 @@ pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
-#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
-#endif
-
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
@@ -70,17 +66,17 @@ static struct resource pdcdata_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
/* The following array is initialized from the firmware specific
* information retrieved in kernel/inventory.c.
*/
-physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
-int npmem_ranges __read_mostly;
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
+int npmem_ranges __initdata;
#ifdef CONFIG_64BIT
-#define MAX_MEM (~0UL)
+#define MAX_MEM (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */
@@ -119,7 +115,7 @@ static void __init mem_limit_func(void)
static void __init setup_bootmem(void)
{
unsigned long mem_max;
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
int npmem_holes;
#endif
@@ -137,23 +133,20 @@ static void __init setup_bootmem(void)
int j;
for (j = i; j > 0; j--) {
- unsigned long tmp;
+ physmem_range_t tmp;
if (pmem_ranges[j-1].start_pfn <
pmem_ranges[j].start_pfn) {
break;
}
- tmp = pmem_ranges[j-1].start_pfn;
- pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
- pmem_ranges[j].start_pfn = tmp;
- tmp = pmem_ranges[j-1].pages;
- pmem_ranges[j-1].pages = pmem_ranges[j].pages;
- pmem_ranges[j].pages = tmp;
+ tmp = pmem_ranges[j-1];
+ pmem_ranges[j-1] = pmem_ranges[j];
+ pmem_ranges[j] = tmp;
}
}
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
/*
* Throw out ranges that are too far apart (controlled by
* MAX_GAP).
@@ -165,7 +158,7 @@ static void __init setup_bootmem(void)
pmem_ranges[i-1].pages) > MAX_GAP) {
npmem_ranges = i;
printk("Large gap in memory detected (%ld pages). "
- "Consider turning on CONFIG_DISCONTIGMEM\n",
+ "Consider turning on CONFIG_SPARSEMEM\n",
pmem_ranges[i].start_pfn -
(pmem_ranges[i-1].start_pfn +
pmem_ranges[i-1].pages));
@@ -230,9 +223,8 @@ static void __init setup_bootmem(void)
printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
/* Merge the ranges, keeping track of the holes */
-
{
unsigned long end_pfn;
unsigned long hole_pages;
@@ -255,18 +247,6 @@ static void __init setup_bootmem(void)
}
#endif
-#ifdef CONFIG_DISCONTIGMEM
- for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
- memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- }
- memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
-
- for (i = 0; i < npmem_ranges; i++) {
- node_set_state(i, N_NORMAL_MEMORY);
- node_set_online(i);
- }
-#endif
-
/*
* Initialize and free the full range of memory in each range.
*/
@@ -314,7 +294,7 @@ static void __init setup_bootmem(void)
memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
(unsigned long)(_end - KERNEL_BINARY_TEXT_START));
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
/* reserve the holes */
@@ -360,19 +340,13 @@ static void __init setup_bootmem(void)
/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
pdc_pdt_init();
-}
-static int __init parisc_text_address(unsigned long vaddr)
-{
- static unsigned long head_ptr __initdata;
-
- if (!head_ptr)
- head_ptr = PAGE_MASK & (unsigned long)
- dereference_function_descriptor(&parisc_kernel_start);
-
- return core_kernel_text(vaddr) || vaddr == head_ptr;
+ memblock_allow_resize();
+ memblock_dump_all();
}
+static bool kernel_set_to_readonly;
+
static void __init map_pages(unsigned long start_vaddr,
unsigned long start_paddr, unsigned long size,
pgprot_t pgprot, int force)
@@ -389,10 +363,11 @@ static void __init map_pages(unsigned long start_vaddr,
unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
- unsigned long kernel_end;
+ unsigned long kernel_start, kernel_end;
ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
+ kernel_start = __pa((unsigned long)&__init_begin);
kernel_end = __pa((unsigned long)&_end);
end_paddr = start_paddr + size;
@@ -455,26 +430,30 @@ static void __init map_pages(unsigned long start_vaddr,
pg_table = (pte_t *) __va(pg_table) + start_pte;
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;
-
- if (force)
- pte = __mk_pte(address, pgprot);
- else if (parisc_text_address(vaddr)) {
- pte = __mk_pte(address, PAGE_KERNEL_EXEC);
- if (address >= ro_start && address < kernel_end)
- pte = pte_mkhuge(pte);
+ pgprot_t prot;
+ bool huge = false;
+
+ if (force) {
+ prot = pgprot;
+ } else if (address < kernel_start || address >= kernel_end) {
+ /* outside kernel memory */
+ prot = PAGE_KERNEL;
+ } else if (!kernel_set_to_readonly) {
+ /* still initializing, allow writing to RO memory */
+ prot = PAGE_KERNEL_RWX;
+ huge = true;
+ } else if (address >= ro_start) {
+ /* Code (ro) and Data areas */
+ prot = (address < ro_end) ?
+ PAGE_KERNEL_EXEC : PAGE_KERNEL;
+ huge = true;
+ } else {
+ prot = PAGE_KERNEL;
}
- else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
- if (address >= ro_start && address < ro_end) {
- pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+
+ pte = __mk_pte(address, prot);
+ if (huge)
pte = pte_mkhuge(pte);
- } else
-#endif
- {
- pte = __mk_pte(address, pgprot);
- if (address >= ro_start && address < kernel_end)
- pte = pte_mkhuge(pte);
- }
if (address >= end_paddr)
break;
@@ -495,7 +474,7 @@ static void __init map_pages(unsigned long start_vaddr,
void __init set_kernel_text_rw(int enable_read_write)
{
- unsigned long start = (unsigned long) _text;
+ unsigned long start = (unsigned long) __init_begin;
unsigned long end = (unsigned long) &data_start;
map_pages(start, __pa(start), end-start,
@@ -510,6 +489,12 @@ void __ref free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
+ unsigned long kernel_end = (unsigned long)&_end;
+
+ /* Remap kernel text and data, but do not touch init section yet. */
+ kernel_set_to_readonly = true;
+ map_pages(init_end, __pa(init_end), kernel_end - init_end,
+ PAGE_KERNEL, 0);
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
@@ -526,7 +511,7 @@ void __ref free_initmem(void)
PAGE_KERNEL, 1);
/* force the kernel to see the new TLB entries */
- __flush_tlb_range(0, init_begin, init_end);
+ __flush_tlb_range(0, init_begin, kernel_end);
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
@@ -544,8 +529,9 @@ void mark_rodata_ro(void)
{
/* rodata memory was already mapped with KERNEL_RO access rights by
pagetable_init() and map_pages(). No need to do additional stuff here */
- printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
- (unsigned long)(__end_rodata - __start_rodata) >> 10);
+ unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+
+ pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif
@@ -571,11 +557,11 @@ void mark_rodata_ro(void)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
& ~(VM_MAP_OFFSET-1)))
-void *parisc_vmalloc_start __read_mostly;
+void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);
#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __read_mostly;
+unsigned long pcxl_dma_start __ro_after_init;
#endif
void __init mem_init(void)
@@ -622,15 +608,19 @@ void __init mem_init(void)
* But keep code for debugging purposes.
*/
printk("virtual kernel memory layout:\n"
- " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
- " memory : 0x%px - 0x%px (%4ld MB)\n"
- " .init : 0x%px - 0x%px (%4ld kB)\n"
- " .data : 0x%px - 0x%px (%4ld kB)\n"
- " .text : 0x%px - 0x%px (%4ld kB)\n",
+ " vmalloc : 0x%px - 0x%px (%4ld MB)\n"
+ " fixmap : 0x%px - 0x%px (%4ld kB)\n"
+ " memory : 0x%px - 0x%px (%4ld MB)\n"
+ " .init : 0x%px - 0x%px (%4ld kB)\n"
+ " .data : 0x%px - 0x%px (%4ld kB)\n"
+ " .text : 0x%px - 0x%px (%4ld kB)\n",
(void*)VMALLOC_START, (void*)VMALLOC_END,
(VMALLOC_END - VMALLOC_START) >> 20,
+ (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
+ (unsigned long)(FIXMAP_SIZE / 1024),
+
__va(0), high_memory,
((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
@@ -645,7 +635,7 @@ void __init mem_init(void)
#endif
}
-unsigned long *empty_zero_page __read_mostly;
+unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);
/*
@@ -709,37 +699,46 @@ static void __init gateway_init(void)
PAGE_SIZE, PAGE_GATEWAY, 1);
}
-void __init paging_init(void)
+static void __init parisc_bootmem_free(void)
{
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+ unsigned long holes_size[MAX_NR_ZONES] = { 0, };
+ unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
int i;
+ for (i = 0; i < npmem_ranges; i++) {
+ unsigned long start = pmem_ranges[i].start_pfn;
+ unsigned long size = pmem_ranges[i].pages;
+ unsigned long end = start + size;
+
+ if (mem_start_pfn > start)
+ mem_start_pfn = start;
+ if (mem_end_pfn < end)
+ mem_end_pfn = end;
+ mem_size_pfn += size;
+ }
+
+ zones_size[0] = mem_end_pfn - mem_start_pfn;
+ holes_size[0] = zones_size[0] - mem_size_pfn;
+
+ free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
+}
+
+void __init paging_init(void)
+{
setup_bootmem();
pagetable_init();
gateway_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
- for (i = 0; i < npmem_ranges; i++) {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-
- zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
-
-#ifdef CONFIG_DISCONTIGMEM
- /* Need to initialize the pfnnid_map before we can initialize
- the zone */
- {
- int j;
- for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
- j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
- j++) {
- pfnnid_map[j] = i;
- }
- }
-#endif
-
- free_area_init_node(i, zones_size,
- pmem_ranges[i].start_pfn, NULL);
- }
+ /*
+ * Mark all memblocks as present for sparsemem using
+ * memory_present() and then initialize sparsemem.
+ */
+ memblocks_present();
+ sparse_init();
+ parisc_bootmem_free();
}
#ifdef CONFIG_PA20
@@ -921,10 +920,3 @@ void flush_tlb_all(void)
spin_unlock(&sid_lock);
}
#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
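Editor's note (not part of the patch): the setup_bootmem() hunk above swaps whole physmem_range_t entries instead of copying each member, and the new parisc_bootmem_free() hands free_area_init_node() a single zone whose span and hole count are derived from the minimum and maximum PFNs of the ranges. A minimal userspace sketch of that arithmetic, with a hypothetical struct range standing in for physmem_range_t and made-up sample ranges:

/* Editor's sketch: sort ranges by whole-struct assignment, then derive one
 * zone span plus hole count, mirroring the reworked setup_bootmem() /
 * parisc_bootmem_free() logic above. */
#include <stdio.h>

struct range { unsigned long start_pfn, pages; };

int main(void)
{
	struct range ranges[] = { { 4096, 1024 }, { 0, 2048 }, { 8192, 512 } };
	int n = 3, i, j;
	unsigned long start = ~0UL, end = 0, size = 0;

	/* insertion sort, swapping whole structs as the new code does */
	for (i = 1; i < n; i++) {
		for (j = i; j > 0; j--) {
			struct range tmp;

			if (ranges[j - 1].start_pfn < ranges[j].start_pfn)
				break;
			tmp = ranges[j - 1];
			ranges[j - 1] = ranges[j];
			ranges[j] = tmp;
		}
	}

	/* one zone spanning all ranges; holes = span minus populated pages */
	for (i = 0; i < n; i++) {
		unsigned long r_end = ranges[i].start_pfn + ranges[i].pages;

		if (start > ranges[i].start_pfn)
			start = ranges[i].start_pfn;
		if (end < r_end)
			end = r_end;
		size += ranges[i].pages;
	}
	printf("zone: pfn %lu-%lu, %lu pages present, %lu pages of holes\n",
	       start, end, size, (end - start) - size);
	return 0;
}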
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2d0be82c3061..8c1c636308c8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -103,13 +103,6 @@ config LOCKDEP_SUPPORT
bool
default y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y
-
config GENERIC_LOCKBREAK
bool
default y
@@ -132,6 +125,7 @@ config PPC
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
+ select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_PTE_SPECIAL
@@ -143,6 +137,7 @@ config PPC
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_ZONE_DEVICE if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@@ -173,6 +168,7 @@ config PPC
select GENERIC_TIME_VSYSCALL
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_KASAN if PPC32
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -218,6 +214,8 @@ config PPC
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
+ select HAVE_MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select HAVE_SYSCALL_TRACEPOINTS
@@ -318,6 +316,10 @@ config ARCH_SUSPEND_POSSIBLE
(PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
|| 44x || 40x
+config ARCH_SUSPEND_NONZERO_CPU
+ def_bool y
+ depends on PPC_POWERNV || PPC_PSERIES
+
config PPC_DCR_NATIVE
bool
@@ -375,7 +377,6 @@ config ZONE_DMA
config PGTABLE_LEVELS
int
default 2 if !PPC64
- default 3 if PPC_64K_PAGES && !PPC_BOOK3S_64
default 4
source "arch/powerpc/sysdev/Kconfig"
@@ -391,7 +392,7 @@ source "kernel/Kconfig.hz"
config HUGETLB_PAGE_SIZE_VARIABLE
bool
- depends on HUGETLB_PAGE
+ depends on HUGETLB_PAGE && PPC_BOOK3S_64
default y
config MATH_EMULATION
@@ -832,9 +833,9 @@ config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
config CMDLINE
- string "Initial kernel command string"
- depends on CMDLINE_BOOL
- default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
+ string "Initial kernel command string" if CMDLINE_BOOL
+ default "console=ttyS0,9600 console=tty0 root=/dev/sda2" if CMDLINE_BOOL
+ default ""
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 4e00cb0a5464..c59920920ddc 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -117,6 +117,14 @@ config XMON_DISASSEMBLY
to say Y here, unless you're building for a memory-constrained
system.
+config XMON_DEFAULT_RO_MODE
+ bool "Restrict xmon to read-only operations by default"
+ depends on XMON
+ default y
+ help
+ Operate xmon in read-only mode. The cmdline options 'xmon=rw' and
+ 'xmon=ro' override this default.
+
config DEBUGGER
bool
depends on KGDB || XMON
@@ -361,8 +369,32 @@ config PPC_PTDUMP
If you are unsure, say N.
+config PPC_DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ depends on PPC_PTDUMP
+ help
+ Generate a warning if any W+X mappings are found at boot.
+
+ This is useful for discovering cases where the kernel is leaving
+ W+X mappings after applying NX, as such mappings are a security risk.
+
+	  Note that even if the check fails, your kernel is possibly
+	  still fine: W+X mappings are not a security hole in
+	  themselves, but they do make it easier to exploit other
+	  unfixed kernel bugs.
+
+ There is no runtime or memory usage effect of this option
+ once the kernel has booted up - it's a one time check.
+
+ If in doubt, say "Y".
+
config PPC_FAST_ENDIAN_SWITCH
bool "Deprecated fast endian-switch syscall"
depends on DEBUG_KERNEL && PPC_BOOK3S_64
help
If you're unsure what this is, say N.
+
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0xe0000000
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 7de49889bd5d..c345b79414a9 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -34,11 +34,10 @@ ifdef CONFIG_PPC_BOOK3S_32
KBUILD_CFLAGS += -mcpu=powerpc
endif
-ifeq ($(CROSS_COMPILE),)
-KBUILD_DEFCONFIG := $(shell uname -m)_defconfig
-else
-KBUILD_DEFCONFIG := ppc64_defconfig
-endif
+# If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
+# ppc64_defconfig because we have nothing better to go on.
+uname := $(shell uname -m)
+KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
ifdef CONFIG_PPC64
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
@@ -212,7 +211,7 @@ endif
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
-KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
+KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
KBUILD_AFLAGS += $(AFLAGS-y)
KBUILD_CFLAGS += $(call cc-option,-msoft-float)
KBUILD_CFLAGS += -pipe $(CFLAGS-y)
@@ -367,6 +366,10 @@ ppc32_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/book3s_32.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc_defconfig
+ppc_defconfig:
+ $(call merge_into_defconfig,book3s_32.config,)
+
PHONY += ppc64le_allmodconfig
ppc64le_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/le.config \
@@ -406,7 +409,9 @@ vdso_install:
ifdef CONFIG_PPC64
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
endif
+ifdef CONFIG_VDSO32
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+endif
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index 9d9f6f334d3c..3da3e2b1b51b 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -223,7 +223,11 @@ main(int ac, char **av)
PUT_16(E_PHNUM, np + 2);
/* write back */
- lseek(fd, (long) 0, SEEK_SET);
+ i = lseek(fd, (long) 0, SEEK_SET);
+ if (i < 0) {
+ perror("lseek");
+ exit(1);
+ }
i = write(fd, buf, n);
if (i < 0) {
perror("write");
diff --git a/arch/powerpc/boot/dts/fsl/b4qds.dtsi b/arch/powerpc/boot/dts/fsl/b4qds.dtsi
index 999efd3bc167..05be919f3545 100644
--- a/arch/powerpc/boot/dts/fsl/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4qds.dtsi
@@ -40,6 +40,7 @@
interrupt-parent = <&mpic>;
aliases {
+ crypto = &crypto;
phy_sgmii_10 = &phy_sgmii_10;
phy_sgmii_11 = &phy_sgmii_11;
phy_sgmii_1c = &phy_sgmii_1c;
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index b5cc7426c21f..3da091f651d6 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -33,7 +33,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index aac06d2ad01a..38d3d7769a2f 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -33,7 +33,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index c8e6f048a122..d427cee027a6 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -32,7 +32,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index f6dc23fef683..f593258806ad 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -33,7 +33,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 1e04122912f3..f34fee9464e5 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -33,7 +33,7 @@ CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 6c02f53271cd..6ae88d4879bf 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -34,7 +34,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_NDFC=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 1f69f4edf074..9dffb2e7f735 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -31,7 +31,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 797fc3ffddee..a42232732c6d 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -31,7 +31,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
index c03d0fb16665..9575a38c9155 100644
--- a/arch/powerpc/configs/85xx-hw.config
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -71,7 +71,7 @@ CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PLATRAM=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index dd98f43b2fb8..d70b60314dad 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -73,7 +73,7 @@ CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index 6106fadbbd8b..7037a6d8018c 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -31,7 +31,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_SOCRATES=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index 2697e4e8a761..1c63cbdc3211 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -35,8 +35,8 @@ CONFIG_MTD=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND_ECC_SMC=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_UPM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 6531139a8a8d..78f5beb2928c 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -65,7 +65,7 @@ CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_UPM=y
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index d3dd6b8865c0..151164cf8cb3 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -47,7 +47,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_NETDEVICES=y
CONFIG_NET_TULIP=y
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index e4bfb1101c0e..e4bf8aa87e60 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -46,7 +46,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ROM=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_MPC5121_NFC=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index d1b82035d35f..005d00020fb9 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -46,7 +46,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 6daa56f8895c..c0423b2cf7c0 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -51,7 +51,7 @@ CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_SLRAM=y
CONFIG_MTD_PHRAM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PASEMI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 66dd6bf45cde..db48039e0b11 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -44,7 +44,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_MTD_NAND_NDFC=m
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index ea79c519863d..62e12f61a3b2 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -217,6 +217,7 @@ CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=m
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1bcd468ab422..a887616e35a2 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -163,6 +163,8 @@ CONFIG_S2IO=m
CONFIG_MLX4_EN=m
# CONFIG_MLX4_CORE_GEN2 is not set
CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLX5_EN_RXNFC is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
CONFIG_MYRI10GE=m
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index 0153a9c6f4af..98ea4f4d3dde 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -78,16 +78,12 @@ static int __init crc_test_init(void)
pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
for (i=0; i<iterations; i++) {
- size_t len, offset;
+ size_t offset = prandom_u32_max(16);
+ size_t len = prandom_u32_max(MAX_CRC_LENGTH);
- get_random_bytes(data, MAX_CRC_LENGTH);
- get_random_bytes(&len, sizeof(len));
- get_random_bytes(&offset, sizeof(offset));
-
- len %= MAX_CRC_LENGTH;
- offset &= 15;
if (len <= offset)
continue;
+ prandom_bytes(data, len);
len -= offset;
crypto_shash_update(crct10dif_shash, data+offset, len);
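Editor's note: the test now draws offset and len directly from a bounded generator (prandom_u32_max()) rather than filling variables with raw random bytes and reducing them afterwards, and only fills the buffer up to the chosen length. A rough userspace analogue of that bounded-pick pattern; rand() stands in for the kernel's pseudo-random helpers, MAX_CRC_LENGTH is an illustrative value, and the simple modulo still carries a small bias, which is acceptable for a test like this.

/* Editor's sketch: pick bounded random offset/length up front. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define MAX_CRC_LENGTH 65535	/* illustrative; not necessarily the kernel's value */

/* same contract as prandom_u32_max(): a value in [0, ceil) */
static unsigned int bounded_rand(unsigned int ceil)
{
	return (unsigned int)rand() % ceil;
}

int main(void)
{
	int i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 5; i++) {
		size_t offset = bounded_rand(16);
		size_t len = bounded_rand(MAX_CRC_LENGTH);

		if (len <= offset)
			continue;
		len -= offset;
		printf("iteration %d: offset=%zu len=%zu\n", i, offset, len);
	}
	return 0;
}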
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index fd1d6c83f0c0..c4fa242dd652 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -1,10 +1,12 @@
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#define CHKSUM_BLOCK_SIZE 1
@@ -22,7 +24,7 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
unsigned int prealign;
unsigned int tail;
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
return __crc32c_le(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
index 02ea277863d1..e27ff16573b5 100644
--- a/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crct10dif-vpmsum_glue.c
@@ -12,11 +12,13 @@
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#define VMX_ALIGN 16
@@ -32,7 +34,7 @@ static u16 crct10dif_vpmsum(u16 crci, unsigned char const *p, size_t len)
unsigned int tail;
u32 crc = crci;
- if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || in_interrupt())
+ if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) || !crypto_simd_usable())
return crc_t10dif_generic(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index a0c132bedfae..b9f6e72bf4e5 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -8,6 +8,6 @@ generic-y += irq_regs.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += vtime.h
generic-y += msi.h
+generic-y += simd.h
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
new file mode 100644
index 000000000000..677e9babef80
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
+#define _ASM_POWERPC_BOOK3S_32_KUP_H
+
+#include <asm/book3s/32/mmu-hash.h>
+
+#ifdef __ASSEMBLY__
+
+.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
+101: mtsrin \gpr1, \gpr2
+ addi \gpr1, \gpr1, 0x111 /* next VSID */
+ rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
+ addis \gpr2, \gpr2, 0x1000 /* address of next segment */
+ bdnz 101b
+ isync
+.endm
+
+.macro kuep_lock gpr1, gpr2
+#ifdef CONFIG_PPC_KUEP
+ li \gpr1, NUM_USER_SEGMENTS
+ li \gpr2, 0
+ mtctr \gpr1
+ mfsrin \gpr1, \gpr2
+ oris \gpr1, \gpr1, SR_NX@h /* set Nx */
+ kuep_update_sr \gpr1, \gpr2
+#endif
+.endm
+
+.macro kuep_unlock gpr1, gpr2
+#ifdef CONFIG_PPC_KUEP
+ li \gpr1, NUM_USER_SEGMENTS
+ li \gpr2, 0
+ mtctr \gpr1
+ mfsrin \gpr1, \gpr2
+ rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
+ kuep_update_sr \gpr1, \gpr2
+#endif
+.endm
+
+#ifdef CONFIG_PPC_KUAP
+
+.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
+101: mtsrin \gpr1, \gpr2
+ addi \gpr1, \gpr1, 0x111 /* next VSID */
+ rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
+ addis \gpr2, \gpr2, 0x1000 /* address of next segment */
+ cmplw \gpr2, \gpr3
+ blt- 101b
+ isync
+.endm
+
+.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
+ lwz \gpr2, KUAP(\thread)
+ rlwinm. \gpr3, \gpr2, 28, 0xf0000000
+ stw \gpr2, STACK_REGS_KUAP(\sp)
+ beq+ 102f
+ li \gpr1, 0
+ stw \gpr1, KUAP(\thread)
+ mfsrin \gpr1, \gpr2
+ oris \gpr1, \gpr1, SR_KS@h /* set Ks */
+ kuap_update_sr \gpr1, \gpr2, \gpr3
+102:
+.endm
+
+.macro kuap_restore sp, current, gpr1, gpr2, gpr3
+ lwz \gpr2, STACK_REGS_KUAP(\sp)
+ rlwinm. \gpr3, \gpr2, 28, 0xf0000000
+ stw \gpr2, THREAD + KUAP(\current)
+ beq+ 102f
+ mfsrin \gpr1, \gpr2
+ rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */
+ kuap_update_sr \gpr1, \gpr2, \gpr3
+102:
+.endm
+
+.macro kuap_check current, gpr
+#ifdef CONFIG_PPC_KUAP_DEBUG
+ lwz \gpr2, KUAP(thread)
+999: twnei \gpr, 0
+ EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+#endif
+.endm
+
+#endif /* CONFIG_PPC_KUAP */
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_KUAP
+
+#include <linux/sched.h>
+
+static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+{
+ barrier(); /* make sure thread.kuap is updated before playing with SRs */
+ while (addr < end) {
+ mtsrin(sr, addr);
+ sr += 0x111; /* next VSID */
+ sr &= 0xf0ffffff; /* clear VSID overflow */
+ addr += 0x10000000; /* address of next segment */
+ }
+ isync(); /* Context sync required after mtsrin() */
+}
+
+static inline void allow_user_access(void __user *to, const void __user *from, u32 size)
+{
+ u32 addr, end;
+
+ if (__builtin_constant_p(to) && to == NULL)
+ return;
+
+ addr = (__force u32)to;
+
+ if (!addr || addr >= TASK_SIZE || !size)
+ return;
+
+ end = min(addr + size, TASK_SIZE);
+ current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
+ kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from, u32 size)
+{
+ u32 addr = (__force u32)to;
+ u32 end = min(addr + size, TASK_SIZE);
+
+ if (!addr || addr >= TASK_SIZE || !size)
+ return;
+
+ current->thread.kuap = 0;
+ kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */
+}
+
+static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+{
+ if (!is_write)
+ return false;
+
+ return WARN(!regs->kuap, "Bug: write fault blocked by segment registers !");
+}
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_32_KUP_H */
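Editor's note: in the C half of the new 32-bit KUAP header, allow_user_access() records the open window in thread.kuap as a start-segment / end-segment pair, (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf), and kuap_update_sr() then touches one segment register per 256MB segment of that window. A standalone sketch of just that arithmetic; TASK_SIZE and the sample address/size are assumptions for the demo, and no segment registers are touched here.

/* Editor's sketch: the thread.kuap encoding and the 256MB segment walk. */
#include <stdio.h>
#include <stdint.h>

#define TASK_SIZE 0xb0000000u		/* assumption for the demo */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t addr = 0x10ff0000, size = 0x30000000;	/* sample window */
	uint32_t end = min_u32(addr + size, TASK_SIZE);
	uint32_t kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
	uint32_t seg;

	printf("thread.kuap = 0x%08x\n", kuap);

	/* kuap_update_sr() updates one segment register per 256MB segment */
	for (seg = addr & 0xf0000000; seg < end; seg += 0x10000000)
		printf("  would update SR for segment 0x%08x\n", seg);
	return 0;
}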
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 5cb588395fdc..2e277ca0170f 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -10,8 +10,6 @@
* BATs
*/
-#include <asm/page.h>
-
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
@@ -49,8 +47,6 @@ struct ppc_bat {
u32 batu;
u32 batl;
};
-
-typedef pte_t *pgtable_t;
#endif /* !__ASSEMBLY__ */
/*
@@ -63,6 +59,11 @@ typedef pte_t *pgtable_t;
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read, User read */
+/* Values for Segment Registers */
+#define SR_NX 0x10000000 /* No Execute */
+#define SR_KP 0x20000000 /* User key */
+#define SR_KS 0x40000000 /* Supervisor key */
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 3633502e102c..998317702630 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -5,28 +5,6 @@
#include <linux/threads.h>
#include <linux/slab.h>
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern void __bad_pte(pmd_t *pmd);
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
@@ -59,24 +37,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
*pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm);
-void pte_frag_destroy(void *pte_frag);
-pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
-void pte_fragment_free(unsigned long *table, int kernel);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pte_fragment_free((unsigned long *)ptepage, 0);
-}
-
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
@@ -87,7 +47,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
}
}
-#define check_pgt_cache() do { } while (0)
#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
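Editor's note: the pgtable_free() that remains here still keys off index_size (zero means a page-allocator page, anything else selects PGT_CACHE(index_size)), and the comment block removed above, which appears to move to a common header in this series, explains why that index must fit in, and be maskable from, the low bits of an aligned page-table pointer. A small sketch of that low-bit tagging idea; the alignment and values below are illustrative rather than the kernel's.

/* Editor's sketch: stash a small index in the low bits of an aligned pointer. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_INDEX_SIZE 0xf	/* one less than a power of two, usable as a mask */

int main(void)
{
	/* the allocation is aligned well past MAX_INDEX_SIZE, so the low bits are free */
	void *table = aligned_alloc(64, 4096);
	unsigned int index_size = 9;
	uintptr_t tagged;

	if (!table)
		return 1;
	tagged = (uintptr_t)table | (index_size & MAX_INDEX_SIZE);
	printf("index size    = %u\n", (unsigned int)(tagged & MAX_INDEX_SIZE));
	printf("table pointer = %p (recovered %p)\n",
	       table, (void *)(tagged & ~(uintptr_t)MAX_INDEX_SIZE));
	free(table);
	return 0;
}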
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index aa8406b8f7ba..838de59f6754 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -134,15 +134,24 @@ static inline bool pte_user(pte_t pte)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+
+#endif /* !__ASSEMBLY__ */
+
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
+#include <asm/fixmap.h>
+
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP PKMAP_BASE
#else
-#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */
+#define KVIRT_TOP FIXADDR_START
#endif
/*
@@ -373,8 +382,6 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte) { return 1; }
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index cf5ba5254299..8fd8599c9395 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -2,10 +2,10 @@
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
-#define H_PTE_INDEX_SIZE 9
-#define H_PMD_INDEX_SIZE 7
-#define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 9
+#define H_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 4KB = 2MB
+#define H_PMD_INDEX_SIZE 7 // size: 8B << 7 = 1KB, maps: 2^7 x 2MB = 256MB
+#define H_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 256MB = 128GB
+#define H_PGD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps: 2^9 x 128GB = 64TB
/*
* Each context is 512TB. But on 4k we restrict our max TASK size to 64TB
@@ -13,6 +13,21 @@
*/
#define MAX_EA_BITS_PER_CONTEXT 46
+#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
+
+/*
+ * Our page tables limit us to 64TB. Hence for the kernel mapping,
+ * each MAP area is limited to 16 TB.
+ * The four map areas are: linear mapping, vmap, IO and vmemmap
+ */
+#define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
+
+/*
+ * Define the address range of the kernel non-linear virtual area
+ * 16TB
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
+
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
#define H_PMD_TABLE_SIZE (sizeof(pmd_t) << H_PMD_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index f82ee8a3b561..d1d9177d9ebd 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -2,16 +2,29 @@
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
-#define H_PTE_INDEX_SIZE 8
-#define H_PMD_INDEX_SIZE 10
-#define H_PUD_INDEX_SIZE 10
-#define H_PGD_INDEX_SIZE 8
+#define H_PTE_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 64KB = 16MB
+#define H_PMD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16MB = 16GB
+#define H_PUD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
+#define H_PGD_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 16TB = 4PB
+
/*
* Each context is 512TB size. SLB miss for first context/default context
* is handled in the hotpath.
*/
#define MAX_EA_BITS_PER_CONTEXT 49
+#define REGION_SHIFT MAX_EA_BITS_PER_CONTEXT
+
+/*
+ * We use one context for each MAP area.
+ */
+#define H_KERN_MAP_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT)
+
+/*
+ * Define the address range of the kernel non-linear virtual area
+ * 2PB
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xc008000000000000)
/*
* 64k aligned address free up few of the lower bits of RPN for us
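Editor's note: the new comments on H_*_INDEX_SIZE in hash-4k.h and hash-64k.h spell out the arithmetic: each page-table level stores 8-byte entries, so a level with index size n occupies 8 << n bytes and multiplies the mapped range by 2^n. A quick standalone recomputation of those "size"/"maps" figures:

/* Editor's sketch: recompute the table-size and coverage numbers above. */
#include <stdio.h>

static void show(const char *name, int page_shift, const int idx[4])
{
	static const char *lvl[4] = { "PTE", "PMD", "PUD", "PGD" };
	unsigned long long coverage = 1ULL << page_shift;	/* bytes mapped per PTE */
	int i;

	printf("%s pages:\n", name);
	for (i = 0; i < 4; i++) {
		coverage <<= idx[i];
		printf("  %s: table %5llu bytes, maps %llu MB\n",
		       lvl[i], 8ULL << idx[i], coverage >> 20);
	}
}

int main(void)
{
	const int idx_4k[4] = { 9, 7, 9, 9 };		/* expect 2MB, 256MB, 128GB, 64TB */
	const int idx_64k[4] = { 8, 10, 10, 8 };	/* expect 16MB, 16GB, 16TB, 4PB */

	show("4K", 12, idx_4k);
	show("64K", 16, idx_64k);
	return 0;
}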
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 54b7af6cd27f..2781ebf6add4 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -29,6 +29,10 @@
#define H_PGTABLE_EADDR_SIZE (H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE (ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
+/*
+ * Top 2 bits are ignored in page table walk.
+ */
+#define EA_MASK (~(0xcUL << 60))
/*
* We store the slot details in the second half of page table.
@@ -42,59 +46,64 @@
#endif
/*
- * Define the address range of the kernel non-linear virtual area. In contrast
- * to the linear mapping, this is managed using the kernel page tables and then
- * inserted into the hash page table to actually take effect, similarly to user
- * mappings.
+ * +------------------------------+
+ * | |
+ * | |
+ * | |
+ * +------------------------------+ Kernel virtual map end (0xc00e000000000000)
+ * | |
+ * | |
+ * | 512TB/16TB of vmemmap |
+ * | |
+ * | |
+ * +------------------------------+ Kernel vmemmap start
+ * | |
+ * | 512TB/16TB of IO map |
+ * | |
+ * +------------------------------+ Kernel IO map start
+ * | |
+ * | 512TB/16TB of vmap |
+ * | |
+ * +------------------------------+ Kernel virt start (0xc008000000000000)
+ * | |
+ * | |
+ * | |
+ * +------------------------------+ Kernel linear (0xc.....)
*/
-#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
-/*
- * Allow virtual mapping of one context size.
- * 512TB for 64K page size
- * 64TB for 4K page size
- */
-#define H_KERN_VIRT_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT)
+#define H_VMALLOC_START H_KERN_VIRT_START
+#define H_VMALLOC_SIZE H_KERN_MAP_SIZE
+#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE)
-/*
- * 8TB IO mapping size
- */
-#define H_KERN_IO_SIZE ASM_CONST(0x80000000000) /* 8T */
+#define H_KERN_IO_START H_VMALLOC_END
+#define H_KERN_IO_SIZE H_KERN_MAP_SIZE
+#define H_KERN_IO_END (H_KERN_IO_START + H_KERN_IO_SIZE)
-/*
- * The vmalloc space starts at the beginning of the kernel non-linear virtual
- * region, and occupies 504T (64K) or 56T (4K)
- */
-#define H_VMALLOC_START H_KERN_VIRT_START
-#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE - H_KERN_IO_SIZE)
-#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE)
+#define H_VMEMMAP_START H_KERN_IO_END
+#define H_VMEMMAP_SIZE H_KERN_MAP_SIZE
+#define H_VMEMMAP_END (H_VMEMMAP_START + H_VMEMMAP_SIZE)
-#define H_KERN_IO_START H_VMALLOC_END
+#define NON_LINEAR_REGION_ID(ea) ((((unsigned long)ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)
/*
* Region IDs
*/
-#define REGION_SHIFT 60UL
-#define REGION_MASK (0xfUL << REGION_SHIFT)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define VMALLOC_REGION_ID (REGION_ID(H_VMALLOC_START))
-#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID (0xfUL) /* Server only */
-#define USER_REGION_ID (0UL)
+#define USER_REGION_ID 0
+#define LINEAR_MAP_REGION_ID 1
+#define VMALLOC_REGION_ID NON_LINEAR_REGION_ID(H_VMALLOC_START)
+#define IO_REGION_ID NON_LINEAR_REGION_ID(H_KERN_IO_START)
+#define VMEMMAP_REGION_ID NON_LINEAR_REGION_ID(H_VMEMMAP_START)
+#define INVALID_REGION_ID (VMEMMAP_REGION_ID + 1)
/*
* Defines the address of the vmemap area, in its own region on
* hash table CPUs.
*/
-#define H_VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
-
#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */
-
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8
#define _PTEIDX_GROUP_IX 0x7
@@ -103,6 +112,26 @@
#define H_PUD_BAD_BITS (PMD_TABLE_SIZE-1)
#ifndef __ASSEMBLY__
+static inline int get_region_id(unsigned long ea)
+{
+ int region_id;
+ int id = (ea >> 60UL);
+
+ if (id == 0)
+ return USER_REGION_ID;
+
+ if (id != (PAGE_OFFSET >> 60))
+ return INVALID_REGION_ID;
+
+ if (ea < H_KERN_VIRT_START)
+ return LINEAR_MAP_REGION_ID;
+
+ BUILD_BUG_ON(NON_LINEAR_REGION_ID(H_VMALLOC_START) != 2);
+
+ region_id = NON_LINEAR_REGION_ID(ea);
+ return region_id;
+}
+
#define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
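Editor's note: get_region_id() above derives the region from the top nibble plus the offset from H_KERN_VIRT_START, so with the 64K-page geometry (REGION_SHIFT = 49, H_KERN_VIRT_START = 0xc008000000000000) the vmalloc, IO and vmemmap regions come out as IDs 2, 3 and 4, matching the BUILD_BUG_ON. A standalone re-derivation for spot-checking an address by hand; the constants assume the 64K-page configuration.

/* Editor's sketch: get_region_id() as standalone arithmetic (64K-page constants). */
#include <stdio.h>

#define REGION_SHIFT		49
#define H_KERN_VIRT_START	0xc008000000000000ULL
#define PAGE_OFFSET_NIBBLE	0xcULL

enum { USER_REGION_ID, LINEAR_MAP_REGION_ID, VMALLOC_REGION_ID,
       IO_REGION_ID, VMEMMAP_REGION_ID, INVALID_REGION_ID };

static int get_region_id(unsigned long long ea)
{
	unsigned long long id = ea >> 60;

	if (id == 0)
		return USER_REGION_ID;
	if (id != PAGE_OFFSET_NIBBLE)
		return INVALID_REGION_ID;
	if (ea < H_KERN_VIRT_START)
		return LINEAR_MAP_REGION_ID;
	/* non-linear regions start at 2: vmalloc, then IO, then vmemmap */
	return (int)(((ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2);
}

int main(void)
{
	unsigned long long samples[] = {
		0x0000700000000000ULL,	/* user */
		0xc000000001000000ULL,	/* linear map */
		0xc008000000001000ULL,	/* vmalloc */
		0xc00a000000001000ULL,	/* IO */
		0xc00c000000001000ULL,	/* vmemmap */
		0xd000000000000000ULL,	/* invalid after this series */
	};
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%016llx -> region %d\n", samples[i],
		       get_region_id(samples[i]));
	return 0;
}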
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index ec2a55a553c7..12e150e615b7 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -36,8 +36,8 @@ static inline int hstate_get_psize(struct hstate *hstate)
}
}
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void)
+#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
{
/*
* We used gigantic page reservation with hypervisor assist in some case.
@@ -49,7 +49,6 @@ static inline bool gigantic_page_supported(void)
return true;
}
-#endif
/* hugepd entry valid bit */
#define HUGEPD_VAL_BITS (0x8000000000000000UL)
@@ -62,4 +61,76 @@ extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t new_pte);
+/*
+ * This should work for other subarchs too. But right now we use the
+ * new format only for 64bit book3s
+ */
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+ /*
+ * We have only four bits to encode, MMU page size
+ */
+ BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
+}
+
+static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
+{
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
+}
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ if (radix_enabled())
+ return radix__flush_hugetlb_page(vma, vmaddr);
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
+
+ return hugepd_page(hpd) + idx;
+}
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS | (shift_to_mmu_psize(pshift) << 2));
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ int mmu_psize;
+
+ if (shift > SLICE_HIGH_SHIFT)
+ return -EINVAL;
+
+ mmu_psize = shift_to_mmu_psize(shift);
+
+ /*
+	 * Of the different page sizes reported by firmware, only add
+	 * hugetlb support for those that the Linux page table layout
+	 * can actually support.
+ * For now we have
+ * Radix: 2M and 1G
+ * Hash: 16M and 16G
+ */
+ if (radix_enabled()) {
+ if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
+ return -EINVAL;
+ } else {
+ if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
+ return -EINVAL;
+ }
+ return mmu_psize;
+}
+
#endif
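Editor's note: the hugepd helpers added above pack the page-table address, a valid bit (HUGEPD_VAL_BITS) and a 4-bit MMU page-size index (stored at bits 2..5, hence the << 2 and >> 2) into one hugepd word; the BUILD_BUG_ON checks that the index really fits in four bits. A sketch of that pack/unpack; the two DEMO_* masks are assumptions for the demo, not copies of the kernel's HUGEPD_ADDR_MASK / HUGEPD_SHIFT_MASK.

/* Editor's sketch: pack/unpack a hugepd-style word with assumed mask values. */
#include <stdio.h>
#include <stdint.h>

#define HUGEPD_VAL_BITS		0x8000000000000000ULL	/* valid bit, as in the patch */
#define DEMO_SHIFT_MASK		0x3cULL			/* assumed: psize index at bits 2..5 */
#define DEMO_ADDR_MASK		0x0fffffffffffffc0ULL	/* assumed: table address bits */

static uint64_t hugepd_pack(uint64_t table_pa, unsigned int psize_idx)
{
	return table_pa | HUGEPD_VAL_BITS | ((uint64_t)psize_idx << 2);
}

int main(void)
{
	uint64_t hpd = hugepd_pack(0x1f40000ULL, 8);	/* sample address and index */

	printf("hpd         = 0x%016llx\n", (unsigned long long)hpd);
	printf("table addr  = 0x%llx\n",
	       (unsigned long long)(hpd & DEMO_ADDR_MASK));
	printf("psize index = %llu\n",
	       (unsigned long long)((hpd & DEMO_SHIFT_MASK) >> 2));
	return 0;
}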
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
new file mode 100644
index 000000000000..f254de956d6a
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
+
+#include <linux/const.h>
+
+#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
+#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
+#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
+#define AMR_KUAP_SHIFT 62
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_restore_amr gpr
+#ifdef CONFIG_PPC_KUAP
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ ld \gpr, STACK_REGS_KUAP(r1)
+ mtspr SPRN_AMR, \gpr
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
+.macro kuap_check_amr gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP_DEBUG
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ mfspr \gpr1, SPRN_AMR
+ li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+ sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
+999: tdne \gpr1, \gpr2
+ EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
+.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+#ifdef CONFIG_PPC_KUAP
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ .ifnb \msr_pr_cr
+ bne \msr_pr_cr, 99f
+ .endif
+ mfspr \gpr1, SPRN_AMR
+ std \gpr1, STACK_REGS_KUAP(r1)
+ li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
+ sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
+ cmpd \use_cr, \gpr1, \gpr2
+ beq \use_cr, 99f
+ // We don't isync here because we very recently entered via rfid
+ mtspr SPRN_AMR, \gpr2
+ isync
+99:
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_KUAP
+
+#include <asm/reg.h>
+
+/*
+ * We support individually allowing read or write, but we don't support nesting
+ * because that would require an expensive read/modify write of the AMR.
+ */
+
+static inline void set_kuap(unsigned long value)
+{
+ if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
+ return;
+
+ /*
+ * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
+ * before and after the move to AMR. See table 6 on page 1134.
+ */
+ isync();
+ mtspr(SPRN_AMR, value);
+ isync();
+}
+
+static inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ // This is written so we can resolve to a single case at build time
+ if (__builtin_constant_p(to) && to == NULL)
+ set_kuap(AMR_KUAP_BLOCK_WRITE);
+ else if (__builtin_constant_p(from) && from == NULL)
+ set_kuap(AMR_KUAP_BLOCK_READ);
+ else
+ set_kuap(0);
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ set_kuap(AMR_KUAP_BLOCKED);
+}
+
+static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+{
+ return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+ (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+ "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+}
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
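Editor's note: on radix, allow_user_access() above chooses the AMR value from the NULL-ness of the to/from pointers, so a copy in only one direction leaves the other direction blocked, and prevent_user_access() restores the fully blocked value. The table of resulting AMR values can be computed standalone from the constants in the new header; no SPR access is involved in this sketch.

/* Editor's sketch: the AMR values the KUAP helpers above would program. */
#include <stdio.h>
#include <stdint.h>

#define AMR_KUAP_BLOCK_READ	0x4000000000000000ULL
#define AMR_KUAP_BLOCK_WRITE	0x8000000000000000ULL
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

static uint64_t amr_for_access(int to_is_null, int from_is_null)
{
	if (to_is_null)			/* copy from userspace only */
		return AMR_KUAP_BLOCK_WRITE;
	if (from_is_null)		/* copy to userspace only */
		return AMR_KUAP_BLOCK_READ;
	return 0;			/* both directions needed */
}

int main(void)
{
	printf("copy_from_user-style: AMR=0x%016llx\n",
	       (unsigned long long)amr_for_access(1, 0));
	printf("copy_to_user-style:   AMR=0x%016llx\n",
	       (unsigned long long)amr_for_access(0, 1));
	printf("copy_in_user-style:   AMR=0x%016llx\n",
	       (unsigned long long)amr_for_access(0, 0));
	printf("locked:               AMR=0x%016llx\n",
	       (unsigned long long)AMR_KUAP_BLOCKED);
	return 0;
}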
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index a28a28079edb..1e4705516a54 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -588,7 +588,8 @@ extern void slb_set_size(u16 size);
#endif
#define MAX_VMALLOC_CTX_CNT 1
-#define MAX_MEMMAP_CTX_CNT 1
+#define MAX_IO_CTX_CNT 1
+#define MAX_VMEMMAP_CTX_CNT 1
/*
* 256MB segment
@@ -601,13 +602,10 @@ extern void slb_set_size(u16 size);
* would give a protovsid of 0x1fffffffff. That will result in a VSID 0
* because of the modulo operation in vsid scramble.
*
- * We add one extra context to MIN_USER_CONTEXT so that we can map kernel
- * context easily. The +1 is to map the unused 0xe region mapping.
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
- MAX_MEMMAP_CTX_CNT + 2)
-
+ MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
/*
* For platforms that support on 65bit VA we limit the context bits
*/
@@ -657,8 +655,8 @@ extern void slb_set_size(u16 size);
/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
-#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
-
+#define LOW_SLICE_ARRAY_SZ (BITS_PER_LONG / BITS_PER_BYTE)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_SUBPAGE_PROT
@@ -687,12 +685,41 @@ struct subpage_prot_table {
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
extern void subpage_prot_free(struct mm_struct *mm);
-extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
-static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
+/*
+ * One bit per slice. We have lower slices which cover 256MB segments
+ * upto 4G range. That gets us 16 low slices. For the rest we track slices
+ * in 1TB size.
+ */
+struct slice_mask {
+ u64 low_slices;
+ DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
+};
+
+struct hash_mm_context {
+ u16 user_psize; /* page size index */
+
+ /* SLB page size encodings*/
+ unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
+ unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
+ unsigned long slb_addr_limit;
+#ifdef CONFIG_PPC_64K_PAGES
+ struct slice_mask mask_64k;
+#endif
+ struct slice_mask mask_4k;
+#ifdef CONFIG_HUGETLB_PAGE
+ struct slice_mask mask_16m;
+ struct slice_mask mask_16g;
+#endif
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+ struct subpage_prot_table *spt;
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
+};
+
#if 0
/*
* The code below is equivalent to this function for arguments
@@ -747,7 +774,7 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
/*
* Bad address. We return VSID 0 for that
*/
- if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
+ if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
return 0;
if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
@@ -774,28 +801,29 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
* 0x00002 - [ 0xc002000000000000 - 0xc003ffffffffffff]
* 0x00003 - [ 0xc004000000000000 - 0xc005ffffffffffff]
* 0x00004 - [ 0xc006000000000000 - 0xc007ffffffffffff]
-
- * 0x00005 - [ 0xd000000000000000 - 0xd001ffffffffffff ]
- * 0x00006 - Not used - Can map 0xe000000000000000 range.
- * 0x00007 - [ 0xf000000000000000 - 0xf001ffffffffffff ]
*
- * So we can compute the context from the region (top nibble) by
- * subtracting 11, or 0xc - 1.
+ * vmap, IO, vmemap
+ *
+ * 0x00005 - [ 0xc008000000000000 - 0xc009ffffffffffff]
+ * 0x00006 - [ 0xc00a000000000000 - 0xc00bffffffffffff]
+ * 0x00007 - [ 0xc00c000000000000 - 0xc00dffffffffffff]
+ *
*/
static inline unsigned long get_kernel_context(unsigned long ea)
{
- unsigned long region_id = REGION_ID(ea);
+ unsigned long region_id = get_region_id(ea);
unsigned long ctx;
/*
- * For linear mapping we do support multiple context
+	 * Depending on the kernel config, the kernel region can have
+	 * one context or more.
*/
- if (region_id == KERNEL_REGION_ID) {
+ if (region_id == LINEAR_MAP_REGION_ID) {
/*
* We already verified ea to be not beyond the addr limit.
*/
- ctx = 1 + ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT);
+ ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
} else
- ctx = (region_id - 0xc) + MAX_KERNEL_CTX_CNT;
+ ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
return ctx;
}
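Editor's note: with the context table in the comment above, kernel contexts 1..4 cover the four 0xc000... linear chunks and contexts 5..7 cover vmalloc, IO and vmemmap, which is exactly the arithmetic get_kernel_context() now performs. A standalone version for spot-checking an address, assuming the 64K-page constants (MAX_EA_BITS_PER_CONTEXT = 49) and MAX_KERNEL_CTX_CNT = 4 as that table implies.

/* Editor's sketch: get_kernel_context() as standalone arithmetic. */
#include <stdio.h>

#define MAX_EA_BITS_PER_CONTEXT	49
#define MAX_KERNEL_CTX_CNT	4
#define EA_MASK			(~(0xcULL << 60))
#define H_KERN_VIRT_START	0xc008000000000000ULL
#define REGION_SHIFT		49

static unsigned long long kernel_context(unsigned long long ea)
{
	if (ea < H_KERN_VIRT_START)	/* linear map: one context per chunk */
		return 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	/* vmalloc/IO/vmemmap: region id 2/3/4 -> context 5/6/7 */
	return (((ea - H_KERN_VIRT_START) >> REGION_SHIFT) + 2)
		+ MAX_KERNEL_CTX_CNT - 1;
}

int main(void)
{
	unsigned long long samples[] = {
		0xc000000000000000ULL, 0xc002000000000000ULL,
		0xc008000000000000ULL, 0xc00a000000000000ULL,
		0xc00c000000000000ULL,
	};
	for (unsigned i = 0; i < 5; i++)
		printf("ea 0x%016llx -> kernel context %llu\n",
		       samples[i], kernel_context(samples[i]));
	return 0;
}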
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 1ceee000c18d..74d24201fc4f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -25,15 +25,22 @@ struct mmu_psize_def {
};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+#endif /* __ASSEMBLY__ */
/*
- * For BOOK3s 64 with 4k and 64K linux page size
- * we want to use pointers, because the page table
- * actually store pfn
+ * If we store section details in page->flags, we can't increase MAX_PHYSMEM_BITS:
+ * if we increased SECTIONS_WIDTH we would not store node details in page->flags,
+ * and page_to_nid() would have to do a page->section->node lookup instead.
+ * Hence only increase MAX_PHYSMEM_BITS for VMEMMAP. Further, depending on
+ * SPARSEMEM_EXTREME, reduce the memory requirements when there is a large
+ * number of sections.
+ * 51 bits is the max physical real address on POWER9
*/
-typedef pte_t *pgtable_t;
-
-#endif /* __ASSEMBLY__ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
+ defined(CONFIG_PPC_64K_PAGES)
+#define MAX_PHYSMEM_BITS 51
+#else
+#define MAX_PHYSMEM_BITS 46
+#endif
/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>
@@ -89,16 +96,6 @@ struct spinlock;
/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8
-/*
- * One bit per slice. We have lower slices which cover 256MB segments
- * upto 4G range. That gets us 16 low slices. For the rest we track slices
- * in 1TB size.
- */
-struct slice_mask {
- u64 low_slices;
- DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
-};
-
typedef struct {
union {
/*
@@ -112,7 +109,6 @@ typedef struct {
mm_context_id_t id;
mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
};
- u16 user_psize; /* page size index */
/* Number of bits in the mm_cpumask */
atomic_t active_cpus;
@@ -122,27 +118,9 @@ typedef struct {
/* NPU NMMU context */
struct npu_context *npu_context;
+ struct hash_mm_context *hash_context;
-#ifdef CONFIG_PPC_MM_SLICES
- /* SLB page size encodings*/
- unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
- unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long slb_addr_limit;
-# ifdef CONFIG_PPC_64K_PAGES
- struct slice_mask mask_64k;
-# endif
- struct slice_mask mask_4k;
-# ifdef CONFIG_HUGETLB_PAGE
- struct slice_mask mask_16m;
- struct slice_mask mask_16g;
-# endif
-#else
- u16 sllp; /* SLB page size encoding */
-#endif
unsigned long vdso_base;
-#ifdef CONFIG_PPC_SUBPAGE_PROT
- struct subpage_prot_table spt;
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
/*
* pagetable fragment support
*/
@@ -163,6 +141,60 @@ typedef struct {
#endif
} mm_context_t;
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+ return ctx->hash_context->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+ ctx->hash_context->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+ return ctx->hash_context->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+ return ctx->hash_context->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+ return ctx->hash_context->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+ ctx->hash_context->slb_addr_limit = limit;
+}
+
+static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
+{
+#ifdef CONFIG_PPC_64K_PAGES
+ if (psize == MMU_PAGE_64K)
+ return &ctx->hash_context->mask_64k;
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ if (psize == MMU_PAGE_16M)
+ return &ctx->hash_context->mask_16m;
+ if (psize == MMU_PAGE_16G)
+ return &ctx->hash_context->mask_16g;
+#endif
+ BUG_ON(psize != MMU_PAGE_4K);
+
+ return &ctx->hash_context->mask_4k;
+}
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
+{
+ return ctx->hash_context->spt;
+}
+#endif
+
/*
* The current system page and segment sizes
*/
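
The accessors above funnel all hash-specific per-mm state through ctx->hash_context instead of embedding it in mm_context_t. A rough standalone sketch of that indirection follows; the struct layout and field names below are simplified assumptions for illustration only, not the real struct hash_mm_context:

#include <stdio.h>

/* Simplified stand-ins; field names are assumptions for illustration only. */
struct slice_mask { unsigned long low_slices; };

struct hash_mm_context {
	unsigned short user_psize;
	struct slice_mask mask_4k;
	struct slice_mask mask_64k;
};

typedef struct { struct hash_mm_context *hash_context; } mm_context_t;

enum { MMU_PAGE_4K, MMU_PAGE_64K };

/* Mirrors the accessor pattern: callers never touch hash_context directly. */
static unsigned short mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->hash_context->user_psize;
}

static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
	if (psize == MMU_PAGE_64K)
		return &ctx->hash_context->mask_64k;
	return &ctx->hash_context->mask_4k;
}

int main(void)
{
	struct hash_mm_context hctx = { .user_psize = MMU_PAGE_64K };
	mm_context_t ctx = { .hash_context = &hctx };

	printf("user psize index: %u\n", mm_ctx_user_psize(&ctx));
	printf("64K mask lives at %p (inside hash_context at %p)\n",
	       (void *)slice_mask_for_size(&ctx, MMU_PAGE_64K), (void *)&hctx);
	return 0;
}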
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 138bc2ecc0c4..d45e4449619f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -19,29 +19,7 @@ struct vmemmap_backing {
};
extern struct vmemmap_backing *vmemmap_list;
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
-extern pte_t *pte_fragment_alloc(struct mm_struct *, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
-extern void pte_fragment_free(unsigned long *, int);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
@@ -81,6 +59,9 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
pgtable_gfp_flags(mm, GFP_KERNEL));
+ if (unlikely(!pgd))
+ return pgd;
+
/*
* Don't scan the PGD for pointers, it contains references to PUDs but
* those references are not full pointers and so can't be recognised by
@@ -185,31 +166,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
- return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)pte_fragment_alloc(mm, 1);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- return (pgtable_t)pte_fragment_alloc(mm, 0);
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pte_fragment_free((unsigned long *)ptepage, 0);
-}
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
@@ -221,8 +177,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
pgtable_free_tlb(tlb, table, PTE_INDEX);
}
-#define check_pgt_cache() do { } while (0)
-
extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
static inline void update_page_count(int psize, long count)
{
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 581f91be9dd4..7dede2e34b70 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -277,9 +277,11 @@ extern unsigned long __vmalloc_end;
extern unsigned long __kernel_virt_start;
extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
+extern unsigned long __kernel_io_end;
#define KERN_VIRT_START __kernel_virt_start
-#define KERN_VIRT_SIZE __kernel_virt_size
#define KERN_IO_START __kernel_io_start
+#define KERN_IO_END __kernel_io_end
+
extern struct page *vmemmap;
extern unsigned long ioremap_bot;
extern unsigned long pci_io_base;
@@ -296,8 +298,7 @@ extern unsigned long pci_io_base;
#include <asm/barrier.h>
/*
- * The second half of the kernel virtual space is used for IO mappings,
- * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * The IO space itself is carved into the PIO region (ISA and PHB IO space) and
* the ioremap space
*
* ISA_IO_BASE = KERN_IO_START, 64K reserved area
@@ -310,7 +311,7 @@ extern unsigned long pci_io_base;
#define PHB_IO_BASE (ISA_IO_END)
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
+#define IOREMAP_END (KERN_IO_END)
/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP
@@ -992,7 +993,8 @@ extern struct page *pgd_page(pgd_t pgd);
(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while(0)
+
+static inline void pte_unmap(pte_t *pte) { }
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h
index 863c3e8286fb..d5f5ab73dc7f 100644
--- a/arch/powerpc/include/asm/book3s/64/radix-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h
@@ -5,10 +5,11 @@
/*
* For 4K page size supported index is 13/9/9/9
*/
-#define RADIX_PTE_INDEX_SIZE 9 /* 2MB huge page */
-#define RADIX_PMD_INDEX_SIZE 9 /* 1G huge page */
-#define RADIX_PUD_INDEX_SIZE 9
-#define RADIX_PGD_INDEX_SIZE 13
+#define RADIX_PTE_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 4K = 2MB
+#define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
+#define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
+#define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
+
/*
 * One fragment per page
*/
diff --git a/arch/powerpc/include/asm/book3s/64/radix-64k.h b/arch/powerpc/include/asm/book3s/64/radix-64k.h
index ccb78ca9d0c5..54e33828b0fb 100644
--- a/arch/powerpc/include/asm/book3s/64/radix-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/radix-64k.h
@@ -5,10 +5,10 @@
/*
* For 64K page size supported index is 13/9/9/5
*/
-#define RADIX_PTE_INDEX_SIZE 5 /* 2MB huge page */
-#define RADIX_PMD_INDEX_SIZE 9 /* 1G huge page */
-#define RADIX_PUD_INDEX_SIZE 9
-#define RADIX_PGD_INDEX_SIZE 13
+#define RADIX_PTE_INDEX_SIZE 5 // size: 8B << 5 = 256B, maps 2^5 x 64K = 2MB
+#define RADIX_PMD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 2MB = 1GB
+#define RADIX_PUD_INDEX_SIZE 9 // size: 8B << 9 = 4KB, maps 2^9 x 1GB = 512GB
+#define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
/*
* We use a 256 byte PTE page fragment in radix
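
The per-level comments can be cross-checked with a little arithmetic: each table holds 2^INDEX_SIZE eight-byte entries, and the index bits plus the page shift add up to the 52-bit radix virtual address space for both page sizes. A small sketch, with the constants copied from the two headers above:

#include <stdio.h>

static void show(const char *cfg, int page_shift,
		 int pte, int pmd, int pud, int pgd)
{
	int levels[4] = { pte, pmd, pud, pgd };
	const char *names[4] = { "PTE", "PMD", "PUD", "PGD" };
	unsigned long mapped_shift = page_shift;

	printf("%s (page shift %d):\n", cfg, page_shift);
	for (int i = 0; i < 4; i++) {
		mapped_shift += levels[i];
		printf("  %s index %2d -> table %6lu bytes, maps 2^%lu bytes\n",
		       names[i], levels[i], 8UL << levels[i], mapped_shift);
	}
}

int main(void)
{
	show("radix-4k",  12, 9, 9, 9, 13);	/* 12+9+9+9+13 = 52 bits */
	show("radix-64k", 16, 5, 9, 9, 13);	/* 16+5+9+9+13 = 52 bits */
	return 0;
}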
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 5ab134eeed20..574eca33f893 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -72,19 +72,17 @@
* | |
* | |
* | |
- * +------------------------------+ Kernel IO map end (0xc010000000000000)
+ * +------------------------------+ Kernel vmemmap end (0xc010000000000000)
* | |
+ * | 512TB |
* | |
- * | 1/2 of virtual map |
+ * +------------------------------+ Kernel IO map end / vmemmap start
* | |
+ * | 512TB |
* | |
- * +------------------------------+ Kernel IO map start
+ * +------------------------------+ Kernel vmalloc end / IO map start
* | |
- * | 1/4 of virtual map |
- * | |
- * +------------------------------+ Kernel vmemap start
- * | |
- * | 1/4 of virtual map |
+ * | 512TB |
* | |
* +------------------------------+ Kernel virt start (0xc008000000000000)
* | |
@@ -93,24 +91,24 @@
* +------------------------------+ Kernel linear (0xc.....)
*/
-#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
-#define RADIX_KERN_VIRT_SIZE ASM_CONST(0x0008000000000000)
-
+#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
/*
- * The vmalloc space starts at the beginning of that region, and
- * occupies a quarter of it on radix config.
- * (we keep a quarter for the virtual memmap)
+ * 49 == MAX_EA_BITS_PER_CONTEXT (hash specific), so that radix uses
+ * the same map size as hash.
*/
+#define RADIX_KERN_MAP_SIZE (1UL << 49)
+
#define RADIX_VMALLOC_START RADIX_KERN_VIRT_START
-#define RADIX_VMALLOC_SIZE (RADIX_KERN_VIRT_SIZE >> 2)
+#define RADIX_VMALLOC_SIZE RADIX_KERN_MAP_SIZE
#define RADIX_VMALLOC_END (RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)
-/*
- * Defines the address of the vmemap area, in its own region on
- * hash table CPUs.
- */
-#define RADIX_VMEMMAP_BASE (RADIX_VMALLOC_END)
-#define RADIX_KERN_IO_START (RADIX_KERN_VIRT_START + (RADIX_KERN_VIRT_SIZE >> 1))
+#define RADIX_KERN_IO_START RADIX_VMALLOC_END
+#define RADIX_KERN_IO_SIZE RADIX_KERN_MAP_SIZE
+#define RADIX_KERN_IO_END (RADIX_KERN_IO_START + RADIX_KERN_IO_SIZE)
+
+#define RADIX_VMEMMAP_START RADIX_KERN_IO_END
+#define RADIX_VMEMMAP_SIZE RADIX_KERN_MAP_SIZE
+#define RADIX_VMEMMAP_END (RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE)
#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE (sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
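
With the new defines, vmalloc, the IO map and vmemmap each get one 2^49-byte (512TB) slot laid out back to back from RADIX_KERN_VIRT_START. A quick sketch that prints the resulting boundaries, using only the two constants above:

#include <stdio.h>

#define RADIX_KERN_VIRT_START	0xc008000000000000ULL
#define RADIX_KERN_MAP_SIZE	(1ULL << 49)	/* 512TB per map */

int main(void)
{
	unsigned long long vmalloc_start = RADIX_KERN_VIRT_START;
	unsigned long long vmalloc_end   = vmalloc_start + RADIX_KERN_MAP_SIZE;
	unsigned long long io_start      = vmalloc_end;
	unsigned long long io_end        = io_start + RADIX_KERN_MAP_SIZE;
	unsigned long long vmemmap_start = io_end;
	unsigned long long vmemmap_end   = vmemmap_start + RADIX_KERN_MAP_SIZE;

	printf("vmalloc: 0x%016llx - 0x%016llx\n", vmalloc_start, vmalloc_end);
	printf("IO map : 0x%016llx - 0x%016llx\n", io_start, io_end);
	printf("vmemmap: 0x%016llx - 0x%016llx\n", vmemmap_start, vmemmap_end);
	return 0;
}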
diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
index db0dedab65ee..f0d3194ba41b 100644
--- a/arch/powerpc/include/asm/book3s/64/slice.h
+++ b/arch/powerpc/include/asm/book3s/64/slice.h
@@ -2,8 +2,6 @@
#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
#define _ASM_POWERPC_BOOK3S_64_SLICE_H
-#ifdef CONFIG_PPC_MM_SLICES
-
#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
@@ -13,15 +11,6 @@
#define SLICE_NUM_HIGH (H_PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
-#else /* CONFIG_PPC_MM_SLICES */
-
-#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
-#define slice_set_user_psize(mm, psize) \
-do { \
- (mm)->context.user_psize = (psize); \
- (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
-} while (0)
-
-#endif /* CONFIG_PPC_MM_SLICES */
+#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64
#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 43e5f31fe64d..9844b3ded187 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -27,10 +27,11 @@
* the THREAD_WINKLE_BITS are set, which indicate which threads have not
* yet woken from the winkle state.
*/
-#define PNV_CORE_IDLE_LOCK_BIT 0x10000000
+#define NR_PNV_CORE_IDLE_LOCK_BIT 28
+#define PNV_CORE_IDLE_LOCK_BIT (1ULL << NR_PNV_CORE_IDLE_LOCK_BIT)
+#define PNV_CORE_IDLE_WINKLE_COUNT_SHIFT 16
#define PNV_CORE_IDLE_WINKLE_COUNT 0x00010000
-#define PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT 0x00080000
#define PNV_CORE_IDLE_WINKLE_COUNT_BITS 0x000F0000
#define PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT 8
#define PNV_CORE_IDLE_THREAD_WINKLE_BITS 0x0000FF00
@@ -68,16 +69,6 @@
#define ERR_DEEP_STATE_ESL_MISMATCH -2
#ifndef __ASSEMBLY__
-/* Additional SPRs that need to be saved/restored during stop */
-struct stop_sprs {
- u64 pid;
- u64 ldbar;
- u64 fscr;
- u64 hfscr;
- u64 mmcr1;
- u64 mmcr2;
- u64 mmcra;
-};
#define PNV_IDLE_NAME_LEN 16
struct pnv_idle_states_t {
@@ -92,10 +83,6 @@ struct pnv_idle_states_t {
extern struct pnv_idle_states_t *pnv_idle_states;
extern int nr_pnv_idle_states;
-extern u32 pnv_fastsleep_workaround_at_entry[];
-extern u32 pnv_fastsleep_workaround_at_exit[];
-
-extern u64 pnv_first_deep_stop_state;
unsigned long pnv_cpu_offline(unsigned int cpu);
int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 7c1d8e74b25d..7f3279b014db 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -17,6 +17,9 @@ struct drmem_lmb {
u32 drc_index;
u32 aa_index;
u32 flags;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ int nid;
+#endif
};
struct drmem_lmb_info {
@@ -104,4 +107,22 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
lmb->aa_index = 0xffffffff;
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+static inline void lmb_set_nid(struct drmem_lmb *lmb)
+{
+ lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
+}
+static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+{
+ lmb->nid = -1;
+}
+#else
+static inline void lmb_set_nid(struct drmem_lmb *lmb)
+{
+}
+static inline void lmb_clear_nid(struct drmem_lmb *lmb)
+{
+}
+#endif
+
#endif /* _ASM_POWERPC_LMB_H */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 937bb630093f..bef4e05a6823 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -497,6 +497,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
RESTORE_CTR(r1, area); \
b bad_stack; \
3: EXCEPTION_PROLOG_COMMON_1(); \
+ kuap_save_amr_and_lock r9, r10, cr1, cr0; \
beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
SAVE_PPR(area, r9); \
@@ -691,6 +692,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
*/
#define EXCEPTION_COMMON_NORET_STACK(area, trap, label, hdlr, additions) \
EXCEPTION_PROLOG_COMMON_1(); \
+ kuap_save_amr_and_lock r9, r10, cr1; \
EXCEPTION_PROLOG_COMMON_2(area); \
EXCEPTION_PROLOG_COMMON_3(trap); \
/* Volatile regs are potentially clobbered here */ \
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 188776befaf9..e2099c0a15c3 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -219,5 +219,6 @@ extern void fadump_cleanup(void);
static inline int is_fadump_active(void) { return 0; }
static inline int should_fadump_crash(void) { return 0; }
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
+static inline void fadump_cleanup(void) { }
#endif
#endif
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 40a6c9261a6b..f6fc31f8baff 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -100,6 +100,9 @@ label##5: \
#define END_MMU_FTR_SECTION(msk, val) \
END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+#define END_MMU_FTR_SECTION_NESTED_IFSET(msk, label) \
+ END_MMU_FTR_SECTION_NESTED((msk), (msk), label)
+
#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index b9fbed84ddca..0cfc365d814b 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -22,7 +22,12 @@
#include <asm/kmap_types.h>
#endif
+#ifdef CONFIG_KASAN
+#include <asm/kasan.h>
+#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
+#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+#endif
/*
* Here we define all the compile-time 'special' virtual
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 88b38b37c21b..3a6aa57b9d90 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
+ allow_write_to_user(uaddr, sizeof(*uaddr));
pagefault_disable();
switch (op) {
@@ -62,6 +63,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
if (!ret)
*oval = oldval;
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
return ret;
}
@@ -75,6 +77,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
+ allow_write_to_user(uaddr, sizeof(*uaddr));
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -95,6 +98,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "cc", "memory");
*uval = prev;
+ prevent_write_to_user(uaddr, sizeof(*uaddr));
return ret;
}
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 8d40565ad0c3..20a101046cff 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -6,82 +6,16 @@
#include <asm/page.h>
#ifdef CONFIG_PPC_BOOK3S_64
-
#include <asm/book3s/64/hugetlb.h>
-/*
- * This should work for other subarchs too. But right now we use the
- * new format only for 64bit book3s
- */
-static inline pte_t *hugepd_page(hugepd_t hpd)
-{
- BUG_ON(!hugepd_ok(hpd));
- /*
- * We have only four bits to encode, MMU page size
- */
- BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
-}
-
-static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
-{
- return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
-}
-
-static inline unsigned int hugepd_shift(hugepd_t hpd)
-{
- return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
-}
-static inline void flush_hugetlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- if (radix_enabled())
- return radix__flush_hugetlb_page(vma, vmaddr);
-}
-
-#else
-
-static inline pte_t *hugepd_page(hugepd_t hpd)
-{
- BUG_ON(!hugepd_ok(hpd));
-#ifdef CONFIG_PPC_8xx
- return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
-#else
- return (pte_t *)((hpd_val(hpd) &
- ~HUGEPD_SHIFT_MASK) | PD_HUGE);
-#endif
-}
-
-static inline unsigned int hugepd_shift(hugepd_t hpd)
-{
-#ifdef CONFIG_PPC_8xx
- return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
-#else
- return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
-#endif
-}
-
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#include <asm/nohash/hugetlb-book3e.h>
+#elif defined(CONFIG_PPC_8xx)
+#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */
+extern bool hugetlb_disabled;
-static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
- unsigned pdshift)
-{
- /*
- * On FSL BookE, we have multiple higher-level table entries that
- * point to the same hugepte. Just use the first one since they're all
- * identical. So for that case, idx=0.
- */
- unsigned long idx = 0;
-
- pte_t *dir = hugepd_page(hpd);
-#ifdef CONFIG_PPC_8xx
- idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
-#elif !defined(CONFIG_PPC_FSL_BOOK3E)
- idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
-#endif
-
- return dir + idx;
-}
+void hugetlbpage_init_default(void);
void flush_dcache_icache_hugepage(struct page *page);
@@ -99,15 +33,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
pte_t pte);
-#ifdef CONFIG_PPC_8xx
-static inline void flush_hugetlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- flush_tlb_page(vma, vmaddr);
-}
-#else
-void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-#endif
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index ece4dc89c90b..0fe8c1e46bbc 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -90,10 +90,18 @@ static inline void hw_breakpoint_disable(void)
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
int hw_breakpoint_handler(struct die_args *args);
+extern int set_dawr(struct arch_hw_breakpoint *brk);
+extern bool dawr_force_enable;
+static inline bool dawr_enabled(void)
+{
+ return dawr_force_enable;
+}
+
#else /* CONFIG_HAVE_HW_BREAKPOINT */
static inline void hw_breakpoint_disable(void) { }
static inline void thread_change_pc(struct task_struct *tsk,
struct pt_regs *regs) { }
+static inline bool dawr_enabled(void) { return false; }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* __KERNEL__ */
#endif /* _PPC_BOOK3S_64_HW_BREAKPOINT_H */
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 69f516ecb2fd..7c2ef0e42661 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -33,6 +33,7 @@
*/
#define THREAD_IMC_LDBAR_MASK 0x0003ffffffffe000ULL
#define THREAD_IMC_ENABLE 0x8000000000000000ULL
+#define TRACE_IMC_ENABLE 0x4000000000000000ULL
/*
* For debugfs interface for imc-mode and imc-command
@@ -59,6 +60,34 @@ struct imc_events {
char *scale;
};
+/*
+ * The trace IMC hardware writes a 64-byte record on each
+ * Core Performance Monitoring Counter (CPMC) overflow.
+ * The layout of a trace IMC record is:
+ *
+ * DW 0 : Timebase
+ * DW 1 : Program Counter
+ * DW 2 : PIDR information
+ * DW 3 : CPMC1
+ * DW 4 : CPMC2
+ * DW 5 : CPMC3
+ * DW 6 : CPMC4
+ * DW 7 : Timebase
+ * .....
+ *
+ * The following is the data structure to hold trace imc data.
+ */
+struct trace_imc_data {
+ u64 tb1;
+ u64 ip;
+ u64 val;
+ u64 cpmc1;
+ u64 cpmc2;
+ u64 cpmc3;
+ u64 cpmc4;
+ u64 tb2;
+};
+
/* Event attribute array index */
#define IMC_FORMAT_ATTR 0
#define IMC_EVENT_ATTR 1
@@ -69,6 +98,13 @@ struct imc_events {
#define IMC_EVENT_OFFSET_MASK 0xffffffffULL
/*
+ * Mask for bits 0:21 of the first double word (the timebase), used to
+ * compare against the 8th double word (timebase) of the trace IMC record data.
+ */
+#define IMC_TRACE_RECORD_TB1_MASK 0x3ffffffffffULL
+
+
+/*
* Device tree parser code detects IMC pmu support and
* registers new IMC pmus. This structure will hold the
* pmu functions, events, counter memory information
@@ -113,6 +149,7 @@ struct imc_pmu_ref {
enum {
IMC_TYPE_THREAD = 0x1,
+ IMC_TYPE_TRACE = 0x2,
IMC_TYPE_CORE = 0x4,
IMC_TYPE_CHIP = 0x10,
};
@@ -123,6 +160,8 @@ enum {
#define IMC_DOMAIN_NEST 1
#define IMC_DOMAIN_CORE 2
#define IMC_DOMAIN_THREAD 3
+/* For trace-imc the domain is still thread but it operates in trace-mode */
+#define IMC_DOMAIN_TRACE 4
extern int init_imc_pmu(struct device_node *parent,
struct imc_pmu *pmu_ptr, int pmu_id);
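
For reference, the 64-byte record layout can be modelled in a standalone way. The consistency check below (comparing the masked timebase in DW0 against DW7) is only a plausible illustration of how IMC_TRACE_RECORD_TB1_MASK might be applied, not the driver's actual logic:

#include <stdio.h>
#include <stdint.h>

/* Keeps the low 42 bits, i.e. drops IBM bits 0:21 of the timebase. */
#define IMC_TRACE_RECORD_TB1_MASK	0x3ffffffffffULL

/* Mirror of the 64-byte record layout described above. */
struct trace_imc_data {
	uint64_t tb1;
	uint64_t ip;
	uint64_t val;
	uint64_t cpmc1, cpmc2, cpmc3, cpmc4;
	uint64_t tb2;
};

int main(void)
{
	struct trace_imc_data rec = {
		.tb1 = 0xffffc00123456789ULL,
		.ip  = 0xc000000000012345ULL,
		.tb2 = 0x0000000123456789ULL,
	};

	/* Hypothetical check: low timebase bits of DW0 should line up with DW7. */
	uint64_t tb1_low = rec.tb1 & IMC_TRACE_RECORD_TB1_MASK;

	printf("record is %lu bytes\n", (unsigned long)sizeof(rec));
	printf("masked tb1 = 0x%011llx, tb2 = 0x%011llx -> %s\n",
	       (unsigned long long)tb1_low, (unsigned long long)rec.tb2,
	       tb1_low == rec.tb2 ? "consistent" : "mismatch");
	return 0;
}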
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 4b73847e9b95..1fad67b46409 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -34,14 +34,11 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/delay.h>
+#include <asm/mmiowb.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/pgtable.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#endif
-
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
@@ -107,12 +104,6 @@ extern bool isa_io_special;
*
*/
-#ifdef CONFIG_PPC64
-#define IO_SET_SYNC_FLAG() do { local_paca->io_sync = 1; } while(0)
-#else
-#define IO_SET_SYNC_FLAG()
-#endif
-
#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
@@ -127,7 +118,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn" %1,%y0" \
: "=Z" (*addr) : "r" (val) : "memory"); \
- IO_SET_SYNC_FLAG(); \
+ mmiowb_set_pending(); \
}
#define DEF_MMIO_IN_D(name, size, insn) \
@@ -144,7 +135,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
: "=m" (*addr) : "r" (val) : "memory"); \
- IO_SET_SYNC_FLAG(); \
+ mmiowb_set_pending(); \
}
DEF_MMIO_IN_D(in_8, 8, lbz);
@@ -652,24 +643,6 @@ static inline void name at \
#include <asm-generic/iomap.h>
-#ifdef CONFIG_PPC32
-#define mmiowb()
-#else
-/*
- * Enforce synchronisation of stores vs. spin_unlock
- * (this does it explicitly, though our implementation of spin_unlock
- * does it implicitely too)
- */
-static inline void mmiowb(void)
-{
- unsigned long tmp;
-
- __asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
- : "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
- : "memory");
-}
-#endif /* !CONFIG_PPC32 */
-
static inline void iosync(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
new file mode 100644
index 000000000000..296e51c2f066
--- /dev/null
+++ b/arch/powerpc/include/asm/kasan.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifdef CONFIG_KASAN
+#define _GLOBAL_KASAN(fn) _GLOBAL(__##fn)
+#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(__##fn)
+#define EXPORT_SYMBOL_KASAN(fn) EXPORT_SYMBOL(__##fn)
+#else
+#define _GLOBAL_KASAN(fn) _GLOBAL(fn)
+#define _GLOBAL_TOC_KASAN(fn) _GLOBAL_TOC(fn)
+#define EXPORT_SYMBOL_KASAN(fn)
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
+ (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
+
+#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+#define KASAN_SHADOW_END 0UL
+
+#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
+
+#ifdef CONFIG_KASAN
+void kasan_early_init(void);
+void kasan_mmu_init(void);
+void kasan_init(void);
+#else
+static inline void kasan_init(void) { }
+static inline void kasan_mmu_init(void) { }
+#endif
+
+#endif /* __ASSEMBLY */
+#endif
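
With KASAN_SHADOW_SCALE_SHIFT == 3, each byte of shadow memory covers 8 bytes of kernel address space, and the shadow address is simply (addr >> 3) + KASAN_SHADOW_OFFSET. A minimal sketch of that translation; the offset and PAGE_OFFSET below are made-up placeholders, since the real values come from Kconfig and the platform:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
/* Both values below are placeholders; the real ones come from Kconfig. */
#define KASAN_SHADOW_OFFSET		0xe0000000UL
#define PAGE_OFFSET			0xc0000000UL

#define KASAN_SHADOW_START	(KASAN_SHADOW_OFFSET + \
				 (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))

static unsigned long kasan_mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long addr = PAGE_OFFSET + 0x1000;	/* some lowmem address */

	printf("shadow region starts at 0x%08lx\n", (unsigned long)KASAN_SHADOW_START);
	printf("addr 0x%08lx -> shadow byte 0x%08lx (covers 8 bytes)\n",
	       addr, kasan_mem_to_shadow(addr));
	return 0;
}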
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
new file mode 100644
index 000000000000..5b5e39643a27
--- /dev/null
+++ b/arch/powerpc/include/asm/kup.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_H_
+#define _ASM_POWERPC_KUP_H_
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/kup-radix.h>
+#endif
+#ifdef CONFIG_PPC_8xx
+#include <asm/nohash/32/kup-8xx.h>
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+#include <asm/book3s/32/kup.h>
+#endif
+
+#ifdef __ASSEMBLY__
+#ifndef CONFIG_PPC_KUAP
+.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
+.endm
+
+.macro kuap_restore sp, current, gpr1, gpr2, gpr3
+.endm
+
+.macro kuap_check current, gpr
+.endm
+
+#endif
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/pgtable.h>
+
+void setup_kup(void);
+
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled);
+#else
+static inline void setup_kuep(bool disabled) { }
+#endif /* CONFIG_PPC_KUEP */
+
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled);
+#else
+static inline void setup_kuap(bool disabled) { }
+static inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size) { }
+static inline void prevent_user_access(void __user *to, const void __user *from,
+ unsigned long size) { }
+static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write) { return false; }
+#endif /* CONFIG_PPC_KUAP */
+
+static inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+ allow_user_access(NULL, from, size);
+}
+
+static inline void allow_write_to_user(void __user *to, unsigned long size)
+{
+ allow_user_access(to, NULL, size);
+}
+
+static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+ prevent_user_access(NULL, from, size);
+}
+
+static inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+ prevent_user_access(to, NULL, size);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_KUP_H_ */
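
The allow_*/prevent_* helpers are meant to tightly bracket each user access, as the futex changes earlier in this series show. A rough userspace sketch of that bracketing discipline; the stub bodies just print, whereas on real hardware they would flip the AMR or MD_AP registers:

#include <stdio.h>
#include <string.h>

/* Stubs standing in for the real KUAP hooks. */
static void allow_write_to_user(void *to, unsigned long size)
{
	printf("KUAP: opening user window at %p (+%lu)\n", to, size);
}

static void prevent_write_to_user(void *to, unsigned long size)
{
	printf("KUAP: closing user window at %p (+%lu)\n", to, size);
}

/* copy_to_user-like helper: the user access is only valid inside the bracket. */
static int copy_to_user_demo(void *to, const void *from, unsigned long size)
{
	allow_write_to_user(to, size);
	memcpy(to, from, size);		/* the actual "user" access */
	prevent_write_to_user(to, size);
	return 0;
}

int main(void)
{
	char userbuf[16] = { 0 };

	copy_to_user_demo(userbuf, "hello", 6);
	printf("user buffer now holds: %s\n", userbuf);
	return 0;
}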
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e6b5bb012ccb..013c76a0a03e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -201,6 +201,8 @@ struct kvmppc_spapr_tce_iommu_table {
struct kref kref;
};
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
struct kvmppc_spapr_tce_table {
struct list_head list;
struct kvm *kvm;
@@ -210,6 +212,7 @@ struct kvmppc_spapr_tce_table {
u64 offset; /* in pages */
u64 size; /* window size in pages */
struct list_head iommu_tables;
+ struct mutex alloc_lock;
struct page *pages[0];
};
@@ -222,6 +225,7 @@ extern struct kvm_device_ops kvm_xics_ops;
struct kvmppc_xive;
struct kvmppc_xive_vcpu;
extern struct kvm_device_ops kvm_xive_ops;
+extern struct kvm_device_ops kvm_xive_native_ops;
struct kvmppc_passthru_irqmap;
@@ -312,7 +316,11 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
- struct kvmppc_xive *xive;
+ struct kvmppc_xive *xive; /* Current XIVE device in use */
+ struct {
+ struct kvmppc_xive *native;
+ struct kvmppc_xive *xics_on_xive;
+ } xive_devices;
struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
@@ -449,6 +457,7 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
+#define KVMPPC_IRQ_XIVE 3 /* XIVE native exploitation mode */
#define MMIO_HPTE_CACHE_SIZE 4
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ac22b28ae78d..bc892380e6cd 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -197,10 +197,6 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap);
-extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
- unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
@@ -273,6 +269,7 @@ union kvmppc_one_reg {
u64 addr;
u64 length;
} vpaval;
+ u64 xive_timaval[2];
};
struct kvmppc_ops {
@@ -480,6 +477,9 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested);
+
#else
static inline void __init kvm_cma_reserve(void)
{}
@@ -594,6 +594,22 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
+}
+
+extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_native_init_module(void);
+extern void kvmppc_xive_native_exit_module(void);
+extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val);
+
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority) { return -1; }
@@ -617,6 +633,21 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+
+static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
+ { return 0; }
+static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_native_init_module(void) { }
+static inline void kvmppc_xive_native_exit_module(void) { }
+static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return 0; }
+static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
+ union kvmppc_one_reg *val)
+{ return -ENOENT; }
+
#endif /* CONFIG_KVM_XIVE */
#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
@@ -665,6 +696,8 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 5070df19d463..c005aee5ea43 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -24,11 +24,6 @@
#include <linux/sched/task_stack.h>
#ifdef CONFIG_LIVEPATCH
-static inline int klp_check_compiler_support(void)
-{
- return 0;
-}
-
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->nip = ip;
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 17996bc9382b..23247a132ce8 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -31,7 +31,7 @@ enum MCE_Version {
enum MCE_Severity {
MCE_SEV_NO_ERROR = 0,
MCE_SEV_WARNING = 1,
- MCE_SEV_ERROR_SYNC = 2,
+ MCE_SEV_SEVERE = 2,
MCE_SEV_FATAL = 3,
};
@@ -56,6 +56,14 @@ enum MCE_ErrorType {
MCE_ERROR_TYPE_LINK = 7,
};
+enum MCE_ErrorClass {
+ MCE_ECLASS_UNKNOWN = 0,
+ MCE_ECLASS_HARDWARE,
+ MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_ECLASS_SOFTWARE,
+ MCE_ECLASS_SOFT_INDETERMINATE,
+};
+
enum MCE_UeErrorType {
MCE_UE_ERROR_INDETERMINATE = 0,
MCE_UE_ERROR_IFETCH = 1,
@@ -110,73 +118,75 @@ enum MCE_LinkErrorType {
};
struct machine_check_event {
- enum MCE_Version version:8; /* 0x00 */
- uint8_t in_use; /* 0x01 */
- enum MCE_Severity severity:8; /* 0x02 */
- enum MCE_Initiator initiator:8; /* 0x03 */
- enum MCE_ErrorType error_type:8; /* 0x04 */
- enum MCE_Disposition disposition:8; /* 0x05 */
- uint8_t reserved_1[2]; /* 0x06 */
- uint64_t gpr3; /* 0x08 */
- uint64_t srr0; /* 0x10 */
- uint64_t srr1; /* 0x18 */
- union { /* 0x20 */
+ enum MCE_Version version:8;
+ u8 in_use;
+ enum MCE_Severity severity:8;
+ enum MCE_Initiator initiator:8;
+ enum MCE_ErrorType error_type:8;
+ enum MCE_ErrorClass error_class:8;
+ enum MCE_Disposition disposition:8;
+ bool sync_error;
+ u16 cpu;
+ u64 gpr3;
+ u64 srr0;
+ u64 srr1;
+ union {
struct {
enum MCE_UeErrorType ue_error_type:8;
- uint8_t effective_address_provided;
- uint8_t physical_address_provided;
- uint8_t reserved_1[5];
- uint64_t effective_address;
- uint64_t physical_address;
- uint8_t reserved_2[8];
+ u8 effective_address_provided;
+ u8 physical_address_provided;
+ u8 reserved_1[5];
+ u64 effective_address;
+ u64 physical_address;
+ u8 reserved_2[8];
} ue_error;
struct {
enum MCE_SlbErrorType slb_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} slb_error;
struct {
enum MCE_EratErrorType erat_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} erat_error;
struct {
enum MCE_TlbErrorType tlb_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} tlb_error;
struct {
enum MCE_UserErrorType user_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} user_error;
struct {
enum MCE_RaErrorType ra_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} ra_error;
struct {
enum MCE_LinkErrorType link_error_type:8;
- uint8_t effective_address_provided;
- uint8_t reserved_1[6];
- uint64_t effective_address;
- uint8_t reserved_2[16];
+ u8 effective_address_provided;
+ u8 reserved_1[6];
+ u64 effective_address;
+ u8 reserved_2[16];
} link_error;
} u;
};
@@ -194,6 +204,8 @@ struct mce_error_info {
} u;
enum MCE_Severity severity:8;
enum MCE_Initiator initiator:8;
+ enum MCE_ErrorClass error_class:8;
+ bool sync_error;
};
#define MAX_MC_EVT 100
@@ -210,6 +222,7 @@ extern void release_mce_event(void);
extern void machine_check_queue_event(void);
extern void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode, bool in_guest);
+unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
#endif /* CONFIG_PPC_BOOK3S_64 */
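
A hedged sketch of how the new severity and error-class fields might be decoded for logging; the enum values are copied from the header above, but the string tables are illustrative only and are not the kernel's machine_check_print_event_info() output:

#include <stdio.h>

enum MCE_Severity { MCE_SEV_NO_ERROR = 0, MCE_SEV_WARNING, MCE_SEV_SEVERE, MCE_SEV_FATAL };
enum MCE_ErrorClass {
	MCE_ECLASS_UNKNOWN = 0, MCE_ECLASS_HARDWARE, MCE_ECLASS_HARD_INDETERMINATE,
	MCE_ECLASS_SOFTWARE, MCE_ECLASS_SOFT_INDETERMINATE,
};

static const char *sev_str[] = { "No error", "Warning", "Severe", "Fatal" };
static const char *class_str[] = {
	"Unknown", "Hardware error", "Probable hardware error",
	"Software error", "Probable software error",
};

int main(void)
{
	enum MCE_Severity sev = MCE_SEV_SEVERE;
	enum MCE_ErrorClass eclass = MCE_ECLASS_HARDWARE;
	int sync_error = 1;

	printf("MCE: severity=%s class=%s %s\n", sev_str[sev], class_str[eclass],
	       sync_error ? "(synchronous)" : "(asynchronous)");
	return 0;
}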
diff --git a/arch/powerpc/include/asm/mmiowb.h b/arch/powerpc/include/asm/mmiowb.h
new file mode 100644
index 000000000000..74a00127eb20
--- /dev/null
+++ b/arch/powerpc/include/asm/mmiowb.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_MMIOWB_H
+#define _ASM_POWERPC_MMIOWB_H
+
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+#include <asm/paca.h>
+
+#define arch_mmiowb_state() (&local_paca->mmiowb_state)
+#define mmiowb() mb()
+
+#endif /* CONFIG_MMIOWB */
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_POWERPC_MMIOWB_H */
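
The generic mmiowb tracking replaces the old per-paca io_sync flag; conceptually, the write barrier has to land between the last MMIO store and the spin_unlock() that publishes it to another CPU. A userspace-style sketch of that ordering, where the lock and the MMIO register are stand-ins and mmiowb() is reduced to a compiler barrier:

#include <stdio.h>

/* Stand-ins for the real primitives. */
#define mmiowb()	__asm__ __volatile__("" ::: "memory")

static volatile unsigned int fake_device_reg;
static int fake_lock;

static void spin_lock(int *l)   { *l = 1; }
static void spin_unlock(int *l) { *l = 0; }

static void writel_demo(unsigned int val, volatile unsigned int *reg)
{
	*reg = val;	/* MMIO store in the real kernel */
}

int main(void)
{
	spin_lock(&fake_lock);
	writel_demo(0xdeadbeef, &fake_device_reg);
	mmiowb();		/* order the MMIO store before the unlock */
	spin_unlock(&fake_lock);

	printf("device register now 0x%08x\n", fake_device_reg);
	return 0;
}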
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 8ddd4a91bdc1..ba94ce8c22d7 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -107,6 +107,11 @@
*/
#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
+/*
+ * Supports KUAP (key 0 controlling userspace addresses) on radix
+ */
+#define MMU_FTR_RADIX_KUAP ASM_CONST(0x80000000)
+
/* MMU feature bit sets for various CPUs */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
@@ -124,6 +129,9 @@
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>
+#include <asm/page.h>
+
+typedef pte_t *pgtable_t;
#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
@@ -164,7 +172,10 @@ enum {
#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
-#endif
+#ifdef CONFIG_PPC_KUAP
+ MMU_FTR_RADIX_KUAP |
+#endif /* CONFIG_PPC_KUAP */
+#endif /* CONFIG_PPC_RADIX_MMU */
0,
};
@@ -341,21 +352,6 @@ static inline bool strict_kernel_rwx_enabled(void)
*/
#define MMU_PAGE_COUNT 16
-/*
- * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
- * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
- * page_to_nid does a page->section->node lookup
- * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce
- * memory requirements with large number of sections.
- * 51 bits is the max physical real address on POWER9
- */
-#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
- defined (CONFIG_PPC_64K_PAGES)
-#define MAX_PHYSMEM_BITS 51
-#elif defined(CONFIG_PPC64)
-#define MAX_PHYSMEM_BITS 46
-#endif
-
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6ee8195a2ffb..58efca934311 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -52,6 +52,7 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
{
return false;
}
+static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
@@ -228,16 +229,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
#endif
}
-#ifdef CONFIG_PPC_BOOK3E_64
-static inline void arch_exit_mmap(struct mm_struct *mm)
-{
-}
-#else
extern void arch_exit_mmap(struct mm_struct *mm);
-#endif
static inline void arch_unmap(struct mm_struct *mm,
- struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
new file mode 100644
index 000000000000..a46616937d20
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H
+#define _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H
+
+#define PAGE_SHIFT_8M 23
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ BUG_ON(!hugepd_ok(hpd));
+
+ return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+
+ return hugepd_page(hpd) + idx;
+}
+
+static inline void flush_hugetlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+}
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ return shift_to_mmu_psize(shift);
+}
+
+#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
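
The hugepte_offset() arithmetic above can be checked in isolation: within a huge page directory covering 2^pdshift bytes, the index advances in PAGE_SIZE steps. A standalone sketch with assumed values (PAGE_SHIFT = 12 is used here purely for illustration):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4K pages for this sketch */

/* Index of the PTE within a huge page directory that covers 2^pdshift bytes. */
static unsigned long hugepte_idx(unsigned long addr, unsigned int pdshift)
{
	return (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned int pdshift = 23;	/* PAGE_SHIFT_8M: one 8M region */
	unsigned long base = 0x10000000UL;

	printf("addr 0x%08lx -> idx %lu\n", base,            hugepte_idx(base, pdshift));
	printf("addr 0x%08lx -> idx %lu\n", base + 0x1000,   hugepte_idx(base + 0x1000, pdshift));
	printf("addr 0x%08lx -> idx %lu\n", base + 0x7ff000, hugepte_idx(base + 0x7ff000, pdshift));
	return 0;
}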
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
new file mode 100644
index 000000000000..1c3133b5f86a
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_8XX_H_
+#define _ASM_POWERPC_KUP_8XX_H_
+
+#include <asm/bug.h>
+
+#ifdef CONFIG_PPC_KUAP
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
+ lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
+ mfspr \gpr1, SPRN_MD_AP
+ mtspr SPRN_MD_AP, \gpr2
+ stw \gpr1, STACK_REGS_KUAP(\sp)
+.endm
+
+.macro kuap_restore sp, current, gpr1, gpr2, gpr3
+ lwz \gpr1, STACK_REGS_KUAP(\sp)
+ mtspr SPRN_MD_AP, \gpr1
+.endm
+
+.macro kuap_check current, gpr
+#ifdef CONFIG_PPC_KUAP_DEBUG
+ mfspr \gpr, SPRN_MD_AP
+ rlwinm \gpr, \gpr, 16, 0xffff
+999: twnei \gpr, MD_APG_KUAP@h
+ EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/reg.h>
+
+static inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ mtspr(SPRN_MD_AP, MD_APG_INIT);
+}
+
+static inline void prevent_user_access(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+
+static inline bool bad_kuap_fault(struct pt_regs *regs, bool is_write)
+{
+ return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
+ "Bug: fault blocked by AP register !");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* _ASM_POWERPC_KUP_8XX_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 0a1a3fc54e54..76af5b0cb16e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -35,11 +35,18 @@
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
* Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => No user => 01 (all accesses performed according to page definition)
+ * 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * We define all 16 groups so that all other bits of APG can take any value
+ * 2-15 => NA => 11 (all accesses performed as user iaw page definition)
+ */
+#define MI_APG_INIT 0x4fffffff
+
+/*
+ * 0 => Kernel => 01 (all accesses performed according to page definition)
+ * 1 => User => 10 (all accesses performed according to swapped page definition)
+ * 2-15 => NA => 11 (all accesses performed as user iaw page definition)
*/
-#define MI_APG_INIT 0x44444444
+#define MI_APG_KUEP 0x6fffffff
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -108,11 +115,18 @@
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
* Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => No user => 01 (all accesses performed according to page definition)
+ * 0 => Kernel => 01 (all accesses performed according to page definition)
* 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * We define all 16 groups so that all other bits of APG can take any value
+ * 2-15 => NA => 11 (all accesses performed as user iaw page definition)
+ */
+#define MD_APG_INIT 0x4fffffff
+
+/*
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 10 (all accesses performed according to swapped page definition)
+ * 2-15 => NA => 11 (all accesses performed as user iaw page definition)
*/
-#define MD_APG_INIT 0x44444444
+#define MD_APG_KUAP 0x6fffffff
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -167,9 +181,26 @@
#ifdef CONFIG_PPC_MM_SLICES
#include <asm/nohash/32/slice.h>
#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
+#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE
#endif
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_4K
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define mmu_virtual_psize MMU_PAGE_16K
+#define PTE_FRAG_NR 4
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (1UL << 12)
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define mmu_linear_psize MMU_PAGE_8M
+
#ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
struct slice_mask {
u64 low_slices;
DECLARE_BITMAP(high_slices, 0);
@@ -185,14 +216,56 @@ typedef struct {
unsigned char high_slices_psize[0];
unsigned long slb_addr_limit;
struct slice_mask mask_base_psize; /* 4k or 16k */
-# ifdef CONFIG_HUGETLB_PAGE
struct slice_mask mask_512k;
struct slice_mask mask_8m;
-# endif
#endif
void *pte_frag;
} mm_context_t;
+#ifdef CONFIG_PPC_MM_SLICES
+static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
+{
+ return ctx->user_psize;
+}
+
+static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
+{
+ ctx->user_psize = user_psize;
+}
+
+static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
+{
+ return ctx->low_slices_psize;
+}
+
+static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
+{
+ return ctx->high_slices_psize;
+}
+
+static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
+{
+ return ctx->slb_addr_limit;
+}
+
+static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
+{
+ ctx->slb_addr_limit = limit;
+}
+
+static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
+{
+ if (psize == MMU_PAGE_512K)
+ return &ctx->mask_512k;
+ if (psize == MMU_PAGE_8M)
+ return &ctx->mask_8m;
+
+ BUG_ON(psize != mmu_virtual_psize);
+
+ return &ctx->mask_base_psize;
+}
+#endif /* CONFIG_PPC_MM_SLICES */
+
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
@@ -242,17 +315,4 @@ extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
#endif /* !__ASSEMBLY__ */
-#if defined(CONFIG_PPC_4K_PAGES)
-#define mmu_virtual_psize MMU_PAGE_4K
-#elif defined(CONFIG_PPC_16K_PAGES)
-#define mmu_virtual_psize MMU_PAGE_16K
-#define PTE_FRAG_NR 4
-#define PTE_FRAG_SIZE_SHIFT 12
-#define PTE_FRAG_SIZE (1UL << 12)
-#else
-#error "Unsupported PAGE_SIZE"
-#endif
-
-#define mmu_linear_psize MMU_PAGE_8M
-
#endif /* _ASM_POWERPC_MMU_8XX_H_ */
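
The new MI_APG_INIT and MD_APG_KUAP constants earlier in this file pack sixteen 2-bit access-protection groups into one 32-bit value. Decoding them shows group 0 = 01, group 1 = 00 (MI_APG_INIT) or 10 (MD_APG_KUAP), and the remaining groups = 11, which is what the comments describe. A small decoding sketch:

#include <stdio.h>

#define MI_APG_INIT	0x4fffffff
#define MD_APG_KUAP	0x6fffffff

static void decode_apg(const char *name, unsigned int val)
{
	printf("%s = 0x%08x:", name, val);
	/* Group 0 lives in the two most significant bits. */
	for (int group = 0; group < 16; group++) {
		unsigned int bits = (val >> (30 - 2 * group)) & 3;

		printf(" %d:%u%u", group, bits >> 1, bits & 1);
	}
	printf("\n");
}

int main(void)
{
	decode_apg("MI_APG_INIT", MI_APG_INIT);
	decode_apg("MD_APG_KUAP", MD_APG_KUAP);
	return 0;
}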
diff --git a/arch/powerpc/include/asm/nohash/32/mmu.h b/arch/powerpc/include/asm/nohash/32/mmu.h
deleted file mode 100644
index 7d94a36d57d2..000000000000
--- a/arch/powerpc/include/asm/nohash/32/mmu.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
-#define _ASM_POWERPC_NOHASH_32_MMU_H_
-
-#include <asm/page.h>
-
-#if defined(CONFIG_40x)
-/* 40x-style software loaded TLB */
-#include <asm/nohash/32/mmu-40x.h>
-#elif defined(CONFIG_44x)
-/* 44x-style software loaded TLB */
-#include <asm/nohash/32/mmu-44x.h>
-#elif defined(CONFIG_PPC_BOOK3E_MMU)
-/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
-#include <asm/nohash/mmu-book3e.h>
-#elif defined (CONFIG_PPC_8xx)
-/* Motorola/Freescale 8xx software loaded TLB */
-#include <asm/nohash/32/mmu-8xx.h>
-#endif
-
-#ifndef __ASSEMBLY__
-typedef pte_t *pgtable_t;
-#endif
-
-#endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index bd186e85b4f7..11eac371e7e0 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -6,39 +6,6 @@
#include <linux/slab.h>
/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern void __bad_pte(pmd_t *pmd);
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
- pgtable_gfp_flags(mm, GFP_KERNEL));
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
-}
-
-/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
@@ -47,96 +14,22 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pmd_free_tlb(tlb,x,a) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
-#ifndef CONFIG_BOOKE
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
- pte_t *pte)
-{
- *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pte_page)
-{
- *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
-#else
-
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
pte_t *pte)
{
- *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+ if (IS_ENABLED(CONFIG_BOOKE))
+ *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+ else
+ *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
+ if (IS_ENABLED(CONFIG_BOOKE))
+ *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
+ else
+ *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
-#endif
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm);
-void pte_frag_destroy(void *pte_frag);
-pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
-void pte_fragment_free(unsigned long *table, int kernel);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- pte_fragment_free((unsigned long *)pte, 1);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pte_fragment_free((unsigned long *)ptepage, 0);
-}
-
-static inline void pgtable_free(void *table, unsigned index_size)
-{
- if (!index_size) {
- pte_fragment_free((unsigned long *)table, 0);
- } else {
- BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
- kmem_cache_free(PGT_CACHE(index_size), table);
- }
-}
-
-#define check_pgt_cache() do { } while (0)
-#define get_hugepd_cache_index(x) (x)
-
-#ifdef CONFIG_SMP
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
- void *table, int shift)
-{
- unsigned long pgf = (unsigned long)table;
- BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
- pgf |= shift;
- tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
- void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
- unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
- pgtable_free(table, shift);
-}
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb,
- void *table, int shift)
-{
- pgtable_free(table, shift);
-}
-#endif
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
- unsigned long address)
-{
- tlb_flush_pgtable(tlb, address);
- pgtable_free_tlb(tlb, table, 0);
-}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
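
The pmd_populate*() rework above trades #ifdef CONFIG_BOOKE blocks for if (IS_ENABLED(CONFIG_BOOKE)), so both branches stay visible to the compiler. A minimal standalone imitation of that pattern; the IS_ENABLED() below is a simplified stand-in, not the kernel's kconfig.h macro:

#include <stdio.h>

/* Simplified stand-in: the real IS_ENABLED() handles defined/undefined
 * Kconfig symbols via macro tricks in include/linux/kconfig.h. */
#define CONFIG_BOOKE		0	/* flip to 1 to take the other branch */
#define IS_ENABLED(option)	(option)

static unsigned long pmd_populate_demo(unsigned long pte_va, unsigned long pte_pa)
{
	/* Both branches are compiled and type-checked; the dead one is optimised out. */
	if (IS_ENABLED(CONFIG_BOOKE))
		return pte_va | 0x1;	/* store the virtual address */
	else
		return pte_pa | 0x1;	/* store the physical address */
}

int main(void)
{
	printf("pmd value: 0x%lx\n", pmd_populate_demo(0xc0001000UL, 0x00001000UL));
	return 0;
}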
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index bed433358260..0284f8f5305f 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -64,15 +64,24 @@ extern int icache_44x_need_flush;
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+
+#endif /* !__ASSEMBLY__ */
+
+
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
+#include <asm/fixmap.h>
+
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP PKMAP_BASE
#else
-#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */
+#define KVIRT_TOP FIXADDR_START
#endif
/*
@@ -379,8 +388,6 @@ static inline int pte_young(pte_t pte)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
-
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
index 777d62e40ac0..39eb0154ae2d 100644
--- a/arch/powerpc/include/asm/nohash/32/slice.h
+++ b/arch/powerpc/include/asm/nohash/32/slice.h
@@ -13,6 +13,8 @@
#define SLICE_NUM_HIGH 0ul
#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
+#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW
+
#endif /* CONFIG_PPC_MM_SLICES */
#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/mmu.h b/arch/powerpc/include/asm/nohash/64/mmu.h
deleted file mode 100644
index e6585480dfc4..000000000000
--- a/arch/powerpc/include/asm/nohash/64/mmu.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_64_MMU_H_
-#define _ASM_POWERPC_NOHASH_64_MMU_H_
-
-/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
-#include <asm/nohash/mmu-book3e.h>
-
-#ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
-#endif
-
-#endif /* _ASM_POWERPC_NOHASH_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 66d086f85bd5..62321cd12da9 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -18,37 +18,6 @@ struct vmemmap_backing {
};
extern struct vmemmap_backing *vmemmap_list;
-/*
- * Functions that deal with pagetables that could be at any level of
- * the table need to be passed an "index_size" so they know how to
- * handle allocation. For PTE pages (which are linked to a struct
- * page for now, and drawn from the main get_free_pages() pool), the
- * allocation size will be (2^index_size * sizeof(pointer)) and
- * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
- *
- * The maximum index size needs to be big enough to allow any
- * pagetable sizes we need, but small enough to fit in the low bits of
- * any page table pointer. In other words all pagetables, even tiny
- * ones, must be aligned to allow at least enough low 0 bits to
- * contain this value. This value is also used as a mask, so it must
- * be one less than a power of two.
- */
-#define MAX_PGTABLE_INDEX_SIZE 0xf
-
-extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) pgtable_cache[shift]
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
- pgtable_gfp_flags(mm, GFP_KERNEL));
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
-}
-
#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -76,11 +45,9 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t pte_page)
{
- pmd_set(pmd, (unsigned long)page_address(pte_page));
+ pmd_set(pmd, (unsigned long)pte_page);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
@@ -92,91 +59,9 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- struct page *page;
- pte_t *pte;
-
- pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
- if (!pte)
- return NULL;
- page = virt_to_page(pte);
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- return page;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
-static inline void pgtable_free(void *table, int shift)
-{
- if (!shift) {
- pgtable_page_dtor(virt_to_page(table));
- free_page((unsigned long)table);
- } else {
- BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
- kmem_cache_free(PGT_CACHE(shift), table);
- }
-}
-
-#define get_hugepd_cache_index(x) (x)
-#ifdef CONFIG_SMP
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
- unsigned long pgf = (unsigned long)table;
-
- BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
- pgf |= shift;
- tlb_remove_table(tlb, (void *)pgf);
-}
-
-static inline void __tlb_remove_table(void *_table)
-{
- void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
- unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
- pgtable_free(table, shift);
-}
-
-#else
-static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
- pgtable_free(table, shift);
-}
-#endif
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
- unsigned long address)
-{
- tlb_flush_pgtable(tlb, address);
- pgtable_free_tlb(tlb, page_address(table), 0);
-}
-
#define __pmd_free_tlb(tlb, pmd, addr) \
pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
-#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr) \
pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#define check_pgt_cache() do { } while (0)
-
#endif /* _ASM_POWERPC_PGALLOC_64_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index e77ed9761632..b9f66cf15c31 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -10,10 +10,6 @@
#include <asm/barrier.h>
#include <asm/asm-const.h>
-#ifdef CONFIG_PPC_64K_PAGES
-#error "Page size not supported"
-#endif
-
#define FIRST_USER_ADDRESS 0UL
/*
@@ -23,11 +19,7 @@
PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PMD_CACHE_INDEX (PMD_INDEX_SIZE + 1)
-#else
#define PMD_CACHE_INDEX PMD_INDEX_SIZE
-#endif
#define PUD_CACHE_INDEX PUD_INDEX_SIZE
/*
@@ -73,7 +65,6 @@
#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID (0xfUL) /* Server only */
#define USER_REGION_ID (0UL)
/*
@@ -205,7 +196,8 @@ static inline void pgd_set(pgd_t *pgdp, unsigned long val)
(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while(0)
+
+static inline void pte_unmap(pte_t *pte) { }
/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
diff --git a/arch/powerpc/include/asm/nohash/64/slice.h b/arch/powerpc/include/asm/nohash/64/slice.h
deleted file mode 100644
index ad0d6e3cc1c5..000000000000
--- a/arch/powerpc/include/asm/nohash/64/slice.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_64_SLICE_H
-#define _ASM_POWERPC_NOHASH_64_SLICE_H
-
-#ifdef CONFIG_PPC_64K_PAGES
-#define get_slice_psize(mm, addr) MMU_PAGE_64K
-#else /* CONFIG_PPC_64K_PAGES */
-#define get_slice_psize(mm, addr) MMU_PAGE_4K
-#endif /* !CONFIG_PPC_64K_PAGES */
-#define slice_set_user_psize(mm, psize) do { BUG(); } while (0)
-
-#endif /* _ASM_POWERPC_NOHASH_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
new file mode 100644
index 000000000000..ecd8694cb229
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
+#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
+
+static inline pte_t *hugepd_page(hugepd_t hpd)
+{
+ if (WARN_ON(!hugepd_ok(hpd)))
+ return NULL;
+
+ return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+}
+
+static inline unsigned int hugepd_shift(hugepd_t hpd)
+{
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
+}
+
+static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
+ unsigned int pdshift)
+{
+ /*
+ * On FSL BookE, we have multiple higher-level table entries that
+ * point to the same hugepte. Just use the first one since they're all
+ * identical. So for that case, idx=0.
+ */
+ return hugepd_page(hpd);
+}
+
+void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+
+static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ /* We use the old format for PPC_FSL_BOOK3E */
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
+}
+
+static inline int check_and_get_huge_psize(int shift)
+{
+ if (shift & 1) /* Not a power of 4 */
+ return -EINVAL;
+
+ return shift_to_mmu_psize(shift);
+}
+
+#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index e20072972e35..4c9777d256fb 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
@@ -306,6 +306,8 @@ extern int book3e_htw_mode;
#define mmu_cleanup_all NULL
+#define MAX_PHYSMEM_BITS 44
+
#endif
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h
index a037cb1efb57..edc793e5f08f 100644
--- a/arch/powerpc/include/asm/nohash/mmu.h
+++ b/arch/powerpc/include/asm/nohash/mmu.h
@@ -2,10 +2,18 @@
#ifndef _ASM_POWERPC_NOHASH_MMU_H_
#define _ASM_POWERPC_NOHASH_MMU_H_
-#ifdef CONFIG_PPC64
-#include <asm/nohash/64/mmu.h>
-#else
-#include <asm/nohash/32/mmu.h>
+#if defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+#include <asm/nohash/32/mmu-40x.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+#include <asm/nohash/32/mmu-44x.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#include <asm/nohash/mmu-book3e.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+#include <asm/nohash/32/mmu-8xx.h>
#endif
#endif /* _ASM_POWERPC_NOHASH_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 0634f2949438..332b13b4ecdb 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_NOHASH_PGALLOC_H
#include <linux/mm.h>
+#include <linux/slab.h>
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#ifdef CONFIG_PPC64
@@ -16,9 +17,64 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
}
#endif /* !CONFIG_PPC_BOOK3E */
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
+}
+
#ifdef CONFIG_PPC64
#include <asm/nohash/64/pgalloc.h>
#else
#include <asm/nohash/32/pgalloc.h>
#endif
+
+static inline void pgtable_free(void *table, int shift)
+{
+ if (!shift) {
+ pte_fragment_free((unsigned long *)table, 0);
+ } else {
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ kmem_cache_free(PGT_CACHE(shift), table);
+ }
+}
+
+#define get_hugepd_cache_index(x) (x)
+
+#ifdef CONFIG_SMP
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ unsigned long pgf = (unsigned long)table;
+
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+ pgf |= shift;
+ tlb_remove_table(tlb, (void *)pgf);
+}
+
+static inline void __tlb_remove_table(void *_table)
+{
+ void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+ unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+ pgtable_free(table, shift);
+}
+
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+ pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+ unsigned long address)
+{
+ tlb_flush_pgtable(tlb, address);
+ pgtable_free_tlb(tlb, table, 0);
+}
#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index dd40d200f274..813918f40765 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -60,13 +60,8 @@
#define _PAGE_SPECIAL _PAGE_SW0
/* Base page size */
-#ifdef CONFIG_PPC_64K_PAGES
-#define _PAGE_PSIZE _PAGE_PSIZE_64K
-#define PTE_RPN_SHIFT (28)
-#else
#define _PAGE_PSIZE _PAGE_PSIZE_4K
#define PTE_RPN_SHIFT (24)
-#endif
#define PTE_WIMGE_SHIFT (19)
#define PTE_BAP_SHIFT (2)
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 870fb7b239ea..e1577cfa7186 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -186,8 +186,8 @@
#define OPAL_XIVE_FREE_IRQ 140
#define OPAL_XIVE_SYNC 141
#define OPAL_XIVE_DUMP 142
-#define OPAL_XIVE_RESERVED3 143
-#define OPAL_XIVE_RESERVED4 144
+#define OPAL_XIVE_GET_QUEUE_STATE 143
+#define OPAL_XIVE_SET_QUEUE_STATE 144
#define OPAL_SIGNAL_SYSTEM_RESET 145
#define OPAL_NPU_INIT_CONTEXT 146
#define OPAL_NPU_DESTROY_CONTEXT 147
@@ -209,8 +209,10 @@
#define OPAL_SENSOR_GROUP_ENABLE 163
#define OPAL_PCI_GET_PBCQ_TUNNEL_BAR 164
#define OPAL_PCI_SET_PBCQ_TUNNEL_BAR 165
+#define OPAL_HANDLE_HMI2 166
#define OPAL_NX_COPROC_INIT 167
-#define OPAL_LAST 167
+#define OPAL_XIVE_GET_VP_STATE 170
+#define OPAL_LAST 170
#define QUIESCE_HOLD 1 /* Spin all calls at entry */
#define QUIESCE_REJECT 2 /* Fail all calls with OPAL_BUSY */
@@ -634,6 +636,15 @@ struct OpalHMIEvent {
} u;
};
+/* OPAL_HANDLE_HMI2 out_flags */
+enum {
+ OPAL_HMI_FLAGS_TB_RESYNC = (1ull << 0), /* Timebase has been resynced */
+ OPAL_HMI_FLAGS_DEC_LOST = (1ull << 1), /* DEC lost, needs to be reprogrammed */
+ OPAL_HMI_FLAGS_HDEC_LOST = (1ull << 2), /* HDEC lost, needs to be reprogrammed */
+ OPAL_HMI_FLAGS_TOD_TB_FAIL = (1ull << 3), /* TOD/TB recovery failed. */
+ OPAL_HMI_FLAGS_NEW_EVENT = (1ull << 63), /* An event has been created */
+};
+
enum {
OPAL_P7IOC_DIAG_TYPE_NONE = 0,
OPAL_P7IOC_DIAG_TYPE_RGC = 1,
@@ -1118,6 +1129,7 @@ enum {
enum {
OPAL_IMC_COUNTERS_NEST = 1,
OPAL_IMC_COUNTERS_CORE = 2,
+ OPAL_IMC_COUNTERS_TRACE = 3,
};
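
    [Editorial aside, not part of the patch] The hunk above adds OPAL_HANDLE_HMI2 together with a
    set of out_flags bits. A minimal standalone sketch of how such a flags word could be decoded is
    shown below; the flag values are copied from the hunk, while decode_hmi2_flags() is a hypothetical
    helper for illustration. The real OPAL call returns a __be64 that the kernel converts with
    be64_to_cpu(); the sketch assumes the value is already in CPU endianness.

    #include <stdint.h>
    #include <stdio.h>

    /* Flag values copied from the OPAL_HANDLE_HMI2 hunk above. */
    #define OPAL_HMI_FLAGS_TB_RESYNC    (1ull << 0)
    #define OPAL_HMI_FLAGS_DEC_LOST     (1ull << 1)
    #define OPAL_HMI_FLAGS_HDEC_LOST    (1ull << 2)
    #define OPAL_HMI_FLAGS_TOD_TB_FAIL  (1ull << 3)
    #define OPAL_HMI_FLAGS_NEW_EVENT    (1ull << 63)

    /* Illustrative decoder; 'flags' is assumed to be already CPU-endian. */
    static void decode_hmi2_flags(uint64_t flags)
    {
        if (flags & OPAL_HMI_FLAGS_TB_RESYNC)
            printf("timebase was resynced\n");
        if (flags & OPAL_HMI_FLAGS_DEC_LOST)
            printf("DEC lost, must be reprogrammed\n");
        if (flags & OPAL_HMI_FLAGS_HDEC_LOST)
            printf("HDEC lost, must be reprogrammed\n");
        if (flags & OPAL_HMI_FLAGS_TOD_TB_FAIL)
            printf("TOD/TB recovery failed\n");
        if (flags & OPAL_HMI_FLAGS_NEW_EVENT)
            printf("a new HMI event was created\n");
    }

    int main(void)
    {
        decode_hmi2_flags(OPAL_HMI_FLAGS_NEW_EVENT | OPAL_HMI_FLAGS_TB_RESYNC);
        return 0;
    }
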
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a55b01c90bb1..4cc37e708bc7 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -203,6 +203,7 @@ int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
int64_t opal_sensor_read_u64(u32 sensor_hndl, int token, __be64 *sensor_data);
int64_t opal_handle_hmi(void);
+int64_t opal_handle_hmi2(__be64 *out_flags);
int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
int64_t opal_unregister_dump_region(uint32_t id);
int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
@@ -279,6 +280,13 @@ int64_t opal_xive_allocate_irq(uint32_t chip_id);
int64_t opal_xive_free_irq(uint32_t girq);
int64_t opal_xive_sync(uint32_t type, uint32_t id);
int64_t opal_xive_dump(uint32_t type, uint32_t id);
+int64_t opal_xive_get_queue_state(uint64_t vp, uint32_t prio,
+ __be32 *out_qtoggle,
+ __be32 *out_qindex);
+int64_t opal_xive_set_queue_state(uint64_t vp, uint32_t prio,
+ uint32_t qtoggle,
+ uint32_t qindex);
+int64_t opal_xive_get_vp_state(uint64_t vp, __be64 *out_w01);
int64_t opal_pci_set_p2p(uint64_t phb_init, uint64_t phb_target,
uint64_t desc, uint16_t pe_number);
@@ -352,6 +360,7 @@ int opal_power_control_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
extern int opal_hmi_exception_early(struct pt_regs *regs);
+extern int opal_hmi_exception_early2(struct pt_regs *regs);
extern int opal_handle_hmi_exception(struct pt_regs *regs);
extern void opal_shutdown(void);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e843bc5d1a0f..62f27e0aef7c 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -34,6 +34,8 @@
#include <asm/cpuidle.h>
#include <asm/atomic.h>
+#include <asm-generic/mmiowb_types.h>
+
register struct paca_struct *local_paca asm("r13");
#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
@@ -171,9 +173,7 @@ struct paca_struct {
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
- u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
- u8 nap_state_lost; /* NV GPR values lost in power7_idle */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
u8 pmcregs_in_use; /* pseries puts this in lppaca */
#endif
@@ -183,23 +183,28 @@ struct paca_struct {
#endif
#ifdef CONFIG_PPC_POWERNV
- /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
- u32 *core_idle_state_ptr;
- u8 thread_idle_state; /* PNV_THREAD_RUNNING/NAP/SLEEP */
- /* Mask to indicate thread id in core */
- u8 thread_mask;
- /* Mask to denote subcore sibling threads */
- u8 subcore_sibling_mask;
- /* Flag to request this thread not to stop */
- atomic_t dont_stop;
- /* The PSSCR value that the kernel requested before going to stop */
- u64 requested_psscr;
-
- /*
- * Save area for additional SPRs that need to be
- * saved/restored during cpuidle stop.
- */
- struct stop_sprs stop_sprs;
+ /* PowerNV idle fields */
+ /* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */
+ unsigned long idle_state;
+ union {
+ /* P7/P8 specific fields */
+ struct {
+ /* PNV_THREAD_RUNNING/NAP/SLEEP */
+ u8 thread_idle_state;
+ /* Mask to denote subcore sibling threads */
+ u8 subcore_sibling_mask;
+ };
+
+ /* P9 specific fields */
+ struct {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* The PSSCR value that the kernel requested before going to stop */
+ u64 requested_psscr;
+ /* Flag to request this thread not to stop */
+ atomic_t dont_stop;
+#endif
+ };
+ };
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -264,6 +269,9 @@ struct paca_struct {
#ifdef CONFIG_STACKPROTECTOR
unsigned long canary;
#endif
+#ifdef CONFIG_MMIOWB
+ struct mmiowb_state mmiowb_state;
+#endif
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index ed870468ef6f..dbc8c0679480 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -28,11 +28,15 @@
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
#ifndef __ASSEMBLY__
-#ifdef CONFIG_HUGETLB_PAGE
-extern bool hugetlb_disabled;
-extern unsigned int HPAGE_SHIFT;
-#else
+#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
+#elif defined(CONFIG_PPC_BOOK3S_64)
+extern unsigned int hpage_shift;
+#define HPAGE_SHIFT hpage_shift
+#elif defined(CONFIG_PPC_8xx)
+#define HPAGE_SHIFT 19 /* 512k pages */
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define HPAGE_SHIFT 22 /* 4M pages */
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
@@ -132,18 +136,7 @@ static inline bool pfn_valid(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * On hash the vmalloc and other regions alias to the kernel region when passed
- * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
- * return true for some vmalloc addresses, which is incorrect. So explicitly
- * check that the address is in the kernel region.
- */
-#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
- pfn_valid(virt_to_pfn(kaddr)))
-#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
-#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index e11f03007b57..2b2c60a1a66d 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -20,10 +20,61 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
+pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel);
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+ return (pte_t *)pte_fragment_alloc(mm, 1);
+}
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+ return (pgtable_t)pte_fragment_alloc(mm, 0);
+}
+
+void pte_frag_destroy(void *pte_frag);
+void pte_fragment_free(unsigned long *table, int kernel);
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ pte_fragment_free((unsigned long *)pte, 1);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pte_fragment_free((unsigned long *)ptepage, 0);
+}
+
+/*
+ * Functions that deal with pagetables that could be at any level of
+ * the table need to be passed an "index_size" so they know how to
+ * handle allocation. For PTE pages, the allocation size will be
+ * (2^index_size * sizeof(pointer)) and allocations are drawn from
+ * the kmem_cache in PGT_CACHE(index_size).
+ *
+ * The maximum index size needs to be big enough to allow any
+ * pagetable sizes we need, but small enough to fit in the low bits of
+ * any page table pointer. In other words all pagetables, even tiny
+ * ones, must be aligned to allow at least enough low 0 bits to
+ * contain this value. This value is also used as a mask, so it must
+ * be one less than a power of two.
+ */
+#define MAX_PGTABLE_INDEX_SIZE 0xf
+
+extern struct kmem_cache *pgtable_cache[];
+#define PGT_CACHE(shift) pgtable_cache[shift]
+
+static inline void check_pgt_cache(void) { }
+
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
#include <asm/nohash/pgalloc.h>
#endif
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
#endif /* _ASM_POWERPC_PGALLOC_H */
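
    [Editorial aside, not part of the patch] The comment moved into pgalloc.h above explains why
    MAX_PGTABLE_INDEX_SIZE must be one less than a power of two: the index size is OR-ed into the
    low bits of an aligned table pointer and later masked back out, exactly as the nohash
    pgtable_free_tlb()/__tlb_remove_table() pair does earlier in this diff. A standalone sketch of
    that tagging round-trip, using only the 0xf value from the header, looks like this:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PGTABLE_INDEX_SIZE 0xf  /* same value as in the hunk above */

    int main(void)
    {
        /* Tables are at least 16-byte aligned, so the low 4 bits are free for the tag. */
        void *table = aligned_alloc(16, 256);
        unsigned int shift = 9;          /* example index size, must be <= 0xf */
        uintptr_t tagged;
        void *recovered;
        unsigned int reshift;

        if (!table)
            return 1;

        /* pgtable_free_tlb() style pack */
        tagged = (uintptr_t)table | shift;

        /* __tlb_remove_table() style unpack */
        recovered = (void *)(tagged & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
        reshift = tagged & MAX_PGTABLE_INDEX_SIZE;

        assert(recovered == table && reshift == shift);
        printf("table %p with shift %u round-tripped intact\n", recovered, reshift);
        free(table);
        return 0;
    }
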
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index a89c67b62680..b169bbf95fcb 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -33,11 +33,7 @@ static inline __be64 pmd_raw(pmd_t x)
return x.pmd;
}
-/*
- * 64 bit hash always use 4 level table. Everybody else use 4 level
- * only for 4K page size.
- */
-#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+/* 64 bit always use 4 level table. */
typedef struct { __be64 pud; } pud_t;
#define __pud(x) ((pud_t) { cpu_to_be64(x) })
#define __pud_raw(x) ((pud_t) { (x) })
@@ -51,7 +47,6 @@ static inline __be64 pud_raw(pud_t x)
return x.pud;
}
-#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
/* PGD level */
@@ -77,7 +72,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
* With hash config 64k pages additionally define a bigger "real PTE" type that
* gathers the "second half" part of the PTE for pseudo 64k pages
*/
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index 3b0edf041b2e..d11b4c61d686 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -23,18 +23,13 @@ static inline unsigned long pmd_val(pmd_t x)
return x.pmd;
}
-/*
- * 64 bit hash always use 4 level table. Everybody else use 4 level
- * only for 4K page size.
- */
-#if defined(CONFIG_PPC_BOOK3S_64) || !defined(CONFIG_PPC_64K_PAGES)
+/* 64 bit always use 4 level table. */
typedef struct { unsigned long pud; } pud_t;
#define __pud(x) ((pud_t) { (x) })
static inline unsigned long pud_val(pud_t x)
{
return x.pud;
}
-#endif /* CONFIG_PPC_BOOK3S_64 || !CONFIG_PPC_64K_PAGES */
#endif /* CONFIG_PPC64 */
/* PGD level */
@@ -54,7 +49,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
* With hash config 64k pages additionally define a bigger "real PTE" type that
* gathers the "second half" part of the PTE for pseudo 64k pages
*/
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_BOOK3S_64)
+#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 505550fb2935..3f53be60fb01 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -89,9 +89,6 @@ extern void paging_init(void);
*/
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
-extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write,
- struct page **pages, int *nr);
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
@@ -108,6 +105,12 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { }
#endif
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void);
+#else
+static inline void ptdump_check_wx(void) { }
+#endif
+
/*
* When used, PTE_FRAG_NR is defined in subarch pgtable.h
* so we are sure it is included when arriving here.
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 3351bcf42f2d..706ac5df546f 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -164,6 +164,9 @@ struct thread_struct {
unsigned long rtas_sp; /* stack pointer for when in RTAS */
#endif
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ unsigned long kuap; /* opened segments for user access */
+#endif
/* Debug Registers */
struct debug_reg debug;
struct thread_fp_state fp_state;
@@ -411,14 +414,17 @@ static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
}
#endif
+/* asm stubs */
+extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
+extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
+extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
+
extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
-extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+
extern void power7_idle_type(unsigned long type);
-extern unsigned long power9_idle_stop(unsigned long psscr_val);
-extern unsigned long power9_offline_stop(unsigned long psscr_val);
extern void power9_idle_type(unsigned long stop_psscr_val,
unsigned long stop_psscr_mask);
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 64271e562fed..6f047730e642 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -52,10 +52,17 @@ struct pt_regs
};
};
+ union {
+ struct {
#ifdef CONFIG_PPC64
- unsigned long ppr;
- unsigned long __pad; /* Maintain 16 byte interrupt stack alignment */
+ unsigned long ppr;
+#endif
+#ifdef CONFIG_PPC_KUAP
+ unsigned long kuap;
#endif
+ };
+ unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
+ };
};
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5b2aff0ce8e..10caa145f98b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -168,6 +168,7 @@
#define PSSCR_ESL 0x00200000 /* Enable State Loss */
#define PSSCR_SD 0x00400000 /* Status Disable */
#define PSSCR_PLS 0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_PLS_SHIFT 60
#define PSSCR_GUEST_VIS 0xf0000000000003ffUL /* Guest-visible PSSCR fields */
#define PSSCR_FAKE_SUSPEND 0x00000400 /* Fake-suspend bit (P9 DD2.2) */
#define PSSCR_FAKE_SUSPEND_LG 10 /* Fake-suspend bit position */
@@ -758,10 +759,9 @@
#define SRR1_WAKERESET 0x00100000 /* System reset */
#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
-#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
- * may not be recoverable */
-#define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */
-#define SRR1_WS_DEEP 0x00010000 /* All resources maintained */
+#define SRR1_WS_HVLOSS 0x00030000 /* HV resources not maintained */
+#define SRR1_WS_GPRLOSS 0x00020000 /* GPRs not maintained */
+#define SRR1_WS_NOLOSS 0x00010000 /* All resources maintained */
#define SRR1_PROGTM 0x00200000 /* TM Bad Thing */
#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
#define SRR1_PROGILL 0x00080000 /* Illegal instruction */
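
    [Editorial aside, not part of the patch] The reg.h hunk adds PSSCR_PLS_SHIFT next to the
    existing PSSCR_PLS mask and renames the SRR1 wakeup-state macros after what is actually lost.
    A small sketch of extracting the power-saving level status nibble from a PSSCR image, using the
    two constants copied from the hunk (ull suffix added here only for the standalone build), is:

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the reg.h hunk above. */
    #define PSSCR_PLS       0xf000000000000000ull  /* Power-saving Level Status */
    #define PSSCR_PLS_SHIFT 60

    int main(void)
    {
        uint64_t psscr = 0x3000000000000302ull;  /* example register image */
        unsigned int pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;

        printf("power-saving level status = %u\n", pls);  /* prints 3 */
        return 0;
    }
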
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index eb2a33d5df26..e382bd6ede84 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -41,7 +41,7 @@
#if defined(CONFIG_PPC_BOOK3E_64)
#define MSR_64BIT MSR_CM
-#define MSR_ (MSR_ME | MSR_CE)
+#define MSR_ (MSR_ME | MSR_RI | MSR_CE)
#define MSR_KERNEL (MSR_ | MSR_64BIT)
#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
index 44816cbc4198..c6f466f4c241 100644
--- a/arch/powerpc/include/asm/slice.h
+++ b/arch/powerpc/include/asm/slice.h
@@ -4,9 +4,7 @@
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/slice.h>
-#elif defined(CONFIG_PPC64)
-#include <asm/nohash/64/slice.h>
-#elif defined(CONFIG_PPC_MMU_NOHASH)
+#elif defined(CONFIG_PPC_MMU_NOHASH_32)
#include <asm/nohash/32/slice.h>
#endif
@@ -38,6 +36,11 @@ void slice_setup_new_exec(void);
static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
+static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_MM_SLICES */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 68da49320592..3192d454a733 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -17,9 +17,9 @@ extern int create_section_mapping(unsigned long start, unsigned long end, int ni
extern int remove_section_mapping(unsigned long start, unsigned long end);
#ifdef CONFIG_PPC_BOOK3S_64
-extern void resize_hpt_for_hotplug(unsigned long new_mem_size);
+extern int resize_hpt_for_hotplug(unsigned long new_mem_size);
#else
-static inline void resize_hpt_for_hotplug(unsigned long new_mem_size) { }
+static inline int resize_hpt_for_hotplug(unsigned long new_mem_size) { return 0; }
#endif
#ifdef CONFIG_NUMA
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 685c72310f5d..15b39c407c4e 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -39,19 +39,6 @@
#define LOCK_TOKEN 1
#endif
-#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
-#define CLEAR_IO_SYNC (get_paca()->io_sync = 0)
-#define SYNC_IO do { \
- if (unlikely(get_paca()->io_sync)) { \
- mb(); \
- get_paca()->io_sync = 0; \
- } \
- } while (0)
-#else
-#define CLEAR_IO_SYNC
-#define SYNC_IO
-#endif
-
#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
@@ -99,7 +86,6 @@ static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
- CLEAR_IO_SYNC;
return __arch_spin_trylock(lock) == 0;
}
@@ -130,7 +116,6 @@ extern void __rw_yield(arch_rwlock_t *lock);
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- CLEAR_IO_SYNC;
while (1) {
if (likely(__arch_spin_trylock(lock) == 0))
break;
@@ -148,7 +133,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long flags_dis;
- CLEAR_IO_SYNC;
while (1) {
if (likely(__arch_spin_trylock(lock) == 0))
break;
@@ -167,7 +151,6 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- SYNC_IO;
__asm__ __volatile__("# arch_spin_unlock\n\t"
PPC_RELEASE_BARRIER: : :"memory");
lock->slock = 0;
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index 1647de15a31e..9bf6dffb4090 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -4,14 +4,17 @@
#ifdef __KERNEL__
+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRNCPY
#define __HAVE_ARCH_STRNCMP
+#define __HAVE_ARCH_MEMCHR
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_MEMSET16
+#endif
+
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
-#define __HAVE_ARCH_MEMCMP
-#define __HAVE_ARCH_MEMCHR
-#define __HAVE_ARCH_MEMSET16
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE
extern char * strcpy(char *,const char *);
@@ -27,7 +30,27 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
extern void * memcpy_flushcache(void *,const void *,__kernel_size_t);
+void *__memset(void *s, int c, __kernel_size_t count);
+void *__memcpy(void *to, const void *from, __kernel_size_t n);
+void *__memmove(void *to, const void *from, __kernel_size_t n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+
#ifdef CONFIG_PPC64
+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMSET32
#define __HAVE_ARCH_MEMSET64
@@ -49,8 +72,11 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
{
return __memset64(p, v, n * 8);
}
+#endif
#else
+#ifndef CONFIG_KASAN
#define __HAVE_ARCH_STRLEN
+#endif
extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
#endif
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 1243045bad2d..a048fed0722f 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -94,9 +94,15 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->orig_gpr3 = args[0];
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
- int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+ int arch;
+
+ if (IS_ENABLED(CONFIG_PPC64) && !test_tsk_thread_flag(task, TIF_32BIT))
+ arch = AUDIT_ARCH_PPC64;
+ else
+ arch = AUDIT_ARCH_PPC;
+
#ifdef __LITTLE_ENDIAN__
arch |= __AUDIT_ARCH_LE;
#endif
diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h
index eab4779f6b84..c993482237ed 100644
--- a/arch/powerpc/include/asm/task_size_64.h
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -20,7 +20,7 @@
/*
* For now 512TB is only supported with book3s and 64K linux page size.
*/
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
+#ifdef CONFIG_PPC_64K_PAGES
/*
* Max value currently used:
*/
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 54bf7e68a7e1..57e968413d1e 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -36,6 +36,8 @@ extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ 125000000UL
+extern bool tb_invalid;
+
struct div_result {
u64 result_high;
u64 result_low;
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e24c67d5ba75..34fba1ce27f7 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -27,8 +27,8 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+#define tlb_flush tlb_flush
extern void tlb_flush(struct mmu_gather *tlb);
/* Get the generic bits... */
@@ -46,22 +46,6 @@ static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
#endif
}
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
- if (!tlb->page_size)
- tlb->page_size = page_size;
- else if (tlb->page_size != page_size) {
- if (!tlb->fullmm)
- tlb_flush_mmu(tlb);
- /*
- * update the page size after flush for the new
- * mmu_gather.
- */
- tlb->page_size = page_size;
- }
-}
-
#ifdef CONFIG_SMP
static inline int mm_is_core_local(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 58ef8c43a89d..08cd60cd70b7 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -54,6 +54,22 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
TP_ARGS(regs)
);
+#ifdef CONFIG_PPC_DOORBELL
+DEFINE_EVENT(ppc64_interrupt_class, doorbell_entry,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+
+DEFINE_EVENT(ppc64_interrupt_class, doorbell_exit,
+
+ TP_PROTO(struct pt_regs *regs),
+
+ TP_ARGS(regs)
+);
+#endif
+
#ifdef CONFIG_PPC_PSERIES
extern int hcall_tracepoint_regfunc(void);
extern void hcall_tracepoint_unregfunc(void);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4d6d905e9138..76f34346b642 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -6,6 +6,7 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
+#include <asm/kup.h>
/*
* The fs value determines whether argument validity checking should be
@@ -140,6 +141,7 @@ extern long __put_user_bad(void);
#define __put_user_size(x, ptr, size, retval) \
do { \
retval = 0; \
+ allow_write_to_user(ptr, size); \
switch (size) { \
case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
@@ -147,6 +149,7 @@ do { \
case 8: __put_user_asm2(x, ptr, retval); break; \
default: __put_user_bad(); \
} \
+ prevent_write_to_user(ptr, size); \
} while (0)
#define __put_user_nocheck(x, ptr, size) \
@@ -239,6 +242,7 @@ do { \
__chk_user_ptr(ptr); \
if (size > sizeof(x)) \
(x) = __get_user_bad(); \
+ allow_read_from_user(ptr, size); \
switch (size) { \
case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
@@ -246,6 +250,7 @@ do { \
case 8: __get_user_asm2(x, ptr, retval); break; \
default: (x) = __get_user_bad(); \
} \
+ prevent_read_from_user(ptr, size); \
} while (0)
/*
@@ -305,15 +310,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- return __copy_tofrom_user(to, from, n);
+ unsigned long ret;
+
+ allow_user_access(to, from, n);
+ ret = __copy_tofrom_user(to, from, n);
+ prevent_user_access(to, from, n);
+ return ret;
}
#endif /* __powerpc64__ */
static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
+ unsigned long ret;
if (__builtin_constant_p(n) && (n <= 8)) {
- unsigned long ret = 1;
+ ret = 1;
switch (n) {
case 1:
@@ -338,14 +349,18 @@ static inline unsigned long raw_copy_from_user(void *to,
}
barrier_nospec();
- return __copy_tofrom_user((__force void __user *)to, from, n);
+ allow_read_from_user(from, n);
+ ret = __copy_tofrom_user((__force void __user *)to, from, n);
+ prevent_read_from_user(from, n);
+ return ret;
}
static inline unsigned long raw_copy_to_user(void __user *to,
const void *from, unsigned long n)
{
+ unsigned long ret;
if (__builtin_constant_p(n) && (n <= 8)) {
- unsigned long ret = 1;
+ ret = 1;
switch (n) {
case 1:
@@ -365,17 +380,24 @@ static inline unsigned long raw_copy_to_user(void __user *to,
return 0;
}
- return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ allow_write_to_user(to, n);
+ ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
+ prevent_write_to_user(to, n);
+ return ret;
}
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
+ unsigned long ret = size;
might_fault();
- if (likely(access_ok(addr, size)))
- return __clear_user(addr, size);
- return size;
+ if (likely(access_ok(addr, size))) {
+ allow_write_to_user(addr, size);
+ ret = __clear_user(addr, size);
+ prevent_write_to_user(addr, size);
+ }
+ return ret;
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
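
    [Editorial aside, not part of the patch] The uaccess.h hunks above bracket every user access
    between allow_*()/prevent_*() calls so the KUAP window is only open around the access itself
    and is closed again on every return path. The following userspace model shows that bracketing
    shape for the clear_user() case; allow_write()/prevent_write() are stubs standing in for the
    kernel's allow_write_to_user()/prevent_write_to_user(), and model_clear_user() is purely
    illustrative, not the kernel API.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Stubs standing in for the kernel's KUAP open/close helpers. */
    static void allow_write(void *to, size_t n)
    {
        (void)to;
        printf("open user-access window (%zu bytes)\n", n);
    }

    static void prevent_write(void *to, size_t n)
    {
        (void)to; (void)n;
        printf("close user-access window\n");
    }

    /* Model of the clear_user() pattern from the hunk: default to "nothing cleared",
     * open the window only once the destination is validated, and always close it
     * again before returning. */
    static size_t model_clear_user(void *to, size_t n, int access_ok)
    {
        size_t ret = n;

        if (access_ok) {
            allow_write(to, n);
            memset(to, 0, n);
            ret = 0;
            prevent_write(to, n);
        }
        return ret;
    }

    int main(void)
    {
        char buf[16];

        printf("left to clear: %zu\n", model_clear_user(buf, sizeof(buf), 1));
        printf("left to clear: %zu\n", model_clear_user(buf, sizeof(buf), 0));
        return 0;
    }
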
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 3c704f5dd3ae..eaf76f57023a 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -23,6 +23,7 @@
* same offset regardless of where the code is executing
*/
extern void __iomem *xive_tima;
+extern unsigned long xive_tima_os;
/*
* Offset in the TM area of our current execution level (provided by
@@ -73,6 +74,8 @@ struct xive_q {
u32 esc_irq;
atomic_t count;
atomic_t pending_count;
+ u64 guest_qaddr;
+ u32 guest_qshift;
};
/* Global enable flags for the XIVE support */
@@ -109,12 +112,26 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
extern void xive_native_sync_source(u32 hw_irq);
+extern void xive_native_sync_queue(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
extern int xive_native_disable_vp(u32 vp_id);
extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
extern bool xive_native_has_single_escalation(void);
+extern int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
+ u64 *out_qpage,
+ u64 *out_qsize,
+ u64 *out_qeoi_page,
+ u32 *out_escalate_irq,
+ u64 *out_qflags);
+
+extern int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
+ u32 *qindex);
+extern int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
+ u32 qindex);
+extern int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
+
#else
static inline bool xive_enabled(void) { return false; }
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 26ca425f4c2c..b0f72dea8b11 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -482,6 +482,8 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */
#define KVM_REG_PPC_ICP_PPRI_MASK 0xff
+#define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)
+
/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC 1
#define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */
@@ -677,4 +679,48 @@ struct kvm_ppc_cpu_char {
#define KVM_XICS_PRESENTED (1ULL << 43)
#define KVM_XICS_QUEUED (1ULL << 44)
+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL 1
+#define KVM_DEV_XIVE_RESET 1
+#define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT 3
+#define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT 32
+#define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT 33
+#define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT 0
+#define KVM_XIVE_EQ_PRIORITY_MASK 0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT 3
+#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+ __u32 flags;
+ __u32 qshift;
+ __u64 qaddr;
+ __u32 qtoggle;
+ __u32 qindex;
+ __u8 pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET 0
+#define KVM_XIVE_ESB_PAGE_OFFSET 4
+
#endif /* __LINUX_KVM_POWERPC_H */
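
    [Editorial aside, not part of the patch] The new XIVE device attributes encode priority, server,
    masked state and EISN into one 64-bit source-configuration value. The sketch below packs and
    unpacks such a value using the shift/mask constants copied from the hunk (ULL suffixes added for
    the standalone build); the PACK/UNPACK helpers are local to the sketch, not kernel macros.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the KVM XIVE hunk above. */
    #define KVM_XIVE_SOURCE_PRIORITY_SHIFT  0
    #define KVM_XIVE_SOURCE_PRIORITY_MASK   0x7ULL
    #define KVM_XIVE_SOURCE_SERVER_SHIFT    3
    #define KVM_XIVE_SOURCE_SERVER_MASK     0xfffffff8ULL
    #define KVM_XIVE_SOURCE_MASKED_SHIFT    32
    #define KVM_XIVE_SOURCE_MASKED_MASK     0x100000000ULL
    #define KVM_XIVE_SOURCE_EISN_SHIFT      33
    #define KVM_XIVE_SOURCE_EISN_MASK       0xfffffffe00000000ULL

    /* The masks are already shifted into place, so pack-then-mask / mask-then-unshift. */
    #define PACK(v, shift, mask)     (((uint64_t)(v) << (shift)) & (mask))
    #define UNPACK(val, shift, mask) (((val) & (mask)) >> (shift))

    int main(void)
    {
        uint64_t cfg = PACK(5, KVM_XIVE_SOURCE_PRIORITY_SHIFT, KVM_XIVE_SOURCE_PRIORITY_MASK) |
                       PACK(12, KVM_XIVE_SOURCE_SERVER_SHIFT, KVM_XIVE_SOURCE_SERVER_MASK) |
                       PACK(1, KVM_XIVE_SOURCE_MASKED_SHIFT, KVM_XIVE_SOURCE_MASKED_MASK) |
                       PACK(0x1234, KVM_XIVE_SOURCE_EISN_SHIFT, KVM_XIVE_SOURCE_EISN_MASK);

        assert(UNPACK(cfg, KVM_XIVE_SOURCE_PRIORITY_SHIFT, KVM_XIVE_SOURCE_PRIORITY_MASK) == 5);
        assert(UNPACK(cfg, KVM_XIVE_SOURCE_SERVER_SHIFT, KVM_XIVE_SOURCE_SERVER_MASK) == 12);
        assert(UNPACK(cfg, KVM_XIVE_SOURCE_MASKED_SHIFT, KVM_XIVE_SOURCE_MASKED_MASK) == 1);
        assert(UNPACK(cfg, KVM_XIVE_SOURCE_EISN_SHIFT, KVM_XIVE_SOURCE_EISN_MASK) == 0x1234);

        printf("source config = 0x%016llx\n", (unsigned long long)cfg);
        return 0;
    }
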
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index cddadccf551d..0ea6c4aa3a20 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -31,6 +31,18 @@ CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE)
endif
+KASAN_SANITIZE_early_32.o := n
+KASAN_SANITIZE_cputable.o := n
+KASAN_SANITIZE_prom_init.o := n
+KASAN_SANITIZE_btext.o := n
+
+ifdef CONFIG_KASAN
+CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
+CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
+CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
+CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
+endif
+
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
process.o systbl.o idle.o \
@@ -93,7 +105,7 @@ extra-y += vmlinux.lds
obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
-obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
+obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 86a61e5f8285..8e02444e9d3d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -147,6 +147,9 @@ int main(void)
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ OFFSET(KUAP, thread_struct, kuap);
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
@@ -268,7 +271,6 @@ int main(void)
OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
- OFFSET(PACA_NAPSTATELOST, paca_struct, nap_state_lost);
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
@@ -332,6 +334,10 @@ int main(void)
STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_KUAP
+ STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
+#endif
+
#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
@@ -766,23 +772,6 @@ int main(void)
OFFSET(VCPU_TIMING_LAST_ENTER_TBL, kvm_vcpu, arch.timing_last_enter.tv32.tbl);
#endif
-#ifdef CONFIG_PPC_POWERNV
- OFFSET(PACA_CORE_IDLE_STATE_PTR, paca_struct, core_idle_state_ptr);
- OFFSET(PACA_THREAD_IDLE_STATE, paca_struct, thread_idle_state);
- OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
- OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
- OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
- OFFSET(PACA_DONT_STOP, paca_struct, dont_stop);
-#define STOP_SPR(x, f) OFFSET(x, paca_struct, stop_sprs.f)
- STOP_SPR(STOP_PID, pid);
- STOP_SPR(STOP_LDBAR, ldbar);
- STOP_SPR(STOP_FSCR, fscr);
- STOP_SPR(STOP_HFSCR, hfscr);
- STOP_SPR(STOP_MMCR1, mmcr1);
- STOP_SPR(STOP_MMCR2, mmcr2);
- STOP_SPR(STOP_MMCRA, mmcra);
-#endif
-
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 53102764fd2f..862e2890bd3d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -759,23 +759,21 @@ static void cacheinfo_create_index_dir(struct cache *cache, int index,
index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
if (!index_dir)
- goto err;
+ return;
index_dir->cache = cache;
rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
cache_dir->kobj, "index%d", index);
- if (rc)
- goto err;
+ if (rc) {
+ kobject_put(&index_dir->kobj);
+ return;
+ }
index_dir->next = cache_dir->index;
cache_dir->index = index_dir;
cacheinfo_create_index_opt_attrs(index_dir);
-
- return;
-err:
- kfree(index_dir);
}
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 1eab54bc6ee9..cd12f362b61f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2147,7 +2147,11 @@ void __init set_cur_cpu_spec(struct cpu_spec *s)
struct cpu_spec *t = &the_cpu_spec;
t = PTRRELOC(t);
- *t = *s;
+ /*
+ * use memcpy() instead of *t = *s so that GCC replaces it
+ * by __memcpy() when KASAN is active
+ */
+ memcpy(t, s, sizeof(*t));
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
}
@@ -2161,8 +2165,11 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
t = PTRRELOC(t);
old = *t;
- /* Copy everything, then do fixups */
- *t = *s;
+ /*
+ * Copy everything, then do fixups. Use memcpy() instead of *t = *s
+ * so that GCC replaces it by __memcpy() when KASAN is active
+ */
+ memcpy(t, s, sizeof(*t));
/*
* If we are overriding a previous value derived from the real
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index b6fe883b1016..5ec3b3835925 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -18,6 +18,7 @@
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
+#include <asm/trace.h>
#ifdef CONFIG_SMP
@@ -81,6 +82,7 @@ void doorbell_exception(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
+ trace_doorbell_entry(regs);
ppc_msgsync();
@@ -91,6 +93,7 @@ void doorbell_exception(struct pt_regs *regs)
smp_ipi_demux_relaxed(); /* already performed the barrier */
+ trace_doorbell_exit(regs);
irq_exit();
set_irq_regs(old_regs);
}
diff --git a/arch/powerpc/kernel/early_32.c b/arch/powerpc/kernel/early_32.c
new file mode 100644
index 000000000000..3482118ffe76
--- /dev/null
+++ b/arch/powerpc/kernel/early_32.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Early init before relocation
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/asm-prototypes.h>
+
+/*
+ * We're called here very early in the boot.
+ *
+ * Note that the kernel may be running at an address which is different
+ * from the address that it was linked at, so we must use RELOC/PTRRELOC
+ * to access static data (including strings). -- paulus
+ */
+notrace unsigned long __init early_init(unsigned long dt_ptr)
+{
+ unsigned long offset = reloc_offset();
+
+ /* First zero the BSS */
+ memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
+
+ /*
+ * Identify the CPU type and fix up code sections
+ * that depend on which cpu we have.
+ */
+ identify_cpu(offset, mfspr(SPRN_PVR));
+
+ apply_feature_fixups();
+
+ return KERNELBASE + offset;
+}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b61cfd29c76f..c18f3490a77e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -36,15 +36,10 @@
#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
+#include <asm/kup.h>
+#include <asm/bug.h>
-/*
- * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
- */
-#if MSR_KERNEL >= 0x10000
-#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
-#else
-#define LOAD_MSR_KERNEL(r, x) li r,(x)
-#endif
+#include "head_32.h"
/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
@@ -150,8 +145,8 @@ transfer_to_handler:
stw r12,_CTR(r11)
stw r2,_XER(r11)
mfspr r12,SPRN_SPRG_THREAD
- addi r2,r12,-THREAD
beq 2f /* if from user, fix up THREAD.regs */
+ addi r2, r12, -THREAD
addi r11,r1,STACK_FRAME_OVERHEAD
stw r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
@@ -161,6 +156,9 @@ transfer_to_handler:
andis. r12,r12,DBCR0_IDM@h
#endif
ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_lock r11, r12
+#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
@@ -186,6 +184,8 @@ transfer_to_handler:
2: /* if from kernel, check interrupted DOZE/NAP mode and
* check for stack overflow
*/
+ kuap_save_and_lock r11, r12, r9, r2, r0
+ addi r2, r12, -THREAD
lwz r9,KSP_LIMIT(r12)
cmplw r1,r9 /* if r1 <= ksp_limit */
ble- stack_ovf /* then the kernel stack overflowed */
@@ -207,26 +207,43 @@ transfer_to_handler_cont:
mtspr SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * When tracing IRQ state (lockdep) we enable the MMU before we call
+ * the IRQ tracing functions as they might access vmalloc space or
+ * perform IOs for console output.
+ *
+ * To speed up the syscall path where interrupts stay on, let's check
+ * first if we are changing the MSR value at all.
+ */
+ tophys(r12, r1)
+ lwz r12,_MSR(r12)
+ andi. r12,r12,MSR_EE
+ bne 1f
+
+ /* MSR isn't changing, just transition directly */
+#endif
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r10
+ mtlr r9
+ SYNC
+ RFI /* jump to handler, enable MMU */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
+ * keep interrupts disabled at this point otherwise we might risk
+ * taking an interrupt before we tell lockdep they are enabled.
+ */
lis r12,reenable_mmu@h
ori r12,r12,reenable_mmu@l
+ LOAD_MSR_KERNEL(r0, MSR_KERNEL)
mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR1,r0
SYNC
RFI
-reenable_mmu: /* re-enable mmu so we can */
- mfmsr r10
- lwz r12,_MSR(r1)
- xor r10,r10,r12
- andi. r10,r10,MSR_EE /* Did EE change? */
- beq 1f
+reenable_mmu:
/*
- * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
- * If from user mode there is only one stack frame on the stack, and
- * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
- * stack frame to make trace_hardirqs_off happy.
- *
- * This is handy because we also need to save a bunch of GPRs,
+ * We save a bunch of GPRs,
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
* r4 & r5 can contain page fault arguments that need to be passed
@@ -234,14 +251,19 @@ reenable_mmu: /* re-enable mmu so we can */
* they aren't useful past this point (aren't syscall arguments),
* the rest is restored from the exception frame.
*/
+
stwu r1,-32(r1)
stw r9,8(r1)
stw r11,12(r1)
stw r3,16(r1)
stw r4,20(r1)
stw r5,24(r1)
- bl trace_hardirqs_off
- lwz r5,24(r1)
+
+ /* If we are disabling interrupts (normal case), simply log it with
+ * lockdep
+ */
+1: bl trace_hardirqs_off
+2: lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
@@ -251,15 +273,9 @@ reenable_mmu: /* re-enable mmu so we can */
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
-1: mtctr r11
+ mtctr r11
mtlr r9
bctr /* jump to handler */
-#else /* CONFIG_TRACE_IRQFLAGS */
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r10
- mtlr r9
- SYNC
- RFI /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
@@ -272,6 +288,7 @@ reenable_mmu: /* re-enable mmu so we can */
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
rlwinm r9,r9,0,~MSR_EE
lwz r12,_LINK(r11) /* and return to address in LR */
+ kuap_restore r11, r2, r3, r4, r5
b fast_exception_return
#endif
@@ -301,6 +318,33 @@ stack_ovf:
SYNC
RFI
+#ifdef CONFIG_TRACE_IRQFLAGS
+trace_syscall_entry_irq_off:
+ /*
+ * Syscall shouldn't happen while interrupts are disabled,
+ * so let's do a warning here.
+ */
+0: trap
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+ bl trace_hardirqs_on
+
+ /* Now enable for real */
+ LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+ mtmsr r10
+
+ REST_GPR(0, r1)
+ REST_4GPRS(3, r1)
+ REST_2GPRS(7, r1)
+ b DoSyscall
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+ .globl transfer_to_syscall
+transfer_to_syscall:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ andi. r12,r9,MSR_EE
+ beq- trace_syscall_entry_irq_off
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
/*
* Handle a system call.
*/
@@ -312,33 +356,14 @@ _GLOBAL(DoSyscall)
stw r3,ORIG_GPR3(r1)
li r12,0
stw r12,RESULT(r1)
- lwz r11,_CCR(r1) /* Clear SO bit in CR */
- rlwinm r11,r11,0,4,2
- stw r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
- /* Return from syscalls can (and generally will) hard enable
- * interrupts. You aren't supposed to call a syscall with
- * interrupts disabled in the first place. However, to ensure
- * that we get it right vs. lockdep if it happens, we force
- * that hard enable here with appropriate tracing if we see
- * that we have been called with interrupts off
- */
+ /* Make sure interrupts are enabled */
mfmsr r11
andi. r12,r11,MSR_EE
- bne+ 1f
- /* We came in with interrupts disabled, we enable them now */
- bl trace_hardirqs_on
- mfmsr r11
- lwz r0,GPR0(r1)
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- ori r11,r11,MSR_EE
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- mtmsr r11
-1:
+ /* We came in with interrupts disabled, so we WARN and mark them enabled
+ * for lockdep now */
+0: tweqi r12, 0
+ EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
lwz r11,TI_FLAGS(r2)
andi. r11,r11,_TIF_SYSCALL_DOTRACE
@@ -392,8 +417,7 @@ syscall_exit_cont:
lwz r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
/* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't happen though but we
- * want to catch the bugger if it does right ?
+ * off, we trace that here. It shouldn't normally happen.
*/
andi. r10,r8,MSR_EE
bne+ 1f
@@ -422,12 +446,11 @@ BEGIN_FTR_SECTION
lwarx r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- andi. r4,r8,MSR_PR
- beq 3f
ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
-3:
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r5, r7
#endif
+ kuap_check r2, r4
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -678,6 +701,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
+ kuap_check r2, r4
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -820,6 +844,9 @@ restore_user:
bnel- load_dbcr0
#endif
ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
+#ifdef CONFIG_PPC_BOOK3S_32
+ kuep_unlock r10, r11
+#endif
b restore
@@ -866,12 +893,12 @@ resume_kernel:
/* check current_thread_info->preempt_count */
lwz r0,TI_PREEMPT(r2)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore
+ bne restore_kuap
andi. r8,r8,_TIF_NEED_RESCHED
- beq+ restore
+ beq+ restore_kuap
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore /* don't schedule if so */
+ beq restore_kuap /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Lockdep thinks irqs are enabled, we need to call
* preempt_schedule_irq with IRQs off, so we inform lockdep
@@ -879,10 +906,7 @@ resume_kernel:
*/
bl trace_hardirqs_off
#endif
-1: bl preempt_schedule_irq
- lwz r3,TI_FLAGS(r2)
- andi. r0,r3,_TIF_NEED_RESCHED
- bne- 1b
+ bl preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
@@ -890,6 +914,8 @@ resume_kernel:
bl trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */
+restore_kuap:
+ kuap_restore r1, r2, r9, r10, r0
/* interrupts are hard-disabled at this point */
restore:
@@ -913,28 +939,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
* off in this assembly code while peeking at TI_FLAGS() and such. However
* we need to inform it if the exception turned interrupts off, and we
 * are about to turn them back on.
- *
- * The problem here sadly is that we don't know whether the exceptions was
- * one that turned interrupts off or not. So we always tell lockdep about
- * turning them on here when we go back to wherever we came from with EE
- * on, even if that may meen some redudant calls being tracked. Maybe later
- * we could encode what the exception did somewhere or test the exception
- * type in the pt_regs but that sounds overkill
*/
andi. r10,r9,MSR_EE
beq 1f
- /*
- * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
- * which is the stack frame here, we need to force a stack frame
- * in case we came from user space.
- */
stwu r1,-32(r1)
mflr r0
stw r0,4(r1)
- stwu r1,-32(r1)
bl trace_hardirqs_on
- lwz r1,0(r1)
- lwz r1,0(r1)
+ addi r1, r1, 32
lwz r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
@@ -1197,6 +1209,7 @@ load_dbcr0:
.section .bss
.align 4
+ .global global_dbcr0
global_dbcr0:
.space 8*NR_CPUS
.previous
@@ -1207,9 +1220,10 @@ do_work: /* r10 contains MSR_KERNEL here */
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
- /* Note: We don't need to inform lockdep that we are enabling
- * interrupts here. As far as it knows, they are already enabled
- */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_on
+ mfmsr r10
+#endif
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
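
Both the 32-bit hunks above and the matching hunk in entry_64.S below drop the hand-rolled retry loop around the call to preempt_schedule_irq. That is safe because the C helper already re-tests the resched flag itself before returning; a simplified, kernel-context sketch of that shape (not the actual kernel/sched/core.c body):

	/*
	 * Simplified sketch only: preempt_schedule_irq() loops on
	 * need_resched() internally, so callers need a single call,
	 * not an assembly-level retry loop around it.
	 */
	static void sketch_preempt_schedule_irq(void)
	{
		do {
			preempt_disable();
			local_irq_enable();
			schedule();
			local_irq_disable();
			sched_preempt_enable_no_resched();
		} while (need_resched());
	}
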
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 15c67d2c0534..d978af78bf2a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -46,6 +46,7 @@
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
+#include <asm/kup.h>
/*
* System calls.
@@ -120,6 +121,9 @@ END_BTB_FLUSH_SECTION
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
+
+ kuap_check_amr r10, r11
+
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
@@ -275,6 +279,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
+ kuap_check_amr r10, r11
+
#ifdef CONFIG_PPC_BOOK3S
/*
* Clear MSR_RI, MSR_EE is already and remains disabled. We could do
@@ -296,6 +302,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r8, PACATMSCRATCH(r13)
#endif
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
ld r2,GPR2(r1)
ld r1,GPR1(r1)
@@ -306,8 +316,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
RFI_TO_USER
b . /* prevent speculative execution */
- /* exit to kernel */
-1: ld r2,GPR2(r1)
+1: /* exit to kernel */
+ kuap_restore_amr r2
+
+ ld r2,GPR2(r1)
ld r1,GPR1(r1)
mtlr r4
mtcr r5
@@ -594,6 +606,8 @@ _GLOBAL(_switch)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
+ kuap_check_amr r9, r10
+
FLUSH_COUNT_CACHE
/*
@@ -851,13 +865,7 @@ resume_kernel:
* sure we are soft-disabled first and reconcile irq state.
*/
RECONCILE_IRQ_STATE(r3,r4)
-1: bl preempt_schedule_irq
-
- /* Re-test flags and eventually loop */
- ld r9, PACA_THREAD_INFO(r13)
- ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
+ bl preempt_schedule_irq
/*
* arch_local_irq_restore() from preempt_schedule_irq above may
@@ -942,6 +950,8 @@ fast_exception_return:
ld r4,_XER(r1)
mtspr SPRN_XER,r4
+ kuap_check_amr r5, r6
+
REST_8GPRS(5, r1)
andi. r0,r3,MSR_RI
@@ -974,6 +984,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
mtspr SPRN_SRR1,r3
ld r2,_CCR(r1)
@@ -1006,6 +1020,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r0,GPR0(r1)
ld r2,GPR2(r1)
ld r3,GPR3(r1)
+
+ kuap_restore_amr r4
+
ld r4,GPR4(r1)
ld r1,GPR1(r1)
RFI_TO_KERNEL
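
The kuap_check_amr / kuap_restore_amr hooks added on the 64-bit exit paths above belong to Kernel Userspace Access Prevention: outside an explicit user-access window the AMR must sit in its blocked state, so the exit code only has to check or restore that state. A rough kernel-context sketch of the invariant (the constant and helper names here are illustrative, not the real asm/kup.h API):

	/*
	 * Illustrative only: KUAP keeps the AMR at a "no user access"
	 * value except inside helpers such as copy_to_user(), which
	 * open and close the access window around the copy.  The
	 * exit-path hook simply asserts the window is closed again.
	 */
	#define SKETCH_AMR_KUAP_BLOCKED	0xfUL	/* hypothetical blocked value */

	static inline void sketch_kuap_check(unsigned long amr)
	{
		WARN_ON_ONCE(amr != SKETCH_AMR_KUAP_BLOCKED);
	}
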
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9481a117e242..6b86055e5251 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -19,6 +19,7 @@
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
+#include <asm/kup.h>
/*
* There are a few constraints to be concerned with.
@@ -120,7 +121,9 @@ EXC_VIRT_NONE(0x4000, 0x100)
mfspr r10,SPRN_SRR1 ; \
rlwinm. r10,r10,47-31,30,31 ; \
beq- 1f ; \
- cmpwi cr3,r10,2 ; \
+ cmpwi cr1,r10,2 ; \
+ mfspr r3,SPRN_SRR1 ; \
+ bltlr cr1 ; /* no state loss, return to idle caller */ \
BRANCH_TO_C000(r10, system_reset_idle_common) ; \
1: \
KVMTEST_PR(n) ; \
@@ -144,8 +147,11 @@ TRAMP_KVM(PACA_EXNMI, 0x100)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
- mfspr r12,SPRN_SRR1
- b pnv_powersave_wakeup
+ /*
+ * This must be a direct branch (without linker branch stub) because
+ * we can not use TOC at this point as r2 may not be restored yet.
+ */
+ b idle_return_gpr_loss
#endif
/*
@@ -309,6 +315,7 @@ TRAMP_REAL_BEGIN(machine_check_common_early)
mfspr r11,SPRN_DSISR /* Save DSISR */
std r11,_DSISR(r1)
std r9,_CCR(r1) /* Save CR in stackframe */
+ kuap_save_amr_and_lock r9, r10, cr1
/* Save r9 through r13 from EXMC save area to stack frame. */
EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
mfmsr r11 /* get MSR value */
@@ -427,17 +434,17 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
* Then decrement MCE nesting after finishing with the stack.
*/
ld r3,_MSR(r1)
+ ld r4,_LINK(r1)
lhz r11,PACA_IN_MCE(r13)
subi r11,r11,1
sth r11,PACA_IN_MCE(r13)
- /* Turn off the RI bit because SRR1 is used by idle wakeup code. */
- /* Recoverability could be improved by reducing the use of SRR1. */
- li r11,0
- mtmsrd r11,1
-
- b pnv_powersave_wakeup_mce
+ mtlr r4
+ rlwinm r10,r3,47-31,30,31
+ cmpwi cr1,r10,2
+ bltlr cr1 /* no state loss, return to idle caller */
+ b idle_return_gpr_loss
#endif
/*
* Handle machine check early in real mode. We come here with
@@ -1109,6 +1116,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
EXCEPTION_PROLOG_COMMON_1()
+ /* We don't touch AMR here, we never go to virtual mode */
EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
EXCEPTION_PROLOG_COMMON_3(0xe60)
addi r3,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 45a8d0be1c96..25f063f56ec5 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -36,6 +36,7 @@
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cma.h>
+#include <linux/hugetlb.h>
#include <asm/debugfs.h>
#include <asm/page.h>
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 529dcc21c3f9..cecd57e1d046 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -63,6 +63,7 @@ _GLOBAL(load_fp_state)
REST_32FPVSRS(0, R4, R3)
blr
EXPORT_SYMBOL(load_fp_state)
+_ASM_NOKPROBE_SYMBOL(load_fp_state); /* used by restore_math */
/*
* Store FP state into memory, including FPSCR
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e25b615e9f9e..755fab9641d6 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -37,6 +37,8 @@
#include <asm/export.h>
#include <asm/feature-fixups.h>
+#include "head_32.h"
+
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
/* see the comment for clear_bats() -- Cort */ \
@@ -160,6 +162,10 @@ __after_mmu_off:
bl flush_tlbs
bl initial_bats
+ bl load_segment_registers
+#ifdef CONFIG_KASAN
+ bl early_hash_table
+#endif
#if defined(CONFIG_BOOTX_TEXT)
bl setup_disp_bat
#endif
@@ -205,7 +211,7 @@ __after_mmu_off:
*/
turn_on_mmu:
mfmsr r0
- ori r0,r0,MSR_DR|MSR_IR
+ ori r0,r0,MSR_DR|MSR_IR|MSR_RI
mtspr SPRN_SRR1,r0
lis r0,start_here@h
ori r0,r0,start_here@l
@@ -242,103 +248,6 @@ __secondary_hold_spinloop:
__secondary_hold_acknowledge:
.long -1
-/*
- * Exception entry code. This code runs with address translation
- * turned off, i.e. using physical addresses.
- * We assume sprg3 has the physical address of the current
- * task's thread_struct.
- */
-#define EXCEPTION_PROLOG \
- mtspr SPRN_SPRG_SCRATCH0,r10; \
- mtspr SPRN_SPRG_SCRATCH1,r11; \
- mfcr r10; \
- EXCEPTION_PROLOG_1; \
- EXCEPTION_PROLOG_2
-
-#define EXCEPTION_PROLOG_1 \
- mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
- andi. r11,r11,MSR_PR; \
- tophys(r11,r1); /* use tophys(r1) if kernel */ \
- beq 1f; \
- mfspr r11,SPRN_SPRG_THREAD; \
- lwz r11,TASK_STACK-THREAD(r11); \
- addi r11,r11,THREAD_SIZE; \
- tophys(r11,r11); \
-1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
-
-
-#define EXCEPTION_PROLOG_2 \
- stw r10,_CCR(r11); /* save registers */ \
- stw r12,GPR12(r11); \
- stw r9,GPR9(r11); \
- mfspr r10,SPRN_SPRG_SCRATCH0; \
- stw r10,GPR10(r11); \
- mfspr r12,SPRN_SPRG_SCRATCH1; \
- stw r12,GPR11(r11); \
- mflr r10; \
- stw r10,_LINK(r11); \
- mfspr r12,SPRN_SRR0; \
- mfspr r9,SPRN_SRR1; \
- stw r1,GPR1(r11); \
- stw r1,0(r11); \
- tovirt(r1,r11); /* set new kernel sp */ \
- li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
- MTMSRD(r10); /* (except for mach check in rtas) */ \
- stw r0,GPR0(r11); \
- lis r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
- addi r10,r10,STACK_FRAME_REGS_MARKER@l; \
- stw r10,8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
-
-/*
- * Note: code which follows this uses cr0.eq (set if from kernel),
- * r11, r12 (SRR0), and r9 (SRR1).
- *
- * Note2: once we have set r1 we are in a position to take exceptions
- * again, and we could thus set MSR:RI at that point.
- */
-
-/*
- * Exception vectors.
- */
-#define EXCEPTION(n, label, hdlr, xfer) \
- . = n; \
- DO_KVM n; \
-label: \
- EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
-
-#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- li r10,MSR_KERNEL; \
- copyee(r10, r9); \
- bl tfer; \
-i##n: \
- .long hdlr; \
- .long ret
-
-#define COPY_EE(d, s) rlwimi d,s,0,16,16
-#define NOCOPY(d, s)
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
- ret_from_except)
-
-#define EXC_XFER_EE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_EE_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
- ret_from_except)
-
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
putting it back to what it was (unknown_exception) when done. */
@@ -387,7 +296,11 @@ DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
+#ifdef CONFIG_PPC_KUAP
+ andis. r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
+#else
andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
+#endif
bne 1f /* if not, try to put a PTE */
mfspr r4,SPRN_DAR /* into the hash table */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
@@ -428,7 +341,7 @@ Alignment:
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, alignment_exception)
+ EXC_XFER_STD(0x600, alignment_exception)
/* Program check exception */
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
@@ -449,24 +362,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
bl load_up_fpu /* if from user, just load it up */
b fast_exception_return
1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
+ EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
- EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
/* System call */
. = 0xc00
DO_KVM 0xc00
SystemCall:
- EXCEPTION_PROLOG
- EXC_XFER_EE_LITE(0xc00, DoSyscall)
+ SYSCALL_ENTRY 0xc00
/* Single step - not used on 601 */
EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
/*
* The Altivec unavailable trap is at 0x0f20. Foo.
@@ -522,9 +434,9 @@ InstructionTLBMiss:
andc. r1,r1,r0 /* check access & ~permission */
bne- InstructionAddressInvalid /* return if access not permitted */
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
- ori r1, r1, 0xe05 /* clear out reserved bits */
- andc r1, r0, r1 /* PP = user? 2 : 0 */
+ rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ ori r1, r1, 0xe06 /* clear out reserved bits */
+ andc r1, r0, r1 /* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -590,11 +502,11 @@ DataLoadTLBMiss:
* we would need to update the pte atomically with lwarx/stwcx.
*/
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwinm r1,r0,32-10,31,31 /* _PAGE_RW -> PP lsb */
+ rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
ori r1,r1,0xe04 /* clear out reserved bits */
- andc r1,r0,r1 /* PP = user? rw? 2: 3: 0 */
+ andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -670,9 +582,9 @@ DataStoreTLBMiss:
* we would need to update the pte atomically with lwarx/stwcx.
*/
/* Convert linux-style PTE to low word of PPC-style PTE */
- rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
- li r1,0xe05 /* clear out reserved bits & PP lsb */
- andc r1,r0,r1 /* PP = user? 2: 0 */
+ rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ li r1,0xe06 /* clear out reserved bits & PP msb */
+ andc r1,r0,r1 /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -698,35 +610,35 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#define altivec_assist_exception unknown_exception
#endif
- EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
- EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
+ EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
+ EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
- EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
+ EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
. = 0x3000
@@ -738,7 +650,7 @@ AltiVecUnavailable:
b fast_exception_return
#endif /* CONFIG_ALTIVEC */
1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
+ EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
PerformanceMonitor:
EXCEPTION_PROLOG
@@ -880,11 +792,24 @@ _ENTRY(__restore_cpu_setup)
blr
#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
-
/*
* Load stuff into the MMU. Intended to be called with
* IR=0 and DR=0.
*/
+#ifdef CONFIG_KASAN
+early_hash_table:
+ sync /* Force all PTE updates to finish */
+ isync
+ tlbia /* Clear all TLB entries */
+ sync /* wait for tlbia/tlbie to finish */
+ TLBSYNC /* ... on all CPUs */
+ /* Load the SDR1 register (hash table base & size) */
+ lis r6, early_hash - PAGE_OFFSET@h
+ ori r6, r6, 3 /* 256kB table */
+ mtspr SPRN_SDR1, r6
+ blr
+#endif
+
load_up_mmu:
sync /* Force all PTE updates to finish */
isync
@@ -896,14 +821,6 @@ load_up_mmu:
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SPRN_SDR1,r6
- li r0,16 /* load up segment register values */
- mtctr r0 /* for context 0 */
- lis r3,0x2000 /* Ku = 1, VSID = 0 */
- li r4,0
-3: mtsrin r3,r4
- addi r3,r3,0x111 /* increment VSID */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
/* Load the BAT registers with the values set up by MMU_init.
MMU_init takes care of whether we're on a 601 or not. */
@@ -925,6 +842,32 @@ BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
+load_segment_registers:
+ li r0, NUM_USER_SEGMENTS /* load up user segment register values */
+ mtctr r0 /* for context 0 */
+ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
+#ifdef CONFIG_PPC_KUEP
+ oris r3, r3, SR_NX@h /* Set Nx */
+#endif
+#ifdef CONFIG_PPC_KUAP
+ oris r3, r3, SR_KS@h /* Set Ks */
+#endif
+ li r4, 0
+3: mtsrin r3, r4
+ addi r3, r3, 0x111 /* increment VSID */
+ addis r4, r4, 0x1000 /* address of next segment */
+ bdnz 3b
+ li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
+ mtctr r0 /* for context 0 */
+ rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
+ rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
+ oris r3, r3, SR_KP@h /* Kp = 1 */
+3: mtsrin r3, r4
+ addi r3, r3, 0x111 /* increment VSID */
+ addis r4, r4, 0x1000 /* address of next segment */
+ bdnz 3b
+ blr
+
/*
* This is where the main kernel code starts.
*/
@@ -950,11 +893,17 @@ start_here:
* Do early platform-specific initialization,
* and set up the MMU.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
li r3,0
mr r4,r31
bl machine_init
bl __save_cpu_setup
bl MMU_init
+BEGIN_MMU_FTR_SECTION
+ bl MMU_init_hw_patch
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
/*
* Go back to running unmapped so we can load up new values
@@ -1006,7 +955,12 @@ _ENTRY(switch_mmu_context)
blt- 4f
mulli r3,r3,897 /* multiply context by skew factor */
rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
- addis r3,r3,0x6000 /* Set Ks, Ku bits */
+#ifdef CONFIG_PPC_KUEP
+ oris r3, r3, SR_NX@h /* Set Nx */
+#endif
+#ifdef CONFIG_PPC_KUAP
+ oris r3, r3, SR_KS@h /* Set Ks */
+#endif
li r0,NUM_USER_SEGMENTS
mtctr r0
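
A note on the EXC_XFER_EE / EXC_XFER_EE_LITE removals in this file: the dropped COPY_EE macro (rlwimi d,s,0,16,16) copied only the interrupted context's MSR_EE bit into the MSR the handler runs with. All transfers now use the plain MSR_KERNEL value, so handlers enter with EE clear and interrupts are re-enabled from C. For clarity, the old bit copy expressed in C (sketch; MSR_EE is 0x8000, i.e. bit 16 in IBM numbering):

	/* What the dropped COPY_EE(d, s) amounted to, as a C expression. */
	#define SKETCH_MSR_EE	0x00008000UL

	static inline unsigned long sketch_copy_ee(unsigned long dest,
						   unsigned long src)
	{
		return (dest & ~SKETCH_MSR_EE) | (src & SKETCH_MSR_EE);
	}
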
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
new file mode 100644
index 000000000000..4a692553651f
--- /dev/null
+++ b/arch/powerpc/kernel/head_32.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __HEAD_32_H__
+#define __HEAD_32_H__
+
+#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
+
+/*
+ * MSR_KERNEL is > 0x8000 on 4xx/Book-E since it includes MSR_CE.
+ */
+.macro __LOAD_MSR_KERNEL r, x
+.if \x >= 0x8000
+ lis \r, (\x)@h
+ ori \r, \r, (\x)@l
+.else
+ li \r, (\x)
+.endif
+.endm
+#define LOAD_MSR_KERNEL(r, x) __LOAD_MSR_KERNEL r, x
+
+/*
+ * Exception entry code. This code runs with address translation
+ * turned off, i.e. using physical addresses.
+ * We assume sprg3 has the physical address of the current
+ * task's thread_struct.
+ */
+
+.macro EXCEPTION_PROLOG
+ mtspr SPRN_SPRG_SCRATCH0,r10
+ mtspr SPRN_SPRG_SCRATCH1,r11
+ mfcr r10
+ EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2
+.endm
+
+.macro EXCEPTION_PROLOG_1
+ mfspr r11,SPRN_SRR1 /* check whether user or kernel */
+ andi. r11,r11,MSR_PR
+ tophys(r11,r1) /* use tophys(r1) if kernel */
+ beq 1f
+ mfspr r11,SPRN_SPRG_THREAD
+ lwz r11,TASK_STACK-THREAD(r11)
+ addi r11,r11,THREAD_SIZE
+ tophys(r11,r11)
+1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
+.endm
+
+.macro EXCEPTION_PROLOG_2
+ stw r10,_CCR(r11) /* save registers */
+ stw r12,GPR12(r11)
+ stw r9,GPR9(r11)
+ mfspr r10,SPRN_SPRG_SCRATCH0
+ stw r10,GPR10(r11)
+ mfspr r12,SPRN_SPRG_SCRATCH1
+ stw r12,GPR11(r11)
+ mflr r10
+ stw r10,_LINK(r11)
+ mfspr r12,SPRN_SRR0
+ mfspr r9,SPRN_SRR1
+ stw r1,GPR1(r11)
+ stw r1,0(r11)
+ tovirt(r1,r11) /* set new kernel sp */
+#ifdef CONFIG_40x
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#else
+ li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
+ MTMSRD(r10) /* (except for mach check in rtas) */
+#endif
+ stw r0,GPR0(r11)
+ lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ addi r10,r10,STACK_FRAME_REGS_MARKER@l
+ stw r10,8(r11)
+ SAVE_4GPRS(3, r11)
+ SAVE_2GPRS(7, r11)
+.endm
+
+.macro SYSCALL_ENTRY trapno
+ mfspr r12,SPRN_SPRG_THREAD
+ mfcr r10
+ lwz r11,TASK_STACK-THREAD(r12)
+ mflr r9
+ addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE
+ rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
+ tophys(r11,r11)
+ stw r10,_CCR(r11) /* save registers */
+ mfspr r10,SPRN_SRR0
+ stw r9,_LINK(r11)
+ mfspr r9,SPRN_SRR1
+ stw r1,GPR1(r11)
+ stw r1,0(r11)
+ tovirt(r1,r11) /* set new kernel sp */
+ stw r10,_NIP(r11)
+#ifdef CONFIG_40x
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#else
+ LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
+ MTMSRD(r10) /* (except for mach check in rtas) */
+#endif
+ lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ stw r2,GPR2(r11)
+ addi r10,r10,STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r11)
+ li r2, \trapno + 1
+ stw r10,8(r11)
+ stw r2,_TRAP(r11)
+ SAVE_GPR(0, r11)
+ SAVE_4GPRS(3, r11)
+ SAVE_2GPRS(7, r11)
+ addi r11,r1,STACK_FRAME_OVERHEAD
+ addi r2,r12,-THREAD
+ stw r11,PT_REGS(r12)
+#if defined(CONFIG_40x)
+ /* Check to see if the dbcr0 register is set up to debug. Use the
+ internal debug mode bit to do this. */
+ lwz r12,THREAD_DBCR0(r12)
+ andis. r12,r12,DBCR0_IDM@h
+#endif
+ ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+#if defined(CONFIG_40x)
+ beq+ 3f
+ /* From user and task is ptraced - load up global dbcr0 */
+ li r12,-1 /* clear all pending debug events */
+ mtspr SPRN_DBSR,r12
+ lis r11,global_dbcr0@ha
+ tophys(r11,r11)
+ addi r11,r11,global_dbcr0@l
+ lwz r12,0(r11)
+ mtspr SPRN_DBCR0,r12
+ lwz r12,4(r11)
+ addi r12,r12,-1
+ stw r12,4(r11)
+#endif
+
+3:
+ tovirt(r2, r2) /* set r2 to current */
+ lis r11, transfer_to_syscall@h
+ ori r11, r11, transfer_to_syscall@l
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * If MSR is changing we need to keep interrupts disabled at this point
+ * otherwise we might risk taking an interrupt before we tell lockdep
+ * they are enabled.
+ */
+ LOAD_MSR_KERNEL(r10, MSR_KERNEL)
+ rlwimi r10, r9, 0, MSR_EE
+#else
+ LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
+#endif
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+ mtspr SPRN_NRI, r0
+#endif
+ mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR0,r11
+ SYNC
+ RFI /* jump to handler, enable MMU */
+.endm
+
+/*
+ * Note: code which follows this uses cr0.eq (set if from kernel),
+ * r11, r12 (SRR0), and r9 (SRR1).
+ *
+ * Note2: once we have set r1 we are in a position to take exceptions
+ * again, and we could thus set MSR:RI at that point.
+ */
+
+/*
+ * Exception vectors.
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define START_EXCEPTION(n, label) \
+ . = n; \
+ DO_KVM n; \
+label:
+
+#else
+#define START_EXCEPTION(n, label) \
+ . = n; \
+label:
+
+#endif
+
+#define EXCEPTION(n, label, hdlr, xfer) \
+ START_EXCEPTION(n, label) \
+ EXCEPTION_PROLOG; \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
+ xfer(n, hdlr)
+
+#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
+ li r10,trap; \
+ stw r10,_TRAP(r11); \
+ LOAD_MSR_KERNEL(r10, msr); \
+ bl tfer; \
+ .long hdlr; \
+ .long ret
+
+#define EXC_XFER_STD(n, hdlr) \
+ EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
+ ret_from_except_full)
+
+#define EXC_XFER_LITE(n, hdlr) \
+ EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
+ ret_from_except)
+
+#endif /* __HEAD_32_H__ */
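
The __LOAD_MSR_KERNEL macro at the top of this new header splits at 0x8000 because li sign-extends its 16-bit immediate: anything at or above 0x8000 would load as a negative word, so such values must be built with lis/ori instead. A small standalone C illustration of the effect:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* what a sign-extending 16-bit load (like "li") produces */
		int16_t imm = (int16_t)0x8000;
		uint32_t loaded = (uint32_t)(int32_t)imm;

		printf("0x%08" PRIx32 "\n", loaded); /* 0xffff8000, not 0x00008000 */
		return 0;
	}
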
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a9c934f2319b..cf54b784100d 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -44,6 +44,8 @@
#include <asm/export.h>
#include <asm/asm-405.h>
+#include "head_32.h"
+
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
* optional, information:
@@ -99,46 +101,6 @@ _ENTRY(saved_ksp_limit)
.space 4
/*
- * Exception vector entry code. This code runs with address translation
- * turned off (i.e. using physical addresses). We assume SPRG_THREAD has
- * the physical address of the current task thread_struct.
- * Note that we have to have decremented r1 before we write to any fields
- * of the exception frame, since a critical interrupt could occur at any
- * time, and it will write to the area immediately below the current r1.
- */
-#define NORMAL_EXCEPTION_PROLOG \
- mtspr SPRN_SPRG_SCRATCH0,r10; /* save two registers to work with */\
- mtspr SPRN_SPRG_SCRATCH1,r11; \
- mtspr SPRN_SPRG_SCRATCH2,r1; \
- mfcr r10; /* save CR in r10 for now */\
- mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
- andi. r11,r11,MSR_PR; \
- beq 1f; \
- mfspr r1,SPRN_SPRG_THREAD; /* if from user, start at top of */\
- lwz r1,TASK_STACK-THREAD(r1); /* this thread's kernel stack */\
- addi r1,r1,THREAD_SIZE; \
-1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\
- tophys(r11,r1); \
- stw r10,_CCR(r11); /* save various registers */\
- stw r12,GPR12(r11); \
- stw r9,GPR9(r11); \
- mfspr r10,SPRN_SPRG_SCRATCH0; \
- stw r10,GPR10(r11); \
- mfspr r12,SPRN_SPRG_SCRATCH1; \
- stw r12,GPR11(r11); \
- mflr r10; \
- stw r10,_LINK(r11); \
- mfspr r10,SPRN_SPRG_SCRATCH2; \
- mfspr r12,SPRN_SRR0; \
- stw r10,GPR1(r11); \
- mfspr r9,SPRN_SRR1; \
- stw r10,0(r11); \
- rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
-
-/*
* Exception prolog for critical exceptions. This is a little different
* from the normal exception prolog above since a critical exception
* can potentially occur at any point during normal exception processing.
@@ -177,6 +139,9 @@ _ENTRY(saved_ksp_limit)
tovirt(r1,r11); \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
stw r0,GPR0(r11); \
+ lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
+ stw r10, 8(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
@@ -196,53 +161,12 @@ _ENTRY(saved_ksp_limit)
/*
* Exception vectors.
*/
-#define START_EXCEPTION(n, label) \
- . = n; \
-label:
-
-#define EXCEPTION(n, label, hdlr, xfer) \
- START_EXCEPTION(n, label); \
- NORMAL_EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
-
#define CRITICAL_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label); \
CRITICAL_EXCEPTION_PROLOG; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- NOCOPY, crit_transfer_to_handler, \
- ret_from_crit_exc)
-
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- lis r10,msr@h; \
- ori r10,r10,msr@l; \
- copyee(r10, r9); \
- bl tfer; \
- .long hdlr; \
- .long ret
-
-#define COPY_EE(d, s) rlwimi d,s,0,16,16
-#define NOCOPY(d, s)
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
- ret_from_except)
-
-#define EXC_XFER_EE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_EE_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
- ret_from_except)
-
+ crit_transfer_to_handler, ret_from_crit_exc)
/*
* 0x0100 - Critical Interrupt Exception
@@ -393,7 +317,7 @@ label:
* This is caused by a fetch from non-execute or guarded pages.
*/
START_EXCEPTION(0x0400, InstructionAccess)
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
mr r4,r12 /* Pass SRR0 as arg2 */
li r5,0 /* Pass zero as arg3 */
EXC_XFER_LITE(0x400, handle_page_fault)
@@ -403,33 +327,32 @@ label:
/* 0x0600 - Alignment Exception */
START_EXCEPTION(0x0600, Alignment)
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
stw r4,_DEAR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, alignment_exception)
+ EXC_XFER_STD(0x600, alignment_exception)
/* 0x0700 - Program Exception */
START_EXCEPTION(0x0700, ProgramCheck)
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
mfspr r4,SPRN_ESR /* Grab the ESR and save it */
stw r4,_ESR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0x700, program_check_exception)
- EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_STD)
/* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall)
- NORMAL_EXCEPTION_PROLOG
- EXC_XFER_EE_LITE(0xc00, DoSyscall)
+ SYSCALL_ENTRY 0xc00
- EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
. = 0x1000
@@ -646,25 +569,25 @@ label:
mfspr r10, SPRN_SPRG_SCRATCH0
b InstructionAccess
- EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
#ifdef CONFIG_IBM405_ERR51
/* 405GP errata 51 */
START_EXCEPTION(0x1700, Trap_17)
b DTLBMiss
#else
- EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
#endif
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_STD)
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -726,11 +649,11 @@ label:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_TEMPLATE(DebugException, 0x2002, \
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+ crit_transfer_to_handler, ret_from_crit_exc)
/* Programmable Interval Timer (PIT) Exception. (from 0x1000) */
Decrementer:
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
lis r0,TSR_PIS@h
mtspr SPRN_TSR,r0 /* Clear the PIT exception */
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -738,9 +661,9 @@ Decrementer:
/* Fixed Interval Timer (FIT) Exception. (from 0x1010) */
FITException:
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
addi r3,r1,STACK_FRAME_OVERHEAD;
- EXC_XFER_EE(0x1010, unknown_exception)
+ EXC_XFER_STD(0x1010, unknown_exception)
/* Watchdog Timer (WDT) Exception. (from 0x1020) */
WDTException:
@@ -748,15 +671,14 @@ WDTException:
addi r3,r1,STACK_FRAME_OVERHEAD;
EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2,
(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
- NOCOPY, crit_transfer_to_handler,
- ret_from_crit_exc)
+ crit_transfer_to_handler, ret_from_crit_exc)
/*
* The other Data TLB exceptions bail out to this point
* if they can't resolve the lightweight TLB fault.
*/
DataAccess:
- NORMAL_EXCEPTION_PROLOG
+ EXCEPTION_PROLOG
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
@@ -848,6 +770,9 @@ start_here:
/*
* Decide what sort of machine this is and initialize the MMU.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
li r3,0
mr r4,r31
bl machine_init
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 37117ab11584..f15fba58c744 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -203,6 +203,9 @@ _ENTRY(_start);
/*
* Decide what sort of machine this is and initialize the MMU.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
li r3,0
mr r4,r31
bl machine_init
@@ -278,16 +281,15 @@ interrupt_base:
FP_UNAVAILABLE_EXCEPTION
#else
EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
- FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
- EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+ SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL
/* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
- AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
@@ -295,7 +297,7 @@ interrupt_base:
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3fad8d499767..5321a11c2835 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -968,7 +968,9 @@ start_here_multiplatform:
/* Restore parameters passed from prom_init/kexec */
mr r3,r31
- bl early_setup /* also sets r13 and SPRG_PACA */
+ LOAD_REG_ADDR(r12, DOTSYM(early_setup))
+ mtctr r12
+ bctrl /* also sets r13 and SPRG_PACA */
LOAD_REG_ADDR(r3, start_here_common)
ld r4,PACAKMSR(r13)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 03c73b4c6435..885be7f3d29a 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -33,6 +33,8 @@
#include <asm/export.h>
#include <asm/code-patching-asm.h>
+#include "head_32.h"
+
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if its a kernel address */
#define SIMPLE_KERNEL_ADDRESS 1
@@ -123,102 +125,6 @@ instruction_counter:
.space 4
#endif
-/*
- * Exception entry code. This code runs with address translation
- * turned off, i.e. using physical addresses.
- * We assume sprg3 has the physical address of the current
- * task's thread_struct.
- */
-#define EXCEPTION_PROLOG \
- mtspr SPRN_SPRG_SCRATCH0, r10; \
- mtspr SPRN_SPRG_SCRATCH1, r11; \
- mfcr r10; \
- EXCEPTION_PROLOG_1; \
- EXCEPTION_PROLOG_2
-
-#define EXCEPTION_PROLOG_1 \
- mfspr r11,SPRN_SRR1; /* check whether user or kernel */ \
- andi. r11,r11,MSR_PR; \
- tophys(r11,r1); /* use tophys(r1) if kernel */ \
- beq 1f; \
- mfspr r11,SPRN_SPRG_THREAD; \
- lwz r11,TASK_STACK-THREAD(r11); \
- addi r11,r11,THREAD_SIZE; \
- tophys(r11,r11); \
-1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. frame */
-
-
-#define EXCEPTION_PROLOG_2 \
- stw r10,_CCR(r11); /* save registers */ \
- stw r12,GPR12(r11); \
- stw r9,GPR9(r11); \
- mfspr r10,SPRN_SPRG_SCRATCH0; \
- stw r10,GPR10(r11); \
- mfspr r12,SPRN_SPRG_SCRATCH1; \
- stw r12,GPR11(r11); \
- mflr r10; \
- stw r10,_LINK(r11); \
- mfspr r12,SPRN_SRR0; \
- mfspr r9,SPRN_SRR1; \
- stw r1,GPR1(r11); \
- stw r1,0(r11); \
- tovirt(r1,r11); /* set new kernel sp */ \
- li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
- mtmsr r10; \
- stw r0,GPR0(r11); \
- lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
- addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
- stw r10, 8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
-
-/*
- * Note: code which follows this uses cr0.eq (set if from kernel),
- * r11, r12 (SRR0), and r9 (SRR1).
- *
- * Note2: once we have set r1 we are in a position to take exceptions
- * again, and we could thus set MSR:RI at that point.
- */
-
-/*
- * Exception vectors.
- */
-#define EXCEPTION(n, label, hdlr, xfer) \
- . = n; \
-label: \
- EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
-
-#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- li r10,MSR_KERNEL; \
- copyee(r10, r9); \
- bl tfer; \
-i##n: \
- .long hdlr; \
- .long ret
-
-#define COPY_EE(d, s) rlwimi d,s,0,16,16
-#define NOCOPY(d, s)
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
- ret_from_except)
-
-#define EXC_XFER_EE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_EE_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
- ret_from_except)
-
/* System reset */
EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
@@ -261,7 +167,7 @@ Alignment:
mfspr r5,SPRN_DSISR
stw r5,_DSISR(r11)
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE(0x600, alignment_exception)
+ EXC_XFER_STD(0x600, alignment_exception)
/* Program check exception */
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
@@ -273,19 +179,18 @@ Alignment:
/* Decrementer */
EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
- EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
/* System call */
. = 0xc00
SystemCall:
- EXCEPTION_PROLOG
- EXC_XFER_EE_LITE(0xc00, DoSyscall)
+ SYSCALL_ENTRY 0xc00
/* Single step - not used on 601 */
EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0xf00, Trap_0f, unknown_exception, EXC_XFER_STD)
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
@@ -615,13 +520,13 @@ DARFixed:/* Return from dcbx instruction bug workaround */
/* 0x300 is DataAccess exception, needed by bad_page_fault() */
EXC_XFER_LITE(0x300, handle_page_fault)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
/* On the MPC8xx, these next four traps are used for development
* support of breakpoints and such. Someday I will get around to
@@ -643,7 +548,7 @@ DataBreakpoint:
mfspr r4,SPRN_BAR
stw r4,_DAR(r11)
mfspr r5,SPRN_DSISR
- EXC_XFER_EE(0x1c00, do_break)
+ EXC_XFER_STD(0x1c00, do_break)
11:
mtcr r10
mfspr r10, SPRN_SPRG_SCRATCH0
@@ -663,10 +568,10 @@ InstructionBreakpoint:
mfspr r10, SPRN_SPRG_SCRATCH0
rfi
#else
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
#endif
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
. = 0x2000
@@ -853,6 +758,9 @@ start_here:
/*
* Decide what sort of machine this is and initialize the MMU.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
li r3,0
mr r4,r31
bl machine_init
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 1b22a8dea399..bfeb469e8106 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -6,6 +6,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
+#ifdef __ASSEMBLY__
+
/*
* Macros used for common Book-e exception handling
*/
@@ -81,6 +83,101 @@ END_BTB_FLUSH_SECTION
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
+.macro SYSCALL_ENTRY trapno intno
+ mfspr r10, SPRN_SPRG_THREAD
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mtspr SPRN_SPRG_WSCRATCH0, r10
+ stw r11, THREAD_NORMSAVE(0)(r10)
+ stw r13, THREAD_NORMSAVE(2)(r10)
+ mfcr r13 /* save CR in r13 for now */
+ mfspr r11, SPRN_SRR1
+ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
+ bf 3, 1975f
+ b kvmppc_handler_BOOKE_INTERRUPT_\intno\()_SPRN_SRR1
+1975:
+ mr r12, r13
+ lwz r13, THREAD_NORMSAVE(2)(r10)
+FTR_SECTION_ELSE
+#endif
+ mfcr r12
+#ifdef CONFIG_KVM_BOOKE_HV
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#endif
+ BOOKE_CLEAR_BTB(r11)
+ lwz r11, TASK_STACK - THREAD(r10)
+ rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
+ stw r12, _CCR(r11) /* save various registers */
+ mflr r12
+ stw r12,_LINK(r11)
+ mfspr r12,SPRN_SRR0
+ stw r1, GPR1(r11)
+ mfspr r9,SPRN_SRR1
+ stw r1, 0(r11)
+ mr r1, r11
+ stw r12,_NIP(r11)
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+ lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ stw r2,GPR2(r11)
+ addi r12, r12, STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r11)
+ li r2, \trapno + 1
+ stw r12, 8(r11)
+ stw r2,_TRAP(r11)
+ SAVE_GPR(0, r11)
+ SAVE_4GPRS(3, r11)
+ SAVE_2GPRS(7, r11)
+
+ addi r11,r1,STACK_FRAME_OVERHEAD
+ addi r2,r10,-THREAD
+ stw r11,PT_REGS(r10)
+ /* Check to see if the dbcr0 register is set up to debug. Use the
+ internal debug mode bit to do this. */
+ lwz r12,THREAD_DBCR0(r10)
+ andis. r12,r12,DBCR0_IDM@h
+ ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
+ beq+ 3f
+ /* From user and task is ptraced - load up global dbcr0 */
+ li r12,-1 /* clear all pending debug events */
+ mtspr SPRN_DBSR,r12
+ lis r11,global_dbcr0@ha
+ tophys(r11,r11)
+ addi r11,r11,global_dbcr0@l
+#ifdef CONFIG_SMP
+ lwz r9,TASK_CPU(r2)
+ slwi r9,r9,3
+ add r11,r11,r9
+#endif
+ lwz r12,0(r11)
+ mtspr SPRN_DBCR0,r12
+ lwz r12,4(r11)
+ addi r12,r12,-1
+ stw r12,4(r11)
+
+3:
+ tovirt(r2, r2) /* set r2 to current */
+ lis r11, transfer_to_syscall@h
+ ori r11, r11, transfer_to_syscall@l
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * If MSR is changing we need to keep interrupts disabled at this point
+ * otherwise we might risk taking an interrupt before we tell lockdep
+ * they are enabled.
+ */
+ lis r10, MSR_KERNEL@h
+ ori r10, r10, MSR_KERNEL@l
+ rlwimi r10, r9, 0, MSR_EE
+#else
+ lis r10, (MSR_KERNEL | MSR_EE)@h
+ ori r10, r10, (MSR_KERNEL | MSR_EE)@l
+#endif
+ mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR0,r11
+ SYNC
+ RFI /* jump to handler, enable MMU */
+.endm
+
/* To handle the additional exception priority levels on 40x and Book-E
* processors we allocate a stack per additional priority level.
*
@@ -217,8 +314,7 @@ label:
CRITICAL_EXCEPTION_PROLOG(intno); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- NOCOPY, crit_transfer_to_handler, \
- ret_from_crit_exc)
+ crit_transfer_to_handler, ret_from_crit_exc)
#define MCHECK_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(label); \
@@ -227,36 +323,23 @@ label:
stw r5,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- NOCOPY, mcheck_transfer_to_handler, \
- ret_from_mcheck_exc)
+ mcheck_transfer_to_handler, ret_from_mcheck_exc)
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \
+#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
li r10,trap; \
stw r10,_TRAP(r11); \
lis r10,msr@h; \
ori r10,r10,msr@l; \
- copyee(r10, r9); \
bl tfer; \
.long hdlr; \
.long ret
-#define COPY_EE(d, s) rlwimi d,s,0,16,16
-#define NOCOPY(d, s)
-
#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
+ EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
ret_from_except_full)
#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
- ret_from_except)
-
-#define EXC_XFER_EE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_EE_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
+ EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
ret_from_except)
/* Check for a single step debug exception while in an exception
@@ -323,7 +406,7 @@ label:
/* continue normal handling for a debug exception... */ \
2: mfspr r4,SPRN_DBSR; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, debug_transfer_to_handler, ret_from_debug_exc)
+ EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
#define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \
@@ -376,7 +459,7 @@ label:
/* continue normal handling for a critical exception... */ \
2: mfspr r4,SPRN_DBSR; \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)
+ EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
@@ -401,7 +484,7 @@ label:
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_EE(0x0600, alignment_exception)
+ EXC_XFER_STD(0x0600, alignment_exception)
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
@@ -426,9 +509,9 @@ label:
bl load_up_fpu; /* if from user, just load it up */ \
b fast_exception_return; \
1: addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
+ EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
-#ifndef __ASSEMBLY__
+#else /* __ASSEMBLY__ */
struct exception_regs {
unsigned long mas0;
unsigned long mas1;
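
On the TRACE_IRQFLAGS branch of the new SYSCALL_ENTRY macro above: the handler MSR is built with the caller's EE bit preserved so that interrupts stay hard-disabled until lockdep has been told they are on. The ordering being protected is the usual lockdep one, sketched below in kernel-context C (trace_hardirqs_on/off are the real hooks; the sketch_arch_* helpers are hypothetical stand-ins for the mtmsr-based enable/disable):

	/*
	 * Lockdep is told interrupts are on *before* they really are,
	 * and told they are off only *after* they really are, so no
	 * interrupt can ever arrive while lockdep's view says "off".
	 */
	static inline void sketch_irq_enable(void)
	{
		trace_hardirqs_on();		/* inform lockdep first     */
		sketch_arch_irq_enable();	/* hypothetical mtmsr, EE=1 */
	}

	static inline void sketch_irq_disable(void)
	{
		sketch_arch_irq_disable();	/* hypothetical mtmsr, EE=0 */
		trace_hardirqs_off();		/* then inform lockdep      */
	}
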
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 32332e24e421..6621f230cc37 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -268,6 +268,9 @@ set_ivor:
/*
* Decide what sort of machine this is and initialize the MMU.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
mr r3,r30
mr r4,r31
bl machine_init
@@ -380,7 +383,7 @@ interrupt_base:
EXC_XFER_LITE(0x0300, handle_page_fault)
1:
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x0300, CacheLockingException)
+ EXC_XFER_LITE(0x0300, CacheLockingException)
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
@@ -401,21 +404,20 @@ interrupt_base:
#ifdef CONFIG_E200
/* E200 treats 'normal' floating point instructions as FP Unavail exception */
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
- program_check_exception, EXC_XFER_EE)
+ program_check_exception, EXC_XFER_STD)
#else
EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
#endif
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG(SYSCALL)
- EXC_XFER_EE_LITE(0x0c00, DoSyscall)
+ SYSCALL_ENTRY 0xc00 SYSCALL
/* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
@@ -423,7 +425,7 @@ interrupt_base:
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
@@ -633,25 +635,25 @@ END_BTB_FLUSH_SECTION
bl load_up_spe
b fast_exception_return
1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x2010, KernelSPE)
+ EXC_XFER_LITE(0x2010, KernelSPE)
#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
#endif /* CONFIG_SPE_POSSIBLE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
- SPEFloatingPointException, EXC_XFER_EE)
+ SPEFloatingPointException, EXC_XFER_STD)
/* SPE Floating Point Round */
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- SPEFloatingPointRoundException, EXC_XFER_EE)
+ SPEFloatingPointRoundException, EXC_XFER_STD)
#elif defined(CONFIG_SPE_POSSIBLE)
EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- unknown_exception, EXC_XFER_EE)
+ unknown_exception, EXC_XFER_STD)
#endif /* CONFIG_SPE_POSSIBLE */
@@ -674,10 +676,10 @@ END_BTB_FLUSH_SECTION
unknown_exception)
/* Hypercall */
- EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_STD)
/* Embedded Hypervisor Privilege */
- EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_STD)
interrupt_end:
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index fec8a6773119..da307dd93ee3 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -29,11 +29,15 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/hvcall.h>
#include <linux/uaccess.h>
/*
@@ -174,7 +178,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
if (!ppc_breakpoint_available())
return -ENODEV;
length_max = 8; /* DABR */
- if (cpu_has_feature(CPU_FTR_DAWR)) {
+ if (dawr_enabled()) {
length_max = 512; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
if ((attr->bp_addr >> 9) !=
@@ -376,3 +380,59 @@ void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
+
+bool dawr_force_enable;
+EXPORT_SYMBOL_GPL(dawr_force_enable);
+
+static ssize_t dawr_write_file_bool(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct arch_hw_breakpoint null_brk = {0, 0, 0};
+ size_t rc;
+
+ /* Send error to user if the hypervisor won't allow us to write DAWR */
+ if ((!dawr_force_enable) &&
+ (firmware_has_feature(FW_FEATURE_LPAR)) &&
+ (set_dawr(&null_brk) != H_SUCCESS))
+ return -1;
+
+ rc = debugfs_write_file_bool(file, user_buf, count, ppos);
+ if (rc)
+ return rc;
+
+ /* If we are clearing, make sure all CPUs have the DAWR cleared */
+ if (!dawr_force_enable)
+ smp_call_function((smp_call_func_t)set_dawr, &null_brk, 0);
+
+ return rc;
+}
+
+static const struct file_operations dawr_enable_fops = {
+ .read = debugfs_read_file_bool,
+ .write = dawr_write_file_bool,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int __init dawr_force_setup(void)
+{
+ dawr_force_enable = false;
+
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+ /* Don't setup sysfs file for user control on P8 */
+ dawr_force_enable = true;
+ return 0;
+ }
+
+ if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
+ /* Turn DAWR off by default, but allow admin to turn it on */
+ dawr_force_enable = false;
+ debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
+ powerpc_debugfs_root,
+ &dawr_force_enable,
+ &dawr_enable_fops);
+ }
+ return 0;
+}
+arch_initcall(dawr_force_setup);
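
The hw_breakpoint.c hunks above switch the DAWR checks from cpu_has_feature(CPU_FTR_DAWR) to dawr_enabled(), and add a dawr_force_enable flag plus a "dawr_enable_dangerous" debugfs file to set it on POWER9. A minimal sketch of how such a helper could be keyed off the new flag is shown here; it assumes the helper lives in a header such as asm/hw_breakpoint.h, whose definition is not part of this hunk:

/* Sketch only: a dawr_enabled() helper keyed off the new flag. */
extern bool dawr_force_enable;

static inline bool dawr_enabled(void)
{
	return dawr_force_enable;
}
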
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 7f5ac2e8581b..2dfbd5d5b932 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -1,956 +1,188 @@
/*
- * This file contains idle entry/exit functions for POWER7,
- * POWER8 and POWER9 CPUs.
+ * Copyright 2018, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
+ *
+ * This file contains general idle entry/exit functions to save
+ * and restore stack and NVGPRs which allows C code to call idle
+ * states that lose GPRs, and it will return transparently with
+ * SRR1 wakeup reason return value.
+ *
+ * The platform / CPU caller must ensure SPRs and any other non-GPR
+ * state is saved and restored correctly, handle KVM, interrupts, etc.
*/
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
-#include <asm/hw_irq.h>
-#include <asm/kvm_book3s_asm.h>
-#include <asm/opal.h>
#include <asm/cpuidle.h>
-#include <asm/exception-64s.h>
-#include <asm/book3s/64/mmu-hash.h>
-#include <asm/mmu.h>
-#include <asm/asm-compat.h>
-#include <asm/feature-fixups.h>
-
-#undef DEBUG
-
-/*
- * Use unused space in the interrupt stack to save and restore
- * registers for winkle support.
- */
-#define _MMCR0 GPR0
-#define _SDR1 GPR3
-#define _PTCR GPR3
-#define _RPR GPR4
-#define _SPURR GPR5
-#define _PURR GPR6
-#define _TSCR GPR7
-#define _DSCR GPR8
-#define _AMOR GPR9
-#define _WORT GPR10
-#define _WORC GPR11
-#define _LPCR GPR12
-
-#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16
- .text
-
-/*
- * Used by threads before entering deep idle states. Saves SPRs
- * in interrupt stack frame
- */
-save_sprs_to_stack:
- /*
- * Note all register i.e per-core, per-subcore or per-thread is saved
- * here since any thread in the core might wake up first
- */
-BEGIN_FTR_SECTION
- /*
- * Note - SDR1 is dropped in Power ISA v3. Hence not restoring
- * SDR1 here
- */
- mfspr r3,SPRN_PTCR
- std r3,_PTCR(r1)
- mfspr r3,SPRN_LPCR
- std r3,_LPCR(r1)
-FTR_SECTION_ELSE
- mfspr r3,SPRN_SDR1
- std r3,_SDR1(r1)
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
- mfspr r3,SPRN_RPR
- std r3,_RPR(r1)
- mfspr r3,SPRN_SPURR
- std r3,_SPURR(r1)
- mfspr r3,SPRN_PURR
- std r3,_PURR(r1)
- mfspr r3,SPRN_TSCR
- std r3,_TSCR(r1)
- mfspr r3,SPRN_DSCR
- std r3,_DSCR(r1)
- mfspr r3,SPRN_AMOR
- std r3,_AMOR(r1)
- mfspr r3,SPRN_WORT
- std r3,_WORT(r1)
- mfspr r3,SPRN_WORC
- std r3,_WORC(r1)
/*
- * On POWER9, there are idle states such as stop4, invoked via cpuidle,
- * that lose hypervisor resources. In such cases, we need to save
- * additional SPRs before entering those idle states so that they can
- * be restored to their older values on wakeup from the idle state.
+ * Desired PSSCR in r3
*
- * On POWER8, the only such deep idle state is winkle which is used
- * only in the context of CPU-Hotplug, where these additional SPRs are
- * reinitiazed to a sane value. Hence there is no need to save/restore
- * these SPRs.
+ * No state will be lost regardless of wakeup mechanism (interrupt or NIA).
+ *
+ * An EC=0 type wakeup will return with a value of 0. SRESET wakeup (which can
+ * happen with xscom SRESET and possibly MCE) may clobber volatiles except LR,
+ * and must blr, to return to caller with r3 set according to caller's expected
+ * return code (for Book3S/64 that is SRR1).
*/
-BEGIN_FTR_SECTION
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-
-power9_save_additional_sprs:
- mfspr r3, SPRN_PID
- mfspr r4, SPRN_LDBAR
- std r3, STOP_PID(r13)
- std r4, STOP_LDBAR(r13)
-
- mfspr r3, SPRN_FSCR
- mfspr r4, SPRN_HFSCR
- std r3, STOP_FSCR(r13)
- std r4, STOP_HFSCR(r13)
-
- mfspr r3, SPRN_MMCRA
- mfspr r4, SPRN_MMCR0
- std r3, STOP_MMCRA(r13)
- std r4, _MMCR0(r1)
-
- mfspr r3, SPRN_MMCR1
- mfspr r4, SPRN_MMCR2
- std r3, STOP_MMCR1(r13)
- std r4, STOP_MMCR2(r13)
- blr
-
-power9_restore_additional_sprs:
- ld r3,_LPCR(r1)
- ld r4, STOP_PID(r13)
- mtspr SPRN_LPCR,r3
- mtspr SPRN_PID, r4
-
- ld r3, STOP_LDBAR(r13)
- ld r4, STOP_FSCR(r13)
- mtspr SPRN_LDBAR, r3
- mtspr SPRN_FSCR, r4
-
- ld r3, STOP_HFSCR(r13)
- ld r4, STOP_MMCRA(r13)
- mtspr SPRN_HFSCR, r3
- mtspr SPRN_MMCRA, r4
-
- ld r3, _MMCR0(r1)
- ld r4, STOP_MMCR1(r13)
- mtspr SPRN_MMCR0, r3
- mtspr SPRN_MMCR1, r4
-
- ld r3, STOP_MMCR2(r13)
- ld r4, PACA_SPRG_VDSO(r13)
- mtspr SPRN_MMCR2, r3
- mtspr SPRN_SPRG3, r4
+_GLOBAL(isa300_idle_stop_noloss)
+ mtspr SPRN_PSSCR,r3
+ PPC_STOP
+ li r3,0
blr
/*
- * Used by threads when the lock bit of core_idle_state is set.
- * Threads will spin in HMT_LOW until the lock bit is cleared.
- * r14 - pointer to core_idle_state
- * r15 - used to load contents of core_idle_state
- * r9 - used as a temporary variable
+ * Desired PSSCR in r3
+ *
+ * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
+ * The SRESET wakeup returns to this function's caller by calling
+ * idle_return_gpr_loss with r3 set to desired return value.
+ *
+ * A wakeup without GPR loss may alternatively be handled as in
+ * isa300_idle_stop_noloss and blr directly, as an optimisation.
+ *
+ * The caller is responsible for saving/restoring SPRs, MSR, timebase,
+ * etc.
*/
-
-core_idle_lock_held:
- HMT_LOW
-3: lwz r15,0(r14)
- andis. r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
- bne 3b
- HMT_MEDIUM
- lwarx r15,0,r14
- andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
- bne- core_idle_lock_held
- blr
+_GLOBAL(isa300_idle_stop_mayloss)
+ mtspr SPRN_PSSCR,r3
+ std r1,PACAR1(r13)
+ mflr r4
+ mfcr r5
+ /* use stack red zone rather than a new frame for saving regs */
+ std r2,-8*0(r1)
+ std r14,-8*1(r1)
+ std r15,-8*2(r1)
+ std r16,-8*3(r1)
+ std r17,-8*4(r1)
+ std r18,-8*5(r1)
+ std r19,-8*6(r1)
+ std r20,-8*7(r1)
+ std r21,-8*8(r1)
+ std r22,-8*9(r1)
+ std r23,-8*10(r1)
+ std r24,-8*11(r1)
+ std r25,-8*12(r1)
+ std r26,-8*13(r1)
+ std r27,-8*14(r1)
+ std r28,-8*15(r1)
+ std r29,-8*16(r1)
+ std r30,-8*17(r1)
+ std r31,-8*18(r1)
+ std r4,-8*19(r1)
+ std r5,-8*20(r1)
+ /* 168 bytes */
+ PPC_STOP
+ b . /* catch bugs */
/*
- * Pass requested state in r3:
- * r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
- * - Requested PSSCR value in POWER9
+ * Desired return value in r3
+ *
+ * The idle wakeup SRESET interrupt can call this after calling
+ * to return to the idle sleep function caller with r3 as the return code.
*
- * Address of idle handler to branch to in realmode in r4
+ * This must not be used if idle was entered via a _noloss function (use
+ * a simple blr instead).
*/
-pnv_powersave_common:
- /* Use r3 to pass state nap/sleep/winkle */
- /* NAP is a state loss, we create a regs frame on the
- * stack, fill it up with the state we care about and
- * stick a pointer to it in PACAR1. We really only
- * need to save PC, some CR bits and the NV GPRs,
- * but for now an interrupt frame will do.
- */
- mtctr r4
-
- mflr r0
- std r0,16(r1)
- stdu r1,-INT_FRAME_SIZE(r1)
- std r0,_LINK(r1)
- std r0,_NIP(r1)
-
- /* We haven't lost state ... yet */
- li r0,0
- stb r0,PACA_NAPSTATELOST(r13)
-
- /* Continue saving state */
- SAVE_GPR(2, r1)
- SAVE_NVGPRS(r1)
- mfcr r5
- std r5,_CCR(r1)
- std r1,PACAR1(r13)
-
-BEGIN_FTR_SECTION
- /*
- * POWER9 does not require real mode to stop, and presently does not
- * set hwthread_state for KVM (threads don't share MMU context), so
- * we can remain in virtual mode for this.
- */
- bctr
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- /*
- * POWER8
- * Go to real mode to do the nap, as required by the architecture.
- * Also, we need to be in real mode before setting hwthread_state,
- * because as soon as we do that, another thread can switch
- * the MMU context to the guest.
- */
- LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
- mtmsrd r7,0
- bctr
+_GLOBAL(idle_return_gpr_loss)
+ ld r1,PACAR1(r13)
+ ld r4,-8*19(r1)
+ ld r5,-8*20(r1)
+ mtlr r4
+ mtcr r5
+ /*
+ * KVM nap requires r2 to be saved, rather than just restoring it
+ * from PACATOC. This could be avoided for that less common case
+ * if KVM saved its r2.
+ */
+ ld r2,-8*0(r1)
+ ld r14,-8*1(r1)
+ ld r15,-8*2(r1)
+ ld r16,-8*3(r1)
+ ld r17,-8*4(r1)
+ ld r18,-8*5(r1)
+ ld r19,-8*6(r1)
+ ld r20,-8*7(r1)
+ ld r21,-8*8(r1)
+ ld r22,-8*9(r1)
+ ld r23,-8*10(r1)
+ ld r24,-8*11(r1)
+ ld r25,-8*12(r1)
+ ld r26,-8*13(r1)
+ ld r27,-8*14(r1)
+ ld r28,-8*15(r1)
+ ld r29,-8*16(r1)
+ ld r30,-8*17(r1)
+ ld r31,-8*18(r1)
+ blr
/*
* This is the sequence required to execute idle instructions, as
* specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
+ *
+ * The 0(r1) slot is used to save r2 in isa206, so use that here.
*/
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
- std r0,0(r1); \
+ std r2,0(r1); \
ptesync; \
- ld r0,0(r1); \
-236: cmpd cr0,r0,r0; \
+ ld r2,0(r1); \
+236: cmpd cr0,r2,r2; \
bne 236b; \
- IDLE_INST;
-
-
- .globl pnv_enter_arch207_idle_mode
-pnv_enter_arch207_idle_mode:
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- /******************************************************/
- /* N O T E W E L L ! ! ! N O T E W E L L */
- /* The following store to HSTATE_HWTHREAD_STATE(r13) */
- /* MUST occur in real mode, i.e. with the MMU off, */
- /* and the MMU must stay off until we clear this flag */
- /* and test HSTATE_HWTHREAD_REQ(r13) in */
- /* pnv_powersave_wakeup in this file. */
- /* The reason is that another thread can switch the */
- /* MMU to a guest context whenever this flag is set */
- /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
- /* that would potentially cause this thread to start */
- /* executing instructions from guest memory in */
- /* hypervisor mode, leading to a host crash or data */
- /* corruption, or worse. */
- /******************************************************/
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
- stb r3,PACA_THREAD_IDLE_STATE(r13)
- cmpwi cr3,r3,PNV_THREAD_SLEEP
- bge cr3,2f
- IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
- /* No return */
-2:
- /* Sleep or winkle */
- lbz r7,PACA_THREAD_MASK(r13)
- ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
- li r5,0
- beq cr3,3f
- lis r5,PNV_CORE_IDLE_WINKLE_COUNT@h
-3:
-lwarx_loop1:
- lwarx r15,0,r14
-
- andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
- bnel- core_idle_lock_held
-
- add r15,r15,r5 /* Add if winkle */
- andc r15,r15,r7 /* Clear thread bit */
-
- andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS
-
-/*
- * If cr0 = 0, then current thread is the last thread of the core entering
- * sleep. Last thread needs to execute the hardware bug workaround code if
- * required by the platform.
- * Make the workaround call unconditionally here. The below branch call is
- * patched out when the idle states are discovered if the platform does not
- * require it.
- */
-.global pnv_fastsleep_workaround_at_entry
-pnv_fastsleep_workaround_at_entry:
- beq fastsleep_workaround_at_entry
-
- stwcx. r15,0,r14
- bne- lwarx_loop1
- isync
-
-common_enter: /* common code for all the threads entering sleep or winkle */
- bgt cr3,enter_winkle
- IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
-
-fastsleep_workaround_at_entry:
- oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
- stwcx. r15,0,r14
- bne- lwarx_loop1
- isync
-
- /* Fast sleep workaround */
- li r3,1
- li r4,1
- bl opal_config_cpu_idle_state
-
- /* Unlock */
- xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
- lwsync
- stw r15,0(r14)
- b common_enter
-
-enter_winkle:
- bl save_sprs_to_stack
-
- IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
-
-/*
- * r3 - PSSCR value corresponding to the requested stop state.
- */
-power_enter_stop:
-/*
- * Check if we are executing the lite variant with ESL=EC=0
- */
- andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
- clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
- bne .Lhandle_esl_ec_set
- PPC_STOP
- li r3,0 /* Since we didn't lose state, return 0 */
- std r3, PACA_REQ_PSSCR(r13)
-
- /*
- * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
- * it can determine if the wakeup reason is an HMI in
- * CHECK_HMI_INTERRUPT.
- *
- * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup
- * reason, so there is no point setting r12 to SRR1.
- *
- * Further, we clear r12 here, so that we don't accidentally enter the
- * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
- */
- li r12, 0
- b pnv_wakeup_noloss
-
-.Lhandle_esl_ec_set:
-BEGIN_FTR_SECTION
- /*
- * POWER9 DD2.0 or earlier can incorrectly set PMAO when waking up after
- * a state-loss idle. Saving and restoring MMCR0 over idle is a
- * workaround.
- */
- mfspr r4,SPRN_MMCR0
- std r4,_MMCR0(r1)
-END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
+ IDLE_INST; \
+ b . /* catch bugs */
/*
- * Check if the requested state is a deep idle state.
- */
- LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
- ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
- cmpd r3,r4
- bge .Lhandle_deep_stop
- PPC_STOP /* Does not return (system reset interrupt) */
-
-.Lhandle_deep_stop:
-/*
- * Entering deep idle state.
- * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
- * stack and enter stop
- */
- lbz r7,PACA_THREAD_MASK(r13)
- ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
-
-lwarx_loop_stop:
- lwarx r15,0,r14
- andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
- bnel- core_idle_lock_held
- andc r15,r15,r7 /* Clear thread bit */
-
- stwcx. r15,0,r14
- bne- lwarx_loop_stop
- isync
-
- bl save_sprs_to_stack
-
- PPC_STOP /* Does not return (system reset interrupt) */
-
-/*
- * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
- * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
- */
-_GLOBAL(power7_idle_insn)
- /* Now check if user or arch enabled NAP mode */
- LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
- b pnv_powersave_common
-
-#define CHECK_HMI_INTERRUPT \
-BEGIN_FTR_SECTION_NESTED(66); \
- rlwinm r0,r12,45-31,0xf; /* extract wake reason field (P8) */ \
-FTR_SECTION_ELSE_NESTED(66); \
- rlwinm r0,r12,45-31,0xe; /* P7 wake reason field is 3 bits */ \
-ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
- cmpwi r0,0xa; /* Hypervisor maintenance ? */ \
- bne+ 20f; \
- /* Invoke opal call to handle hmi */ \
- ld r2,PACATOC(r13); \
- ld r1,PACAR1(r13); \
- std r3,ORIG_GPR3(r1); /* Save original r3 */ \
- li r3,0; /* NULL argument */ \
- bl hmi_exception_realmode; \
- nop; \
- ld r3,ORIG_GPR3(r1); /* Restore original r3 */ \
-20: nop;
-
-/*
- * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
- * r3 contains desired PSSCR register value.
+ * Desired instruction type in r3
*
- * Offline (CPU unplug) case also must notify KVM that the CPU is
- * idle.
- */
-_GLOBAL(power9_offline_stop)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /*
- * Tell KVM we're entering idle.
- * This does not have to be done in real mode because the P9 MMU
- * is independent per-thread. Some steppings share radix/hash mode
- * between threads, but in that case KVM has a barrier sync in real
- * mode before and after switching between radix and hash.
- */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
- /* fall through */
-
-_GLOBAL(power9_idle_stop)
- std r3, PACA_REQ_PSSCR(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-BEGIN_FTR_SECTION
- sync
- lwz r5, PACA_DONT_STOP(r13)
- cmpwi r5, 0
- bne 1f
-END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
-#endif
- mtspr SPRN_PSSCR,r3
- LOAD_REG_ADDR(r4,power_enter_stop)
- b pnv_powersave_common
- /* No return */
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-1:
- /*
- * We get here when TM / thread reconfiguration bug workaround
- * code wants to get the CPU into SMT4 mode, and therefore
- * we are being asked not to stop.
- */
- li r3, 0
- std r3, PACA_REQ_PSSCR(r13)
- blr /* return 0 for wakeup cause / SRR1 value */
-#endif
-
-/*
- * Called from machine check handler for powersave wakeups.
- * Low level machine check processing has already been done. Now just
- * go through the wake up path to get everything in order.
+ * GPRs may be lost, so they are saved here. Wakeup is by interrupt only.
+ * The SRESET wakeup returns to this function's caller by calling
+ * idle_return_gpr_loss with r3 set to desired return value.
*
- * r3 - The original SRR1 value.
- * Original SRR[01] have been clobbered.
- * MSR_RI is clear.
- */
-.global pnv_powersave_wakeup_mce
-pnv_powersave_wakeup_mce:
- /* Set cr3 for pnv_powersave_wakeup */
- rlwinm r11,r3,47-31,30,31
- cmpwi cr3,r11,2
-
- /*
- * Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
- * reason into r12, which allows reuse of the system reset wakeup
- * code without being mistaken for another type of wakeup.
- */
- oris r12,r3,SRR1_WAKEMCE_RESVD@h
-
- b pnv_powersave_wakeup
-
-/*
- * Called from reset vector for powersave wakeups.
- * cr3 - set to gt if waking up with partial/complete hypervisor state loss
- * r12 - SRR1
- */
-.global pnv_powersave_wakeup
-pnv_powersave_wakeup:
- ld r2, PACATOC(r13)
-
-BEGIN_FTR_SECTION
- bl pnv_restore_hyp_resource_arch300
-FTR_SECTION_ELSE
- bl pnv_restore_hyp_resource_arch207
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
-
- li r0,PNV_THREAD_RUNNING
- stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
-
- mr r3,r12
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- lbz r0,HSTATE_HWTHREAD_STATE(r13)
- cmpwi r0,KVM_HWTHREAD_IN_KERNEL
- beq 0f
- li r0,KVM_HWTHREAD_IN_KERNEL
- stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* Order setting hwthread_state vs. testing hwthread_req */
- sync
-0: lbz r0,HSTATE_HWTHREAD_REQ(r13)
- cmpwi r0,0
- beq 1f
- b kvm_start_guest
-1:
-#endif
-
- /* Return SRR1 from power7_nap() */
- blt cr3,pnv_wakeup_noloss
- b pnv_wakeup_loss
-
-/*
- * Check whether we have woken up with hypervisor state loss.
- * If yes, restore hypervisor state and return back to link.
+ * A wakeup without GPR loss may alternatively be handled as in
+ * isa300_idle_stop_noloss and blr directly, as an optimisation.
*
- * cr3 - set to gt if waking up with partial/complete hypervisor state loss
- */
-pnv_restore_hyp_resource_arch300:
- /*
- * Workaround for POWER9, if we lost resources, the ERAT
- * might have been mixed up and needs flushing. We also need
- * to reload MMCR0 (see comment above). We also need to set
- * then clear bit 60 in MMCRA to ensure the PMU starts running.
- */
- blt cr3,1f
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
- ld r1,PACAR1(r13)
- ld r4,_MMCR0(r1)
- mtspr SPRN_MMCR0,r4
-END_FTR_SECTION_IFCLR(CPU_FTR_POWER9_DD2_1)
- mfspr r4,SPRN_MMCRA
- ori r4,r4,(1 << (63-60))
- mtspr SPRN_MMCRA,r4
- xori r4,r4,(1 << (63-60))
- mtspr SPRN_MMCRA,r4
-1:
- /*
- * POWER ISA 3. Use PSSCR to determine if we
- * are waking up from deep idle state
- */
- LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
- ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
-
- /*
- * 0-3 bits correspond to Power-Saving Level Status
- * which indicates the idle state we are waking up from
- */
- mfspr r5, SPRN_PSSCR
- rldicl r5,r5,4,60
- li r0, 0 /* clear requested_psscr to say we're awake */
- std r0, PACA_REQ_PSSCR(r13)
- cmpd cr4,r5,r4
- bge cr4,pnv_wakeup_tb_loss /* returns to caller */
-
- blr /* Waking up without hypervisor state loss. */
-
-/* Same calling convention as arch300 */
-pnv_restore_hyp_resource_arch207:
- /*
- * POWER ISA 2.07 or less.
- * Check if we slept with sleep or winkle.
- */
- lbz r4,PACA_THREAD_IDLE_STATE(r13)
- cmpwi cr2,r4,PNV_THREAD_NAP
- bgt cr2,pnv_wakeup_tb_loss /* Either sleep or Winkle */
-
- /*
- * We fall through here if PACA_THREAD_IDLE_STATE shows we are waking
- * up from nap. At this stage CR3 shouldn't contains 'gt' since that
- * indicates we are waking with hypervisor state loss from nap.
- */
- bgt cr3,.
-
- blr /* Waking up without hypervisor state loss */
-
-/*
- * Called if waking up from idle state which can cause either partial or
- * complete hyp state loss.
- * In POWER8, called if waking up from fastsleep or winkle
- * In POWER9, called if waking up from stop state >= pnv_first_deep_stop_state
- *
- * r13 - PACA
- * cr3 - gt if waking up with partial/complete hypervisor state loss
- *
- * If ISA300:
- * cr4 - gt or eq if waking up from complete hypervisor state loss.
+ * The caller is responsible for saving/restoring SPRs, MSR, timebase,
+ * etc.
*
- * If ISA207:
- * r4 - PACA_THREAD_IDLE_STATE
+ * This must be called in real-mode (MSR_IDLE).
*/
-pnv_wakeup_tb_loss:
- ld r1,PACAR1(r13)
- /*
- * Before entering any idle state, the NVGPRs are saved in the stack.
- * If there was a state loss, or PACA_NAPSTATELOST was set, then the
- * NVGPRs are restored. If we are here, it is likely that state is lost,
- * but not guaranteed -- neither ISA207 nor ISA300 tests to reach
- * here are the same as the test to restore NVGPRS:
- * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300,
- * and SRR1 test for restoring NVGPRs.
- *
- * We are about to clobber NVGPRs now, so set NAPSTATELOST to
- * guarantee they will always be restored. This might be tightened
- * with careful reading of specs (particularly for ISA300) but this
- * is already a slow wakeup path and it's simpler to be safe.
- */
- li r0,1
- stb r0,PACA_NAPSTATELOST(r13)
-
- /*
- *
- * Save SRR1 and LR in NVGPRs as they might be clobbered in
- * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required
- * to determine the wakeup reason if we branch to kvm_start_guest. LR
- * is required to return back to reset vector after hypervisor state
- * restore is complete.
- */
- mr r19,r12
- mr r18,r4
- mflr r17
-BEGIN_FTR_SECTION
- CHECK_HMI_INTERRUPT
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
-
- ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
- lbz r7,PACA_THREAD_MASK(r13)
-
- /*
- * Take the core lock to synchronize against other threads.
- *
- * Lock bit is set in one of the 2 cases-
- * a. In the sleep/winkle enter path, the last thread is executing
- * fastsleep workaround code.
- * b. In the wake up path, another thread is executing fastsleep
- * workaround undo code or resyncing timebase or restoring context
- * In either case loop until the lock bit is cleared.
- */
-1:
- lwarx r15,0,r14
- andis. r9,r15,PNV_CORE_IDLE_LOCK_BIT@h
- bnel- core_idle_lock_held
- oris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
- stwcx. r15,0,r14
- bne- 1b
- isync
-
- andi. r9,r15,PNV_CORE_IDLE_THREAD_BITS
- cmpwi cr2,r9,0
-
- /*
- * At this stage
- * cr2 - eq if first thread to wakeup in core
- * cr3- gt if waking up with partial/complete hypervisor state loss
- * ISA300:
- * cr4 - gt or eq if waking up from complete hypervisor state loss.
- */
-
-BEGIN_FTR_SECTION
- /*
- * Were we in winkle?
- * If yes, check if all threads were in winkle, decrement our
- * winkle count, set all thread winkle bits if all were in winkle.
- * Check if our thread has a winkle bit set, and set cr4 accordingly
- * (to match ISA300, above). Pseudo-code for core idle state
- * transitions for ISA207 is as follows (everything happens atomically
- * due to store conditional and/or lock bit):
- *
- * nap_idle() { }
- * nap_wake() { }
- *
- * sleep_idle()
- * {
- * core_idle_state &= ~thread_in_core
- * }
- *
- * sleep_wake()
- * {
- * bool first_in_core, first_in_subcore;
- *
- * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
- * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
- *
- * core_idle_state |= thread_in_core;
- * }
- *
- * winkle_idle()
- * {
- * core_idle_state &= ~thread_in_core;
- * core_idle_state += 1 << WINKLE_COUNT_SHIFT;
- * }
- *
- * winkle_wake()
- * {
- * bool first_in_core, first_in_subcore, winkle_state_lost;
- *
- * first_in_core = (core_idle_state & IDLE_THREAD_BITS) == 0;
- * first_in_subcore = (core_idle_state & SUBCORE_SIBLING_MASK) == 0;
- *
- * core_idle_state |= thread_in_core;
- *
- * if ((core_idle_state & WINKLE_MASK) == (8 << WINKLE_COUNT_SIHFT))
- * core_idle_state |= THREAD_WINKLE_BITS;
- * core_idle_state -= 1 << WINKLE_COUNT_SHIFT;
- *
- * winkle_state_lost = core_idle_state &
- * (thread_in_core << WINKLE_THREAD_SHIFT);
- * core_idle_state &= ~(thread_in_core << WINKLE_THREAD_SHIFT);
- * }
- *
- */
- cmpwi r18,PNV_THREAD_WINKLE
+_GLOBAL(isa206_idle_insn_mayloss)
+ std r1,PACAR1(r13)
+ mflr r4
+ mfcr r5
+ /* use stack red zone rather than a new frame for saving regs */
+ std r2,-8*0(r1)
+ std r14,-8*1(r1)
+ std r15,-8*2(r1)
+ std r16,-8*3(r1)
+ std r17,-8*4(r1)
+ std r18,-8*5(r1)
+ std r19,-8*6(r1)
+ std r20,-8*7(r1)
+ std r21,-8*8(r1)
+ std r22,-8*9(r1)
+ std r23,-8*10(r1)
+ std r24,-8*11(r1)
+ std r25,-8*12(r1)
+ std r26,-8*13(r1)
+ std r27,-8*14(r1)
+ std r28,-8*15(r1)
+ std r29,-8*16(r1)
+ std r30,-8*17(r1)
+ std r31,-8*18(r1)
+ std r4,-8*19(r1)
+ std r5,-8*20(r1)
+ cmpwi r3,PNV_THREAD_NAP
+ bne 1f
+ IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
+1: cmpwi r3,PNV_THREAD_SLEEP
bne 2f
- andis. r9,r15,PNV_CORE_IDLE_WINKLE_COUNT_ALL_BIT@h
- subis r15,r15,PNV_CORE_IDLE_WINKLE_COUNT@h
- beq 2f
- ori r15,r15,PNV_CORE_IDLE_THREAD_WINKLE_BITS /* all were winkle */
-2:
- /* Shift thread bit to winkle mask, then test if this thread is set,
- * and remove it from the winkle bits */
- slwi r8,r7,8
- and r8,r8,r15
- andc r15,r15,r8
- cmpwi cr4,r8,1 /* cr4 will be gt if our bit is set, lt if not */
-
- lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
- and r4,r4,r15
- cmpwi r4,0 /* Check if first in subcore */
-
- or r15,r15,r7 /* Set thread bit */
- beq first_thread_in_subcore
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
-
- or r15,r15,r7 /* Set thread bit */
- beq cr2,first_thread_in_core
-
- /* Not first thread in core or subcore to wake up */
- b clear_lock
-
-first_thread_in_subcore:
- /*
- * If waking up from sleep, subcore state is not lost. Hence
- * skip subcore state restore
- */
- blt cr4,subcore_state_restored
-
- /* Restore per-subcore state */
- ld r4,_SDR1(r1)
- mtspr SPRN_SDR1,r4
-
- ld r4,_RPR(r1)
- mtspr SPRN_RPR,r4
- ld r4,_AMOR(r1)
- mtspr SPRN_AMOR,r4
-
-subcore_state_restored:
- /*
- * Check if the thread is also the first thread in the core. If not,
- * skip to clear_lock.
- */
- bne cr2,clear_lock
-
-first_thread_in_core:
-
- /*
- * First thread in the core waking up from any state which can cause
- * partial or complete hypervisor state loss. It needs to
- * call the fastsleep workaround code if the platform requires it.
- * Call it unconditionally here. The below branch instruction will
- * be patched out if the platform does not have fastsleep or does not
- * require the workaround. Patching will be performed during the
- * discovery of idle-states.
- */
-.global pnv_fastsleep_workaround_at_exit
-pnv_fastsleep_workaround_at_exit:
- b fastsleep_workaround_at_exit
-
-timebase_resync:
- /*
- * Use cr3 which indicates that we are waking up with atleast partial
- * hypervisor state loss to determine if TIMEBASE RESYNC is needed.
- */
- ble cr3,.Ltb_resynced
- /* Time base re-sync */
- bl opal_resync_timebase;
- /*
- * If waking up from sleep (POWER8), per core state
- * is not lost, skip to clear_lock.
- */
-.Ltb_resynced:
- blt cr4,clear_lock
-
- /*
- * First thread in the core to wake up and its waking up with
- * complete hypervisor state loss. Restore per core hypervisor
- * state.
- */
-BEGIN_FTR_SECTION
- ld r4,_PTCR(r1)
- mtspr SPRN_PTCR,r4
- ld r4,_RPR(r1)
- mtspr SPRN_RPR,r4
- ld r4,_AMOR(r1)
- mtspr SPRN_AMOR,r4
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
- ld r4,_TSCR(r1)
- mtspr SPRN_TSCR,r4
- ld r4,_WORC(r1)
- mtspr SPRN_WORC,r4
-
-clear_lock:
- xoris r15,r15,PNV_CORE_IDLE_LOCK_BIT@h
- lwsync
- stw r15,0(r14)
-
-common_exit:
- /*
- * Common to all threads.
- *
- * If waking up from sleep, hypervisor state is not lost. Hence
- * skip hypervisor state restore.
- */
- blt cr4,hypervisor_state_restored
-
- /* Waking up from winkle */
-
-BEGIN_MMU_FTR_SECTION
- b no_segments
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
- /* Restore SLB from PACA */
- ld r8,PACA_SLBSHADOWPTR(r13)
-
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7,r5,SLB_ESID_V@h
- beq 1f
- slbmte r6,r5
-1: addi r8,r8,16
- .endr
-no_segments:
-
- /* Restore per thread state */
-
- ld r4,_SPURR(r1)
- mtspr SPRN_SPURR,r4
- ld r4,_PURR(r1)
- mtspr SPRN_PURR,r4
- ld r4,_DSCR(r1)
- mtspr SPRN_DSCR,r4
- ld r4,_WORT(r1)
- mtspr SPRN_WORT,r4
-
- /* Call cur_cpu_spec->cpu_restore() */
- LOAD_REG_ADDR(r4, cur_cpu_spec)
- ld r4,0(r4)
- ld r12,CPU_SPEC_RESTORE(r4)
-#ifdef PPC64_ELF_ABI_v1
- ld r12,0(r12)
-#endif
- mtctr r12
- bctrl
-
-/*
- * On POWER9, we can come here on wakeup from a cpuidle stop state.
- * Hence restore the additional SPRs to the saved value.
- *
- * On POWER8, we come here only on winkle. Since winkle is used
- * only in the case of CPU-Hotplug, we don't need to restore
- * the additional SPRs.
- */
-BEGIN_FTR_SECTION
- bl power9_restore_additional_sprs
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-hypervisor_state_restored:
-
- mr r12,r19
- mtlr r17
- blr /* return to pnv_powersave_wakeup */
-
-fastsleep_workaround_at_exit:
- li r3,1
- li r4,0
- bl opal_config_cpu_idle_state
- b timebase_resync
-
-/*
- * R3 here contains the value that will be returned to the caller
- * of power7_nap.
- * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
- */
-.global pnv_wakeup_loss
-pnv_wakeup_loss:
- ld r1,PACAR1(r13)
-BEGIN_FTR_SECTION
- CHECK_HMI_INTERRUPT
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- REST_NVGPRS(r1)
- REST_GPR(2, r1)
- ld r4,PACAKMSR(r13)
- ld r5,_LINK(r1)
- ld r6,_CCR(r1)
- addi r1,r1,INT_FRAME_SIZE
- mtlr r5
- mtcr r6
- mtmsrd r4
- blr
+ IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP)
+2: IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE)
-/*
- * R3 here contains the value that will be returned to the caller
- * of power7_nap.
- * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
- */
-pnv_wakeup_noloss:
- lbz r0,PACA_NAPSTATELOST(r13)
- cmpwi r0,0
- bne pnv_wakeup_loss
- ld r1,PACAR1(r13)
-BEGIN_FTR_SECTION
- CHECK_HMI_INTERRUPT
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- ld r4,PACAKMSR(r13)
- ld r5,_NIP(r1)
- ld r6,_CCR(r1)
- addi r1,r1,INT_FRAME_SIZE
- mtlr r5
- mtcr r6
- mtmsrd r4
- blr
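
The idle_book3s.S rewrite above strips the POWER7/8/9-specific save/restore paths down to three thin entry points, whose comments spell out the contract: PSSCR (or idle-instruction type) in r3, NVGPRs stashed in the stack red zone, and SRESET wakeups returning through idle_return_gpr_loss. A hedged sketch of how C platform code might call them follows; the wrapper name is illustrative only, and real callers must also handle SPRs, KVM and timebase as the comments require:

/* Sketch only: assumed prototypes match the asm comments above. */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);

static unsigned long example_power9_stop(unsigned long psscr_val, bool state_loss)
{
	/* The caller must save/restore SPRs, MSR, timebase, etc. itself. */
	if (state_loss)
		return isa300_idle_stop_mayloss(psscr_val); /* NVGPRs saved/restored */

	return isa300_idle_stop_noloss(psscr_val); /* returns 0 on EC=0 wakeup */
}
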
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8a936723c791..ada901af4950 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -81,10 +81,7 @@
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
-int __irq_offset_value;
-
#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;
#ifdef CONFIG_TAU_INT
@@ -261,16 +258,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
*/
irq_happened = get_irq_happened();
if (!irq_happened) {
- /*
- * FIXME. Here we'd like to be able to do:
- *
- * #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- * WARN_ON(!(mfmsr() & MSR_EE));
- * #endif
- *
- * But currently it hits in a few paths, we should fix those and
- * enable the warning.
- */
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(!(mfmsr() & MSR_EE));
+#endif
return;
}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index b5fec1f9751a..4581377cfc98 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -112,6 +112,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->srr1 = regs->msr;
mce->gpr3 = regs->gpr[3];
mce->in_use = 1;
+ mce->cpu = get_paca()->paca_index;
/* Mark it recovered if we have handled it and MSR(RI=1). */
if (handled && (regs->msr & MSR_RI))
@@ -121,6 +122,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
mce->initiator = mce_err->initiator;
mce->severity = mce_err->severity;
+ mce->sync_error = mce_err->sync_error;
+ mce->error_class = mce_err->error_class;
/*
* Populate the mce error_type and type-specific error_type.
@@ -310,7 +313,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode, bool in_guest)
{
- const char *level, *sevstr, *subtype;
+ const char *level, *sevstr, *subtype, *err_type;
+ uint64_t ea = 0, pa = 0;
+ int n = 0;
+ char dar_str[50];
+ char pa_str[50];
static const char *mc_ue_types[] = {
"Indeterminate",
"Instruction fetch",
@@ -357,6 +364,13 @@ void machine_check_print_event_info(struct machine_check_event *evt,
"Store (timeout)",
"Page table walk Load/Store (timeout)",
};
+ static const char *mc_error_class[] = {
+ "Unknown",
+ "Hardware error",
+ "Probable Hardware error (some chance of software cause)",
+ "Software error",
+ "Probable Software error (some chance of hardware cause)",
+ };
/* Print things out */
if (evt->version != MCE_V1) {
@@ -371,9 +385,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
break;
case MCE_SEV_WARNING:
level = KERN_WARNING;
- sevstr = "";
+ sevstr = "Warning";
break;
- case MCE_SEV_ERROR_SYNC:
+ case MCE_SEV_SEVERE:
level = KERN_ERR;
sevstr = "Severe";
break;
@@ -384,101 +398,107 @@ void machine_check_print_event_info(struct machine_check_event *evt,
break;
}
- printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
- evt->disposition == MCE_DISPOSITION_RECOVERED ?
- "Recovered" : "Not recovered");
-
- if (in_guest) {
- printk("%s Guest NIP: %016llx\n", level, evt->srr0);
- } else if (user_mode) {
- printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
- evt->srr0, current->pid, current->comm);
- } else {
- printk("%s NIP [%016llx]: %pS\n", level, evt->srr0,
- (void *)evt->srr0);
- }
-
- printk("%s Initiator: %s\n", level,
- evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
switch (evt->error_type) {
case MCE_ERROR_TYPE_UE:
+ err_type = "UE";
subtype = evt->u.ue_error.ue_error_type <
ARRAY_SIZE(mc_ue_types) ?
mc_ue_types[evt->u.ue_error.ue_error_type]
: "Unknown";
- printk("%s Error type: UE [%s]\n", level, subtype);
if (evt->u.ue_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.ue_error.effective_address);
+ ea = evt->u.ue_error.effective_address;
if (evt->u.ue_error.physical_address_provided)
- printk("%s Physical address: %016llx\n",
- level, evt->u.ue_error.physical_address);
+ pa = evt->u.ue_error.physical_address;
break;
case MCE_ERROR_TYPE_SLB:
+ err_type = "SLB";
subtype = evt->u.slb_error.slb_error_type <
ARRAY_SIZE(mc_slb_types) ?
mc_slb_types[evt->u.slb_error.slb_error_type]
: "Unknown";
- printk("%s Error type: SLB [%s]\n", level, subtype);
if (evt->u.slb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.slb_error.effective_address);
+ ea = evt->u.slb_error.effective_address;
break;
case MCE_ERROR_TYPE_ERAT:
+ err_type = "ERAT";
subtype = evt->u.erat_error.erat_error_type <
ARRAY_SIZE(mc_erat_types) ?
mc_erat_types[evt->u.erat_error.erat_error_type]
: "Unknown";
- printk("%s Error type: ERAT [%s]\n", level, subtype);
if (evt->u.erat_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.erat_error.effective_address);
+ ea = evt->u.erat_error.effective_address;
break;
case MCE_ERROR_TYPE_TLB:
+ err_type = "TLB";
subtype = evt->u.tlb_error.tlb_error_type <
ARRAY_SIZE(mc_tlb_types) ?
mc_tlb_types[evt->u.tlb_error.tlb_error_type]
: "Unknown";
- printk("%s Error type: TLB [%s]\n", level, subtype);
if (evt->u.tlb_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.tlb_error.effective_address);
+ ea = evt->u.tlb_error.effective_address;
break;
case MCE_ERROR_TYPE_USER:
+ err_type = "User";
subtype = evt->u.user_error.user_error_type <
ARRAY_SIZE(mc_user_types) ?
mc_user_types[evt->u.user_error.user_error_type]
: "Unknown";
- printk("%s Error type: User [%s]\n", level, subtype);
if (evt->u.user_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.user_error.effective_address);
+ ea = evt->u.user_error.effective_address;
break;
case MCE_ERROR_TYPE_RA:
+ err_type = "Real address";
subtype = evt->u.ra_error.ra_error_type <
ARRAY_SIZE(mc_ra_types) ?
mc_ra_types[evt->u.ra_error.ra_error_type]
: "Unknown";
- printk("%s Error type: Real address [%s]\n", level, subtype);
if (evt->u.ra_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.ra_error.effective_address);
+ ea = evt->u.ra_error.effective_address;
break;
case MCE_ERROR_TYPE_LINK:
+ err_type = "Link";
subtype = evt->u.link_error.link_error_type <
ARRAY_SIZE(mc_link_types) ?
mc_link_types[evt->u.link_error.link_error_type]
: "Unknown";
- printk("%s Error type: Link [%s]\n", level, subtype);
if (evt->u.link_error.effective_address_provided)
- printk("%s Effective address: %016llx\n",
- level, evt->u.link_error.effective_address);
+ ea = evt->u.link_error.effective_address;
break;
default:
case MCE_ERROR_TYPE_UNKNOWN:
- printk("%s Error type: Unknown\n", level);
+ err_type = "Unknown";
+ subtype = "";
break;
}
+
+ dar_str[0] = pa_str[0] = '\0';
+ if (ea && evt->srr0 != ea) {
+ /* Load/Store address */
+ n = sprintf(dar_str, "DAR: %016llx ", ea);
+ if (pa)
+ sprintf(dar_str + n, "paddr: %016llx ", pa);
+ } else if (pa) {
+ sprintf(pa_str, " paddr: %016llx", pa);
+ }
+
+ printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
+ level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
+ err_type, subtype, dar_str,
+ evt->disposition == MCE_DISPOSITION_RECOVERED ?
+ "Recovered" : "Not recovered");
+
+ if (in_guest || user_mode) {
+ printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
+ level, evt->cpu, current->pid, current->comm,
+ in_guest ? "Guest " : "", evt->srr0, pa_str);
+ } else {
+ printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
+ level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
+ }
+
+ subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
+ mc_error_class[evt->error_class] : "Unknown";
+ printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 6b800eec31f2..b5e876efe864 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -36,7 +36,7 @@
* Convert an address related to an mm to a PFN. NOTE: we are in real
* mode, we could potentially race with page table updates.
*/
-static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
pte_t *ptep;
unsigned long flags;
@@ -131,213 +131,232 @@ struct mce_ierror_table {
bool nip_valid; /* nip is a valid indicator of faulting address */
unsigned int error_type;
unsigned int error_subtype;
+ unsigned int error_class;
unsigned int initiator;
unsigned int severity;
+ bool sync_error;
};
static const struct mce_ierror_table mce_p7_ierror_table[] = {
{ 0x00000000001c0000, 0x0000000000040000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x0000000000080000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x00000000000c0000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000100000, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000140000, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000001c0000, 0x0000000000180000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000001c0000, 0x00000000001c0000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, 0, 0, 0, 0, 0 } };
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, 0, 0, 0, 0, 0, 0 } };
static const struct mce_ierror_table mce_p8_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000000080000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000000c0000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000100000, true,
- MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000140000, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000001c0000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008000000, true,
- MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_IFETCH_TIMEOUT, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008040000, true,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, 0, 0, 0, 0, 0 } };
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, 0, 0, 0, 0, 0, 0 } };
static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000000080000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000000c0000, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000100000, true,
- MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000140000, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000000081c0000, 0x0000000000180000, true,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000001c0000, true,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008000000, true,
- MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_IFETCH_TIMEOUT, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008040000, true,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x00000000080c0000, true,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008100000, true,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000000081c0000, 0x0000000008140000, false,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x0000000008180000, false,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_FATAL, }, /* ASYNC is fatal */
-{ 0x00000000081c0000, 0x00000000081c0000, true,
+ MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
+{ 0x00000000081c0000, 0x00000000081c0000, true, MCE_ECLASS_HARDWARE,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, 0, 0, 0, 0, 0 } };
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, 0, 0, 0, 0, 0, 0 } };
struct mce_derror_table {
unsigned long dsisr_value;
bool dar_valid; /* dar is a valid indicator of faulting address */
unsigned int error_type;
unsigned int error_subtype;
+ unsigned int error_class;
unsigned int initiator;
unsigned int severity;
+ bool sync_error;
};
static const struct mce_derror_table mce_p7_derror_table[] = {
{ 0x00008000, false,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
- MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000040, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, false, 0, 0, 0, 0 } };
+ MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0, false, 0, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p8_derror_table[] = {
{ 0x00008000, false,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00002000, true,
- MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00001000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
- MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000200, true,
MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, false, 0, 0, 0, 0 } };
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, false, 0, 0, 0, 0, 0 } };
static const struct mce_derror_table mce_p9_derror_table[] = {
{ 0x00008000, false,
- MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00004000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00002000, true,
- MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00001000, true,
MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000800, true,
- MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000400, true,
- MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000200, false,
- MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000080, true,
MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
{ 0x00000100, true,
- MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000040, true,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000020, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000010, false,
MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0x00000008, false,
- MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN,
- MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
-{ 0, false, 0, 0, 0, 0 } };
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, false, 0, 0, 0, 0, 0 } };
static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
uint64_t *phys_addr)
@@ -404,6 +423,7 @@ static int mce_handle_ierror(struct pt_regs *regs,
/* now fill in mce_error_info */
mce_err->error_type = table[i].error_type;
+ mce_err->error_class = table[i].error_class;
switch (table[i].error_type) {
case MCE_ERROR_TYPE_UE:
mce_err->u.ue_error_type = table[i].error_subtype;
@@ -427,11 +447,12 @@ static int mce_handle_ierror(struct pt_regs *regs,
mce_err->u.link_error_type = table[i].error_subtype;
break;
}
+ mce_err->sync_error = table[i].sync_error;
mce_err->severity = table[i].severity;
mce_err->initiator = table[i].initiator;
if (table[i].nip_valid) {
*addr = regs->nip;
- if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
+ if (mce_err->sync_error &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
unsigned long pfn;
@@ -448,8 +469,10 @@ static int mce_handle_ierror(struct pt_regs *regs,
}
mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
- mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->error_class = MCE_ECLASS_UNKNOWN;
+ mce_err->severity = MCE_SEV_SEVERE;
mce_err->initiator = MCE_INITIATOR_CPU;
+ mce_err->sync_error = true;
return 0;
}
@@ -496,6 +519,7 @@ static int mce_handle_derror(struct pt_regs *regs,
/* now fill in mce_error_info */
mce_err->error_type = table[i].error_type;
+ mce_err->error_class = table[i].error_class;
switch (table[i].error_type) {
case MCE_ERROR_TYPE_UE:
mce_err->u.ue_error_type = table[i].error_subtype;
@@ -519,11 +543,12 @@ static int mce_handle_derror(struct pt_regs *regs,
mce_err->u.link_error_type = table[i].error_subtype;
break;
}
+ mce_err->sync_error = table[i].sync_error;
mce_err->severity = table[i].severity;
mce_err->initiator = table[i].initiator;
if (table[i].dar_valid)
*addr = regs->dar;
- else if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
+ else if (mce_err->sync_error &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
/*
* We do a maximum of 4 nested MCE calls, see
@@ -539,8 +564,10 @@ static int mce_handle_derror(struct pt_regs *regs,
return handled;
mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
- mce_err->severity = MCE_SEV_ERROR_SYNC;
+ mce_err->error_class = MCE_ECLASS_UNKNOWN;
+ mce_err->severity = MCE_SEV_SEVERE;
mce_err->initiator = MCE_INITIATOR_CPU;
+ mce_err->sync_error = true;
return 0;
}
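[Editor's note, illustrative only, not part of the commit: the mce_power.c hunks above extend every entry of the machine-check decode tables with an error_class field and a boolean sync_error flag, replacing the old trick of encoding "synchronous" in the severity value (MCE_SEV_ERROR_SYNC). A minimal standalone sketch of the zero-terminated, mask-driven decode pattern follows; the field names mirror the diff, while the enum values and the sample table entry are invented placeholders.]

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum mce_sev    { SEV_WARNING, SEV_SEVERE };            /* placeholder values */
enum mce_eclass { ECLASS_SOFTWARE, ECLASS_HARDWARE, ECLASS_UNKNOWN };

struct mce_table_entry {
    uint64_t srr1_mask;       /* bit(s) of SRR1/DSISR this entry decodes */
    bool nip_valid;           /* is regs->nip meaningful for this error  */
    int error_type;
    int error_subtype;
    enum mce_eclass error_class;
    int initiator;
    enum mce_sev severity;
    bool sync_error;          /* synchronous: the faulting address is usable */
};

/* A zero mask terminates the table, as in the diff above. */
static const struct mce_table_entry table[] = {
    { 0x00000040, true, /* RA */ 3, /* LOAD */ 1,
      ECLASS_HARDWARE, /* CPU */ 0, SEV_SEVERE, true },
    { 0, false, 0, 0, ECLASS_UNKNOWN, 0, SEV_SEVERE, false },
};

static const struct mce_table_entry *mce_decode(uint64_t srr1)
{
    for (size_t i = 0; table[i].srr1_mask; i++)
        if (srr1 & table[i].srr1_mask)
            return &table[i];
    return NULL;    /* caller falls back to an "unknown error" entry */
}

int main(void)
{
    const struct mce_table_entry *e = mce_decode(0x00000040);

    if (e)
        printf("class=%d severe=%d sync=%d\n",
               e->error_class, e->severity == SEV_SEVERE, e->sync_error);
    return 0;
}

[The handlers above then copy error_class and sync_error into mce_error_info instead of overloading the severity field.]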
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index e7382abee868..9cc91d03ab62 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -267,12 +267,12 @@ void copy_mm_to_paca(struct mm_struct *mm)
get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
- VM_BUG_ON(!mm->context.slb_addr_limit);
- get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
- memcpy(&get_paca()->mm_ctx_low_slices_psize,
- &context->low_slices_psize, sizeof(context->low_slices_psize));
- memcpy(&get_paca()->mm_ctx_high_slices_psize,
- &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
+ VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
+ get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
+ memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
+ LOW_SLICE_ARRAY_SZ);
+ memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
+ TASK_SLICE_ARRAY_SZ(context));
#else /* CONFIG_PPC_MM_SLICES */
get_paca()->mm_ctx_user_psize = context->user_psize;
get_paca()->mm_ctx_sllp = context->sllp;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dd9e0d5386ee..87da40129927 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -67,6 +67,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
+#include <asm/hw_breakpoint.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -133,7 +134,8 @@ static int __init enable_strict_msr_control(char *str)
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
-unsigned long msr_check_and_set(unsigned long bits)
+/* notrace because it's called by restore_math */
+unsigned long notrace msr_check_and_set(unsigned long bits)
{
unsigned long oldmsr = mfmsr();
unsigned long newmsr;
@@ -152,7 +154,8 @@ unsigned long msr_check_and_set(unsigned long bits)
}
EXPORT_SYMBOL_GPL(msr_check_and_set);
-void __msr_check_and_clear(unsigned long bits)
+/* notrace because it's called by restore_math */
+void notrace __msr_check_and_clear(unsigned long bits)
{
unsigned long oldmsr = mfmsr();
unsigned long newmsr;
@@ -525,7 +528,17 @@ void giveup_all(struct task_struct *tsk)
}
EXPORT_SYMBOL(giveup_all);
-void restore_math(struct pt_regs *regs)
+/*
+ * The exception exit path calls restore_math() with interrupts hard disabled
+ * but the soft irq state not "reconciled". ftrace code that calls
+ * local_irq_save/restore causes warnings.
+ *
+ * Rather than complicate the exit path, just don't trace restore_math. This
+ * could be done by having ftrace entry code check for this un-reconciled
+ * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
+ * temporarily fix it up for the duration of the ftrace call.
+ */
+void notrace restore_math(struct pt_regs *regs)
{
unsigned long msr;
@@ -784,7 +797,7 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
return __set_dabr(dabr, dabrx);
}
-static inline int set_dawr(struct arch_hw_breakpoint *brk)
+int set_dawr(struct arch_hw_breakpoint *brk)
{
unsigned long dawr, dawrx, mrd;
@@ -816,7 +829,7 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
- if (cpu_has_feature(CPU_FTR_DAWR))
+ if (dawr_enabled())
// Power8 or later
set_dawr(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -830,8 +843,8 @@ void __set_breakpoint(struct arch_hw_breakpoint *brk)
/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
- if (cpu_has_feature(CPU_FTR_DAWR))
- return true; /* POWER8 DAWR */
+ if (dawr_enabled())
+ return true; /* POWER8 DAWR or POWER9 forced DAWR */
if (cpu_has_feature(CPU_FTR_ARCH_207S))
return false; /* POWER9 with DAWR disabled */
/* DABR: Everything but POWER8 and POWER9 */
@@ -1151,11 +1164,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
thread_pkey_regs_restore(new_thread, old_thread);
}
-#ifdef CONFIG_PPC_BOOK3S_64
-#define CP_SIZE 128
-static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
-#endif
-
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
@@ -1729,7 +1737,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#ifdef CONFIG_PPC_BOOK3S_64
- preload_new_slb_context(start, sp);
+ if (!radix_enabled())
+ preload_new_slb_context(start, sp);
#endif
#endif
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index f33ff4163a51..00682b8df330 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -154,10 +154,8 @@ static struct prom_t __prombss prom;
static unsigned long __prombss prom_entry;
-#define PROM_SCRATCH_SIZE 256
-
static char __prombss of_stdout_device[256];
-static char __prombss prom_scratch[PROM_SCRATCH_SIZE];
+static char __prombss prom_scratch[256];
static unsigned long __prombss dt_header_start;
static unsigned long __prombss dt_struct_start, dt_struct_end;
@@ -224,6 +222,135 @@ static bool __prombss rtas_has_query_cpu_stopped;
#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
+/* Copied from lib/string.c and lib/kstrtox.c */
+
+static int __init prom_strcmp(const char *cs, const char *ct)
+{
+ unsigned char c1, c2;
+
+ while (1) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ }
+ return 0;
+}
+
+static char __init *prom_strcpy(char *dest, const char *src)
+{
+ char *tmp = dest;
+
+ while ((*dest++ = *src++) != '\0')
+ /* nothing */;
+ return tmp;
+}
+
+static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
+{
+ unsigned char c1, c2;
+
+ while (count) {
+ c1 = *cs++;
+ c2 = *ct++;
+ if (c1 != c2)
+ return c1 < c2 ? -1 : 1;
+ if (!c1)
+ break;
+ count--;
+ }
+ return 0;
+}
+
+static size_t __init prom_strlen(const char *s)
+{
+ const char *sc;
+
+ for (sc = s; *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+
+static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
+ if ((res = *su1 - *su2) != 0)
+ break;
+ return res;
+}
+
+static char __init *prom_strstr(const char *s1, const char *s2)
+{
+ size_t l1, l2;
+
+ l2 = prom_strlen(s2);
+ if (!l2)
+ return (char *)s1;
+ l1 = prom_strlen(s1);
+ while (l1 >= l2) {
+ l1--;
+ if (!prom_memcmp(s1, s2, l2))
+ return (char *)s1;
+ s1++;
+ }
+ return NULL;
+}
+
+static size_t __init prom_strlcpy(char *dest, const char *src, size_t size)
+{
+ size_t ret = prom_strlen(src);
+
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ memcpy(dest, src, len);
+ dest[len] = '\0';
+ }
+ return ret;
+}
+
+#ifdef CONFIG_PPC_PSERIES
+static int __init prom_strtobool(const char *s, bool *res)
+{
+ if (!s)
+ return -EINVAL;
+
+ switch (s[0]) {
+ case 'y':
+ case 'Y':
+ case '1':
+ *res = true;
+ return 0;
+ case 'n':
+ case 'N':
+ case '0':
+ *res = false;
+ return 0;
+ case 'o':
+ case 'O':
+ switch (s[1]) {
+ case 'n':
+ case 'N':
+ *res = true;
+ return 0;
+ case 'f':
+ case 'F':
+ *res = false;
+ return 0;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+#endif
/* This is the one and *ONLY* place where we actually call open
* firmware.
@@ -501,14 +628,14 @@ static int __init prom_next_node(phandle *nodep)
}
}
-static inline int prom_getprop(phandle node, const char *pname,
- void *value, size_t valuelen)
+static inline int __init prom_getprop(phandle node, const char *pname,
+ void *value, size_t valuelen)
{
return call_prom("getprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
}
-static inline int prom_getproplen(phandle node, const char *pname)
+static inline int __init prom_getproplen(phandle node, const char *pname)
{
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
@@ -555,7 +682,7 @@ static int __init prom_setprop(phandle node, const char *nodename,
add_string(&p, tohex((u32)(unsigned long) value));
add_string(&p, tohex(valuelen));
add_string(&p, tohex(ADDR(pname)));
- add_string(&p, tohex(strlen(pname)));
+ add_string(&p, tohex(prom_strlen(pname)));
add_string(&p, "property");
*p = 0;
return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
@@ -631,33 +758,30 @@ static void __init early_cmdline_parse(void)
const char *opt;
char *p;
- int l __maybe_unused = 0;
+ int l = 0;
prom_cmd_line[0] = 0;
p = prom_cmd_line;
if ((long)prom.chosen > 0)
l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
-#ifdef CONFIG_CMDLINE
- if (l <= 0 || p[0] == '\0') /* dbl check */
- strlcpy(prom_cmd_line,
- CONFIG_CMDLINE, sizeof(prom_cmd_line));
-#endif /* CONFIG_CMDLINE */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && (l <= 0 || p[0] == '\0')) /* dbl check */
+ prom_strlcpy(prom_cmd_line, CONFIG_CMDLINE, sizeof(prom_cmd_line));
prom_printf("command line: %s\n", prom_cmd_line);
#ifdef CONFIG_PPC64
- opt = strstr(prom_cmd_line, "iommu=");
+ opt = prom_strstr(prom_cmd_line, "iommu=");
if (opt) {
prom_printf("iommu opt is: %s\n", opt);
opt += 6;
while (*opt && *opt == ' ')
opt++;
- if (!strncmp(opt, "off", 3))
+ if (!prom_strncmp(opt, "off", 3))
prom_iommu_off = 1;
- else if (!strncmp(opt, "force", 5))
+ else if (!prom_strncmp(opt, "force", 5))
prom_iommu_force_on = 1;
}
#endif
- opt = strstr(prom_cmd_line, "mem=");
+ opt = prom_strstr(prom_cmd_line, "mem=");
if (opt) {
opt += 4;
prom_memory_limit = prom_memparse(opt, (const char **)&opt);
@@ -669,13 +793,13 @@ static void __init early_cmdline_parse(void)
#ifdef CONFIG_PPC_PSERIES
prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
- opt = strstr(prom_cmd_line, "disable_radix");
+ opt = prom_strstr(prom_cmd_line, "disable_radix");
if (opt) {
opt += 13;
if (*opt && *opt == '=') {
bool val;
- if (kstrtobool(++opt, &val))
+ if (prom_strtobool(++opt, &val))
prom_radix_disable = false;
else
prom_radix_disable = val;
@@ -1028,7 +1152,7 @@ static int __init prom_count_smt_threads(void)
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, "cpu"))
+ if (prom_strcmp(type, "cpu"))
continue;
/*
* There is an entry for each smt thread, each entry being
@@ -1138,8 +1262,14 @@ static void __init prom_check_platform_support(void)
int prop_len = prom_getproplen(prom.chosen,
"ibm,arch-vec-5-platform-support");
- /* First copy the architecture vec template */
- ibm_architecture_vec = ibm_architecture_vec_template;
+ /*
+ * First copy the architecture vec template
+ *
+ * use memcpy() instead of *vec = *vec_template so that GCC replaces it
+ * by __memcpy() when KASAN is active
+ */
+ memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
+ sizeof(ibm_architecture_vec));
if (prop_len > 1) {
int i;
@@ -1475,7 +1605,7 @@ static void __init prom_init_mem(void)
*/
prom_getprop(node, "name", type, sizeof(type));
}
- if (strcmp(type, "memory"))
+ if (prom_strcmp(type, "memory"))
continue;
plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
@@ -1487,8 +1617,8 @@ static void __init prom_init_mem(void)
endp = p + (plen / sizeof(cell_t));
#ifdef DEBUG_PROM
- memset(path, 0, PROM_SCRATCH_SIZE);
- call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
+ memset(path, 0, sizeof(prom_scratch));
+ call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
prom_debug(" node %s :\n", path);
#endif /* DEBUG_PROM */
@@ -1756,19 +1886,19 @@ static void __init prom_initialize_tce_table(void)
prom_getprop(node, "device_type", type, sizeof(type));
prom_getprop(node, "model", model, sizeof(model));
- if ((type[0] == 0) || (strstr(type, "pci") == NULL))
+ if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
continue;
/* Keep the old logic intact to avoid regression. */
if (compatible[0] != 0) {
- if ((strstr(compatible, "python") == NULL) &&
- (strstr(compatible, "Speedwagon") == NULL) &&
- (strstr(compatible, "Winnipeg") == NULL))
+ if ((prom_strstr(compatible, "python") == NULL) &&
+ (prom_strstr(compatible, "Speedwagon") == NULL) &&
+ (prom_strstr(compatible, "Winnipeg") == NULL))
continue;
} else if (model[0] != 0) {
- if ((strstr(model, "ython") == NULL) &&
- (strstr(model, "peedwagon") == NULL) &&
- (strstr(model, "innipeg") == NULL))
+ if ((prom_strstr(model, "ython") == NULL) &&
+ (prom_strstr(model, "peedwagon") == NULL) &&
+ (prom_strstr(model, "innipeg") == NULL))
continue;
}
@@ -1796,10 +1926,10 @@ static void __init prom_initialize_tce_table(void)
local_alloc_bottom = base;
/* It seems OF doesn't null-terminate the path :-( */
- memset(path, 0, PROM_SCRATCH_SIZE);
+ memset(path, 0, sizeof(prom_scratch));
/* Call OF to setup the TCE hardware */
if (call_prom("package-to-path", 3, 1, node,
- path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
+ path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
prom_printf("package-to-path failed\n");
}
@@ -1917,12 +2047,12 @@ static void __init prom_hold_cpus(void)
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, "cpu") != 0)
+ if (prom_strcmp(type, "cpu") != 0)
continue;
/* Skip non-configured cpus. */
if (prom_getprop(node, "status", type, sizeof(type)) > 0)
- if (strcmp(type, "okay") != 0)
+ if (prom_strcmp(type, "okay") != 0)
continue;
reg = cpu_to_be32(-1); /* make sparse happy */
@@ -1998,9 +2128,9 @@ static void __init prom_find_mmu(void)
return;
version[sizeof(version) - 1] = 0;
/* XXX might need to add other versions here */
- if (strcmp(version, "Open Firmware, 1.0.5") == 0)
+ if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
of_workarounds = OF_WA_CLAIM;
- else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
+ else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
} else
@@ -2033,7 +2163,7 @@ static void __init prom_init_stdout(void)
call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
prom_printf("OF stdout device is: %s\n", of_stdout_device);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
- path, strlen(path) + 1);
+ path, prom_strlen(path) + 1);
/* instance-to-package fails on PA-Semi */
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
@@ -2043,7 +2173,7 @@ static void __init prom_init_stdout(void)
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(stdout_node, "device_type", type, sizeof(type));
- if (strcmp(type, "display") == 0)
+ if (prom_strcmp(type, "display") == 0)
prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
}
}
@@ -2064,19 +2194,19 @@ static int __init prom_find_machine_type(void)
compat[len] = 0;
while (i < len) {
char *p = &compat[i];
- int sl = strlen(p);
+ int sl = prom_strlen(p);
if (sl == 0)
break;
- if (strstr(p, "Power Macintosh") ||
- strstr(p, "MacRISC"))
+ if (prom_strstr(p, "Power Macintosh") ||
+ prom_strstr(p, "MacRISC"))
return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
/* We must make sure we don't detect the IBM Cell
* blades as pSeries due to some firmware issues,
* so we do it here.
*/
- if (strstr(p, "IBM,CBEA") ||
- strstr(p, "IBM,CPBW-1.0"))
+ if (prom_strstr(p, "IBM,CBEA") ||
+ prom_strstr(p, "IBM,CPBW-1.0"))
return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
i += sl + 1;
@@ -2093,7 +2223,7 @@ static int __init prom_find_machine_type(void)
compat, sizeof(compat)-1);
if (len <= 0)
return PLATFORM_GENERIC;
- if (strcmp(compat, "chrp"))
+ if (prom_strcmp(compat, "chrp"))
return PLATFORM_GENERIC;
/* Default to pSeries. We need to know if we are running LPAR */
@@ -2155,19 +2285,19 @@ static void __init prom_check_displays(void)
for (node = 0; prom_next_node(&node); ) {
memset(type, 0, sizeof(type));
prom_getprop(node, "device_type", type, sizeof(type));
- if (strcmp(type, "display") != 0)
+ if (prom_strcmp(type, "display") != 0)
continue;
/* It seems OF doesn't null-terminate the path :-( */
path = prom_scratch;
- memset(path, 0, PROM_SCRATCH_SIZE);
+ memset(path, 0, sizeof(prom_scratch));
/*
* leave some room at the end of the path for appending extra
* arguments
*/
if (call_prom("package-to-path", 3, 1, node, path,
- PROM_SCRATCH_SIZE-10) == PROM_ERROR)
+ sizeof(prom_scratch) - 10) == PROM_ERROR)
continue;
prom_printf("found display : %s, opening... ", path);
@@ -2259,9 +2389,9 @@ static unsigned long __init dt_find_string(char *str)
s = os = (char *)dt_string_start;
s += 4;
while (s < (char *)dt_string_end) {
- if (strcmp(s, str) == 0)
+ if (prom_strcmp(s, str) == 0)
return s - os;
- s += strlen(s) + 1;
+ s += prom_strlen(s) + 1;
}
return 0;
}
@@ -2294,7 +2424,7 @@ static void __init scan_dt_build_strings(phandle node,
}
/* skip "name" */
- if (strcmp(namep, "name") == 0) {
+ if (prom_strcmp(namep, "name") == 0) {
*mem_start = (unsigned long)namep;
prev_name = "name";
continue;
@@ -2306,7 +2436,7 @@ static void __init scan_dt_build_strings(phandle node,
namep = sstart + soff;
} else {
/* Trim off some if we can */
- *mem_start = (unsigned long)namep + strlen(namep) + 1;
+ *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
dt_string_end = *mem_start;
}
prev_name = namep;
@@ -2363,8 +2493,8 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
/* get it again for debugging */
path = prom_scratch;
- memset(path, 0, PROM_SCRATCH_SIZE);
- call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
+ memset(path, 0, sizeof(prom_scratch));
+ call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
/* get and store all properties */
prev_name = "";
@@ -2375,7 +2505,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
break;
/* skip "name" */
- if (strcmp(pname, "name") == 0) {
+ if (prom_strcmp(pname, "name") == 0) {
prev_name = "name";
continue;
}
@@ -2406,7 +2536,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
call_prom("getprop", 4, 1, node, pname, valp, l);
*mem_start = _ALIGN(*mem_start, 4);
- if (!strcmp(pname, "phandle"))
+ if (!prom_strcmp(pname, "phandle"))
has_phandle = 1;
}
@@ -2476,8 +2606,8 @@ static void __init flatten_device_tree(void)
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- strcpy(namep, "phandle");
- mem_start = (unsigned long)namep + strlen(namep) + 1;
+ prom_strcpy(namep, "phandle");
+ mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
prom_printf("Building dt strings...\n");
@@ -2799,7 +2929,7 @@ static void __init fixup_device_tree_efika(void)
rv = prom_getprop(node, "model", prop, sizeof(prop));
if (rv == PROM_ERROR)
return;
- if (strcmp(prop, "EFIKA5K2"))
+ if (prom_strcmp(prop, "EFIKA5K2"))
return;
prom_printf("Applying EFIKA device tree fixups\n");
@@ -2807,13 +2937,13 @@ static void __init fixup_device_tree_efika(void)
/* Claiming to be 'chrp' is death */
node = call_prom("finddevice", 1, 1, ADDR("/"));
rv = prom_getprop(node, "device_type", prop, sizeof(prop));
- if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
+ if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
/* CODEGEN,description is exposed in /proc/cpuinfo so
fix that too */
rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
- if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
+ if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
prom_setprop(node, "/", "CODEGEN,description",
"Efika 5200B PowerPC System",
sizeof("Efika 5200B PowerPC System"));
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 667df97d2595..4cac45cb5de5 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -16,10 +16,18 @@
# If you really need to reference something from prom_init.o add
# it to the list below:
+grep "^CONFIG_KASAN=y$" .config >/dev/null
+if [ $? -eq 0 ]
+then
+ MEM_FUNCS="__memcpy __memset"
+else
+ MEM_FUNCS="memcpy memset"
+fi
+
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
-_end enter_prom memcpy memset reloc_offset __secondary_hold
+_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
-strcmp strcpy strlcpy strlen strncmp strstr kstrtobool logo_linux_clut224
+logo_linux_clut224
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index d9ac7d94656e..684b0b315c32 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -43,6 +43,7 @@
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include <asm/debug.h>
+#include <asm/hw_breakpoint.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -3088,7 +3089,7 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
- if (cpu_has_feature(CPU_FTR_DAWR))
+ if (dawr_enabled())
dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index b33bafb8fcea..e1c9cf079503 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -57,7 +57,7 @@ void setup_barrier_nospec(void)
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
- if (!no_nospec)
+ if (!no_nospec && !cpu_mitigations_off())
enable_barrier_nospec(enable);
}
@@ -104,6 +104,14 @@ static __init int barrier_nospec_debugfs_init(void)
return 0;
}
device_initcall(barrier_nospec_debugfs_init);
+
+static __init int security_feature_debugfs_init(void)
+{
+ debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
+ (u64 *)&powerpc_security_features);
+ return 0;
+}
+device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_PPC_FSL_BOOK3E
@@ -116,7 +124,7 @@ static int __init handle_nospectre_v2(char *p)
early_param("nospectre_v2", handle_nospectre_v2);
void setup_spectre_v2(void)
{
- if (no_spectrev2)
+ if (no_spectrev2 || cpu_mitigations_off())
do_btb_flush_fixups();
else
btb_flush_enabled = true;
@@ -300,7 +308,7 @@ void setup_stf_barrier(void)
stf_enabled_flush_types = type;
- if (!no_stf_barrier)
+ if (!no_stf_barrier && !cpu_mitigations_off())
stf_barrier_enable(enable);
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2e5dfb6e0823..aad9f5df6ab6 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -67,6 +67,7 @@
#include <asm/livepatch.h>
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
+#include <asm/kasan.h>
#include "setup.h"
@@ -133,13 +134,11 @@ int crashing_cpu = -1;
/* also used by kexec */
void machine_shutdown(void)
{
-#ifdef CONFIG_FA_DUMP
/*
* if fadump is active, cleanup the fadump registration before we
* shutdown.
*/
fadump_cleanup();
-#endif
if (ppc_md.machine_shutdown)
ppc_md.machine_shutdown();
@@ -200,14 +199,15 @@ static void show_cpuinfo_summary(struct seq_file *m)
{
struct device_node *root;
const char *model = NULL;
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
unsigned long bogosum = 0;
int i;
- for_each_online_cpu(i)
- bogosum += loops_per_jiffy;
- seq_printf(m, "total bogomips\t: %lu.%02lu\n",
- bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
-#endif /* CONFIG_SMP && CONFIG_PPC32 */
+
+ if (IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_PPC32)) {
+ for_each_online_cpu(i)
+ bogosum += loops_per_jiffy;
+ seq_printf(m, "total bogomips\t: %lu.%02lu\n",
+ bogosum / (500000 / HZ), bogosum / (5000 / HZ) % 100);
+ }
seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
if (ppc_md.name)
seq_printf(m, "platform\t: %s\n", ppc_md.name);
@@ -221,11 +221,10 @@ static void show_cpuinfo_summary(struct seq_file *m)
if (ppc_md.show_cpuinfo != NULL)
ppc_md.show_cpuinfo(m);
-#ifdef CONFIG_PPC32
/* Display the amount of memory */
- seq_printf(m, "Memory\t\t: %d MB\n",
- (unsigned int)(total_memory / (1024 * 1024)));
-#endif
+ if (IS_ENABLED(CONFIG_PPC32))
+ seq_printf(m, "Memory\t\t: %d MB\n",
+ (unsigned int)(total_memory / (1024 * 1024)));
}
static int show_cpuinfo(struct seq_file *m, void *v)
@@ -252,26 +251,24 @@ static int show_cpuinfo(struct seq_file *m, void *v)
else
seq_printf(m, "unknown (%08x)", pvr);
-#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
seq_printf(m, ", altivec supported");
-#endif /* CONFIG_ALTIVEC */
seq_printf(m, "\n");
#ifdef CONFIG_TAU
- if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
-#ifdef CONFIG_TAU_AVERAGE
- /* more straightforward, but potentially misleading */
- seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
- cpu_temp(cpu_id));
-#else
- /* show the actual temp sensor range */
- u32 temp;
- temp = cpu_temp_both(cpu_id);
- seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
- temp & 0xff, temp >> 16);
-#endif
+ if (cpu_has_feature(CPU_FTR_TAU)) {
+ if (IS_ENABLED(CONFIG_TAU_AVERAGE)) {
+ /* more straightforward, but potentially misleading */
+ seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
+ cpu_temp(cpu_id));
+ } else {
+ /* show the actual temp sensor range */
+ u32 temp;
+ temp = cpu_temp_both(cpu_id);
+ seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
+ temp & 0xff, temp >> 16);
+ }
}
#endif /* CONFIG_TAU */
@@ -335,11 +332,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
maj, min, PVR_VER(pvr), PVR_REV(pvr));
-#ifdef CONFIG_PPC32
- seq_printf(m, "bogomips\t: %lu.%02lu\n",
- loops_per_jiffy / (500000/HZ),
- (loops_per_jiffy / (5000/HZ)) % 100);
-#endif
+ if (IS_ENABLED(CONFIG_PPC32))
+ seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
seq_printf(m, "\n");
/* If this is the last cpu, print the summary */
@@ -401,8 +397,8 @@ void __init check_for_initrd(void)
#ifdef CONFIG_SMP
-int threads_per_core, threads_per_subcore, threads_shift;
-cpumask_t threads_core_mask;
+int threads_per_core, threads_per_subcore, threads_shift __read_mostly;
+cpumask_t threads_core_mask __read_mostly;
EXPORT_SYMBOL_GPL(threads_per_core);
EXPORT_SYMBOL_GPL(threads_per_subcore);
EXPORT_SYMBOL_GPL(threads_shift);
@@ -740,23 +736,19 @@ void __init setup_panic(void)
* BUG() in that case.
*/
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define KERNEL_COHERENCY 0
-#else
-#define KERNEL_COHERENCY 1
-#endif
+#define KERNEL_COHERENCY (!IS_ENABLED(CONFIG_NOT_COHERENT_CACHE))
static int __init check_cache_coherency(void)
{
struct device_node *np;
const void *prop;
- int devtree_coherency;
+ bool devtree_coherency;
np = of_find_node_by_path("/");
prop = of_get_property(np, "coherency-off", NULL);
of_node_put(np);
- devtree_coherency = prop ? 0 : 1;
+ devtree_coherency = prop ? false : true;
if (devtree_coherency != KERNEL_COHERENCY) {
printk(KERN_ERR
@@ -799,12 +791,6 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
static __init void print_system_info(void)
{
pr_info("-----------------------------------------------------\n");
-#ifdef CONFIG_PPC_BOOK3S_64
- pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-#endif
-#ifdef CONFIG_PPC_BOOK3S_32
- pr_info("Hash_size = 0x%lx\n", Hash_size);
-#endif
pr_info("phys_mem_size = 0x%llx\n",
(unsigned long long)memblock_phys_mem_size());
@@ -826,18 +812,7 @@ static __init void print_system_info(void)
pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- if (htab_address)
- pr_info("htab_address = 0x%p\n", htab_address);
- if (htab_hash_mask)
- pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
-#endif
-#ifdef CONFIG_PPC_BOOK3S_32
- if (Hash)
- pr_info("Hash = 0x%p\n", Hash);
- if (Hash_mask)
- pr_info("Hash_mask = 0x%lx\n", Hash_mask);
-#endif
+ print_system_hash_info();
if (PHYSICAL_START > 0)
pr_info("physical_start = 0x%llx\n",
@@ -868,6 +843,8 @@ static void smp_setup_pacas(void)
*/
void __init setup_arch(char **cmdline_p)
{
+ kasan_init();
+
*cmdline_p = boot_command_line;
/* Set a half-reasonable default so udelay does something sensible */
@@ -947,20 +924,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = klimit;
-#ifdef CONFIG_PPC_MM_SLICES
-#ifdef CONFIG_PPC64
- if (!radix_enabled())
- init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-#elif defined(CONFIG_PPC_8xx)
- init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-#else
-#error "context.addr_limit not initialized."
-#endif
-#endif
-
-#ifdef CONFIG_SPAPR_TCE_IOMMU
mm_iommu_init(&init_mm);
-#endif
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
@@ -969,9 +933,9 @@ void __init setup_arch(char **cmdline_p)
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
+ if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
+ conswitchp = &dummy_con;
+
if (ppc_md.setup_arch)
ppc_md.setup_arch();
@@ -983,10 +947,8 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff. */
mmu_context_init();
-#ifdef CONFIG_PPC64
/* Interrupt code needs to be 64K-aligned. */
- if ((unsigned long)_stext & 0xffff)
+ if (IS_ENABLED(CONFIG_PPC64) && (unsigned long)_stext & 0xffff)
panic("Kernelbase not 64K-aligned (0x%lx)!\n",
(unsigned long)_stext);
-#endif
}
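[Editor's note, illustrative only, not part of the commit: several setup-common.c hunks above replace #ifdef blocks with IS_ENABLED() checks. With IS_ENABLED() both branches are always parsed and type-checked, and the dead branch is still folded away because the condition is a compile-time constant, so rarely built configurations keep at least compile coverage. A standalone sketch of the idea, using a stand-in macro instead of the kernel's IS_ENABLED():]

#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_PPC32); 0 simulates a 64-bit build. */
#define PPC32_ENABLED 0

static void show_bogomips(unsigned long loops_per_jiffy, unsigned long hz)
{
    /* Always compiled and type-checked; folded out when PPC32_ENABLED is 0. */
    if (PPC32_ENABLED)
        printf("bogomips: %lu.%02lu\n",
               loops_per_jiffy / (500000 / hz),
               (loops_per_jiffy / (5000 / hz)) % 100);
}

int main(void)
{
    show_bogomips(4980736, 250);
    return 0;
}

[An #ifdef'd version of the same code would silently bit-rot on configurations that are not regularly built.]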
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 4a65e08a6042..3fb9f64f88fd 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -64,34 +64,6 @@ EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
/*
- * We're called here very early in the boot.
- *
- * Note that the kernel may be running at an address which is different
- * from the address that it was linked at, so we must use RELOC/PTRRELOC
- * to access static data (including strings). -- paulus
- */
-notrace unsigned long __init early_init(unsigned long dt_ptr)
-{
- unsigned long offset = reloc_offset();
-
- /* First zero the BSS -- use memset_io, some platforms don't have
- * caches on yet */
- memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
- __bss_stop - __bss_start);
-
- /*
- * Identify the CPU type and fix up code sections
- * that depend on which cpu we have.
- */
- identify_cpu(offset, mfspr(SPRN_PVR));
-
- apply_feature_fixups();
-
- return KERNELBASE + offset;
-}
-
-
-/*
* This is run before start_kernel(), the kernel has been relocated
* and we are running with enough of the MMU enabled to have our
* proper kernel virtual addresses
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ba404dd9ce1d..a400854a5036 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -68,6 +68,7 @@
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
+#include <asm/kup.h>
#include "setup.h"
@@ -331,6 +332,12 @@ void __init early_setup(unsigned long dt_ptr)
*/
configure_exceptions();
+ /*
+ * Configure Kernel Userspace Protection. This needs to happen before
+ * feature fixups for platforms that implement this using features.
+ */
+ setup_kup();
+
/* Apply all the dynamic patching */
apply_feature_fixups();
setup_feature_keys();
@@ -383,6 +390,9 @@ void early_setup_secondary(void)
/* Initialize the hash table or TLB handling */
early_init_mmu_secondary();
+ /* Perform any KUP setup that is per-cpu */
+ setup_kup();
+
/*
* At this point, we can let interrupts switch to virtual mode
* (the MMU has been setup), so adjust the MSR in the PACA to
@@ -932,7 +942,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
enabled_flush_types = types;
- if (!no_rfi_flush)
+ if (!no_rfi_flush && !cpu_mitigations_off())
rfi_flush_enable(enable);
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 6794466f6420..06c299ef6132 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -565,7 +565,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
preempt_disable();
/* pull in MSR TS bits from user context */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ regs->msr |= msr & MSR_TS_MASK;
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
@@ -745,6 +745,31 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (MSR_TM_SUSPENDED(mfmsr()))
tm_reclaim_current(0);
+ /*
+ * Also disable the MSR[TS] bit, so that an exception in the code
+ * below (such as a page fault in copy_ckvsx_to_user()) does not
+ * recheckpoint this task if a context switch happened inside the
+ * exception.
+ *
+ * A major page fault can indirectly call schedule(). Being rescheduled
+ * in the middle of an exception has a side effect: the CPU MSR[TS]
+ * state changes, since schedule() is entered with MSR[TS] disabled and
+ * returns with MSR[TS]=Suspended (switch_to() calls tm_recheckpoint()
+ * for the 'new' process). In this case the same process keeps running
+ * on the CPU, but the CPU MSR[TS] state has changed underneath it.
+ *
+ * This can cause a TM Bad Thing, since the MSR on the stack will have
+ * MSR[TS]=0, and that is what will be used for the RFID.
+ *
+ * Clearing the MSR[TS] state here avoids a recheckpoint if the task is
+ * rescheduled while still in the kernel. The MSR[TS] state does not
+ * need to be saved either, since it will be replaced later, in
+ * restore_tm_sigcontexts(), with the MSR[TS] value that came from the
+ * user context.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+
if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
goto badframe;
if (MSR_TM_ACTIVE(msr)) {
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 00f5a63c8d9a..103655d84b4b 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -509,3 +509,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e8e93c2c7d03..7a1708875d27 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -610,7 +610,7 @@ SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_MISC
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
@@ -639,7 +639,7 @@ SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
-#endif /* CONFIG_DEBUG_KERNEL */
+#endif /* CONFIG_DEBUG_MISC */
#endif /* HAS_PPC_PMC_PA6T */
#ifdef HAS_PPC_PMC_IBM
@@ -680,7 +680,7 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_MISC
__ATTR(hid0, 0600, show_hid0, store_hid0),
__ATTR(hid1, 0600, show_hid1, store_hid1),
__ATTR(hid4, 0600, show_hid4, store_hid4),
@@ -709,7 +709,7 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
-#endif /* CONFIG_DEBUG_KERNEL */
+#endif /* CONFIG_DEBUG_MISC */
};
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc0503ef9c9c..325d60633dfa 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -43,7 +43,6 @@
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
-#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
@@ -151,6 +150,8 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
unsigned long ppc_tb_freq;
EXPORT_SYMBOL_GPL(ppc_tb_freq);
+bool tb_invalid;
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Factor for converting from cputime_t (timebase ticks) to
@@ -460,6 +461,13 @@ void __delay(unsigned long loops)
diff += 1000000000;
spin_cpu_relax();
} while (diff < loops);
+ } else if (tb_invalid) {
+ /*
+ * TB is in error state and isn't ticking anymore.
+ * HMI handler was unable to recover from TB error.
+ * Return immediately, so that kernel won't get stuck here.
+ */
+ spin_cpu_relax();
} else {
start = get_tbl();
while (get_tbl() - start < loops)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1fd45a8650e1..83e59fdaa62d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -179,7 +179,7 @@ extern void panic_flush_kmsg_end(void)
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
debug_locks_off();
- console_flush_on_panic();
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}
static unsigned long oops_begin(struct pt_regs *regs)
@@ -2088,6 +2088,10 @@ void SPEFloatingPointException(struct pt_regs *regs)
int code = FPE_FLTUNK;
int err;
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
flush_spe_to_thread(current);
spefscr = current->thread.spefscr;
@@ -2133,6 +2137,10 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
extern int speround_handler(struct pt_regs *regs);
int err;
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
preempt_disable();
if (regs->msr & MSR_SPE)
giveup_spe(current);
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index ce199f6e4256..06f54d947057 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -26,9 +26,8 @@ GCOV_PROFILE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
+ -Wl,-soname=linux-vdso32.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO32__ -s
obj-y += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 28e7d112aa2f..32ebb3522ea1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -12,9 +12,8 @@ GCOV_PROFILE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
-ccflags-y := -shared -fno-common -fno-builtin
-ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
+ -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
asflags-y := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 21165da0052d..8eb867dbad5f 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -21,6 +21,7 @@ _GLOBAL(load_vr_state)
REST_32VRS(0,r4,r3)
blr
EXPORT_SYMBOL(load_vr_state)
+_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */
/*
* Store VMX state into memory, including VSCR.
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 3c6ab22a0c4e..af3c15a1d41e 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
-static DEFINE_PER_CPU(struct timer_list, wd_timer);
+static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
static DEFINE_PER_CPU(u64, wd_timer_tb);
/* SMP checker bits */
@@ -293,21 +293,21 @@ out:
nmi_exit();
}
-static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
-{
- t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
- if (wd_timer_period_ms > 1000)
- t->expires = __round_jiffies_up(t->expires, cpu);
- add_timer_on(t, cpu);
-}
-
-static void wd_timer_fn(struct timer_list *t)
+static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
int cpu = smp_processor_id();
+ if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+ return HRTIMER_NORESTART;
+
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+ return HRTIMER_NORESTART;
+
watchdog_timer_interrupt(cpu);
- wd_timer_reset(cpu, t);
+ hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
+
+ return HRTIMER_RESTART;
}
void arch_touch_nmi_watchdog(void)
@@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
-static void start_watchdog_timer_on(unsigned int cpu)
-{
- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
-
- per_cpu(wd_timer_tb, cpu) = get_tb();
-
- timer_setup(t, wd_timer_fn, TIMER_PINNED);
- wd_timer_reset(cpu, t);
-}
-
-static void stop_watchdog_timer_on(unsigned int cpu)
-{
- struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
-
- del_timer_sync(t);
-}
-
-static int start_wd_on_cpu(unsigned int cpu)
+static void start_watchdog(void *arg)
{
+ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
+ int cpu = smp_processor_id();
unsigned long flags;
if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
WARN_ON(1);
- return 0;
+ return;
}
if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- return 0;
+ return;
if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
- return 0;
+ return;
wd_smp_lock(&flags);
cpumask_set_cpu(cpu, &wd_cpus_enabled);
@@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
}
wd_smp_unlock(&flags);
- start_watchdog_timer_on(cpu);
+ *this_cpu_ptr(&wd_timer_tb) = get_tb();
- return 0;
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
+ hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
+ HRTIMER_MODE_REL_PINNED);
}
-static int stop_wd_on_cpu(unsigned int cpu)
+static int start_watchdog_on_cpu(unsigned int cpu)
{
+ return smp_call_function_single(cpu, start_watchdog, NULL, true);
+}
+
+static void stop_watchdog(void *arg)
+{
+ struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
+ int cpu = smp_processor_id();
unsigned long flags;
if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
- return 0; /* Can happen in CPU unplug case */
+ return; /* Can happen in CPU unplug case */
- stop_watchdog_timer_on(cpu);
+ hrtimer_cancel(hrtimer);
wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
wd_smp_unlock(&flags);
wd_smp_clear_cpu_pending(cpu, get_tb());
+}
- return 0;
+static int stop_watchdog_on_cpu(unsigned int cpu)
+{
+ return smp_call_function_single(cpu, stop_watchdog, NULL, true);
}
static void watchdog_calc_timeouts(void)
@@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
int cpu;
for_each_cpu(cpu, &wd_cpus_enabled)
- stop_wd_on_cpu(cpu);
+ stop_watchdog_on_cpu(cpu);
}
void watchdog_nmi_start(void)
@@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
watchdog_calc_timeouts();
for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
- start_wd_on_cpu(cpu);
+ start_watchdog_on_cpu(cpu);
}
/*
@@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"powerpc/watchdog:online",
- start_wd_on_cpu, stop_wd_on_cpu);
+ start_watchdog_on_cpu,
+ stop_watchdog_on_cpu);
if (err < 0) {
pr_warn("could not be initialized");
return err;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index bfdde04e4905..f53997a8ca62 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,7 +20,6 @@ if VIRTUALIZATION
config KVM
bool
select PREEMPT_NOTIFIERS
- select ANON_INODES
select HAVE_KVM_EVENTFD
select HAVE_KVM_VCPU_ASYNC_IOCTL
select SRCU
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3223aec88b2c..4c67cc79de7c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -94,7 +94,7 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
-kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o book3s_xive_native.o
kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
kvm-book3s_64-module-objs := \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 10c5579d20ce..61a212d0daf0 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -651,6 +651,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_REG_PPC_VP_STATE:
+ if (!vcpu->arch.xive_vcpu) {
+ r = -ENXIO;
+ break;
+ }
+ if (xive_enabled())
+ r = kvmppc_xive_native_get_vp(vcpu, val);
+ else
+ r = -ENXIO;
+ break;
+#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
*val = get_reg_val(id, vcpu->arch.fscr);
break;
@@ -724,6 +736,18 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_REG_PPC_VP_STATE:
+ if (!vcpu->arch.xive_vcpu) {
+ r = -ENXIO;
+ break;
+ }
+ if (xive_enabled())
+ r = kvmppc_xive_native_set_vp(vcpu, val);
+ else
+ r = -ENXIO;
+ break;
+#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
vcpu->arch.fscr = set_reg_val(id, *val);
break;
@@ -891,6 +915,17 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
kvmppc_rtas_tokens_free(kvm);
WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
+
+#ifdef CONFIG_KVM_XICS
+ /*
+ * Free the XIVE devices which are not directly freed by the
+ * device 'release' method
+ */
+ kfree(kvm->arch.xive_devices.native);
+ kvm->arch.xive_devices.native = NULL;
+ kfree(kvm->arch.xive_devices.xics_on_xive);
+ kvm->arch.xive_devices.xics_on_xive = NULL;
+#endif /* CONFIG_KVM_XICS */
}
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
@@ -1050,6 +1085,9 @@ static int kvmppc_book3s_init(void)
if (xics_on_xive()) {
kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+ kvmppc_xive_native_init_module();
+ kvm_register_device_ops(&kvm_xive_native_ops,
+ KVM_DEV_TYPE_XIVE);
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1060,8 +1098,10 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
- if (xics_on_xive())
+ if (xics_on_xive()) {
kvmppc_xive_exit_module();
+ kvmppc_xive_native_exit_module();
+ }
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index be7bc070eae5..ab3d484c5e2e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -600,7 +600,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* If writing != 0, then the HPTE must allow writing, if we get here */
write_ok = writing;
hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, writing, pages);
+ npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
if (npages < 1) {
/* Check if it's an I/O mapping */
down_read(&current->mm->mmap_sem);
@@ -1193,7 +1193,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
goto err;
hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, 1, pages);
+ npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
if (npages < 1)
goto err;
page = pages[0];
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index f02b04973710..66270e07449a 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -228,11 +228,33 @@ static void release_spapr_tce_table(struct rcu_head *head)
unsigned long i, npages = kvmppc_tce_pages(stt->size);
for (i = 0; i < npages; i++)
- __free_page(stt->pages[i]);
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
kfree(stt);
}
+static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
+ unsigned long sttpage)
+{
+ struct page *page = stt->pages[sttpage];
+
+ if (page)
+ return page;
+
+ mutex_lock(&stt->alloc_lock);
+ page = stt->pages[sttpage];
+ if (!page) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ WARN_ON_ONCE(!page);
+ if (page)
+ stt->pages[sttpage] = page;
+ }
+ mutex_unlock(&stt->alloc_lock);
+
+ return page;
+}
+
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
@@ -241,7 +263,10 @@ static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
return VM_FAULT_SIGBUS;
- page = stt->pages[vmf->pgoff];
+ page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
+ if (!page)
+ return VM_FAULT_OOM;
+
get_page(page);
vmf->page = page;
return 0;
@@ -296,7 +321,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvmppc_spapr_tce_table *siter;
unsigned long npages, size = args->size;
int ret = -ENOMEM;
- int i;
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
@@ -318,14 +342,9 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
stt->offset = args->offset;
stt->size = size;
stt->kvm = kvm;
+ mutex_init(&stt->alloc_lock);
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
- for (i = 0; i < npages; i++) {
- stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!stt->pages[i])
- goto fail;
- }
-
mutex_lock(&kvm->lock);
/* Check this LIOBN hasn't been previously allocated */
@@ -352,17 +371,28 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
if (ret >= 0)
return ret;
- fail:
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
-
kfree(stt);
fail_acct:
kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
return ret;
}
+static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
+ unsigned long *ua)
+{
+ unsigned long gfn = tce >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+
+ memslot = search_memslots(kvm_memslots(kvm), gfn);
+ if (!memslot)
+ return -EINVAL;
+
+ *ua = __gfn_to_hva_memslot(memslot, gfn) |
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+ return 0;
+}
+
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
unsigned long tce)
{
@@ -378,7 +408,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_TOO_HARD;
- if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
@@ -397,6 +427,36 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
return H_SUCCESS;
}
+/*
+ * Handles TCE requests for emulated devices.
+ * Puts guest TCE values to the table and expects user space to convert them.
+ * Cannot fail so kvmppc_tce_validate must be called before it.
+ */
+static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ unsigned long idx, unsigned long tce)
+{
+ struct page *page;
+ u64 *tbl;
+ unsigned long sttpage;
+
+ idx -= stt->offset;
+ sttpage = idx / TCES_PER_PAGE;
+ page = stt->pages[sttpage];
+
+ if (!page) {
+ /* We allow any TCE, not just with read|write permissions */
+ if (!tce)
+ return;
+
+ page = kvm_spapr_get_tce_page(stt, sttpage);
+ if (!page)
+ return;
+ }
+ tbl = page_to_virt(page);
+
+ tbl[idx % TCES_PER_PAGE] = tce;
+}
+
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry)
{
@@ -543,15 +603,15 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
ret = kvmppc_tce_validate(stt, tce);
if (ret != H_SUCCESS)
- return ret;
+ goto unlock_exit;
dir = iommu_tce_direction(tce);
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
- if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
goto unlock_exit;
}
@@ -612,7 +672,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return ret;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
ret = H_TOO_HARD;
goto unlock_exit;
}
@@ -647,7 +707,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
}
tce = be64_to_cpu(tce);
- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
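[Editor's note, illustrative only, not part of the commit: kvm_spapr_get_tce_page() above allocates TCE backing pages on demand with a check, lock, re-check, allocate sequence under stt->alloc_lock, instead of allocating every page up front in kvm_vm_ioctl_create_spapr_tce(). A standalone userspace analogue of that shape is sketched below; all names are invented for the example, and the unlocked fast-path read would strictly need an atomic load in portable C, which the kernel side covers with its own memory model.]

#include <pthread.h>
#include <stdlib.h>

#define PAGE_SIZE 4096
#define NPAGES    64

struct lazy_table {
    void *pages[NPAGES];
    pthread_mutex_t alloc_lock;
};

static void *lazy_get_page(struct lazy_table *t, unsigned int idx)
{
    void *page = t->pages[idx];

    if (page)                       /* fast path: already allocated */
        return page;

    pthread_mutex_lock(&t->alloc_lock);
    page = t->pages[idx];           /* re-check under the lock */
    if (!page) {
        page = calloc(1, PAGE_SIZE);    /* zeroed, like __GFP_ZERO */
        if (page)
            t->pages[idx] = page;
    }
    pthread_mutex_unlock(&t->alloc_lock);

    return page;                    /* NULL only if allocation failed */
}

int main(void)
{
    struct lazy_table t = { .alloc_lock = PTHREAD_MUTEX_INITIALIZER };

    return lazy_get_page(&t, 5) ? 0 : 1;
}

[The real-mode path cannot take this allocation step, which is why the next file, book3s_64_vio_hv.c, adds kvmppc_rm_ioba_validate() to return H_TOO_HARD when a needed TCE page is not yet populated.]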
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 2206bc729b9a..484b47fa3960 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -66,8 +66,6 @@
#endif
-#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
-
/*
* Finds a TCE table descriptor by LIOBN.
*
@@ -88,6 +86,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
EXPORT_SYMBOL_GPL(kvmppc_find_table);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
+ unsigned long *ua, unsigned long **prmap)
+{
+ unsigned long gfn = tce >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+
+ memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
+ if (!memslot)
+ return -EINVAL;
+
+ *ua = __gfn_to_hva_memslot(memslot, gfn) |
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+ if (prmap)
+ *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+
+ return 0;
+}
+
/*
* Validates TCE address.
* At the moment flags and page mask are validated.
@@ -111,7 +128,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
- if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
return H_TOO_HARD;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -129,7 +146,6 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
return H_SUCCESS;
}
-#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
/* Note on the use of page_address() in real mode,
*
@@ -161,13 +177,9 @@ static u64 *kvmppc_page_address(struct page *page)
/*
* Handles TCE requests for emulated devices.
* Puts guest TCE values to the table and expects user space to convert them.
- * Called in both real and virtual modes.
- * Cannot fail so kvmppc_tce_validate must be called before it.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
+ * Cannot fail so kvmppc_rm_tce_validate must be called before it.
*/
-void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
unsigned long idx, unsigned long tce)
{
struct page *page;
@@ -175,35 +187,48 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
idx -= stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
+ /*
+ * The page must not be NULL in real mode;
+ * kvmppc_rm_ioba_validate() must have taken care of this.
+ */
+ WARN_ON_ONCE_RM(!page);
tbl = kvmppc_page_address(page);
tbl[idx % TCES_PER_PAGE] = tce;
}
-EXPORT_SYMBOL_GPL(kvmppc_tce_put);
-long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap)
+/*
+ * TCE pages are allocated on demand in virtual mode, by kvmppc_tce_put();
+ * kvmppc_rm_tce_put() is not able to do that in real mode.
+ * Check that kvmppc_rm_tce_put() can succeed here, i.e. that each TCE page
+ * touched is already allocated or not required (when clearing a tce entry).
+ */
+static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long ioba, unsigned long npages, bool clearing)
{
- unsigned long gfn = tce >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
+ unsigned long i, idx, sttpage, sttpages;
+ unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);
- memslot = search_memslots(kvm_memslots(kvm), gfn);
- if (!memslot)
- return -EINVAL;
-
- *ua = __gfn_to_hva_memslot(memslot, gfn) |
- (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+ if (ret)
+ return ret;
+ /*
+ * clearing==true says kvmppc_rm_tce_put won't be allocating pages
+ * for empty tces.
+ */
+ if (clearing)
+ return H_SUCCESS;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (prmap)
- *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-#endif
+ idx = (ioba >> stt->page_shift) - stt->offset;
+ sttpage = idx / TCES_PER_PAGE;
+ sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
+ TCES_PER_PAGE;
+ for (i = sttpage; i < sttpage + sttpages; ++i)
+ if (!stt->pages[i])
+ return H_TOO_HARD;
- return 0;
+ return H_SUCCESS;
}
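
A small standalone sketch of the backing-page span computation in kvmppc_rm_ioba_validate() above: with 64K backing pages (an assumption of the example) and 8-byte TCEs there are 8192 entries per page, so an update of npages entries starting at index idx only has to check a short, contiguous run of stt->pages[] slots.

#include <stdio.h>

#define SK_TCES_PER_PAGE (65536UL / sizeof(unsigned long long))  /* 64K / 8 */

/* How many stt->pages[] slots does [idx, idx + npages) touch? */
static unsigned long span_pages(unsigned long idx, unsigned long npages)
{
        unsigned long tail = idx % SK_TCES_PER_PAGE + npages;

        return (tail + SK_TCES_PER_PAGE - 1) / SK_TCES_PER_PAGE;  /* _ALIGN_UP */
}

int main(void)
{
        /* 4 entries starting 2 entries before a page boundary span 2 pages */
        printf("first slot %lu, %lu slot(s)\n",
               8190UL / SK_TCES_PER_PAGE, span_pages(8190, 4));
        return 0;
}
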
-EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
unsigned long entry, unsigned long *hpa,
enum dma_data_direction *direction)
@@ -381,7 +406,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (!stt)
return H_TOO_HARD;
- ret = kvmppc_ioba_validate(stt, ioba, 1);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
if (ret != H_SUCCESS)
return ret;
@@ -390,7 +415,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
entry = ioba >> stt->page_shift;
@@ -409,7 +434,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
}
}
- kvmppc_tce_put(stt, entry, tce);
+ kvmppc_rm_tce_put(stt, entry, tce);
return H_SUCCESS;
}
@@ -480,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
if (tce_list & (SZ_4K - 1))
return H_PARAMETER;
- ret = kvmppc_ioba_validate(stt, ioba, npages);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
if (ret != H_SUCCESS)
return ret;
@@ -492,7 +517,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
struct mm_iommu_table_group_mem_t *mem;
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
return H_TOO_HARD;
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -508,7 +533,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
* We do not require memory to be preregistered in this case
* so lock rmap and do __find_linux_pte_or_hugepte().
*/
- if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
return H_TOO_HARD;
rmap = (void *) vmalloc_to_phys(rmap);
@@ -542,7 +567,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+ if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -557,7 +582,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
}
}
- kvmppc_tce_put(stt, entry + i, tce);
+ kvmppc_rm_tce_put(stt, entry + i, tce);
}
unlock_exit:
@@ -583,7 +608,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
if (!stt)
return H_TOO_HARD;
- ret = kvmppc_ioba_validate(stt, ioba, npages);
+ ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
if (ret != H_SUCCESS)
return ret;
@@ -610,7 +635,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+ kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
return H_SUCCESS;
}
@@ -635,6 +660,10 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
+ if (!page) {
+ vcpu->arch.regs.gpr[4] = 0;
+ return H_SUCCESS;
+ }
tbl = (u64 *)page_address(page);
vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 06964350b97a..d5fc624e0655 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -74,6 +74,7 @@
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
+#include <asm/hw_breakpoint.h>
#include "book3s.h"
@@ -749,7 +750,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
- * smb_wmb() in kvmppc_guest_entry_inject().
+ * smp_wmb() in kvmppc_guest_entry_inject().
*/
smp_rmb();
vc = vcpu->arch.vcore;
@@ -801,6 +802,80 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
}
}
+/* Copy guest memory in place - must reside within a single memslot */
+static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
+ unsigned long len)
+{
+ struct kvm_memory_slot *to_memslot = NULL;
+ struct kvm_memory_slot *from_memslot = NULL;
+ unsigned long to_addr, from_addr;
+ int r;
+
+ /* Get HVA for from address */
+ from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
+ if (!from_memslot)
+ return -EFAULT;
+ if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
+ << PAGE_SHIFT))
+ return -EINVAL;
+ from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
+ if (kvm_is_error_hva(from_addr))
+ return -EFAULT;
+ from_addr |= (from & (PAGE_SIZE - 1));
+
+ /* Get HVA for to address */
+ to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
+ if (!to_memslot)
+ return -EFAULT;
+ if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
+ << PAGE_SHIFT))
+ return -EINVAL;
+ to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
+ if (kvm_is_error_hva(to_addr))
+ return -EFAULT;
+ to_addr |= (to & (PAGE_SIZE - 1));
+
+ /* Perform copy */
+ r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
+ len);
+ if (r)
+ return -EFAULT;
+ mark_page_dirty(kvm, to >> PAGE_SHIFT);
+ return 0;
+}
+
+static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src)
+{
+ u64 pg_sz = SZ_4K; /* 4K page size */
+ u64 pg_mask = SZ_4K - 1;
+ int ret;
+
+ /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+ H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+ return H_PARAMETER;
+
+ /* dest (and src if copy_page flag set) must be page aligned */
+ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+ return H_PARAMETER;
+
+ /* zero and/or copy the page as determined by the flags */
+ if (flags & H_COPY_PAGE) {
+ ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
+ if (ret < 0)
+ return H_PARAMETER;
+ } else if (flags & H_ZERO_PAGE) {
+ ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
+ if (ret < 0)
+ return H_PARAMETER;
+ }
+
+ /* We can ignore the remaining flags */
+
+ return H_SUCCESS;
+}
+
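
The following userspace sketch models the guest-visible H_PAGE_INIT semantics implemented by kvmppc_h_page_init() above: unknown flags and unaligned addresses are rejected, a copy takes precedence over a zero, and the icache/loaned flags are accepted but ignored. The flag bit values are placeholders, not the PAPR-defined constants.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_SZ_4K                4096UL
#define SK_ICACHE_INVALIDATE    (1UL << 0)      /* placeholder flag values */
#define SK_ICACHE_SYNCHRONIZE   (1UL << 1)
#define SK_ZERO_PAGE            (1UL << 2)
#define SK_COPY_PAGE            (1UL << 3)
#define SK_PAGE_SET_LOANED      (1UL << 4)
#define SK_VALID_FLAGS          (SK_ICACHE_INVALIDATE | SK_ICACHE_SYNCHRONIZE | \
                                 SK_ZERO_PAGE | SK_COPY_PAGE | SK_PAGE_SET_LOANED)

/* Returns 0 on success, -1 for the H_PARAMETER cases of the handler. */
static int page_init(unsigned long flags, void *dest, const void *src)
{
        if (flags & ~SK_VALID_FLAGS)
                return -1;                              /* unknown flag */
        if (((uintptr_t)dest & (SK_SZ_4K - 1)) ||
            ((flags & SK_COPY_PAGE) && ((uintptr_t)src & (SK_SZ_4K - 1))))
                return -1;                              /* not 4K aligned */

        if (flags & SK_COPY_PAGE)                       /* copy wins over zero */
                memcpy(dest, src, SK_SZ_4K);
        else if (flags & SK_ZERO_PAGE)
                memset(dest, 0, SK_SZ_4K);
        /* icache/loaned flags are accepted but otherwise ignored */
        return 0;
}

int main(void)
{
        static char dst[SK_SZ_4K] __attribute__((aligned(4096)));
        static char src[SK_SZ_4K] __attribute__((aligned(4096)));

        printf("zero: %d, copy: %d, bad: %d\n",
               page_init(SK_ZERO_PAGE, dst, NULL),
               page_init(SK_COPY_PAGE, dst, src),
               page_init(SK_ZERO_PAGE, dst + 1, NULL));
        return 0;
}
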
static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
struct kvmppc_vcore *vcore = target->arch.vcore;
@@ -1003,6 +1078,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (nesting_enabled(vcpu->kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
+ case H_PAGE_INIT:
+ ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ break;
default:
return RESUME_HOST;
}
@@ -1047,6 +1127,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_IPOLL:
case H_XIRR_X:
#endif
+ case H_PAGE_INIT:
return 1;
}
@@ -2504,37 +2585,6 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
}
}
-static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested)
-{
- cpumask_t *need_tlb_flush;
- int lpid;
-
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return;
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu &= ~0x3UL;
-
- if (nested) {
- lpid = nested->shadow_lpid;
- need_tlb_flush = &nested->need_tlb_flush;
- } else {
- lpid = kvm->arch.lpid;
- need_tlb_flush = &kvm->arch.need_tlb_flush;
- }
-
- mtspr(SPRN_LPID, lpid);
- isync();
- smp_mb();
-
- if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(pcpu, need_tlb_flush);
- }
-}
-
static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
@@ -3228,19 +3278,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (sub = 0; sub < core_info.n_subcores; ++sub)
spin_unlock(&core_info.vc[sub]->lock);
- if (kvm_is_radix(vc->kvm)) {
- /*
- * Do we need to flush the process scoped TLB for the LPAR?
- *
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit here.
- *
- * Hash must be flushed in realmode in order to use tlbiel.
- */
- kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL);
- }
+ guest_enter_irqoff();
+
+ srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+
+ this_cpu_disable_ftrace();
/*
* Interrupts will be enabled once we get into the guest,
@@ -3248,19 +3290,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
trace_hardirqs_on();
- guest_enter_irqoff();
-
- srcu_idx = srcu_read_lock(&vc->kvm->srcu);
-
- this_cpu_disable_ftrace();
-
trap = __kvmppc_vcore_entry();
+ trace_hardirqs_off();
+
this_cpu_enable_ftrace();
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
- trace_hardirqs_off();
set_irq_happened(trap);
spin_lock(&vc->lock);
@@ -3374,7 +3411,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_PURR, vcpu->arch.purr);
mtspr(SPRN_SPURR, vcpu->arch.spurr);
- if (cpu_has_feature(CPU_FTR_DAWR)) {
+ if (dawr_enabled()) {
mtspr(SPRN_DAWR, vcpu->arch.dawr);
mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
}
@@ -3423,7 +3460,9 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
- mtspr(SPRN_PSSCR, host_psscr);
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_psscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
mtspr(SPRN_HFSCR, host_hfscr);
mtspr(SPRN_CIABR, host_ciabr);
mtspr(SPRN_DAWR, host_dawr);
@@ -3511,6 +3550,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#ifdef CONFIG_ALTIVEC
load_vr_state(&vcpu->arch.vr);
#endif
+ mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
mtspr(SPRN_DSCR, vcpu->arch.dscr);
mtspr(SPRN_IAMR, vcpu->arch.iamr);
@@ -3602,6 +3642,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#ifdef CONFIG_ALTIVEC
store_vr_state(&vcpu->arch.vr);
#endif
+ vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
if (cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
@@ -3967,7 +4008,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
unsigned long lpcr)
{
int trap, r, pcpu;
- int srcu_idx;
+ int srcu_idx, lpid;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4043,8 +4084,12 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE))
- kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
+ }
trace_hardirqs_on();
guest_enter_irqoff();
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index b0cf22477e87..6035d24f1d1d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -805,3 +805,60 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
vcpu->arch.doorbell_request = 0;
}
}
+
+static void flush_guest_tlb(struct kvm *kvm)
+{
+ unsigned long rb, set;
+
+ rb = PPC_BIT(52); /* IS = 2 */
+ if (kvm_is_radix(kvm)) {
+ /* R=1 PRS=1 RIC=2 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ "r" (0) : "memory");
+ for (set = 1; set < kvm->arch.tlb_sets; ++set) {
+ rb += PPC_BIT(51); /* increment set number */
+ /* R=1 PRS=1 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (0),
+ "r" (0) : "memory");
+ }
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
+ "r" (0) : "memory");
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ }
+ asm volatile("ptesync": : :"memory");
+}
+
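
As a cross-check on the inline assembly above, here is a small sketch of how the RB operand for tlbiel is formed: PPC_BIT() numbers bits from the most significant end, so the IS=2 field is PPC_BIT(52) == 0x800 and the congruence-class (set) number advances in PPC_BIT(51) == 0x1000 steps, the same 0x800/0x1000 constants used by the hand-coded loop removed from book3s_hv_rmhandlers.S later in this patch.

#include <stdio.h>

#define SK_PPC_BIT(bit) (1UL << (63 - (bit)))   /* MSB-first bit numbering */

int main(void)
{
        unsigned long rb = SK_PPC_BIT(52);      /* IS = 2: invalidate by set */
        int set;

        for (set = 0; set < 4; set++) {         /* only 4 sets shown here */
                printf("set %d: rb = 0x%lx\n", set, rb);
                rb += SK_PPC_BIT(51);           /* next congruence class */
        }
        return 0;
}
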
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+
+ /*
+ * On POWER9, individual threads can come in here, but the
+ * TLB is shared between the 4 threads in a core, hence
+ * invalidating on one thread invalidates for all.
+ * Thus we make all 4 threads use the same bit.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pcpu = cpu_first_thread_sibling(pcpu);
+
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
+ if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ flush_guest_tlb(kvm);
+
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
+ }
+}
+EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
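
A short sketch of the thread-to-core folding used by kvmppc_check_need_tlb_flush() above: because the SMT threads of a POWER9 core share a TLB, the need_tlb_flush cpumask is keyed by the first thread of the core. Assuming four threads per core (an assumption of this example; the kernel uses cpu_first_thread_sibling()), the folding reduces to clearing the low two bits of the CPU number.

#include <stdio.h>

#define SK_THREADS_PER_CORE 4   /* assumption for this sketch */

static int first_thread_sibling(int cpu)
{
        return cpu & ~(SK_THREADS_PER_CORE - 1);
}

int main(void)
{
        int cpu;

        for (cpu = 12; cpu < 16; cpu++)
                printf("cpu %d uses flush bit %d\n", cpu, first_thread_sibling(cpu));
        return 0;
}
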
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 3b3791ed74a6..8431ad1e8391 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -13,6 +13,7 @@
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/sizes.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
@@ -867,6 +868,149 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
return ret;
}
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
+ int writing, unsigned long *hpa,
+ struct kvm_memory_slot **memslot_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+ unsigned int shift;
+ pte_t *ptep, pte;
+
+ /* Find the memslot for this address */
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return H_PARAMETER;
+
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Try to find the host pte for that virtual address */
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ if (!ptep)
+ return H_TOO_HARD;
+ pte = kvmppc_read_update_linux_pte(ptep, writing);
+ if (!pte_present(pte))
+ return H_TOO_HARD;
+
+ /* Convert to a physical address */
+ if (shift)
+ psize = 1UL << shift;
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (psize - 1);
+ pa |= gpa & ~PAGE_MASK;
+
+ if (hpa)
+ *hpa = pa;
+ if (memslot_p)
+ *memslot_p = memslot;
+
+ return H_SUCCESS;
+}
+
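
A userspace sketch of the physical-address composition at the end of kvmppc_get_hpa() above: the PTE supplies the base frame of the (possibly huge) host page, the HVA supplies the page-granular offset inside a huge page, and the GPA supplies the byte offset within the base page. A 64K base page and a 16M huge page are assumptions of the example.

#include <stdio.h>

#define SK_PAGE_SHIFT 16UL                      /* assuming 64K base pages */
#define SK_PAGE_SIZE  (1UL << SK_PAGE_SHIFT)
#define SK_PAGE_MASK  (~(SK_PAGE_SIZE - 1))

static unsigned long compose_pa(unsigned long pfn, unsigned long hva,
                                unsigned long gpa, unsigned int shift)
{
        unsigned long psize = shift ? (1UL << shift) : SK_PAGE_SIZE;
        unsigned long pa = pfn << SK_PAGE_SHIFT;        /* base of the mapping */

        pa |= hva & (psize - 1);        /* which base page within a huge page */
        pa |= gpa & ~SK_PAGE_MASK;      /* byte offset within the base page */
        return pa;
}

int main(void)
{
        /* 16M huge page (shift 24) backing a 64K guest page */
        printf("pa = 0x%lx\n",
               compose_pa(0x10000, 0x7fff12340000UL, 0x12345678UL, 24));
        return 0;
}
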
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+ unsigned long dest)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pa, mmu_seq;
+ long ret = H_SUCCESS;
+ int i;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ /* Check if we've been invalidated */
+ raw_spin_lock(&kvm->mmu_lock.rlock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = H_TOO_HARD;
+ goto out_unlock;
+ }
+
+ /* Zero the page */
+ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+ dcbz((void *)pa);
+ kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ raw_spin_unlock(&kvm->mmu_lock.rlock);
+ return ret;
+}
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+ unsigned long dest, unsigned long src)
+{
+ unsigned long dest_pa, src_pa, mmu_seq;
+ struct kvm_memory_slot *dest_memslot;
+ struct kvm *kvm = vcpu->kvm;
+ long ret = H_SUCCESS;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
+ if (ret != H_SUCCESS)
+ return ret;
+ ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ /* Check if we've been invalidated */
+ raw_spin_lock(&kvm->mmu_lock.rlock);
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
+ ret = H_TOO_HARD;
+ goto out_unlock;
+ }
+
+ /* Copy the page */
+ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+ kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ raw_spin_unlock(&kvm->mmu_lock.rlock);
+ return ret;
+}
+
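
Both helpers above (zero and copy) follow the same race check: sample the MMU notifier sequence count before translating, re-check it under the lock, and bail out with H_TOO_HARD so the virtual-mode handler retries if an invalidation ran in between. A generic, illustrative sketch of that pattern, not the kernel API:

#include <stdio.h>

static unsigned long notifier_seq;      /* bumped by the invalidation side */

static int guarded_update(void)
{
        unsigned long seq = notifier_seq;       /* 1. sample before translating */

        /* 2. translate guest addresses here; may race with an invalidation */

        /* 3. under the lock: if anything changed, let the caller retry */
        if (notifier_seq != seq)
                return -1;                      /* maps to H_TOO_HARD above */

        /* 4. translation still valid, safe to touch the page */
        return 0;
}

int main(void)
{
        printf("%d\n", guarded_update());
        return 0;
}
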
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 pg_mask = SZ_4K - 1; /* 4K page size */
+ long ret = H_SUCCESS;
+
+ /* Don't handle radix mode here, go up to the virtual mode handler */
+ if (kvm_is_radix(kvm))
+ return H_TOO_HARD;
+
+ /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+ H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+ return H_PARAMETER;
+
+ /* dest (and src if copy_page flag set) must be page aligned */
+ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+ return H_PARAMETER;
+
+ /* zero and/or copy the page as determined by the flags */
+ if (flags & H_COPY_PAGE)
+ ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+ else if (flags & H_ZERO_PAGE)
+ ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+ /* We can ignore the other flags */
+
+ return ret;
+}
+
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 3b9662a4207e..085509148d95 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -822,7 +822,7 @@ static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
raddr = per_cpu_ptr(addr, cpu);
l = (unsigned long)raddr;
- if (REGION_ID(l) == VMALLOC_REGION_ID) {
+ if (get_region_id(l) == VMALLOC_REGION_ID) {
l = vmalloc_to_phys(raddr);
raddr = (unsigned int *)l;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3a5e719ef032..f9b2620fbecd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -35,6 +35,7 @@
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
+#include <asm/cpuidle.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
@@ -45,6 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
+#define NAPPING_UNSPLIT 3
/* Stack frame offsets for kvmppc_hv_entry */
#define SFS 208
@@ -290,17 +292,19 @@ kvm_novcpu_exit:
b kvmhv_switch_to_host
/*
- * We come in here when wakened from nap mode.
- * Relocation is off and most register values are lost.
- * r13 points to the PACA.
+ * We come in here when wakened from Linux offline idle code.
+ * Relocation is off
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
- .globl kvm_start_guest
-kvm_start_guest:
- /* Set runlatch bit the minute you wake up from nap */
- mfspr r0, SPRN_CTRLF
- ori r0, r0, 1
- mtspr SPRN_CTRLT, r0
+_GLOBAL(idle_kvm_start_guest)
+ ld r4,PACAEMERGSP(r13)
+ mfcr r5
+ mflr r0
+ std r1,0(r4)
+ std r5,8(r4)
+ std r0,16(r4)
+ subi r1,r4,STACK_FRAME_OVERHEAD
+ SAVE_NVGPRS(r1)
/*
* Could avoid this and pass it through in r3. For now,
@@ -308,27 +312,23 @@ kvm_start_guest:
*/
mtspr SPRN_SRR1,r3
- ld r2,PACATOC(r13)
-
li r0,0
stb r0,PACA_FTRACE_ENABLED(r13)
li r0,KVM_HWTHREAD_IN_KVM
stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* NV GPR values from power7_idle() will no longer be valid */
- li r0,1
- stb r0,PACA_NAPSTATELOST(r13)
-
- /* were we napping due to cede? */
+ /* kvm cede / napping does not come through here */
lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,NAPPING_CEDE
- beq kvm_end_cede
- cmpwi r0,NAPPING_NOVCPU
- beq kvm_novcpu_wakeup
+ twnei r0,0
+
+ b 1f
+
+kvm_unsplit_wakeup:
+ li r0, 0
+ stb r0, HSTATE_NAPPING(r13)
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
+1:
/*
* We weren't napping due to cede, so this must be a secondary
@@ -437,19 +437,25 @@ kvm_no_guest:
lbz r3, HSTATE_HWTHREAD_REQ(r13)
cmpwi r3, 0
bne 54f
-/*
- * We jump to pnv_wakeup_loss, which will return to the caller
- * of power7_nap in the powernv cpu offline loop. The value we
- * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
- * requires SRR1 in r12.
- */
+
+ /*
+ * Jump to idle_return_gpr_loss, which returns to the
+ * idle_kvm_start_guest caller.
+ */
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
- li r3, 0
- mfspr r12,SPRN_SRR1
- b pnv_wakeup_loss
+ /* set up r3 for return */
+ mfspr r3,SPRN_SRR1
+ REST_NVGPRS(r1)
+ addi r1, r1, STACK_FRAME_OVERHEAD
+ ld r0, 16(r1)
+ ld r5, 8(r1)
+ ld r1, 0(r1)
+ mtlr r0
+ mtcr r5
+ blr
53: HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
@@ -534,6 +540,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
lbz r0, KVM_SPLIT_DO_NAP(r3)
cmpwi r0, 0
beq 57f
+ li r3, NAPPING_UNSPLIT
+ stb r3, HSTATE_NAPPING(r13)
li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
mfspr r5, SPRN_LPCR
rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
@@ -581,11 +589,8 @@ kvmppc_hv_entry:
1:
#endif
- /* Use cr7 as an indication of radix mode */
ld r5, HSTATE_KVM_VCORE(r13)
ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
- lbz r0, KVM_RADIX(r9)
- cmpwi cr7, r0, 0
/*
* POWER7/POWER8 host -> guest partition switch code.
@@ -608,9 +613,6 @@ kvmppc_hv_entry:
cmpwi r6,0
bne 10f
- /* Radix has already switched LPID and flushed core TLB */
- bne cr7, 22f
-
lwz r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
@@ -622,41 +624,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
- /* See if we need to flush the TLB. Hash has to be done in RM */
- lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
-BEGIN_FTR_SECTION
- /*
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit here.
- */
- clrrdi r6,r6,2
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- clrldi r7,r6,64-6 /* extract bit number (6 bits) */
- srdi r6,r6,6 /* doubleword number */
- sldi r6,r6,3 /* address offset */
- add r6,r6,r9
- addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
- li r8,1
- sld r8,r8,r7
- ld r7,0(r6)
- and. r7,r7,r8
- beq 22f
- /* Flush the TLB of any entries for this LPID */
- lwz r0,KVM_TLB_SETS(r9)
- mtctr r0
- li r7,0x800 /* IS field = 0b10 */
- ptesync
- li r0,0 /* RS for P9 version of tlbiel */
-28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */
- addi r7,r7,0x1000
- bdnz 28b
- ptesync
-23: ldarx r7,0,r6 /* clear the bit after TLB flushed */
- andc r7,r7,r8
- stdcx. r7,0,r6
- bne 23b
+ /* See if we need to flush the TLB. */
+ mr r3, r9 /* kvm pointer */
+ lhz r4, PACAPACAINDEX(r13) /* physical cpu number */
+ li r5, 0 /* nested vcpu pointer */
+ bl kvmppc_check_need_tlb_flush
+ nop
+ ld r5, HSTATE_KVM_VCORE(r13)
/* Add timebase offset onto timebase */
22: ld r8,VCORE_TB_OFFSET(r5)
@@ -822,18 +796,21 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_IAMR, r5
mtspr SPRN_PSPB, r6
mtspr SPRN_FSCR, r7
- ld r5, VCPU_DAWR(r4)
- ld r6, VCPU_DAWRX(r4)
- ld r7, VCPU_CIABR(r4)
- ld r8, VCPU_TAR(r4)
/*
* Handle broken DAWR case by not writing it. This means we
* can still store the DAWR register for migration.
*/
-BEGIN_FTR_SECTION
+ LOAD_REG_ADDR(r5, dawr_force_enable)
+ lbz r5, 0(r5)
+ cmpdi r5, 0
+ beq 1f
+ ld r5, VCPU_DAWR(r4)
+ ld r6, VCPU_DAWRX(r4)
mtspr SPRN_DAWR, r5
mtspr SPRN_DAWRX, r6
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
+1:
+ ld r7, VCPU_CIABR(r4)
+ ld r8, VCPU_TAR(r4)
mtspr SPRN_CIABR, r7
mtspr SPRN_TAR, r8
ld r5, VCPU_IC(r4)
@@ -969,17 +946,27 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
#ifdef CONFIG_KVM_XICS
/* We are entering the guest on that thread, push VCPU to XIVE */
- ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
+ lwz r8, VCPU_XIVE_CAM_WORD(r4)
+ li r7, TM_QW1_OS + TM_WORD2
+ mfmsr r0
+ andi. r0, r0, MSR_DR /* in real mode? */
+ beq 2f
+ ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
+ cmpldi cr1, r10, 0
+ beq cr1, no_xive
+ eieio
+ stdx r11,r9,r10
+ stwx r8,r7,r10
+ b 3f
+2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr1, r10, 0
+ beq cr1, no_xive
eieio
stdcix r11,r9,r10
- lwz r11, VCPU_XIVE_CAM_WORD(r4)
- li r9, TM_QW1_OS + TM_WORD2
- stwcix r11,r9,r10
- li r9, 1
+ stwcix r8,r7,r10
+3: li r9, 1
stb r9, VCPU_XIVE_PUSHED(r4)
eieio
@@ -998,12 +985,16 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
* on, we mask it.
*/
lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi r0,0
- beq 1f
- ld r10, VCPU_XIVE_ESC_RADDR(r4)
+ cmpwi cr1, r0,0
+ beq cr1, 1f
li r9, XIVE_ESB_SET_PQ_01
+ beq 4f /* in real mode? */
+ ld r10, VCPU_XIVE_ESC_VADDR(r4)
+ ldx r0, r10, r9
+ b 5f
+4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
ldcix r0, r10, r9
- sync
+5: sync
/* We have a possible subtle race here: The escalation interrupt might
* have fired and be on its way to the host queue while we mask it,
@@ -2281,7 +2272,7 @@ hcall_real_table:
#endif
.long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
- .long 0 /* 0x2c */
+ .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
.long 0 /* 0x30 */
.long 0 /* 0x34 */
.long 0 /* 0x38 */
@@ -2513,11 +2504,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
blr
2:
-BEGIN_FTR_SECTION
- /* POWER9 with disabled DAWR */
+ LOAD_REG_ADDR(r11, dawr_force_enable)
+ lbz r11, 0(r11)
+ cmpdi r11, 0
li r3, H_HARDWARE
- blr
-END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
+ beqlr
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT
@@ -2654,6 +2645,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
+ /* Go back to host stack */
+ ld r1, HSTATE_HOST_R1(r13)
+
/*
* Take a nap until a decrementer or external or doorbell interrupt
* occurs, with PECE1 and PECE0 set in LPCR.
@@ -2682,26 +2676,42 @@ BEGIN_FTR_SECTION
* requested level = 0 (just stop dispatching)
*/
lis r3, (PSSCR_EC | PSSCR_ESL)@h
- mtspr SPRN_PSSCR, r3
/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
li r4, LPCR_PECE_HVEE@higher
sldi r4, r4, 32
or r5, r5, r4
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+FTR_SECTION_ELSE
+ li r3, PNV_THREAD_NAP
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
- li r0, 0
- std r0, HSTATE_SCRATCH0(r13)
- ptesync
- ld r0, HSTATE_SCRATCH0(r13)
-1: cmpd r0, r0
- bne 1b
+
BEGIN_FTR_SECTION
- nap
+ bl isa300_idle_stop_mayloss
FTR_SECTION_ELSE
- PPC_STOP
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
- b .
+ bl isa206_idle_insn_mayloss
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
+
+ mfspr r0, SPRN_CTRLF
+ ori r0, r0, 1
+ mtspr SPRN_CTRLT, r0
+
+ mtspr SPRN_SRR1, r3
+
+ li r0, 0
+ stb r0, PACA_FTRACE_ENABLED(r13)
+
+ li r0, KVM_HWTHREAD_IN_KVM
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+
+ lbz r0, HSTATE_NAPPING(r13)
+ cmpwi r0, NAPPING_CEDE
+ beq kvm_end_cede
+ cmpwi r0, NAPPING_NOVCPU
+ beq kvm_novcpu_wakeup
+ cmpwi r0, NAPPING_UNSPLIT
+ beq kvm_unsplit_wakeup
+ twi 31,0,0 /* Nap state must not be zero */
33: mr r4, r3
li r3, 0
@@ -2709,12 +2719,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
b 34f
kvm_end_cede:
+ /* Woken by external or decrementer interrupt */
+
/* get vcpu pointer */
ld r4, HSTATE_KVM_VCPU(r13)
- /* Woken by external or decrementer interrupt */
- ld r1, HSTATE_HOST_R1(r13)
-
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
addi r3, r4, VCPU_TB_RMINTR
bl kvmhv_accumulate_time
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f78d002f0fe0..4953957333b7 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -166,7 +166,8 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
+ bool single_escalation)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
struct xive_q *q = &xc->queues[prio];
@@ -185,7 +186,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
return -EIO;
}
- if (xc->xive->single_escalation)
+ if (single_escalation)
name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
vcpu->kvm->arch.lpid, xc->server_num);
else
@@ -217,7 +218,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
* interrupt, thus leaving it effectively masked after
* it fires once.
*/
- if (xc->xive->single_escalation) {
+ if (single_escalation) {
struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
@@ -291,7 +292,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
continue;
rc = xive_provision_queue(vcpu, prio);
if (rc == 0 && !xive->single_escalation)
- xive_attach_escalation(vcpu, prio);
+ kvmppc_xive_attach_escalation(vcpu, prio,
+ xive->single_escalation);
if (rc)
return rc;
}
@@ -342,7 +344,7 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
-static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
struct kvm_vcpu *vcpu;
int i, rc;
@@ -380,11 +382,6 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
return -EBUSY;
}
-static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
-{
- return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
-}
-
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
struct kvmppc_xive_src_block *sb,
struct kvmppc_xive_irq_state *state)
@@ -430,8 +427,8 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive_vp(xive, state->act_server),
- MASKED, state->number);
+ kvmppc_xive_vp(xive, state->act_server),
+ MASKED, state->number);
/* set old_p so we can track if an H_EOI was done */
state->old_p = true;
state->old_q = false;
@@ -486,8 +483,8 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive_vp(xive, state->act_server),
- state->act_priority, state->number);
+ kvmppc_xive_vp(xive, state->act_server),
+ state->act_priority, state->number);
/* If an EOI is needed, do it here */
if (!state->old_p)
xive_vm_source_eoi(hw_num, xd);
@@ -535,7 +532,7 @@ static int xive_target_interrupt(struct kvm *kvm,
* priority. The count for that new target will have
* already been incremented.
*/
- rc = xive_select_target(kvm, &server, prio);
+ rc = kvmppc_xive_select_target(kvm, &server, prio);
/*
* We failed to find a target ? Not much we can do
@@ -563,7 +560,7 @@ static int xive_target_interrupt(struct kvm *kvm,
kvmppc_xive_select_irq(state, &hw_num, NULL);
return xive_native_configure_irq(hw_num,
- xive_vp(xive, server),
+ kvmppc_xive_vp(xive, server),
prio, state->number);
}
@@ -849,7 +846,8 @@ int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
/*
* We can't update the state of a "pushed" VCPU, but that
- * shouldn't happen.
+ * shouldn't happen because the vcpu->mutex makes running a
+ * vcpu mutually exclusive with doing one_reg get/set on it.
*/
if (WARN_ON(vcpu->arch.xive_pushed))
return -EIO;
@@ -940,6 +938,13 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
/* Turn the IPI hard off */
xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ /*
+ * Reset ESB guest mapping. Needed when ESB pages are exposed
+ * to the guest in XIVE native mode
+ */
+ if (xive->ops && xive->ops->reset_mapped)
+ xive->ops->reset_mapped(kvm, guest_irq);
+
/* Grab info about irq */
state->pt_number = hw_irq;
state->pt_data = irq_data_get_irq_handler_data(host_data);
@@ -951,7 +956,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
* which is fine for a never started interrupt.
*/
xive_native_configure_irq(hw_irq,
- xive_vp(xive, state->act_server),
+ kvmppc_xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1025,9 +1030,17 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
state->pt_number = 0;
state->pt_data = NULL;
+ /*
+ * Reset ESB guest mapping. Needed when ESB pages are exposed
+ * to the guest in XIVE native mode
+ */
+ if (xive->ops && xive->ops->reset_mapped) {
+ xive->ops->reset_mapped(kvm, guest_irq);
+ }
+
/* Reconfigure the IPI */
xive_native_configure_irq(state->ipi_number,
- xive_vp(xive, state->act_server),
+ kvmppc_xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1049,7 +1062,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
-static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
struct kvm *kvm = vcpu->kvm;
@@ -1083,14 +1096,35 @@ static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
arch_spin_unlock(&sb->lock);
}
}
+
+ /* Disable vcpu's escalation interrupt */
+ if (vcpu->arch.xive_esc_on) {
+ __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+ XIVE_ESB_SET_PQ_01));
+ vcpu->arch.xive_esc_on = false;
+ }
+
+ /*
+ * Clear pointers to escalation interrupt ESB.
+ * This is safe because the vcpu->mutex is held, preventing
+ * any other CPU from concurrently executing a KVM_RUN ioctl.
+ */
+ vcpu->arch.xive_esc_vaddr = 0;
+ vcpu->arch.xive_esc_raddr = 0;
}
void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct kvmppc_xive *xive = xc->xive;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
int i;
+ if (!kvmppc_xics_enabled(vcpu))
+ return;
+
+ if (!xc)
+ return;
+
pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
/* Ensure no interrupt is still routed to that VP */
@@ -1129,6 +1163,10 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
}
/* Free the VP */
kfree(xc);
+
+ /* Cleanup the vcpu */
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+ vcpu->arch.xive_vcpu = NULL;
}
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
@@ -1146,7 +1184,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
}
if (xive->kvm != vcpu->kvm)
return -EPERM;
- if (vcpu->arch.irq_type)
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
pr_devel("Duplicate !\n");
@@ -1166,7 +1204,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = xive_vp(xive, cpu);
+ xc->vp_id = kvmppc_xive_vp(xive, cpu);
xc->mfrr = 0xff;
xc->valid = true;
@@ -1219,7 +1257,8 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
if (xive->qmap & (1 << i)) {
r = xive_provision_queue(vcpu, i);
if (r == 0 && !xive->single_escalation)
- xive_attach_escalation(vcpu, i);
+ kvmppc_xive_attach_escalation(
+ vcpu, i, xive->single_escalation);
if (r)
goto bail;
} else {
@@ -1234,7 +1273,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
}
/* If not done above, attach priority 0 escalation */
- r = xive_attach_escalation(vcpu, 0);
+ r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
if (r)
goto bail;
@@ -1485,8 +1524,8 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
return 0;
}
-static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
- int irq)
+struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
+ struct kvmppc_xive *xive, int irq)
{
struct kvm *kvm = xive->kvm;
struct kvmppc_xive_src_block *sb;
@@ -1509,6 +1548,7 @@ static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *x
for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+ sb->irq_state[i].eisn = 0;
sb->irq_state[i].guest_priority = MASKED;
sb->irq_state[i].saved_priority = MASKED;
sb->irq_state[i].act_priority = MASKED;
@@ -1565,7 +1605,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
sb = kvmppc_xive_find_source(xive, irq, &idx);
if (!sb) {
pr_devel("No source, creating source block...\n");
- sb = xive_create_src_block(xive, irq);
+ sb = kvmppc_xive_create_src_block(xive, irq);
if (!sb) {
pr_devel("Failed to create block...\n");
return -ENOMEM;
@@ -1789,7 +1829,7 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
xive_cleanup_irq_data(xd);
}
-static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
int i;
@@ -1810,16 +1850,55 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
}
}
-static void kvmppc_xive_free(struct kvm_device *dev)
+/*
+ * Called when device fd is closed. kvm->lock is held.
+ */
+static void kvmppc_xive_release(struct kvm_device *dev)
{
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
int i;
+ int was_ready;
+
+ pr_devel("Releasing xive device\n");
debugfs_remove(xive->dentry);
- if (kvm)
- kvm->arch.xive = NULL;
+ /*
+ * Clearing mmu_ready temporarily while holding kvm->lock
+ * is a way of ensuring that no vcpus can enter the guest
+ * until we drop kvm->lock. Doing kick_all_cpus_sync()
+ * ensures that any vcpu executing inside the guest has
+ * exited the guest. Once kick_all_cpus_sync() has finished,
+ * we know that no vcpu can be executing the XIVE push or
+ * pull code, or executing a XICS hcall.
+ *
+ * Since this is the device release function, we know that
+ * userspace does not have any open fd referring to the
+ * device. Therefore none of the device attribute set/get
+ * functions can be executing concurrently, and similarly,
+ * neither can the connect_vcpu and set/clr_mapped functions.
+ */
+ was_ready = kvm->arch.mmu_ready;
+ kvm->arch.mmu_ready = 0;
+ kick_all_cpus_sync();
+
+ /*
+ * We should clean up the vCPU interrupt presenters first.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /*
+ * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+ * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
+ */
+ mutex_lock(&vcpu->mutex);
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+ kvm->arch.xive = NULL;
/* Mask and free interrupts */
for (i = 0; i <= xive->max_sbid; i++) {
@@ -1832,11 +1911,47 @@ static void kvmppc_xive_free(struct kvm_device *dev)
if (xive->vp_base != XIVE_INVALID_VP)
xive_native_free_vp_block(xive->vp_base);
+ kvm->arch.mmu_ready = was_ready;
+
+ /*
+ * A reference to the kvmppc_xive pointer is now kept under
+ * the xive_devices struct of the machine for reuse. For now,
+ * it is only freed when the VM is destroyed, until all the
+ * execution paths are fixed.
+ */
- kfree(xive);
kfree(dev);
}
+/*
+ * When the guest chooses the interrupt mode (XICS legacy or XIVE
+ * native), the VM will switch KVM devices. The previous device will
+ * be "released" before the new one is created.
+ *
+ * Until we are sure all execution paths are well protected, provide a
+ * fail safe (transitional) method for device destruction, in which
+ * the XIVE device pointer is recycled and not directly freed.
+ */
+struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
+{
+ struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
+ &kvm->arch.xive_devices.native :
+ &kvm->arch.xive_devices.xics_on_xive;
+ struct kvmppc_xive *xive = *kvm_xive_device;
+
+ if (!xive) {
+ xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ *kvm_xive_device = xive;
+ } else {
+ memset(xive, 0, sizeof(*xive));
+ }
+
+ return xive;
+}
+
+/*
+ * Create a XICS device with XIVE backend. kvm->lock is held.
+ */
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
struct kvmppc_xive *xive;
@@ -1845,7 +1960,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
pr_devel("Creating xive for partition\n");
- xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ xive = kvmppc_xive_get_device(kvm, type);
if (!xive)
return -ENOMEM;
@@ -1883,6 +1998,43 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}
+int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int i;
+
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+ u32 i0, i1, idx;
+
+ if (!q->qpage && !xc->esc_virq[i])
+ continue;
+
+ seq_printf(m, " [q%d]: ", i);
+
+ if (q->qpage) {
+ idx = q->idx;
+ i0 = be32_to_cpup(q->qpage + idx);
+ idx = (idx + 1) & q->msk;
+ i1 = be32_to_cpup(q->qpage + idx);
+ seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
+ i0, i1);
+ }
+ if (xc->esc_virq[i]) {
+ struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
+ struct xive_irq_data *xd =
+ irq_data_get_irq_handler_data(d);
+ u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+
+ seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
+ (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
+ (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
+ xc->esc_virq[i], pq, xd->eoi_page);
+ seq_puts(m, "\n");
+ }
+ }
+ return 0;
+}
static int xive_debug_show(struct seq_file *m, void *private)
{
@@ -1908,7 +2060,6 @@ static int xive_debug_show(struct seq_file *m, void *private)
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- unsigned int i;
if (!xc)
continue;
@@ -1918,33 +2069,8 @@ static int xive_debug_show(struct seq_file *m, void *private)
xc->server_num, xc->cppr, xc->hw_cppr,
xc->mfrr, xc->pending,
xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
- for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
- struct xive_q *q = &xc->queues[i];
- u32 i0, i1, idx;
-
- if (!q->qpage && !xc->esc_virq[i])
- continue;
- seq_printf(m, " [q%d]: ", i);
-
- if (q->qpage) {
- idx = q->idx;
- i0 = be32_to_cpup(q->qpage + idx);
- idx = (idx + 1) & q->msk;
- i1 = be32_to_cpup(q->qpage + idx);
- seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
- }
- if (xc->esc_virq[i]) {
- struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
- seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
- (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
- (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
- xc->esc_virq[i], pq, xd->eoi_page);
- seq_printf(m, "\n");
- }
- }
+ kvmppc_xive_debug_show_queues(m, vcpu);
t_rm_h_xirr += xc->stat_rm_h_xirr;
t_rm_h_ipoll += xc->stat_rm_h_ipoll;
@@ -1999,7 +2125,7 @@ struct kvm_device_ops kvm_xive_ops = {
.name = "kvm-xive",
.create = kvmppc_xive_create,
.init = kvmppc_xive_init,
- .destroy = kvmppc_xive_free,
+ .release = kvmppc_xive_release,
.set_attr = xive_set_attr,
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index a08ae6fd4c51..426146332984 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -13,6 +13,13 @@
#include "book3s_xics.h"
/*
+ * The XIVE Interrupt source numbers are within the range 0 to
+ * KVMPPC_XICS_NR_IRQS.
+ */
+#define KVMPPC_XIVE_FIRST_IRQ 0
+#define KVMPPC_XIVE_NR_IRQS KVMPPC_XICS_NR_IRQS
+
+/*
* State for one guest irq source.
*
* For each guest source we allocate a HW interrupt in the XIVE
@@ -54,6 +61,9 @@ struct kvmppc_xive_irq_state {
bool saved_p;
bool saved_q;
u8 saved_scan_prio;
+
+ /* Xive native */
+ u32 eisn; /* Guest Effective IRQ number */
};
/* Select the "right" interrupt (IPI vs. passthrough) */
@@ -84,6 +94,11 @@ struct kvmppc_xive_src_block {
struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
+struct kvmppc_xive;
+
+struct kvmppc_xive_ops {
+ int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
+};
struct kvmppc_xive {
struct kvm *kvm;
@@ -122,6 +137,10 @@ struct kvmppc_xive {
/* Flags */
u8 single_escalation;
+
+ struct kvmppc_xive_ops *ops;
+ struct address_space *mapping;
+ struct mutex mapping_lock;
};
#define KVMPPC_XIVE_Q_COUNT 8
@@ -198,6 +217,11 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp
return xive->src_blocks[bid];
}
+static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
/*
* Mapping between guest priorities and host priorities
* is as follow.
@@ -248,5 +272,18 @@ extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+/*
+ * Common Xive routines for XICS-over-XIVE and XIVE native
+ */
+void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
+int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
+struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
+ struct kvmppc_xive *xive, int irq);
+void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
+int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
+int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
+ bool single_escalation);
+struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
+
#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
new file mode 100644
index 000000000000..6a8e698c4b6e
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -0,0 +1,1249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2019, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "xive-kvm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/opal.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xive.h"
+
+static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = in_be64(xd->eoi_mmio + offset);
+ return (u8)val;
+}
+
+static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q = &xc->queues[prio];
+
+ xive_native_disable_queue(xc->vp_id, q, prio);
+ if (q->qpage) {
+ put_page(virt_to_page(q->qpage));
+ q->qpage = NULL;
+ }
+}
+
+void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ int i;
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return;
+
+ if (!xc)
+ return;
+
+ pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);
+
+ /* Ensure no interrupt is still routed to that VP */
+ xc->valid = false;
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ /* Disable the VP */
+ xive_native_disable_vp(xc->vp_id);
+
+ /* Free the queues & associated interrupts */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ xc->esc_virq[i] = 0;
+ }
+
+ /* Free the queue */
+ kvmppc_xive_native_cleanup_queue(vcpu, i);
+ }
+
+ /* Free the VP */
+ kfree(xc);
+
+ /* Cleanup the vcpu */
+ vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
+ vcpu->arch.xive_vcpu = NULL;
+}
+
+int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 server_num)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc = NULL;
+ int rc;
+
+ pr_devel("native_connect_vcpu(server=%d)\n", server_num);
+
+ if (dev->ops != &kvm_xive_native_ops) {
+ pr_devel("Wrong ops !\n");
+ return -EPERM;
+ }
+ if (xive->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
+ return -EBUSY;
+ if (server_num >= KVM_MAX_VCPUS) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&vcpu->kvm->lock);
+
+ if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+ pr_devel("Duplicate !\n");
+ rc = -EEXIST;
+ goto bail;
+ }
+
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc) {
+ rc = -ENOMEM;
+ goto bail;
+ }
+
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = server_num;
+
+ xc->vp_id = kvmppc_xive_vp(xive, server_num);
+ xc->valid = true;
+ vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
+
+ rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
+ if (rc) {
+ pr_err("Failed to get VP info from OPAL: %d\n", rc);
+ goto bail;
+ }
+
+ /*
+ * Enable the VP first, as the single escalation mode will
+ * affect the numbering of the escalation interrupts
+ */
+ rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
+ if (rc) {
+ pr_err("Failed to enable VP in OPAL: %d\n", rc);
+ goto bail;
+ }
+
+ /* Configure VCPU fields for use by assembly push/pull */
+ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
+ vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
+
+ /* TODO: reset all queues to a clean state ? */
+bail:
+ mutex_unlock(&vcpu->kvm->lock);
+ if (rc)
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+
+ return rc;
+}
+
+/*
+ * Device passthrough support
+ */
+static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+
+ if (irq >= KVMPPC_XIVE_NR_IRQS)
+ return -EINVAL;
+
+ /*
+ * Clear the ESB pages of the IRQ number being mapped (or
+ * unmapped) into the guest and let the VM fault handler
+ * repopulate with the appropriate ESB pages (device or IC)
+ */
+ pr_debug("clearing esb pages for girq 0x%lx\n", irq);
+ mutex_lock(&xive->mapping_lock);
+ if (xive->mapping)
+ unmap_mapping_range(xive->mapping,
+ irq * (2ull << PAGE_SHIFT),
+ 2ull << PAGE_SHIFT, 1);
+ mutex_unlock(&xive->mapping_lock);
+ return 0;
+}
+
+static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
+ .reset_mapped = kvmppc_xive_native_reset_mapped,
+};
+
+static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct kvm_device *dev = vma->vm_file->private_data;
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u16 src;
+ u64 page;
+ unsigned long irq;
+ u64 page_offset;
+
+ /*
+ * Linux/KVM uses two ESB pages per interrupt, one for trigger
+ * and one for EOI
+ */
+ page_offset = vmf->pgoff - vma->vm_pgoff;
+ irq = page_offset / 2;
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel("%s: source %lx not found !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ arch_spin_lock(&sb->lock);
+
+ /*
+ * first/even page is for trigger
+ * second/odd page is for EOI and management.
+ */
+ page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
+ arch_spin_unlock(&sb->lock);
+
+ if (WARN_ON(!page)) {
+ pr_err("%s: accessing invalid ESB page for source %lx !\n",
+ __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+}
+
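
A quick sketch of the ESB layout assumed by the fault handler above: every guest interrupt is backed by two consecutive pages in the mapping, the even one for triggering and the odd one for EOI/management, so a fault offset decodes directly into an (irq, page role) pair.

#include <stdio.h>

static void decode_esb_offset(unsigned long page_offset)
{
        unsigned long irq = page_offset / 2;
        const char *role = (page_offset % 2) ? "EOI/management" : "trigger";

        printf("page offset %lu: irq 0x%lx, %s page\n", page_offset, irq, role);
}

int main(void)
{
        decode_esb_offset(0);   /* irq 0, trigger */
        decode_esb_offset(1);   /* irq 0, EOI     */
        decode_esb_offset(5);   /* irq 2, EOI     */
        return 0;
}
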
+static const struct vm_operations_struct xive_native_esb_vmops = {
+ .fault = xive_native_esb_fault,
+};
+
+static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+
+ switch (vmf->pgoff - vma->vm_pgoff) {
+ case 0: /* HW - forbid access */
+ case 1: /* HV - forbid access */
+ return VM_FAULT_SIGBUS;
+ case 2: /* OS */
+ vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
+ return VM_FAULT_NOPAGE;
+ case 3: /* USER - TODO */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static const struct vm_operations_struct xive_native_tima_vmops = {
+ .fault = xive_native_tima_fault,
+};
+
+static int kvmppc_xive_native_mmap(struct kvm_device *dev,
+ struct vm_area_struct *vma)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We only allow mappings at fixed offset for now */
+ if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
+ if (vma_pages(vma) > 4)
+ return -EINVAL;
+ vma->vm_ops = &xive_native_tima_vmops;
+ } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
+ if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
+ return -EINVAL;
+ vma->vm_ops = &xive_native_esb_vmops;
+ } else {
+ return -EINVAL;
+ }
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+
+ /*
+ * Grab the KVM device file address_space to be able to clear
+ * the ESB pages mapping when a device is passed-through into
+ * the guest.
+ */
+ xive->mapping = vma->vm_file->f_mapping;
+ return 0;
+}
+
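
To summarise the fixed layout enforced by kvmppc_xive_native_mmap() above, the sketch below classifies a requested (pgoff, length) pair: a small TIMA region of at most four pages and an ESB region of at most two pages per interrupt. The offset and IRQ-count constants are placeholders; the real values come from the KVM uapi headers and the book3s_xive.h definitions.

#include <stdio.h>

#define SK_TIMA_PAGE_OFFSET 0           /* placeholder offsets */
#define SK_ESB_PAGE_OFFSET  4
#define SK_NR_IRQS          1024        /* placeholder for KVMPPC_XIVE_NR_IRQS */

static const char *classify(unsigned long pgoff, unsigned long npages)
{
        if (pgoff == SK_TIMA_PAGE_OFFSET)
                return npages <= 4 ? "TIMA" : "invalid: TIMA too large";
        if (pgoff == SK_ESB_PAGE_OFFSET)
                return npages <= 2UL * SK_NR_IRQS ? "ESB" : "invalid: ESB too large";
        return "invalid: unknown offset";
}

int main(void)
{
        printf("%s\n", classify(SK_TIMA_PAGE_OFFSET, 3));
        printf("%s\n", classify(SK_ESB_PAGE_OFFSET, 8));
        printf("%s\n", classify(123, 1));
        return 0;
}
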
+static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
+ u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u64 val;
+ u16 idx;
+ int rc;
+
+ pr_devel("%s irq=0x%lx\n", __func__, irq);
+
+ if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
+ return -E2BIG;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb) {
+ pr_debug("No source, creating source block...\n");
+ sb = kvmppc_xive_create_src_block(xive, irq);
+ if (!sb) {
+ pr_err("Failed to create block...\n");
+ return -ENOMEM;
+ }
+ }
+ state = &sb->irq_state[idx];
+
+ if (get_user(val, ubufp)) {
+ pr_err("fault getting user info !\n");
+ return -EFAULT;
+ }
+
+ arch_spin_lock(&sb->lock);
+
+ /*
+ * If the source doesn't already have an IPI, allocate
+ * one and get the corresponding data
+ */
+ if (!state->ipi_number) {
+ state->ipi_number = xive_native_alloc_irq();
+ if (state->ipi_number == 0) {
+ pr_err("Failed to allocate IRQ !\n");
+ rc = -ENXIO;
+ goto unlock;
+ }
+ xive_native_populate_irq_data(state->ipi_number,
+ &state->ipi_data);
+ pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
+ state->ipi_number, irq);
+ }
+
+ /* Restore LSI state */
+ if (val & KVM_XIVE_LEVEL_SENSITIVE) {
+ state->lsi = true;
+ if (val & KVM_XIVE_LEVEL_ASSERTED)
+ state->asserted = true;
+ pr_devel(" LSI ! Asserted=%d\n", state->asserted);
+ }
+
+ /* Mask IRQ to start with */
+ state->act_server = 0;
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+
+ /* Increment the number of valid sources and mark this one valid */
+ if (!state->valid)
+ xive->src_count++;
+ state->valid = true;
+
+ rc = 0;
+
+unlock:
+ arch_spin_unlock(&sb->lock);
+
+ return rc;
+}
+
+static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state,
+ u32 server, u8 priority, bool masked,
+ u32 eisn)
+{
+ struct kvm *kvm = xive->kvm;
+ u32 hw_num;
+ int rc = 0;
+
+ arch_spin_lock(&sb->lock);
+
+ if (state->act_server == server && state->act_priority == priority &&
+ state->eisn == eisn)
+ goto unlock;
+
+ pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
+ priority, server, masked, state->act_server,
+ state->act_priority);
+
+ kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+ if (priority != MASKED && !masked) {
+ rc = kvmppc_xive_select_target(kvm, &server, priority);
+ if (rc)
+ goto unlock;
+
+ state->act_priority = priority;
+ state->act_server = server;
+ state->eisn = eisn;
+
+ rc = xive_native_configure_irq(hw_num,
+ kvmppc_xive_vp(xive, server),
+ priority, eisn);
+ } else {
+ state->act_priority = MASKED;
+ state->act_server = 0;
+ state->eisn = 0;
+
+ rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
+ }
+
+unlock:
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
+ long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 src;
+ u64 kvm_cfg;
+ u32 server;
+ u8 priority;
+ bool masked;
+ u32 eisn;
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[src];
+
+ if (!state->valid)
+ return -EINVAL;
+
+ if (get_user(kvm_cfg, ubufp))
+ return -EFAULT;
+
+ pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);
+
+ priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
+ KVM_XIVE_SOURCE_PRIORITY_SHIFT;
+ server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
+ KVM_XIVE_SOURCE_SERVER_SHIFT;
+ masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
+ KVM_XIVE_SOURCE_MASKED_SHIFT;
+ eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
+ KVM_XIVE_SOURCE_EISN_SHIFT;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("invalid priority for queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+
+ return kvmppc_xive_native_update_source_config(xive, sb, state, server,
+ priority, masked, eisn);
+}
+
+static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
+ long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("%s irq=0x%lx", __func__, irq);
+
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[src];
+
+ rc = -EINVAL;
+
+ arch_spin_lock(&sb->lock);
+
+ if (state->valid) {
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+ xive_native_sync_source(hw_num);
+ rc = 0;
+ }
+
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+static int xive_native_validate_queue_size(u32 qshift)
+{
+ /*
+ * We only support 64K pages for the moment. This is also
+ * advertised in the DT property "ibm,xive-eq-sizes"
+ */
+ switch (qshift) {
+ case 0: /* EQ reset */
+ case 16:
+ return 0;
+ case 12:
+ case 21:
+ case 24:
+ default:
+ return -EINVAL;
+ }
+}
+
+static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+ long eq_idx, u64 addr)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ void __user *ubufp = (void __user *) addr;
+ u32 server;
+ u8 priority;
+ struct kvm_ppc_xive_eq kvm_eq;
+ int rc;
+ __be32 *qaddr = 0;
+ struct page *page;
+ struct xive_q *q;
+ gfn_t gfn;
+ unsigned long page_size;
+
+ /*
+ * Demangle priority/server tuple from the EQ identifier
+ */
+ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+ KVM_XIVE_EQ_PRIORITY_SHIFT;
+ server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+ KVM_XIVE_EQ_SERVER_SHIFT;
+
+ if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
+ return -EFAULT;
+
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_err("Can't find server %d\n", server);
+ return -ENOENT;
+ }
+ xc = vcpu->arch.xive_vcpu;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("Trying to restore invalid queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+ q = &xc->queues[priority];
+
+ pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+ __func__, server, priority, kvm_eq.flags,
+ kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+ /*
+ * sPAPR specifies an "Unconditional Notify (n) flag" for the
+ * H_INT_SET_QUEUE_CONFIG hcall which forces notification
+ * without using the coalescing mechanisms provided by the
+ * XIVE END ESBs. This is required on KVM as notification
+ * using the END ESBs is not supported.
+ */
+ if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
+ pr_err("invalid flags %d\n", kvm_eq.flags);
+ return -EINVAL;
+ }
+
+ rc = xive_native_validate_queue_size(kvm_eq.qshift);
+ if (rc) {
+ pr_err("invalid queue size %d\n", kvm_eq.qshift);
+ return rc;
+ }
+
+ /* reset queue and disable queueing */
+ if (!kvm_eq.qshift) {
+ q->guest_qaddr = 0;
+ q->guest_qshift = 0;
+
+ rc = xive_native_configure_queue(xc->vp_id, q, priority,
+ NULL, 0, true);
+ if (rc) {
+ pr_err("Failed to reset queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+ return rc;
+ }
+
+ if (q->qpage) {
+ put_page(virt_to_page(q->qpage));
+ q->qpage = NULL;
+ }
+
+ return 0;
+ }
+
+ if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
+ pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
+ 1ull << kvm_eq.qshift);
+ return -EINVAL;
+ }
+
+ gfn = gpa_to_gfn(kvm_eq.qaddr);
+ page = gfn_to_page(kvm, gfn);
+ if (is_error_page(page)) {
+ pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
+ return -EINVAL;
+ }
+
+ page_size = kvm_host_page_size(kvm, gfn);
+ if (1ull << kvm_eq.qshift > page_size) {
+ pr_warn("Incompatible host page size %lx!\n", page_size);
+ return -EINVAL;
+ }
+
+ qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
+
+ /*
+ * Back up the queue page guest address so that the EQ page
+ * can be marked dirty for migration.
+ */
+ q->guest_qaddr = kvm_eq.qaddr;
+ q->guest_qshift = kvm_eq.qshift;
+
+ /*
+ * Unconditional Notification is forced by default at the
+ * OPAL level because the use of END ESBs is not supported by
+ * Linux.
+ */
+ rc = xive_native_configure_queue(xc->vp_id, q, priority,
+ (__be32 *) qaddr, kvm_eq.qshift, true);
+ if (rc) {
+ pr_err("Failed to configure queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+ put_page(page);
+ return rc;
+ }
+
+ /*
+ * Only restore the queue state when needed. When doing the
+ * H_INT_SET_SOURCE_CONFIG hcall, it should not.
+ */
+ if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
+ rc = xive_native_set_queue_state(xc->vp_id, priority,
+ kvm_eq.qtoggle,
+ kvm_eq.qindex);
+ if (rc)
+ goto error;
+ }
+
+ rc = kvmppc_xive_attach_escalation(vcpu, priority,
+ xive->single_escalation);
+error:
+ if (rc)
+ kvmppc_xive_native_cleanup_queue(vcpu, priority);
+ return rc;
+}
+
+static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
+ long eq_idx, u64 addr)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ struct xive_q *q;
+ void __user *ubufp = (u64 __user *) addr;
+ u32 server;
+ u8 priority;
+ struct kvm_ppc_xive_eq kvm_eq;
+ u64 qaddr;
+ u64 qshift;
+ u64 qeoi_page;
+ u32 escalate_irq;
+ u64 qflags;
+ int rc;
+
+ /*
+ * Demangle priority/server tuple from the EQ identifier
+ */
+ priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+ KVM_XIVE_EQ_PRIORITY_SHIFT;
+ server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+ KVM_XIVE_EQ_SERVER_SHIFT;
+
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_err("Can't find server %d\n", server);
+ return -ENOENT;
+ }
+ xc = vcpu->arch.xive_vcpu;
+
+ if (priority != xive_prio_from_guest(priority)) {
+ pr_err("invalid priority for queue %d for VCPU %d\n",
+ priority, server);
+ return -EINVAL;
+ }
+ q = &xc->queues[priority];
+
+ memset(&kvm_eq, 0, sizeof(kvm_eq));
+
+ if (!q->qpage)
+ return 0;
+
+ rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
+ &qeoi_page, &escalate_irq, &qflags);
+ if (rc)
+ return rc;
+
+ kvm_eq.flags = 0;
+ if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
+ kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
+
+ kvm_eq.qshift = q->guest_qshift;
+ kvm_eq.qaddr = q->guest_qaddr;
+
+ rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
+ &kvm_eq.qindex);
+ if (rc)
+ return rc;
+
+ pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+ __func__, server, priority, kvm_eq.flags,
+ kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+ if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
+ return -EFAULT;
+
+ return 0;
+}
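
As a sketch of the userspace side of the two EQ_CONFIG handlers above, the EQ identifier placed in attr->attr packs the server and priority exactly as demangled by the kernel, while attr->addr points at a struct kvm_ppc_xive_eq. The KVM_XIVE_EQ_* names and the struct are assumed to come from the powerpc uapi header added elsewhere in this series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read back the EQ configuration of one (server, priority) pair (sketch). */
static int xive_get_eq_config(int xive_fd, uint32_t server, uint8_t prio,
                              struct kvm_ppc_xive_eq *eq)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_XIVE_GRP_EQ_CONFIG,
                /* the kernel demangles this exactly as in the handlers above */
                .attr  = ((uint64_t)server << KVM_XIVE_EQ_SERVER_SHIFT) |
                         ((uint64_t)prio << KVM_XIVE_EQ_PRIORITY_SHIFT),
                .addr  = (uint64_t)(uintptr_t)eq,
        };

        return ioctl(xive_fd, KVM_GET_DEVICE_ATTR, &attr);
}
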
+
+static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+ if (!state->valid)
+ continue;
+
+ if (state->act_priority == MASKED)
+ continue;
+
+ state->eisn = 0;
+ state->act_server = 0;
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+ if (state->pt_number) {
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->pt_number,
+ 0, MASKED, 0);
+ }
+ }
+}
+
+static int kvmppc_xive_reset(struct kvmppc_xive *xive)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ pr_devel("%s\n", __func__);
+
+ mutex_lock(&kvm->lock);
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int prio;
+
+ if (!xc)
+ continue;
+
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+
+ /* Single escalation, no queue 7 */
+ if (prio == 7 && xive->single_escalation)
+ break;
+
+ if (xc->esc_virq[prio]) {
+ free_irq(xc->esc_virq[prio], vcpu);
+ irq_dispose_mapping(xc->esc_virq[prio]);
+ kfree(xc->esc_virq_names[prio]);
+ xc->esc_virq[prio] = 0;
+ }
+
+ kvmppc_xive_native_cleanup_queue(vcpu, prio);
+ }
+ }
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_reset_sources(sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
+ mutex_unlock(&kvm->lock);
+
+ return 0;
+}
+
+static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
+{
+ int j;
+
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+ struct xive_irq_data *xd;
+ u32 hw_num;
+
+ if (!state->valid)
+ continue;
+
+ /*
+ * The struct kvmppc_xive_irq_state reflects the state
+ * of the EAS configuration and not the state of the
+ * source. The source is masked by setting the PQ bits to
+ * '-Q', which is what is done before calling
+ * the KVM_DEV_XIVE_EQ_SYNC control.
+ *
+ * If a source EAS is configured, OPAL syncs the XIVE
+ * IC of the source and the XIVE IC of the previous
+ * target if any.
+ *
+ * So it should be fine ignoring MASKED sources as
+ * they have been synced already.
+ */
+ if (state->act_priority == MASKED)
+ continue;
+
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+ xive_native_sync_source(hw_num);
+ xive_native_sync_queue(hw_num);
+ }
+}
+
+static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int prio;
+
+ if (!xc)
+ return -ENOENT;
+
+ for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+ struct xive_q *q = &xc->queues[prio];
+
+ if (!q->qpage)
+ continue;
+
+ /* Mark EQ page dirty for migration */
+ mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
+ }
+ return 0;
+}
+
+static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ pr_devel("%s\n", __func__);
+
+ mutex_lock(&kvm->lock);
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_native_sync_sources(sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmppc_xive_native_vcpu_eq_sync(vcpu);
+ }
+ mutex_unlock(&kvm->lock);
+
+ return 0;
+}
+
+static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XIVE_RESET:
+ return kvmppc_xive_reset(xive);
+ case KVM_DEV_XIVE_EQ_SYNC:
+ return kvmppc_xive_native_eq_sync(xive);
+ }
+ break;
+ case KVM_DEV_XIVE_GRP_SOURCE:
+ return kvmppc_xive_native_set_source(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+ return kvmppc_xive_native_set_source_config(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return kvmppc_xive_native_set_queue_config(xive, attr->attr,
+ attr->addr);
+ case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+ return kvmppc_xive_native_sync_source(xive, attr->attr,
+ attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return kvmppc_xive_native_get_queue_config(xive, attr->attr,
+ attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_XIVE_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_XIVE_RESET:
+ case KVM_DEV_XIVE_EQ_SYNC:
+ return 0;
+ }
+ break;
+ case KVM_DEV_XIVE_GRP_SOURCE:
+ case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+ case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+ if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
+ attr->attr < KVMPPC_XIVE_NR_IRQS)
+ return 0;
+ break;
+ case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+ return 0;
+ }
+ return -ENXIO;
+}
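
A sketch of how the control group above might be used before migration: RESET and EQ_SYNC carry no payload, so userspace can probe with KVM_HAS_DEVICE_ATTR and then trigger the control with KVM_SET_DEVICE_ATTR. Same assumptions as the earlier snippets.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Flush the sources and mark the EQ pages dirty before migration (sketch). */
static int xive_eq_sync(int xive_fd)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_XIVE_GRP_CTRL,
                .attr  = KVM_DEV_XIVE_EQ_SYNC,  /* no payload: addr is left unused */
        };

        if (ioctl(xive_fd, KVM_HAS_DEVICE_ATTR, &attr))
                return -1;                      /* control not known to this kernel */

        return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
}
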
+
+/*
+ * Called when device fd is closed
+ */
+static void kvmppc_xive_native_release(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ int i;
+ int was_ready;
+
+ debugfs_remove(xive->dentry);
+
+ pr_devel("Releasing xive native device\n");
+
+ /*
+ * Clearing mmu_ready temporarily while holding kvm->lock
+ * is a way of ensuring that no vcpus can enter the guest
+ * until we drop kvm->lock. Doing kick_all_cpus_sync()
+ * ensures that any vcpu executing inside the guest has
+ * exited the guest. Once kick_all_cpus_sync() has finished,
+ * we know that no vcpu can be executing the XIVE push or
+ * pull code or accessing the XIVE MMIO regions.
+ *
+ * Since this is the device release function, we know that
+ * userspace does not have any open fd or mmap referring to
+ * the device. Therefore none of the
+ * device attribute set/get, mmap, or page fault functions
+ * can be executing concurrently, and similarly, the
+ * connect_vcpu and set/clr_mapped functions also cannot
+ * be running.
+ */
+ was_ready = kvm->arch.mmu_ready;
+ kvm->arch.mmu_ready = 0;
+ kick_all_cpus_sync();
+
+ /*
+ * We should clean up the vCPU interrupt presenters first.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /*
+ * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+ * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
+ */
+ mutex_lock(&vcpu->mutex);
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+ kvm->arch.xive = NULL;
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ if (xive->src_blocks[i])
+ kvmppc_xive_free_sources(xive->src_blocks[i]);
+ kfree(xive->src_blocks[i]);
+ xive->src_blocks[i] = NULL;
+ }
+
+ if (xive->vp_base != XIVE_INVALID_VP)
+ xive_native_free_vp_block(xive->vp_base);
+
+ kvm->arch.mmu_ready = was_ready;
+
+ /*
+ * A reference to the kvmppc_xive pointer is now kept under
+ * the xive_devices struct of the machine for reuse. For now
+ * it is freed when the VM is destroyed, until we fix all the
+ * execution paths.
+ */
+
+ kfree(dev);
+}
+
+/*
+ * Create a XIVE device. kvm->lock is held.
+ */
+static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xive *xive;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ pr_devel("Creating xive native device\n");
+
+ if (kvm->arch.xive)
+ return -EEXIST;
+
+ xive = kvmppc_xive_get_device(kvm, type);
+ if (!xive)
+ return -ENOMEM;
+
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+ kvm->arch.xive = xive;
+ mutex_init(&xive->mapping_lock);
+
+ /*
+ * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
+ * a default. Getting the max number of CPUs the VM was
+ * configured with would improve our usage of the XIVE VP space.
+ */
+ xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+ pr_devel("VP_Base=%x\n", xive->vp_base);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ ret = -ENXIO;
+
+ xive->single_escalation = xive_native_has_single_escalation();
+ xive->ops = &kvmppc_xive_native_ops;
+
+ if (ret)
+ kfree(xive);
+
+ return ret;
+}
+
+/*
+ * Interrupt Pending Buffer (IPB) offset
+ */
+#define TM_IPB_SHIFT 40
+#define TM_IPB_MASK (((u64) 0xFF) << TM_IPB_SHIFT)
+
+int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u64 opal_state;
+ int rc;
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return -EPERM;
+
+ if (!xc)
+ return -ENOENT;
+
+ /* Thread context registers. We only care about IPB and CPPR */
+ val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;
+
+ /* Get the VP state from OPAL */
+ rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
+ if (rc)
+ return rc;
+
+ /*
+ * Capture the backup of IPB register in the NVT structure and
+ * merge it in our KVM VP state.
+ */
+ val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);
+
+ pr_devel("%s NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
+ __func__,
+ vcpu->arch.xive_saved_state.nsr,
+ vcpu->arch.xive_saved_state.cppr,
+ vcpu->arch.xive_saved_state.ipb,
+ vcpu->arch.xive_saved_state.pipr,
+ vcpu->arch.xive_saved_state.w01,
+ (u32) vcpu->arch.xive_cam_word, opal_state);
+
+ return 0;
+}
+
+int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+
+ pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
+ val->xive_timaval[0], val->xive_timaval[1]);
+
+ if (!kvmppc_xive_enabled(vcpu))
+ return -EPERM;
+
+ if (!xc || !xive)
+ return -ENOENT;
+
+ /* We can't update the state of a "pushed" VCPU */
+ if (WARN_ON(vcpu->arch.xive_pushed))
+ return -EBUSY;
+
+ /*
+ * Restore the thread context registers. IPB and CPPR should
+ * be the only ones that matter.
+ */
+ vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];
+
+ /*
+ * There is no need to restore the XIVE internal state (IPB
+ * stored in the NVT) as the IPB register was merged in KVM VP
+ * state when captured.
+ */
+ return 0;
+}
+
+static int xive_native_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xive *xive = m->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ unsigned int i;
+
+ if (!kvm)
+ return 0;
+
+ seq_puts(m, "=========\nVCPU state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
+ xc->server_num,
+ vcpu->arch.xive_saved_state.nsr,
+ vcpu->arch.xive_saved_state.cppr,
+ vcpu->arch.xive_saved_state.ipb,
+ vcpu->arch.xive_saved_state.pipr,
+ vcpu->arch.xive_saved_state.w01,
+ (u32) vcpu->arch.xive_cam_word);
+
+ kvmppc_xive_debug_show_queues(m, vcpu);
+ }
+
+ return 0;
+}
+
+static int xive_native_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xive_native_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_native_debug_fops = {
+ .open = xive_native_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xive_native_debugfs_init(struct kvmppc_xive *xive)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
+ xive, &xive_native_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static void kvmppc_xive_native_init(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+ /* Register some debug interfaces */
+ xive_native_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_native_ops = {
+ .name = "kvm-xive-native",
+ .create = kvmppc_xive_native_create,
+ .init = kvmppc_xive_native_init,
+ .release = kvmppc_xive_native_release,
+ .set_attr = kvmppc_xive_native_set_attr,
+ .get_attr = kvmppc_xive_native_get_attr,
+ .has_attr = kvmppc_xive_native_has_attr,
+ .mmap = kvmppc_xive_native_mmap,
+};
+
+void kvmppc_xive_native_init_module(void)
+{
+ ;
+}
+
+void kvmppc_xive_native_exit_module(void)
+{
+ ;
+}
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 033363d6e764..0737acfd17f1 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -130,24 +130,14 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
*/
prio = ffs(pending) - 1;
- /*
- * If the most favoured prio we found pending is less
- * favored (or equal) than a pending IPI, we return
- * the IPI instead.
- *
- * Note: If pending was 0 and mfrr is 0xff, we will
- * not spurriously take an IPI because mfrr cannot
- * then be smaller than cppr.
- */
- if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- break;
- }
-
/* Don't scan past the guest cppr */
- if (prio >= xc->cppr || prio > 7)
+ if (prio >= xc->cppr || prio > 7) {
+ if (xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ }
break;
+ }
/* Grab queue and pointers */
q = &xc->queues[prio];
@@ -184,9 +174,12 @@ skip_ipi:
* been set and another occurrence of the IPI will trigger.
*/
if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
- if (scan_type == scan_fetch)
+ if (scan_type == scan_fetch) {
GLUE(X_PFX,source_eoi)(xc->vp_ipi,
&xc->vp_ipi_data);
+ q->idx = idx;
+ q->toggle = toggle;
+ }
/* Loop back on same queue with updated idx/toggle */
#ifdef XIVE_RUNTIME_CHECKS
WARN_ON(hirq && hirq != XICS_IPI);
@@ -199,32 +192,41 @@ skip_ipi:
if (hirq == XICS_DUMMY)
goto skip_ipi;
- /* If fetching, update queue pointers */
- if (scan_type == scan_fetch) {
- q->idx = idx;
- q->toggle = toggle;
- }
-
- /* Something found, stop searching */
- if (hirq)
- break;
-
- /* Clear the pending bit on the now empty queue */
- pending &= ~(1 << prio);
+ /* Clear the pending bit if the queue is now empty */
+ if (!hirq) {
+ pending &= ~(1 << prio);
- /*
- * Check if the queue count needs adjusting due to
- * interrupts being moved away.
- */
- if (atomic_read(&q->pending_count)) {
- int p = atomic_xchg(&q->pending_count, 0);
- if (p) {
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(p > atomic_read(&q->count));
+ WARN_ON(p > atomic_read(&q->count));
#endif
- atomic_sub(p, &q->count);
+ atomic_sub(p, &q->count);
+ }
}
}
+
+ /*
+ * If the most favoured prio we found pending is less
+ * favoured than (or equal to) a pending IPI, we return
+ * the IPI instead.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
}
/* If we are just taking a "peek", do nothing else */
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 24296f4cadc6..e0af53fd78c5 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -783,7 +783,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+ ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
if (ret < 0)
goto free_pages;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8885377ec3e0..3393b166817a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -570,6 +570,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PPC_GET_CPU_CHAR:
r = 1;
break;
+#ifdef CONFIG_KVM_XIVE
+ case KVM_CAP_PPC_IRQ_XIVE:
+ /*
+ * We need XIVE to be enabled on the platform (implies
+ * a POWER9 processor) and the PowerNV platform, as
+ * nested is not yet supported.
+ */
+ r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
+ break;
+#endif
case KVM_CAP_PPC_ALLOC_HTAB:
r = hv_enabled;
@@ -644,9 +654,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = num_online_cpus();
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
@@ -753,6 +760,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
else
kvmppc_xics_free_icp(vcpu);
break;
+ case KVMPPC_IRQ_XIVE:
+ kvmppc_xive_native_cleanup_vcpu(vcpu);
+ break;
}
kvmppc_core_vcpu_free(vcpu);
@@ -1941,6 +1951,30 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
}
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+ case KVM_CAP_PPC_IRQ_XIVE: {
+ struct fd f;
+ struct kvm_device *dev;
+
+ r = -EBADF;
+ f = fdget(cap->args[0]);
+ if (!f.file)
+ break;
+
+ r = -ENXIO;
+ if (!xive_enabled())
+ break;
+
+ r = -EPERM;
+ dev = kvm_device_from_filp(f.file);
+ if (dev)
+ r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
+ cap->args[1]);
+
+ fdput(f);
+ break;
+ }
+#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_FWNMI:
r = -EINVAL;
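
The matching userspace call for the capability handled above would look roughly as follows: KVM_CAP_PPC_IRQ_XIVE is enabled on the vcpu fd, with args[0] holding the XIVE native device fd and args[1] the server number passed to kvmppc_xive_native_connect_vcpu(). This is a sketch of the expected calling convention, not code from the patch.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Connect one vcpu to an already-created XIVE native device (sketch). */
static int xive_connect_vcpu(int vcpu_fd, int xive_fd, uint32_t server)
{
        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_PPC_IRQ_XIVE,
                .args = { (uint64_t)xive_fd, server },  /* args[0]: device fd, args[1]: server */
        };

        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
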
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 79396e184bca..c55f9c27bf79 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -8,9 +8,22 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
-obj-y += string.o alloc.o code-patching.o feature-fixups.o
+KASAN_SANITIZE_code-patching.o := n
+KASAN_SANITIZE_feature-fixups.o := n
-obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o
+ifdef CONFIG_KASAN
+CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
+CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
+endif
+
+obj-y += alloc.o code-patching.o feature-fixups.o
+
+ifndef CONFIG_KASAN
+obj-y += string.o memcmp_$(BITS).o
+obj-$(CONFIG_PPC32) += strlen_32.o
+endif
+
+obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
@@ -34,7 +47,7 @@ obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \
test_emulate_step_exec_instr.o
obj-y += checksum_$(BITS).o checksum_wrappers.o \
- string_$(BITS).o memcmp_$(BITS).o
+ string_$(BITS).o
obj-y += sstep.o ldstfp.o quad.o
obj64-y += quad.o
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index 890d4ddd91d6..bb9307ce2440 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
unsigned int csum;
might_sleep();
+ allow_read_from_user(src, len);
*err_ptr = 0;
@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
}
out:
+ prevent_read_from_user(src, len);
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
unsigned int csum;
might_sleep();
+ allow_write_to_user(dst, len);
*err_ptr = 0;
@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
}
out:
+ prevent_write_to_user(dst, len);
return (__force __wsum)csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 506413a2c25e..90c9d4a1e36f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -15,7 +15,6 @@
#include <linux/cpuhotplug.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/kprobes.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -26,9 +25,9 @@
static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
unsigned int *patch_addr)
{
- int err;
+ int err = 0;
- __put_user_size(instr, patch_addr, 4, err);
+ __put_user_asm(instr, patch_addr, err, "stw");
if (err)
return err;
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index ba66846fe973..d5642481fb98 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -14,6 +14,7 @@
#include <asm/ppc_asm.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
+#include <asm/kasan.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
@@ -68,6 +69,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
+#ifndef CONFIG_KASAN
_GLOBAL(memset16)
rlwinm. r0 ,r5, 31, 1, 31
addi r6, r3, -4
@@ -81,6 +83,7 @@ _GLOBAL(memset16)
sth r4, 4(r6)
blr
EXPORT_SYMBOL(memset16)
+#endif
/*
* Use dcbz on the complete cache lines in the destination
@@ -91,7 +94,7 @@ EXPORT_SYMBOL(memset16)
* We therefore skip the optimised bloc that uses dcbz. This jump is
* replaced by a nop once cache is active. This is done in machine_init()
*/
-_GLOBAL(memset)
+_GLOBAL_KASAN(memset)
cmplwi 0,r5,4
blt 7f
@@ -151,6 +154,7 @@ _GLOBAL(memset)
bdnz 9b
blr
EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL_KASAN(memset)
/*
* This version uses dcbz on the complete cache lines in the
@@ -163,12 +167,12 @@ EXPORT_SYMBOL(memset)
* We therefore jump to generic_memcpy which doesn't use dcbz. This jump is
* replaced by a nop once cache is active. This is done in machine_init()
*/
-_GLOBAL(memmove)
+_GLOBAL_KASAN(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
/* fall through */
-_GLOBAL(memcpy)
+_GLOBAL_KASAN(memcpy)
1: b generic_memcpy
patch_site 1b, patch__memcpy_nocache
@@ -244,6 +248,8 @@ _GLOBAL(memcpy)
65: blr
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL_KASAN(memcpy)
+EXPORT_SYMBOL_KASAN(memmove)
generic_memcpy:
srwi. r7,r5,3
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 3c3be02f33b7..7f6bd031c306 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -12,7 +12,9 @@
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>
+#include <asm/kasan.h>
+#ifndef CONFIG_KASAN
_GLOBAL(__memset16)
rlwimi r4,r4,16,0,15
/* fall through */
@@ -29,8 +31,9 @@ _GLOBAL(__memset64)
EXPORT_SYMBOL(__memset16)
EXPORT_SYMBOL(__memset32)
EXPORT_SYMBOL(__memset64)
+#endif
-_GLOBAL(memset)
+_GLOBAL_KASAN(memset)
neg r0,r3
rlwimi r4,r4,8,16,23
andi. r0,r0,7 /* # bytes to be 8-byte aligned */
@@ -96,8 +99,9 @@ _GLOBAL(memset)
stb r4,0(r6)
blr
EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL_KASAN(memset)
-_GLOBAL_TOC(memmove)
+_GLOBAL_TOC_KASAN(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
b memcpy
@@ -139,3 +143,4 @@ _GLOBAL(backwards_memcpy)
mtctr r7
b 1b
EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL_KASAN(memmove)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 273ea67e60a1..25c3772c1dfb 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -11,6 +11,7 @@
#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
+#include <asm/kasan.h>
#ifndef SELFTEST_CASE
/* For big-endian, 0 == most CPUs, 1 == POWER6, 2 == Cell */
@@ -18,7 +19,7 @@
#endif
.align 7
-_GLOBAL_TOC(memcpy)
+_GLOBAL_TOC_KASAN(memcpy)
BEGIN_FTR_SECTION
#ifdef __LITTLE_ENDIAN__
cmpdi cr7,r5,0
@@ -230,3 +231,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
#endif
EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL_KASAN(memcpy)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 3c1bd9fa23cd..0f499db315d6 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,53 +5,18 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(BITS).o pgtable_$(BITS).o \
+ pgtable-frag.o \
init-common.o mmu_context.o drmem.o
-obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
- tlb_nohash_low.o
-obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
-hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
-obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
-obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \
- $(hash64-y) mmu_context_book3s64.o \
- pgtable-book3s64.o pgtable-frag.o
-obj-$(CONFIG_PPC32) += pgtable-frag.o
-obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
-obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
-obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o
-ifdef CONFIG_PPC_BOOK3S_64
-obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
-obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
-endif
-obj-$(CONFIG_40x) += 40x_mmu.o
-obj-$(CONFIG_44x) += 44x_mmu.o
-obj-$(CONFIG_PPC_8xx) += 8xx_mmu.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
+obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
+obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
+obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
-obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
-obj-y += hugetlbpage.o
-ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_BOOK3S_64) += hugetlbpage-hash64.o
-obj-$(CONFIG_PPC_RADIX_MMU) += hugetlbpage-radix.o
-obj-$(CONFIG_PPC_BOOK3E_MMU) += hugetlbpage-book3e.o
-endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hugepage-hash64.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
-obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o
obj-$(CONFIG_PPC_PTDUMP) += ptdump/
-obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
-
-# Disable kcov instrumentation on sensitive code
-# This is necessary for booting with kcov enabled on book3e machines
-KCOV_INSTRUMENT_tlb_nohash.o := n
-KCOV_INSTRUMENT_fsl_booke_mmu.o := n
-
-# Instrumenting the SLB fault path can lead to duplicate SLB entries
-KCOV_INSTRUMENT_slb.o := n
+obj-$(CONFIG_KASAN) += kasan/
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
new file mode 100644
index 000000000000..1732eaa740a9
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KASAN_SANITIZE_mmu.o := n
+
+ifdef CONFIG_KASAN
+CFLAGS_mmu.o += -DDISABLE_BRANCH_PROFILING
+endif
+
+obj-y += mmu.o hash_low.o mmu_context.o tlb.o
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/book3s32/hash_low.S
index a6c491f18a04..8366c2abeafc 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -309,13 +309,13 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
- rlwinm r8,r5,32-10,31,31 /* _PAGE_RW -> PP lsb */
- rlwinm r0,r5,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
+ rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
+ rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */
and r8,r8,r0 /* writable if _RW & _DIRTY */
rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
ori r8,r8,0xe04 /* clear out reserved bits */
- andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
+ andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
@@ -539,7 +539,8 @@ _GLOBAL(flush_hash_pages)
#ifdef CONFIG_SMP
lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha
addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
- lwz r8,TASK_CPU(r2)
+ tophys (r8, r2)
+ lwz r8, TASK_CPU(r8)
oris r8,r8,9
10: lwarx r0,0,r9
cmpi 0,r0,0
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/book3s32/mmu.c
index f29d2f118b44..fc073cb2c517 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -34,11 +34,12 @@
#include <asm/code-patching.h>
#include <asm/sections.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
-struct hash_pte *Hash, *Hash_end;
-unsigned long Hash_size, Hash_mask;
+struct hash_pte *Hash;
+static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
+static unsigned int hash_mb, hash_mb2;
struct ppc_bat BATS[8][2]; /* 8 pairs of IBAT, DBAT */
@@ -98,10 +99,20 @@ static int find_free_bat(void)
return -1;
}
+/*
+ * This function calculates the size of the largest block usable to map the
+ * beginning of an area based on the start address and size of that area:
+ * - max block size is 8M on 601 and 256M on other 6xx.
+ * - base address must be aligned to the block size. So the maximum block size
+ * is identified by the lowest bit set to 1 in the base address (for instance
+ * if base is 0x16000000, max size is 0x02000000).
+ * - block size has to be a power of two. This is calculated by finding the
+ * highest bit set to 1.
+ */
static unsigned int block_size(unsigned long base, unsigned long top)
{
unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20;
- unsigned int base_shift = (fls(base) - 1) & 31;
+ unsigned int base_shift = (ffs(base) - 1) & 31;
unsigned int block_shift = (fls(top - base) - 1) & 31;
return min3(max_size, 1U << base_shift, 1U << block_shift);
@@ -157,7 +168,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
- int done;
+ unsigned long done;
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
if (__map_without_bats) {
@@ -169,10 +180,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return __mmu_mapin_ram(base, top);
done = __mmu_mapin_ram(base, border);
- if (done != border - base)
+ if (done != border)
return done;
- return done + __mmu_mapin_ram(border, top);
+ return __mmu_mapin_ram(border, top);
}
void mmu_mark_initmem_nx(void)
@@ -308,7 +319,6 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
*/
void __init MMU_init_hw(void)
{
- unsigned int hmask, mb, mb2;
unsigned int n_hpteg, lg_n_hpteg;
if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
@@ -345,26 +355,34 @@ void __init MMU_init_hw(void)
__func__, Hash_size, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
- Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
+ pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
+ (unsigned long long)(total_memory >> 20), Hash_size >> 10);
- printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n",
- (unsigned long long)(total_memory >> 20), Hash_size >> 10, Hash);
+ Hash_mask = n_hpteg - 1;
+ hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
+ if (lg_n_hpteg > 16)
+ hash_mb2 = 16 - LG_HPTEG_SIZE;
+}
+
+void __init MMU_init_hw_patch(void)
+{
+ unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+
+ if (ppc_md.progress)
+ ppc_md.progress("hash:patch", 0x345);
+ if (ppc_md.progress)
+ ppc_md.progress("hash:done", 0x205);
+
+ /* WARNING: Make sure nothing can trigger a KASAN check past this point */
/*
* Patch up the instructions in hashtable.S:create_hpte
*/
- if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
- Hash_mask = n_hpteg - 1;
- hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
- mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
- if (lg_n_hpteg > 16)
- mb2 = 16 - LG_HPTEG_SIZE;
-
modify_instruction_site(&patch__hash_page_A0, 0xffff,
((unsigned int)Hash - PAGE_OFFSET) >> 16);
- modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
- modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
+ modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
+ modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
@@ -373,11 +391,9 @@ void __init MMU_init_hw(void)
*/
modify_instruction_site(&patch__flush_hash_A0, 0xffff,
((unsigned int)Hash - PAGE_OFFSET) >> 16);
- modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6);
- modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6);
+ modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
+ modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
-
- if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
@@ -394,3 +410,33 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
else /* Anything else has 256M mapped */
memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
}
+
+void __init print_system_hash_info(void)
+{
+ pr_info("Hash_size = 0x%lx\n", Hash_size);
+ if (Hash_mask)
+ pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+}
+
+#ifdef CONFIG_PPC_KUEP
+void __init setup_kuep(bool disabled)
+{
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ if (cpu_has_feature(CPU_FTR_601))
+ pr_warn("KUEP is not working on powerpc 601 (No NX bit in Seg Regs)\n");
+
+ if (disabled)
+ pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
+}
+#endif
+
+#ifdef CONFIG_PPC_KUAP
+void __init setup_kuap(bool disabled)
+{
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ if (disabled)
+ pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
+}
+#endif
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 921c1e33e941..921c1e33e941 100644
--- a/arch/powerpc/mm/mmu_context_hash32.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/book3s32/tlb.c
index cf8472cf3d59..8d56f0417f87 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -32,7 +32,7 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
/*
* Called when unmapping pages to flush entries from the TLB/hash table.
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
new file mode 100644
index 000000000000..974b4fc19f4f
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := $(NO_MINIMAL_TOC)
+
+CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
+
+obj-y += hash_pgtable.o hash_utils.o slb.o \
+ mmu_context.o pgtable.o hash_tlb.o
+obj-$(CONFIG_PPC_NATIVE) += hash_native.o
+obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
+obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
+obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
+obj-$(CONFIG_PPC_SPLPAR) += vphn.o
+obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
+ifdef CONFIG_HUGETLB_PAGE
+obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
+endif
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
+obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o
+obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
+
+# Instrumenting the SLB fault path can lead to duplicate SLB entries
+KCOV_INSTRUMENT_slb.o := n
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/book3s64/hash_4k.c
index 6fa6765a10eb..22e787123cdf 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/book3s64/hash_4k.c
@@ -1,6 +1,6 @@
/*
* Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU Lesser General Public License
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/book3s64/hash_64k.c
index 3afa253d7f52..7084ce2951e6 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/book3s64/hash_64k.c
@@ -1,6 +1,6 @@
/*
* Copyright IBM Corporation, 2015
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU Lesser General Public License
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index dfbc3b32f09b..440823797de7 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -1,6 +1,6 @@
/*
* Copyright IBM Corporation, 2013
- * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
index b0d9209d9a86..eefa89c6117b 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
@@ -15,6 +15,9 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
+unsigned int hpage_shift;
+EXPORT_SYMBOL(hpage_shift);
+
extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rlags,
unsigned long vflags, int psize, int ssize);
@@ -34,7 +37,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* Search the Linux page table for a match with va */
vpn = hpt_vpn(ea, vsid, ssize);
- /* At this point, we have a pte (old_pte) which can be used to build
+ /*
+ * At this point, we have a pte (old_pte) which can be used to build
* or update an HPTE. There are 2 cases:
*
* 1. There is a valid (present) pte with no associated HPTE (this is
@@ -55,8 +59,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
if (unlikely(!check_pte_access(access, old_pte)))
return 1;
- /* Try to lock the PTE, add ACCESSED and DIRTY if it was
- * a write access */
+ /*
+ * Try to lock the PTE, add ACCESSED and DIRTY if it was
+ * a write access
+ */
new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
if (access & _PAGE_WRITE)
new_pte |= _PAGE_DIRTY;
@@ -74,8 +80,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
rpte = __real_pte(__pte(old_pte), ptep, offset);
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- /* No CPU has hugepages but lacks no execute, so we
- * don't need to worry about that case */
+ /*
+ * No CPU has hugepages but lacks no execute, so we
+ * don't need to worry about that case
+ */
rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
/* Check if pte already has an hpte (case 2) */
@@ -145,3 +153,16 @@ void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr
old_pte, pte);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
+
+void hugetlbpage_init_default(void)
+{
+ /* Set the default large page size. Currently we pick 16M, 1M or 2M
+ * depending on what is available
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ hpage_shift = mmu_psize_defs[MMU_PAGE_16M].shift;
+ else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+ hpage_shift = mmu_psize_defs[MMU_PAGE_1M].shift;
+ else if (mmu_psize_defs[MMU_PAGE_2M].shift)
+ hpage_shift = mmu_psize_defs[MMU_PAGE_2M].shift;
+}
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/book3s64/hash_native.c
index aaa28fd918fe..aaa28fd918fe 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index c08d49046a96..1fd025dba4a3 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -19,7 +19,7 @@
#include <asm/mmu.h>
#include <asm/tlb.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
@@ -112,9 +112,16 @@ int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys)
{
- int rc = htab_bolt_mapping(start, start + page_size, phys,
- pgprot_val(PAGE_KERNEL),
- mmu_vmemmap_psize, mmu_kernel_ssize);
+ int rc;
+
+ if ((start + page_size) >= H_VMEMMAP_END) {
+ pr_warn("Outside the supported range\n");
+ return -1;
+ }
+
+ rc = htab_bolt_mapping(start, start + page_size, phys,
+ pgprot_val(PAGE_KERNEL),
+ mmu_vmemmap_psize, mmu_kernel_ssize);
if (rc < 0) {
int rc2 = htab_remove_mapping(start, start + page_size,
mmu_vmemmap_psize,
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 87d71dd25441..d4f0101447b1 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -55,7 +55,8 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
i = batch->index;
- /* Get page size (maybe move back to caller).
+ /*
+ * Get page size (maybe move back to caller).
*
* NOTE: when using special 64K mappings in 4K environment like
* for SPEs, we obtain the page size from the slice, which thus
@@ -77,10 +78,12 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
#endif
} else {
psize = pte_pagesize_index(mm, addr, pte);
- /* Mask the address for the standard page size. If we
+ /*
+ * Mask the address for the standard page size. If we
* have a 64k page kernel, but the hardware does not
* support 64k pages, this might be different from the
- * hardware page size encoded in the slice table. */
+ * hardware page size encoded in the slice table.
+ */
addr &= PAGE_MASK;
offset = PTRS_PER_PTE;
}
@@ -161,7 +164,8 @@ void hash__tlb_flush(struct mmu_gather *tlb)
{
struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);
- /* If there's a TLB batch pending, then we must flush it because the
+ /*
+ * If there's a TLB batch pending, then we must flush it because the
* pages are going to be freed and we really don't want to have a CPU
* access a freed page because it has a stale TLB
*/
@@ -201,7 +205,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
BUG_ON(!mm->pgd);
- /* Note: Normally, we should only ever use a batch within a
+ /*
+ * Note: Normally, we should only ever use a batch within a
* PTE locked section. This violates the rule, but will work
* since we don't actually modify the PTEs, we just flush the
* hash while leaving the PTEs intact (including their reference
@@ -238,7 +243,8 @@ void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
unsigned long flags;
addr = _ALIGN_DOWN(addr, PMD_SIZE);
- /* Note: Normally, we should only ever use a batch within a
+ /*
+ * Note: Normally, we should only ever use a batch within a
* PTE locked section. This violates the rule, but will work
* since we don't actually modify the PTEs, we just flush the
* hash while leaving the PTEs intact (including their reference
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0a4f939a8161..919a861a8ec0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -37,6 +37,7 @@
#include <linux/context_tracking.h>
#include <linux/libfdt.h>
#include <linux/pkeys.h>
+#include <linux/hugetlb.h>
#include <asm/debugfs.h>
#include <asm/processor.h>
@@ -65,6 +66,8 @@
#include <asm/pte-walk.h>
#include <asm/asm-prototypes.h>
+#include <mm/mmu_decl.h>
+
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
@@ -128,7 +131,8 @@ static DEFINE_SPINLOCK(linear_map_hash_lock);
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);
-/* There are definitions of page sizes arrays to be used when none
+/*
+ * These are definitions of page size arrays to be used when none
* is provided by the firmware.
*/
@@ -145,7 +149,8 @@ static struct mmu_psize_def mmu_psize_defaults[] = {
},
};
-/* POWER4, GPUL, POWER5
+/*
+ * POWER4, GPUL, POWER5
*
* Support for 16Mb large pages
*/
@@ -479,7 +484,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
}
#ifdef CONFIG_HUGETLB_PAGE
-/* Scan for 16G memory blocks that have been set aside for huge pages
+/*
+ * Scan for 16G memory blocks that have been set aside for huge pages
* and reserve those blocks for 16G huge pages.
*/
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
@@ -496,8 +502,10 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
if (type == NULL || strcmp(type, "memory") != 0)
return 0;
- /* This property is the log base 2 of the number of virtual pages that
- * will represent this memory block. */
+ /*
+ * This property is the log base 2 of the number of virtual pages that
+ * will represent this memory block.
+ */
page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
if (page_count_prop == NULL)
return 0;
@@ -673,7 +681,8 @@ static void __init htab_init_page_sizes(void)
#endif /* CONFIG_PPC_64K_PAGES */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- /* We try to use 16M pages for vmemmap if that is supported
+ /*
+ * We try to use 16M pages for vmemmap if that is supported
* and we have at least 1G of RAM at boot
*/
if (mmu_psize_defs[MMU_PAGE_16M].shift &&
@@ -742,7 +751,8 @@ unsigned htab_shift_for_mem_size(unsigned long mem_size)
static unsigned long __init htab_get_table_size(void)
{
- /* If hash size isn't already provided by the platform, we try to
+ /*
+ * If hash size isn't already provided by the platform, we try to
* retrieve it from the device-tree. If it's not there neither, we
* calculate it now based on the total RAM size
*/
@@ -755,12 +765,12 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-void resize_hpt_for_hotplug(unsigned long new_mem_size)
+int resize_hpt_for_hotplug(unsigned long new_mem_size)
{
unsigned target_hpt_shift;
if (!mmu_hash_ops.resize_hpt)
- return;
+ return 0;
target_hpt_shift = htab_shift_for_mem_size(new_mem_size);
@@ -772,23 +782,25 @@ void resize_hpt_for_hotplug(unsigned long new_mem_size)
* reduce unless the target shift is at least 2 below the
* current shift
*/
- if ((target_hpt_shift > ppc64_pft_size)
- || (target_hpt_shift < (ppc64_pft_size - 1))) {
- int rc;
-
- rc = mmu_hash_ops.resize_hpt(target_hpt_shift);
- if (rc && (rc != -ENODEV))
- printk(KERN_WARNING
- "Unable to resize hash page table to target order %d: %d\n",
- target_hpt_shift, rc);
- }
+ if (target_hpt_shift > ppc64_pft_size ||
+ target_hpt_shift < ppc64_pft_size - 1)
+ return mmu_hash_ops.resize_hpt(target_hpt_shift);
+
+ return 0;
}
int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
- int rc = htab_bolt_mapping(start, end, __pa(start),
- pgprot_val(PAGE_KERNEL), mmu_linear_psize,
- mmu_kernel_ssize);
+ int rc;
+
+ if (end >= H_VMALLOC_START) {
+ pr_warn("Outside the supported range\n");
+ return -1;
+ }
+
+ rc = htab_bolt_mapping(start, end, __pa(start),
+ pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+ mmu_kernel_ssize);
if (rc < 0) {
int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
@@ -929,6 +941,11 @@ static void __init htab_initialize(void)
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
+ if ((base + size) >= H_VMALLOC_START) {
+ pr_warn("Outside the supported range\n");
+ continue;
+ }
+
BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
prot, mmu_linear_psize, mmu_kernel_ssize));
}
@@ -968,6 +985,7 @@ void __init hash__early_init_devtree(void)
htab_scan_page_sizes();
}
+struct hash_mm_context init_hash_mm_context;
void __init hash__early_init_mmu(void)
{
#ifndef CONFIG_PPC_64K_PAGES
@@ -1013,11 +1031,11 @@ void __init hash__early_init_mmu(void)
__pgd_val_bits = HASH_PGD_VAL_BITS;
__kernel_virt_start = H_KERN_VIRT_START;
- __kernel_virt_size = H_KERN_VIRT_SIZE;
__vmalloc_start = H_VMALLOC_START;
__vmalloc_end = H_VMALLOC_END;
__kernel_io_start = H_KERN_IO_START;
- vmemmap = (struct page *)H_VMEMMAP_BASE;
+ __kernel_io_end = H_KERN_IO_END;
+ vmemmap = (struct page *)H_VMEMMAP_START;
ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PCI
@@ -1035,12 +1053,16 @@ void __init hash__early_init_mmu(void)
if (!mmu_hash_ops.hpte_insert)
panic("hash__early_init_mmu: No MMU hash ops defined!\n");
- /* Initialize the MMU Hash table and create the linear mapping
+ /*
+ * Initialize the MMU Hash table and create the linear mapping
* of memory. Has to be done before SLB initialization as this is
* currently where the page size encoding is obtained.
*/
htab_initialize();
+ init_mm.context.hash_context = &init_hash_mm_context;
+ mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
+
pr_info("Initializing hash mmu with SLB\n");
/* Initialize SLB management */
slb_initialize();
@@ -1147,10 +1169,13 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
*/
static int subpage_protection(struct mm_struct *mm, unsigned long ea)
{
- struct subpage_prot_table *spt = &mm->context.spt;
+ struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
u32 spp = 0;
u32 **sbpm, *sbpp;
+ if (!spt)
+ return 0;
+
if (ea >= spt->maxaddr)
return 0;
if (ea < 0x100000000UL) {
@@ -1214,7 +1239,8 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
}
}
-/* Result code is:
+/*
+ * Result code is:
* 0 - handled
* 1 - normal page fault
* -1 - critical hash insertion error
@@ -1238,7 +1264,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
trace_hash_fault(ea, access, trap);
/* Get region & vsid */
- switch (REGION_ID(ea)) {
+ switch (get_region_id(ea)) {
case USER_REGION_ID:
user_region = 1;
if (! mm) {
@@ -1252,15 +1278,19 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
break;
case VMALLOC_REGION_ID:
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
- if (ea < VMALLOC_END)
- psize = mmu_vmalloc_psize;
- else
- psize = mmu_io_psize;
+ psize = mmu_vmalloc_psize;
+ ssize = mmu_kernel_ssize;
+ break;
+
+ case IO_REGION_ID:
+ vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+ psize = mmu_io_psize;
ssize = mmu_kernel_ssize;
break;
default:
- /* Not a valid range
- * Send the problem up to do_page_fault
+ /*
+ * Not a valid range
+ * Send the problem up to do_page_fault()
*/
rc = 1;
goto bail;
@@ -1285,7 +1315,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
flags |= HPTE_LOCAL_UPDATE;
#ifndef CONFIG_PPC_64K_PAGES
- /* If we use 4K pages and our psize is not 4K, then we might
+ /*
+ * If we use 4K pages and our psize is not 4K, then we might
* be hitting a special driver mapping, and need to align the
* address before we fetch the PTE.
*
@@ -1307,7 +1338,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
/* Add _PAGE_PRESENT to the required access perm */
access |= _PAGE_PRESENT;
- /* Pre-check access permissions (will be re-checked atomically
+ /*
+ * Pre-check access permissions (will be re-checked atomically
* in __hash_page_XX but this pre-check is a fast path
*/
if (!check_pte_access(access, pte_val(*ptep))) {
@@ -1354,7 +1386,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
psize = MMU_PAGE_4K;
}
- /* If this PTE is non-cacheable and we have restrictions on
+ /*
+ * If this PTE is non-cacheable and we have restrictions on
* using non cacheable large pages, then we switch to 4k
*/
if (mmu_ci_restrictions && psize == MMU_PAGE_64K && pte_ci(*ptep)) {
@@ -1395,7 +1428,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
flags, ssize, spp);
}
- /* Dump some info in case of hash insertion failure, they should
+ /*
+ * Dump some info in case of hash insertion failure, they should
* never happen so it is really useful to know if/when they do
*/
if (rc == -1)
@@ -1421,7 +1455,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
unsigned long flags = 0;
struct mm_struct *mm = current->mm;
- if (REGION_ID(ea) == VMALLOC_REGION_ID)
+ if ((get_region_id(ea) == VMALLOC_REGION_ID) ||
+ (get_region_id(ea) == IO_REGION_ID))
mm = &init_mm;
if (dsisr & DSISR_NOHPTE)
@@ -1437,8 +1472,9 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
unsigned long access = _PAGE_PRESENT | _PAGE_READ;
unsigned long flags = 0;
struct mm_struct *mm = current->mm;
+ unsigned int region_id = get_region_id(ea);
- if (REGION_ID(ea) == VMALLOC_REGION_ID)
+ if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
if (dsisr & DSISR_NOHPTE)
@@ -1455,7 +1491,7 @@ int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
* 2) user space access kernel space.
*/
access |= _PAGE_PRIVILEGED;
- if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+ if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
access &= ~_PAGE_PRIVILEGED;
if (trap == 0x400)
@@ -1470,7 +1506,7 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
int psize = get_slice_psize(mm, ea);
/* We only prefault standard pages for now */
- if (unlikely(psize != mm->context.user_psize))
+ if (unlikely(psize != mm_ctx_user_psize(&mm->context)))
return false;
/*
@@ -1499,7 +1535,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
- BUG_ON(REGION_ID(ea) != USER_REGION_ID);
+ BUG_ON(get_region_id(ea) != USER_REGION_ID);
if (!should_hash_preload(mm, ea))
return;
@@ -1549,7 +1585,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Hash it in */
#ifdef CONFIG_PPC_64K_PAGES
- if (mm->context.user_psize == MMU_PAGE_64K)
+ if (mm_ctx_user_psize(&mm->context) == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap,
update_flags, ssize);
else
@@ -1562,8 +1598,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
*/
if (rc == -1)
hash_failure_debug(ea, access, vsid, trap, ssize,
- mm->context.user_psize,
- mm->context.user_psize,
+ mm_ctx_user_psize(&mm->context),
+ mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
out_exit:
local_irq_restore(flags);
@@ -1634,7 +1670,8 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
return gslot;
}
-/* WARNING: This is called from hash_low_64.S, if you change this prototype,
+/*
+ * WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
@@ -1855,7 +1892,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- /* We don't currently support the first MEMBLOCK not mapping 0
+ /*
+ * We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
*/
BUG_ON(first_memblock_base != 0);
@@ -1909,3 +1947,14 @@ static int __init hash64_debugfs(void)
}
machine_device_initcall(pseries, hash64_debugfs);
#endif /* CONFIG_DEBUG_FS */
+
+void __init print_system_hash_info(void)
+{
+ pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+
+ if (htab_hash_mask)
+ pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+ pr_info("kernel vmalloc start = 0x%lx\n", KERN_VIRT_START);
+ pr_info("kernel IO start = 0x%lx\n", KERN_IO_START);
+ pr_info("kernel vmemmap start = 0x%lx\n", (unsigned long)vmemmap);
+}
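
For readers following the region rework in this file: the hash fault path now derives a region id from the effective address with get_region_id() and selects a page size per region, instead of comparing against VMALLOC_END. The stand-alone C sketch below models that dispatch only; the address boundaries, region names and page-size strings are invented for illustration and are not the kernel's real virtual-memory layout.

#include <stdio.h>

enum region { USER_REGION, LINEAR_REGION, VMALLOC_REGION, IO_REGION, INVALID_REGION };

/* Fake layout: one region per top hex digit of a 64-bit address. */
static enum region region_of(unsigned long long ea)
{
        switch (ea >> 60) {
        case 0x0: return USER_REGION;
        case 0xc: return LINEAR_REGION;
        case 0xd: return VMALLOC_REGION;
        case 0xe: return IO_REGION;
        default:  return INVALID_REGION;
        }
}

static const char *psize_for(enum region r)
{
        /* Mirrors the idea that vmalloc and IO now pick different page sizes. */
        switch (r) {
        case USER_REGION:    return "user psize";
        case LINEAR_REGION:  return "linear psize";
        case VMALLOC_REGION: return "vmalloc psize";
        case IO_REGION:      return "io psize";
        default:             return "fault";
        }
}

int main(void)
{
        unsigned long long samples[] = {
                0x00007fff12345678ULL, 0xc000000001000000ULL,
                0xd000000000400000ULL, 0xe000000000080000ULL,
                0xf000000000000000ULL,
        };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%#llx -> %s\n", samples[i], psize_for(region_of(samples[i])));
        return 0;
}
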
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 8330f135294f..5c521f3924a5 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -141,8 +141,9 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
for (entry = 0; entry < entries; entry += chunk) {
unsigned long n = min(entries - entry, chunk);
- ret = get_user_pages_longterm(ua + (entry << PAGE_SHIFT), n,
- FOLL_WRITE, mem->hpages + entry, NULL);
+ ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
+ FOLL_WRITE | FOLL_LONGTERM,
+ mem->hpages + entry, NULL);
if (ret == n) {
pinned += n;
continue;
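
The hunk above keeps the existing chunked pinning loop while switching from get_user_pages_longterm() to get_user_pages() with FOLL_LONGTERM. As a rough user-space illustration of the loop shape only, pin_chunk() below is a stub standing in for get_user_pages(), which may pin fewer entries than requested:

#include <stdio.h>

static long pin_chunk(unsigned long first, unsigned long n)
{
        /* Pretend pinning starts failing partway through the range. */
        return first >= 8 ? 1 : (long)n;
}

int main(void)
{
        const unsigned long entries = 10, chunk = 4;
        unsigned long entry, pinned = 0;

        for (entry = 0; entry < entries; entry += chunk) {
                unsigned long n = entries - entry < chunk ? entries - entry : chunk;
                long ret = pin_chunk(entry, n);

                if (ret == (long)n) {
                        pinned += n;
                        continue;
                }
                if (ret > 0)
                        pinned += ret;
                break;                  /* partial pin: the caller unwinds */
        }
        printf("pinned %lu of %lu\n", pinned, entries);
        return 0;
}
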
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/book3s64/mmu_context.c
index f720c5cc0b5e..cb2b08635508 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -63,6 +63,13 @@ static int hash__init_new_context(struct mm_struct *mm)
if (index < 0)
return index;
+ mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context),
+ GFP_KERNEL);
+ if (!mm->context.hash_context) {
+ ida_free(&mmu_context_ida, index);
+ return -ENOMEM;
+ }
+
/*
* The old code would re-promote on fork, we don't do that when using
* slices as it could cause problem promoting slices that have been
@@ -77,10 +84,26 @@ static int hash__init_new_context(struct mm_struct *mm)
* We should not be calling init_new_context() on init_mm. Hence a
* check against 0 is OK.
*/
- if (mm->context.id == 0)
+ if (mm->context.id == 0) {
+ memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
slice_init_new_context_exec(mm);
+ } else {
+ /* This is fork. Copy hash_context details from current->mm */
+ memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+ /* inherit subpage prot details if we have one. */
+ if (current->mm->context.hash_context->spt) {
+ mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
+ GFP_KERNEL);
+ if (!mm->context.hash_context->spt) {
+ ida_free(&mmu_context_ida, index);
+ kfree(mm->context.hash_context);
+ return -ENOMEM;
+ }
+ }
+#endif
- subpage_prot_init_new_context(mm);
+ }
pkey_mm_init(mm);
return index;
@@ -118,6 +141,7 @@ static int radix__init_new_context(struct mm_struct *mm)
asm volatile("ptesync;isync" : : : "memory");
mm->context.npu_context = NULL;
+ mm->context.hash_context = NULL;
return index;
}
@@ -162,6 +186,7 @@ static void destroy_contexts(mm_context_t *ctx)
if (context_id)
ida_free(&mmu_context_ida, context_id);
}
+ kfree(ctx->hash_context);
}
static void pmd_frag_destroy(void *pmd_frag)
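
hash__init_new_context() above now kmallocs a per-mm hash_mm_context, copies it from the parent on fork, duplicates the optional subpage-prot table, and unwinds the context id when an allocation fails. A minimal user-space sketch of that allocate/copy/unwind pattern, with all names invented and plain malloc()/free() standing in for the kernel allocators:

#include <stdlib.h>
#include <string.h>

struct subtable { int prot[16]; };

struct ctx {
        int id;
        struct subtable *spt;           /* optional, may be NULL */
};

static int ctx_init(struct ctx *child, const struct ctx *parent, int new_id)
{
        child->id = new_id;
        child->spt = NULL;

        if (!parent)                    /* first context: nothing to copy */
                return 0;

        if (parent->spt) {              /* duplicate the optional sub-table */
                child->spt = malloc(sizeof(*child->spt));
                if (!child->spt)
                        return -1;      /* caller releases new_id again */
                memcpy(child->spt, parent->spt, sizeof(*child->spt));
        }
        return 0;
}

static void ctx_destroy(struct ctx *c)
{
        free(c->spt);
        c->spt = NULL;
}

int main(void)
{
        struct ctx init_ctx = { 0, NULL }, child;

        if (ctx_init(&child, &init_ctx, 1) == 0)
                ctx_destroy(&child);
        return 0;
}
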
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/book3s64/pgtable.c
index a4341aba0af4..16bda049187a 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -17,7 +17,7 @@
#include <asm/trace.h>
#include <asm/powernv.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#include <trace/events/thp.h>
unsigned long __pmd_frag_nr;
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 587807763737..ae7fca40e5b3 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -7,6 +7,7 @@
#include <asm/mman.h>
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_device.h>
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cab06331c0c0..cab06331c0c0 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 154472a28c77..c9bcf428dd2b 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -29,6 +29,7 @@
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>
+#include <asm/uaccess.h>
#include <trace/events/thp.h>
@@ -135,6 +136,10 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
*/
BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
+#ifdef CONFIG_PPC_64K_PAGES
+ BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
+#endif
+
if (unlikely(!slab_is_available()))
return early_map_kernel_page(ea, pa, flags, map_page_size,
nid, region_start, region_end);
@@ -334,6 +339,12 @@ void __init radix_init_pgtable(void)
* page tables will be allocated within the range. No
* need for a node (which we don't have yet).
*/
+
+ if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
+ pr_warn("Outside the supported range\n");
+ continue;
+ }
+
WARN_ON(create_physical_mapping(reg->base,
reg->base + reg->size,
-1));
@@ -531,8 +542,15 @@ static void radix_init_amor(void)
mtspr(SPRN_AMOR, (3ul << 62));
}
-static void radix_init_iamr(void)
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled)
{
+ if (disabled || !early_radix_enabled())
+ return;
+
+ if (smp_processor_id() == boot_cpuid)
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
/*
* Radix always uses key0 of the IAMR to determine if an access is
* allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
@@ -540,6 +558,25 @@ static void radix_init_iamr(void)
*/
mtspr(SPRN_IAMR, (1ul << 62));
}
+#endif
+
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled)
+{
+ if (disabled || !early_radix_enabled())
+ return;
+
+ if (smp_processor_id() == boot_cpuid) {
+ pr_info("Activating Kernel Userspace Access Prevention\n");
+ cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
+ }
+
+ /* Make sure userspace can't change the AMR */
+ mtspr(SPRN_UAMOR, 0);
+ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+ isync();
+}
+#endif
void __init radix__early_init_mmu(void)
{
@@ -574,11 +611,11 @@ void __init radix__early_init_mmu(void)
__pgd_val_bits = RADIX_PGD_VAL_BITS;
__kernel_virt_start = RADIX_KERN_VIRT_START;
- __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
__vmalloc_start = RADIX_VMALLOC_START;
__vmalloc_end = RADIX_VMALLOC_END;
__kernel_io_start = RADIX_KERN_IO_START;
- vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
+ __kernel_io_end = RADIX_KERN_IO_END;
+ vmemmap = (struct page *)RADIX_VMEMMAP_START;
ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PCI
@@ -601,7 +638,6 @@ void __init radix__early_init_mmu(void)
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
- radix_init_iamr();
radix_init_pgtable();
/* Switch to the guard PID before turning on MMU */
radix__switch_mmu_context(NULL, &init_mm);
@@ -623,7 +659,6 @@ void radix__early_init_mmu_secondary(void)
__pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
radix_init_amor();
}
- radix_init_iamr();
radix__switch_mmu_context(NULL, &init_mm);
if (cpu_has_feature(CPU_FTR_HVMODE))
@@ -646,7 +681,8 @@ void radix__mmu_cleanup_all(void)
void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- /* We don't currently support the first MEMBLOCK not mapping 0
+ /*
+ * We don't currently support the first MEMBLOCK not mapping 0
* physical on those processors
*/
BUG_ON(first_memblock_base != 0);
@@ -866,6 +902,11 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
+ if (end >= RADIX_VMALLOC_START) {
+ pr_warn("Outside the supported range\n");
+ return -1;
+ }
+
return create_physical_mapping(start, end, nid);
}
@@ -893,6 +934,11 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
int ret;
+ if ((start + page_size) >= RADIX_VMEMMAP_END) {
+ pr_warn("Outside the supported range\n");
+ return -1;
+ }
+
ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
BUG_ON(ret);
@@ -958,45 +1004,44 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{
- struct list_head *lh = (struct list_head *) pgtable;
+ struct list_head *lh = (struct list_head *) pgtable;
- assert_spin_locked(pmd_lockptr(mm, pmdp));
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
- /* FIFO */
- if (!pmd_huge_pte(mm, pmdp))
- INIT_LIST_HEAD(lh);
- else
- list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
- pmd_huge_pte(mm, pmdp) = pgtable;
+ /* FIFO */
+ if (!pmd_huge_pte(mm, pmdp))
+ INIT_LIST_HEAD(lh);
+ else
+ list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+ pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
- pte_t *ptep;
- pgtable_t pgtable;
- struct list_head *lh;
-
- assert_spin_locked(pmd_lockptr(mm, pmdp));
-
- /* FIFO */
- pgtable = pmd_huge_pte(mm, pmdp);
- lh = (struct list_head *) pgtable;
- if (list_empty(lh))
- pmd_huge_pte(mm, pmdp) = NULL;
- else {
- pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
- list_del(lh);
- }
- ptep = (pte_t *) pgtable;
- *ptep = __pte(0);
- ptep++;
- *ptep = __pte(0);
- return pgtable;
-}
+ pte_t *ptep;
+ pgtable_t pgtable;
+ struct list_head *lh;
+ assert_spin_locked(pmd_lockptr(mm, pmdp));
+
+ /* FIFO */
+ pgtable = pmd_huge_pte(mm, pmdp);
+ lh = (struct list_head *) pgtable;
+ if (list_empty(lh))
+ pmd_huge_pte(mm, pmdp) = NULL;
+ else {
+ pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+ list_del(lh);
+ }
+ ptep = (pte_t *) pgtable;
+ *ptep = __pte(0);
+ ptep++;
+ *ptep = __pte(0);
+ return pgtable;
+}
pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp)
+ unsigned long addr, pmd_t *pmdp)
{
pmd_t old_pmd;
unsigned long old;
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 6a23b9ebd2a1..4d841369399f 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -90,7 +90,7 @@ void radix__tlbiel_all(unsigned int action)
asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
-static inline void __tlbiel_pid(unsigned long pid, int set,
+static __always_inline void __tlbiel_pid(unsigned long pid, int set,
unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -106,7 +106,7 @@ static inline void __tlbiel_pid(unsigned long pid, int set,
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
-static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
+static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -120,7 +120,7 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
-static inline void __tlbiel_lpid(unsigned long lpid, int set,
+static __always_inline void __tlbiel_lpid(unsigned long lpid, int set,
unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -136,7 +136,7 @@ static inline void __tlbiel_lpid(unsigned long lpid, int set,
trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}
-static inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
+static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -928,7 +928,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
tlb->need_flush_all = 0;
}
-static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned long start, unsigned long end,
int psize, bool also_pwc)
{
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 5986df48359b..c22742218bd3 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -554,7 +554,8 @@ void slb_initialize(void)
asm volatile("isync; slbia; isync":::"memory");
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
- /* For the boot cpu, we're running on the stack in init_thread_union,
+ /*
+ * For the boot cpu, we're running on the stack in init_thread_union,
* which is in the first segment of the linear mapping, and also
* get_paca()->kstack hasn't been initialized yet.
* For secondary cpus, we need to bolt the kernel stack entry now.
@@ -691,10 +692,10 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
unsigned long flags;
int ssize;
- if (id == KERNEL_REGION_ID) {
+ if (id == LINEAR_MAP_REGION_ID) {
/* We only support up to MAX_PHYSMEM_BITS */
- if ((ea & ~REGION_MASK) > (1UL << MAX_PHYSMEM_BITS))
+ if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
return -EFAULT;
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
@@ -702,20 +703,25 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
} else if (id == VMEMMAP_REGION_ID) {
- if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
+ if (ea >= H_VMEMMAP_END)
return -EFAULT;
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
} else if (id == VMALLOC_REGION_ID) {
- if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
+ if (ea >= H_VMALLOC_END)
return -EFAULT;
- if (ea < H_VMALLOC_END)
- flags = local_paca->vmalloc_sllp;
- else
- flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+ flags = local_paca->vmalloc_sllp;
+
+ } else if (id == IO_REGION_ID) {
+
+ if (ea >= H_KERN_IO_END)
+ return -EFAULT;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+
} else {
return -EFAULT;
}
@@ -725,6 +731,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
ssize = MMU_SEGSIZE_256M;
context = get_kernel_context(ea);
+
return slb_insert_entry(ea, context, flags, ssize, true);
}
@@ -739,7 +746,7 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
* consider this as bad access if we take a SLB miss
* on an address above addr limit.
*/
- if (ea >= mm->context.slb_addr_limit)
+ if (ea >= mm_ctx_slb_addr_limit(&mm->context))
return -EFAULT;
context = get_user_context(&mm->context, ea);
@@ -761,7 +768,7 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
- unsigned long id = REGION_ID(ea);
+ unsigned long id = get_region_id(ea);
/* IRQs are not reconciled here, so can't check irqs_disabled */
VM_WARN_ON(mfmsr() & MSR_EE);
@@ -784,7 +791,7 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
* first class kernel code. But for performance it's probably nicer
* if they go via fast_exception_return too.
*/
- if (id >= KERNEL_REGION_ID) {
+ if (id >= LINEAR_MAP_REGION_ID) {
long err;
#ifdef CONFIG_DEBUG_VM
/* Catch recursive kernel SLB faults. */
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 5e4178790dee..473dd430e306 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -25,10 +25,13 @@
*/
void subpage_prot_free(struct mm_struct *mm)
{
- struct subpage_prot_table *spt = &mm->context.spt;
+ struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
unsigned long i, j, addr;
u32 **p;
+ if (!spt)
+ return;
+
for (i = 0; i < 4; ++i) {
if (spt->low_prot[i]) {
free_page((unsigned long)spt->low_prot[i]);
@@ -48,13 +51,7 @@ void subpage_prot_free(struct mm_struct *mm)
free_page((unsigned long)p);
}
spt->maxaddr = 0;
-}
-
-void subpage_prot_init_new_context(struct mm_struct *mm)
-{
- struct subpage_prot_table *spt = &mm->context.spt;
-
- memset(spt, 0, sizeof(*spt));
+ kfree(spt);
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
@@ -93,13 +90,18 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = &mm->context.spt;
+ struct subpage_prot_table *spt;
u32 **spm, *spp;
unsigned long i;
size_t nw;
unsigned long next, limit;
down_write(&mm->mmap_sem);
+
+ spt = mm_ctx_subpage_prot(&mm->context);
+ if (!spt)
+ goto err_out;
+
limit = addr + len;
if (limit > spt->maxaddr)
limit = spt->maxaddr;
@@ -127,6 +129,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
}
+
+err_out:
up_write(&mm->mmap_sem);
}
@@ -189,7 +193,7 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
unsigned long, len, u32 __user *, map)
{
struct mm_struct *mm = current->mm;
- struct subpage_prot_table *spt = &mm->context.spt;
+ struct subpage_prot_table *spt;
u32 **spm, *spp;
unsigned long i;
size_t nw;
@@ -218,6 +222,21 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
return -EFAULT;
down_write(&mm->mmap_sem);
+
+ spt = mm_ctx_subpage_prot(&mm->context);
+ if (!spt) {
+ /*
+ * Allocate subpage prot table if not already done.
+ * Do this with mmap_sem held
+ */
+ spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
+ if (!spt) {
+ err = -ENOMEM;
+ goto out;
+ }
+ mm->context.hash_context->spt = spt;
+ }
+
subpage_mark_vma_nohuge(mm, addr, len);
for (limit = addr + len; addr < limit; addr = next) {
next = pmd_addr_end(addr, limit);
diff --git a/arch/powerpc/mm/vphn.c b/arch/powerpc/mm/book3s64/vphn.c
index f83044faac23..0ee7734afb50 100644
--- a/arch/powerpc/mm/vphn.c
+++ b/arch/powerpc/mm/book3s64/vphn.c
@@ -42,7 +42,8 @@ int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
u16 new = be16_to_cpup(field++);
if (is_32bit) {
- /* Let's concatenate the 16 bits of this field to the
+ /*
+ * Let's concatenate the 16 bits of this field to the
* 15 lower bits of the previous field
*/
unpacked[++nr_assoc_doms] =
@@ -56,7 +57,8 @@ int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
unpacked[++nr_assoc_doms] =
cpu_to_be32(new & VPHN_FIELD_MASK);
} else {
- /* Data is in the lower 15 bits of this field
+ /*
+ * Data is in the lower 15 bits of this field
* concatenated with the next 16 bit field
*/
last = new;
diff --git a/arch/powerpc/mm/vphn.h b/arch/powerpc/mm/book3s64/vphn.h
index f9ffdb3942fc..f0b93c2dd578 100644
--- a/arch/powerpc/mm/vphn.h
+++ b/arch/powerpc/mm/book3s64/vphn.h
@@ -2,8 +2,7 @@
#ifndef _ARCH_POWERPC_MM_VPHN_H_
#define _ARCH_POWERPC_MM_VPHN_H_
-/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
- */
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
#define VPHN_REGISTER_COUNT 6
/*
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index c8da352e8686..f137286740cb 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -105,7 +105,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
u64 vsid, vsidkey;
int psize, ssize;
- switch (REGION_ID(ea)) {
+ switch (get_region_id(ea)) {
case USER_REGION_ID:
pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
if (mm == NULL)
@@ -117,16 +117,20 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
break;
case VMALLOC_REGION_ID:
pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
- if (ea < VMALLOC_END)
- psize = mmu_vmalloc_psize;
- else
- psize = mmu_io_psize;
+ psize = mmu_vmalloc_psize;
ssize = mmu_kernel_ssize;
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
vsidkey = SLB_VSID_KERNEL;
break;
- case KERNEL_REGION_ID:
- pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+ case IO_REGION_ID:
+ pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
+ psize = mmu_io_psize;
+ ssize = mmu_kernel_ssize;
+ vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+ vsidkey = SLB_VSID_KERNEL;
+ break;
+ case LINEAR_MAP_REGION_ID:
+ pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
psize = mmu_linear_psize;
ssize = mmu_kernel_ssize;
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index b5d2658c26af..2f6154b76328 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -36,7 +36,7 @@
#include <asm/tlbflush.h>
#include <asm/dma.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
/*
* This address range defaults to a value that is safe for all
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 3f1803672c9b..641891df2046 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -366,8 +366,10 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
if (!drmem_info->lmbs)
return;
- for_each_drmem_lmb(lmb)
+ for_each_drmem_lmb(lmb) {
read_drconf_v1_cell(lmb, &prop);
+ lmb_set_nid(lmb);
+ }
}
static void __init init_drmem_v2_lmbs(const __be32 *prop)
@@ -412,6 +414,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
lmb->aa_index = dr_cell.aa_index;
lmb->flags = dr_cell.flags;
+
+ lmb_set_nid(lmb);
}
}
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 887f11bcf330..b5d3578d9f65 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -44,6 +44,7 @@
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
+#include <asm/kup.h>
static inline bool notify_page_fault(struct pt_regs *regs)
{
@@ -223,19 +224,46 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
}
/* Is this a bad kernel fault ? */
-static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
- unsigned long address)
+static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address, bool is_write)
{
+ int is_exec = TRAP(regs) == 0x400;
+
/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
DSISR_PROTFAULT))) {
- printk_ratelimited(KERN_CRIT "kernel tried to execute"
- " exec-protected page (%lx) -"
- "exploit attempt? (uid: %d)\n",
- address, from_kuid(&init_user_ns,
- current_uid()));
+ pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
+ address >= TASK_SIZE ? "exec-protected" : "user",
+ address,
+ from_kuid(&init_user_ns, current_uid()));
+
+ // Kernel exec fault is always bad
+ return true;
}
- return is_exec || (address >= TASK_SIZE);
+
+ if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
+ !search_exception_tables(regs->nip)) {
+ pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
+ address,
+ from_kuid(&init_user_ns, current_uid()));
+ }
+
+ // Kernel fault on kernel address is bad
+ if (address >= TASK_SIZE)
+ return true;
+
+ // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
+ if (!search_exception_tables(regs->nip))
+ return true;
+
+ // Read/write fault in a valid region (the exception table search passed
+ // above), but blocked by KUAP is bad, it can never succeed.
+ if (bad_kuap_fault(regs, is_write))
+ return true;
+
+ // What's left? Kernel fault on user in well defined regions (extable
+ // matched), and allowed by KUAP in the faulting context.
+ return false;
}
static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
@@ -455,9 +483,10 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
/*
* The kernel should never take an execute fault nor should it
- * take a page fault to a kernel address.
+ * take a page fault to a kernel address or a page fault to a user
+ * address outside of dedicated places
*/
- if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
+ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
return SIGSEGV;
/*
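
The reworked bad_kernel_fault() in this file turns the old one-line check into a decision ladder: kernel execute faults and faults on kernel addresses are always bad, and user-address faults are tolerated only when an exception-table entry covers the faulting instruction and KUAP does not block the access. A hedged, stand-alone model of that ladder follows; the helpers are stubs for the real kernel facilities and the user/kernel split is made up for the example.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_TASK_SIZE 0x0000400000000000ULL   /* illustrative user/kernel split */

static bool in_exception_table(unsigned long long nip) { return nip == 0xc0def00dULL; }
static bool blocked_by_kuap(bool is_write) { (void)is_write; return false; }

static bool bad_kernel_fault(bool is_exec, unsigned long long addr,
                             unsigned long long nip, bool is_write)
{
        if (is_exec)
                return true;            /* kernel should never execute-fault */
        if (addr >= DEMO_TASK_SIZE)
                return true;            /* kernel fault on a kernel address */
        if (!in_exception_table(nip))
                return true;            /* user access outside copy helpers */
        if (blocked_by_kuap(is_write))
                return true;            /* valid site, but KUAP forbids it */
        return false;                   /* well-defined user access, allowed */
}

int main(void)
{
        printf("%d\n", bad_kernel_fault(false, 0x1000, 0xc0def00dULL, false)); /* 0 */
        printf("%d\n", bad_kernel_fault(false, 0x1000, 0x12345678ULL, false)); /* 1 */
        return 0;
}
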
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index 82a0e37557a5..320c1672b2ae 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -43,9 +43,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
+ WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
local_flush_tlb_page(NULL, vaddr);
@@ -56,7 +54,6 @@ EXPORT_SYMBOL(kmap_atomic_prot);
void __kunmap_atomic(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- int type __maybe_unused;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable();
@@ -64,14 +61,12 @@ void __kunmap_atomic(void *kvaddr)
return;
}
- type = kmap_atomic_idx();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
- {
+ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
+ int type = kmap_atomic_idx();
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/*
* force other mappings to Oops if they'll try to access
@@ -80,7 +75,6 @@ void __kunmap_atomic(void *kvaddr)
pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr);
}
-#endif
kmap_atomic_idx_pop();
pagefault_enable();
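
The highmem changes above replace #ifdef CONFIG_DEBUG_HIGHMEM blocks with IS_ENABLED() checks, so the debug code is always compiled (and cannot bit-rot) yet still folds away when the option is off. A simplified stand-alone illustration of that shape; the macro here is a plain 0/1 stand-in, not the kernel's IS_ENABLED() implementation:

#include <stdio.h>

#ifndef DEMO_DEBUG
#define DEMO_DEBUG 0                    /* build with -DDEMO_DEBUG=1 to enable checks */
#endif
#define IS_ENABLED_DEMO(opt) (opt)      /* simplified stand-in, not the kernel macro */

static void unmap_slot(int idx, int expected)
{
        if (IS_ENABLED_DEMO(DEMO_DEBUG) && idx != expected)
                fprintf(stderr, "unexpected slot %d (wanted %d)\n", idx, expected);
        /* ... the actual unmap work would go here ... */
}

int main(void)
{
        unmap_slot(3, 3);
        unmap_slot(4, 3);               /* warns only in a debug build */
        return 0;
}
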
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9e732bb2c84a..b5d92dc32844 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -26,20 +26,8 @@
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-#define PAGE_SHIFT_64K 16
-#define PAGE_SHIFT_512K 19
-#define PAGE_SHIFT_8M 23
-#define PAGE_SHIFT_16M 24
-#define PAGE_SHIFT_16G 34
-
bool hugetlb_disabled = false;
-unsigned int HPAGE_SHIFT;
-EXPORT_SYMBOL(HPAGE_SHIFT);
-
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
@@ -98,19 +86,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
for (i = 0; i < num_hugepd; i++, hpdp++) {
if (unlikely(!hugepd_none(*hpdp)))
break;
- else {
-#ifdef CONFIG_PPC_BOOK3S_64
- *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS |
- (shift_to_mmu_psize(pshift) << 2));
-#elif defined(CONFIG_PPC_8xx)
- *hpdp = __hugepd(__pa(new) | _PMD_USER |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
- _PMD_PAGE_512K) | _PMD_PRESENT);
-#else
- /* We use the old format for PPC_FSL_BOOK3E */
- *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
-#endif
- }
+ hugepd_populate(hpdp, new, pshift);
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
@@ -250,7 +226,7 @@ int __init alloc_bootmem_huge_page(struct hstate *h)
return __alloc_bootmem_huge_page(h);
}
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
+#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
@@ -542,23 +518,6 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
return (__boundary - 1 < end - 1) ? __boundary : end;
}
-int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- pte_t *ptep;
- unsigned long sz = 1UL << hugepd_shift(hugepd);
- unsigned long next;
-
- ptep = hugepte_offset(hugepd, addr, pdshift);
- do {
- next = hugepte_addr_end(addr, end, sz);
- if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
- return 0;
- } while (ptep++, addr = next, addr != end);
-
- return 1;
-}
-
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
@@ -578,24 +537,15 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
-#ifdef CONFIG_PPC_MM_SLICES
/* With radix we don't use slice, so derive it from vma */
- if (!radix_enabled()) {
+ if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
return 1UL << mmu_psize_to_shift(psize);
}
-#endif
return vma_kernel_pagesize(vma);
}
-static inline bool is_power_of_4(unsigned long x)
-{
- if (is_power_of_2(x))
- return (__ilog2(x) % 2) ? false : true;
- return false;
-}
-
static int __init add_huge_page_size(unsigned long long size)
{
int shift = __ffs(size);
@@ -603,37 +553,13 @@ static int __init add_huge_page_size(unsigned long long size)
/* Check that it is a page size supported by the hardware and
* that it fits within pagetable and slice limits. */
- if (size <= PAGE_SIZE)
- return -EINVAL;
-#if defined(CONFIG_PPC_FSL_BOOK3E)
- if (!is_power_of_4(size))
+ if (size <= PAGE_SIZE || !is_power_of_2(size))
return -EINVAL;
-#elif !defined(CONFIG_PPC_8xx)
- if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
- return -EINVAL;
-#endif
- if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
+ mmu_psize = check_and_get_huge_psize(shift);
+ if (mmu_psize < 0)
return -EINVAL;
-#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * We need to make sure that for different page sizes reported by
- * firmware we only add hugetlb support for page sizes that can be
- * supported by linux page table layout.
- * For now we have
- * Radix: 2M and 1G
- * Hash: 16M and 16G
- */
- if (radix_enabled()) {
- if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
- return -EINVAL;
- } else {
- if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
- return -EINVAL;
- }
-#endif
-
BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
/* Return if huge page size has already been setup */
@@ -669,10 +595,10 @@ static int __init hugetlbpage_init(void)
return 0;
}
-#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
- if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
+ !mmu_has_feature(MMU_FTR_16M_PAGE))
return -ENODEV;
-#endif
+
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
unsigned shift;
unsigned pdshift;
@@ -710,29 +636,13 @@ static int __init hugetlbpage_init(void)
pgtable_cache_add(PTE_INDEX_SIZE);
else if (pdshift > shift)
pgtable_cache_add(pdshift - shift);
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
- else
+ else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
pgtable_cache_add(PTE_T_ORDER);
-#endif
}
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
- /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
- if (mmu_psize_defs[MMU_PAGE_4M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
- else if (mmu_psize_defs[MMU_PAGE_512K].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
-#else
- /* Set default large page size. Currently, we pick 16M or 1M
- * depending on what is available
- */
- if (mmu_psize_defs[MMU_PAGE_16M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
- else if (mmu_psize_defs[MMU_PAGE_1M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
- else if (mmu_psize_defs[MMU_PAGE_2M].shift)
- HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
-#endif
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
+ hugetlbpage_init_default();
+
return 0;
}
@@ -756,113 +666,8 @@ void flush_dcache_icache_hugepage(struct page *page)
}
}
-#endif /* CONFIG_HUGETLB_PAGE */
-
-/*
- * We have 4 cases for pgds and pmds:
- * (1) invalid (all zeroes)
- * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page _PAGE_PTE set
- * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
- *
- * So long as we atomically load page table pointers we are safe against teardown,
- * we can follow the address down to the the page and take a ref on it.
- * This function need to be called with interrupts disabled. We use this variant
- * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
- */
-pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
- bool *is_thp, unsigned *hpage_shift)
-{
- pgd_t pgd, *pgdp;
- pud_t pud, *pudp;
- pmd_t pmd, *pmdp;
- pte_t *ret_pte;
- hugepd_t *hpdp = NULL;
- unsigned pdshift = PGDIR_SHIFT;
-
- if (hpage_shift)
- *hpage_shift = 0;
-
- if (is_thp)
- *is_thp = false;
-
- pgdp = pgdir + pgd_index(ea);
- pgd = READ_ONCE(*pgdp);
- /*
- * Always operate on the local stack value. This make sure the
- * value don't get updated by a parallel THP split/collapse,
- * page fault or a page unmap. The return pte_t * is still not
- * stable. So should be checked there for above conditions.
- */
- if (pgd_none(pgd))
- return NULL;
- else if (pgd_huge(pgd)) {
- ret_pte = (pte_t *) pgdp;
- goto out;
- } else if (is_hugepd(__hugepd(pgd_val(pgd))))
- hpdp = (hugepd_t *)&pgd;
- else {
- /*
- * Even if we end up with an unmap, the pgtable will not
- * be freed, because we do an rcu free and here we are
- * irq disabled
- */
- pdshift = PUD_SHIFT;
- pudp = pud_offset(&pgd, ea);
- pud = READ_ONCE(*pudp);
-
- if (pud_none(pud))
- return NULL;
- else if (pud_huge(pud)) {
- ret_pte = (pte_t *) pudp;
- goto out;
- } else if (is_hugepd(__hugepd(pud_val(pud))))
- hpdp = (hugepd_t *)&pud;
- else {
- pdshift = PMD_SHIFT;
- pmdp = pmd_offset(&pud, ea);
- pmd = READ_ONCE(*pmdp);
- /*
- * A hugepage collapse is captured by pmd_none, because
- * it mark the pmd none and do a hpte invalidate.
- */
- if (pmd_none(pmd))
- return NULL;
-
- if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
- if (is_thp)
- *is_thp = true;
- ret_pte = (pte_t *) pmdp;
- goto out;
- }
- /*
- * pmd_large check below will handle the swap pmd pte
- * we need to do both the check because they are config
- * dependent.
- */
- if (pmd_huge(pmd) || pmd_large(pmd)) {
- ret_pte = (pte_t *) pmdp;
- goto out;
- } else if (is_hugepd(__hugepd(pmd_val(pmd))))
- hpdp = (hugepd_t *)&pmd;
- else
- return pte_offset_kernel(&pmd, ea);
- }
- }
- if (!hpdp)
- return NULL;
-
- ret_pte = hugepte_offset(*hpdp, ea, pdshift);
- pdshift = hugepd_shift(*hpdp);
-out:
- if (hpage_shift)
- *hpage_shift = pdshift;
- return ret_pte;
-}
-EXPORT_SYMBOL_GPL(__find_linux_pte);
-
-int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
+static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long pte_end;
struct page *head, *page;
@@ -908,3 +713,20 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
return 1;
}
+
+int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift,
+ unsigned long end, int write, struct page **pages, int *nr)
+{
+ pte_t *ptep;
+ unsigned long sz = 1UL << hugepd_shift(hugepd);
+ unsigned long next;
+
+ ptep = hugepte_offset(hugepd, addr, pdshift);
+ do {
+ next = hugepte_addr_end(addr, end, sz);
+ if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+ return 0;
+ } while (ptep++, addr = next, addr != end);
+
+ return 1;
+}
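
gup_huge_pd(), now placed after its gup_hugepte() helper, walks [addr, end) one huge-page slot at a time, clamping each step to the next size-aligned boundary so the final chunk never runs past end. A small stand-alone illustration of that boundary computation (not the kernel implementation, with arbitrary example addresses):

#include <stdio.h>

static unsigned long addr_end(unsigned long addr, unsigned long end, unsigned long sz)
{
        unsigned long boundary = (addr + sz) & ~(sz - 1);  /* next sz-aligned address */

        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x12000, end = 0x1a000, sz = 0x4000, next;

        do {
                next = addr_end(addr, end, sz);
                printf("chunk [%#lx, %#lx)\n", addr, next);
                addr = next;
        } while (addr != end);
        return 0;
}
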
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 1e6910eb70ed..3bcae9e5e954 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -24,6 +24,32 @@
#include <linux/string.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
+#include <asm/kup.h>
+
+static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
+static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
+
+static int __init parse_nosmep(char *p)
+{
+ disable_kuep = true;
+ pr_warn("Disabling Kernel Userspace Execution Prevention\n");
+ return 0;
+}
+early_param("nosmep", parse_nosmep);
+
+static int __init parse_nosmap(char *p)
+{
+ disable_kuap = true;
+ pr_warn("Disabling Kernel Userspace Access Protection\n");
+ return 0;
+}
+early_param("nosmap", parse_nosmap);
+
+void __ref setup_kup(void)
+{
+ setup_kuep(disable_kuep);
+ setup_kuap(disable_kuap);
+}
#define CTOR(shift) static void ctor_##shift(void *addr) \
{ \
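
setup_kup(), added above, combines compile-time defaults with the nosmep/nosmap boot parameters before handing off to setup_kuep()/setup_kuap(). A rough user-space analogue of that flow, in which argv parsing stands in for the kernel's early_param() hooks and printf() stands in for the real setup calls:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_KUEP_BUILTIN 1             /* pretend both features are compiled in */
#define DEMO_KUAP_BUILTIN 1

static bool disable_kuep = !DEMO_KUEP_BUILTIN;
static bool disable_kuap = !DEMO_KUAP_BUILTIN;

static void setup_kup(void)
{
        printf("KUEP %s\n", disable_kuep ? "disabled" : "enabled");
        printf("KUAP %s\n", disable_kuap ? "disabled" : "enabled");
}

int main(int argc, char **argv)
{
        int i;

        for (i = 1; i < argc; i++) {    /* stand-in for early_param() parsing */
                if (!strcmp(argv[i], "nosmep"))
                        disable_kuep = true;
                if (!strcmp(argv[i], "nosmap"))
                        disable_kuap = true;
        }
        setup_kup();
        return 0;
}
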
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 41a3513cadc9..c3121b6c8cbd 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -45,8 +45,10 @@
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/hugetlb.h>
+#include <asm/kup.h>
+#include <asm/kasan.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
@@ -178,6 +180,10 @@ void __init MMU_init(void)
btext_unmap();
#endif
+ kasan_mmu_init();
+
+ setup_kup();
+
/* Shortly after that, the entire linear mapping will be available */
memblock_set_current_limit(lowmem_end_addr);
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index a4c155af1597..45b02fa11cd8 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,7 +66,7 @@
#include <asm/iommu.h>
#include <asm/vdso.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
new file mode 100644
index 000000000000..6577897673dd
--- /dev/null
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KASAN_SANITIZE := n
+
+obj-$(CONFIG_PPC32) += kasan_init_32.o
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
new file mode 100644
index 000000000000..0d62be3cba47
--- /dev/null
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+#include <linux/vmalloc.h>
+#include <asm/pgalloc.h>
+#include <asm/code-patching.h>
+#include <mm/mmu_decl.h>
+
+static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+{
+ unsigned long va = (unsigned long)kasan_early_shadow_page;
+ phys_addr_t pa = __pa(kasan_early_shadow_page);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+}
+
+static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+{
+ pmd_t *pmd;
+ unsigned long k_cur, k_next;
+
+ pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
+
+ for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
+ pte_t *new;
+
+ k_next = pgd_addr_end(k_cur, k_end);
+ if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+ continue;
+
+ new = pte_alloc_one_kernel(&init_mm);
+
+ if (!new)
+ return -ENOMEM;
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_populate_pte(new, PAGE_READONLY);
+ else
+ kasan_populate_pte(new, PAGE_KERNEL_RO);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
+ return 0;
+}
+
+static void __ref *kasan_get_one_page(void)
+{
+ if (slab_is_available())
+ return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+ return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static int __ref kasan_init_region(void *start, size_t size)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ unsigned long k_cur;
+ int ret;
+ void *block = NULL;
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+ if (!slab_is_available())
+ block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+ pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+ if (!va)
+ return -ENOMEM;
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+ return 0;
+}
+
+static void __init kasan_remap_early_shadow_ro(void)
+{
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
+ else
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+
+ flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+}
+
+void __init kasan_mmu_init(void)
+{
+ int ret;
+ struct memblock_region *reg;
+
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+
+ for_each_memblock(memory, reg) {
+ phys_addr_t base = reg->base;
+ phys_addr_t top = min(base + reg->size, total_lowmem);
+
+ if (base >= top)
+ continue;
+
+ ret = kasan_init_region(__va(base), top - base);
+ if (ret)
+ panic("kasan: kasan_init_region() failed");
+ }
+}
+
+void __init kasan_init(void)
+{
+ kasan_remap_early_shadow_ro();
+
+ clear_page(kasan_early_shadow_page);
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+#ifdef CONFIG_MODULES
+void *module_alloc(unsigned long size)
+{
+ void *base = vmalloc_exec(size);
+
+ if (!base)
+ return NULL;
+
+ if (!kasan_init_region(base, size))
+ return base;
+
+ vfree(base);
+
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
+
+static void __init kasan_early_hash_table(void)
+{
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+
+ Hash = (struct hash_pte *)early_hash;
+}
+#else
+static void __init kasan_early_hash_table(void) {}
+#endif
+
+void __init kasan_early_init(void)
+{
+ unsigned long addr = KASAN_SHADOW_START;
+ unsigned long end = KASAN_SHADOW_END;
+ unsigned long next;
+ pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+
+ BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
+
+ kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
+ } while (pmd++, addr = next, addr != end);
+
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ kasan_early_hash_table();
+}
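
The new kasan_init_32.c above relies on the generic KASAN shadow mapping, in which each 8-byte granule of address space is tracked by one shadow byte, i.e. shadow(addr) = (addr >> 3) + offset. A quick sketch of that arithmetic; the offset below is purely illustrative and is not the ppc32 KASAN_SHADOW_OFFSET:

#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3
#define SHADOW_OFFSET      0xe0000000UL         /* made up for the example */

static unsigned long mem_to_shadow(unsigned long addr)
{
        return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
        unsigned long start = 0xc0000000UL, size = 0x10000000UL;

        /* A region's shadow covers one eighth of its size. */
        printf("shadow of [%#lx, %#lx) is [%#lx, %#lx)\n",
               start, start + size,
               mem_to_shadow(start), mem_to_shadow(start + size));
        return 0;
}
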
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f6787f90e158..e885fe2aafcc 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -54,7 +54,7 @@
#include <asm/swiotlb.h>
#include <asm/rtas.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
@@ -109,8 +109,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
-int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int __ref arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -127,12 +127,12 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
}
flush_inval_dcache_range(start, start + size);
- return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int __meminit arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void __ref arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -147,23 +147,21 @@ int __meminit arch_remove_memory(int nid, u64 start, u64 size,
if (altmap)
page += vmem_altmap_offset(altmap);
- ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
- if (ret)
- return ret;
+ __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
flush_inval_dcache_range(start, start + size);
ret = remove_section_mapping(start, start + size);
+ WARN_ON_ONCE(ret);
/* Ensure all vmalloc mappings are flushed in case they also
* hit that section of memory
*/
vm_unmap_aliases();
- resize_hpt_for_hotplug(memblock_phys_mem_size());
-
- return ret;
+ if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
+ pr_warn("Hash collision while resizing HPT\n");
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -309,6 +307,10 @@ void __init mem_init(void)
mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
+#ifdef CONFIG_KASAN
+ pr_info(" * 0x%08lx..0x%08lx : kasan shadow mem\n",
+ KASAN_SHADOW_START, KASAN_SHADOW_END);
+#endif
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
@@ -333,13 +335,6 @@ void free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
/*
* This is called when a page has been modified by the kernel.
* It just marks the page as not i-cache clean. We do the i-cache
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index bb52320b7369..6b049d82b98a 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -98,7 +98,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
switch_mmu_context(prev, next, tsk);
}
-#ifdef CONFIG_PPC32
+#ifndef CONFIG_PPC_BOOK3S_64
void arch_exit_mmap(struct mm_struct *mm)
{
void *frag = pte_frag_get(&mm->context);
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 74ff61dabcb1..7bac0aa2026a 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -83,6 +83,8 @@ static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
}
#endif
+static inline void print_system_hash_info(void) {}
+
#else /* CONFIG_PPC_MMU_NOHASH */
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
@@ -92,6 +94,8 @@ extern void hash_preload(struct mm_struct *mm, unsigned long ea,
extern void _tlbie(unsigned long address);
extern void _tlbia(void);
+void print_system_hash_info(void);
+
#endif /* CONFIG_PPC_MMU_NOHASH */
#ifdef CONFIG_PPC32
@@ -104,8 +108,8 @@ extern int __map_without_bats;
extern unsigned int rtas_data, rtas_size;
struct hash_pte;
-extern struct hash_pte *Hash, *Hash_end;
-extern unsigned long Hash_size, Hash_mask;
+extern struct hash_pte *Hash;
+extern u8 early_hash[];
#endif /* CONFIG_PPC32 */
@@ -130,6 +134,7 @@ extern void wii_memory_fixups(void);
*/
#ifdef CONFIG_PPC32
extern void MMU_init_hw(void);
+void MMU_init_hw_patch(void);
unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);
#endif
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/nohash/40x.c
index b9cf6f8764b0..460459b6f53e 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -49,7 +49,7 @@
#include <asm/machdep.h>
#include <asm/setup.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
extern int __map_without_ltlbs;
/*
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/nohash/44x.c
index aad127acdbaa..c07983ebc02e 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -31,7 +31,7 @@
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
/* Used by the 44x TLB replacement exception handler.
* Just needed it declared someplace.
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/nohash/8xx.c
index fe1f6443d57f..70d55b615b62 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -17,7 +17,7 @@
#include <asm/fixmap.h>
#include <asm/code-patching.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
@@ -213,3 +213,27 @@ void flush_instruction_cache(void)
mtspr(SPRN_IC_CST, IDC_INVALL);
isync();
}
+
+#ifdef CONFIG_PPC_KUEP
+void __init setup_kuep(bool disabled)
+{
+ if (disabled)
+ return;
+
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ mtspr(SPRN_MI_AP, MI_APG_KUEP);
+}
+#endif
+
+#ifdef CONFIG_PPC_KUAP
+void __init setup_kuap(bool disabled)
+{
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ if (disabled)
+ pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
+
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+#endif
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
new file mode 100644
index 000000000000..33b6f6f29d3f
--- /dev/null
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+
+obj-y += mmu_context.o tlb.o tlb_low.o
+obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
+obj-$(CONFIG_40x) += 40x.o
+obj-$(CONFIG_44x) += 44x.o
+obj-$(CONFIG_PPC_8xx) += 8xx.o
+obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke.o
+ifdef CONFIG_HUGETLB_PAGE
+obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
+endif
+
+# Disable kcov instrumentation on sensitive code
+# This is necessary for booting with kcov enabled on book3e machines
+KCOV_INSTRUMENT_tlb.o := n
+KCOV_INSTRUMENT_fsl_booke.o := n
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
index f84ec46cdb26..61915f4d3c7f 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
@@ -11,8 +11,9 @@
#include <asm/mmu.h>
-#ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+
static inline int tlb1_next(void)
{
struct paca_struct *paca = get_paca();
@@ -29,33 +30,6 @@ static inline int tlb1_next(void)
tcd->esel_next = next;
return this;
}
-#else
-static inline int tlb1_next(void)
-{
- int index, ncams;
-
- ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
- index = this_cpu_read(next_tlbcam_idx);
-
- /* Just round-robin the entries and wrap when we hit the end */
- if (unlikely(index == ncams - 1))
- __this_cpu_write(next_tlbcam_idx, tlbcam_index);
- else
- __this_cpu_inc(next_tlbcam_idx);
-
- return index;
-}
-#endif /* !PPC64 */
-#endif /* FSL */
-
-static inline int mmu_get_tsize(int psize)
-{
- return mmu_psize_defs[psize].enc;
-}
-
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
-#include <asm/paca.h>
static inline void book3e_tlb_lock(void)
{
@@ -98,6 +72,23 @@ static inline void book3e_tlb_unlock(void)
paca->tcd_ptr->lock = 0;
}
#else
+static inline int tlb1_next(void)
+{
+ int index, ncams;
+
+ ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
+
+ index = this_cpu_read(next_tlbcam_idx);
+
+ /* Just round-robin the entries and wrap when we hit the end */
+ if (unlikely(index == ncams - 1))
+ __this_cpu_write(next_tlbcam_idx, tlbcam_index);
+ else
+ __this_cpu_inc(next_tlbcam_idx);
+
+ return index;
+}
+
static inline void book3e_tlb_lock(void)
{
}
@@ -139,10 +130,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
unsigned long psize, tsize, shift;
unsigned long flags;
struct mm_struct *mm;
-
-#ifdef CONFIG_PPC_FSL_BOOK3E
int index;
-#endif
if (unlikely(is_kernel_addr(ea)))
return;
@@ -166,11 +154,9 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
return;
}
-#ifdef CONFIG_PPC_FSL_BOOK3E
/* We have to use the CAM(TLB1) on FSL parts for hugepages */
index = tlb1_next();
mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));
-#endif
mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
mas2 = ea & ~((1UL << shift) - 1);
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 1032ef7aaf62..75e9e2c35fe2 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -15,7 +15,7 @@
#include <asm/tlb.h>
#include <asm/dma.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
@@ -55,7 +55,7 @@ void vmemmap_remove_mapping(unsigned long start,
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-static __ref void *early_alloc_pgtable(unsigned long size)
+static void __init *early_alloc_pgtable(unsigned long size)
{
void *ptr;
@@ -74,7 +74,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
pud_t *pudp;
@@ -98,20 +98,17 @@ int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
#ifndef __PAGETABLE_PUD_FOLDED
if (pgd_none(*pgdp)) {
pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
- BUG_ON(pudp == NULL);
pgd_populate(&init_mm, pgdp, pudp);
}
#endif /* !__PAGETABLE_PUD_FOLDED */
pudp = pud_offset(pgdp, ea);
if (pud_none(*pudp)) {
pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
- BUG_ON(pmdp == NULL);
pud_populate(&init_mm, pudp, pmdp);
}
pmdp = pmd_offset(pudp, ea);
if (!pmd_present(*pmdp)) {
ptep = early_alloc_pgtable(PAGE_SIZE);
- BUG_ON(ptep == NULL);
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, ea);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/nohash/fsl_booke.c
index 210cbc1faf63..71a1a36751dd 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/nohash/fsl_booke.c
@@ -54,7 +54,7 @@
#include <asm/setup.h>
#include <asm/paca.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
unsigned int tlbcam_index;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/nohash/mmu_context.c
index 1945c5f19f5e..ae4505d5b4b8 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -52,7 +52,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
/*
* The MPC8xx has only 16 contexts. We rotate through them on each task switch.
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/nohash/tlb.c
index ac23dc1c6535..24f88efb05bf 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -46,7 +46,7 @@
#include <asm/hugetlb.h>
#include <asm/paca.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
@@ -433,11 +433,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
unsigned long rid = (address & rmask) | 0x1000000000000000ul;
unsigned long vpte = address & ~rmask;
-#ifdef CONFIG_PPC_64K_PAGES
- vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
-#else
vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
-#endif
vpte |= rid;
__flush_tlb_page(tlb->mm, vpte, tsize, 0);
}
@@ -625,21 +621,12 @@ static void early_init_this_mmu(void)
case PPC_HTW_IBM:
mas4 |= MAS4_INDD;
-#ifdef CONFIG_PPC_64K_PAGES
- mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
- mmu_pte_psize = MMU_PAGE_256M;
-#else
mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
mmu_pte_psize = MMU_PAGE_1M;
-#endif
break;
case PPC_HTW_NONE:
-#ifdef CONFIG_PPC_64K_PAGES
- mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
-#else
mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
-#endif
mmu_pte_psize = mmu_virtual_psize;
break;
}
@@ -800,5 +787,9 @@ void __init early_init_mmu(void)
#ifdef CONFIG_PPC_47x
early_init_mmu_47x();
#endif
+
+#ifdef CONFIG_PPC_MM_SLICES
+ mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
+#endif
}
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index e066a658acac..e066a658acac 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 9ed90064f542..58959ce15415 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -24,11 +24,7 @@
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
-#ifdef CONFIG_PPC_64K_PAGES
-#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
-#else
#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE)
-#endif
#define VPTE_PUD_SHIFT (VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT (VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
@@ -167,13 +163,11 @@ MMU_FTR_SECTION_ELSE
ldx r14,r14,r15 /* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
-#ifndef CONFIG_PPC_64K_PAGES
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
cmpdi cr0,r14,0
bge tlb_miss_fault_bolted /* Bad pgd entry or hugepage; bail */
ldx r14,r14,r15 /* grab pud entry */
-#endif /* CONFIG_PPC_64K_PAGES */
rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
clrrdi r15,r15,3
@@ -682,18 +676,7 @@ normal_tlb_miss:
* order to handle the weird page table format used by linux
*/
ori r10,r15,0x1
-#ifdef CONFIG_PPC_64K_PAGES
- /* For the top bits, 16 bytes per PTE */
- rldicl r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
- /* Now create the bottom bits as 0 in position 0x8000 and
- * the rest calculated for 8 bytes per PTE
- */
- rldicl r15,r16,64-(PAGE_SHIFT-3),64-15
- /* Insert the bottom bits in */
- rlwimi r14,r15,0,16,31
-#else
rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
-#endif
sldi r15,r10,60
clrrdi r14,r14,3
or r10,r15,r14
@@ -732,11 +715,7 @@ finish_normal_tlb_miss:
/* Check page size, if not standard, update MAS1 */
rldicl r11,r14,64-8,64-8
-#ifdef CONFIG_PPC_64K_PAGES
- cmpldi cr0,r11,BOOK3E_PAGESZ_64K
-#else
cmpldi cr0,r11,BOOK3E_PAGESZ_4K
-#endif
beq- 1f
mfspr r11,SPRN_MAS1
rlwimi r11,r14,31,21,24
@@ -857,14 +836,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
cmpdi cr0,r15,0
bge virt_page_table_tlb_miss_fault
-#ifndef CONFIG_PPC_64K_PAGES
/* Get to PUD entry */
rldicl r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
clrrdi r10,r11,3
ldx r15,r10,r15
cmpdi cr0,r15,0
bge virt_page_table_tlb_miss_fault
-#endif /* CONFIG_PPC_64K_PAGES */
/* Get to PMD entry */
rldicl r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
@@ -1106,14 +1083,12 @@ htw_tlb_miss:
cmpdi cr0,r15,0
bge htw_tlb_miss_fault
-#ifndef CONFIG_PPC_64K_PAGES
/* Get to PUD entry */
rldicl r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
clrrdi r10,r11,3
ldx r15,r10,r15
cmpdi cr0,r15,0
bge htw_tlb_miss_fault
-#endif /* CONFIG_PPC_64K_PAGES */
/* Get to PMD entry */
rldicl r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
@@ -1132,9 +1107,7 @@ htw_tlb_miss:
* 4K page we need to extract a bit from the virtual address and
* insert it into the "PA52" bit of the RPN.
*/
-#ifndef CONFIG_PPC_64K_PAGES
rlwimi r15,r16,32-9,20,20
-#endif
/* Now we build the MAS:
*
* MAS 0 : Fully setup with defaults in MAS4 and TLBnCFG
@@ -1144,11 +1117,7 @@ htw_tlb_miss:
* MAS 2 : Use defaults
* MAS 3+7 : Needs to be done
*/
-#ifdef CONFIG_PPC_64K_PAGES
- ori r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
-#else
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-#endif
BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f976676004ad..57e64273cb33 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -32,7 +32,6 @@
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
-#include <asm/cputhreads.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
@@ -908,16 +907,22 @@ static int __init early_numa(char *p)
}
early_param("numa", early_numa);
-static bool topology_updates_enabled = true;
+/*
+ * The platform can inform us through one of several mechanisms
+ * (post-migration device tree updates, PRRN or VPHN) that the NUMA
+ * assignment of a resource has changed. This controls whether we act
+ * on that. Disabled by default.
+ */
+static bool topology_updates_enabled;
static int __init early_topology_updates(char *p)
{
if (!p)
return 0;
- if (!strcmp(p, "off")) {
- pr_info("Disabling topology updates\n");
- topology_updates_enabled = false;
+ if (!strcmp(p, "on")) {
+ pr_warn("Caution: enabling topology updates\n");
+ topology_updates_enabled = true;
}
return 0;
@@ -1063,7 +1068,7 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-#include "vphn.h"
+#include "book3s64/vphn.h"
struct topology_update_data {
struct topology_update_data *next;
@@ -1498,6 +1503,9 @@ int start_topology_update(void)
{
int rc = 0;
+ if (!topology_updates_enabled)
+ return 0;
+
if (firmware_has_feature(FW_FEATURE_PRRN)) {
if (!prrn_enabled) {
prrn_enabled = 1;
@@ -1531,6 +1539,9 @@ int stop_topology_update(void)
{
int rc = 0;
+ if (!topology_updates_enabled)
+ return 0;
+
if (prrn_enabled) {
prrn_enabled = 0;
#ifdef CONFIG_SMP
@@ -1588,11 +1599,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
kbuf[read_len] = '\0';
- if (!strncmp(kbuf, "on", 2))
+ if (!strncmp(kbuf, "on", 2)) {
+ topology_updates_enabled = true;
start_topology_update();
- else if (!strncmp(kbuf, "off", 3))
+ } else if (!strncmp(kbuf, "off", 3)) {
stop_topology_update();
- else
+ topology_updates_enabled = false;
+ } else
return -EINVAL;
return count;
@@ -1607,9 +1620,7 @@ static const struct file_operations topology_ops = {
static int topology_update_init(void)
{
- /* Do not poll for changes if disabled at boot */
- if (topology_updates_enabled)
- start_topology_update();
+ start_topology_update();
if (vphn_enabled)
topology_schedule_update();
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index d3d61d29b4f1..db4a6253df92 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include <asm/hugetlb.h>
static inline int is_exec_fault(void)
{
@@ -299,3 +300,116 @@ unsigned long vmalloc_to_phys(void *va)
return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
+
+/*
+ * We have 4 cases for pgds and pmds:
+ * (1) invalid (all zeroes)
+ * (2) pointer to next table, as normal; bottom 6 bits == 0
+ * (3) leaf pte for huge page _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
+ *
+ * So long as we atomically load page table pointers we are safe against teardown,
+ * we can follow the address down to the page and take a ref on it.
+ * This function needs to be called with interrupts disabled. We use this variant
+ * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
+ */
+pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
+ bool *is_thp, unsigned *hpage_shift)
+{
+ pgd_t pgd, *pgdp;
+ pud_t pud, *pudp;
+ pmd_t pmd, *pmdp;
+ pte_t *ret_pte;
+ hugepd_t *hpdp = NULL;
+ unsigned pdshift = PGDIR_SHIFT;
+
+ if (hpage_shift)
+ *hpage_shift = 0;
+
+ if (is_thp)
+ *is_thp = false;
+
+ pgdp = pgdir + pgd_index(ea);
+ pgd = READ_ONCE(*pgdp);
+ /*
+ * Always operate on the local stack value. This makes sure the
+ * value doesn't get updated by a parallel THP split/collapse,
+ * page fault or a page unmap. The returned pte_t * is still not
+ * stable, so it should be checked again for the above conditions.
+ */
+ if (pgd_none(pgd))
+ return NULL;
+
+ if (pgd_huge(pgd)) {
+ ret_pte = (pte_t *)pgdp;
+ goto out;
+ }
+ if (is_hugepd(__hugepd(pgd_val(pgd)))) {
+ hpdp = (hugepd_t *)&pgd;
+ goto out_huge;
+ }
+
+ /*
+ * Even if we end up with an unmap, the pgtable will not
+ * be freed, because we do an rcu free and here we are
+ * irq disabled
+ */
+ pdshift = PUD_SHIFT;
+ pudp = pud_offset(&pgd, ea);
+ pud = READ_ONCE(*pudp);
+
+ if (pud_none(pud))
+ return NULL;
+
+ if (pud_huge(pud)) {
+ ret_pte = (pte_t *)pudp;
+ goto out;
+ }
+ if (is_hugepd(__hugepd(pud_val(pud)))) {
+ hpdp = (hugepd_t *)&pud;
+ goto out_huge;
+ }
+ pdshift = PMD_SHIFT;
+ pmdp = pmd_offset(&pud, ea);
+ pmd = READ_ONCE(*pmdp);
+ /*
+ * A hugepage collapse is captured by pmd_none, because
+ * it marks the pmd none and does a hpte invalidate.
+ */
+ if (pmd_none(pmd))
+ return NULL;
+
+ if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
+ if (is_thp)
+ *is_thp = true;
+ ret_pte = (pte_t *)pmdp;
+ goto out;
+ }
+ /*
+ * The pmd_large() check below will handle the swap pmd pte;
+ * we need to do both checks because they are config
+ * dependent.
+ */
+ if (pmd_huge(pmd) || pmd_large(pmd)) {
+ ret_pte = (pte_t *)pmdp;
+ goto out;
+ }
+ if (is_hugepd(__hugepd(pmd_val(pmd)))) {
+ hpdp = (hugepd_t *)&pmd;
+ goto out_huge;
+ }
+
+ return pte_offset_kernel(&pmd, ea);
+
+out_huge:
+ if (!hpdp)
+ return NULL;
+
+ ret_pte = hugepte_offset(*hpdp, ea, pdshift);
+ pdshift = hugepd_shift(*hpdp);
+out:
+ if (hpage_shift)
+ *hpage_shift = pdshift;
+ return ret_pte;
+}
+EXPORT_SYMBOL_GPL(__find_linux_pte);
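
The new __find_linux_pte() walk relies on the pattern its comments describe: snapshot each level's entry into a local with READ_ONCE() and test only that local copy, so a concurrent split/collapse or unmap cannot change the value under the test. A toy user-space sketch of the same idea, with invented types, a single-level "pgd", and the GCC __atomic_load_n builtin standing in for READ_ONCE():

#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_LEVEL 4   /* invented table size for the sketch */

typedef uint64_t toy_pte_t;

struct toy_mm {
        toy_pte_t *pgd[PTRS_PER_LEVEL];   /* NULL plays the role of pgd_none() */
};

/* Read the entry once into a local, then decide only on that local copy. */
static toy_pte_t *toy_find_pte(struct toy_mm *mm, unsigned int pgd_idx,
                               unsigned int pte_idx)
{
        toy_pte_t *pgd_entry = __atomic_load_n(&mm->pgd[pgd_idx], __ATOMIC_RELAXED);

        if (!pgd_entry)         /* the pgd_none() case: bail out early */
                return NULL;

        return &pgd_entry[pte_idx];
}

int main(void)
{
        static toy_pte_t page[PTRS_PER_LEVEL] = { 0x42 };
        struct toy_mm mm = { .pgd = { page } };
        toy_pte_t *p = toy_find_pte(&mm, 0, 0);

        printf("pte = %#llx\n", p ? (unsigned long long)*p : 0ULL);
        return 0;
}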
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 6e56a6240bfa..16ada373b32b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -36,26 +36,13 @@
#include <asm/setup.h>
#include <asm/sections.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[], _sinittext[], _einittext[];
-__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
- if (!slab_is_available())
- return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
-
- return (pte_t *)pte_fragment_alloc(mm, 1);
-}
-
-pgtable_t pte_alloc_one(struct mm_struct *mm)
-{
- return (pgtable_t)pte_fragment_alloc(mm, 0);
-}
-
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
@@ -205,7 +192,29 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
+static void __init *early_alloc_pgtable(unsigned long size)
+{
+ void *ptr = memblock_alloc(size, size);
+
+ if (!ptr)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, size, size);
+
+ return ptr;
+}
+
+static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+{
+ if (pmd_none(*pmdp)) {
+ pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
+
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+ return pte_offset_kernel(pmdp, va);
+}
+
+
+int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
pmd_t *pd;
pte_t *pg;
@@ -214,7 +223,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
/* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
/* Use middle 10 bits of VA to index the second-level map */
- pg = pte_alloc_kernel(pd, va);
+ if (likely(slab_is_available()))
+ pg = pte_alloc_kernel(pd, va);
+ else
+ pg = early_pte_alloc_kernel(pd, va);
if (pg != 0) {
err = 0;
/* The PTE should never be already set nor present in the
@@ -384,6 +396,9 @@ void mark_rodata_ro(void)
PFN_DOWN((unsigned long)__start_rodata);
change_page_attr(page, numpages, PAGE_KERNEL_RO);
+
+ // mark_initmem_nx() should have already run by now
+ ptdump_check_wx();
}
#endif
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index fb1375c07e8c..d2d976ff8a0e 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -52,7 +52,7 @@
#include <asm/firmware.h>
#include <asm/dma.h>
-#include "mmu_decl.h"
+#include <mm/mmu_decl.h>
#ifdef CONFIG_PPC_BOOK3S_64
@@ -90,14 +90,13 @@ unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
-unsigned long __kernel_virt_size;
-EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
+unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
@@ -121,6 +120,11 @@ void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_
if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
+ if ((ea + size) >= (void *)IOREMAP_END) {
+ pr_warn("Outside the supported range\n");
+ return NULL;
+ }
+
WARN_ON(pa & ~PAGE_MASK);
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
WARN_ON(size & ~PAGE_MASK);
@@ -328,6 +332,9 @@ void mark_rodata_ro(void)
radix__mark_rodata_ro();
else
hash__mark_rodata_ro();
+
+ // mark_initmem_nx() should have already run by now
+ ptdump_check_wx();
}
void mark_initmem_nx(void)
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index b430e4e08af6..b9bda0105841 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -500,7 +500,7 @@ static void populate_markers(void)
address_markers[7].start_address = IOREMAP_BASE;
address_markers[8].start_address = IOREMAP_END;
#ifdef CONFIG_PPC_BOOK3S_64
- address_markers[9].start_address = H_VMEMMAP_BASE;
+ address_markers[9].start_address = H_VMEMMAP_START;
#else
address_markers[9].start_address = VMEMMAP_BASE;
#endif
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 37138428ab55..646876d9da64 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -31,7 +31,7 @@
#include "ptdump.h"
#ifdef CONFIG_PPC32
-#define KERN_VIRT_START 0
+#define KERN_VIRT_START PAGE_OFFSET
#endif
/*
@@ -68,6 +68,8 @@ struct pg_state {
unsigned long last_pa;
unsigned int level;
u64 current_flags;
+ bool check_wx;
+ unsigned long wx_pages;
};
struct addr_marker {
@@ -101,9 +103,25 @@ static struct addr_marker address_markers[] = {
{ 0, "Fixmap start" },
{ 0, "Fixmap end" },
#endif
+#ifdef CONFIG_KASAN
+ { 0, "kasan shadow mem start" },
+ { 0, "kasan shadow mem end" },
+#endif
{ -1, NULL },
};
+#define pt_dump_seq_printf(m, fmt, args...) \
+({ \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_seq_putc(m, c) \
+({ \
+ if (m) \
+ seq_putc(m, c); \
+})
+
static void dump_flag_info(struct pg_state *st, const struct flag_info
*flag, u64 pte, int num)
{
@@ -121,19 +139,19 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
val = pte & flag->val;
if (flag->shift)
val = val >> flag->shift;
- seq_printf(st->seq, " %s:%llx", flag->set, val);
+ pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
} else {
if ((pte & flag->mask) == flag->val)
s = flag->set;
else
s = flag->clear;
if (s)
- seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
st->current_flags &= ~flag->mask;
}
if (st->current_flags != 0)
- seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
+ pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}
static void dump_addr(struct pg_state *st, unsigned long addr)
@@ -148,12 +166,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#define REG "0x%08lx"
#endif
- seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
+ pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
- seq_printf(st->seq, "[" REG "]", st->start_pa);
+ pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
delta = PAGE_SIZE >> 10;
} else {
- seq_printf(st->seq, " " REG " ", st->start_pa);
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
delta = (addr - st->start_address) >> 10;
}
/* Work out what appropriate unit to use */
@@ -161,10 +179,24 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c", delta, *unit);
+ pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
}
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+
+ if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+ return;
+
+ WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val)
{
@@ -178,7 +210,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->start_address = addr;
st->start_pa = pa;
st->last_pa = pa;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
/*
* Dump the section of virtual memory when:
* - the PTE flags from one entry to the next differs.
@@ -194,6 +226,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
/* Check the PTE flags */
if (st->current_flags) {
+ note_prot_wx(st, addr);
dump_addr(st, addr);
/* Dump all the flags */
@@ -202,7 +235,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->current_flags,
pg_level[st->level].num);
- seq_putc(st->seq, '\n');
+ pt_dump_seq_putc(st->seq, '\n');
}
/*
@@ -211,7 +244,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
while (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
}
st->start_address = addr;
st->start_pa = pa;
@@ -303,8 +336,9 @@ static void populate_markers(void)
address_markers[i++].start_address = PHB_IO_END;
address_markers[i++].start_address = IOREMAP_BASE;
address_markers[i++].start_address = IOREMAP_END;
+ /* What is the ifdef about? */
#ifdef CONFIG_PPC_BOOK3S_64
- address_markers[i++].start_address = H_VMEMMAP_BASE;
+ address_markers[i++].start_address = H_VMEMMAP_START;
#else
address_markers[i++].start_address = VMEMMAP_BASE;
#endif
@@ -322,6 +356,10 @@ static void populate_markers(void)
#endif
address_markers[i++].start_address = FIXADDR_START;
address_markers[i++].start_address = FIXADDR_TOP;
+#ifdef CONFIG_KASAN
+ address_markers[i++].start_address = KASAN_SHADOW_START;
+ address_markers[i++].start_address = KASAN_SHADOW_END;
+#endif
#endif /* CONFIG_PPC64 */
}
@@ -366,6 +404,30 @@ static void build_pgtable_complete_mask(void)
pg_level[i].mask |= pg_level[i].flag[j].mask;
}
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void)
+{
+ struct pg_state st = {
+ .seq = NULL,
+ .marker = address_markers,
+ .check_wx = true,
+ };
+
+ if (radix_enabled())
+ st.start_address = PAGE_OFFSET;
+ else
+ st.start_address = KERN_VIRT_START;
+
+ walk_pagetables(&st);
+
+ if (st.wx_pages)
+ pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
+ st.wx_pages);
+ else
+ pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+#endif
+
static int ptdump_init(void)
{
struct dentry *debugfs_file;
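
The ptdump_check_wx() / note_prot_wx() addition above warns when a mapping carries every bit of pgprot_val(PAGE_KERNEL_X), i.e. a writable and executable kernel protection. The core test is a plain mask-and-compare; a tiny sketch with invented permission bits:

#include <stdbool.h>
#include <stdio.h>

/* Invented permission bits; the kernel compares against pgprot_val(PAGE_KERNEL_X). */
#define F_READ   0x1UL
#define F_WRITE  0x2UL
#define F_EXEC   0x4UL

/* A region is a W+X problem only if it is both writable and executable. */
static bool is_wx(unsigned long flags)
{
        unsigned long wx = F_WRITE | F_EXEC;

        return (flags & wx) == wx;
}

int main(void)
{
        printf("%d %d %d\n",
               is_wx(F_READ | F_WRITE),           /* 0: writable but not executable */
               is_wx(F_READ | F_EXEC),            /* 0: executable but read-only */
               is_wx(F_READ | F_WRITE | F_EXEC)); /* 1: the W+X case the dump warns about */
        return 0;
}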
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index aec91dbcdc0b..97fbf7b54422 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -101,7 +101,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *vma;
- if ((mm->context.slb_addr_limit - len) < addr)
+ if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
return 0;
vma = find_vma(mm, addr);
return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -118,13 +118,11 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
unsigned long start = slice << SLICE_HIGH_SHIFT;
unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
-#ifdef CONFIG_PPC64
/* Hack, so that each addresses is controlled by exactly one
* of the high or low area bitmaps, the first high area starts
* at 4GB, not 0 */
if (start == 0)
- start = SLICE_LOW_TOP;
-#endif
+ start = (unsigned long)SLICE_LOW_TOP;
return !slice_area_is_free(mm, start, end - start);
}
@@ -150,40 +148,6 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
__set_bit(i, ret->high_slices);
}
-#ifdef CONFIG_PPC_BOOK3S_64
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
-#ifdef CONFIG_PPC_64K_PAGES
- if (psize == MMU_PAGE_64K)
- return &mm->context.mask_64k;
-#endif
- if (psize == MMU_PAGE_4K)
- return &mm->context.mask_4k;
-#ifdef CONFIG_HUGETLB_PAGE
- if (psize == MMU_PAGE_16M)
- return &mm->context.mask_16m;
- if (psize == MMU_PAGE_16G)
- return &mm->context.mask_16g;
-#endif
- BUG();
-}
-#elif defined(CONFIG_PPC_8xx)
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
- if (psize == mmu_virtual_psize)
- return &mm->context.mask_base_psize;
-#ifdef CONFIG_HUGETLB_PAGE
- if (psize == MMU_PAGE_512K)
- return &mm->context.mask_512k;
- if (psize == MMU_PAGE_8M)
- return &mm->context.mask_8m;
-#endif
- BUG();
-}
-#else
-#error "Must define the slice masks for page sizes supported by the platform"
-#endif
-
static bool slice_check_range_fits(struct mm_struct *mm,
const struct slice_mask *available,
unsigned long start, unsigned long len)
@@ -246,14 +210,14 @@ static void slice_convert(struct mm_struct *mm,
slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
slice_print_mask(" mask", mask);
- psize_mask = slice_mask_for_size(mm, psize);
+ psize_mask = slice_mask_for_size(&mm->context, psize);
/* We need to use a spinlock here to protect against
* concurrent 64k -> 4k demotion ...
*/
spin_lock_irqsave(&slice_convert_lock, flags);
- lpsizes = mm->context.low_slices_psize;
+ lpsizes = mm_ctx_low_slices(&mm->context);
for (i = 0; i < SLICE_NUM_LOW; i++) {
if (!(mask->low_slices & (1u << i)))
continue;
@@ -263,7 +227,7 @@ static void slice_convert(struct mm_struct *mm,
/* Update the slice_mask */
old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
- old_mask = slice_mask_for_size(mm, old_psize);
+ old_mask = slice_mask_for_size(&mm->context, old_psize);
old_mask->low_slices &= ~(1u << i);
psize_mask->low_slices |= 1u << i;
@@ -272,8 +236,8 @@ static void slice_convert(struct mm_struct *mm,
(((unsigned long)psize) << (mask_index * 4));
}
- hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+ hpsizes = mm_ctx_high_slices(&mm->context);
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
if (!test_bit(i, mask->high_slices))
continue;
@@ -282,7 +246,7 @@ static void slice_convert(struct mm_struct *mm,
/* Update the slice_mask */
old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
- old_mask = slice_mask_for_size(mm, old_psize);
+ old_mask = slice_mask_for_size(&mm->context, old_psize);
__clear_bit(i, old_mask->high_slices);
__set_bit(i, psize_mask->high_slices);
@@ -292,8 +256,8 @@ static void slice_convert(struct mm_struct *mm,
}
slice_dbg(" lsps=%lx, hsps=%lx\n",
- (unsigned long)mm->context.low_slices_psize,
- (unsigned long)mm->context.high_slices_psize);
+ (unsigned long)mm_ctx_low_slices(&mm->context),
+ (unsigned long)mm_ctx_high_slices(&mm->context));
spin_unlock_irqrestore(&slice_convert_lock, flags);
@@ -393,7 +357,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* DEFAULT_MAP_WINDOW we should apply this.
*/
if (high_limit > DEFAULT_MAP_WINDOW)
- addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
+ addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
while (addr > min_addr) {
info.high_limit = addr;
@@ -505,20 +469,20 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
return -ENOMEM;
}
- if (high_limit > mm->context.slb_addr_limit) {
+ if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
/*
* Increasing the slb_addr_limit does not require
* slice mask cache to be recalculated because it should
* be already initialised beyond the old address limit.
*/
- mm->context.slb_addr_limit = high_limit;
+ mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
on_each_cpu(slice_flush_segments, mm, 1);
}
/* Sanity checks */
BUG_ON(mm->task_size == 0);
- BUG_ON(mm->context.slb_addr_limit == 0);
+ BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
VM_BUG_ON(radix_enabled());
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
@@ -538,7 +502,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* First make up a "good" mask of slices that have the right size
* already
*/
- maskp = slice_mask_for_size(mm, psize);
+ maskp = slice_mask_for_size(&mm->context, psize);
/*
* Here "good" means slices that are already the right page size,
@@ -565,7 +529,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* a pointer to good mask for the next code to use.
*/
if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
- compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+ compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
if (fixed)
slice_or_mask(&good_mask, maskp, compat_maskp);
else
@@ -642,14 +606,13 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
newaddr = slice_find_area(mm, len, &potential_mask,
psize, topdown, high_limit);
-#ifdef CONFIG_PPC_64K_PAGES
- if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
+ if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
+ psize == MMU_PAGE_64K) {
/* retry the search with 4k-page slices included */
slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
newaddr = slice_find_area(mm, len, &potential_mask,
psize, topdown, high_limit);
}
-#endif
if (newaddr == -ENOMEM)
return -ENOMEM;
@@ -696,7 +659,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long flags)
{
return slice_get_unmapped_area(addr, len, flags,
- current->mm->context.user_psize, 0);
+ mm_ctx_user_psize(&current->mm->context), 0);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -706,7 +669,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long flags)
{
return slice_get_unmapped_area(addr0, len, flags,
- current->mm->context.user_psize, 1);
+ mm_ctx_user_psize(&current->mm->context), 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
@@ -717,10 +680,10 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
VM_BUG_ON(radix_enabled());
if (slice_addr_is_low(addr)) {
- psizes = mm->context.low_slices_psize;
+ psizes = mm_ctx_low_slices(&mm->context);
index = GET_LOW_SLICE_INDEX(addr);
} else {
- psizes = mm->context.high_slices_psize;
+ psizes = mm_ctx_high_slices(&mm->context);
index = GET_HIGH_SLICE_INDEX(addr);
}
mask_index = index & 0x1;
@@ -741,27 +704,22 @@ void slice_init_new_context_exec(struct mm_struct *mm)
* case of fork it is just inherited from the mm being
* duplicated.
*/
-#ifdef CONFIG_PPC64
- mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-#else
- mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-#endif
-
- mm->context.user_psize = psize;
+ mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
+ mm_ctx_set_user_psize(&mm->context, psize);
/*
* Set all slice psizes to the default.
*/
- lpsizes = mm->context.low_slices_psize;
+ lpsizes = mm_ctx_low_slices(&mm->context);
memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
- hpsizes = mm->context.high_slices_psize;
+ hpsizes = mm_ctx_high_slices(&mm->context);
memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
/*
* Slice mask cache starts zeroed, fill the default size cache.
*/
- mask = slice_mask_for_size(mm, psize);
+ mask = slice_mask_for_size(&mm->context, psize);
mask->low_slices = ~0UL;
if (SLICE_NUM_HIGH)
bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
@@ -777,7 +735,7 @@ void slice_setup_new_exec(void)
if (!is_32bit_task())
return;
- mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+ mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
#endif
@@ -816,22 +774,21 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
const struct slice_mask *maskp;
- unsigned int psize = mm->context.user_psize;
+ unsigned int psize = mm_ctx_user_psize(&mm->context);
VM_BUG_ON(radix_enabled());
- maskp = slice_mask_for_size(mm, psize);
-#ifdef CONFIG_PPC_64K_PAGES
+ maskp = slice_mask_for_size(&mm->context, psize);
+
/* We need to account for 4k slices too */
- if (psize == MMU_PAGE_64K) {
+ if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
const struct slice_mask *compat_maskp;
struct slice_mask available;
- compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+ compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
slice_or_mask(&available, maskp, compat_maskp);
return !slice_check_range_fits(mm, &available, addr, len);
}
-#endif
return !slice_check_range_fits(mm, maskp, addr, len);
}
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index ab26df5bacb9..c155dcbb8691 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
- isa207-common.o power8-pmu.o power9-pmu.o
+ isa207-common.o power8-pmu.o power9-pmu.o \
+ generic-compat-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index b0723002a396..a66fb9c01c9e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -22,6 +22,10 @@
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#ifdef CONFIG_PPC64
+#include "internal.h"
+#endif
+
#define BHRB_MAX_ENTRIES 32
#define BHRB_TARGET 0x0000000000000002
#define BHRB_PREDICTION 0x0000000000000001
@@ -2294,3 +2298,27 @@ int register_power_pmu(struct power_pmu *pmu)
power_pmu_prepare_cpu, NULL);
return 0;
}
+
+#ifdef CONFIG_PPC64
+static int __init init_ppc64_pmu(void)
+{
+ /* run through all the pmu drivers one at a time */
+ if (!init_power5_pmu())
+ return 0;
+ else if (!init_power5p_pmu())
+ return 0;
+ else if (!init_power6_pmu())
+ return 0;
+ else if (!init_power7_pmu())
+ return 0;
+ else if (!init_power8_pmu())
+ return 0;
+ else if (!init_power9_pmu())
+ return 0;
+ else if (!init_ppc970_pmu())
+ return 0;
+ else
+ return init_generic_compat_pmu();
+}
+early_initcall(init_ppc64_pmu);
+#endif
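
init_ppc64_pmu() above replaces the per-driver early_initcalls with a single initcall that tries each CPU-specific PMU init in turn and falls back to the generic compat PMU. The same try-in-order idea, written here as a table walk with invented probe functions:

#include <stdio.h>

/* Invented probes standing in for init_power9_pmu() and friends. */
static int probe_power_a(void) { return -1; }  /* pretend: wrong CPU */
static int probe_power_b(void) { return 0; }   /* pretend: this one matches */
static int probe_generic(void) { return 0; }   /* catch-all, like the compat PMU */

int main(void)
{
        int (*probes[])(void) = { probe_power_a, probe_power_b, probe_generic };
        unsigned int i;

        for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
                if (!probes[i]()) {
                        printf("registered PMU driver %u\n", i);
                        break;
                }
        }
        return 0;
}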
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
new file mode 100644
index 000000000000..5e5a54d5588e
--- /dev/null
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
+
+#define pr_fmt(fmt) "generic-compat-pmu: " fmt
+
+#include "isa207-common.h"
+
+/*
+ * Raw event encoding:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ pmc ] [unit ] [ ] m [ pmcxsel ]
+ * | |
+ * | *- mark
+ * |
+ * |
+ * *- combine
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[24] = pmc1combine[0]
+ * MMCR1[25] = pmc1combine[1]
+ * MMCR1[26] = pmc2combine[0]
+ * MMCR1[27] = pmc2combine[1]
+ * MMCR1[28] = pmc3combine[0]
+ * MMCR1[29] = pmc3combine[1]
+ * MMCR1[30] = pmc4combine[0]
+ * MMCR1[31] = pmc4combine[1]
+ *
+ */
+
+/*
+ * Some power9 event codes.
+ */
+#define EVENT(_name, _code) _name = _code,
+
+enum {
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_INST_CMPL, 0x00002)
+};
+
+#undef EVENT
+
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+
+static struct attribute *generic_compat_events_attr[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ NULL
+};
+
+static struct attribute_group generic_compat_pmu_events_group = {
+ .name = "events",
+ .attrs = generic_compat_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-19");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:10-11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+
+static struct attribute *generic_compat_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ NULL,
+};
+
+static struct attribute_group generic_compat_pmu_format_group = {
+ .name = "format",
+ .attrs = generic_compat_pmu_format_attr,
+};
+
+static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
+ &generic_compat_pmu_format_group,
+ &generic_compat_pmu_events_group,
+ NULL,
+};
+
+static int compat_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
+static struct power_pmu generic_compat_pmu = {
+ .name = "GENERIC_COMPAT",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .compute_mmcr = isa207_compute_mmcr,
+ .get_constraint = isa207_get_constraint,
+ .disable_pmc = isa207_disable_pmc,
+ .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .n_generic = ARRAY_SIZE(compat_generic_events),
+ .generic_events = compat_generic_events,
+ .cache_events = &generic_compat_cache_events,
+ .attr_groups = generic_compat_pmu_attr_groups,
+};
+
+int init_generic_compat_pmu(void)
+{
+ int rc = 0;
+
+ rc = register_power_pmu(&generic_compat_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
+}
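
The PMU_FORMAT_ATTR() strings in the new generic-compat PMU place pmcxsel in config bits 0-7, mark in bit 8, combine in bits 10-11, unit in bits 12-15 and pmc in bits 16-19. Decoding a raw config word by those positions is just shift-and-mask; a quick sketch (the config value is made up):

#include <stdint.h>
#include <stdio.h>

/* Extract bits lo..hi (inclusive) of a config word. */
#define FIELD(cfg, lo, hi) (((cfg) >> (lo)) & ((1ULL << ((hi) - (lo) + 1)) - 1))

int main(void)
{
        uint64_t config = 0x1401eULL;   /* made-up raw event, for illustration only */

        printf("pmcxsel=%#llx mark=%llu combine=%llu unit=%llu pmc=%llu\n",
               (unsigned long long)FIELD(config, 0, 7),
               (unsigned long long)FIELD(config, 8, 8),
               (unsigned long long)FIELD(config, 10, 11),
               (unsigned long long)FIELD(config, 12, 15),
               (unsigned long long)FIELD(config, 16, 19));
        return 0;
}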
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index b1c37cc3fa98..31fa753e2eb2 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -43,12 +43,17 @@ static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;
+/* Trace IMC data structures */
+static DEFINE_PER_CPU(u64 *, trace_imc_mem);
+static struct imc_pmu_ref *trace_imc_refc;
+static int trace_imc_mem_size;
+
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct imc_pmu, pmu);
}
-PMU_FORMAT_ATTR(event, "config:0-40");
+PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(rvalue, "config:32");
PMU_FORMAT_ATTR(mode, "config:33-40");
@@ -65,6 +70,25 @@ static struct attribute_group imc_format_group = {
.attrs = imc_format_attrs,
};
+/* Format attribute for imc trace-mode */
+PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
+PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
+PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
+PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
+static struct attribute *trace_imc_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_cpmc_reserved.attr,
+ &format_attr_cpmc_event.attr,
+ &format_attr_cpmc_samplesel.attr,
+ &format_attr_cpmc_load.attr,
+ NULL,
+};
+
+static struct attribute_group trace_imc_format_group = {
+.name = "format",
+.attrs = trace_imc_format_attrs,
+};
+
/* Get the cpumask printed to a buffer "buf" */
static ssize_t imc_pmu_cpumask_get_attr(struct device *dev,
struct device_attribute *attr,
@@ -487,6 +511,11 @@ static int nest_imc_event_init(struct perf_event *event)
 * Get the base memory address for this cpu.
*/
chip_id = cpu_to_chip_id(event->cpu);
+
+ /* Return, if chip_id is not valid */
+ if (chip_id < 0)
+ return -ENODEV;
+
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
@@ -494,7 +523,7 @@ static int nest_imc_event_init(struct perf_event *event)
break;
}
pcni++;
- } while (pcni);
+ } while (pcni->vbase != 0);
if (!flag)
return -ENODEV;
@@ -788,8 +817,11 @@ static int core_imc_event_init(struct perf_event *event)
}
/*
- * Allocates a page of memory for each of the online cpus, and write the
- * physical base address of that page to the LDBAR for that cpu.
+ * Allocates a page of memory for each of the online cpus, and loads
+ * LDBAR with 0.
+ * The physical base address of the page allocated for a cpu will be
+ * written to the LDBAR for that cpu, when the thread-imc event
+ * is added.
*
* LDBAR Register Layout:
*
@@ -807,7 +839,7 @@ static int core_imc_event_init(struct perf_event *event)
*/
static int thread_imc_mem_alloc(int cpu_id, int size)
{
- u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
+ u64 *local_mem = per_cpu(thread_imc_mem, cpu_id);
int nid = cpu_to_node(cpu_id);
if (!local_mem) {
@@ -824,9 +856,7 @@ static int thread_imc_mem_alloc(int cpu_id, int size)
per_cpu(thread_imc_mem, cpu_id) = local_mem;
}
- ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
-
- mtspr(SPRN_LDBAR, ldbar_value);
+ mtspr(SPRN_LDBAR, 0);
return 0;
}
@@ -858,6 +888,9 @@ static int thread_imc_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
/* Sampling not supported */
if (event->hw.sample_period)
return -EINVAL;
@@ -977,6 +1010,7 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
{
int core_id;
struct imc_pmu_ref *ref;
+ u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, smp_processor_id());
if (flags & PERF_EF_START)
imc_event_start(event, flags);
@@ -985,6 +1019,9 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
return -EINVAL;
core_id = smp_processor_id() / threads_per_core;
+ ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | THREAD_IMC_ENABLE;
+ mtspr(SPRN_LDBAR, ldbar_value);
+
/*
* imc pmus are enabled only when it is used.
* See if this is triggered for the first time.
@@ -1016,11 +1053,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
int core_id;
struct imc_pmu_ref *ref;
- /*
- * Take a snapshot and calculate the delta and update
- * the event counter values.
- */
- imc_event_update(event);
+ mtspr(SPRN_LDBAR, 0);
core_id = smp_processor_id() / threads_per_core;
ref = &core_imc_refc[core_id];
@@ -1039,6 +1072,240 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
ref->refc = 0;
}
mutex_unlock(&ref->lock);
+ /*
+ * Take a snapshot and calculate the delta and update
+ * the event counter values.
+ */
+ imc_event_update(event);
+}
+
+/*
+ * Allocate a page of memory for each cpu, and load LDBAR with 0.
+ */
+static int trace_imc_mem_alloc(int cpu_id, int size)
+{
+ u64 *local_mem = per_cpu(trace_imc_mem, cpu_id);
+ int phys_id = cpu_to_node(cpu_id), rc = 0;
+ int core_id = (cpu_id / threads_per_core);
+
+ if (!local_mem) {
+ local_mem = page_address(alloc_pages_node(phys_id,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
+ __GFP_NOWARN, get_order(size)));
+ if (!local_mem)
+ return -ENOMEM;
+ per_cpu(trace_imc_mem, cpu_id) = local_mem;
+
+ /* Initialise the counters for trace mode */
+ rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_TRACE, __pa((void *)local_mem),
+ get_hard_smp_processor_id(cpu_id));
+ if (rc) {
+ pr_info("IMC:opal init failed for trace imc\n");
+ return rc;
+ }
+ }
+
+ /* Init the mutex, if not already */
+ trace_imc_refc[core_id].id = core_id;
+ mutex_init(&trace_imc_refc[core_id].lock);
+
+ mtspr(SPRN_LDBAR, 0);
+ return 0;
+}
+
+static int ppc_trace_imc_cpu_online(unsigned int cpu)
+{
+ return trace_imc_mem_alloc(cpu, trace_imc_mem_size);
+}
+
+static int ppc_trace_imc_cpu_offline(unsigned int cpu)
+{
+ mtspr(SPRN_LDBAR, 0);
+ return 0;
+}
+
+static int trace_imc_cpu_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
+ "perf/powerpc/imc_trace:online",
+ ppc_trace_imc_cpu_online,
+ ppc_trace_imc_cpu_offline);
+}
+
+static u64 get_trace_imc_event_base_addr(void)
+{
+ return (u64)per_cpu(trace_imc_mem, smp_processor_id());
+}
+
+/*
+ * Function to parse trace-imc data obtained
+ * and to prepare the perf sample.
+ */
+static int trace_imc_prepare_sample(struct trace_imc_data *mem,
+ struct perf_sample_data *data,
+ u64 *prev_tb,
+ struct perf_event_header *header,
+ struct perf_event *event)
+{
+ /* Sanity checks for a valid record */
+ if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
+ *prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
+ else
+ return -EINVAL;
+
+ if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
+ be64_to_cpu(READ_ONCE(mem->tb2)))
+ return -EINVAL;
+
+ /* Prepare perf sample */
+ data->ip = be64_to_cpu(READ_ONCE(mem->ip));
+ data->period = event->hw.last_period;
+
+ header->type = PERF_RECORD_SAMPLE;
+ header->size = sizeof(*header) + event->header_size;
+ header->misc = 0;
+
+ if (is_kernel_addr(data->ip))
+ header->misc |= PERF_RECORD_MISC_KERNEL;
+ else
+ header->misc |= PERF_RECORD_MISC_USER;
+
+ perf_event_header__init_id(header, data, event);
+
+ return 0;
+}
+
+static void dump_trace_imc_data(struct perf_event *event)
+{
+ struct trace_imc_data *mem;
+ int i, ret;
+ u64 prev_tb = 0;
+
+ mem = (struct trace_imc_data *)get_trace_imc_event_base_addr();
+ for (i = 0; i < (trace_imc_mem_size / sizeof(struct trace_imc_data));
+ i++, mem++) {
+ struct perf_sample_data data;
+ struct perf_event_header header;
+
+ ret = trace_imc_prepare_sample(mem, &data, &prev_tb, &header, event);
+ if (ret) /* Exit, if not a valid record */
+ break;
+ else {
+ /* If this is a valid record, create the sample */
+ struct perf_output_handle handle;
+
+ if (perf_output_begin(&handle, event, header.size))
+ return;
+
+ perf_output_sample(&handle, &header, &data, event);
+ perf_output_end(&handle);
+ }
+ }
+}
+
+static int trace_imc_event_add(struct perf_event *event, int flags)
+{
+ int core_id = smp_processor_id() / threads_per_core;
+ struct imc_pmu_ref *ref = NULL;
+ u64 local_mem, ldbar_value;
+
+ /* Set trace-imc bit in ldbar and load ldbar with per-thread memory address */
+ local_mem = get_trace_imc_event_base_addr();
+ ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
+
+ if (core_imc_refc)
+ ref = &core_imc_refc[core_id];
+ if (!ref) {
+ /* If core-imc is not enabled, use trace-imc reference count */
+ if (trace_imc_refc)
+ ref = &trace_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+ }
+ mtspr(SPRN_LDBAR, ldbar_value);
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
+ mtspr(SPRN_LDBAR, 0);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
+
+ return 0;
+}
+
+static void trace_imc_event_read(struct perf_event *event)
+{
+ return;
+}
+
+static void trace_imc_event_stop(struct perf_event *event, int flags)
+{
+ u64 local_mem = get_trace_imc_event_base_addr();
+ dump_trace_imc_data(event);
+ memset((void *)local_mem, 0, sizeof(u64));
+}
+
+static void trace_imc_event_start(struct perf_event *event, int flags)
+{
+ return;
+}
+
+static void trace_imc_event_del(struct perf_event *event, int flags)
+{
+ int core_id = smp_processor_id() / threads_per_core;
+ struct imc_pmu_ref *ref = NULL;
+
+ if (core_imc_refc)
+ ref = &core_imc_refc[core_id];
+ if (!ref) {
+ /* If core-imc is not enabled, use trace-imc reference count */
+ if (trace_imc_refc)
+ ref = &trace_imc_refc[core_id];
+ if (!ref)
+ return;
+ }
+ mtspr(SPRN_LDBAR, 0);
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
+ trace_imc_event_stop(event, flags);
+}
+
+static int trace_imc_event_init(struct perf_event *event)
+{
+ struct task_struct *target;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* Return if this is a counting event */
+ if (event->attr.sample_period == 0)
+ return -ENOENT;
+
+ event->hw.idx = -1;
+ target = event->hw.target;
+
+ event->pmu->task_ctx_nr = perf_hw_context;
+ return 0;
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1071,6 +1338,14 @@ static int update_pmu_ops(struct imc_pmu *pmu)
pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
break;
+ case IMC_DOMAIN_TRACE:
+ pmu->pmu.event_init = trace_imc_event_init;
+ pmu->pmu.add = trace_imc_event_add;
+ pmu->pmu.del = trace_imc_event_del;
+ pmu->pmu.start = trace_imc_event_start;
+ pmu->pmu.stop = trace_imc_event_stop;
+ pmu->pmu.read = trace_imc_event_read;
+ pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
default:
break;
}
@@ -1163,6 +1438,18 @@ static void cleanup_all_thread_imc_memory(void)
}
}
+static void cleanup_all_trace_imc_memory(void)
+{
+ int i, order = get_order(trace_imc_mem_size);
+
+ for_each_online_cpu(i) {
+ if (per_cpu(trace_imc_mem, i))
+ free_pages((u64)per_cpu(trace_imc_mem, i), order);
+
+ }
+ kfree(trace_imc_refc);
+}
+
/* Function to free the attr_groups which are dynamically allocated */
static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
{
@@ -1204,6 +1491,11 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE);
cleanup_all_thread_imc_memory();
}
+
+ if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
+ cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE);
+ cleanup_all_trace_imc_memory();
+ }
}
/*
@@ -1286,6 +1578,27 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
thread_imc_pmu = pmu_ptr;
break;
+ case IMC_DOMAIN_TRACE:
+ /* Update the pmu name */
+ pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
+ if (!pmu_ptr->pmu.name)
+ return -ENOMEM;
+
+ nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
+ trace_imc_refc = kcalloc(nr_cores, sizeof(struct imc_pmu_ref),
+ GFP_KERNEL);
+ if (!trace_imc_refc)
+ return -ENOMEM;
+
+ trace_imc_mem_size = pmu_ptr->counter_mem_size;
+ for_each_online_cpu(cpu) {
+ res = trace_imc_mem_alloc(cpu, trace_imc_mem_size);
+ if (res) {
+ cleanup_all_trace_imc_memory();
+ goto err;
+ }
+ }
+ break;
default:
return -EINVAL;
}
@@ -1359,6 +1672,14 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
}
break;
+ case IMC_DOMAIN_TRACE:
+ ret = trace_imc_cpu_init();
+ if (ret) {
+ cleanup_all_trace_imc_memory();
+ goto err_free_mem;
+ }
+
+ break;
default:
return -EINVAL; /* Unknown domain */
}
diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h
new file mode 100644
index 000000000000..f755c64da137
--- /dev/null
+++ b/arch/powerpc/perf/internal.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
+
+extern int init_ppc970_pmu(void);
+extern int init_power5_pmu(void);
+extern int init_power5p_pmu(void);
+extern int init_power6_pmu(void);
+extern int init_power7_pmu(void);
+extern int init_power8_pmu(void);
+extern int init_power9_pmu(void);
+extern int init_generic_compat_pmu(void);
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index 0526dac66007..9aa803504cb2 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -677,7 +677,7 @@ static struct power_pmu power5p_pmu = {
.cache_events = &power5p_cache_events,
};
-static int __init init_power5p_pmu(void)
+int init_power5p_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
@@ -686,5 +686,3 @@ static int __init init_power5p_pmu(void)
return register_power_pmu(&power5p_pmu);
}
-
-early_initcall(init_power5p_pmu);
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index 4dc99f9f7962..30cb13d081a9 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -618,7 +618,7 @@ static struct power_pmu power5_pmu = {
.flags = PPMU_HAS_SSLOT,
};
-static int __init init_power5_pmu(void)
+int init_power5_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
@@ -626,5 +626,3 @@ static int __init init_power5_pmu(void)
return register_power_pmu(&power5_pmu);
}
-
-early_initcall(init_power5_pmu);
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 9c9d646b68a1..80ec48632cfe 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -540,7 +540,7 @@ static struct power_pmu power6_pmu = {
.cache_events = &power6_cache_events,
};
-static int __init init_power6_pmu(void)
+int init_power6_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
@@ -548,5 +548,3 @@ static int __init init_power6_pmu(void)
return register_power_pmu(&power6_pmu);
}
-
-early_initcall(init_power6_pmu);
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 6dbae9884ec4..bb6efd5d2530 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -445,7 +445,7 @@ static struct power_pmu power7_pmu = {
.cache_events = &power7_cache_events,
};
-static int __init init_power7_pmu(void)
+int init_power7_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
@@ -456,5 +456,3 @@ static int __init init_power7_pmu(void)
return register_power_pmu(&power7_pmu);
}
-
-early_initcall(init_power7_pmu);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index d12a2db26353..bcc3409a06de 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -379,7 +379,7 @@ static struct power_pmu power8_pmu = {
.bhrb_nr = 32,
};
-static int __init init_power8_pmu(void)
+int init_power8_pmu(void)
{
int rc;
@@ -399,4 +399,3 @@ static int __init init_power8_pmu(void)
return 0;
}
-early_initcall(init_power8_pmu);
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 063c9d9f2516..6b1dc9a83ede 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -63,8 +63,6 @@ EVENT(PM_RUN_CYC_ALT, 0x200f4)
/* Instruction Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
EVENT(PM_INST_DISP_ALT, 0x300f2)
-/* Alternate Branch event code */
-EVENT(PM_BR_CMPL_ALT, 0x10012)
/* Branch event that are not strongly biased */
EVENT(PM_BR_2PATH, 0x20036)
/* ALternate branch event that are not strongly biased */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 030544e35959..3a31ac6f4805 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -437,7 +437,7 @@ static struct power_pmu power9_pmu = {
.bhrb_nr = 32,
};
-static int __init init_power9_pmu(void)
+int init_power9_pmu(void)
{
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
@@ -467,4 +467,3 @@ static int __init init_power9_pmu(void)
return 0;
}
-early_initcall(init_power9_pmu);
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 8b6a8a36fa38..1d3370914022 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -490,7 +490,7 @@ static struct power_pmu ppc970_pmu = {
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
-static int __init init_ppc970_pmu(void)
+int init_ppc970_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
@@ -499,5 +499,3 @@ static int __init init_ppc970_pmu(void)
return register_power_pmu(&ppc970_pmu);
}
-
-early_initcall(init_ppc970_pmu);
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index b3097fe6441b..af265ae40a61 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -239,6 +239,7 @@ static inline struct clk *mpc512x_clk_divider(
const char *name, const char *parent_name, u8 clkflags,
u32 __iomem *reg, u8 pos, u8 len, int divflags)
{
+ divflags |= CLK_DIVIDER_BIG_ENDIAN;
return clk_register_divider(NULL, name, parent_name, clkflags,
reg, pos, len, divflags, &clklock);
}
@@ -250,7 +251,7 @@ static inline struct clk *mpc512x_clk_divtable(
{
u8 divflags;
- divflags = 0;
+ divflags = CLK_DIVIDER_BIG_ENDIAN;
return clk_register_divider_table(NULL, name, parent_name, 0,
reg, pos, len, divflags,
divtab, &clklock);
@@ -261,10 +262,12 @@ static inline struct clk *mpc512x_clk_gated(
u32 __iomem *reg, u8 pos)
{
int clkflags;
+ u8 gateflags;
clkflags = CLK_SET_RATE_PARENT;
+ gateflags = CLK_GATE_BIG_ENDIAN;
return clk_register_gate(NULL, name, parent_name, clkflags,
- reg, pos, 0, &clklock);
+ reg, pos, gateflags, &clklock);
}
static inline struct clk *mpc512x_clk_muxed(const char *name,
@@ -275,7 +278,7 @@ static inline struct clk *mpc512x_clk_muxed(const char *name,
u8 muxflags;
clkflags = CLK_SET_RATE_PARENT;
- muxflags = 0;
+ muxflags = CLK_MUX_BIG_ENDIAN;
return clk_register_mux(NULL, name,
parent_names, parent_count, clkflags,
reg, pos, len, muxflags, &clklock);
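The three mpc512x helpers above now pass CLK_DIVIDER_BIG_ENDIAN, CLK_GATE_BIG_ENDIAN and CLK_MUX_BIG_ENDIAN so the common clock framework performs the MMIO accesses for these registers as big-endian. A minimal sketch of registering one such gate; the example_* names are invented, the API calls are the ones used above.

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_clk_lock);

/* Gate whose enable bit lives in a big-endian register block. */
static struct clk *example_register_be_gate(const char *name,
					    const char *parent,
					    void __iomem *reg, u8 bit)
{
	return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT,
				 reg, bit, CLK_GATE_BIG_ENDIAN,
				 &example_clk_lock);
}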
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 17cf249b18ee..3cb2f07ce8eb 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -628,7 +628,7 @@ static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
}
file->private_data = mpc52xx_gpt_wdt;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
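The watchdog open handler above is one of many nonseekable_open() to stream_open() conversions in this merge: stream_open() marks the file as stream-like, so no file position is kept and read/write do not serialize on f_pos. A hedged sketch of the resulting open pattern, with an invented device name:

#include <linux/fs.h>

static int example_stream_dev_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;	/* per-open driver state would go here */
	return stream_open(inode, file);
}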
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 5c31d8292d3b..e7c2c3fb011a 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -221,8 +221,10 @@ int mpc837x_usb_cfg(void)
int ret = 0;
np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr");
- if (!np || !of_device_is_available(np))
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
prop = of_get_property(np, "phy_type", NULL);
if (!prop || (strcmp(prop, "ulpi") && strcmp(prop, "serial"))) {
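The hunk above fixes a device_node refcount leak: of_find_compatible_node() returns its node with an elevated refcount, so the early -ENODEV exit has to drop it too. of_node_put(NULL) is a no-op, which is why the combined !np check stays safe. A small illustrative sketch of the rule, not the driver's full code:

#include <linux/of.h>
#include <linux/errno.h>

static int example_find_usb_node(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl-usb2-dr");
	if (!np || !of_device_is_available(np)) {
		of_node_put(np);	/* safe even when np is NULL */
		return -ENODEV;
	}

	/* ... use np ... */

	of_node_put(np);
	return 0;
}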
diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
index 8d5a25d43ef3..e9617d35fd1f 100644
--- a/arch/powerpc/platforms/8xx/pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -153,10 +153,9 @@ int mpc8xx_pic_init(void)
if (mpc8xx_pic_host == NULL) {
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
ret = -ENOMEM;
- goto out;
}
- return 0;
+ ret = 0;
out:
of_node_put(np);
return ret;
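The mpc8xx PIC hunk above folds the early return into the common out: label so the of_node_put() runs on every path. A hedged sketch of that single-exit shape with invented arguments; it is the idiom, not the function above:

#include <linux/of.h>
#include <linux/errno.h>

static int example_single_exit(struct device_node *np, void *host)
{
	int ret;

	if (!host) {
		ret = -ENOMEM;
		goto out;
	}

	ret = 0;
out:
	of_node_put(np);	/* runs on success and on failure alike */
	return ret;
}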
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 50cd09b4e05d..2794235e9d3e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -25,6 +25,8 @@ config PPC_BOOK3S_32
bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
+ select PPC_HAVE_KUEP
+ select PPC_HAVE_KUAP
config PPC_85xx
bool "Freescale 85xx"
@@ -34,6 +36,9 @@ config PPC_8xx
bool "Freescale 8xx"
select FSL_SOC
select SYS_SUPPORTS_HUGETLBFS
+ select PPC_HAVE_KUEP
+ select PPC_HAVE_KUAP
+ select PPC_MM_SLICES if HUGETLB_PAGE
config 40x
bool "AMCC 40x"
@@ -75,6 +80,7 @@ config PPC_BOOK3S_64
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
+ select PPC_MM_SLICES
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -325,7 +331,9 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64 && HUGETLB_PAGE
- select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+ select ARCH_HAS_GIGANTIC_PAGE
+ select PPC_HAVE_KUEP
+ select PPC_HAVE_KUAP
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
@@ -345,6 +353,37 @@ config PPC_RADIX_MMU_DEFAULT
If you're unsure, say Y.
+config PPC_HAVE_KUEP
+ bool
+
+config PPC_KUEP
+ bool "Kernel Userspace Execution Prevention"
+ depends on PPC_HAVE_KUEP
+ default y
+ help
+ Enable support for Kernel Userspace Execution Prevention (KUEP)
+
+ If you're unsure, say Y.
+
+config PPC_HAVE_KUAP
+ bool
+
+config PPC_KUAP
+ bool "Kernel Userspace Access Protection"
+ depends on PPC_HAVE_KUAP
+ default y
+ help
+ Enable support for Kernel Userspace Access Protection (KUAP)
+
+ If you're unsure, say Y.
+
+config PPC_KUAP_DEBUG
+ bool "Extra debugging for Kernel Userspace Access Protection"
+ depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+ help
+ Add extra debugging for Kernel Userspace Access Protection (KUAP)
+ If you're unsure, say N.
+
config ARCH_ENABLE_HUGEPAGE_MIGRATION
def_bool y
depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
@@ -354,14 +393,16 @@ config PPC_MMU_NOHASH
def_bool y
depends on !PPC_BOOK3S
+config PPC_MMU_NOHASH_32
+ def_bool y
+ depends on PPC_MMU_NOHASH && PPC32
+
config PPC_BOOK3E_MMU
def_bool y
depends on FSL_BOOKE || PPC_BOOK3E
config PPC_MM_SLICES
bool
- default y if PPC_BOOK3S_64
- default y if PPC_8xx && HUGETLB_PAGE
config PPC_HAVE_PMU_SUPPORT
bool
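PPC_KUEP and PPC_KUAP above are user-visible switches that platforms opt into through the hidden PPC_HAVE_KUEP/PPC_HAVE_KUAP symbols. C code elsewhere then compiles its protection helpers only when the option is set; the fragment below is an invented illustration of that guard, not the real powerpc implementation.

#ifdef CONFIG_PPC_KUAP
static inline void example_prevent_user_access(void)
{
	/* platform-specific: raise whatever barrier blocks kernel
	 * loads/stores to user addresses, e.g. an SPR write */
}
#else
static inline void example_prevent_user_access(void) { }
#endif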
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 7f12c7b78c0f..6646f152d57b 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -194,7 +194,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
* faults need to be deferred to process context.
*/
if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
- (REGION_ID(ea) != USER_REGION_ID)) {
+ (get_region_id(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
ret = hash_page(ea,
@@ -224,7 +224,7 @@ static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
unsigned long ea = (unsigned long)addr;
u64 llp;
- if (REGION_ID(ea) == KERNEL_REGION_ID)
+ if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
llp = mmu_psize_defs[mmu_linear_psize].sllp;
else
llp = mmu_psize_defs[mmu_virtual_psize].sllp;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 48c2477e7e2a..bfb9ca99ac05 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -588,7 +588,7 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
struct spufs_inode_info *i = SPUFS_I(inode);
file->private_data = i->i_ctx;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
/*
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index db329d4bf1c3..c1a75216050a 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -71,17 +71,11 @@ spufs_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
-static void spufs_i_callback(struct rcu_head *head)
+static void spufs_free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
-static void spufs_destroy_inode(struct inode *inode)
-{
- call_rcu(&inode->i_rcu, spufs_i_callback);
-}
-
static void
spufs_init_once(void *p)
{
@@ -739,7 +733,7 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
struct spufs_sb_info *info;
static const struct super_operations s_ops = {
.alloc_inode = spufs_alloc_inode,
- .destroy_inode = spufs_destroy_inode,
+ .free_inode = spufs_free_inode,
.statfs = simple_statfs,
.evict_inode = spufs_evict_inode,
.show_options = spufs_show_options,
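spufs above is one of the filesystems converted from ->destroy_inode plus a private call_rcu() callback to the new ->free_inode hook, where the VFS itself delays the free past the RCU grace period. A minimal hedged sketch of the converted pair, with invented example_* names:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_inode_info {
	struct inode vfs_inode;
	/* filesystem-private fields would follow */
};

/* Called by the VFS after an RCU grace period; no call_rcu() needed here. */
static void example_free_inode(struct inode *inode)
{
	kfree(container_of(inode, struct example_inode_info, vfs_inode));
}

static const struct super_operations example_s_ops = {
	.free_inode	= example_free_inode,
	.statfs		= simple_statfs,
};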
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 0409714e8070..829bf3697dc9 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -44,7 +44,8 @@
#define HOLLY_PCI_CFG_PHYS 0x7c000000
-int holly_exclude_device(struct pci_controller *hose, u_char bus, u_char devfn)
+static int holly_exclude_device(struct pci_controller *hose, u_char bus,
+ u_char devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -187,13 +188,13 @@ static void __init holly_init_IRQ(void)
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
}
-void holly_show_cpuinfo(struct seq_file *m)
+static void holly_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: IBM\n");
seq_printf(m, "machine\t\t: PPC750 GX/CL\n");
}
-void __noreturn holly_restart(char *cmd)
+static void __noreturn holly_restart(char *cmd)
{
__be32 __iomem *ocn_bar1 = NULL;
unsigned long bar;
@@ -233,18 +234,6 @@ void __noreturn holly_restart(char *cmd)
for (;;) ;
}
-void holly_power_off(void)
-{
- local_irq_disable();
- /* No way to shut power off with software */
- for (;;) ;
-}
-
-void holly_halt(void)
-{
- holly_power_off();
-}
-
/*
* Called very early, device-tree isn't unflattened
*/
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 20ebf35d7913..f4247ade71ca 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -2,6 +2,12 @@
CFLAGS_bootx_init.o += -fPIC
CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector)
+KASAN_SANITIZE_bootx_init.o := n
+
+ifdef CONFIG_KASAN
+CFLAGS_bootx_init.o += -DDISABLE_BRANCH_PROFILING
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE)
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index e52f9b06dd9c..c9133f7908ca 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/cpu.h>
+#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -48,10 +49,10 @@ static u64 pnv_default_stop_mask;
static bool default_stop_found;
/*
- * First deep stop state. Used to figure out when to save/restore
- * hypervisor context.
+ * First stop state levels when SPR and TB loss can occur.
*/
-u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
+static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
+static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
/*
* psscr value and mask of the deepest stop idle state.
@@ -62,6 +63,8 @@ static u64 pnv_deepest_stop_psscr_mask;
static u64 pnv_deepest_stop_flag;
static bool deepest_stop_found;
+static unsigned long power7_offline_type;
+
static int pnv_save_sprs_for_deep_states(void)
{
int cpu;
@@ -72,12 +75,12 @@ static int pnv_save_sprs_for_deep_states(void)
* all cpus at boot. Get these reg values of current cpu and use the
* same across all cpus.
*/
- uint64_t lpcr_val = mfspr(SPRN_LPCR);
- uint64_t hid0_val = mfspr(SPRN_HID0);
- uint64_t hid1_val = mfspr(SPRN_HID1);
- uint64_t hid4_val = mfspr(SPRN_HID4);
- uint64_t hid5_val = mfspr(SPRN_HID5);
- uint64_t hmeer_val = mfspr(SPRN_HMEER);
+ uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t hid0_val = mfspr(SPRN_HID0);
+ uint64_t hid1_val = mfspr(SPRN_HID1);
+ uint64_t hid4_val = mfspr(SPRN_HID4);
+ uint64_t hid5_val = mfspr(SPRN_HID5);
+ uint64_t hmeer_val = mfspr(SPRN_HMEER);
uint64_t msr_val = MSR_IDLE;
uint64_t psscr_val = pnv_deepest_stop_psscr_val;
@@ -137,89 +140,6 @@ static int pnv_save_sprs_for_deep_states(void)
return 0;
}
-static void pnv_alloc_idle_core_states(void)
-{
- int i, j;
- int nr_cores = cpu_nr_cores();
- u32 *core_idle_state;
-
- /*
- * core_idle_state - The lower 8 bits track the idle state of
- * each thread of the core.
- *
- * The most significant bit is the lock bit.
- *
- * Initially all the bits corresponding to threads_per_core
- * are set. They are cleared when the thread enters deep idle
- * state like sleep and winkle/stop.
- *
- * Initially the lock bit is cleared. The lock bit has 2
- * purposes:
- * a. While the first thread in the core waking up from
- * idle is restoring core state, it prevents other
- * threads in the core from switching to process
- * context.
- * b. While the last thread in the core is saving the
- * core state, it prevents a different thread from
- * waking up.
- */
- for (i = 0; i < nr_cores; i++) {
- int first_cpu = i * threads_per_core;
- int node = cpu_to_node(first_cpu);
- size_t paca_ptr_array_size;
-
- core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
- *core_idle_state = (1 << threads_per_core) - 1;
- paca_ptr_array_size = (threads_per_core *
- sizeof(struct paca_struct *));
-
- for (j = 0; j < threads_per_core; j++) {
- int cpu = first_cpu + j;
-
- paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
- paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
- paca_ptrs[cpu]->thread_mask = 1 << j;
- }
- }
-
- update_subcore_sibling_mask();
-
- if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
- int rc = pnv_save_sprs_for_deep_states();
-
- if (likely(!rc))
- return;
-
- /*
- * The stop-api is unable to restore hypervisor
- * resources on wakeup from platform idle states which
- * lose full context. So disable such states.
- */
- supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
- pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
- pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
-
- if (cpu_has_feature(CPU_FTR_ARCH_300) &&
- (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
- /*
- * Use the default stop state for CPU-Hotplug
- * if available.
- */
- if (default_stop_found) {
- pnv_deepest_stop_psscr_val =
- pnv_default_stop_val;
- pnv_deepest_stop_psscr_mask =
- pnv_default_stop_mask;
- pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
- pnv_deepest_stop_psscr_val);
- } else { /* Fallback to snooze loop for CPU-Hotplug */
- deepest_stop_found = false;
- pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
- }
- }
- }
-}
-
u32 pnv_get_supported_cpuidle_states(void)
{
return supported_cpuidle_states;
@@ -238,6 +158,9 @@ static void pnv_fastsleep_workaround_apply(void *info)
*err = 1;
}
+static bool power7_fastsleep_workaround_entry = true;
+static bool power7_fastsleep_workaround_exit = true;
+
/*
* Used to store fastsleep workaround state
* 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
@@ -269,21 +192,15 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
* fastsleep_workaround_applyonce = 1 implies
* fastsleep workaround needs to be left in 'applied' state on all
* the cores. Do this by-
- * 1. Patching out the call to 'undo' workaround in fastsleep exit path
- * 2. Sending ipi to all the cores which have at least one online thread
- * 3. Patching out the call to 'apply' workaround in fastsleep entry
- * path
+ * 1. Disable the 'undo' workaround in fastsleep exit path
+ * 2. Send IPIs to all the cores which have at least one online thread
+ * 3. Disable the 'apply' workaround in fastsleep entry path
+ *
* There is no need to send ipi to cores which have all threads
* offlined, as last thread of the core entering fastsleep or deeper
* state would have applied workaround.
*/
- err = patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_exit,
- PPC_INST_NOP);
- if (err) {
- pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
- goto fail;
- }
+ power7_fastsleep_workaround_exit = false;
get_online_cpus();
primary_thread_mask = cpu_online_cores_map();
@@ -296,13 +213,7 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
goto fail;
}
- err = patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_entry,
- PPC_INST_NOP);
- if (err) {
- pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
- goto fail;
- }
+ power7_fastsleep_workaround_entry = false;
fastsleep_workaround_applyonce = 1;
@@ -315,27 +226,346 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
show_fastsleep_workaround_applyonce,
store_fastsleep_workaround_applyonce);
-static unsigned long __power7_idle_type(unsigned long type)
+static inline void atomic_start_thread_idle(void)
{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ int thread_nr = cpu_thread_in_core(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+
+ clear_bit(thread_nr, state);
+}
+
+static inline void atomic_stop_thread_idle(void)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ int thread_nr = cpu_thread_in_core(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+
+ set_bit(thread_nr, state);
+}
+
+static inline void atomic_lock_thread_idle(void)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+
+ while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, state)))
+ barrier();
+}
+
+static inline void atomic_unlock_and_stop_thread_idle(void)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long thread = 1UL << cpu_thread_in_core(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+ u64 s = READ_ONCE(*state);
+ u64 new, tmp;
+
+ BUG_ON(!(s & PNV_CORE_IDLE_LOCK_BIT));
+ BUG_ON(s & thread);
+
+again:
+ new = (s | thread) & ~PNV_CORE_IDLE_LOCK_BIT;
+ tmp = cmpxchg(state, s, new);
+ if (unlikely(tmp != s)) {
+ s = tmp;
+ goto again;
+ }
+}
+
+static inline void atomic_unlock_thread_idle(void)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+
+ BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, state));
+ clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, state);
+}
+
+/* P7 and P8 */
+struct p7_sprs {
+ /* per core */
+ u64 tscr;
+ u64 worc;
+
+ /* per subcore */
+ u64 sdr1;
+ u64 rpr;
+
+ /* per thread */
+ u64 lpcr;
+ u64 hfscr;
+ u64 fscr;
+ u64 purr;
+ u64 spurr;
+ u64 dscr;
+ u64 wort;
+
+ /* per thread SPRs that get lost in shallow states */
+ u64 amr;
+ u64 iamr;
+ u64 amor;
+ u64 uamor;
+};
+
+static unsigned long power7_idle_insn(unsigned long type)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+ unsigned long thread = 1UL << cpu_thread_in_core(cpu);
+ unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
unsigned long srr1;
+ bool full_winkle;
+ struct p7_sprs sprs = {}; /* avoid false use-uninitialised */
+ bool sprs_saved = false;
+ int rc;
- if (!prep_irq_for_idle_irqsoff())
- return 0;
+ if (unlikely(type != PNV_THREAD_NAP)) {
+ atomic_lock_thread_idle();
+
+ BUG_ON(!(*state & thread));
+ *state &= ~thread;
+
+ if (power7_fastsleep_workaround_entry) {
+ if ((*state & core_thread_mask) == 0) {
+ rc = opal_config_cpu_idle_state(
+ OPAL_CONFIG_IDLE_FASTSLEEP,
+ OPAL_CONFIG_IDLE_APPLY);
+ BUG_ON(rc);
+ }
+ }
+
+ if (type == PNV_THREAD_WINKLE) {
+ sprs.tscr = mfspr(SPRN_TSCR);
+ sprs.worc = mfspr(SPRN_WORC);
+
+ sprs.sdr1 = mfspr(SPRN_SDR1);
+ sprs.rpr = mfspr(SPRN_RPR);
+
+ sprs.lpcr = mfspr(SPRN_LPCR);
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ sprs.hfscr = mfspr(SPRN_HFSCR);
+ sprs.fscr = mfspr(SPRN_FSCR);
+ }
+ sprs.purr = mfspr(SPRN_PURR);
+ sprs.spurr = mfspr(SPRN_SPURR);
+ sprs.dscr = mfspr(SPRN_DSCR);
+ sprs.wort = mfspr(SPRN_WORT);
+
+ sprs_saved = true;
+
+ /*
+ * Increment winkle counter and set all winkle bits if
+ * all threads are winkling. This allows wakeup side to
+ * distinguish between fast sleep and winkle state
+ * loss. Fast sleep still has to resync the timebase so
+ * this may not be a really big win.
+ */
+ *state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
+ if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS)
+ >> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT
+ == threads_per_core)
+ *state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS;
+ WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
+ }
+
+ atomic_unlock_thread_idle();
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ sprs.amr = mfspr(SPRN_AMR);
+ sprs.iamr = mfspr(SPRN_IAMR);
+ sprs.amor = mfspr(SPRN_AMOR);
+ sprs.uamor = mfspr(SPRN_UAMOR);
+ }
+
+ local_paca->thread_idle_state = type;
+ srr1 = isa206_idle_insn_mayloss(type); /* go idle */
+ local_paca->thread_idle_state = PNV_THREAD_RUNNING;
+
+ WARN_ON_ONCE(!srr1);
+ WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
+
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
+ /*
+ * We don't need an isync after the mtsprs here because
+ * the upcoming mtmsrd is execution synchronizing.
+ */
+ mtspr(SPRN_AMR, sprs.amr);
+ mtspr(SPRN_IAMR, sprs.iamr);
+ mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_UAMOR, sprs.uamor);
+ }
+ }
+
+ if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
+ hmi_exception_realmode(NULL);
+
+ if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) {
+ if (unlikely(type != PNV_THREAD_NAP)) {
+ atomic_lock_thread_idle();
+ if (type == PNV_THREAD_WINKLE) {
+ WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
+ *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
+ *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
+ }
+ atomic_unlock_and_stop_thread_idle();
+ }
+ return srr1;
+ }
+
+ /* HV state loss */
+ BUG_ON(type == PNV_THREAD_NAP);
+
+ atomic_lock_thread_idle();
+
+ full_winkle = false;
+ if (type == PNV_THREAD_WINKLE) {
+ WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0);
+ *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT;
+ if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) {
+ *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT);
+ full_winkle = true;
+ BUG_ON(!sprs_saved);
+ }
+ }
+
+ WARN_ON(*state & thread);
+
+ if ((*state & core_thread_mask) != 0)
+ goto core_woken;
+
+ /* Per-core SPRs */
+ if (full_winkle) {
+ mtspr(SPRN_TSCR, sprs.tscr);
+ mtspr(SPRN_WORC, sprs.worc);
+ }
+
+ if (power7_fastsleep_workaround_exit) {
+ rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
+ OPAL_CONFIG_IDLE_UNDO);
+ BUG_ON(rc);
+ }
+
+ /* TB */
+ if (opal_resync_timebase() != OPAL_SUCCESS)
+ BUG();
+
+core_woken:
+ if (!full_winkle)
+ goto subcore_woken;
+
+ if ((*state & local_paca->subcore_sibling_mask) != 0)
+ goto subcore_woken;
+
+ /* Per-subcore SPRs */
+ mtspr(SPRN_SDR1, sprs.sdr1);
+ mtspr(SPRN_RPR, sprs.rpr);
+
+subcore_woken:
+ /*
+ * isync after restoring shared SPRs and before unlocking. Unlock
+ * only contains hwsync which does not necessarily do the right
+ * thing for SPRs.
+ */
+ isync();
+ atomic_unlock_and_stop_thread_idle();
+
+ /* Fast sleep does not lose SPRs */
+ if (!full_winkle)
+ return srr1;
+
+ /* Per-thread SPRs */
+ mtspr(SPRN_LPCR, sprs.lpcr);
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_HFSCR, sprs.hfscr);
+ mtspr(SPRN_FSCR, sprs.fscr);
+ }
+ mtspr(SPRN_PURR, sprs.purr);
+ mtspr(SPRN_SPURR, sprs.spurr);
+ mtspr(SPRN_DSCR, sprs.dscr);
+ mtspr(SPRN_WORT, sprs.wort);
+
+ mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
+
+ /*
+ * The SLB has to be restored here, but it sometimes still
+ * contains entries, so the __ variant must be used to prevent
+ * multi hits.
+ */
+ __slb_restore_bolted_realmode();
+
+ return srr1;
+}
+
+extern unsigned long idle_kvm_start_guest(unsigned long srr1);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned long power7_offline(void)
+{
+ unsigned long srr1;
+
+ mtmsr(MSR_IDLE);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle. */
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in */
+ /* pnv_powersave_wakeup in this file. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
+#endif
__ppc64_runlatch_off();
- srr1 = power7_idle_insn(type);
+ srr1 = power7_idle_insn(power7_offline_type);
__ppc64_runlatch_on();
- fini_irq_for_idle_irqsoff();
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ smp_mb();
+ if (local_paca->kvm_hstate.hwthread_req)
+ srr1 = idle_kvm_start_guest(srr1);
+#endif
+
+ mtmsr(MSR_KERNEL);
return srr1;
}
+#endif
void power7_idle_type(unsigned long type)
{
unsigned long srr1;
- srr1 = __power7_idle_type(type);
+ if (!prep_irq_for_idle_irqsoff())
+ return;
+
+ mtmsr(MSR_IDLE);
+ __ppc64_runlatch_off();
+ srr1 = power7_idle_insn(type);
+ __ppc64_runlatch_on();
+ mtmsr(MSR_KERNEL);
+
+ fini_irq_for_idle_irqsoff();
irq_set_pending_from_srr1(srr1);
}
@@ -347,33 +577,292 @@ void power7_idle(void)
power7_idle_type(PNV_THREAD_NAP);
}
-static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
- unsigned long stop_psscr_mask)
+struct p9_sprs {
+ /* per core */
+ u64 ptcr;
+ u64 rpr;
+ u64 tscr;
+ u64 ldbar;
+
+ /* per thread */
+ u64 lpcr;
+ u64 hfscr;
+ u64 fscr;
+ u64 pid;
+ u64 purr;
+ u64 spurr;
+ u64 dscr;
+ u64 wort;
+
+ u64 mmcra;
+ u32 mmcr0;
+ u32 mmcr1;
+ u64 mmcr2;
+
+ /* per thread SPRs that get lost in shallow states */
+ u64 amr;
+ u64 iamr;
+ u64 amor;
+ u64 uamor;
+};
+
+static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
{
- unsigned long psscr;
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+ unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
unsigned long srr1;
+ unsigned long pls;
+ unsigned long mmcr0 = 0;
+ struct p9_sprs sprs = {}; /* avoid false use-uninitialised */
+ bool sprs_saved = false;
- if (!prep_irq_for_idle_irqsoff())
- return 0;
+ if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
+ /* EC=ESL=0 case */
+
+ BUG_ON(!mmu_on);
+
+ /*
+ * Wake synchronously. SRESET via xscom may still cause
+ * a 0x100 powersave wakeup with SRR1 reason!
+ */
+ srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
+ if (likely(!srr1))
+ return 0;
+
+ /*
+ * Registers not saved, can't recover!
+ * This would be a hardware bug
+ */
+ BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);
+
+ goto out;
+ }
+
+ /* EC=ESL=1 case */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) {
+ local_paca->requested_psscr = psscr;
+ /* order setting requested_psscr vs testing dont_stop */
+ smp_mb();
+ if (atomic_read(&local_paca->dont_stop)) {
+ local_paca->requested_psscr = 0;
+ return 0;
+ }
+ }
+#endif
+
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
+ /*
+ * POWER9 DD2 can incorrectly set PMAO when waking up
+ * after a state-loss idle. Saving and restoring MMCR0
+ * over idle is a workaround.
+ */
+ mmcr0 = mfspr(SPRN_MMCR0);
+ }
+ if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
+ sprs.lpcr = mfspr(SPRN_LPCR);
+ sprs.hfscr = mfspr(SPRN_HFSCR);
+ sprs.fscr = mfspr(SPRN_FSCR);
+ sprs.pid = mfspr(SPRN_PID);
+ sprs.purr = mfspr(SPRN_PURR);
+ sprs.spurr = mfspr(SPRN_SPURR);
+ sprs.dscr = mfspr(SPRN_DSCR);
+ sprs.wort = mfspr(SPRN_WORT);
+
+ sprs.mmcra = mfspr(SPRN_MMCRA);
+ sprs.mmcr0 = mfspr(SPRN_MMCR0);
+ sprs.mmcr1 = mfspr(SPRN_MMCR1);
+ sprs.mmcr2 = mfspr(SPRN_MMCR2);
+
+ sprs.ptcr = mfspr(SPRN_PTCR);
+ sprs.rpr = mfspr(SPRN_RPR);
+ sprs.tscr = mfspr(SPRN_TSCR);
+ sprs.ldbar = mfspr(SPRN_LDBAR);
+
+ sprs_saved = true;
+
+ atomic_start_thread_idle();
+ }
+
+ sprs.amr = mfspr(SPRN_AMR);
+ sprs.iamr = mfspr(SPRN_IAMR);
+ sprs.amor = mfspr(SPRN_AMOR);
+ sprs.uamor = mfspr(SPRN_UAMOR);
+
+ srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ local_paca->requested_psscr = 0;
+#endif
psscr = mfspr(SPRN_PSSCR);
- psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+ WARN_ON_ONCE(!srr1);
+ WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
+
+ if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
+ unsigned long mmcra;
+
+ /*
+ * We don't need an isync after the mtsprs here because the
+ * upcoming mtmsrd is execution synchronizing.
+ */
+ mtspr(SPRN_AMR, sprs.amr);
+ mtspr(SPRN_IAMR, sprs.iamr);
+ mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_UAMOR, sprs.uamor);
+
+ /*
+ * Workaround for POWER9 DD2.0, if we lost resources, the ERAT
+ * might have been corrupted and needs flushing. We also need
+ * to reload MMCR0 (see mmcr0 comment above).
+ */
+ if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) {
+ asm volatile(PPC_INVALIDATE_ERAT);
+ mtspr(SPRN_MMCR0, mmcr0);
+ }
+
+ /*
+ * DD2.2 and earlier need to set then clear bit 60 in MMCRA
+ * to ensure the PMU starts running.
+ */
+ mmcra = mfspr(SPRN_MMCRA);
+ mmcra |= PPC_BIT(60);
+ mtspr(SPRN_MMCRA, mmcra);
+ mmcra &= ~PPC_BIT(60);
+ mtspr(SPRN_MMCRA, mmcra);
+ }
+
+ if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
+ hmi_exception_realmode(NULL);
+
+ /*
+ * On POWER9, SRR1 bits do not match exactly as expected.
+ * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
+ * just always test PSSCR for SPR/TB state loss.
+ */
+ pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
+ if (likely(pls < pnv_first_spr_loss_level)) {
+ if (sprs_saved)
+ atomic_stop_thread_idle();
+ goto out;
+ }
+
+ /* HV state loss */
+ BUG_ON(!sprs_saved);
+
+ atomic_lock_thread_idle();
+
+ if ((*state & core_thread_mask) != 0)
+ goto core_woken;
+
+ /* Per-core SPRs */
+ mtspr(SPRN_PTCR, sprs.ptcr);
+ mtspr(SPRN_RPR, sprs.rpr);
+ mtspr(SPRN_TSCR, sprs.tscr);
+ mtspr(SPRN_LDBAR, sprs.ldbar);
+
+ if (pls >= pnv_first_tb_loss_level) {
+ /* TB loss */
+ if (opal_resync_timebase() != OPAL_SUCCESS)
+ BUG();
+ }
+
+ /*
+ * isync after restoring shared SPRs and before unlocking. Unlock
+ * only contains hwsync which does not necessarily do the right
+ * thing for SPRs.
+ */
+ isync();
+
+core_woken:
+ atomic_unlock_and_stop_thread_idle();
+
+ /* Per-thread SPRs */
+ mtspr(SPRN_LPCR, sprs.lpcr);
+ mtspr(SPRN_HFSCR, sprs.hfscr);
+ mtspr(SPRN_FSCR, sprs.fscr);
+ mtspr(SPRN_PID, sprs.pid);
+ mtspr(SPRN_PURR, sprs.purr);
+ mtspr(SPRN_SPURR, sprs.spurr);
+ mtspr(SPRN_DSCR, sprs.dscr);
+ mtspr(SPRN_WORT, sprs.wort);
+
+ mtspr(SPRN_MMCRA, sprs.mmcra);
+ mtspr(SPRN_MMCR0, sprs.mmcr0);
+ mtspr(SPRN_MMCR1, sprs.mmcr1);
+ mtspr(SPRN_MMCR2, sprs.mmcr2);
+
+ mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
+
+ if (!radix_enabled())
+ __slb_restore_bolted_realmode();
+
+out:
+ if (mmu_on)
+ mtmsr(MSR_KERNEL);
+
+ return srr1;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned long power9_offline_stop(unsigned long psscr)
+{
+ unsigned long srr1;
+
+#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
__ppc64_runlatch_off();
- srr1 = power9_idle_stop(psscr);
+ srr1 = power9_idle_stop(psscr, true);
__ppc64_runlatch_on();
+#else
+ /*
+ * Tell KVM we're entering idle.
+ * This does not have to be done in real mode because the P9 MMU
+ * is independent per-thread. Some steppings share radix/hash mode
+ * between threads, but in that case KVM has a barrier sync in real
+ * mode before and after switching between radix and hash.
+ *
+ * kvm_start_guest must still be called in real mode though, hence
+ * the false argument.
+ */
+ local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
- fini_irq_for_idle_irqsoff();
+ __ppc64_runlatch_off();
+ srr1 = power9_idle_stop(psscr, false);
+ __ppc64_runlatch_on();
+
+ local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ smp_mb();
+ if (local_paca->kvm_hstate.hwthread_req)
+ srr1 = idle_kvm_start_guest(srr1);
+ mtmsr(MSR_KERNEL);
+#endif
return srr1;
}
+#endif
void power9_idle_type(unsigned long stop_psscr_val,
unsigned long stop_psscr_mask)
{
+ unsigned long psscr;
unsigned long srr1;
- srr1 = __power9_idle_type(stop_psscr_val, stop_psscr_mask);
+ if (!prep_irq_for_idle_irqsoff())
+ return;
+
+ psscr = mfspr(SPRN_PSSCR);
+ psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+
+ __ppc64_runlatch_off();
+ srr1 = power9_idle_stop(psscr, true);
+ __ppc64_runlatch_on();
+
+ fini_irq_for_idle_irqsoff();
+
irq_set_pending_from_srr1(srr1);
}
@@ -409,7 +898,7 @@ void pnv_power9_force_smt4_catch(void)
atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
}
/* order setting dont_stop vs testing requested_psscr */
- mb();
+ smp_mb();
for (thr = 0; thr < threads_per_core; ++thr) {
if (!paca_ptrs[cpu0+thr]->requested_psscr)
++awake_threads;
@@ -481,7 +970,6 @@ void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
unsigned long pnv_cpu_offline(unsigned int cpu)
{
unsigned long srr1;
- u32 idle_states = pnv_get_supported_cpuidle_states();
__ppc64_runlatch_off();
@@ -492,15 +980,8 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
pnv_deepest_stop_psscr_val;
srr1 = power9_offline_stop(psscr);
-
- } else if ((idle_states & OPAL_PM_WINKLE_ENABLED) &&
- (idle_states & OPAL_PM_LOSE_FULL_CONTEXT)) {
- srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
- } else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
- (idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
- srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
- } else if (idle_states & OPAL_PM_NAP_ENABLED) {
- srr1 = power7_idle_insn(PNV_THREAD_NAP);
+ } else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
+ srr1 = power7_offline();
} else {
/* This is the fallback method. We emulate snooze */
while (!generic_check_cpu_restart(cpu)) {
@@ -596,33 +1077,44 @@ int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
* @dt_idle_states: Number of idle state entries
* Returns 0 on success
*/
-static int __init pnv_power9_idle_init(void)
+static void __init pnv_power9_idle_init(void)
{
u64 max_residency_ns = 0;
int i;
/*
- * Set pnv_first_deep_stop_state, pnv_deepest_stop_psscr_{val,mask},
- * and the pnv_default_stop_{val,mask}.
- *
- * pnv_first_deep_stop_state should be set to the first stop
- * level to cause hypervisor state loss.
- *
* pnv_deepest_stop_{val,mask} should be set to values corresponding to
* the deepest stop state.
*
* pnv_default_stop_{val,mask} should be set to values corresponding to
- * the shallowest (OPAL_PM_STOP_INST_FAST) loss-less stop state.
+ * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
*/
- pnv_first_deep_stop_state = MAX_STOP_STATE;
+ pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
+ pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
for (i = 0; i < nr_pnv_idle_states; i++) {
int err;
struct pnv_idle_states_t *state = &pnv_idle_states[i];
u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
+ if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
+ (pnv_first_tb_loss_level > psscr_rl))
+ pnv_first_tb_loss_level = psscr_rl;
+
if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
- pnv_first_deep_stop_state > psscr_rl)
- pnv_first_deep_stop_state = psscr_rl;
+ (pnv_first_spr_loss_level > psscr_rl))
+ pnv_first_spr_loss_level = psscr_rl;
+
+ /*
+ * The idle code does not deal with TB loss occurring
+ * in a shallower state than SPR loss, so force it to
+ * behave like SPRs are lost if TB is lost. POWER9 would
+ * never encounter this, but a POWER8 core would if it
+ * implemented the stop instruction. So this is for forward
+ * compatibility.
+ */
+ if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
+ (pnv_first_spr_loss_level > psscr_rl))
+ pnv_first_spr_loss_level = psscr_rl;
err = validate_psscr_val_mask(&state->psscr_val,
&state->psscr_mask,
@@ -647,6 +1139,7 @@ static int __init pnv_power9_idle_init(void)
pnv_default_stop_val = state->psscr_val;
pnv_default_stop_mask = state->psscr_mask;
default_stop_found = true;
+ WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT);
}
}
@@ -666,10 +1159,40 @@ static int __init pnv_power9_idle_init(void)
pnv_deepest_stop_psscr_mask);
}
- pr_info("cpuidle-powernv: Requested Level (RL) value of first deep stop = 0x%llx\n",
- pnv_first_deep_stop_state);
+ pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%lld\n",
+ pnv_first_spr_loss_level);
- return 0;
+ pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%lld\n",
+ pnv_first_tb_loss_level);
+}
+
+static void __init pnv_disable_deep_states(void)
+{
+ /*
+ * The stop-api is unable to restore hypervisor
+ * resources on wakeup from platform idle states which
+ * lose full context. So disable such states.
+ */
+ supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
+ pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
+ pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+ (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
+ /*
+ * Use the default stop state for CPU-Hotplug
+ * if available.
+ */
+ if (default_stop_found) {
+ pnv_deepest_stop_psscr_val = pnv_default_stop_val;
+ pnv_deepest_stop_psscr_mask = pnv_default_stop_mask;
+ pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
+ pnv_deepest_stop_psscr_val);
+ } else { /* Fallback to snooze loop for CPU-Hotplug */
+ deepest_stop_found = false;
+ pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
+ }
+ }
}
/*
@@ -684,10 +1207,8 @@ static void __init pnv_probe_idle_states(void)
return;
}
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (pnv_power9_idle_init())
- return;
- }
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pnv_power9_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
supported_cpuidle_states |= pnv_idle_states[i].flags;
@@ -807,11 +1328,33 @@ out:
static int __init pnv_init_idle_states(void)
{
+ int cpu;
int rc = 0;
- supported_cpuidle_states = 0;
+
+ /* Set up PACA fields */
+ for_each_present_cpu(cpu) {
+ struct paca_struct *p = paca_ptrs[cpu];
+
+ p->idle_state = 0;
+ if (cpu == cpu_first_thread_sibling(cpu))
+ p->idle_state = (1 << threads_per_core) - 1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* P7/P8 nap */
+ p->thread_idle_state = PNV_THREAD_RUNNING;
+ } else {
+ /* P9 stop */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ p->requested_psscr = 0;
+ atomic_set(&p->dont_stop, 0);
+#endif
+ }
+ }
/* In case we error out nr_pnv_idle_states will be zero */
nr_pnv_idle_states = 0;
+ supported_cpuidle_states = 0;
+
if (cpuidle_disable != IDLE_NO_OVERRIDE)
goto out;
rc = pnv_parse_cpuidle_dt();
@@ -819,27 +1362,40 @@ static int __init pnv_init_idle_states(void)
return rc;
pnv_probe_idle_states();
- if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
- patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_entry,
- PPC_INST_NOP);
- patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_exit,
- PPC_INST_NOP);
- } else {
- /*
- * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
- * workaround is needed to use fastsleep. Provide sysfs
- * control to choose how this workaround has to be applied.
- */
- device_create_file(cpu_subsys.dev_root,
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+ power7_fastsleep_workaround_entry = false;
+ power7_fastsleep_workaround_exit = false;
+ } else {
+ /*
+ * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
+ * workaround is needed to use fastsleep. Provide sysfs
+ * control to choose how this workaround has to be
+ * applied.
+ */
+ device_create_file(cpu_subsys.dev_root,
&dev_attr_fastsleep_workaround_applyonce);
- }
+ }
+
+ update_subcore_sibling_mask();
+
+ if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) {
+ ppc_md.power_save = power7_idle;
+ power7_offline_type = PNV_THREAD_NAP;
+ }
- pnv_alloc_idle_core_states();
+ if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) &&
+ (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT))
+ power7_offline_type = PNV_THREAD_WINKLE;
+ else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) ||
+ (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1))
+ power7_offline_type = PNV_THREAD_SLEEP;
+ }
- if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED)
- ppc_md.power_save = power7_idle;
+ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
+ if (pnv_save_sprs_for_deep_states())
+ pnv_disable_deep_states();
+ }
out:
return 0;
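A large part of the idle.c rewrite above is the sibling-thread bookkeeping: a per-core idle_state word whose low bits record which threads are out of idle, one lock bit taken with test_and_set_bit_lock(), and an "unlock and mark this thread awake" step done as a single cmpxchg() retry loop. The sketch below only restates that protocol with an invented bit layout and names; it is not the kernel's code.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>

#define EXAMPLE_LOCK_BIT	63	/* invented position for the lock bit */

static void example_lock_core(unsigned long *state)
{
	while (test_and_set_bit_lock(EXAMPLE_LOCK_BIT, state))
		barrier();		/* spin until the holder releases it */
}

static void example_unlock_and_mark_awake(unsigned long *state,
					  unsigned long thread_bit)
{
	unsigned long old, new, tmp;

	old = READ_ONCE(*state);
	for (;;) {
		/* set our thread bit and drop the lock in one atomic step */
		new = (old | thread_bit) & ~BIT(EXAMPLE_LOCK_BIT);
		tmp = cmpxchg(state, old, new);
		if (tmp == old)
			break;
		old = tmp;
	}
}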
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index dc23d9d2a7d9..495550432f3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -1213,9 +1213,8 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
* Currently we only support radix and non-zero LPCR only makes sense
* for hash tables so skiboot expects the LPCR parameter to be a zero.
*/
- ret = opal_npu_map_lpar(nphb->opal_id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid,
- 0 /* LPCR bits */);
+ ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
+ 0 /* LPCR bits */);
if (ret) {
dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
return ret;
@@ -1224,7 +1223,7 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
nphb->opal_id, msr);
ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ pci_dev_id(gpdev));
if (ret < 0)
dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
else
@@ -1258,7 +1257,7 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
nphb->opal_id);
ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ pci_dev_id(gpdev));
if (ret < 0) {
dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
return ret;
@@ -1266,9 +1265,8 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
/* Set LPID to 0 anyway, just to be safe */
dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
- ret = opal_npu_map_lpar(nphb->opal_id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/,
- 0 /* LPCR bits */);
+ ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
+ 0 /* LPCR bits */);
if (ret)
dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
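The npu-dma.c hunks above switch to pci_dev_id(), the helper that folds a device's bus number and devfn into the 16-bit BDF that PCI_DEVID() used to build by hand. A one-line sketch of the equivalence:

#include <linux/pci.h>

static u16 example_bdf(struct pci_dev *pdev)
{
	/* same value as PCI_DEVID(pdev->bus->number, pdev->devfn) */
	return pci_dev_id(pdev);
}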
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index daad8c45c8e7..36c8fa3647a2 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -121,6 +121,8 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
#define OPAL_CALL(name, opcode) \
int64_t name(int64_t a0, int64_t a1, int64_t a2, int64_t a3, \
+ int64_t a4, int64_t a5, int64_t a6, int64_t a7); \
+int64_t name(int64_t a0, int64_t a1, int64_t a2, int64_t a3, \
int64_t a4, int64_t a5, int64_t a6, int64_t a7) \
{ \
return opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode); \
@@ -218,6 +220,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL(opal_handle_hmi2, OPAL_HANDLE_HMI2);
OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
@@ -260,6 +263,9 @@ OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC);
OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
+OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE);
+OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE);
+OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE);
OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET);
OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
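The OPAL_CALL() change above makes the macro emit a prototype immediately before each generated wrapper, so the long list of wrappers that follows no longer trips -Wmissing-prototypes. A self-contained, hedged sketch of the same macro shape; every name here is invented and example_dispatch() merely stands in for the real firmware call path.

static long example_dispatch(long a0, long a1, int token)
{
	return 0;	/* stand-in for the real call */
}

#define EXAMPLE_CALL(name, token)			\
long name(long a0, long a1);				\
long name(long a0, long a1)				\
{							\
	return example_dispatch(a0, a1, token);		\
}

EXAMPLE_CALL(example_get_param, 89);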
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 58a07948c76e..3e497b91d210 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
nr_chips))
goto error;
- pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
+ pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
GFP_KERNEL);
if (!pmu_ptr->mem_info)
goto error;
@@ -284,6 +284,9 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
case IMC_TYPE_THREAD:
domain = IMC_DOMAIN_THREAD;
break;
+ case IMC_TYPE_TRACE:
+ domain = IMC_DOMAIN_TRACE;
+ break;
default:
pr_warn("IMC Unknown Device type \n");
domain = -1;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 2b0eca104f86..f2b063b027f0 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -505,7 +505,7 @@ static int opal_recover_mce(struct pt_regs *regs,
recovered = 0;
}
- if (!recovered && evt->severity == MCE_SEV_ERROR_SYNC) {
+ if (!recovered && evt->sync_error) {
/*
* Try to kill processes if we get a synchronous machine check
* (e.g., one caused by execution of this instruction). This
@@ -614,6 +614,27 @@ int opal_hmi_exception_early(struct pt_regs *regs)
return 0;
}
+int opal_hmi_exception_early2(struct pt_regs *regs)
+{
+ s64 rc;
+ __be64 out_flags;
+
+ /*
+ * call opal hmi handler.
+ * Check 64-bit flag mask to find out if an event was generated,
+ * and whether TB is still valid or not etc.
+ */
+ rc = opal_handle_hmi2(&out_flags);
+ if (rc != OPAL_SUCCESS)
+ return 0;
+
+ if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT)
+ local_paca->hmi_event_available = 1;
+ if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
+ tb_invalid = true;
+ return 1;
+}
+
/* HMI exception handler called in virtual mode during check_irq_replay. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3ead4c237ed0..126602b4e399 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -847,11 +847,11 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
if (rc)
- pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
+ pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
if (rc)
- pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+ pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc);
pe->pbus = NULL;
pe->pdev = NULL;
@@ -1174,11 +1174,12 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
pe->rid = bus->busn_res.start << 8;
if (all)
- pe_info(pe, "Secondary bus %d..%d associated with PE#%x\n",
- bus->busn_res.start, bus->busn_res.end, pe->pe_number);
+ pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n",
+ &bus->busn_res.start, &bus->busn_res.end,
+ pe->pe_number);
else
- pe_info(pe, "Secondary bus %d associated with PE#%x\n",
- bus->busn_res.start, pe->pe_number);
+ pe_info(pe, "Secondary bus %pad associated with PE#%x\n",
+ &bus->busn_res.start, pe->pe_number);
if (pnv_ioda_configure_pe(phb, pe)) {
/* XXX What do we do here ? */
@@ -1448,7 +1449,7 @@ static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe
tbl = pe->table_group.tables[0];
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (rc)
- pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+ pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
pnv_pci_ioda2_set_bypass(pe, false);
if (pe->table_group.group) {
@@ -1836,7 +1837,7 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
struct pnv_ioda_pe *pe;
if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
- return -ENODEV;
+ return false;
pe = &phb->ioda.pe_array[pdn->pe_number];
if (pe->tce_bypass_enabled) {
@@ -1859,7 +1860,7 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
/* Configure the bypass mode */
s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe);
if (rc)
- return rc;
+ return false;
/* 4GB offset bypasses 32-bit space */
pdev->dev.archdata.dma_offset = (1ULL << 32);
return true;
@@ -2286,8 +2287,8 @@ found:
__pa(addr) + tce32_segsz * i,
tce32_segsz, IOMMU_PAGE_SIZE_4K);
if (rc) {
- pe_err(pe, " Failed to configure 32-bit TCE table,"
- " err %ld\n", rc);
+ pe_err(pe, " Failed to configure 32-bit TCE table, err %lld\n",
+ rc);
goto fail;
}
}
@@ -2332,9 +2333,9 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
const __u64 win_size = tbl->it_size << tbl->it_page_shift;
- pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
- start_addr, start_addr + win_size - 1,
- IOMMU_PAGE_SIZE(tbl));
+ pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n",
+ num, start_addr, start_addr + win_size - 1,
+ IOMMU_PAGE_SIZE(tbl));
/*
* Map TCE table through TVT. The TVE index is the PE number
@@ -2348,7 +2349,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
size << 3,
IOMMU_PAGE_SIZE(tbl));
if (rc) {
- pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
+ pe_err(pe, "Failed to configure TCE table, err %lld\n", rc);
return rc;
}
@@ -3450,7 +3451,7 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
#ifdef CONFIG_IOMMU_API
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (rc)
- pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+ pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
#endif
pnv_pci_ioda2_set_bypass(pe, false);
@@ -3484,7 +3485,7 @@ static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
phb->ioda.reserved_pe_idx, win, 0, idx);
if (rc != OPAL_SUCCESS)
- pe_warn(pe, "Error %ld unmapping (%d) segment#%d\n",
+ pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
rc, win, idx);
map[idx] = IODA_INVALID_PE;
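Several of the pci-ioda.c hunks above are printk-format corrections: the OPAL return codes are s64, so %lld replaces %ld, and bus-range endpoints (resource_size_t) are printed via %pad with a pointer to the value, as the replacement lines do. A short sketch of the two rules:

#include <linux/kernel.h>
#include <linux/ioport.h>

static void example_log_formats(s64 rc, struct resource *res)
{
	pr_warn("OPAL error %lld\n", rc);	/* s64 takes %lld */
	/* range endpoints are passed by pointer, mirroring the hunks above */
	pr_info("bus range %pad..%pad\n", &res->start, &res->end);
}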
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8e36da379252..be26ab3d99e0 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -2,6 +2,7 @@
#ifndef __POWERNV_PCI_H
#define __POWERNV_PCI_H
+#include <linux/compiler.h> /* for __printf */
#include <linux/iommu.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
@@ -204,6 +205,7 @@ extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
+__printf(3, 4)
extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...);
#define pe_err(pe, fmt, ...) \
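The __printf(3, 4) annotation added above tells the compiler which argument of pe_level_printk() is the format string and where the variable arguments begin, so callers' format strings get type-checked. A hedged, generic sketch of the attribute on an invented helper:

#include <linux/kernel.h>

__printf(1, 2)
static void example_log(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}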
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 14befee4b3f1..3cf40f689aac 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -401,7 +401,10 @@ static void __init pnv_setup_machdep_opal(void)
/* ppc_md.system_reset_exception gets filled in by pnv_smp_init() */
ppc_md.machine_check_exception = opal_machine_check;
ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
- ppc_md.hmi_exception_early = opal_hmi_exception_early;
+ if (opal_check_token(OPAL_HANDLE_HMI2))
+ ppc_md.hmi_exception_early = opal_hmi_exception_early2;
+ else
+ ppc_md.hmi_exception_early = opal_hmi_exception_early;
ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 45563004feda..1d7a9fd30dd1 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -183,7 +183,7 @@ static void unsplit_core(void)
cpu = smp_processor_id();
if (cpu_thread_in_core(cpu) != 0) {
while (mfspr(SPRN_HID0) & mask)
- power7_idle_insn(PNV_THREAD_NAP);
+ power7_idle_type(PNV_THREAD_NAP);
per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
return;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index d291b618a559..47087832f8b2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -379,7 +379,7 @@ static int dlpar_add_lmb(struct drmem_lmb *);
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
unsigned long block_sz;
- int nid, rc;
+ int rc;
if (!lmb_is_removable(lmb))
return -EINVAL;
@@ -389,14 +389,14 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
return rc;
block_sz = pseries_memory_block_size();
- nid = memory_add_physaddr_to_nid(lmb->base_addr);
- __remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->nid, lmb->base_addr, block_sz);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
+ lmb_clear_nid(lmb);
lmb->flags &= ~DRCONF_MEM_ASSIGNED;
return 0;
@@ -653,7 +653,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
unsigned long block_sz;
- int nid, rc;
+ int rc;
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
@@ -664,13 +664,11 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
return rc;
}
+ lmb_set_nid(lmb);
block_sz = memory_block_size_bytes();
- /* Find the node id for this address */
- nid = memory_add_physaddr_to_nid(lmb->base_addr);
-
/* Add the memory */
- rc = __add_memory(nid, lmb->base_addr, block_sz);
+ rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
@@ -678,8 +676,9 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
- __remove_memory(nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->nid, lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
+ lmb_clear_nid(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
}
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 36eb1ddbac69..03bbb299320e 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -105,7 +105,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
unsigned long attrs)
{
u64 proto_tce;
- __be64 *tcep, *tces;
+ __be64 *tcep;
u64 rpn;
proto_tce = TCE_PCI_READ; // Read allowed
@@ -113,7 +113,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
- tces = tcep = ((__be64 *)tbl->it_base) + index;
+ tcep = ((__be64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
@@ -129,9 +129,9 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
- __be64 *tcep, *tces;
+ __be64 *tcep;
- tces = tcep = ((__be64 *)tbl->it_base) + index;
+ tcep = ((__be64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
@@ -945,7 +945,7 @@ static phys_addr_t ddw_memory_hotplug_max(void)
for_each_node_by_type(memory, "memory") {
unsigned long start, size;
- int ranges, n_mem_addr_cells, n_mem_size_cells, len;
+ int n_mem_addr_cells, n_mem_size_cells, len;
const __be32 *memcell_buf;
memcell_buf = of_get_property(memory, "reg", &len);
@@ -955,9 +955,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
n_mem_addr_cells = of_n_addr_cells(memory);
n_mem_size_cells = of_n_size_cells(memory);
- /* ranges in cell */
- ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
-
start = of_read_number(memcell_buf, n_mem_addr_cells);
memcell_buf += n_mem_addr_cells;
size = of_read_number(memcell_buf, n_mem_size_cells);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f2a9f0adc2d3..1034ef1fe2b4 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -901,8 +901,10 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
break;
case H_PARAMETER:
+ pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
return -EINVAL;
case H_RESOURCE:
+ pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
return -EPERM;
default:
pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
@@ -918,7 +920,6 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
if (rc != 0) {
switch (state.commit_rc) {
case H_PTEG_FULL:
- pr_warn("Hash collision while resizing HPT\n");
return -ENOSPC;
default:
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index 27f0a915c8a9..f860a897a9e0 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -106,7 +106,7 @@ static ssize_t pmem_drc_remove_node(u32 drc_index)
int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
{
- u32 count, drc_index;
+ u32 drc_index;
int rc;
/* slim chance, but we might get a hotplug event while booting */
@@ -123,7 +123,6 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
return -EINVAL;
}
- count = hp_elog->_drc_u.drc_count;
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug();
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 452dcfd7e5dd..c97d15352f9f 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -539,44 +539,44 @@ static void pseries_print_mce_info(struct pt_regs *regs,
int disposition = rtas_error_disposition(errp);
static const char * const initiators[] = {
- "Unknown",
- "CPU",
- "PCI",
- "ISA",
- "Memory",
- "Power Mgmt",
+ [0] = "Unknown",
+ [1] = "CPU",
+ [2] = "PCI",
+ [3] = "ISA",
+ [4] = "Memory",
+ [5] = "Power Mgmt",
};
static const char * const mc_err_types[] = {
- "UE",
- "SLB",
- "ERAT",
- "Unknown",
- "TLB",
- "D-Cache",
- "Unknown",
- "I-Cache",
+ [0] = "UE",
+ [1] = "SLB",
+ [2] = "ERAT",
+ [3] = "Unknown",
+ [4] = "TLB",
+ [5] = "D-Cache",
+ [6] = "Unknown",
+ [7] = "I-Cache",
};
static const char * const mc_ue_types[] = {
- "Indeterminate",
- "Instruction fetch",
- "Page table walk ifetch",
- "Load/Store",
- "Page table walk Load/Store",
+ [0] = "Indeterminate",
+ [1] = "Instruction fetch",
+ [2] = "Page table walk ifetch",
+ [3] = "Load/Store",
+ [4] = "Page table walk Load/Store",
};
/* SLB sub errors valid values are 0x0, 0x1, 0x2 */
static const char * const mc_slb_types[] = {
- "Parity",
- "Multihit",
- "Indeterminate",
+ [0] = "Parity",
+ [1] = "Multihit",
+ [2] = "Indeterminate",
};
/* TLB and ERAT sub errors valid values are 0x1, 0x2, 0x3 */
static const char * const mc_soft_types[] = {
- "Unknown",
- "Parity",
- "Multihit",
- "Indeterminate",
+ [0] = "Unknown",
+ [1] = "Parity",
+ [2] = "Multihit",
+ [3] = "Indeterminate",
};
if (!rtas_error_extended(errp)) {
@@ -707,6 +707,87 @@ out:
return disposition;
}
+#ifdef CONFIG_MEMORY_FAILURE
+
+static DEFINE_PER_CPU(int, rtas_ue_count);
+static DEFINE_PER_CPU(unsigned long, rtas_ue_paddr[MAX_MC_EVT]);
+
+#define UE_EFFECTIVE_ADDR_PROVIDED 0x40
+#define UE_LOGICAL_ADDR_PROVIDED 0x20
+
+
+static void pseries_hwpoison_work_fn(struct work_struct *work)
+{
+ unsigned long paddr;
+ int index;
+
+ while (__this_cpu_read(rtas_ue_count) > 0) {
+ index = __this_cpu_read(rtas_ue_count) - 1;
+ paddr = __this_cpu_read(rtas_ue_paddr[index]);
+ memory_failure(paddr >> PAGE_SHIFT, 0);
+ __this_cpu_dec(rtas_ue_count);
+ }
+}
+
+static DECLARE_WORK(hwpoison_work, pseries_hwpoison_work_fn);
+
+static void queue_ue_paddr(unsigned long paddr)
+{
+ int index;
+
+ index = __this_cpu_inc_return(rtas_ue_count) - 1;
+ if (index >= MAX_MC_EVT) {
+ __this_cpu_dec(rtas_ue_count);
+ return;
+ }
+ this_cpu_write(rtas_ue_paddr[index], paddr);
+ schedule_work(&hwpoison_work);
+}
+
+static void pseries_do_memory_failure(struct pt_regs *regs,
+ struct pseries_mc_errorlog *mce_log)
+{
+ unsigned long paddr;
+
+ if (mce_log->sub_err_type & UE_LOGICAL_ADDR_PROVIDED) {
+ paddr = be64_to_cpu(mce_log->logical_address);
+ } else if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) {
+ unsigned long pfn;
+
+ pfn = addr_to_pfn(regs,
+ be64_to_cpu(mce_log->effective_address));
+ if (pfn == ULONG_MAX)
+ return;
+ paddr = pfn << PAGE_SHIFT;
+ } else {
+ return;
+ }
+ queue_ue_paddr(paddr);
+}
+
+static void pseries_process_ue(struct pt_regs *regs,
+ struct rtas_error_log *errp)
+{
+ struct pseries_errorlog *pseries_log;
+ struct pseries_mc_errorlog *mce_log;
+
+ if (!rtas_error_extended(errp))
+ return;
+
+ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+ if (!pseries_log)
+ return;
+
+ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+
+ if (mce_log->error_type == MC_ERROR_TYPE_UE)
+ pseries_do_memory_failure(regs, mce_log);
+}
+#else
+static inline void pseries_process_ue(struct pt_regs *regs,
+ struct rtas_error_log *errp) { }
+#endif /* CONFIG_MEMORY_FAILURE */
+
/*
* Process MCE rtas errlog event.
*/
@@ -765,6 +846,8 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
recovered = 1;
}
+ pseries_process_ue(regs, err);
+
/* Queue irq work to log this rtas event later. */
irq_work_queue(&mce_errlog_process_work);
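
The ras.c hunk above queues uncorrected-error physical addresses into a small per-CPU array at machine-check time and drains them later from deferred work that calls memory_failure(). Below is a minimal userspace sketch of that bounded queue-and-defer pattern, not the kernel code itself; queue_paddr()/drain_paddrs() are illustrative stand-ins for queue_ue_paddr() and the work function, and the array size and page shift are assumed values.

#include <stdio.h>

#define MAX_MC_EVT 10              /* illustrative bound; the real MAX_MC_EVT lives in asm headers */

static unsigned long ue_paddr[MAX_MC_EVT];
static int ue_count;

/* "Machine check" side: must be cheap and must never overflow the array. */
static void queue_paddr(unsigned long paddr)
{
	if (ue_count >= MAX_MC_EVT)
		return;            /* drop the event rather than corrupt state */
	ue_paddr[ue_count++] = paddr;
}

/* Deferred side: does the expensive recovery step for each queued address. */
static void drain_paddrs(void)
{
	while (ue_count > 0) {
		unsigned long paddr = ue_paddr[--ue_count];
		printf("memory_failure(pfn=0x%lx)\n", paddr >> 12);  /* 12 = assumed PAGE_SHIFT */
	}
}

int main(void)
{
	queue_paddr(0x12345000UL);
	queue_paddr(0x6789a000UL);
	drain_paddrs();
	return 0;
}
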
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 4314ba5baf43..7c6d8b14f440 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+KASAN_SANITIZE := n
+
targets += trampoline.o purgatory.ro kexec-purgatory.c
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 1f1af12f23e2..026619c9a8cb 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/device.h>
+#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of_net.h>
#include <asm/tsi108.h>
@@ -105,8 +106,8 @@ static int __init tsi108_eth_of_init(void)
}
mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(tsi_eth_data.mac_addr, mac_addr, 6);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(tsi_eth_data.mac_addr, mac_addr);
ph = of_get_property(np, "mdio-handle", NULL);
mdio = of_find_node_by_phandle(*ph);
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1ca127d052a6..7782201e5fe8 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -437,6 +437,12 @@ void xive_native_sync_source(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);
+void xive_native_sync_queue(u32 hw_irq)
+{
+ opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
+}
+EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
@@ -515,6 +521,9 @@ u32 xive_native_default_eq_shift(void)
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
+unsigned long xive_tima_os;
+EXPORT_SYMBOL_GPL(xive_tima_os);
+
bool __init xive_native_init(void)
{
struct device_node *np;
@@ -567,6 +576,14 @@ bool __init xive_native_init(void)
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
+ /* Resource 2 is OS window */
+ if (of_address_to_resource(np, 2, &r)) {
+ pr_err("Failed to get thread mgmnt area resource\n");
+ return false;
+ }
+
+ xive_tima_os = r.start;
+
	/* Grab size of provisioning pages */
xive_parse_provisioning(np);
@@ -711,3 +728,96 @@ bool xive_native_has_single_escalation(void)
return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
+
+int xive_native_get_queue_info(u32 vp_id, u32 prio,
+ u64 *out_qpage,
+ u64 *out_qsize,
+ u64 *out_qeoi_page,
+ u32 *out_escalate_irq,
+ u64 *out_qflags)
+{
+ __be64 qpage;
+ __be64 qsize;
+ __be64 qeoi_page;
+ __be32 escalate_irq;
+ __be64 qflags;
+ s64 rc;
+
+ rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
+ &qeoi_page, &escalate_irq, &qflags);
+ if (rc) {
+ pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
+ vp_id, prio, rc);
+ return -EIO;
+ }
+
+ if (out_qpage)
+ *out_qpage = be64_to_cpu(qpage);
+ if (out_qsize)
+ *out_qsize = be32_to_cpu(qsize);
+ if (out_qeoi_page)
+ *out_qeoi_page = be64_to_cpu(qeoi_page);
+ if (out_escalate_irq)
+ *out_escalate_irq = be32_to_cpu(escalate_irq);
+ if (out_qflags)
+ *out_qflags = be64_to_cpu(qflags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_info);
+
+int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
+{
+ __be32 opal_qtoggle;
+ __be32 opal_qindex;
+ s64 rc;
+
+ rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
+ &opal_qindex);
+ if (rc) {
+ pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
+ vp_id, prio, rc);
+ return -EIO;
+ }
+
+ if (qtoggle)
+ *qtoggle = be32_to_cpu(opal_qtoggle);
+ if (qindex)
+ *qindex = be32_to_cpu(opal_qindex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_queue_state);
+
+int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
+{
+ s64 rc;
+
+ rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
+ if (rc) {
+ pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
+ vp_id, prio, rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
+
+int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
+{
+ __be64 state;
+ s64 rc;
+
+ rc = opal_xive_get_vp_state(vp_id, &state);
+ if (rc) {
+ pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
+ vp_id, rc);
+ return -EIO;
+ }
+
+ if (out_state)
+ *out_state = be64_to_cpu(state);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
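
The new xive_native_get_*() helpers above all follow the same shape: call OPAL, byte-swap the big-endian results, and fill only the out-parameters the caller actually supplied. A rough host-side sketch of that shape, with fake_fw_get_queue_info() standing in for the OPAL call and a hand-rolled byte-swap instead of be64_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Interpret an 8-byte big-endian buffer as a host-order integer. */
static uint64_t be64_to_host(const unsigned char b[8])
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r = (r << 8) | b[i];
	return r;
}

/* Pretend firmware call: results come back as big-endian byte streams. */
static int fake_fw_get_queue_info(unsigned char qpage_be[8], unsigned char qflags_be[8])
{
	static const unsigned char page[8]  = {0, 0, 0x20, 0, 0, 0, 0x10, 0};
	static const unsigned char flags[8] = {0, 0, 0, 0, 0, 0, 0, 0x3};

	memcpy(qpage_be, page, 8);
	memcpy(qflags_be, flags, 8);
	return 0;
}

/* Wrapper: convert endianness and fill only the out-parameters requested. */
static int get_queue_info(uint64_t *out_qpage, uint64_t *out_qflags)
{
	unsigned char qpage[8], qflags[8];
	int rc = fake_fw_get_queue_info(qpage, qflags);

	if (rc)
		return -1;
	if (out_qpage)
		*out_qpage = be64_to_host(qpage);
	if (out_qflags)
		*out_qflags = be64_to_host(qflags);
	return 0;
}

int main(void)
{
	uint64_t qpage;

	if (!get_queue_info(&qpage, NULL))  /* caller only wants the page address */
		printf("qpage = 0x%llx\n", (unsigned long long)qpage);
	return 0;
}
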
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 3050f9323254..f142570ad860 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -7,6 +7,7 @@ subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
# Disable ftrace for the entire directory
ORIG_CFLAGS := $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a0f44f992360..1b0149b2bb6c 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -80,6 +80,7 @@ static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
#endif
static unsigned long in_xmon __read_mostly = 0;
static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
+static bool xmon_is_ro = IS_ENABLED(CONFIG_XMON_DEFAULT_RO_MODE);
static unsigned long adrs;
static int size = 1;
@@ -202,6 +203,8 @@ static void dump_tlb_book3e(void);
#define GETWORD(v) (((v)[0] << 24) + ((v)[1] << 16) + ((v)[2] << 8) + (v)[3])
#endif
+static const char *xmon_ro_msg = "Operation disabled: xmon in read-only mode\n";
+
static char *help_string = "\
Commands:\n\
b show breakpoints\n\
@@ -989,6 +992,10 @@ cmds(struct pt_regs *excp)
memlocate();
break;
case 'z':
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
memzcan();
break;
case 'i':
@@ -1042,6 +1049,10 @@ cmds(struct pt_regs *excp)
set_lpp_cmd();
break;
case 'b':
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
bpt_cmds();
break;
case 'C':
@@ -1055,6 +1066,10 @@ cmds(struct pt_regs *excp)
bootcmds();
break;
case 'p':
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
proccall();
break;
case 'P':
@@ -1777,6 +1792,11 @@ read_spr(int n, unsigned long *vp)
static void
write_spr(int n, unsigned long val)
{
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ return;
+ }
+
if (setjmp(bus_error_jmp) == 0) {
catch_spr_faults = 1;
sync();
@@ -2016,6 +2036,12 @@ mwrite(unsigned long adrs, void *buf, int size)
char *p, *q;
n = 0;
+
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ return n;
+ }
+
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
@@ -2429,9 +2455,11 @@ static void dump_one_paca(int cpu)
DUMP(p, trap_save, "%#-*x");
DUMP(p, irq_soft_mask, "%#-*x");
DUMP(p, irq_happened, "%#-*x");
- DUMP(p, io_sync, "%#-*x");
+#ifdef CONFIG_MMIOWB
+ DUMP(p, mmiowb_state.nesting_count, "%#-*x");
+ DUMP(p, mmiowb_state.mmiowb_pending, "%#-*x");
+#endif
DUMP(p, irq_work_pending, "%#-*x");
- DUMP(p, nap_state_lost, "%#-*x");
DUMP(p, sprg_vdso, "%#-*llx");
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -2439,19 +2467,16 @@ static void dump_one_paca(int cpu)
#endif
#ifdef CONFIG_PPC_POWERNV
- DUMP(p, core_idle_state_ptr, "%-*px");
- DUMP(p, thread_idle_state, "%#-*x");
- DUMP(p, thread_mask, "%#-*x");
- DUMP(p, subcore_sibling_mask, "%#-*x");
- DUMP(p, requested_psscr, "%#-*llx");
- DUMP(p, stop_sprs.pid, "%#-*llx");
- DUMP(p, stop_sprs.ldbar, "%#-*llx");
- DUMP(p, stop_sprs.fscr, "%#-*llx");
- DUMP(p, stop_sprs.hfscr, "%#-*llx");
- DUMP(p, stop_sprs.mmcr1, "%#-*llx");
- DUMP(p, stop_sprs.mmcr2, "%#-*llx");
- DUMP(p, stop_sprs.mmcra, "%#-*llx");
- DUMP(p, dont_stop.counter, "%#-*x");
+ DUMP(p, idle_state, "%#-*lx");
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+ DUMP(p, thread_idle_state, "%#-*x");
+ DUMP(p, subcore_sibling_mask, "%#-*x");
+ } else {
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ DUMP(p, requested_psscr, "%#-*llx");
+ DUMP(p, dont_stop.counter, "%#-*x");
+#endif
+ }
#endif
DUMP(p, accounting.utime, "%#-*lx");
@@ -2884,9 +2909,17 @@ memops(int cmd)
scanhex((void *)&mcount);
switch( cmd ){
case 'm':
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
memmove((void *)mdest, (void *)msrc, mcount);
break;
case 's':
+ if (xmon_is_ro) {
+ printf(xmon_ro_msg);
+ break;
+ }
memset((void *)mdest, mval, mcount);
break;
case 'd':
@@ -3796,6 +3829,14 @@ static int __init early_parse_xmon(char *p)
} else if (strncmp(p, "on", 2) == 0) {
xmon_init(1);
xmon_on = 1;
+ } else if (strncmp(p, "rw", 2) == 0) {
+ xmon_init(1);
+ xmon_on = 1;
+ xmon_is_ro = false;
+ } else if (strncmp(p, "ro", 2) == 0) {
+ xmon_init(1);
+ xmon_on = 1;
+ xmon_is_ro = true;
} else if (strncmp(p, "off", 3) == 0)
xmon_on = 0;
else
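
The xmon changes above gate every state-changing command behind a single read-only flag and let "xmon=ro"/"xmon=rw" flip it at boot. A tiny userspace sketch of that gate-and-parse pattern, using illustrative names rather than the real xmon entry points:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool xmon_is_ro = true;      /* default would come from a Kconfig option */

static bool deny_if_ro(void)
{
	if (xmon_is_ro) {
		printf("Operation disabled: xmon in read-only mode\n");
		return true;
	}
	return false;
}

static void cmd_write_mem(void)
{
	if (deny_if_ro())
		return;
	printf("memory written\n");
}

static void parse_xmon_opt(const char *p)
{
	if (strncmp(p, "rw", 2) == 0)
		xmon_is_ro = false;
	else if (strncmp(p, "ro", 2) == 0)
		xmon_is_ro = true;
}

int main(void)
{
	cmd_write_mem();          /* blocked in the default read-only mode */
	parse_xmon_opt("rw");
	cmd_write_mem();          /* allowed after "xmon=rw" */
	return 0;
}
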
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index eb56c82d8aa1..ee32c66e1af3 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -27,7 +27,7 @@ config RISCV
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
+ select GENERIC_ATOMIC64 if !64BIT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
@@ -35,7 +35,6 @@ config RISCV
select HAVE_PERF_EVENTS
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
- select RISCV_ISA_A if SMP
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select HAVE_ARCH_TRACEHOOK
@@ -48,6 +47,7 @@ config RISCV
select RISCV_TIMER
select GENERIC_IRQ_MULTI_HANDLER
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_MMIOWB
select HAVE_EBPF_JIT if 64BIT
config MMU
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
config GENERIC_BUG
def_bool y
depends on BUG
@@ -197,9 +194,6 @@ config RISCV_ISA_C
If you don't know what to do here, say Y.
-config RISCV_ISA_A
- def_bool y
-
menu "supported PMU type"
depends on PERF_EVENTS
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index c6342e638ef7..6b0741c9f348 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -39,9 +39,8 @@ endif
KBUILD_CFLAGS += -Wall
# ISA string setting
-riscv-march-$(CONFIG_ARCH_RV32I) := rv32im
-riscv-march-$(CONFIG_ARCH_RV64I) := rv64im
-riscv-march-$(CONFIG_RISCV_ISA_A) := $(riscv-march-y)a
+riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
+riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index cccd12cf27d4..3d019e062c6f 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,9 +1,9 @@
generic-y += bugs.h
generic-y += checksum.h
generic-y += compat.h
-generic-y += cputime.h
generic-y += device.h
generic-y += div64.h
+generic-y += extable.h
generic-y += dma.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
@@ -11,7 +11,6 @@ generic-y += emergency-restart.h
generic-y += exec.h
generic-y += fb.h
generic-y += hardirq.h
-generic-y += hash.h
generic-y += hw_irq.h
generic-y += irq_regs.h
generic-y += irq_work.h
@@ -21,10 +20,8 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += scatterlist.h
generic-y += sections.h
generic-y += serial.h
generic-y += shmparam.h
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
index bfc7f099ab1f..52a1fbdeab3b 100644
--- a/arch/riscv/include/asm/bug.h
+++ b/arch/riscv/include/asm/bug.h
@@ -21,7 +21,12 @@
#include <asm/asm.h>
#ifdef CONFIG_GENERIC_BUG
-#define __BUG_INSN _AC(0x00100073, UL) /* ebreak */
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_32 _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
#ifndef __ASSEMBLY__
typedef u32 bug_insn_t;
@@ -38,38 +43,46 @@ typedef u32 bug_insn_t;
#define __BUG_ENTRY \
__BUG_ENTRY_ADDR "\n\t" \
__BUG_ENTRY_FILE "\n\t" \
- RISCV_SHORT " %1"
+ RISCV_SHORT " %1\n\t" \
+ RISCV_SHORT " %2"
#else
#define __BUG_ENTRY \
- __BUG_ENTRY_ADDR
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " %2"
#endif
-#define BUG() \
+#define __BUG_FLAGS(flags) \
do { \
__asm__ __volatile__ ( \
"1:\n\t" \
"ebreak\n" \
- ".pushsection __bug_table,\"a\"\n\t" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
"2:\n\t" \
__BUG_ENTRY "\n\t" \
- ".org 2b + %2\n\t" \
+ ".org 2b + %3\n\t" \
".popsection" \
: \
: "i" (__FILE__), "i" (__LINE__), \
- "i" (sizeof(struct bug_entry))); \
- unreachable(); \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
} while (0)
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_GENERIC_BUG */
#ifndef __ASSEMBLY__
-#define BUG() \
-do { \
+#define __BUG_FLAGS(flags) do { \
__asm__ __volatile__ ("ebreak\n"); \
- unreachable(); \
} while (0)
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_GENERIC_BUG */
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
#define HAVE_ARCH_BUG
#include <asm-generic/bug.h>
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 8f13074413a7..1f4ba68ab9aa 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -47,7 +47,7 @@ static inline void flush_dcache_page(struct page *page)
#else /* CONFIG_SMP */
-#define flush_icache_all() sbi_remote_fence_i(NULL)
+void flush_icache_all(void);
void flush_icache_mm(struct mm_struct *mm, bool local);
#endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 28a0d1cb374c..3c3c26c3a1f1 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -14,64 +14,95 @@
#ifndef _ASM_RISCV_CSR_H
#define _ASM_RISCV_CSR_H
+#include <asm/asm.h>
#include <linux/const.h>
/* Status register flags */
-#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
-#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
-#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM _AC(0x00040000, UL) /* Supervisor may access User Memory */
-
-#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
-#define SR_FS_OFF _AC(0x00000000, UL)
-#define SR_FS_INITIAL _AC(0x00002000, UL)
-#define SR_FS_CLEAN _AC(0x00004000, UL)
-#define SR_FS_DIRTY _AC(0x00006000, UL)
-
-#define SR_XS _AC(0x00018000, UL) /* Extension Status */
-#define SR_XS_OFF _AC(0x00000000, UL)
-#define SR_XS_INITIAL _AC(0x00008000, UL)
-#define SR_XS_CLEAN _AC(0x00010000, UL)
-#define SR_XS_DIRTY _AC(0x00018000, UL)
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
#ifndef CONFIG_64BIT
-#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
+#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
#else
-#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
#endif
/* SATP flags */
-#if __riscv_xlen == 32
-#define SATP_PPN _AC(0x003FFFFF, UL)
-#define SATP_MODE_32 _AC(0x80000000, UL)
-#define SATP_MODE SATP_MODE_32
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE SATP_MODE_32
#else
-#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
-#define SATP_MODE_39 _AC(0x8000000000000000, UL)
-#define SATP_MODE SATP_MODE_39
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE SATP_MODE_39
#endif
-/* Interrupt Enable and Interrupt Pending flags */
-#define SIE_SSIE _AC(0x00000002, UL) /* Software Interrupt Enable */
-#define SIE_STIE _AC(0x00000020, UL) /* Timer Interrupt Enable */
-#define SIE_SEIE _AC(0x00000200, UL) /* External Interrupt Enable */
-
-#define EXC_INST_MISALIGNED 0
-#define EXC_INST_ACCESS 1
-#define EXC_BREAKPOINT 3
-#define EXC_LOAD_ACCESS 5
-#define EXC_STORE_ACCESS 7
-#define EXC_SYSCALL 8
-#define EXC_INST_PAGE_FAULT 12
-#define EXC_LOAD_PAGE_FAULT 13
-#define EXC_STORE_PAGE_FAULT 15
+/* SCAUSE */
+#define SCAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+#define IRQ_U_SOFT 0
+#define IRQ_S_SOFT 1
+#define IRQ_M_SOFT 3
+#define IRQ_U_TIMER 4
+#define IRQ_S_TIMER 5
+#define IRQ_M_TIMER 7
+#define IRQ_U_EXT 8
+#define IRQ_S_EXT 9
+#define IRQ_M_EXT 11
+
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+
+/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
+#define SIE_SSIE (_AC(0x1, UL) << IRQ_S_SOFT)
+#define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
+#define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
#ifndef __ASSEMBLY__
#define csr_swap(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrw %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -80,7 +111,7 @@
#define csr_read(csr) \
({ \
register unsigned long __v; \
- __asm__ __volatile__ ("csrr %0, " #csr \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
: "=r" (__v) : \
: "memory"); \
__v; \
@@ -89,7 +120,7 @@
#define csr_write(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrw " #csr ", %0" \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
@@ -97,7 +128,7 @@
#define csr_read_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrs %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -106,7 +137,7 @@
#define csr_set(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrs " #csr ", %0" \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
@@ -114,7 +145,7 @@
#define csr_read_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrrc %0, " #csr ", %1" \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \
: "memory"); \
__v; \
@@ -123,7 +154,7 @@
#define csr_clear(csr, val) \
({ \
unsigned long __v = (unsigned long)(val); \
- __asm__ __volatile__ ("csrc " #csr ", %0" \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \
: "memory"); \
})
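
The csr_*() macros above switch from direct stringification ("#csr") to __ASM_STR(csr) so that a symbolic CSR number such as CSR_SSTATUS (0x100) is macro-expanded before being pasted into the asm template, which is what lets the callers below use the new CSR_* constants. A small host-side demonstration of the difference between one-level and two-level stringification (the macro names here are illustrative):

#include <stdio.h>

#define CSR_SSTATUS 0x100

#define STR_DIRECT(x)   #x           /* stringify the token as written      */
#define __STR(x)        #x
#define STR_EXPANDED(x) __STR(x)     /* expand macros first, then stringify */

int main(void)
{
	printf("direct:   csrr t0, %s\n", STR_DIRECT(CSR_SSTATUS));   /* prints "CSR_SSTATUS" */
	printf("expanded: csrr t0, %s\n", STR_EXPANDED(CSR_SSTATUS)); /* prints "0x100"       */
	return 0;
}
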
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 697fc23b0d5a..ce0cd7d77eb0 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -27,13 +27,7 @@
#define ELF_CLASS ELFCLASS32
#endif
-#if defined(__LITTLE_ENDIAN)
#define ELF_DATA ELFDATA2LSB
-#elif defined(__BIG_ENDIAN)
-#define ELF_DATA ELFDATA2MSB
-#else
-#error "Unknown endianness"
-#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 66641624d8a5..4ad6409c4647 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -7,18 +7,6 @@
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
-#ifndef CONFIG_RISCV_ISA_A
-/*
- * Use the generic interrupt disabling versions if the A extension
- * is not supported.
- */
-#ifdef CONFIG_SMP
-#error "Can't support generic futex calls without A extension on SMP"
-#endif
-#include <asm-generic/futex.h>
-
-#else /* CONFIG_RISCV_ISA_A */
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
@@ -124,5 +112,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return ret;
}
-#endif /* CONFIG_RISCV_ISA_A */
#endif /* _ASM_FUTEX_H */
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 1d9c1376dc64..744fd92e77bc 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -20,6 +20,7 @@
#define _ASM_RISCV_IO_H
#include <linux/types.h>
+#include <asm/mmiowb.h>
extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
@@ -100,18 +101,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#endif
/*
- * FIXME: I'm flip-flopping on whether or not we should keep this or enforce
- * the ordering with I/O on spinlocks like PowerPC does. The worry is that
- * drivers won't get this correct, but I also don't want to introduce a fence
- * into the lock code that otherwise only uses AMOs (and is essentially defined
- * by the ISA to be correct). For now I'm leaving this here: "o,w" is
- * sufficient to ensure that all writes to the device have completed before the
- * write to the spinlock is allowed to commit. I surmised this from reading
- * "ACQUIRES VS I/O ACCESSES" in memory-barriers.txt.
- */
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
-
-/*
* Unordered I/O memory access primitives. These are even more relaxed than
* the relaxed versions, as they don't even order accesses between successive
* operations to the I/O regions.
@@ -165,7 +154,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
#define __io_br() do {} while (0)
#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory");
#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw() do {} while (0)
+#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
index 07a3c6d5706f..1a69b3bcd371 100644
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -21,25 +21,25 @@
/* read interrupt enabled status */
static inline unsigned long arch_local_save_flags(void)
{
- return csr_read(sstatus);
+ return csr_read(CSR_SSTATUS);
}
/* unconditionally enable interrupts */
static inline void arch_local_irq_enable(void)
{
- csr_set(sstatus, SR_SIE);
+ csr_set(CSR_SSTATUS, SR_SIE);
}
/* unconditionally disable interrupts */
static inline void arch_local_irq_disable(void)
{
- csr_clear(sstatus, SR_SIE);
+ csr_clear(CSR_SSTATUS, SR_SIE);
}
/* get status and disable interrupts */
static inline unsigned long arch_local_irq_save(void)
{
- return csr_read_clear(sstatus, SR_SIE);
+ return csr_read_clear(CSR_SSTATUS, SR_SIE);
}
/* test flags */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- csr_set(sstatus, flags & SR_SIE);
+ csr_set(CSR_SSTATUS, flags & SR_SIE);
}
#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000000..5d7e3a2b4e3b
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* ASM_RISCV_MMIOWB_H */
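
Together, redefining __io_aw() to mmiowb_set_pending() and adding asm/mmiowb.h hook RISC-V into the generic mmiowb tracking: an MMIO write records that an ordering barrier is owed, and the lock-release path issues it only when something is actually pending. A much-simplified single-flag model (the real implementation tracks a per-CPU nesting count; the names below are illustrative) looks like this:

#include <stdbool.h>
#include <stdio.h>

static bool mmiowb_pending;

static void io_write(void)
{
	printf("device write\n");
	mmiowb_pending = true;      /* analogue of __io_aw() / mmiowb_set_pending() */
}

static void lock_release(void)
{
	if (mmiowb_pending) {
		mmiowb_pending = false;
		printf("fence o,w before releasing lock\n");
	}
	printf("unlock\n");
}

int main(void)
{
	io_write();
	lock_release();   /* barrier emitted */
	lock_release();   /* nothing pending, no barrier */
	return 0;
}
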
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 336d60ec5698..bf4f097a9051 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,8 +20,6 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *task)
@@ -39,61 +37,8 @@ static inline void destroy_context(struct mm_struct *mm)
{
}
-/*
- * When necessary, performs a deferred icache flush for the given MM context,
- * on the local CPU. RISC-V has no direct mechanism for instruction cache
- * shoot downs, so instead we send an IPI that informs the remote harts they
- * need to flush their local instruction caches. To avoid pathologically slow
- * behavior in a common case (a bunch of single-hart processes on a many-hart
- * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
- * executing a MM context and instead schedule a deferred local instruction
- * cache flush to be performed before execution resumes on each hart. This
- * actually performs that local instruction cache flush, which implicitly only
- * refers to the current hart.
- */
-static inline void flush_icache_deferred(struct mm_struct *mm)
-{
-#ifdef CONFIG_SMP
- unsigned int cpu = smp_processor_id();
- cpumask_t *mask = &mm->context.icache_stale_mask;
-
- if (cpumask_test_cpu(cpu, mask)) {
- cpumask_clear_cpu(cpu, mask);
- /*
- * Ensure the remote hart's writes are visible to this hart.
- * This pairs with a barrier in flush_icache_mm.
- */
- smp_mb();
- local_flush_icache_all();
- }
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next, struct task_struct *task)
-{
- if (likely(prev != next)) {
- /*
- * Mark the current MM context as inactive, and the next as
- * active. This is at least used by the icache flushing
- * routines in order to determine who should
- */
- unsigned int cpu = smp_processor_id();
-
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
-
- /*
- * Use the old spbtr name instead of using the current satp
- * name to support binutils 2.29 which doesn't know about the
- * privileged ISA 1.10 yet.
- */
- csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
- local_flush_tlb_all();
-
- flush_icache_deferred(next);
- }
-}
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index d35ec2f41381..9c867a4bac83 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -70,47 +70,38 @@ struct pt_regs {
/* Helpers for working with the instruction pointer */
-#define GET_IP(regs) ((regs)->sepc)
-#define SET_IP(regs, val) (GET_IP(regs) = (val))
-
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
- return GET_IP(regs);
+ return regs->sepc;
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_IP(regs, val);
+ regs->sepc = val;
}
#define profile_pc(regs) instruction_pointer(regs)
/* Helpers for working with the user stack pointer */
-#define GET_USP(regs) ((regs)->sp)
-#define SET_USP(regs, val) (GET_USP(regs) = (val))
-
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- return GET_USP(regs);
+ return regs->sp;
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_USP(regs, val);
+ regs->sp = val;
}
/* Helpers for working with the frame pointer */
-#define GET_FP(regs) ((regs)->s0)
-#define SET_FP(regs, val) (GET_FP(regs) = (val))
-
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
- return GET_FP(regs);
+ return regs->s0;
}
static inline void frame_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- SET_FP(regs, val);
+ regs->s0 = val;
}
static inline unsigned long regs_return_value(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index b6bb10b92fe2..19f231615510 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -26,22 +26,27 @@
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8
-#define SBI_CALL(which, arg0, arg1, arg2) ({ \
+#define SBI_CALL(which, arg0, arg1, arg2, arg3) ({ \
register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); \
register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); \
register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); \
+ register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); \
register uintptr_t a7 asm ("a7") = (uintptr_t)(which); \
asm volatile ("ecall" \
: "+r" (a0) \
- : "r" (a1), "r" (a2), "r" (a7) \
+ : "r" (a1), "r" (a2), "r" (a3), "r" (a7) \
: "memory"); \
a0; \
})
/* Lazy implementations until SBI is finalized */
-#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0)
-#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0)
-#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0)
+#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0, 0)
+#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0, 0)
+#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0, 0)
+#define SBI_CALL_3(which, arg0, arg1, arg2) \
+ SBI_CALL(which, arg0, arg1, arg2, 0)
+#define SBI_CALL_4(which, arg0, arg1, arg2, arg3) \
+ SBI_CALL(which, arg0, arg1, arg2, arg3)
static inline void sbi_console_putchar(int ch)
{
@@ -86,7 +91,7 @@ static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
unsigned long start,
unsigned long size)
{
- SBI_CALL_1(SBI_REMOTE_SFENCE_VMA, hart_mask);
+ SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
}
static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
@@ -94,7 +99,7 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
unsigned long size,
unsigned long asid)
{
- SBI_CALL_1(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask);
+ SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
}
#endif
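
The sbi.h hunk widens SBI_CALL() to four arguments so that the remote sfence calls can pass start/size/asid instead of silently dropping them. A host-side sketch of the same fixed-arity forwarding, with sbi_stub() standing in for the real ecall sequence:

#include <stdio.h>

static unsigned long sbi_stub(int which, unsigned long a0, unsigned long a1,
			      unsigned long a2, unsigned long a3)
{
	printf("ecall %d(%lu, %lu, %lu, %lu)\n", which, a0, a1, a2, a3);
	return 0;
}

/* Every n-argument wrapper funnels into the full call, padding with zeros. */
#define SBI_CALL(which, a0, a1, a2, a3)   sbi_stub(which, a0, a1, a2, a3)
#define SBI_CALL_1(which, a0)             SBI_CALL(which, a0, 0, 0, 0)
#define SBI_CALL_3(which, a0, a1, a2)     SBI_CALL(which, a0, a1, a2, 0)
#define SBI_CALL_4(which, a0, a1, a2, a3) SBI_CALL(which, a0, a1, a2, a3)

int main(void)
{
	SBI_CALL_1(8, 0);                       /* shutdown-style call                  */
	SBI_CALL_4(7, 0x1, 0x1000, 0x2000, 5);  /* hart mask, start, size, asid all kept */
	return 0;
}
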
diff --git a/arch/riscv/include/asm/sifive_l2_cache.h b/arch/riscv/include/asm/sifive_l2_cache.h
new file mode 100644
index 000000000000..04f6748fc50b
--- /dev/null
+++ b/arch/riscv/include/asm/sifive_l2_cache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef _ASM_RISCV_SIFIVE_L2_CACHE_H
+#define _ASM_RISCV_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* _ASM_RISCV_SIFIVE_L2_CACHE_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index a3d5273ded7c..0f2fe1794c8f 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -88,7 +88,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
#ifdef CONFIG_64BIT
return AUDIT_ARCH_RISCV64;
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1c9cc8389928..9c039870019b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -28,7 +28,9 @@
#include <asm/processor.h>
#include <asm/csr.h>
-typedef unsigned long mm_segment_t;
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
/*
* low level task data that entry.S needs immediate access to
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 439dc7072e05..1ad8d093c58b 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -18,6 +18,7 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
+#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
static inline void tlb_flush(struct mmu_gather *tlb)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index fb53a8089e76..b26f407be5c8 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -23,6 +23,7 @@
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/byteorder.h>
+#include <asm/extable.h>
#include <asm/asm.h>
#define __enable_user_access() \
@@ -38,8 +39,10 @@
* For historical reasons, these macros are grossly misnamed.
*/
-#define KERNEL_DS (~0UL)
-#define USER_DS (TASK_SIZE)
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
#define get_fs() (current_thread_info()->addr_limit)
@@ -48,9 +51,9 @@ static inline void set_fs(mm_segment_t fs)
current_thread_info()->addr_limit = fs;
}
-#define segment_eq(a, b) ((a) == (b))
+#define segment_eq(a, b) ((a).seg == (b).seg)
-#define user_addr_max() (get_fs())
+#define user_addr_max() (get_fs().seg)
/**
@@ -82,7 +85,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
{
const mm_segment_t fs = get_fs();
- return (size <= fs) && (addr <= (fs - size));
+ return size <= fs.seg && addr <= fs.seg - size;
}
/*
@@ -98,21 +101,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
* on our cache or tlb entries.
*/
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *state);
-
-#if defined(__LITTLE_ENDIAN)
-#define __MSW 1
#define __LSW 0
-#elif defined(__BIG_ENDIAN)
-#define __MSW 0
-#define __LSW 1
-#else
-#error "Unknown endianness"
-#endif
+#define __MSW 1
/*
* The "__xxx" versions of the user access functions do not verify the address
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index dac98348c6a3..578bb5efc085 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -312,9 +312,6 @@ void asm_offsets(void)
- offsetof(struct task_struct, thread.fstate.f[0])
);
- /* The assembler needs access to THREAD_SIZE as well. */
- DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
-
/*
* We allocate a pt_regs on the stack when entering the kernel. This
* ensures the alignment is sane.
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index cf2fca12414a..c8d2a3223099 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -136,8 +136,7 @@ static void c_stop(struct seq_file *m, void *v)
static int c_show(struct seq_file *m, void *v)
{
unsigned long cpu_id = (unsigned long)v - 1;
- struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
- NULL);
+ struct device_node *node = of_get_cpu_node(cpu_id, NULL);
const char *compat, *isa, *mmu;
seq_printf(m, "processor\t: %lu\n", cpu_id);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fd9b57c8b4ce..1c1ecc238cfa 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -37,11 +37,11 @@
* the kernel thread pointer. If we came from the kernel, sscratch
* will contain 0, and we should continue on the current TP.
*/
- csrrw tp, sscratch, tp
+ csrrw tp, CSR_SSCRATCH, tp
bnez tp, _save_context
_restore_kernel_tpsp:
- csrr tp, sscratch
+ csrr tp, CSR_SSCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context:
REG_S sp, TASK_TI_USER_SP(tp)
@@ -87,11 +87,11 @@ _save_context:
li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp)
- csrrc s1, sstatus, t0
- csrr s2, sepc
- csrr s3, sbadaddr
- csrr s4, scause
- csrr s5, sscratch
+ csrrc s1, CSR_SSTATUS, t0
+ csrr s2, CSR_SEPC
+ csrr s3, CSR_STVAL
+ csrr s4, CSR_SCAUSE
+ csrr s5, CSR_SSCRATCH
REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp)
REG_S s2, PT_SEPC(sp)
@@ -107,8 +107,8 @@ _save_context:
.macro RESTORE_ALL
REG_L a0, PT_SSTATUS(sp)
REG_L a2, PT_SEPC(sp)
- csrw sstatus, a0
- csrw sepc, a2
+ csrw CSR_SSTATUS, a0
+ csrw CSR_SEPC, a2
REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp)
@@ -155,7 +155,7 @@ ENTRY(handle_exception)
* Set sscratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel
*/
- csrw sscratch, x0
+ csrw CSR_SSCRATCH, x0
/* Load the global pointer */
.option push
@@ -248,7 +248,7 @@ resume_userspace:
* Save TP into sscratch, so we can find the kernel data structures
* again.
*/
- csrw sscratch, tp
+ csrw CSR_SSCRATCH, tp
restore_all:
RESTORE_ALL
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index fe884cd69abd..370c66ce187a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -23,7 +23,8 @@
__INIT
ENTRY(_start)
/* Mask all interrupts */
- csrw sie, zero
+ csrw CSR_SIE, zero
+ csrw CSR_SIP, zero
/* Load the global pointer */
.option push
@@ -68,14 +69,10 @@ clear_bss_done:
/* Restore C environment */
la tp, init_task
sw zero, TASK_TI_CPU(tp)
-
- la sp, init_thread_union
- li a0, ASM_THREAD_SIZE
- add sp, sp, a0
+ la sp, init_thread_union + THREAD_SIZE
/* Start the kernel */
- mv a0, s0
- mv a1, s1
+ mv a0, s1
call parse_dtb
tail start_kernel
@@ -89,7 +86,7 @@ relocate:
	/* Point stvec to virtual address of instruction after satp write */
la a0, 1f
add a0, a0, a1
- csrw stvec, a0
+ csrw CSR_STVEC, a0
/* Compute satp for kernel page tables, but don't load it yet */
la a2, swapper_pg_dir
@@ -99,18 +96,20 @@ relocate:
/*
* Load trampoline page directory, which will cause us to trap to
- * stvec if VA != PA, or simply fall through if VA == PA
+ * stvec if VA != PA, or simply fall through if VA == PA. We need a
+ * full fence here because setup_vm() just wrote these PTEs and we need
+ * to ensure the new translations are in use.
*/
la a0, trampoline_pg_dir
srl a0, a0, PAGE_SHIFT
or a0, a0, a1
sfence.vma
- csrw sptbr, a0
+ csrw CSR_SATP, a0
.align 2
1:
/* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park
- csrw stvec, a0
+ csrw CSR_STVEC, a0
/* Reload the global pointer */
.option push
@@ -118,8 +117,14 @@ relocate:
la gp, __global_pointer$
.option pop
- /* Switch to kernel page tables */
- csrw sptbr, a2
+ /*
+ * Switch to kernel page tables. A full fence is necessary in order to
+ * avoid using the trampoline translations, which are only correct for
+	 * the first superpage.  Fetching the fence is guaranteed to work
+ * because that first superpage is translated the same way.
+ */
+ csrw CSR_SATP, a2
+ sfence.vma
ret
@@ -130,7 +135,7 @@ relocate:
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
- csrw stvec, a3
+ csrw CSR_STVEC, a3
slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 48e6b7db83a1..6d8659388c49 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -14,17 +14,9 @@
/*
* Possible interrupt causes:
*/
-#define INTERRUPT_CAUSE_SOFTWARE 1
-#define INTERRUPT_CAUSE_TIMER 5
-#define INTERRUPT_CAUSE_EXTERNAL 9
-
-/*
- * The high order bit of the trap cause register is always set for
- * interrupts, which allows us to differentiate them from exceptions
- * quickly. The INTERRUPT_CAUSE_* macros don't contain that bit, so we
- * need to mask it off.
- */
-#define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1))
+#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
+#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
+#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
int arch_show_interrupts(struct seq_file *p, int prec)
{
@@ -37,7 +29,7 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
+ switch (regs->scause & ~SCAUSE_IRQ_FLAG) {
case INTERRUPT_CAUSE_TIMER:
riscv_timer_interrupt();
break;
@@ -54,7 +46,8 @@ asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
handle_arch_irq(regs);
break;
default:
- panic("unexpected interrupt cause");
+ pr_alert("unexpected interrupt cause 0x%lx", regs->scause);
+ BUG();
}
irq_exit();
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index 667ee70defea..91626d9ae5f2 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
switch (idx) {
case RISCV_PMU_CYCLE:
- val = csr_read(cycle);
+ val = csr_read(CSR_CYCLE);
break;
case RISCV_PMU_INSTRET:
- val = csr_read(instret);
+ val = csr_read(CSR_INSTRET);
break;
default:
WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
index 2a53d26ffdd6..ed637aee514b 100644
--- a/arch/riscv/kernel/reset.c
+++ b/arch/riscv/kernel/reset.c
@@ -12,11 +12,15 @@
*/
#include <linux/reboot.h>
-#include <linux/export.h>
#include <asm/sbi.h>
-void (*pm_power_off)(void) = machine_power_off;
-EXPORT_SYMBOL(pm_power_off);
+static void default_power_off(void)
+{
+ sbi_shutdown();
+ while (1);
+}
+
+void (*pm_power_off)(void) = default_power_off;
void machine_restart(char *cmd)
{
@@ -26,11 +30,10 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
- machine_power_off();
+ pm_power_off();
}
void machine_power_off(void)
{
- sbi_shutdown();
- while (1);
+ pm_power_off();
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 540a331d1376..d93bcce004e3 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -52,9 +52,11 @@ struct screen_info screen_info = {
atomic_t hart_lottery;
unsigned long boot_cpu_hartid;
-void __init parse_dtb(unsigned int hartid, void *dtb)
+void __init parse_dtb(phys_addr_t dtb_phys)
{
- if (early_init_dt_scan(__va(dtb)))
+ void *dtb = __va(dtb_phys);
+
+ if (early_init_dt_scan(dtb))
return;
pr_err("No DTB passed to the kernel\n");
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 837e1646091a..804d6ee4f3c5 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -234,6 +234,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/* Are we from a system call? */
if (regs->scause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->scause = -1UL;
+
/* If so, check system call restarting.. */
switch (regs->a0) {
case -ERESTART_RESTARTBLOCK:
@@ -272,6 +275,9 @@ static void do_signal(struct pt_regs *regs)
/* Did we come from a system call? */
if (regs->scause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->scause = -1UL;
+
/* Restart the system call - no handlers present */
switch (regs->a0) {
case -ERESTARTNOHAND:
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0c41d07ec281..b2537ffa855c 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -42,7 +42,7 @@ unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
void __init smp_setup_processor_id(void)
{
- cpuid_to_hartid_map(0) = boot_cpu_hartid;
+ cpuid_to_hartid_map(0) = boot_cpu_hartid;
}
/* A collection of single bit ipi messages. */
@@ -53,7 +53,7 @@ static struct {
int riscv_hartid_to_cpuid(int hartid)
{
- int i = -1;
+ int i;
for (i = 0; i < NR_CPUS; i++)
if (cpuid_to_hartid_map(i) == hartid)
@@ -70,6 +70,12 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
for_each_cpu(cpu, in)
cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_hartid_map(cpu);
+}
+
/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
@@ -89,7 +95,7 @@ void riscv_software_interrupt(void)
unsigned long *stats = ipi_data[smp_processor_id()].stats;
/* Clear pending IPI */
- csr_clear(sip, SIE_SSIE);
+ csr_clear(CSR_SIP, SIE_SSIE);
while (true) {
unsigned long ops;
@@ -199,52 +205,3 @@ void smp_send_reschedule(int cpu)
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
-/*
- * Performs an icache flush for the given MM context. RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
- unsigned int cpu;
- cpumask_t others, hmask, *mask;
-
- preempt_disable();
-
- /* Mark every hart's icache as needing a flush for this MM. */
- mask = &mm->context.icache_stale_mask;
- cpumask_setall(mask);
- /* Flush this hart's I$ now, and mark it as flushed. */
- cpu = smp_processor_id();
- cpumask_clear_cpu(cpu, mask);
- local_flush_icache_all();
-
- /*
- * Flush the I$ of other harts concurrently executing, and mark them as
- * flushed.
- */
- cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
- local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local) {
- cpumask_clear(&hmask);
- riscv_cpuid_to_hartid_mask(&others, &hmask);
- sbi_remote_fence_i(hmask.bits);
- } else {
- /*
- * It's assumed that at least one strongly ordered operation is
- * performed on this hart between setting a hart's cpumask bit
- * and scheduling this MM context on that hart. Sending an SBI
- * remote message will do this, but in the case where no
- * messages are sent we still need to order this hart's writes
- * with flush_icache_deferred().
- */
- smp_mb();
- }
-
- preempt_enable();
-}
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index eb533b5c2c8c..7a0b62252524 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -47,6 +47,17 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpuid;
+
+ /* This covers non-smp usecase mandated by "nosmp" option */
+ if (max_cpus == 0)
+ return;
+
+ for_each_possible_cpu(cpuid) {
+ if (cpuid == smp_processor_id())
+ continue;
+ set_cpu_present(cpuid, true);
+ }
}
void __init setup_smp(void)
@@ -73,12 +84,19 @@ void __init setup_smp(void)
}
cpuid_to_hartid_map(cpuid) = hart;
- set_cpu_possible(cpuid, true);
- set_cpu_present(cpuid, true);
cpuid++;
}
BUG_ON(!found_boot_cpu);
+
+ if (cpuid > nr_cpu_ids)
+ pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+ cpuid, nr_cpu_ids);
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+ set_cpu_possible(cpuid, true);
+ }
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index a4b1d94371a0..e80a5e8da119 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -33,9 +33,9 @@ static void notrace walk_stackframe(struct task_struct *task,
unsigned long fp, sp, pc;
if (regs) {
- fp = GET_FP(regs);
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp __asm__ ("sp");
fp = (unsigned long)__builtin_frame_address(0);
@@ -64,12 +64,8 @@ static void notrace walk_stackframe(struct task_struct *task,
frame = (struct stackframe *)fp - 1;
sp = fp;
fp = frame->fp;
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
(unsigned long *)(fp - 8));
-#else
- pc = frame->ra - 0x4;
-#endif
}
}
@@ -82,8 +78,8 @@ static void notrace walk_stackframe(struct task_struct *task,
unsigned long *ksp;
if (regs) {
- sp = GET_USP(regs);
- pc = GET_IP(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp __asm__ ("sp");
sp = current_sp;
@@ -169,8 +165,6 @@ static bool save_trace(unsigned long pc, void *arg)
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
walk_stackframe(tsk, NULL, save_trace, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 24a9333dda2c..3d1a651dc54c 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -70,7 +70,7 @@ void do_trap(struct pt_regs *regs, int signo, int code,
&& printk_ratelimit()) {
pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
tsk->comm, task_pid_nr(tsk), signo, code, addr);
- print_vma_addr(KERN_CONT " in ", GET_IP(regs));
+ print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
show_regs(regs);
}
@@ -118,6 +118,17 @@ DO_ERROR_INFO(do_trap_ecall_s,
DO_ERROR_INFO(do_trap_ecall_m,
SIGILL, ILL_ILLTRP, "environment call from M-mode");
+#ifdef CONFIG_GENERIC_BUG
+static inline unsigned long get_break_insn_length(unsigned long pc)
+{
+ bug_insn_t insn;
+
+ if (probe_kernel_address((bug_insn_t *)pc, insn))
+ return 0;
+ return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
asmlinkage void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_GENERIC_BUG
@@ -129,8 +140,8 @@ asmlinkage void do_trap_break(struct pt_regs *regs)
case BUG_TRAP_TYPE_NONE:
break;
case BUG_TRAP_TYPE_WARN:
- regs->sepc += sizeof(bug_insn_t);
- return;
+ regs->sepc += get_break_insn_length(regs->sepc);
+ break;
case BUG_TRAP_TYPE_BUG:
die(regs, "Kernel BUG");
}
@@ -145,11 +156,14 @@ int is_valid_bugaddr(unsigned long pc)
{
bug_insn_t insn;
- if (pc < PAGE_OFFSET)
+ if (pc < VMALLOC_START)
return 0;
if (probe_kernel_address((bug_insn_t *)pc, insn))
return 0;
- return (insn == __BUG_INSN);
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+ return (insn == __BUG_INSN_32);
+ else
+ return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */
@@ -159,9 +173,9 @@ void __init trap_init(void)
* Set sup0 scratch register to 0, indicating to exception vector
* that we are presently executing in the kernel
*/
- csr_write(sscratch, 0);
+ csr_write(CSR_SSCRATCH, 0);
/* Set the exception vector address */
- csr_write(stvec, &handle_exception);
+ csr_write(CSR_STVEC, &handle_exception);
/* Enable all interrupts */
- csr_write(sie, -1);
+ csr_write(CSR_SIE, -1);
}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index fec62b24df89..b07b765f312a 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -36,7 +36,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
# these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index b68aac701803..8db569141485 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -9,3 +9,5 @@ obj-y += fault.o
obj-y += extable.o
obj-y += ioremap.o
obj-y += cacheflush.o
+obj-y += context.o
+obj-y += sifive_l2_cache.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 498c0a0814fe..497b7d07af0c 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -14,6 +14,67 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+void flush_icache_all(void)
+{
+ sbi_remote_fence_i(NULL);
+}
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+ unsigned int cpu;
+ cpumask_t others, hmask, *mask;
+
+ preempt_disable();
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_flush_icache_all();
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+ local |= cpumask_empty(&others);
+ if (mm != current->active_mm || !local) {
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(&others, &hmask);
+ sbi_remote_fence_i(hmask.bits);
+ } else {
+ /*
+ * It's assumed that at least one strongly ordered operation is
+ * performed on this hart between setting a hart's cpumask bit
+ * and scheduling this MM context on that hart. Sending an SBI
+ * remote message will do this, but in the case where no
+ * messages are sent we still need to order this hart's writes
+ * with flush_icache_deferred().
+ */
+ smp_mb();
+ }
+
+ preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
void flush_icache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000000..89ceb3cbe218
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm.
+ */
+ smp_mb();
+ local_flush_icache_all();
+ }
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task)
+{
+ unsigned int cpu;
+
+ if (unlikely(prev == next))
+ return;
+
+ /*
+ * Mark the current MM context as inactive, and the next as
+ * active. This is at least used by the icache flushing
+ * routines in order to determine who should be flushed.
+ */
+ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /*
+ * Use the old sptbr name instead of using the current satp
+ * name to support binutils 2.29 which doesn't know about the
+ * privileged ISA 1.10 yet.
+ */
+ csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+ local_flush_tlb_all();
+
+ flush_icache_deferred(next);
+}
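The comments in flush_icache_mm() and flush_icache_deferred() describe a lazy shoot-down scheme: mark every hart's icache stale, flush the local one immediately, and let the other harts flush lazily the next time they switch to this mm. Below is a minimal userspace model of that bookkeeping; the hart count and names are invented, and the cpumasks, SBI calls and memory barriers of the real code are deliberately left out.

#include <stdio.h>
#include <stdint.h>

#define NR_HARTS 4

struct mm_model {
	uint32_t icache_stale_mask;	/* bit set => that hart must flush its I$ */
};

/* Analogue of flush_icache_mm(): mark every hart stale, then clear and
 * flush the calling hart immediately.  No IPIs are sent to idle harts.
 */
static void model_flush_icache_mm(struct mm_model *mm, unsigned int self)
{
	mm->icache_stale_mask = (1u << NR_HARTS) - 1;
	mm->icache_stale_mask &= ~(1u << self);
	printf("hart %u: fence.i now\n", self);
}

/* Analogue of flush_icache_deferred(): run when a hart switches to this mm. */
static void model_switch_mm(struct mm_model *mm, unsigned int hart)
{
	if (mm->icache_stale_mask & (1u << hart)) {
		mm->icache_stale_mask &= ~(1u << hart);
		printf("hart %u: deferred fence.i before resuming\n", hart);
	}
}

int main(void)
{
	struct mm_model mm = { 0 };

	model_flush_icache_mm(&mm, 0);	/* hart 0 just wrote code */
	model_switch_mm(&mm, 2);	/* hart 2 flushes lazily */
	model_switch_mm(&mm, 2);	/* already clean: nothing to do */
	return 0;
}
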
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 88401d5125bc..cec8be9e2d6a 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -229,8 +229,9 @@ vmalloc_fault:
pte_t *pte_k;
int index;
+ /* User mode accesses just cause a SIGSEGV */
if (user_mode(regs))
- goto bad_area;
+ return do_trap(regs, SIGSEGV, code, addr, tsk);
/*
* Synchronize this task's top level page-table
@@ -239,13 +240,9 @@ vmalloc_fault:
* Do _not_ use "tsk->active_mm->pgd" here.
* We might be inside an interrupt in the middle
* of a task switch.
- *
- * Note: Use the old spbtr name instead of using the current
- * satp name to support binutils 2.29 which doesn't know about
- * the privileged ISA 1.10 yet.
*/
index = pgd_index(addr);
- pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index;
+ pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k))
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index bc7b77e34d09..8bf6f9c2d48c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -66,11 +66,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-void free_initmem(void)
-{
- free_initmem_default(0);
-}
-
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
diff --git a/arch/riscv/mm/sifive_l2_cache.c b/arch/riscv/mm/sifive_l2_cache.c
new file mode 100644
index 000000000000..4eb64619b3f4
--- /dev/null
+++ b/arch/riscv/mm/sifive_l2_cache.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <asm/sifive_l2_cache.h>
+
+#define SIFIVE_L2_DIRECCFIX_LOW 0x100
+#define SIFIVE_L2_DIRECCFIX_HIGH 0x104
+#define SIFIVE_L2_DIRECCFIX_COUNT 0x108
+
+#define SIFIVE_L2_DATECCFIX_LOW 0x140
+#define SIFIVE_L2_DATECCFIX_HIGH 0x144
+#define SIFIVE_L2_DATECCFIX_COUNT 0x148
+
+#define SIFIVE_L2_DATECCFAIL_LOW 0x160
+#define SIFIVE_L2_DATECCFAIL_HIGH 0x164
+#define SIFIVE_L2_DATECCFAIL_COUNT 0x168
+
+#define SIFIVE_L2_CONFIG 0x00
+#define SIFIVE_L2_WAYENABLE 0x08
+#define SIFIVE_L2_ECCINJECTERR 0x40
+
+#define SIFIVE_L2_MAX_ECCINTR 3
+
+static void __iomem *l2_base;
+static int g_irq[SIFIVE_L2_MAX_ECCINTR];
+
+enum {
+ DIR_CORR = 0,
+ DATA_CORR,
+ DATA_UNCORR,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *sifive_test;
+
+static ssize_t l2_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ unsigned int val;
+
+ if (kstrtouint_from_user(data, count, 0, &val))
+ return -EINVAL;
+ if ((val >= 0 && val < 0xFF) || (val >= 0x10000 && val < 0x100FF))
+ writel(val, l2_base + SIFIVE_L2_ECCINJECTERR);
+ else
+ return -EINVAL;
+ return count;
+}
+
+static const struct file_operations l2_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = l2_write
+};
+
+static void setup_sifive_debug(void)
+{
+ sifive_test = debugfs_create_dir("sifive_l2_cache", NULL);
+
+ debugfs_create_file("sifive_debug_inject_error", 0200,
+ sifive_test, NULL, &l2_fops);
+}
+#endif
+
+static void l2_config_read(void)
+{
+ u32 regval, val;
+
+ regval = readl(l2_base + SIFIVE_L2_CONFIG);
+ val = regval & 0xFF;
+ pr_info("L2CACHE: No. of Banks in the cache: %d\n", val);
+ val = (regval & 0xFF00) >> 8;
+ pr_info("L2CACHE: No. of ways per bank: %d\n", val);
+ val = (regval & 0xFF0000) >> 16;
+ pr_info("L2CACHE: Sets per bank: %llu\n", (uint64_t)1 << val);
+ val = (regval & 0xFF000000) >> 24;
+ pr_info("L2CACHE: Bytes per cache block: %llu\n", (uint64_t)1 << val);
+
+ regval = readl(l2_base + SIFIVE_L2_WAYENABLE);
+ pr_info("L2CACHE: Index of the largest way enabled: %d\n", regval);
+}
+
+static const struct of_device_id sifive_l2_ids[] = {
+ { .compatible = "sifive,fu540-c000-ccache" },
+ { /* end of table */ },
+};
+
+static ATOMIC_NOTIFIER_HEAD(l2_err_chain);
+
+int register_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_sifive_l2_error_notifier);
+
+int unregister_sifive_l2_error_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&l2_err_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier);
+
+static irqreturn_t l2_int_handler(int irq, void *device)
+{
+ unsigned int regval, add_h, add_l;
+
+ if (irq == g_irq[DIR_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DIRECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DIRECCFIX_LOW);
+ pr_err("L2CACHE: DirError @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DIRECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DirECCFix");
+ }
+ if (irq == g_irq[DATA_CORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFIX_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFIX_LOW);
+ pr_err("L2CACHE: DataError @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DATECCFIX_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_CE,
+ "DatECCFix");
+ }
+ if (irq == g_irq[DATA_UNCORR]) {
+ add_h = readl(l2_base + SIFIVE_L2_DATECCFAIL_HIGH);
+ add_l = readl(l2_base + SIFIVE_L2_DATECCFAIL_LOW);
+ pr_err("L2CACHE: DataFail @ 0x%08X.%08X\n", add_h, add_l);
+ regval = readl(l2_base + SIFIVE_L2_DATECCFAIL_COUNT);
+ atomic_notifier_call_chain(&l2_err_chain, SIFIVE_L2_ERR_TYPE_UE,
+ "DatECCFail");
+ }
+
+ return IRQ_HANDLED;
+}
+
+int __init sifive_l2_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int i, rc;
+
+ np = of_find_matching_node(NULL, sifive_l2_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2_base = ioremap(res.start, resource_size(&res));
+ if (!l2_base)
+ return -ENOMEM;
+
+ for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) {
+ g_irq[i] = irq_of_parse_and_map(np, i);
+ rc = request_irq(g_irq[i], l2_int_handler, 0, "l2_ecc", NULL);
+ if (rc) {
+ pr_err("L2CACHE: Could not request IRQ %d\n", g_irq[i]);
+ return rc;
+ }
+ }
+
+ l2_config_read();
+
+#ifdef CONFIG_DEBUG_FS
+ setup_sifive_debug();
+#endif
+ return 0;
+}
+device_initcall(sifive_l2_init);
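For reference, the bit layout that l2_config_read() decodes from SIFIVE_L2_CONFIG is: banks in bits 0-7, ways per bank in bits 8-15, log2(sets per bank) in bits 16-23 and log2(bytes per block) in bits 24-31. A small sketch with a made-up register value (not read from real hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regval = 0x06100804;	/* hypothetical SIFIVE_L2_CONFIG value */
	uint32_t banks  = regval & 0xFF;
	uint32_t ways   = (regval >> 8) & 0xFF;
	uint32_t sets   = (regval >> 16) & 0xFF;	/* log2 of sets per bank */
	uint32_t blocks = (regval >> 24) & 0xFF;	/* log2 of bytes per block */

	printf("banks: %u, ways/bank: %u, sets/bank: %llu, bytes/block: %llu\n",
	       banks, ways,
	       (unsigned long long)1 << sets,
	       (unsigned long long)1 << blocks);
	return 0;
}
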
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b6e3d0653002..109243fdb6ec 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -14,12 +14,6 @@ config LOCKDEP_SUPPORT
config STACKTRACE_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- bool
-
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
@@ -69,7 +63,7 @@ config S390
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+ select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
@@ -106,6 +100,7 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ select ARCH_KEEP_MEMBLOCK
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -149,6 +144,7 @@ config S390
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
+ select HAVE_GENERIC_GUP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -164,11 +160,13 @@ config S390
select HAVE_PERF_USER_STACK_DUMP
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MEMBLOCK_PHYS_MAP
+ select HAVE_MMU_GATHER_NO_GATHER
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NOP_MCOUNT
select HAVE_OPROFILE
select HAVE_PCI
select HAVE_PERF_EVENTS
+ select HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
@@ -188,7 +186,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select ARCH_HAS_SCALED_CPUTIME
- select VIRT_TO_BUS
select HAVE_NMI
@@ -240,6 +237,7 @@ choice
config MARCH_Z900
bool "IBM zSeries model z800 and z900"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z900_FEATURES
help
Select this to enable optimizations for model z800/z900 (2064 and
@@ -248,6 +246,7 @@ config MARCH_Z900
config MARCH_Z990
bool "IBM zSeries model z890 and z990"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z990_FEATURES
help
Select this to enable optimizations for model z890/z990 (2084 and
@@ -256,6 +255,7 @@ config MARCH_Z990
config MARCH_Z9_109
bool "IBM System z9"
+ depends on !CC_IS_CLANG
select HAVE_MARCH_Z9_109_FEATURES
help
Select this to enable optimizations for IBM System z9 (2094 and
@@ -347,12 +347,15 @@ config TUNE_DEFAULT
config TUNE_Z900
bool "IBM zSeries model z800 and z900"
+ depends on !CC_IS_CLANG
config TUNE_Z990
bool "IBM zSeries model z890 and z990"
+ depends on !CC_IS_CLANG
config TUNE_Z9_109
bool "IBM System z9"
+ depends on !CC_IS_CLANG
config TUNE_Z10
bool "IBM System z10"
@@ -388,6 +391,9 @@ config COMPAT
(and some other stuff like libraries and such) is needed for
executing 31 bit applications. It is safe to say "Y".
+config COMPAT_VDSO
+ def_bool COMPAT && !CC_IS_CLANG
+
config SYSVIPC_COMPAT
def_bool y if COMPAT && SYSVIPC
@@ -549,6 +555,17 @@ config ARCH_HAS_KEXEC_PURGATORY
def_bool y
depends on KEXEC_FILE
+config KEXEC_VERIFY_SIG
+ bool "Verify kernel signature during kexec_file_load() syscall"
+ depends on KEXEC_FILE && SYSTEM_DATA_VERIFICATION
+ help
+ This option makes kernel signature verification mandatory for
+ the kexec_file_load() syscall.
+
+ In addition to that option, you need to enable signature
+ verification for the corresponding kernel image type being
+ loaded in order for this to work.
+
config ARCH_RANDOM
def_bool y
prompt "s390 architectural random number generation API"
@@ -609,6 +626,29 @@ config EXPOLINE_FULL
endchoice
+config RELOCATABLE
+ bool "Build a relocatable kernel"
+ select MODULE_REL_CRCS if MODVERSIONS
+ default y
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded at an arbitrary address.
+ The kernel is linked as a position-independent executable (PIE)
+ and contains dynamic relocations which are processed early in the
+ bootup process.
+ The relocations make the kernel image about 15% larger (compressed
+ 10%), but are discarded at runtime.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image (KASLR)"
+ depends on RELOCATABLE
+ default y
+ help
+ In support of Kernel Address Space Layout Randomization (KASLR),
+ this randomizes the address at which the kernel image is loaded,
+ as a security feature that deters exploit attempts relying on
+ knowledge of the location of kernel internals.
+
endmenu
menu "Memory setup"
@@ -837,6 +877,17 @@ config HAVE_PNETID
menu "Virtualization"
+config PROTECTED_VIRTUALIZATION_GUEST
+ def_bool n
+ prompt "Protected virtualization guest support"
+ help
+ Select this option if you want to be able to run this
+ kernel as a protected virtualization KVM guest.
+ Protected virtualization capable machines have a mini hypervisor
+ located at machine level (an ultravisor). With the help of the
+ Ultravisor, KVM will be able to run "protected" VMs, special
+ VMs whose memory and management data are unavailable to KVM.
+
config PFAULT
def_bool y
prompt "Pseudo page fault support"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e21053e5e0da..de8521fc9de5 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -10,16 +10,22 @@
# Copyright (C) 1994 by Linus Torvalds
#
+KBUILD_DEFCONFIG := defconfig
+
LD_BFD := elf64-s390
KBUILD_LDFLAGS := -m elf64_s390
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
+ifeq ($(CONFIG_RELOCATABLE),y)
+KBUILD_CFLAGS += -fPIE
+LDFLAGS_vmlinux := -pie
+endif
aflags_dwarf := -Wa,-gdwarf-2
-KBUILD_AFLAGS_DECOMPRESSOR := -m64 -D__ASSEMBLY__
+KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
-KBUILD_CFLAGS_DECOMPRESSOR := -m64 -O2
+KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
@@ -111,7 +117,7 @@ endif
cfi := $(call as-instr,.cfi_startproc\n.cfi_val_offset 15$(comma)-160\n.cfi_endproc,-DCONFIG_AS_CFI_VAL_OFFSET=1)
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
-KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
+KBUILD_CFLAGS += -pipe -Wno-sign-compare
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables $(cfi)
KBUILD_AFLAGS += $(aflags-y) $(cfi)
export KBUILD_AFLAGS_DECOMPRESSOR
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index c844eaf24ed7..7cba96e7587b 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -12,25 +12,35 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
#
-# Use -march=z900 for als.c to be able to print an error
+# Use the minimum architecture for als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
+ifndef CONFIG_CC_IS_CLANG
+CC_FLAGS_MARCH_MINIMUM := -march=z900
+else
+CC_FLAGS_MARCH_MINIMUM := -march=z10
+endif
+
+ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
-AFLAGS_head.o += -march=z900
+AFLAGS_head.o += $(CC_FLAGS_MARCH_MINIMUM)
AFLAGS_REMOVE_mem.o += $(CC_FLAGS_MARCH)
-AFLAGS_mem.o += -march=z900
+AFLAGS_mem.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
-CFLAGS_als.o += -march=z900
+CFLAGS_als.o += $(CC_FLAGS_MARCH_MINIMUM)
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o += -march=z900
+CFLAGS_sclp_early_core.o += $(CC_FLAGS_MARCH_MINIMUM)
endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o string.o ebcdic.o
-obj-y += sclp_early_core.o mem.o ipl_vmparm.o cmdline.o ctype.o
-targets := bzImage startup.a section_cmp.boot.data $(obj-y)
+obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
+obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
+obj-y += ctype.o text_dma.o
+obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
+obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
+obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
subdir- := compressed
OBJECTS := $(addprefix $(obj)/,$(obj-y))
@@ -48,7 +58,7 @@ define cmd_section_cmp
touch $@
endef
-$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data FORCE
+$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
$(call if_changed,objcopy)
$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index f902215e9cd9..ff6801d401c4 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -99,7 +99,7 @@ static void facility_mismatch(void)
print_machine_type();
print_missing_facilities();
sclp_early_printk("See Principles of Operations for facility bits\n");
- disabled_wait(0x8badcccc);
+ disabled_wait();
}
void verify_facilities(void)
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 82bc06346e05..ad57c2205a71 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -9,5 +9,10 @@ void setup_boot_command_line(void);
void parse_boot_command_line(void);
void setup_memory_end(void);
void print_missing_facilities(void);
+unsigned long get_random_base(unsigned long safe_addr);
+
+extern int kaslr_enabled;
+
+unsigned long read_ipl_report(unsigned long safe_offset);
#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
index e1c1f2ec60f4..c15eb7114d83 100644
--- a/arch/s390/boot/compressed/decompressor.h
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -17,6 +17,11 @@ struct vmlinux_info {
unsigned long bss_size; /* uncompressed image .bss size */
unsigned long bootdata_off;
unsigned long bootdata_size;
+ unsigned long bootdata_preserved_off;
+ unsigned long bootdata_preserved_size;
+ unsigned long dynsym_start;
+ unsigned long rela_dyn_start;
+ unsigned long rela_dyn_end;
};
extern char _vmlinux_info[];
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index 7efc3938f595..635217eb3d91 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -33,7 +33,29 @@ SECTIONS
*(.data.*)
_edata = . ;
}
+ /*
+ * .dma section for code, data, ex_table that need to stay below 2 GB,
+ * even when the kernel is relocated above 2 GB.
+ */
+ _sdma = .;
+ .dma.text : {
+ . = ALIGN(PAGE_SIZE);
+ _stext_dma = .;
+ *(.dma.text)
+ . = ALIGN(PAGE_SIZE);
+ _etext_dma = .;
+ }
+ . = ALIGN(16);
+ .dma.ex_table : {
+ _start_dma_ex_table = .;
+ KEEP(*(.dma.ex_table))
+ _stop_dma_ex_table = .;
+ }
+ .dma.data : { *(.dma.data) }
+ _edma = .;
+
BOOT_DATA
+ BOOT_DATA_PRESERVED
/*
* uncompressed image info used by the decompressor it should match
@@ -55,6 +77,8 @@ SECTIONS
_compressed_start = .;
*(.vmlinux.bin.compressed)
_compressed_end = .;
+ FILL(0xff);
+ . = ALIGN(4096);
}
. = ALIGN(256);
.bss : {
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index ce2cbbc41742..028aab03a9e7 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -305,7 +305,7 @@ ENTRY(startup_kdump)
xc 0x300(256),0x300
xc 0xe00(256),0xe00
xc 0xf00(256),0xf00
- lctlg %c0,%c15,0x200(%r0) # initialize control registers
+ lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
stcke __LC_BOOT_CLOCK
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
spt 6f-.LPG0(%r13)
@@ -319,20 +319,54 @@ ENTRY(startup_kdump)
.align 8
6: .long 0x7fffffff,0xffffffff
+.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
+ .quad 0 # cr1: primary space segment table
+ .quad .Lduct # cr2: dispatchable unit control table
+ .quad 0 # cr3: instruction authorization
+ .quad 0xffff # cr4: instruction authorization
+ .quad .Lduct # cr5: primary-aste origin
+ .quad 0 # cr6: I/O interrupts
+ .quad 0 # cr7: secondary space segment table
+ .quad 0 # cr8: access registers translation
+ .quad 0 # cr9: tracing off
+ .quad 0 # cr10: tracing off
+ .quad 0 # cr11: tracing off
+ .quad 0 # cr12: tracing off
+ .quad 0 # cr13: home space segment table
+ .quad 0xc0000000 # cr14: machine check handling off
+ .quad .Llinkage_stack # cr15: linkage stack operations
+
+ .section .dma.data,"aw",@progbits
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
+ .long 0,0,0,0,0,0,0,0
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
+ .align 64
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
+ .align 128
+.Lduald:.rept 8
+ .long 0x80000000,0,0,0 # invalid access-list entries
+ .endr
+ .previous
+
#include "head_kdump.S"
#
# params at 10400 (setup.h)
+# Must be kept in sync with struct parmarea in setup.h
#
.org PARMAREA
- .long 0,0 # IPL_DEVICE
- .long 0,0 # INITRD_START
- .long 0,0 # INITRD_SIZE
- .long 0,0 # OLDMEM_BASE
- .long 0,0 # OLDMEM_SIZE
+ .quad 0 # IPL_DEVICE
+ .quad 0 # INITRD_START
+ .quad 0 # INITRD_SIZE
+ .quad 0 # OLDMEM_BASE
+ .quad 0 # OLDMEM_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
.byte 0
- .org 0x11000
+ .org EARLY_SCCB_OFFSET
+ .fill 4096
+
+ .org HEAD_END
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 36beb56de021..3c49bde8aa5e 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -7,16 +7,19 @@
#include <asm/sections.h>
#include <asm/boot_data.h>
#include <asm/facility.h>
+#include <asm/uv.h>
#include "boot.h"
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
-struct ipl_parameter_block __bootdata(early_ipl_block);
-int __bootdata(early_ipl_block_valid);
+struct ipl_parameter_block __bootdata_preserved(ipl_block);
+int __bootdata_preserved(ipl_block_valid);
unsigned long __bootdata(memory_end);
int __bootdata(memory_end_set);
int __bootdata(noexec_disabled);
+int kaslr_enabled __section(.data);
+
static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long)addr;
@@ -45,13 +48,15 @@ void store_ipl_parmblock(void)
{
int rc;
- rc = __diag308(DIAG308_STORE, &early_ipl_block);
+ uv_set_shared(__pa(&ipl_block));
+ rc = __diag308(DIAG308_STORE, &ipl_block);
+ uv_remove_shared(__pa(&ipl_block));
if (rc == DIAG308_RC_OK &&
- early_ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
- early_ipl_block_valid = 1;
+ ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+ ipl_block_valid = 1;
}
-static size_t scpdata_length(const char *buf, size_t count)
+static size_t scpdata_length(const u8 *buf, size_t count)
{
while (count) {
if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
@@ -68,26 +73,26 @@ static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
size_t i;
int has_lowercase;
- count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data,
- ipb->ipl_info.fcp.scp_data_len));
+ count = min(size - 1, scpdata_length(ipb->fcp.scp_data,
+ ipb->fcp.scp_data_len));
if (!count)
goto out;
has_lowercase = 0;
for (i = 0; i < count; i++) {
- if (!isascii(ipb->ipl_info.fcp.scp_data[i])) {
+ if (!isascii(ipb->fcp.scp_data[i])) {
count = 0;
goto out;
}
- if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i]))
+ if (!has_lowercase && islower(ipb->fcp.scp_data[i]))
has_lowercase = 1;
}
if (has_lowercase)
- memcpy(dest, ipb->ipl_info.fcp.scp_data, count);
+ memcpy(dest, ipb->fcp.scp_data, count);
else
for (i = 0; i < count; i++)
- dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]);
+ dest[i] = tolower(ipb->fcp.scp_data[i]);
out:
dest[count] = '\0';
return count;
@@ -103,14 +108,14 @@ static void append_ipl_block_parm(void)
delim = early_command_line + len; /* '\0' character position */
parm = early_command_line + len + 1; /* append right after '\0' */
- switch (early_ipl_block.hdr.pbt) {
- case DIAG308_IPL_TYPE_CCW:
+ switch (ipl_block.pb0_hdr.pbt) {
+ case IPL_PBT_CCW:
rc = ipl_block_get_ascii_vmparm(
- parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
- case DIAG308_IPL_TYPE_FCP:
+ case IPL_PBT_FCP:
rc = ipl_block_get_ascii_scpdata(
- parm, COMMAND_LINE_SIZE - len - 1, &early_ipl_block);
+ parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
break;
}
if (rc) {
@@ -141,7 +146,7 @@ void setup_boot_command_line(void)
strcpy(early_command_line, strim(COMMAND_LINE));
/* append IPL PARM data to the boot command line */
- if (early_ipl_block_valid)
+ if (!is_prot_virt_guest() && ipl_block_valid)
append_ipl_block_parm();
}
@@ -211,6 +216,7 @@ void parse_boot_command_line(void)
char *args;
int rc;
+ kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
args = strcpy(command_line_buf, early_command_line);
while (*args) {
args = next_arg(args, &param, &val);
@@ -228,15 +234,21 @@ void parse_boot_command_line(void)
if (!strcmp(param, "facilities"))
modify_fac_list(val);
+
+ if (!strcmp(param, "nokaslr"))
+ kaslr_enabled = 0;
}
}
void setup_memory_end(void)
{
#ifdef CONFIG_CRASH_DUMP
- if (!OLDMEM_BASE && early_ipl_block_valid &&
- early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP &&
- early_ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP) {
+ if (OLDMEM_BASE) {
+ kaslr_enabled = 0;
+ } else if (ipl_block_valid &&
+ ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
+ ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
+ kaslr_enabled = 0;
if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
memory_end_set = 1;
}
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
new file mode 100644
index 000000000000..0b4965573656
--- /dev/null
+++ b/arch/s390/boot/ipl_report.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include <uapi/asm/ipl.h>
+#include "boot.h"
+
+int __bootdata_preserved(ipl_secure_flag);
+
+unsigned long __bootdata_preserved(ipl_cert_list_addr);
+unsigned long __bootdata_preserved(ipl_cert_list_size);
+
+unsigned long __bootdata(early_ipl_comp_list_addr);
+unsigned long __bootdata(early_ipl_comp_list_size);
+
+#define for_each_rb_entry(entry, rb) \
+ for (entry = rb->entries; \
+ (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
+ entry++)
+
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+ unsigned long addr1, unsigned long size1)
+{
+ return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
+
+static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
+ struct ipl_rb_certificates *certs,
+ unsigned long safe_addr)
+{
+ struct ipl_rb_certificate_entry *cert;
+ struct ipl_rb_component_entry *comp;
+ size_t size;
+
+ /*
+ * Find the length for the IPL report boot data
+ */
+ early_ipl_comp_list_size = 0;
+ for_each_rb_entry(comp, comps)
+ early_ipl_comp_list_size += sizeof(*comp);
+ ipl_cert_list_size = 0;
+ for_each_rb_entry(cert, certs)
+ ipl_cert_list_size += sizeof(unsigned int) + cert->len;
+ size = ipl_cert_list_size + early_ipl_comp_list_size;
+
+ /*
+ * Start from safe_addr to find a free memory area large
+ * enough for the IPL report boot data. This area is used
+ * for ipl_cert_list_addr/ipl_cert_list_size and
+ * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
+ * not overlap with any component or any certificate.
+ */
+repeat:
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+ intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
+ safe_addr = INITRD_START + INITRD_SIZE;
+ for_each_rb_entry(comp, comps)
+ if (intersects(safe_addr, size, comp->addr, comp->len)) {
+ safe_addr = comp->addr + comp->len;
+ goto repeat;
+ }
+ for_each_rb_entry(cert, certs)
+ if (intersects(safe_addr, size, cert->addr, cert->len)) {
+ safe_addr = cert->addr + cert->len;
+ goto repeat;
+ }
+ early_ipl_comp_list_addr = safe_addr;
+ ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
+
+ return safe_addr + size;
+}
+
+static void copy_components_bootdata(struct ipl_rb_components *comps)
+{
+ struct ipl_rb_component_entry *comp, *ptr;
+
+ ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
+ for_each_rb_entry(comp, comps)
+ memcpy(ptr++, comp, sizeof(*ptr));
+}
+
+static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+{
+ struct ipl_rb_certificate_entry *cert;
+ void *ptr;
+
+ ptr = (void *) ipl_cert_list_addr;
+ for_each_rb_entry(cert, certs) {
+ *(unsigned int *) ptr = cert->len;
+ ptr += sizeof(unsigned int);
+ memcpy(ptr, (void *) cert->addr, cert->len);
+ ptr += cert->len;
+ }
+}
+
+unsigned long read_ipl_report(unsigned long safe_addr)
+{
+ struct ipl_rb_certificates *certs;
+ struct ipl_rb_components *comps;
+ struct ipl_pl_hdr *pl_hdr;
+ struct ipl_rl_hdr *rl_hdr;
+ struct ipl_rb_hdr *rb_hdr;
+ unsigned long tmp;
+ void *rl_end;
+
+ /*
+ * Check if there is an IPL report by looking at the copy
+ * of the IPL parameter information block.
+ */
+ if (!ipl_block_valid ||
+ !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
+ return safe_addr;
+ ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
+ /*
+ * There is an IPL report; to find it, load the pointer to the
+ * IPL parameter information block from lowcore, skip past the
+ * IPL parameter list, then align the address to a double word
+ * boundary.
+ */
+ tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
+ pl_hdr = (struct ipl_pl_hdr *) tmp;
+ tmp = (tmp + pl_hdr->len + 7) & -8UL;
+ rl_hdr = (struct ipl_rl_hdr *) tmp;
+ /* Walk through the IPL report blocks in the IPL Report list */
+ certs = NULL;
+ comps = NULL;
+ rl_end = (void *) rl_hdr + rl_hdr->len;
+ rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
+ while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
+ (void *) rb_hdr + rb_hdr->len <= rl_end) {
+
+ switch (rb_hdr->rbt) {
+ case IPL_RBT_CERTIFICATES:
+ certs = (struct ipl_rb_certificates *) rb_hdr;
+ break;
+ case IPL_RBT_COMPONENTS:
+ comps = (struct ipl_rb_components *) rb_hdr;
+ break;
+ default:
+ break;
+ }
+
+ rb_hdr = (void *) rb_hdr + rb_hdr->len;
+ }
+
+ /*
+ * With either the component list or the certificate list
+ * missing the kernel will stay ignorant of secure IPL.
+ */
+ if (!comps || !certs)
+ return safe_addr;
+
+ /*
+ * Copy component and certificate list to a safe area
+ * where the decompressed kernel can find them.
+ */
+ safe_addr = find_bootdata_space(comps, certs, safe_addr);
+ copy_components_bootdata(comps);
+ copy_certificates_bootdata(certs);
+
+ return safe_addr;
+}
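The placement loop in find_bootdata_space() leans entirely on the intersects() helper, which is the usual half-open interval overlap test. A tiny standalone check with invented addresses:

#include <stdio.h>
#include <stdbool.h>

static bool intersects(unsigned long addr0, unsigned long size0,
		       unsigned long addr1, unsigned long size1)
{
	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
}

int main(void)
{
	/* [0x1000, 0x1100) overlaps [0x10c0, 0x1100) ... */
	printf("%d\n", intersects(0x1000, 0x100, 0x10c0, 0x40));	/* 1 */
	/* ... but merely touches [0x1100, 0x1140) */
	printf("%d\n", intersects(0x1000, 0x100, 0x1100, 0x40));	/* 0 */
	return 0;
}
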
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
new file mode 100644
index 000000000000..3bdd8132e56b
--- /dev/null
+++ b/arch/s390/boot/kaslr.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ */
+#include <asm/mem_detect.h>
+#include <asm/cpacf.h>
+#include <asm/timex.h>
+#include <asm/sclp.h>
+#include "compressed/decompressor.h"
+
+#define PRNG_MODE_TDES 1
+#define PRNG_MODE_SHA512 2
+#define PRNG_MODE_TRNG 3
+
+struct prno_parm {
+ u32 res;
+ u32 reseed_counter;
+ u64 stream_bytes;
+ u8 V[112];
+ u8 C[112];
+};
+
+struct prng_parm {
+ u8 parm_block[32];
+ u32 reseed_counter;
+ u64 byte_counter;
+};
+
+static int check_prng(void)
+{
+ if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
+ sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
+ return 0;
+ }
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+ return PRNG_MODE_TRNG;
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
+ return PRNG_MODE_SHA512;
+ else
+ return PRNG_MODE_TDES;
+}
+
+static unsigned long get_random(unsigned long limit)
+{
+ struct prng_parm prng = {
+ /* initial parameter block for tdes mode, copied from libica */
+ .parm_block = {
+ 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+ 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+ 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+ 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
+ },
+ };
+ unsigned long seed, random;
+ struct prno_parm prno;
+ __u64 entropy[4];
+ int mode, i;
+
+ mode = check_prng();
+ seed = get_tod_clock_fast();
+ switch (mode) {
+ case PRNG_MODE_TRNG:
+ cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
+ break;
+ case PRNG_MODE_SHA512:
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
+ (u8 *) &seed, sizeof(seed));
+ cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
+ sizeof(random), NULL, 0);
+ break;
+ case PRNG_MODE_TDES:
+ /* add entropy */
+ *(unsigned long *) prng.parm_block ^= seed;
+ for (i = 0; i < 16; i++) {
+ cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
+ (char *) entropy, (char *) entropy,
+ sizeof(entropy));
+ memcpy(prng.parm_block, entropy, sizeof(entropy));
+ }
+ random = seed;
+ cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
+ (u8 *) &random, sizeof(random));
+ break;
+ default:
+ random = 0;
+ }
+ return random % limit;
+}
+
+unsigned long get_random_base(unsigned long safe_addr)
+{
+ unsigned long base, start, end, kernel_size;
+ unsigned long block_sum, offset;
+ int i;
+
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
+ if (safe_addr < INITRD_START + INITRD_SIZE)
+ safe_addr = INITRD_START + INITRD_SIZE;
+ }
+ safe_addr = ALIGN(safe_addr, THREAD_SIZE);
+
+ kernel_size = vmlinux.image_size + vmlinux.bss_size;
+ block_sum = 0;
+ for_each_mem_detect_block(i, &start, &end) {
+ if (memory_end_set) {
+ if (start >= memory_end)
+ break;
+ if (end > memory_end)
+ end = memory_end;
+ }
+ if (end - start < kernel_size)
+ continue;
+ block_sum += end - start - kernel_size;
+ }
+ if (!block_sum) {
+ sclp_early_printk("KASLR disabled: not enough memory\n");
+ return 0;
+ }
+
+ base = get_random(block_sum);
+ if (base == 0)
+ return 0;
+ if (base < safe_addr)
+ base = safe_addr;
+ block_sum = offset = 0;
+ for_each_mem_detect_block(i, &start, &end) {
+ if (memory_end_set) {
+ if (start >= memory_end)
+ break;
+ if (end > memory_end)
+ end = memory_end;
+ }
+ if (end - start < kernel_size)
+ continue;
+ block_sum += end - start - kernel_size;
+ if (base <= block_sum) {
+ base = start + base - offset;
+ base = ALIGN_DOWN(base, THREAD_SIZE);
+ break;
+ }
+ offset = block_sum;
+ }
+ return base;
+}
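get_random_base() maps one random number onto a load address in two passes: the first pass sums the usable bytes of every memory block large enough to hold the kernel, the second walks the blocks again and translates a random offset into that sum back into a concrete address. The userspace sketch below models just that mapping; the block layout, sizes and random value are invented, and the safe_addr clamping and THREAD_SIZE alignment of the real code are omitted.

#include <stdio.h>

struct block { unsigned long start, end; };

static unsigned long pick_base(const struct block *blk, int n,
			       unsigned long kernel_size, unsigned long random)
{
	unsigned long block_sum = 0, offset = 0, base;
	int i;

	/* pass 1: count every byte the kernel could be placed at */
	for (i = 0; i < n; i++) {
		if (blk[i].end - blk[i].start < kernel_size)
			continue;
		block_sum += blk[i].end - blk[i].start - kernel_size;
	}
	if (!block_sum)
		return 0;	/* KASLR would be disabled */

	/* pass 2: map a random offset into that total back onto a block */
	base = random % block_sum;
	block_sum = 0;
	for (i = 0; i < n; i++) {
		if (blk[i].end - blk[i].start < kernel_size)
			continue;
		block_sum += blk[i].end - blk[i].start - kernel_size;
		if (base <= block_sum)
			return blk[i].start + base - offset;
		offset = block_sum;
	}
	return 0;
}

int main(void)
{
	struct block blocks[] = {
		{ 0x01000000, 0x08000000 },	/* 112 MB block */
		{ 0x10000000, 0x11000000 },	/* 16 MB block, too small */
	};

	printf("base: 0x%lx\n",
	       pick_base(blocks, 2, 0x02000000, 0x05abcdef));	/* 0x1abcdef */
	return 0;
}
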
diff --git a/arch/s390/boot/machine_kexec_reloc.c b/arch/s390/boot/machine_kexec_reloc.c
new file mode 100644
index 000000000000..b7a5d0f72097
--- /dev/null
+++ b/arch/s390/boot/machine_kexec_reloc.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/machine_kexec_reloc.c"
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index bdfc5549a299..7b0d05414618 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,11 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
+#include <linux/elf.h>
+#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/kexec.h>
#include <asm/sclp.h>
+#include <asm/diag.h>
+#include <asm/uv.h>
#include "compressed/decompressor.h"
#include "boot.h"
extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+unsigned long __bootdata_preserved(__kaslr_offset);
+
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .dma section, and its location is passed
+ * over to the decompressed / relocated kernel via the .boot.preserved.data
+ * section.
+ */
+extern char _sdma[], _edma[];
+extern char _stext_dma[], _etext_dma[];
+extern struct exception_table_entry _start_dma_ex_table[];
+extern struct exception_table_entry _stop_dma_ex_table[];
+unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
+unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
+unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
+unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
+struct exception_table_entry *
+ __bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
+struct exception_table_entry *
+ __bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
+
+int _diag210_dma(struct diag210 *addr);
+int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_dma(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_dma(void);
+struct diag_ops __bootdata_preserved(diag_dma_ops) = {
+ .diag210 = _diag210_dma,
+ .diag26c = _diag26c_dma,
+ .diag14 = _diag14_dma,
+ .diag0c = _diag0c_dma,
+ .diag308_reset = _diag308_reset_dma
+};
+static struct diag210 _diag210_tmp_dma __section(".dma.data");
+struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+void _swsusp_reset_dma(void);
+unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
void error(char *x)
{
@@ -13,7 +57,7 @@ void error(char *x)
sclp_early_printk(x);
sclp_early_printk("\n\n -- System halted");
- disabled_wait(0xdeadbeef);
+ disabled_wait();
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
@@ -23,19 +67,16 @@ unsigned long mem_safe_offset(void)
}
#endif
-static void rescue_initrd(void)
+static void rescue_initrd(unsigned long addr)
{
- unsigned long min_initrd_addr;
-
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
return;
if (!INITRD_START || !INITRD_SIZE)
return;
- min_initrd_addr = mem_safe_offset();
- if (min_initrd_addr <= INITRD_START)
+ if (addr <= INITRD_START)
return;
- memmove((void *)min_initrd_addr, (void *)INITRD_START, INITRD_SIZE);
- INITRD_START = min_initrd_addr;
+ memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
+ INITRD_START = addr;
}
static void copy_bootdata(void)
@@ -43,23 +84,81 @@ static void copy_bootdata(void)
if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
error(".boot.data section size mismatch");
memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
+ if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
+ error(".boot.preserved.data section size mismatch");
+ memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
+}
+
+static void handle_relocs(unsigned long offset)
+{
+ Elf64_Rela *rela_start, *rela_end, *rela;
+ int r_type, r_sym, rc;
+ Elf64_Addr loc, val;
+ Elf64_Sym *dynsym;
+
+ rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
+ rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
+ dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
+ for (rela = rela_start; rela < rela_end; rela++) {
+ loc = rela->r_offset + offset;
+ val = rela->r_addend + offset;
+ r_sym = ELF64_R_SYM(rela->r_info);
+ if (r_sym)
+ val += dynsym[r_sym].st_value;
+ r_type = ELF64_R_TYPE(rela->r_info);
+ rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
+ if (rc)
+ error("Unknown relocation type");
+ }
}
void startup_kernel(void)
{
+ unsigned long random_lma;
+ unsigned long safe_addr;
void *img;
- rescue_initrd();
- sclp_early_read_info();
store_ipl_parmblock();
+ safe_addr = mem_safe_offset();
+ safe_addr = read_ipl_report(safe_addr);
+ uv_query_info();
+ rescue_initrd(safe_addr);
+ sclp_early_read_info();
setup_boot_command_line();
parse_boot_command_line();
setup_memory_end();
detect_memory();
+
+ random_lma = __kaslr_offset = 0;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
+ random_lma = get_random_base(safe_addr);
+ if (random_lma) {
+ __kaslr_offset = random_lma - vmlinux.default_lma;
+ img = (void *)vmlinux.default_lma;
+ vmlinux.default_lma += __kaslr_offset;
+ vmlinux.entry += __kaslr_offset;
+ vmlinux.bootdata_off += __kaslr_offset;
+ vmlinux.bootdata_preserved_off += __kaslr_offset;
+ vmlinux.rela_dyn_start += __kaslr_offset;
+ vmlinux.rela_dyn_end += __kaslr_offset;
+ vmlinux.dynsym_start += __kaslr_offset;
+ }
+ }
+
if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
img = decompress_kernel();
memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
- }
+ } else if (__kaslr_offset)
+ memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+
copy_bootdata();
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ handle_relocs(__kaslr_offset);
+
+ if (__kaslr_offset) {
+ /* Clear non-relocated kernel */
+ if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
+ memset(img, 0, vmlinux.image_size);
+ }
vmlinux.entry();
}
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
new file mode 100644
index 000000000000..9715715c4c28
--- /dev/null
+++ b/arch/s390/boot/text_dma.S
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+#ifdef CC_USING_EXPOLINE
+ .pushsection .dma.text.__s390_indirect_jump_r14,"axG"
+__dma__s390_indirect_jump_r14:
+ larl %r1,0f
+ ex 0,0(%r1)
+ j .
+0: br %r14
+ .popsection
+#endif
+
+ .section .dma.text,"ax"
+/*
+ * Simplified version of expoline thunk. The normal thunks cannot be used here,
+ * because they might be more than 2 GB away, and not reachable by the relative
+ * branch. No comdat, exrl, etc. optimizations used here, because it only
+ * affects a few functions that are not performance-relevant.
+ */
+ .macro BR_EX_DMA_r14
+#ifdef CC_USING_EXPOLINE
+ jg __dma__s390_indirect_jump_r14
+#else
+ br %r14
+#endif
+ .endm
+
+/*
+ * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_dma)
+ lgr %r1,%r2
+ lgr %r2,%r3
+ lgr %r3,%r4
+ lhi %r5,-EIO
+ sam31
+ diag %r1,%r2,0x14
+.Ldiag14_ex:
+ ipm %r5
+ srl %r5,28
+.Ldiag14_fault:
+ sam64
+ lgfr %r2,%r5
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_dma)
+
+/*
+ * int _diag210_dma(struct diag210 *addr)
+ */
+ENTRY(_diag210_dma)
+ lgr %r1,%r2
+ lhi %r2,-1
+ sam31
+ diag %r1,%r0,0x210
+.Ldiag210_ex:
+ ipm %r2
+ srl %r2,28
+.Ldiag210_fault:
+ sam64
+ lgfr %r2,%r2
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_dma)
+
+/*
+ * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_dma)
+ lghi %r5,-EOPNOTSUPP
+ sam31
+ diag %r2,%r4,0x26c
+.Ldiag26c_ex:
+ sam64
+ lgfr %r2,%r5
+ BR_EX_DMA_r14
+ EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_dma)
+
+/*
+ * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_dma)
+ sam31
+ diag %r2,%r2,0x0c
+ sam64
+ BR_EX_DMA_r14
+ENDPROC(_diag0c_dma)
+
+/*
+ * void _swsusp_reset_dma(void)
+ */
+ENTRY(_swsusp_reset_dma)
+ larl %r1,restart_entry
+ larl %r2,.Lrestart_diag308_psw
+ og %r1,0(%r2)
+ stg %r1,0(%r0)
+ lghi %r0,0
+ diag %r0,%r0,0x308
+restart_entry:
+ lhi %r1,1
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
+ sam64
+ BR_EX_DMA_r14
+ENDPROC(_swsusp_reset_dma)
+
+/*
+ * void _diag308_reset_dma(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_dma)
+ larl %r4,.Lctlregs # Save control registers
+ stctg %c0,%c15,0(%r4)
+ lg %r2,0(%r4) # Disable lowcore protection
+ nilh %r2,0xefff
+ larl %r4,.Lctlreg0
+ stg %r2,0(%r4)
+ lctlg %c0,%c0,0(%r4)
+ larl %r4,.Lfpctl # Floating point control register
+ stfpc 0(%r4)
+ larl %r4,.Lprefix # Save prefix register
+ stpx 0(%r4)
+ larl %r4,.Lprefix_zero # Set prefix register to 0
+ spx 0(%r4)
+ larl %r4,.Lcontinue_psw # Save PSW flags
+ epsw %r2,%r3
+ stm %r2,%r3,0(%r4)
+ larl %r4,restart_part2 # Setup restart PSW at absolute 0
+ larl %r3,.Lrestart_diag308_psw
+ og %r4,0(%r3) # Save PSW
+ lghi %r3,0
+ sturg %r4,%r3 # Use sturg, because of large pages
+ lghi %r1,1
+ lghi %r0,0
+ diag %r0,%r1,0x308
+restart_part2:
+ lhi %r0,0 # Load r0 with zero
+ lhi %r1,2 # Use mode 2 = ESAME (dump)
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
+ sam64 # Switch to 64 bit addressing mode
+ larl %r4,.Lctlregs # Restore control registers
+ lctlg %c0,%c15,0(%r4)
+ larl %r4,.Lfpctl # Restore floating point ctl register
+ lfpc 0(%r4)
+ larl %r4,.Lprefix # Restore prefix register
+ spx 0(%r4)
+ larl %r4,.Lcontinue_psw # Restore PSW flags
+ lpswe 0(%r4)
+.Lcontinue:
+ BR_EX_DMA_r14
+ENDPROC(_diag308_reset_dma)
+
+ .section .dma.data,"aw",@progbits
+.align 8
+.Lrestart_diag308_psw:
+ .long 0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+ .quad 0,.Lcontinue
+
+.align 8
+.Lctlreg0:
+ .quad 0
+.Lctlregs:
+ .rept 16
+ .quad 0
+ .endr
+.Lfpctl:
+ .long 0
+.Lprefix:
+ .long 0
+.Lprefix_zero:
+ .long 0
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
new file mode 100644
index 000000000000..ed007f4a6444
--- /dev/null
+++ b/arch/s390/boot/uv.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/uv.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+
+int __bootdata_preserved(prot_virt_guest);
+
+void uv_query_info(void)
+{
+ struct uv_cb_qui uvcb = {
+ .header.cmd = UVC_CMD_QUI,
+ .header.len = sizeof(uvcb)
+ };
+
+ if (!test_facility(158))
+ return;
+
+ if (uv_call(0, (uint64_t)&uvcb))
+ return;
+
+ if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
+ test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
+ prot_virt_guest = 1;
+}
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 9824c7bad9d4..b0920b35f87b 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -64,6 +64,7 @@ CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
diff --git a/arch/s390/defconfig b/arch/s390/configs/defconfig
index 4d58a92b5d97..c59b922cb6c5 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/configs/defconfig
@@ -39,6 +39,7 @@ CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 4fcbe5792744..09aa5cb14873 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_VERIFY_SIG=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 2bf01ba44107..0099044e2c86 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -207,5 +207,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
VLGVF %r2,%v2,3
BR_EX %r14
+ENDPROC(crc32_be_vgfm_16)
.previous
diff --git a/arch/s390/crypto/crc32le-vx.S b/arch/s390/crypto/crc32le-vx.S
index 7d6f568bd3ad..71caf0f4ec08 100644
--- a/arch/s390/crypto/crc32le-vx.S
+++ b/arch/s390/crypto/crc32le-vx.S
@@ -105,13 +105,14 @@
ENTRY(crc32_le_vgfm_16)
larl %r5,.Lconstants_CRC_32_LE
j crc32_le_vgfm_generic
+ENDPROC(crc32_le_vgfm_16)
ENTRY(crc32c_le_vgfm_16)
larl %r5,.Lconstants_CRC_32C_LE
j crc32_le_vgfm_generic
+ENDPROC(crc32c_le_vgfm_16)
-
-crc32_le_vgfm_generic:
+ENTRY(crc32_le_vgfm_generic)
/* Load CRC-32 constants */
VLM CONST_PERM_LE2BE,CONST_CRC_POLY,0,%r5
@@ -267,5 +268,6 @@ crc32_le_vgfm_generic:
.Ldone:
VLGVF %r2,%v2,2
BR_EX %r14
+ENDPROC(crc32_le_vgfm_generic)
.previous
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0d15383d0ff1..1f9ab24dc048 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -224,24 +224,11 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
{
struct s390_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err;
- if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
- crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE)) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
-
- /* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
- if (fips_enabled &&
- !(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
- crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
- DES_KEY_SIZE) &&
- crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(&tfm->crt_flags, key);
+ if (unlikely(err))
+ return err;
memcpy(ctx->key, key, key_len);
return 0;
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index a97a1802cfb4..12cca467af7d 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -61,6 +61,7 @@ static unsigned int prng_reseed_limit;
module_param_named(reseed_limit, prng_reseed_limit, int, 0);
MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+static bool trng_available;
/*
* Any one who considers arithmetical methods of producing random digits is,
@@ -115,46 +116,68 @@ static const u8 initial_parm_block[32] __initconst = {
/*
* generate_entropy:
- * This algorithm produces 64 bytes of entropy data based on 1024
- * individual stckf() invocations assuming that each stckf() value
- * contributes 0.25 bits of entropy. So the caller gets 256 bit
- * entropy per 64 byte or 4 bits entropy per byte.
+ * This function fills a given buffer with random bytes. The entropy within
+ * the returned random bytes is assumed to be at least 50% - meaning
+ * a 64 byte buffer has at least 64 * 8 / 2 = 256 bits of entropy.
+ * Within the function the entropy generation is done in chunks of 64 bytes.
+ * So the caller should also ask for buffer fills in multiples of 64 bytes.
+ * The generation of the entropy is based on the assumption that every stckf()
+ * invocation produces 0.5 bits of entropy. To accumulate 256 bits of entropy
+ * at least 512 stckf() values are needed. The entropy relevant part of the
+ * stckf value is bit 51 (counting starts at the left with bit nr 0) so
+ * here we use the lower 4 bytes and exor the values into 2k of buffer space.
+ * To be on the safe side, if there is ever a problem with stckf() the
+ * other half of the page buffer is filled with bytes from urandom via
+ * get_random_bytes(), so this function consumes 2k of urandom for each
+ * requested 64 bytes output data. Finally the buffer page is condensed into
+ * a 64 byte value by hashing with a SHA512 hash.
*/
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
- u8 *pg, *h, hash[64];
-
- /* allocate 2 pages */
- pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
+ u8 *pg, pblock[80] = {
+ /* 8 x 64 bit init values */
+ 0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08,
+ 0xBB, 0x67, 0xAE, 0x85, 0x84, 0xCA, 0xA7, 0x3B,
+ 0x3C, 0x6E, 0xF3, 0x72, 0xFE, 0x94, 0xF8, 0x2B,
+ 0xA5, 0x4F, 0xF5, 0x3A, 0x5F, 0x1D, 0x36, 0xF1,
+ 0x51, 0x0E, 0x52, 0x7F, 0xAD, 0xE6, 0x82, 0xD1,
+ 0x9B, 0x05, 0x68, 0x8C, 0x2B, 0x3E, 0x6C, 0x1F,
+ 0x1F, 0x83, 0xD9, 0xAB, 0xFB, 0x41, 0xBD, 0x6B,
+ 0x5B, 0xE0, 0xCD, 0x19, 0x13, 0x7E, 0x21, 0x79,
+ /* 128 bit counter total message bit length */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 };
+
+ /* allocate one page stckf buffer */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
+ /* fill the ebuf in chunks of 64 byte each */
while (nbytes) {
- /* fill pages with urandom bytes */
- get_random_bytes(pg, 2*PAGE_SIZE);
- /* exor pages with 1024 stckf values */
- for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
- u64 *p = ((u64 *)pg) + n;
+ /* fill lower 2k with urandom bytes */
+ get_random_bytes(pg, PAGE_SIZE / 2);
+ /* exor upper 2k with 512 stckf values, offset 4 bytes each */
+ for (n = 0; n < 512; n++) {
+ int offset = (PAGE_SIZE / 2) + (n * 4) - 4;
+ u64 *p = (u64 *)(pg + offset);
*p ^= get_tod_clock_fast();
}
- n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
- if (n < sizeof(hash))
- h = hash;
- else
- h = ebuf;
- /* hash over the filled pages */
- cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
- if (n < sizeof(hash))
- memcpy(ebuf, hash, n);
+ /* hash over the filled page */
+ cpacf_klmd(CPACF_KLMD_SHA_512, pblock, pg, PAGE_SIZE);
+ n = (nbytes < 64) ? nbytes : 64;
+ memcpy(ebuf, pblock, n);
ret += n;
ebuf += n;
nbytes -= n;
}
- free_pages((unsigned long)pg, 1);
+ memzero_explicit(pblock, sizeof(pblock));
+ memzero_explicit(pg, PAGE_SIZE);
+ free_page((unsigned long)pg);
return ret;
}
@@ -344,8 +367,8 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
- int ret, datalen;
- u8 seed[64 + 32 + 16];
+ int ret, datalen, seedlen;
+ u8 seed[128 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@@ -368,16 +391,36 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
- /* generate initial seed bytestring, with 256 + 128 bits entropy */
- ret = generate_entropy(seed, 64 + 32);
- if (ret != 64 + 32)
- goto outfree;
- /* followed by 16 bytes of unique nonce */
- get_tod_clock_ext(seed + 64 + 32);
+ /* generate initial seed, we need at least 256 + 128 bits entropy. */
+ if (trng_available) {
+ /*
+ * Trng available, so use it. The trng works in chunks of
+ * 32 bytes and produces 100% entropy. So we pull 64 bytes
+ * which gives us 512 bits of entropy.
+ */
+ seedlen = 2 * 32;
+ cpacf_trng(NULL, 0, seed, seedlen);
+ } else {
+ /*
+ * No trng available, so use the generate_entropy() function.
+ * This function works in 64 byte chunks and produces
+ * 50% entropy. So we pull 2*64 bytes which gives us 512 bits
+ * of entropy.
+ */
+ seedlen = 2 * 64;
+ ret = generate_entropy(seed, seedlen);
+ if (ret != seedlen)
+ goto outfree;
+ }
+
+ /* append the seed by 16 bytes of unique nonce */
+ get_tod_clock_ext(seed + seedlen);
+ seedlen += 16;
- /* initial seed of the prno drng */
+ /* now initial seed of the prno drng */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
- &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+ &prng_data->prnows, NULL, 0, seed, seedlen);
+ memzero_explicit(seed, sizeof(seed));
/* if fips mode is enabled, generate a first block of random
bytes for the FIPS 140-2 Conditional Self Test */
@@ -405,17 +448,26 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
- int ret;
+ int ret, seedlen;
u8 seed[64];
- /* fetch 256 bits of fresh entropy */
- ret = generate_entropy(seed, sizeof(seed));
- if (ret != sizeof(seed))
- return ret;
+ /* We need at least 256 bits of fresh entropy for reseeding */
+ if (trng_available) {
+ /* trng produces 256 bits entropy in 32 bytes */
+ seedlen = 32;
+ cpacf_trng(NULL, 0, seed, seedlen);
+ } else {
+ /* generate_entropy() produces 256 bits entropy in 64 bytes */
+ seedlen = 64;
+ ret = generate_entropy(seed, seedlen);
+ if (ret != sizeof(seed))
+ return ret;
+ }
/* do a reseed of the prno drng with this bytestring */
cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
- &prng_data->prnows, NULL, 0, seed, sizeof(seed));
+ &prng_data->prnows, NULL, 0, seed, seedlen);
+ memzero_explicit(seed, sizeof(seed));
return 0;
}
@@ -592,6 +644,7 @@ static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
ret = -EFAULT;
break;
}
+ memzero_explicit(p, n);
ubuf += n;
nbytes -= n;
ret += n;
@@ -773,6 +826,10 @@ static int __init prng_init(void)
if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG))
return -EOPNOTSUPP;
+ /* check if TRNG subfunction is available */
+ if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+ trng_available = true;
+
/* choose prng mode */
if (prng_mode != PRNG_MODE_TDES) {
/* check for MSA5 support for PRNO operations */
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
index 72e3140fafb5..3235e4d82f2d 100644
--- a/arch/s390/hypfs/hypfs_diag0c.c
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -16,26 +16,12 @@
#define DBFS_D0C_HDR_VERSION 0
/*
- * Execute diagnose 0c in 31 bit mode
- */
-static void diag0c(struct hypfs_diag0c_entry *entry)
-{
- diag_stat_inc(DIAG_STAT_X00C);
- asm volatile (
- " sam31\n"
- " diag %0,%0,0x0c\n"
- " sam64\n"
- : /* no output register */
- : "a" (entry)
- : "memory");
-}
-
-/*
* Get hypfs_diag0c_entry from CPU vector and store diag0c data
*/
static void diag0c_fn(void *data)
{
- diag0c(((void **) data)[smp_processor_id()]);
+ diag_stat_inc(DIAG_STAT_X00C);
+ diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
}
/*
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 12d77cb11fe5..2531f673f099 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -20,7 +20,7 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += rwsem.h
+generic-y += mmiowb.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += word-at-a-time.h
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index fcf539efb32f..c10d2ee2dfda 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -14,7 +14,7 @@
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
- void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
+ void (*handler)(struct airq_struct *airq, bool floating);
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
@@ -35,13 +35,15 @@ struct airq_iv {
unsigned int *data; /* 32 bit value associated with each bit */
unsigned long bits; /* Number of bits in the vector */
unsigned long end; /* Number of highest allocated bit + 1 */
+ unsigned long flags; /* Allocation flags */
spinlock_t lock; /* Lock to protect alloc & free */
};
-#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
-#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
-#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
-#define AIRQ_IV_DATA 8 /* Allocate the data array */
+#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
+#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
+#define AIRQ_IV_DATA 8 /* Allocate the data array */
+#define AIRQ_IV_CACHELINE 16 /* Cacheline alignment for the vector */
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
void airq_iv_release(struct airq_iv *iv);
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index d1f8a4d94cca..9900d655014c 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -73,7 +73,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __atomic64_or(mask, addr);
+ __atomic64_or(mask, (long *)addr);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -94,7 +94,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __atomic64_and(mask, addr);
+ __atomic64_and(mask, (long *)addr);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -115,7 +115,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __atomic64_xor(mask, addr);
+ __atomic64_xor(mask, (long *)addr);
}
static inline int
@@ -125,7 +125,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __atomic64_or_barrier(mask, addr);
+ old = __atomic64_or_barrier(mask, (long *)addr);
return (old & mask) != 0;
}
@@ -136,7 +136,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __atomic64_and_barrier(mask, addr);
+ old = __atomic64_and_barrier(mask, (long *)addr);
return (old & ~mask) != 0;
}
@@ -147,7 +147,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __atomic64_xor_barrier(mask, addr);
+ old = __atomic64_xor_barrier(mask, (long *)addr);
return (old & mask) != 0;
}
diff --git a/arch/s390/include/asm/boot_data.h b/arch/s390/include/asm/boot_data.h
index 2d999ccb977a..f7eed27b3220 100644
--- a/arch/s390/include/asm/boot_data.h
+++ b/arch/s390/include/asm/boot_data.h
@@ -5,7 +5,14 @@
#include <asm/ipl.h>
extern char early_command_line[COMMAND_LINE_SIZE];
-extern struct ipl_parameter_block early_ipl_block;
-extern int early_ipl_block_valid;
+extern struct ipl_parameter_block ipl_block;
+extern int ipl_block_valid;
+extern int ipl_secure_flag;
+
+extern unsigned long ipl_cert_list_addr;
+extern unsigned long ipl_cert_list_size;
+
+extern unsigned long early_ipl_comp_list_addr;
+extern unsigned long early_ipl_comp_list_size;
#endif /* _ASM_S390_BOOT_DATA_H */
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 429f43a8a8e8..713fc9735ffb 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -15,7 +15,7 @@
".section .rodata.str,\"aMS\",@progbits,1\n" \
"2: .asciz \""__FILE__"\"\n" \
".previous\n" \
- ".section __bug_table,\"aw\"\n" \
+ ".section __bug_table,\"awM\",@progbits,%2\n" \
"3: .long 1b-3b,2b-3b\n" \
" .short %0,%1\n" \
" .org 3b+%2\n" \
@@ -27,17 +27,17 @@
#else /* CONFIG_DEBUG_BUGVERBOSE */
-#define __EMIT_BUG(x) do { \
- asm volatile( \
- "0: j 0b+2\n" \
- "1:\n" \
- ".section __bug_table,\"aw\"\n" \
- "2: .long 1b-2b\n" \
- " .short %0\n" \
- " .org 2b+%1\n" \
- ".previous\n" \
- : : "i" (x), \
- "i" (sizeof(struct bug_entry))); \
+#define __EMIT_BUG(x) do { \
+ asm volatile( \
+ "0: j 0b+2\n" \
+ "1:\n" \
+ ".section __bug_table,\"awM\",@progbits,%1\n" \
+ "2: .long 1b-2b\n" \
+ " .short %0\n" \
+ " .org 2b+%1\n" \
+ ".previous\n" \
+ : : "i" (x), \
+ "i" (sizeof(struct bug_entry))); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 3cc52e37b4b2..27696755daa9 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -28,6 +28,7 @@
#define CPACF_KMCTR 0xb92d /* MSA4 */
#define CPACF_PRNO 0xb93c /* MSA5 */
#define CPACF_KMA 0xb929 /* MSA8 */
+#define CPACF_KDSA 0xb93a /* MSA9 */
/*
* En/decryption modifier bits
@@ -202,7 +203,7 @@ static inline int __cpacf_check_opcode(unsigned int opcode)
}
}
-static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
if (__cpacf_check_opcode(opcode)) {
__cpacf_query(opcode, mask);
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 19562be22b7e..0036eab14391 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -308,4 +308,17 @@ union diag318_info {
int diag204(unsigned long subcode, unsigned long size, void *addr);
int diag224(void *ptr);
int diag26c(void *req, void *resp, enum diag26c_sc subcode);
+
+struct hypfs_diag0c_entry;
+
+struct diag_ops {
+ int (*diag210)(struct diag210 *addr);
+ int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
+ int (*diag14)(unsigned long rx, unsigned long ry1, unsigned long subcode);
+ void (*diag0c)(struct hypfs_diag0c_entry *entry);
+ void (*diag308_reset)(void);
+};
+
+extern struct diag_ops diag_dma_ops;
+extern struct diag210 *__diag210_tmp_dma;
#endif /* _ASM_S390_DIAG_H */
diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h
index 29441beb92e6..efb50fc6866c 100644
--- a/arch/s390/include/asm/ebcdic.h
+++ b/arch/s390/include/asm/ebcdic.h
@@ -20,7 +20,7 @@ extern __u8 _ebc_tolower[256]; /* EBCDIC -> lowercase */
extern __u8 _ebc_toupper[256]; /* EBCDIC -> uppercase */
static inline void
-codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
+codepage_convert(const __u8 *codepage, volatile char *addr, unsigned long nr)
{
if (nr-- <= 0)
return;
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f74639a05f0f..5775fc22f410 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,6 +107,10 @@
#define HWCAP_S390_VXRS_BCD 4096
#define HWCAP_S390_VXRS_EXT 8192
#define HWCAP_S390_GS 16384
+#define HWCAP_S390_VXRS_EXT2 32768
+#define HWCAP_S390_VXRS_PDE 65536
+#define HWCAP_S390_SORT 131072
+#define HWCAP_S390_DFLT 262144
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL
diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h
index 80a4e5a9cb46..ae27f756b409 100644
--- a/arch/s390/include/asm/extable.h
+++ b/arch/s390/include/asm/extable.h
@@ -19,6 +19,11 @@ struct exception_table_entry
int insn, fixup;
};
+extern struct exception_table_entry *__start_dma_ex_table;
+extern struct exception_table_entry *__stop_dma_ex_table;
+
+const struct exception_table_entry *s390_search_extables(unsigned long addr);
+
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 5a3c95b11952..68d362f8d6c1 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -11,9 +11,16 @@
#define MCOUNT_RETURN_FIXUP 18
#endif
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
#ifndef __ASSEMBLY__
+#ifdef CONFIG_CC_IS_CLANG
+/* https://bugs.llvm.org/show_bug.cgi?id=41424 */
+#define ftrace_return_address(n) 0UL
+#else
#define ftrace_return_address(n) __builtin_return_address(n)
+#endif
void _mcount(void);
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 2d1afa58a4b6..bb59dd964590 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -116,7 +116,9 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
+static inline bool gigantic_page_runtime_supported(void)
+{
+ return true;
+}
+
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index f34d729347e4..ca421614722f 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -30,14 +30,8 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
- return (void __iomem *) offset;
-}
-
-static inline void iounmap(volatile void __iomem *addr)
-{
-}
+void __iomem *ioremap(unsigned long offset, unsigned long size);
+void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
@@ -57,14 +51,17 @@ static inline void ioport_unmap(void __iomem *p)
* the corresponding device and create the mapping cookie.
*/
#define pci_iomap pci_iomap
+#define pci_iomap_range pci_iomap_range
#define pci_iounmap pci_iounmap
-#define pci_iomap_wc pci_iomap
-#define pci_iomap_wc_range pci_iomap_range
+#define pci_iomap_wc pci_iomap_wc
+#define pci_iomap_wc_range pci_iomap_wc_range
#define memcpy_fromio(dst, src, count) zpci_memcpy_fromio(dst, src, count)
#define memcpy_toio(dst, src, count) zpci_memcpy_toio(dst, src, count)
#define memset_io(dst, val, count) zpci_memset_io(dst, val, count)
+#define mmiowb() zpci_barrier()
+
#define __raw_readb zpci_read_u8
#define __raw_readw zpci_read_u16
#define __raw_readl zpci_read_u32
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index a8389e2d2f03..084e71b7272a 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -12,74 +12,36 @@
#include <asm/types.h>
#include <asm/cio.h>
#include <asm/setup.h>
+#include <uapi/asm/ipl.h>
-#define NSS_NAME_SIZE 8
-
-#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
- sizeof(struct ipl_block_fcp))
-
-#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16)
+struct ipl_parameter_block {
+ struct ipl_pl_hdr hdr;
+ union {
+ struct ipl_pb_hdr pb0_hdr;
+ struct ipl_pb0_common common;
+ struct ipl_pb0_fcp fcp;
+ struct ipl_pb0_ccw ccw;
+ char raw[PAGE_SIZE - sizeof(struct ipl_pl_hdr)];
+ };
+} __packed __aligned(PAGE_SIZE);
-#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \
- sizeof(struct ipl_block_ccw))
+#define NSS_NAME_SIZE 8
-#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16)
+#define IPL_BP_FCP_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_fcp))
+#define IPL_BP0_FCP_LEN (sizeof(struct ipl_pb0_fcp))
+#define IPL_BP_CCW_LEN (sizeof(struct ipl_pl_hdr) + \
+ sizeof(struct ipl_pb0_ccw))
+#define IPL_BP0_CCW_LEN (sizeof(struct ipl_pb0_ccw))
#define IPL_MAX_SUPPORTED_VERSION (0)
-struct ipl_list_hdr {
- u32 len;
- u8 reserved1[3];
- u8 version;
- u32 blk0_len;
- u8 pbt;
- u8 flags;
- u16 reserved2;
- u8 loadparm[8];
-} __attribute__((packed));
-
-struct ipl_block_fcp {
- u8 reserved1[305-1];
- u8 opt;
- u8 reserved2[3];
- u16 reserved3;
- u16 devno;
- u8 reserved4[4];
- u64 wwpn;
- u64 lun;
- u32 bootprog;
- u8 reserved5[12];
- u64 br_lba;
- u32 scp_data_len;
- u8 reserved6[260];
- u8 scp_data[];
-} __attribute__((packed));
-
-#define DIAG308_VMPARM_SIZE 64
-#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \
- offsetof(struct ipl_block_fcp, scp_data)))
-
-struct ipl_block_ccw {
- u8 reserved1[84];
- u16 reserved2 : 13;
- u8 ssid : 3;
- u16 devno;
- u8 vm_flags;
- u8 reserved3[3];
- u32 vm_parm_len;
- u8 nss_name[8];
- u8 vm_parm[DIAG308_VMPARM_SIZE];
- u8 reserved4[8];
-} __attribute__((packed));
+#define IPL_RB_CERT_UNKNOWN ((unsigned short)-1)
-struct ipl_parameter_block {
- struct ipl_list_hdr hdr;
- union {
- struct ipl_block_fcp fcp;
- struct ipl_block_ccw ccw;
- char raw[PAGE_SIZE - sizeof(struct ipl_list_hdr)];
- } ipl_info;
-} __packed __aligned(PAGE_SIZE);
+#define DIAG308_VMPARM_SIZE (64)
+#define DIAG308_SCPDATA_OFFSET offsetof(struct ipl_parameter_block, \
+ fcp.scp_data)
+#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - DIAG308_SCPDATA_OFFSET)
struct save_area;
struct save_area * __init save_area_alloc(bool is_boot_cpu);
@@ -88,7 +50,6 @@ void __init save_area_add_regs(struct save_area *, void *regs);
void __init save_area_add_vxrs(struct save_area *, __vector128 *vxrs);
extern void s390_reset_system(void);
-extern void ipl_store_parameters(void);
extern size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
const struct ipl_parameter_block *ipb);
@@ -122,6 +83,33 @@ extern struct ipl_info ipl_info;
extern void setup_ipl(void);
extern void set_os_info_reipl_block(void);
+struct ipl_report {
+ struct ipl_parameter_block *ipib;
+ struct list_head components;
+ struct list_head certificates;
+ size_t size;
+};
+
+struct ipl_report_component {
+ struct list_head list;
+ struct ipl_rb_component_entry entry;
+};
+
+struct ipl_report_certificate {
+ struct list_head list;
+ struct ipl_rb_certificate_entry entry;
+ void *key;
+};
+
+struct kexec_buf;
+struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib);
+void *ipl_report_finish(struct ipl_report *report);
+int ipl_report_free(struct ipl_report *report);
+int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
+ unsigned char flags, unsigned short cert);
+int ipl_report_add_certificate(struct ipl_report *report, void *key,
+ unsigned long addr, unsigned long len);
+
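/*
 * Illustrative sketch (not part of this patch): the intended call order
 * for the ipl_report interface declared above. The kexec_buf, the
 * certificate buffer, the use of flags == 0 and taking the buffer address
 * as load address are assumptions for the example; whether ipl_report_init
 * signals failure with NULL or an ERR_PTR is not shown here.
 */
static void *example_build_ipl_report(struct ipl_parameter_block *ipib,
				      struct kexec_buf *kbuf,
				      void *cert, unsigned long cert_len)
{
	struct ipl_report *report;

	report = ipl_report_init(ipib);
	if (!report)
		return NULL;
	/* one loaded component, not tied to a specific certificate */
	ipl_report_add_component(report, kbuf, 0, IPL_RB_CERT_UNKNOWN);
	/* one verification certificate located at 'cert' */
	ipl_report_add_certificate(report, cert, (unsigned long) cert, cert_len);
	/* finish lays out the report and returns its final location */
	return ipl_report_finish(report);
}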
/*
* DIAG 308 support
*/
@@ -133,32 +121,12 @@ enum diag308_subcode {
DIAG308_STORE = 6,
};
-enum diag308_ipl_type {
- DIAG308_IPL_TYPE_FCP = 0,
- DIAG308_IPL_TYPE_CCW = 2,
-};
-
-enum diag308_opt {
- DIAG308_IPL_OPT_IPL = 0x10,
- DIAG308_IPL_OPT_DUMP = 0x20,
-};
-
-enum diag308_flags {
- DIAG308_FLAGS_LP_VALID = 0x80,
-};
-
-enum diag308_vm_flags {
- DIAG308_VM_FLAGS_NSS_VALID = 0x80,
- DIAG308_VM_FLAGS_VP_VALID = 0x40,
-};
-
enum diag308_rc {
DIAG308_RC_OK = 0x0001,
DIAG308_RC_NOCONFIG = 0x0102,
};
extern int diag308(unsigned long subcode, void *addr);
-extern void diag308_reset(void);
extern void store_status(void (*fn)(void *), void *data);
extern void lgr_info_log(void);
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index afaf5e3c57fd..9f75d67b8c20 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -47,7 +47,6 @@ enum interruption_class {
IRQEXT_CMC,
IRQEXT_FTP,
IRQIO_CIO,
- IRQIO_QAI,
IRQIO_DAS,
IRQIO_C15,
IRQIO_C70,
@@ -55,12 +54,14 @@ enum interruption_class {
IRQIO_VMR,
IRQIO_LCS,
IRQIO_CTC,
- IRQIO_APB,
IRQIO_ADM,
IRQIO_CSC,
- IRQIO_PCI,
- IRQIO_MSI,
IRQIO_VIR,
+ IRQIO_QAI,
+ IRQIO_APB,
+ IRQIO_PCF,
+ IRQIO_PCD,
+ IRQIO_MSI,
IRQIO_VAI,
IRQIO_GAL,
NMI_NMI,
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 825dd0f7f221..ea398a05f643 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/setup.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
* I.e. Maximum page that is mapped directly into kernel memory,
@@ -42,6 +43,9 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
+/* Allow kexec_file to load a segment to 0 */
+#define KEXEC_BUF_MEM_UNKNOWN -1
+
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
@@ -51,20 +55,24 @@ struct s390_load_data {
/* Pointer to the kernel buffer. Used to register cmdline etc.. */
void *kernel_buf;
+ /* Load address of the kernel_buf. */
+ unsigned long kernel_mem;
+
+ /* Parmarea in the kernel buffer. */
+ struct parmarea *parm;
+
/* Total size of loaded segments in memory. Used as an offset. */
size_t memsz;
- /* Load address of initrd. Used to register INITRD_START in kernel. */
- unsigned long initrd_load_addr;
+ struct ipl_report *report;
};
-int kexec_file_add_purgatory(struct kimage *image,
- struct s390_load_data *data);
-int kexec_file_add_initrd(struct kimage *image,
- struct s390_load_data *data,
- char *initrd, unsigned long initrd_len);
-int *kexec_file_update_kernel(struct kimage *iamge,
- struct s390_load_data *data);
+int s390_verify_sig(const char *kernel, unsigned long kernel_len);
+void *kexec_file_add_components(struct kimage *image,
+ int (*add_kernel)(struct kimage *image,
+ struct s390_load_data *data));
+int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ unsigned long addr);
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c47e22bba87f..bdbc81b5bc91 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -278,6 +278,7 @@ struct kvm_s390_sie_block {
#define ECD_HOSTREGMGMT 0x20000000
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
+#define ECD_ECC 0x00200000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */
@@ -312,6 +313,7 @@ struct kvm_vcpu_stat {
u64 halt_successful_poll;
u64 halt_attempted_poll;
u64 halt_poll_invalid;
+ u64 halt_no_poll_steal;
u64 halt_wakeup;
u64 instruction_lctl;
u64 instruction_lctlg;
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 1b95da3fdd64..7f22262b0e46 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -28,5 +28,12 @@
.long (_target) - . ; \
.previous
+#define EX_TABLE_DMA(_fault, _target) \
+ .section .dma.ex_table, "a" ; \
+ .align 4 ; \
+ .long (_fault) - . ; \
+ .long (_target) - . ; \
+ .previous
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 672f95b12d40..818612b784cd 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -13,11 +13,6 @@
#include <asm/ptrace.h>
-static inline int klp_check_compiler_support(void)
-{
- return 0;
-}
-
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->psw.addr = ip;
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 5b9f10b1e55d..237ee0c4169f 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -129,7 +129,7 @@ struct lowcore {
/* SMP info area */
__u32 cpu_nr; /* 0x03a0 */
__u32 softirq_pending; /* 0x03a4 */
- __u32 preempt_count; /* 0x03a8 */
+ __s32 preempt_count; /* 0x03a8 */
__u32 spinlock_lockval; /* 0x03ac */
__u32 spinlock_index; /* 0x03b0 */
__u32 fpu_flags; /* 0x03b4 */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
index 123dac3717b3..0033dcd663b1 100644
--- a/arch/s390/include/asm/nospec-insn.h
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -32,23 +32,23 @@ _LC_BR_R1 = __LC_BR_R1
.endm
.macro __THUNK_PROLOG_BR r1,r2
- __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+ __THUNK_PROLOG_NAME __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_PROLOG_BC d0,r1,r2
- __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ __THUNK_PROLOG_NAME __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BR r1,r2
- jg __s390x_indirect_jump_r\r2\()use_r\r1
+ jg __s390_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BC d0,r1,r2
- jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ jg __s390_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BRASL r1,r2,r3
- brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+ brasl \r1,__s390_indirect_jump_r\r3\()use_r\r2
.endm
.macro __DECODE_RR expand,reg,ruse
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 4e0efebc56a9..305befd55326 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -26,6 +26,9 @@ int pci_proc_domain(struct pci_bus *);
#define ZPCI_BUS_NR 0 /* default bus number */
#define ZPCI_DEVFN 0 /* default device number */
+#define ZPCI_NR_DMA_SPACES 1
+#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
+
/* PCI Function Controls */
#define ZPCI_FC_FN_ENABLED 0x80
#define ZPCI_FC_ERROR 0x40
@@ -83,6 +86,8 @@ enum zpci_state {
struct zpci_bar_struct {
struct resource *res; /* bus resource */
+ void __iomem *mio_wb;
+ void __iomem *mio_wt;
u32 val; /* bar start & 3 flag bits */
u16 map_idx; /* index into bar mapping array */
u8 size; /* order 2 exponent */
@@ -112,6 +117,8 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
unsigned int max_msi; /* maximum number of MSI's */
+ unsigned int msi_first_bit;
+ unsigned int msi_nr_irqs;
struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned long aisb; /* number of the summary bit */
@@ -130,6 +137,7 @@ struct zpci_dev {
struct iommu_device iommu_dev; /* IOMMU core handle */
char res_name[16];
+ bool mio_capable;
struct zpci_bar_struct bars[PCI_BAR_COUNT];
u64 start_dma; /* Start of available DMA addresses */
@@ -158,6 +166,7 @@ static inline bool zdev_enabled(struct zpci_dev *zdev)
}
extern const struct attribute_group *zpci_attr_groups[];
+extern unsigned int s390_pci_force_floating __initdata;
/* -----------------------------------------------------------------------------
Prototypes
@@ -219,6 +228,9 @@ struct zpci_dev *get_zdev_by_fid(u32);
int zpci_dma_init(void);
void zpci_dma_exit(void);
+int __init zpci_irq_init(void);
+void __init zpci_irq_exit(void);
+
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
index b3b31b31f0d3..3ec52a05d500 100644
--- a/arch/s390/include/asm/pci_clp.h
+++ b/arch/s390/include/asm/pci_clp.h
@@ -43,6 +43,8 @@ struct clp_fh_list_entry {
#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+#define CLP_SET_ENABLE_MIO 2
+#define CLP_SET_DISABLE_MIO 3
#define CLP_UTIL_STR_LEN 64
#define CLP_PFIP_NR_SEGMENTS 4
@@ -80,7 +82,8 @@ struct clp_req_query_pci {
struct clp_rsp_query_pci {
struct clp_rsp_hdr hdr;
u16 vfn; /* virtual fn number */
- u16 : 7;
+ u16 : 6;
+ u16 mio_addr_avail : 1;
u16 util_str_avail : 1; /* utility string available? */
u16 pfgid : 8; /* pci function group id */
u32 fid; /* pci function id */
@@ -96,6 +99,15 @@ struct clp_rsp_query_pci {
u32 reserved[11];
u32 uid; /* user defined id */
u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
+ u32 reserved2[16];
+ u32 mio_valid : 6;
+ u32 : 26;
+ u32 : 32;
+ struct {
+ u64 wb;
+ u64 wt;
+ } addr[PCI_BAR_COUNT];
+ u32 reserved3[6];
} __packed;
/* Query PCI function group request */
@@ -118,7 +130,11 @@ struct clp_rsp_query_pci_grp {
u8 refresh : 1; /* TLB refresh mode */
u16 reserved2;
u16 mui;
- u64 reserved3;
+ u16 : 16;
+ u16 maxfaal;
+ u16 : 4;
+ u16 dnoi : 12;
+ u16 maxcpu;
u64 dasm; /* dma address space mask */
u64 msia; /* MSI address */
u64 reserved4;
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index ba22a6ea51a1..ff81ed19c506 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -2,6 +2,8 @@
#ifndef _ASM_S390_PCI_INSN_H
#define _ASM_S390_PCI_INSN_H
+#include <linux/jump_label.h>
+
/* Load/Store status codes */
#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
#define ZPCI_PCI_ST_FUNC_IN_ERR 8
@@ -38,6 +40,8 @@
#define ZPCI_MOD_FC_RESET_ERROR 7
#define ZPCI_MOD_FC_RESET_BLOCK 9
#define ZPCI_MOD_FC_SET_MEASURE 10
+#define ZPCI_MOD_FC_REG_INT_D 16
+#define ZPCI_MOD_FC_DEREG_INT_D 17
/* FIB function controls */
#define ZPCI_FIB_FC_ENABLED 0x80
@@ -51,16 +55,7 @@
#define ZPCI_FIB_FC_LS_BLOCKED 0x20
#define ZPCI_FIB_FC_DMAAS_REG 0x10
-/* Function Information Block */
-struct zpci_fib {
- u32 fmt : 8; /* format */
- u32 : 24;
- u32 : 32;
- u8 fc; /* function controls */
- u64 : 56;
- u64 pba; /* PCI base address */
- u64 pal; /* PCI address limit */
- u64 iota; /* I/O Translation Anchor */
+struct zpci_fib_fmt0 {
u32 : 1;
u32 isc : 3; /* Interrupt subclass */
u32 noi : 12; /* Number of interrupts */
@@ -72,16 +67,90 @@ struct zpci_fib {
u32 : 32;
u64 aibv; /* Adapter int bit vector address */
u64 aisb; /* Adapter int summary bit address */
+};
+
+struct zpci_fib_fmt1 {
+ u32 : 4;
+ u32 noi : 12;
+ u32 : 16;
+ u32 dibvo : 16;
+ u32 : 16;
+ u64 : 64;
+ u64 : 64;
+};
+
+/* Function Information Block */
+struct zpci_fib {
+ u32 fmt : 8; /* format */
+ u32 : 24;
+ u32 : 32;
+ u8 fc; /* function controls */
+ u64 : 56;
+ u64 pba; /* PCI base address */
+ u64 pal; /* PCI address limit */
+ u64 iota; /* I/O Translation Anchor */
+ union {
+ struct zpci_fib_fmt0 fmt0;
+ struct zpci_fib_fmt1 fmt1;
+ };
u64 fmb_addr; /* Function measurement block address and key */
u32 : 32;
u32 gd;
} __packed __aligned(8);
+/* directed interruption information block */
+struct zpci_diib {
+ u32 : 1;
+ u32 isc : 3;
+ u32 : 28;
+ u16 : 16;
+ u16 nr_cpus;
+ u64 disb_addr;
+ u64 : 64;
+ u64 : 64;
+} __packed __aligned(8);
+
+/* cpu directed interruption information block */
+struct zpci_cdiib {
+ u64 : 64;
+ u64 dibv_addr;
+ u64 : 64;
+ u64 : 64;
+ u64 : 64;
+} __packed __aligned(8);
+
+union zpci_sic_iib {
+ struct zpci_diib diib;
+ struct zpci_cdiib cdiib;
+};
+
+DECLARE_STATIC_KEY_FALSE(have_mio);
+
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
-int zpci_load(u64 *data, u64 req, u64 offset);
-int zpci_store(u64 data, u64 req, u64 offset);
-int zpci_store_block(const u64 *data, u64 req, u64 offset);
-int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int __zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len);
+int __zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
+int __zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_barrier(void);
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
+
+static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
+{
+ union zpci_sic_iib iib = {{0}};
+
+ return __zpci_set_irq_ctrl(ctl, isc, &iib);
+}
+
+#ifdef CONFIG_PCI
+static inline void enable_mio_ctl(void)
+{
+ if (static_branch_likely(&have_mio))
+ __ctl_set_bit(2, 5);
+}
+#else /* CONFIG_PCI */
+static inline void enable_mio_ctl(void) {}
+#endif /* CONFIG_PCI */
#endif
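/*
 * Illustrative sketch (not part of this patch): registering a PCI
 * function for directed interrupts with a format-1 FIB. The function
 * handle 'fh', the bit vector offset and the number of interrupts are
 * assumptions for the example; ZPCI_CREATE_REQ is the request encoding
 * macro from pci_io.h, and the condition code from zpci_mod_fc is
 * returned unchanged.
 */
static int example_reg_directed_irqs(u32 fh, u16 first_bit, u16 nr_irqs)
{
	struct zpci_fib fib = {0};
	u8 status;

	fib.fmt = 1;
	fib.fmt1.noi = nr_irqs;
	fib.fmt1.dibvo = first_bit;
	return zpci_mod_fc(ZPCI_CREATE_REQ(fh, 0, ZPCI_MOD_FC_REG_INT_D),
			   &fib, &status);
}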
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index cbb9cb9c6547..cd060b5dd8fd 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -37,12 +37,10 @@ extern struct zpci_iomap_entry *zpci_iomap_start;
#define zpci_read(LENGTH, RETTYPE) \
static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
{ \
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data; \
int rc; \
\
- rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
+ rc = zpci_load(&data, addr, LENGTH); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -52,11 +50,9 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
static inline void zpci_write_##VALTYPE(VALTYPE val, \
const volatile void __iomem *addr) \
{ \
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
- u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- zpci_store(data, req, ZPCI_OFFSET(addr)); \
+ zpci_store(addr, data, LENGTH); \
}
zpci_read(8, u64)
@@ -68,36 +64,38 @@ zpci_write(4, u32)
zpci_write(2, u16)
zpci_write(1, u8)
-static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+static inline int zpci_write_single(volatile void __iomem *dst, const void *src,
+ unsigned long len)
{
u64 val;
switch (len) {
case 1:
- val = (u64) *((u8 *) data);
+ val = (u64) *((u8 *) src);
break;
case 2:
- val = (u64) *((u16 *) data);
+ val = (u64) *((u16 *) src);
break;
case 4:
- val = (u64) *((u32 *) data);
+ val = (u64) *((u32 *) src);
break;
case 8:
- val = (u64) *((u64 *) data);
+ val = (u64) *((u64 *) src);
break;
default:
val = 0; /* let FW report error */
break;
}
- return zpci_store(val, req, offset);
+ return zpci_store(dst, val, len);
}
-static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
+ unsigned long len)
{
u64 data;
int cc;
- cc = zpci_load(&data, req, offset);
+ cc = zpci_load(&data, src, len);
if (cc)
goto out;
@@ -119,10 +117,8 @@ out:
return cc;
}
-static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
-{
- return zpci_store_block(data, req, offset);
-}
+int zpci_write_block(volatile void __iomem *dst, const void *src,
+ unsigned long len);
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
{
@@ -140,18 +136,15 @@ static inline int zpci_memcpy_fromio(void *dst,
const volatile void __iomem *src,
unsigned long n)
{
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
- u64 req, offset = ZPCI_OFFSET(src);
int size, rc = 0;
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) src,
(u64) dst, n, 8);
- req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
- rc = zpci_read_single(req, dst, offset, size);
+ rc = zpci_read_single(dst, src, size);
if (rc)
break;
- offset += size;
+ src += size;
dst += size;
n -= size;
}
@@ -161,8 +154,6 @@ static inline int zpci_memcpy_fromio(void *dst,
static inline int zpci_memcpy_toio(volatile void __iomem *dst,
const void *src, unsigned long n)
{
- struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
- u64 req, offset = ZPCI_OFFSET(dst);
int size, rc = 0;
if (!src)
@@ -171,16 +162,14 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
while (n > 0) {
size = zpci_get_max_write_size((u64 __force) dst,
(u64) src, n, 128);
- req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
-
if (size > 8) /* main path */
- rc = zpci_write_block(req, src, offset);
+ rc = zpci_write_block(dst, src, size);
else
- rc = zpci_write_single(req, src, offset, size);
+ rc = zpci_write_single(dst, src, size);
if (rc)
break;
- offset += size;
src += size;
+ dst += size;
n -= size;
}
return rc;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 76dc344edb8c..9f0195d5fa16 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -238,7 +238,7 @@ static inline int is_module_addr(void *addr)
#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
-#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
@@ -277,6 +277,7 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_PROTECT 0x200 /* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC 0x100 /* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_TYPE_MASK 0x0c /* segment table type mask */
#define _SEGMENT_ENTRY (0)
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
@@ -614,15 +615,9 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_bad(pgd_t pgd)
{
- /*
- * With dynamic page table levels the pgd can be a region table
- * entry or a segment table entry. Check for the bit that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pgd_val(pgd) & mask) != 0;
+ if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
+ return 0;
+ return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}
static inline unsigned long pgd_pfn(pgd_t pgd)
@@ -703,6 +698,8 @@ static inline int pmd_large(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
+ if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0)
+ return 1;
if (pmd_large(pmd))
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
@@ -710,8 +707,12 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pud_bad(pud_t pud)
{
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
- return pmd_bad(__pmd(pud_val(pud)));
+ unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;
+
+ if (type > _REGION_ENTRY_TYPE_R3)
+ return 1;
+ if (type < _REGION_ENTRY_TYPE_R3)
+ return 0;
if (pud_large(pud))
return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
@@ -719,8 +720,12 @@ static inline int pud_bad(pud_t pud)
static inline int p4d_bad(p4d_t p4d)
{
- if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
- return pud_bad(__pud(p4d_val(p4d)));
+ unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;
+
+ if (type > _REGION_ENTRY_TYPE_R2)
+ return 1;
+ if (type < _REGION_ENTRY_TYPE_R2)
+ return 0;
return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
@@ -1204,41 +1209,78 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr))
-
#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+/*
+ * The pgd_offset function *always* adds the index for the top-level
+ * region/segment table. This is done to get a sequence like the
+ * following to work:
+ * pgdp = pgd_offset(current->mm, addr);
+ * pgd = READ_ONCE(*pgdp);
+ * p4dp = p4d_offset(&pgd, addr);
+ * ...
+ * The subsequent p4d_offset, pud_offset and pmd_offset functions
+ * only add an index if they dereference the pointer.
+ */
+static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
- p4d_t *p4d = (p4d_t *) pgd;
+ unsigned long rste;
+ unsigned int shift;
- if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
- p4d = (p4d_t *) pgd_deref(*pgd);
- return p4d + p4d_index(address);
+ /* Get the first entry of the top level table */
+ rste = pgd_val(*pgd);
+ /* Pick up the shift from the table type of the first entry */
+ shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
+ return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
- pud_t *pud = (pud_t *) p4d;
+ if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
+ return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
+ return (p4d_t *) pgd;
+}
- if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pud = (pud_t *) p4d_deref(*p4d);
- return pud + pud_index(address);
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
+ return (pud_t *) p4d_deref(*p4d) + pud_index(address);
+ return (pud_t *) p4d;
}
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- pmd_t *pmd = (pmd_t *) pud;
+ if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
+ return (pmd_t *) pud_deref(*pud) + pmd_index(address);
+ return (pmd_t *) pud;
+}
- if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmd = (pmd_t *) pud_deref(*pud);
- return pmd + pmd_index(address);
+static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
+{
+ return (pte_t *) pmd_deref(*pmd) + pte_index(address);
+}
+
+#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
+#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
+#define pte_unmap(pte) do { } while (0)
+
+static inline bool gup_fast_permitted(unsigned long start, int nr_pages)
+{
+ unsigned long len, end;
+
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ if (end < start)
+ return false;
+ return end <= current->mm->context.asce_limit;
}
+#define gup_fast_permitted gup_fast_permitted
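/*
 * Illustrative sketch (not part of this patch): the lookup sequence the
 * comment above describes, spelled out with the reworked offset helpers.
 * Presence checks (pgd_none() and friends) and locking are omitted and
 * left to the caller; the function name is made up for the example.
 */
static pte_t *example_walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp, pgd;
	p4d_t *p4dp, p4d;
	pud_t *pudp, pud;
	pmd_t *pmdp;

	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	p4dp = p4d_offset(&pgd, addr);
	p4d = READ_ONCE(*p4dp);
	pudp = pud_offset(&p4d, addr);
	pud = READ_ONCE(*pudp);
	pmdp = pmd_offset(&pud, addr);
	return pte_offset(pmdp, addr);
}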
#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1249,12 +1291,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
-/* Find an entry in the lowest level page table.. */
-#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
-#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
-#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
-
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 81038ab357ce..b0fcbc37b637 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -156,25 +156,6 @@ struct thread_struct {
typedef struct thread_struct thread_struct;
-/*
- * Stack layout of a C stack frame.
- */
-#ifndef __PACK_STACK
-struct stack_frame {
- unsigned long back_chain;
- unsigned long empty1[5];
- unsigned long gprs[10];
- unsigned int empty2[8];
-};
-#else
-struct stack_frame {
- unsigned long empty1[5];
- unsigned int empty2[8];
- unsigned long gprs[10];
- unsigned long back_chain;
-};
-#endif
-
#define ARCH_MIN_TASKALIGN 8
#define INIT_THREAD { \
@@ -206,11 +187,7 @@ struct mm_struct;
struct seq_file;
struct pt_regs;
-typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
-void dump_trace(dump_trace_func_t func, void *data,
- struct task_struct *task, unsigned long sp);
void show_registers(struct pt_regs *regs);
-
void show_cacheinfo(struct seq_file *m);
/* Free all resources held by a thread. */
@@ -244,55 +221,6 @@ static __no_kasan_or_inline unsigned short stap(void)
return cpu_address;
}
-#define CALL_ARGS_0() \
- register unsigned long r2 asm("2")
-#define CALL_ARGS_1(arg1) \
- register unsigned long r2 asm("2") = (unsigned long)(arg1)
-#define CALL_ARGS_2(arg1, arg2) \
- CALL_ARGS_1(arg1); \
- register unsigned long r3 asm("3") = (unsigned long)(arg2)
-#define CALL_ARGS_3(arg1, arg2, arg3) \
- CALL_ARGS_2(arg1, arg2); \
- register unsigned long r4 asm("4") = (unsigned long)(arg3)
-#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
- CALL_ARGS_3(arg1, arg2, arg3); \
- register unsigned long r4 asm("5") = (unsigned long)(arg4)
-#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
- CALL_ARGS_4(arg1, arg2, arg3, arg4); \
- register unsigned long r4 asm("6") = (unsigned long)(arg5)
-
-#define CALL_FMT_0
-#define CALL_FMT_1 CALL_FMT_0, "0" (r2)
-#define CALL_FMT_2 CALL_FMT_1, "d" (r3)
-#define CALL_FMT_3 CALL_FMT_2, "d" (r4)
-#define CALL_FMT_4 CALL_FMT_3, "d" (r5)
-#define CALL_FMT_5 CALL_FMT_4, "d" (r6)
-
-#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
-#define CALL_CLOBBER_4 CALL_CLOBBER_5
-#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
-#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
-#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
-#define CALL_CLOBBER_0 CALL_CLOBBER_1
-
-#define CALL_ON_STACK(fn, stack, nr, args...) \
-({ \
- CALL_ARGS_##nr(args); \
- unsigned long prev; \
- \
- asm volatile( \
- " la %[_prev],0(15)\n" \
- " la 15,0(%[_stack])\n" \
- " stg %[_prev],%[_bc](15)\n" \
- " brasl 14,%[_fn]\n" \
- " la 15,0(%[_prev])\n" \
- : "+&d" (r2), [_prev] "=&a" (prev) \
- : [_stack] "a" (stack), \
- [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
- [_fn] "X" (fn) CALL_FMT_##nr : CALL_CLOBBER_##nr); \
- r2; \
-})
-
/*
* Give up the time slice of the virtual PU.
*/
@@ -339,10 +267,10 @@ static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
asm volatile(
" larl %0,1f\n"
- " stg %0,%O1+8(%R1)\n"
- " lpswe %1\n"
+ " stg %0,%1\n"
+ " lpswe %2\n"
"1:"
- : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
+ : "=&d" (addr), "=Q" (psw.addr) : "Q" (psw) : "memory", "cc");
}
/*
@@ -387,12 +315,12 @@ void enabled_wait(void);
/*
* Function to drop a processor into disabled wait state
*/
-static inline void __noreturn disabled_wait(unsigned long code)
+static inline void __noreturn disabled_wait(void)
{
psw_t psw;
psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
- psw.addr = code;
+ psw.addr = _THIS_IP_;
__load_psw(psw);
while (1);
}
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index ef4c9dec06a4..f577c5f6031a 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -79,6 +79,9 @@ struct sclp_info {
unsigned char has_kss : 1;
unsigned char has_gisaf : 1;
unsigned char has_diag318 : 1;
+ unsigned char has_sipl : 1;
+ unsigned char has_sipl_g2 : 1;
+ unsigned char has_dirq : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 7afe4620685c..42de04ad9c07 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -2,8 +2,20 @@
#ifndef _S390_SECTIONS_H
#define _S390_SECTIONS_H
+#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+
#include <asm-generic/sections.h>
+extern bool initmem_freed;
+
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ if (!initmem_freed)
+ return 0;
+ return addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end;
+}
+
/*
* .boot.data section contains variables "shared" between the decompressor and
* the decompressed kernel. The decompressor will store values in them, and
@@ -16,4 +28,14 @@
*/
#define __bootdata(var) __section(.boot.data.var) var
+/*
+ * .boot.preserved.data is similar to .boot.data, but it is not part of the
+ * .init section and thus will be preserved for later use in the decompressed
+ * kernel.
+ */
+#define __bootdata_preserved(var) __section(.boot.preserved.data.var) var
+
+extern unsigned long __sdma, __edma;
+extern unsigned long __stext_dma, __etext_dma;
+
#endif
diff --git a/arch/s390/include/asm/segment.h b/arch/s390/include/asm/segment.h
deleted file mode 100644
index 97a0582b8d0f..000000000000
--- a/arch/s390/include/asm/segment.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index efda97804aa4..925889d360c1 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -12,7 +12,10 @@
#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
#define PARMAREA 0x10400
-#define PARMAREA_END 0x11000
+#define EARLY_SCCB_OFFSET 0x11000
+#define HEAD_END 0x12000
+
+#define EARLY_SCCB_SIZE PAGE_SIZE
/*
* Machine features detected in early.c
@@ -65,6 +68,16 @@
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
+struct parmarea {
+ unsigned long ipl_device; /* 0x10400 */
+ unsigned long initrd_start; /* 0x10408 */
+ unsigned long initrd_size; /* 0x10410 */
+ unsigned long oldmem_base; /* 0x10418 */
+ unsigned long oldmem_size; /* 0x10420 */
+ char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */
+ char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
+};
+
extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
@@ -134,6 +147,12 @@ extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
+extern unsigned long __kaslr_offset;
+static inline unsigned long kaslr_offset(void)
+{
+ return __kaslr_offset;
+}
+
#else /* __ASSEMBLY__ */
#define IPL_DEVICE (IPL_DEVICE_OFFSET)
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
new file mode 100644
index 000000000000..49634bfbecdd
--- /dev/null
+++ b/arch/s390/include/asm/stacktrace.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_STACKTRACE_H
+#define _ASM_S390_STACKTRACE_H
+
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/switch_to.h>
+
+enum stack_type {
+ STACK_TYPE_UNKNOWN,
+ STACK_TYPE_TASK,
+ STACK_TYPE_IRQ,
+ STACK_TYPE_NODAT,
+ STACK_TYPE_RESTART,
+};
+
+struct stack_info {
+ enum stack_type type;
+ unsigned long begin, end;
+};
+
+const char *stack_type_name(enum stack_type type);
+int get_stack_info(unsigned long sp, struct task_struct *task,
+ struct stack_info *info, unsigned long *visit_mask);
+
+static inline bool on_stack(struct stack_info *info,
+ unsigned long addr, size_t len)
+{
+ if (info->type == STACK_TYPE_UNKNOWN)
+ return false;
+ if (addr + len < addr)
+ return false;
+ return addr >= info->begin && addr + len < info->end;
+}
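/*
 * Illustrative sketch (not part of this patch): checking whether a slot
 * of 'len' bytes at 'addr' lies on the stack that 'sp' belongs to, using
 * the helpers above. A non-zero return from get_stack_info is treated as
 * failure; the function name is made up for the example.
 */
static inline bool example_addr_on_stack(struct task_struct *task,
					 unsigned long sp,
					 unsigned long addr, size_t len)
{
	struct stack_info info;
	unsigned long visit_mask = 0;

	if (get_stack_info(sp, task, &info, &visit_mask))
		return false;
	return on_stack(&info, addr, len);
}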
+
+static inline unsigned long get_stack_pointer(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if (regs)
+ return (unsigned long) kernel_stack_pointer(regs);
+ if (task == current)
+ return current_stack_pointer();
+ return (unsigned long) task->thread.ksp;
+}
+
+/*
+ * Stack layout of a C stack frame.
+ */
+#ifndef __PACK_STACK
+struct stack_frame {
+ unsigned long back_chain;
+ unsigned long empty1[5];
+ unsigned long gprs[10];
+ unsigned int empty2[8];
+};
+#else
+struct stack_frame {
+ unsigned long empty1[5];
+ unsigned int empty2[8];
+ unsigned long gprs[10];
+ unsigned long back_chain;
+};
+#endif
+
+#define CALL_ARGS_0() \
+ register unsigned long r2 asm("2")
+#define CALL_ARGS_1(arg1) \
+ register unsigned long r2 asm("2") = (unsigned long)(arg1)
+#define CALL_ARGS_2(arg1, arg2) \
+ CALL_ARGS_1(arg1); \
+ register unsigned long r3 asm("3") = (unsigned long)(arg2)
+#define CALL_ARGS_3(arg1, arg2, arg3) \
+ CALL_ARGS_2(arg1, arg2); \
+ register unsigned long r4 asm("4") = (unsigned long)(arg3)
+#define CALL_ARGS_4(arg1, arg2, arg3, arg4) \
+ CALL_ARGS_3(arg1, arg2, arg3); \
+ register unsigned long r4 asm("5") = (unsigned long)(arg4)
+#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5) \
+ CALL_ARGS_4(arg1, arg2, arg3, arg4); \
+ register unsigned long r4 asm("6") = (unsigned long)(arg5)
+
+#define CALL_FMT_0 "=&d" (r2) :
+#define CALL_FMT_1 "+&d" (r2) :
+#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
+#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
+#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
+#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+
+#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
+#define CALL_CLOBBER_4 CALL_CLOBBER_5
+#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
+#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
+#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
+#define CALL_CLOBBER_0 CALL_CLOBBER_1
+
+#define CALL_ON_STACK(fn, stack, nr, args...) \
+({ \
+ CALL_ARGS_##nr(args); \
+ unsigned long prev; \
+ \
+ asm volatile( \
+ " la %[_prev],0(15)\n" \
+ " la 15,0(%[_stack])\n" \
+ " stg %[_prev],%[_bc](15)\n" \
+ " brasl 14,%[_fn]\n" \
+ " la 15,0(%[_prev])\n" \
+ : [_prev] "=&a" (prev), CALL_FMT_##nr \
+ [_stack] "a" (stack), \
+ [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
+ [_fn] "X" (fn) : CALL_CLOBBER_##nr); \
+ r2; \
+})
+
+#endif /* _ASM_S390_STACKTRACE_H */
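/*
 * Illustrative sketch (not part of this patch): calling a function with
 * two arguments on a different stack via CALL_ON_STACK. The callee
 * "example_worker" and the "stack" argument are assumptions made for the
 * example; the macro yields the callee's return value from r2.
 */
static unsigned long example_worker(unsigned long a, unsigned long b)
{
	return a + b;
}

static unsigned long example_call_on_stack(unsigned long stack)
{
	return CALL_ON_STACK(example_worker, stack, 2, 1UL, 2UL);
}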
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 59c3e91f2cdb..f073292e9fdb 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -14,13 +14,8 @@
#include <linux/err.h>
#include <asm/ptrace.h>
-/*
- * The syscall table always contains 32 bit pointers since we know that the
- * address of the function to be called is (way) below 4GB. So the "int"
- * type here is what we want [need] for both 32 bit and 64 bit systems.
- */
-extern const unsigned int sys_call_table[];
-extern const unsigned int sys_call_table_emu[];
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys_call_table_emu[];
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
@@ -84,10 +79,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->orig_gpr2 = args[0];
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(current, TIF_31BIT))
+ if (test_tsk_thread_flag(task, TIF_31BIT))
return AUDIT_ARCH_S390;
#endif
return AUDIT_ARCH_S390X;
diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h
index 5596c5c625d2..3c3d6fe8e2f0 100644
--- a/arch/s390/include/asm/syscall_wrapper.h
+++ b/arch/s390/include/asm/syscall_wrapper.h
@@ -119,8 +119,8 @@
"Type aliasing is used to sanitize syscall arguments");\
asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
- ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \
+ long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
__S390_SYS_STUBx(x, name, __VA_ARGS__) \
asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b31c779cf581..aa406c05a350 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,98 +22,39 @@
* Pages used for the page tables is a different story. FIXME: more
*/
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-struct mmu_gather {
- struct mm_struct *mm;
- struct mmu_table_batch *batch;
- unsigned int fullmm;
- unsigned long start, end;
-};
-
-struct mmu_table_batch {
- struct rcu_head rcu;
- unsigned int nr;
- void *tables[0];
-};
-
-#define MAX_TABLE_BATCH \
- ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-
-extern void tlb_table_flush(struct mmu_gather *tlb);
-extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
- tlb->batch = NULL;
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- __tlb_flush_mm_lazy(tlb->mm);
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- tlb_table_flush(tlb);
-}
-
+void __tlb_remove_table(void *_table);
+static inline void tlb_flush(struct mmu_gather *tlb);
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size);
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->start = start;
- tlb->end = end;
- }
+#define tlb_flush tlb_flush
+#define pte_free_tlb pte_free_tlb
+#define pmd_free_tlb pmd_free_tlb
+#define p4d_free_tlb p4d_free_tlb
+#define pud_free_tlb pud_free_tlb
- tlb_flush_mmu(tlb);
-}
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm-generic/tlb.h>
/*
* Release the page cache reference for a pte removed by
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*/
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
-}
-
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- return __tlb_remove_page(tlb, page);
+ free_page_and_swap_cache(page);
+ return false;
}
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
+static inline void tlb_flush(struct mmu_gather *tlb)
{
- return tlb_remove_page(tlb, page);
+ __tlb_flush_mm_lazy(tlb->mm);
}
/*
@@ -121,8 +62,17 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
* page table from the tlb.
*/
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
- unsigned long address)
+ unsigned long address)
{
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_ptes = 1;
+ /*
+ * page_table_free_rcu takes care of the allocation bit masks
+ * of the 2K table fragments in the 4K page table page,
+ * then calls tlb_remove_table.
+ */
page_table_free_rcu(tlb, (unsigned long *) pte, address);
}
@@ -139,6 +89,10 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
if (mm_pmd_folded(tlb->mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_puds = 1;
tlb_remove_table(tlb, pmd);
}
@@ -154,6 +108,10 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
{
if (mm_p4d_folded(tlb->mm))
return;
+ __tlb_adjust_range(tlb, address, PAGE_SIZE);
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_p4ds = 1;
tlb_remove_table(tlb, p4d);
}
@@ -169,21 +127,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
{
if (mm_pud_folded(tlb->mm))
return;
+ tlb->mm->context.flush_mm = 1;
+ tlb->freed_tables = 1;
+ tlb->cleared_puds = 1;
tlb_remove_table(tlb, pud);
}
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
-#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
-#define tlb_migrate_finish(mm) do { } while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
#endif /* _S390_TLB_H */
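The rewritten tlb.h above plugs into the generic mmu_gather code: tlb_flush() maps to __tlb_flush_mm_lazy(), and the pXd_free_tlb() helpers mark the gather as having freed tables before handing the table to tlb_remove_table(). The caller-side sequence these hooks serve looks roughly like the sketch below; it is illustrative only and not part of this patch, and the helper names and signatures follow the 5.2-era core-mm interface.

#include <linux/mm.h>
#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* set up the gather */
	unmap_vmas(&tlb, vma, start, end);	/* queues pages, adjusts the range */
	free_pgtables(&tlb, vma, start, end);	/* ends up in the pXd_free_tlb() hooks */
	tlb_finish_mmu(&tlb, start, end);	/* eventually calls tlb_flush() above */
}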
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 007fcb9aeeb8..bd2fd9a7821d 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -55,8 +55,10 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
+#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
+#endif
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h
new file mode 100644
index 000000000000..6eb2ef105d87
--- /dev/null
+++ b/arch/s390/include/asm/unwind.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_UNWIND_H
+#define _ASM_S390_UNWIND_H
+
+#include <linux/sched.h>
+#include <linux/ftrace.h>
+#include <asm/ptrace.h>
+#include <asm/stacktrace.h>
+
+/*
+ * To use the stack unwinder it has to be initialized with unwind_start.
+ * There are four combinations for task and regs:
+ * 1) task==NULL, regs==NULL: the unwind starts for the task that is currently
+ * running, sp/ip picked up from the CPU registers
+ * 2) task==NULL, regs!=NULL: the unwind starts from the sp/ip found in
+ * the struct pt_regs of an interrupt frame for the current task
+ * 3) task!=NULL, regs==NULL: the unwind starts for an inactive task with
+ * the sp picked up from task->thread.ksp and the ip picked up from the
+ * return address stored by __switch_to
+ * 4) task!=NULL, regs!=NULL: the sp/ip are picked up from the interrupt
+ * frame 'regs' of an inactive task
+ * If 'first_frame' is not zero, unwind_start skips unwind frames until it
+ * reaches the specified stack pointer.
+ * The end of the unwinding is indicated with unwind_done; this can be true
+ * right after unwind_start, e.g. with a first_frame!=0 that cannot be found.
+ * unwind_next_frame skips to the next frame.
+ * Once the unwind is completed, unwind_error() can be used to check if there
+ * has been a situation where the unwinder could not correctly understand
+ * the task's call chain.
+ */
+
+struct unwind_state {
+ struct stack_info stack_info;
+ unsigned long stack_mask;
+ struct task_struct *task;
+ struct pt_regs *regs;
+ unsigned long sp, ip;
+ int graph_idx;
+ bool reliable;
+ bool error;
+};
+
+void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long first_frame);
+bool unwind_next_frame(struct unwind_state *state);
+unsigned long unwind_get_return_address(struct unwind_state *state);
+
+static inline bool unwind_done(struct unwind_state *state)
+{
+ return state->stack_info.type == STACK_TYPE_UNKNOWN;
+}
+
+static inline bool unwind_error(struct unwind_state *state)
+{
+ return state->error;
+}
+
+static inline void unwind_start(struct unwind_state *state,
+ struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long sp)
+{
+ sp = sp ? : get_stack_pointer(task, regs);
+ __unwind_start(state, task, regs, sp);
+}
+
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ return unwind_done(state) ? NULL : state->regs;
+}
+
+#define unwind_for_each_frame(state, task, regs, first_frame) \
+ for (unwind_start(state, task, regs, first_frame); \
+ !unwind_done(state); \
+ unwind_next_frame(state))
+
+static inline void unwind_init(void) {}
+static inline void unwind_module_init(struct module *mod, void *orc_ip,
+ size_t orc_ip_size, void *orc,
+ size_t orc_size) {}
+
+#ifdef CONFIG_KASAN
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+#else
+#define READ_ONCE_TASK_STACK(task, x) READ_ONCE(x)
+#endif
+
+#endif /* _ASM_S390_UNWIND_H */
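The new unwinder is driven through unwind_for_each_frame(). The sketch below is not part of this patch; it walks and prints the current task's call chain, mirroring the show_stack() conversion in the dumpstack.c hunk further down.

#include <linux/printk.h>
#include <asm/unwind.h>

static void print_current_trace(void)
{
	struct unwind_state state;

	printk("Call Trace:\n");
	unwind_for_each_frame(&state, current, NULL, 0)
		printk(state.reliable ? " [<%016lx>] %pSR\n"
				      : "([<%016lx>] %pSR)\n",
		       state.ip, (void *) state.ip);
	if (unwind_error(&state))
		printk("unwinder could not follow the full call chain\n");
}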
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
new file mode 100644
index 000000000000..ef3c00b049ab
--- /dev/null
+++ b/arch/s390/include/asm/uv.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ultravisor Interfaces
+ *
+ * Copyright IBM Corp. 2019
+ *
+ * Author(s):
+ * Vasily Gorbik <gor@linux.ibm.com>
+ * Janosch Frank <frankja@linux.ibm.com>
+ */
+#ifndef _ASM_S390_UV_H
+#define _ASM_S390_UV_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <asm/page.h>
+
+#define UVC_RC_EXECUTED 0x0001
+#define UVC_RC_INV_CMD 0x0002
+#define UVC_RC_INV_STATE 0x0003
+#define UVC_RC_INV_LEN 0x0005
+#define UVC_RC_NO_RESUME 0x0007
+
+#define UVC_CMD_QUI 0x0001
+#define UVC_CMD_SET_SHARED_ACCESS 0x1000
+#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
+
+/* Bits in installed uv calls */
+enum uv_cmds_inst {
+ BIT_UVC_CMD_QUI = 0,
+ BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
+ BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
+};
+
+struct uv_cb_header {
+ u16 len;
+ u16 cmd; /* Command Code */
+ u16 rc; /* Response Code */
+ u16 rrc; /* Return Reason Code */
+} __packed __aligned(8);
+
+struct uv_cb_qui {
+ struct uv_cb_header header;
+ u64 reserved08;
+ u64 inst_calls_list[4];
+ u64 reserved30[15];
+} __packed __aligned(8);
+
+struct uv_cb_share {
+ struct uv_cb_header header;
+ u64 reserved08[3];
+ u64 paddr;
+ u64 reserved28;
+} __packed __aligned(8);
+
+static inline int uv_call(unsigned long r1, unsigned long r2)
+{
+ int cc;
+
+ asm volatile(
+ "0: .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
+ " brc 3,0b\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc)
+ : [r1] "a" (r1), [r2] "a" (r2)
+ : "memory", "cc");
+ return cc;
+}
+
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+extern int prot_virt_guest;
+
+static inline int is_prot_virt_guest(void)
+{
+ return prot_virt_guest;
+}
+
+static inline int share(unsigned long addr, u16 cmd)
+{
+ struct uv_cb_share uvcb = {
+ .header.cmd = cmd,
+ .header.len = sizeof(uvcb),
+ .paddr = addr
+ };
+
+ if (!is_prot_virt_guest())
+ return -ENOTSUPP;
+ /*
+ * Sharing is page-wise; if we encounter addresses that are
+ * not page aligned, we assume something went wrong. If
+ * malloced structs are passed to this function, we could leak
+ * data to the hypervisor.
+ */
+ BUG_ON(addr & ~PAGE_MASK);
+
+ if (!uv_call(0, (u64)&uvcb))
+ return 0;
+ return -EINVAL;
+}
+
+/*
+ * Guest 2 request to the Ultravisor to make a page shared with the
+ * hypervisor for IO.
+ *
+ * @addr: Real or absolute address of the page to be shared
+ */
+static inline int uv_set_shared(unsigned long addr)
+{
+ return share(addr, UVC_CMD_SET_SHARED_ACCESS);
+}
+
+/*
+ * Guest 2 request to the Ultravisor to make a page unshared.
+ *
+ * @addr: Real or absolute address of the page to be unshared
+ */
+static inline int uv_remove_shared(unsigned long addr)
+{
+ return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
+}
+
+void uv_query_info(void);
+#else
+#define is_prot_virt_guest() 0
+static inline int uv_set_shared(unsigned long addr) { return 0; }
+static inline int uv_remove_shared(unsigned long addr) { return 0; }
+static inline void uv_query_info(void) {}
+#endif
+
+#endif /* _ASM_S390_UV_H */
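uv_set_shared() and uv_remove_shared() are meant to bracket accesses to page-aligned buffers that the hypervisor must be able to read, as the ipl.c hunks below do around diag308(). A minimal sketch of that pattern follows; do_hypervisor_io() is a hypothetical placeholder, not a real interface.

#include <asm/uv.h>
#include <asm/page.h>

static int shared_page_operation(void *page_aligned_buf)
{
	int rc;

	uv_set_shared(__pa(page_aligned_buf));	  /* no-op unless protected virt guest */
	rc = do_hypervisor_io(page_aligned_buf);  /* hypothetical operation */
	uv_remove_shared(__pa(page_aligned_buf));
	return rc;
}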
diff --git a/arch/s390/include/asm/vmlinux.lds.h b/arch/s390/include/asm/vmlinux.lds.h
index 2d127f900352..cbe670a6861b 100644
--- a/arch/s390/include/asm/vmlinux.lds.h
+++ b/arch/s390/include/asm/vmlinux.lds.h
@@ -18,3 +18,16 @@
*(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.data*))) \
__boot_data_end = .; \
}
+
+/*
+ * .boot.preserved.data is similar to .boot.data, but it is not part of the
+ * .init section and thus will be preserved for later use in the decompressed
+ * kernel.
+ */
+#define BOOT_DATA_PRESERVED \
+ . = ALIGN(PAGE_SIZE); \
+ .boot.preserved.data : { \
+ __boot_data_preserved_start = .; \
+ *(SORT_BY_ALIGNMENT(SORT_BY_NAME(.boot.preserved.data*))) \
+ __boot_data_preserved_end = .; \
+ }
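The .boot.preserved.data output section backs variables annotated with __bootdata_preserved(), which, judging from the diag.c and ipl.c hunks below, is provided via <asm/sections.h>. A minimal sketch under that assumption:

#include <asm/sections.h>

/* Hypothetical variable: written by the decompressor and still readable from
 * the decompressed kernel because it lives in .boot.preserved.data. */
unsigned long __bootdata_preserved(example_boot_value);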
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
new file mode 100644
index 000000000000..fd32b1cd80d2
--- /dev/null
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_UAPI_IPL_H
+#define _ASM_S390_UAPI_IPL_H
+
+#include <linux/types.h>
+
+/* IPL Parameter List header */
+struct ipl_pl_hdr {
+ __u32 len;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 version;
+} __packed;
+
+#define IPL_PL_FLAG_IPLPS 0x80
+#define IPL_PL_FLAG_SIPL 0x40
+#define IPL_PL_FLAG_IPLSR 0x20
+
+/* IPL Parameter Block header */
+struct ipl_pb_hdr {
+ __u32 len;
+ __u8 pbt;
+} __packed;
+
+/* IPL Parameter Block types */
+enum ipl_pbt {
+ IPL_PBT_FCP = 0,
+ IPL_PBT_SCP_DATA = 1,
+ IPL_PBT_CCW = 2,
+};
+
+/* IPL Parameter Block 0 with common fields */
+struct ipl_pb0_common {
+ __u32 len;
+ __u8 pbt;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 loadparm[8];
+ __u8 reserved2[84];
+} __packed;
+
+#define IPL_PB0_FLAG_LOADPARM 0x80
+
+/* IPL Parameter Block 0 for FCP */
+struct ipl_pb0_fcp {
+ __u32 len;
+ __u8 pbt;
+ __u8 reserved1[3];
+ __u8 loadparm[8];
+ __u8 reserved2[304];
+ __u8 opt;
+ __u8 reserved3[3];
+ __u8 cssid;
+ __u8 reserved4[1];
+ __u16 devno;
+ __u8 reserved5[4];
+ __u64 wwpn;
+ __u64 lun;
+ __u32 bootprog;
+ __u8 reserved6[12];
+ __u64 br_lba;
+ __u32 scp_data_len;
+ __u8 reserved7[260];
+ __u8 scp_data[];
+} __packed;
+
+#define IPL_PB0_FCP_OPT_IPL 0x10
+#define IPL_PB0_FCP_OPT_DUMP 0x20
+
+/* IPL Parameter Block 0 for CCW */
+struct ipl_pb0_ccw {
+ __u32 len;
+ __u8 pbt;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 loadparm[8];
+ __u8 reserved2[84];
+ __u16 reserved3 : 13;
+ __u8 ssid : 3;
+ __u16 devno;
+ __u8 vm_flags;
+ __u8 reserved4[3];
+ __u32 vm_parm_len;
+ __u8 nss_name[8];
+ __u8 vm_parm[64];
+ __u8 reserved5[8];
+} __packed;
+
+#define IPL_PB0_CCW_VM_FLAG_NSS 0x80
+#define IPL_PB0_CCW_VM_FLAG_VP 0x40
+
+/* IPL Parameter Block 1 for additional SCP data */
+struct ipl_pb1_scp_data {
+ __u32 len;
+ __u8 pbt;
+ __u8 scp_data[];
+} __packed;
+
+/* IPL Report List header */
+struct ipl_rl_hdr {
+ __u32 len;
+ __u8 flags;
+ __u8 reserved1[2];
+ __u8 version;
+ __u8 reserved2[8];
+} __packed;
+
+/* IPL Report Block header */
+struct ipl_rb_hdr {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+} __packed;
+
+/* IPL Report Block types */
+enum ipl_rbt {
+ IPL_RBT_CERTIFICATES = 1,
+ IPL_RBT_COMPONENTS = 2,
+};
+
+/* IPL Report Block for the certificate list */
+struct ipl_rb_certificate_entry {
+ __u64 addr;
+ __u64 len;
+} __packed;
+
+struct ipl_rb_certificates {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+ struct ipl_rb_certificate_entry entries[];
+} __packed;
+
+/* IPL Report Block for the component list */
+struct ipl_rb_component_entry {
+ __u64 addr;
+ __u64 len;
+ __u8 flags;
+ __u8 reserved1[5];
+ __u16 certificate_index;
+ __u8 reserved2[8];
+};
+
+#define IPL_RB_COMPONENT_FLAG_SIGNED 0x80
+#define IPL_RB_COMPONENT_FLAG_VERIFIED 0x40
+
+struct ipl_rb_components {
+ __u32 len;
+ __u8 rbt;
+ __u8 reserved1[11];
+ struct ipl_rb_component_entry entries[];
+} __packed;
+
+#endif
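A consumer of these structures walks the IPL report list by following the per-block len fields. The sketch below is not part of this patch; it assumes the layout that ipl_report_finish() produces further down in this series, an ipl_rl_hdr followed by report blocks that each begin with a 4-byte len and a 1-byte rbt, and it assumes <asm/ipl.h> pulls in the uapi definitions above.

#include <asm/ipl.h>

static void walk_ipl_report_blocks(struct ipl_rl_hdr *rl)
{
	void *ptr = (void *)rl + sizeof(*rl);
	void *end = (void *)rl + rl->len;

	while (ptr + sizeof(struct ipl_rb_hdr) <= end) {
		struct ipl_rb_hdr *rb = ptr;

		if (rb->rbt == IPL_RBT_COMPONENTS)
			;	/* ptr points at a struct ipl_rb_components */
		else if (rb->rbt == IPL_RBT_CERTIFICATES)
			;	/* ptr points at a struct ipl_rb_certificates */
		ptr += rb->len;	/* advance to the next report block */
	}
}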
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 16511d97e8dc..47104e5b47fd 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -152,7 +152,10 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 pcc[16]; /* with MSA4 */
__u8 ppno[16]; /* with MSA5 */
__u8 kma[16]; /* with MSA8 */
- __u8 reserved[1808];
+ __u8 kdsa[16]; /* with MSA9 */
+ __u8 sortl[32]; /* with STFLE.150 */
+ __u8 dfltcc[32]; /* with STFLE.151 */
+ __u8 reserved[1728];
};
/* kvm attributes for crypto */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 8a62c7f72e1b..b0478d01a0c5 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -39,6 +39,7 @@ CFLAGS_smp.o := -Wno-nonnull
#
CFLAGS_stacktrace.o += -fno-optimize-sibling-calls
CFLAGS_dumpstack.o += -fno-optimize-sibling-calls
+CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
#
# Pass UTS_MACHINE for user_regset definition
@@ -51,7 +52,7 @@ obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
-obj-y += nospec-branch.o ipl_vmparm.o
+obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
extra-y += head64.o vmlinux.lds
@@ -77,6 +78,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
+obj-$(CONFIG_IMA) += ima_arch.o
+
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o
@@ -86,7 +89,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace.o
# vdso
obj-y += vdso64/
-obj-$(CONFIG_COMPAT) += vdso32/
+obj-$(CONFIG_COMPAT_VDSO) += vdso32/
chkbss := head64.o early_nobss.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 164bec175628..41ac4ad21311 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
+#include <asm/stacktrace.h>
int main(void)
{
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f268fca67e82..2f39ea57f358 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -28,6 +28,7 @@ ENTRY(s390_base_mcck_handler)
1: la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
lpswe __LC_MCK_OLD_PSW
+ENDPROC(s390_base_mcck_handler)
.section .bss
.align 8
@@ -48,6 +49,7 @@ ENTRY(s390_base_ext_handler)
1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
+ENDPROC(s390_base_ext_handler)
.section .bss
.align 8
@@ -68,6 +70,7 @@ ENTRY(s390_base_pgm_handler)
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
+ENDPROC(s390_base_pgm_handler)
.align 8
disabled_wait_psw:
@@ -79,71 +82,3 @@ disabled_wait_psw:
s390_base_pgm_handler_fn:
.quad 0
.previous
-
-#
-# Calls diag 308 subcode 1 and continues execution
-#
-ENTRY(diag308_reset)
- larl %r4,.Lctlregs # Save control registers
- stctg %c0,%c15,0(%r4)
- lg %r2,0(%r4) # Disable lowcore protection
- nilh %r2,0xefff
- larl %r4,.Lctlreg0
- stg %r2,0(%r4)
- lctlg %c0,%c0,0(%r4)
- larl %r4,.Lfpctl # Floating point control register
- stfpc 0(%r4)
- larl %r4,.Lprefix # Save prefix register
- stpx 0(%r4)
- larl %r4,.Lprefix_zero # Set prefix register to 0
- spx 0(%r4)
- larl %r4,.Lcontinue_psw # Save PSW flags
- epsw %r2,%r3
- stm %r2,%r3,0(%r4)
- larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
- lghi %r3,0
- lg %r4,0(%r4) # Save PSW
- sturg %r4,%r3 # Use sturg, because of large pages
- lghi %r1,1
- lghi %r0,0
- diag %r0,%r1,0x308
-.Lrestart_part2:
- lhi %r0,0 # Load r0 with zero
- lhi %r1,2 # Use mode 2 = ESAME (dump)
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
- sam64 # Switch to 64 bit addressing mode
- larl %r4,.Lctlregs # Restore control registers
- lctlg %c0,%c15,0(%r4)
- larl %r4,.Lfpctl # Restore floating point ctl register
- lfpc 0(%r4)
- larl %r4,.Lprefix # Restore prefix register
- spx 0(%r4)
- larl %r4,.Lcontinue_psw # Restore PSW flags
- lpswe 0(%r4)
-.Lcontinue:
- BR_EX %r14
-.align 16
-.Lrestart_psw:
- .long 0x00080000,0x80000000 + .Lrestart_part2
-
- .section .data..nosave,"aw",@progbits
-.align 8
-.Lcontinue_psw:
- .quad 0,.Lcontinue
- .previous
-
- .section .bss
-.align 8
-.Lctlreg0:
- .quad 0
-.Lctlregs:
- .rept 16
- .quad 0
- .endr
-.Lfpctl:
- .long 0
-.Lprefix:
- .long 0
-.Lprefix_zero:
- .long 0
- .previous
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 7edaa733a77f..e9dac9a24d3f 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <asm/diag.h>
#include <asm/trace/diag.h>
+#include <asm/sections.h>
struct diag_stat {
unsigned int counter[NR_DIAG_STAT];
@@ -49,6 +50,9 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
};
+struct diag_ops __bootdata_preserved(diag_dma_ops);
+struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
+
static int show_diag_stat(struct seq_file *m, void *v)
{
struct diag_stat *stat;
@@ -139,30 +143,10 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
-static inline int __diag14(unsigned long rx, unsigned long ry1,
- unsigned long subcode)
-{
- register unsigned long _ry1 asm("2") = ry1;
- register unsigned long _ry2 asm("3") = subcode;
- int rc = 0;
-
- asm volatile(
- " sam31\n"
- " diag %2,2,0x14\n"
- " sam64\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (rc), "+d" (_ry2)
- : "d" (rx), "d" (_ry1)
- : "cc");
-
- return rc;
-}
-
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
{
diag_stat_inc(DIAG_STAT_X014);
- return __diag14(rx, ry1, subcode);
+ return diag_dma_ops.diag14(rx, ry1, subcode);
}
EXPORT_SYMBOL(diag14);
@@ -195,30 +179,17 @@ EXPORT_SYMBOL(diag204);
*/
int diag210(struct diag210 *addr)
{
- /*
- * diag 210 needs its data below the 2GB border, so we
- * use a static data area to be sure
- */
- static struct diag210 diag210_tmp;
static DEFINE_SPINLOCK(diag210_lock);
unsigned long flags;
int ccode;
spin_lock_irqsave(&diag210_lock, flags);
- diag210_tmp = *addr;
+ *__diag210_tmp_dma = *addr;
diag_stat_inc(DIAG_STAT_X210);
- asm volatile(
- " lhi %0,-1\n"
- " sam31\n"
- " diag %1,0,0x210\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1: sam64\n"
- EX_TABLE(0b, 1b)
- : "=&d" (ccode) : "a" (&diag210_tmp) : "cc", "memory");
-
- *addr = diag210_tmp;
+ ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
+
+ *addr = *__diag210_tmp_dma;
spin_unlock_irqrestore(&diag210_lock, flags);
return ccode;
@@ -243,27 +214,9 @@ EXPORT_SYMBOL(diag224);
/*
* Diagnose 26C: Access Certain System Information
*/
-static inline int __diag26c(void *req, void *resp, enum diag26c_sc subcode)
-{
- register unsigned long _req asm("2") = (addr_t) req;
- register unsigned long _resp asm("3") = (addr_t) resp;
- register unsigned long _subcode asm("4") = subcode;
- register unsigned long _rc asm("5") = -EOPNOTSUPP;
-
- asm volatile(
- " sam31\n"
- " diag %[rx],%[ry],0x26c\n"
- "0: sam64\n"
- EX_TABLE(0b,0b)
- : "+d" (_rc)
- : [rx] "d" (_req), "d" (_resp), [ry] "d" (_subcode)
- : "cc", "memory");
- return _rc;
-}
-
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
{
diag_stat_inc(DIAG_STAT_X26C);
- return __diag26c(req, resp, subcode);
+ return diag_dma_ops.diag26c(req, resp, subcode);
}
EXPORT_SYMBOL(diag26c);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index cb7f55bbe06e..9e87b68be21c 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -21,95 +21,124 @@
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
+#include <asm/unwind.h>
-/*
- * For dump_trace we have tree different stack to consider:
- * - the panic stack which is used if the kernel stack has overflown
- * - the asynchronous interrupt stack (cpu related)
- * - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stacks and can potentially
- * touch all of them. The order is: panic stack, async stack, sync stack.
- */
-static unsigned long __no_sanitize_address
-__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
- unsigned long low, unsigned long high)
+const char *stack_type_name(enum stack_type type)
{
- struct stack_frame *sf;
- struct pt_regs *regs;
-
- while (1) {
- if (sp < low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- if (func(data, sf->gprs[8], 0))
- return sp;
- /* Follow the backchain. */
- while (1) {
- low = sp;
- sp = sf->back_chain;
- if (!sp)
- break;
- if (sp <= low || sp > high - sizeof(*sf))
- return sp;
- sf = (struct stack_frame *) sp;
- if (func(data, sf->gprs[8], 1))
- return sp;
- }
- /* Zero backchain detected, check for interrupt frame. */
- sp = (unsigned long) (sf + 1);
- if (sp <= low || sp > high - sizeof(*regs))
- return sp;
- regs = (struct pt_regs *) sp;
- if (!user_mode(regs)) {
- if (func(data, regs->psw.addr, 1))
- return sp;
- }
- low = sp;
- sp = regs->gprs[15];
+ switch (type) {
+ case STACK_TYPE_TASK:
+ return "task";
+ case STACK_TYPE_IRQ:
+ return "irq";
+ case STACK_TYPE_NODAT:
+ return "nodat";
+ case STACK_TYPE_RESTART:
+ return "restart";
+ default:
+ return "unknown";
}
}
-void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
- unsigned long sp)
+static inline bool in_stack(unsigned long sp, struct stack_info *info,
+ enum stack_type type, unsigned long low,
+ unsigned long high)
+{
+ if (sp < low || sp >= high)
+ return false;
+ info->type = type;
+ info->begin = low;
+ info->end = high;
+ return true;
+}
+
+static bool in_task_stack(unsigned long sp, struct task_struct *task,
+ struct stack_info *info)
+{
+ unsigned long stack;
+
+ stack = (unsigned long) task_stack_page(task);
+ return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
+}
+
+static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
- unsigned long frame_size;
+ unsigned long frame_size, top;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-#ifdef CONFIG_CHECK_STACK
- sp = __dump_trace(func, data, sp,
- S390_lowcore.nodat_stack + frame_size - THREAD_SIZE,
- S390_lowcore.nodat_stack + frame_size);
-#endif
- sp = __dump_trace(func, data, sp,
- S390_lowcore.async_stack + frame_size - THREAD_SIZE,
- S390_lowcore.async_stack + frame_size);
- task = task ?: current;
- __dump_trace(func, data, sp,
- (unsigned long)task_stack_page(task),
- (unsigned long)task_stack_page(task) + THREAD_SIZE);
+ top = S390_lowcore.async_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
+}
+
+static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.nodat_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}
-EXPORT_SYMBOL_GPL(dump_trace);
-static int show_address(void *data, unsigned long address, int reliable)
+static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
- if (reliable)
- printk(" [<%016lx>] %pSR \n", address, (void *)address);
- else
- printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.restart_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
+}
+
+int get_stack_info(unsigned long sp, struct task_struct *task,
+ struct stack_info *info, unsigned long *visit_mask)
+{
+ if (!sp)
+ goto unknown;
+
+ task = task ? : current;
+
+ /* Check per-task stack */
+ if (in_task_stack(sp, task, info))
+ goto recursion_check;
+
+ if (task != current)
+ goto unknown;
+
+ /* Check per-cpu stacks */
+ if (!in_irq_stack(sp, info) &&
+ !in_nodat_stack(sp, info) &&
+ !in_restart_stack(sp, info))
+ goto unknown;
+
+recursion_check:
+ /*
+ * Make sure we don't iterate through any given stack more than once.
+ * If it comes up a second time then there's something wrong going on:
+ * just break out and report an unknown stack type.
+ */
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: stack recursion on stack type %d\n",
+ info->type);
+ goto unknown;
+ }
+ *visit_mask |= 1UL << info->type;
return 0;
+unknown:
+ info->type = STACK_TYPE_UNKNOWN;
+ return -EINVAL;
}
void show_stack(struct task_struct *task, unsigned long *stack)
{
- unsigned long sp = (unsigned long) stack;
+ struct unwind_state state;
- if (!sp)
- sp = task ? task->thread.ksp : current_stack_pointer();
printk("Call Trace:\n");
- dump_trace(show_address, NULL, task, sp);
if (!task)
task = current;
- debug_show_held_locks(task);
+ unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
+ printk(state.reliable ? " [<%016lx>] %pSR \n" :
+ "([<%016lx>] %pSR)\n",
+ state.ip, (void *) state.ip);
+ debug_show_held_locks(task ? : current);
}
static void show_last_breaking_event(struct pt_regs *regs)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d6edf45f93b9..629f173f60cd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -30,6 +30,7 @@
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
+#include <asm/pci_insn.h>
#include "entry.h"
/*
@@ -138,9 +139,9 @@ static void early_pgm_check_handler(void)
unsigned long addr;
addr = S390_lowcore.program_old_psw.addr;
- fixup = search_exception_tables(addr);
+ fixup = s390_search_extables(addr);
if (!fixup)
- disabled_wait(0);
+ disabled_wait();
/* Disable low address protection before storing into lowcore. */
__ctl_store(cr0, 0, 0);
cr0_new = cr0 & ~(1UL << 28);
@@ -235,6 +236,7 @@ static __init void detect_machine_facilities(void)
clock_comparator_max = -1ULL >> 1;
__ctl_set_bit(0, 53);
}
+ enable_mio_ctl();
}
static inline void save_vector_registers(void)
@@ -296,7 +298,7 @@ static void __init check_image_bootable(void)
sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
- disabled_wait(0xbadb007);
+ disabled_wait();
}
void __init startup_init(void)
@@ -309,7 +311,6 @@ void __init startup_init(void)
setup_facility_list();
detect_machine_type();
setup_arch_string();
- ipl_store_parameters();
setup_boot_command_line();
detect_diag9c();
detect_diag44();
diff --git a/arch/s390/kernel/early_nobss.c b/arch/s390/kernel/early_nobss.c
index 8d73f7fae16e..52a3ef959341 100644
--- a/arch/s390/kernel/early_nobss.c
+++ b/arch/s390/kernel/early_nobss.c
@@ -25,7 +25,7 @@ static void __init reset_tod_clock(void)
return;
/* TOD clock not running. Set the clock to Unix Epoch. */
if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
- disabled_wait(0);
+ disabled_wait();
memset(tod_clock_base, 0, 16);
*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 583d65ef5007..3f4d272577d3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -224,6 +224,7 @@ ENTRY(__bpon)
.globl __bpon
BPON
BR_EX %r14
+ENDPROC(__bpon)
/*
* Scheduler resume function, called by switch_to
@@ -248,6 +249,7 @@ ENTRY(__switch_to)
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
BR_EX %r14
+ENDPROC(__switch_to)
.L__critical_start:
@@ -324,6 +326,7 @@ sie_exit:
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
+ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
@@ -358,19 +361,19 @@ ENTRY(system_call)
# load address of system call table
lg %r10,__THREAD_sysc_table(%r13,%r12)
llgh %r8,__PT_INT_CODE+2(%r11)
- slag %r8,%r8,2 # shift and test for svc 0
+ slag %r8,%r8,3 # shift and test for svc 0
jnz .Lsysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok
sth %r1,__PT_INT_CODE+2(%r11)
- slag %r8,%r1,2
+ slag %r8,%r1,3
.Lsysc_nr_ok:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
- lgf %r9,0(%r8,%r10) # get system call add.
+ lg %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
BASR_EX %r14,%r9 # call sys_xxxx
@@ -556,8 +559,8 @@ ENTRY(system_call)
lghi %r0,NR_syscalls
clgr %r0,%r2
jnh .Lsysc_tracenogo
- sllg %r8,%r2,2
- lgf %r9,0(%r8,%r10)
+ sllg %r8,%r2,3
+ lg %r9,0(%r8,%r10)
.Lsysc_tracego:
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
@@ -570,6 +573,7 @@ ENTRY(system_call)
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,.Lsysc_return
jg do_syscall_trace_exit
+ENDPROC(system_call)
#
# a new process exits the kernel with ret_from_fork
@@ -584,10 +588,16 @@ ENTRY(ret_from_fork)
jne .Lsysc_tracenogo
# it's a kernel thread
lmg %r9,%r10,__PT_R9(%r11) # load gprs
+ la %r2,0(%r10)
+ BASR_EX %r14,%r9
+ j .Lsysc_tracenogo
+ENDPROC(ret_from_fork)
+
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
BASR_EX %r14,%r9
j .Lsysc_tracenogo
+ENDPROC(kernel_thread_starter)
/*
* Program check handler routine
@@ -665,9 +675,9 @@ ENTRY(pgm_check_handler)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
nill %r10,0x007f
- sll %r10,2
+ sll %r10,3
je .Lpgm_return
- lgf %r9,0(%r10,%r1) # load address of handler routine
+ lg %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
@@ -698,6 +708,7 @@ ENTRY(pgm_check_handler)
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
+ENDPROC(pgm_check_handler)
/*
* IO interrupt handler routine
@@ -926,6 +937,7 @@ ENTRY(io_int_handler)
ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j .Lio_return
+ENDPROC(io_int_handler)
/*
* External interrupt handler routine
@@ -965,6 +977,7 @@ ENTRY(ext_int_handler)
lghi %r3,EXT_INTERRUPT
brasl %r14,do_IRQ
j .Lio_return
+ENDPROC(ext_int_handler)
/*
* Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
@@ -989,6 +1002,7 @@ ENTRY(psw_idle)
lpswe __SF_EMPTY(%r15)
BR_EX %r14
.Lpsw_idle_end:
+ENDPROC(psw_idle)
/*
* Store floating-point controls and floating-point or vector register
@@ -1031,6 +1045,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_exit:
BR_EX %r14
.Lsave_fpu_regs_end:
+ENDPROC(save_fpu_regs)
EXPORT_SYMBOL(save_fpu_regs)
/*
@@ -1077,6 +1092,7 @@ load_fpu_regs:
.Lload_fpu_regs_exit:
BR_EX %r14
.Lload_fpu_regs_end:
+ENDPROC(load_fpu_regs)
.L__critical_end:
@@ -1206,6 +1222,7 @@ ENTRY(mcck_int_handler)
lg %r15,__LC_NODAT_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15)
j .Lmcck_skip
+ENDPROC(mcck_int_handler)
#
# PSW restart interrupt handler
@@ -1232,6 +1249,7 @@ ENTRY(restart_int_handler)
2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
+ENDPROC(restart_int_handler)
.section .kprobes.text, "ax"
@@ -1241,7 +1259,7 @@ ENTRY(restart_int_handler)
* No need to properly save the registers, we are going to panic anyway.
* Setup a pt_regs so that show_trace can provide a good call trace.
*/
-stack_overflow:
+ENTRY(stack_overflow)
lg %r15,__LC_NODAT_STACK # change to panic stack
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
@@ -1251,9 +1269,10 @@ stack_overflow:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
jg kernel_stack_overflow
+ENDPROC(stack_overflow)
#endif
-cleanup_critical:
+ENTRY(cleanup_critical)
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@@ -1289,6 +1308,7 @@ cleanup_critical:
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
0: BR_EX %r14,%r11
+ENDPROC(cleanup_critical)
.align 8
.Lcleanup_table:
@@ -1512,7 +1532,7 @@ cleanup_critical:
.quad .Lsie_skip - .Lsie_entry
#endif
.section .rodata, "a"
-#define SYSCALL(esame,emu) .long __s390x_ ## esame
+#define SYSCALL(esame,emu) .quad __s390x_ ## esame
.globl sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
@@ -1520,7 +1540,7 @@ sys_call_table:
#ifdef CONFIG_COMPAT
-#define SYSCALL(esame,emu) .long __s390_ ## emu
+#define SYSCALL(esame,emu) .quad __s390_ ## emu
.globl sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index c3816ae108b0..20420c2b8a14 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -65,7 +65,7 @@ int setup_profiling_timer(unsigned int multiplier);
void __init time_init(void);
int pfn_is_nosave(unsigned long);
void s390_early_resume(void);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);
struct s390_mmap_arg_struct;
struct fadvise64_64_args;
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 39b13d71a8fe..1bb85f60c0dd 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -201,17 +201,18 @@ device_initcall(ftrace_plt_init);
* Hook the return address and push it in the stack of return addresses
* in current thread info.
*/
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
+unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
+ unsigned long ip)
{
if (unlikely(ftrace_graph_is_dead()))
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip -= MCOUNT_INSN_SIZE;
- if (!function_graph_enter(parent, ip, 0, NULL))
- parent = (unsigned long) return_to_handler;
+ if (!function_graph_enter(ra, ip, 0, (void *) sp))
+ ra = (unsigned long) return_to_handler;
out:
- return parent;
+ return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 56491e636eab..5aea1a527443 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,7 +26,6 @@ ENTRY(startup_continue)
0: larl %r1,tod_clock_base
mvc 0(16,%r1),__LC_BOOT_CLOCK
larl %r13,.LPG1 # get base
- lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
larl %r0,boot_vdso_data
stg %r0,__LC_VDSO_PER_CPU
#
@@ -61,22 +60,6 @@ ENTRY(startup_continue)
.align 16
.LPG1:
-.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
- .quad 0 # cr1: primary space segment table
- .quad .Lduct # cr2: dispatchable unit control table
- .quad 0 # cr3: instruction authorization
- .quad 0xffff # cr4: instruction authorization
- .quad .Lduct # cr5: primary-aste origin
- .quad 0 # cr6: I/O interrupts
- .quad 0 # cr7: secondary space segment table
- .quad 0 # cr8: access registers translation
- .quad 0 # cr9: tracing off
- .quad 0 # cr10: tracing off
- .quad 0 # cr11: tracing off
- .quad 0 # cr12: tracing off
- .quad 0 # cr13: home space segment table
- .quad 0xc0000000 # cr14: machine check handling off
- .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -84,14 +67,5 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
- .long 0,0,0,0,0,0,0,0
-.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
- .align 128
-.Lduald:.rept 8
- .long 0x80000000,0,0,0 # invalid access-list entries
- .endr
-.Llinkage_stack:
- .long 0,0,0x89000000,0,0,0,0x8a000000,0
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/ima_arch.c b/arch/s390/kernel/ima_arch.c
new file mode 100644
index 000000000000..f3c3e6e1c5d3
--- /dev/null
+++ b/arch/s390/kernel/ima_arch.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ima.h>
+#include <asm/boot_data.h>
+
+bool arch_ima_get_secureboot(void)
+{
+ return ipl_secure_flag;
+}
+
+const char * const *arch_get_ima_policy(void)
+{
+ return NULL;
+}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 18a5d6317acc..d836af3ccc38 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -31,6 +31,7 @@
#include <asm/os_info.h>
#include <asm/sections.h>
#include <asm/boot_data.h>
+#include <asm/uv.h>
#include "entry.h"
#define IPL_PARM_BLOCK_VERSION 0
@@ -119,11 +120,15 @@ static char *dump_type_str(enum dump_type type)
}
}
-struct ipl_parameter_block __bootdata(early_ipl_block);
-int __bootdata(early_ipl_block_valid);
+int __bootdata_preserved(ipl_block_valid);
+struct ipl_parameter_block __bootdata_preserved(ipl_block);
+int __bootdata_preserved(ipl_secure_flag);
-static int ipl_block_valid;
-static struct ipl_parameter_block ipl_block;
+unsigned long __bootdata_preserved(ipl_cert_list_addr);
+unsigned long __bootdata_preserved(ipl_cert_list_size);
+
+unsigned long __bootdata(early_ipl_comp_list_addr);
+unsigned long __bootdata(early_ipl_comp_list_size);
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
@@ -246,11 +251,11 @@ static __init enum ipl_type get_ipl_type(void)
if (!ipl_block_valid)
return IPL_TYPE_UNKNOWN;
- switch (ipl_block.hdr.pbt) {
- case DIAG308_IPL_TYPE_CCW:
+ switch (ipl_block.pb0_hdr.pbt) {
+ case IPL_PBT_CCW:
return IPL_TYPE_CCW;
- case DIAG308_IPL_TYPE_FCP:
- if (ipl_block.ipl_info.fcp.opt == DIAG308_IPL_OPT_DUMP)
+ case IPL_PBT_FCP:
+ if (ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP)
return IPL_TYPE_FCP_DUMP;
else
return IPL_TYPE_FCP;
@@ -269,12 +274,35 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+static ssize_t ipl_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ return sprintf(page, "%i\n", !!ipl_secure_flag);
+}
+
+static struct kobj_attribute sys_ipl_secure_attr =
+ __ATTR(secure, 0444, ipl_secure_show, NULL);
+
+static ssize_t ipl_has_secure_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ if (MACHINE_IS_LPAR)
+ return sprintf(page, "%i\n", !!sclp.has_sipl);
+ else if (MACHINE_IS_VM)
+ return sprintf(page, "%i\n", !!sclp.has_sipl_g2);
+ else
+ return sprintf(page, "%i\n", 0);
+}
+
+static struct kobj_attribute sys_ipl_has_secure_attr =
+ __ATTR(has_secure, 0444, ipl_has_secure_show, NULL);
+
static ssize_t ipl_vm_parm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
char parm[DIAG308_VMPARM_SIZE + 1] = {};
- if (ipl_block_valid && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+ if (ipl_block_valid && (ipl_block.pb0_hdr.pbt == IPL_PBT_CCW))
ipl_block_get_ascii_vmparm(parm, sizeof(parm), &ipl_block);
return sprintf(page, "%s\n", parm);
}
@@ -287,12 +315,11 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
{
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- return sprintf(page, "0.%x.%04x\n", ipl_block.ipl_info.ccw.ssid,
- ipl_block.ipl_info.ccw.devno);
+ return sprintf(page, "0.%x.%04x\n", ipl_block.ccw.ssid,
+ ipl_block.ccw.devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
- return sprintf(page, "0.0.%04x\n",
- ipl_block.ipl_info.fcp.devno);
+ return sprintf(page, "0.0.%04x\n", ipl_block.fcp.devno);
default:
return 0;
}
@@ -316,8 +343,8 @@ static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
- unsigned int size = ipl_block.ipl_info.fcp.scp_data_len;
- void *scp_data = &ipl_block.ipl_info.fcp.scp_data;
+ unsigned int size = ipl_block.fcp.scp_data_len;
+ void *scp_data = &ipl_block.fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@@ -333,13 +360,13 @@ static struct bin_attribute *ipl_fcp_bin_attrs[] = {
/* FCP ipl device attributes */
DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n",
- (unsigned long long)ipl_block.ipl_info.fcp.wwpn);
+ (unsigned long long)ipl_block.fcp.wwpn);
DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n",
- (unsigned long long)ipl_block.ipl_info.fcp.lun);
+ (unsigned long long)ipl_block.fcp.lun);
DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n",
- (unsigned long long)ipl_block.ipl_info.fcp.bootprog);
+ (unsigned long long)ipl_block.fcp.bootprog);
DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n",
- (unsigned long long)ipl_block.ipl_info.fcp.br_lba);
+ (unsigned long long)ipl_block.fcp.br_lba);
static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
@@ -365,6 +392,8 @@ static struct attribute *ipl_fcp_attrs[] = {
&sys_ipl_fcp_bootprog_attr.attr,
&sys_ipl_fcp_br_lba_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -380,6 +409,8 @@ static struct attribute *ipl_ccw_attrs_vm[] = {
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -387,6 +418,8 @@ static struct attribute *ipl_ccw_attrs_lpar[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_device_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
+ &sys_ipl_secure_attr.attr,
+ &sys_ipl_has_secure_attr.attr,
NULL,
};
@@ -495,14 +528,14 @@ static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
return -EINVAL;
- memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
- ipb->ipl_info.ccw.vm_parm_len = ip_len;
+ memset(ipb->ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
+ ipb->ccw.vm_parm_len = ip_len;
if (ip_len > 0) {
- ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
- memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
- ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
+ ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
+ memcpy(ipb->ccw.vm_parm, buf, ip_len);
+ ASCEBC(ipb->ccw.vm_parm, ip_len);
} else {
- ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
+ ipb->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_VP;
}
return len;
@@ -549,8 +582,8 @@ static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
- size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len;
- void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data;
+ size_t size = reipl_block_fcp->fcp.scp_data_len;
+ void *scp_data = reipl_block_fcp->fcp.scp_data;
return memory_read_from_buffer(buf, count, &off, scp_data, size);
}
@@ -566,17 +599,17 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
if (off)
return -EINVAL;
- memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
+ memcpy(reipl_block_fcp->fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
- memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
+ memset(reipl_block_fcp->fcp.scp_data + scpdata_len,
0, padding);
scpdata_len += padding;
}
- reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len;
- reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len;
- reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len;
+ reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
+ reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len;
+ reipl_block_fcp->fcp.scp_data_len = scpdata_len;
return count;
}
@@ -590,20 +623,20 @@ static struct bin_attribute *reipl_fcp_bin_attrs[] = {
};
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
- reipl_block_fcp->ipl_info.fcp.wwpn);
+ reipl_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
- reipl_block_fcp->ipl_info.fcp.lun);
+ reipl_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
- reipl_block_fcp->ipl_info.fcp.bootprog);
+ reipl_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
- reipl_block_fcp->ipl_info.fcp.br_lba);
+ reipl_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_fcp->ipl_info.fcp.devno);
+ reipl_block_fcp->fcp.devno);
static void reipl_get_ascii_loadparm(char *loadparm,
struct ipl_parameter_block *ibp)
{
- memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN);
+ memcpy(loadparm, ibp->common.loadparm, LOADPARM_LEN);
EBCASC(loadparm, LOADPARM_LEN);
loadparm[LOADPARM_LEN] = 0;
strim(loadparm);
@@ -638,11 +671,11 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
return -EINVAL;
}
/* initialize loadparm with blanks */
- memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN);
+ memset(ipb->common.loadparm, ' ', LOADPARM_LEN);
/* copy and convert to ebcdic */
- memcpy(ipb->hdr.loadparm, buf, lp_len);
- ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
- ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
+ memcpy(ipb->common.loadparm, buf, lp_len);
+ ASCEBC(ipb->common.loadparm, LOADPARM_LEN);
+ ipb->common.flags |= IPL_PB0_FLAG_LOADPARM;
return len;
}
@@ -680,7 +713,7 @@ static struct attribute_group reipl_fcp_attr_group = {
};
/* CCW reipl device attributes */
-DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
+DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ccw);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -742,7 +775,7 @@ static struct attribute_group reipl_ccw_attr_group_lpar = {
static void reipl_get_ascii_nss_name(char *dst,
struct ipl_parameter_block *ipb)
{
- memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+ memcpy(dst, ipb->ccw.nss_name, NSS_NAME_SIZE);
EBCASC(dst, NSS_NAME_SIZE);
dst[NSS_NAME_SIZE] = 0;
}
@@ -770,16 +803,14 @@ static ssize_t reipl_nss_name_store(struct kobject *kobj,
if (nss_len > NSS_NAME_SIZE)
return -EINVAL;
- memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
+ memset(reipl_block_nss->ccw.nss_name, 0x40, NSS_NAME_SIZE);
if (nss_len > 0) {
- reipl_block_nss->ipl_info.ccw.vm_flags |=
- DIAG308_VM_FLAGS_NSS_VALID;
- memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
- ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
- EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+ reipl_block_nss->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_NSS;
+ memcpy(reipl_block_nss->ccw.nss_name, buf, nss_len);
+ ASCEBC(reipl_block_nss->ccw.nss_name, nss_len);
+ EBC_TOUPPER(reipl_block_nss->ccw.nss_name, nss_len);
} else {
- reipl_block_nss->ipl_info.ccw.vm_flags &=
- ~DIAG308_VM_FLAGS_NSS_VALID;
+ reipl_block_nss->ccw.vm_flags &= ~IPL_PB0_CCW_VM_FLAG_NSS;
}
return len;
@@ -866,15 +897,21 @@ static void __reipl_run(void *unused)
{
switch (reipl_type) {
case IPL_TYPE_CCW:
+ uv_set_shared(__pa(reipl_block_ccw));
diag308(DIAG308_SET, reipl_block_ccw);
+ uv_remove_shared(__pa(reipl_block_ccw));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_FCP:
+ uv_set_shared(__pa(reipl_block_fcp));
diag308(DIAG308_SET, reipl_block_fcp);
+ uv_remove_shared(__pa(reipl_block_fcp));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_NSS:
+ uv_set_shared(__pa(reipl_block_nss));
diag308(DIAG308_SET, reipl_block_nss);
+ uv_remove_shared(__pa(reipl_block_nss));
diag308(DIAG308_LOAD_CLEAR, NULL);
break;
case IPL_TYPE_UNKNOWN:
@@ -883,7 +920,7 @@ static void __reipl_run(void *unused)
case IPL_TYPE_FCP_DUMP:
break;
}
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
static void reipl_run(struct shutdown_trigger *trigger)
@@ -893,10 +930,10 @@ static void reipl_run(struct shutdown_trigger *trigger)
static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
{
- ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ ipb->hdr.len = IPL_BP_CCW_LEN;
ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
- ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ ipb->pb0_hdr.len = IPL_BP0_CCW_LEN;
+ ipb->pb0_hdr.pbt = IPL_PBT_CCW;
}
static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
@@ -904,21 +941,20 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
/* LOADPARM */
/* check if read scp info worked and set loadparm */
if (sclp_ipl_info.is_valid)
- memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
+ memcpy(ipb->ccw.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN);
else
/* read scp info failed: set empty loadparm (EBCDIC blanks) */
- memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN);
- ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
+ memset(ipb->ccw.loadparm, 0x40, LOADPARM_LEN);
+ ipb->ccw.flags = IPL_PB0_FLAG_LOADPARM;
/* VM PARM */
if (MACHINE_IS_VM && ipl_block_valid &&
- (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
+ (ipl_block.ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP)) {
- ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
- ipb->ipl_info.ccw.vm_parm_len =
- ipl_block.ipl_info.ccw.vm_parm_len;
- memcpy(ipb->ipl_info.ccw.vm_parm,
- ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
+ ipb->ccw.vm_flags |= IPL_PB0_CCW_VM_FLAG_VP;
+ ipb->ccw.vm_parm_len = ipl_block.ccw.vm_parm_len;
+ memcpy(ipb->ccw.vm_parm,
+ ipl_block.ccw.vm_parm, DIAG308_VMPARM_SIZE);
}
}
@@ -958,8 +994,8 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
- reipl_block_ccw->ipl_info.ccw.ssid = ipl_block.ipl_info.ccw.ssid;
- reipl_block_ccw->ipl_info.ccw.devno = ipl_block.ipl_info.ccw.devno;
+ reipl_block_ccw->ccw.ssid = ipl_block.ccw.ssid;
+ reipl_block_ccw->ccw.devno = ipl_block.ccw.devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@@ -997,14 +1033,14 @@ static int __init reipl_fcp_init(void)
* is invalid in the SCSI IPL parameter block, so take it
* always from sclp_ipl_info.
*/
- memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm,
+ memcpy(reipl_block_fcp->fcp.loadparm, sclp_ipl_info.loadparm,
LOADPARM_LEN);
} else {
- reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN;
reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
- reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
- reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
- reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
+ reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
+ reipl_block_fcp->fcp.pbt = IPL_PBT_FCP;
+ reipl_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_IPL;
}
reipl_capabilities |= IPL_TYPE_FCP;
return 0;
@@ -1022,10 +1058,10 @@ static int __init reipl_type_init(void)
/*
* If we have an OS info reipl block, this will be used
*/
- if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_FCP) {
+ if (reipl_block->pb0_hdr.pbt == IPL_PBT_FCP) {
memcpy(reipl_block_fcp, reipl_block, size);
reipl_type = IPL_TYPE_FCP;
- } else if (reipl_block->hdr.pbt == DIAG308_IPL_TYPE_CCW) {
+ } else if (reipl_block->pb0_hdr.pbt == IPL_PBT_CCW) {
memcpy(reipl_block_ccw, reipl_block, size);
reipl_type = IPL_TYPE_CCW;
}
@@ -1070,15 +1106,15 @@ static struct shutdown_action __refdata reipl_action = {
/* FCP dump device attributes */
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
- dump_block_fcp->ipl_info.fcp.wwpn);
+ dump_block_fcp->fcp.wwpn);
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
- dump_block_fcp->ipl_info.fcp.lun);
+ dump_block_fcp->fcp.lun);
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.bootprog);
+ dump_block_fcp->fcp.bootprog);
DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
- dump_block_fcp->ipl_info.fcp.br_lba);
+ dump_block_fcp->fcp.br_lba);
DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_fcp->ipl_info.fcp.devno);
+ dump_block_fcp->fcp.devno);
static struct attribute *dump_fcp_attrs[] = {
&sys_dump_fcp_device_attr.attr,
@@ -1095,7 +1131,7 @@ static struct attribute_group dump_fcp_attr_group = {
};
/* CCW dump device attributes */
-DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
+DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
@@ -1145,7 +1181,9 @@ static struct kset *dump_kset;
static void diag308_dump(void *dump_block)
{
+ uv_set_shared(__pa(dump_block));
diag308(DIAG308_SET, dump_block);
+ uv_remove_shared(__pa(dump_block));
while (1) {
if (diag308(DIAG308_LOAD_NORMAL_DUMP, NULL) != 0x302)
break;
@@ -1187,10 +1225,10 @@ static int __init dump_ccw_init(void)
free_page((unsigned long)dump_block_ccw);
return rc;
}
- dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ dump_block_ccw->hdr.len = IPL_BP_CCW_LEN;
dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
- dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ dump_block_ccw->ccw.len = IPL_BP0_CCW_LEN;
+ dump_block_ccw->ccw.pbt = IPL_PBT_CCW;
dump_capabilities |= DUMP_TYPE_CCW;
return 0;
}
@@ -1209,11 +1247,11 @@ static int __init dump_fcp_init(void)
free_page((unsigned long)dump_block_fcp);
return rc;
}
- dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ dump_block_fcp->hdr.len = IPL_BP_FCP_LEN;
dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN;
- dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
- dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
+ dump_block_fcp->fcp.len = IPL_BP0_FCP_LEN;
+ dump_block_fcp->fcp.pbt = IPL_PBT_FCP;
+ dump_block_fcp->fcp.opt = IPL_PB0_FCP_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_FCP;
return 0;
}
@@ -1337,7 +1375,7 @@ static void stop_run(struct shutdown_trigger *trigger)
{
if (strcmp(trigger->name, ON_PANIC_STR) == 0 ||
strcmp(trigger->name, ON_RESTART_STR) == 0)
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
smp_stop_cpu();
}
@@ -1572,7 +1610,7 @@ static int __init s390_ipl_init(void)
* READ SCP info provides the correct value.
*/
if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && ipl_block_valid)
- memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, LOADPARM_LEN);
+ memcpy(sclp_ipl_info.loadparm, ipl_block.ccw.loadparm, LOADPARM_LEN);
shutdown_actions_init();
shutdown_triggers_init();
return 0;
@@ -1657,15 +1695,15 @@ void __init setup_ipl(void)
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- ipl_info.data.ccw.dev_id.ssid = ipl_block.ipl_info.ccw.ssid;
- ipl_info.data.ccw.dev_id.devno = ipl_block.ipl_info.ccw.devno;
+ ipl_info.data.ccw.dev_id.ssid = ipl_block.ccw.ssid;
+ ipl_info.data.ccw.dev_id.devno = ipl_block.ccw.devno;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
ipl_info.data.fcp.dev_id.ssid = 0;
- ipl_info.data.fcp.dev_id.devno = ipl_block.ipl_info.fcp.devno;
- ipl_info.data.fcp.wwpn = ipl_block.ipl_info.fcp.wwpn;
- ipl_info.data.fcp.lun = ipl_block.ipl_info.fcp.lun;
+ ipl_info.data.fcp.dev_id.devno = ipl_block.fcp.devno;
+ ipl_info.data.fcp.wwpn = ipl_block.fcp.wwpn;
+ ipl_info.data.fcp.lun = ipl_block.fcp.lun;
break;
case IPL_TYPE_NSS:
case IPL_TYPE_UNKNOWN:
@@ -1675,14 +1713,6 @@ void __init setup_ipl(void)
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
}
-void __init ipl_store_parameters(void)
-{
- if (early_ipl_block_valid) {
- memcpy(&ipl_block, &early_ipl_block, sizeof(ipl_block));
- ipl_block_valid = 1;
- }
-}
-
void s390_reset_system(void)
{
/* Disable prefixing */
@@ -1690,5 +1720,139 @@ void s390_reset_system(void)
/* Disable lowcore protection */
__ctl_clear_bit(0, 28);
- diag308_reset();
+ diag_dma_ops.diag308_reset();
+}
+
+#ifdef CONFIG_KEXEC_FILE
+
+int ipl_report_add_component(struct ipl_report *report, struct kexec_buf *kbuf,
+ unsigned char flags, unsigned short cert)
+{
+ struct ipl_report_component *comp;
+
+ comp = vzalloc(sizeof(*comp));
+ if (!comp)
+ return -ENOMEM;
+ list_add_tail(&comp->list, &report->components);
+
+ comp->entry.addr = kbuf->mem;
+ comp->entry.len = kbuf->memsz;
+ comp->entry.flags = flags;
+ comp->entry.certificate_index = cert;
+
+ report->size += sizeof(comp->entry);
+
+ return 0;
+}
+
+int ipl_report_add_certificate(struct ipl_report *report, void *key,
+ unsigned long addr, unsigned long len)
+{
+ struct ipl_report_certificate *cert;
+
+ cert = vzalloc(sizeof(*cert));
+ if (!cert)
+ return -ENOMEM;
+ list_add_tail(&cert->list, &report->certificates);
+
+ cert->entry.addr = addr;
+ cert->entry.len = len;
+ cert->key = key;
+
+ report->size += sizeof(cert->entry);
+ report->size += cert->entry.len;
+
+ return 0;
+}
+
+struct ipl_report *ipl_report_init(struct ipl_parameter_block *ipib)
+{
+ struct ipl_report *report;
+
+ report = vzalloc(sizeof(*report));
+ if (!report)
+ return ERR_PTR(-ENOMEM);
+
+ report->ipib = ipib;
+ INIT_LIST_HEAD(&report->components);
+ INIT_LIST_HEAD(&report->certificates);
+
+ report->size = ALIGN(ipib->hdr.len, 8);
+ report->size += sizeof(struct ipl_rl_hdr);
+ report->size += sizeof(struct ipl_rb_components);
+ report->size += sizeof(struct ipl_rb_certificates);
+
+ return report;
+}
+
+void *ipl_report_finish(struct ipl_report *report)
+{
+ struct ipl_report_certificate *cert;
+ struct ipl_report_component *comp;
+ struct ipl_rb_certificates *certs;
+ struct ipl_parameter_block *ipib;
+ struct ipl_rb_components *comps;
+ struct ipl_rl_hdr *rl_hdr;
+ void *buf, *ptr;
+
+ buf = vzalloc(report->size);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ ptr = buf;
+
+ memcpy(ptr, report->ipib, report->ipib->hdr.len);
+ ipib = ptr;
+ if (ipl_secure_flag)
+ ipib->hdr.flags |= IPL_PL_FLAG_SIPL;
+ ipib->hdr.flags |= IPL_PL_FLAG_IPLSR;
+ ptr += report->ipib->hdr.len;
+ ptr = PTR_ALIGN(ptr, 8);
+
+ rl_hdr = ptr;
+ ptr += sizeof(*rl_hdr);
+
+ comps = ptr;
+ comps->rbt = IPL_RBT_COMPONENTS;
+ ptr += sizeof(*comps);
+ list_for_each_entry(comp, &report->components, list) {
+ memcpy(ptr, &comp->entry, sizeof(comp->entry));
+ ptr += sizeof(comp->entry);
+ }
+ comps->len = ptr - (void *)comps;
+
+ certs = ptr;
+ certs->rbt = IPL_RBT_CERTIFICATES;
+ ptr += sizeof(*certs);
+ list_for_each_entry(cert, &report->certificates, list) {
+ memcpy(ptr, &cert->entry, sizeof(cert->entry));
+ ptr += sizeof(cert->entry);
+ }
+ certs->len = ptr - (void *)certs;
+ rl_hdr->len = ptr - (void *)rl_hdr;
+
+ list_for_each_entry(cert, &report->certificates, list) {
+ memcpy(ptr, cert->key, cert->entry.len);
+ ptr += cert->entry.len;
+ }
+
+ BUG_ON(ptr > buf + report->size);
+ return buf;
+}
+
+int ipl_report_free(struct ipl_report *report)
+{
+ struct ipl_report_component *comp, *ncomp;
+ struct ipl_report_certificate *cert, *ncert;
+
+ list_for_each_entry_safe(comp, ncomp, &report->components, list)
+ vfree(comp);
+
+ list_for_each_entry_safe(cert, ncert, &report->certificates, list)
+ vfree(cert);
+
+ vfree(report);
+
+ return 0;
}
+
+#endif
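
Note: the ipl_report_* helpers above use a two-pass scheme. ipl_report_init() and the add_component()/add_certificate() calls only account for sizes, and ipl_report_finish() then packs the parameter block, the report-list header, the fixed-size component and certificate entries, and finally the raw certificate bytes into one flat buffer. A rough userspace sketch of that accumulate-then-pack pattern (the structures and layout here are made up for illustration and are not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct comp_entry { uint64_t addr, len; };	/* stand-in for a fixed-size entry */

int main(void)
{
	struct comp_entry comps[2] = { { 0x10000, 0x8000 }, { 0x20000, 0x400 } };
	const char cert[] = "certificate-bytes";
	uint8_t *buf, *p;
	size_t size;

	/* Pass 1: add up header, fixed-size entries and trailing blob data. */
	size = sizeof(uint32_t);
	size += sizeof(comps);
	size += sizeof(cert);

	/* Pass 2: pack everything into one flat buffer, variable data last. */
	buf = calloc(1, size);
	if (!buf)
		return 1;
	p = buf;
	*(uint32_t *)p = (uint32_t)size;	/* header carries the total size */
	p += sizeof(uint32_t);
	memcpy(p, comps, sizeof(comps));	/* fixed-size entries first */
	p += sizeof(comps);
	memcpy(p, cert, sizeof(cert));		/* raw blob data last */
	p += sizeof(cert);

	printf("report: %zu bytes, %zu used\n", size, (size_t)(p - buf));
	free(buf);
	return 0;
}
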
diff --git a/arch/s390/kernel/ipl_vmparm.c b/arch/s390/kernel/ipl_vmparm.c
index 411838c0a0af..af43535a976d 100644
--- a/arch/s390/kernel/ipl_vmparm.c
+++ b/arch/s390/kernel/ipl_vmparm.c
@@ -11,11 +11,11 @@ size_t ipl_block_get_ascii_vmparm(char *dest, size_t size,
char has_lowercase = 0;
len = 0;
- if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
- (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+ if ((ipb->ccw.vm_flags & IPL_PB0_CCW_VM_FLAG_VP) &&
+ (ipb->ccw.vm_parm_len > 0)) {
- len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len);
- memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+ len = min_t(size_t, size - 1, ipb->ccw.vm_parm_len);
+ memcpy(dest, ipb->ccw.vm_parm, len);
/* If at least one character is lowercase, we assume mixed
* case; otherwise we convert everything to lowercase.
*/
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 0cd5a5f96729..8371855042dc 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -26,6 +26,7 @@
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include <asm/stacktrace.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -73,7 +74,6 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
- {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
@@ -81,14 +81,16 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
- {.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
- {.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
- {.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
- {.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
- {.irq = IRQIO_GAL, .name = "GAL", .desc = "[I/O] GIB Alert"},
+ {.irq = IRQIO_QAI, .name = "QAI", .desc = "[AIO] QDIO Adapter Interrupt"},
+ {.irq = IRQIO_APB, .name = "APB", .desc = "[AIO] AP Bus"},
+ {.irq = IRQIO_PCF, .name = "PCF", .desc = "[AIO] PCI Floating Interrupt"},
+ {.irq = IRQIO_PCD, .name = "PCD", .desc = "[AIO] PCI Directed Interrupt"},
+ {.irq = IRQIO_MSI, .name = "MSI", .desc = "[AIO] MSI Interrupt"},
+ {.irq = IRQIO_VAI, .name = "VAI", .desc = "[AIO] Virtual I/O Devices AI"},
+ {.irq = IRQIO_GAL, .name = "GAL", .desc = "[AIO] GIB Alert"},
{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
};
@@ -116,6 +118,34 @@ void do_IRQ(struct pt_regs *regs, int irq)
set_irq_regs(old_regs);
}
+static void show_msi_interrupt(struct seq_file *p, int irq)
+{
+ struct irq_desc *desc;
+ unsigned long flags;
+ int cpu;
+
+ irq_lock_sparse();
+ desc = irq_to_desc(irq);
+ if (!desc)
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ seq_printf(p, "%3d: ", irq);
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+
+ if (desc->irq_data.chip)
+ seq_printf(p, " %8s", desc->irq_data.chip->name);
+
+ if (desc->action)
+ seq_printf(p, " %s", desc->action->name);
+
+ seq_putc(p, '\n');
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+out:
+ irq_unlock_sparse();
+}
+
/*
* show_interrupts is needed by /proc/interrupts.
*/
@@ -128,7 +158,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (index == 0) {
seq_puts(p, " ");
for_each_online_cpu(cpu)
- seq_printf(p, "CPU%d ", cpu);
+ seq_printf(p, "CPU%-8d", cpu);
seq_putc(p, '\n');
}
if (index < NR_IRQS_BASE) {
@@ -139,9 +169,10 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
goto out;
}
- if (index > NR_IRQS_BASE)
+ if (index < nr_irqs) {
+ show_msi_interrupt(p, index);
goto out;
-
+ }
for (index = 0; index < NR_ARCH_IRQS; index++) {
seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
irq = irqclass_sub_desc[index].irq;
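
The reworked show_interrupts() now prints three ranges: the NR_IRQS_BASE lines, then every sparse irq descriptor below nr_irqs via show_msi_interrupt(), then the per-class counters from irqclass_sub_desc[]. The header format was changed to "CPU%-8d" so that each header cell is 11 characters wide, matching the "%10u " used for the counter values. A small standalone demo of that column alignment (plain printf instead of seq_printf, made-up numbers):

#include <stdio.h>

int main(void)
{
	int cpu;

	/* Header: "CPU" plus the id left-justified in 8 columns = 11 chars/cpu */
	printf("     ");
	for (cpu = 0; cpu < 3; cpu++)
		printf("CPU%-8d", cpu);
	putchar('\n');

	/* Counter line: "%10u " is also 11 chars per cpu, so columns line up */
	printf("%3d: ", 42);
	for (cpu = 0; cpu < 3; cpu++)
		printf("%10u ", 12345u * (cpu + 1));
	putchar('\n');
	return 0;
}
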
diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
index 5a286b012043..6d0635ceddd0 100644
--- a/arch/s390/kernel/kexec_elf.c
+++ b/arch/s390/kernel/kexec_elf.c
@@ -10,19 +10,26 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
-static int kexec_file_add_elf_kernel(struct kimage *image,
- struct s390_load_data *data,
- char *kernel, unsigned long kernel_len)
+static int kexec_file_add_kernel_elf(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
+ Elf_Addr entry;
+ void *kernel;
int i, ret;
+ kernel = image->kernel_buf;
ehdr = (Elf_Ehdr *)kernel;
buf.image = image;
+ if (image->type == KEXEC_TYPE_CRASH)
+ entry = STARTUP_KDUMP_OFFSET;
+ else
+ entry = ehdr->e_entry;
phdr = (void *)ehdr + ehdr->e_phoff;
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
@@ -33,30 +40,27 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
buf.bufsz = phdr->p_filesz;
buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
+ if (image->type == KEXEC_TYPE_CRASH)
+ buf.mem += crashk_res.start;
buf.memsz = phdr->p_memsz;
+ data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;
- if (phdr->p_paddr == 0) {
+ if (entry - phdr->p_paddr < phdr->p_memsz) {
data->kernel_buf = buf.buffer;
- data->memsz += STARTUP_NORMAL_OFFSET;
-
- buf.buffer += STARTUP_NORMAL_OFFSET;
- buf.bufsz -= STARTUP_NORMAL_OFFSET;
-
- buf.mem += STARTUP_NORMAL_OFFSET;
- buf.memsz -= STARTUP_NORMAL_OFFSET;
+ data->kernel_mem = buf.mem;
+ data->parm = buf.buffer + PARMAREA;
}
- if (image->type == KEXEC_TYPE_CRASH)
- buf.mem += crashk_res.start;
-
+ ipl_report_add_component(data->report, &buf,
+ IPL_RB_COMPONENT_FLAG_SIGNED |
+ IPL_RB_COMPONENT_FLAG_VERIFIED,
+ IPL_RB_CERT_UNKNOWN);
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
-
- data->memsz += buf.memsz;
}
- return 0;
+ return data->memsz ? 0 : -EINVAL;
}
static void *s390_elf_load(struct kimage *image,
@@ -64,11 +68,10 @@ static void *s390_elf_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
- struct s390_load_data data = {0};
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
size_t size;
- int i, ret;
+ int i;
/* image->fobs->probe already checked for valid ELF magic number. */
ehdr = (Elf_Ehdr *)kernel;
@@ -101,24 +104,7 @@ static void *s390_elf_load(struct kimage *image,
if (size > kernel_len)
return ERR_PTR(-EINVAL);
- ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len);
- if (ret)
- return ERR_PTR(ret);
-
- if (!data.memsz)
- return ERR_PTR(-EINVAL);
-
- if (initrd) {
- ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = kexec_file_add_purgatory(image, &data);
- if (ret)
- return ERR_PTR(ret);
-
- return kexec_file_update_kernel(image, &data);
+ return kexec_file_add_components(image, kexec_file_add_kernel_elf);
}
static int s390_elf_probe(const char *buf, unsigned long len)
@@ -144,4 +130,7 @@ static int s390_elf_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_elf_ops = {
.probe = s390_elf_probe,
.load = s390_elf_load,
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+ .verify_sig = s390_verify_sig,
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
};
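
kexec_file_add_kernel_elf() now finds the segment holding the entry point with "entry - phdr->p_paddr < phdr->p_memsz", a single unsigned comparison that is true exactly when p_paddr <= entry < p_paddr + p_memsz, because the subtraction wraps to a huge value for addresses below the segment start. A minimal standalone demonstration of that range check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True iff addr lies in [start, start + len): a single unsigned compare,
 * because (addr - start) wraps to a huge value when addr < start. */
static bool in_segment(uint64_t addr, uint64_t start, uint64_t len)
{
	return addr - start < len;
}

int main(void)
{
	printf("%d\n", in_segment(0x10016, 0x10000, 0x1000)); /* 1: inside   */
	printf("%d\n", in_segment(0x0fff0, 0x10000, 0x1000)); /* 0: below    */
	printf("%d\n", in_segment(0x11000, 0x10000, 0x1000)); /* 0: past end */
	return 0;
}
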
diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c
index 3800852595e8..58318bf89fd9 100644
--- a/arch/s390/kernel/kexec_image.c
+++ b/arch/s390/kernel/kexec_image.c
@@ -10,31 +10,34 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
-static int kexec_file_add_image_kernel(struct kimage *image,
- struct s390_load_data *data,
- char *kernel, unsigned long kernel_len)
+static int kexec_file_add_kernel_image(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
- int ret;
buf.image = image;
- buf.buffer = kernel + STARTUP_NORMAL_OFFSET;
- buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET;
+ buf.buffer = image->kernel_buf;
+ buf.bufsz = image->kernel_buf_len;
- buf.mem = STARTUP_NORMAL_OFFSET;
+ buf.mem = 0;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
- ret = kexec_add_buffer(&buf);
+ data->kernel_buf = image->kernel_buf;
+ data->kernel_mem = buf.mem;
+ data->parm = image->kernel_buf + PARMAREA;
+ data->memsz += buf.memsz;
- data->kernel_buf = kernel;
- data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET;
-
- return ret;
+ ipl_report_add_component(data->report, &buf,
+ IPL_RB_COMPONENT_FLAG_SIGNED |
+ IPL_RB_COMPONENT_FLAG_VERIFIED,
+ IPL_RB_CERT_UNKNOWN);
+ return kexec_add_buffer(&buf);
}
static void *s390_image_load(struct kimage *image,
@@ -42,24 +45,7 @@ static void *s390_image_load(struct kimage *image,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
- struct s390_load_data data = {0};
- int ret;
-
- ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len);
- if (ret)
- return ERR_PTR(ret);
-
- if (initrd) {
- ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = kexec_file_add_purgatory(image, &data);
- if (ret)
- return ERR_PTR(ret);
-
- return kexec_file_update_kernel(image, &data);
+ return kexec_file_add_components(image, kexec_file_add_kernel_image);
}
static int s390_image_probe(const char *buf, unsigned long len)
@@ -73,4 +59,7 @@ static int s390_image_probe(const char *buf, unsigned long len)
const struct kexec_file_ops s390_kexec_image_ops = {
.probe = s390_image_probe,
.load = s390_image_load,
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+ .verify_sig = s390_verify_sig,
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
};
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 7c0a095e9c5f..6f1388391620 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -27,29 +27,30 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
-DEFINE_INSN_CACHE_OPS(dmainsn);
+DEFINE_INSN_CACHE_OPS(s390_insn);
-static void *alloc_dmainsn_page(void)
-{
- void *page;
+static int insn_page_in_use;
+static char insn_page[PAGE_SIZE] __aligned(PAGE_SIZE);
- page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (page)
- set_memory_x((unsigned long) page, 1);
- return page;
+static void *alloc_s390_insn_page(void)
+{
+ if (xchg(&insn_page_in_use, 1) == 1)
+ return NULL;
+ set_memory_x((unsigned long) &insn_page, 1);
+ return &insn_page;
}
-static void free_dmainsn_page(void *page)
+static void free_s390_insn_page(void *page)
{
set_memory_nx((unsigned long) page, 1);
- free_page((unsigned long)page);
+ xchg(&insn_page_in_use, 0);
}
-struct kprobe_insn_cache kprobe_dmainsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
- .alloc = alloc_dmainsn_page,
- .free = free_dmainsn_page,
- .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+struct kprobe_insn_cache kprobe_s390_insn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex),
+ .alloc = alloc_s390_insn_page,
+ .free = free_s390_insn_page,
+ .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
};
@@ -102,7 +103,7 @@ static int s390_get_insn_slot(struct kprobe *p)
*/
p->ainsn.insn = NULL;
if (is_kernel_addr(p->addr))
- p->ainsn.insn = get_dmainsn_slot();
+ p->ainsn.insn = get_s390_insn_slot();
else if (is_module_addr(p->addr))
p->ainsn.insn = get_insn_slot();
return p->ainsn.insn ? 0 : -ENOMEM;
@@ -114,7 +115,7 @@ static void s390_free_insn_slot(struct kprobe *p)
if (!p->ainsn.insn)
return;
if (is_kernel_addr(p->addr))
- free_dmainsn_slot(p->ainsn.insn, 0);
+ free_s390_insn_slot(p->ainsn.insn, 0);
else
free_insn_slot(p->ainsn.insn, 0);
p->ainsn.insn = NULL;
@@ -572,7 +573,7 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
- entry = search_exception_tables(regs->psw.addr);
+ entry = s390_search_extables(regs->psw.addr);
if (entry) {
regs->psw.addr = extable_fixup(entry);
return 1;
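
The kprobes change above replaces the GFP_DMA page allocator with one statically allocated, page-aligned slot that is handed out at most once; xchg() on the in-use flag makes allocation race-free without a lock. A comparable userspace sketch using C11 atomics (names and the PAGE_SIZE value are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static _Atomic int insn_page_in_use;
static _Alignas(PAGE_SIZE) char insn_page[PAGE_SIZE];

/* Hand out the single static page at most once until it is freed again. */
static void *alloc_one_page(void)
{
	if (atomic_exchange(&insn_page_in_use, 1) == 1)
		return NULL;			/* already taken */
	return insn_page;
}

static void free_one_page(void *page)
{
	(void)page;
	atomic_exchange(&insn_page_in_use, 0);
}

int main(void)
{
	void *a = alloc_one_page();
	void *b = alloc_one_page();		/* NULL: only one slot exists */

	printf("%p %p\n", a, b);
	free_one_page(a);
	printf("%p\n", alloc_one_page());	/* available again */
	return 0;
}
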
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index cb582649aba6..8a1ae140c5e2 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -27,6 +27,7 @@
#include <asm/cacheflush.h>
#include <asm/os_info.h>
#include <asm/set_memory.h>
+#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
@@ -95,7 +96,7 @@ static void __do_machine_kdump(void *image)
start_kdump(1);
/* Die if start_kdump returns */
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
/*
@@ -253,6 +254,9 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
+ vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
+ vmcoreinfo_append_str("EDMA=%lx\n", __edma);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}
void machine_shutdown(void)
@@ -280,7 +284,7 @@ static void __do_machine_kexec(void *data)
(*data_mover)(&image->head, image->start);
/* Die if kexec returns */
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
}
/*
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 32023b4f9dc0..fbdd3ea73667 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -8,7 +8,12 @@
*/
#include <linux/elf.h>
+#include <linux/errno.h>
#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/verification.h>
+#include <asm/boot_data.h>
+#include <asm/ipl.h>
#include <asm/setup.h>
const struct kexec_file_ops * const kexec_file_loaders[] = {
@@ -17,38 +22,78 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
NULL,
};
-int *kexec_file_update_kernel(struct kimage *image,
- struct s390_load_data *data)
-{
- unsigned long *loc;
-
- if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE)
- return ERR_PTR(-EINVAL);
-
- if (image->cmdline_buf_len)
- memcpy(data->kernel_buf + COMMAND_LINE_OFFSET,
- image->cmdline_buf, image->cmdline_buf_len);
-
- if (image->type == KEXEC_TYPE_CRASH) {
- loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET);
- *loc = crashk_res.start;
-
- loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET);
- *loc = crashk_res.end - crashk_res.start + 1;
- }
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+/*
+ * Module signature information block.
+ *
+ * The constituents of the signature section are, in order:
+ *
+ * - Signer's name
+ * - Key identifier
+ * - Signature data
+ * - Information block
+ */
+struct module_signature {
+ u8 algo; /* Public-key crypto algorithm [0] */
+ u8 hash; /* Digest algorithm [0] */
+ u8 id_type; /* Key identifier type [PKEY_ID_PKCS7] */
+ u8 signer_len; /* Length of signer's name [0] */
+ u8 key_id_len; /* Length of key identifier [0] */
+ u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
+};
- if (image->initrd_buf) {
- loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET);
- *loc = data->initrd_load_addr;
+#define PKEY_ID_PKCS7 2
- loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET);
- *loc = image->initrd_buf_len;
+int s390_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+ const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
+ struct module_signature *ms;
+ unsigned long sig_len;
+
+ /* Skip signature verification when not secure IPLed. */
+ if (!ipl_secure_flag)
+ return 0;
+
+ if (marker_len > kernel_len)
+ return -EKEYREJECTED;
+
+ if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
+ marker_len))
+ return -EKEYREJECTED;
+ kernel_len -= marker_len;
+
+ ms = (void *)kernel + kernel_len - sizeof(*ms);
+ kernel_len -= sizeof(*ms);
+
+ sig_len = be32_to_cpu(ms->sig_len);
+ if (sig_len >= kernel_len)
+ return -EKEYREJECTED;
+ kernel_len -= sig_len;
+
+ if (ms->id_type != PKEY_ID_PKCS7)
+ return -EKEYREJECTED;
+
+ if (ms->algo != 0 ||
+ ms->hash != 0 ||
+ ms->signer_len != 0 ||
+ ms->key_id_len != 0 ||
+ ms->__pad[0] != 0 ||
+ ms->__pad[1] != 0 ||
+ ms->__pad[2] != 0) {
+ return -EBADMSG;
}
- return NULL;
+ return verify_pkcs7_signature(kernel, kernel_len,
+ kernel + kernel_len, sig_len,
+ VERIFY_USE_PLATFORM_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
+ NULL, NULL);
}
+#endif /* CONFIG_KEXEC_VERIFY_SIG */
-static int kexec_file_update_purgatory(struct kimage *image)
+static int kexec_file_update_purgatory(struct kimage *image,
+ struct s390_load_data *data)
{
u64 entry, type;
int ret;
@@ -90,7 +135,8 @@ static int kexec_file_update_purgatory(struct kimage *image)
return ret;
}
-int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
+static int kexec_file_add_purgatory(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
@@ -105,21 +151,21 @@ int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
ret = kexec_load_purgatory(image, &buf);
if (ret)
return ret;
+ data->memsz += buf.memsz;
- ret = kexec_file_update_purgatory(image);
- return ret;
+ return kexec_file_update_purgatory(image, data);
}
-int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
- char *initrd, unsigned long initrd_len)
+static int kexec_file_add_initrd(struct kimage *image,
+ struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
- buf.buffer = initrd;
- buf.bufsz = initrd_len;
+ buf.buffer = image->initrd_buf;
+ buf.bufsz = image->initrd_buf_len;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
@@ -127,11 +173,115 @@ int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
- data->initrd_load_addr = buf.mem;
+ data->parm->initrd_start = buf.mem;
+ data->parm->initrd_size = buf.memsz;
data->memsz += buf.memsz;
ret = kexec_add_buffer(&buf);
- return ret;
+ if (ret)
+ return ret;
+
+ return ipl_report_add_component(data->report, &buf, 0, 0);
+}
+
+static int kexec_file_add_ipl_report(struct kimage *image,
+ struct s390_load_data *data)
+{
+ __u32 *lc_ipl_parmblock_ptr;
+ unsigned int len, ncerts;
+ struct kexec_buf buf;
+ unsigned long addr;
+ void *ptr, *end;
+
+ buf.image = image;
+
+ data->memsz = ALIGN(data->memsz, PAGE_SIZE);
+ buf.mem = data->memsz;
+ if (image->type == KEXEC_TYPE_CRASH)
+ buf.mem += crashk_res.start;
+
+ ptr = (void *)ipl_cert_list_addr;
+ end = ptr + ipl_cert_list_size;
+ ncerts = 0;
+ while (ptr < end) {
+ ncerts++;
+ len = *(unsigned int *)ptr;
+ ptr += sizeof(len);
+ ptr += len;
+ }
+
+ addr = data->memsz + data->report->size;
+ addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
+ ptr = (void *)ipl_cert_list_addr;
+ while (ptr < end) {
+ len = *(unsigned int *)ptr;
+ ptr += sizeof(len);
+ ipl_report_add_certificate(data->report, ptr, addr, len);
+ addr += len;
+ ptr += len;
+ }
+
+ buf.buffer = ipl_report_finish(data->report);
+ buf.bufsz = data->report->size;
+ buf.memsz = buf.bufsz;
+
+ data->memsz += buf.memsz;
+
+ lc_ipl_parmblock_ptr =
+ data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
+ *lc_ipl_parmblock_ptr = (__u32)buf.mem;
+
+ return kexec_add_buffer(&buf);
+}
+
+void *kexec_file_add_components(struct kimage *image,
+ int (*add_kernel)(struct kimage *image,
+ struct s390_load_data *data))
+{
+ struct s390_load_data data = {0};
+ int ret;
+
+ data.report = ipl_report_init(&ipl_block);
+ if (IS_ERR(data.report))
+ return data.report;
+
+ ret = add_kernel(image, &data);
+ if (ret)
+ goto out;
+
+ if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ memcpy(data.parm->command_line, image->cmdline_buf,
+ image->cmdline_buf_len);
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ data.parm->oldmem_base = crashk_res.start;
+ data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
+ }
+
+ if (image->initrd_buf) {
+ ret = kexec_file_add_initrd(image, &data);
+ if (ret)
+ goto out;
+ }
+
+ ret = kexec_file_add_purgatory(image, &data);
+ if (ret)
+ goto out;
+
+ if (data.kernel_mem == 0) {
+ unsigned long restart_psw = 0x0008000080000000UL;
+ restart_psw += image->start;
+ memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
+ image->start = 0;
+ }
+
+ ret = kexec_file_add_ipl_report(image, &data);
+out:
+ ipl_report_free(data.report);
+ return ERR_PTR(ret);
}
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
@@ -140,7 +290,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *symtab)
{
Elf_Rela *relas;
- int i;
+ int i, r_type;
relas = (void *)pi->ehdr + relsec->sh_offset;
@@ -174,46 +324,8 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
addr = section->sh_addr + relas[i].r_offset;
- switch (ELF64_R_TYPE(relas[i].r_info)) {
- case R_390_8: /* Direct 8 bit. */
- *(u8 *)loc = val;
- break;
- case R_390_12: /* Direct 12 bit. */
- *(u16 *)loc &= 0xf000;
- *(u16 *)loc |= val & 0xfff;
- break;
- case R_390_16: /* Direct 16 bit. */
- *(u16 *)loc = val;
- break;
- case R_390_20: /* Direct 20 bit. */
- *(u32 *)loc &= 0xf00000ff;
- *(u32 *)loc |= (val & 0xfff) << 16; /* DL */
- *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
- break;
- case R_390_32: /* Direct 32 bit. */
- *(u32 *)loc = val;
- break;
- case R_390_64: /* Direct 64 bit. */
- *(u64 *)loc = val;
- break;
- case R_390_PC16: /* PC relative 16 bit. */
- *(u16 *)loc = (val - addr);
- break;
- case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
- *(u16 *)loc = (val - addr) >> 1;
- break;
- case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
- *(u32 *)loc = (val - addr) >> 1;
- break;
- case R_390_PC32: /* PC relative 32 bit. */
- *(u32 *)loc = (val - addr);
- break;
- case R_390_PC64: /* PC relative 64 bit. */
- *(u64 *)loc = (val - addr);
- break;
- default:
- break;
- }
+ r_type = ELF64_R_TYPE(relas[i].r_info);
+ arch_kexec_do_relocs(r_type, loc, val, addr);
}
return 0;
}
@@ -225,10 +337,8 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
* load memory in head.S will be accessed, e.g. to register the next
* command line. If the next kernel were smaller the current kernel
* will panic at load.
- *
- * 0x11000 = sizeof(head.S)
*/
- if (buf_len < 0x11000)
+ if (buf_len < HEAD_END)
return -ENOEXEC;
return kexec_image_probe_default(image, buf, buf_len);
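
s390_verify_sig() above parses a module-style appended signature: the image ends with the MODULE_SIG_STRING marker, preceded by a struct module_signature, preceded by the PKCS#7 signature data, and the big-endian sig_len field tells how far to step back to reach the signed payload. A hedged userspace sketch of that trailer parsing follows; SIG_MARKER, sig_trailer and strip_appended_sig are illustrative stand-ins rather than kernel interfaces, and the real code also validates id_type and the zero fields before calling verify_pkcs7_signature():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl/htonl for the big-endian sig_len */

#define SIG_MARKER "~Module signature appended~\n"

struct sig_trailer {
	uint8_t  algo, hash, id_type, signer_len, key_id_len;
	uint8_t  pad[3];
	uint32_t sig_len;	/* big-endian on the wire */
};

/* Returns the payload length with the signature stripped, or 0 on error. */
static size_t strip_appended_sig(const uint8_t *buf, size_t len,
				 const uint8_t **sig, size_t *sig_len)
{
	size_t marker = sizeof(SIG_MARKER) - 1;
	struct sig_trailer ms;

	if (len < marker + sizeof(ms))
		return 0;
	if (memcmp(buf + len - marker, SIG_MARKER, marker))
		return 0;			/* no signature appended */
	len -= marker;
	memcpy(&ms, buf + len - sizeof(ms), sizeof(ms));
	len -= sizeof(ms);
	*sig_len = ntohl(ms.sig_len);
	if (*sig_len >= len)
		return 0;			/* bogus length */
	len -= *sig_len;
	*sig = buf + len;			/* signature bytes */
	return len;				/* signed payload length */
}

int main(void)
{
	struct sig_trailer ms = { .id_type = 2, .sig_len = htonl(3) };
	const uint8_t *sig;
	uint8_t buf[256];
	size_t n = 0, sig_len = 0;

	memcpy(buf + n, "payload", 7); n += 7;			/* signed data */
	memcpy(buf + n, "SIG", 3); n += 3;			/* fake signature */
	memcpy(buf + n, &ms, sizeof(ms)); n += sizeof(ms);	/* trailer */
	memcpy(buf + n, SIG_MARKER, sizeof(SIG_MARKER) - 1);
	n += sizeof(SIG_MARKER) - 1;

	printf("payload bytes: %zu\n", strip_appended_sig(buf, n, &sig, &sig_len));
	return 0;
}
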
diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
new file mode 100644
index 000000000000..1dded39239f8
--- /dev/null
+++ b/arch/s390/kernel/machine_kexec_reloc.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/elf.h>
+
+int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
+ unsigned long addr)
+{
+ switch (r_type) {
+ case R_390_NONE:
+ break;
+ case R_390_8: /* Direct 8 bit. */
+ *(u8 *)loc = val;
+ break;
+ case R_390_12: /* Direct 12 bit. */
+ *(u16 *)loc &= 0xf000;
+ *(u16 *)loc |= val & 0xfff;
+ break;
+ case R_390_16: /* Direct 16 bit. */
+ *(u16 *)loc = val;
+ break;
+ case R_390_20: /* Direct 20 bit. */
+ *(u32 *)loc &= 0xf00000ff;
+ *(u32 *)loc |= (val & 0xfff) << 16; /* DL */
+ *(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
+ break;
+ case R_390_32: /* Direct 32 bit. */
+ *(u32 *)loc = val;
+ break;
+ case R_390_64: /* Direct 64 bit. */
+ *(u64 *)loc = val;
+ break;
+ case R_390_PC16: /* PC relative 16 bit. */
+ *(u16 *)loc = (val - addr);
+ break;
+ case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
+ *(u16 *)loc = (val - addr) >> 1;
+ break;
+ case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
+ *(u32 *)loc = (val - addr) >> 1;
+ break;
+ case R_390_PC32: /* PC relative 32 bit. */
+ *(u32 *)loc = (val - addr);
+ break;
+ case R_390_PC64: /* PC relative 64 bit. */
+ *(u64 *)loc = (val - addr);
+ break;
+ case R_390_RELATIVE:
+ *(unsigned long *) loc = val;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+}
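
The relocation cases moved into arch_kexec_do_relocs() are shared with the boot code; the PC-relative *DBL types store the distance in halfwords. A small standalone example of applying the R_390_PC32DBL-style encoding (addresses chosen purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Apply an R_390_PC32DBL-style relocation: store the halfword distance
 * from the patched location to the target as a 32-bit value. */
static void apply_pc32dbl(void *loc, unsigned long val, unsigned long addr)
{
	*(uint32_t *)loc = (uint32_t)((val - addr) >> 1);
}

int main(void)
{
	uint32_t insn_imm = 0;
	unsigned long place  = 0x10000;	/* address being patched */
	unsigned long target = 0x10400;	/* symbol the branch should reach */

	apply_pc32dbl(&insn_imm, target, place);
	printf("encoded offset: %u halfwords\n", insn_imm);	/* 512 */
	return 0;
}
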
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index e93fbf02490c..9e1660a6b9db 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -20,6 +20,7 @@
ENTRY(ftrace_stub)
BR_EX %r14
+ENDPROC(ftrace_stub)
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@@ -28,7 +29,7 @@ ENTRY(ftrace_stub)
ENTRY(_mcount)
BR_EX %r14
-
+ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
ENTRY(ftrace_caller)
@@ -61,10 +62,11 @@ ENTRY(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
-ENTRY(ftrace_graph_caller)
+ .globl ftrace_graph_caller
+ftrace_graph_caller:
j ftrace_graph_caller_end
- lg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
- lg %r3,(STACK_PTREGS_PSW+8)(%r15)
+ lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
+ lg %r4,(STACK_PTREGS_PSW+8)(%r15)
brasl %r14,prepare_ftrace_return
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
ftrace_graph_caller_end:
@@ -73,6 +75,7 @@ ftrace_graph_caller_end:
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
BR_EX %r1
+ENDPROC(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -86,5 +89,6 @@ ENTRY(return_to_handler)
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
BR_EX %r14
+ENDPROC(return_to_handler)
#endif
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 8c867b43c8eb..0a487fae763e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -125,7 +125,7 @@ void nmi_free_per_cpu(struct lowcore *lc)
static notrace void s390_handle_damage(void)
{
smp_emergency_stop();
- disabled_wait((unsigned long) __builtin_return_address(0));
+ disabled_wait();
while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
index bdddaae96559..29e511f5bf06 100644
--- a/arch/s390/kernel/nospec-branch.c
+++ b/arch/s390/kernel/nospec-branch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/cpu.h>
#include <asm/nospec-branch.h>
static int __init nobp_setup_early(char *str)
@@ -37,7 +38,7 @@ static int __init nospec_report(void)
{
if (test_facility(156))
pr_info("Spectre V2 mitigation: etokens\n");
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
pr_info("Spectre V2 mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
pr_info("Spectre V2 mitigation: limited branch prediction\n");
@@ -58,15 +59,15 @@ early_param("nospectre_v2", nospectre_v2_setup_early);
void __init nospec_auto_detect(void)
{
- if (test_facility(156)) {
+ if (test_facility(156) || cpu_mitigations_off()) {
/*
* The machine supports etokens.
* Disable expolines and disable nobp.
*/
- if (IS_ENABLED(CC_USING_EXPOLINE))
+ if (__is_defined(CC_USING_EXPOLINE))
nospec_disable = 1;
__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
- } else if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ } else if (__is_defined(CC_USING_EXPOLINE)) {
/*
* The kernel has been compiled with expolines.
* Keep expolines enabled and disable nobp.
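
The switch from IS_ENABLED() to __is_defined() is presumably because CC_USING_EXPOLINE is a compiler/Makefile define rather than a Kconfig symbol; both rely on the same preprocessor trick for turning "defined to 1" into a usable 0/1 value. A standalone sketch that mirrors that trick (the macro names here are made up, not the kernel's kconfig.h definitions):

#include <stdio.h>

/* If FOO is defined as 1, PLACEHOLDER_##FOO expands to "0,", which shifts
 * an extra argument in front of the trailing "1, 0" pair, so the second
 * argument becomes 1; otherwise it stays 0. */
#define PLACEHOLDER_1 0,
#define take_second(ignored, val, ...) val
#define is_defined(x) is_defined2(x)
#define is_defined2(x) take_second(PLACEHOLDER_##x 1, 0)

#define CC_USING_EXPOLINE 1
/* SOMETHING_ELSE deliberately left undefined */

int main(void)
{
	printf("%d\n", is_defined(CC_USING_EXPOLINE));	/* 1: defined to 1 */
	printf("%d\n", is_defined(SOMETHING_ELSE));	/* 0: not defined  */
	return 0;
}
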
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
index e30e580ae362..48f472bf9290 100644
--- a/arch/s390/kernel/nospec-sysfs.c
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -15,7 +15,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
{
if (test_facility(156))
return sprintf(buf, "Mitigation: etokens\n");
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index e1c54d28713a..48d48b6187c0 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -2,8 +2,8 @@
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
- * Copyright IBM Corp. 2012, 2017
- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ * Copyright IBM Corp. 2012, 2019
+ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
@@ -26,7 +26,7 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
set = CPUMF_CTR_SET_USER;
else if (event < 128)
set = CPUMF_CTR_SET_CRYPTO;
- else if (event < 256)
+ else if (event < 288)
set = CPUMF_CTR_SET_EXT;
else if (event >= 448 && event < 496)
set = CPUMF_CTR_SET_MT_DIAG;
@@ -50,12 +50,19 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_CRYPTO:
+ if ((cpuhw->info.csvn >= 1 && cpuhw->info.csvn <= 5 &&
+ hwc->config > 79) ||
+ (cpuhw->info.csvn >= 6 && hwc->config > 83))
+ err = -EOPNOTSUPP;
+ break;
case CPUMF_CTR_SET_EXT:
if (cpuhw->info.csvn < 1)
err = -EOPNOTSUPP;
if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
(cpuhw->info.csvn == 2 && hwc->config > 175) ||
- (cpuhw->info.csvn > 2 && hwc->config > 255))
+ (cpuhw->info.csvn >= 3 && cpuhw->info.csvn <= 5
+ && hwc->config > 255) ||
+ (cpuhw->info.csvn >= 6 && hwc->config > 287))
err = -EOPNOTSUPP;
break;
case CPUMF_CTR_SET_MT_DIAG:
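
With counter-set version 6 the crypto set grows to 20 counters and the extended set reaches event number 287, which is why get_counter_set() now maps events below 288 to the extended set. An illustrative standalone mapping with those ranges (the constants mirror the checks above, the enum is a stand-in):

#include <stdio.h>

enum ctr_set { SET_BASIC, SET_USER, SET_CRYPTO, SET_EXT, SET_MT_DIAG, SET_NONE };

/* Event-number to counter-set mapping after the extension to 288. */
static enum ctr_set get_set(unsigned int event)
{
	if (event < 32)
		return SET_BASIC;
	if (event < 64)
		return SET_USER;
	if (event < 128)
		return SET_CRYPTO;
	if (event < 288)
		return SET_EXT;
	if (event >= 448 && event < 496)
		return SET_MT_DIAG;
	return SET_NONE;
}

int main(void)
{
	/* 0x50 -> crypto, 0x11f (287) -> extended, 0x120 (288) -> none */
	printf("%d %d %d\n", get_set(0x50), get_set(0x11f), get_set(0x120));
	return 0;
}
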
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index b6854812d2ed..d4e031f7b9c8 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -306,15 +306,20 @@ static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
ctrset_size = 2;
break;
case CPUMF_CTR_SET_CRYPTO:
- ctrset_size = 16;
+ if (info->csvn >= 1 && info->csvn <= 5)
+ ctrset_size = 16;
+ else if (info->csvn == 6)
+ ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
if (info->csvn == 1)
ctrset_size = 32;
else if (info->csvn == 2)
ctrset_size = 48;
- else if (info->csvn >= 3)
+ else if (info->csvn >= 3 && info->csvn <= 5)
ctrset_size = 128;
+ else if (info->csvn == 6)
+ ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
if (info->csvn > 3)
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index b45238c89728..34cc96449b30 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -31,22 +31,26 @@ CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_FUNCTION_COUNT, 0x0050);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_CYCLES_COUNT, 0x0051);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT, 0x0052);
+CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT, 0x0053);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@@ -262,23 +266,47 @@ static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
+static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
+ NULL,
+};
+
+static struct attribute *cpumcf_svn_6_pmu_event_attr[] __initdata = {
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
+ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_FUNCTION_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_CYCLES_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT),
+ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT),
NULL,
};
@@ -562,7 +590,18 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
default:
cfvn = none;
}
- csvn = cpumcf_svn_generic_pmu_event_attr;
+
+ /* Determine version specific crypto set */
+ switch (ci.csvn) {
+ case 1 ... 5:
+ csvn = cpumcf_svn_12345_pmu_event_attr;
+ break;
+ case 6:
+ csvn = cpumcf_svn_6_pmu_event_attr;
+ break;
+ default:
+ csvn = none;
+ }
/* Determine model-specific counter set(s) */
get_cpu_id(&cpu_id);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 0d770e513abf..fcb6c2e92b07 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -21,6 +21,7 @@
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sysinfo.h>
+#include <asm/unwind.h>
const char *perf_pmu_name(void)
{
@@ -219,20 +220,13 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
-{
- struct perf_callchain_entry_ctx *entry = data;
-
- perf_callchain_store(entry, address);
- return 0;
-}
-
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- if (user_mode(regs))
- return;
- dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0)
+ perf_callchain_store(entry, state.ip);
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 3e62aae34ea3..59dee9d3bebf 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -7,7 +7,7 @@
#include <linux/linkage.h>
-#define PGM_CHECK(handler) .long handler
+#define PGM_CHECK(handler) .quad handler
#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
/*
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6e758bb6cd29..63873aa6693f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -37,6 +37,7 @@
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
+#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6fe2e1875058..5de13307b703 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -109,7 +109,8 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
- "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
+ "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
+ "vxe2", "vxp", "sort", "dflt"
};
static const char * const int_hwcap_str[] = {
"sie"
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index cd3df5514552..ad71132374f0 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -24,7 +24,6 @@
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <trace/syscall.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 7f14adf512c6..4a22163962eb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -73,6 +73,7 @@ ENTRY(store_status)
lgr %r9,%r2
lgr %r2,%r3
BR_EX %r9
+ENDPROC(store_status)
.section .bss
.align 8
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index c97c2d40fe15..fe396673e8a6 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -58,11 +58,15 @@ ENTRY(relocate_kernel)
j .base
.done:
sgr %r0,%r0 # clear register r0
+ cghi %r3,0
+ je .diag
la %r4,load_psw-.base(%r13) # load psw-address into the register
o %r3,4(%r4) # or load address into psw
st %r3,4(%r4)
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
+ .diag:
diag %r0,%r0,0x308
+ENDPROC(relocate_kernel)
.align 8
load_psw:
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2c642af526ce..f8544d517430 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -50,6 +50,7 @@
#include <linux/compat.h>
#include <linux/start_kernel.h>
+#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
@@ -65,11 +66,13 @@
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
+#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
+#include <asm/uv.h>
#include "entry.h"
/*
@@ -89,12 +92,25 @@ char elf_platform[ELF_PLATFORM_SIZE];
unsigned long int_hwcap = 0;
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+int __bootdata_preserved(prot_virt_guest);
+#endif
+
int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);
+struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
+struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
+unsigned long __bootdata_preserved(__swsusp_reset_dma);
+unsigned long __bootdata_preserved(__stext_dma);
+unsigned long __bootdata_preserved(__etext_dma);
+unsigned long __bootdata_preserved(__sdma);
+unsigned long __bootdata_preserved(__edma);
+unsigned long __bootdata_preserved(__kaslr_offset);
+
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -736,6 +752,15 @@ static void __init reserve_initrd(void)
#endif
}
+/*
+ * Reserve the memory area used to pass the certificate lists
+ */
+static void __init reserve_certificate_list(void)
+{
+ if (ipl_cert_list_addr)
+ memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
+}
+
static void __init reserve_mem_detect_info(void)
{
unsigned long start, size;
@@ -814,9 +839,10 @@ static void __init reserve_kernel(void)
{
unsigned long start_pfn = PFN_UP(__pa(_end));
- memblock_reserve(0, PARMAREA_END);
+ memblock_reserve(0, HEAD_END);
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
- (unsigned long)_stext);
+ memblock_reserve(__sdma, __edma - __sdma);
}
static void __init setup_memory(void)
@@ -914,7 +940,15 @@ static int __init setup_hwcaps(void)
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
+ if (test_facility(148))
+ elf_hwcap |= HWCAP_S390_VXRS_EXT2;
+ if (test_facility(152))
+ elf_hwcap |= HWCAP_S390_VXRS_PDE;
}
+ if (test_facility(150))
+ elf_hwcap |= HWCAP_S390_SORT;
+ if (test_facility(151))
+ elf_hwcap |= HWCAP_S390_DFLT;
/*
* Guarded storage support HWCAP_S390_GS is bit 12.
@@ -1023,6 +1057,38 @@ static void __init setup_control_program_code(void)
}
/*
+ * Print the component list from the IPL report
+ */
+static void __init log_component_list(void)
+{
+ struct ipl_rb_component_entry *ptr, *end;
+ char *str;
+
+ if (!early_ipl_comp_list_addr)
+ return;
+ if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
+ pr_info("Linux is running with Secure-IPL enabled\n");
+ else
+ pr_info("Linux is running with Secure-IPL disabled\n");
+ ptr = (void *) early_ipl_comp_list_addr;
+ end = (void *) ptr + early_ipl_comp_list_size;
+ pr_info("The IPL report contains the following components:\n");
+ while (ptr < end) {
+ if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
+ if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
+ str = "signed, verified";
+ else
+ str = "signed, verification failed";
+ } else {
+ str = "not signed";
+ }
+ pr_info("%016llx - %016llx (%s)\n",
+ ptr->addr, ptr->addr + ptr->len, str);
+ ptr++;
+ }
+}
+
+/*
* Setup function called from init/main.c just after the banner
* was printed.
*/
@@ -1042,6 +1108,8 @@ void __init setup_arch(char **cmdline_p)
else
pr_info("Linux is running as a guest in 64-bit mode\n");
+ log_component_list();
+
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
*cmdline_p = boot_command_line;
@@ -1073,6 +1141,7 @@ void __init setup_arch(char **cmdline_p)
reserve_oldmem();
reserve_kernel();
reserve_initrd();
+ reserve_certificate_list();
reserve_mem_detect_info();
memblock_allow_resize();
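
Both reserve_certificate_list() and the kexec IPL-report code treat the certificate area at ipl_cert_list_addr as a sequence of [32-bit length][payload] records. A small userspace sketch of walking such a length-prefixed blob (the record layout is the assumed format, names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Count entries in a blob laid out as repeated [u32 length][payload]. */
static unsigned int count_entries(const uint8_t *blob, size_t size)
{
	const uint8_t *ptr = blob, *end = blob + size;
	unsigned int n = 0;
	uint32_t len;

	while (ptr + sizeof(len) <= end) {
		memcpy(&len, ptr, sizeof(len));
		ptr += sizeof(len) + len;
		if (ptr > end)
			break;			/* truncated entry */
		n++;
	}
	return n;
}

int main(void)
{
	uint8_t blob[sizeof(uint32_t) + 3] = { 0 };
	uint32_t len = 3;

	memcpy(blob, &len, sizeof(len));	/* one 3-byte certificate */
	printf("%u\n", count_entries(blob, sizeof(blob)));	/* 1 */
	return 0;
}
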
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index bd197baf1dc3..35fafa2b91a8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -53,6 +53,7 @@
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
+#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"
@@ -689,7 +690,7 @@ void __init smp_save_dump_cpus(void)
smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
}
memblock_free(page, PAGE_SIZE);
- diag308_reset();
+ diag_dma_ops.diag308_reset();
pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 460dcfba7d4e..f6a620f854e1 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,65 +11,52 @@
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
-
-static int __save_address(void *data, unsigned long address, int nosched)
-{
- struct stack_trace *trace = data;
-
- if (nosched && in_sched_functions(address))
- return 0;
- if (trace->skip > 0) {
- trace->skip--;
- return 0;
- }
- if (trace->nr_entries < trace->max_entries) {
- trace->entries[trace->nr_entries++] = address;
- return 0;
- }
- return 1;
-}
-
-static int save_address(void *data, unsigned long address, int reliable)
-{
- return __save_address(data, address, 0);
-}
-
-static int save_address_nosched(void *data, unsigned long address, int reliable)
-{
- return __save_address(data, address, 1);
-}
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
void save_stack_trace(struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = current_stack_pointer();
- dump_trace(save_address, trace, NULL, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, NULL, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = tsk->thread.ksp;
- if (tsk == current)
- sp = current_stack_pointer();
- dump_trace(save_address_nosched, trace, tsk, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, tsk, NULL, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (in_sched_functions(state.ip))
+ continue;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
- unsigned long sp;
-
- sp = kernel_stack_pointer(regs);
- dump_trace(save_address, trace, NULL, sp);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0) {
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = state.ip;
+ }
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 993100c31d65..19a3c427801a 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -108,6 +108,7 @@ ENTRY(swsusp_arch_suspend)
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
+ENDPROC(swsusp_arch_suspend)
/*
* Restore saved memory image to correct place and restore register context.
@@ -154,20 +155,13 @@ ENTRY(swsusp_arch_resume)
ptlb /* flush tlb */
/* Reset System */
- larl %r1,restart_entry
- larl %r2,.Lrestart_diag308_psw
- og %r1,0(%r2)
- stg %r1,0(%r0)
larl %r1,.Lnew_pgm_check_psw
epsw %r2,%r3
stm %r2,%r3,0(%r1)
mvc __LC_PGM_NEW_PSW(16,%r0),0(%r1)
- lghi %r0,0
- diag %r0,%r0,0x308
-restart_entry:
- lhi %r1,1
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- sam64
+ larl %r1,__swsusp_reset_dma
+ lg %r1,0(%r1)
+ BASR_EX %r14,%r1
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
@@ -267,6 +261,7 @@ restore_registers:
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
BR_EX %r14
+ENDPROC(swsusp_arch_resume)
.section .data..nosave,"aw",@progbits
.align 8
@@ -275,8 +270,6 @@ restore_registers:
.Lpanic_string:
.asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
-.Lrestart_diag308_psw:
- .long 0x00080000,0x80000000
.Lrestart_suspend_psw:
.quad 0x0000000180000000,restart_suspend
.Lnew_pgm_check_psw:
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 061418f787c3..e822b2964a83 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -430,3 +430,9 @@
425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree sys_open_tree
+429 common move_mount sys_move_mount sys_move_mount
+430 common fsopen sys_fsopen sys_fsopen
+431 common fsconfig sys_fsconfig sys_fsconfig
+432 common fsmount sys_fsmount sys_fsmount
+433 common fspick sys_fspick sys_fspick
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 8003b38c1688..82e81a9f7112 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -49,7 +49,7 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
report_user_fault(regs, si_signo, 0);
} else {
const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->psw.addr);
+ fixup = s390_search_extables(regs->psw.addr);
if (fixup)
regs->psw.addr = extable_fixup(fixup);
else {
@@ -263,5 +263,6 @@ NOKPROBE_SYMBOL(kernel_stack_overflow);
void __init trap_init(void)
{
+ sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
local_mcck_enable();
}
diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c
new file mode 100644
index 000000000000..57fd4e902f1f
--- /dev/null
+++ b/arch/s390/kernel/unwind_bc.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <asm/sections.h>
+#include <asm/ptrace.h>
+#include <asm/bitops.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+ return __kernel_text_address(state->ip) ? state->ip : 0;
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
+{
+ return (sp <= state->sp) ||
+ (sp + sizeof(struct stack_frame) > state->stack_info.end);
+}
+
+static bool update_stack_info(struct unwind_state *state, unsigned long sp)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long *mask = &state->stack_mask;
+
+ /* New stack pointer leaves the current stack */
+ if (get_stack_info(sp, state->task, info, mask) != 0 ||
+ !on_stack(info, sp, sizeof(struct stack_frame)))
+ /* 'sp' does not point to a valid stack */
+ return false;
+ return true;
+}
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ struct stack_frame *sf;
+ struct pt_regs *regs;
+ unsigned long sp, ip;
+ bool reliable;
+
+ regs = state->regs;
+ if (unlikely(regs)) {
+ sp = READ_ONCE_TASK_STACK(state->task, regs->gprs[15]);
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = false;
+ regs = NULL;
+ } else {
+ sf = (struct stack_frame *) state->sp;
+ sp = READ_ONCE_TASK_STACK(state->task, sf->back_chain);
+ if (likely(sp)) {
+ /* Non-zero back-chain points to the previous frame */
+ if (unlikely(outside_of_stack(state, sp))) {
+ if (!update_stack_info(state, sp))
+ goto out_err;
+ }
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = true;
+ } else {
+ /* No back-chain, look for a pt_regs structure */
+ sp = state->sp + STACK_FRAME_OVERHEAD;
+ if (!on_stack(info, sp, sizeof(struct pt_regs)))
+ goto out_stop;
+ regs = (struct pt_regs *) sp;
+ if (user_mode(regs))
+ goto out_stop;
+ ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
+ reliable = true;
+ }
+ }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Decode any ftrace redirection */
+ if (ip == (unsigned long) return_to_handler)
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+ ip, (void *) sp);
+#endif
+
+ /* Update unwind state */
+ state->sp = sp;
+ state->ip = ip;
+ state->regs = regs;
+ state->reliable = reliable;
+ return true;
+
+out_err:
+ state->error = true;
+out_stop:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
+
+void __unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs, unsigned long sp)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long *mask = &state->stack_mask;
+ struct stack_frame *sf;
+ unsigned long ip;
+ bool reliable;
+
+ memset(state, 0, sizeof(*state));
+ state->task = task;
+ state->regs = regs;
+
+ /* Don't even attempt to start from user mode regs: */
+ if (regs && user_mode(regs)) {
+ info->type = STACK_TYPE_UNKNOWN;
+ return;
+ }
+
+ /* Get current stack pointer and initialize stack info */
+ if (get_stack_info(sp, task, info, mask) != 0 ||
+ !on_stack(info, sp, sizeof(struct stack_frame))) {
+ /* Something is wrong with the stack pointer */
+ info->type = STACK_TYPE_UNKNOWN;
+ state->error = true;
+ return;
+ }
+
+ /* Get the instruction pointer from pt_regs or the stack frame */
+ if (regs) {
+ ip = READ_ONCE_TASK_STACK(state->task, regs->psw.addr);
+ reliable = true;
+ } else {
+ sf = (struct stack_frame *) sp;
+ ip = READ_ONCE_TASK_STACK(state->task, sf->gprs[8]);
+ reliable = false;
+ }
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Decode any ftrace redirection */
+ if (ip == (unsigned long) return_to_handler)
+ ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+ ip, NULL);
+#endif
+
+ /* Update unwind state */
+ state->sp = sp;
+ state->ip = ip;
+ state->reliable = reliable;
+}
+EXPORT_SYMBOL_GPL(__unwind_start);
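
unwind_for_each_frame(), used by the new perf and stacktrace code above, is the usual iterator-macro idiom: a for loop whose init starts the walk, whose condition checks unwind_done(), and whose step calls unwind_next_frame(). A toy standalone version of that idiom, with a fake back-chain instead of real stack frames:

#include <stdio.h>

struct walk_state { unsigned int ip; int done; };

static void walk_start(struct walk_state *s, unsigned int start)
{
	s->ip = start;
	s->done = 0;
}

static int walk_next(struct walk_state *s)
{
	s->ip -= 0x100;			/* pretend to follow a back chain */
	if (s->ip == 0)
		s->done = 1;
	return !s->done;
}

/* init / condition / step, just like unwind_for_each_frame() */
#define walk_for_each(state, start) \
	for (walk_start(state, start); !(state)->done; walk_next(state))

int main(void)
{
	struct walk_state st;

	walk_for_each(&st, 0x400)
		printf("frame at %#x\n", st.ip);
	return 0;
}
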
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index e7920a68a12e..243d8b1185bf 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -29,7 +29,7 @@
#include <asm/vdso.h>
#include <asm/facility.h>
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
@@ -55,7 +55,7 @@ static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
vdso_pagelist = vdso64_pagelist;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm) {
vdso_pagelist = vdso32_pagelist;
vdso_pages = vdso32_pages;
@@ -76,7 +76,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
unsigned long vdso_pages;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
if (vma->vm_mm->context.compat_mm)
vdso_pages = vdso32_pages;
#endif
@@ -223,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
return 0;
vdso_pages = vdso64_pages;
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
mm->context.compat_mm = is_compat_task();
if (mm->context.compat_mm)
vdso_pages = vdso32_pages;
@@ -280,7 +280,7 @@ static int __init vdso_init(void)
int i;
vdso_init_data(vdso_data);
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_VDSO
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
+ PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index e76309fbbcb3..aee9ffbccb54 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -19,7 +19,7 @@ KBUILD_AFLAGS_31 += -m31 -s
KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index f849ac61c5da..bec19e7e6e1c 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -19,7 +19,7 @@ KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=both)
+ -Wl,--hash-style=both
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
$(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 8429ab079715..49d55327de0b 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -72,6 +72,7 @@ SECTIONS
__end_ro_after_init = .;
RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE)
+ BOOT_DATA_PRESERVED
_edata = .; /* End of data section */
@@ -143,6 +144,18 @@ SECTIONS
INIT_DATA_SECTION(0x100)
PERCPU_SECTION(0x100)
+
+ .dynsym ALIGN(8) : {
+ __dynsym_start = .;
+ *(.dynsym)
+ __dynsym_end = .;
+ }
+ .rela.dyn ALIGN(8) : {
+ __rela_dyn_start = .;
+ *(.rela*)
+ __rela_dyn_end = .;
+ }
+
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
@@ -161,6 +174,12 @@ SECTIONS
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
+ QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */
+ QUAD(__boot_data_preserved_end -
+ __boot_data_preserved_start) /* bootdata_preserved_size */
+ QUAD(__dynsym_start) /* dynsym_start */
+ QUAD(__rela_dyn_start) /* rela_dyn_start */
+ QUAD(__rela_dyn_end) /* rela_dyn_end */
} :NONE
/* Debugging sections. */
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 767453faacfc..d3db3d7ed077 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -21,7 +21,6 @@ config KVM
prompt "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
select PREEMPT_NOTIFIERS
- select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_VCPU_ASYNC_IOCTL
select HAVE_KVM_EVENTFD
@@ -31,6 +30,7 @@ config KVM
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_INVALID_WAKEUPS
+ select HAVE_KVM_NO_POLL
select SRCU
select KVM_VFIO
---help---
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 82162867f378..9dde4d7d8704 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -14,6 +14,7 @@
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
+#include <linux/nospec.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
@@ -2307,6 +2308,7 @@ static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
if (id >= MAX_S390_IO_ADAPTERS)
return NULL;
+ id = array_index_nospec(id, MAX_S390_IO_ADAPTERS);
return kvm->arch.adapters[id];
}
@@ -2320,8 +2322,13 @@ static int register_io_adapter(struct kvm_device *dev,
(void __user *)attr->addr, sizeof(adapter_info)))
return -EFAULT;
- if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
- (dev->kvm->arch.adapters[adapter_info.id] != NULL))
+ if (adapter_info.id >= MAX_S390_IO_ADAPTERS)
+ return -EINVAL;
+
+ adapter_info.id = array_index_nospec(adapter_info.id,
+ MAX_S390_IO_ADAPTERS);
+
+ if (dev->kvm->arch.adapters[adapter_info.id] != NULL)
return -EINVAL;
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
@@ -2376,7 +2383,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
ret = -EFAULT;
goto out;
}
- ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+ ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
if (ret < 0)
goto out;
BUG_ON(ret != 1);
@@ -3194,7 +3201,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvm_s390_gisc_unregister);
-static void gib_alert_irq_handler(struct airq_struct *airq)
+static void gib_alert_irq_handler(struct airq_struct *airq, bool floating)
{
inc_irq_stat(IRQIO_GAL);
process_gib_alert_list();
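The two hunks above follow the usual bounds-check-then-clamp idiom; here is a minimal sketch (not part of the patch) on a hypothetical table. array_index_nospec() forces the index into range even when the preceding bounds check is speculatively mispredicted (Spectre v1).

#include <linux/nospec.h>

#define NR_ENTRIES 64
static void *entries[NR_ENTRIES];

static void *lookup_entry(unsigned int id)
{
	if (id >= NR_ENTRIES)
		return NULL;
	/* Clamp the index under speculation before it is used to load memory */
	id = array_index_nospec(id, NR_ENTRIES);
	return entries[id];
}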
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 4638303ba6a8..8d6d75db8de6 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -75,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
+ { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
@@ -177,6 +178,11 @@ static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");
+/* maximum percentage of steal time for polling. >100 is treated like 100 */
+static u8 halt_poll_max_steal = 10;
+module_param(halt_poll_max_steal, byte, 0644);
+MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
+
/*
* For now we handle at most 16 double words as this is what the s390 base
* kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -321,6 +327,22 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
+static inline void __insn32_query(unsigned int opcode, u8 query[32])
+{
+ register unsigned long r0 asm("0") = 0; /* query function */
+ register unsigned long r1 asm("1") = (unsigned long) query;
+
+ asm volatile(
+ /* Parameter regs are ignored */
+ " .insn rrf,%[opc] << 16,2,4,6,0\n"
+ : "=m" (*query)
+ : "d" (r0), "a" (r1), [opc] "i" (opcode)
+ : "cc");
+}
+
+#define INSN_SORTL 0xb938
+#define INSN_DFLTCC 0xb939
+
static void kvm_s390_cpu_feat_init(void)
{
int i;
@@ -368,6 +390,16 @@ static void kvm_s390_cpu_feat_init(void)
__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
kvm_s390_available_subfunc.kma);
+ if (test_facility(155)) /* MSA9 */
+ __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
+ kvm_s390_available_subfunc.kdsa);
+
+ if (test_facility(150)) /* SORTL */
+ __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
+
+ if (test_facility(151)) /* DFLTCC */
+ __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
+
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
@@ -513,9 +545,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else if (sclp.has_esca && sclp.has_64bscao)
r = KVM_S390_ESCA_CPU_SLOTS;
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_S390_COW:
r = MACHINE_HAS_ESOP;
break;
@@ -657,6 +686,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 135);
set_kvm_facility(kvm->arch.model.fac_list, 135);
}
+ if (test_facility(148)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 148);
+ set_kvm_facility(kvm->arch.model.fac_list, 148);
+ }
+ if (test_facility(152)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 152);
+ set_kvm_facility(kvm->arch.model.fac_list, 152);
+ }
r = 0;
} else
r = -EINVAL;
@@ -1323,6 +1360,19 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+ VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+ VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+ VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
@@ -1491,6 +1541,19 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
+ VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
+ VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
+ VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
+ ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
return 0;
}
@@ -1546,6 +1609,19 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
+ VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
+ VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
+ VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
+ ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
return 0;
}
@@ -2817,6 +2893,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+ if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+ test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+ return true;
+ return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+ /* At least one ECC subfunction must be present */
+ return kvm_has_pckmo_subfunc(kvm, 32) ||
+ kvm_has_pckmo_subfunc(kvm, 33) ||
+ kvm_has_pckmo_subfunc(kvm, 34) ||
+ kvm_has_pckmo_subfunc(kvm, 40) ||
+ kvm_has_pckmo_subfunc(kvm, 41);
+
+}
+
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
/*
@@ -2829,13 +2924,19 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu->arch.sie_block->eca &= ~ECA_APIE;
+ vcpu->arch.sie_block->ecd &= ~ECD_ECC;
if (vcpu->kvm->arch.crypto.apie)
vcpu->arch.sie_block->eca |= ECA_APIE;
/* Set up protected key support */
- if (vcpu->kvm->arch.crypto.aes_kw)
+ if (vcpu->kvm->arch.crypto.aes_kw) {
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+ /* ecc is also wrapped with AES key */
+ if (kvm_has_pckmo_ecc(vcpu->kvm))
+ vcpu->arch.sie_block->ecd |= ECD_ECC;
+ }
+
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
@@ -3068,6 +3169,17 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
}
}
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+ /* do not poll with more than halt_poll_max_steal percent of steal time */
+ if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+ halt_poll_max_steal) {
+ vcpu->stat.halt_no_poll_steal++;
+ return true;
+ }
+ return false;
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index d62fa148558b..076090f9e666 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -288,7 +288,9 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
+ u32 ecd_flags;
int apie_h;
+ int apie_s;
int key_msk = test_kvm_facility(vcpu->kvm, 76);
int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
@@ -297,7 +299,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->crycbd = 0;
apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
- if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0))
+ apie_s = apie_h & scb_o->eca;
+ if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
return 0;
if (!crycb_addr)
@@ -308,7 +311,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
((crycb_addr + 128) & PAGE_MASK))
return set_validity_icpt(scb_s, 0x003CU);
- if (apie_h && (scb_o->eca & ECA_APIE)) {
+ if (apie_s) {
ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
vcpu->kvm->arch.crypto.crycb,
fmt_o, fmt_h);
@@ -320,7 +323,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
- if (!ecb3_flags)
+ ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
+ if (!ecb3_flags && !ecd_flags)
goto end;
/* copy only the wrapping keys */
@@ -329,6 +333,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
+ scb_s->ecd |= ecd_flags;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -339,7 +344,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
end:
switch (ret) {
case -EINVAL:
- return set_validity_icpt(scb_s, 0x0020U);
+ return set_validity_icpt(scb_s, 0x0022U);
case -EFAULT:
return set_validity_icpt(scb_s, 0x0035U);
case -EACCES:
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 53008da05190..dc0874f2e203 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -178,6 +178,7 @@ ENTRY(__memset\bits)
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
+ENDPROC(__memset\bits)
.endm
__MEMSET 16,2,sth
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index f5880bfd1b0c..3175413186b9 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,7 +4,7 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
+obj-y += page-states.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 11613362c4e7..c220399ae196 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -247,12 +247,24 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
current);
}
+const struct exception_table_entry *s390_search_extables(unsigned long addr)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_extable(__start_dma_ex_table,
+ __stop_dma_ex_table - __start_dma_ex_table,
+ addr);
+ if (!fixup)
+ fixup = search_exception_tables(addr);
+ return fixup;
+}
+
static noinline void do_no_context(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
/* Are we prepared to handle this kernel fault? */
- fixup = search_exception_tables(regs->psw.addr);
+ fixup = s390_search_extables(regs->psw.addr);
if (fixup) {
regs->psw.addr = extable_fixup(fixup);
return;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
deleted file mode 100644
index 2809d11c7a28..000000000000
--- a/arch/s390/mm/gup.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Lockless get_user_pages_fast for s390
- *
- * Copyright IBM Corp. 2010
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- pte_t *ptep, pte;
-
- mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
-
- ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
- do {
- pte = *ptep;
- barrier();
- /* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte))
- return 0;
- if ((pte_val(pte) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
- return 0;
- if (unlikely(pte_val(pte) != pte_val(*ptep))) {
- put_page(head);
- return 0;
- }
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
-
- } while (ptep++, addr += PAGE_SIZE, addr != end);
-
- return 1;
-}
-
-static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- int refs;
-
- mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
- if ((pmd_val(pmd) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
-
- refs = 0;
- head = pmd_page(pmd);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON(compound_head(page) != head);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-
-static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pmd_t *pmdp, pmd;
-
- pmdp = (pmd_t *) pudp;
- if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
- pmdp = (pmd_t *) pud_deref(pud);
- pmdp += pmd_index(addr);
- do {
- pmd = *pmdp;
- barrier();
- next = pmd_addr_end(addr, end);
- if (pmd_none(pmd))
- return 0;
- if (unlikely(pmd_large(pmd))) {
- /*
- * NUMA hinting faults need to be handled in the GUP
- * slowpath for accounting purposes and so that they
- * can be serialised against THP migration.
- */
- if (pmd_protnone(pmd))
- return 0;
- if (!gup_huge_pmd(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } else if (!gup_pte_range(pmdp, pmd, addr, next,
- write, pages, nr))
- return 0;
- } while (pmdp++, addr = next, addr != end);
-
- return 1;
-}
-
-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- struct page *head, *page;
- unsigned long mask;
- int refs;
-
- mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
- if ((pud_val(pud) & mask) != 0)
- return 0;
- VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
-
- refs = 0;
- head = pud_page(pud);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
- do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
- pages[*nr] = page;
- (*nr)++;
- page++;
- refs++;
- } while (addr += PAGE_SIZE, addr != end);
-
- if (!page_cache_add_speculative(head, refs)) {
- *nr -= refs;
- return 0;
- }
-
- if (unlikely(pud_val(pud) != pud_val(*pudp))) {
- *nr -= refs;
- while (refs--)
- put_page(head);
- return 0;
- }
-
- return 1;
-}
-
-static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- pud_t *pudp, pud;
-
- pudp = (pud_t *) p4dp;
- if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
- pudp = (pud_t *) p4d_deref(p4d);
- pudp += pud_index(addr);
- do {
- pud = *pudp;
- barrier();
- next = pud_addr_end(addr, end);
- if (pud_none(pud))
- return 0;
- if (unlikely(pud_large(pud))) {
- if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
- nr))
- return 0;
- } else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
- nr))
- return 0;
- } while (pudp++, addr = next, addr != end);
-
- return 1;
-}
-
-static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
- unsigned long end, int write, struct page **pages, int *nr)
-{
- unsigned long next;
- p4d_t *p4dp, p4d;
-
- p4dp = (p4d_t *) pgdp;
- if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
- p4dp = (p4d_t *) pgd_deref(pgd);
- p4dp += p4d_index(addr);
- do {
- p4d = *p4dp;
- barrier();
- next = p4d_addr_end(addr, end);
- if (p4d_none(p4d))
- return 0;
- if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
- return 0;
- } while (p4dp++, addr = next, addr != end);
-
- return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- struct mm_struct *mm = current->mm;
- unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp, pgd;
- int nr = 0;
-
- start &= PAGE_MASK;
- addr = start;
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- if ((end <= start) || (end > mm->context.asce_limit))
- return 0;
- /*
- * local_irq_save() doesn't prevent pagetable teardown, but does
- * prevent the pagetables from being freed on s390.
- *
- * So long as we atomically load page table pointers versus teardown,
- * we can follow the address down to the the page and take a ref on it.
- */
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd = *pgdp;
- barrier();
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
-
- return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start: starting user address
- * @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
- * @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
-{
- int nr, ret;
-
- might_sleep();
- start &= PAGE_MASK;
- nr = __get_user_pages_fast(start, nr_pages, write, pages);
- if (nr == nr_pages)
- return nr;
-
- /* Try to get the remaining pages with get_user_pages */
- start += nr << PAGE_SHIFT;
- pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
- write ? FOLL_WRITE : 0);
- /* Have to be a bit careful with return values */
- if (nr > 0)
- ret = (ret < 0) ? nr : ret + nr;
- return ret;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3e82f66d5c61..14d1eae9fe43 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -49,6 +49,8 @@ unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
+bool initmem_freed;
+
static void __init setup_zero_pages(void)
{
unsigned int order;
@@ -148,20 +150,13 @@ void __init mem_init(void)
void free_initmem(void)
{
+ initmem_freed = true;
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
-}
-#endif
-
unsigned long memory_block_size_bytes(void)
{
/*
@@ -224,8 +219,8 @@ device_initcall(s390_cma_mem_init);
#endif /* CONFIG_CMA */
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
@@ -235,21 +230,22 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
if (rc)
return rc;
- rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
+ rc = __add_pages(nid, start_pfn, size_pages, restrictions);
if (rc)
vmem_remove_mapping(start, size);
return rc;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
/*
* There is no hardware or firmware interface which could trigger a
* hot memory remove on s390. So there is nothing that needs to be
* implemented.
*/
- return -EBUSY;
+ BUG();
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 01892dcf4029..0c1f257be422 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -28,7 +28,7 @@ static void __init kasan_early_panic(const char *reason)
{
sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
sclp_early_printk(reason);
- disabled_wait(0);
+ disabled_wait();
}
static void * __init kasan_early_alloc_segment(void)
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 97b3ee53852b..818deeb1ebc3 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
+#include <asm/stacktrace.h>
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index db6bb2f97a2c..99e06213a22b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -290,7 +290,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
tlb_remove_table(tlb, table);
}
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
{
unsigned int mask = (unsigned long) _table & 3;
void *table = (void *)((unsigned long) _table ^ mask);
@@ -316,67 +316,6 @@ static void __tlb_remove_table(void *_table)
}
}
-static void tlb_remove_table_smp_sync(void *arg)
-{
- /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely
- * on IRQ disabling. See the comment near struct mmu_table_batch.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
- __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
- struct mmu_table_batch *batch;
- int i;
-
- batch = container_of(head, struct mmu_table_batch, rcu);
-
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch) {
- call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
- *batch = NULL;
- }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- tlb->mm->context.flush_mm = 1;
- if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)
- __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- if (*batch == NULL) {
- __tlb_flush_mm_lazy(tlb->mm);
- tlb_remove_table_one(table);
- return;
- }
- (*batch)->nr = 0;
- }
- (*batch)->tables[(*batch)->nr++] = table;
- if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_flush_mmu(tlb);
-}
-
/*
* Base infrastructure required to generate basic asces, region, segment,
* and page tables that do not make use of enhanced features like EDAT1.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 8485d6dc2754..9ebd01219812 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
return old;
}
+#ifdef CONFIG_PGSTE
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
@@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
pmd = pmd_alloc(mm, pud, addr);
return pmd;
}
+#endif
pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t new)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 0472e27febdf..b403fa14847d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -413,6 +413,8 @@ void __init vmem_map_init(void)
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 51dd0267d014..5e7c63033159 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -455,7 +455,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth);
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
@@ -473,7 +473,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
/* br %r14 */
_EMIT2(0x07fe);
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable &&
(jit->seen & SEEN_FUNC)) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
@@ -999,7 +999,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/* lg %w1,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
EMIT_CONST_U64(func));
- if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ if (__is_defined(CC_USING_EXPOLINE) && !nospec_disable) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
} else {
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 43d9525c36fc..7441857df51b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,23 +13,17 @@
#include <linux/oprofile.h>
#include <linux/init.h>
#include <asm/processor.h>
-
-static int __s390_backtrace(void *data, unsigned long address, int reliable)
-{
- unsigned int *depth = data;
-
- if (*depth == 0)
- return 1;
- (*depth)--;
- oprofile_add_trace(address);
- return 0;
-}
+#include <asm/unwind.h>
static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
{
- if (user_mode(regs))
- return;
- dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
+ struct unwind_state state;
+
+ unwind_for_each_frame(&state, current, regs, 0) {
+ if (depth-- == 0)
+ break;
+ oprofile_add_trace(state.ip);
+ }
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 22d0871291ee..748626a33028 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,5 +3,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index dc9bc82c072c..0ebb7c405a25 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -24,11 +24,9 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/jump_label.h>
#include <linux/pci.h>
-#include <linux/msi.h>
#include <asm/isc.h>
#include <asm/airq.h>
@@ -37,30 +35,13 @@
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>
-#define DEBUG /* enable pr_debug */
-
-#define SIC_IRQ_MODE_ALL 0
-#define SIC_IRQ_MODE_SINGLE 1
-
-#define ZPCI_NR_DMA_SPACES 1
-#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
-
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
-static struct irq_chip zpci_irq_chip = {
- .name = "zPCI",
- .irq_unmask = pci_msi_unmask_irq,
- .irq_mask = pci_msi_mask_irq,
-};
-
static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
-static struct airq_iv *zpci_aisb_iv;
-static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
-
#define ZPCI_IOMAP_ENTRIES \
min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2), \
ZPCI_IOMAP_MAX_ENTRIES)
@@ -70,6 +51,8 @@ static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);
+DEFINE_STATIC_KEY_FALSE(have_mio);
+
static struct kmem_cache *zdev_fmb_cache;
struct zpci_dev *get_zdev_by_fid(u32 fid)
@@ -123,39 +106,6 @@ int pci_proc_domain(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
-/* Modify PCI: Register adapter interruptions */
-static int zpci_set_airq(struct zpci_dev *zdev)
-{
- u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
- struct zpci_fib fib = {0};
- u8 status;
-
- fib.isc = PCI_ISC;
- fib.sum = 1; /* enable summary notifications */
- fib.noi = airq_iv_end(zdev->aibv);
- fib.aibv = (unsigned long) zdev->aibv->vector;
- fib.aibvo = 0; /* each zdev has its own interrupt vector */
- fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
- fib.aisbo = zdev->aisb & 63;
-
- return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
-}
-
-/* Modify PCI: Unregister adapter interruptions */
-static int zpci_clear_airq(struct zpci_dev *zdev)
-{
- u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
- struct zpci_fib fib = {0};
- u8 cc, status;
-
- cc = zpci_mod_fc(req, &fib, &status);
- if (cc == 3 || (cc == 1 && status == 24))
- /* Function already gone or IRQs already deregistered. */
- cc = 0;
-
- return cc ? -EIO : 0;
-}
-
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
u64 base, u64 limit, u64 iota)
@@ -241,7 +191,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = zpci_load(&data, req, offset);
+ rc = __zpci_load(&data, req, offset);
if (!rc) {
data = le64_to_cpu((__force __le64) data);
data >>= (8 - len) * 8;
@@ -259,7 +209,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data <<= (8 - len) * 8;
data = (__force u64) cpu_to_le64(data);
- rc = zpci_store(data, req, offset);
+ rc = __zpci_store(data, req, offset);
return rc;
}
@@ -276,18 +226,48 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
zpci_memcpy_toio(to, from, count);
}
+void __iomem *ioremap(unsigned long ioaddr, unsigned long size)
+{
+ struct vm_struct *area;
+ unsigned long offset;
+
+ if (!size)
+ return NULL;
+
+ if (!static_branch_unlikely(&have_mio))
+ return (void __iomem *) ioaddr;
+
+ offset = ioaddr & ~PAGE_MASK;
+ ioaddr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ if (ioremap_page_range((unsigned long) area->addr,
+ (unsigned long) area->addr + size,
+ ioaddr, PAGE_KERNEL)) {
+ vunmap(area->addr);
+ return NULL;
+ }
+ return (void __iomem *) ((unsigned long) area->addr + offset);
+}
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(volatile void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+
/* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap_range(struct pci_dev *pdev,
- int bar,
- unsigned long offset,
- unsigned long max)
+static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
{
struct zpci_dev *zdev = to_zpci(pdev);
int idx;
- if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
- return NULL;
-
idx = zdev->bars[bar].map_idx;
spin_lock(&zpci_iomap_lock);
/* Detect overrun */
@@ -298,6 +278,30 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
+
+static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
EXPORT_SYMBOL(pci_iomap_range);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
@@ -306,7 +310,37 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
}
EXPORT_SYMBOL(pci_iomap);
-void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_wc_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
+EXPORT_SYMBOL(pci_iomap_wc_range);
+
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap_wc);
+
+static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
unsigned int idx = ZPCI_IDX(addr);
@@ -319,6 +353,19 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
}
spin_unlock(&zpci_iomap_lock);
}
+
+static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ pci_iounmap_mio(pdev, addr);
+ else
+ pci_iounmap_fh(pdev, addr);
+}
EXPORT_SYMBOL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -354,136 +401,6 @@ static struct pci_ops pci_root_ops = {
.write = pci_write,
};
-static void zpci_irq_handler(struct airq_struct *airq)
-{
- unsigned long si, ai;
- struct airq_iv *aibv;
- int irqs_on = 0;
-
- inc_irq_stat(IRQIO_PCI);
- for (si = 0;;) {
- /* Scan adapter summary indicator bit vector */
- si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
- if (si == -1UL) {
- if (irqs_on++)
- /* End of second scan with interrupts on. */
- break;
- /* First scan complete, reenable interrupts. */
- if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
- break;
- si = 0;
- continue;
- }
-
- /* Scan the adapter interrupt vector for this device. */
- aibv = zpci_aibv[si];
- for (ai = 0;;) {
- ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
- if (ai == -1UL)
- break;
- inc_irq_stat(IRQIO_MSI);
- airq_iv_lock(aibv, ai);
- generic_handle_irq(airq_iv_get_data(aibv, ai));
- airq_iv_unlock(aibv, ai);
- }
- }
-}
-
-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct zpci_dev *zdev = to_zpci(pdev);
- unsigned int hwirq, msi_vecs;
- unsigned long aisb;
- struct msi_desc *msi;
- struct msi_msg msg;
- int rc, irq;
-
- zdev->aisb = -1UL;
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
- msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
-
- /* Allocate adapter summary indicator bit */
- aisb = airq_iv_alloc_bit(zpci_aisb_iv);
- if (aisb == -1UL)
- return -EIO;
- zdev->aisb = aisb;
-
- /* Create adapter interrupt vector */
- zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
- if (!zdev->aibv)
- return -ENOMEM;
-
- /* Wire up shortcut pointer */
- zpci_aibv[aisb] = zdev->aibv;
-
- /* Request MSI interrupts */
- hwirq = 0;
- for_each_pci_msi_entry(msi, pdev) {
- if (hwirq >= msi_vecs)
- break;
- irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
- if (irq < 0)
- return -ENOMEM;
- rc = irq_set_msi_desc(irq, msi);
- if (rc)
- return rc;
- irq_set_chip_and_handler(irq, &zpci_irq_chip,
- handle_simple_irq);
- msg.data = hwirq;
- msg.address_lo = zdev->msi_addr & 0xffffffff;
- msg.address_hi = zdev->msi_addr >> 32;
- pci_write_msi_msg(irq, &msg);
- airq_iv_set_data(zdev->aibv, hwirq, irq);
- hwirq++;
- }
-
- /* Enable adapter interrupts */
- rc = zpci_set_airq(zdev);
- if (rc)
- return rc;
-
- return (msi_vecs == nvec) ? 0 : msi_vecs;
-}
-
-void arch_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct zpci_dev *zdev = to_zpci(pdev);
- struct msi_desc *msi;
- int rc;
-
- /* Disable adapter interrupts */
- rc = zpci_clear_airq(zdev);
- if (rc)
- return;
-
- /* Release MSI interrupts */
- for_each_pci_msi_entry(msi, pdev) {
- if (!msi->irq)
- continue;
- if (msi->msi_attrib.is_msix)
- __pci_msix_desc_mask_irq(msi, 1);
- else
- __pci_msi_desc_mask_irq(msi, 1, 1);
- irq_set_msi_desc(msi->irq, NULL);
- irq_free_desc(msi->irq);
- msi->msg.address_lo = 0;
- msi->msg.address_hi = 0;
- msi->msg.data = 0;
- msi->irq = 0;
- }
-
- if (zdev->aisb != -1UL) {
- zpci_aibv[zdev->aisb] = NULL;
- airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
- zdev->aisb = -1UL;
- }
- if (zdev->aibv) {
- airq_iv_release(zdev->aibv);
- zdev->aibv = NULL;
- }
-}
-
#ifdef CONFIG_PCI_IOV
static struct resource iov_res = {
.name = "PCI IOV res",
@@ -495,6 +412,7 @@ static struct resource iov_res = {
static void zpci_map_resources(struct pci_dev *pdev)
{
+ struct zpci_dev *zdev = to_zpci(pdev);
resource_size_t len;
int i;
@@ -502,8 +420,13 @@ static void zpci_map_resources(struct pci_dev *pdev)
len = pci_resource_len(pdev, i);
if (!len)
continue;
- pdev->resource[i].start =
- (resource_size_t __force) pci_iomap(pdev, i, 0);
+
+ if (static_branch_likely(&have_mio))
+ pdev->resource[i].start =
+ (resource_size_t __force) zdev->bars[i].mio_wb;
+ else
+ pdev->resource[i].start =
+ (resource_size_t __force) pci_iomap(pdev, i, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
}
@@ -524,6 +447,9 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
resource_size_t len;
int i;
+ if (static_branch_likely(&have_mio))
+ return;
+
for (i = 0; i < PCI_BAR_COUNT; i++) {
len = pci_resource_len(pdev, i);
if (!len)
@@ -533,41 +459,6 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
}
}
-static struct airq_struct zpci_airq = {
- .handler = zpci_irq_handler,
- .isc = PCI_ISC,
-};
-
-static int __init zpci_irq_init(void)
-{
- int rc;
-
- rc = register_adapter_interrupt(&zpci_airq);
- if (rc)
- goto out;
- /* Set summary to 1 to be called every time for the ISC. */
- *zpci_airq.lsi_ptr = 1;
-
- rc = -ENOMEM;
- zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
- if (!zpci_aisb_iv)
- goto out_airq;
-
- zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
- return 0;
-
-out_airq:
- unregister_adapter_interrupt(&zpci_airq);
-out:
- return rc;
-}
-
-static void zpci_irq_exit(void)
-{
- airq_iv_release(zpci_aisb_iv);
- unregister_adapter_interrupt(&zpci_airq);
-}
-
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
unsigned long entry;
@@ -958,7 +849,9 @@ static void zpci_mem_exit(void)
kmem_cache_destroy(zdev_fmb_cache);
}
-static unsigned int s390_pci_probe = 1;
+static unsigned int s390_pci_probe __initdata = 1;
+static unsigned int s390_pci_no_mio __initdata;
+unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;
char * __init pcibios_setup(char *str)
@@ -967,6 +860,14 @@ char * __init pcibios_setup(char *str)
s390_pci_probe = 0;
return NULL;
}
+ if (!strcmp(str, "nomio")) {
+ s390_pci_no_mio = 1;
+ return NULL;
+ }
+ if (!strcmp(str, "force_floating")) {
+ s390_pci_force_floating = 1;
+ return NULL;
+ }
return str;
}
@@ -985,6 +886,9 @@ static int __init pci_base_init(void)
if (!test_facility(69) || !test_facility(71))
return 0;
+ if (test_facility(153) && !s390_pci_no_mio)
+ static_branch_enable(&have_mio);
+
rc = zpci_debug_init();
if (rc)
goto out;
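A sketch (not part of the patch) of the static-key dispatch used throughout the zPCI rework above: the new MIO accessors and the legacy function-handle path are selected by a patched branch, and pci_base_init() enables the key once facility 153 is detected. The names below are illustrative stand-ins.

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(have_feature);		/* the real key above is "have_mio" */

static int op_new(void)    { return 0; }	/* stand-in for an MIO-based accessor */
static int op_legacy(void) { return 0; }	/* stand-in for a function-handle accessor */

static int do_op(void)
{
	/* Compiles to a patched jump rather than a runtime flag test */
	if (static_branch_likely(&have_feature))
		return op_new();
	return op_legacy();
}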
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index eeb7450db18c..3a36b07a5571 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -163,7 +163,14 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
memcpy(zdev->util_str, response->util_str,
sizeof(zdev->util_str));
}
+ zdev->mio_capable = response->mio_addr_avail;
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ if (!(response->mio_valid & (1 << (PCI_BAR_COUNT - i - 1))))
+ continue;
+ zdev->bars[i].mio_wb = (void __iomem *) response->addr[i].wb;
+ zdev->bars[i].mio_wt = (void __iomem *) response->addr[i].wt;
+ }
return 0;
}
@@ -279,11 +286,18 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
int rc;
rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
- if (!rc)
- /* Success -> store enabled handle in zdev */
- zdev->fh = fh;
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ if (rc)
+ goto out;
- zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ zdev->fh = fh;
+ if (zdev->mio_capable) {
+ rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_MIO);
+ zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
+ if (rc)
+ clp_disable_fh(zdev);
+ }
+out:
return rc;
}
@@ -296,11 +310,10 @@ int clp_disable_fh(struct zpci_dev *zdev)
return 0;
rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc);
if (!rc)
- /* Success -> store disabled handle in zdev */
zdev->fh = fh;
- zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
return rc;
}
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index f069929e8211..02f9505c99a8 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -8,9 +8,11 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
+#include <linux/jump_label.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
+#include <asm/pci_io.h>
#include <asm/processor.h>
#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
@@ -96,13 +98,15 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
-int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
if (!test_facility(72))
return -EIO;
- asm volatile (
- " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
- : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
+
+ asm volatile(
+ ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
+
return 0;
}
@@ -140,7 +144,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_load(u64 *data, u64 req, u64 offset)
+int __zpci_load(u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -156,6 +160,52 @@ int zpci_load(u64 *data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
+EXPORT_SYMBOL_GPL(__zpci_load);
+
+static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_load(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
+ : [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_load_fh(data, addr, len);
+
+ cc = __pcilg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
@@ -178,7 +228,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_store(u64 data, u64 req, u64 offset)
+int __zpci_store(u64 data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -194,6 +244,50 @@ int zpci_store(u64 data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
+EXPORT_SYMBOL_GPL(__zpci_store);
+
+static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_store(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), "+d" (r3)
+ : [data] "d" (data), [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_store_fh(addr, data, len);
+
+ cc = __pcistg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */
@@ -214,7 +308,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int zpci_store_block(const u64 *data, u64 req, u64 offset)
+int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -230,4 +324,63 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset)
return (cc > 0) ? -EIO : cc;
}
-EXPORT_SYMBOL_GPL(zpci_store_block);
+EXPORT_SYMBOL_GPL(__zpci_store_block);
+
+static inline int zpci_write_block_fh(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+ u64 offset = ZPCI_OFFSET(dst);
+
+ return __zpci_store_block(src, req, offset);
+}
+
+static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [len] "+d" (len)
+ : [ioaddr] "d" (ioaddr), [data] "Q" (*data)
+ : "cc");
+ *status = len >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_write_block(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_write_block_fh(dst, src, len);
+
+ cc = __pcistb_mio(src, (__force u64) dst, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) dst);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_write_block);
+
+static inline void __pciwb_mio(void)
+{
+ unsigned long unused = 0;
+
+ asm volatile (".insn rre,0xb9d50000,%[op],%[op]\n"
+ : [op] "+d" (unused));
+}
+
+void zpci_barrier(void)
+{
+ if (static_branch_likely(&have_mio))
+ __pciwb_mio();
+}
+EXPORT_SYMBOL_GPL(zpci_barrier);
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
new file mode 100644
index 000000000000..d80616ae8dd8
--- /dev/null
+++ b/arch/s390/pci/pci_irq.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+
+static enum {FLOATING, DIRECTED} irq_delivery;
+
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+#define SIC_IRQ_MODE_DIRECT 4
+#define SIC_IRQ_MODE_D_ALL 16
+#define SIC_IRQ_MODE_D_SINGLE 17
+#define SIC_IRQ_MODE_SET_CPU 18
+
+/*
+ * summary bit vector
+ * FLOATING - summary bit per function
+ * DIRECTED - summary bit per cpu (only used in fallback path)
+ */
+static struct airq_iv *zpci_sbv;
+
+/*
+ * interrupt bit vectors
+ * FLOATING - interrupt bit vector per function
+ * DIRECTED - interrupt bit vector per cpu
+ */
+static struct airq_iv **zpci_ibv;
+
+/* Modify PCI: Register adapter interruptions */
+static int zpci_set_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt0.isc = PCI_ISC;
+ fib.fmt0.sum = 1; /* enable summary notifications */
+ fib.fmt0.noi = airq_iv_end(zdev->aibv);
+ fib.fmt0.aibv = (unsigned long) zdev->aibv->vector;
+ fib.fmt0.aibvo = 0; /* each zdev has its own interrupt vector */
+ fib.fmt0.aisb = (unsigned long) zpci_sbv->vector + (zdev->aisb/64)*8;
+ fib.fmt0.aisbo = zdev->aisb & 63;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_clear_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
+/* Modify PCI: Register CPU directed interruptions */
+static int zpci_set_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt = 1;
+ fib.fmt1.noi = zdev->msi_nr_irqs;
+ fib.fmt1.dibvo = zdev->msi_first_bit;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister CPU directed interruptions */
+static int zpci_clear_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ fib.fmt = 1;
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
+static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
+ bool force)
+{
+ struct msi_desc *entry = irq_get_msi_desc(data->irq);
+ struct msi_msg msg = entry->msg;
+
+ msg.address_lo &= 0xff0000ff;
+ msg.address_lo |= (cpumask_first(dest) << 8);
+ pci_write_msi_msg(data->irq, &msg);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip zpci_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_set_affinity = zpci_set_irq_affinity,
+};
+
+static void zpci_handle_cpu_local_irq(bool rescan)
+{
+ struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
+ unsigned long bit;
+ int irqs_on = 0;
+
+ for (bit = 0;;) {
+ /* Scan the directed IRQ bit vector */
+ bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
+ if (bit == -1UL) {
+ if (!rescan || irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC))
+ break;
+ bit = 0;
+ continue;
+ }
+ inc_irq_stat(IRQIO_MSI);
+ generic_handle_irq(airq_iv_get_data(dibv, bit));
+ }
+}
+
+struct cpu_irq_data {
+ call_single_data_t csd;
+ atomic_t scheduled;
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
+
+static void zpci_handle_remote_irq(void *data)
+{
+ atomic_t *scheduled = data;
+
+ do {
+ zpci_handle_cpu_local_irq(false);
+ } while (atomic_dec_return(scheduled));
+}
+
+static void zpci_handle_fallback_irq(void)
+{
+ struct cpu_irq_data *cpu_data;
+ unsigned long cpu;
+ int irqs_on = 0;
+
+ for (cpu = 0;;) {
+ cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
+ if (cpu == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ cpu = 0;
+ continue;
+ }
+ cpu_data = &per_cpu(irq_data, cpu);
+ if (atomic_inc_return(&cpu_data->scheduled) > 1)
+ continue;
+
+ cpu_data->csd.func = zpci_handle_remote_irq;
+ cpu_data->csd.info = &cpu_data->scheduled;
+ cpu_data->csd.flags = 0;
+ smp_call_function_single_async(cpu, &cpu_data->csd);
+ }
+}
+
+static void zpci_directed_irq_handler(struct airq_struct *airq, bool floating)
+{
+ if (floating) {
+ inc_irq_stat(IRQIO_PCF);
+ zpci_handle_fallback_irq();
+ } else {
+ inc_irq_stat(IRQIO_PCD);
+ zpci_handle_cpu_local_irq(true);
+ }
+}
+
+static void zpci_floating_irq_handler(struct airq_struct *airq, bool floating)
+{
+ unsigned long si, ai;
+ struct airq_iv *aibv;
+ int irqs_on = 0;
+
+ inc_irq_stat(IRQIO_PCF);
+ for (si = 0;;) {
+ /* Scan adapter summary indicator bit vector */
+ si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
+ if (si == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ si = 0;
+ continue;
+ }
+
+ /* Scan the adapter interrupt vector for this device. */
+ aibv = zpci_ibv[si];
+ for (ai = 0;;) {
+ ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
+ if (ai == -1UL)
+ break;
+ inc_irq_stat(IRQIO_MSI);
+ airq_iv_lock(aibv, ai);
+ generic_handle_irq(airq_iv_get_data(aibv, ai));
+ airq_iv_unlock(aibv, ai);
+ }
+ }
+}
+
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ unsigned int hwirq, msi_vecs, cpu;
+ unsigned long bit;
+ struct msi_desc *msi;
+ struct msi_msg msg;
+ int rc, irq;
+
+ zdev->aisb = -1UL;
+ zdev->msi_first_bit = -1U;
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+
+ if (irq_delivery == DIRECTED) {
+ /* Allocate cpu vector bits */
+ bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+ if (bit == -1UL)
+ return -EIO;
+ } else {
+ /* Allocate adapter summary indicator bit */
+ bit = airq_iv_alloc_bit(zpci_sbv);
+ if (bit == -1UL)
+ return -EIO;
+ zdev->aisb = bit;
+
+ /* Create adapter interrupt vector */
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
+ if (!zdev->aibv)
+ return -ENOMEM;
+
+ /* Wire up shortcut pointer */
+ zpci_ibv[bit] = zdev->aibv;
+ /* Each function has its own interrupt vector */
+ bit = 0;
+ }
+
+ /* Request MSI interrupts */
+ hwirq = bit;
+ for_each_pci_msi_entry(msi, pdev) {
+ rc = -EIO;
+ if (hwirq - bit >= msi_vecs)
+ break;
+ irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity);
+ if (irq < 0)
+ return -ENOMEM;
+ rc = irq_set_msi_desc(irq, msi);
+ if (rc)
+ return rc;
+ irq_set_chip_and_handler(irq, &zpci_irq_chip,
+ handle_percpu_irq);
+ msg.data = hwirq;
+ if (irq_delivery == DIRECTED) {
+ msg.address_lo = zdev->msi_addr & 0xff0000ff;
+ msg.address_lo |= msi->affinity ?
+ (cpumask_first(&msi->affinity->mask) << 8) : 0;
+ for_each_possible_cpu(cpu) {
+ airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ }
+ } else {
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ airq_iv_set_data(zdev->aibv, hwirq, irq);
+ }
+ msg.address_hi = zdev->msi_addr >> 32;
+ pci_write_msi_msg(irq, &msg);
+ hwirq++;
+ }
+
+ zdev->msi_first_bit = bit;
+ zdev->msi_nr_irqs = msi_vecs;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_set_directed_irq(zdev);
+ else
+ rc = zpci_set_airq(zdev);
+ if (rc)
+ return rc;
+
+ return (msi_vecs == nvec) ? 0 : msi_vecs;
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct msi_desc *msi;
+ int rc;
+
+ /* Disable interrupts */
+ if (irq_delivery == DIRECTED)
+ rc = zpci_clear_directed_irq(zdev);
+ else
+ rc = zpci_clear_airq(zdev);
+ if (rc)
+ return;
+
+ /* Release MSI interrupts */
+ for_each_pci_msi_entry(msi, pdev) {
+ if (!msi->irq)
+ continue;
+ if (msi->msi_attrib.is_msix)
+ __pci_msix_desc_mask_irq(msi, 1);
+ else
+ __pci_msi_desc_mask_irq(msi, 1, 1);
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ }
+
+ if (zdev->aisb != -1UL) {
+ zpci_ibv[zdev->aisb] = NULL;
+ airq_iv_free_bit(zpci_sbv, zdev->aisb);
+ zdev->aisb = -1UL;
+ }
+ if (zdev->aibv) {
+ airq_iv_release(zdev->aibv);
+ zdev->aibv = NULL;
+ }
+
+ if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
+ airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
+}
+
+static struct airq_struct zpci_airq = {
+ .handler = zpci_floating_irq_handler,
+ .isc = PCI_ISC,
+};
+
+static void __init cpu_enable_directed_irq(void *unused)
+{
+ union zpci_sic_iib iib = {{0}};
+
+ iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC);
+}
+
+static int __init zpci_directed_irq_init(void)
+{
+ union zpci_sic_iib iib = {{0}};
+ unsigned int cpu;
+
+ zpci_sbv = airq_iv_create(num_possible_cpus(), 0);
+ if (!zpci_sbv)
+ return -ENOMEM;
+
+ iib.diib.isc = PCI_ISC;
+ iib.diib.nr_cpus = num_possible_cpus();
+ iib.diib.disb_addr = (u64) zpci_sbv->vector;
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
+
+ zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
+ GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Per CPU IRQ vectors look the same but bit-allocation
+ * is only done on the first vector.
+ */
+ zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
+ AIRQ_IV_DATA |
+ AIRQ_IV_CACHELINE |
+ (!cpu ? AIRQ_IV_ALLOC : 0));
+ if (!zpci_ibv[cpu])
+ return -ENOMEM;
+ }
+ on_each_cpu(cpu_enable_directed_irq, NULL, 1);
+
+ zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;
+
+ return 0;
+}
+
+static int __init zpci_floating_irq_init(void)
+{
+ zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
+ if (!zpci_sbv)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kfree(zpci_ibv);
+ return -ENOMEM;
+}
+
+int __init zpci_irq_init(void)
+{
+ int rc;
+
+ irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
+ if (s390_pci_force_floating)
+ irq_delivery = FLOATING;
+
+ if (irq_delivery == DIRECTED)
+ zpci_airq.handler = zpci_directed_irq_handler;
+
+ rc = register_adapter_interrupt(&zpci_airq);
+ if (rc)
+ goto out;
+ /* Set summary to 1 to be called every time for the ISC. */
+ *zpci_airq.lsi_ptr = 1;
+
+ switch (irq_delivery) {
+ case FLOATING:
+ rc = zpci_floating_irq_init();
+ break;
+ case DIRECTED:
+ rc = zpci_directed_irq_init();
+ break;
+ }
+
+ if (rc)
+ goto out_airq;
+
+ /*
+ * Enable floating IRQs (with suppression after one IRQ). When using
+ * directed IRQs this enables the fallback path.
+ */
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC);
+
+ return 0;
+out_airq:
+ unregister_adapter_interrupt(&zpci_airq);
+out:
+ return rc;
+}
+
+void __init zpci_irq_exit(void)
+{
+ unsigned int cpu;
+
+ if (irq_delivery == DIRECTED) {
+ for_each_possible_cpu(cpu) {
+ airq_iv_release(zpci_ibv[cpu]);
+ }
+ }
+ kfree(zpci_ibv);
+ if (zpci_sbv)
+ airq_iv_release(zpci_sbv);
+ unregister_adapter_interrupt(&zpci_airq);
+}
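All three handlers above share one control-flow idea: scan the bit vector while delivery for the ISC is suppressed and, once the scan comes up empty, re-enable delivery and scan one more time so that a bit set in the window between the last hit and the re-enable is not lost. Stripped of the airq details, the pattern looks like the sketch below; find_next_pending(), reenable_delivery() and handle_one() are hypothetical stand-ins for airq_iv_scan(), zpci_set_irq_ctrl() and generic_handle_irq().

/* Sketch of the scan / re-enable / re-scan pattern used above. */
extern unsigned long find_next_pending(unsigned long start);	/* ~ airq_iv_scan() */
extern int reenable_delivery(void);				/* ~ zpci_set_irq_ctrl() */
extern void handle_one(unsigned long bit);			/* ~ generic_handle_irq() */

static void handle_pending_bits(void)
{
	unsigned long bit = 0;
	int irqs_on = 0;

	for (;;) {
		bit = find_next_pending(bit);
		if (bit == -1UL) {
			if (irqs_on++)
				break;		/* end of second scan with delivery on */
			if (reenable_delivery())
				break;
			bit = 0;		/* rescan from the start */
			continue;
		}
		handle_one(bit);
	}
}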
diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile
index ce6a3f75065b..dc1ae4ff79d7 100644
--- a/arch/s390/purgatory/Makefile
+++ b/arch/s390/purgatory/Makefile
@@ -4,7 +4,7 @@ OBJECT_FILES_NON_STANDARD := y
purgatory-y := head.o purgatory.o string.o sha256.o mem.o
-targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
+targets += $(purgatory-y) purgatory.lds purgatory purgatory.ro
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
@@ -16,22 +16,26 @@ $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c FORCE
$(call if_changed_rule,cc_o_c)
-LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
-LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common
+KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
-$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+LDFLAGS_purgatory := -r --no-undefined -nostdlib -z nodefaultlib -T
+$(obj)/purgatory: $(obj)/purgatory.lds $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
-quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
+OBJCOPYFLAGS_purgatory.ro := -O elf64-s390
+OBJCOPYFLAGS_purgatory.ro += --remove-section='*debug*'
+OBJCOPYFLAGS_purgatory.ro += --remove-section='.comment'
+OBJCOPYFLAGS_purgatory.ro += --remove-section='.note.*'
+$(obj)/purgatory.ro: $(obj)/purgatory FORCE
+ $(call if_changed,objcopy)
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
- $(call if_changed,bin2c)
+$(obj)/kexec-purgatory.o: $(obj)/kexec-purgatory.S $(obj)/purgatory.ro FORCE
+ $(call if_changed_rule,as_o_S)
obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o
diff --git a/arch/s390/purgatory/kexec-purgatory.S b/arch/s390/purgatory/kexec-purgatory.S
new file mode 100644
index 000000000000..8293753100ae
--- /dev/null
+++ b/arch/s390/purgatory/kexec-purgatory.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .section .rodata, "a"
+
+ .align 8
+kexec_purgatory:
+ .globl kexec_purgatory
+ .incbin "arch/s390/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+ .align 8
+kexec_purgatory_size:
+ .globl kexec_purgatory_size
+ .quad .Lkexec_purgatory_end - kexec_purgatory
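Together with the Makefile change above, this embeds the purgatory image with .incbin at assembly time instead of converting it into a C array with bin2c. The rest of the kexec_file code only sees the two symbols defined here; their C-side view is roughly the sketch below (the exact declarations live in the generic kexec code, this is for illustration only).

#include <linux/types.h>	/* size_t */

/* Rough C-side view of the symbols emitted by kexec-purgatory.S. */
extern const char kexec_purgatory[];		/* start of the embedded purgatory.ro */
extern const size_t kexec_purgatory_size;	/* the .quad emitted after it */

static const void *purgatory_image(size_t *len)
{
	*len = kexec_purgatory_size;
	return kexec_purgatory;
}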
diff --git a/arch/s390/purgatory/purgatory.lds.S b/arch/s390/purgatory/purgatory.lds.S
new file mode 100644
index 000000000000..482eb4fbcef1
--- /dev/null
+++ b/arch/s390/purgatory/purgatory.lds.S
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
+OUTPUT_ARCH(s390:64-bit)
+
+ENTRY(purgatory_start)
+
+SECTIONS
+{
+ . = 0;
+ .head.text : {
+ _head = . ;
+ HEAD_TEXT
+ _ehead = . ;
+ }
+ .text : {
+ _text = .; /* Text */
+ *(.text)
+ *(.text.*)
+ _etext = . ;
+ }
+ .rodata : {
+ _rodata = . ;
+ *(.rodata) /* read-only data */
+ *(.rodata.*)
+ _erodata = . ;
+ }
+ .data : {
+ _data = . ;
+ *(.data)
+ *(.data.*)
+ _edata = . ;
+ }
+
+ . = ALIGN(256);
+ .bss : {
+ _bss = . ;
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ . = ALIGN(8); /* For convenience during zeroing */
+ _ebss = .;
+ }
+ _end = .;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.eh_frame)
+ *(*__ksymtab*)
+ *(___kcrctab*)
+ }
+}
diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss
index cd7e8f4419f5..884a9caff5fb 100644
--- a/arch/s390/scripts/Makefile.chkbss
+++ b/arch/s390/scripts/Makefile.chkbss
@@ -11,7 +11,8 @@ chkbss: $(addprefix $(obj)/, $(chkbss-files))
quiet_cmd_chkbss = CHKBSS $<
cmd_chkbss = \
- if ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
+ if $(OBJDUMP) -h $< | grep -q "\.bss" && \
+ ! $(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \
echo "error: $< .bss section is not empty" >&2; exit 1; \
fi; \
touch $@;
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index fd788e0f2e5b..cead9e0dcffb 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -93,6 +93,9 @@ static struct facility_def facility_defs[] = {
131, /* enhanced-SOP 2 and side-effect */
139, /* multiple epoch facility */
146, /* msa extension 8 */
+ 150, /* enhanced sort */
+ 151, /* deflate conversion */
+ 155, /* msa extension 9 */
-1 /* END */
}
},
diff --git a/arch/s390/tools/opcodes.txt b/arch/s390/tools/opcodes.txt
index 1cbed82cd17b..64638b764d1c 100644
--- a/arch/s390/tools/opcodes.txt
+++ b/arch/s390/tools/opcodes.txt
@@ -1,3 +1,5 @@
+0000 illegal E
+0002 brkpt E
0101 pr E
0102 upt E
0104 ptff E
@@ -257,6 +259,7 @@ b258 bsg RRE_RR
b25a bsa RRE_RR
b25d clst RRE_RR
b25e srst RRE_RR
+b25f chsc RRE_R0
b263 cmpsc RRE_RR
b274 siga S_RD
b276 xsch S_00
@@ -277,6 +280,9 @@ b29d lfpc S_RD
b2a5 tre RRE_RR
b2a6 cu21 RRF_U0RR
b2a7 cu12 RRF_U0RR
+b2ad nqap RRE_RR
+b2ae dqap RRE_RR
+b2af pqap RRE_RR
b2b0 stfle S_RD
b2b1 stfl S_RD
b2b2 lpswe S_RD
@@ -290,6 +296,7 @@ b2e5 epctr RRE_RR
b2e8 ppa RRF_U0RR
b2ec etnd RRE_R0
b2ed ecpga RRE_RR
+b2f0 iucv RRE_RR
b2f8 tend S_00
b2fa niai IE_UU
b2fc tabort S_RD
@@ -559,12 +566,15 @@ b998 alcr RRE_RR
b999 slbr RRE_RR
b99a epair RRE_R0
b99b esair RRE_R0
+b99c eqbs RRF_U0RR
b99d esea RRE_R0
b99e pti RRE_RR
b99f ssair RRE_R0
+b9a0 clp RRF_U0RR
b9a1 tpei RRE_RR
b9a2 ptf RRE_R0
b9aa lptea RRF_RURR2
+b9ab essa RRF_U0RR
b9ac irbm RRE_RR
b9ae rrbm RRE_RR
b9af pfmf RRE_RR
@@ -1039,6 +1049,7 @@ eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD
eb81 icmy RSY_RURD
+eb8a sqbs RSY_RDRU
eb8e mvclu RSY_RRRD
eb8f clclu RSY_RRRD
eb90 stmy RSY_RRRD
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index b1c91ea9a958..b77f512bb176 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -10,7 +10,6 @@ config SUPERH
select DMA_DECLARE_COHERENT
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK_NODE_MAP
- select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
select HAVE_ARCH_TRACEHOOK
select HAVE_PERF_EVENTS
@@ -53,6 +52,7 @@ config SUPERH
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_NMI
select NEED_SG_DMA_LENGTH
+ select ARCH_HAS_GIGANTIC_PAGE
help
The SuperH is a RISC processor targeted for use in embedded systems
@@ -90,12 +90,6 @@ config ARCH_DEFCONFIG
default "arch/sh/configs/shx3_defconfig" if SUPERH32
default "arch/sh/configs/cayman_defconfig" if SUPERH64
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config GENERIC_BUG
def_bool y
depends on BUG && SUPERH32
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 4009bef62fe9..b4a86f27e048 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -191,8 +191,8 @@ cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
drivers-$(CONFIG_OPROFILE) += arch/sh/oprofile/
-cflags-y += $(foreach d, $(cpuincdir-y), -Iarch/sh/include/$(d)) \
- $(foreach d, $(machdir-y), -Iarch/sh/include/$(d))
+cflags-y += $(foreach d, $(cpuincdir-y), -I $(srctree)/arch/sh/include/$(d)) \
+ $(foreach d, $(machdir-y), -I $(srctree)/arch/sh/include/$(d))
KBUILD_CFLAGS += -pipe $(cflags-y)
KBUILD_CPPFLAGS += $(cflags-y)
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
index 346eda7a2ef6..abf19a947df3 100644
--- a/arch/sh/boards/board-apsh4a3a.c
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -16,7 +16,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/clock.h>
static struct mtd_partition nor_flash_partitions[] = {
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
index 4efa9c571f64..fa031a16c9b5 100644
--- a/arch/sh/boards/board-apsh4ad0a.c
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/* Dummy supplies, where voltage doesn't matter */
static struct regulator_consumer_supply dummy_supplies[] = {
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
index 67a8803eb3f9..0de7d603da2d 100644
--- a/arch/sh/boards/board-edosk7705.c
+++ b/arch/sh/boards/board-edosk7705.c
@@ -16,7 +16,7 @@
#include <linux/smc91x.h>
#include <linux/sh_intc.h>
#include <asm/machvec.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#define SMC_IOBASE 0xA2000000
#define SMC_IO_OFFSET 0x300
diff --git a/arch/sh/boards/board-edosk7760.c b/arch/sh/boards/board-edosk7760.c
index 0fbe91cba67a..7569d85c5ff5 100644
--- a/arch/sh/boards/board-edosk7760.c
+++ b/arch/sh/boards/board-edosk7760.c
@@ -18,7 +18,7 @@
#include <asm/addrspace.h>
#include <asm/delay.h>
#include <asm/i2c-sh7760.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/* Bus state controller registers for CS4 area */
#define BSC_CS4BCR 0xA4FD0010
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index f478fee3b48a..6e784b5cf5a0 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -13,7 +13,7 @@
#include <linux/sh_eth.h>
#include <linux/sh_intc.h>
#include <asm/machvec.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
/* NOR Flash */
static struct mtd_partition espt_nor_flash_partitions[] = {
diff --git a/arch/sh/boards/board-urquell.c b/arch/sh/boards/board-urquell.c
index 799af57c0b81..dad2b3b40735 100644
--- a/arch/sh/boards/board-urquell.c
+++ b/arch/sh/boards/board-urquell.c
@@ -21,7 +21,7 @@
#include <mach/urquell.h>
#include <cpu/sh7786.h>
#include <asm/heartbeat.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/smp-ops.h>
/*
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
index a929f764ae04..cc06e4cdb4cd 100644
--- a/arch/sh/boards/mach-dreamcast/irq.c
+++ b/arch/sh/boards/mach-dreamcast/irq.c
@@ -10,7 +10,6 @@
*/
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/irq.h>
#include <linux/export.h>
#include <linux/err.h>
#include <mach/sysasic.h>
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index 706b48f797be..f4a777fe2d01 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -15,7 +15,7 @@
#include <mach/microdev.h>
#include <asm/io.h>
#include <asm/machvec.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
static struct resource smc91x_resources[] = {
[0] = {
diff --git a/arch/sh/boards/mach-sdk7786/fpga.c b/arch/sh/boards/mach-sdk7786/fpga.c
index 6d2a3d381c2a..895576ff8376 100644
--- a/arch/sh/boards/mach-sdk7786/fpga.c
+++ b/arch/sh/boards/mach-sdk7786/fpga.c
@@ -8,7 +8,7 @@
#include <linux/io.h>
#include <linux/bcd.h>
#include <mach/fpga.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#define FPGA_REGS_OFFSET 0x03fff800
#define FPGA_REGS_SIZE 0x490
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 65721c3a482c..d183026dbeb1 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -19,7 +19,7 @@
#include <mach/irq.h>
#include <asm/machvec.h>
#include <asm/heartbeat.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/clock.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
diff --git a/arch/sh/boards/mach-sdk7786/sram.c b/arch/sh/boards/mach-sdk7786/sram.c
index d76cdb7ede39..7c6ca976f332 100644
--- a/arch/sh/boards/mach-sdk7786/sram.c
+++ b/arch/sh/boards/mach-sdk7786/sram.c
@@ -13,7 +13,7 @@
#include <linux/string.h>
#include <mach/fpga.h>
#include <asm/sram.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
static int __init fpga_sram_init(void)
{
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 39a3175e72b2..1aedbfe32654 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach-se/mach/se7343.h>
#define PA_CPLD_BASE_ADDR 0x11400000
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index f6e3009edd4e..6d34592767f8 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -14,7 +14,7 @@
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/err.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach-se/mach/se7722.h>
#define IRQ01_BASE_ADDR 0x11800000
diff --git a/arch/sh/boot/.gitignore b/arch/sh/boot/.gitignore
index 541087d2029c..f50fdd9975c5 100644
--- a/arch/sh/boot/.gitignore
+++ b/arch/sh/boot/.gitignore
@@ -1,3 +1,4 @@
zImage
vmlinux*
uImage*
+!vmlinux.scr
diff --git a/arch/sh/configs/ap325rxa_defconfig b/arch/sh/configs/ap325rxa_defconfig
index 72b72e50a92e..0ef3f1f9de5c 100644
--- a/arch/sh/configs/ap325rxa_defconfig
+++ b/arch/sh/configs/ap325rxa_defconfig
@@ -35,7 +35,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_SH_FLCTL=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 825c641726c4..d0d9ebc7165b 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -19,7 +19,6 @@ CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 0c5dfccbfe37..bdb61d1d0127 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -7,7 +7,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_MEMORY_SIZE=0x10000000
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index 3568310c2c2f..ba67e3752938 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -38,7 +38,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
diff --git a/arch/sh/configs/migor_defconfig b/arch/sh/configs/migor_defconfig
index e04f21be0756..121a75d65fb4 100644
--- a/arch/sh/configs/migor_defconfig
+++ b/arch/sh/configs/migor_defconfig
@@ -34,7 +34,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_BLK_DEV_RAM=y
CONFIG_SCSI=y
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index 2b9b731fc86b..ad003ee469ea 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -16,7 +16,6 @@ CONFIG_PERF_COUNTERS=y
CONFIG_SLAB=y
CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_PROFILING=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig
index d041f7bcb84c..27fc01d58cf8 100644
--- a/arch/sh/configs/rsk7269_defconfig
+++ b/arch/sh/configs/rsk7269_defconfig
@@ -3,7 +3,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index d16e9334cd98..5209889765ad 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -108,7 +108,7 @@ CONFIG_MTD_ROM=m
CONFIG_MTD_ABSENT=m
CONFIG_MTD_PLATRAM=y
CONFIG_MTD_PHRAM=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_NAND_SH_FLCTL=m
CONFIG_MTD_UBI=y
diff --git a/arch/sh/configs/se7724_defconfig b/arch/sh/configs/se7724_defconfig
index aedb3a2d9a10..9f6d46d58554 100644
--- a/arch/sh/configs/se7724_defconfig
+++ b/arch/sh/configs/se7724_defconfig
@@ -37,7 +37,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
+CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 2ddf5ca7094e..a89ccc15af23 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -11,7 +11,6 @@ CONFIG_PROFILING=y
CONFIG_GCOV_KERNEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_START=0x40000000
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index ceb48e9b70f4..822fa9e96f74 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -155,7 +155,7 @@ CONFIG_INFTL=m
CONFIG_RFD_FTL=m
CONFIG_MTD_CFI=m
CONFIG_MTD_JEDECPROBE=m
-CONFIG_MTD_NAND=m
+CONFIG_MTD_RAW_NAND=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/sh/drivers/pci/pci-sh7751.c b/arch/sh/drivers/pci/pci-sh7751.c
index 1b9e5caac389..11ed21c2e9bb 100644
--- a/arch/sh/drivers/pci/pci-sh7751.c
+++ b/arch/sh/drivers/pci/pci-sh7751.c
@@ -14,7 +14,7 @@
#include <linux/io.h>
#include "pci-sh4.h"
#include <asm/addrspace.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
static int __init __area_sdram_check(struct pci_channel *chan,
unsigned int area)
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c
index 3fd0f392a0ee..287b3a68570c 100644
--- a/arch/sh/drivers/pci/pci-sh7780.c
+++ b/arch/sh/drivers/pci/pci-sh7780.c
@@ -16,7 +16,7 @@
#include <linux/log2.h>
#include "pci-sh4.h"
#include <asm/mmu.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#if defined(CONFIG_CPU_BIG_ENDIAN)
# define PCICR_ENDIANNESS SH4_PCICR_BSWP
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index a58b77cea295..e0b568aaa701 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -18,7 +18,7 @@
#include <linux/sh_intc.h>
#include <cpu/sh7786.h>
#include "pcie-sh7786.h"
-#include <asm/sizes.h>
+#include <linux/sizes.h>
struct sh7786_pcie_port {
struct pci_channel *hose;
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7bf2cb680d32..51a54df22c11 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -17,8 +17,6 @@ generic-y += mm-arch-hooks.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += serial.h
-generic-y += sizes.h
generic-y += trace_clock.h
generic-y += xor.h
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 4f7f235f15f8..c28e37a344ad 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -229,9 +229,6 @@ __BUILD_IOPORT_STRING(q, u64)
#define IO_SPACE_LIMIT 0xffffffff
-/* synco on SH-4A, otherwise a nop */
-#define mmiowb() wmb()
-
/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
diff --git a/arch/sh/include/asm/mmiowb.h b/arch/sh/include/asm/mmiowb.h
new file mode 100644
index 000000000000..535d59735f1d
--- /dev/null
+++ b/arch/sh/include/asm/mmiowb.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_MMIOWB_H
+#define __ASM_SH_MMIOWB_H
+
+#include <asm/barrier.h>
+
+/* synco on SH-4A, otherwise a nop */
+#define mmiowb() wmb()
+
+#include <asm-generic/mmiowb.h>
+
+#endif /* __ASM_SH_MMIOWB_H */
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 8ad73cb31121..b56f908b1395 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -70,6 +70,15 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
+#if CONFIG_PGTABLE_LEVELS > 2
+#define __pmd_free_tlb(tlb, pmdp, addr) \
+do { \
+ struct page *page = virt_to_page(pmdp); \
+ pgtable_pmd_page_dtor(page); \
+ tlb_remove_page((tlb), page); \
+} while (0)
+#endif
+
static inline void check_pgt_cache(void)
{
quicklist_trim(QUICK_PT, NULL, 25, 16);
diff --git a/arch/sh/include/asm/spinlock-llsc.h b/arch/sh/include/asm/spinlock-llsc.h
index 786ee0fde3b0..7fd929cd2e7a 100644
--- a/arch/sh/include/asm/spinlock-llsc.h
+++ b/arch/sh/include/asm/spinlock-llsc.h
@@ -47,6 +47,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned long tmp;
+ /* This could be optimised with ARCH_HAS_MMIOWB */
+ mmiowb();
__asm__ __volatile__ (
"mov #1, %0 ! arch_spin_unlock \n\t"
"mov.l %0, @%1 \n\t"
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 8c9d7e5e5dcc..0b5b8e75edac 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -72,7 +72,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->regs[4] = args[0];
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_SH;
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
index 22fad97da066..72efcbc76f91 100644
--- a/arch/sh/include/asm/syscall_64.h
+++ b/arch/sh/include/asm/syscall_64.h
@@ -59,7 +59,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_SH;
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 77abe192fb43..bc77f3dd4261 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -11,133 +11,8 @@
#ifdef CONFIG_MMU
#include <linux/swap.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-/*
- * TLB handling. This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int fullmm;
- unsigned long start, end;
-};
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (tlb->fullmm || force)
- flush_tlb_mm(tlb->mm);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-/*
- * In the case of tlb vma handling, we can optimise these away in the
- * case where we're doing a full MM flush. When we're doing a munmap,
- * the vmas are adjusted to only cover the region to be torn down.
- */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm)
- flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
-
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
-{
- if (!tlb->fullmm && tlb->end) {
- flush_tlb_range(vma, tlb->start, tlb->end);
- init_tlb_gather(tlb);
- }
-}
-
-static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
-}
-
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-}
-
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm) do { } while (0)
+#include <asm-generic/tlb.h>
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
@@ -157,11 +32,6 @@ static inline void tlb_unwire_entry(void)
#else /* CONFIG_MMU */
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-#define tlb_flush(tlb) do { } while (0)
-
#include <asm-generic/tlb.h>
#endif /* CONFIG_MMU */
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
index 8f9bfbf3cdb1..d6cce65b4871 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
@@ -132,7 +132,7 @@ enum {
static inline u32 sh7786_mm_sel(void)
{
- return __raw_readl(0xFC400020) & 0x7;
+ return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
}
#endif /* __CPU_SH7786_H__ */
diff --git a/arch/sh/include/uapi/asm/sockios.h b/arch/sh/include/uapi/asm/sockios.h
index 17313d2c3527..ef18a668456d 100644
--- a/arch/sh/include/uapi/asm/sockios.h
+++ b/arch/sh/include/uapi/asm/sockios.h
@@ -10,6 +10,7 @@
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
-#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp (timeval) */
-#define SIOCGSTAMPNS _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD _IOR('s', 100, struct timeval) /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
+
#endif /* __ASM_SH_SOCKIOS_H */
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c
index f3cb2cccb262..2950b19ad077 100644
--- a/arch/sh/kernel/stacktrace.c
+++ b/arch/sh/kernel/stacktrace.c
@@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace)
unsigned long *sp = (unsigned long *)current_stack_pointer;
unwind_stack(current, NULL, sp, &save_stack_ops, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
@@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
unsigned long *sp = (unsigned long *)tsk->thread.sp;
unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 480b057556ee..016a727d4357 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -430,3 +430,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 5db6579bc44c..6e8664448048 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -15,8 +15,7 @@ quiet_cmd_syscall = SYSCALL $@
export CPPFLAGS_vsyscall.lds += -P -C -Ush
-vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 -Wl,--hash-style=sysv
SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags)
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 3e27f6d1f1ec..277c882f7489 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -204,7 +204,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to
+ * @gup_flags: flags modifying pin behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
@@ -216,8 +216,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
@@ -241,7 +241,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+ pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
local_irq_enable();
@@ -261,7 +262,7 @@ slow_irqon:
ret = get_user_pages_unlocked(start,
(end - start) >> PAGE_SHIFT, pages,
- write ? FOLL_WRITE : 0);
+ gup_flags);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 70621324db41..5aeb4d7099a1 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -26,7 +26,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD];
@@ -403,28 +403,16 @@ void __init mem_init(void)
mem_init_done = 1;
}
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
/* We only have ZONE_NORMAL, so this is easy.. */
- ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
@@ -441,20 +429,15 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
- int ret;
zone = page_zone(pfn_to_page(start_pfn));
- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
- if (unlikely(ret))
- pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
- ret);
-
- return ret;
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 7b2cc490ebb7..a53a040d0054 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -24,7 +24,7 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 010010bf205a..bd1585e8efed 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/module.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/page.h>
#include <asm/addrspace.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 40f8f4f73fe8..7c93f3121ee6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -63,6 +63,7 @@ config SPARC64
select HAVE_KRETPROBES
select HAVE_KPROBES
select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_DYNAMIC_FTRACE
@@ -91,6 +92,7 @@ config SPARC64
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_PTE_SPECIAL
select PCI_DOMAINS if PCI
+ select ARCH_HAS_GIGANTIC_PAGE
config ARCH_DEFCONFIG
string
@@ -191,14 +193,6 @@ config NR_CPUS
source "kernel/Kconfig.hz"
-config RWSEM_GENERIC_SPINLOCK
- bool
- default y if SPARC32
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
- default y if SPARC64
-
config GENERIC_HWEIGHT
bool
default y
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 4884315daff4..453a4cf5492a 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -201,18 +201,15 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct des3_ede_sparc64_ctx *dctx = crypto_tfm_ctx(tfm);
- const u32 *K = (const u32 *)key;
u32 *flags = &tfm->crt_flags;
u64 k1[DES_EXPKEY_WORDS / 2];
u64 k2[DES_EXPKEY_WORDS / 2];
u64 k3[DES_EXPKEY_WORDS / 2];
+ int err;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
des_sparc64_key_expand((const u32 *)key, k1);
key += DES_KEY_SIZE;
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index a22cfd5c0ee8..95c44380b1d6 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,10 +15,10 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += msi.h
generic-y += preempt.h
-generic-y += rwsem.h
generic-y += serial.h
generic-y += trace_clock.h
generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index b162c23ae8c2..688911051b44 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -396,8 +396,6 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
}
}
-#define mmiowb()
-
#ifdef __KERNEL__
/* On sparc64 we have the whole physical IO address space accessible
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 1393a8ac596b..22500c3be7a9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -231,36 +231,6 @@ extern unsigned long _PAGE_ALL_SZ_BITS;
extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr) (mem_map_zero)
-/* This macro must be updated when the size of struct page grows above 80
- * or reduces below 64.
- * The idea that compiler optimizes out switch() statement, and only
- * leaves clrx instructions
- */
-#define mm_zero_struct_page(pp) do { \
- unsigned long *_pp = (void *)(pp); \
- \
- /* Check that struct page is either 64, 72, or 80 bytes */ \
- BUILD_BUG_ON(sizeof(struct page) & 7); \
- BUILD_BUG_ON(sizeof(struct page) < 64); \
- BUILD_BUG_ON(sizeof(struct page) > 80); \
- \
- switch (sizeof(struct page)) { \
- case 80: \
- _pp[9] = 0; /* fallthrough */ \
- case 72: \
- _pp[8] = 0; /* fallthrough */ \
- default: \
- _pp[7] = 0; \
- _pp[6] = 0; \
- _pp[5] = 0; \
- _pp[4] = 0; \
- _pp[3] = 0; \
- _pp[2] = 0; \
- _pp[1] = 0; \
- _pp[0] = 0; \
- } \
-} while (0)
-
/* PFNs are real physical page numbers. However, mem_map only begins to record
* per-page information starting at pfn_base. This is to handle systems where
* the first physical page in the machine is at some huge physical address,
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h
index 4d075434e816..62a5a78804c4 100644
--- a/arch/sparc/include/asm/syscall.h
+++ b/arch/sparc/include/asm/syscall.h
@@ -127,10 +127,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->u_regs[UREG_I0 + i] = args[i];
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
#if defined(CONFIG_SPARC64) && defined(CONFIG_COMPAT)
- return in_compat_syscall() ? AUDIT_ARCH_SPARC : AUDIT_ARCH_SPARC64;
+ return test_tsk_thread_flag(task, TIF_32BIT)
+ ? AUDIT_ARCH_SPARC : AUDIT_ARCH_SPARC64;
#elif defined(CONFIG_SPARC64)
return AUDIT_ARCH_SPARC64;
#else
diff --git a/arch/sparc/include/asm/tlb_32.h b/arch/sparc/include/asm/tlb_32.h
index 343cea19e573..5cd28a8793e3 100644
--- a/arch/sparc/include/asm/tlb_32.h
+++ b/arch/sparc/include/asm/tlb_32.h
@@ -2,24 +2,6 @@
#ifndef _SPARC_TLB_H
#define _SPARC_TLB_H
-#define tlb_start_vma(tlb, vma) \
-do { \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define tlb_end_vma(tlb, vma) \
-do { \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) \
- do { } while (0)
-
-#define tlb_flush(tlb) \
-do { \
- flush_tlb_mm((tlb)->mm); \
-} while (0)
-
#include <asm-generic/tlb.h>
#endif /* _SPARC_TLB_H */
diff --git a/arch/sparc/include/uapi/asm/sockios.h b/arch/sparc/include/uapi/asm/sockios.h
deleted file mode 100644
index 18a3ec14a847..000000000000
--- a/arch/sparc/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_SPARC_SOCKIOS_H
-#define _ASM_SPARC_SOCKIOS_H
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* !(_ASM_SPARC_SOCKIOS_H) */
-
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index d1d52822603d..1cb62bfeaa1f 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -194,8 +194,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
n = enumerate_cpuinfo_nodes(tmp_level);
- new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
- (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+ new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC);
if (!new_tree)
return NULL;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index f87265afb175..cad08ccce625 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -876,7 +876,7 @@ void ldom_power_off(void)
static void ds_conn_reset(struct ds_info *dp)
{
- printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n",
+ printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n",
dp->id, __builtin_return_address(0));
}
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index a1dd24307b00..e047480b1605 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -473,3 +473,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3eb77943ce12..89fb05f90609 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -653,19 +653,23 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned int cpu = freq->cpu;
- struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
+ unsigned int cpu;
+ struct freq_table *ft;
- if (!ft->ref_freq) {
- ft->ref_freq = freq->old;
- ft->clock_tick_ref = cpu_data(cpu).clock_tick;
- }
- if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
- cpu_data(cpu).clock_tick =
- cpufreq_scale(ft->clock_tick_ref,
- ft->ref_freq,
- freq->new);
+ for_each_cpu(cpu, freq->policy->cpus) {
+ ft = &per_cpu(sparc64_freq_table, cpu);
+
+ if (!ft->ref_freq) {
+ ft->ref_freq = freq->old;
+ ft->clock_tick_ref = cpu_data(cpu).clock_tick;
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ cpu_data(cpu).clock_tick =
+ cpufreq_scale(ft->clock_tick_ref, ft->ref_freq,
+ freq->new);
+ }
}
return 0;
diff --git a/arch/sparc/kernel/uprobes.c b/arch/sparc/kernel/uprobes.c
index d852ae56ddc1..c44bf5b85de8 100644
--- a/arch/sparc/kernel/uprobes.c
+++ b/arch/sparc/kernel/uprobes.c
@@ -29,7 +29,6 @@
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
-#include <linux/uaccess.h>
/* Compute the address of the breakpoint instruction and return it.
*
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index aee6dba83d0e..1e770a517d4a 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -245,8 +245,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
return nr;
}
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
@@ -303,7 +303,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
- if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+ if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+ pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
@@ -324,7 +325,7 @@ slow:
ret = get_user_pages_unlocked(start,
(end - start) >> PAGE_SHIFT, pages,
- write ? FOLL_WRITE : 0);
+ gup_flags);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index a8ff29821bdb..046ab116cc8c 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -294,19 +294,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-void free_initmem (void)
-{
- free_initmem_default(POISON_FREE_INITMEM);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
-}
-#endif
-
void sparc_flush_page_to_ram(struct page *page)
{
unsigned long vaddr = (unsigned long)page_address(page);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f2d70ff7a284..4b099dd7a767 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2269,19 +2269,6 @@ static unsigned long last_valid_pfn;
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
-static phys_addr_t __init available_memory(void)
-{
- phys_addr_t available = 0ULL;
- phys_addr_t pa_start, pa_end;
- u64 i;
-
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
- &pa_end, NULL)
- available = available + (pa_end - pa_start);
-
- return available;
-}
-
#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
@@ -2295,33 +2282,8 @@ static phys_addr_t __init available_memory(void)
*/
static void __init reduce_memory(phys_addr_t limit_ram)
{
- phys_addr_t avail_ram = available_memory();
- phys_addr_t pa_start, pa_end;
- u64 i;
-
- if (limit_ram >= avail_ram)
- return;
-
- for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
- &pa_end, NULL) {
- phys_addr_t region_size = pa_end - pa_start;
- phys_addr_t clip_start = pa_start;
-
- avail_ram = avail_ram - region_size;
- /* Are we consuming too much? */
- if (avail_ram < limit_ram) {
- phys_addr_t give_back = limit_ram - avail_ram;
-
- region_size = region_size - give_back;
- clip_start = clip_start + give_back;
- }
-
- memblock_remove(clip_start, region_size);
-
- if (avail_ram <= limit_ram)
- break;
- i = 0UL;
- }
+ limit_ram += memblock_reserved_size();
+ memblock_enforce_memory_limit(limit_ram);
}
void __init paging_init(void)
@@ -2610,14 +2572,6 @@ void free_initmem(void)
}
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
- "initrd");
-}
-#endif
-
pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index e8d5d73ca40d..71ac353032b6 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -175,16 +175,37 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
}
}
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t len, bool per_page_flush)
{
struct iommu_struct *iommu = dev->archdata.iommu;
- int ioptex;
- iopte_t *iopte, *iopte0;
+ phys_addr_t paddr = page_to_phys(page) + offset;
+ unsigned long off = paddr & ~PAGE_MASK;
+ unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long pfn = __phys_to_pfn(paddr);
unsigned int busa, busa0;
- int i;
+ iopte_t *iopte, *iopte0;
+ int ioptex, i;
+
+ /* XXX So what is maxphys for us and how do drivers know it? */
+ if (!len || len > 256 * 1024)
+ return DMA_MAPPING_ERROR;
+
+ /*
+ * We expect unmapped highmem pages to be not in the cache.
+ * XXX Is this a good assumption?
+ * XXX What if someone else unmaps it here and races us?
+ */
+ if (per_page_flush && !PageHighMem(page)) {
+ unsigned long vaddr, p;
+
+ vaddr = (unsigned long)page_address(page) + offset;
+ for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
+ flush_page_for_dma(p);
+ }
/* page color = pfn of page */
- ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+ ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
if (ioptex < 0)
panic("iommu out");
busa0 = iommu->start + (ioptex << PAGE_SHIFT);
@@ -193,29 +214,15 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
busa = busa0;
iopte = iopte0;
for (i = 0; i < npages; i++) {
- iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+ iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
iommu_invalidate_page(iommu->regs, busa);
busa += PAGE_SIZE;
iopte++;
- page++;
+ pfn++;
}
iommu_flush_iotlb(iopte0, npages);
-
- return busa0;
-}
-
-static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t len)
-{
- void *vaddr = page_address(page) + offset;
- unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
- unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- /* XXX So what is maxphys for us and how do drivers know it? */
- if (!len || len > 256 * 1024)
- return DMA_MAPPING_ERROR;
- return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
+ return busa0 + off;
}
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
@@ -223,81 +230,58 @@ static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
enum dma_data_direction dir, unsigned long attrs)
{
flush_page_for_dma(0);
- return __sbus_iommu_map_page(dev, page, offset, len);
+ return __sbus_iommu_map_page(dev, page, offset, len, false);
}
static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
struct page *page, unsigned long offset, size_t len,
enum dma_data_direction dir, unsigned long attrs)
{
- void *vaddr = page_address(page) + offset;
- unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
-
- while (p < (unsigned long)vaddr + len) {
- flush_page_for_dma(p);
- p += PAGE_SIZE;
- }
-
- return __sbus_iommu_map_page(dev, page, offset, len);
+ return __sbus_iommu_map_page(dev, page, offset, len, true);
}
-static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs,
+ bool per_page_flush)
{
struct scatterlist *sg;
- int i, n;
-
- flush_page_for_dma(0);
+ int j;
- for_each_sg(sgl, sg, nents, i) {
- n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+ for_each_sg(sgl, sg, nents, j) {
+ sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
+ sg->offset, sg->length, per_page_flush);
+ if (sg->dma_address == DMA_MAPPING_ERROR)
+ return 0;
sg->dma_length = sg->length;
}
return nents;
}
-static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
- unsigned long page, oldpage = 0;
- struct scatterlist *sg;
- int i, j, n;
-
- for_each_sg(sgl, sg, nents, j) {
- n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
- /*
- * We expect unmapped highmem pages to be not in the cache.
- * XXX Is this a good assumption?
- * XXX What if someone else unmaps it here and races us?
- */
- if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
- for (i = 0; i < n; i++) {
- if (page != oldpage) { /* Already flushed? */
- flush_page_for_dma(page);
- oldpage = page;
- }
- page += PAGE_SIZE;
- }
- }
-
- sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
- sg->dma_length = sg->length;
- }
+ flush_page_for_dma(0);
+ return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
+}
- return nents;
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t len, enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_struct *iommu = dev->archdata.iommu;
- int ioptex;
- int i;
+ unsigned int busa = dma_addr & PAGE_MASK;
+ unsigned long off = dma_addr & ~PAGE_MASK;
+ unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+ unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+ unsigned int i;
BUG_ON(busa < iommu->start);
- ioptex = (busa - iommu->start) >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
iopte_val(iommu->page_table[ioptex + i]) = 0;
iommu_invalidate_page(iommu->regs, busa);
@@ -306,25 +290,15 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
bit_map_clear(&iommu->usemap, ioptex, npages);
}
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t len, enum dma_data_direction dir, unsigned long attrs)
-{
- unsigned long off = dma_addr & ~PAGE_MASK;
- int npages;
-
- npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
- iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
-}
-
static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
- int i, n;
+ int i;
for_each_sg(sgl, sg, nents, i) {
- n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+ sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+ attrs);
sg->dma_address = 0x21212121;
}
}
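
The sbus_iommu_map_page_*() and *_map_sg_*() paths above differ only in cache-flush policy, so the refactor folds the shared body into __sbus_iommu_map_page()/__sbus_iommu_map_sg() with a per_page_flush flag. A stand-alone sketch of that wrapper pattern (simplified stand-ins, not the sparc32 IOMMU code):

    /* Two entry points that differ only in flush strategy share one body. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    static void flush_all(void)        { puts("flush whole cache once"); }
    static void flush_one(size_t page) { printf("flush page %zu\n", page); }

    static int map_common(size_t first_page, size_t npages, bool per_page_flush)
    {
        if (per_page_flush)
            for (size_t p = first_page; p < first_page + npages; p++)
                flush_one(p);
        /* ... the real code fills in IOMMU entries for the range here ... */
        return 0;
    }

    static int map_gflush(size_t first, size_t n)
    {
        flush_all();                        /* one global flush up front */
        return map_common(first, n, false);
    }

    static int map_pflush(size_t first, size_t n)
    {
        return map_common(first, n, true);  /* flush page by page instead */
    }

    int main(void)
    {
        map_gflush(0, 2);
        map_pflush(4, 2);
        return 0;
    }

The scatterlist variants then reduce to a loop over the single-page helper, which is exactly what __sbus_iommu_map_sg() does in the hunk above.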
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index 74e97f77e23b..83c4b463cb3d 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -68,7 +68,7 @@ CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg
$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index ec9711d068b7..6b6eb938fcc1 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -80,46 +80,46 @@ config LD_SCRIPT_DYN
bool
default y
depends on !LD_SCRIPT_STATIC
- select MODULE_REL_CRCS if MODVERSIONS
+ select MODULE_REL_CRCS if MODVERSIONS
config HOSTFS
tristate "Host filesystem"
help
- While the User-Mode Linux port uses its own root file system for
- booting and normal file access, this module lets the UML user
- access files stored on the host. It does not require any
- network connection between the Host and UML. An example use of
- this might be:
+ While the User-Mode Linux port uses its own root file system for
+ booting and normal file access, this module lets the UML user
+ access files stored on the host. It does not require any
+ network connection between the Host and UML. An example use of
+ this might be:
- mount none /tmp/fromhost -t hostfs -o /tmp/umlshare
+ mount none /tmp/fromhost -t hostfs -o /tmp/umlshare
- where /tmp/fromhost is an empty directory inside UML and
- /tmp/umlshare is a directory on the host with files the UML user
- wishes to access.
+ where /tmp/fromhost is an empty directory inside UML and
+ /tmp/umlshare is a directory on the host with files the UML user
+ wishes to access.
- For more information, see
- <http://user-mode-linux.sourceforge.net/hostfs.html>.
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/hostfs.html>.
- If you'd like to be able to work with files stored on the host,
- say Y or M here; otherwise say N.
+ If you'd like to be able to work with files stored on the host,
+ say Y or M here; otherwise say N.
config MCONSOLE
bool "Management console"
depends on PROC_FS
default y
help
- The user mode linux management console is a low-level interface to
- the kernel, somewhat like the i386 SysRq interface. Since there is
- a full-blown operating system running under every user mode linux
- instance, there is much greater flexibility possible than with the
- SysRq mechanism.
+ The user mode linux management console is a low-level interface to
+ the kernel, somewhat like the i386 SysRq interface. Since there is
+ a full-blown operating system running under every user mode linux
+ instance, there is much greater flexibility possible than with the
+ SysRq mechanism.
- If you answer 'Y' to this option, to use this feature, you need the
- mconsole client (called uml_mconsole) which is present in CVS in
- 2.4.5-9um and later (path /tools/mconsole), and is also in the
- distribution RPM package in 2.4.6 and later.
+ If you answer 'Y' to this option, to use this feature, you need the
+ mconsole client (called uml_mconsole) which is present in CVS in
+ 2.4.5-9um and later (path /tools/mconsole), and is also in the
+ distribution RPM package in 2.4.6 and later.
- It is safe to say 'Y' here.
+ It is safe to say 'Y' here.
config MAGIC_SYSRQ
bool "Magic SysRq key"
@@ -142,13 +142,17 @@ config MAGIC_SYSRQ
config KERNEL_STACK_ORDER
int "Kernel stack size order"
- default 1 if 64BIT
- range 1 10 if 64BIT
- default 0 if !64BIT
+ default 2 if 64BIT
+ range 2 10 if 64BIT
+ default 1 if !64BIT
help
This option determines the size of UML kernel stacks. They will
be 1 << order pages. The default is OK unless you're running Valgrind
on UML, in which case, set this to 3.
+ It is possible to reduce the stack to 1 for 64BIT and 0 for 32BIT on
+ older (pre-2017) CPUs. It is not recommended on newer CPUs due to the
+ increase in the size of the state which needs to be saved when handling
+ signals.
config MMAPPER
tristate "iomem emulation driver"
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 2b1aaf7755aa..2638e46f50cc 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -11,58 +11,58 @@ config STDERR_CONSOLE
config SSL
bool "Virtual serial line"
help
- The User-Mode Linux environment allows you to create virtual serial
- lines on the UML that are usually made to show up on the host as
- ttys or ptys.
+ The User-Mode Linux environment allows you to create virtual serial
+ lines on the UML that are usually made to show up on the host as
+ ttys or ptys.
- See <http://user-mode-linux.sourceforge.net/old/input.html> for more
- information and command line examples of how to use this facility.
+ See <http://user-mode-linux.sourceforge.net/old/input.html> for more
+ information and command line examples of how to use this facility.
- Unless you have a specific reason for disabling this, say Y.
+ Unless you have a specific reason for disabling this, say Y.
config NULL_CHAN
bool "null channel support"
help
- This option enables support for attaching UML consoles and serial
- lines to a device similar to /dev/null. Data written to it disappears
- and there is never any data to be read.
+ This option enables support for attaching UML consoles and serial
+ lines to a device similar to /dev/null. Data written to it disappears
+ and there is never any data to be read.
config PORT_CHAN
bool "port channel support"
help
- This option enables support for attaching UML consoles and serial
- lines to host portals. They may be accessed with 'telnet <host>
- <port number>'. Any number of consoles and serial lines may be
- attached to a single portal, although what UML device you get when
- you telnet to that portal will be unpredictable.
- It is safe to say 'Y' here.
+ This option enables support for attaching UML consoles and serial
+ lines to host portals. They may be accessed with 'telnet <host>
+ <port number>'. Any number of consoles and serial lines may be
+ attached to a single portal, although what UML device you get when
+ you telnet to that portal will be unpredictable.
+ It is safe to say 'Y' here.
config PTY_CHAN
bool "pty channel support"
help
- This option enables support for attaching UML consoles and serial
- lines to host pseudo-terminals. Access to both traditional
- pseudo-terminals (/dev/pty*) and pts pseudo-terminals are controlled
- with this option. The assignment of UML devices to host devices
- will be announced in the kernel message log.
- It is safe to say 'Y' here.
+ This option enables support for attaching UML consoles and serial
+ lines to host pseudo-terminals. Access to both traditional
+ pseudo-terminals (/dev/pty*) and pts pseudo-terminals are controlled
+ with this option. The assignment of UML devices to host devices
+ will be announced in the kernel message log.
+ It is safe to say 'Y' here.
config TTY_CHAN
bool "tty channel support"
help
- This option enables support for attaching UML consoles and serial
- lines to host terminals. Access to both virtual consoles
- (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
- /dev/pts/*) are controlled by this option.
- It is safe to say 'Y' here.
+ This option enables support for attaching UML consoles and serial
+ lines to host terminals. Access to both virtual consoles
+ (/dev/tty*) and the slave side of pseudo-terminals (/dev/ttyp* and
+ /dev/pts/*) are controlled by this option.
+ It is safe to say 'Y' here.
config XTERM_CHAN
bool "xterm channel support"
help
- This option enables support for attaching UML consoles and serial
- lines to xterms. Each UML device so assigned will be brought up in
- its own xterm.
- It is safe to say 'Y' here.
+ This option enables support for attaching UML consoles and serial
+ lines to xterms. Each UML device so assigned will be brought up in
+ its own xterm.
+ It is safe to say 'Y' here.
config NOCONFIG_CHAN
bool
@@ -72,43 +72,43 @@ config CON_ZERO_CHAN
string "Default main console channel initialization"
default "fd:0,fd:1"
help
- This is the string describing the channel to which the main console
- will be attached by default. This value can be overridden from the
- command line. The default value is "fd:0,fd:1", which attaches the
- main console to stdin and stdout.
- It is safe to leave this unchanged.
+ This is the string describing the channel to which the main console
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "fd:0,fd:1", which attaches the
+ main console to stdin and stdout.
+ It is safe to leave this unchanged.
config CON_CHAN
string "Default console channel initialization"
default "xterm"
help
- This is the string describing the channel to which all consoles
- except the main console will be attached by default. This value can
- be overridden from the command line. The default value is "xterm",
- which brings them up in xterms.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have X or xterm available.
+ This is the string describing the channel to which all consoles
+ except the main console will be attached by default. This value can
+ be overridden from the command line. The default value is "xterm",
+ which brings them up in xterms.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have X or xterm available.
config SSL_CHAN
string "Default serial line channel initialization"
default "pty"
help
- This is the string describing the channel to which the serial lines
- will be attached by default. This value can be overridden from the
- command line. The default value is "pty", which attaches them to
- traditional pseudo-terminals.
- It is safe to leave this unchanged, although you may wish to change
- this if you expect the UML that you build to be run in environments
- which don't have a set of /dev/pty* devices.
+ This is the string describing the channel to which the serial lines
+ will be attached by default. This value can be overridden from the
+ command line. The default value is "pty", which attaches them to
+ traditional pseudo-terminals.
+ It is safe to leave this unchanged, although you may wish to change
+ this if you expect the UML that you build to be run in environments
+ which don't have a set of /dev/pty* devices.
config UML_SOUND
tristate "Sound support"
help
- This option enables UML sound support. If enabled, it will pull in
- soundcore and the UML hostaudio relay, which acts as a intermediary
- between the host's dsp and mixer devices and the UML sound system.
- It is safe to say 'Y' here.
+ This option enables UML sound support. If enabled, it will pull in
+ soundcore and the UML hostaudio relay, which acts as an intermediary
+ between the host's dsp and mixer devices and the UML sound system.
+ It is safe to say 'Y' here.
config SOUND
tristate
@@ -131,107 +131,107 @@ menu "UML Network Devices"
config UML_NET
bool "Virtual network device"
help
- While the User-Mode port cannot directly talk to any physical
- hardware devices, this choice and the following transport options
- provide one or more virtual network devices through which the UML
- kernels can talk to each other, the host, and with the host's help,
- machines on the outside world.
+ While the User-Mode port cannot directly talk to any physical
+ hardware devices, this choice and the following transport options
+ provide one or more virtual network devices through which the UML
+ kernels can talk to each other, the host, and with the host's help,
+ machines on the outside world.
- For more information, including explanations of the networking and
- sample configurations, see
- <http://user-mode-linux.sourceforge.net/old/networking.html>.
+ For more information, including explanations of the networking and
+ sample configurations, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html>.
- If you'd like to be able to enable networking in the User-Mode
- linux environment, say Y; otherwise say N. Note that you must
- enable at least one of the following transport options to actually
- make use of UML networking.
+ If you'd like to be able to enable networking in the User-Mode
+ linux environment, say Y; otherwise say N. Note that you must
+ enable at least one of the following transport options to actually
+ make use of UML networking.
config UML_NET_ETHERTAP
bool "Ethertap transport"
depends on UML_NET
help
- The Ethertap User-Mode Linux network transport allows a single
- running UML to exchange packets with its host over one of the
- host's Ethertap devices, such as /dev/tap0. Additional running
- UMLs can use additional Ethertap devices, one per running UML.
- While the UML believes it's on a (multi-device, broadcast) virtual
- Ethernet network, it's in fact communicating over a point-to-point
- link with the host.
-
- To use this, your host kernel must have support for Ethertap
- devices. Also, if your host kernel is 2.4.x, it must have
- CONFIG_NETLINK_DEV configured as Y or M.
-
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html> That site
- has examples of the UML command line to use to enable Ethertap
- networking.
-
- If you'd like to set up an IP network with the host and/or the
- outside world, say Y to this, the Daemon Transport and/or the
- Slip Transport. You'll need at least one of them, but may choose
- more than one without conflict. If you don't need UML networking,
- say N.
+ The Ethertap User-Mode Linux network transport allows a single
+ running UML to exchange packets with its host over one of the
+ host's Ethertap devices, such as /dev/tap0. Additional running
+ UMLs can use additional Ethertap devices, one per running UML.
+ While the UML believes it's on a (multi-device, broadcast) virtual
+ Ethernet network, it's in fact communicating over a point-to-point
+ link with the host.
+
+ To use this, your host kernel must have support for Ethertap
+ devices. Also, if your host kernel is 2.4.x, it must have
+ CONFIG_NETLINK_DEV configured as Y or M.
+
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
+ has examples of the UML command line to use to enable Ethertap
+ networking.
+
+ If you'd like to set up an IP network with the host and/or the
+ outside world, say Y to this, the Daemon Transport and/or the
+ Slip Transport. You'll need at least one of them, but may choose
+ more than one without conflict. If you don't need UML networking,
+ say N.
config UML_NET_TUNTAP
bool "TUN/TAP transport"
depends on UML_NET
help
- The UML TUN/TAP network transport allows a UML instance to exchange
- packets with the host over a TUN/TAP device. This option will only
- work with a 2.4 host, unless you've applied the TUN/TAP patch to
- your 2.2 host kernel.
+ The UML TUN/TAP network transport allows a UML instance to exchange
+ packets with the host over a TUN/TAP device. This option will only
+ work with a 2.4 host, unless you've applied the TUN/TAP patch to
+ your 2.2 host kernel.
- To use this transport, your host kernel must have support for TUN/TAP
- devices, either built-in or as a module.
+ To use this transport, your host kernel must have support for TUN/TAP
+ devices, either built-in or as a module.
config UML_NET_SLIP
bool "SLIP transport"
depends on UML_NET
help
- The slip User-Mode Linux network transport allows a running UML to
- network with its host over a point-to-point link. Unlike Ethertap,
- which can carry any Ethernet frame (and hence even non-IP packets),
- the slip transport can only carry IP packets.
-
- To use this, your host must support slip devices.
-
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html>.
- has examples of the UML command line to use to enable slip
- networking, and details of a few quirks with it.
-
- The Ethertap Transport is preferred over slip because of its
- limitations. If you prefer slip, however, say Y here. Otherwise
- choose the Multicast transport (to network multiple UMLs on
- multiple hosts), Ethertap (to network with the host and the
- outside world), and/or the Daemon transport (to network multiple
- UMLs on a single host). You may choose more than one without
- conflict. If you don't need UML networking, say N.
+ The slip User-Mode Linux network transport allows a running UML to
+ network with its host over a point-to-point link. Unlike Ethertap,
+ which can carry any Ethernet frame (and hence even non-IP packets),
+ the slip transport can only carry IP packets.
+
+ To use this, your host must support slip devices.
+
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html>.
+ That site has examples of the UML command line to use to enable slip
+ networking, and details of a few quirks with it.
+
+ The Ethertap Transport is preferred over slip because of its
+ limitations. If you prefer slip, however, say Y here. Otherwise
+ choose the Multicast transport (to network multiple UMLs on
+ multiple hosts), Ethertap (to network with the host and the
+ outside world), and/or the Daemon transport (to network multiple
+ UMLs on a single host). You may choose more than one without
+ conflict. If you don't need UML networking, say N.
config UML_NET_DAEMON
bool "Daemon transport"
depends on UML_NET
help
- This User-Mode Linux network transport allows one or more running
- UMLs on a single host to communicate with each other, but not to
- the host.
-
- To use this form of networking, you'll need to run the UML
- networking daemon on the host.
-
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html> That site
- has examples of the UML command line to use to enable Daemon
- networking.
-
- If you'd like to set up a network with other UMLs on a single host,
- say Y. If you need a network between UMLs on multiple physical
- hosts, choose the Multicast Transport. To set up a network with
- the host and/or other IP machines, say Y to the Ethertap or Slip
- transports. You'll need at least one of them, but may choose
- more than one without conflict. If you don't need UML networking,
- say N.
+ This User-Mode Linux network transport allows one or more running
+ UMLs on a single host to communicate with each other, but not to
+ the host.
+
+ To use this form of networking, you'll need to run the UML
+ networking daemon on the host.
+
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
+ has examples of the UML command line to use to enable Daemon
+ networking.
+
+ If you'd like to set up a network with other UMLs on a single host,
+ say Y. If you need a network between UMLs on multiple physical
+ hosts, choose the Multicast Transport. To set up a network with
+ the host and/or other IP machines, say Y to the Ethertap or Slip
+ transports. You'll need at least one of them, but may choose
+ more than one without conflict. If you don't need UML networking,
+ say N.
config UML_NET_VECTOR
bool "Vector I/O high performance network devices"
@@ -270,26 +270,26 @@ config UML_NET_MCAST
bool "Multicast transport"
depends on UML_NET
help
- This Multicast User-Mode Linux network transport allows multiple
- UMLs (even ones running on different host machines!) to talk to
- each other over a virtual ethernet network. However, it requires
- at least one UML with one of the other transports to act as a
- bridge if any of them need to be able to talk to their hosts or any
- other IP machines.
-
- To use this, your host kernel(s) must support IP Multicasting.
-
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html> That site
- has examples of the UML command line to use to enable Multicast
- networking, and notes about the security of this approach.
-
- If you need UMLs on multiple physical hosts to communicate as if
- they shared an Ethernet network, say Y. If you need to communicate
- with other IP machines, make sure you select one of the other
- transports (possibly in addition to Multicast; they're not
- exclusive). If you don't need to network UMLs say N to each of
- the transports.
+ This Multicast User-Mode Linux network transport allows multiple
+ UMLs (even ones running on different host machines!) to talk to
+ each other over a virtual ethernet network. However, it requires
+ at least one UML with one of the other transports to act as a
+ bridge if any of them need to be able to talk to their hosts or any
+ other IP machines.
+
+ To use this, your host kernel(s) must support IP Multicasting.
+
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
+ has examples of the UML command line to use to enable Multicast
+ networking, and notes about the security of this approach.
+
+ If you need UMLs on multiple physical hosts to communicate as if
+ they shared an Ethernet network, say Y. If you need to communicate
+ with other IP machines, make sure you select one of the other
+ transports (possibly in addition to Multicast; they're not
+ exclusive). If you don't need to network UMLs say N to each of
+ the transports.
config UML_NET_PCAP
bool "pcap transport"
@@ -300,9 +300,9 @@ config UML_NET_PCAP
UML act as a network monitor for the host. You must have libcap
installed in order to build the pcap transport into UML.
- For more information, see
- <http://user-mode-linux.sourceforge.net/old/networking.html> That site
- has examples of the UML command line to use to enable this option.
+ For more information, see
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
+ has examples of the UML command line to use to enable this option.
If you intend to use UML as a network monitor for the host, say
Y here. Otherwise, say N.
@@ -311,27 +311,27 @@ config UML_NET_SLIRP
bool "SLiRP transport"
depends on UML_NET
help
- The SLiRP User-Mode Linux network transport allows a running UML
- to network by invoking a program that can handle SLIP encapsulated
- packets. This is commonly (but not limited to) the application
- known as SLiRP, a program that can re-socket IP packets back onto
- the host on which it is run. Only IP packets are supported,
- unlike other network transports that can handle all Ethernet
- frames. In general, slirp allows the UML the same IP connectivity
- to the outside world that the host user is permitted, and unlike
- other transports, SLiRP works without the need of root level
- privleges, setuid binaries, or SLIP devices on the host. This
- also means not every type of connection is possible, but most
- situations can be accommodated with carefully crafted slirp
- commands that can be passed along as part of the network device's
- setup string. The effect of this transport on the UML is similar
- that of a host behind a firewall that masquerades all network
- connections passing through it (but is less secure).
-
- To use this you should first have slirp compiled somewhere
- accessible on the host, and have read its documentation. If you
- don't need UML networking, say N.
-
- Startup example: "eth0=slirp,FE:FD:01:02:03:04,/usr/local/bin/slirp"
+ The SLiRP User-Mode Linux network transport allows a running UML
+ to network by invoking a program that can handle SLIP encapsulated
+ packets. This is commonly (but not limited to) the application
+ known as SLiRP, a program that can re-socket IP packets back onto
+ the host on which it is run. Only IP packets are supported,
+ unlike other network transports that can handle all Ethernet
+ frames. In general, slirp allows the UML the same IP connectivity
+ to the outside world that the host user is permitted, and unlike
+ other transports, SLiRP works without the need of root level
+ privileges, setuid binaries, or SLIP devices on the host. This
+ also means not every type of connection is possible, but most
+ situations can be accommodated with carefully crafted slirp
+ commands that can be passed along as part of the network device's
+ setup string. The effect of this transport on the UML is similar
+ to that of a host behind a firewall that masquerades all network
+ connections passing through it (but is less secure).
+
+ To use this you should first have slirp compiled somewhere
+ accessible on the host, and have read its documentation. If you
+ don't need UML networking, say N.
+
+ Startup example: "eth0=slirp,FE:FD:01:02:03:04,/usr/local/bin/slirp"
endmenu
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 6d381279b362..000cb69ba0bc 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -85,7 +85,7 @@ static int harddog_open(struct inode *inode, struct file *file)
timer_alive = 1;
spin_unlock(&lock);
mutex_unlock(&harddog_mutex);
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
err:
spin_unlock(&lock);
mutex_unlock(&harddog_mutex);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index aca09be2373e..33c1cd6a12ac 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -276,14 +276,14 @@ static int ubd_setup_common(char *str, int *index_out, char **error_out)
str++;
if(!strcmp(str, "sync")){
global_openflags = of_sync(global_openflags);
- goto out1;
+ return err;
}
err = -EINVAL;
major = simple_strtoul(str, &end, 0);
if((*end != '\0') || (end == str)){
*error_out = "Didn't parse major number";
- goto out1;
+ return err;
}
mutex_lock(&ubd_lock);
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 596e7056f376..e190e4ca52e1 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1043,7 +1043,7 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
vector_send(vp->tx_queue);
return NETDEV_TX_OK;
}
- if (skb->xmit_more) {
+ if (netdev_xmit_more()) {
mod_timer(&vp->tl, vp->coalesce);
return NETDEV_TX_OK;
}
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 00bcbe2326d9..b506ad06aefc 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -16,6 +16,7 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index fca34b2177e2..9f4b4bb78120 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -22,7 +22,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
}
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
- struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 9c04562310b3..b377df76cc28 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -263,7 +263,12 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pte_mknewpage(*pteptr);
if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *pteptr, pte_t pteval)
+{
+ set_pte(pteptr, pteval);
+}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
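
Turning the set_pte_at() macro into a static inline above is a common cleanup: the mm and addr parameters get real types, and the compiler can warn about mismatched arguments instead of silently discarding them. A self-contained sketch of the difference (simplified pte_t and mm_struct, not the UML definitions):

    #include <stdio.h>

    struct mm_struct { int dummy; };
    typedef struct { unsigned long val; } pte_t;

    static void set_pte(pte_t *ptep, pte_t val) { *ptep = val; }

    /* The macro form type-checks nothing and quietly drops mm and addr:
     *   #define set_pte_at(mm, addr, ptep, val) set_pte(ptep, val)
     */
    static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t val)
    {
        (void)mm;           /* unused, but now visible and type-checked */
        (void)addr;
        set_pte(ptep, val);
    }

    int main(void)
    {
        struct mm_struct mm;
        pte_t pte = { 0 };

        set_pte_at(&mm, 0x1000, &pte, (pte_t){ .val = 0x42 });
        printf("pte = %#lx\n", pte.val);
        return 0;
    }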
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index dce6db147f24..70ee60383900 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -2,162 +2,8 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
-#include <linux/pagemap.h>
-#include <linux/swap.h>
-#include <asm/percpu.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-/* struct mmu_gather is an opaque type used by the mm code for passing around
- * any data needed by arch specific code for tlb_remove_page.
- */
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int need_flush; /* Really unmapped some ptes? */
- unsigned long start;
- unsigned long end;
- unsigned int fullmm; /* non-zero means full mm flush */
-};
-
-static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
-{
- if (tlb->start > address)
- tlb->start = address;
- if (tlb->end < address + PAGE_SIZE)
- tlb->end = address + PAGE_SIZE;
-}
-
-static inline void init_tlb_gather(struct mmu_gather *tlb)
-{
- tlb->need_flush = 0;
-
- tlb->start = TASK_SIZE;
- tlb->end = 0;
-
- if (tlb->fullmm) {
- tlb->start = 0;
- tlb->end = TASK_SIZE;
- }
-}
-
-static inline void
-arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
- tlb->start = start;
- tlb->end = end;
- tlb->fullmm = !(start | (end+1));
-
- init_tlb_gather(tlb);
-}
-
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-
-static inline void
-tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
-{
- flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
-}
-
-static inline void
-tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- init_tlb_gather(tlb);
-}
-
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
- if (!tlb->need_flush)
- return;
-
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* arch_tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-static inline void
-arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- if (force) {
- tlb->start = start;
- tlb->end = end;
- tlb->need_flush = 1;
- }
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-}
-
-/* tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
- * while handling the additional races in SMP caused by other CPUs
- * caching valid mappings in their TLBs.
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- tlb->need_flush = 1;
- free_page_and_swap_cache(page);
- return false; /* avoid calling tlb_flush_mmu */
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
- __tlb_remove_page(tlb, page);
-}
-
-static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return __tlb_remove_page(tlb, page);
-}
-
-static inline void tlb_remove_page_size(struct mmu_gather *tlb,
- struct page *page, int page_size)
-{
- return tlb_remove_page(tlb, page);
-}
-
-/**
- * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
- *
- * Record the fact that pte's were really umapped in ->need_flush, so we can
- * later optimise away the tlb invalidate. This helps when userspace is
- * unmapping already-unmapped pages, which happens quite a lot.
- */
-#define tlb_remove_tlb_entry(tlb, ptep, address) \
- do { \
- tlb->need_flush = 1; \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
- } while (0)
-
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- tlb_remove_tlb_entry(tlb, ptep, address)
-
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
- unsigned int page_size)
-{
-}
-
-#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
-
-#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
-
-#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
-
-#define tlb_migrate_finish(mm) do {} while (0)
+#include <asm-generic/cacheflush.h>
+#include <asm-generic/tlb.h>
#endif
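
The private mmu_gather above implemented a pattern that the generic <asm-generic/tlb.h> now supplies: remember the range of addresses being unmapped, then flush the TLB once over that range when the operation finishes. A stand-alone sketch of that range-gathering idea (illustrative only, not the generic mmu_gather API):

    #include <stdio.h>

    struct gather { unsigned long start, end; };

    static void gather_init(struct gather *g)
    {
        g->start = ~0UL;
        g->end   = 0;
    }

    static void gather_page(struct gather *g, unsigned long addr, unsigned long size)
    {
        if (addr < g->start)
            g->start = addr;
        if (addr + size > g->end)
            g->end = addr + size;
    }

    static void gather_flush(const struct gather *g)
    {
        if (g->end > g->start)
            printf("flush TLB range %#lx-%#lx\n", g->start, g->end);
    }

    int main(void)
    {
        struct gather g;

        gather_init(&g);
        gather_page(&g, 0x1000, 0x1000);
        gather_page(&g, 0x5000, 0x1000);
        gather_flush(&g);   /* one flush covering 0x1000-0x6000 */
        return 0;
    }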
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index f4874b7ec503..598d7b3d9355 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -479,7 +479,7 @@ void __init init_IRQ(void)
irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
- for (i = 1; i < NR_IRQS; i++)
+ for (i = 1; i < LAST_IRQ; i++)
irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
/* Initialize EPOLL Loop */
os_setup_epoll();
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 99aa11bf53d1..a9c9a94c096f 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -188,13 +188,6 @@ void free_initmem(void)
{
}
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
/* Allocate and free page tables. */
pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 7f06fdbc7ee1..bd3cb694322c 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -59,7 +59,6 @@ static pte_t *maybe_map(unsigned long virt, int is_write)
static int do_op_one_page(unsigned long addr, int len, int is_write,
int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
- jmp_buf buf;
struct page *page;
pte_t *pte;
int n;
diff --git a/arch/um/kernel/stacktrace.c b/arch/um/kernel/stacktrace.c
index ebe7bcf62684..bd95e020d509 100644
--- a/arch/um/kernel/stacktrace.c
+++ b/arch/um/kernel/stacktrace.c
@@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = {
static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, &dump_ops, trace);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 6b995e870d55..05585eef11d9 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -20,7 +20,7 @@
static void _print_addr(void *data, unsigned long address, int reliable)
{
- pr_info(" [<%08lx>] %s%pF\n", address, reliable ? "" : "? ",
+ pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ",
(void *)address);
}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 052de4c8acb2..0c572a48158e 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -56,7 +56,7 @@ static int itimer_one_shot(struct clock_event_device *evt)
static struct clock_event_device timer_clockevent = {
.name = "posix-timer",
.rating = 250,
- .cpumask = cpu_all_mask,
+ .cpumask = cpu_possible_mask,
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_state_shutdown = itimer_shutdown,
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index bf0acb8aad8b..75b10235d369 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -31,29 +31,23 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
- struct uml_pt_regs *r;
+ struct uml_pt_regs r;
int save_errno = errno;
- r = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
- if (!r)
- panic("out of memory");
-
- r->is_user = 0;
+ r.is_user = 0;
if (sig == SIGSEGV) {
/* For segfaults, we want the data from the sigcontext. */
- get_regs_from_mc(r, mc);
- GET_FAULTINFO_FROM_MC(r->faultinfo, mc);
+ get_regs_from_mc(&r, mc);
+ GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
}
/* enable signals if sig isn't IRQ signal */
if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGALRM))
unblock_signals();
- (*sig_info[sig])(sig, si, r);
+ (*sig_info[sig])(sig, si, &r);
errno = save_errno;
-
- free(r);
}
/*
@@ -91,17 +85,11 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
static void timer_real_alarm_handler(mcontext_t *mc)
{
- struct uml_pt_regs *regs;
-
- regs = uml_kmalloc(sizeof(struct uml_pt_regs), UM_GFP_ATOMIC);
- if (!regs)
- panic("out of memory");
+ struct uml_pt_regs regs;
if (mc != NULL)
- get_regs_from_mc(regs, mc);
- timer_handler(SIGALRM, NULL, regs);
-
- free(regs);
+ get_regs_from_mc(&regs, mc);
+ timer_handler(SIGALRM, NULL, &regs);
}
void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
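
sig_handler_common() and timer_real_alarm_handler() above move struct uml_pt_regs from uml_kmalloc()/free() onto the stack, removing an allocation (and a possible panic) from the signal path; this fits with the larger KERNEL_STACK_ORDER default earlier in this merge, whose help text mentions the bigger state saved when handling signals. A user-space sketch of the on-stack approach (simplified regs struct, not the UML code; allocators are generally not async-signal-safe, which is one reason to keep them out of such paths):

    #include <signal.h>
    #include <stdio.h>

    struct regs_snapshot { long pc, sp; };   /* stand-in for uml_pt_regs */

    static volatile sig_atomic_t fired;

    static void handler(int sig)
    {
        struct regs_snapshot r = { 0 };      /* automatic storage: no allocator */

        (void)sig;
        r.sp = (long)&r;                     /* pretend to capture some state */
        fired = (r.sp != 0);                 /* only async-signal-safe steps   */
    }

    int main(void)
    {
        signal(SIGUSR1, handler);
        raise(SIGUSR1);
        printf("handler ran: %d\n", (int)fired);
        return 0;
    }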
diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
index 998fbb445458..e261656fe9d7 100644
--- a/arch/um/os-Linux/umid.c
+++ b/arch/um/os-Linux/umid.c
@@ -135,12 +135,18 @@ out:
*/
static inline int is_umdir_used(char *dir)
{
- char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
- char pid[sizeof("nnnnn\0")], *end;
+ char pid[sizeof("nnnnn\0")], *end, *file;
int dead, fd, p, n, err;
+ size_t filelen;
- n = snprintf(file, sizeof(file), "%s/pid", dir);
- if (n >= sizeof(file)) {
+ err = asprintf(&file, "%s/pid", dir);
+ if (err < 0)
+ return 0;
+
+ filelen = strlen(file);
+
+ n = snprintf(file, filelen, "%s/pid", dir);
+ if (n >= filelen) {
printk(UM_KERN_ERR "is_umdir_used - pid filename too long\n");
err = -E2BIG;
goto out;
@@ -185,6 +191,7 @@ static inline int is_umdir_used(char *dir)
out_close:
close(fd);
out:
+ free(file);
return 0;
}
@@ -210,18 +217,21 @@ static int umdir_take_if_dead(char *dir)
static void __init create_pid_file(void)
{
- char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")];
- char pid[sizeof("nnnnn\0")];
+ char pid[sizeof("nnnnn\0")], *file;
int fd, n;
- if (umid_file_name("pid", file, sizeof(file)))
+ file = malloc(strlen(uml_dir) + UMID_LEN + sizeof("/pid\0"));
+ if (!file)
return;
+ if (umid_file_name("pid", file, sizeof(file)))
+ goto out;
+
fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
if (fd < 0) {
printk(UM_KERN_ERR "Open of machine pid file \"%s\" failed: "
"%s\n", file, strerror(errno));
- return;
+ goto out;
}
snprintf(pid, sizeof(pid), "%d\n", getpid());
@@ -231,6 +241,8 @@ static void __init create_pid_file(void)
errno);
close(fd);
+out:
+ free(file);
}
int __init set_umid(char *name)
@@ -385,13 +397,19 @@ __uml_setup("uml_dir=", set_uml_dir,
static void remove_umid_dir(void)
{
- char dir[strlen(uml_dir) + UMID_LEN + 1], err;
+ char *dir, err;
+
+ dir = malloc(strlen(uml_dir) + UMID_LEN + 1);
+ if (!dir)
+ return;
sprintf(dir, "%s%s", uml_dir, umid);
err = remove_files_and_dir(dir);
if (err)
os_warn("%s - remove_files_and_dir failed with err = %d\n",
__func__, err);
+
+ free(dir);
}
__uml_exitcall(remove_umid_dir);
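
is_umdir_used(), create_pid_file() and remove_umid_dir() above stop building paths in variable-length stack arrays and allocate them instead (asprintf() in the first case, malloc() in the others), so the path length no longer dictates stack usage. A minimal sketch of the asprintf() pattern (assumes glibc's asprintf via _GNU_SOURCE; the uml_dir value is a placeholder):

    #define _GNU_SOURCE                      /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *uml_dir = "/tmp/uml/";   /* placeholder directory */
        char *file = NULL;

        if (asprintf(&file, "%spid", uml_dir) < 0)
            return 1;                        /* allocation failed */

        printf("pid file: %s\n", file);
        free(file);                          /* caller owns the buffer */
        return 0;
    }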
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 817d82608712..41fe944005f8 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -3,6 +3,7 @@ config UNICORE32
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_KEEPINITRD
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_KERNEL_GZIP
@@ -20,6 +21,7 @@ config UNICORE32
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
select NEED_DMA_MAP_STATE
+ select MMU_GATHER_NO_RANGE if MMU
help
UniCore-32 is 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
@@ -38,12 +40,6 @@ config STACKTRACE_SUPPORT
config LOCKDEP_SUPPORT
def_bool y
-config RWSEM_GENERIC_SPINLOCK
- def_bool y
-
-config RWSEM_XCHGADD_ALGORITHM
- bool
-
config ARCH_HAS_ILOG2_U32
bool
@@ -195,7 +191,6 @@ config I2C_EEPROM_AT24
config LCD_BACKLIGHT
tristate "LCD Backlight support"
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_PWM
endmenu
diff --git a/arch/unicore32/configs/unicore32_defconfig b/arch/unicore32/configs/unicore32_defconfig
index aebd01fc28e5..360cc9abcdb0 100644
--- a/arch/unicore32/configs/unicore32_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -119,7 +119,7 @@ CONFIG_I2C_PUV3=y
# Hardware Monitoring support
#CONFIG_SENSORS_LM75=m
# Generic Thermal sysfs driver
-#CONFIG_THERMAL=m
+#CONFIG_THERMAL=y
#CONFIG_THERMAL_HWMON=y
# Multimedia support
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index d77d953c04c1..5fe2426bb7a5 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -22,15 +22,14 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += module.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
-generic-y += segment.h
generic-y += serial.h
generic-y += shmparam.h
-generic-y += sizes.h
generic-y += syscalls.h
generic-y += topology.h
generic-y += trace_clock.h
diff --git a/arch/unicore32/include/asm/elf.h b/arch/unicore32/include/asm/elf.h
index 829042d07722..ae66dc1be49e 100644
--- a/arch/unicore32/include/asm/elf.h
+++ b/arch/unicore32/include/asm/elf.h
@@ -19,6 +19,7 @@
* ELF register definitions..
*/
#include <asm/ptrace.h>
+#include <linux/elf-em.h>
typedef unsigned long elf_greg_t;
typedef unsigned long elf_freg_t[3];
@@ -28,8 +29,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct fp_state elf_fpregset_t;
-#define EM_UNICORE 110
-
#define R_UNICORE_NONE 0
#define R_UNICORE_PC24 1
#define R_UNICORE_ABS32 2
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
index 66bb9f6525c0..46cf27efbb7e 100644
--- a/arch/unicore32/include/asm/memory.h
+++ b/arch/unicore32/include/asm/memory.h
@@ -16,7 +16,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/memory.h>
/*
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
index 5c205a9cb5a6..9f06ea5466dd 100644
--- a/arch/unicore32/include/asm/mmu_context.h
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -88,7 +88,6 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
}
static inline void arch_unmap(struct mm_struct *mm,
- struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
diff --git a/arch/unicore32/include/asm/syscall.h b/arch/unicore32/include/asm/syscall.h
new file mode 100644
index 000000000000..607961797fff
--- /dev/null
+++ b/arch/unicore32/include/asm/syscall.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_UNICORE_SYSCALL_H
+#define _ASM_UNICORE_SYSCALL_H
+
+#include <uapi/linux/audit.h>
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+ return AUDIT_ARCH_UNICORE;
+}
+
+#endif /* _ASM_UNICORE_SYSCALL_H */
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
index 9cca15cdae94..00a8477333f6 100644
--- a/arch/unicore32/include/asm/tlb.h
+++ b/arch/unicore32/include/asm/tlb.h
@@ -12,10 +12,9 @@
#ifndef __UNICORE_TLB_H__
#define __UNICORE_TLB_H__
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+/*
+ * unicore32 lacks an efficient flush_tlb_range(), use flush_tlb_mm().
+ */
#define __pte_free_tlb(tlb, pte, addr) \
do { \
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c
index 9976e767d51c..e37da8c6837b 100644
--- a/arch/unicore32/kernel/stacktrace.c
+++ b/arch/unicore32/kernel/stacktrace.c
@@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
}
walk_stackframe(&frame, save_trace, &data);
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
index 74b6a2e29809..c994cdf14119 100644
--- a/arch/unicore32/mm/init.c
+++ b/arch/unicore32/mm/init.c
@@ -23,7 +23,7 @@
#include <asm/sections.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>
@@ -287,27 +287,3 @@ void __init mem_init(void)
sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
}
}
-
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-
-static int keep_initrd;
-
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- if (!keep_initrd)
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-
-static int __init keepinitrd_setup(char *__unused)
-{
- keep_initrd = 1;
- return 1;
-}
-
-__setup("keepinitrd", keepinitrd_setup);
-#endif
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index bf012b2b71a9..b69cb18ce8b1 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -34,7 +34,7 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <mach/map.h>
#include "mm.h"
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
index aa2060beb408..f0ae623b305f 100644
--- a/arch/unicore32/mm/mmu.c
+++ b/arch/unicore32/mm/mmu.c
@@ -22,7 +22,7 @@
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 62fc3fda1a05..2bbbd4d1ba31 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -14,6 +14,7 @@ config X86_32
select ARCH_WANT_IPC_PARSE_VERSION
select CLKSRC_I8253
select CLONE_BACKWARDS
+ select HAVE_DEBUG_STACKOVERFLOW
select MODULES_USE_ELF_REL
select OLD_SIGACTION
@@ -21,16 +22,26 @@ config X86_64
def_bool y
depends on 64BIT
# Options that are inherently 64-bit kernel only:
- select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
+ select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select SWIOTLB
- select X86_DEV_DMA_OPS
select ARCH_HAS_SYSCALL_WRAPPER
+config FORCE_DYNAMIC_FTRACE
+ def_bool y
+ depends on X86_32
+ depends on FUNCTION_TRACER
+ select DYNAMIC_FTRACE
+ help
+ We keep the static function tracing (!DYNAMIC_FTRACE) around
+ in order to test the non static function tracing in the
+ generic code, as other architectures still use it. But we
+ only need to keep it around for x86_64. No need to keep it
+ for x86_32. For x86_32, force DYNAMIC_FTRACE.
#
# Arch settings
#
@@ -44,11 +55,9 @@ config X86
#
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ANON_INODES
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_DATA
select ARCH_CLOCKSOURCE_INIT
- select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -65,6 +74,7 @@ config X86
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
@@ -74,6 +84,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_STACKWALK
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
@@ -138,7 +149,6 @@ config X86
select HAVE_COPY_THREAD_TLS
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
- select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
@@ -183,7 +193,6 @@ config X86
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE if PARAVIRT
- select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API
@@ -261,16 +270,10 @@ config GENERIC_BUG
config GENERIC_BUG_RELATIVE_POINTERS
bool
-config GENERIC_HWEIGHT
- def_bool y
-
config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_CALIBRATE_DELAY
def_bool y
@@ -310,9 +313,6 @@ config ZONE_DMA32
config AUDIT_ARCH
def_bool y if X86_64
-config ARCH_SUPPORTS_OPTIMIZED_INLINING
- def_bool y
-
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
@@ -703,8 +703,6 @@ config STA2X11
bool "STA2X11 Companion Chip Support"
depends on X86_32_NON_STANDARD && PCI
select ARCH_HAS_PHYS_TO_DMA
- select X86_DEV_DMA_OPS
- select X86_DMA_REMAP
select SWIOTLB
select MFD_STA2X11
select GPIOLIB
@@ -783,14 +781,6 @@ config PARAVIRT_SPINLOCKS
If you are unsure how to answer this question, answer Y.
-config QUEUED_LOCK_STAT
- bool "Paravirt queued spinlock statistics"
- depends on PARAVIRT_SPINLOCKS && DEBUG_FS
- ---help---
- Enable the collection of statistical data on the slowpath
- behavior of paravirtualized queued spinlocks and report
- them on debugfs.
-
source "arch/x86/xen/Kconfig"
config KVM_GUEST
@@ -1330,8 +1320,16 @@ config MICROCODE_AMD
processors will be enabled.
config MICROCODE_OLD_INTERFACE
- def_bool y
+ bool "Ancient loading interface (DEPRECATED)"
+ default n
depends on MICROCODE
+ ---help---
+ DO NOT USE THIS! This is the ancient /dev/cpu/microcode interface
+ which was used by userspace tools like iucode_tool and microcode.ctl.
+ It is inadequate because it runs too late to be able to properly
+ load microcode on a machine and it needs special tools. Instead, you
+ should've switched to the early loading method with the initrd or
+ builtin microcode by now: Documentation/x86/microcode.txt
config X86_MSR
tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -1606,12 +1604,9 @@ config ARCH_FLATMEM_ENABLE
depends on X86_32 && !NUMA
config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
- depends on NUMA && X86_32
-
-config ARCH_DISCONTIGMEM_DEFAULT
- def_bool y
+ def_bool n
depends on NUMA && X86_32
+ depends on BROKEN
config ARCH_SPARSEMEM_ENABLE
def_bool y
@@ -1620,8 +1615,7 @@ config ARCH_SPARSEMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE if X86_64
config ARCH_SPARSEMEM_DEFAULT
- def_bool y
- depends on X86_64
+ def_bool X86_64 || (NUMA && X86_32)
config ARCH_SELECT_MEMORY_MODEL
def_bool y
@@ -2878,11 +2872,6 @@ config HAVE_ATOMIC_IOMAP
config X86_DEV_DMA_OPS
bool
- depends on X86_64 || STA2X11
-
-config X86_DMA_REMAP
- bool
- depends on STA2X11
config HAVE_GENERIC_GUP
def_bool y
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 15d0fbe27872..f730680dc818 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -266,20 +266,6 @@ config CPA_DEBUG
---help---
Do change_page_attr() self-tests every 30 seconds.
-config OPTIMIZE_INLINING
- bool "Allow gcc to uninline functions marked 'inline'"
- ---help---
- This option determines if the kernel forces gcc to inline the functions
- developers have marked 'inline'. Doing so takes away freedom from gcc to
- do what it thinks is best, which is desirable for the gcc 3.x series of
- compilers. The gcc 4.x series have a rewritten inlining algorithm and
- enabling this option will generate a smaller kernel there. Hopefully
- this algorithm is so good that allowing gcc 4.x and above to make the
- decision will become the default in the future. Until then this option
- is there to test gcc for this.
-
- If unsure, say N.
-
config DEBUG_ENTRY
bool "Debug low-level entry code"
depends on DEBUG_KERNEL
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index a587805c6687..56e748a7679f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -47,7 +47,7 @@ export REALMODE_CFLAGS
export BITS
ifdef CONFIG_X86_NEED_RELOCS
- LDFLAGS_vmlinux := --emit-relocs
+ LDFLAGS_vmlinux := --emit-relocs --discard-none
endif
#
diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c
index 0ef4ad55b29b..ad84239e595e 100644
--- a/arch/x86/boot/compressed/acpi.c
+++ b/arch/x86/boot/compressed/acpi.c
@@ -276,7 +276,7 @@ static unsigned long get_acpi_srat_table(void)
if (acpi_table) {
header = (struct acpi_table_header *)acpi_table;
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_SRAT))
+ if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_SRAT))
return acpi_table;
}
entry += size;
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 9f908112bbb9..2b2481acc661 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -25,18 +25,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 1d3badfda09e..e8829abf063a 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -24,18 +24,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_SGI_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SMP=y
CONFIG_CALGARY_IOMMU=y
CONFIG_NR_CPUS=64
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 3ea71b871813..bdeee1b830be 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis128_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis128-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis128_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128_aesni_alg[] = {
- {
- .setkey = crypto_aegis128_aesni_setkey,
- .setauthsize = crypto_aegis128_aesni_setauthsize,
- .encrypt = crypto_aegis128_aesni_encrypt,
- .decrypt = crypto_aegis128_aesni_decrypt,
- .init = crypto_aegis128_aesni_init_tfm,
- .exit = crypto_aegis128_aesni_exit_tfm,
-
- .ivsize = AEGIS128_NONCE_SIZE,
- .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
- .chunksize = AEGIS128_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis128",
- .cra_driver_name = "__aegis128-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis128_aesni_setkey,
- .setauthsize = cryptd_aegis128_aesni_setauthsize,
- .encrypt = cryptd_aegis128_aesni_encrypt,
- .decrypt = cryptd_aegis128_aesni_decrypt,
- .init = cryptd_aegis128_aesni_init_tfm,
- .exit = cryptd_aegis128_aesni_exit_tfm,
-
- .ivsize = AEGIS128_NONCE_SIZE,
- .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
- .chunksize = AEGIS128_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis128",
- .cra_driver_name = "aegis128-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis128_aesni_alg = {
+ .setkey = crypto_aegis128_aesni_setkey,
+ .setauthsize = crypto_aegis128_aesni_setauthsize,
+ .encrypt = crypto_aegis128_aesni_encrypt,
+ .decrypt = crypto_aegis128_aesni_decrypt,
+ .init = crypto_aegis128_aesni_init_tfm,
+ .exit = crypto_aegis128_aesni_exit_tfm,
+
+ .ivsize = AEGIS128_NONCE_SIZE,
+ .maxauthsize = AEGIS128_MAX_AUTH_SIZE,
+ .chunksize = AEGIS128_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis128",
+ .cra_driver_name = "__aegis128-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis128_aesni_alg,
- ARRAY_SIZE(crypto_aegis128_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis128_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis128_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis128_aesni_alg,
- ARRAY_SIZE(crypto_aegis128_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis128_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis128_aesni_module_init);
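The functions removed above all open-coded the same rule: run the internal "__aegis128-aesni" algorithm directly when the FPU is usable, otherwise bounce the request through cryptd. The shared SIMD AEAD helpers now supply that wrapper once, and the same conversion repeats below for aegis128l, aegis256 and the MORUS glue. A sketch of the forwarding rule, modelled on the deleted wrappers rather than the helper's real internals (names are illustrative; see crypto/simd.c for the actual code):

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <linux/preempt.h>

/* Sketch only: the dispatch that simd_register_aeads_compat() provides. */
static int simd_aead_do_encrypt(struct aead_request *req,
                                struct cryptd_aead *cryptd_tfm)
{
        struct crypto_aead *tfm = &cryptd_tfm->base;

        /* Use the SIMD implementation directly when the FPU may be used... */
        if (crypto_simd_usable() && (!in_atomic() ||
                                     !cryptd_aead_queued(cryptd_tfm)))
                tfm = cryptd_aead_child(cryptd_tfm);

        /* ...otherwise defer to cryptd, which runs it in process context. */
        aead_request_set_tfm(req, tfm);
        return crypto_aead_encrypt(req);
}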
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 1b1b39c66c5e..80d917f7e467 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis128l_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis128l_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis128l_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis128l_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis128l-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis128l_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis128l_aesni_alg[] = {
- {
- .setkey = crypto_aegis128l_aesni_setkey,
- .setauthsize = crypto_aegis128l_aesni_setauthsize,
- .encrypt = crypto_aegis128l_aesni_encrypt,
- .decrypt = crypto_aegis128l_aesni_decrypt,
- .init = crypto_aegis128l_aesni_init_tfm,
- .exit = crypto_aegis128l_aesni_exit_tfm,
-
- .ivsize = AEGIS128L_NONCE_SIZE,
- .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
- .chunksize = AEGIS128L_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis128l",
- .cra_driver_name = "__aegis128l-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis128l_aesni_setkey,
- .setauthsize = cryptd_aegis128l_aesni_setauthsize,
- .encrypt = cryptd_aegis128l_aesni_encrypt,
- .decrypt = cryptd_aegis128l_aesni_decrypt,
- .init = cryptd_aegis128l_aesni_init_tfm,
- .exit = cryptd_aegis128l_aesni_exit_tfm,
-
- .ivsize = AEGIS128L_NONCE_SIZE,
- .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
- .chunksize = AEGIS128L_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis128l",
- .cra_driver_name = "aegis128l-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis128l_aesni_alg = {
+ .setkey = crypto_aegis128l_aesni_setkey,
+ .setauthsize = crypto_aegis128l_aesni_setauthsize,
+ .encrypt = crypto_aegis128l_aesni_encrypt,
+ .decrypt = crypto_aegis128l_aesni_decrypt,
+ .init = crypto_aegis128l_aesni_init_tfm,
+ .exit = crypto_aegis128l_aesni_exit_tfm,
+
+ .ivsize = AEGIS128L_NONCE_SIZE,
+ .maxauthsize = AEGIS128L_MAX_AUTH_SIZE,
+ .chunksize = AEGIS128L_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis128l",
+ .cra_driver_name = "__aegis128l-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis128l_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis128l_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis128l_aesni_alg,
- ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis128l_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis128l_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis128l_aesni_alg,
- ARRAY_SIZE(crypto_aegis128l_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis128l_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis128l_aesni_module_init);
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 6227ca3220a0..716eecb66bd5 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -11,8 +11,8 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
@@ -242,131 +242,35 @@ static void crypto_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
{
}
-static int cryptd_aegis256_aesni_setkey(struct crypto_aead *aead,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-
-static int cryptd_aegis256_aesni_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
-static int cryptd_aegis256_aesni_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-
-static int cryptd_aegis256_aesni_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-
-static int cryptd_aegis256_aesni_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__aegis256-aesni", CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void cryptd_aegis256_aesni_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg crypto_aegis256_aesni_alg[] = {
- {
- .setkey = crypto_aegis256_aesni_setkey,
- .setauthsize = crypto_aegis256_aesni_setauthsize,
- .encrypt = crypto_aegis256_aesni_encrypt,
- .decrypt = crypto_aegis256_aesni_decrypt,
- .init = crypto_aegis256_aesni_init_tfm,
- .exit = crypto_aegis256_aesni_exit_tfm,
-
- .ivsize = AEGIS256_NONCE_SIZE,
- .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
- .chunksize = AEGIS256_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_INTERNAL,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aegis_ctx) +
- __alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
-
- .cra_name = "__aegis256",
- .cra_driver_name = "__aegis256-aesni",
-
- .cra_module = THIS_MODULE,
- }
- }, {
- .setkey = cryptd_aegis256_aesni_setkey,
- .setauthsize = cryptd_aegis256_aesni_setauthsize,
- .encrypt = cryptd_aegis256_aesni_encrypt,
- .decrypt = cryptd_aegis256_aesni_decrypt,
- .init = cryptd_aegis256_aesni_init_tfm,
- .exit = cryptd_aegis256_aesni_exit_tfm,
-
- .ivsize = AEGIS256_NONCE_SIZE,
- .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
- .chunksize = AEGIS256_BLOCK_SIZE,
-
- .base = {
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_alignmask = 0,
-
- .cra_priority = 400,
-
- .cra_name = "aegis256",
- .cra_driver_name = "aegis256-aesni",
-
- .cra_module = THIS_MODULE,
- }
+static struct aead_alg crypto_aegis256_aesni_alg = {
+ .setkey = crypto_aegis256_aesni_setkey,
+ .setauthsize = crypto_aegis256_aesni_setauthsize,
+ .encrypt = crypto_aegis256_aesni_encrypt,
+ .decrypt = crypto_aegis256_aesni_decrypt,
+ .init = crypto_aegis256_aesni_init_tfm,
+ .exit = crypto_aegis256_aesni_exit_tfm,
+
+ .ivsize = AEGIS256_NONCE_SIZE,
+ .maxauthsize = AEGIS256_MAX_AUTH_SIZE,
+ .chunksize = AEGIS256_BLOCK_SIZE,
+
+ .base = {
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct aegis_ctx) +
+ __alignof__(struct aegis_ctx),
+ .cra_alignmask = 0,
+ .cra_priority = 400,
+
+ .cra_name = "__aegis256",
+ .cra_driver_name = "__aegis256-aesni",
+
+ .cra_module = THIS_MODULE,
}
};
+static struct simd_aead_alg *simd_alg;
+
static int __init crypto_aegis256_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
@@ -374,14 +278,13 @@ static int __init crypto_aegis256_aesni_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_aegis256_aesni_alg,
- ARRAY_SIZE(crypto_aegis256_aesni_alg));
+ return simd_register_aeads_compat(&crypto_aegis256_aesni_alg, 1,
+ &simd_alg);
}
static void __exit crypto_aegis256_aesni_module_exit(void)
{
- crypto_unregister_aeads(crypto_aegis256_aesni_alg,
- ARRAY_SIZE(crypto_aegis256_aesni_alg));
+ simd_unregister_aeads(&crypto_aegis256_aesni_alg, 1, &simd_alg);
}
module_init(crypto_aegis256_aesni_module_init);
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1e3d2102033a..21c246799aa5 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -25,14 +25,13 @@
#include <linux/err.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
-#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
#include <asm/crypto/aes.h>
+#include <asm/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
@@ -333,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
return -EINVAL;
}
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
err = crypto_aes_expand_key(ctx, in_key, key_len);
else {
kernel_fpu_begin();
@@ -354,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
crypto_aes_encrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -367,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
crypto_aes_decrypt_x86(ctx, dst, src);
else {
kernel_fpu_begin();
@@ -643,29 +642,6 @@ static int xts_decrypt(struct skcipher_request *req)
aes_ctx(ctx->raw_crypt_ctx));
}
-static int rfc4106_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-
-static void rfc4106_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
static int
rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
{
@@ -710,15 +686,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
-static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
-}
-
+/* This is the Integrity Check Value (aka the authentication tag) length and can
+ * be 8, 12 or 16 bytes long. */
static int common_rfc4106_set_authsize(struct crypto_aead *aead,
unsigned int authsize)
{
@@ -734,17 +703,6 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
return 0;
}
-/* This is the Integrity Check Value (aka the authentication tag length and can
- * be 8, 12 or 16 bytes long. */
-static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(parent);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-
static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
unsigned int authsize)
{
@@ -964,38 +922,6 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
aes_ctx);
}
-
-static int gcmaes_wrapper_encrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_encrypt(req);
-}
-
-static int gcmaes_wrapper_decrypt(struct aead_request *req)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- tfm = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- tfm = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, tfm);
-
- return crypto_aead_decrypt(req);
-}
#endif
static struct crypto_alg aesni_algs[] = { {
@@ -1148,31 +1074,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
aes_ctx);
}
-static int generic_gcmaes_init(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
-
- return 0;
-}
-
-static void generic_gcmaes_exit(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-
-static struct aead_alg aesni_aead_algs[] = { {
+static struct aead_alg aesni_aeads[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
.encrypt = helper_rfc4106_encrypt,
@@ -1180,8 +1082,9 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
- .cra_name = "__gcm-aes-aesni",
- .cra_driver_name = "__driver-gcm-aes-aesni",
+ .cra_name = "__rfc4106(gcm(aes))",
+ .cra_driver_name = "__rfc4106-gcm-aesni",
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx),
@@ -1189,24 +1092,6 @@ static struct aead_alg aesni_aead_algs[] = { {
.cra_module = THIS_MODULE,
},
}, {
- .init = rfc4106_init,
- .exit = rfc4106_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_RFC4106_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "rfc4106(gcm(aes))",
- .cra_driver_name = "rfc4106-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_module = THIS_MODULE,
- },
-}, {
.setkey = generic_gcmaes_set_key,
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
@@ -1214,38 +1099,21 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
- .cra_name = "__generic-gcm-aes-aesni",
- .cra_driver_name = "__driver-generic-gcm-aes-aesni",
- .cra_priority = 0,
+ .cra_name = "__gcm(aes)",
+ .cra_driver_name = "__generic-gcm-aesni",
+ .cra_priority = 400,
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
.cra_alignmask = AESNI_ALIGN - 1,
.cra_module = THIS_MODULE,
},
-}, {
- .init = generic_gcmaes_init,
- .exit = generic_gcmaes_exit,
- .setkey = gcmaes_wrapper_set_key,
- .setauthsize = gcmaes_wrapper_set_authsize,
- .encrypt = gcmaes_wrapper_encrypt,
- .decrypt = gcmaes_wrapper_decrypt,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = 16,
- .base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "generic-gcm-aesni",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct cryptd_aead *),
- .cra_module = THIS_MODULE,
- },
} };
#else
-static struct aead_alg aesni_aead_algs[0];
+static struct aead_alg aesni_aeads[0];
#endif
+static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
static const struct x86_cpu_id aesni_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_AES),
@@ -1253,23 +1121,9 @@ static const struct x86_cpu_id aesni_cpu_id[] = {
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-static void aesni_free_simds(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
- aesni_simd_skciphers[i]; i++)
- simd_skcipher_free(aesni_simd_skciphers[i]);
-}
-
static int __init aesni_init(void)
{
- struct simd_skcipher_alg *simd;
- const char *basename;
- const char *algname;
- const char *drvname;
int err;
- int i;
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
@@ -1304,36 +1158,22 @@ static int __init aesni_init(void)
if (err)
return err;
- err = crypto_register_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ err = simd_register_skciphers_compat(aesni_skciphers,
+ ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
if (err)
goto unregister_algs;
- err = crypto_register_aeads(aesni_aead_algs,
- ARRAY_SIZE(aesni_aead_algs));
+ err = simd_register_aeads_compat(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
if (err)
goto unregister_skciphers;
- for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
- algname = aesni_skciphers[i].base.cra_name + 2;
- drvname = aesni_skciphers[i].base.cra_driver_name + 2;
- basename = aesni_skciphers[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- goto unregister_simds;
-
- aesni_simd_skciphers[i] = simd;
- }
-
return 0;
-unregister_simds:
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
return err;
@@ -1341,10 +1181,10 @@ unregister_algs:
static void __exit aesni_exit(void)
{
- aesni_free_simds();
- crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
- crypto_unregister_skciphers(aesni_skciphers,
- ARRAY_SIZE(aesni_skciphers));
+ simd_unregister_aeads(aesni_aeads, ARRAY_SIZE(aesni_aeads),
+ aesni_simd_aeads);
+ simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers),
+ aesni_simd_skciphers);
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
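Besides dropping the cryptd plumbing, the hunks above swap every irq_fpu_usable() test for crypto_simd_usable(); the same substitution runs through the chacha, crc32, crc32c, crct10dif, nhpoly1305, poly1305 and sha glue files below. The shape of the gate, with my_accel_update()/my_generic_update() as placeholder names rather than functions from this patch:

#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/fpu/api.h>
#include <asm/simd.h>

static int my_generic_update(struct shash_desc *desc, const u8 *data,
                             unsigned int len);          /* plain C fallback */
static void my_accel_update(struct shash_desc *desc, const u8 *data,
                            unsigned int len);           /* SIMD implementation */

static int my_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
        /* SIMD is off limits here (e.g. hard IRQ): take the generic path. */
        if (!crypto_simd_usable())
                return my_generic_update(desc, data, len);

        kernel_fpu_begin();             /* make the FPU/SIMD registers usable */
        my_accel_update(desc, data, len);
        kernel_fpu_end();
        return 0;
}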
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 45c1c4143176..4967ad620775 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -12,10 +12,10 @@
#include <crypto/algapi.h>
#include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
#include <asm/simd.h>
#define CHACHA_STATE_ALIGN 16
@@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
struct skcipher_walk walk;
int err;
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
u8 real_iv[16];
int err;
- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_xchacha_crypt(req);
err = skcipher_walk_virt(&walk, req, true);
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index c8d9cdacbf10..cb4ab6645106 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -32,10 +32,11 @@
#include <linux/kernel.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
@@ -54,7 +55,7 @@ static u32 __attribute__((pure))
unsigned int iremainder;
unsigned int prealign;
- if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
+ if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
return crc32_le(crc, p, len);
if ((long)p & SCALE_F_MASK) {
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 5773e1161072..a58fe217c856 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -29,10 +29,11 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
-#include <asm/fpu/internal.h>
+#include <asm/simd.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
@@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 0e785c0b2354..3c81e15b0873 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -26,12 +26,13 @@
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
-#include <asm/fpu/api.h>
#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
@@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- if (length >= 16 && irq_fpu_usable()) {
+ if (length >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
kernel_fpu_end();
@@ -70,15 +71,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
- if (len >= 16 && irq_fpu_usable()) {
+ if (len >= 16 && crypto_simd_usable()) {
kernel_fpu_begin();
- *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_pcl(crc, data, len);
kernel_fpu_end();
} else
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -87,15 +87,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 3582ae885ee1..e3f3e6fd9d65 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -19,8 +19,9 @@
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
-#include <asm/fpu/api.h>
+#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
@@ -171,7 +172,6 @@ static int ghash_async_init(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -182,7 +182,7 @@ static int ghash_async_update(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -200,7 +200,7 @@ static int ghash_async_final(struct ahash_request *req)
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -241,7 +241,7 @@ static int ghash_async_digest(struct ahash_request *req)
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
memcpy(cryptd_req, req, sizeof(*req));
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -251,7 +251,6 @@ static int ghash_async_digest(struct ahash_request *req)
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
desc->tfm = child;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
}
diff --git a/arch/x86/crypto/morus1280-avx2-glue.c b/arch/x86/crypto/morus1280-avx2-glue.c
index 6634907d6ccd..679627a2a824 100644
--- a/arch/x86/crypto/morus1280-avx2-glue.c
+++ b/arch/x86/crypto/morus1280-avx2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus1280_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_avx2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
+MORUS1280_DECLARE_ALG(avx2, "morus1280-avx2", 400);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus1280_avx2_module_init(void)
{
@@ -44,14 +47,13 @@ static int __init crypto_morus1280_avx2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus1280_avx2_algs,
- ARRAY_SIZE(crypto_morus1280_avx2_algs));
+ return simd_register_aeads_compat(&crypto_morus1280_avx2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus1280_avx2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus1280_avx2_algs,
- ARRAY_SIZE(crypto_morus1280_avx2_algs));
+ simd_unregister_aeads(&crypto_morus1280_avx2_alg, 1, &simd_alg);
}
module_init(crypto_morus1280_avx2_module_init);
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index f40244eaf14d..c35c0638d0bb 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus1280_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus1280_sse2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
+MORUS1280_DECLARE_ALG(sse2, "morus1280-sse2", 350);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus1280_sse2_module_init(void)
{
@@ -43,14 +46,13 @@ static int __init crypto_morus1280_sse2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus1280_sse2_algs,
- ARRAY_SIZE(crypto_morus1280_sse2_algs));
+ return simd_register_aeads_compat(&crypto_morus1280_sse2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus1280_sse2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus1280_sse2_algs,
- ARRAY_SIZE(crypto_morus1280_sse2_algs));
+ simd_unregister_aeads(&crypto_morus1280_sse2_alg, 1, &simd_alg);
}
module_init(crypto_morus1280_sse2_module_init);
diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 7e600f8bcdad..30fc1bd98ec3 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -11,7 +11,6 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus1280_glue.h>
@@ -205,90 +204,6 @@ void crypto_morus1280_glue_init_ops(struct crypto_aead *aead,
}
EXPORT_SYMBOL_GPL(crypto_morus1280_glue_init_ops);
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setkey);
-
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_setauthsize);
-
-int cryptd_morus1280_glue_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_encrypt);
-
-int cryptd_morus1280_glue_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_decrypt);
-
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
- char internal_name[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
- >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_init_tfm);
-
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus1280_glue_exit_tfm);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-1280 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 9afaf8f8565a..32da56b3bdad 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -12,6 +12,7 @@
*/
#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
#include <crypto/morus640_glue.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
@@ -35,7 +36,9 @@ asmlinkage void crypto_morus640_sse2_dec_tail(void *state, const void *src,
asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
u64 assoclen, u64 cryptlen);
-MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
+MORUS640_DECLARE_ALG(sse2, "morus640-sse2", 400);
+
+static struct simd_aead_alg *simd_alg;
static int __init crypto_morus640_sse2_module_init(void)
{
@@ -43,14 +46,13 @@ static int __init crypto_morus640_sse2_module_init(void)
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
- return crypto_register_aeads(crypto_morus640_sse2_algs,
- ARRAY_SIZE(crypto_morus640_sse2_algs));
+ return simd_register_aeads_compat(&crypto_morus640_sse2_alg, 1,
+ &simd_alg);
}
static void __exit crypto_morus640_sse2_module_exit(void)
{
- crypto_unregister_aeads(crypto_morus640_sse2_algs,
- ARRAY_SIZE(crypto_morus640_sse2_algs));
+ simd_unregister_aeads(&crypto_morus640_sse2_alg, 1, &simd_alg);
}
module_init(crypto_morus640_sse2_module_init);
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index cb3a81732016..1dea33d84426 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -11,7 +11,6 @@
* any later version.
*/
-#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/morus640_glue.h>
@@ -200,90 +199,6 @@ void crypto_morus640_glue_init_ops(struct crypto_aead *aead,
}
EXPORT_SYMBOL_GPL(crypto_morus640_glue_init_ops);
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setkey(&cryptd_tfm->base, key, keylen);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setkey);
-
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_setauthsize);
-
-int cryptd_morus640_glue_encrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_encrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_encrypt);
-
-int cryptd_morus640_glue_decrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- struct cryptd_aead *cryptd_tfm = *ctx;
-
- aead = &cryptd_tfm->base;
- if (irq_fpu_usable() && (!in_atomic() ||
- !cryptd_aead_queued(cryptd_tfm)))
- aead = cryptd_aead_child(cryptd_tfm);
-
- aead_request_set_tfm(req, aead);
-
- return crypto_aead_decrypt(req);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_decrypt);
-
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead *cryptd_tfm;
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
- const char *name = crypto_aead_alg(aead)->base.cra_driver_name;
- char internal_name[CRYPTO_MAX_ALG_NAME];
-
- if (snprintf(internal_name, CRYPTO_MAX_ALG_NAME, "__%s", name)
- >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- cryptd_tfm = cryptd_alloc_aead(internal_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(cryptd_tfm))
- return PTR_ERR(cryptd_tfm);
-
- *ctx = cryptd_tfm;
- crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
- return 0;
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_init_tfm);
-
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead)
-{
- struct cryptd_aead **ctx = crypto_aead_ctx(aead);
-
- cryptd_free_aead(*ctx);
-}
-EXPORT_SYMBOL_GPL(cryptd_morus640_glue_exit_tfm);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ondrej Mosnacek <omosnacek@gmail.com>");
MODULE_DESCRIPTION("MORUS-640 AEAD mode -- glue for x86 optimizations");
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
index 20d815ea4b6a..f7567cbd35b6 100644
--- a/arch/x86/crypto/nhpoly1305-avx2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -7,9 +7,10 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_avx2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !irq_fpu_usable())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
index ed68d164ce14..a661ede3b5cf 100644
--- a/arch/x86/crypto/nhpoly1305-sse2-glue.c
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -7,9 +7,10 @@
*/
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/nhpoly1305.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
static int nhpoly1305_sse2_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
- if (srclen < 64 || !irq_fpu_usable())
+ if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
do {
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 88cc01506c84..6eb65b237b3c 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -11,11 +11,11 @@
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/poly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
#include <asm/simd.h>
struct poly1305_simd_desc_ctx {
@@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
unsigned int bytes;
/* kernel_fpu_begin/end is costly, use fallback for small updates */
- if (srclen <= 288 || !may_use_simd())
+ if (srclen <= 288 || !crypto_simd_usable())
return crypto_poly1305_update(desc, src, srclen);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 7391c7de72c7..42f177afc33a 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -22,6 +22,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -29,7 +30,7 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha1_base.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
typedef void (sha1_transform_fn)(u32 *digest, const char *data,
unsigned int rounds);
@@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
{
struct sha1_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
return crypto_sha1_update(desc, data, len);
@@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
static int sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha1_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 773a873d2b28..73867da3cbee 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -37,8 +38,8 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha256_base.h>
-#include <asm/fpu/api.h>
#include <linux/string.h>
+#include <asm/simd.h>
asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
u64 rounds);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
{
struct sha256_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
return crypto_sha256_update(desc, data, len);
@@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
static int sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha256_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f1b811b60ba6..458356a3f124 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -28,16 +28,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <crypto/sha512_base.h>
-#include <asm/fpu/api.h>
-
-#include <linux/string.h>
+#include <asm/simd.h>
asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
u64 rounds);
@@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
{
struct sha512_state *sctx = shash_desc_ctx(desc);
- if (!irq_fpu_usable() ||
+ if (!crypto_simd_usable() ||
(sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
return crypto_sha512_update(desc, data, len);
@@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
static int sha512_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
{
- if (!irq_fpu_usable())
+ if (!crypto_simd_usable())
return crypto_sha512_finup(desc, data, len, out);
kernel_fpu_begin();
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 7bc105f47d21..a986b3c8294c 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -25,12 +25,14 @@
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>
+#include <linux/uaccess.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
-#include <linux/uaccess.h>
#include <asm/cpufeature.h>
+#include <asm/fpu/api.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -196,6 +198,13 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
exit_to_usermode_loop(regs, cached_flags);
+ /* Reload ti->flags; we may have rescheduled above. */
+ cached_flags = READ_ONCE(ti->flags);
+
+ fpregs_assert_state_consistent();
+ if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+
#ifdef CONFIG_COMPAT
/*
* Compat syscalls set TS_COMPAT. Make sure we clear it before
@@ -212,6 +221,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
#endif
user_enter_irqoff();
+
+ mds_user_clear_cpu_buffers();
}
#define SYSCALL_EXIT_WORK_FLAGS \
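Pulled together from the hunks above, the tail of the return-to-user path now does three things: re-read ti->flags because exit_to_usermode_loop() may have rescheduled, restore the user's FPU registers only here when _TIF_NEED_FPU_LOAD is set, and clear CPU buffers as the MDS mitigation. A condensed sketch, relying on the headers the hunk adds and not meant as a drop-in replacement:

static void exit_to_usermode_tail(struct thread_info *ti)
{
        u32 cached_flags;

        /* Reload ti->flags; the work loop above may have rescheduled. */
        cached_flags = READ_ONCE(ti->flags);

        /* Lazy FPU restore: load the user's FPU state only on this path. */
        fpregs_assert_state_consistent();
        if (unlikely(cached_flags & _TIF_NEED_FPU_LOAD))
                switch_fpu_return();

        user_enter_irqoff();

        /* MDS mitigation: flush CPU buffers before entering user mode. */
        mds_user_clear_cpu_buffers();
}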
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d309f30cf7af..7b23431be5cb 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
pushl %ebx
pushl %edi
pushl %esi
+ pushfl
/* switch stack */
movl %esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
+ popfl
popl %esi
popl %edi
popl %ebx
@@ -766,13 +768,12 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
-.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
jnz restore_all_kernel
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all_kernel
call preempt_schedule_irq
- jmp .Lneed_resched
+ jmp restore_all_kernel
END(resume_kernel)
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1f0efdb7b629..11aa3b2afa4d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -298,7 +298,7 @@ ENTRY(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movq TASK_stack_canary(%rsi), %rbx
- movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+ movq %rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif
#ifdef CONFIG_RETPOLINE
@@ -430,8 +430,8 @@ END(irq_entries_start)
* it before we actually move ourselves to the IRQ stack.
*/
- movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
- movq PER_CPU_VAR(irq_stack_ptr), %rsp
+ movq \old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
+ movq PER_CPU_VAR(hardirq_stack_ptr), %rsp
#ifdef CONFIG_DEBUG_ENTRY
/*
@@ -645,10 +645,9 @@ retint_kernel:
/* Check if we need preemption */
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
-0: cmpl $0, PER_CPU_VAR(__preempt_count)
+ cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
call preempt_schedule_irq
- jmp 0b
1:
#endif
/*
@@ -841,7 +840,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
/*
* Exception entry points.
*/
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)
/**
* idtentry - Generate an IDT entry stub
@@ -879,7 +878,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
* @paranoid == 2 is special: the stub will never switch stacks. This is for
* #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
*/
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -899,6 +898,20 @@ ENTRY(\sym)
jnz .Lfrom_usermode_switch_stack_\@
.endif
+ .if \create_gap == 1
+ /*
+ * If coming from kernel space, create a 6-word gap to allow the
+ * int3 handler to emulate a call instruction.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_no_gap_\@
+ .rept 6
+ pushq 5*8(%rsp)
+ .endr
+ UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
+ .endif
+
.if \paranoid
call paranoid_entry
.else
@@ -925,13 +938,13 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
- subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ subq $\ist_offset, CPU_TSS_IST(\shift_ist)
.endif
call \do_sym
.if \shift_ist != -1
- addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ addq $\ist_offset, CPU_TSS_IST(\shift_ist)
.endif
/* these procedures expect "no swapgs" flag in ebx */
@@ -1129,8 +1142,8 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
hv_stimer0_callback_vector hv_stimer0_vector_handler
#endif /* CONFIG_HYPERV */
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0
+idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
+idtentry int3 do_int3 has_error_code=0 create_gap=1
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN_PV
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 1f9607ed087c..ad968b7bac72 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -398,7 +398,6 @@
384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl
385 i386 io_pgetevents sys_io_pgetevents_time32 __ia32_compat_sys_io_pgetevents
386 i386 rseq sys_rseq __ia32_sys_rseq
-# don't use numbers 387 through 392, add new calls at the end
393 i386 semget sys_semget __ia32_sys_semget
394 i386 semctl sys_semctl __ia32_compat_sys_semctl
395 i386 shmget sys_shmget __ia32_sys_shmget
@@ -433,3 +432,9 @@
425 i386 io_uring_setup sys_io_uring_setup __ia32_sys_io_uring_setup
426 i386 io_uring_enter sys_io_uring_enter __ia32_sys_io_uring_enter
427 i386 io_uring_register sys_io_uring_register __ia32_sys_io_uring_register
+428 i386 open_tree sys_open_tree __ia32_sys_open_tree
+429 i386 move_mount sys_move_mount __ia32_sys_move_mount
+430 i386 fsopen sys_fsopen __ia32_sys_fsopen
+431 i386 fsconfig sys_fsconfig __ia32_sys_fsconfig
+432 i386 fsmount sys_fsmount __ia32_sys_fsmount
+433 i386 fspick sys_fspick __ia32_sys_fspick
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 92ee0b4378d4..b4e6f9e6204a 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -349,6 +349,12 @@
425 common io_uring_setup __x64_sys_io_uring_setup
426 common io_uring_enter __x64_sys_io_uring_enter
427 common io_uring_register __x64_sys_io_uring_register
+428 common open_tree __x64_sys_open_tree
+429 common move_mount __x64_sys_move_mount
+430 common fsopen __x64_sys_fsopen
+431 common fsconfig __x64_sys_fsconfig
+432 common fsmount __x64_sys_fsmount
+433 common fspick __x64_sys_fspick
#
# x32-specific system call numbers start at 512 to avoid cache impact
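The six new entries (428-433) are the new mount API syscalls. A hedged userspace sketch of how they chain together on x86-64, invoking them by the numbers from the table above: the FSCONFIG_*/MOVE_MOUNT_* fallback constants are assumed to mirror the 5.2 uapi <linux/mount.h>, and the program needs CAP_SYS_ADMIN to succeed.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Numbers taken from arch/x86/entry/syscalls/syscall_64.tbl above. */
#define __NR_move_mount 429
#define __NR_fsopen     430
#define __NR_fsconfig   431
#define __NR_fsmount    432

/* Fallbacks; assumed to match the 5.2-era uapi <linux/mount.h>. */
#ifndef FSCONFIG_SET_STRING
#define FSCONFIG_SET_STRING 1
#define FSCONFIG_CMD_CREATE 6
#endif
#ifndef MOVE_MOUNT_F_EMPTY_PATH
#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004
#endif

int main(void)
{
        /* Open a filesystem context for tmpfs and configure it. */
        int fsfd = syscall(__NR_fsopen, "tmpfs", 0);
        if (fsfd < 0) { perror("fsopen"); return 1; }

        syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "1M", 0);
        syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

        /* Turn the configured context into a detached mount ... */
        int mntfd = syscall(__NR_fsmount, fsfd, 0, 0);
        if (mntfd < 0) { perror("fsmount"); return 1; }

        /* ... and attach it at /mnt. */
        if (syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/mnt",
                    MOVE_MOUNT_F_EMPTY_PATH) < 0)
                perror("move_mount");

        close(mntfd);
        close(fsfd);
        return 0;
}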
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 5bfe2243a08f..42fe42e82baf 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -116,7 +116,7 @@ $(obj)/%-x32.o: $(obj)/%.o FORCE
targets += vdsox32.lds $(vobjx32s-y)
$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
$(obj)/vdsox32.so.dbg: $(obj)/vdsox32.lds $(vobjx32s) FORCE
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 007b3fe9d727..98c7d12b945c 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
#ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 8e470b018512..3a4d8d4d39f8 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -73,14 +73,12 @@ const char *outfilename;
enum {
sym_vvar_start,
sym_vvar_page,
- sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
};
const int special_pages[] = {
sym_vvar_page,
- sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
};
@@ -93,7 +91,6 @@ struct vdso_sym {
struct vdso_sym required_syms[] = {
[sym_vvar_start] = {"vvar_start", true},
[sym_vvar_page] = {"vvar_page", true},
- [sym_hpet_page] = {"hpet_page", true},
[sym_pvclock_page] = {"pvclock_page", true},
[sym_hvclock_page] = {"hvclock_page", true},
{"VDSO32_NOTE_MASK", true},
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index fa847a620f40..a20b134de2a8 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -7,7 +7,7 @@
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
void *stripped_addr, size_t stripped_len,
- FILE *outfile, const char *name)
+ FILE *outfile, const char *image_name)
{
int found_load = 0;
unsigned long load_size = -1; /* Work around bogus warning */
@@ -93,11 +93,12 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
int k;
ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
GET_LE(&symtab_hdr->sh_entsize) * i;
- const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
- GET_LE(&sym->st_name);
+ const char *sym_name = raw_addr +
+ GET_LE(&strtab_hdr->sh_offset) +
+ GET_LE(&sym->st_name);
for (k = 0; k < NSYMS; k++) {
- if (!strcmp(name, required_syms[k].name)) {
+ if (!strcmp(sym_name, required_syms[k].name)) {
if (syms[k]) {
fail("duplicate symbol %s\n",
required_syms[k].name);
@@ -134,7 +135,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
if (syms[sym_vvar_start] % 4096)
fail("vvar_begin must be a multiple of 4096\n");
- if (!name) {
+ if (!image_name) {
fwrite(stripped_addr, stripped_len, 1, outfile);
return;
}
@@ -157,7 +158,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
}
fprintf(outfile, "\n};\n\n");
- fprintf(outfile, "const struct vdso_image %s = {\n", name);
+ fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
fprintf(outfile, "\t.data = raw_data,\n");
fprintf(outfile, "\t.size = %lu,\n", mapping_size);
if (alt_sec) {
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index d45f3fbd232e..f15441b07dad 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -116,6 +116,110 @@ static __initconst const u64 amd_hw_cache_event_ids
},
};
+static __initconst const u64 amd_hw_cache_event_ids_f17h
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
+ [C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
+ [C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
+ [C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
+ [C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
+ [C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+[C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = 0,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+},
+};
+
/*
* AMD Performance Monitor K7 and later, up to and including Family 16h:
*/
@@ -865,9 +969,10 @@ __init int amd_pmu_init(void)
x86_pmu.amd_nb_constraints = 0;
}
- /* Events are common for all AMDs */
- memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
- sizeof(hw_cache_event_ids));
+ if (boot_cpu_data.x86 >= 0x17)
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
+ else
+ memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
return 0;
}
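The new Family 17h table only changes the per-event codes; the lookup shape stays the same three-level [cache][op][result] index, where (by the table's convention) 0 means no event is defined and -1 marks a combination that is rejected outright. A small standalone sketch of that indexing, with a reduced table whose values are copied from the F17h entries above:

#include <stdint.h>
#include <stdio.h>

enum { CACHE_L1D, CACHE_ITLB, CACHE_MAX };
enum { OP_READ, OP_WRITE, OP_MAX };
enum { RES_ACCESS, RES_MISS, RES_MAX };

/* Reduced stand-in for amd_hw_cache_event_ids_f17h; codes from the diff above. */
static const int64_t f17h_ids[CACHE_MAX][OP_MAX][RES_MAX] = {
        [CACHE_L1D] = {
                [OP_READ]  = { [RES_ACCESS] = 0x0040, [RES_MISS] = 0xc860 },
                [OP_WRITE] = { [RES_ACCESS] = 0,      [RES_MISS] = 0      },
        },
        [CACHE_ITLB] = {
                [OP_READ]  = { [RES_ACCESS] = 0x0084, [RES_MISS] = 0xff85 },
                [OP_WRITE] = { [RES_ACCESS] = -1,     [RES_MISS] = -1     },
        },
};

static void show(int cache, int op, int res)
{
        int64_t code = f17h_ids[cache][op][res];

        if (code == -1)
                printf("unsupported combination\n");
        else if (code == 0)
                printf("no event defined\n");
        else
                printf("event code 0x%llx\n", (unsigned long long)code);
}

int main(void)
{
        show(CACHE_L1D, OP_READ, RES_ACCESS);   /* 0x0040 */
        show(CACHE_ITLB, OP_WRITE, RES_MISS);   /* unsupported */
        return 0;
}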
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index 7635c23f7d82..58a6993d7eb3 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -393,7 +393,7 @@ static __init int _init_events_attrs(void)
return 0;
}
-const struct attribute_group *amd_iommu_attr_groups[] = {
+static const struct attribute_group *amd_iommu_attr_groups[] = {
&amd_iommu_format_group,
&amd_iommu_cpumask_group,
&amd_iommu_events_group,
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81911e11a15d..f315425d8468 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -560,6 +560,21 @@ int x86_pmu_hw_config(struct perf_event *event)
return -EINVAL;
}
+ /* sample_regs_user never support XMM registers */
+ if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+ return -EINVAL;
+ /*
+ * Besides the general purpose registers, XMM registers may
+ * be collected in PEBS on some platforms, e.g. Icelake
+ */
+ if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
+ if (x86_pmu.pebs_no_xmm_regs)
+ return -EINVAL;
+
+ if (!event->attr.precise_ip)
+ return -EINVAL;
+ }
+
return x86_setup_perfctr(event);
}
@@ -661,6 +676,10 @@ static inline int is_x86_event(struct perf_event *event)
return event->pmu == &pmu;
}
+struct pmu *x86_get_pmu(void)
+{
+ return &pmu;
+}
/*
* Event scheduler state:
*
@@ -849,18 +868,43 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
struct event_constraint *c;
unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
struct perf_event *e;
- int i, wmin, wmax, unsched = 0;
+ int n0, i, wmin, wmax, unsched = 0;
struct hw_perf_event *hwc;
bitmap_zero(used_mask, X86_PMC_IDX_MAX);
+ /*
+ * Compute the number of events already present; see x86_pmu_add(),
+ * validate_group() and x86_pmu_commit_txn(). For the former two
+ * cpuc->n_events hasn't been updated yet, while for the latter
+ * cpuc->n_txn contains the number of events added in the current
+ * transaction.
+ */
+ n0 = cpuc->n_events;
+ if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
+ n0 -= cpuc->n_txn;
+
if (x86_pmu.start_scheduling)
x86_pmu.start_scheduling(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
- cpuc->event_constraint[i] = NULL;
- c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
- cpuc->event_constraint[i] = c;
+ c = cpuc->event_constraint[i];
+
+ /*
+ * Previously scheduled events should have a cached constraint,
+ * while new events should not have one.
+ */
+ WARN_ON_ONCE((c && i >= n0) || (!c && i < n0));
+
+ /*
+ * Request constraints for new events; or for those events that
+ * have a dynamic constraint -- for those the constraint can
+ * change due to external factors (sibling state, allow_tfa).
+ */
+ if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
+ c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+ cpuc->event_constraint[i] = c;
+ }
wmin = min(wmin, c->weight);
wmax = max(wmax, c->weight);
@@ -925,25 +969,20 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
if (!unsched && assign) {
for (i = 0; i < n; i++) {
e = cpuc->event_list[i];
- e->hw.flags |= PERF_X86_EVENT_COMMITTED;
if (x86_pmu.commit_scheduling)
x86_pmu.commit_scheduling(cpuc, i, assign[i]);
}
} else {
- for (i = 0; i < n; i++) {
+ for (i = n0; i < n; i++) {
e = cpuc->event_list[i];
- /*
- * do not put_constraint() on comitted events,
- * because they are good to go
- */
- if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
- continue;
/*
* release events that failed scheduling
*/
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, e);
+
+ cpuc->event_constraint[i] = NULL;
}
}
@@ -1373,11 +1412,6 @@ static void x86_pmu_del(struct perf_event *event, int flags)
int i;
/*
- * event is descheduled
- */
- event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
-
- /*
* If we're called during a txn, we only need to undo x86_pmu.add.
* The events never got scheduled and ->cancel_txn will truncate
* the event_list.
@@ -1413,6 +1447,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
}
+ cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
perf_event_update_userpage(event);
@@ -2024,7 +2059,7 @@ static int validate_event(struct perf_event *event)
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
- c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
+ c = x86_pmu.get_event_constraints(fake_cpuc, 0, event);
if (!c || !c->weight)
ret = -EINVAL;
@@ -2072,8 +2107,7 @@ static int validate_group(struct perf_event *event)
if (n < 0)
goto out;
- fake_cpuc->n_events = n;
-
+ fake_cpuc->n_events = 0;
ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
out:
@@ -2348,6 +2382,15 @@ void arch_perf_update_userpage(struct perf_event *event,
cyc2ns_read_end();
}
+/*
+ * Determine whether the regs were taken from an irq/exception handler rather
+ * than from perf_arch_fetch_caller_regs().
+ */
+static bool perf_hw_regs(struct pt_regs *regs)
+{
+ return regs->flags & X86_EFLAGS_FIXED;
+}
+
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
@@ -2359,11 +2402,15 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
return;
}
- if (perf_callchain_store(entry, regs->ip))
- return;
+ if (perf_hw_regs(regs)) {
+ if (perf_callchain_store(entry, regs->ip))
+ return;
+ unwind_start(&state, current, regs, NULL);
+ } else {
+ unwind_start(&state, current, NULL, (void *)regs->sp);
+ }
- for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
- unwind_next_frame(&state)) {
+ for (; !unwind_done(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
if (!addr || perf_callchain_store(entry, addr))
return;
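The new perf_hw_regs() test keys off X86_EFLAGS_FIXED, the architecturally always-set bit 1 of EFLAGS: frames captured by an interrupt or exception carry it, while the synthetic regs built by perf_arch_fetch_caller_regs() leave the flags word zero, which is how the callchain code picks its unwind starting point. A trivial standalone version of the check; the 0x2 value is the EFLAGS fixed bit, and the struct is an illustrative stand-in rather than the kernel's pt_regs.

#include <stdbool.h>
#include <stdio.h>

#define X86_EFLAGS_FIXED 0x00000002UL   /* bit 1 of EFLAGS always reads as 1 */

struct fake_regs {
        unsigned long flags;
        unsigned long ip;
        unsigned long sp;
};

/* Mirrors perf_hw_regs(): hardware-captured frames have the fixed bit set. */
static bool perf_hw_regs(const struct fake_regs *regs)
{
        return regs->flags & X86_EFLAGS_FIXED;
}

int main(void)
{
        struct fake_regs irq_frame = { .flags = 0x246, .ip = 0x401000 };
        struct fake_regs synthetic = { .flags = 0,     .sp = 0x7ffc0010 };

        printf("irq frame: %s\n",
               perf_hw_regs(&irq_frame) ? "unwind from regs" : "unwind from sp");
        printf("synthetic: %s\n",
               perf_hw_regs(&synthetic) ? "unwind from regs" : "unwind from sp");
        return 0;
}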
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 7cdd7b13bbda..890a3fb5706f 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -23,7 +23,7 @@
#include <linux/device.h>
#include <linux/coredump.h>
-#include <asm-generic/sizes.h>
+#include <linux/sizes.h>
#include <asm/perf_event.h>
#include "../perf_event.h"
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index f9451566cd9b..546d13e436aa 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -239,6 +239,35 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct event_constraint intel_icl_event_constraints[] = {
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ INTEL_UEVENT_CONSTRAINT(0x1c0, 0), /* INST_RETIRED.PREC_DIST */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
+ INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
+ INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
+ INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
+ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
+ INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
+ INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
+static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ EVENT_EXTRA_END
+};
+
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -1827,6 +1856,45 @@ static __initconst const u64 glp_hw_cache_extra_regs
},
};
+#define TNT_LOCAL_DRAM BIT_ULL(26)
+#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
+#define TNT_DEMAND_WRITE GLM_DEMAND_RFO
+#define TNT_LLC_ACCESS GLM_ANY_RESPONSE
+#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
+ SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
+#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
+
+static __initconst const u64 tnt_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
+ TNT_LLC_ACCESS,
+ [C(RESULT_MISS)] = TNT_DEMAND_READ|
+ TNT_LLC_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
+ TNT_LLC_ACCESS,
+ [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
+ TNT_LLC_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = 0x0,
+ [C(RESULT_MISS)] = 0x0,
+ },
+ },
+};
+
+static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1),
+ EVENT_EXTRA_END
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -2015,7 +2083,7 @@ static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int
/*
* We're going to use PMC3, make sure TFA is set before we touch it.
*/
- if (cntr == 3 && !cpuc->is_fake)
+ if (cntr == 3)
intel_set_tfa(cpuc, true);
}
@@ -2091,15 +2159,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
- if (unlikely(event->attr.precise_ip))
- intel_pmu_pebs_disable(event);
-
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_disable_fixed(hwc);
return;
}
x86_pmu_disable_event(event);
+
+ /*
+ * Needs to be called after x86_pmu_disable_event,
+ * so we don't trigger the event without PEBS bit set.
+ */
+ if (unlikely(event->attr.precise_ip))
+ intel_pmu_pebs_disable(event);
}
static void intel_pmu_del_event(struct perf_event *event)
@@ -2145,6 +2217,11 @@ static void intel_pmu_enable_fixed(struct perf_event *event)
bits <<= (idx * 4);
mask = 0xfULL << (idx * 4);
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
+ bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
+ }
+
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
@@ -2307,7 +2384,11 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (__test_and_clear_bit(55, (unsigned long *)&status)) {
handled++;
- intel_pt_interrupt();
+ if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
+ perf_guest_cbs->handle_intel_pt_intr))
+ perf_guest_cbs->handle_intel_pt_intr();
+ else
+ intel_pt_interrupt();
}
/*
@@ -2688,7 +2769,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
if (x86_pmu.event_constraints) {
for_each_event_constraint(c, x86_pmu.event_constraints) {
- if ((event->hw.config & c->cmask) == c->code) {
+ if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
}
@@ -2838,7 +2919,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
struct intel_excl_states *xlo;
int tid = cpuc->excl_thread_id;
- int is_excl, i;
+ int is_excl, i, w;
/*
* validating a group does not require
@@ -2894,36 +2975,40 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* SHARED : sibling counter measuring non-exclusive event
* UNUSED : sibling counter unused
*/
+ w = c->weight;
for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
/*
* exclusive event in sibling counter
* our corresponding counter cannot be used
* regardless of our event
*/
- if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
+ if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
__clear_bit(i, c->idxmsk);
+ w--;
+ continue;
+ }
/*
* if measuring an exclusive event, sibling
* measuring non-exclusive, then counter cannot
* be used
*/
- if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
+ if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
__clear_bit(i, c->idxmsk);
+ w--;
+ continue;
+ }
}
/*
- * recompute actual bit weight for scheduling algorithm
- */
- c->weight = hweight64(c->idxmsk64);
-
- /*
* if we return an empty mask, then switch
* back to static empty constraint to avoid
* the cost of freeing later on
*/
- if (c->weight == 0)
+ if (!w)
c = &emptyconstraint;
+ c->weight = w;
+
return c;
}
@@ -2931,11 +3016,9 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = NULL;
- struct event_constraint *c2;
+ struct event_constraint *c1, *c2;
- if (idx >= 0) /* fake does < 0 */
- c1 = cpuc->event_constraint[idx];
+ c1 = cpuc->event_constraint[idx];
/*
* first time only
@@ -2943,7 +3026,8 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
* - dynamic constraint: handled by intel_get_excl_constraints()
*/
c2 = __intel_get_event_constraints(cpuc, idx, event);
- if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
+ if (c1) {
+ WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
c1->weight = c2->weight;
c2 = c1;
@@ -3185,7 +3269,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
- if (!(event->attr.freq || event->attr.wakeup_events)) {
+ if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_large_pebs_flags(event)))
@@ -3366,6 +3450,12 @@ static struct event_constraint counter0_constraint =
static struct event_constraint counter2_constraint =
EVENT_CONSTRAINT(0, 0x4, 0);
+static struct event_constraint fixed0_constraint =
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0);
+
+static struct event_constraint fixed0_counter0_constraint =
+ INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
+
static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
@@ -3385,6 +3475,21 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
static struct event_constraint *
+icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ /*
+ * Fixed counter 0 has less skid.
+ * Force instruction:ppp in Fixed counter 0
+ */
+ if ((event->attr.precise_ip == 3) &&
+ constraint_match(&fixed0_constraint, event->hw.config))
+ return &fixed0_constraint;
+
+ return hsw_get_event_constraints(cpuc, idx, event);
+}
+
+static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
@@ -3399,6 +3504,29 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
+static struct event_constraint *
+tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c;
+
+ /*
+ * :ppp means to do reduced skid PEBS,
+ * which is available on PMC0 and fixed counter 0.
+ */
+ if (event->attr.precise_ip == 3) {
+ /* Force instruction:ppp on PMC0 and Fixed counter 0 */
+ if (constraint_match(&fixed0_constraint, event->hw.config))
+ return &fixed0_counter0_constraint;
+
+ return &counter0_constraint;
+ }
+
+ c = intel_get_event_constraints(cpuc, idx, event);
+
+ return c;
+}
+
static bool allow_tsx_force_abort = true;
static struct event_constraint *
@@ -3410,7 +3538,7 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
/*
* Without TFA we must not use PMC3.
*/
- if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
c = dyn_constraint(cpuc, c, idx);
c->idxmsk64 &= ~(1ULL << 3);
c->weight--;
@@ -3507,6 +3635,8 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
+ cpuc->pebs_record_size = x86_pmu.pebs_record_size;
+
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
@@ -4114,6 +4244,42 @@ static struct attribute *hsw_tsx_events_attrs[] = {
NULL
};
+EVENT_ATTR_STR(tx-capacity-read, tx_capacity_read, "event=0x54,umask=0x80");
+EVENT_ATTR_STR(tx-capacity-write, tx_capacity_write, "event=0x54,umask=0x2");
+EVENT_ATTR_STR(el-capacity-read, el_capacity_read, "event=0x54,umask=0x80");
+EVENT_ATTR_STR(el-capacity-write, el_capacity_write, "event=0x54,umask=0x2");
+
+static struct attribute *icl_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ NULL,
+};
+
+static struct attribute *icl_tsx_events_attrs[] = {
+ EVENT_PTR(tx_start),
+ EVENT_PTR(tx_abort),
+ EVENT_PTR(tx_commit),
+ EVENT_PTR(tx_capacity_read),
+ EVENT_PTR(tx_capacity_write),
+ EVENT_PTR(tx_conflict),
+ EVENT_PTR(el_start),
+ EVENT_PTR(el_abort),
+ EVENT_PTR(el_commit),
+ EVENT_PTR(el_capacity_read),
+ EVENT_PTR(el_capacity_write),
+ EVENT_PTR(el_conflict),
+ EVENT_PTR(cycles_t),
+ EVENT_PTR(cycles_ct),
+ NULL,
+};
+
+static __init struct attribute **get_icl_events_attrs(void)
+{
+ return boot_cpu_has(X86_FEATURE_RTM) ?
+ merge_attr(icl_events_attrs, icl_tsx_events_attrs) :
+ icl_events_attrs;
+}
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
@@ -4153,6 +4319,50 @@ done:
return count;
}
+static void update_tfa_sched(void *ignored)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * check if PMC3 is used
+ * and if so force schedule out for all event types all contexts
+ */
+ if (test_bit(3, cpuc->active_mask))
+ perf_pmu_resched(x86_get_pmu());
+}
+
+static ssize_t show_sysctl_tfa(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, 40, "%d\n", allow_tsx_force_abort);
+}
+
+static ssize_t set_sysctl_tfa(struct device *cdev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ /* no change */
+ if (val == allow_tsx_force_abort)
+ return count;
+
+ allow_tsx_force_abort = val;
+
+ get_online_cpus();
+ on_each_cpu(update_tfa_sched, NULL, 1);
+ put_online_cpus();
+
+ return count;
+}
+
+
static DEVICE_ATTR_RW(freeze_on_smi);
static ssize_t branches_show(struct device *cdev,
@@ -4185,7 +4395,9 @@ static struct attribute *intel_pmu_caps_attrs[] = {
NULL
};
-static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+static DEVICE_ATTR(allow_tsx_force_abort, 0644,
+ show_sysctl_tfa,
+ set_sysctl_tfa);
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
@@ -4446,6 +4658,32 @@ __init int intel_pmu_init(void)
name = "goldmont_plus";
break;
+ case INTEL_FAM6_ATOM_TREMONT_X:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.extra_regs = intel_tnt_extra_regs;
+ /*
+ * It's recommended to use CPU_CLK_UNHALTED.CORE_P + NPEBS
+ * for precise cycles.
+ */
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.lbr_pt_coexist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.get_event_constraints = tnt_get_event_constraints;
+ extra_attr = slm_format_attr;
+ pr_cont("Tremont events, ");
+ name = "Tremont";
+ break;
+
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_WESTMERE_EP:
case INTEL_FAM6_WESTMERE_EX:
@@ -4694,13 +4932,41 @@ __init int intel_pmu_init(void)
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
- intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr;
}
pr_cont("Skylake events, ");
name = "skylake";
break;
+ case INTEL_FAM6_ICELAKE_MOBILE:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+ hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+ intel_pmu_lbr_init_skl();
+
+ x86_pmu.event_constraints = intel_icl_event_constraints;
+ x86_pmu.pebs_constraints = intel_icl_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_icl_extra_regs;
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = icl_get_event_constraints;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ hsw_format_attr : nhm_format_attr;
+ extra_attr = merge_attr(extra_attr, skl_format_attr);
+ x86_pmu.cpu_events = get_icl_events_attrs();
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
+ x86_pmu.lbr_pt_coexist = true;
+ intel_pmu_pebs_data_source_skl(false);
+ pr_cont("Icelake events, ");
+ name = "icelake";
+ break;
+
default:
switch (x86_pmu.version) {
case 1:
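Several hunks above switch from the old (config & cmask) == code equality to constraint_match(), which is what lets the new INTEL_EVENT_CONSTRAINT_RANGE() entries cover a whole span of event codes. The helper itself is not part of this diff; the sketch below shows one plausible way such a range match can be expressed, assuming the constraint carries a base code plus a size. All names and fields here are illustrative, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct constraint {
        uint64_t code;  /* lowest event code covered */
        uint64_t cmask; /* which config bits take part in the match */
        uint64_t size;  /* number of additional codes covered (0 = exact) */
};

/* Range-aware match: exact when size == 0, inclusive range otherwise. */
static bool constraint_match(const struct constraint *c, uint64_t config)
{
        uint64_t code = config & c->cmask;

        return code >= c->code && code <= c->code + c->size;
}

int main(void)
{
        /* Roughly INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, ...): event select only. */
        struct constraint range = { .code = 0xd0, .cmask = 0xff, .size = 0xe6 - 0xd0 };

        printf("0x01d1 -> %s\n", constraint_match(&range, 0x01d1) ? "match" : "no match");
        printf("0x01c2 -> %s\n", constraint_match(&range, 0x01c2) ? "match" : "no match");
        return 0;
}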
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index d41de9af7a39..6072f92cb8ea 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -578,6 +578,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 10c99ce1fead..7a9f5dac5abe 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -849,6 +849,26 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_icl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
+
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
+
+ INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
+
+ /*
+ * Everything else is handled by PMU_FL_PEBS_ALL, because we
+ * need the full constraints from the main table.
+ */
+
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
@@ -858,7 +878,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
- if ((event->hw.config & c->cmask) == c->code) {
+ if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
}
@@ -906,17 +926,87 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
- reserved * x86_pmu.pebs_record_size;
+ reserved * cpuc->pebs_record_size;
} else {
- threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+ threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
}
ds->pebs_interrupt_threshold = threshold;
}
+static void adaptive_pebs_record_size_update(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 pebs_data_cfg = cpuc->pebs_data_cfg;
+ int sz = sizeof(struct pebs_basic);
+
+ if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
+ sz += sizeof(struct pebs_meminfo);
+ if (pebs_data_cfg & PEBS_DATACFG_GP)
+ sz += sizeof(struct pebs_gprs);
+ if (pebs_data_cfg & PEBS_DATACFG_XMMS)
+ sz += sizeof(struct pebs_xmm);
+ if (pebs_data_cfg & PEBS_DATACFG_LBRS)
+ sz += x86_pmu.lbr_nr * sizeof(struct pebs_lbr_entry);
+
+ cpuc->pebs_record_size = sz;
+}
+
+#define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
+ PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
+ PERF_SAMPLE_TRANSACTION)
+
+static u64 pebs_update_adaptive_cfg(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ u64 sample_type = attr->sample_type;
+ u64 pebs_data_cfg = 0;
+ bool gprs, tsx_weight;
+
+ if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
+ attr->precise_ip > 1)
+ return pebs_data_cfg;
+
+ if (sample_type & PERF_PEBS_MEMINFO_TYPE)
+ pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
+
+ /*
+ * We need GPRs when:
+ * + user requested them
+ * + precise_ip < 2 for the non event IP
+ * + For RTM TSX weight we need GPRs for the abort code.
+ */
+ gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_GP_REGS);
+
+ tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
+ ((attr->config & INTEL_ARCH_EVENT_MASK) ==
+ x86_pmu.rtm_abort_event);
+
+ if (gprs || (attr->precise_ip < 2) || tsx_weight)
+ pebs_data_cfg |= PEBS_DATACFG_GP;
+
+ if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
+ (attr->sample_regs_intr & PEBS_XMM_REGS))
+ pebs_data_cfg |= PEBS_DATACFG_XMMS;
+
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
+ /*
+ * For now always log all LBRs. Could configure this
+ * later.
+ */
+ pebs_data_cfg |= PEBS_DATACFG_LBRS |
+ ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
+ }
+
+ return pebs_data_cfg;
+}
+
static void
-pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
+pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
+ struct perf_event *event, bool add)
{
+ struct pmu *pmu = event->ctx->pmu;
/*
* Make sure we get updated with the first PEBS
* event. It will trigger also during removal, but
@@ -933,6 +1023,29 @@ pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
update = true;
}
+ /*
+ * The PEBS record doesn't shrink on pmu::del(). Doing so would require
+ * iterating all remaining PEBS events to reconstruct the config.
+ */
+ if (x86_pmu.intel_cap.pebs_baseline && add) {
+ u64 pebs_data_cfg;
+
+ /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
+ if (cpuc->n_pebs == 1) {
+ cpuc->pebs_data_cfg = 0;
+ cpuc->pebs_record_size = sizeof(struct pebs_basic);
+ }
+
+ pebs_data_cfg = pebs_update_adaptive_cfg(event);
+
+ /* Update pebs_record_size if new event requires more data. */
+ if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
+ cpuc->pebs_data_cfg |= pebs_data_cfg;
+ adaptive_pebs_record_size_update();
+ update = true;
+ }
+ }
+
if (update)
pebs_update_threshold(cpuc);
}
@@ -947,7 +1060,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;
- pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+ pebs_update_state(needed_cb, cpuc, event, true);
}
void intel_pmu_pebs_enable(struct perf_event *event)
@@ -960,11 +1073,19 @@ void intel_pmu_pebs_enable(struct perf_event *event)
cpuc->pebs_enabled |= 1ULL << hwc->idx;
- if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+ if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled |= 1ULL << 63;
+ if (x86_pmu.intel_cap.pebs_baseline) {
+ hwc->config |= ICL_EVENTSEL_ADAPTIVE;
+ if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
+ wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
+ cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
+ }
+ }
+
/*
* Use auto-reload if possible to save a MSR write in the PMI.
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
@@ -991,7 +1112,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;
- pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
+ pebs_update_state(needed_cb, cpuc, event, false);
}
void intel_pmu_pebs_disable(struct perf_event *event)
@@ -1004,7 +1125,8 @@ void intel_pmu_pebs_disable(struct perf_event *event)
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
- if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
+ if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
+ (x86_pmu.version < 5))
cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63);
@@ -1125,34 +1247,57 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
return 0;
}
-static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
- if (pebs->tsx_tuning) {
- union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
+ if (tsx_tuning) {
+ union hsw_tsx_tuning tsx = { .value = tsx_tuning };
return tsx.cycles_last_block;
}
return 0;
}
-static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
+static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
- u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
+ u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
/* For RTM XABORTs also log the abort code from AX */
- if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
- txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
+ if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
+ txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
return txn;
}
-static void setup_pebs_sample_data(struct perf_event *event,
- struct pt_regs *iregs, void *__pebs,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+static inline u64 get_pebs_status(void *n)
{
+ if (x86_pmu.intel_cap.pebs_format < 4)
+ return ((struct pebs_record_nhm *)n)->status;
+ return ((struct pebs_basic *)n)->applicable_counters;
+}
+
#define PERF_X86_EVENT_PEBS_HSW_PREC \
(PERF_X86_EVENT_PEBS_ST_HSW | \
PERF_X86_EVENT_PEBS_LD_HSW | \
PERF_X86_EVENT_PEBS_NA_HSW)
+
+static u64 get_data_src(struct perf_event *event, u64 aux)
+{
+ u64 val = PERF_MEM_NA;
+ int fl = event->hw.flags;
+ bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+
+ if (fl & PERF_X86_EVENT_PEBS_LDLAT)
+ val = load_latency_data(aux);
+ else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
+ val = precise_datala_hsw(event, aux);
+ else if (fst)
+ val = precise_store_data(aux);
+ return val;
+}
+
+static void setup_pebs_fixed_sample_data(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
/*
* We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries.
@@ -1160,17 +1305,13 @@ static void setup_pebs_sample_data(struct perf_event *event,
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_skl *pebs = __pebs;
u64 sample_type;
- int fll, fst, dsrc;
- int fl = event->hw.flags;
+ int fll;
if (pebs == NULL)
return;
sample_type = event->attr.sample_type;
- dsrc = sample_type & PERF_SAMPLE_DATA_SRC;
-
- fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
- fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
+ fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
perf_sample_data_init(data, 0, event->hw.last_period);
@@ -1185,16 +1326,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
/*
* data.data_src encodes the data source
*/
- if (dsrc) {
- u64 val = PERF_MEM_NA;
- if (fll)
- val = load_latency_data(pebs->dse);
- else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
- val = precise_datala_hsw(event, pebs->dse);
- else if (fst)
- val = precise_store_data(pebs->dse);
- data->data_src.val = val;
- }
+ if (sample_type & PERF_SAMPLE_DATA_SRC)
+ data->data_src.val = get_data_src(event, pebs->dse);
/*
* We must however always use iregs for the unwinder to stay sane; the
@@ -1281,10 +1414,11 @@ static void setup_pebs_sample_data(struct perf_event *event,
if (x86_pmu.intel_cap.pebs_format >= 2) {
/* Only set the TSX weight when no memory weight. */
if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
- data->weight = intel_hsw_weight(pebs);
+ data->weight = intel_get_tsx_weight(pebs->tsx_tuning);
if (sample_type & PERF_SAMPLE_TRANSACTION)
- data->txn = intel_hsw_transaction(pebs);
+ data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
+ pebs->ax);
}
/*
@@ -1301,6 +1435,140 @@ static void setup_pebs_sample_data(struct perf_event *event,
data->br_stack = &cpuc->lbr_stack;
}
+static void adaptive_pebs_save_regs(struct pt_regs *regs,
+ struct pebs_gprs *gprs)
+{
+ regs->ax = gprs->ax;
+ regs->bx = gprs->bx;
+ regs->cx = gprs->cx;
+ regs->dx = gprs->dx;
+ regs->si = gprs->si;
+ regs->di = gprs->di;
+ regs->bp = gprs->bp;
+ regs->sp = gprs->sp;
+#ifndef CONFIG_X86_32
+ regs->r8 = gprs->r8;
+ regs->r9 = gprs->r9;
+ regs->r10 = gprs->r10;
+ regs->r11 = gprs->r11;
+ regs->r12 = gprs->r12;
+ regs->r13 = gprs->r13;
+ regs->r14 = gprs->r14;
+ regs->r15 = gprs->r15;
+#endif
+}
+
+/*
+ * With adaptive PEBS the layout depends on what fields are configured.
+ */
+
+static void setup_pebs_adaptive_sample_data(struct perf_event *event,
+ struct pt_regs *iregs, void *__pebs,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct pebs_basic *basic = __pebs;
+ void *next_record = basic + 1;
+ u64 sample_type;
+ u64 format_size;
+ struct pebs_meminfo *meminfo = NULL;
+ struct pebs_gprs *gprs = NULL;
+ struct x86_perf_regs *perf_regs;
+
+ if (basic == NULL)
+ return;
+
+ perf_regs = container_of(regs, struct x86_perf_regs, regs);
+ perf_regs->xmm_regs = NULL;
+
+ sample_type = event->attr.sample_type;
+ format_size = basic->format_size;
+ perf_sample_data_init(data, 0, event->hw.last_period);
+ data->period = event->hw.last_period;
+
+ if (event->attr.use_clockid == 0)
+ data->time = native_sched_clock_from_tsc(basic->tsc);
+
+ /*
+ * We must however always use iregs for the unwinder to stay sane; the
+ * record BP,SP,IP can point into thin air when the record is from a
+ * previous PMI context or an (I)RET happened between the record and
+ * PMI.
+ */
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ data->callchain = perf_callchain(event, iregs);
+
+ *regs = *iregs;
+ /* The ip in basic is EventingIP */
+ set_linear_ip(regs, basic->ip);
+ regs->flags = PERF_EFLAGS_EXACT;
+
+ /*
+ * The record for MEMINFO is in front of GP
+ * But PERF_SAMPLE_TRANSACTION needs gprs->ax.
+ * Save the pointer here but process later.
+ */
+ if (format_size & PEBS_DATACFG_MEMINFO) {
+ meminfo = next_record;
+ next_record = meminfo + 1;
+ }
+
+ if (format_size & PEBS_DATACFG_GP) {
+ gprs = next_record;
+ next_record = gprs + 1;
+
+ if (event->attr.precise_ip < 2) {
+ set_linear_ip(regs, gprs->ip);
+ regs->flags &= ~PERF_EFLAGS_EXACT;
+ }
+
+ if (sample_type & PERF_SAMPLE_REGS_INTR)
+ adaptive_pebs_save_regs(regs, gprs);
+ }
+
+ if (format_size & PEBS_DATACFG_MEMINFO) {
+ if (sample_type & PERF_SAMPLE_WEIGHT)
+ data->weight = meminfo->latency ?:
+ intel_get_tsx_weight(meminfo->tsx_tuning);
+
+ if (sample_type & PERF_SAMPLE_DATA_SRC)
+ data->data_src.val = get_data_src(event, meminfo->aux);
+
+ if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+ data->addr = meminfo->address;
+
+ if (sample_type & PERF_SAMPLE_TRANSACTION)
+ data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
+ gprs ? gprs->ax : 0);
+ }
+
+ if (format_size & PEBS_DATACFG_XMMS) {
+ struct pebs_xmm *xmm = next_record;
+
+ next_record = xmm + 1;
+ perf_regs->xmm_regs = xmm->xmm;
+ }
+
+ if (format_size & PEBS_DATACFG_LBRS) {
+ struct pebs_lbr *lbr = next_record;
+ int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
+ & 0xff) + 1;
+ next_record = next_record + num_lbr*sizeof(struct pebs_lbr_entry);
+
+ if (has_branch_stack(event)) {
+ intel_pmu_store_pebs_lbrs(lbr);
+ data->br_stack = &cpuc->lbr_stack;
+ }
+ }
+
+ WARN_ONCE(next_record != __pebs + (format_size >> 48),
+ "PEBS record size %llu, expected %llu, config %llx\n",
+ format_size >> 48,
+ (u64)(next_record - __pebs),
+ basic->format_size);
+}
+
static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
@@ -1318,19 +1586,19 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit)
if (base == NULL)
return NULL;
- for (at = base; at < top; at += x86_pmu.pebs_record_size) {
- struct pebs_record_nhm *p = at;
+ for (at = base; at < top; at += cpuc->pebs_record_size) {
+ unsigned long status = get_pebs_status(at);
- if (test_bit(bit, (unsigned long *)&p->status)) {
+ if (test_bit(bit, (unsigned long *)&status)) {
/* PEBS v3 has accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3)
return at;
- if (p->status == (1 << bit))
+ if (status == (1 << bit))
return at;
/* clear non-PEBS bit and re-check */
- pebs_status = p->status & cpuc->pebs_enabled;
+ pebs_status = status & cpuc->pebs_enabled;
pebs_status &= PEBS_COUNTER_MASK;
if (pebs_status == (1 << bit))
return at;
@@ -1410,11 +1678,18 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs,
void *base, void *top,
- int bit, int count)
+ int bit, int count,
+ void (*setup_sample)(struct perf_event *,
+ struct pt_regs *,
+ void *,
+ struct perf_sample_data *,
+ struct pt_regs *))
{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct perf_sample_data data;
- struct pt_regs regs;
+ struct x86_perf_regs perf_regs;
+ struct pt_regs *regs = &perf_regs.regs;
void *at = get_next_pebs_record_by_bit(base, top, bit);
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
@@ -1429,20 +1704,20 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
return;
while (count > 1) {
- setup_pebs_sample_data(event, iregs, at, &data, &regs);
- perf_event_output(event, &data, &regs);
- at += x86_pmu.pebs_record_size;
+ setup_sample(event, iregs, at, &data, regs);
+ perf_event_output(event, &data, regs);
+ at += cpuc->pebs_record_size;
at = get_next_pebs_record_by_bit(at, top, bit);
count--;
}
- setup_pebs_sample_data(event, iregs, at, &data, &regs);
+ setup_sample(event, iregs, at, &data, regs);
/*
* All but the last records are processed.
* The last one is left to be able to call the overflow handler.
*/
- if (perf_event_overflow(event, &data, &regs)) {
+ if (perf_event_overflow(event, &data, regs)) {
x86_pmu_stop(event, 0);
return;
}
@@ -1483,7 +1758,27 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
return;
}
- __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
+ __intel_pmu_pebs_event(event, iregs, at, top, 0, n,
+ setup_pebs_fixed_sample_data);
+}
+
+static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
+{
+ struct perf_event *event;
+ int bit;
+
+ /*
+ * The drain_pebs() could be called twice in a short period
+ * for auto-reload event in pmu::read(). There are no
+ * overflows have happened in between.
+ * It needs to call intel_pmu_save_and_restart_reload() to
+ * update the event->count for this case.
+ */
+ for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
+ event = cpuc->events[bit];
+ if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
+ intel_pmu_save_and_restart_reload(event, 0);
+ }
}
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
@@ -1513,19 +1808,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
}
if (unlikely(base >= top)) {
- /*
- * The drain_pebs() could be called twice in a short period
- * for auto-reload event in pmu::read(). There are no
- * overflows have happened in between.
- * It needs to call intel_pmu_save_and_restart_reload() to
- * update the event->count for this case.
- */
- for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled,
- size) {
- event = cpuc->events[bit];
- if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
- intel_pmu_save_and_restart_reload(event, 0);
- }
+ intel_pmu_pebs_event_update_no_drain(cpuc, size);
return;
}
@@ -1538,8 +1821,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
/* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
- for_each_set_bit(bit, (unsigned long *)&pebs_status,
- size)
+ for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
counts[bit]++;
continue;
@@ -1578,8 +1860,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* If collision happened, the record will be dropped.
*/
if (p->status != (1ULL << bit)) {
- for_each_set_bit(i, (unsigned long *)&pebs_status,
- x86_pmu.max_pebs_events)
+ for_each_set_bit(i, (unsigned long *)&pebs_status, size)
error[i]++;
continue;
}
@@ -1587,7 +1868,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
counts[bit]++;
}
- for (bit = 0; bit < size; bit++) {
+ for_each_set_bit(bit, (unsigned long *)&mask, size) {
if ((counts[bit] == 0) && (error[bit] == 0))
continue;
@@ -1608,11 +1889,66 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base,
- top, bit, counts[bit]);
+ top, bit, counts[bit],
+ setup_pebs_fixed_sample_data);
}
}
}
+static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs)
+{
+ short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct debug_store *ds = cpuc->ds;
+ struct perf_event *event;
+ void *base, *at, *top;
+ int bit, size;
+ u64 mask;
+
+ if (!x86_pmu.pebs_active)
+ return;
+
+ base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
+ top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
+
+ ds->pebs_index = ds->pebs_buffer_base;
+
+ mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
+ (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
+ size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+
+ if (unlikely(base >= top)) {
+ intel_pmu_pebs_event_update_no_drain(cpuc, size);
+ return;
+ }
+
+ for (at = base; at < top; at += cpuc->pebs_record_size) {
+ u64 pebs_status;
+
+ pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
+ pebs_status &= mask;
+
+ for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
+ counts[bit]++;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&mask, size) {
+ if (counts[bit] == 0)
+ continue;
+
+ event = cpuc->events[bit];
+ if (WARN_ON_ONCE(!event))
+ continue;
+
+ if (WARN_ON_ONCE(!event->attr.precise_ip))
+ continue;
+
+ __intel_pmu_pebs_event(event, iregs, base,
+ top, bit, counts[bit],
+ setup_pebs_adaptive_sample_data);
+ }
+}
+
/*
* BTS, PEBS probe and setup
*/
@@ -1628,12 +1964,18 @@ void __init intel_ds_init(void)
x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
- if (x86_pmu.version <= 4)
+ if (x86_pmu.version <= 4) {
x86_pmu.pebs_no_isolation = 1;
+ x86_pmu.pebs_no_xmm_regs = 1;
+ }
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
+ char *pebs_qual = "";
int format = x86_pmu.intel_cap.pebs_format;
+ if (format < 4)
+ x86_pmu.intel_cap.pebs_baseline = 0;
+
switch (format) {
case 0:
pr_cont("PEBS fmt0%c, ", pebs_type);
@@ -1669,6 +2011,29 @@ void __init intel_ds_init(void)
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
break;
+ case 4:
+ x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
+ x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
+ if (x86_pmu.intel_cap.pebs_baseline) {
+ x86_pmu.large_pebs_flags |=
+ PERF_SAMPLE_BRANCH_STACK |
+ PERF_SAMPLE_TIME;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ pebs_qual = "-baseline";
+ } else {
+ /* Only basic record supported */
+ x86_pmu.pebs_no_xmm_regs = 1;
+ x86_pmu.large_pebs_flags &=
+ ~(PERF_SAMPLE_ADDR |
+ PERF_SAMPLE_TIME |
+ PERF_SAMPLE_DATA_SRC |
+ PERF_SAMPLE_TRANSACTION |
+ PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_REGS_INTR);
+ }
+ pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
+ break;
+
default:
pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
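The record-size bookkeeping above is the core of adaptive PEBS: each enabled PEBS_DATACFG_* group appends a fixed-size block (plus one LBR entry per recorded branch) after the basic record. The standalone sketch below mirrors that accumulation; the struct sizes are stand-ins and the bit assignments are assumptions, since the real layouts live in the perf_event headers rather than in this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit assignments for MSR_PEBS_DATA_CFG; illustrative only. */
#define PEBS_DATACFG_MEMINFO    (1ULL << 0)
#define PEBS_DATACFG_GP         (1ULL << 1)
#define PEBS_DATACFG_XMMS       (1ULL << 2)
#define PEBS_DATACFG_LBRS       (1ULL << 3)
#define PEBS_DATACFG_LBR_SHIFT  24

/* Stand-in sizes; the real struct pebs_* definitions set the true values. */
#define SZ_BASIC        32u
#define SZ_MEMINFO      32u
#define SZ_GPRS         144u
#define SZ_XMM          256u
#define SZ_LBR_ENTRY    24u

/* Mirrors adaptive_pebs_record_size_update(): size grows with each group. */
static unsigned int pebs_record_size(uint64_t cfg, unsigned int lbr_nr)
{
        unsigned int sz = SZ_BASIC;

        if (cfg & PEBS_DATACFG_MEMINFO)
                sz += SZ_MEMINFO;
        if (cfg & PEBS_DATACFG_GP)
                sz += SZ_GPRS;
        if (cfg & PEBS_DATACFG_XMMS)
                sz += SZ_XMM;
        if (cfg & PEBS_DATACFG_LBRS)
                sz += lbr_nr * SZ_LBR_ENTRY;

        return sz;
}

int main(void)
{
        /* e.g. a sample asking for data source + interrupt regs + 32 LBRs */
        uint64_t cfg = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
                       PEBS_DATACFG_LBRS | (31ULL << PEBS_DATACFG_LBR_SHIFT);
        unsigned int lbr_nr = ((cfg >> PEBS_DATACFG_LBR_SHIFT) & 0xff) + 1;

        printf("record size: %u bytes\n", pebs_record_size(cfg, lbr_nr));
        return 0;
}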
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 580c1b91c454..6f814a27416b 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -488,6 +488,8 @@ void intel_pmu_lbr_add(struct perf_event *event)
* be 'new'. Conversely, a new event can get installed through the
* context switch path for the first time.
*/
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+ cpuc->lbr_pebs_users++;
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
@@ -507,8 +509,11 @@ void intel_pmu_lbr_del(struct perf_event *event)
task_ctx->lbr_callstack_users--;
}
+ if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip > 0)
+ cpuc->lbr_pebs_users--;
cpuc->lbr_users--;
WARN_ON_ONCE(cpuc->lbr_users < 0);
+ WARN_ON_ONCE(cpuc->lbr_pebs_users < 0);
perf_sched_cb_dec(event->ctx->pmu);
}
@@ -658,7 +663,13 @@ void intel_pmu_lbr_read(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
- if (!cpuc->lbr_users)
+ /*
+ * Don't read when all LBRs users are using adaptive PEBS.
+ *
+ * This could be smarter and actually check the event,
+ * but this simple approach seems to work for now.
+ */
+ if (!cpuc->lbr_users || cpuc->lbr_users == cpuc->lbr_pebs_users)
return;
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
@@ -1080,6 +1091,28 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
}
}
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int i;
+
+ cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
+ u64 info = lbr->lbr[i].info;
+ struct perf_branch_entry *e = &cpuc->lbr_entries[i];
+
+ e->from = lbr->lbr[i].from;
+ e->to = lbr->lbr[i].to;
+ e->mispred = !!(info & LBR_INFO_MISPRED);
+ e->predicted = !(info & LBR_INFO_MISPRED);
+ e->in_tx = !!(info & LBR_INFO_IN_TX);
+ e->abort = !!(info & LBR_INFO_ABORT);
+ e->cycles = info & LBR_INFO_CYCLES;
+ e->reserved = 0;
+ }
+ intel_pmu_lbr_filter(cpuc);
+}
+
/*
* Map interface branch filters onto LBR filters
*/
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index fb3a2f13fc70..339d7628080c 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1525,8 +1525,7 @@ static __init int pt_init(void)
}
if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
- pt_pmu.pmu.capabilities =
- PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
+ pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
pt_pmu.pmu.attr_groups = pt_attr_groups;
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 94dc564146ca..37ebf6fc5415 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -775,6 +775,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
+
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
{},
};
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 9fe64c01a2e5..fc40a1473058 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1367,6 +1367,11 @@ static const struct intel_uncore_init_fun skx_uncore_init __initconst = {
.pci_init = skx_uncore_pci_init,
};
+static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
+ .cpu_init = icl_uncore_cpu_init,
+ .pci_init = skl_uncore_pci_init,
+};
+
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP, nhm_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM, nhm_uncore_init),
@@ -1393,6 +1398,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, icl_uncore_init),
{},
};
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index 853a49a8ccf6..79eb2e21e4f0 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -512,6 +512,7 @@ int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
+void icl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 13493f43b247..f8431819b3e1 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -34,6 +34,8 @@
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
+#define PCI_DEVICE_ID_INTEL_ICL_U_IMC 0x8a02
+#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC 0x8a12
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -93,6 +95,12 @@
#define SKL_UNC_PERF_GLOBAL_CTL 0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL ((1 << 5) - 1)
+/* ICL Cbo register */
+#define ICL_UNC_CBO_CONFIG 0x396
+#define ICL_UNC_NUM_CBO_MASK 0xf
+#define ICL_UNC_CBO_0_PER_CTR0 0x702
+#define ICL_UNC_CBO_MSR_OFFSET 0x8
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -280,6 +288,70 @@ void skl_uncore_cpu_init(void)
snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
+static struct intel_uncore_type icl_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ICL_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
+ .ops = &skl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
+static struct uncore_event_desc icl_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute *icl_uncore_clock_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group icl_uncore_clock_format_group = {
+ .name = "format",
+ .attrs = icl_uncore_clock_formats_attr,
+};
+
+static struct intel_uncore_type icl_uncore_clockbox = {
+ .name = "clock",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
+ .format_group = &icl_uncore_clock_format_group,
+ .ops = &skl_uncore_msr_ops,
+ .event_descs = icl_uncore_events,
+};
+
+static struct intel_uncore_type *icl_msr_uncores[] = {
+ &icl_uncore_cbox,
+ &snb_uncore_arb,
+ &icl_uncore_clockbox,
+ NULL,
+};
+
+static int icl_get_cbox_num(void)
+{
+ u64 num_boxes;
+
+ rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);
+
+ return num_boxes & ICL_UNC_NUM_CBO_MASK;
+}
+
+void icl_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = icl_msr_uncores;
+ icl_uncore_cbox.num_boxes = icl_get_cbox_num();
+ snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
enum {
SNB_PCI_UNCORE_IMC,
};
@@ -668,6 +740,18 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* end: all zeroes */ },
};
+static const struct pci_device_id icl_uncore_pci_ids[] = {
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICL_U2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* end: all zeroes */ },
+};
+
static struct pci_driver snb_uncore_pci_driver = {
.name = "snb_uncore",
.id_table = snb_uncore_pci_ids,
@@ -693,6 +777,11 @@ static struct pci_driver skl_uncore_pci_driver = {
.id_table = skl_uncore_pci_ids,
};
+static struct pci_driver icl_uncore_pci_driver = {
+ .name = "icl_uncore",
+ .id_table = icl_uncore_pci_ids,
+};
+
struct imc_uncore_pci_dev {
__u32 pci_id;
struct pci_driver *driver;
@@ -732,6 +821,8 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
+ IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
+ IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver), /* 10th Gen Core Mobile */
{ /* end marker */ }
};
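
Note on the ICL client uncore bits above: the number of C-boxes is discovered at init time from the ICL_UNC_CBO_CONFIG MSR (0x396), and the per-box counter MSRs are then derived from ICL_UNC_CBO_0_PER_CTR0 plus a per-box stride of ICL_UNC_CBO_MSR_OFFSET. The standalone sketch below only reproduces that address arithmetic with the constants from this patch; the per-box-stride reading of msr_offset is an assumption based on how the uncore core normally uses the field, and the cbo_config value is made up.

#include <stdio.h>

/* Constants as added to uncore_snb.c by this patch. */
#define ICL_UNC_CBO_0_PER_CTR0  0x702
#define ICL_UNC_CBO_MSR_OFFSET  0x8
#define ICL_UNC_NUM_CBO_MASK    0xf

int main(void)
{
        /* Pretend ICL_UNC_CBO_CONFIG (MSR 0x396) read back this value. */
        unsigned long long cbo_config = 0x4;    /* hypothetical */
        int num_boxes = cbo_config & ICL_UNC_NUM_CBO_MASK;

        /* Counter 0 of box N is expected at base + N * stride. */
        for (int i = 0; i < num_boxes; i++)
                printf("cbox%d PERF_CTR0 = 0x%x\n",
                       i, ICL_UNC_CBO_0_PER_CTR0 + i * ICL_UNC_CBO_MSR_OFFSET);
        return 0;
}
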
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index a878e6286e4a..f3f4c2263501 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -89,6 +89,7 @@ static bool test_intel(int idx)
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
+ case INTEL_FAM6_ICELAKE_MOBILE:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 1e98a42b560a..a6ac2f4f76fc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -49,28 +49,33 @@ struct event_constraint {
unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
u64 idxmsk64;
};
- u64 code;
- u64 cmask;
- int weight;
- int overlap;
- int flags;
+ u64 code;
+ u64 cmask;
+ int weight;
+ int overlap;
+ int flags;
+ unsigned int size;
};
+
+static inline bool constraint_match(struct event_constraint *c, u64 ecode)
+{
+ return ((ecode & c->cmask) - c->code) <= (u64)c->size;
+}
+
/*
* struct hw_perf_event.flags flags
*/
#define PERF_X86_EVENT_PEBS_LDLAT 0x0001 /* ld+ldlat data address sampling */
#define PERF_X86_EVENT_PEBS_ST 0x0002 /* st data address sampling */
#define PERF_X86_EVENT_PEBS_ST_HSW 0x0004 /* haswell style datala, store */
-#define PERF_X86_EVENT_COMMITTED 0x0008 /* event passed commit_txn */
-#define PERF_X86_EVENT_PEBS_LD_HSW 0x0010 /* haswell style datala, load */
-#define PERF_X86_EVENT_PEBS_NA_HSW 0x0020 /* haswell style datala, unknown */
-#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
-#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
-#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
-#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
-#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
-#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */
-
+#define PERF_X86_EVENT_PEBS_LD_HSW 0x0008 /* haswell style datala, load */
+#define PERF_X86_EVENT_PEBS_NA_HSW 0x0010 /* haswell style datala, unknown */
+#define PERF_X86_EVENT_EXCL 0x0020 /* HT exclusivity on counter */
+#define PERF_X86_EVENT_DYNAMIC 0x0040 /* dynamic alloc'd constraint */
+#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0080 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT 0x0100 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD 0x0200 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_LARGE_PEBS 0x0400 /* use large PEBS */
struct amd_nb {
int nb_id; /* NorthBridge id */
@@ -116,6 +121,24 @@ struct amd_nb {
(1ULL << PERF_REG_X86_R14) | \
(1ULL << PERF_REG_X86_R15))
+#define PEBS_XMM_REGS \
+ ((1ULL << PERF_REG_X86_XMM0) | \
+ (1ULL << PERF_REG_X86_XMM1) | \
+ (1ULL << PERF_REG_X86_XMM2) | \
+ (1ULL << PERF_REG_X86_XMM3) | \
+ (1ULL << PERF_REG_X86_XMM4) | \
+ (1ULL << PERF_REG_X86_XMM5) | \
+ (1ULL << PERF_REG_X86_XMM6) | \
+ (1ULL << PERF_REG_X86_XMM7) | \
+ (1ULL << PERF_REG_X86_XMM8) | \
+ (1ULL << PERF_REG_X86_XMM9) | \
+ (1ULL << PERF_REG_X86_XMM10) | \
+ (1ULL << PERF_REG_X86_XMM11) | \
+ (1ULL << PERF_REG_X86_XMM12) | \
+ (1ULL << PERF_REG_X86_XMM13) | \
+ (1ULL << PERF_REG_X86_XMM14) | \
+ (1ULL << PERF_REG_X86_XMM15))
+
/*
* Per register state.
*/
@@ -207,10 +230,16 @@ struct cpu_hw_events {
int n_pebs;
int n_large_pebs;
+ /* Current super set of events hardware configuration */
+ u64 pebs_data_cfg;
+ u64 active_pebs_data_cfg;
+ int pebs_record_size;
+
/*
* Intel LBR bits
*/
int lbr_users;
+ int lbr_pebs_users;
struct perf_branch_stack lbr_stack;
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
@@ -257,18 +286,29 @@ struct cpu_hw_events {
void *kfree_on_online[X86_PERF_KFREE_MAX];
};
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
{ .idxmsk64 = (n) }, \
.code = (c), \
+ .size = (e) - (c), \
.cmask = (m), \
.weight = (w), \
.overlap = (o), \
.flags = f, \
}
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f) \
+ __EVENT_CONSTRAINT_RANGE(c, c, n, m, w, o, f)
+
#define EVENT_CONSTRAINT(c, n, m) \
__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+/*
+ * The constraint_match() function only works for 'simple' event codes
+ * and not for extended (AMD64_EVENTSEL_EVENT) event codes.
+ */
+#define EVENT_CONSTRAINT_RANGE(c, e, n, m) \
+ __EVENT_CONSTRAINT_RANGE(c, e, n, m, HWEIGHT(n), 0, 0)
+
#define INTEL_EXCLEVT_CONSTRAINT(c, n) \
__EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
0, PERF_X86_EVENT_EXCL)
@@ -304,6 +344,12 @@ struct cpu_hw_events {
EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
/*
+ * Constraint on a range of Event codes
+ */
+#define INTEL_EVENT_CONSTRAINT_RANGE(c, e, n) \
+ EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT)
+
+/*
* Constraint on the Event code + UMask + fixed-mask
*
* filter mask to validate fixed counter events.
@@ -348,7 +394,10 @@ struct cpu_hw_events {
/* Event constraint, but match on all event flags too. */
#define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
- EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS)
+ EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
+
+#define INTEL_FLAGS_EVENT_CONSTRAINT_RANGE(c, e, n) \
+ EVENT_CONSTRAINT_RANGE(c, e, n, ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS)
/* Check only flags, but allow all event/umask */
#define INTEL_ALL_EVENT_CONSTRAINT(code, n) \
@@ -366,6 +415,11 @@ struct cpu_hw_events {
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(code, end, n) \
+ __EVENT_CONSTRAINT_RANGE(code, end, n, \
+ ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
+ HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+
#define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
__EVENT_CONSTRAINT(code, n, \
ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
@@ -473,6 +527,7 @@ union perf_capabilities {
* values > 32bit.
*/
u64 full_width_write:1;
+ u64 pebs_baseline:1;
};
u64 capabilities;
};
@@ -613,14 +668,16 @@ struct x86_pmu {
pebs_broken :1,
pebs_prec_dist :1,
pebs_no_tlb :1,
- pebs_no_isolation :1;
+ pebs_no_isolation :1,
+ pebs_no_xmm_regs :1;
int pebs_record_size;
int pebs_buffer_size;
+ int max_pebs_events;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
- int max_pebs_events;
unsigned long large_pebs_flags;
+ u64 rtm_abort_event;
/*
* Intel LBR
@@ -714,6 +771,7 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \
.event_str_ht = ht, \
}
+struct pmu *x86_get_pmu(void);
extern struct x86_pmu x86_pmu __read_mostly;
static inline bool x86_pmu_has_lbr_callstack(void)
@@ -941,6 +999,8 @@ extern struct event_constraint intel_bdw_pebs_event_constraints[];
extern struct event_constraint intel_skl_pebs_event_constraints[];
+extern struct event_constraint intel_icl_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_add(struct perf_event *event);
@@ -959,6 +1019,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
void intel_pmu_auto_reload_read(struct perf_event *event);
+void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);
+
void intel_ds_init(void);
void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
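
The new .size field and constraint_match() above let one event_constraint cover a contiguous range of event codes [code, code+size] with a single unsigned comparison: (ecode & cmask) - code underflows for codes below the range, so only codes inside the range pass the <= size test. A minimal standalone sketch of that trick; the event codes here are made up for illustration.

#include <stdio.h>
#include <stdint.h>

struct range_constraint {
        uint64_t code;          /* first event code of the range */
        uint64_t cmask;         /* config bits that identify the event */
        uint64_t size;          /* last code minus first code */
};

/* Same comparison as the kernel's constraint_match(). */
static int range_matches(const struct range_constraint *c, uint64_t ecode)
{
        return ((ecode & c->cmask) - c->code) <= c->size;
}

int main(void)
{
        /* Hypothetical constraint covering event codes 0xd0..0xd4. */
        struct range_constraint c = { .code = 0xd0, .cmask = 0xff, .size = 0x4 };

        printf("0xcf -> %d\n", range_matches(&c, 0xcf));  /* 0: below, subtraction wraps */
        printf("0xd2 -> %d\n", range_matches(&c, 0xd2));  /* 1: inside the range */
        printf("0xd5 -> %d\n", range_matches(&c, 0xd5));  /* 0: above the range */
        return 0;
}
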
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 8eb6fbee8e13..5c056b8aebef 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -86,6 +86,11 @@ static void hv_apic_write(u32 reg, u32 val)
static void hv_apic_eoi_write(u32 reg, u32 val)
{
+ struct hv_vp_assist_page *hvp = hv_vp_assist_page[smp_processor_id()];
+
+ if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
+ return;
+
wrmsr(HV_X64_MSR_EOI, val, 0);
}
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index a861b0456b1a..07f21a06392f 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -56,7 +56,7 @@ static void hv_qlock_wait(u8 *byte, u8 val)
/*
* Hyper-V does not support this so far.
*/
-bool hv_vcpu_is_preempted(int vcpu)
+__visible bool hv_vcpu_is_preempted(int vcpu)
{
return false;
}
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 321fe5f5d0e9..629d1ee05599 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -61,9 +61,8 @@
} while (0)
#define RELOAD_SEG(seg) { \
- unsigned int pre = GET_SEG(seg); \
+ unsigned int pre = (seg) | 3; \
unsigned int cur = get_user_seg(seg); \
- pre |= 3; \
if (pre != cur) \
set_user_seg(seg, pre); \
}
@@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
struct sigcontext_32 __user *sc)
{
unsigned int tmpflags, err = 0;
+ u16 gs, fs, es, ds;
void __user *buf;
u32 tmp;
@@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
current->restart_block.fn = do_no_restart_syscall;
get_user_try {
- /*
- * Reload fs and gs if they have changed in the signal
- * handler. This does not handle long fs/gs base changes in
- * the handler, but does not clobber them at least in the
- * normal case.
- */
- RELOAD_SEG(gs);
- RELOAD_SEG(fs);
- RELOAD_SEG(ds);
- RELOAD_SEG(es);
+ gs = GET_SEG(gs);
+ fs = GET_SEG(fs);
+ ds = GET_SEG(ds);
+ es = GET_SEG(es);
COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
COPY(dx); COPY(cx); COPY(ip); COPY(ax);
@@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
buf = compat_ptr(tmp);
} get_user_catch(err);
+ /*
+ * Reload fs and gs if they have changed in the signal
+ * handler. This does not handle long fs/gs base changes in
+ * the handler, but does not clobber them at least in the
+ * normal case.
+ */
+ RELOAD_SEG(gs);
+ RELOAD_SEG(fs);
+ RELOAD_SEG(ds);
+ RELOAD_SEG(es);
+
err |= fpu__restore_sig(buf, 1);
force_iret();
@@ -216,8 +221,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size,
void __user **fpstate)
{
- struct fpu *fpu = &current->thread.fpu;
- unsigned long sp;
+ unsigned long sp, fx_aligned, math_size;
/* Default to using normal stack */
sp = regs->sp;
@@ -231,15 +235,11 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
- if (fpu->initialized) {
- unsigned long fx_aligned, math_size;
-
- sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
- *fpstate = (struct _fpstate_32 __user *) sp;
- if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
- math_size) < 0)
- return (void __user *) -1L;
- }
+ sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+ *fpstate = (struct _fpstate_32 __user *) sp;
+ if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
+ math_size) < 0)
+ return (void __user *) -1L;
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index a0ab9ab61c75..eebd05942e6c 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -11,3 +11,4 @@ generic-y += early_ioremap.h
generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 31b627b43a8e..464034db299f 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -20,6 +20,17 @@
#endif
/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.ignore_alts
+ .long .Lannotate_\@ - .
+ .popsection
+.endm
+
+/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
* enough information for the alternatives patching code to patch an
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4c74073a19cc..094fbc9c0b1c 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -45,6 +45,16 @@
#define LOCK_PREFIX ""
#endif
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE \
+ "999:\n\t" \
+ ".pushsection .discard.ignore_alts\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+
struct alt_instr {
s32 instr_offset; /* original instruction */
s32 repl_offset; /* offset to replacement instruction */
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index fc0693569f7a..ba88edd0d58b 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -12,8 +12,6 @@
#define REG_OUT "a"
#endif
-#define __HAVE_ARCH_SW_HWEIGHT
-
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 6467757bb39f..3ff577c0b102 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -148,30 +148,6 @@
_ASM_PTR (entry); \
.popsection
-.macro ALIGN_DESTINATION
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jz 102f /* already aligned */
- subl $8,%ecx
- negl %ecx
- subl %ecx,%edx
-100: movb (%rsi),%al
-101: movb %al,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz 100b
-102:
- .section .fixup,"ax"
-103: addl %ecx,%edx /* ecx is zerorest also */
- jmp copy_user_handle_tail
- .previous
-
- _ASM_EXTABLE_UA(100b, 103b)
- _ASM_EXTABLE_UA(101b, 103b)
- .endm
-
#else
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 29c706415443..cff3f3f3bfe0 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -7,6 +7,64 @@
#include <asm/processor.h>
#include <asm/intel_ds.h>
+#ifdef CONFIG_X86_64
+
+/* Macro to enforce the same ordering and stack sizes */
+#define ESTACKS_MEMBERS(guardsize, db2_holesize)\
+ char DF_stack_guard[guardsize]; \
+ char DF_stack[EXCEPTION_STKSZ]; \
+ char NMI_stack_guard[guardsize]; \
+ char NMI_stack[EXCEPTION_STKSZ]; \
+ char DB2_stack_guard[guardsize]; \
+ char DB2_stack[db2_holesize]; \
+ char DB1_stack_guard[guardsize]; \
+ char DB1_stack[EXCEPTION_STKSZ]; \
+ char DB_stack_guard[guardsize]; \
+ char DB_stack[EXCEPTION_STKSZ]; \
+ char MCE_stack_guard[guardsize]; \
+ char MCE_stack[EXCEPTION_STKSZ]; \
+ char IST_top_guard[guardsize]; \
+
+/* The exception stacks' physical storage. No guard pages required */
+struct exception_stacks {
+ ESTACKS_MEMBERS(0, 0)
+};
+
+/* The effective cpu entry area mapping with guard pages. */
+struct cea_exception_stacks {
+ ESTACKS_MEMBERS(PAGE_SIZE, EXCEPTION_STKSZ)
+};
+
+/*
+ * The exception stack ordering in [cea_]exception_stacks
+ */
+enum exception_stack_ordering {
+ ESTACK_DF,
+ ESTACK_NMI,
+ ESTACK_DB2,
+ ESTACK_DB1,
+ ESTACK_DB,
+ ESTACK_MCE,
+ N_EXCEPTION_STACKS
+};
+
+#define CEA_ESTACK_SIZE(st) \
+ sizeof(((struct cea_exception_stacks *)0)->st## _stack)
+
+#define CEA_ESTACK_BOT(ceastp, st) \
+ ((unsigned long)&(ceastp)->st## _stack)
+
+#define CEA_ESTACK_TOP(ceastp, st) \
+ (CEA_ESTACK_BOT(ceastp, st) + CEA_ESTACK_SIZE(st))
+
+#define CEA_ESTACK_OFFS(st) \
+ offsetof(struct cea_exception_stacks, st## _stack)
+
+#define CEA_ESTACK_PAGES \
+ (sizeof(struct cea_exception_stacks) / PAGE_SIZE)
+
+#endif
+
/*
* cpu_entry_area is a percpu region that contains things needed by the CPU
* and early entry/exit code. Real types aren't used for all fields here
@@ -32,12 +90,9 @@ struct cpu_entry_area {
#ifdef CONFIG_X86_64
/*
- * Exception stacks used for IST entries.
- *
- * In the future, this should have a separate slot for each stack
- * with guard pages between them.
+ * Exception stacks used for IST entries with guard pages.
*/
- char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+ struct cea_exception_stacks estacks;
#endif
#ifdef CONFIG_CPU_SUP_INTEL
/*
@@ -57,6 +112,7 @@ struct cpu_entry_area {
#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
+DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
@@ -76,4 +132,7 @@ static inline struct entry_stack *cpu_entry_stack(int cpu)
return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}
+#define __this_cpu_ist_top_va(name) \
+ CEA_ESTACK_TOP(__this_cpu_read(cea_exception_stacks), name)
+
#endif
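
With the guarded per-exception stacks above, an IST entry is now programmed from the cpu_entry_area mapping via __this_cpu_ist_top_va() instead of from a flat exception_stacks array. A hedged sketch of how a CPU-init path could use the helpers; set_ist_stacks() is a made-up name, and the IST_INDEX_* constants come from the page_64_types.h change later in this patch.

/* Illustrative only: assumes a valid struct tss_struct *tss for this CPU. */
static void set_ist_stacks(struct tss_struct *tss)
{
        /* Each entry points at the top of its guarded stack in the CEA. */
        tss->x86_tss.ist[IST_INDEX_DF]  = __this_cpu_ist_top_va(DF);
        tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
        tss->x86_tss.ist[IST_INDEX_DB]  = __this_cpu_ist_top_va(DB);
        tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
}
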
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0e56ff7e4848..1d337c51f7e6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -156,11 +156,14 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
#else
/*
- * Static testing of CPU features. Used the same as boot_cpu_has().
- * These will statically patch the target code for additional
- * performance.
+ * Static testing of CPU features. Used the same as boot_cpu_has(). It
+ * statically patches the target code for additional performance. Use
+ * static_cpu_has() only in fast paths, where every cycle counts; for the
+ * majority of cases the boot_cpu_has() variant is already fast enough,
+ * and you should stick to it as it is generally only two instructions:
+ * a RIP-relative MOV and a TEST.
*/
-static __always_inline __pure bool _static_cpu_has(u16 bit)
+static __always_inline bool _static_cpu_has(u16 bit)
{
asm_volatile_goto("1: jmp 6f\n"
"2:\n"
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 981ff9479648..75f27ee2c263 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
@@ -382,5 +383,7 @@
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 9e5ca30738e5..1a8609a15856 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -104,11 +104,9 @@ static inline void debug_stack_usage_dec(void)
{
__this_cpu_dec(debug_stack_usage);
}
-int is_debug_stack(unsigned long addr);
void debug_stack_set_zero(void);
void debug_stack_reset(void);
#else /* !X86_64 */
-static inline int is_debug_stack(unsigned long addr) { return 0; }
static inline void debug_stack_set_zero(void) { }
static inline void debug_stack_reset(void) { }
static inline void debug_stack_usage_inc(void) { }
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index ce4d176b3d13..6b15a24930e0 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,14 +13,7 @@
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
-#ifdef CONFIG_ISA
-# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
-#else
-# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
-#endif
-
extern int iommu_merge;
-extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern const struct dma_map_ops *dma_ops;
@@ -30,7 +23,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-bool arch_dma_alloc_attrs(struct device **dev);
-#define arch_dma_alloc_attrs arch_dma_alloc_attrs
-
#endif
diff --git a/arch/x86/include/asm/e820/api.h b/arch/x86/include/asm/e820/api.h
index 62be73b23d5c..e8f58ddd06d9 100644
--- a/arch/x86/include/asm/e820/api.h
+++ b/arch/x86/include/asm/e820/api.h
@@ -10,6 +10,7 @@ extern struct e820_table *e820_table_firmware;
extern unsigned long pci_mem_start;
+extern bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 50ba74a34a37..9da8cccdf3fb 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -103,8 +103,6 @@ enum fixed_addresses {
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
- FIX_TEXT_POKE1, /* reserve 2 pages for text_poke() */
- FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
FIX_LNW_VRTC,
#endif
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index b56d504af654..b774c52e5411 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -10,6 +10,7 @@
#ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H
+#include <linux/bottom_half.h>
/*
* Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
@@ -21,6 +22,36 @@
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
+extern void fpregs_mark_activate(void);
+
+/*
+ * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
+ * A context switch will (and softirq might) save CPU's FPU registers to
+ * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
+ * a random state.
+ */
+static inline void fpregs_lock(void)
+{
+ preempt_disable();
+ local_bh_disable();
+}
+
+static inline void fpregs_unlock(void)
+{
+ local_bh_enable();
+ preempt_enable();
+}
+
+#ifdef CONFIG_X86_DEBUG_FPU
+extern void fpregs_assert_state_consistent(void);
+#else
+static inline void fpregs_assert_state_consistent(void) { }
+#endif
+
+/*
+ * Load the task FPU state before returning to userspace.
+ */
+extern void switch_fpu_return(void);
/*
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
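
fpregs_lock()/fpregs_unlock() above bracket any kernel code that inspects or edits the current task's FPU state, since a context switch (or a softirq) can otherwise save the registers into fpu->state and set TIF_NEED_FPU_LOAD underneath it. A minimal sketch of the intended pattern; edit_current_fpstate() and the actual modification are placeholders, not part of this patch.

/* Sketch: modify current's FPU state consistently under the new scheme. */
static void edit_current_fpstate(void)
{
        struct fpu *fpu = &current->thread.fpu;

        fpregs_lock();
        if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
                /* Registers already live in memory: edit fpu->state directly. */
                /* ... modify fpu->state.xsave here ... */
        } else {
                /* Registers live in the CPU: either edit them in place or
                 * save them first with copy_fpregs_to_fpstate(fpu). */
                /* ... */
        }
        fpregs_unlock();
}
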
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index fb04a3ded7dd..9e27fa05a7ae 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -14,6 +14,7 @@
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <asm/user.h>
#include <asm/fpu/api.h>
@@ -24,14 +25,12 @@
/*
* High level FPU state handling functions:
*/
-extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
-extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
-extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
+extern int fpu__copy(struct task_struct *dst, struct task_struct *src);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
@@ -122,6 +121,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
err; \
})
+#define kernel_insn_err(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile("1:" #insn "\n\t" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
+ err; \
+})
+
#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
@@ -150,6 +164,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
+static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ else
+ return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
@@ -163,6 +185,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
+static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
+{
+ return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -253,7 +280,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
else
XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -275,7 +302,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
WARN_ON(system_state != SYSTEM_BOOTING);
- if (static_cpu_has(X86_FEATURE_XSAVES))
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -363,6 +390,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
}
/*
+ * Restore xstate from kernel space xsave area, return an error code instead of
+ * an exception.
+ */
+static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+{
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+ int err;
+
+ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+
+ return err;
+}
+
+/*
* These must be called with preempt disabled. Returns
* 'true' if the FPU state is still intact and we can
* keep registers active.
@@ -487,6 +529,25 @@ static inline void fpregs_activate(struct fpu *fpu)
}
/*
+ * Internal helper, do not use directly. Use switch_fpu_return() instead.
+ */
+static inline void __fpregs_load_activate(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+ int cpu = smp_processor_id();
+
+ if (WARN_ON_ONCE(current->mm == NULL))
+ return;
+
+ if (!fpregs_state_valid(fpu, cpu)) {
+ copy_kernel_to_fpregs(&fpu->state);
+ fpregs_activate(fpu);
+ fpu->last_cpu = cpu;
+ }
+ clear_thread_flag(TIF_NEED_FPU_LOAD);
+}
+
+/*
* FPU state switching for scheduling.
*
* This is a two-stage process:
@@ -494,13 +555,23 @@ static inline void fpregs_activate(struct fpu *fpu)
* - switch_fpu_prepare() saves the old state.
* This is done within the context of the old process.
*
- * - switch_fpu_finish() restores the new state as
- * necessary.
+ * - switch_fpu_finish() sets TIF_NEED_FPU_LOAD; the floating point state
+ * will get loaded on return to userspace, or when the kernel needs it.
+ *
+ * If TIF_NEED_FPU_LOAD is cleared then the CPU's FPU registers
+ * are saved in the current thread's FPU register state.
+ *
+ * If TIF_NEED_FPU_LOAD is set then the CPU's FPU registers may not
+ * hold current()'s FPU state. The registers must be loaded before
+ * returning to userland or before their content is used in any
+ * other way.
+ *
+ * The FPU context is only stored/restored for a user task and
+ * ->mm is used to distinguish between kernel and user threads.
*/
-static inline void
-switch_fpu_prepare(struct fpu *old_fpu, int cpu)
+static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
+ if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
@@ -508,8 +579,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
/* But leave fpu_fpregs_owner_ctx! */
trace_x86_fpu_regs_deactivated(old_fpu);
- } else
- old_fpu->last_cpu = -1;
+ }
}
/*
@@ -517,36 +587,32 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
*/
/*
- * Set up the userspace FPU context for the new task, if the task
- * has used the FPU.
+ * Load PKRU from the FPU context if available. Delay loading of the
+ * complete FPU state until the return to userland.
*/
-static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
+static inline void switch_fpu_finish(struct fpu *new_fpu)
{
- bool preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->initialized;
+ u32 pkru_val = init_pkru_value;
+ struct pkru_state *pk;
- if (preload) {
- if (!fpregs_state_valid(new_fpu, cpu))
- copy_kernel_to_fpregs(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
-}
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return;
-/*
- * Needs to be preemption-safe.
- *
- * NOTE! user_fpu_begin() must be used only immediately before restoring
- * the save state. It does not do any saving/restoring on its own. In
- * lazy FPU mode, it is just an optimization to avoid a #NM exception,
- * the task can lose the FPU right after preempt_enable().
- */
-static inline void user_fpu_begin(void)
-{
- struct fpu *fpu = &current->thread.fpu;
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
- preempt_disable();
- fpregs_activate(fpu);
- preempt_enable();
+ /*
+ * PKRU state is switched eagerly because it needs to be valid before we
+ * return to userland e.g. for a copy_to_user() operation.
+ */
+ if (current->mm) {
+ pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
+ if (pk)
+ pkru_val = pk->pkru;
+ }
+ __write_pkru(pkru_val);
}
/*
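
Under the reworked switching above, switch_fpu_finish() no longer restores anything except PKRU; it only sets TIF_NEED_FPU_LOAD, and the register restore is deferred to switch_fpu_return() on the way back to user space. A hedged sketch of that exit-path side; fpu_exit_to_user() is an illustrative name, and the real call site is not part of this hunk.

/* Sketch: the exit-to-usermode side of the lazy FPU restore. */
static void fpu_exit_to_user(void)
{
        /* With CONFIG_X86_DEBUG_FPU this verifies that the in-memory and
         * in-register views of the FPU state have not silently diverged. */
        fpregs_assert_state_consistent();

        /* If the last context switch left the registers unloaded, load them
         * now; switch_fpu_return() ends up in __fpregs_load_activate(). */
        if (unlikely(test_thread_flag(TIF_NEED_FPU_LOAD)))
                switch_fpu_return();
}
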
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
index 44bbc39a57b3..7fb516b6893a 100644
--- a/arch/x86/include/asm/fpu/signal.h
+++ b/arch/x86/include/asm/fpu/signal.h
@@ -22,7 +22,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
-extern void convert_to_fxsr(struct task_struct *tsk,
+extern void convert_to_fxsr(struct fxregs_state *fxsave,
const struct user_i387_ia32_struct *env);
unsigned long
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 2e32e178e064..f098f6cab94b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -294,15 +294,6 @@ struct fpu {
unsigned int last_cpu;
/*
- * @initialized:
- *
- * This flag indicates whether this context is initialized: if the task
- * is not running then we can restore from this context, if the task
- * is running then we should save into this context.
- */
- unsigned char initialized;
-
- /*
* @avx512_timestamp:
*
* Records the timestamp of AVX512 use during last context switch.
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 48581988d78c..7e42b285c856 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -2,9 +2,11 @@
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H
+#include <linux/uaccess.h>
#include <linux/types.h>
+
#include <asm/processor.h>
-#include <linux/uaccess.h>
+#include <asm/user.h>
/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
@@ -46,8 +48,8 @@ extern void __init update_regset_xstate_info(unsigned int size,
u64 xstate_mask);
void fpu__xstate_clear_all_cpu_caps(void);
-void *get_xsave_addr(struct xregs_state *xsave, int xstate);
-const void *get_xsave_field_ptr(int xstate_field);
+void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
+const void *get_xsave_field_ptr(int xfeature_nr);
int using_compacted_format(void);
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset, unsigned int size);
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index cf350639e76d..287f1f7b2e52 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
#define _ASM_X86_FTRACE_H
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_FENTRY
-# define MCOUNT_ADDR ((unsigned long)(__fentry__))
-#else
-# define MCOUNT_ADDR ((unsigned long)(mcount))
-# define HAVE_FUNCTION_GRAPH_FP_TEST
+#ifndef CC_USING_FENTRY
+# error Compiler does not support fentry?
#endif
+# define MCOUNT_ADDR ((unsigned long)(__fentry__))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 7469d321f072..f65cfb48cfdd 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -17,8 +17,4 @@ static inline void arch_clear_hugepage_flags(struct page *page)
{
}
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-static inline bool gigantic_page_supported(void) { return true; }
-#endif
-
#endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 2bdbbbcfa393..cdf44aa9a501 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file contains definitions from Hyper-V Hypervisor Top-Level Functional
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
index ae26df1c2789..8380c3ddd4b2 100644
--- a/arch/x86/include/asm/intel_ds.h
+++ b/arch/x86/include/asm/intel_ds.h
@@ -8,7 +8,7 @@
/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 8
-#define MAX_FIXED_PEBS_EVENTS 3
+#define MAX_FIXED_PEBS_EVENTS 4
/*
* A debug store configuration.
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 686247db3106..a06a9f8294ea 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -90,8 +90,6 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
#define __raw_writew __writew
#define __raw_writel __writel
-#define mmiowb() barrier()
-
#ifdef CONFIG_X86_64
build_mmio_read(readq, "q", u64, "=r", :"memory")
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index fbb16e6b6c18..8f95686ec27e 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -16,11 +16,7 @@ static inline int irq_canonicalize(int irq)
return ((irq == 2) ? 9 : irq);
}
-#ifdef CONFIG_X86_32
-extern void irq_ctx_init(int cpu);
-#else
-# define irq_ctx_init(cpu) do { } while (0)
-#endif
+extern int irq_init_percpu_irqstack(unsigned int cpu);
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 548d90bbf919..889f8b1b5b7f 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -18,8 +18,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
- * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
+ * Vectors 129 ... LOCAL_TIMER_VECTOR-1 : device interrupts
+ * Vectors LOCAL_TIMER_VECTOR ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
*
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 058e40fed167..8a0e56e1dcc9 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -6,6 +6,8 @@
#ifndef __ASSEMBLY__
+#include <asm/nospec-branch.h>
+
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
@@ -54,11 +56,13 @@ static inline void native_irq_enable(void)
static inline __cpuidle void native_safe_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
static inline __cpuidle void native_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a9d03af34030..450d69a1e6fa 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -295,6 +295,7 @@ union kvm_mmu_extended_role {
unsigned int valid:1;
unsigned int execonly:1;
unsigned int cr0_pg:1;
+ unsigned int cr4_pae:1;
unsigned int cr4_pse:1;
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
@@ -469,6 +470,7 @@ struct kvm_pmu {
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
+ u64 global_ovf_ctrl_mask;
u64 reserved_bits;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
@@ -780,6 +782,9 @@ struct kvm_vcpu_arch {
/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
bool l1tf_flush_l1d;
+
+ /* AMD MSRC001_0015 Hardware Configuration */
+ u64 msr_hwcr;
};
struct kvm_lpage_info {
@@ -1167,7 +1172,8 @@ struct kvm_x86_ops {
uint32_t guest_irq, bool set);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
- int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
void (*setup_mce)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index ed80003ce3e2..a66f6706c2de 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -24,14 +24,6 @@
#include <asm/setup.h>
#include <linux/ftrace.h>
-static inline int klp_check_compiler_support(void)
-{
-#ifndef CC_USING_FENTRY
- return 1;
-#endif
- return 0;
-}
-
static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
regs->ip = ip;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 22d05e3835f0..dc2d4b206ab7 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -210,16 +210,6 @@ static inline void cmci_rediscover(void) {}
static inline void cmci_recheck(void) {}
#endif
-#ifdef CONFIG_X86_MCE_AMD
-void mce_amd_feature_init(struct cpuinfo_x86 *c);
-int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
-#else
-static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
-static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
-#endif
-
-static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
-
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
bool mce_is_correctable(struct mce *m);
@@ -345,12 +335,19 @@ extern bool amd_mce_is_memory_error(struct mce *m);
extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);
-#else
+void mce_amd_feature_init(struct cpuinfo_x86 *c);
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
-static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
-static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
-static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
+#else
+static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
+static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
+static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
+static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+static inline int
+umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
#endif
+static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
+
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 19d18fae6ec6..9024236693d2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -13,6 +13,7 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+#include <asm/debugreg.h>
extern atomic64_t last_mm_ctx_id;
@@ -277,8 +278,8 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
mpx_mm_init(mm);
}
-static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+static inline void arch_unmap(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
/*
* mpx_notify_unmap() goes and reads a rarely-hot
@@ -298,7 +299,7 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
* consistently wrong.
*/
if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
- mpx_notify_unmap(mm, vma, start, end);
+ mpx_notify_unmap(mm, start, end);
}
/*
@@ -356,4 +357,59 @@ static inline unsigned long __get_current_cr3_fast(void)
return cr3;
}
+typedef struct {
+ struct mm_struct *mm;
+} temp_mm_state_t;
+
+/*
+ * Using a temporary mm allows setting temporary mappings that are not accessible
+ * by other CPUs. Such mappings are needed to perform sensitive memory writes
+ * that override the kernel memory protections (e.g., W^X), without exposing the
+ * temporary page-table mappings that are required for these write operations to
+ * other CPUs. Using a temporary mm also avoids TLB shootdowns when the
+ * mapping is torn down.
+ *
+ * Context: The temporary mm needs to be used exclusively by a single core. To
+ * harden security, IRQs must be disabled while the temporary mm is
+ * loaded, thereby preventing interrupt handler bugs from overriding
+ * the kernel memory protection.
+ */
+static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
+{
+ temp_mm_state_t temp_state;
+
+ lockdep_assert_irqs_disabled();
+ temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+ switch_mm_irqs_off(NULL, mm, current);
+
+ /*
+ * If breakpoints are enabled, disable them while the temporary mm is
+ * used. Userspace might set up watchpoints on addresses that are used
+ * in the temporary mm, which would lead to wrong signals being sent or
+ * crashes.
+ *
+ * Note that breakpoints are not disabled selectively, which also causes
+ * kernel breakpoints (e.g., perf's) to be disabled. This might be
+ * undesirable, but still seems reasonable as the code that runs in the
+ * temporary mm should be short.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_disable();
+
+ return temp_state;
+}
+
+static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
+{
+ lockdep_assert_irqs_disabled();
+ switch_mm_irqs_off(NULL, prev_state.mm, current);
+
+ /*
+ * Restore the breakpoints if they were disabled before the temporary mm
+ * was loaded.
+ */
+ if (hw_breakpoint_active())
+ hw_breakpoint_restore();
+}
+
#endif /* _ASM_X86_MMU_CONTEXT_H */
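
The temporary-mm helpers above are meant to bracket short, IRQs-off sequences that write through a mapping which exists only in a private mm (for example for code patching), so the mapping is never visible to other CPUs. A minimal usage sketch; patching_mm, write_via_temporary_mm() and the memcpy() payload are placeholders, not part of this patch.

/* Sketch: write through a mapping that only exists in a private mm. */
static void write_via_temporary_mm(struct mm_struct *patching_mm,
                                   void *dst, const void *src, size_t len)
{
        temp_mm_state_t prev;
        unsigned long flags;

        local_irq_save(flags);                  /* use_temporary_mm() requires IRQs off */
        prev = use_temporary_mm(patching_mm);   /* also parks hw breakpoints */

        memcpy(dst, src, len);                  /* dst is mapped only in patching_mm */

        unuse_temporary_mm(prev);               /* restore previous mm and breakpoints */
        local_irq_restore(flags);
}
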
diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
index d0b1434fb0b6..143a5c193ed3 100644
--- a/arch/x86/include/asm/mpx.h
+++ b/arch/x86/include/asm/mpx.h
@@ -64,12 +64,15 @@ struct mpx_fault_info {
};
#ifdef CONFIG_X86_INTEL_MPX
-int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
-int mpx_handle_bd_fault(void);
+
+extern int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs);
+extern int mpx_handle_bd_fault(void);
+
static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
{
return (mm->context.bd_addr != MPX_INVALID_BOUNDS_DIR);
}
+
static inline void mpx_mm_init(struct mm_struct *mm)
{
/*
@@ -78,11 +81,10 @@ static inline void mpx_mm_init(struct mm_struct *mm)
*/
mm->context.bd_addr = MPX_INVALID_BOUNDS_DIR;
}
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long start, unsigned long end);
-unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len,
- unsigned long flags);
+extern void mpx_notify_unmap(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern unsigned long mpx_unmapped_area_check(unsigned long addr, unsigned long len, unsigned long flags);
+
#else
static inline int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
{
@@ -100,7 +102,6 @@ static inline void mpx_mm_init(struct mm_struct *mm)
{
}
static inline void mpx_notify_unmap(struct mm_struct *mm,
- struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ca5bc0eacb95..979ef971cc78 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
+#include <linux/bits.h>
+
/*
* CPU model specific register (MSR) numbers.
*
@@ -40,14 +42,14 @@
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
-#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -69,20 +71,25 @@
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
-#define ARCH_CAP_SSB_NO (1 << 4) /*
- * Not susceptible to Speculative Store Bypass
- * attack, so no Speculative Store Bypass
- * control required.
- */
+#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO BIT(4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
+#define ARCH_CAP_MDS_NO BIT(5) /*
+ * Not susceptible to
+ * Microarchitectural Data
+ * Sampling (MDS) vulnerabilities.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
-#define L1D_FLUSH (1 << 0) /*
- * Writeback and invalidate the
- * L1 data cache.
- */
+#define L1D_FLUSH BIT(0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -116,6 +123,7 @@
#define LBR_INFO_CYCLES 0xffff
#define MSR_IA32_PEBS_ENABLE 0x000003f1
+#define MSR_PEBS_DATA_CFG 0x000003f2
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
@@ -781,6 +789,14 @@
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+/* PERF_GLOBAL_OVF_CTL bits */
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT 62
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF_BIT)
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT 63
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD_BIT)
+
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 39a2fb29378a..eb0f80ce8524 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -6,6 +6,7 @@
#include <linux/sched/idle.h>
#include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
/* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx)
{
+ /* No MDS buffer clear as this is AMD/HYGON only */
+
/* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;"
:: "a" (eax), "b" (ebx), "c" (ecx));
@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index dad12b767ba0..109f974f9835 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -11,6 +11,15 @@
#include <asm/msr-index.h>
/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+ ANNOTATE_IGNORE_ALTERNATIVE
+
+/*
* Fill the CPU return stack buffer.
*
* Each entry in the RSB, if used for a speculative 'ret', contains an
@@ -57,19 +66,6 @@
#ifdef __ASSEMBLY__
/*
- * This should be used immediately before a retpoline alternative. It tells
- * objtool where the retpolines are so that it can make sense of the control
- * flow by just reading the original instruction(s) and ignoring the
- * alternatives.
- */
-.macro ANNOTATE_NOSPEC_ALTERNATIVE
- .Lannotate_\@:
- .pushsection .discard.nospec
- .long .Lannotate_\@ - .
- .popsection
-.endm
-
-/*
* This should be used immediately before an indirect jump/call. It tells
* objtool the subsequent indirect jump/call is vouched safe for retpoline
* builds.
@@ -152,12 +148,6 @@
#else /* __ASSEMBLY__ */
-#define ANNOTATE_NOSPEC_ALTERNATIVE \
- "999:\n\t" \
- ".pushsection .discard.nospec\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
-
#define ANNOTATE_RETPOLINE_SAFE \
"999:\n\t" \
".pushsection .discard.retpoline_safe\n\t" \
@@ -318,6 +308,56 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+#include <asm/segment.h>
+
+/**
+ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+static inline void mds_clear_cpu_buffers(void)
+{
+ static const u16 ds = __KERNEL_DS;
+
+ /*
+ * Has to be the memory-operand variant because only that
+ * guarantees the CPU buffer flush functionality according to
+ * documentation. The register-operand variant does not.
+ * Works with any segment selector, but a valid writable
+ * data segment is the fastest variant.
+ *
+ * "cc" clobber is required because VERW modifies ZF.
+ */
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+}
+
+/**
+ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_user_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+}
+
+/**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_idle_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_idle_clear))
+ mds_clear_cpu_buffers();
+}
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 0d5c739eebd7..565ad755c785 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -22,11 +22,9 @@
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
/*
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8f657286d599..793c14c372cb 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -14,22 +14,20 @@
#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 2
-#define DEBUG_STACK 3
-#define MCE_STACK 4
-#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
+/*
+ * The index for the tss.ist[] array. The hardware limit is 7 entries.
+ */
+#define IST_INDEX_DF 0
+#define IST_INDEX_NMI 1
+#define IST_INDEX_DB 2
+#define IST_INDEX_MCE 3
/*
* Set __PAGE_OFFSET to the most negative possible address +
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 8bdf74902293..1392d5e6e8d6 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -7,7 +7,7 @@
*/
#define INTEL_PMC_MAX_GENERIC 32
-#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_MAX_FIXED 4
#define INTEL_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
@@ -32,6 +32,8 @@
#define HSW_IN_TX (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED (1ULL << 33)
+#define ICL_EVENTSEL_ADAPTIVE (1ULL << 34)
+#define ICL_FIXED_0_ADAPTIVE (1ULL << 32)
#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
@@ -87,6 +89,12 @@
#define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6
#define ARCH_PERFMON_EVENTS_COUNT 7
+#define PEBS_DATACFG_MEMINFO BIT_ULL(0)
+#define PEBS_DATACFG_GP BIT_ULL(1)
+#define PEBS_DATACFG_XMMS BIT_ULL(2)
+#define PEBS_DATACFG_LBRS BIT_ULL(3)
+#define PEBS_DATACFG_LBR_SHIFT 24
+
/*
* Intel "Architectural Performance Monitoring" CPUID
* detection/enumeration details:
@@ -177,6 +185,41 @@ struct x86_pmu_capability {
#define GLOBAL_STATUS_TRACE_TOPAPMI BIT_ULL(55)
/*
+ * Adaptive PEBS v4
+ */
+
+struct pebs_basic {
+ u64 format_size;
+ u64 ip;
+ u64 applicable_counters;
+ u64 tsc;
+};
+
+struct pebs_meminfo {
+ u64 address;
+ u64 aux;
+ u64 latency;
+ u64 tsx_tuning;
+};
+
+struct pebs_gprs {
+ u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
+ u64 r8, r9, r10, r11, r12, r13, r14, r15;
+};
+
+struct pebs_xmm {
+ u64 xmm[16*2]; /* two entries for each register */
+};
+
+struct pebs_lbr_entry {
+ u64 from, to, info;
+};
+
+struct pebs_lbr {
+ struct pebs_lbr_entry lbr[0]; /* Variable length */
+};
+
+/*
* IBS cpuid feature detection
*/
@@ -248,6 +291,11 @@ extern void perf_events_lapic_init(void);
#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
+struct x86_perf_regs {
+ struct pt_regs regs;
+ u64 *xmm_regs;
+};
+
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
@@ -260,14 +308,9 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
*/
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ip = (__ip); \
- (regs)->bp = caller_frame_pointer(); \
+ (regs)->sp = (unsigned long)__builtin_frame_address(0); \
(regs)->cs = __KERNEL_CS; \
regs->flags = 0; \
- asm volatile( \
- _ASM_MOV "%%"_ASM_SP ", %0\n" \
- : "=m" ((regs)->sp) \
- :: "memory" \
- ); \
}
struct perf_guest_switch_msr {
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 2779ace16d23..5e0509b41986 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -23,6 +23,8 @@
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
+#include <asm/fpu/xstate.h>
+#include <asm/fpu/api.h>
extern pgd_t early_top_pgt[PTRS_PER_PGD];
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
@@ -46,7 +48,7 @@ void ptdump_walk_user_pgd_level_checkwx(void);
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__visible;
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
@@ -127,14 +129,29 @@ static inline int pte_dirty(pte_t pte)
static inline u32 read_pkru(void)
{
if (boot_cpu_has(X86_FEATURE_OSPKE))
- return __read_pkru();
+ return rdpkru();
return 0;
}
static inline void write_pkru(u32 pkru)
{
- if (boot_cpu_has(X86_FEATURE_OSPKE))
- __write_pkru(pkru);
+ struct pkru_state *pk;
+
+ if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ return;
+
+ pk = get_xsave_addr(&current->thread.fpu.state.xsave, XFEATURE_PKRU);
+
+ /*
+ * The PKRU value in xstate needs to be in sync with the value that is
+ * written to the CPU. The FPU restore on return to userland would
+ * otherwise load the previous value again.
+ */
+ fpregs_lock();
+ if (pk)
+ pk->pkru = pkru;
+ __write_pkru(pkru);
+ fpregs_unlock();
}
static inline int pte_young(pte_t pte)
@@ -1021,6 +1038,9 @@ static inline void __meminit init_trampoline_default(void)
/* Default trampoline pgd value */
trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
+
+void __init poking_init(void);
+
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
@@ -1355,6 +1375,12 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+extern u32 init_pkru_value;
+#else
+#define init_pkru_value 0
+#endif
+
static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2bb3a648fc12..c34a35c78618 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -367,6 +367,13 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
#define __KERNEL_TSS_LIMIT \
(IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1)
+/* Per CPU interrupt stacks */
+struct irq_stack {
+ char stack[IRQ_STACK_SIZE];
+} __aligned(IRQ_STACK_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
@@ -374,38 +381,25 @@ DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif
-/*
- * Save the original ist values for checking stack pointers during debugging
- */
-struct orig_ist {
- unsigned long ist[7];
-};
-
#ifdef CONFIG_X86_64
-DECLARE_PER_CPU(struct orig_ist, orig_ist);
-
-union irq_stack_union {
- char irq_stack[IRQ_STACK_SIZE];
+struct fixed_percpu_data {
/*
* GCC hardcodes the stack canary as %gs:40. Since the
* irq_stack is the object at %gs:0, we reserve the bottom
* 48 bytes of the irq stack for the canary.
*/
- struct {
- char gs_base[40];
- unsigned long stack_canary;
- };
+ char gs_base[40];
+ unsigned long stack_canary;
};
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
-DECLARE_INIT_PER_CPU(irq_stack_union);
+DECLARE_PER_CPU_FIRST(struct fixed_percpu_data, fixed_percpu_data) __visible;
+DECLARE_INIT_PER_CPU(fixed_percpu_data);
static inline unsigned long cpu_kernelmode_gs_base(int cpu)
{
- return (unsigned long)per_cpu(irq_stack_union.gs_base, cpu);
+ return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
}
-DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
@@ -427,15 +421,8 @@ struct stack_canary {
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
-/*
- * per-CPU IRQ handling stacks
- */
-struct irq_stack {
- u32 stack[THREAD_SIZE/sizeof(u32)];
-} __aligned(THREAD_SIZE);
-
-DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
-DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
+/* Per CPU softirq stack pointer */
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
#endif /* X86_64 */
extern unsigned int fpu_kernel_xstate_size;
@@ -991,4 +978,10 @@ enum l1tf_mitigations {
extern enum l1tf_mitigations l1tf_mitigation;
+enum mds_mitigations {
+ MDS_MITIGATION_OFF,
+ MDS_MITIGATION_FULL,
+ MDS_MITIGATION_VMWERV,
+};
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
deleted file mode 100644
index 4c25cf6caefa..000000000000
--- a/arch/x86/include/asm/rwsem.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-x86/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _ASM_X86_RWSEM_H
-#define _ASM_X86_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-#include <asm/asm.h>
-
-/*
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
- */
-
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-#define ____down_read(sem, slow_path) \
-({ \
- struct rw_semaphore* ret; \
- asm volatile("# beginning down_read\n\t" \
- LOCK_PREFIX _ASM_INC "(%[sem])\n\t" \
- /* adds 0x00000001 */ \
- " jns 1f\n" \
- " call " slow_path "\n" \
- "1:\n\t" \
- "# ending down_read\n\t" \
- : "+m" (sem->count), "=a" (ret), \
- ASM_CALL_CONSTRAINT \
- : [sem] "a" (sem) \
- : "memory", "cc"); \
- ret; \
-})
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
- ____down_read(sem, "call_rwsem_down_read_failed");
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
- return -EINTR;
- return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_read_trylock(struct rw_semaphore *sem)
-{
- long result, tmp;
- asm volatile("# beginning __down_read_trylock\n\t"
- " mov %[count],%[result]\n\t"
- "1:\n\t"
- " mov %[result],%[tmp]\n\t"
- " add %[inc],%[tmp]\n\t"
- " jle 2f\n\t"
- LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- "# ending __down_read_trylock\n\t"
- : [count] "+m" (sem->count), [result] "=&a" (result),
- [tmp] "=&r" (tmp)
- : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
- return result >= 0;
-}
-
-/*
- * lock for writing
- */
-#define ____down_write(sem, slow_path) \
-({ \
- long tmp; \
- struct rw_semaphore* ret; \
- \
- asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t" \
- /* adds 0xffff0001, returns the old value */ \
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
- /* was the active mask 0 before? */\
- " jz 1f\n" \
- " call " slow_path "\n" \
- "1:\n" \
- "# ending down_write" \
- : "+m" (sem->count), [tmp] "=d" (tmp), \
- "=a" (ret), ASM_CALL_CONSTRAINT \
- : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
- : "memory", "cc"); \
- ret; \
-})
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
- ____down_write(sem, "call_rwsem_down_write_failed");
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
- return -EINTR;
-
- return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_write_trylock(struct rw_semaphore *sem)
-{
- bool result;
- long tmp0, tmp1;
- asm volatile("# beginning __down_write_trylock\n\t"
- " mov %[count],%[tmp0]\n\t"
- "1:\n\t"
- " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
- /* was the active mask 0 before? */
- " jnz 2f\n\t"
- " mov %[tmp0],%[tmp1]\n\t"
- " add %[inc],%[tmp1]\n\t"
- LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
- " jnz 1b\n\t"
- "2:\n\t"
- CC_SET(e)
- "# ending __down_write_trylock\n\t"
- : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
- [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
- : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
- : "memory");
- return result;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
- asm volatile("# beginning __up_read\n\t"
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
- /* subtracts 1, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
- "1:\n"
- "# ending __up_read\n"
- : "+m" (sem->count), [tmp] "=d" (tmp)
- : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
- : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- long tmp;
- asm volatile("# beginning __up_write\n\t"
- LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
- /* subtracts 0xffff0001, returns the old value */
- " jns 1f\n\t"
- " call call_rwsem_wake\n" /* expects old value in %edx */
- "1:\n\t"
- "# ending __up_write\n"
- : "+m" (sem->count), [tmp] "=d" (tmp)
- : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
- : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- asm volatile("# beginning __downgrade_write\n\t"
- LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
- /*
- * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
- * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
- */
- " jns 1f\n\t"
- " call call_rwsem_downgrade_wake\n"
- "1:\n\t"
- "# ending __downgrade_write\n"
- : "+m" (sem->count)
- : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
- : "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_RWSEM_H */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 07a25753e85c..ae7b909dc242 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -85,6 +85,9 @@ int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index db333300bd4b..f94a7d0ddd49 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H
-#include <linux/stringify.h>
#include <asm/nops.h>
#include <asm/cpufeatures.h>
/* "Raw" instruction opcodes */
-#define __ASM_CLAC .byte 0x0f,0x01,0xca
-#define __ASM_STAC .byte 0x0f,0x01,0xcb
+#define __ASM_CLAC ".byte 0x0f,0x01,0xca"
+#define __ASM_STAC ".byte 0x0f,0x01,0xcb"
#ifdef __ASSEMBLY__
@@ -28,10 +27,10 @@
#ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
- ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP
#define ASM_STAC \
- ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+ ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP
#else /* CONFIG_X86_SMAP */
@@ -49,26 +48,46 @@
static __always_inline void clac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}
static __always_inline void stac(void)
{
/* Note: a barrier is implicit in alternative() */
- alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+ alternative("", __ASM_STAC, X86_FEATURE_SMAP);
+}
+
+static __always_inline unsigned long smap_save(void)
+{
+ unsigned long flags;
+
+ asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
+ X86_FEATURE_SMAP)
+ : "=rm" (flags) : : "memory", "cc");
+
+ return flags;
+}
+
+static __always_inline void smap_restore(unsigned long flags)
+{
+ asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
+ : : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
- ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
- ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+ ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)
#else /* CONFIG_X86_SMAP */
static inline void clac(void) { }
static inline void stac(void) { }
+static inline unsigned long smap_save(void) { return 0; }
+static inline void smap_restore(unsigned long flags) { }
+
#define ASM_CLAC
#define ASM_STAC
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 2e95b6c1bca3..da545df207b2 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -131,7 +131,7 @@ void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void calculate_max_logical_packages(void);
void native_smp_cpus_done(unsigned int max_cpus);
-void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 43c029cdc3fe..0a3c4cab39db 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -92,7 +92,7 @@ static inline void native_write_cr8(unsigned long val)
#endif
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-static inline u32 __read_pkru(void)
+static inline u32 rdpkru(void)
{
u32 ecx = 0;
u32 edx, pkru;
@@ -107,7 +107,7 @@ static inline u32 __read_pkru(void)
return pkru;
}
-static inline void __write_pkru(u32 pkru)
+static inline void wrpkru(u32 pkru)
{
u32 ecx = 0, edx = 0;
@@ -118,8 +118,21 @@ static inline void __write_pkru(u32 pkru)
asm volatile(".byte 0x0f,0x01,0xef\n\t"
: : "a" (pkru), "c"(ecx), "d"(edx));
}
+
+static inline void __write_pkru(u32 pkru)
+{
+ /*
+ * WRPKRU is relatively expensive compared to RDPKRU.
+ * Avoid WRPKRU when it would not change the value.
+ */
+ if (pkru == rdpkru())
+ return;
+
+ wrpkru(pkru);
+}
+
#else
-static inline u32 __read_pkru(void)
+static inline u32 rdpkru(void)
{
return 0;
}
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 8ec97a62c245..91e29b6a86a5 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -13,7 +13,7 @@
* On x86_64, %gs is shared by percpu area and stack canary. All
* percpu symbols are zero based and %gs points to the base of percpu
* area. The first occupant of the percpu area is always
- * irq_stack_union which contains stack_canary at offset 40. Userland
+ * fixed_percpu_data which contains stack_canary at offset 40. Userland
* %gs is always saved and restored on kernel entry and exit using
* swapgs, so stack protector doesn't add any complexity there.
*
@@ -64,7 +64,7 @@ static __always_inline void boot_init_stack_canary(void)
u64 tsc;
#ifdef CONFIG_X86_64
- BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+ BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
#endif
/*
* We both use the random pool and the current TSC as a source
@@ -79,7 +79,7 @@ static __always_inline void boot_init_stack_canary(void)
current->stack_canary = canary;
#ifdef CONFIG_X86_64
- this_cpu_write(irq_stack_union.stack_canary, canary);
+ this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
this_cpu_write(stack_canary.canary, canary);
#endif
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f335aad404a4..a8d0cdf48616 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -9,6 +9,8 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+
+#include <asm/cpu_entry_area.h>
#include <asm/switch_to.h>
enum stack_type {
@@ -98,19 +100,6 @@ struct stack_frame_ia32 {
u32 return_address;
};
-static inline unsigned long caller_frame_pointer(void)
-{
- struct stack_frame *frame;
-
- frame = __builtin_frame_address(0);
-
-#ifdef CONFIG_FRAME_POINTER
- frame = frame->next_frame;
-#endif
-
- return (unsigned long)frame;
-}
-
void show_opcodes(struct pt_regs *regs, const char *loglvl);
void show_ip(struct pt_regs *regs, const char *loglvl);
#endif /* _ASM_X86_STACKTRACE_H */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 7cf1a270d891..18a4b6890fa8 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -46,6 +46,7 @@ struct inactive_task_frame {
unsigned long r13;
unsigned long r12;
#else
+ unsigned long flags;
unsigned long si;
unsigned long di;
#endif
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 2fe745356fb1..6d8d6bc183b7 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -14,6 +14,8 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
+#include <asm/rmwcc.h>
+
#define ADDR (*(volatile long *)addr)
/**
@@ -29,7 +31,7 @@
*/
static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; bts %1,%0"
+ asm volatile("lock; " __ASM_SIZE(bts) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -47,7 +49,7 @@ static inline void sync_set_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btr %1,%0"
+ asm volatile("lock; " __ASM_SIZE(btr) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -64,7 +66,7 @@ static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btc %1,%0"
+ asm volatile("lock; " __ASM_SIZE(btc) " %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -78,14 +80,9 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
+static inline bool sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; bts %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
/**
@@ -98,12 +95,7 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; btr %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btr), *addr, c, "Ir", nr);
}
/**
@@ -116,12 +108,7 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
*/
static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
- unsigned char oldbit;
-
- asm volatile("lock; btc %2,%1\n\tsetc %0"
- : "=qm" (oldbit), "+m" (ADDR)
- : "Ir" (nr) : "memory");
- return oldbit;
+ return GEN_BINARY_RMWcc("lock; " __ASM_SIZE(btc), *addr, c, "Ir", nr);
}
#define sync_test_bit(nr, addr) test_bit(nr, addr)
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 4c305471ec33..b05ad16174e5 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -105,7 +105,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
memcpy(&regs->bx + i, args, n * sizeof(args[0]));
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_I386;
}
@@ -160,10 +160,12 @@ static inline void syscall_set_arguments(struct task_struct *task,
}
}
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
/* x32 tasks should be considered AUDIT_ARCH_X86_64. */
- return in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+ return (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+ task->thread_info.status & TS_COMPAT)
+ ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index e85ff65c43c3..880b5515b1d6 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -18,7 +18,7 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
+extern void text_poke_early(void *addr, const void *opcode, size_t len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
@@ -35,8 +35,41 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
* inconsistent instruction while you patch.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
-extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
extern int after_bootmem;
+extern __ro_after_init struct mm_struct *poking_mm;
+extern __ro_after_init unsigned long poking_addr;
+
+#ifndef CONFIG_UML_X86
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+ /*
+ * The int3 handler in entry_64.S adds a gap between the
+ * stack where the break point happened, and the saving of
+ * pt_regs. We can extend the original stack because of
+ * this gap. See the idtentry macro's create_gap option.
+ */
+ regs->sp -= sizeof(unsigned long);
+ *(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
#endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e0eccbcb8447..f9453536f9bb 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -88,6 +88,7 @@ struct thread_info {
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
+#define TIF_NEED_FPU_LOAD 14 /* load FPU on return to userspace */
#define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
@@ -117,6 +118,7 @@ struct thread_info {
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
+#define _TIF_NEED_FPU_LOAD (1 << TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID (1 << TIF_NOCPUID)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 404b8b1d44f5..f23e7aaff4cd 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -6,6 +6,7 @@
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);
#include <asm-generic/tlb.h>
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index f4204bf377fc..dee375831962 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -167,7 +167,7 @@ struct tlb_state {
*/
struct mm_struct *loaded_mm;
-#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)
/* Last user mm for optimizing IBPB */
union {
@@ -274,6 +274,8 @@ static inline bool nmi_uaccess_okay(void)
return true;
}
+#define nmi_uaccess_okay nmi_uaccess_okay
+
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
index e0e6d7f21399..6b1e87194809 100644
--- a/arch/x86/include/asm/trace/exceptions.h
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -30,7 +30,7 @@ DECLARE_EVENT_CLASS(x86_exceptions,
__entry->error_code = error_code;
),
- TP_printk("address=%pf ip=%pf error_code=0x%lx",
+ TP_printk("address=%ps ip=%ps error_code=0x%lx",
(void *)__entry->address, (void *)__entry->ip,
__entry->error_code) );
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 069c04be1507..879b77792f94 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -13,22 +13,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
TP_STRUCT__entry(
__field(struct fpu *, fpu)
- __field(bool, initialized)
+ __field(bool, load_fpu)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
TP_fast_assign(
__entry->fpu = fpu;
- __entry->initialized = fpu->initialized;
+ __entry->load_fpu = test_thread_flag(TIF_NEED_FPU_LOAD);
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p load: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
- __entry->initialized,
+ __entry->load_fpu,
__entry->xfeatures,
__entry->xcomp_bv
)
@@ -64,11 +64,6 @@ DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
TP_ARGS(fpu)
);
-DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
- TP_PROTO(struct fpu *fpu),
- TP_ARGS(fpu)
-);
-
DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
TP_PROTO(struct fpu *fpu),
TP_ARGS(fpu)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1954dd5552a2..c82abd6e4ca3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -427,10 +427,11 @@ do { \
({ \
__label__ __pu_label; \
int __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __pu_val; \
- __pu_val = x; \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ __typeof__(ptr) __pu_ptr = (ptr); \
+ __typeof__(size) __pu_size = (size); \
__uaccess_begin(); \
- __put_user_size(__pu_val, (ptr), (size), __pu_label); \
+ __put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
__pu_err = 0; \
__pu_label: \
__uaccess_end(); \
@@ -585,7 +586,6 @@ extern void __cmpxchg_wrong_size(void)
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
({ \
int __ret = 0; \
- __typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__uaccess_begin_nospec(); \
@@ -661,7 +661,7 @@ extern void __cmpxchg_wrong_size(void)
__cmpxchg_wrong_size(); \
} \
__uaccess_end(); \
- *__uval = __old; \
+ *(uval) = __old; \
__ret; \
})
@@ -705,7 +705,7 @@ extern struct movsl_mask {
* checking before using them, but you have to surround them with the
* user_access_begin/end() pair.
*/
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr,len)))
return 0;
@@ -715,6 +715,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() __uaccess_end()
+#define user_access_save() smap_save()
+#define user_access_restore(x) smap_restore(x)
+
#define unsafe_put_user(x, ptr, label) \
__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a9d637bc301d..5cd1caa8bc65 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
}
unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len);
-
-unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);
#endif /* _ASM_X86_UACCESS_64_H */
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
index 27566e57e87d..230474e2ddb5 100644
--- a/arch/x86/include/asm/vdso.h
+++ b/arch/x86/include/asm/vdso.h
@@ -19,7 +19,6 @@ struct vdso_image {
long sym_vvar_start; /* Negative offset to the vvar area */
long sym_vvar_page;
- long sym_hpet_page;
long sym_pvclock_page;
long sym_hvclock_page;
long sym_VDSO32_NOTE_MASK;
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 2863c2026655..d50c7b747d8b 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -217,6 +217,22 @@ xen_single_call(unsigned int call,
return (long)__res;
}
+static __always_inline void __xen_stac(void)
+{
+ /*
+ * Suppress objtool seeing the STAC/CLAC and getting confused about it
+ * calling random code with AC=1.
+ */
+ asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+ ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+ asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+ ASM_CLAC ::: "memory", "flags");
+}
+
static inline long
privcmd_call(unsigned int call,
unsigned long a1, unsigned long a2,
@@ -225,9 +241,9 @@ privcmd_call(unsigned int call,
{
long res;
- stac();
+ __xen_stac();
res = xen_single_call(call, a1, a2, a3, a4, a5);
- clac();
+ __xen_clac();
return res;
}
@@ -424,9 +440,9 @@ HYPERVISOR_dm_op(
domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
int ret;
- stac();
+ __xen_stac();
ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
- clac();
+ __xen_clac();
return ret;
}
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index dabfcf7c3941..7a0e64ccd6ff 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -381,6 +381,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/asm/perf_regs.h
index f3329cabce5c..ac67bbea10ca 100644
--- a/arch/x86/include/uapi/asm/perf_regs.h
+++ b/arch/x86/include/uapi/asm/perf_regs.h
@@ -27,8 +27,29 @@ enum perf_event_x86_regs {
PERF_REG_X86_R13,
PERF_REG_X86_R14,
PERF_REG_X86_R15,
-
+ /* These are the limits for the GPRs. */
PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+
+ /* These all need two bits set because they are 128-bit */
+ PERF_REG_X86_XMM0 = 32,
+ PERF_REG_X86_XMM1 = 34,
+ PERF_REG_X86_XMM2 = 36,
+ PERF_REG_X86_XMM3 = 38,
+ PERF_REG_X86_XMM4 = 40,
+ PERF_REG_X86_XMM5 = 42,
+ PERF_REG_X86_XMM6 = 44,
+ PERF_REG_X86_XMM7 = 46,
+ PERF_REG_X86_XMM8 = 48,
+ PERF_REG_X86_XMM9 = 50,
+ PERF_REG_X86_XMM10 = 52,
+ PERF_REG_X86_XMM11 = 54,
+ PERF_REG_X86_XMM12 = 56,
+ PERF_REG_X86_XMM13 = 58,
+ PERF_REG_X86_XMM14 = 60,
+ PERF_REG_X86_XMM15 = 62,
+
+ /* These include both GPRs and XMMX registers */
+ PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
};
#endif /* _ASM_X86_PERF_REGS_H */
diff --git a/arch/x86/include/uapi/asm/sockios.h b/arch/x86/include/uapi/asm/sockios.h
deleted file mode 100644
index def6d4746ee7..000000000000
--- a/arch/x86/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sockios.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 00b7e27bc2b7..ce1b5cc360a2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -42,7 +42,7 @@ endif
# non-deterministic coverage.
KCOV_INSTRUMENT := n
-CFLAGS_irq.o := -I$(src)/../include/asm/trace
+CFLAGS_irq.o := -I $(srctree)/$(src)/../include/asm/trace
obj-y := process_$(BITS).o signal.o
obj-$(CONFIG_COMPAT) += signal_compat.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8dcbf6890714..9fc92e4539d8 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -197,7 +197,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
}
static int __init
-acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
+acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_local_x2apic *processor = NULL;
#ifdef CONFIG_X86_X2APIC
@@ -210,7 +210,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_X86_X2APIC
apic_id = processor->local_apic_id;
@@ -242,7 +242,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
}
static int __init
-acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic *processor = NULL;
@@ -251,7 +251,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
/* Ignore invalid ID */
if (processor->id == 0xff)
@@ -272,7 +272,7 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
}
static int __init
-acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
+acpi_parse_sapic(union acpi_subtable_headers *header, const unsigned long end)
{
struct acpi_madt_local_sapic *processor = NULL;
@@ -281,7 +281,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
if (BAD_MADT_ENTRY(processor, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
acpi_register_lapic((processor->id << 8) | processor->eid,/* APIC ID */
processor->processor_id, /* ACPI ID */
@@ -291,7 +291,7 @@ acpi_parse_sapic(struct acpi_subtable_header *header, const unsigned long end)
}
static int __init
-acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
+acpi_parse_lapic_addr_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_local_apic_override *lapic_addr_ovr = NULL;
@@ -301,7 +301,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
acpi_lapic_addr = lapic_addr_ovr->address;
@@ -309,7 +309,7 @@ acpi_parse_lapic_addr_ovr(struct acpi_subtable_header * header,
}
static int __init
-acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
+acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_madt_local_x2apic_nmi *x2apic_nmi = NULL;
@@ -319,7 +319,7 @@ acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
if (BAD_MADT_ENTRY(x2apic_nmi, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
@@ -328,7 +328,7 @@ acpi_parse_x2apic_nmi(struct acpi_subtable_header *header,
}
static int __init
-acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_local_apic_nmi *lapic_nmi = NULL;
@@ -337,7 +337,7 @@ acpi_parse_lapic_nmi(struct acpi_subtable_header * header, const unsigned long e
if (BAD_MADT_ENTRY(lapic_nmi, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
@@ -449,7 +449,7 @@ static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
}
static int __init
-acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_ioapic(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_io_apic *ioapic = NULL;
struct ioapic_domain_cfg cfg = {
@@ -462,7 +462,7 @@ acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
if (BAD_MADT_ENTRY(ioapic, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
/* Statically assign IRQ numbers for IOAPICs hosting legacy IRQs */
if (ioapic->global_irq_base < nr_legacy_irqs())
@@ -508,7 +508,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
}
static int __init
-acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
+acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_madt_interrupt_override *intsrc = NULL;
@@ -518,7 +518,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
if (BAD_MADT_ENTRY(intsrc, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
if (intsrc->source_irq == acpi_gbl_FADT.sci_interrupt) {
acpi_sci_ioapic_setup(intsrc->source_irq,
@@ -550,7 +550,7 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
}
static int __init
-acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end)
+acpi_parse_nmi_src(union acpi_subtable_headers * header, const unsigned long end)
{
struct acpi_madt_nmi_source *nmi_src = NULL;
@@ -559,7 +559,7 @@ acpi_parse_nmi_src(struct acpi_subtable_header * header, const unsigned long end
if (BAD_MADT_ENTRY(nmi_src, end))
return -EINVAL;
- acpi_table_print_madt_entry(header);
+ acpi_table_print_madt_entry(&header->common);
/* TBD: Support nimsrc entries? */
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 158ad1483c43..cb6e076a6d39 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -51,6 +51,18 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
if (c->x86_vendor == X86_VENDOR_INTEL &&
(c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
flags->bm_control = 0;
+ /*
+ * For all recent Centaur CPUs, the ucode will make sure that each
+ * core can keep cache coherence with each other while entering C3
+ * type state. So, set bm_check to 1 to indicate that the kernel
+ * doesn't need to execute a cache flush operation (WBINVD) when
+ * entering C3 type state.
+ */
+ if (c->x86_vendor == X86_VENDOR_CENTAUR) {
+ if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
+ c->x86_stepping >= 0x0e))
+ flags->bm_check = 1;
+ }
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 9a79c7808f9c..7b9b49dfc05a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -264,7 +265,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern s32 __smp_locks[], __smp_locks_end[];
-void *text_poke_early(void *addr, const void *opcode, size_t len);
+void text_poke_early(void *addr, const void *opcode, size_t len);
/*
* Are we looking at a near JMP with a 1 or 4-byte displacement.
@@ -666,16 +667,136 @@ void __init alternative_instructions(void)
 * instructions. And on the local CPU you need to be protected against NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
-void *__init_or_module text_poke_early(void *addr, const void *opcode,
- size_t len)
+void __init_or_module text_poke_early(void *addr, const void *opcode,
+ size_t len)
{
unsigned long flags;
+
+ if (boot_cpu_has(X86_FEATURE_NX) &&
+ is_module_text_address((unsigned long)addr)) {
+ /*
+ * Modules text is marked initially as non-executable, so the
+ * code cannot be running and speculative code-fetches are
+ * prevented. Just change the code.
+ */
+ memcpy(addr, opcode, len);
+ } else {
+ local_irq_save(flags);
+ memcpy(addr, opcode, len);
+ local_irq_restore(flags);
+ sync_core();
+
+ /*
+ * Could also do a CLFLUSH here to speed up CPU recovery; but
+ * that causes hangs on some VIA CPUs.
+ */
+ }
+}
+
+__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init unsigned long poking_addr;
+
+static void *__text_poke(void *addr, const void *opcode, size_t len)
+{
+ bool cross_page_boundary = offset_in_page(addr) + len > PAGE_SIZE;
+ struct page *pages[2] = {NULL};
+ temp_mm_state_t prev;
+ unsigned long flags;
+ pte_t pte, *ptep;
+ spinlock_t *ptl;
+ pgprot_t pgprot;
+
+ /*
+ * While boot memory allocator is running we cannot use struct pages as
+ * they are not yet initialized. There is no way to recover.
+ */
+ BUG_ON(!after_bootmem);
+
+ if (!core_kernel_text((unsigned long)addr)) {
+ pages[0] = vmalloc_to_page(addr);
+ if (cross_page_boundary)
+ pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
+ } else {
+ pages[0] = virt_to_page(addr);
+ WARN_ON(!PageReserved(pages[0]));
+ if (cross_page_boundary)
+ pages[1] = virt_to_page(addr + PAGE_SIZE);
+ }
+ /*
+ * If something went wrong, crash and burn since recovery paths are not
+ * implemented.
+ */
+ BUG_ON(!pages[0] || (cross_page_boundary && !pages[1]));
+
local_irq_save(flags);
- memcpy(addr, opcode, len);
+
+ /*
+ * Map the page without the global bit, as TLB flushing is done with
+ * flush_tlb_mm_range(), which is intended for non-global PTEs.
+ */
+ pgprot = __pgprot(pgprot_val(PAGE_KERNEL) & ~_PAGE_GLOBAL);
+
+ /*
+ * The lock is not really needed, but this allows us to avoid open-coding.
+ */
+ ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+
+ /*
+ * This must not fail; preallocated in poking_init().
+ */
+ VM_BUG_ON(!ptep);
+
+ pte = mk_pte(pages[0], pgprot);
+ set_pte_at(poking_mm, poking_addr, ptep, pte);
+
+ if (cross_page_boundary) {
+ pte = mk_pte(pages[1], pgprot);
+ set_pte_at(poking_mm, poking_addr + PAGE_SIZE, ptep + 1, pte);
+ }
+
+ /*
+ * Loading the temporary mm behaves as a compiler barrier, which
+ * guarantees that the PTE will be set at the time memcpy() is done.
+ */
+ prev = use_temporary_mm(poking_mm);
+
+ kasan_disable_current();
+ memcpy((u8 *)poking_addr + offset_in_page(addr), opcode, len);
+ kasan_enable_current();
+
+ /*
+ * Ensure that the PTE is only cleared after the instructions of memcpy
+ * were issued by using a compiler barrier.
+ */
+ barrier();
+
+ pte_clear(poking_mm, poking_addr, ptep);
+ if (cross_page_boundary)
+ pte_clear(poking_mm, poking_addr + PAGE_SIZE, ptep + 1);
+
+ /*
+ * Loading the previous page-table hierarchy requires a serializing
+ * instruction that already allows the core to see the updated version.
+ * Xen-PV is assumed to serialize execution in a similar manner.
+ */
+ unuse_temporary_mm(prev);
+
+ /*
+ * Flushing the TLB might involve IPIs, which would require enabled
+ * IRQs, but not if the mm is not used, as it is at this point.
+ */
+ flush_tlb_mm_range(poking_mm, poking_addr, poking_addr +
+ (cross_page_boundary ? 2 : 1) * PAGE_SIZE,
+ PAGE_SHIFT, false);
+
+ /*
+ * If the text does not match what we just wrote then something is
+ * fundamentally screwy; there's nothing we can really do about that.
+ */
+ BUG_ON(memcmp(addr, opcode, len));
+
+ pte_unmap_unlock(ptep, ptl);
local_irq_restore(flags);
- sync_core();
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
- that causes hangs on some VIA CPUs. */
return addr;
}
@@ -689,48 +810,36 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
* It means the size must be writable atomically and the address must be aligned
* in a way that permits an atomic write. It also makes sure we fit on a single
* page.
+ *
+ * Note that the caller must ensure that if the modified code is part of a
+ * module, the module is not removed during poking. This can be achieved
+ * by registering a module notifier, and ordering module removal and patching
+ * through a mutex.
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
- unsigned long flags;
- char *vaddr;
- struct page *pages[2];
- int i;
-
- /*
- * While boot memory allocator is runnig we cannot use struct
- * pages as they are not yet initialized.
- */
- BUG_ON(!after_bootmem);
-
lockdep_assert_held(&text_mutex);
- if (!core_kernel_text((unsigned long)addr)) {
- pages[0] = vmalloc_to_page(addr);
- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
- } else {
- pages[0] = virt_to_page(addr);
- WARN_ON(!PageReserved(pages[0]));
- pages[1] = virt_to_page(addr + PAGE_SIZE);
- }
- BUG_ON(!pages[0]);
- local_irq_save(flags);
- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
- if (pages[1])
- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
- clear_fixmap(FIX_TEXT_POKE0);
- if (pages[1])
- clear_fixmap(FIX_TEXT_POKE1);
- local_flush_tlb();
- sync_core();
- /* Could also do a CLFLUSH here to speed up CPU recovery; but
- that causes hangs on some VIA CPUs. */
- for (i = 0; i < len; i++)
- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
- local_irq_restore(flags);
- return addr;
+ return __text_poke(addr, opcode, len);
+}
+
+/**
+ * text_poke_kgdb - Update instructions on a live kernel by kgdb
+ * @addr: address to modify
+ * @opcode: source of the copy
+ * @len: length to copy
+ *
+ * Only atomic text poke/set should be allowed when not doing early patching.
+ * It means the size must be writable atomically and the address must be aligned
+ * in a way that permits an atomic write. It also makes sure we fit on a single
+ * page.
+ *
+ * Context: should only be used by kgdb, which ensures no other core is running,
+ * despite the fact it does not hold the text_mutex.
+ */
+void *text_poke_kgdb(void *addr, const void *opcode, size_t len)
+{
+ return __text_poke(addr, opcode, len);
}
static void do_sync_core(void *info)
@@ -788,7 +897,7 @@ NOKPROBE_SYMBOL(poke_int3_handler);
* replacing opcode
* - sync cores
*/
-void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
+void text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
unsigned char int3 = 0xcc;
@@ -830,7 +939,5 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
* the writing of the new instruction.
*/
bp_patching_in_progress = false;
-
- return addr;
}
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 2c0aa34af69c..bf7f13ea3c64 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -233,9 +233,6 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
unsigned long bus;
phys_addr_t paddr = page_to_phys(page) + offset;
- if (!dev)
- dev = &x86_dma_fallback_dev;
-
if (!need_iommu(dev, paddr, size))
return paddr;
@@ -392,9 +389,6 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (nents == 0)
return 0;
- if (!dev)
- dev = &x86_dma_fallback_dev;
-
out = 0;
start = 0;
start_sg = sg;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b7bcdd781651..ab6af775f06c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -802,6 +802,24 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
return 0;
}
+static int __init lapic_init_clockevent(void)
+{
+ if (!lapic_timer_frequency)
+ return -1;
+
+ /* Calculate the scaled math multiplication factor */
+ lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
+ TICK_NSEC, lapic_clockevent.shift);
+ lapic_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
+ lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
+ lapic_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &lapic_clockevent);
+ lapic_clockevent.min_delta_ticks = 0xF;
+
+ return 0;
+}
+
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
@@ -810,25 +828,21 @@ static int __init calibrate_APIC_clock(void)
long delta, deltatsc;
int pm_referenced = 0;
- /**
- * check if lapic timer has already been calibrated by platform
- * specific routine, such as tsc calibration code. if so, we just fill
+ if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+ return 0;
+
+ /*
+ * Check if lapic timer has already been calibrated by platform
+ * specific routine, such as tsc calibration code. If so just fill
* in the clockevent structure and return.
*/
-
- if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
- return 0;
- } else if (lapic_timer_frequency) {
+ if (!lapic_init_clockevent()) {
apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
- lapic_timer_frequency);
- lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
- TICK_NSEC, lapic_clockevent.shift);
- lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
- lapic_clockevent.max_delta_ticks = 0x7FFFFF;
- lapic_clockevent.min_delta_ns =
- clockevent_delta2ns(0xF, &lapic_clockevent);
- lapic_clockevent.min_delta_ticks = 0xF;
+ lapic_timer_frequency);
+ /*
+ * Direct calibration methods must have an always running
+ * local APIC timer, no need for broadcast timer.
+ */
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
return 0;
}
@@ -869,17 +883,8 @@ static int __init calibrate_APIC_clock(void)
pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
&delta, &deltatsc);
- /* Calculate the scaled math multiplication factor */
- lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
- lapic_clockevent.shift);
- lapic_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
- lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
- lapic_clockevent.min_delta_ns =
- clockevent_delta2ns(0xF, &lapic_clockevent);
- lapic_clockevent.min_delta_ticks = 0xF;
-
lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
+ lapic_init_clockevent();
apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 78778b54f904..a5464b8b6c46 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -175,7 +175,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
this_cpu_write(cpu_llc_id, node);
/* Account for nodes per socket in multi-core-module processors */
- if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
+ if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, val);
nodes = ((val >> 3) & 7) + 1;
}
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index ddced33184b5..d3d075226c0a 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -68,10 +68,12 @@ int main(void)
#undef ENTRY
OFFSET(TSS_ist, tss_struct, x86_tss.ist);
+ DEFINE(DB_STACK_OFFSET, offsetof(struct cea_exception_stacks, DB_stack) -
+ offsetof(struct cea_exception_stacks, DB1_stack));
BLANK();
#ifdef CONFIG_STACKPROTECTOR
- DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
+ DEFINE(stack_canary_offset, offsetof(struct fixed_percpu_data, stack_canary));
BLANK();
#endif
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index cfd24f9f7614..1796d2bdcaaa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,7 +28,7 @@ obj-y += cpuid-deps.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o
+obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 01004bfb1a1b..fb6a64bd765f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -82,11 +82,14 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
* performance at the same time..
*/
+#ifdef CONFIG_X86_32
extern __visible void vide(void);
-__asm__(".globl vide\n"
+__asm__(".text\n"
+ ".globl vide\n"
".type vide, @function\n"
".align 4\n"
"vide: ret\n");
+#endif
static void init_amd_k5(struct cpuinfo_x86 *c)
{
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 804c49493938..64d5aec24203 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -83,7 +83,7 @@ unsigned int aperfmperf_get_khz(int cpu)
if (!cpu_khz)
return 0;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
aperfmperf_snapshot_cpu(cpu, ktime_get(), true);
@@ -99,7 +99,7 @@ void arch_freq_prepare_all(void)
if (!cpu_khz)
return;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return;
for_each_online_cpu(cpu)
@@ -115,7 +115,7 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!cpu_khz)
return 0;
- if (!static_cpu_has(X86_FEATURE_APERFMPERF))
+ if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
if (aperfmperf_snapshot_cpu(cpu, ktime_get(), true))
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b91b3bfa5cfb..03b4cc0ec3a7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -37,6 +37,7 @@
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -63,6 +64,13 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -101,6 +109,10 @@ void __init check_bugs(void)
l1tf_select_mitigation();
+ mds_select_mitigation();
+
+ arch_smt_update();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -207,6 +219,61 @@ static void x86_amd_ssb_disable(void)
}
#undef pr_fmt
+#define pr_fmt(fmt) "MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
+ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void __init mds_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ return;
+ }
+
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
+
+ static_branch_enable(&mds_user_clear);
+
+ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ mds_mitigation = MDS_MITIGATION_OFF;
+ else if (!strcmp(str, "full"))
+ mds_mitigation = MDS_MITIGATION_FULL;
+ else if (!strcmp(str, "full,nosmt")) {
+ mds_mitigation = MDS_MITIGATION_FULL;
+ mds_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("mds", mds_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -440,7 +507,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
char arg[20];
int ret, i;
- if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+ cpu_mitigations_off())
return SPECTRE_V2_CMD_NONE;
ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@ -574,9 +642,6 @@ specv2_set_mode:
/* Set up IBPB and STIBP depending on the general spectre V2 command */
spectre_v2_user_select_mitigation(cmd);
-
- /* Enable STIBP if appropriate */
- arch_smt_update();
}
static void update_stibp_msr(void * __unused)
@@ -610,6 +675,31 @@ static void update_indir_branch_cond(void)
static_branch_disable(&switch_to_cond_stibp);
}
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+ *
+ * The other variants cannot be mitigated when SMT is enabled, so
+ * clearing the buffers on idle just to prevent the Store Buffer
+ * repartitioning leak would be a window dressing exercise.
+ */
+ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+ return;
+
+ if (sched_smt_active())
+ static_branch_enable(&mds_idle_clear);
+ else
+ static_branch_disable(&mds_idle_clear);
+}
+
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+
void arch_smt_update(void)
{
/* Enhanced IBRS implies STIBP. No update required. */
@@ -631,6 +721,17 @@ void arch_smt_update(void)
break;
}
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VMWERV:
+ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
+ break;
+ }
+
mutex_unlock(&spec_ctrl_mutex);
}
@@ -672,7 +773,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
char arg[20];
int ret, i;
- if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+ cpu_mitigations_off()) {
return SPEC_STORE_BYPASS_CMD_NONE;
} else {
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@ -1008,6 +1110,11 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ if (cpu_mitigations_off())
+ l1tf_mitigation = L1TF_MITIGATION_OFF;
+ else if (cpu_mitigations_auto_nosmt())
+ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
+
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) {
@@ -1036,7 +1143,7 @@ static void __init l1tf_select_mitigation(void)
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
pr_info("However, doing so will make a part of your RAM unusable.\n");
- pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
return;
}
@@ -1069,6 +1176,7 @@ static int __init l1tf_cmdline(char *str)
early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) fmt
#ifdef CONFIG_SYSFS
@@ -1107,6 +1215,23 @@ static ssize_t l1tf_show_state(char *buf)
}
#endif
+static ssize_t mds_show_state(char *buf)
+{
+ if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ mds_strings[mds_mitigation]);
+ }
+
+ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+ sched_smt_active() ? "mitigated" : "disabled"));
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
static char *stibp_state(void)
{
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1173,6 +1298,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
return l1tf_show_state(buf);
break;
+
+ case X86_BUG_MDS:
+ return mds_show_state(buf);
+
default:
break;
}
@@ -1204,4 +1333,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b
{
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
#endif
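
The bugs.c hunks add the mds_user_clear and mds_idle_clear static keys and let arch_smt_update() toggle the idle one as SMT comes and goes. A hedged sketch of how a call site is gated by such a key follows; the mds_idle_clear declaration mirrors this patch, while example_clear_cpu_buffers() is only a placeholder for the real VERW-based clearing helper, which is not part of this hunk.

/*
 * Hedged sketch of the static-key gating pattern wired up above.
 * example_clear_cpu_buffers() is a stand-in; only the key itself is
 * defined by this patch.
 */
#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

static inline void example_clear_cpu_buffers(void)
{
	/* placeholder for the actual CPU buffer clearing sequence */
}

static inline void example_mds_idle_clear_cpu_buffers(void)
{
	/* compiles to a NOP unless update_mds_branch_idle() enabled the key */
	if (static_branch_likely(&mds_idle_clear))
		example_clear_cpu_buffers();
}
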
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cb28e98a0659..d7f55ad2dfb1 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -372,6 +372,8 @@ static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
+ struct pkru_state *pk;
+
/* check the boot processor, plus compile options for PKU: */
if (!cpu_feature_enabled(X86_FEATURE_PKU))
return;
@@ -382,6 +384,9 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
return;
cr4_set_bits(X86_CR4_PKE);
+ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
+ if (pk)
+ pk->pkru = init_pkru_value;
/*
* Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
* cpuid bit to be set. We need to ensure that we
@@ -507,19 +512,6 @@ void load_percpu_segment(int cpu)
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif
-#ifdef CONFIG_X86_64
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
-};
-#endif
-
/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
@@ -948,61 +940,77 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY },
- { X86_VENDOR_CENTAUR, 5 },
- { X86_VENDOR_INTEL, 5 },
- { X86_VENDOR_NSC, 5 },
- { X86_VENDOR_ANY, 4 },
- {}
-};
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
+#define MSBDS_ONLY BIT(5)
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
- { X86_VENDOR_AMD },
- { X86_VENDOR_HYGON },
- {}
-};
+#define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
-/* Only list CPUs which speculate but are non susceptible to SSB */
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
- { X86_VENDOR_AMD, 0x12, },
- { X86_VENDOR_AMD, 0x11, },
- { X86_VENDOR_AMD, 0x10, },
- { X86_VENDOR_AMD, 0xf, },
- {}
-};
+#define VULNWL_INTEL(model, whitelist) \
+ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist) \
+ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+#define VULNWL_HYGON(family, whitelist) \
+ VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ /* Intel Family 6 */
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
- /* in addition to cpu_no_speculation */
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
+
+ /* AMD Family 0xf - 0x12 */
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
{}
};
+static bool __init cpu_matches(unsigned long which)
+{
+ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
+
+ return m && !!(m->driver_data & which);
+}
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
- if (x86_match_cpu(cpu_no_speculation))
+ if (cpu_matches(NO_SPECULATION))
return;
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
@@ -1011,15 +1019,20 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (x86_match_cpu(cpu_no_meltdown))
+ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+ }
+
+ if (cpu_matches(NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -1028,7 +1041,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (x86_match_cpu(cpu_no_l1tf))
+ if (cpu_matches(NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
@@ -1511,9 +1524,9 @@ static __init int setup_clearcpuid(char *arg)
__setup("clearcpuid=", setup_clearcpuid);
#ifdef CONFIG_X86_64
-DEFINE_PER_CPU_FIRST(union irq_stack_union,
- irq_stack_union) __aligned(PAGE_SIZE) __visible;
-EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
+DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
+ fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
+EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
/*
* The following percpu variables are hot. Align current_task to
@@ -1523,9 +1536,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
-DEFINE_PER_CPU(char *, irq_stack_ptr) =
- init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
-
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
@@ -1562,23 +1573,7 @@ void syscall_init(void)
X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}
-/*
- * Copies of the original ist values from the tss are only accessed during
- * debugging, no special alignment required.
- */
-DEFINE_PER_CPU(struct orig_ist, orig_ist);
-
-static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);
-
-int is_debug_stack(unsigned long addr)
-{
- return __this_cpu_read(debug_stack_usage) ||
- (addr <= __this_cpu_read(debug_stack_addr) &&
- addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
-}
-NOKPROBE_SYMBOL(is_debug_stack);
-
DEFINE_PER_CPU(u32, debug_idt_ctr);
void debug_stack_set_zero(void)
@@ -1668,7 +1663,7 @@ static void setup_getcpu(int cpu)
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
struct desc_struct d = { };
- if (static_cpu_has(X86_FEATURE_RDTSCP))
+ if (boot_cpu_has(X86_FEATURE_RDTSCP))
write_rdtscp_aux(cpudata);
/* Store CPU and node number in limit. */
@@ -1690,17 +1685,14 @@ static void setup_getcpu(int cpu)
* initialized (naturally) in the bootstrap process, such as the GDT
* and IDT. We reload them nevertheless, this function acts as a
* 'CPU state barrier', nothing should get across.
- * A lot of state is already set up in PDA init for 64 bit
*/
#ifdef CONFIG_X86_64
void cpu_init(void)
{
- struct orig_ist *oist;
+ int cpu = raw_smp_processor_id();
struct task_struct *me;
struct tss_struct *t;
- unsigned long v;
- int cpu = raw_smp_processor_id();
int i;
wait_for_master_cpu(cpu);
@@ -1715,7 +1707,6 @@ void cpu_init(void)
load_ucode_ap();
t = &per_cpu(cpu_tss_rw, cpu);
- oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
@@ -1753,16 +1744,11 @@ void cpu_init(void)
/*
* set up and load the per-CPU TSS
*/
- if (!oist->ist[0]) {
- char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
-
- for (v = 0; v < N_EXCEPTION_STACKS; v++) {
- estacks += exception_stack_sizes[v];
- oist->ist[v] = t->x86_tss.ist[v] =
- (unsigned long)estacks;
- if (v == DEBUG_STACK-1)
- per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
- }
+ if (!t->x86_tss.ist[0]) {
+ t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
+ t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
+ t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
+ t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
}
t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
@@ -1864,23 +1850,6 @@ void cpu_init(void)
}
#endif
-static void bsp_resume(void)
-{
- if (this_cpu->c_bsp_resume)
- this_cpu->c_bsp_resume(&boot_cpu_data);
-}
-
-static struct syscore_ops cpu_syscore_ops = {
- .resume = bsp_resume,
-};
-
-static int __init init_cpu_syscore(void)
-{
- register_syscore_ops(&cpu_syscore_ops);
- return 0;
-}
-core_initcall(init_cpu_syscore);
-
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
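
In common.c the three per-issue match tables are folded into one cpu_vuln_whitelist whose driver_data bits (NO_SPECULATION, NO_MDS, MSBDS_ONLY, ...) are queried through cpu_matches(). The same table-plus-flags pattern is sketched below against a made-up table; only x86_match_cpu() and the struct layout are real, the entry and flag names are assumptions.

/*
 * Hedged sketch of the whitelist pattern introduced above, with a
 * made-up table. EXAMPLE_NO_FOO/BAR and the single entry are
 * illustrative only.
 */
#include <linux/bits.h>
#include <linux/init.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define EXAMPLE_NO_FOO	BIT(0)
#define EXAMPLE_NO_BAR	BIT(1)

static const __initconst struct x86_cpu_id example_whitelist[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, X86_FEATURE_ANY,
	  EXAMPLE_NO_FOO | EXAMPLE_NO_BAR },
	{}
};

static bool __init example_matches(unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(example_whitelist);

	return m && !!(m->driver_data & which);
}
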
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 5eb946b9a9f3..c0e2407abdd6 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -14,7 +14,6 @@ struct cpu_dev {
void (*c_init)(struct cpuinfo_x86 *);
void (*c_identify)(struct cpuinfo_x86 *);
void (*c_detect_tlb)(struct cpuinfo_x86 *);
- void (*c_bsp_resume)(struct cpuinfo_x86 *);
int c_x86_vendor;
#ifdef CONFIG_X86_32
/* Optional vendor specific routine to obtain the cache size. */
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index cf25405444ab..415621ddb8a2 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -19,6 +19,8 @@
#include "cpu.h"
+#define APICID_SOCKET_ID_BIT 6
+
/*
* nodes_per_socket: Stores the number of nodes per socket.
* Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
if (!err)
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+ /* Socket ID is ApicId[6] for these processors. */
+ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
+
cacheinfo_hygon_init_llc_id(c, cpu, node_id);
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3142fd7a9b32..f17c1a714779 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -596,36 +596,6 @@ detect_keyid_bits:
c->x86_phys_bits -= keyid_bits;
}
-static void init_intel_energy_perf(struct cpuinfo_x86 *c)
-{
- u64 epb;
-
- /*
- * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
- * (x86_energy_perf_policy(8) is available to change it at run-time.)
- */
- if (!cpu_has(c, X86_FEATURE_EPB))
- return;
-
- rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
- if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
- return;
-
- pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
- pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
- epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
- wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-}
-
-static void intel_bsp_resume(struct cpuinfo_x86 *c)
-{
- /*
- * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
- * so reinitialize it properly like during bootup:
- */
- init_intel_energy_perf(c);
-}
-
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
u64 msr;
@@ -763,8 +733,6 @@ static void init_intel(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_TME))
detect_tme(c);
- init_intel_energy_perf(c);
-
init_intel_misc_features(c);
}
@@ -1023,9 +991,7 @@ static const struct cpu_dev intel_cpu_dev = {
.c_detect_tlb = intel_detect_tlb,
.c_early_init = early_init_intel,
.c_init = init_intel,
- .c_bsp_resume = intel_bsp_resume,
.c_x86_vendor = X86_VENDOR_INTEL,
};
cpu_dev_register(intel_cpu_dev);
-
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
new file mode 100644
index 000000000000..ebb14a26f117
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Performance and Energy Bias Hint support.
+ *
+ * Copyright (C) 2019 Intel Corporation
+ *
+ * Author:
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/syscore_ops.h>
+#include <linux/pm.h>
+
+#include <asm/cpufeature.h>
+#include <asm/msr.h>
+
+/**
+ * DOC: overview
+ *
+ * The Performance and Energy Bias Hint (EPB) allows software to specify its
+ * preference with respect to the power-performance tradeoffs present in the
+ * processor. Generally, the EPB is expected to be set by user space (directly
+ * via sysfs or with the help of the x86_energy_perf_policy tool), but there are
+ * two reasons for the kernel to update it.
+ *
+ * First, there are systems where the platform firmware resets the EPB during
+ * system-wide transitions from sleep states back into the working state
+ * effectively causing the previous EPB updates by user space to be lost.
+ * Thus the kernel needs to save the current EPB values for all CPUs during
+ * system-wide transitions to sleep states and restore them on the way back to
+ * the working state. That can be achieved by saving EPB for secondary CPUs
+ * when they are taken offline during transitions into system sleep states and
+ * for the boot CPU in a syscore suspend operation, so that it can be restored
+ * for the boot CPU in a syscore resume operation and for the other CPUs when
+ * they are brought back online. However, CPUs that are already offline when
+ * a system-wide PM transition is started are not taken offline again, but their
+ * EPB values may still be reset by the platform firmware during the transition,
+ * so in fact it is necessary to save the EPB of any CPU taken offline and to
+ * restore it when the given CPU goes back online at all times.
+ *
+ * Second, on many systems the initial EPB value coming from the platform
+ * firmware is 0 ('performance') and at least on some of them that is because
+ * the platform firmware does not initialize EPB at all with the assumption that
+ * the OS will do that anyway. That sometimes is problematic, as it may cause
+ * the system battery to drain too fast, for example, so it is better to adjust
+ * it on CPU bring-up and if the initial EPB value for a given CPU is 0, the
+ * kernel changes it to 6 ('normal').
+ */
+
+static DEFINE_PER_CPU(u8, saved_epb);
+
+#define EPB_MASK 0x0fULL
+#define EPB_SAVED 0x10ULL
+#define MAX_EPB EPB_MASK
+
+static int intel_epb_save(void)
+{
+ u64 epb;
+
+ rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ /*
+ * Ensure that saved_epb will always be nonzero after this write even if
+ * the EPB value read from the MSR is 0.
+ */
+ this_cpu_write(saved_epb, (epb & EPB_MASK) | EPB_SAVED);
+
+ return 0;
+}
+
+static void intel_epb_restore(void)
+{
+ u64 val = this_cpu_read(saved_epb);
+ u64 epb;
+
+ rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+ if (val) {
+ val &= EPB_MASK;
+ } else {
+ /*
+ * Because intel_epb_save() has not run for the current CPU yet,
+ * it is going online for the first time, so if its EPB value is
+ * 0 ('performance') at this point, assume that it has not been
+ * initialized by the platform firmware and set it to 6
+ * ('normal').
+ */
+ val = epb & EPB_MASK;
+ if (val == ENERGY_PERF_BIAS_PERFORMANCE) {
+ val = ENERGY_PERF_BIAS_NORMAL;
+ pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+ }
+ }
+ wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
+}
+
+#ifdef CONFIG_PM
+static struct syscore_ops intel_epb_syscore_ops = {
+ .suspend = intel_epb_save,
+ .resume = intel_epb_restore,
+};
+
+static const char * const energy_perf_strings[] = {
+ "performance",
+ "balance-performance",
+ "normal",
+ "balance-power",
+ "power"
+};
+static const u8 energ_perf_values[] = {
+ ENERGY_PERF_BIAS_PERFORMANCE,
+ ENERGY_PERF_BIAS_BALANCE_PERFORMANCE,
+ ENERGY_PERF_BIAS_NORMAL,
+ ENERGY_PERF_BIAS_BALANCE_POWERSAVE,
+ ENERGY_PERF_BIAS_POWERSAVE
+};
+
+static ssize_t energy_perf_bias_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned int cpu = dev->id;
+ u64 epb;
+ int ret;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%llu\n", epb);
+}
+
+static ssize_t energy_perf_bias_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int cpu = dev->id;
+ u64 epb, val;
+ int ret;
+
+ ret = __sysfs_match_string(energy_perf_strings,
+ ARRAY_SIZE(energy_perf_strings), buf);
+ if (ret >= 0)
+ val = energ_perf_values[ret];
+ else if (kstrtou64(buf, 0, &val) || val > MAX_EPB)
+ return -EINVAL;
+
+ ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret < 0)
+ return ret;
+
+ ret = wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS,
+ (epb & ~EPB_MASK) | val);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(energy_perf_bias);
+
+static struct attribute *intel_epb_attrs[] = {
+ &dev_attr_energy_perf_bias.attr,
+ NULL
+};
+
+static const struct attribute_group intel_epb_attr_group = {
+ .name = power_group_name,
+ .attrs = intel_epb_attrs
+};
+
+static int intel_epb_online(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ intel_epb_restore();
+ if (!cpuhp_tasks_frozen)
+ sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+ return 0;
+}
+
+static int intel_epb_offline(unsigned int cpu)
+{
+ struct device *cpu_dev = get_cpu_device(cpu);
+
+ if (!cpuhp_tasks_frozen)
+ sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);
+
+ intel_epb_save();
+ return 0;
+}
+
+static inline void register_intel_ebp_syscore_ops(void)
+{
+ register_syscore_ops(&intel_epb_syscore_ops);
+}
+#else /* !CONFIG_PM */
+static int intel_epb_online(unsigned int cpu)
+{
+ intel_epb_restore();
+ return 0;
+}
+
+static int intel_epb_offline(unsigned int cpu)
+{
+ return intel_epb_save();
+}
+
+static inline void register_intel_ebp_syscore_ops(void) {}
+#endif
+
+static __init int intel_epb_init(void)
+{
+ int ret;
+
+ if (!boot_cpu_has(X86_FEATURE_EPB))
+ return -ENODEV;
+
+ ret = cpuhp_setup_state(CPUHP_AP_X86_INTEL_EPB_ONLINE,
+ "x86/intel/epb:online", intel_epb_online,
+ intel_epb_offline);
+ if (ret < 0)
+ goto err_out_online;
+
+ register_intel_ebp_syscore_ops();
+ return 0;
+
+err_out_online:
+ cpuhp_remove_state(CPUHP_AP_X86_INTEL_EPB_ONLINE);
+ return ret;
+}
+subsys_initcall(intel_epb_init);
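
The new intel_epb.c file replaces the old init_intel_energy_perf()/intel_bsp_resume() pair: it normalizes a firmware-left EPB of 0 to 6 on CPU bring-up, saves/restores the MSR across hotplug and system sleep, and exposes a per-CPU energy_perf_bias attribute in the CPU device's power group. A user-space sketch is shown below; the sysfs path is an assumption derived from those names, and writing it normally requires root.

/*
 * User-space sketch. Assumed path, derived from DEVICE_ATTR_RW() and
 * the power group used above:
 *   /sys/devices/system/cpu/cpuN/power/energy_perf_bias
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/power/energy_perf_bias";
	char buf[16];
	FILE *f;

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 EPB: %s", buf);	/* a number in 0..15 */
	fclose(f);

	f = fopen(path, "w");
	if (!f)
		return 1;
	/* the store side also accepts a raw value 0..15 */
	fputs("balance-performance\n", f);
	fclose(f);
	return 0;
}
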
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e64de5149e50..d904aafe6409 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -563,33 +563,59 @@ out:
return offset;
}
+bool amd_filter_mce(struct mce *m)
+{
+ enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+ u8 xec = (m->status >> 16) & 0x3F;
+
+ /* See Family 17h Models 10h-2Fh Erratum #1114. */
+ if (c->x86 == 0x17 &&
+ c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
+ bank_type == SMCA_IF && xec == 10)
+ return true;
+
+ return false;
+}
+
/*
- * Turn off MC4_MISC thresholding banks on all family 0x15 models since
- * they're not supported there.
+ * Turn off thresholding banks for the following conditions:
+ * - MC4_MISC thresholding is not supported on Family 0x15.
+ * - Prevent possible spurious interrupts from the IF bank on Family 0x17
+ * Models 0x10-0x2F due to Erratum #1114.
*/
-void disable_err_thresholding(struct cpuinfo_x86 *c)
+void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
- int i;
+ int i, num_msrs;
u64 hwcr;
bool need_toggle;
- u32 msrs[] = {
- 0x00000413, /* MC4_MISC0 */
- 0xc0000408, /* MC4_MISC1 */
- };
+ u32 msrs[NR_BLOCKS];
+
+ if (c->x86 == 0x15 && bank == 4) {
+ msrs[0] = 0x00000413; /* MC4_MISC0 */
+ msrs[1] = 0xc0000408; /* MC4_MISC1 */
+ num_msrs = 2;
+ } else if (c->x86 == 0x17 &&
+ (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {
- if (c->x86 != 0x15)
+ if (smca_get_bank_type(bank) != SMCA_IF)
+ return;
+
+ msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
+ num_msrs = 1;
+ } else {
return;
+ }
rdmsrl(MSR_K7_HWCR, hwcr);
/* McStatusWrEn has to be set */
need_toggle = !(hwcr & BIT(18));
-
if (need_toggle)
wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));
/* Clear CntP bit safely */
- for (i = 0; i < ARRAY_SIZE(msrs); i++)
+ for (i = 0; i < num_msrs; i++)
msr_clear_bit(msrs[i], 62);
/* restore old settings */
@@ -604,12 +630,12 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
unsigned int bank, block, cpu = smp_processor_id();
int offset = -1;
- disable_err_thresholding(c);
-
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (mce_flags.smca)
smca_configure(bank, cpu);
+ disable_err_thresholding(c, bank);
+
for (block = 0; block < NR_BLOCKS; ++block) {
address = get_block_address(address, low, high, bank, block);
if (!address)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index b7fb541a4873..5112a50e6486 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -460,23 +460,6 @@ static void mce_irq_work_cb(struct irq_work *entry)
mce_schedule_work();
}
-static void mce_report_event(struct pt_regs *regs)
-{
- if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
- mce_notify_irq();
- /*
- * Triggering the work queue here is just an insurance
- * policy in case the syscall exit notify handler
- * doesn't run soon enough or ends up running on the
- * wrong CPU (can happen when audit sleeps)
- */
- mce_schedule_work();
- return;
- }
-
- irq_work_queue(&mce_irq_work);
-}
-
/*
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
@@ -712,19 +695,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
+
+ /* If this entry is not valid, ignore it */
if (!(m.status & MCI_STATUS_VAL))
continue;
/*
- * Uncorrected or signalled events are handled by the exception
- * handler when it is enabled, so don't process those here.
- *
- * TBD do the same check for MCI_STATUS_EN here?
+ * If we are logging everything (at CPU online) or this
+ * is a corrected error, then we must log it.
*/
- if (!(flags & MCP_UC) &&
- (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
- continue;
+ if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
+ goto log_it;
+
+ /*
+ * Newer Intel systems that support software error
+ * recovery need to make additional checks. Other
+ * CPUs should skip over uncorrected errors, but log
+ * everything else.
+ */
+ if (!mca_cfg.ser) {
+ if (m.status & MCI_STATUS_UC)
+ continue;
+ goto log_it;
+ }
+ /* Log "not enabled" (speculative) errors */
+ if (!(m.status & MCI_STATUS_EN))
+ goto log_it;
+
+ /*
+ * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
+ * UC == 1 && PCC == 0 && S == 0
+ */
+ if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
+ goto log_it;
+
+ /*
+ * Skip anything else. Presumption is that our read of this
+ * bank is racing with a machine check. Leave the log alone
+ * for do_machine_check() to deal with it.
+ */
+ continue;
+
+log_it:
error_seen = true;
mce_read_aux(&m, i);
@@ -1301,7 +1314,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_panic("Fatal machine check on current CPU", &m, msg);
if (worst > 0)
- mce_report_event(regs);
+ irq_work_queue(&mce_irq_work);
+
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
sync_core();
@@ -1451,13 +1465,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
static int __mcheck_cpu_mce_banks_init(void)
{
int i;
- u8 num_banks = mca_cfg.banks;
- mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
+ mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
if (!mce_banks)
return -ENOMEM;
- for (i = 0; i < num_banks; i++) {
+ for (i = 0; i < MAX_NR_BANKS; i++) {
struct mce_bank *b = &mce_banks[i];
b->ctl = -1ULL;
@@ -1471,28 +1484,19 @@ static int __mcheck_cpu_mce_banks_init(void)
*/
static int __mcheck_cpu_cap_init(void)
{
- unsigned b;
u64 cap;
+ u8 b;
rdmsrl(MSR_IA32_MCG_CAP, cap);
b = cap & MCG_BANKCNT_MASK;
- if (!mca_cfg.banks)
- pr_info("CPU supports %d MCE banks\n", b);
-
- if (b > MAX_NR_BANKS) {
- pr_warn("Using only %u machine check banks out of %u\n",
- MAX_NR_BANKS, b);
+ if (WARN_ON_ONCE(b > MAX_NR_BANKS))
b = MAX_NR_BANKS;
- }
- /* Don't support asymmetric configurations today */
- WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
- mca_cfg.banks = b;
+ mca_cfg.banks = max(mca_cfg.banks, b);
if (!mce_banks) {
int err = __mcheck_cpu_mce_banks_init();
-
if (err)
return err;
}
@@ -1771,6 +1775,14 @@ static void __mcheck_cpu_init_timer(void)
mce_start_timer(t);
}
+bool filter_mce(struct mce *m)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return amd_filter_mce(m);
+
+ return false;
+}
+
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
@@ -2425,8 +2437,8 @@ static int fake_panic_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
- fake_panic_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
+ "%llu\n");
static int __init mcheck_debugfs_init(void)
{
@@ -2435,8 +2447,8 @@ static int __init mcheck_debugfs_init(void)
dmce = mce_get_debugfs_dir();
if (!dmce)
return -ENOMEM;
- ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
- &fake_panic_fops);
+ ffake_panic = debugfs_create_file_unsafe("fake_panic", 0444, dmce,
+ NULL, &fake_panic_fops);
if (!ffake_panic)
return -ENOMEM;
@@ -2451,6 +2463,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
static int __init mcheck_late_init(void)
{
+ pr_info("Using %d MCE banks\n", mca_cfg.banks);
+
if (mca_cfg.recovery)
static_branch_inc(&mcsafe_key);
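
The machine_check_poll() rework above turns the old single filter into an explicit decision ladder ending in log_it. The same decision can be written as a standalone predicate, sketched below with simplified stand-in bit names; only the ordering of the checks mirrors the hunk.

/*
 * Hedged restatement of the log/skip decision in the reworked
 * machine_check_poll(). EX_STATUS_* are simplified stand-ins for the
 * MCI_STATUS_* bits, poll_uc for MCP_UC, ser for mca_cfg.ser.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_STATUS_VAL	(1ULL << 63)
#define EX_STATUS_UC	(1ULL << 61)
#define EX_STATUS_EN	(1ULL << 60)
#define EX_STATUS_PCC	(1ULL << 57)
#define EX_STATUS_S	(1ULL << 56)

static bool should_log(uint64_t status, bool poll_uc, bool ser)
{
	if (!(status & EX_STATUS_VAL))
		return false;			/* entry not valid */
	if (poll_uc || !(status & EX_STATUS_UC))
		return true;			/* logging everything, or corrected */
	if (!ser)
		return false;			/* no SER support: skip uncorrected */
	if (!(status & EX_STATUS_EN))
		return true;			/* "not enabled" (speculative) error */
	if (!(status & EX_STATUS_PCC) && !(status & EX_STATUS_S))
		return true;			/* UCNA: UC=1 && PCC=0 && S=0 */
	return false;				/* leave it for do_machine_check() */
}
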
diff --git a/arch/x86/kernel/cpu/mce/genpool.c b/arch/x86/kernel/cpu/mce/genpool.c
index 3395549c51d3..64d1d5a00f39 100644
--- a/arch/x86/kernel/cpu/mce/genpool.c
+++ b/arch/x86/kernel/cpu/mce/genpool.c
@@ -99,6 +99,9 @@ int mce_gen_pool_add(struct mce *mce)
{
struct mce_evt_llist *node;
+ if (filter_mce(mce))
+ return -EINVAL;
+
if (!mce_evt_pool)
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 8492ef7d9015..a6026170af92 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -46,8 +46,6 @@
static struct mce i_mce;
static struct dentry *dfs_inj;
-static u8 n_banks;
-
#define MAX_FLAG_OPT_SIZE 4
#define NBCFG 0x44
@@ -528,7 +526,7 @@ static void do_inject(void)
* only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
* Fam10h and later BKDGs.
*/
- if (static_cpu_has(X86_FEATURE_AMD_DCM) &&
+ if (boot_cpu_has(X86_FEATURE_AMD_DCM) &&
b == 4 &&
boot_cpu_data.x86 < 0x17) {
toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
@@ -570,9 +568,15 @@ err:
static int inj_bank_set(void *data, u64 val)
{
struct mce *m = (struct mce *)data;
+ u8 n_banks;
+ u64 cap;
+
+ /* Get bank count on target CPU so we can handle non-uniform values. */
+ rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
+ n_banks = cap & MCG_BANKCNT_MASK;
if (val >= n_banks) {
- pr_err("Non-existent MCE bank: %llu\n", val);
+ pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
return -EINVAL;
}
@@ -665,10 +669,6 @@ static struct dfs_node {
static int __init debugfs_init(void)
{
unsigned int i;
- u64 cap;
-
- rdmsrl(MSR_IA32_MCG_CAP, cap);
- n_banks = cap & MCG_BANKCNT_MASK;
dfs_inj = debugfs_create_dir("mce-inject", NULL);
if (!dfs_inj)
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index af5eab1e65e2..a34b55baa7aa 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -173,4 +173,13 @@ struct mca_msr_regs {
extern struct mca_msr_regs msr_ops;
+/* Decide whether to add MCE record to MCE event pool or filter it out. */
+extern bool filter_mce(struct mce *m);
+
+#ifdef CONFIG_X86_MCE_AMD
+extern bool amd_filter_mce(struct mce *m);
+#else
+static inline bool amd_filter_mce(struct mce *m) { return false; };
+#endif
+
#endif /* __X86_MCE_INTERNAL_H__ */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 5260185cbf7b..c321f4f513f9 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
if (ustate == UCODE_ERROR) {
error = -1;
break;
- } else if (ustate == UCODE_OK)
+ } else if (ustate == UCODE_NEW) {
apply_microcode_on_target(cpu);
+ }
}
return error;
@@ -427,7 +428,7 @@ static int do_microcode_update(const void __user *buf, size_t size)
static int microcode_open(struct inode *inode, struct file *file)
{
- return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
+ return capable(CAP_SYS_RAWIO) ? stream_open(inode, file) : -EPERM;
}
static ssize_t microcode_write(struct file *file, const char __user *buf,
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 16936a24795c..a44bdbe7c55e 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/uio.h>
#include <linux/mm.h>
#include <asm/microcode_intel.h>
@@ -861,32 +862,33 @@ out:
return ret;
}
-static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
- int (*get_ucode_data)(void *, const void *, size_t))
+static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
- int new_rev = uci->cpu_sig.rev;
- unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
- unsigned int csig, cpf;
enum ucode_state ret = UCODE_OK;
+ int new_rev = uci->cpu_sig.rev;
+ u8 *new_mc = NULL, *mc = NULL;
+ unsigned int csig, cpf;
- while (leftover) {
+ while (iov_iter_count(iter)) {
struct microcode_header_intel mc_header;
- unsigned int mc_size;
+ unsigned int mc_size, data_size;
+ u8 *data;
- if (leftover < sizeof(mc_header)) {
- pr_err("error! Truncated header in microcode data file\n");
+ if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
+ pr_err("error! Truncated or inaccessible header in microcode data file\n");
break;
}
- if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
- break;
-
mc_size = get_totalsize(&mc_header);
- if (!mc_size || mc_size > leftover) {
- pr_err("error! Bad data in microcode data file\n");
+ if (mc_size < sizeof(mc_header)) {
+ pr_err("error! Bad data in microcode data file (totalsize too small)\n");
+ break;
+ }
+ data_size = mc_size - sizeof(mc_header);
+ if (data_size > iov_iter_count(iter)) {
+ pr_err("error! Bad data in microcode data file (truncated file?)\n");
break;
}
@@ -899,7 +901,9 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
curr_mc_size = mc_size;
}
- if (get_ucode_data(mc, ucode_ptr, mc_size) ||
+ memcpy(mc, &mc_header, sizeof(mc_header));
+ data = mc + sizeof(mc_header);
+ if (!copy_from_iter_full(data, data_size, iter) ||
microcode_sanity_check(mc, 1) < 0) {
break;
}
@@ -914,14 +918,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
mc = NULL; /* trigger new vmalloc */
ret = UCODE_NEW;
}
-
- ucode_ptr += mc_size;
- leftover -= mc_size;
}
vfree(mc);
- if (leftover) {
+ if (iov_iter_count(iter)) {
vfree(new_mc);
return UCODE_ERROR;
}
@@ -945,12 +946,6 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
return ret;
}
-static int get_ucode_fw(void *to, const void *from, size_t n)
-{
- memcpy(to, from, n);
- return 0;
-}
-
static bool is_blacklisted(unsigned int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -977,10 +972,12 @@ static bool is_blacklisted(unsigned int cpu)
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
bool refresh_fw)
{
- char name[30];
struct cpuinfo_x86 *c = &cpu_data(cpu);
const struct firmware *firmware;
+ struct iov_iter iter;
enum ucode_state ret;
+ struct kvec kvec;
+ char name[30];
if (is_blacklisted(cpu))
return UCODE_NFOUND;
@@ -993,26 +990,30 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
return UCODE_NFOUND;
}
- ret = generic_load_microcode(cpu, (void *)firmware->data,
- firmware->size, &get_ucode_fw);
+ kvec.iov_base = (void *)firmware->data;
+ kvec.iov_len = firmware->size;
+ iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
+ ret = generic_load_microcode(cpu, &iter);
release_firmware(firmware);
return ret;
}
-static int get_ucode_user(void *to, const void *from, size_t n)
-{
- return copy_from_user(to, from, n);
-}
-
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
+ struct iov_iter iter;
+ struct iovec iov;
+
if (is_blacklisted(cpu))
return UCODE_NFOUND;
- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+ iov.iov_base = (void __user *)buf;
+ iov.iov_len = size;
+ iov_iter_init(&iter, WRITE, &iov, 1, size);
+
+ return generic_load_microcode(cpu, &iter);
}
static struct microcode_ops microcode_intel_ops = {
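
The microcode/intel.c conversion makes generic_load_microcode() consume an iov_iter, so the firmware-blob and user-copy paths only differ in how the iterator is built (iov_iter_kvec() vs. iov_iter_init()). The kvec wrapping is sketched below for a generic in-memory blob; struct example_hdr and its size checks are assumptions, the iov_iter calls are the ones the hunk uses.

/*
 * Hedged sketch of the kvec-backed iov_iter usage above for a blob
 * with a fixed-size header. example_hdr is an assumption.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>

struct example_hdr {
	u32 total_size;
	u32 version;
};

static int example_parse_blob(const void *data, size_t size)
{
	struct example_hdr hdr;
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = (void *)data;
	kvec.iov_len = size;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, size);

	/* fails unless the whole header could be copied */
	if (!copy_from_iter_full(&hdr, sizeof(hdr), &iter))
		return -EINVAL;

	if (hdr.total_size < sizeof(hdr) ||
	    hdr.total_size - sizeof(hdr) > iov_iter_count(&iter))
		return -EINVAL;		/* truncated or bogus size field */

	return 0;
}
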
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 2c8522a39ed5..cb2e49810d68 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -35,11 +35,11 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
"fpu_exception\t: %s\n"
"cpuid level\t: %d\n"
"wp\t\t: yes\n",
- static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
- static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
- static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
- static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
- static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
+ boot_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
+ boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+ boot_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
c->cpuid_level);
}
#else
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 2dbd990a2eb7..89320c0396b1 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -342,10 +342,10 @@ int update_domains(struct rdt_resource *r, int closid)
if (cpumask_empty(cpu_mask) || mba_sc)
goto done;
cpu = get_cpu();
- /* Update CBM on this cpu if it's in cpu_mask. */
+ /* Update resource control msr on this CPU if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
rdt_ctrl_update(&msr_param);
- /* Update CBM on other cpus. */
+ /* Update resource control msr on other CPUs. */
smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
put_cpu();
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 85212a32b54d..333c177a2471 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2516,100 +2516,127 @@ static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
bitmap_clear(val, zero_bit, cbm_len - zero_bit);
}
-/**
- * rdtgroup_init_alloc - Initialize the new RDT group's allocations
- *
- * A new RDT group is being created on an allocation capable (CAT)
- * supporting system. Set this group up to start off with all usable
- * allocations. That is, all shareable and unused bits.
+/*
+ * Initialize cache resources per RDT domain
*
- * All-zero CBM is invalid. If there are no more shareable bits available
- * on any domain then the entire allocation will fail.
+ * Set the RDT domain up to start off with all usable allocations. That is,
+ * all shareable and unused bits. All-zero CBM is invalid.
*/
-static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
+ u32 closid)
{
struct rdt_resource *r_cdp = NULL;
struct rdt_domain *d_cdp = NULL;
u32 used_b = 0, unused_b = 0;
- u32 closid = rdtgrp->closid;
- struct rdt_resource *r;
unsigned long tmp_cbm;
enum rdtgrp_mode mode;
- struct rdt_domain *d;
u32 peer_ctl, *ctrl;
- int i, ret;
+ int i;
- for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
- list_for_each_entry(d, &r->domains, list) {
- rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
- d->have_new_ctrl = false;
- d->new_ctrl = r->cache.shareable_bits;
- used_b = r->cache.shareable_bits;
- ctrl = d->ctrl_val;
- for (i = 0; i < closids_supported(); i++, ctrl++) {
- if (closid_allocated(i) && i != closid) {
- mode = rdtgroup_mode_by_closid(i);
- if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
- break;
- /*
- * If CDP is active include peer
- * domain's usage to ensure there
- * is no overlap with an exclusive
- * group.
- */
- if (d_cdp)
- peer_ctl = d_cdp->ctrl_val[i];
- else
- peer_ctl = 0;
- used_b |= *ctrl | peer_ctl;
- if (mode == RDT_MODE_SHAREABLE)
- d->new_ctrl |= *ctrl | peer_ctl;
- }
- }
- if (d->plr && d->plr->cbm > 0)
- used_b |= d->plr->cbm;
- unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
- unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
- d->new_ctrl |= unused_b;
- /*
- * Force the initial CBM to be valid, user can
- * modify the CBM based on system availability.
- */
- cbm_ensure_valid(&d->new_ctrl, r);
+ rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
+ d->have_new_ctrl = false;
+ d->new_ctrl = r->cache.shareable_bits;
+ used_b = r->cache.shareable_bits;
+ ctrl = d->ctrl_val;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ if (closid_allocated(i) && i != closid) {
+ mode = rdtgroup_mode_by_closid(i);
+ if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
+ break;
/*
- * Assign the u32 CBM to an unsigned long to ensure
- * that bitmap_weight() does not access out-of-bound
- * memory.
+ * If CDP is active include peer domain's
+ * usage to ensure there is no overlap
+ * with an exclusive group.
*/
- tmp_cbm = d->new_ctrl;
- if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("No space on %s:%d\n",
- r->name, d->id);
- return -ENOSPC;
- }
- d->have_new_ctrl = true;
+ if (d_cdp)
+ peer_ctl = d_cdp->ctrl_val[i];
+ else
+ peer_ctl = 0;
+ used_b |= *ctrl | peer_ctl;
+ if (mode == RDT_MODE_SHAREABLE)
+ d->new_ctrl |= *ctrl | peer_ctl;
}
}
+ if (d->plr && d->plr->cbm > 0)
+ used_b |= d->plr->cbm;
+ unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
+ unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
+ d->new_ctrl |= unused_b;
+ /*
+ * Force the initial CBM to be valid, user can
+ * modify the CBM based on system availability.
+ */
+ cbm_ensure_valid(&d->new_ctrl, r);
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure that
+ * bitmap_weight() does not access out-of-bound memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
+ rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
+ return -ENOSPC;
+ }
+ d->have_new_ctrl = true;
+
+ return 0;
+}
+
+/*
+ * Initialize cache resources with default values.
+ *
+ * A new RDT group is being created on an allocation capable (CAT)
+ * supporting system. Set this group up to start off with all usable
+ * allocations.
+ *
+ * If there are no more shareable bits available on any domain then
+ * the entire allocation will fail.
+ */
+static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
+{
+ struct rdt_domain *d;
+ int ret;
+
+ list_for_each_entry(d, &r->domains, list) {
+ ret = __init_one_rdt_domain(d, r, closid);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Initialize MBA resource with default values. */
+static void rdtgroup_init_mba(struct rdt_resource *r)
+{
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
+ d->have_new_ctrl = true;
+ }
+}
+
+/* Initialize the RDT group's allocations. */
+static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
+{
+ struct rdt_resource *r;
+ int ret;
for_each_alloc_enabled_rdt_resource(r) {
- /*
- * Only initialize default allocations for CBM cache
- * resources
- */
- if (r->rid == RDT_RESOURCE_MBA)
- continue;
+ if (r->rid == RDT_RESOURCE_MBA) {
+ rdtgroup_init_mba(r);
+ } else {
+ ret = rdtgroup_init_cat(r, rdtgrp->closid);
+ if (ret < 0)
+ return ret;
+ }
+
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
+
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
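
__init_one_rdt_domain() above seeds a new group's CBM with the shareable bits plus every bit no other allocated CLOSID (or CDP peer) is using; the unused bits fall out of one XOR against the full cache mask. A standalone model with made-up values is sketched below (the 12-bit mask and the usage numbers are assumptions, and shareable-mode peers are ignored for brevity).

/*
 * Standalone model (made-up values) of the default-CBM arithmetic in
 * __init_one_rdt_domain(): new_ctrl = shareable_bits | unused bits.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int cbm_len = 12;	/* assumed cache mask width */
	const uint32_t shareable = 0x00f;	/* assumed shareable_bits */
	const uint32_t others[] = { 0x0f0, 0x300 };	/* assumed peer usage */
	const uint32_t mask = (1u << cbm_len) - 1;

	uint32_t used = shareable;
	uint32_t new_ctrl = shareable;
	unsigned int i;

	for (i = 0; i < sizeof(others) / sizeof(others[0]); i++)
		used |= others[i];

	new_ctrl |= (used ^ mask) & mask;	/* the unused_b step above */
	printf("default CBM: 0x%03x\n", new_ctrl);	/* 0xc0f here */
	return 0;
}
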
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 17ffc869cab8..a96ca8584803 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -204,8 +204,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
* another range split. So add extra two slots here.
*/
nr_ranges += 2;
- cmem = vzalloc(sizeof(struct crash_mem) +
- sizeof(struct crash_mem_range) * nr_ranges);
+ cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
if (!cmem)
return NULL;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index cd53f3030e40..64a59d726639 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -34,14 +34,14 @@ const char *stack_type_name(enum stack_type type)
static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack);
+ unsigned long *begin = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack > end)
return false;
info->type = STACK_TYPE_IRQ;
@@ -59,14 +59,14 @@ static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack);
+ unsigned long *begin = (unsigned long *)this_cpu_read(softirq_stack_ptr);
unsigned long *end = begin + (THREAD_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack > end)
return false;
info->type = STACK_TYPE_SOFTIRQ;
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 5cdb9e84da57..753b8cfe8b8a 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -16,23 +16,21 @@
#include <linux/bug.h>
#include <linux/nmi.h>
+#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
-static char *exception_stack_names[N_EXCEPTION_STACKS] = {
- [ DOUBLEFAULT_STACK-1 ] = "#DF",
- [ NMI_STACK-1 ] = "NMI",
- [ DEBUG_STACK-1 ] = "#DB",
- [ MCE_STACK-1 ] = "#MC",
-};
-
-static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
- [DEBUG_STACK - 1] = DEBUG_STKSZ
+static const char * const exception_stack_names[] = {
+ [ ESTACK_DF ] = "#DF",
+ [ ESTACK_NMI ] = "NMI",
+ [ ESTACK_DB2 ] = "#DB2",
+ [ ESTACK_DB1 ] = "#DB1",
+ [ ESTACK_DB ] = "#DB",
+ [ ESTACK_MCE ] = "#MC",
};
const char *stack_type_name(enum stack_type type)
{
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
if (type == STACK_TYPE_IRQ)
return "IRQ";
@@ -52,43 +50,84 @@ const char *stack_type_name(enum stack_type type)
return NULL;
}
+/**
+ * struct estack_pages - Page descriptor for exception stacks
+ * @offs: Offset from the start of the exception stack area
+ * @size: Size of the exception stack
+ * @type: Type to store in the stack_info struct
+ */
+struct estack_pages {
+ u32 offs;
+ u16 size;
+ u16 type;
+};
+
+#define EPAGERANGE(st) \
+ [PFN_DOWN(CEA_ESTACK_OFFS(st)) ... \
+ PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = { \
+ .offs = CEA_ESTACK_OFFS(st), \
+ .size = CEA_ESTACK_SIZE(st), \
+ .type = STACK_TYPE_EXCEPTION + ESTACK_ ##st, }
+
+/*
+ * Array of exception stack page descriptors. If the stack is larger than
+ * PAGE_SIZE, all pages covering a particular stack will have the same
+ * info. The guard pages, including the not-mapped DB2 stack, are
+ * zeroed out.
+ */
+static const
+struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
+ EPAGERANGE(DF),
+ EPAGERANGE(NMI),
+ EPAGERANGE(DB1),
+ EPAGERANGE(DB),
+ EPAGERANGE(MCE),
+};
+
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *begin, *end;
+ unsigned long begin, end, stk = (unsigned long)stack;
+ const struct estack_pages *ep;
struct pt_regs *regs;
- unsigned k;
+ unsigned int k;
- BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
+ BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);
- for (k = 0; k < N_EXCEPTION_STACKS; k++) {
- end = (unsigned long *)raw_cpu_ptr(&orig_ist)->ist[k];
- begin = end - (exception_stack_sizes[k] / sizeof(long));
- regs = (struct pt_regs *)end - 1;
-
- if (stack <= begin || stack >= end)
- continue;
+ begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
+ end = begin + sizeof(struct cea_exception_stacks);
+ /* Bail if @stack is outside the exception stack area. */
+ if (stk < begin || stk >= end)
+ return false;
- info->type = STACK_TYPE_EXCEPTION + k;
- info->begin = begin;
- info->end = end;
- info->next_sp = (unsigned long *)regs->sp;
+ /* Calc page offset from start of exception stacks */
+ k = (stk - begin) >> PAGE_SHIFT;
+ /* Lookup the page descriptor */
+ ep = &estack_pages[k];
+ /* Guard page? */
+ if (!ep->size)
+ return false;
- return true;
- }
+ begin += (unsigned long)ep->offs;
+ end = begin + (unsigned long)ep->size;
+ regs = (struct pt_regs *)end - 1;
- return false;
+ info->type = ep->type;
+ info->begin = (unsigned long *)begin;
+ info->end = (unsigned long *)end;
+ info->next_sp = (unsigned long *)regs->sp;
+ return true;
}
static bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
- unsigned long *end = (unsigned long *)this_cpu_read(irq_stack_ptr);
+ unsigned long *end = (unsigned long *)this_cpu_read(hardirq_stack_ptr);
unsigned long *begin = end - (IRQ_STACK_SIZE / sizeof(long));
/*
* This is a software stack, so 'end' can be a valid stack pointer.
* It just means the stack is empty.
*/
- if (stack <= begin || stack > end)
+ if (stack < begin || stack >= end)
return false;
info->type = STACK_TYPE_IRQ;
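
The rewritten in_exception_stack() above replaces the per-stack loop with one range check plus a table lookup: the offset of the stack pointer within the per-CPU exception-stack area, shifted right by PAGE_SHIFT, indexes estack_pages, and a descriptor with size zero means the pointer landed on a guard page. A simplified, self-contained sketch of that lookup scheme (all names and sizes here are illustrative):

#define EX_PAGE_SHIFT	12
#define EX_AREA_PAGES	16

struct page_desc {
	unsigned int offs;	/* start of the owning stack within the area */
	unsigned short size;	/* 0 marks a guard page */
	unsigned short type;
};

/* One entry per page of the area; zero-initialized entries are guard pages. */
static const struct page_desc page_descs[EX_AREA_PAGES];

/* Return the descriptor for @addr, or NULL for guard pages / out of range. */
static const struct page_desc *lookup_stack_page(unsigned long area_base,
						 unsigned long addr)
{
	unsigned long idx;

	if (addr < area_base ||
	    addr >= area_base + ((unsigned long)EX_AREA_PAGES << EX_PAGE_SHIFT))
		return NULL;

	idx = (addr - area_base) >> EX_PAGE_SHIFT;
	return page_descs[idx].size ? &page_descs[idx] : NULL;
}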
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 2879e234e193..76dd605ee2a3 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -73,12 +73,13 @@ EXPORT_SYMBOL(pci_mem_start);
* This function checks if any part of the range <start,end> is mapped
* with type.
*/
-bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
+static bool _e820__mapped_any(struct e820_table *table,
+ u64 start, u64 end, enum e820_type type)
{
int i;
- for (i = 0; i < e820_table->nr_entries; i++) {
- struct e820_entry *entry = &e820_table->entries[i];
+ for (i = 0; i < table->nr_entries; i++) {
+ struct e820_entry *entry = &table->entries[i];
if (type && entry->type != type)
continue;
@@ -88,6 +89,17 @@ bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
}
return 0;
}
+
+bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type)
+{
+ return _e820__mapped_any(e820_table_firmware, start, end, type);
+}
+EXPORT_SYMBOL_GPL(e820__mapped_raw_any);
+
+bool e820__mapped_any(u64 start, u64 end, enum e820_type type)
+{
+ return _e820__mapped_any(e820_table, start, end, type);
+}
EXPORT_SYMBOL_GPL(e820__mapped_any);
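
The e820 change keeps the existing e820__mapped_any() semantics and adds e820__mapped_raw_any(), which runs the same range check against the unmodified firmware-provided table. A hypothetical caller (not from this patch) might use it like this:

#include <linux/types.h>
#include <asm/e820/api.h>
#include <asm/e820/types.h>

/*
 * Hypothetical example: consult the untouched firmware map rather than the
 * (possibly trimmed or modified) kernel map before treating a range as RAM.
 */
static bool firmware_reported_ram(u64 start, u64 size)
{
	return e820__mapped_raw_any(start, start + size, E820_TYPE_RAM);
}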
/*
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 50d5848bf22e..6c4f01540833 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -525,7 +525,8 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I945G_IDS(&gen3_early_ops),
INTEL_I945GM_IDS(&gen3_early_ops),
INTEL_VLV_IDS(&gen6_early_ops),
- INTEL_PINEVIEW_IDS(&gen3_early_ops),
+ INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
+ INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
INTEL_I965G_IDS(&gen3_early_ops),
INTEL_G33_IDS(&gen3_early_ops),
INTEL_I965GM_IDS(&gen3_early_ops),
@@ -547,6 +548,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_GLK_IDS(&gen9_early_ops),
INTEL_CNL_IDS(&gen9_early_ops),
INTEL_ICL_11_IDS(&gen11_early_ops),
+ INTEL_EHL_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 2e5003fef51a..ce243f76bdb7 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -101,24 +101,21 @@ static void __kernel_fpu_begin(void)
kernel_fpu_disable();
- if (fpu->initialized) {
- /*
- * Ignore return value -- we don't care if reg state
- * is clobbered.
- */
- copy_fpregs_to_fpstate(fpu);
- } else {
- __cpu_invalidate_fpregs_state();
+ if (current->mm) {
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ /*
+ * Ignore return value -- we don't care if reg state
+ * is clobbered.
+ */
+ copy_fpregs_to_fpstate(fpu);
+ }
}
+ __cpu_invalidate_fpregs_state();
}
static void __kernel_fpu_end(void)
{
- struct fpu *fpu = &current->thread.fpu;
-
- if (fpu->initialized)
- copy_kernel_to_fpregs(&fpu->state);
-
kernel_fpu_enable();
}
@@ -145,15 +142,17 @@ void fpu__save(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
- preempt_disable();
+ fpregs_lock();
trace_x86_fpu_before_save(fpu);
- if (fpu->initialized) {
+
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
if (!copy_fpregs_to_fpstate(fpu)) {
copy_kernel_to_fpregs(&fpu->state);
}
}
+
trace_x86_fpu_after_save(fpu);
- preempt_enable();
+ fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu__save);
@@ -186,11 +185,14 @@ void fpstate_init(union fpregs_state *state)
}
EXPORT_SYMBOL_GPL(fpstate_init);
-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
+ struct fpu *dst_fpu = &dst->thread.fpu;
+ struct fpu *src_fpu = &src->thread.fpu;
+
dst_fpu->last_cpu = -1;
- if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
+ if (!static_cpu_has(X86_FEATURE_FPU))
return 0;
WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -202,16 +204,23 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
- * Save current FPU registers directly into the child
- * FPU context, without any memory-to-memory copying.
+ * If the FPU registers are not current just memcpy() the state.
+ * Otherwise save current FPU registers directly into the child's FPU
+ * context, without any memory-to-memory copying.
*
* ( The function 'fails' in the FNSAVE case, which destroys
- * register contents so we have to copy them back. )
+ * register contents so we have to load them back. )
*/
- if (!copy_fpregs_to_fpstate(dst_fpu)) {
- memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
- copy_kernel_to_fpregs(&src_fpu->state);
- }
+ fpregs_lock();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
+
+ else if (!copy_fpregs_to_fpstate(dst_fpu))
+ copy_kernel_to_fpregs(&dst_fpu->state);
+
+ fpregs_unlock();
+
+ set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
trace_x86_fpu_copy_src(src_fpu);
trace_x86_fpu_copy_dst(dst_fpu);
@@ -223,20 +232,14 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Activate the current task's in-memory FPU context,
* if it has not been used before:
*/
-void fpu__initialize(struct fpu *fpu)
+static void fpu__initialize(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu);
- if (!fpu->initialized) {
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
-
- trace_x86_fpu_activate_state(fpu);
- /* Safe to do for the current task: */
- fpu->initialized = 1;
- }
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ fpstate_init(&fpu->state);
+ trace_x86_fpu_init_state(fpu);
}
-EXPORT_SYMBOL_GPL(fpu__initialize);
/*
* This function must be called before we read a task's fpstate.
@@ -248,32 +251,20 @@ EXPORT_SYMBOL_GPL(fpu__initialize);
*
* - or it's called for stopped tasks (ptrace), in which case the
* registers were already saved by the context-switch code when
- * the task scheduled out - we only have to initialize the registers
- * if they've never been initialized.
+ * the task scheduled out.
*
* If the task has used the FPU before then save it.
*/
void fpu__prepare_read(struct fpu *fpu)
{
- if (fpu == &current->thread.fpu) {
+ if (fpu == &current->thread.fpu)
fpu__save(fpu);
- } else {
- if (!fpu->initialized) {
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
-
- trace_x86_fpu_activate_state(fpu);
- /* Safe to do for current and for stopped child tasks: */
- fpu->initialized = 1;
- }
- }
}
/*
* This function must be called before we write a task's fpstate.
*
- * If the task has used the FPU before then invalidate any cached FPU registers.
- * If the task has not used the FPU before then initialize its fpstate.
+ * Invalidate any cached FPU registers.
*
* After this function call, after registers in the fpstate are
* modified and the child task has woken up, the child task will
@@ -290,44 +281,11 @@ void fpu__prepare_write(struct fpu *fpu)
*/
WARN_ON_FPU(fpu == &current->thread.fpu);
- if (fpu->initialized) {
- /* Invalidate any cached state: */
- __fpu_invalidate_fpregs_state(fpu);
- } else {
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
-
- trace_x86_fpu_activate_state(fpu);
- /* Safe to do for stopped child tasks: */
- fpu->initialized = 1;
- }
+ /* Invalidate any cached state: */
+ __fpu_invalidate_fpregs_state(fpu);
}
/*
- * 'fpu__restore()' is called to copy FPU registers from
- * the FPU fpstate to the live hw registers and to activate
- * access to the hardware registers, so that FPU instructions
- * can be used afterwards.
- *
- * Must be called with kernel preemption disabled (for example
- * with local interrupts disabled, as it is in the case of
- * do_device_not_available()).
- */
-void fpu__restore(struct fpu *fpu)
-{
- fpu__initialize(fpu);
-
- /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
- kernel_fpu_disable();
- trace_x86_fpu_before_restore(fpu);
- fpregs_activate(fpu);
- copy_kernel_to_fpregs(&fpu->state);
- trace_x86_fpu_after_restore(fpu);
- kernel_fpu_enable();
-}
-EXPORT_SYMBOL_GPL(fpu__restore);
-
-/*
* Drops current FPU state: deactivates the fpregs and
* the fpstate. NOTE: it still leaves previous contents
* in the fpregs in the eager-FPU case.
@@ -341,17 +299,13 @@ void fpu__drop(struct fpu *fpu)
preempt_disable();
if (fpu == &current->thread.fpu) {
- if (fpu->initialized) {
- /* Ignore delayed exceptions from user space */
- asm volatile("1: fwait\n"
- "2:\n"
- _ASM_EXTABLE(1b, 2b));
- fpregs_deactivate(fpu);
- }
+ /* Ignore delayed exceptions from user space */
+ asm volatile("1: fwait\n"
+ "2:\n"
+ _ASM_EXTABLE(1b, 2b));
+ fpregs_deactivate(fpu);
}
- fpu->initialized = 0;
-
trace_x86_fpu_dropped(fpu);
preempt_enable();
@@ -363,6 +317,8 @@ void fpu__drop(struct fpu *fpu)
*/
static inline void copy_init_fpstate_to_fpregs(void)
{
+ fpregs_lock();
+
if (use_xsave())
copy_kernel_to_xregs(&init_fpstate.xsave, -1);
else if (static_cpu_has(X86_FEATURE_FXSR))
@@ -372,6 +328,9 @@ static inline void copy_init_fpstate_to_fpregs(void)
if (boot_cpu_has(X86_FEATURE_OSPKE))
copy_init_pkru_to_fpregs();
+
+ fpregs_mark_activate();
+ fpregs_unlock();
}
/*
@@ -389,16 +348,52 @@ void fpu__clear(struct fpu *fpu)
/*
* Make sure fpstate is cleared and initialized.
*/
- if (static_cpu_has(X86_FEATURE_FPU)) {
- preempt_disable();
- fpu__initialize(fpu);
- user_fpu_begin();
+ fpu__initialize(fpu);
+ if (static_cpu_has(X86_FEATURE_FPU))
copy_init_fpstate_to_fpregs();
- preempt_enable();
- }
}
/*
+ * Load FPU context before returning to userspace.
+ */
+void switch_fpu_return(void)
+{
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return;
+
+ __fpregs_load_activate();
+}
+EXPORT_SYMBOL_GPL(switch_fpu_return);
+
+#ifdef CONFIG_X86_DEBUG_FPU
+/*
+ * If current FPU state according to its tracking (loaded FPU context on this
+ * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
+ * loaded on return to userland.
+ */
+void fpregs_assert_state_consistent(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ return;
+
+ WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
+}
+EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
+#endif
+
+void fpregs_mark_activate(void)
+{
+ struct fpu *fpu = &current->thread.fpu;
+
+ fpregs_activate(fpu);
+ fpu->last_cpu = smp_processor_id();
+ clear_thread_flag(TIF_NEED_FPU_LOAD);
+}
+EXPORT_SYMBOL_GPL(fpregs_mark_activate);
+
+/*
* x87 math exception handling:
*/
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 6abd83572b01..20d8fa7124c7 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -239,8 +239,6 @@ static void __init fpu__init_system_ctx_switch(void)
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
-
- WARN_ON_FPU(current->thread.fpu.initialized);
}
/*
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index bc02f5144b95..d652b939ccfb 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -15,16 +15,12 @@
*/
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- struct fpu *target_fpu = &target->thread.fpu;
-
- return target_fpu->initialized ? regset->n : 0;
+ return regset->n;
}
int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- struct fpu *target_fpu = &target->thread.fpu;
-
- if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
+ if (boot_cpu_has(X86_FEATURE_FXSR))
return regset->n;
else
return 0;
@@ -269,11 +265,10 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
memcpy(&to[i], &from[i], sizeof(to[0]));
}
-void convert_to_fxsr(struct task_struct *tsk,
+void convert_to_fxsr(struct fxregs_state *fxsave,
const struct user_i387_ia32_struct *env)
{
- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
int i;
@@ -350,7 +345,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
if (!ret)
- convert_to_fxsr(target, &env);
+ convert_to_fxsr(&target->thread.fpu.state.fxsave, &env);
/*
* update the header bit in the xsave header, indicating the
@@ -371,16 +366,9 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
- struct fpu *fpu = &tsk->thread.fpu;
- int fpvalid;
-
- fpvalid = fpu->initialized;
- if (fpvalid)
- fpvalid = !fpregs_get(tsk, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- ufpu, NULL);
- return fpvalid;
+ return !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct),
+ ufpu, NULL);
}
EXPORT_SYMBOL(dump_fpu);
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index f6a1d299627c..5a8d118bc423 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -92,13 +92,13 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
return err;
err |= __put_user(FP_XSTATE_MAGIC2,
- (__u32 *)(buf + fpu_user_xstate_size));
+ (__u32 __user *)(buf + fpu_user_xstate_size));
/*
* Read the xfeatures which we copied (directly from the cpu or
* from the state in task struct) to the user buffers.
*/
- err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
+ err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
/*
* For legacy compatible, we always set FP/SSE bits in the bit
@@ -113,7 +113,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
*/
xfeatures |= XFEATURE_MASK_FPSSE;
- err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
+ err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
return err;
}
@@ -144,9 +144,10 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
* buf == buf_fx for 64-bit frames and 32-bit fsave frame.
* buf != buf_fx for 32-bit frames with fxstate.
*
- * If the fpu, extended register state is live, save the state directly
- * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
- * copy the thread's fpu state to the user frame starting at 'buf_fx'.
+ * Try to save it directly to the user frame with the page fault handler
+ * disabled. If this fails, take the slow path where the FPU state is first
+ * saved to the task's fpu->state and then copied to the user frame pointed
+ * to by the aligned pointer 'buf_fx'.
*
* If this is a 32-bit frame with fxstate, put a fsave header before
* the aligned state at 'buf_fx'.
@@ -156,10 +157,9 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
*/
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
- struct fpu *fpu = &current->thread.fpu;
- struct xregs_state *xsave = &fpu->state.xsave;
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
+ int ret;
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
@@ -172,28 +172,34 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
- if (fpu->initialized || using_compacted_format()) {
- /* Save the live register state to the user directly. */
- if (copy_fpregs_to_sigframe(buf_fx))
- return -1;
- /* Update the thread's fxstate to save the fsave header. */
- if (ia32_fxstate)
- copy_fxregs_to_kernel(fpu);
- } else {
- /*
- * It is a *bug* if kernel uses compacted-format for xsave
- * area and we copy it out directly to a signal frame. It
- * should have been handled above by saving the registers
- * directly.
- */
- if (boot_cpu_has(X86_FEATURE_XSAVES)) {
- WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
- return -1;
- }
-
- fpstate_sanitize_xstate(fpu);
- if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
- return -1;
+retry:
+ /*
+ * Load the FPU registers if they are not valid for the current task.
+ * With a valid FPU state we can attempt to save the state directly to
+ * userland's stack frame, which will likely succeed. If it does not,
+ * resolve the fault in the user memory and try again.
+ */
+ fpregs_lock();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __fpregs_load_activate();
+
+ pagefault_disable();
+ ret = copy_fpregs_to_sigframe(buf_fx);
+ pagefault_enable();
+ fpregs_unlock();
+
+ if (ret) {
+ int aligned_size;
+ int nr_pages;
+
+ aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
+ nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
+
+ ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
+ NULL, FOLL_WRITE);
+ if (ret == nr_pages)
+ goto retry;
+ return -EFAULT;
}
/* Save the fsave header for the 32-bit frames. */
@@ -207,11 +213,11 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
}
static inline void
-sanitize_restored_xstate(struct task_struct *tsk,
+sanitize_restored_xstate(union fpregs_state *state,
struct user_i387_ia32_struct *ia32_env,
u64 xfeatures, int fx_only)
{
- struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+ struct xregs_state *xsave = &state->xsave;
struct xstate_header *header = &xsave->header;
if (use_xsave()) {
@@ -238,17 +244,18 @@ sanitize_restored_xstate(struct task_struct *tsk,
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
- convert_to_fxsr(tsk, ia32_env);
+ if (ia32_env)
+ convert_to_fxsr(&state->fxsave, ia32_env);
}
}
/*
* Restore the extended state if present. Otherwise, restore the FP/SSE state.
*/
-static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
if (use_xsave()) {
- if ((unsigned long)buf % 64 || fx_only) {
+ if (fx_only) {
u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
return copy_user_to_fxregs(buf);
@@ -266,12 +273,15 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
+ struct user_i387_ia32_struct *envp = NULL;
+ int state_size = fpu_kernel_xstate_size;
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- int state_size = fpu_kernel_xstate_size;
+ struct user_i387_ia32_struct env;
u64 xfeatures = 0;
int fx_only = 0;
+ int ret = 0;
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
@@ -284,8 +294,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (!access_ok(buf, size))
return -EACCES;
- fpu__initialize(fpu);
-
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
0, sizeof(struct user_i387_ia32_struct),
@@ -308,61 +316,101 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
}
}
+ /*
+ * The current state of the FPU registers does not matter. By setting
+ * TIF_NEED_FPU_LOAD unconditionally it is ensured that our xstate
+ * is not modified on context switch and that the xstate is considered
+ * to be loaded again on return to userland (overriding last_cpu avoids
+ * the optimisation).
+ */
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ __fpu_invalidate_fpregs_state(fpu);
+
+ if ((unsigned long)buf_fx % 64)
+ fx_only = 1;
+ /*
+ * For 32-bit frames with fxstate, copy the fxstate so it can be
+ * reconstructed later.
+ */
if (ia32_fxstate) {
+ ret = __copy_from_user(&env, buf, sizeof(env));
+ if (ret)
+ goto err_out;
+ envp = &env;
+ } else {
/*
- * For 32-bit frames with fxstate, copy the user state to the
- * thread's fpu state, reconstruct fxstate from the fsave
- * header. Validate and sanitize the copied state.
+ * Attempt to restore the FPU registers directly from user
+ * memory. For that to succeed, the user access cannot cause
+ * page faults. If it does, fall back to the slow path below,
+ * going through the kernel buffer with the pagefault handler
+ * enabled.
*/
- struct user_i387_ia32_struct env;
- int err = 0;
+ fpregs_lock();
+ pagefault_disable();
+ ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
+ pagefault_enable();
+ if (!ret) {
+ fpregs_mark_activate();
+ fpregs_unlock();
+ return 0;
+ }
+ fpregs_unlock();
+ }
- /*
- * Drop the current fpu which clears fpu->initialized. This ensures
- * that any context-switch during the copy of the new state,
- * avoids the intermediate state from getting restored/saved.
- * Thus avoiding the new restored state from getting corrupted.
- * We will be ready to restore/save the state only after
- * fpu->initialized is again set.
- */
- fpu__drop(fpu);
+
+ if (use_xsave() && !fx_only) {
+ u64 init_bv = xfeatures_mask & ~xfeatures;
if (using_compacted_format()) {
- err = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
} else {
- err = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+ ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
- if (!err && state_size > offsetof(struct xregs_state, header))
- err = validate_xstate_header(&fpu->state.xsave.header);
+ if (!ret && state_size > offsetof(struct xregs_state, header))
+ ret = validate_xstate_header(&fpu->state.xsave.header);
}
+ if (ret)
+ goto err_out;
- if (err || __copy_from_user(&env, buf, sizeof(env))) {
- fpstate_init(&fpu->state);
- trace_x86_fpu_init_state(fpu);
- err = -1;
- } else {
- sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+ sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
+
+ fpregs_lock();
+ if (unlikely(init_bv))
+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+ ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);
+
+ } else if (use_fxsr()) {
+ ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
+ if (ret) {
+ ret = -EFAULT;
+ goto err_out;
}
- local_bh_disable();
- fpu->initialized = 1;
- fpu__restore(fpu);
- local_bh_enable();
+ sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only);
- return err;
- } else {
- /*
- * For 64-bit frames and 32-bit fsave frames, restore the user
- * state to the registers directly (with exceptions handled).
- */
- user_fpu_begin();
- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
- fpu__clear(fpu);
- return -1;
+ fpregs_lock();
+ if (use_xsave()) {
+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
}
+
+ ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
+ } else {
+ ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
+ if (ret)
+ goto err_out;
+
+ fpregs_lock();
+ ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
}
+ if (!ret)
+ fpregs_mark_activate();
+ fpregs_unlock();
- return 0;
+err_out:
+ if (ret)
+ fpu__clear(fpu);
+ return ret;
}
static inline int xstate_sigframe_size(void)
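
The copy_fpstate_to_sigframe() rewrite above follows a common fast-path/slow-path shape: try the copy to the user stack frame with page faults disabled, and only if that fails fault the destination pages in with get_user_pages_unlocked() and retry. A stripped-down sketch of that shape; try_direct_copy() is an illustrative stand-in for copy_fpregs_to_sigframe(), not a real kernel function:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/* Illustrative stand-in for the faults-disabled register save. */
extern int try_direct_copy(void __user *buf);

static int save_to_user_frame(void __user *buf, unsigned int size)
{
	int nr_pages, ret;

	for (;;) {
		pagefault_disable();
		ret = try_direct_copy(buf);
		pagefault_enable();
		if (!ret)
			return 0;

		/* Fault the destination in, then retry the fast path. */
		nr_pages = DIV_ROUND_UP(offset_in_page(buf) + size, PAGE_SIZE);
		if (get_user_pages_unlocked((unsigned long)buf, nr_pages,
					    NULL, FOLL_WRITE) != nr_pages)
			return -EFAULT;
	}
}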
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index d7432c2b1051..9c459fd1d38e 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -805,20 +805,18 @@ void fpu__resume_cpu(void)
}
/*
- * Given an xstate feature mask, calculate where in the xsave
+ * Given an xstate feature nr, calculate where in the xsave
* buffer the state is. Callers should ensure that the buffer
* is valid.
*/
-static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
+static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
- int feature_nr = fls64(xstate_feature_mask) - 1;
-
- if (!xfeature_enabled(feature_nr)) {
+ if (!xfeature_enabled(xfeature_nr)) {
WARN_ON_FPU(1);
return NULL;
}
- return (void *)xsave + xstate_comp_offsets[feature_nr];
+ return (void *)xsave + xstate_comp_offsets[xfeature_nr];
}
/*
* Given the xsave area and a state inside, this function returns the
@@ -832,13 +830,13 @@ static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask
*
* Inputs:
* xstate: the thread's storage area for all FPU data
- * xstate_feature: state which is defined in xsave.h (e.g.
- * XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...)
+ * xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
+ * XFEATURE_SSE, etc...)
* Output:
* address of the state in the xsave area, or NULL if the
* field is not present in the xsave buffer.
*/
-void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
+void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
/*
* Do we even *have* xsave state?
@@ -851,11 +849,11 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
* have not enabled. Remember that pcntxt_mask is
* what we write to the XCR0 register.
*/
- WARN_ONCE(!(xfeatures_mask & xstate_feature),
+ WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
"get of unsupported state");
/*
* This assumes the last 'xsave*' instruction to
- * have requested that 'xstate_feature' be saved.
+ * have requested that 'xfeature_nr' be saved.
* If it did not, we might be seeing an old value
* of the field in the buffer.
*
@@ -864,10 +862,10 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
* or because the "init optimization" caused it
* to not be saved.
*/
- if (!(xsave->header.xfeatures & xstate_feature))
+ if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
return NULL;
- return __raw_xsave_addr(xsave, xstate_feature);
+ return __raw_xsave_addr(xsave, xfeature_nr);
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
@@ -882,25 +880,23 @@ EXPORT_SYMBOL_GPL(get_xsave_addr);
* Note that this only works on the current task.
*
* Inputs:
- * @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
- * XFEATURE_MASK_SSE, etc...)
+ * @xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
+ * XFEATURE_SSE, etc...)
* Output:
* address of the state in the xsave area or NULL if the state
* is not present or is in its 'init state'.
*/
-const void *get_xsave_field_ptr(int xsave_state)
+const void *get_xsave_field_ptr(int xfeature_nr)
{
struct fpu *fpu = &current->thread.fpu;
- if (!fpu->initialized)
- return NULL;
/*
* fpu__save() takes the CPU's xstate registers
* and saves them off to the 'fpu memory buffer'.
*/
fpu__save(fpu);
- return get_xsave_addr(&fpu->state.xsave, xsave_state);
+ return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
}
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -1016,7 +1012,7 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int of
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
- void *src = __raw_xsave_addr(xsave, 1 << i);
+ void *src = __raw_xsave_addr(xsave, i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
@@ -1102,7 +1098,7 @@ int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned i
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
- void *src = __raw_xsave_addr(xsave, 1 << i);
+ void *src = __raw_xsave_addr(xsave, i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
@@ -1159,7 +1155,7 @@ int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
u64 mask = ((u64)1 << i);
if (hdr.xfeatures & mask) {
- void *dst = __raw_xsave_addr(xsave, 1 << i);
+ void *dst = __raw_xsave_addr(xsave, i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
@@ -1213,7 +1209,7 @@ int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
u64 mask = ((u64)1 << i);
if (hdr.xfeatures & mask) {
- void *dst = __raw_xsave_addr(xsave, 1 << i);
+ void *dst = __raw_xsave_addr(xsave, i);
offset = xstate_offsets[i];
size = xstate_sizes[i];
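
The xstate.c interface change above swaps feature masks for feature numbers: callers pass XFEATURE_SSE-style indices, __raw_xsave_addr() indexes its offset table directly, and a mask is built with BIT_ULL() only where one is still needed, removing the fls64() conversion. The relation between the two forms, for reference:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <asm/fpu/types.h>

static void xfeature_nr_vs_mask_example(void)
{
	int nr   = XFEATURE_SSE;	/* feature number, e.g. 1       */
	u64 mask = BIT_ULL(nr);		/* == XFEATURE_MASK_SSE         */
	int back = fls64(mask) - 1;	/* the conversion removed above */

	WARN_ON(back != nr);
}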
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index ef49517f6bb2..0927bb158ffc 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
+#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
static int update_ftrace_func(unsigned long ip, void *new)
{
@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned char *new;
int ret;
+ ftrace_update_func_call = (unsigned long)func;
+
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
if (WARN_ON_ONCE(!regs))
return 0;
- ip = regs->ip - 1;
- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
- return 0;
+ ip = regs->ip - INT3_INSN_SIZE;
- regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+ if (ftrace_location(ip)) {
+ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+ return 1;
+ } else if (is_ftrace_caller(ip)) {
+ if (!ftrace_update_func_call) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+ int3_emulate_call(regs, ftrace_update_func_call);
+ return 1;
+ }
+#else
+ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+#endif
- return 1;
+ return 0;
}
NOKPROBE_SYMBOL(ftrace_int3_handler);
@@ -678,12 +697,8 @@ static inline void *alloc_tramp(unsigned long size)
{
return module_alloc(size);
}
-static inline void tramp_free(void *tramp, int size)
+static inline void tramp_free(void *tramp)
{
- int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
- set_memory_nx((unsigned long)tramp, npages);
- set_memory_rw((unsigned long)tramp, npages);
module_memfree(tramp);
}
#else
@@ -692,7 +707,7 @@ static inline void *alloc_tramp(unsigned long size)
{
return NULL;
}
-static inline void tramp_free(void *tramp, int size) { }
+static inline void tramp_free(void *tramp) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
@@ -730,6 +745,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
+ unsigned long npages;
unsigned long size;
unsigned long retq;
unsigned long *ptr;
@@ -762,6 +778,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
return 0;
*tramp_size = size + RET_SIZE + sizeof(void *);
+ npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
@@ -806,9 +823,17 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+ set_vm_flush_reset_perms(trampoline);
+
+ /*
+ * Module allocation needs to be completed by making the page
+ * executable. The page is still writable, which is a security hazard,
+ * but anyhow ftrace breaks W^X completely.
+ */
+ set_memory_x((unsigned long)trampoline, npages);
return (unsigned long)trampoline;
fail:
- tramp_free(trampoline, *tramp_size);
+ tramp_free(trampoline);
return 0;
}
@@ -859,6 +884,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
func = ftrace_ops_get_func(ops);
+ ftrace_update_func_call = (unsigned long)func;
+
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -939,7 +966,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
- tramp_free((void *)ops->trampoline, ops->trampoline_size);
+ tramp_free((void *)ops->trampoline);
ops->trampoline = 0;
}
@@ -960,6 +987,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;
+ ftrace_update_func_call = 0UL;
new = ftrace_jmp_replace(ip, (unsigned long)func);
return update_ftrace_func(ip, new);
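
Two separate fixes land in ftrace.c above. ftrace_int3_handler() no longer just steps over a site that is being patched: on 64-bit it emulates the call (into ftrace_regs_caller, or into the function recorded in ftrace_update_func_call) so events are not silently dropped mid-update, and on 32-bit it emulates a jump over the call. Separately, because module_alloc() memory is no longer executable by default, the trampoline is made executable explicitly with set_memory_x(). Roughly, the emulation helpers behave as below; this is a sketch of the idea, not the asm/text-patching.h implementation:

#include <asm/ptrace.h>

#define EXAMPLE_CALL_INSN_SIZE 5	/* x86 "call rel32" is five bytes */

/* regs->ip is assumed to have been rewound to the start of the patched site. */
static void emulate_jmp(struct pt_regs *regs, unsigned long target)
{
	regs->ip = target;		/* simply resume at the target */
}

static void emulate_call(struct pt_regs *regs, unsigned long target)
{
	unsigned long ret_addr = regs->ip + EXAMPLE_CALL_INSN_SIZE;

	regs->sp -= sizeof(unsigned long);	/* push the return address */
	*(unsigned long *)regs->sp = ret_addr;
	regs->ip = target;			/* then "call" the target  */
}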
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..2ba914a34b06 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -10,22 +10,10 @@
#include <asm/ftrace.h>
#include <asm/nospec-branch.h>
-#ifdef CC_USING_FENTRY
# define function_hook __fentry__
EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook mcount
-EXPORT_SYMBOL(mcount)
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
-#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
-# define USING_FRAME_POINTER
-#endif
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
# define MCOUNT_FRAME 1 /* using frame = true */
#else
# define MCOUNT_FRAME 0 /* using frame = false */
@@ -37,8 +25,7 @@ END(function_hook)
ENTRY(ftrace_caller)
-#ifdef USING_FRAME_POINTER
-# ifdef CC_USING_FENTRY
+#ifdef CONFIG_FRAME_POINTER
/*
* Frame pointers are of ip followed by bp.
* Since fentry is an immediate jump, we are left with
@@ -49,7 +36,7 @@ ENTRY(ftrace_caller)
pushl %ebp
movl %esp, %ebp
pushl 2*4(%esp) /* function ip */
-# endif
+
/* For mcount, the function ip is directly above */
pushl %ebp
movl %esp, %ebp
@@ -59,7 +46,7 @@ ENTRY(ftrace_caller)
pushl %edx
pushl $0 /* Pass NULL as regs pointer */
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
/* Load parent ebp into edx */
movl 4*4(%esp), %edx
#else
@@ -82,13 +69,11 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
popl %ebp
-# ifdef CC_USING_FENTRY
addl $4,%esp /* skip function ip */
popl %ebp /* this is the orig bp */
addl $4, %esp /* skip parent ip */
-# endif
#endif
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -133,11 +118,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
-#ifdef CC_USING_FENTRY
movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */
-#else
- movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
-#endif
movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
@@ -170,43 +151,6 @@ GLOBAL(ftrace_regs_call)
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
jmp .Lftrace_ret
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
- cmpl $__PAGE_OFFSET, %esp
- jb ftrace_stub /* Paging not enabled yet? */
-
- cmpl $ftrace_stub, ftrace_trace_function
- jnz .Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- cmpl $ftrace_stub, ftrace_graph_return
- jnz ftrace_graph_caller
-
- cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
- jnz ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
- ret
-
- /* taken from glibc */
-.Ltrace:
- pushl %eax
- pushl %ecx
- pushl %edx
- movl 0xc(%esp), %eax
- movl 0x4(%ebp), %edx
- subl $MCOUNT_INSN_SIZE, %eax
-
- movl ftrace_trace_function, %ecx
- CALL_NOSPEC %ecx
-
- popl %edx
- popl %ecx
- popl %eax
- jmp ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
@@ -215,13 +159,8 @@ ENTRY(ftrace_graph_caller)
pushl %edx
movl 3*4(%esp), %eax
/* Even with frame pointers, fentry doesn't have one here */
-#ifdef CC_USING_FENTRY
lea 4*4(%esp), %edx
movl $0, %ecx
-#else
- lea 0x4(%ebp), %edx
- movl (%ebp), %ecx
-#endif
subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return
popl %edx
@@ -234,11 +173,7 @@ END(ftrace_graph_caller)
return_to_handler:
pushl %eax
pushl %edx
-#ifdef CC_USING_FENTRY
movl $0, %eax
-#else
- movl %ebp, %eax
-#endif
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 75f2b36b41a6..10eb2760ef2c 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -13,22 +13,12 @@
.code64
.section .entry.text, "ax"
-#ifdef CC_USING_FENTRY
# define function_hook __fentry__
EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook mcount
-EXPORT_SYMBOL(mcount)
-#endif
#ifdef CONFIG_FRAME_POINTER
-# ifdef CC_USING_FENTRY
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
-# else
-/* Save just function stack frame (rip and rbp) */
-# define MCOUNT_FRAME_SIZE (8+16)
-# endif
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 0
@@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount)
* fentry is called before the stack frame is set up, whereas mcount
* is called afterward.
*/
-#ifdef CC_USING_FENTRY
+
/* Save the parent pointer (skip orig rbp and our return address) */
pushq \added+8*2(%rsp)
pushq %rbp
movq %rsp, %rbp
/* Save the return address (now skip orig rbp, rbp and parent) */
pushq \added+8*3(%rsp)
-#else
- /* Can't assume that rip is before this (unless added was zero) */
- pushq \added+8(%rsp)
-#endif
pushq %rbp
movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */
@@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount)
movq %rdx, RBP(%rsp)
/* Copy the parent address into %rsi (second parameter) */
-#ifdef CC_USING_FENTRY
movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
- /* %rdx contains original %rbp */
- movq 8(%rdx), %rsi
-#endif
/* Move RIP to its proper location */
movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
@@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller)
/* Saves rbp into %rdx and fills first parameter */
save_mcount_regs
-#ifdef CC_USING_FENTRY
leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
movq $0, %rdx /* No framepointers needed */
-#else
- /* Save address of the return address of traced function */
- leaq 8(%rdx), %rsi
- /* ftrace does sanity checks against frame pointers */
- movq (%rdx), %rdx
-#endif
call prepare_ftrace_return
restore_mcount_regs
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d1dbe8e4eb82..bcd206c8ac90 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -265,7 +265,7 @@ ENDPROC(start_cpu0)
GLOBAL(initial_code)
.quad x86_64_start_kernel
GLOBAL(initial_gs)
- .quad INIT_PER_CPU_VAR(irq_stack_union)
+ .quad INIT_PER_CPU_VAR(fixed_percpu_data)
GLOBAL(initial_stack)
/*
* The SIZEOF_PTREGS gap is a convention which helps the in-kernel
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 01adea278a71..6d8917875f44 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -41,13 +41,12 @@ struct idt_data {
#define SYSG(_vector, _addr) \
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
-/* Interrupt gate with interrupt stack */
+/*
+ * Interrupt gate with interrupt stack. The _ist index is the index in
+ * the tss.ist[] array, but for the descriptor it needs to start at 1.
+ */
#define ISTG(_vector, _addr, _ist) \
- G(_vector, _addr, _ist, GATE_INTERRUPT, DPL0, __KERNEL_CS)
-
-/* System interrupt gate with interrupt stack */
-#define SISTG(_vector, _addr, _ist) \
- G(_vector, _addr, _ist, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+ G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
/* Task gate */
#define TSKG(_vector, _gdt) \
@@ -184,11 +183,11 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
* cpu_init() when the TSS has been initialized.
*/
static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
- ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
- ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
+ ISTG(X86_TRAP_DB, debug, IST_INDEX_DB),
+ ISTG(X86_TRAP_NMI, nmi, IST_INDEX_NMI),
+ ISTG(X86_TRAP_DF, double_fault, IST_INDEX_DF),
#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),
+ ISTG(X86_TRAP_MC, &machine_check, IST_INDEX_MCE),
#endif
};
diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
index e47cd9390ab4..85de790583f9 100644
--- a/arch/x86/kernel/ima_arch.c
+++ b/arch/x86/kernel/ima_arch.c
@@ -3,6 +3,7 @@
* Copyright (C) 2018 IBM Corporation
*/
#include <linux/efi.h>
+#include <linux/module.h>
#include <linux/ima.h>
extern struct boot_params boot_params;
@@ -64,12 +65,19 @@ static const char * const sb_arch_rules[] = {
"appraise func=KEXEC_KERNEL_CHECK appraise_type=imasig",
#endif /* CONFIG_KEXEC_VERIFY_SIG */
"measure func=KEXEC_KERNEL_CHECK",
+#if !IS_ENABLED(CONFIG_MODULE_SIG)
+ "appraise func=MODULE_CHECK appraise_type=imasig",
+#endif
+ "measure func=MODULE_CHECK",
NULL
};
const char * const *arch_get_ima_policy(void)
{
- if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot())
+ if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) {
+ if (IS_ENABLED(CONFIG_MODULE_SIG))
+ set_module_sig_enforced();
return sb_arch_rules;
+ }
return NULL;
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 95600a99ae93..fc34816c6f04 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -51,8 +51,8 @@ static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif
-DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
-DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
static void call_on_stack(void *func, void *stack)
{
@@ -76,7 +76,7 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
u32 *isp, *prev_esp, arg1;
curstk = (struct irq_stack *) current_stack();
- irqstk = __this_cpu_read(hardirq_stack);
+ irqstk = __this_cpu_read(hardirq_stack_ptr);
/*
* this is where we switch to the IRQ stack. However, if we are
@@ -107,27 +107,28 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
}
/*
- * allocate per-cpu stacks for hardirq and for softirq processing
+ * Allocate per-cpu stacks for hardirq and softirq processing
*/
-void irq_ctx_init(int cpu)
+int irq_init_percpu_irqstack(unsigned int cpu)
{
- struct irq_stack *irqstk;
-
- if (per_cpu(hardirq_stack, cpu))
- return;
+ int node = cpu_to_node(cpu);
+ struct page *ph, *ps;
- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
- THREADINFO_GFP,
- THREAD_SIZE_ORDER));
- per_cpu(hardirq_stack, cpu) = irqstk;
+ if (per_cpu(hardirq_stack_ptr, cpu))
+ return 0;
- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
- THREADINFO_GFP,
- THREAD_SIZE_ORDER));
- per_cpu(softirq_stack, cpu) = irqstk;
+ ph = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+ if (!ph)
+ return -ENOMEM;
+ ps = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER);
+ if (!ps) {
+ __free_pages(ph, THREAD_SIZE_ORDER);
+ return -ENOMEM;
+ }
- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
+ per_cpu(hardirq_stack_ptr, cpu) = page_address(ph);
+ per_cpu(softirq_stack_ptr, cpu) = page_address(ps);
+ return 0;
}
void do_softirq_own_stack(void)
@@ -135,7 +136,7 @@ void do_softirq_own_stack(void)
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- irqstk = __this_cpu_read(softirq_stack);
+ irqstk = __this_cpu_read(softirq_stack_ptr);
/* build the stack frame on the softirq stack */
isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 0469cd078db1..6bf6517a05bb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -18,63 +18,64 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
+
+#include <asm/cpu_entry_area.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
-int sysctl_panic_on_stackoverflow;
+DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
+DECLARE_INIT_PER_CPU(irq_stack_backing_store);
-/*
- * Probabilistic stack overflow check:
- *
- * Only check the stack in process context, because everything else
- * runs on the big interrupt stacks. Checking reliably is too expensive,
- * so we just check from interrupts.
- */
-static inline void stack_overflow_check(struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-#define STACK_TOP_MARGIN 128
- struct orig_ist *oist;
- u64 irq_stack_top, irq_stack_bottom;
- u64 estack_top, estack_bottom;
- u64 curbase = (u64)task_stack_page(current);
+ if (IS_ERR_OR_NULL(desc))
+ return false;
- if (user_mode(regs))
- return;
+ generic_handle_irq_desc(desc);
+ return true;
+}
- if (regs->sp >= curbase + sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
- regs->sp <= curbase + THREAD_SIZE)
- return;
+#ifdef CONFIG_VMAP_STACK
+/*
+ * VMAP the backing store with guard pages
+ */
+static int map_irq_stack(unsigned int cpu)
+{
+ char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
+ struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
+ void *va;
+ int i;
- irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
- STACK_TOP_MARGIN;
- irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
- if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
- return;
+ for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
+ phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));
- oist = this_cpu_ptr(&orig_ist);
- estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
- estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
- if (regs->sp >= estack_top && regs->sp <= estack_bottom)
- return;
+ pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
+ }
- WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx,ip:%pF)\n",
- current->comm, curbase, regs->sp,
- irq_stack_top, irq_stack_bottom,
- estack_top, estack_bottom, (void *)regs->ip);
+ va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+ if (!va)
+ return -ENOMEM;
- if (sysctl_panic_on_stackoverflow)
- panic("low stack detected by irq handler - check messages\n");
-#endif
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ return 0;
}
-
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+#else
+/*
+ * If VMAP stacks are disabled due to KASAN, just use the per cpu
+ * backing store without guard pages.
+ */
+static int map_irq_stack(unsigned int cpu)
{
- stack_overflow_check(regs);
+ void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);
- if (IS_ERR_OR_NULL(desc))
- return false;
+ per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
+ return 0;
+}
+#endif
- generic_handle_irq_desc(desc);
- return true;
+int irq_init_percpu_irqstack(unsigned int cpu)
+{
+ if (per_cpu(hardirq_stack_ptr, cpu))
+ return 0;
+ return map_irq_stack(cpu);
}
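
With the irq_64.c change the 64-bit IRQ stack comes from a page-aligned per-CPU backing store and, when CONFIG_VMAP_STACK is enabled, is accessed through a vmap() alias so the neighbouring guard pages turn an overflow into an immediate fault; the old probabilistic stack_overflow_check() is removed for that reason. hardirq_stack_ptr points at the top of the mapping because x86 stacks grow downwards. The same guard-page idea, shown as a self-contained user-space toy (no kernel APIs, purely an analogy):

#include <stddef.h>
#include <sys/mman.h>

/*
 * Map stack_size usable bytes with an inaccessible page on either side, so
 * running off either end faults immediately instead of corrupting neighbours.
 * Returns the top of the usable region, since stacks grow downwards.
 */
static void *alloc_guarded_stack(size_t stack_size, size_t page_size)
{
	size_t total = stack_size + 2 * page_size;
	char *base = mmap(NULL, total, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (base == MAP_FAILED)
		return NULL;

	/* Open up only the middle; the first and last page stay PROT_NONE. */
	if (mprotect(base + page_size, stack_size, PROT_READ | PROT_WRITE)) {
		munmap(base, total);
		return NULL;
	}
	return base + page_size + stack_size;
}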
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index a0693b71cfc1..16919a9671fa 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -91,6 +91,8 @@ void __init init_IRQ(void)
for (i = 0; i < nr_legacy_irqs(); i++)
per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
+ BUG_ON(irq_init_percpu_irqstack(smp_processor_id()));
+
x86_init.irqs.intr_init();
}
@@ -104,6 +106,4 @@ void __init native_init_IRQ(void)
if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
setup_irq(2, &irq2);
-
- irq_ctx_init(smp_processor_id());
}
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index f99bd26bd3f1..e631c358f7f4 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,7 +37,6 @@ static void bug_at(unsigned char *ip, int line)
static void __ref __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
- void *(*poker)(void *, const void *, size_t),
int init)
{
union jump_code_union jmp;
@@ -50,9 +49,6 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
jmp.offset = jump_entry_target(entry) -
(jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
- if (early_boot_irqs_disabled)
- poker = text_poke_early;
-
if (type == JUMP_LABEL_JMP) {
if (init) {
expect = default_nop; line = __LINE__;
@@ -75,16 +71,19 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
bug_at((void *)jump_entry_code(entry), line);
/*
- * Make text_poke_bp() a default fallback poker.
+ * As long as only a single processor is running and the code is still
+ * not marked as RO, text_poke_early() can be used; checking that
+ * system_state is SYSTEM_BOOTING guarantees it. It will be set to
+ * SYSTEM_SCHEDULING before other cores are awakened and before the
+ * code is write-protected.
*
* At the time the change is being done, just ignore whether we
* are doing nop -> jump or jump -> nop transition, and assume
* always nop being the 'currently valid' instruction
- *
*/
- if (poker) {
- (*poker)((void *)jump_entry_code(entry), code,
- JUMP_LABEL_NOP_SIZE);
+ if (init || system_state == SYSTEM_BOOTING) {
+ text_poke_early((void *)jump_entry_code(entry), code,
+ JUMP_LABEL_NOP_SIZE);
return;
}
@@ -96,7 +95,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
mutex_lock(&text_mutex);
- __jump_label_transform(entry, type, NULL, 0);
+ __jump_label_transform(entry, type, 0);
mutex_unlock(&text_mutex);
}
@@ -126,5 +125,5 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
jlstate = JL_STATE_NO_UPDATE;
}
if (jlstate == JL_STATE_UPDATE)
- __jump_label_transform(entry, type, text_poke_early, 1);
+ __jump_label_transform(entry, type, 1);
}
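
The jump_label change above drops the poker callback: during early boot (init entries, or system_state still SYSTEM_BOOTING, meaning a single CPU and still-writable text) the site is patched directly with text_poke_early(), and everything later goes through the INT3-based text_poke_bp() machinery that the removed comment called the default fallback poker. A condensed sketch of that decision (illustrative wrapper, not the kernel function):

#include <linux/kernel.h>
#include <asm/text-patching.h>

static void patch_jump_site(void *addr, const void *code, size_t len, bool init)
{
	if (init || system_state == SYSTEM_BOOTING) {
		/* Single CPU, text still writable: patch in place. */
		text_poke_early(addr, code, len);
		return;
	}
	/* Otherwise use the breakpoint-based live-patching path. */
	text_poke_bp(addr, code, len, (char *)addr + len);
}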
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 4ff6b4cdb941..13b13311b792 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -747,7 +747,6 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
- char opc[BREAK_INSTR_SIZE];
bpt->type = BP_BREAKPOINT;
err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
@@ -759,18 +758,13 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (!err)
return err;
/*
- * It is safe to call text_poke() because normal kernel execution
+ * It is safe to call text_poke_kgdb() because normal kernel execution
* is stopped on all cores, so long as the text_mutex is not locked.
*/
if (mutex_is_locked(&text_mutex))
return -EBUSY;
- text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
- if (err)
- return err;
- if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
- return -EINVAL;
+ text_poke_kgdb((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
bpt->type = BP_POKE_BREAKPOINT;
return err;
@@ -778,22 +772,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- int err;
- char opc[BREAK_INSTR_SIZE];
-
if (bpt->type != BP_POKE_BREAKPOINT)
goto knl_write;
/*
- * It is safe to call text_poke() because normal kernel execution
+ * It is safe to call text_poke_kgdb() because normal kernel execution
* is stopped on all cores, so long as the text_mutex is not locked.
*/
if (mutex_is_locked(&text_mutex))
goto knl_write;
- text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
- if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
- goto knl_write;
- return err;
+ text_poke_kgdb((void *)bpt->bpt_addr, bpt->saved_instr,
+ BREAK_INSTR_SIZE);
+ return 0;
knl_write:
return probe_kernel_write((char *)bpt->bpt_addr,
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index fed46ddb1eef..9e4fa2484d10 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -431,8 +431,21 @@ void *alloc_insn_page(void)
void *page;
page = module_alloc(PAGE_SIZE);
- if (page)
- set_memory_ro((unsigned long)page & PAGE_MASK, 1);
+ if (!page)
+ return NULL;
+
+ set_vm_flush_reset_perms(page);
+ /*
+ * First make the page read-only, and only then make it executable to
+ * prevent it from being W+X in between.
+ */
+ set_memory_ro((unsigned long)page, 1);
+
+ /*
+ * TODO: Once additional kernel code protection mechanisms are set, ensure
+ * that the page was not maliciously altered and it is still zeroed.
+ */
+ set_memory_x((unsigned long)page, 1);
return page;
}
@@ -440,8 +453,6 @@ void *alloc_insn_page(void)
/* Recover page to RW mode before releasing it */
void free_insn_page(void *page)
{
- set_memory_nx((unsigned long)page & PAGE_MASK, 1);
- set_memory_rw((unsigned long)page & PAGE_MASK, 1);
module_memfree(page);
}
@@ -716,6 +727,7 @@ NOKPROBE_SYMBOL(kprobe_int3_handler);
* calls trampoline_handler() runs, which calls the kretprobe's handler.
*/
asm(
+ ".text\n"
".global kretprobe_trampoline\n"
".type kretprobe_trampoline, @function\n"
"kretprobe_trampoline:\n"
@@ -756,7 +768,7 @@ static struct kprobe kretprobe_kprobe = {
/*
* Called from kretprobe_trampoline
*/
-static __used void *trampoline_handler(struct pt_regs *regs)
+__used __visible void *trampoline_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb;
struct kretprobe_instance *ri = NULL;
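
alloc_insn_page() above is a compact example of the W^X handling used throughout this series: module_alloc() now hands back writable but non-executable memory, the page is made read-only before it is made executable so there is no window where it is both writable and executable, and set_vm_flush_reset_perms() arranges for the permissions (including the direct map) to be reset when the page is eventually vfreed. The ordering, distilled from the hunk above into a standalone helper:

#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

static void *alloc_exec_page(void)
{
	void *page = module_alloc(PAGE_SIZE);	/* RW and NX at this point */

	if (!page)
		return NULL;

	set_vm_flush_reset_perms(page);		/* reset perms on vfree()  */
	set_memory_ro((unsigned long)page, 1);	/* 1. drop write access    */
	set_memory_x((unsigned long)page, 1);	/* 2. only then allow exec */
	return page;
}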
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5c93a65ee1e5..3f0cc828cc36 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -67,7 +67,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
-static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
+DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
/*
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 6135ae8ce036..b2463fcb20a8 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -113,7 +113,7 @@ static void do_sanity_check(struct mm_struct *mm,
* tables.
*/
WARN_ON(!had_kernel_mapping);
- if (static_cpu_has(X86_FEATURE_PTI))
+ if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(!had_user_mapping);
} else {
/*
@@ -121,7 +121,7 @@ static void do_sanity_check(struct mm_struct *mm,
* Sync the pgd to the usermode tables.
*/
WARN_ON(had_kernel_mapping);
- if (static_cpu_has(X86_FEATURE_PTI))
+ if (boot_cpu_has(X86_FEATURE_PTI))
WARN_ON(had_user_mapping);
}
}
@@ -156,7 +156,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
k_pmd = pgd_to_pmd_walk(k_pgd, LDT_BASE_ADDR);
u_pmd = pgd_to_pmd_walk(u_pgd, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pmd(u_pmd, *k_pmd);
}
@@ -181,7 +181,7 @@ static void map_ldt_struct_to_user(struct mm_struct *mm)
{
pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
- if (static_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
+ if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
set_pgd(kernel_to_user_pgdp(pgd), *pgd);
}
@@ -208,7 +208,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
spinlock_t *ptl;
int i, nr_pages;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return 0;
/*
@@ -271,7 +271,7 @@ static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
return;
/* LDT map/unmap is only required for PTI */
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
@@ -311,7 +311,7 @@ static void free_ldt_pgtables(struct mm_struct *mm)
unsigned long start = LDT_BASE_ADDR;
unsigned long end = LDT_END_ADDR;
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
tlb_gather_mmu(&tlb, mm, start, end);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index b052e883dd8c..cfa3106faee4 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
MODULES_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
if (p && (kasan_module_alloc(p, size) < 0)) {
vfree(p);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 18bc9b51ac9b..05b09896cfaf 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -21,19 +21,21 @@
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/atomic.h>
#include <linux/sched/clock.h>
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
-#include <linux/atomic.h>
+#include <asm/cpu_entry_area.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -487,6 +489,23 @@ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
* switch back to the original IDT.
*/
static DEFINE_PER_CPU(int, update_debug_stack);
+
+static bool notrace is_debug_stack(unsigned long addr)
+{
+ struct cea_exception_stacks *cs = __this_cpu_read(cea_exception_stacks);
+ unsigned long top = CEA_ESTACK_TOP(cs, DB);
+ unsigned long bot = CEA_ESTACK_BOT(cs, DB1);
+
+ if (__this_cpu_read(debug_stack_usage))
+ return true;
+ /*
+ * Note, this covers the guard page between DB and DB1 as well to
+ * avoid two checks. But by all means @addr can never point into
+ * the guard page.
+ */
+ return addr >= bot && addr < top;
+}
+NOKPROBE_SYMBOL(is_debug_stack);
#endif
dotraplinkage notrace void
@@ -533,6 +552,9 @@ nmi_restart:
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
+
+ if (user_mode(regs))
+ mds_user_clear_cpu_buffers();
}
NOKPROBE_SYMBOL(do_nmi);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c0e0101133f3..7bbaa6baf37f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -121,7 +121,7 @@ DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
void __init native_pv_lock_init(void)
{
- if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_disable(&virt_spin_lock_key);
}
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index d460998ae828..dcd272dbd0a9 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -51,14 +51,6 @@ int iommu_pass_through __read_mostly;
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
-/* Dummy device used for NULL arguments (normally ISA). */
-struct device x86_dma_fallback_dev = {
- .init_name = "fallback device",
- .coherent_dma_mask = ISA_DMA_BIT_MASK,
- .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
-};
-EXPORT_SYMBOL(x86_dma_fallback_dev);
-
void __init pci_iommu_alloc(void)
{
struct iommu_table_entry *p;
@@ -77,18 +69,6 @@ void __init pci_iommu_alloc(void)
}
}
-bool arch_dma_alloc_attrs(struct device **dev)
-{
- if (!*dev)
- *dev = &x86_dma_fallback_dev;
-
- if (!is_device_dma_capable(*dev))
- return false;
- return true;
-
-}
-EXPORT_SYMBOL(arch_dma_alloc_attrs);
-
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
* parameter documentation.
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index c06c4c16c6b6..07c30ee17425 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -59,18 +59,34 @@ static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
+ struct x86_perf_regs *perf_regs;
+
+ if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
+ perf_regs = container_of(regs, struct x86_perf_regs, regs);
+ if (!perf_regs->xmm_regs)
+ return 0;
+ return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
+ }
+
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
return 0;
return regs_get_register(regs, pt_regs_offset[idx]);
}
-#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL))
-
#ifdef CONFIG_X86_32
+#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
+ (1ULL << PERF_REG_X86_R9) | \
+ (1ULL << PERF_REG_X86_R10) | \
+ (1ULL << PERF_REG_X86_R11) | \
+ (1ULL << PERF_REG_X86_R12) | \
+ (1ULL << PERF_REG_X86_R13) | \
+ (1ULL << PERF_REG_X86_R14) | \
+ (1ULL << PERF_REG_X86_R15))
+
int perf_reg_validate(u64 mask)
{
- if (!mask || mask & REG_RESERVED)
+ if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
@@ -96,10 +112,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,
int perf_reg_validate(u64 mask)
{
- if (!mask || mask & REG_RESERVED)
- return -EINVAL;
-
- if (mask & REG_NOSUPPORT)
+ if (!mask || (mask & REG_NOSUPPORT))
return -EINVAL;
return 0;
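The XMM hunk above relies on container_of() to get from the pt_regs pointer handed to perf_reg_value() back to the surrounding x86_perf_regs. A minimal userspace illustration of that pattern; the struct names below are local stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct regs      { unsigned long ip, sp; };
struct perf_regs { const unsigned long *xmm; struct regs regs; };

int main(void)
{
	static const unsigned long xmm[2] = { 0x1111, 0x2222 };
	struct perf_regs pr = { .xmm = xmm, .regs = { .ip = 0xdead } };
	struct regs *r = &pr.regs;		/* what callers pass around */

	/* Recover the wrapper from the embedded member pointer. */
	struct perf_regs *back = container_of(r, struct perf_regs, regs);

	printf("xmm[1] = %#lx\n", back->xmm[1]);	/* prints 0x2222 */
	return 0;
}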
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 957eae13b370..75fea0d48c0e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -101,7 +101,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
dst->thread.vm86 = NULL;
#endif
- return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
+ return fpu__copy(dst, src);
}
/*
@@ -236,7 +236,7 @@ static int get_cpuid_mode(void)
static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
- if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
+ if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
return -ENODEV;
if (cpuid_enabled)
@@ -670,7 +670,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
- if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
+ if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
return 0;
return 1;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index e471d8e6f0b2..2399e910d109 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct task_struct *tsk;
int err;
+ /*
+ * For a new task use the RESET flags value since there is no before.
+ * All the status flags are zero; DF and all the system flags must also
+ * be 0, specifically IF must be 0 because we context switch to the new
+ * task with interrupts disabled.
+ */
+ frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
@@ -234,7 +241,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- switch_fpu_prepare(prev_fpu, cpu);
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_prepare(prev_fpu, cpu);
/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -267,9 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
- * the GDT and LDT are properly updated, and must be
- * done before fpu__restore(), so the TS bit is up
- * to date.
+ * the GDT and LDT are properly updated.
*/
arch_end_context_switch(next_p);
@@ -290,10 +296,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
- switch_fpu_finish(next_fpu, cpu);
-
this_cpu_write(current_task, next_p);
+ switch_fpu_finish(next_fpu);
+
/* Load the Intel cache allocation PQR MSR. */
resctrl_sched_in();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6a62f4af9fcf..f8e1af380cdf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -392,6 +392,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
+
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
@@ -520,7 +521,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(irq_count) != -1);
- switch_fpu_prepare(prev_fpu, cpu);
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_prepare(prev_fpu, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -538,9 +540,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Leave lazy mode, flushing any hypercalls made here. This
* must be done after loading TLS entries in the GDT but before
- * loading segments that might reference them, and and it must
- * be done before fpu__restore(), so the TS bit is up to
- * date.
+ * loading segments that might reference them.
*/
arch_end_context_switch(next_p);
@@ -568,14 +568,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
x86_fsgsbase_load(prev, next);
- switch_fpu_finish(next_fpu, cpu);
-
/*
* Switch the PDA and FPU contexts.
*/
this_cpu_write(current_task, next_p);
this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
+ switch_fpu_finish(next_fpu);
+
/* Reload sp0. */
update_task_stack(next_p);
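Both __switch_to() hunks gate switch_fpu_prepare() on TIF_NEED_FPU_LOAD and move switch_fpu_finish() after the current_task update; the underlying idea is to defer loading the FPU registers until the incoming task actually returns to user mode. A conceptual, standalone sketch of that deferral follows; the names and control flow are illustrative only, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

struct task {
	const char *name;
	bool need_fpu_load;		/* roughly TIF_NEED_FPU_LOAD */
};

static void switch_to_sketch(struct task *prev, struct task *next)
{
	/* Only save prev's FPU registers if they are actually live. */
	if (!prev->need_fpu_load)
		printf("save FPU regs of %s\n", prev->name);
	next->need_fpu_load = true;	/* defer the load */
}

static void return_to_user(struct task *t)
{
	if (t->need_fpu_load) {
		printf("load FPU regs of %s\n", t->name);
		t->need_fpu_load = false;
	}
}

int main(void)
{
	struct task a = { "A", false }, b = { "B", true };

	switch_to_sketch(&a, &b);	/* saves A, marks B as not loaded */
	return_to_user(&b);		/* first user return loads B      */
	return 0;
}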
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 8fd3cedd9acc..09d6bded3c1e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -121,7 +121,7 @@ void __noreturn machine_real_restart(unsigned int type)
write_cr3(real_mode_header->trampoline_pgd);
/* Exiting long mode will fail if CR4.PCIDE is set. */
- if (static_cpu_has(X86_FEATURE_PCID))
+ if (boot_cpu_has(X86_FEATURE_PCID))
cr4_clear_bits(X86_CR4_PCIDE);
#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3d872a527cd9..905dae880563 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -71,6 +71,7 @@
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <linux/mem_encrypt.h>
+#include <linux/sizes.h>
#include <linux/usb/xhci-dbgp.h>
#include <video/edid.h>
@@ -448,18 +449,17 @@ static void __init memblock_x86_reserve_range_setup_data(void)
#ifdef CONFIG_KEXEC_CORE
/* 16M alignment for crash kernel regions */
-#define CRASH_ALIGN (16 << 20)
+#define CRASH_ALIGN SZ_16M
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
- * On 64bit, old kexec-tools need to under 896MiB.
*/
#ifdef CONFIG_X86_32
-# define CRASH_ADDR_LOW_MAX (512 << 20)
-# define CRASH_ADDR_HIGH_MAX (512 << 20)
+# define CRASH_ADDR_LOW_MAX SZ_512M
+# define CRASH_ADDR_HIGH_MAX SZ_512M
#else
-# define CRASH_ADDR_LOW_MAX (896UL << 20)
+# define CRASH_ADDR_LOW_MAX SZ_4G
# define CRASH_ADDR_HIGH_MAX MAXMEM
#endif
@@ -541,21 +541,27 @@ static void __init reserve_crashkernel(void)
}
/* 0 means: find the address automatically */
- if (crash_base <= 0) {
+ if (!crash_base) {
/*
* Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
- * as old kexec-tools loads bzImage below that, unless
- * "crashkernel=size[KMG],high" is specified.
+ * crashkernel=x,high reserves memory over 4G, also allocates
+ * 256M extra low memory for DMA buffers and swiotlb.
+ * But the extra memory is not required for all machines.
+ * So try low memory first and fall back to high memory
+ * unless "crashkernel=size[KMG],high" is specified.
*/
- crash_base = memblock_find_in_range(CRASH_ALIGN,
- high ? CRASH_ADDR_HIGH_MAX
- : CRASH_ADDR_LOW_MAX,
- crash_size, CRASH_ALIGN);
+ if (!high)
+ crash_base = memblock_find_in_range(CRASH_ALIGN,
+ CRASH_ADDR_LOW_MAX,
+ crash_size, CRASH_ALIGN);
+ if (!crash_base)
+ crash_base = memblock_find_in_range(CRASH_ALIGN,
+ CRASH_ADDR_HIGH_MAX,
+ crash_size, CRASH_ALIGN);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
return;
}
-
} else {
unsigned long long start;
@@ -1005,13 +1011,11 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled(EFI_BOOT))
efi_init();
- dmi_scan_machine();
- dmi_memdev_walk();
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
/*
* VMware detection requires dmi to be available, so this
- * needs to be done after dmi_scan_machine(), for the boot CPU.
+ * needs to be done after dmi_setup(), for the boot CPU.
*/
init_hypervisor_platform();
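The reserve_crashkernel() hunk above changes the automatic placement to prefer memory below 4G and only then fall back to the high range. A toy, self-contained rendering of that decision order; find_range() is a fake stand-in for memblock_find_in_range() and simply pretends the low region is exhausted so the fallback path is visible:

#include <stdio.h>

#define SZ_16M		(16ULL << 20)
#define LOW_MAX		(4ULL << 30)
#define HIGH_MAX	(~0ULL)

static unsigned long long find_range(unsigned long long start,
				     unsigned long long end,
				     unsigned long long size,
				     unsigned long long align)
{
	(void)start; (void)size; (void)align;
	/* Low range "full"; pretend the high range has room at 5G. */
	return end <= LOW_MAX ? 0 : 5ULL << 30;
}

int main(void)
{
	int high = 0;			/* plain "crashkernel=256M"        */
	unsigned long long size = 256ULL << 20, base = 0;

	if (!high)
		base = find_range(SZ_16M, LOW_MAX, size, SZ_16M);
	if (!base)
		base = find_range(SZ_16M, HIGH_MAX, size, SZ_16M);

	printf("crash kernel base = %#llx\n", base);	/* 0x140000000 */
	return 0;
}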
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 4bf46575568a..86663874ef04 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -244,11 +244,6 @@ void __init setup_per_cpu_areas(void)
per_cpu(x86_cpu_to_logical_apicid, cpu) =
early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
-#ifdef CONFIG_X86_64
- per_cpu(irq_stack_ptr, cpu) =
- per_cpu(irq_stack_union.irq_stack, cpu) +
- IRQ_STACK_SIZE;
-#endif
#ifdef CONFIG_NUMA
per_cpu(x86_cpu_to_node_map, cpu) =
early_per_cpu_map(x86_cpu_to_node_map, cpu);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 08dfd4c1a4f9..364813cea647 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
COPY_SEG_CPL3(cs);
COPY_SEG_CPL3(ss);
-#ifdef CONFIG_X86_64
- /*
- * Fix up SS if needed for the benefit of old DOSEMU and
- * CRIU.
- */
- if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
- user_64bit_mode(regs)))
- force_valid_ss(regs);
-#endif
-
get_user_ex(tmpflags, &sc->flags);
regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
regs->orig_ax = -1; /* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
buf = (void __user *)buf_val;
} get_user_catch(err);
+#ifdef CONFIG_X86_64
+ /*
+ * Fix up SS if needed for the benefit of old DOSEMU and
+ * CRIU.
+ */
+ if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+ force_valid_ss(regs);
+#endif
+
err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
@@ -206,7 +205,7 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
put_user_ex(regs->ss, &sc->ss);
#endif /* CONFIG_X86_32 */
- put_user_ex(fpstate, &sc->fpstate);
+ put_user_ex(fpstate, (unsigned long __user *)&sc->fpstate);
/* non-iBCS2 extensions.. */
put_user_ex(mask, &sc->oldmask);
@@ -246,7 +245,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
int onsigstack = on_sig_stack(sp);
- struct fpu *fpu = &current->thread.fpu;
+ int ret;
/* redzone */
if (IS_ENABLED(CONFIG_X86_64))
@@ -265,11 +264,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
sp = (unsigned long) ka->sa.sa_restorer;
}
- if (fpu->initialized) {
- sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
- &buf_fx, &math_size);
- *fpstate = (void __user *)sp;
- }
+ sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
+ &buf_fx, &math_size);
+ *fpstate = (void __user *)sp;
sp = align_sigframe(sp - frame_size);
@@ -281,8 +278,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
return (void __user *)-1L;
/* save i387 and extended state */
- if (fpu->initialized &&
- copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
+ ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
+ if (ret < 0)
return (void __user *)-1L;
return (void __user *)sp;
@@ -461,6 +458,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
{
struct rt_sigframe __user *frame;
void __user *fp = NULL;
+ unsigned long uc_flags;
int err = 0;
frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +471,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
save_altstack_ex(&frame->uc.uc_stack, regs->sp);
@@ -541,6 +541,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
{
#ifdef CONFIG_X86_X32_ABI
struct rt_sigframe_x32 __user *frame;
+ unsigned long uc_flags;
void __user *restorer;
int err = 0;
void __user *fpstate = NULL;
@@ -555,9 +556,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
return -EFAULT;
}
+ uc_flags = frame_uc_flags(regs);
+
put_user_try {
/* Create the ucontext. */
- put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
+ put_user_ex(uc_flags, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
@@ -569,7 +572,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
restorer = NULL;
err |= -EFAULT;
}
- put_user_ex(restorer, &frame->pretcode);
+ put_user_ex(restorer, (unsigned long __user *)&frame->pretcode);
} put_user_catch(err);
err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
@@ -688,10 +691,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *set = sigmask_to_save();
compat_sigset_t *cset = (compat_sigset_t *) set;
- /*
- * Increment event counter and perform fixup for the pre-signal
- * frame.
- */
+ /* Perform fixup for the pre-signal frame. */
rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
@@ -763,8 +763,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Ensure the signal handler starts with the new fpu state.
*/
- if (fpu->initialized)
- fpu__clear(fpu);
+ fpu__clear(fpu);
}
signal_setup_done(failed, ksig, stepping);
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ce1a67b70168..73e69aaaa117 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -455,7 +455,7 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
* multicore group inside a NUMA node. If this happens, we will
* discard the MC level of the topology later.
*/
-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
if (c->phys_proc_id == o->phys_proc_id)
return true;
@@ -546,7 +546,7 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
- if ((i == cpu) || (has_mp && match_die(c, o))) {
+ if ((i == cpu) || (has_mp && match_pkg(c, o))) {
link_mask(topology_core_cpumask, cpu, i);
/*
@@ -570,7 +570,7 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
- if (match_die(c, o) && !topology_same_node(c, o))
+ if (match_pkg(c, o) && !topology_same_node(c, o))
x86_has_numa_in_package = true;
}
@@ -935,20 +935,27 @@ out:
return boot_error;
}
-void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
+ int ret;
+
/* Just in case we booted with a single CPU. */
alternatives_enable_smp();
per_cpu(current_task, cpu) = idle;
+ /* Initialize the interrupt stack(s) */
+ ret = irq_init_percpu_irqstack(cpu);
+ if (ret)
+ return ret;
+
#ifdef CONFIG_X86_32
/* Stack for startup_32 can be just as for start_secondary onwards */
- irq_ctx_init(cpu);
per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
initial_gs = per_cpu_offset(cpu);
#endif
+ return 0;
}
/*
@@ -1106,7 +1113,9 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
/* the FPU context is blank, nobody can own it */
per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
- common_cpu_up(cpu, tidle);
+ err = common_cpu_up(cpu, tidle);
+ if (err)
+ return err;
err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
if (err) {
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 5c2d71a1dc06..2abf27d7df6b 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -12,78 +12,31 @@
#include <asm/stacktrace.h>
#include <asm/unwind.h>
-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
- bool nosched)
-{
- if (nosched && in_sched_functions(addr))
- return 0;
-
- if (trace->skip > 0) {
- trace->skip--;
- return 0;
- }
-
- if (trace->nr_entries >= trace->max_entries)
- return -1;
-
- trace->entries[trace->nr_entries++] = addr;
- return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
- struct task_struct *task, struct pt_regs *regs,
- bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
struct unwind_state state;
unsigned long addr;
- if (regs)
- save_stack_address(trace, regs->ip, nosched);
+ if (regs && !consume_entry(cookie, regs->ip, false))
+ return;
for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
- if (!addr || save_stack_address(trace, addr, nosched))
+ if (!addr || !consume_entry(cookie, addr, false))
break;
}
-
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
}
/*
- * Save stack-backtrace addresses into a stack_trace buffer.
+ * This function returns an error if it detects any unreliable features of the
+ * stack. Otherwise it guarantees that the stack trace is reliable.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-void save_stack_trace(struct stack_trace *trace)
-{
- trace->skip++;
- __save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- __save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current)
- trace->skip++;
- __save_stack_trace(trace, tsk, NULL, true);
-
- put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
- struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
struct unwind_state state;
struct pt_regs *regs;
@@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (regs) {
/* Success path for user tasks */
if (user_mode(regs))
- goto success;
+ return 0;
/*
* Kernel mode registers on the stack indicate an
@@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!addr)
return -EINVAL;
- if (save_stack_address(trace, addr, false))
+ if (!consume_entry(cookie, addr, false))
return -EINVAL;
}
@@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace,
if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
return -EINVAL;
-success:
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-
return 0;
}
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
-{
- int ret;
-
- /*
- * If the task doesn't have a stack (e.g., a zombie), the stack is
- * "reliably" empty.
- */
- if (!try_get_task_stack(tsk))
- return 0;
-
- ret = __save_stack_trace_reliable(trace, tsk);
-
- put_task_stack(tsk);
-
- return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
struct stack_frame_user {
@@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
return ret;
}
-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
{
- const struct pt_regs *regs = task_pt_regs(current);
const void __user *fp = (const void __user *)regs->bp;
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = regs->ip;
+ if (!consume_entry(cookie, regs->ip, false))
+ return;
- while (trace->nr_entries < trace->max_entries) {
+ while (1) {
struct stack_frame_user frame;
frame.next_fp = NULL;
@@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
if ((unsigned long)fp < regs->sp)
break;
if (frame.ret_addr) {
- trace->entries[trace->nr_entries++] =
- frame.ret_addr;
+ if (!consume_entry(cookie, frame.ret_addr, false))
+ return;
}
if (fp == frame.next_fp)
break;
@@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
}
}
-void save_stack_trace_user(struct stack_trace *trace)
-{
- /*
- * Trace user stack if we are not a kernel thread
- */
- if (current->mm) {
- __save_stack_trace_user(trace);
- }
- if (trace->nr_entries < trace->max_entries)
- trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
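stacktrace.c now implements the callback-based arch_stack_walk()/arch_stack_walk_reliable()/arch_stack_walk_user() interface instead of filling struct stack_trace directly: the unwinder hands each return address to consume_entry() and stops as soon as the consumer says so. A self-contained illustration of that contract; the typedef below is a local stand-in mirroring the three-argument calls in the hunks, not the kernel header:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*consume_fn)(void *cookie, unsigned long addr, bool reliable);

struct trace_buf {
	unsigned long entries[8];
	unsigned int nr, max;
};

static bool store_entry(void *cookie, unsigned long addr, bool reliable)
{
	struct trace_buf *buf = cookie;

	(void)reliable;
	buf->entries[buf->nr++] = addr;
	return buf->nr < buf->max;	/* returning false stops the walker */
}

static void fake_arch_stack_walk(consume_fn consume, void *cookie)
{
	/* Stand-in for the unwinder: hand out a few fake return addresses. */
	for (unsigned long addr = 0x1000; addr < 0x1010; addr += 4)
		if (!consume(cookie, addr, false))
			return;
}

int main(void)
{
	struct trace_buf buf = { .max = 3 };

	fake_arch_stack_walk(store_entry, &buf);
	for (unsigned int i = 0; i < buf.nr; i++)
		printf("entry[%u] = %#lx\n", i, buf.entries[i]);
	return 0;
}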
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 738bf42b0218..be5bc2e47c71 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -71,7 +71,7 @@ int _debug_hotplug_cpu(int cpu, int action)
case 0:
ret = cpu_down(cpu);
if (!ret) {
- pr_info("CPU %u is now offline\n", cpu);
+ pr_info("DEBUG_HOTPLUG_CPU0: CPU %u is now offline\n", cpu);
dev->offline = true;
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
} else
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d26f9e9c3d83..8b6d03e55d2f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -456,7 +456,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
* which is all zeros which indicates MPX was not
* responsible for the exception.
*/
- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
if (!bndcsr)
goto exit_trap;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3fae23834069..356dfc555a27 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -185,8 +185,7 @@ static void __init cyc2ns_init_boot_cpu(void)
/*
* Secondary CPUs do not run through tsc_init(), so set up
* all the scale factors for all CPUs, assuming the same
- * speed as the bootup CPU. (cpufreq notifiers will fix this
- * up if their speed diverges)
+ * speed as the bootup CPU.
*/
static void __init cyc2ns_init_secondary_cpus(void)
{
@@ -283,6 +282,7 @@ int __init notsc_setup(char *str)
__setup("notsc", notsc_setup);
static int no_sched_irq_time;
+static int no_tsc_watchdog;
static int __init tsc_setup(char *str)
{
@@ -292,6 +292,8 @@ static int __init tsc_setup(char *str)
no_sched_irq_time = 1;
if (!strcmp(str, "unstable"))
mark_tsc_unstable("boot parameter");
+ if (!strcmp(str, "nowatchdog"))
+ no_tsc_watchdog = 1;
return 1;
}
@@ -937,12 +939,12 @@ void tsc_restore_sched_clock_state(void)
}
#ifdef CONFIG_CPU_FREQ
-/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
+/*
+ * Frequency scaling support. Adjust the TSC based timer when the CPU frequency
* changes.
*
- * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyroads.
+ * NOTE: On SMP the situation is not fixable in general, so simply mark the TSC
+ * as unstable and give up in those cases.
*
* Should fix up last_tsc too. Currently gettimeofday in the
* first tick after the change will be slightly wrong.
@@ -956,28 +958,28 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned long *lpj;
- lpj = &boot_cpu_data.loops_per_jiffy;
-#ifdef CONFIG_SMP
- if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#endif
+ if (num_online_cpus() > 1) {
+ mark_tsc_unstable("cpufreq changes on SMP");
+ return 0;
+ }
if (!ref_freq) {
ref_freq = freq->old;
- loops_per_jiffy_ref = *lpj;
+ loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
tsc_khz_ref = tsc_khz;
}
+
if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
- (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
- *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ boot_cpu_data.loops_per_jiffy =
+ cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
mark_tsc_unstable("cpufreq changes");
- set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
+ set_cyc2ns_scale(tsc_khz, freq->policy->cpu, rdtsc());
}
return 0;
@@ -1349,7 +1351,7 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
- if (tsc_clocksource_reliable)
+ if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
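The reworked time_cpufreq_notifier() above bails out on SMP and otherwise rescales proportionally via cpufreq_scale(), which computes old * new_freq / ref_freq with the reference frequency as divisor. A worked, standalone example with made-up frequencies:

#include <stdio.h>

static unsigned long scale(unsigned long old, unsigned int div, unsigned int mult)
{
	return (unsigned long)((unsigned long long)old * mult / div);
}

int main(void)
{
	unsigned int ref_khz = 2000000, new_khz = 2400000;	/* 2.0 -> 2.4 GHz */
	unsigned long tsc_khz_ref = 2000000, lpj_ref = 4000000;

	printf("tsc_khz = %lu\n", scale(tsc_khz_ref, ref_khz, new_khz)); /* 2400000 */
	printf("lpj     = %lu\n", scale(lpj_ref, ref_khz, new_khz));     /* 4800000 */
	return 0;
}

The same file also gains a tsc=nowatchdog boot parameter, which the final hunk uses to clear CLOCK_SOURCE_MUST_VERIFY, i.e. to keep the TSC out of the clocksource watchdog.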
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index a092b6b40c6b..6a38717d179c 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -369,7 +369,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
preempt_disable();
tsk->thread.sp0 += 16;
- if (static_cpu_has(X86_FEATURE_SEP)) {
+ if (boot_cpu_has(X86_FEATURE_SEP)) {
tsk->thread.sysenter_cs = 0;
refresh_sysenter_cs(&tsk->thread);
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a5127b2c195f..0850b5149345 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -141,11 +141,11 @@ SECTIONS
*(.text.__x86.indirect_thunk)
__indirect_thunk_end = .;
#endif
-
- /* End of text section */
- _etext = .;
} :text = 0x9090
+ /* End of text section */
+ _etext = .;
+
NOTES :text :note
EXCEPTION_TABLE(16) :text = 0x9090
@@ -403,7 +403,8 @@ SECTIONS
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(irq_stack_union);
+INIT_PER_CPU(fixed_percpu_data);
+INIT_PER_CPU(irq_stack_backing_store);
/*
* Build-time check on the image size:
@@ -412,8 +413,8 @@ INIT_PER_CPU(irq_stack_union);
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-. = ASSERT((irq_stack_union == 0),
- "irq_stack_union is not at start of per-cpu area");
+. = ASSERT((fixed_percpu_data == 0),
+ "fixed_percpu_data is not at start of per-cpu area");
#endif
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 72fa955f4a15..fc042419e670 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -27,7 +27,6 @@ config KVM
depends on X86_LOCAL_APIC
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
- select ANON_INODES
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select IRQ_BYPASS_MANAGER
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fd3951638ae4..80a642a0143d 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -410,7 +410,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);
+ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
+ F(MD_CLEAR);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -962,13 +963,13 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
return 1;
- eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ eax = kvm_rax_read(vcpu);
+ ecx = kvm_rcx_read(vcpu);
kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
- kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
+ kvm_rax_write(vcpu, eax);
+ kvm_rbx_write(vcpu, ebx);
+ kvm_rcx_write(vcpu, ecx);
+ kvm_rdx_write(vcpu, edx);
return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 421899f6ad7b..8ca4b39918e0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1371,7 +1371,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
- all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
+
+ /*
+ * Work around possible WS2012 bug: it sends hypercalls
+ * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
+ * while also expecting us to flush something and crashing if
+ * we don't. Let's treat processor_mask == 0 same as
+ * HV_FLUSH_ALL_PROCESSORS.
+ */
+ all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
+ flush.processor_mask == 0;
} else {
if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
sizeof(flush_ex))))
@@ -1526,10 +1535,10 @@ static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
longmode = is_64_bit_mode(vcpu);
if (longmode)
- kvm_register_write(vcpu, VCPU_REGS_RAX, result);
+ kvm_rax_write(vcpu, result);
else {
- kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
- kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
+ kvm_rdx_write(vcpu, result >> 32);
+ kvm_rax_write(vcpu, result & 0xffffffff);
}
}
@@ -1602,18 +1611,18 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
longmode = is_64_bit_mode(vcpu);
if (!longmode) {
- param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
- ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
- outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
- (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+ param = ((u64)kvm_rdx_read(vcpu) << 32) |
+ (kvm_rax_read(vcpu) & 0xffffffff);
+ ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
+ (kvm_rcx_read(vcpu) & 0xffffffff);
+ outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
+ (kvm_rsi_read(vcpu) & 0xffffffff);
}
#ifdef CONFIG_X86_64
else {
- param = kvm_register_read(vcpu, VCPU_REGS_RCX);
- ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
- outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+ param = kvm_rcx_read(vcpu);
+ ingpa = kvm_rdx_read(vcpu);
+ outgpa = kvm_r8_read(vcpu);
}
#endif
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index f8f56a93358b..1cc6c47dc77e 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -9,6 +9,34 @@
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
+static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
+{ \
+ return vcpu->arch.regs[VCPU_REGS_##uname]; \
+} \
+static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
+ unsigned long val) \
+{ \
+ vcpu->arch.regs[VCPU_REGS_##uname] = val; \
+}
+BUILD_KVM_GPR_ACCESSORS(rax, RAX)
+BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
+BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
+BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
+BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
+BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
+BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
+#ifdef CONFIG_X86_64
+BUILD_KVM_GPR_ACCESSORS(r8, R8)
+BUILD_KVM_GPR_ACCESSORS(r9, R9)
+BUILD_KVM_GPR_ACCESSORS(r10, R10)
+BUILD_KVM_GPR_ACCESSORS(r11, R11)
+BUILD_KVM_GPR_ACCESSORS(r12, R12)
+BUILD_KVM_GPR_ACCESSORS(r13, R13)
+BUILD_KVM_GPR_ACCESSORS(r14, R14)
+BUILD_KVM_GPR_ACCESSORS(r15, R15)
+#endif
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -37,6 +65,16 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}
+static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
+{
+ return kvm_register_read(vcpu, VCPU_REGS_RSP);
+}
+
+static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+}
+
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
might_sleep(); /* on svm */
@@ -83,8 +121,8 @@ static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
- return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
- | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
+ return (kvm_rax_read(vcpu) & -1u)
+ | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
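For readers skimming the new accessors: BUILD_KVM_GPR_ACCESSORS(rax, RAX) above expands to exactly the pair below. Nothing new here, just the macro written out by hand for reference.

static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs[VCPU_REGS_RAX];
}

static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
					  unsigned long val)
{
	vcpu->arch.regs[VCPU_REGS_RAX] = val;
}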
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9bf70cf84564..4924f83ed4f3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -70,7 +70,6 @@
#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul
-static bool lapic_timer_advance_adjust_done = false;
#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
@@ -1455,7 +1454,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
if (swait_active(q))
swake_up_one(q);
- if (apic_lvtt_tscdeadline(apic))
+ if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
ktimer->expired_tscdeadline = ktimer->tscdeadline;
}
@@ -1482,14 +1481,32 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
return false;
}
+static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
+{
+ u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
+
+ /*
+ * If the guest TSC is running at a different ratio than the host, then
+ * convert the delay to nanoseconds to achieve an accurate delay. Note
+ * that __delay() uses delay_tsc whenever the hardware has TSC, thus
+ * always for VMX enabled hardware.
+ */
+ if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
+ __delay(min(guest_cycles,
+ nsec_to_cycles(vcpu, timer_advance_ns)));
+ } else {
+ u64 delay_ns = guest_cycles * 1000000ULL;
+ do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
+ ndelay(min_t(u32, delay_ns, timer_advance_ns));
+ }
+}
+
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
u64 guest_tsc, tsc_deadline, ns;
- if (!lapic_in_kernel(vcpu))
- return;
-
if (apic->lapic_timer.expired_tscdeadline == 0)
return;
@@ -1501,33 +1518,37 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
- /* __delay is delay_tsc whenever the hardware has TSC, thus always. */
if (guest_tsc < tsc_deadline)
- __delay(min(tsc_deadline - guest_tsc,
- nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
+ __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
- if (!lapic_timer_advance_adjust_done) {
+ if (!apic->lapic_timer.timer_advance_adjust_done) {
/* too early */
if (guest_tsc < tsc_deadline) {
ns = (tsc_deadline - guest_tsc) * 1000000ULL;
do_div(ns, vcpu->arch.virtual_tsc_khz);
- lapic_timer_advance_ns -= min((unsigned int)ns,
- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ timer_advance_ns -= min((u32)ns,
+ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
} else {
/* too late */
ns = (guest_tsc - tsc_deadline) * 1000000ULL;
do_div(ns, vcpu->arch.virtual_tsc_khz);
- lapic_timer_advance_ns += min((unsigned int)ns,
- lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ timer_advance_ns += min((u32)ns,
+ timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
}
if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
- lapic_timer_advance_adjust_done = true;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ if (unlikely(timer_advance_ns > 5000)) {
+ timer_advance_ns = 0;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ }
+ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
- u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+ u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
u64 ns = 0;
ktime_t expire;
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -1542,13 +1563,15 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
now = ktime_get();
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- if (likely(tscdeadline > guest_tsc)) {
- ns = (tscdeadline - guest_tsc) * 1000000ULL;
- do_div(ns, this_tsc_khz);
+
+ ns = (tscdeadline - guest_tsc) * 1000000ULL;
+ do_div(ns, this_tsc_khz);
+
+ if (likely(tscdeadline > guest_tsc) &&
+ likely(ns > apic->lapic_timer.timer_advance_ns)) {
expire = ktime_add_ns(now, ns);
- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
- hrtimer_start(&apic->lapic_timer.timer,
- expire, HRTIMER_MODE_ABS_PINNED);
+ expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
+ hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_PINNED);
} else
apic_timer_expired(apic);
@@ -1673,37 +1696,42 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
static bool start_hv_timer(struct kvm_lapic *apic)
{
struct kvm_timer *ktimer = &apic->lapic_timer;
- int r;
+ struct kvm_vcpu *vcpu = apic->vcpu;
+ bool expired;
WARN_ON(preemptible());
if (!kvm_x86_ops->set_hv_timer)
return false;
- if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
- return false;
-
if (!ktimer->tscdeadline)
return false;
- r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
- if (r < 0)
+ if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
return false;
ktimer->hv_timer_in_use = true;
hrtimer_cancel(&ktimer->timer);
/*
- * Also recheck ktimer->pending, in case the sw timer triggered in
- * the window. For periodic timer, leave the hv timer running for
- * simplicity, and the deadline will be recomputed on the next vmexit.
+ * To simplify handling the periodic timer, leave the hv timer running
+ * even if the deadline timer has expired, i.e. rely on the resulting
+ * VM-Exit to recompute the periodic timer's target expiration.
*/
- if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
- if (r)
+ if (!apic_lvtt_period(apic)) {
+ /*
+ * Cancel the hv timer if the sw timer fired while the hv timer
+ * was being programmed, or if the hv timer itself expired.
+ */
+ if (atomic_read(&ktimer->pending)) {
+ cancel_hv_timer(apic);
+ } else if (expired) {
apic_timer_expired(apic);
- return false;
+ cancel_hv_timer(apic);
+ }
}
- trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
+ trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
+
return true;
}
@@ -1727,8 +1755,13 @@ static void start_sw_timer(struct kvm_lapic *apic)
static void restart_apic_timer(struct kvm_lapic *apic)
{
preempt_disable();
+
+ if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
+ goto out;
+
if (!start_hv_timer(apic))
start_sw_timer(apic);
+out:
preempt_enable();
}
@@ -2255,7 +2288,7 @@ static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
return HRTIMER_NORESTART;
}
-int kvm_create_lapic(struct kvm_vcpu *vcpu)
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
struct kvm_lapic *apic;
@@ -2279,6 +2312,14 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
+ if (timer_advance_ns == -1) {
+ apic->lapic_timer.timer_advance_ns = 1000;
+ apic->lapic_timer.timer_advance_adjust_done = false;
+ } else {
+ apic->lapic_timer.timer_advance_ns = timer_advance_ns;
+ apic->lapic_timer.timer_advance_adjust_done = true;
+ }
+
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ff6ef9c3d760..d6d049ba3045 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -31,8 +31,10 @@ struct kvm_timer {
u32 timer_mode_mask;
u64 tscdeadline;
u64 expired_tscdeadline;
+ u32 timer_advance_ns;
atomic_t pending; /* accumulated triggered timers */
bool hv_timer_in_use;
+ bool timer_advance_adjust_done;
};
struct kvm_lapic {
@@ -62,7 +64,7 @@ struct kvm_lapic {
struct dest_map;
-int kvm_create_lapic(struct kvm_vcpu *vcpu);
+int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
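The wait_lapic_expire() changes above turn the old global lapic_timer_advance_ns into the per-timer timer_advance_ns field added here, auto-tuned at runtime: each early or late expiry nudges the value by at most 1/8 of its current size, tuning stops once the error is small, and anything above 5000 ns is treated as bogus and reset. A userspace toy of that feedback loop; all numbers are invented except the 1000 ns starting value, which is the default the kvm_create_lapic() hunk uses:

#include <stdio.h>

int main(void)
{
	unsigned int advance_ns = 1000;		/* initial value from the patch */
	const int actual_ns = 1800;		/* pretend exit latency to hide */

	for (int i = 0; i < 12; i++) {
		int late_by = actual_ns - (int)advance_ns;	/* >0: fired late */
		unsigned int err = late_by > 0 ? late_by : -late_by;
		unsigned int step = err < advance_ns / 8 ? err : advance_ns / 8;

		if (late_by > 0)
			advance_ns += step;
		else
			advance_ns -= step;
		printf("round %2d: timer_advance_ns = %u\n", i, advance_ns);
		if (err < 100)			/* toy "close enough" cut-off */
			break;
	}
	return 0;
}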
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e10962dfc203..1e9ba81accba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -44,6 +44,7 @@
#include <asm/page.h>
#include <asm/pat.h>
#include <asm/cmpxchg.h>
+#include <asm/e820/api.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
@@ -487,16 +488,24 @@ static void kvm_mmu_reset_all_pte_masks(void)
* If the CPU has 46 or less physical address bits, then set an
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
+ *
+ * Some Intel CPUs address the L1 cache using more PA bits than are
+ * reported by CPUID. Use the PA width of the L1 cache when possible
+ * to achieve more effective mitigation, e.g. if system RAM overlaps
+ * the most significant bits of legal physical address space.
*/
- low_phys_bits = boot_cpu_data.x86_phys_bits;
- if (boot_cpu_data.x86_phys_bits <
+ shadow_nonpresent_or_rsvd_mask = 0;
+ low_phys_bits = boot_cpu_data.x86_cache_bits;
+ if (boot_cpu_data.x86_cache_bits <
52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
- rsvd_bits(boot_cpu_data.x86_phys_bits -
+ rsvd_bits(boot_cpu_data.x86_cache_bits -
shadow_nonpresent_or_rsvd_mask_len,
- boot_cpu_data.x86_phys_bits - 1);
+ boot_cpu_data.x86_cache_bits - 1);
low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
- }
+ } else
+ WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
@@ -2892,7 +2901,9 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
*/
(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
- return true;
+ return !e820__mapped_raw_any(pfn_to_hpa(pfn),
+ pfn_to_hpa(pfn + 1) - 1,
+ E820_TYPE_RAM);
}
/* Bits which may be returned by set_spte() */
@@ -4781,6 +4792,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
union kvm_mmu_extended_role ext = {0};
ext.cr0_pg = !!is_paging(vcpu);
+ ext.cr4_pae = !!is_pae(vcpu);
ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
ext.cr4_pse = !!is_pse(vcpu);
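The first mmu.c hunk switches the L1TF mitigation mask from x86_phys_bits to x86_cache_bits, so the reserved marker lands in the bits the L1 cache actually tags. As a worked example, standalone and with rsvd_bits() re-declared locally with the same meaning; the 5-bit mask length is an assumption about shadow_nonpresent_or_rsvd_mask_len and is not shown in the hunk: with 46 cache PA bits the mask covers bits 41..45.

#include <stdio.h>
#include <stdint.h>

static uint64_t rsvd_bits(int s, int e)
{
	/* Bits s..e inclusive, as in the KVM MMU helper. */
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int cache_bits = 46, len = 5;		/* len is an assumed value */
	uint64_t mask = rsvd_bits(cache_bits - len, cache_bits - 1);

	printf("mask = %#llx\n", (unsigned long long)mask);	/* 0x3e0000000000 */
	return 0;
}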
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e9ea2d45ae66..9f72cc427158 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
return false;
}
-static bool valid_pat_type(unsigned t)
-{
- return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
-}
-
static bool valid_mtrr_type(unsigned t)
{
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return false;
if (msr == MSR_IA32_CR_PAT) {
- for (i = 0; i < 8; i++)
- if (!valid_pat_type((data >> (i * 8)) & 0xff))
- return false;
- return true;
+ return kvm_pat_valid(data);
} else if (msr == MSR_MTRRdefType) {
if (data & ~0xcff)
return false;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6bdca39829bc..367a47df4ba0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -140,16 +140,36 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pt_element_t *table;
struct page *page;
- npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
- /* Check if the user is doing something meaningless. */
- if (unlikely(npages != 1))
- return -EFAULT;
-
- table = kmap_atomic(page);
- ret = CMPXCHG(&table[index], orig_pte, new_pte);
- kunmap_atomic(table);
-
- kvm_release_page_dirty(page);
+ npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
+ if (likely(npages == 1)) {
+ table = kmap_atomic(page);
+ ret = CMPXCHG(&table[index], orig_pte, new_pte);
+ kunmap_atomic(table);
+
+ kvm_release_page_dirty(page);
+ } else {
+ struct vm_area_struct *vma;
+ unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
+ unsigned long pfn;
+ unsigned long paddr;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
+ if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ paddr = pfn << PAGE_SHIFT;
+ table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
+ if (!table) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ ret = CMPXCHG(&table[index], orig_pte, new_pte);
+ memunmap(table);
+ up_read(&current->mm->mmap_sem);
+ }
return (ret != orig_pte);
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 406b558abfef..a849dcb7fbc5 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1805,7 +1805,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
return NULL;
/* Pin the user virtual address. */
- npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
if (npinned != npages) {
pr_err("SEV: Failure locking %lu pages.\n", npages);
goto err;
@@ -2091,7 +2091,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
init_vmcb(svm);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
- kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
+ kvm_rdx_write(vcpu, eax);
if (kvm_vcpu_apicv_active(vcpu) && !init_event)
avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
@@ -3071,32 +3071,6 @@ static inline bool nested_svm_nmi(struct vcpu_svm *svm)
return false;
}
-static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
-{
- struct page *page;
-
- might_sleep();
-
- page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
- goto error;
-
- *_page = page;
-
- return kmap(page);
-
-error:
- kvm_inject_gp(&svm->vcpu, 0);
-
- return NULL;
-}
-
-static void nested_svm_unmap(struct page *page)
-{
- kunmap(page);
- kvm_release_page_dirty(page);
-}
-
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
unsigned port, size, iopm_len;
@@ -3299,10 +3273,11 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
+ int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
- struct page *page;
+ struct kvm_host_map map;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
@@ -3311,9 +3286,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
- nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
- if (!nested_vmcb)
+ rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(svm->nested.vmcb), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
@@ -3408,16 +3388,16 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
} else {
(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
}
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
+ kvm_rax_write(&svm->vcpu, hsave->save.rax);
+ kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
+ kvm_rip_write(&svm->vcpu, hsave->save.rip);
svm->vmcb->save.dr7 = 0;
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
mark_all_dirty(svm->vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu);
@@ -3483,7 +3463,7 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
}
static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
- struct vmcb *nested_vmcb, struct page *page)
+ struct vmcb *nested_vmcb, struct kvm_host_map *map)
{
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -3516,9 +3496,9 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+ kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
+ kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
+ kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
@@ -3567,7 +3547,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->control.pause_filter_thresh =
nested_vmcb->control.pause_filter_thresh;
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, map, true);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
@@ -3587,17 +3567,23 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
+ int rc;
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
- struct page *page;
+ struct kvm_host_map map;
u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(vmcb_gpa), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return false;
+ }
+
+ nested_vmcb = map.hva;
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
@@ -3605,7 +3591,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0;
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return false;
}
@@ -3649,7 +3635,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
copy_vmcb_control_area(hsave, vmcb);
- enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
+ enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
return true;
}
@@ -3673,21 +3659,26 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
int ret;
if (nested_svm_check_permissions(svm))
return 1;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (ret) {
+ if (ret == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
}
@@ -3695,21 +3686,26 @@ static int vmload_interception(struct vcpu_svm *svm)
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
int ret;
if (nested_svm_check_permissions(svm))
return 1;
- nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
- if (!nested_vmcb)
+ ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (ret) {
+ if (ret == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
return 1;
+ }
+
+ nested_vmcb = map.hva;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
ret = kvm_skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- nested_svm_unmap(page);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
return ret;
}
@@ -3791,11 +3787,11 @@ static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
- trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
- kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
+ kvm_rax_read(&svm->vcpu));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3803,7 +3799,7 @@ static int invlpga_interception(struct vcpu_svm *svm)
static int skinit_interception(struct vcpu_svm *svm)
{
- trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
+ trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
@@ -3817,7 +3813,7 @@ static int wbinvd_interception(struct vcpu_svm *svm)
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
- u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 index = kvm_rcx_read(&svm->vcpu);
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
@@ -4213,7 +4209,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int rdmsr_interception(struct vcpu_svm *svm)
{
- u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(&svm->vcpu);
struct msr_data msr_info;
msr_info.index = ecx;
@@ -4225,10 +4221,8 @@ static int rdmsr_interception(struct vcpu_svm *svm)
} else {
trace_kvm_msr_read(ecx, msr_info.data);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
- msr_info.data & 0xffffffff);
- kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
- msr_info.data >> 32);
+ kvm_rax_write(&svm->vcpu, msr_info.data & 0xffffffff);
+ kvm_rdx_write(&svm->vcpu, msr_info.data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
return kvm_skip_emulated_instruction(&svm->vcpu);
}
@@ -4422,7 +4416,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
- u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(&svm->vcpu);
u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
@@ -6236,7 +6230,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *nested_vmcb;
- struct page *page;
+ struct kvm_host_map map;
u64 guest;
u64 vmcb;
@@ -6244,10 +6238,10 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
if (guest) {
- nested_vmcb = nested_svm_map(svm, vmcb, &page);
- if (!nested_vmcb)
+ if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
return 1;
- enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
+ nested_vmcb = map.hva;
+ enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
}
return 0;
}
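
The svm.c hunks above all apply the same conversion from nested_svm_map()/nested_svm_unmap() to the gfn-based mapping API. Condensed from those hunks, the resulting pattern looks roughly as follows (a schematic only, not a standalone compilable unit: it assumes the in-kernel kvm_vcpu_map()/kvm_vcpu_unmap() helpers and a struct vcpu_svm *svm from the surrounding code):

	struct kvm_host_map map;
	struct vmcb *nested_vmcb;
	int rc;

	/* Map the guest page by frame number; no kmap() or struct page pinning. */
	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
	if (rc) {
		if (rc == -EINVAL)		/* malformed address: reflect as #GP */
			kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}
	nested_vmcb = map.hva;			/* host virtual address of the VMCB */

	/* ... read or modify the nested VMCB through nested_vmcb ... */

	kvm_vcpu_unmap(&svm->vcpu, &map, true);	/* final 'true' marks the page dirty */
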
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 854e144131c6..d6664ee3d127 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -2,6 +2,8 @@
#ifndef __KVM_X86_VMX_CAPS_H
#define __KVM_X86_VMX_CAPS_H
+#include <asm/vmx.h>
+
#include "lapic.h"
extern bool __read_mostly enable_vpid;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6401eb7ef19c..f1a69117ac0f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -193,10 +193,8 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
if (!vmx->nested.hv_evmcs)
return;
- kunmap(vmx->nested.hv_evmcs_page);
- kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
vmx->nested.hv_evmcs_vmptr = -1ull;
- vmx->nested.hv_evmcs_page = NULL;
vmx->nested.hv_evmcs = NULL;
}
@@ -229,16 +227,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
+ kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
+ vmx->nested.pi_desc = NULL;
kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
@@ -519,39 +510,19 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int msr;
- struct page *page;
unsigned long *msr_bitmap_l1;
unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
- /*
- * pred_cmd & spec_ctrl are trying to verify two things:
- *
- * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
- * ensures that we do not accidentally generate an L02 MSR bitmap
- * from the L12 MSR bitmap that is too permissive.
- * 2. That L1 or L2s have actually used the MSR. This avoids
- * unnecessarily merging of the bitmap if the MSR is unused. This
- * works properly because we only update the L01 MSR bitmap lazily.
- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
- * updated to reflect this when L1 (or its L2s) actually write to
- * the MSR.
- */
- bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
- bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+ struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
/* Nothing to do if the MSR bitmap is not in use. */
if (!cpu_has_vmx_msr_bitmap() ||
!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return false;
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- !pred_cmd && !spec_ctrl)
- return false;
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
return false;
- msr_bitmap_l1 = (unsigned long *)kmap(page);
+ msr_bitmap_l1 = (unsigned long *)map->hva;
/*
* To keep the control flow simple, pay eight 8-byte writes (sixteen
@@ -592,20 +563,42 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
}
}
- if (spec_ctrl)
+ /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_FS_BASE, MSR_TYPE_RW);
+
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_GS_BASE, MSR_TYPE_RW);
+
+ nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+ MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+
+ /*
+ * Checking the L0->L1 bitmap is trying to verify two things:
+ *
+ * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
+ * ensures that we do not accidentally generate an L02 MSR bitmap
+ * from the L12 MSR bitmap that is too permissive.
+ * 2. That L1 or L2s have actually used the MSR. This avoids
+ * unnecessary merging of the bitmap if the MSR is unused. This
+ * works properly because we only update the L01 MSR bitmap lazily.
+ * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
+ * updated to reflect this when L1 (or its L2s) actually write to
+ * the MSR.
+ */
+ if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_SPEC_CTRL,
MSR_TYPE_R | MSR_TYPE_W);
- if (pred_cmd)
+ if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_PRED_CMD,
MSR_TYPE_W);
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
return true;
}
@@ -613,20 +606,20 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
+ struct kvm_host_map map;
struct vmcs12 *shadow;
- struct page *page;
if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
vmcs12->vmcs_link_pointer == -1ull)
return;
shadow = get_shadow_vmcs12(vcpu);
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
- memcpy(shadow, kmap(page), VMCS12_SIZE);
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
+ return;
- kunmap(page);
- kvm_release_page_clean(page);
+ memcpy(shadow, map.hva, VMCS12_SIZE);
+ kvm_vcpu_unmap(vcpu, &map, false);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
@@ -930,7 +923,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
if (!nested_cr3_valid(vcpu, cr3)) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
+ return -EINVAL;
}
/*
@@ -941,7 +934,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
!nested_ept) {
if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
*entry_failure_code = ENTRY_FAIL_PDPTE;
- return 1;
+ return -EINVAL;
}
}
}
@@ -1794,13 +1787,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
nested_release_evmcs(vcpu);
- vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
- vcpu, assist_page.current_nested_vmcs);
-
- if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(assist_page.current_nested_vmcs),
+ &vmx->nested.hv_evmcs_map))
return 0;
- vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+ vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
/*
* Currently, KVM only supports eVMCS version 1
@@ -2373,19 +2364,19 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
*/
if (vmx->emulation_required) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
+ return -EINVAL;
}
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
- return 1;
+ return -EINVAL;
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+ kvm_rsp_write(vcpu, vmcs12->guest_rsp);
+ kvm_rip_write(vcpu, vmcs12->guest_rip);
return 0;
}
@@ -2589,11 +2580,19 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Checks related to Host Control Registers and MSRs
- */
-static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+ nested_check_vm_exit_controls(vcpu, vmcs12) ||
+ nested_check_vm_entry_controls(vcpu, vmcs12))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
{
bool ia32e;
@@ -2606,6 +2605,10 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
return -EINVAL;
+ if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
+ !kvm_pat_valid(vmcs12->host_ia32_pat))
+ return -EINVAL;
+
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,
@@ -2624,41 +2627,12 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Checks related to Guest Non-register State
- */
-static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
-{
- if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
- vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
- nested_check_vm_exit_controls(vcpu, vmcs12) ||
- nested_check_vm_entry_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_check_host_control_regs(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-
- if (nested_check_guest_non_reg_state(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- return 0;
-}
-
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- int r;
- struct page *page;
+ int r = 0;
struct vmcs12 *shadow;
+ struct kvm_host_map map;
if (vmcs12->vmcs_link_pointer == -1ull)
return 0;
@@ -2666,23 +2640,34 @@ static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
return -EINVAL;
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
return -EINVAL;
- r = 0;
- shadow = kmap(page);
+ shadow = map.hva;
+
if (shadow->hdr.revision_id != VMCS12_REVISION ||
shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
r = -EINVAL;
- kunmap(page);
- kvm_release_page_clean(page);
+
+ kvm_vcpu_unmap(vcpu, &map, false);
return r;
}
-static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 *exit_qual)
+/*
+ * Checks related to Guest Non-register State
+ */
+static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
+{
+ if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+ vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 *exit_qual)
{
bool ia32e;
@@ -2690,11 +2675,15 @@ static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
- return 1;
+ return -EINVAL;
+
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
+ !kvm_pat_valid(vmcs12->guest_ia32_pat))
+ return -EINVAL;
if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
- return 1;
+ return -EINVAL;
}
/*
@@ -2713,13 +2702,16 @@ static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
((vmcs12->guest_cr0 & X86_CR0_PG) &&
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
- return 1;
+ return -EINVAL;
}
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
- (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
- (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
- return 1;
+ (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+ (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+ return -EINVAL;
+
+ if (nested_check_guest_non_reg_state(vmcs12))
+ return -EINVAL;
return 0;
}
@@ -2832,6 +2824,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_host_map *map;
struct page *page;
u64 hpa;
@@ -2864,20 +2857,14 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
+ map = &vmx->nested.virtual_apic_map;
/*
* If translation failed, VM entry will fail because
* prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
*/
- if (!is_error_page(page)) {
- vmx->nested.virtual_apic_page = page;
- hpa = page_to_phys(vmx->nested.virtual_apic_page);
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+ if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
!nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
@@ -2898,26 +2885,15 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
if (nested_cpu_has_posted_intr(vmcs12)) {
- if (vmx->nested.pi_desc_page) { /* shouldn't happen */
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
+ map = &vmx->nested.pi_desc_map;
+
+ if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
+ vmx->nested.pi_desc =
+ (struct pi_desc *)(((void *)map->hva) +
+ offset_in_page(vmcs12->posted_intr_desc_addr));
+ vmcs_write64(POSTED_INTR_DESC_ADDR,
+ pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
}
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
- if (is_error_page(page))
- return;
- vmx->nested.pi_desc_page = page;
- vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc =
- (struct pi_desc *)((void *)vmx->nested.pi_desc +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
- vmcs_write64(POSTED_INTR_DESC_ADDR,
- page_to_phys(vmx->nested.pi_desc_page) +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
}
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
@@ -3000,7 +2976,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
return -1;
}
- if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
goto vmentry_fail_vmexit;
}
@@ -3145,9 +3121,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
- if (ret)
- return nested_vmx_failValid(vcpu, ret);
+ if (nested_vmx_check_controls(vcpu, vmcs12))
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ if (nested_vmx_check_host_state(vcpu, vmcs12))
+ return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
/*
* We're finally done with prerequisite checking, and can start with
@@ -3310,11 +3288,12 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
- vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vapic_page = vmx->nested.virtual_apic_map.hva;
+ if (!vapic_page)
+ return;
+
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vapic_page, &max_irr);
- kunmap(vmx->nested.virtual_apic_page);
-
status = vmcs_read16(GUEST_INTR_STATUS);
if ((u8)max_irr > ((u8)status & 0xff)) {
status &= ~0xff;
@@ -3425,8 +3404,8 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
- vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+ vmcs12->guest_rsp = kvm_rsp_read(vcpu);
+ vmcs12->guest_rip = kvm_rip_read(vcpu);
vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
@@ -3609,8 +3588,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
vmx_set_efer(vcpu, vcpu->arch.efer);
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+ kvm_rsp_write(vcpu, vmcs12->host_rsp);
+ kvm_rip_write(vcpu, vmcs12->host_rip);
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
vmx_set_interrupt_shadow(vcpu, 0);
@@ -3955,16 +3934,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
kvm_release_page_dirty(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
+ kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
+ kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
+ vmx->nested.pi_desc = NULL;
/*
* We are now running in L2, mmu_notifier will force to reload the
@@ -4260,7 +4232,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
gpa_t vmptr;
- struct page *page;
+ uint32_t revision;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -4306,20 +4278,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
- return nested_vmx_failInvalid(vcpu);
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failInvalid(vcpu);
- if (*(u32 *)kmap(page) != VMCS12_REVISION) {
- kunmap(page);
- kvm_release_page_clean(page);
+ if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
+ revision != VMCS12_REVISION)
return nested_vmx_failInvalid(vcpu);
- }
- kunmap(page);
- kvm_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
ret = enter_vmx_operation(vcpu);
@@ -4377,7 +4341,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
@@ -4385,7 +4349,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
return nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
- if (vmx->nested.hv_evmcs_page) {
+ if (vmx->nested.hv_evmcs_map.hva) {
if (vmptr == vmx->nested.hv_evmcs_vmptr)
nested_release_evmcs(vcpu);
} else {
@@ -4584,7 +4548,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ if (!page_address_valid(vcpu, vmptr))
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
@@ -4597,11 +4561,10 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return 1;
if (vmx->nested.current_vmptr != vmptr) {
+ struct kvm_host_map map;
struct vmcs12 *new_vmcs12;
- struct page *page;
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the
@@ -4611,12 +4574,13 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
- new_vmcs12 = kmap(page);
+
+ new_vmcs12 = map.hva;
+
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
(new_vmcs12->hdr.shadow_vmcs &&
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &map, false);
return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
}
@@ -4628,8 +4592,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
* cached.
*/
memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
- kunmap(page);
- kvm_release_page_clean(page);
+ kvm_vcpu_unmap(vcpu, &map, false);
set_current_vmptr(vmx, vmptr);
}
@@ -4804,7 +4767,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 index = kvm_rcx_read(vcpu);
u64 address;
bool accessed_dirty;
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -4850,7 +4813,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12;
- u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
+ u32 function = kvm_rax_read(vcpu);
/*
* VMFUNC is only supported for nested guests, but we always enable the
@@ -4936,7 +4899,7 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, u32 exit_reason)
{
- u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 msr_index = kvm_rcx_read(vcpu);
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
@@ -5373,9 +5336,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->format != 0)
return -EINVAL;
- if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
- nested_enable_evmcs(vcpu, NULL);
-
if (!nested_vmx_allowed(vcpu))
return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
@@ -5417,13 +5377,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->vmx.vmxon_pa == -1ull)
return 0;
+ if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+ nested_enable_evmcs(vcpu, NULL);
+
vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
ret = enter_vmx_operation(vcpu);
if (ret)
return ret;
/* Empty 'VMXON' state is permitted */
- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
return 0;
if (kvm_state->vmx.vmcs_pa != -1ull) {
@@ -5460,14 +5423,11 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
return 0;
- vmx->nested.nested_run_pending =
- !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
-
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) {
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
- if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
+ if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
return -EINVAL;
if (copy_from_user(shadow_vmcs12,
@@ -5480,14 +5440,20 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
}
- if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
- nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ if (nested_vmx_check_controls(vcpu, vmcs12) ||
+ nested_vmx_check_host_state(vcpu, vmcs12) ||
+ nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
return -EINVAL;
vmx->nested.dirty_vmcs12 = true;
+ vmx->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
ret = nested_vmx_enter_non_root_mode(vcpu, false);
- if (ret)
+ if (ret) {
+ vmx->nested.nested_run_pending = 0;
return -EINVAL;
+ }
return 0;
}
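
The posted-interrupt hunk above writes POSTED_INTR_DESC_ADDR as pfn_to_hpa(map->pfn) plus offset_in_page() of the guest address, because the descriptor need not start on a page boundary. A small stand-alone illustration of that arithmetic (plain user-space C with a 4 KiB page size; the sample pfn and gpa are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

	static uint64_t pfn_to_hpa(uint64_t pfn)      { return pfn << PAGE_SHIFT; }
	static uint64_t offset_in_page(uint64_t addr) { return addr & (PAGE_SIZE - 1); }

	int main(void)
	{
		uint64_t gpa = 0x12345640;	/* guest address of the PI descriptor */
		uint64_t pfn = 0xabcde;		/* host pfn the mapping resolved to   */

		/* Same composition as vmcs_write64(POSTED_INTR_DESC_ADDR, ...) above. */
		uint64_t hpa = pfn_to_hpa(pfn) + offset_in_page(gpa);

		printf("hpa = %#llx\n", (unsigned long long)hpa);	/* 0xabcde640 */
		return 0;
	}
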
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 5ab4a364348e..f8502c376b37 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -227,7 +227,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
- if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+ if (!(data & pmu->global_ovf_ctrl_mask)) {
if (!msr_info->host_initiated)
pmu->global_status &= ~data;
pmu->global_ovf_ctrl = data;
@@ -297,6 +297,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
+ pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
+ & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
+ MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
+ if (kvm_x86_ops->pt_supported())
+ pmu->global_ovf_ctrl_mask &=
+ ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
entry = kvm_find_cpuid_entry(vcpu, 7, 0);
if (entry &&
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 7b272738c576..d4cb1945b2e3 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -3,6 +3,7 @@
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
+#include <asm/nospec-branch.h>
#define WORD_SIZE (BITS_PER_LONG / 8)
@@ -77,6 +78,17 @@ ENDPROC(vmx_vmenter)
* referred to by VMCS.HOST_RIP.
*/
ENTRY(vmx_vmexit)
+#ifdef CONFIG_RETPOLINE
+ ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
+ /* Preserve guest's RAX, it's used to stuff the RSB. */
+ push %_ASM_AX
+
+ /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+
+ pop %_ASM_AX
+.Lvmexit_skip_rsb:
+#endif
ret
ENDPROC(vmx_vmexit)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b4e7d645275a..1ac167614032 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1692,6 +1692,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_SYSENTER_ESP:
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_IA32_POWER_CTL:
+ msr_info->data = vmx->msr_ia32_power_ctl;
+ break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -1822,6 +1825,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
+ case MSR_IA32_POWER_CTL:
+ vmx->msr_ia32_power_ctl = data;
+ break;
case MSR_IA32_BNDCFGS:
if (!kvm_mpx_supported() ||
(!msr_info->host_initiated &&
@@ -1891,7 +1897,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ if (!kvm_pat_valid(data))
return 1;
vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data;
@@ -2288,7 +2294,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_EXIT_SAVE_IA32_PAT |
VM_EXIT_LOAD_IA32_PAT |
VM_EXIT_LOAD_IA32_EFER |
VM_EXIT_CLEAR_BNDCFGS |
@@ -3619,14 +3624,13 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
- WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
return false;
rvi = vmx_get_rvi();
- vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vapic_page = vmx->nested.virtual_apic_map.hva;
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
- kunmap(vmx->nested.virtual_apic_page);
return ((rvi & 0xf0) > (vppr & 0xf0));
}
@@ -4827,7 +4831,7 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ u32 ecx = kvm_rcx_read(vcpu);
struct msr_data msr_info;
msr_info.index = ecx;
@@ -4840,18 +4844,16 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
trace_kvm_msr_read(ecx, msr_info.data);
- /* FIXME: handling of bits 32:63 of rax, rdx */
- vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
- vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
+ kvm_rax_write(vcpu, msr_info.data & -1u);
+ kvm_rdx_write(vcpu, (msr_info.data >> 32) & -1u);
return kvm_skip_emulated_instruction(vcpu);
}
static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+ u32 ecx = kvm_rcx_read(vcpu);
+ u64 data = kvm_read_edx_eax(vcpu);
msr.data = data;
msr.index = ecx;
@@ -4922,7 +4924,7 @@ static int handle_wbinvd(struct kvm_vcpu *vcpu)
static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
u64 new_bv = kvm_read_edx_eax(vcpu);
- u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ u32 index = kvm_rcx_read(vcpu);
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
return kvm_skip_emulated_instruction(vcpu);
@@ -5723,8 +5725,16 @@ void dump_vmcs(void)
if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
pr_err("TSC Multiplier = 0x%016llx\n",
vmcs_read64(TSC_MULTIPLIER));
- if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
- pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
+ if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
+ if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
+ u16 status = vmcs_read16(GUEST_INTR_STATUS);
+ pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
+ }
+ pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
+ if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
+ pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR));
+ pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
+ }
if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
@@ -6431,8 +6441,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+ /* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
+ else if (static_branch_unlikely(&mds_user_clear))
+ mds_clear_cpu_buffers();
if (vcpu->arch.cr2 != read_cr2())
write_cr2(vcpu->arch.cr2);
@@ -6462,9 +6475,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
-
/* All fields are clean at this point */
if (static_branch_unlikely(&enable_evmcs))
current_evmcs->hv_clean_fields |=
@@ -6503,7 +6513,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
*/
if (static_cpu_has(X86_FEATURE_PKU) &&
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
- vcpu->arch.pkru = __read_pkru();
+ vcpu->arch.pkru = rdpkru();
if (vcpu->arch.pkru != vmx->host_pkru)
__write_pkru(vmx->host_pkru);
}
@@ -6671,8 +6681,8 @@ free_partial_vcpu:
return ERR_PTR(err);
}
-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
static int vmx_vm_init(struct kvm *kvm)
{
@@ -6856,30 +6866,6 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
}
}
-static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *entry;
- union cpuid10_eax eax;
-
- entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
- if (!entry)
- return false;
-
- eax.full = entry->eax;
- return (eax.split.version_id > 0);
-}
-
-static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
-
- if (pmu_enabled)
- vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
- else
- vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
-}
-
static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6968,7 +6954,6 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
nested_vmx_entry_exit_ctls_update(vcpu);
- nested_vmx_procbased_ctls_update(vcpu);
}
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -7028,10 +7013,12 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
return 0;
}
-static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
+static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired)
{
struct vcpu_vmx *vmx;
u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
+ struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
if (kvm_mwait_in_guest(vcpu->kvm))
return -EOPNOTSUPP;
@@ -7040,7 +7027,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
tscl = rdtsc();
guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
- lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
+ lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
+ ktimer->timer_advance_ns);
if (delta_tsc > lapic_timer_advance_cycles)
delta_tsc -= lapic_timer_advance_cycles;
@@ -7049,10 +7037,9 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
/* Convert to host delta tsc if tsc scaling is enabled */
if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
- u64_shl_div_u64(delta_tsc,
+ delta_tsc && u64_shl_div_u64(delta_tsc,
kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio,
- &delta_tsc))
+ vcpu->arch.tsc_scaling_ratio, &delta_tsc))
return -ERANGE;
/*
@@ -7065,7 +7052,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
return -ERANGE;
vmx->hv_deadline_tsc = tscl + delta_tsc;
- return delta_tsc == 0;
+ *expired = !delta_tsc;
+ return 0;
}
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
@@ -7102,9 +7090,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa;
- struct page *page = NULL;
- u64 *pml_address;
+ gpa_t gpa, dst;
if (is_guest_mode(vcpu)) {
WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7124,15 +7110,13 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
}
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+ dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
- if (is_error_page(page))
+ if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
+ offset_in_page(dst), sizeof(gpa)))
return 0;
- pml_address = kmap(page);
- pml_address[vmcs12->guest_pml_index--] = gpa;
- kunmap(page);
- kvm_release_page_clean(page);
+ vmcs12->guest_pml_index--;
}
return 0;
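
The rdmsr/wrmsr handler changes above (here and in the earlier svm.c hunks) keep the usual x86 convention: the low 32 bits of an MSR value travel in EAX and the high 32 bits in EDX, which is what kvm_read_edx_eax() reassembles on the write side. A minimal stand-alone illustration of the split and reassembly (plain user-space C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t msr = 0x123456789abcdef0ull;	/* made-up MSR value */

		/* RDMSR side: low half goes to EAX, high half to EDX. */
		uint32_t eax = (uint32_t)(msr & 0xffffffff);
		uint32_t edx = (uint32_t)(msr >> 32);

		/* WRMSR side: reassemble the same 64-bit value. */
		uint64_t again = ((uint64_t)edx << 32) | eax;

		printf("%#llx -> eax=%#x edx=%#x -> %#llx\n",
		       (unsigned long long)msr, eax, edx, (unsigned long long)again);
		return 0;
	}
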
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index f879529906b4..63d37ccce3dc 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -142,8 +142,11 @@ struct nested_vmx {
* pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
- struct page *virtual_apic_page;
- struct page *pi_desc_page;
+ struct kvm_host_map virtual_apic_map;
+ struct kvm_host_map pi_desc_map;
+
+ struct kvm_host_map msr_bitmap_map;
+
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
@@ -169,7 +172,7 @@ struct nested_vmx {
} smm;
gpa_t hv_evmcs_vmptr;
- struct page *hv_evmcs_page;
+ struct kvm_host_map hv_evmcs_map;
struct hv_enlightened_vmcs *hv_evmcs;
};
@@ -257,6 +260,8 @@ struct vcpu_vmx {
unsigned long host_debugctlmsr;
+ u64 msr_ia32_power_ctl;
+
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a0d1fc80ac5a..536b78c4af6e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -136,10 +136,14 @@ EXPORT_SYMBOL_GPL(kvm_default_tsc_scaling_ratio);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
-/* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int __read_mostly lapic_timer_advance_ns = 1000;
+/*
+ * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
+ * adaptive tuning starting from default advancement of 1000ns. '0' disables
+ * advancement entirely. Any other value is used as-is and disables adaptive
+ * tuning, i.e. allows privileged userspace to set an exact advancement time.
+ */
+static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
-EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);
@@ -1096,15 +1100,15 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
- u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ u32 ecx = kvm_rcx_read(vcpu);
u64 data;
int err;
err = kvm_pmu_rdpmc(vcpu, ecx, &data);
if (err)
return err;
- kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
- kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
+ kvm_rax_write(vcpu, (u32)data);
+ kvm_rdx_write(vcpu, data >> 32);
return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
@@ -1170,6 +1174,9 @@ static u32 emulated_msrs[] = {
MSR_PLATFORM_INFO,
MSR_MISC_FEATURES_ENABLES,
MSR_AMD64_VIRT_SPEC_CTRL,
+ MSR_IA32_POWER_CTL,
+
+ MSR_K7_HWCR,
};
static unsigned num_emulated_msrs;
@@ -1258,31 +1265,49 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return 0;
}
-bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
- if (efer & efer_reserved_bits)
- return false;
-
if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
- return false;
+ return false;
if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- return false;
+ return false;
+
+ if (efer & (EFER_LME | EFER_LMA) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ return false;
+
+ if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
+ return false;
return true;
+
+}
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+ if (efer & efer_reserved_bits)
+ return false;
+
+ return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 old_efer = vcpu->arch.efer;
+ u64 efer = msr_info->data;
- if (!kvm_valid_efer(vcpu, efer))
- return 1;
+ if (efer & efer_reserved_bits)
+ return 1;
- if (is_paging(vcpu)
- && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
- return 1;
+ if (!msr_info->host_initiated) {
+ if (!__kvm_valid_efer(vcpu, efer))
+ return 1;
+
+ if (is_paging(vcpu) &&
+ (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+ return 1;
+ }
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
@@ -2275,6 +2300,18 @@ static void kvmclock_sync_fn(struct work_struct *work)
KVMCLOCK_SYNC_PERIOD);
}
+/*
+ * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
+ */
+static bool can_set_mci_status(struct kvm_vcpu *vcpu)
+{
+ /* McStatusWrEn enabled? */
+ if (guest_cpuid_is_amd(vcpu))
+ return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
+
+ return false;
+}
+
static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2306,9 +2343,14 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if ((offset & 0x3) == 0 &&
data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
+
+ /* MCi_STATUS */
if (!msr_info->host_initiated &&
- (offset & 0x3) == 1 && data != 0)
- return -1;
+ (offset & 0x3) == 1 && data != 0) {
+ if (!can_set_mci_status(vcpu))
+ return -1;
+ }
+
vcpu->arch.mce_banks[offset] = data;
break;
}
@@ -2452,13 +2494,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.arch_capabilities = data;
break;
case MSR_EFER:
- return set_efer(vcpu, data);
+ return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
- data &= ~(u64)0x40000; /* ignore Mc status write enable */
- if (data != 0) {
+
+ /* Handle McStatusWrEn */
+ if (data == BIT_ULL(18)) {
+ vcpu->arch.msr_hwcr = data;
+ } else if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
@@ -2732,7 +2777,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K8_SYSCFG:
case MSR_K8_TSEG_ADDR:
case MSR_K8_TSEG_MASK:
- case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K8_INT_PENDING_MSG:
case MSR_AMD64_NB_CFG:
@@ -2896,6 +2940,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_MISC_FEATURES_ENABLES:
msr_info->data = vcpu->arch.msr_misc_features_enables;
break;
+ case MSR_K7_HWCR:
+ msr_info->data = vcpu->arch.msr_hwcr;
+ break;
default:
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
@@ -3075,9 +3122,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
- case KVM_CAP_NR_MEMSLOTS:
- r = KVM_USER_MEM_SLOTS;
- break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
@@ -3677,15 +3721,15 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
- u64 feature = valid & -valid;
- int index = fls64(feature) - 1;
- void *src = get_xsave_addr(xsave, feature);
+ u64 xfeature_mask = valid & -valid;
+ int xfeature_nr = fls64(xfeature_mask) - 1;
+ void *src = get_xsave_addr(xsave, xfeature_nr);
if (src) {
u32 size, offset, ecx, edx;
- cpuid_count(XSTATE_CPUID, index,
+ cpuid_count(XSTATE_CPUID, xfeature_nr,
&size, &offset, &ecx, &edx);
- if (feature == XFEATURE_MASK_PKRU)
+ if (xfeature_nr == XFEATURE_PKRU)
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
@@ -3693,7 +3737,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
}
- valid -= feature;
+ valid -= xfeature_mask;
}
}
@@ -3720,22 +3764,22 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
- u64 feature = valid & -valid;
- int index = fls64(feature) - 1;
- void *dest = get_xsave_addr(xsave, feature);
+ u64 xfeature_mask = valid & -valid;
+ int xfeature_nr = fls64(xfeature_mask) - 1;
+ void *dest = get_xsave_addr(xsave, xfeature_nr);
if (dest) {
u32 size, offset, ecx, edx;
- cpuid_count(XSTATE_CPUID, index,
+ cpuid_count(XSTATE_CPUID, xfeature_nr,
&size, &offset, &ecx, &edx);
- if (feature == XFEATURE_MASK_PKRU)
+ if (xfeature_nr == XFEATURE_PKRU)
memcpy(&vcpu->arch.pkru, src + offset,
sizeof(vcpu->arch.pkru));
else
memcpy(dest, src + offset, size);
}
- valid -= feature;
+ valid -= xfeature_mask;
}
}
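
The fill_xsave()/load_xsave() rework above renames the loop variables but keeps the same bit walk: (valid & -valid) isolates the lowest set xfeature bit and fls64() - 1 turns it into the feature number passed to cpuid_count()/get_xsave_addr(). A stand-alone sketch of that walk (plain user-space C; fls64() is open-coded via __builtin_clzll and the XSTATE_BV value is made up):

	#include <stdint.h>
	#include <stdio.h>

	static int fls64(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t xstate_bv = 0x2e7;		/* made-up XSTATE_BV value         */
		uint64_t valid = xstate_bv & ~0x3ull;	/* drop FP/SSE, as the kernel does */

		while (valid) {
			uint64_t xfeature_mask = valid & -valid;	/* lowest set bit */
			int xfeature_nr = fls64(xfeature_mask) - 1;	/* its bit number */

			printf("xfeature %d (mask 0x%llx)\n",
			       xfeature_nr, (unsigned long long)xfeature_mask);
			valid -= xfeature_mask;
		}
		return 0;
	}
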
@@ -5517,9 +5561,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int bytes,
struct x86_exception *exception)
{
+ struct kvm_host_map map;
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
- struct page *page;
char *kaddr;
bool exchanged;
@@ -5536,12 +5580,11 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
goto emul_write;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
goto emul_write;
- kaddr = kmap_atomic(page);
- kaddr += offset_in_page(gpa);
+ kaddr = map.hva + offset_in_page(gpa);
+
switch (bytes) {
case 1:
exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
@@ -5558,13 +5601,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
default:
BUG();
}
- kunmap_atomic(kaddr);
- kvm_release_page_dirty(page);
+
+ kvm_vcpu_unmap(vcpu, &map, true);
if (!exchanged)
return X86EMUL_CMPXCHG_FAILED;
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
kvm_page_track_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
@@ -6535,6 +6577,12 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
+static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pio.count = 0;
+ return 1;
+}
+
static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
{
vcpu->arch.pio.count = 0;
@@ -6548,15 +6596,26 @@ static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
{
- unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ unsigned long val = kvm_rax_read(vcpu);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
+ if (ret)
+ return ret;
- if (!ret) {
+ /*
+ * Workaround userspace that relies on old KVM behavior of %rip being
+ * incremented prior to exiting to userspace to handle "OUT 0x7e".
+ */
+ if (port == 0x7e &&
+ kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
+ vcpu->arch.complete_userspace_io =
+ complete_fast_pio_out_port_0x7e;
+ kvm_skip_emulated_instruction(vcpu);
+ } else {
vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
vcpu->arch.complete_userspace_io = complete_fast_pio_out;
}
- return ret;
+ return 0;
}
static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
@@ -6572,8 +6631,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
}
/* For size less than 4 we merge, else we zero extend */
- val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
- : 0;
+ val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
/*
* Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform
@@ -6581,7 +6639,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
*/
emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size,
vcpu->arch.pio.port, &val, 1);
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ kvm_rax_write(vcpu, val);
return kvm_skip_emulated_instruction(vcpu);
}
@@ -6593,12 +6651,12 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
int ret;
/* For size less than 4 we merge, else we zero extend */
- val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0;
+ val = (size < 4) ? kvm_rax_read(vcpu) : 0;
ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port,
&val, 1);
if (ret) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, val);
+ kvm_rax_write(vcpu, val);
return ret;
}
@@ -6677,10 +6735,8 @@ static void kvm_hyperv_tsc_notifier(void)
}
#endif
-static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
{
- struct cpufreq_freqs *freq = data;
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i, send_ipi = 0;
@@ -6724,17 +6780,12 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
*
*/
- if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
- return 0;
- if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
- return 0;
-
- smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+ smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu->cpu != freq->cpu)
+ if (vcpu->cpu != cpu)
continue;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != smp_processor_id())
@@ -6756,8 +6807,24 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
* guest context is entered kvmclock will be updated,
* so the guest will not see stale values.
*/
- smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
+ smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
}
+}
+
+static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ int cpu;
+
+ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
+ return 0;
+ if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
+ return 0;
+
+ for_each_cpu(cpu, freq->policy->cpus)
+ __kvmclock_cpufreq_notifier(freq, cpu);
+
return 0;
}
@@ -6824,10 +6891,20 @@ static unsigned long kvm_get_guest_ip(void)
return ip;
}
+static void kvm_handle_intel_pt_intr(void)
+{
+ struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+
+ kvm_make_request(KVM_REQ_PMI, vcpu);
+ __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+ (unsigned long *)&vcpu->arch.pmu.global_status);
+}
+
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.is_in_guest = kvm_is_in_guest,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
+ .handle_intel_pt_intr = kvm_handle_intel_pt_intr,
};
static void kvm_set_mmio_spte_mask(void)
@@ -7103,11 +7180,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
- nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
- a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
- a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
- a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
- a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
+ nr = kvm_rax_read(vcpu);
+ a0 = kvm_rbx_read(vcpu);
+ a1 = kvm_rcx_read(vcpu);
+ a2 = kvm_rdx_read(vcpu);
+ a3 = kvm_rsi_read(vcpu);
trace_kvm_hypercall(nr, a0, a1, a2, a3);
@@ -7148,7 +7225,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
out:
if (!op_64_bit)
ret = (u32)ret;
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+ kvm_rax_write(vcpu, ret);
++vcpu->stat.hypercalls;
return kvm_skip_emulated_instruction(vcpu);
@@ -7873,10 +7950,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
trace_kvm_entry(vcpu->vcpu_id);
- if (lapic_timer_advance_ns)
+ if (lapic_in_kernel(vcpu) &&
+ vcpu->arch.apic->lapic_timer.timer_advance_ns)
wait_lapic_expire(vcpu);
guest_enter_irqoff();
+ fpregs_assert_state_consistent();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
@@ -8135,22 +8217,30 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
- preempt_disable();
+ fpregs_lock();
+
copy_fpregs_to_fpstate(&current->thread.fpu);
/* PKRU is separately restored in kvm_x86_ops->run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
~XFEATURE_MASK_PKRU);
- preempt_enable();
+
+ fpregs_mark_activate();
+ fpregs_unlock();
+
trace_kvm_fpu(1);
}
/* When vcpu_run ends, restore user space FPU context. */
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- preempt_disable();
+ fpregs_lock();
+
copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
copy_kernel_to_fpregs(&current->thread.fpu.state);
- preempt_enable();
+
+ fpregs_mark_activate();
+ fpregs_unlock();
+
++vcpu->stat.fpu_reload;
trace_kvm_fpu(0);
}
@@ -8237,23 +8327,23 @@ static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
- regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
- regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
- regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
- regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
- regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
- regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
- regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
+ regs->rax = kvm_rax_read(vcpu);
+ regs->rbx = kvm_rbx_read(vcpu);
+ regs->rcx = kvm_rcx_read(vcpu);
+ regs->rdx = kvm_rdx_read(vcpu);
+ regs->rsi = kvm_rsi_read(vcpu);
+ regs->rdi = kvm_rdi_read(vcpu);
+ regs->rsp = kvm_rsp_read(vcpu);
+ regs->rbp = kvm_rbp_read(vcpu);
#ifdef CONFIG_X86_64
- regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
- regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
- regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
- regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
- regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
- regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
- regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
- regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
+ regs->r8 = kvm_r8_read(vcpu);
+ regs->r9 = kvm_r9_read(vcpu);
+ regs->r10 = kvm_r10_read(vcpu);
+ regs->r11 = kvm_r11_read(vcpu);
+ regs->r12 = kvm_r12_read(vcpu);
+ regs->r13 = kvm_r13_read(vcpu);
+ regs->r14 = kvm_r14_read(vcpu);
+ regs->r15 = kvm_r15_read(vcpu);
#endif
regs->rip = kvm_rip_read(vcpu);
@@ -8273,23 +8363,23 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
- kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
- kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
- kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
- kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
+ kvm_rax_write(vcpu, regs->rax);
+ kvm_rbx_write(vcpu, regs->rbx);
+ kvm_rcx_write(vcpu, regs->rcx);
+ kvm_rdx_write(vcpu, regs->rdx);
+ kvm_rsi_write(vcpu, regs->rsi);
+ kvm_rdi_write(vcpu, regs->rdi);
+ kvm_rsp_write(vcpu, regs->rsp);
+ kvm_rbp_write(vcpu, regs->rbp);
#ifdef CONFIG_X86_64
- kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
- kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
- kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
- kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
- kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
- kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
- kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
- kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
+ kvm_r8_write(vcpu, regs->r8);
+ kvm_r9_write(vcpu, regs->r9);
+ kvm_r10_write(vcpu, regs->r10);
+ kvm_r11_write(vcpu, regs->r11);
+ kvm_r12_write(vcpu, regs->r12);
+ kvm_r13_write(vcpu, regs->r13);
+ kvm_r14_write(vcpu, regs->r14);
+ kvm_r15_write(vcpu, regs->r15);
#endif
kvm_rip_write(vcpu, regs->rip);
@@ -8848,11 +8938,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (init_event)
kvm_put_guest_fpu(vcpu);
mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
- XFEATURE_MASK_BNDREGS);
+ XFEATURE_BNDREGS);
if (mpx_state_buffer)
memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
- XFEATURE_MASK_BNDCSR);
+ XFEATURE_BNDCSR);
if (mpx_state_buffer)
memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
if (init_event)
@@ -9061,7 +9151,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu);
- r = kvm_create_lapic(vcpu);
+ r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
if (r < 0)
goto fail_mmu_destroy;
} else
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index aedc5d0d4989..a470ff0868c5 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -294,8 +294,6 @@ extern u64 kvm_supported_xcr0(void);
extern unsigned int min_timer_period_us;
-extern unsigned int lapic_timer_advance_ns;
-
extern bool enable_vmware_backdoor;
extern struct static_key kvm_no_apic_vcpu;
@@ -347,6 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
__this_cpu_write(current_vcpu, NULL);
}
+
+static inline bool kvm_pat_valid(u64 data)
+{
+ if (data & 0xF8F8F8F8F8F8F8F8ull)
+ return false;
+ /* 0, 1, 4, 5, 6, 7 are valid values. */
+ return (data | ((data & 0x0202020202020202ull) << 1)) == data;
+}
+
void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+
#endif
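Editor's note: the bit trick in kvm_pat_valid() above can be sanity-checked in isolation. The following standalone user-space sketch (illustrative only) reproduces the per-byte rule: PAT memory types 0, 1, 4, 5, 6 and 7 are accepted, while 2, 3 and anything >= 8 are rejected.

#include <stdint.h>
#include <stdio.h>

/* Same expression as kvm_pat_valid(), applied to a single byte value. */
static int pat_byte_valid(uint8_t b)
{
	uint64_t data = b;

	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return 0;
	/* if bit 1 is set, bit 2 must already be set (rejects 2 and 3) */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

int main(void)
{
	for (int b = 0; b < 16; b++)
		printf("%2d -> %s\n", b, pat_byte_valid(b) ? "valid" : "invalid");
	return 0;
}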
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 140e61843a07..5246db42de45 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -6,6 +6,18 @@
# Produces uninteresting flaky coverage.
KCOV_INSTRUMENT_delay.o := n
+# Early boot use of cmdline; don't instrument it
+ifdef CONFIG_AMD_MEM_ENCRYPT
+KCOV_INSTRUMENT_cmdline.o := n
+KASAN_SANITIZE_cmdline.o := n
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cmdline.o = -pg
+endif
+
+CFLAGS_cmdline.o := $(call cc-option, -fno-stack-protector)
+endif
+
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt
quiet_cmd_inat_tables = GEN $@
@@ -23,7 +35,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
lib-y := delay.o misc.o cmdline.o cpu.o
lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index db4e5aa0858b..b2f1822084ae 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,6 +16,30 @@
#include <asm/smap.h>
#include <asm/export.h>
+.macro ALIGN_DESTINATION
+ /* check for bad alignment of destination */
+ movl %edi,%ecx
+ andl $7,%ecx
+ jz 102f /* already aligned */
+ subl $8,%ecx
+ negl %ecx
+ subl %ecx,%edx
+100: movb (%rsi),%al
+101: movb %al,(%rdi)
+ incq %rsi
+ incq %rdi
+ decl %ecx
+ jnz 100b
+102:
+ .section .fixup,"ax"
+103: addl %ecx,%edx /* ecx is zerorest also */
+ jmp copy_user_handle_tail
+ .previous
+
+ _ASM_EXTABLE_UA(100b, 103b)
+ _ASM_EXTABLE_UA(101b, 103b)
+ .endm
+
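Editor's note: a rough C rendering of what the ALIGN_DESTINATION macro above does; names are illustrative, and the fault fixup that the real macro wires up through the exception table is not modelled here.

/* Copy single bytes until the destination is 8-byte aligned, so the
 * main loop can proceed with aligned 8-byte stores. */
static void align_destination(unsigned char **dst, const unsigned char **src,
			      unsigned long *remaining)
{
	while (((unsigned long)*dst & 7) && *remaining) {
		*(*dst)++ = *(*src)++;
		(*remaining)--;
	}
}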
/*
* copy_user_generic_unrolled - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient micro
@@ -194,6 +218,30 @@ ENDPROC(copy_user_enhanced_fast_string)
EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
+ * Try to copy the last bytes and clear the rest if needed.
+ * Since a protection fault in copy_from/to_user is not a normal situation,
+ * it is not necessary to optimize tail handling.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ALIGN;
+copy_user_handle_tail:
+ movl %edx,%ecx
+1: rep movsb
+2: mov %ecx,%eax
+ ASM_CLAC
+ ret
+
+ _ASM_EXTABLE_UA(1b, 2b)
+ENDPROC(copy_user_handle_tail)
+
+/*
* copy_user_nocache - Uncached memory copy with exception handling
* This will force destination out of cache for more performance.
*
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index f5b7f1b3b6d7..b7375dc6898f 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -162,7 +162,7 @@ void __delay(unsigned long loops)
}
EXPORT_SYMBOL(__delay);
-void __const_udelay(unsigned long xloops)
+noinline void __const_udelay(unsigned long xloops)
{
unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
int d0;
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index 3cdf06128d13..be5b5fb1598b 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -6,6 +6,7 @@
asmlinkage void just_return_func(void);
asm(
+ ".text\n"
".type just_return_func, @function\n"
".globl just_return_func\n"
"just_return_func:\n"
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..9d05572370ed 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
/* Copy successful. Return zero */
.L_done_memcpy_trap:
xorl %eax, %eax
+.L_done:
ret
ENDPROC(__memcpy_mcsafe)
EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
addl %edx, %ecx
.E_trailing_bytes:
mov %ecx, %eax
- ret
+ jmp .L_done
/*
* For write fault handling, given the destination is unaligned,
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
deleted file mode 100644
index dc2ab6ea6768..000000000000
--- a/arch/x86/lib/rwsem.S
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * x86 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-
-#define __ASM_HALF_REG(reg) __ASM_SEL(reg, e##reg)
-#define __ASM_HALF_SIZE(inst) __ASM_SEL(inst##w, inst##l)
-
-#ifdef CONFIG_X86_32
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax which is either a return
- * value or just gets clobbered. Same is true for %edx so make sure GCC
- * reloads it after the slow path, by making it hold a temporary, for
- * example see ____down_write().
- */
-
-#define save_common_regs \
- pushl %ecx
-
-#define restore_common_regs \
- popl %ecx
-
- /* Avoid uglifying the argument copying x86-64 needs to do. */
- .macro movq src, dst
- .endm
-
-#else
-
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- * call_rwsem_down_write_failed
- * call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#define save_common_regs \
- pushq %rdi; \
- pushq %rsi; \
- pushq %rcx; \
- pushq %r8; \
- pushq %r9; \
- pushq %r10; \
- pushq %r11
-
-#define restore_common_regs \
- popq %r11; \
- popq %r10; \
- popq %r9; \
- popq %r8; \
- popq %rcx; \
- popq %rsi; \
- popq %rdi
-
-#endif
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_down_read_failed
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_read_failed_killable)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_down_read_failed_killable
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_read_failed_killable)
-
-ENTRY(call_rwsem_down_write_failed)
- FRAME_BEGIN
- save_common_regs
- movq %rax,%rdi
- call rwsem_down_write_failed
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_down_write_failed_killable)
- FRAME_BEGIN
- save_common_regs
- movq %rax,%rdi
- call rwsem_down_write_failed_killable
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_down_write_failed_killable)
-
-ENTRY(call_rwsem_wake)
- FRAME_BEGIN
- /* do nothing if still outstanding active readers */
- __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
- jnz 1f
- save_common_regs
- movq %rax,%rdi
- call rwsem_wake
- restore_common_regs
-1: FRAME_END
- ret
-ENDPROC(call_rwsem_wake)
-
-ENTRY(call_rwsem_downgrade_wake)
- FRAME_BEGIN
- save_common_regs
- __ASM_SIZE(push,) %__ASM_REG(dx)
- movq %rax,%rdi
- call rwsem_downgrade_wake
- __ASM_SIZE(pop,) %__ASM_REG(dx)
- restore_common_regs
- FRAME_END
- ret
-ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..9952a01cad24 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -55,26 +55,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
EXPORT_SYMBOL(clear_user);
/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
- for (; len; --len, to++) {
- char c;
-
- if (__get_user_nocheck(c, from++, sizeof(char)))
- break;
- if (__put_user_nocheck(c, to, sizeof(char)))
- break;
- }
- clac();
- return len;
-}
-
-/*
* Similar to copy_user_handle_tail, probe for the write fault point,
* but reuse __memcpy_mcsafe in case a new read error is encountered.
* clac() is handled in _copy_to_iter_mcsafe().
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 9e2ba7e667f6..a873da6b46d6 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -113,9 +113,6 @@ void math_emulate(struct math_emu_info *info)
unsigned long code_base = 0;
unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
struct desc_struct code_descriptor;
- struct fpu *fpu = &current->thread.fpu;
-
- fpu__initialize(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 4b101dd6e52f..84373dc9b341 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -21,7 +21,7 @@ CFLAGS_physaddr.o := $(nostackp)
CFLAGS_setup_nx.o := $(nostackp)
CFLAGS_mem_encrypt_identity.o := $(nostackp)
-CFLAGS_fault.o := -I$(src)/../include/asm/trace
+CFLAGS_fault.o := -I $(srctree)/$(src)/../include/asm/trace
obj-$(CONFIG_X86_PAT) += pat_rbtree.o
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 19c6abf9ea31..752ad11d6868 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -13,8 +13,8 @@
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
#ifdef CONFIG_X86_64
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
- [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
+DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif
struct cpu_entry_area *get_cpu_entry_area(int cpu)
@@ -52,10 +52,10 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
-static void __init percpu_setup_debug_store(int cpu)
+static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
- int npages;
+ unsigned int npages;
void *cea;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
@@ -78,9 +78,43 @@ static void __init percpu_setup_debug_store(int cpu)
#endif
}
+#ifdef CONFIG_X86_64
+
+#define cea_map_stack(name) do { \
+ npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
+ cea_map_percpu_pages(cea->estacks.name## _stack, \
+ estacks->name## _stack, npages, PAGE_KERNEL); \
+ } while (0)
+
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
+{
+ struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+ unsigned int npages;
+
+ BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+
+ per_cpu(cea_exception_stacks, cpu) = &cea->estacks;
+
+ /*
+ * The exception stack mappings in the per-CPU area are protected
+ * by guard pages so each stack must be mapped separately. DB2 is
+ * not mapped; it just exists to catch triple nesting of #DB.
+ */
+ cea_map_stack(DF);
+ cea_map_stack(NMI);
+ cea_map_stack(DB1);
+ cea_map_stack(DB);
+ cea_map_stack(MCE);
+}
+#else
+static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+#endif
+
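Editor's note: hand-expanding one invocation of the cea_map_stack() macro above makes the pattern explicit; each stack is mapped on its own so the guard pages between the stacks stay unmapped.

/* cea_map_stack(DF) expands (roughly) to: */
npages = sizeof(estacks->DF_stack) / PAGE_SIZE;
cea_map_percpu_pages(cea->estacks.DF_stack,
		     estacks->DF_stack, npages, PAGE_KERNEL);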
/* Setup the fixmap mappings only once per-processor */
-static void __init setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(unsigned int cpu)
{
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
pgprot_t gdt_prot = PAGE_KERNEL_RO;
@@ -101,10 +135,9 @@ static void __init setup_cpu_entry_area(int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif
- cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
- gdt_prot);
+ cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+ cea_map_percpu_pages(&cea->entry_stack_page,
per_cpu_ptr(&entry_stack_storage, cpu), 1,
PAGE_KERNEL);
@@ -128,22 +161,15 @@ static void __init setup_cpu_entry_area(int cpu)
BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
- &per_cpu(cpu_tss_rw, cpu),
+ cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
#ifdef CONFIG_X86_32
- per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
+ per_cpu(cpu_entry_area, cpu) = cea;
#endif
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
- BUILD_BUG_ON(sizeof(exception_stacks) !=
- sizeof(((struct cpu_entry_area *)0)->exception_stacks));
- cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
- &per_cpu(exception_stacks, cpu),
- sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-#endif
+ percpu_setup_exception_stacks(cpu);
+
percpu_setup_debug_store(cpu);
}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index c0309ea9abee..6a7302d1161f 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -578,7 +578,7 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- if (user && static_cpu_has(X86_FEATURE_PTI))
+ if (user && boot_cpu_has(X86_FEATURE_PTI))
pgd = kernel_to_user_pgdp(pgd);
#endif
ptdump_walk_pgd_level_core(m, pgd, false, false);
@@ -591,7 +591,7 @@ void ptdump_walk_user_pgd_level_checkwx(void)
pgd_t *pgd = INIT_PGD;
if (!(__supported_pte_mask & _PAGE_NX) ||
- !static_cpu_has(X86_FEATURE_PTI))
+ !boot_cpu_has(X86_FEATURE_PTI))
return;
pr_info("x86/mm: Checking user space page tables\n");
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 3c4568f8fb28..b0a2de8d2f9e 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -145,7 +145,7 @@ __visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup
unsigned long error_code,
unsigned long fault_addr)
{
- if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
+ if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
show_stack_regs(regs);
@@ -162,7 +162,7 @@ __visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup
unsigned long error_code,
unsigned long fault_addr)
{
- if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
+ if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx,
(unsigned int)regs->ax, regs->ip, (void *)regs->ip))
show_stack_regs(regs);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 667f1da36208..46df4c6aae46 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,6 +28,7 @@
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/efi.h> /* efi_recover_from_page_fault()*/
#include <asm/desc.h> /* store_idt(), ... */
+#include <asm/cpu_entry_area.h> /* exception stack */
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
@@ -359,8 +360,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Copy kernel mappings over when needed. This can also
* happen within a race in page table update. In the latter
@@ -603,24 +602,9 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
}
-/*
- * This helper function transforms the #PF error_code bits into
- * "[PROT] [USER]" type of descriptive, almost human-readable error strings:
- */
-static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt)
-{
- if (error_code & mask) {
- if (buf[0])
- strcat(buf, " ");
- strcat(buf, txt);
- }
-}
-
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
- char err_txt[64];
-
if (!oops_may_print())
return;
@@ -644,31 +628,29 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long ad
from_kuid(&init_user_ns, current_uid()));
}
- pr_alert("BUG: unable to handle kernel %s at %px\n",
- address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
- (void *)address);
-
- err_txt[0] = 0;
-
- /*
- * Note: length of these appended strings including the separation space and the
- * zero delimiter must fit into err_txt[].
- */
- err_str_append(error_code, err_txt, X86_PF_PROT, "[PROT]" );
- err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]");
- err_str_append(error_code, err_txt, X86_PF_USER, "[USER]" );
- err_str_append(error_code, err_txt, X86_PF_RSVD, "[RSVD]" );
- err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]");
- err_str_append(error_code, err_txt, X86_PF_PK, "[PK]" );
-
- pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]");
+ if (address < PAGE_SIZE && !user_mode(regs))
+ pr_alert("BUG: kernel NULL pointer dereference, address: %px\n",
+ (void *)address);
+ else
+ pr_alert("BUG: unable to handle page fault for address: %px\n",
+ (void *)address);
+
+ pr_alert("#PF: %s %s in %s mode\n",
+ (error_code & X86_PF_USER) ? "user" : "supervisor",
+ (error_code & X86_PF_INSTR) ? "instruction fetch" :
+ (error_code & X86_PF_WRITE) ? "write access" :
+ "read access",
+ user_mode(regs) ? "user" : "kernel");
+ pr_alert("#PF: error_code(0x%04lx) - %s\n", error_code,
+ !(error_code & X86_PF_PROT) ? "not-present page" :
+ (error_code & X86_PF_RSVD) ? "reserved bit violation" :
+ (error_code & X86_PF_PK) ? "protection keys violation" :
+ "permissions violation");
if (!(error_code & X86_PF_USER) && user_mode(regs)) {
struct desc_ptr idt, gdt;
u16 ldtr, tr;
- pr_alert("This was a system access from user code\n");
-
/*
* This can happen for quite a few reasons. The more obvious
* ones are faults accessing the GDT, or LDT. Perhaps
@@ -793,7 +775,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (is_vmalloc_addr((void *)address) &&
(((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
- unsigned long stack = this_cpu_read(orig_ist.ist[DOUBLEFAULT_STACK]) - sizeof(void *);
+ unsigned long stack = __this_cpu_ist_top_va(DF) - sizeof(void *);
/*
* We're likely to be running with very little stack space
* left. It's plausible that we'd hit this condition but
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 92e4c4b85bba..fab095362c50 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -203,7 +203,7 @@ static __init int setup_hugepagesz(char *opt)
}
__setup("hugepagesz=", setup_hugepagesz);
-#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
+#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
/* With compaction or CMA we can allocate gigantic pages at runtime */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8dacdb96899e..fd10d91a6115 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -6,6 +6,7 @@
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
+#include <linux/sched/task.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -23,6 +24,7 @@
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
+#include <asm/text-patching.h>
/*
* We need to define the tracepoints somewhere, and tlb.c
@@ -702,6 +704,41 @@ void __init init_mem_mapping(void)
}
/*
+ * Initialize an mm_struct to be used during poking and a pointer to be used
+ * during patching.
+ */
+void __init poking_init(void)
+{
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ poking_mm = copy_init_mm();
+ BUG_ON(!poking_mm);
+
+ /*
+ * Randomize the poking address, but make sure that the following page
+ * will be mapped at the same PMD. We need 2 pages, so find space for 3,
+ * and adjust the address if the PMD ends after the first one.
+ */
+ poking_addr = TASK_UNMAPPED_BASE;
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
+ (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
+
+ if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
+ poking_addr += PAGE_SIZE;
+
+ /*
+ * We need to trigger the allocation of the page-tables that will be
+ * needed for poking now. Later, poking may be performed in an atomic
+ * section, which might cause allocation to fail.
+ */
+ ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
+ BUG_ON(!ptep);
+ pte_unmap_unlock(ptep, ptl);
+}
+
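Editor's note: the PMD-straddle adjustment in poking_init() above can be checked with a standalone sketch. The PAGE_SIZE/PMD_SIZE values below assume 4K pages and 2M PMDs (x86-64 defaults); the whole program is illustrative only.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL * 1024 * 1024)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* If the second page would start a new PMD, move up one page so both
 * poking pages share a PMD, mirroring the check in poking_init(). */
static unsigned long adjust_poking_addr(unsigned long addr)
{
	if (((addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		addr += PAGE_SIZE;
	return addr;
}

int main(void)
{
	unsigned long last_page_of_pmd = PMD_SIZE - PAGE_SIZE;

	/* prints 0x1ff000 -> 0x200000: both pages now sit in the next PMD */
	printf("%#lx -> %#lx\n", last_page_of_pmd,
	       adjust_poking_addr(last_page_of_pmd));
	return 0;
}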
+/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 85c94f9a87f8..075e568098f2 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -850,24 +850,25 @@ void __init mem_init(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct zone *zone;
zone = page_zone(pfn_to_page(start_pfn));
- return __remove_pages(zone, start_pfn, nr_pages, altmap);
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index bccff68e3267..62fc457f3849 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -58,6 +58,37 @@
#include "ident_map.c"
+#define DEFINE_POPULATE(fname, type1, type2, init) \
+static inline void fname##_init(struct mm_struct *mm, \
+ type1##_t *arg1, type2##_t *arg2, bool init) \
+{ \
+ if (init) \
+ fname##_safe(mm, arg1, arg2); \
+ else \
+ fname(mm, arg1, arg2); \
+}
+
+DEFINE_POPULATE(p4d_populate, p4d, pud, init)
+DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
+DEFINE_POPULATE(pud_populate, pud, pmd, init)
+DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
+
+#define DEFINE_ENTRY(type1, type2, init) \
+static inline void set_##type1##_init(type1##_t *arg1, \
+ type2##_t arg2, bool init) \
+{ \
+ if (init) \
+ set_##type1##_safe(arg1, arg2); \
+ else \
+ set_##type1(arg1, arg2); \
+}
+
+DEFINE_ENTRY(p4d, p4d, init)
+DEFINE_ENTRY(pud, pud, init)
+DEFINE_ENTRY(pmd, pmd, init)
+DEFINE_ENTRY(pte, pte, init)
+
+
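Editor's note: writing out one DEFINE_ENTRY() expansion by hand shows what the 'init' flag selects; the pte case below is a mechanical expansion of the macro above.

/* DEFINE_ENTRY(pte, pte, init) expands to: */
static inline void set_pte_init(pte_t *arg1, pte_t arg2, bool init)
{
	if (init)
		set_pte_safe(arg1, arg2);	/* warns if the entry is already present */
	else
		set_pte(arg1, arg2);		/* allows updating an existing entry */
}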
/*
* NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
* physical space so we can cache the place of the first one and move
@@ -414,7 +445,7 @@ void __init cleanup_highmap(void)
*/
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
- pgprot_t prot)
+ pgprot_t prot, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -432,7 +463,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pte_safe(pte, __pte(0));
+ set_pte_init(pte, __pte(0), init);
continue;
}
@@ -452,7 +483,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
pages++;
- set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+ set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
}
@@ -468,7 +499,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
*/
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask, pgprot_t prot)
+ unsigned long page_size_mask, pgprot_t prot, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -487,7 +518,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pmd_safe(pmd, __pmd(0));
+ set_pmd_init(pmd, __pmd(0), init);
continue;
}
@@ -496,7 +527,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
spin_lock(&init_mm.page_table_lock);
pte = (pte_t *)pmd_page_vaddr(*pmd);
paddr_last = phys_pte_init(pte, paddr,
- paddr_end, prot);
+ paddr_end, prot,
+ init);
spin_unlock(&init_mm.page_table_lock);
continue;
}
@@ -524,19 +556,20 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte_safe((pte_t *)pmd,
- pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
- __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+ set_pte_init((pte_t *)pmd,
+ pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
+ __pgprot(pgprot_val(prot) | _PAGE_PSE)),
+ init);
spin_unlock(&init_mm.page_table_lock);
paddr_last = paddr_next;
continue;
}
pte = alloc_low_page();
- paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
+ paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);
spin_lock(&init_mm.page_table_lock);
- pmd_populate_kernel_safe(&init_mm, pmd, pte);
+ pmd_populate_kernel_init(&init_mm, pmd, pte, init);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
@@ -551,7 +584,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
*/
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask)
+ unsigned long page_size_mask, bool init)
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
@@ -573,7 +606,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pud_safe(pud, __pud(0));
+ set_pud_init(pud, __pud(0), init);
continue;
}
@@ -583,7 +616,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pmd_init(pmd, paddr,
paddr_end,
page_size_mask,
- prot);
+ prot, init);
continue;
}
/*
@@ -610,9 +643,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_1G)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte_safe((pte_t *)pud,
- pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
- PAGE_KERNEL_LARGE));
+ set_pte_init((pte_t *)pud,
+ pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
+ PAGE_KERNEL_LARGE),
+ init);
spin_unlock(&init_mm.page_table_lock);
paddr_last = paddr_next;
continue;
@@ -620,10 +654,10 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
pmd = alloc_low_page();
paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
- page_size_mask, prot);
+ page_size_mask, prot, init);
spin_lock(&init_mm.page_table_lock);
- pud_populate_safe(&init_mm, pud, pmd);
+ pud_populate_init(&init_mm, pud, pmd, init);
spin_unlock(&init_mm.page_table_lock);
}
@@ -634,14 +668,15 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
- unsigned long page_size_mask)
+ unsigned long page_size_mask, bool init)
{
unsigned long paddr_next, paddr_last = paddr_end;
unsigned long vaddr = (unsigned long)__va(paddr);
int i = p4d_index(vaddr);
if (!pgtable_l5_enabled())
- return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
+ return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
+ page_size_mask, init);
for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {
p4d_t *p4d;
@@ -657,39 +692,34 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_p4d_safe(p4d, __p4d(0));
+ set_p4d_init(p4d, __p4d(0), init);
continue;
}
if (!p4d_none(*p4d)) {
pud = pud_offset(p4d, 0);
- paddr_last = phys_pud_init(pud, paddr,
- paddr_end,
- page_size_mask);
+ paddr_last = phys_pud_init(pud, paddr, paddr_end,
+ page_size_mask, init);
continue;
}
pud = alloc_low_page();
paddr_last = phys_pud_init(pud, paddr, paddr_end,
- page_size_mask);
+ page_size_mask, init);
spin_lock(&init_mm.page_table_lock);
- p4d_populate_safe(&init_mm, p4d, pud);
+ p4d_populate_init(&init_mm, p4d, pud, init);
spin_unlock(&init_mm.page_table_lock);
}
return paddr_last;
}
-/*
- * Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PMD level
- * down. It returns the last physical address mapped.
- */
-unsigned long __meminit
-kernel_physical_mapping_init(unsigned long paddr_start,
- unsigned long paddr_end,
- unsigned long page_size_mask)
+static unsigned long __meminit
+__kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask,
+ bool init)
{
bool pgd_changed = false;
unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
@@ -709,19 +739,22 @@ kernel_physical_mapping_init(unsigned long paddr_start,
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
paddr_last = phys_p4d_init(p4d, __pa(vaddr),
__pa(vaddr_end),
- page_size_mask);
+ page_size_mask,
+ init);
continue;
}
p4d = alloc_low_page();
paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
- page_size_mask);
+ page_size_mask, init);
spin_lock(&init_mm.page_table_lock);
if (pgtable_l5_enabled())
- pgd_populate_safe(&init_mm, pgd, p4d);
+ pgd_populate_init(&init_mm, pgd, p4d, init);
else
- p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
+ p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
+ (pud_t *) p4d, init);
+
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
@@ -732,6 +765,37 @@ kernel_physical_mapping_init(unsigned long paddr_start,
return paddr_last;
}
+
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. Note that it can only be used to populate non-present entries.
+ * The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
+unsigned long __meminit
+kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask)
+{
+ return __kernel_physical_mapping_init(paddr_start, paddr_end,
+ page_size_mask, true);
+}
+
+/*
+ * This function is similar to kernel_physical_mapping_init() above with the
+ * exception that it uses the plain set_{pte,pmd,pud}() helpers instead of the
+ * set_{pte,pmd,pud}_safe() variants when updating the mapping. The caller is
+ * responsible for flushing the TLBs after
+ * the function returns.
+ */
+unsigned long __meminit
+kernel_physical_mapping_change(unsigned long paddr_start,
+ unsigned long paddr_end,
+ unsigned long page_size_mask)
+{
+ return __kernel_physical_mapping_init(paddr_start, paddr_end,
+ page_size_mask, false);
+}
+
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
@@ -777,11 +841,11 @@ static void update_end_of_memory_vars(u64 start, u64 size)
}
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap, bool want_memblock)
+ struct mhp_restrictions *restrictions)
{
int ret;
- ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */
@@ -791,15 +855,15 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
return ret;
}
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
init_memory_mapping(start, start + size);
- return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ return add_pages(nid, start_pfn, nr_pages, restrictions);
}
#define PAGE_INUSE 0xFD
@@ -1141,24 +1205,20 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
remove_pagetable(start, end, true, NULL);
}
-int __ref arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void __ref arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(start_pfn);
struct zone *zone;
- int ret;
/* With altmap the first mapped page is offset from @start */
if (altmap)
page += vmem_altmap_offset(altmap);
zone = page_zone(page);
- ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
- WARN_ON_ONCE(ret);
+ __remove_pages(zone, start_pfn, nr_pages, altmap);
kernel_physical_mapping_remove(start, start + size);
-
- return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index d669c5e797e0..dc3f058bdf9b 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -125,10 +125,7 @@ void __init kernel_randomize_memory(void)
*/
entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
prandom_bytes_state(&rand_state, &rand, sizeof(rand));
- if (pgtable_l5_enabled())
- entropy = (rand % (entropy + 1)) & P4D_MASK;
- else
- entropy = (rand % (entropy + 1)) & PUD_MASK;
+ entropy = (rand % (entropy + 1)) & PUD_MASK;
vaddr += entropy;
*kaslr_regions[i].base = vaddr;
@@ -137,84 +134,71 @@ void __init kernel_randomize_memory(void)
* randomization alignment.
*/
vaddr += get_padding(&kaslr_regions[i]);
- if (pgtable_l5_enabled())
- vaddr = round_up(vaddr + 1, P4D_SIZE);
- else
- vaddr = round_up(vaddr + 1, PUD_SIZE);
+ vaddr = round_up(vaddr + 1, PUD_SIZE);
remain_entropy -= entropy;
}
}
static void __meminit init_trampoline_pud(void)
{
- unsigned long paddr, paddr_next;
+ pud_t *pud_page_tramp, *pud, *pud_tramp;
+ p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
+ unsigned long paddr, vaddr;
pgd_t *pgd;
- pud_t *pud_page, *pud_page_tramp;
- int i;
pud_page_tramp = alloc_low_page();
+ /*
+ * There are two mappings for the low 1MB area, the direct mapping
+ * and the 1:1 mapping for the real mode trampoline:
+ *
+ * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
+ * 1:1 mapping: virt_addr = phys_addr
+ */
paddr = 0;
- pgd = pgd_offset_k((unsigned long)__va(paddr));
- pud_page = (pud_t *) pgd_page_vaddr(*pgd);
-
- for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
- pud_t *pud, *pud_tramp;
- unsigned long vaddr = (unsigned long)__va(paddr);
+ vaddr = (unsigned long)__va(paddr);
+ pgd = pgd_offset_k(vaddr);
- pud_tramp = pud_page_tramp + pud_index(paddr);
- pud = pud_page + pud_index(vaddr);
- paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
-
- *pud_tramp = *pud;
- }
+ p4d = p4d_offset(pgd, vaddr);
+ pud = pud_offset(p4d, vaddr);
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
-}
-
-static void __meminit init_trampoline_p4d(void)
-{
- unsigned long paddr, paddr_next;
- pgd_t *pgd;
- p4d_t *p4d_page, *p4d_page_tramp;
- int i;
+ pud_tramp = pud_page_tramp + pud_index(paddr);
+ *pud_tramp = *pud;
- p4d_page_tramp = alloc_low_page();
-
- paddr = 0;
- pgd = pgd_offset_k((unsigned long)__va(paddr));
- p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
-
- for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
- p4d_t *p4d, *p4d_tramp;
- unsigned long vaddr = (unsigned long)__va(paddr);
+ if (pgtable_l5_enabled()) {
+ p4d_page_tramp = alloc_low_page();
p4d_tramp = p4d_page_tramp + p4d_index(paddr);
- p4d = p4d_page + p4d_index(vaddr);
- paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
- *p4d_tramp = *p4d;
- }
+ set_p4d(p4d_tramp,
+ __p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
- set_pgd(&trampoline_pgd_entry,
- __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
+ } else {
+ set_pgd(&trampoline_pgd_entry,
+ __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+ }
}
/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
+ * The real mode trampoline, which is required for bootstrapping CPUs,
+ * occupies only a small area under the low 1MB. See reserve_real_mode()
+ * for details.
+ *
+ * If KASLR is disabled the first PGD entry of the direct mapping is copied
+ * to map the real mode trampoline.
+ *
+ * If KASLR is enabled, copy only the PUD which covers the low 1MB
+ * area. This limits the randomization granularity to 1GB for both 4-level
+ * and 5-level paging.
*/
void __meminit init_trampoline(void)
{
-
if (!kaslr_memory_enabled()) {
init_trampoline_default();
return;
}
- if (pgtable_l5_enabled())
- init_trampoline_p4d();
- else
- init_trampoline_pud();
+ init_trampoline_pud();
}
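Editor's note: a tiny standalone illustration of the two low-1MB mappings described in the comment above. The PAGE_OFFSET value is an assumption (the non-randomized 4-level x86-64 default); the program is illustrative only.

#include <stdio.h>

#define PAGE_OFFSET	0xffff888000000000UL	/* assumed 4-level default */

int main(void)
{
	unsigned long phys = 0x7c000;	/* some physical address below 1MB */

	printf("direct mapping: %#lx\n", phys + PAGE_OFFSET);
	printf("1:1 mapping   : %#lx\n", phys);
	return 0;
}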
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 385afa2b9e17..51f50a7a07ef 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -301,9 +301,13 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
else
split_page_size_mask = 1 << PG_LEVEL_2M;
- kernel_physical_mapping_init(__pa(vaddr & pmask),
- __pa((vaddr_end & pmask) + psize),
- split_page_size_mask);
+ /*
+ * kernel_physical_mapping_change() does not flush the TLBs, so
+ * a TLB flush is required after we exit from the for loop.
+ */
+ kernel_physical_mapping_change(__pa(vaddr & pmask),
+ __pa((vaddr_end & pmask) + psize),
+ split_page_size_mask);
}
ret = 0;
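Editor's note: a hedged sketch of the contract spelled out in the comment above; the wrapper name is illustrative, and __flush_tlb_all() is used here only to show that the flush is the caller's job.

static void change_mapping_and_flush(unsigned long pa_start, unsigned long pa_end,
				     unsigned long split_page_size_mask)
{
	kernel_physical_mapping_change(pa_start, pa_end, split_page_size_mask);
	__flush_tlb_all();	/* kernel_physical_mapping_change() does not flush */
}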
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 319bde386d5f..eeae142062ed 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -13,6 +13,9 @@ void early_ioremap_page_table_range_init(void);
unsigned long kernel_physical_mapping_init(unsigned long start,
unsigned long end,
unsigned long page_size_mask);
+unsigned long kernel_physical_mapping_change(unsigned long start,
+ unsigned long end,
+ unsigned long page_size_mask);
void zone_sizes_init(void);
extern int after_bootmem;
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index c805db6236b4..0d1c47cbbdd6 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -142,7 +142,7 @@ int mpx_fault_info(struct mpx_fault_info *info, struct pt_regs *regs)
goto err_out;
}
/* get bndregs field from current task's xsave area */
- bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
+ bndregs = get_xsave_field_ptr(XFEATURE_BNDREGS);
if (!bndregs) {
err = -EINVAL;
goto err_out;
@@ -190,7 +190,7 @@ static __user void *mpx_get_bounds_dir(void)
* The bounds directory pointer is stored in a register
* only accessible if we first do an xsave.
*/
- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
if (!bndcsr)
return MPX_INVALID_BOUNDS_DIR;
@@ -376,7 +376,7 @@ static int do_mpx_bt_fault(void)
const struct mpx_bndcsr *bndcsr;
struct mm_struct *mm = current->mm;
- bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
+ bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
if (!bndcsr)
return -EINVAL;
/*
@@ -881,9 +881,10 @@ static int mpx_unmap_tables(struct mm_struct *mm,
* the virtual address region start...end have already been split if
* necessary, and the 'vma' is the first vma in this range (start -> end).
*/
-void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+void mpx_notify_unmap(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
+ struct vm_area_struct *vma;
int ret;
/*
@@ -902,11 +903,12 @@ void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
* which should not occur normally. Being strict about it here
* helps ensure that we do not have an exploitable stack overflow.
*/
- do {
+ vma = find_vma(mm, start);
+ while (vma && vma->vm_start < end) {
if (vma->vm_flags & VM_MPX)
return;
vma = vma->vm_next;
- } while (vma && vma->vm_start < end);
+ }
ret = mpx_unmap_tables(mm, start, end);
if (ret)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4c570612e24e..daf4d645e537 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2209,8 +2209,6 @@ int set_pages_rw(struct page *page, int numpages)
return set_memory_rw(addr, numpages);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-
static int __set_pages_p(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
@@ -2249,6 +2247,16 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ return __set_pages_np(page, 1);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ return __set_pages_p(page, 1);
+}
+
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
@@ -2282,7 +2290,6 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
}
#ifdef CONFIG_HIBERNATION
-
bool kernel_page_present(struct page *page)
{
unsigned int level;
@@ -2294,11 +2301,8 @@ bool kernel_page_present(struct page *page)
pte = lookup_address((unsigned long)page_address(page), &level);
return (pte_val(*pte) & _PAGE_PRESENT);
}
-
#endif /* CONFIG_HIBERNATION */
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
unsigned numpages, unsigned long page_flags)
{
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7bd01709a091..1f67b1e15bf6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -190,7 +190,7 @@ static void pgd_dtor(pgd_t *pgd)
* when PTI is enabled. We need them to map the per-process LDT into the
* user-space page-table.
*/
-#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+#define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
@@ -292,7 +292,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pgdp = kernel_to_user_pgdp(pgdp);
@@ -373,14 +373,14 @@ static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
static struct kmem_cache *pgd_cache;
-static int __init pgd_cache_init(void)
+void __init pgd_cache_init(void)
{
/*
* When PAE kernel is running as a Xen domain, it does not use
* shared kernel pmd. And this requires a whole page for pgd.
*/
if (!SHARED_KERNEL_PMD)
- return 0;
+ return;
/*
* when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +390,7 @@ static int __init pgd_cache_init(void)
*/
pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
SLAB_PANIC, NULL);
- return 0;
}
-core_initcall(pgd_cache_init);
static inline pgd_t *_pgd_alloc(void)
{
@@ -420,6 +418,10 @@ static inline void _pgd_free(pgd_t *pgd)
}
#else
+void __init pgd_cache_init(void)
+{
+}
+
static inline pgd_t *_pgd_alloc(void)
{
return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 047a77f6a10c..1dcfc91c8f0c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -18,6 +18,7 @@
#include <asm/cpufeature.h> /* boot_cpu_has, ... */
#include <asm/mmu_context.h> /* vma_pkey() */
+#include <asm/fpu/internal.h> /* init_fpstate */
int __execute_only_pkey(struct mm_struct *mm)
{
@@ -39,17 +40,12 @@ int __execute_only_pkey(struct mm_struct *mm)
* dance to set PKRU if we do not need to. Check it
* first and assume that if the execute-only pkey is
* write-disabled that we do not have to set it
- * ourselves. We need preempt off so that nobody
- * can make fpregs inactive.
+ * ourselves.
*/
- preempt_disable();
if (!need_to_set_mm_pkey &&
- current->thread.fpu.initialized &&
!__pkru_allows_read(read_pkru(), execute_only_pkey)) {
- preempt_enable();
return execute_only_pkey;
}
- preempt_enable();
/*
* Set up PKRU so that it denies access for everything
@@ -131,7 +127,6 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
* in the process's lifetime will not accidentally get access
* to data which is pkey-protected later on.
*/
-static
u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
@@ -148,13 +143,6 @@ void copy_init_pkru_to_fpregs(void)
{
u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
/*
- * Any write to PKRU takes it out of the XSAVE 'init
- * state' which increases context switch cost. Avoid
- * writing 0 when PKRU was already 0.
- */
- if (!init_pkru_value_snapshot && !read_pkru())
- return;
- /*
* Override the PKRU state that came from 'init_fpstate'
* with the baseline from the process.
*/
@@ -174,6 +162,7 @@ static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
static ssize_t init_pkru_write_file(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
+ struct pkru_state *pk;
char buf[32];
ssize_t len;
u32 new_init_pkru;
@@ -196,6 +185,10 @@ static ssize_t init_pkru_write_file(struct file *file,
return -EINVAL;
WRITE_ONCE(init_pkru_value, new_init_pkru);
+ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
+ if (!pk)
+ return -EINVAL;
+ pk->pkru = new_init_pkru;
return count;
}
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 139b28a01ce4..9c2463bc158f 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/cpu.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void)
}
}
- if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+ if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+ cpu_mitigations_off()) {
pti_mode = PTI_FORCE_OFF;
pti_print_if_insecure("disabled on command line.");
return;
@@ -626,7 +628,7 @@ static void pti_set_kernel_image_nonglobal(void)
*/
void __init pti_init(void)
{
- if (!static_cpu_has(X86_FEATURE_PTI))
+ if (!boot_cpu_has(X86_FEATURE_PTI))
return;
pr_info("enabled\n");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 487b8474c01c..7f61431c75fb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -634,7 +634,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
}
-static void flush_tlb_func_local(void *info, enum tlb_flush_reason reason)
+static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
{
const struct flush_tlb_info *f = info;
@@ -722,43 +722,81 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
*/
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
+#endif
+
+static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned int stride_shift, bool freed_tables,
+ u64 new_tlb_gen)
+{
+ struct flush_tlb_info *info = this_cpu_ptr(&flush_tlb_info);
+
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * Ensure that the following code is non-reentrant and flush_tlb_info
+ * is not overwritten. This means no TLB flushing is initiated by
+ * interrupt handlers and machine-check exception handlers.
+ */
+ BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1);
+#endif
+
+ info->start = start;
+ info->end = end;
+ info->mm = mm;
+ info->stride_shift = stride_shift;
+ info->freed_tables = freed_tables;
+ info->new_tlb_gen = new_tlb_gen;
+
+ return info;
+}
+
+static inline void put_flush_tlb_info(void)
+{
+#ifdef CONFIG_DEBUG_VM
+ /* Complete reentrancy prevention checks */
+ barrier();
+ this_cpu_dec(flush_tlb_info_idx);
+#endif
+}
+
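Editor's note: a minimal sketch of the pairing the two helpers above expect, modelled on the flush_tlb_kernel_range() change further down in this diff; the function name is illustrative only.

static void example_kernel_range_flush(unsigned long start, unsigned long end)
{
	struct flush_tlb_info *info;

	preempt_disable();			/* stay on this CPU's per-CPU slot */
	info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
	on_each_cpu(do_kernel_range_flush, info, 1);
	put_flush_tlb_info();			/* release before re-enabling preemption */
	preempt_enable();
}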
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
bool freed_tables)
{
+ struct flush_tlb_info *info;
+ u64 new_tlb_gen;
int cpu;
- struct flush_tlb_info info = {
- .mm = mm,
- .stride_shift = stride_shift,
- .freed_tables = freed_tables,
- };
-
cpu = get_cpu();
- /* This is also a barrier that synchronizes with switch_mm(). */
- info.new_tlb_gen = inc_mm_tlb_gen(mm);
-
/* Should we flush just the requested range? */
- if ((end != TLB_FLUSH_ALL) &&
- ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
- info.start = start;
- info.end = end;
- } else {
- info.start = 0UL;
- info.end = TLB_FLUSH_ALL;
+ if ((end == TLB_FLUSH_ALL) ||
+ ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+ start = 0;
+ end = TLB_FLUSH_ALL;
}
+ /* This is also a barrier that synchronizes with switch_mm(). */
+ new_tlb_gen = inc_mm_tlb_gen(mm);
+
+ info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
+ new_tlb_gen);
+
if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
- VM_WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
local_irq_enable();
}
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), &info);
+ flush_tlb_others(mm_cpumask(mm), info);
+ put_flush_tlb_info();
put_cpu();
}
@@ -787,38 +825,48 @@ static void do_kernel_range_flush(void *info)
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
-
/* Balance as user space task's flush, a bit conservative */
if (end == TLB_FLUSH_ALL ||
(end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) {
on_each_cpu(do_flush_tlb_all, NULL, 1);
} else {
- struct flush_tlb_info info;
- info.start = start;
- info.end = end;
- on_each_cpu(do_kernel_range_flush, &info, 1);
+ struct flush_tlb_info *info;
+
+ preempt_disable();
+ info = get_flush_tlb_info(NULL, start, end, 0, false, 0);
+
+ on_each_cpu(do_kernel_range_flush, info, 1);
+
+ put_flush_tlb_info();
+ preempt_enable();
}
}
+/*
+ * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
+ * This means that the 'struct flush_tlb_info' that describes which mappings to
+ * flush is actually fixed. We therefore set a single fixed struct and use it in
+ * arch_tlbbatch_flush().
+ */
+static const struct flush_tlb_info full_flush_tlb_info = {
+ .mm = NULL,
+ .start = 0,
+ .end = TLB_FLUSH_ALL,
+};
+
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- struct flush_tlb_info info = {
- .mm = NULL,
- .start = 0UL,
- .end = TLB_FLUSH_ALL,
- };
-
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, &batch->cpumask)) {
- VM_WARN_ON(irqs_disabled());
+ lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
+ flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
local_irq_enable();
}
if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
- flush_tlb_others(&batch->cpumask, &info);
+ flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
cpumask_clear(&batch->cpumask);
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 0d9cdffce6ac..b29e82f190c7 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -117,6 +117,8 @@ static bool is_simm32(s64 value)
#define IA32_JLE 0x7E
#define IA32_JG 0x7F
+#define COND_JMP_OPCODE_INVALID (0xFF)
+
/*
* Map eBPF registers to IA32 32bit registers or stack scratch space.
*
@@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
STACK_VAR(dst_hi));
}
- /* xor ecx,ecx */
- EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* sub dreg_lo,ecx */
- EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
- /* mov dreg_lo,ecx */
- EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
-
- /* xor ecx,ecx */
- EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* sbb dreg_hi,ecx */
- EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
- /* mov dreg_hi,ecx */
- EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
+ /* neg dreg_lo */
+ EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
+ /* adc dreg_hi,0x0 */
+ EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
+ /* neg dreg_hi */
+ EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
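Editor's note: the three-instruction 64-bit negate emitted above (neg lo; adc hi,0; neg hi) can be verified with a standalone C model; the program below is illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Model of the emitted sequence operating on a 64-bit value split
 * into 32-bit halves, as the 32-bit JIT does. */
static uint64_t neg64_pair(uint32_t lo, uint32_t hi)
{
	uint32_t cf = (lo != 0);	/* carry produced by "neg lo" */

	lo = (uint32_t)-lo;		/* neg dreg_lo */
	hi += cf;			/* adc dreg_hi, 0 */
	hi = (uint32_t)-hi;		/* neg dreg_hi */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* -(1) == 0xffffffffffffffff, -(1ULL << 32) == 0xffffffff00000000 */
	printf("%llx %llx\n",
	       (unsigned long long)neg64_pair(1, 0),
	       (unsigned long long)neg64_pair(0, 1));
	return 0;
}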
@@ -1613,6 +1608,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
*pprog = prog;
}
+static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
+{
+ u8 jmp_cond;
+
+ /* Convert BPF opcode to x86 */
+ switch (op) {
+ case BPF_JEQ:
+ jmp_cond = IA32_JE;
+ break;
+ case BPF_JSET:
+ case BPF_JNE:
+ jmp_cond = IA32_JNE;
+ break;
+ case BPF_JGT:
+ /* GT is unsigned '>', JA in x86 */
+ jmp_cond = IA32_JA;
+ break;
+ case BPF_JLT:
+ /* LT is unsigned '<', JB in x86 */
+ jmp_cond = IA32_JB;
+ break;
+ case BPF_JGE:
+ /* GE is unsigned '>=', JAE in x86 */
+ jmp_cond = IA32_JAE;
+ break;
+ case BPF_JLE:
+ /* LE is unsigned '<=', JBE in x86 */
+ jmp_cond = IA32_JBE;
+ break;
+ case BPF_JSGT:
+ if (!is_cmp_lo)
+ /* Signed '>', GT in x86 */
+ jmp_cond = IA32_JG;
+ else
+ /* GT is unsigned '>', JA in x86 */
+ jmp_cond = IA32_JA;
+ break;
+ case BPF_JSLT:
+ if (!is_cmp_lo)
+ /* Signed '<', LT in x86 */
+ jmp_cond = IA32_JL;
+ else
+ /* LT is unsigned '<', JB in x86 */
+ jmp_cond = IA32_JB;
+ break;
+ case BPF_JSGE:
+ if (!is_cmp_lo)
+ /* Signed '>=', GE in x86 */
+ jmp_cond = IA32_JGE;
+ else
+ /* GE is unsigned '>=', JAE in x86 */
+ jmp_cond = IA32_JAE;
+ break;
+ case BPF_JSLE:
+ if (!is_cmp_lo)
+ /* Signed '<=', LE in x86 */
+ jmp_cond = IA32_JLE;
+ else
+ /* LE is unsigned '<=', JBE in x86 */
+ jmp_cond = IA32_JBE;
+ break;
+ default: /* to silence GCC warning */
+ jmp_cond = COND_JMP_OPCODE_INVALID;
+ break;
+ }
+
+ return jmp_cond;
+}
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -2069,10 +2133,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
- case BPF_JMP | BPF_JSGT | BPF_X:
- case BPF_JMP | BPF_JSLE | BPF_X:
- case BPF_JMP | BPF_JSLT | BPF_X:
- case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
@@ -2118,6 +2178,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X: {
+ u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 sreg_lo = sstk ? IA32_ECX : src_lo;
+ u8 sreg_hi = sstk ? IA32_EBX : src_hi;
+
+ if (dstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+ STACK_VAR(dst_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
+ }
+
+ if (sstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
+ STACK_VAR(src_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EBX),
+ STACK_VAR(src_hi));
+ }
+
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 10);
+ /* cmp dreg_lo,sreg_lo */
+ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+ goto emit_cond_jmp_signed;
+ }
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
@@ -2194,10 +2288,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
- case BPF_JMP | BPF_JSGT | BPF_K:
- case BPF_JMP | BPF_JSLE | BPF_K:
- case BPF_JMP | BPF_JSLT | BPF_K:
- case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
@@ -2238,50 +2328,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
-emit_cond_jmp: /* Convert BPF opcode to x86 */
- switch (BPF_OP(code)) {
- case BPF_JEQ:
- jmp_cond = IA32_JE;
- break;
- case BPF_JSET:
- case BPF_JNE:
- jmp_cond = IA32_JNE;
- break;
- case BPF_JGT:
- /* GT is unsigned '>', JA in x86 */
- jmp_cond = IA32_JA;
- break;
- case BPF_JLT:
- /* LT is unsigned '<', JB in x86 */
- jmp_cond = IA32_JB;
- break;
- case BPF_JGE:
- /* GE is unsigned '>=', JAE in x86 */
- jmp_cond = IA32_JAE;
- break;
- case BPF_JLE:
- /* LE is unsigned '<=', JBE in x86 */
- jmp_cond = IA32_JBE;
- break;
- case BPF_JSGT:
- /* Signed '>', GT in x86 */
- jmp_cond = IA32_JG;
- break;
- case BPF_JSLT:
- /* Signed '<', LT in x86 */
- jmp_cond = IA32_JL;
- break;
- case BPF_JSGE:
- /* Signed '>=', GE in x86 */
- jmp_cond = IA32_JGE;
- break;
- case BPF_JSLE:
- /* Signed '<=', LE in x86 */
- jmp_cond = IA32_JLE;
- break;
- default: /* to silence GCC warning */
+emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
- }
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
EMIT2(jmp_cond, jmp_offset);
@@ -2291,7 +2340,66 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
+ break;
+ }
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K: {
+ u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
+ u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 sreg_lo = IA32_ECX;
+ u8 sreg_hi = IA32_EBX;
+ u32 hi;
+ if (dstk) {
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
+ STACK_VAR(dst_lo));
+ EMIT3(0x8B,
+ add_2reg(0x40, IA32_EBP,
+ IA32_EDX),
+ STACK_VAR(dst_hi));
+ }
+
+ /* mov ecx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
+ hi = imm32 & (1 << 31) ? (u32)~0 : 0;
+ /* mov ebx,imm32 */
+ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
+ /* cmp dreg_hi,sreg_hi */
+ EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
+ EMIT2(IA32_JNE, 10);
+ /* cmp dreg_lo,sreg_lo */
+ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
+
+ /*
+ * For simplicity of branch offset computation,
+ * let's use fixed jump coding here.
+ */
+emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */
+ jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
+ return -EFAULT;
+ jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
+ if (is_simm32(jmp_offset)) {
+ EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ } else {
+ pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
+ EMIT2(0xEB, 6);
+
+ /* Check the condition for high 32-bit comparison */
+ jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
+ if (jmp_cond == COND_JMP_OPCODE_INVALID)
+ return -EFAULT;
+ jmp_offset = addrs[i + insn->off] - addrs[i];
+ if (is_simm32(jmp_offset)) {
+ EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ } else {
+ pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ return -EFAULT;
+ }
break;
}
case BPF_JMP | BPF_JA:
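
The new emit_cond_jmp_signed path splits a 64-bit signed comparison into two 32-bit ones: if the high words differ, the signed condition on the high words decides; if they are equal, the low words are compared as unsigned values, which is why get_cond_jmp_opcode() returns the unsigned variant when is_cmp_lo is set. A plain-C model of that logic (illustration only, not JIT output):

/* Hedged model of the emitted two-step test for BPF_JSGT on a 32-bit ISA. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool jsgt64(int64_t a, int64_t b)
{
	int32_t  ahi = (int32_t)(a >> 32),  bhi = (int32_t)(b >> 32);
	uint32_t alo = (uint32_t)a,         blo = (uint32_t)b;

	if (ahi != bhi)
		return ahi > bhi;   /* high words differ: signed compare decides */
	return alo > blo;           /* high words equal: unsigned low-word compare */
}

int main(void)
{
	printf("%d\n", jsgt64(-1, 1));           /* 0 */
	printf("%d\n", jsgt64(1LL << 32, 5));    /* 1 */
	printf("%d\n", jsgt64(-5, -10));         /* 1 */
	return 0;
}
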
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 52e55108404e..d3a73f9335e1 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
void __init pcibios_irq_init(void)
{
+ struct irq_routing_table *rtable = NULL;
+
DBG(KERN_DEBUG "PCI: IRQ init\n");
if (raw_pci_ops == NULL)
@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
pirq_table = pirq_find_routing_table();
#ifdef CONFIG_PCI_BIOS
- if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
pirq_table = pcibios_get_irq_routing_table();
+ rtable = pirq_table;
+ }
#endif
if (pirq_table) {
pirq_peer_trick();
@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
- if (io_apic_assign_pci_irqs)
+ if (io_apic_assign_pci_irqs) {
+ kfree(rtable);
pirq_table = NULL;
+ }
}
x86_init.pci.fixup_irqs();
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index ac9e7bf49b66..0296c5b55e6f 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -220,10 +220,26 @@ static u32 __init olpc_dt_get_board_revision(void)
return be32_to_cpu(rev);
}
+int olpc_dt_compatible_match(phandle node, const char *compat)
+{
+ char buf[64], *p;
+ int plen, len;
+
+ plen = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
+ if (plen <= 0)
+ return 0;
+
+ len = strlen(compat);
+ for (p = buf; p < buf + plen; p += strlen(p) + 1) {
+ if (strcmp(p, compat) == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
void __init olpc_dt_fixup(void)
{
- int r;
- char buf[64];
phandle node;
u32 board_rev;
@@ -231,41 +247,66 @@ void __init olpc_dt_fixup(void)
if (!node)
return;
- /*
- * If the battery node has a compatible property, we are running a new
- * enough firmware and don't have fixups to make.
- */
- r = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
- if (r > 0)
- return;
-
- pr_info("PROM DT: Old firmware detected, applying fixes\n");
-
- /* Add olpc,xo1-battery compatible marker to battery node */
- olpc_dt_interpret("\" /battery@0\" find-device"
- " \" olpc,xo1-battery\" +compatible"
- " device-end");
-
board_rev = olpc_dt_get_board_revision();
if (!board_rev)
return;
if (board_rev >= olpc_board_pre(0xd0)) {
- /* XO-1.5: add dcon device */
- olpc_dt_interpret("\" /pci/display@1\" find-device"
- " new-device"
- " \" dcon\" device-name \" olpc,xo1-dcon\" +compatible"
- " finish-device device-end");
+ /* XO-1.5 */
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1.5-battery"))
+ return;
+
+ /* Add olpc,xo1.5-battery compatible marker to battery node */
+ olpc_dt_interpret("\" /battery@0\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1.5-battery\" +compatible");
+ olpc_dt_interpret("device-end");
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
+ /*
+ * If we have an olpc,xo1-battery compatible, then we're
+ * running a new enough firmware that already has
+ * the dcon node.
+ */
+ return;
+ }
+
+ /* Add dcon device */
+ olpc_dt_interpret("\" /pci/display@1\" find-device");
+ olpc_dt_interpret(" new-device");
+ olpc_dt_interpret(" \" dcon\" device-name");
+ olpc_dt_interpret(" \" olpc,xo1-dcon\" +compatible");
+ olpc_dt_interpret(" finish-device");
+ olpc_dt_interpret("device-end");
} else {
- /* XO-1: add dcon device, mark RTC as olpc,xo1-rtc */
- olpc_dt_interpret("\" /pci/display@1,1\" find-device"
- " new-device"
- " \" dcon\" device-name \" olpc,xo1-dcon\" +compatible"
- " finish-device device-end"
- " \" /rtc\" find-device"
- " \" olpc,xo1-rtc\" +compatible"
- " device-end");
+ /* XO-1 */
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
+ /*
+ * If we have an olpc,xo1-battery compatible, then we're
+ * running a new enough firmware that already has
+ * the dcon and RTC nodes.
+ */
+ return;
+ }
+
+ /* Add dcon device, mark RTC as olpc,xo1-rtc */
+ olpc_dt_interpret("\" /pci/display@1,1\" find-device");
+ olpc_dt_interpret(" new-device");
+ olpc_dt_interpret(" \" dcon\" device-name");
+ olpc_dt_interpret(" \" olpc,xo1-dcon\" +compatible");
+ olpc_dt_interpret(" finish-device");
+ olpc_dt_interpret("device-end");
+
+ olpc_dt_interpret("\" /rtc\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1-rtc\" +compatible");
+ olpc_dt_interpret("device-end");
}
+
+ /* Add olpc,xo1-battery compatible marker to battery node */
+ olpc_dt_interpret("\" /battery@0\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1-battery\" +compatible");
+ olpc_dt_interpret("device-end");
}
void __init olpc_dt_build_devicetree(void)
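
olpc_dt_compatible_match() walks the property buffer with p += strlen(p) + 1 because a device-tree "compatible" property is a sequence of NUL-terminated strings packed back to back. A stand-alone illustration with made-up sample data:

/* Hedged illustration: the buffer contents below are invented sample data. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char buf[] = "olpc,xo1.5-battery\0olpc,xo1-battery";
	int plen = sizeof(buf);        /* includes both terminating NULs */
	const char *p;

	for (p = buf; p < buf + plen; p += strlen(p) + 1)
		printf("compatible: %s\n", p);
	return 0;
}
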
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
index 62f5c7045944..1861a2ba0f2b 100644
--- a/arch/x86/platform/pvh/enlighten.c
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -44,8 +44,6 @@ void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
static void __init init_pvh_bootparams(bool xen_guest)
{
- memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
-
if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
struct hvm_memmap_table_entry *ep;
int i;
@@ -103,7 +101,7 @@ static void __init init_pvh_bootparams(bool xen_guest)
* If we are trying to boot a Xen PVH guest, it is expected that the kernel
* will have been configured to provide the required override for this routine.
*/
-void __init __weak xen_pvh_init(void)
+void __init __weak xen_pvh_init(struct boot_params *boot_params)
{
xen_raw_printk("Error: Missing xen PVH initialization\n");
BUG();
@@ -112,7 +110,7 @@ void __init __weak xen_pvh_init(void)
static void hypervisor_specific_init(bool xen_guest)
{
if (xen_guest)
- xen_pvh_init();
+ xen_pvh_init(&pvh_bootparams);
}
/*
@@ -131,6 +129,8 @@ void __init xen_prepare_pvh(void)
BUG();
}
+ memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+
hypervisor_specific_init(xen_guest);
init_pvh_bootparams(xen_guest);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 2c53b0f19329..1297e185b8c8 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -2133,14 +2133,19 @@ static int __init summarize_uvhub_sockets(int nuvhubs,
*/
static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
{
- unsigned char *uvhub_mask;
struct uvhub_desc *uvhub_descs;
+ unsigned char *uvhub_mask = NULL;
if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
timeout_us = calculate_destination_timeout();
uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
+ if (!uvhub_descs)
+ goto fail;
+
uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
+ if (!uvhub_mask)
+ goto fail;
if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
goto fail;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index bcddf09b5aa3..4845b8c7be7f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -90,7 +90,6 @@ static int get_e820_md5(struct e820_table *table, void *buf)
}
desc->tfm = tfm;
- desc->flags = 0;
size = offsetof(struct e820_table, entries) +
sizeof(struct e820_entry) * table->nr_entries;
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b629f6992d9f..ce7188cbdae5 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -11,7 +11,9 @@
#define Elf_Shdr ElfW(Shdr)
#define Elf_Sym ElfW(Sym)
-static Elf_Ehdr ehdr;
+static Elf_Ehdr ehdr;
+static unsigned long shnum;
+static unsigned int shstrndx;
struct relocs {
uint32_t *offset;
@@ -241,9 +243,9 @@ static const char *sec_name(unsigned shndx)
{
const char *sec_strtab;
const char *name;
- sec_strtab = secs[ehdr.e_shstrndx].strtab;
+ sec_strtab = secs[shstrndx].strtab;
name = "<noname>";
- if (shndx < ehdr.e_shnum) {
+ if (shndx < shnum) {
name = sec_strtab + secs[shndx].shdr.sh_name;
}
else if (shndx == SHN_ABS) {
@@ -271,7 +273,7 @@ static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
static Elf_Sym *sym_lookup(const char *symname)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
long nsyms;
char *strtab;
@@ -366,27 +368,41 @@ static void read_ehdr(FILE *fp)
ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
- if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
+ shnum = ehdr.e_shnum;
+ shstrndx = ehdr.e_shstrndx;
+
+ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
die("Unsupported ELF header type\n");
- }
- if (ehdr.e_machine != ELF_MACHINE) {
+ if (ehdr.e_machine != ELF_MACHINE)
die("Not for %s\n", ELF_MACHINE_NAME);
- }
- if (ehdr.e_version != EV_CURRENT) {
+ if (ehdr.e_version != EV_CURRENT)
die("Unknown ELF version\n");
- }
- if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) {
+ if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
die("Bad Elf header size\n");
- }
- if (ehdr.e_phentsize != sizeof(Elf_Phdr)) {
+ if (ehdr.e_phentsize != sizeof(Elf_Phdr))
die("Bad program header entry\n");
- }
- if (ehdr.e_shentsize != sizeof(Elf_Shdr)) {
+ if (ehdr.e_shentsize != sizeof(Elf_Shdr))
die("Bad section header entry\n");
+
+
+ if (shnum == SHN_UNDEF || shstrndx == SHN_XINDEX) {
+ Elf_Shdr shdr;
+
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read initial ELF section header: %s\n", strerror(errno));
+
+ if (shnum == SHN_UNDEF)
+ shnum = elf_xword_to_cpu(shdr.sh_size);
+
+ if (shstrndx == SHN_XINDEX)
+ shstrndx = elf_word_to_cpu(shdr.sh_link);
}
- if (ehdr.e_shstrndx >= ehdr.e_shnum) {
+
+ if (shstrndx >= shnum)
die("String table index out of bounds\n");
- }
}
static void read_shdrs(FILE *fp)
@@ -394,20 +410,20 @@ static void read_shdrs(FILE *fp)
int i;
Elf_Shdr shdr;
- secs = calloc(ehdr.e_shnum, sizeof(struct section));
+ secs = calloc(shnum, sizeof(struct section));
if (!secs) {
die("Unable to allocate %d section headers\n",
- ehdr.e_shnum);
+ shnum);
}
if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
ehdr.e_shoff, strerror(errno));
}
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
die("Cannot read ELF section headers %d/%d: %s\n",
- i, ehdr.e_shnum, strerror(errno));
+ i, shnum, strerror(errno));
sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
@@ -418,7 +434,7 @@ static void read_shdrs(FILE *fp)
sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
- if (sec->shdr.sh_link < ehdr.e_shnum)
+ if (sec->shdr.sh_link < shnum)
sec->link = &secs[sec->shdr.sh_link];
}
@@ -427,7 +443,7 @@ static void read_shdrs(FILE *fp)
static void read_strtabs(FILE *fp)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_STRTAB) {
continue;
@@ -452,7 +468,7 @@ static void read_strtabs(FILE *fp)
static void read_symtabs(FILE *fp)
{
int i,j;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_SYMTAB) {
continue;
@@ -485,7 +501,7 @@ static void read_symtabs(FILE *fp)
static void read_relocs(FILE *fp)
{
int i,j;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL_TYPE) {
continue;
@@ -528,7 +544,7 @@ static void print_absolute_symbols(void)
printf("Absolute symbols\n");
printf(" Num: Value Size Type Bind Visibility Name\n");
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
char *sym_strtab;
int j;
@@ -566,7 +582,7 @@ static void print_absolute_relocs(void)
else
format = "%08"PRIx32" %08"PRIx32" %10s %08"PRIx32" %s\n";
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
struct section *sec = &secs[i];
struct section *sec_applies, *sec_symtab;
char *sym_strtab;
@@ -650,7 +666,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
{
int i;
/* Walk through the relocations */
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
char *sym_strtab;
Elf_Sym *sh_symtab;
struct section *sec_applies, *sec_symtab;
@@ -706,7 +722,7 @@ static Elf_Addr per_cpu_load_addr;
static void percpu_init(void)
{
int i;
- for (i = 0; i < ehdr.e_shnum; i++) {
+ for (i = 0; i < shnum; i++) {
ElfW(Sym) *sym;
if (strcmp(sec_name(i), ".data..percpu"))
continue;
@@ -738,7 +754,7 @@ static void percpu_init(void)
* __per_cpu_load
*
* The "gold" linker incorrectly associates:
- * init_per_cpu__irq_stack_union
+ * init_per_cpu__fixed_percpu_data
* init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index a9e80e44178c..a8985e1f7432 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -32,12 +32,6 @@ config ARCH_DEFCONFIG
default "arch/um/configs/i386_defconfig" if X86_32
default "arch/um/configs/x86_64_defconfig" if X86_64
-config RWSEM_XCHGADD_ALGORITHM
- def_bool 64BIT
-
-config RWSEM_GENERIC_SPINLOCK
- def_bool !RWSEM_XCHGADD_ALGORITHM
-
config 3_LEVEL_PGTABLES
bool "Three-level pagetables" if !64BIT
default 64BIT
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 2d686ae54681..33c51c064c77 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
-subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
else
obj-y += syscalls_64.o vdso/
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
- ../lib/rwsem.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
endif
diff --git a/arch/x86/um/asm/syscall.h b/arch/x86/um/asm/syscall.h
index ef898af102d1..56a2f0913e3c 100644
--- a/arch/x86/um/asm/syscall.h
+++ b/arch/x86/um/asm/syscall.h
@@ -9,7 +9,7 @@ typedef asmlinkage long (*sys_call_ptr_t)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
#ifdef CONFIG_X86_32
return AUDIT_ARCH_I386;
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index bf94060fc06f..0caddd6acb22 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -62,7 +62,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared -Wl,--hash-style=sysv
GCOV_PROFILE := n
#
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
index 1fbb629a9d78..0d3365cb64de 100644
--- a/arch/x86/xen/efi.c
+++ b/arch/x86/xen/efi.c
@@ -158,7 +158,7 @@ static enum efi_secureboot_mode xen_efi_get_secureboot(void)
return efi_secureboot_mode_unknown;
}
-void __init xen_efi_init(void)
+void __init xen_efi_init(struct boot_params *boot_params)
{
efi_system_table_t *efi_systab_xen;
@@ -167,12 +167,12 @@ void __init xen_efi_init(void)
if (efi_systab_xen == NULL)
return;
- strncpy((char *)&boot_params.efi_info.efi_loader_signature, "Xen",
- sizeof(boot_params.efi_info.efi_loader_signature));
- boot_params.efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
- boot_params.efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
+ strncpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen",
+ sizeof(boot_params->efi_info.efi_loader_signature));
+ boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen);
+ boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32);
- boot_params.secure_boot = xen_efi_get_secureboot();
+ boot_params->secure_boot = xen_efi_get_secureboot();
set_bit(EFI_BOOT, &efi.flags);
set_bit(EFI_PARAVIRT, &efi.flags);
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index c54a493e139a..4722ba2966ac 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1403,7 +1403,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* We need this for printk timestamps */
xen_setup_runstate_info(0);
- xen_efi_init();
+ xen_efi_init(&boot_params);
/* Start the world */
#ifdef CONFIG_X86_32
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 35b7599d2d0b..80a79db72fcf 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -13,6 +13,8 @@
#include <xen/interface/memory.h>
+#include "xen-ops.h"
+
/*
* PVH variables.
*
@@ -21,17 +23,20 @@
*/
bool xen_pvh __attribute__((section(".data"))) = 0;
-void __init xen_pvh_init(void)
+void __init xen_pvh_init(struct boot_params *boot_params)
{
u32 msr;
u64 pfn;
xen_pvh = 1;
+ xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+
+ xen_efi_init(boot_params);
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index a21e1734fc1f..beb44e22afdf 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2318,8 +2318,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
case VSYSCALL_PAGE:
#endif
- case FIX_TEXT_POKE0:
- case FIX_TEXT_POKE1:
/* All local page mappings */
pte = pfn_pte(phys, prot);
break;
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 0766a08bdf45..07054572297f 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -105,7 +105,7 @@ void xen_mc_flush(void)
for (i = 0; i < b->mcidx; i++) {
if (b->entries[i].result < 0) {
#if MC_DEBUG
- pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
+ pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n",
i + 1,
b->debug[i].op,
b->debug[i].args[0],
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 145506f9fdbe..590fcf863006 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -361,7 +361,9 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
int rc;
- common_cpu_up(cpu, idle);
+ rc = common_cpu_up(cpu, idle);
+ if (rc)
+ return rc;
xen_setup_runstate_info(cpu);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 6e29794573b7..befbdd8b17f0 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -28,7 +28,7 @@
#include "xen-ops.h"
-/* Xen may fire a timer up to this many ns early */
+/* Minimum amount of time until next clock event fires */
#define TIMER_SLOP 100000
static u64 xen_sched_clock_offset __read_mostly;
@@ -212,7 +212,7 @@ static int xen_timerop_set_next_event(unsigned long delta,
return 0;
}
-static const struct clock_event_device xen_timerop_clockevent = {
+static struct clock_event_device xen_timerop_clockevent __ro_after_init = {
.name = "xen",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -273,7 +273,7 @@ static int xen_vcpuop_set_next_event(unsigned long delta,
return ret;
}
-static const struct clock_event_device xen_vcpuop_clockevent = {
+static struct clock_event_device xen_vcpuop_clockevent __ro_after_init = {
.name = "xen",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -570,3 +570,17 @@ void __init xen_hvm_init_time_ops(void)
x86_platform.set_wallclock = xen_set_wallclock;
}
#endif
+
+/* Kernel parameter to specify Xen timer slop */
+static int __init parse_xen_timer_slop(char *ptr)
+{
+ unsigned long slop = memparse(ptr, NULL);
+
+ xen_timerop_clockevent.min_delta_ns = slop;
+ xen_timerop_clockevent.min_delta_ticks = slop;
+ xen_vcpuop_clockevent.min_delta_ns = slop;
+ xen_vcpuop_clockevent.min_delta_ticks = slop;
+
+ return 0;
+}
+early_param("xen_timer_slop", parse_xen_timer_slop);
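
With this change the slop is no longer fixed at build time: booting with, for example, xen_timer_slop=50000 on the kernel command line sets min_delta_ns and min_delta_ticks of both clockevent devices to 50000 (memparse() also accepts suffixes such as K, M and G). This is only a reading of the parser shown above, not a documented tuning recommendation.
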
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 5077ead5e59c..c1d8b90aa4e2 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -40,13 +40,13 @@ ENTRY(startup_xen)
#ifdef CONFIG_X86_64
/* Set up %gs.
*
- * The base of %gs always points to the bottom of the irqstack
- * union. If the stack protector canary is enabled, it is
- * located at %gs:40. Note that, on SMP, the boot cpu uses
- * init data section till per cpu areas are set up.
+ * The base of %gs always points to fixed_percpu_data. If the
+ * stack protector canary is enabled, it is located at %gs:40.
+ * Note that, on SMP, the boot cpu uses init data section until
+ * the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
- movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
+ movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax
cdq
wrmsr
#endif
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 0e60bd918695..2f111f47ba98 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -122,9 +122,9 @@ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
void __init xen_init_apic(void);
#ifdef CONFIG_XEN_EFI
-extern void xen_efi_init(void);
+extern void xen_efi_init(struct boot_params *boot_params);
#else
-static inline void __init xen_efi_init(void)
+static inline void __init xen_efi_init(struct boot_params *boot_params)
{
}
#endif
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 4b9aafe766c5..6ec1b75eabc5 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -46,9 +46,6 @@ config XTENSA
with reasonable minimum requirements. The Xtensa Linux project has
a home page at <http://www.linux-xtensa.org/>.
-config RWSEM_XCHGADD_ALGORITHM
- def_bool y
-
config GENERIC_HWEIGHT
def_bool y
@@ -256,12 +253,26 @@ config MEMMAP_CACHEATTR
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
- Cache attribute values are specific for the MMU type, so e.g.
- for region protection MMUs: 2 is cache bypass, 4 is WB cached,
- 1 is WT cached, f is illegal. For ful MMU: bit 0 makes it executable,
- bit 1 makes it writable, bits 2..3 meaning is 0: cache bypass,
- 1: WB cache, 2: WT cache, 3: special (c and e are illegal, f is
- reserved).
+ Cache attribute values are specific for the MMU type.
+ For region protection MMUs:
+ 1: WT cached,
+ 2: cache bypass,
+ 4: WB cached,
+ f: illegal.
+ For full MMU:
+ bit 0: executable,
+ bit 1: writable,
+ bits 2..3:
+ 0: cache bypass,
+ 1: WB cache,
+ 2: WT cache,
+ 3: special (c and e are illegal, f is reserved).
+ For MPU:
+ 0: illegal,
+ 1: WB cache,
+ 2: WB, no-write-allocate cache,
+ 3: WT cache,
+ 4: cache bypass.
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index bbf3b4b080cd..48ba5a232d94 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <variant/core.h>
+#include <asm/core.h>
#include <asm/regs.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index 355127faade1..e3d717c7bfa1 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -7,7 +7,7 @@ zlib := inffast.c inflate.c inftrees.c
lib-y += $(zlib:.c=.o) zmem.o
-ccflags-y := -Ilib/zlib_inflate
+ccflags-y := -I $(srctree)/lib/zlib_inflate
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_inflate.o = -pg
CFLAGS_REMOVE_zmem.o = -pg
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 3843198e03d4..f1686d919178 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -20,14 +20,13 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
generic-y += param.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
-generic-y += rwsem.h
generic-y += sections.h
-generic-y += socket.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 7f2ae5872151..8308a9c3abb2 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -11,7 +11,7 @@
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
-#include <variant/core.h>
+#include <asm/core.h>
/*
* Some little helpers for loops. Use zero-overhead-loops
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7de0149e1cf7..7b00d26f472e 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -15,8 +15,6 @@
#include <linux/stringify.h>
#include <linux/types.h>
-
-#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
@@ -58,7 +56,67 @@
*/
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %1, %3\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32ex %0, %3\n" \
+ " getex %0\n" \
+ " beqz %0, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+} \
+
+#define ATOMIC_OP_RETURN(op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %1, %3\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32ex %0, %3\n" \
+ " getex %0\n" \
+ " beqz %0, 1b\n" \
+ " " #op " %0, %1, %2\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op) \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ __asm__ __volatile__( \
+ "1: l32ex %1, %3\n" \
+ " " #op " %0, %1, %2\n" \
+ " s32ex %0, %3\n" \
+ " getex %0\n" \
+ " beqz %0, 1b\n" \
+ : "=&a" (result), "=&a" (tmp) \
+ : "a" (i), "a" (v) \
+ : "memory" \
+ ); \
+ \
+ return tmp; \
+}
+
+#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \
{ \
@@ -200,6 +258,4 @@ ATOMIC_OPS(xor)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#endif /* __KERNEL__ */
-
#endif /* _XTENSA_ATOMIC_H */
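
The new XCHAL_HAVE_EXCLUSIVE variants above build the atomics from l32ex (load-exclusive), s32ex (store-exclusive) and getex (fetch the store result), retrying when the exclusive reservation was lost. A rough portable-C11 model of the same retry semantics, using a weak compare-and-swap in place of the exclusive pair (a sketch, not Xtensa assembly):

/* Hedged user-space model of the l32ex/s32ex/getex retry loop. */
#include <stdatomic.h>
#include <stdio.h>

static int atomic_add_return_model(int i, _Atomic int *v)
{
	int old, newval;

	do {
		old = atomic_load_explicit(v, memory_order_relaxed); /* l32ex */
		newval = old + i;                                    /* add   */
		/* s32ex + getex + beqz: retry if another CPU intervened */
	} while (!atomic_compare_exchange_weak(v, &old, newval));

	return newval;
}

int main(void)
{
	_Atomic int v = 40;

	printf("%d\n", atomic_add_return_model(2, &v));   /* 42 */
	return 0;
}
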
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 956596e4d437..d6f8d4ddc2bc 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -9,12 +9,16 @@
#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H
+#include <asm/core.h>
+
#define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
#define rmb() barrier()
#define wmb() mb()
+#if XCHAL_HAVE_S32C1I
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
+#endif
#include <asm-generic/barrier.h>
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index d3490189792b..aeb15f4c755b 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -13,8 +13,6 @@
#ifndef _XTENSA_BITOPS_H
#define _XTENSA_BITOPS_H
-#ifdef __KERNEL__
-
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
@@ -98,7 +96,126 @@ static inline unsigned long __fls(unsigned long word)
#include <asm-generic/bitops/fls64.h>
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %0, %2\n"
+ " or %0, %0, %1\n"
+ " s32ex %0, %2\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (mask), "a" (p)
+ : "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %0, %2\n"
+ " and %0, %0, %1\n"
+ " s32ex %0, %2\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %0, %2\n"
+ " xor %0, %0, %1\n"
+ " s32ex %0, %2\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp)
+ : "a" (~mask), "a" (p)
+ : "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %1, %3\n"
+ " or %0, %1, %2\n"
+ " s32ex %0, %3\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return value & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %1, %3\n"
+ " and %0, %1, %2\n"
+ " s32ex %0, %3\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (~mask), "a" (p)
+ : "memory");
+
+ return value & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+ unsigned long tmp, value;
+ unsigned long mask = 1UL << (bit & 31);
+
+ p += bit >> 5;
+
+ __asm__ __volatile__(
+ "1: l32ex %1, %3\n"
+ " xor %0, %1, %2\n"
+ " s32ex %0, %3\n"
+ " getex %0\n"
+ " beqz %0, 1b\n"
+ : "=&a" (tmp), "=&a" (value)
+ : "a" (mask), "a" (p)
+ : "memory");
+
+ return value & mask;
+}
+
+#elif XCHAL_HAVE_S32C1I
static inline void set_bit(unsigned int bit, volatile unsigned long *p)
{
@@ -232,6 +349,4 @@ test_and_change_bit(unsigned int bit, volatile unsigned long *p)
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
-#endif /* __KERNEL__ */
-
#endif /* _XTENSA_BITOPS_H */
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index d2fd932fdb4d..b21fd133ff62 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -11,7 +11,7 @@
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
-#include <variant/core.h>
+#include <asm/core.h>
#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index f302ef57973a..8b687176ad72 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -13,7 +13,7 @@
#include <linux/in6.h>
#include <linux/uaccess.h>
-#include <variant/core.h>
+#include <asm/core.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 22a10c715c1f..7ccc5cbf441b 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -23,7 +23,24 @@
static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %0, %3\n"
+ " bne %0, %4, 2f\n"
+ " mov %1, %2\n"
+ " s32ex %1, %3\n"
+ " getex %1\n"
+ " beqz %1, 1b\n"
+ "2:\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (new), "a" (p), "a" (old)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
__asm__ __volatile__(
" wsr %2, scompare1\n"
" s32c1i %0, %1, 0\n"
@@ -108,7 +125,22 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp, result;
+
+ __asm__ __volatile__(
+ "1: l32ex %0, %3\n"
+ " mov %1, %2\n"
+ " s32ex %1, %3\n"
+ " getex %1\n"
+ " beqz %1, 1b\n"
+ : "=&a" (result), "=&a" (tmp)
+ : "a" (val), "a" (m)
+ : "memory"
+ );
+
+ return result;
+#elif XCHAL_HAVE_S32C1I
unsigned long tmp, result;
__asm__ __volatile__(
"1: l32i %1, %2, 0\n"
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 6712929a27c9..0fbe2a740b8d 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -12,8 +12,8 @@
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
-#include <variant/core.h>
#include <variant/tie.h>
+#include <asm/core.h>
#include <asm/types.h>
#ifdef __ASSEMBLY__
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
new file mode 100644
index 000000000000..5b4acb7d1c07
--- /dev/null
+++ b/arch/xtensa/include/asm/core.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_CORE_H
+#define _ASM_XTENSA_CORE_H
+
+#include <variant/core.h>
+
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
+#ifndef XCHAL_HAVE_MPU
+#define XCHAL_HAVE_MPU 0
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#endif
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 505d09eff184..9538b0f7953c 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -15,65 +15,88 @@
#ifndef _ASM_XTENSA_FUTEX_H
#define _ASM_XTENSA_FUTEX_H
-#ifdef __KERNEL__
-
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#if XCHAL_HAVE_EXCLUSIVE
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
+ __asm__ __volatile( \
+ "1: l32ex %[oldval], %[addr]\n" \
+ insn "\n" \
+ "2: s32ex %[newval], %[addr]\n" \
+ " getex %[newval]\n" \
+ " beqz %[newval], 1b\n" \
+ " movi %[newval], 0\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ " .literal_position\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
+ " .previous\n" \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret) \
+ : [addr] "r" (uaddr), [oparg] "r" (arg), \
+ [fault] "I" (-EFAULT) \
+ : "memory")
+#elif XCHAL_HAVE_S32C1I
+#define __futex_atomic_op(insn, ret, old, uaddr, arg) \
__asm__ __volatile( \
- "1: l32i %0, %2, 0\n" \
+ "1: l32i %[oldval], %[addr], 0\n" \
insn "\n" \
- " wsr %0, scompare1\n" \
- "2: s32c1i %1, %2, 0\n" \
- " bne %1, %0, 1b\n" \
- " movi %1, 0\n" \
+ " wsr %[oldval], scompare1\n" \
+ "2: s32c1i %[newval], %[addr], 0\n" \
+ " bne %[newval], %[oldval], 1b\n" \
+ " movi %[newval], 0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 4\n" \
" .literal_position\n" \
- "5: movi %0, 3b\n" \
- " movi %1, %3\n" \
- " jx %0\n" \
+ "5: movi %[oldval], 3b\n" \
+ " movi %[newval], %[fault]\n" \
+ " jx %[oldval]\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
- " .long 1b,5b,2b,5b\n" \
+ " .long 1b, 5b, 2b, 5b\n" \
" .previous\n" \
- : "=&r" (oldval), "=&r" (ret) \
- : "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
+ : [oldval] "=&r" (old), [newval] "=&r" (ret) \
+ : [addr] "r" (uaddr), [oparg] "r" (arg), \
+ [fault] "I" (-EFAULT) \
: "memory")
+#endif
static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
u32 __user *uaddr)
{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
int oldval = 0, ret;
-#if !XCHAL_HAVE_S32C1I
- return -ENOSYS;
-#endif
-
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("mov %[newval], %[oparg]",
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
- oparg);
+ __futex_atomic_op("add %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
- oparg);
+ __futex_atomic_op("or %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
- ~oparg);
+ __futex_atomic_op("and %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
- oparg);
+ __futex_atomic_op("xor %[newval], %[oldval], %[oparg]",
+ ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
@@ -85,43 +108,60 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
*oval = oldval;
return ret;
+#else
+ return -ENOSYS;
+#endif
}
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+ unsigned long tmp;
int ret = 0;
if (!access_ok(uaddr, sizeof(u32)))
return -EFAULT;
-#if !XCHAL_HAVE_S32C1I
- return -ENOSYS;
-#endif
-
__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
- " wsr %5, scompare1\n"
- "1: s32c1i %1, %4, 0\n"
- " s32i %1, %6, 0\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ "1: l32ex %[tmp], %[addr]\n"
+ " s32i %[tmp], %[uval], 0\n"
+ " bne %[tmp], %[oldval], 2f\n"
+ " mov %[tmp], %[newval]\n"
+ "3: s32ex %[tmp], %[addr]\n"
+ " getex %[tmp]\n"
+ " beqz %[tmp], 1b\n"
+#elif XCHAL_HAVE_S32C1I
+ " wsr %[oldval], scompare1\n"
+ "1: s32c1i %[newval], %[addr], 0\n"
+ " s32i %[newval], %[uval], 0\n"
+#endif
"2:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
" .literal_position\n"
- "4: movi %1, 2b\n"
- " movi %0, %7\n"
- " jx %1\n"
+ "4: movi %[tmp], 2b\n"
+ " movi %[ret], %[fault]\n"
+ " jx %[tmp]\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .long 1b,4b\n"
+ " .long 1b, 4b\n"
+#if XCHAL_HAVE_EXCLUSIVE
+ " .long 3b, 4b\n"
+#endif
" .previous\n"
- : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
- : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
+ : [ret] "+r" (ret), [newval] "+r" (newval), [tmp] "=&r" (tmp)
+ : [addr] "r" (uaddr), [oldval] "r" (oldval), [uval] "r" (uval),
+ [fault] "I" (-EFAULT)
: "memory");
return ret;
+#else
+ return -ENOSYS;
+#endif
}
-#endif /* __KERNEL__ */
#endif /* _ASM_XTENSA_FUTEX_H */
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 10e9852b2fb4..323d05789159 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -33,10 +33,6 @@
#define CA_WRITEBACK (0x4)
#endif
-#ifndef XCHAL_SPANNING_WAY
-#define XCHAL_SPANNING_WAY 0
-#endif
-
#ifdef __ASSEMBLY__
#define XTENSA_HWVERSION_RC_2009_0 230000
@@ -181,11 +177,42 @@
.macro initialize_cacheattr
-#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
#endif
+#if XCHAL_HAVE_MPU
+ .data
+ .align 4
+.Lattribute_table:
+ .long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
+ .long 0x006600, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .long 0x000000, 0x000000, 0x000000, 0x000000
+ .previous
+
+ movi a3, .Lattribute_table
+ movi a4, CONFIG_MEMMAP_CACHEATTR
+ movi a5, 1
+ movi a6, XCHAL_MPU_ENTRIES
+ movi a10, 0x20000000
+ movi a11, -1
+1:
+ sub a5, a5, a10
+ extui a8, a4, 28, 4
+ beq a8, a11, 2f
+ addi a6, a6, -1
+ mov a11, a8
+2:
+ addx4 a9, a8, a3
+ l32i a9, a9, 0
+ or a9, a9, a6
+ wptlb a9, a5
+ slli a4, a4, 4
+ bgeu a5, a10, 1b
+
+#else
movi a5, XCHAL_SPANNING_WAY
movi a6, ~_PAGE_ATTRIB_MASK
movi a4, CONFIG_MEMMAP_CACHEATTR
@@ -208,6 +235,7 @@
isync
#endif
+#endif
.endm
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index acc5bb2cf1c7..da3e783f896b 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -11,7 +11,6 @@
#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H
-#ifdef __KERNEL__
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/vectors.h>
@@ -78,8 +77,6 @@ static inline void iounmap(volatile void __iomem *addr)
#endif /* CONFIG_MMU */
-#endif /* __KERNEL__ */
-
#include <asm-generic/io.h>
#endif /* _XTENSA_IO_H */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 6c6ed23e0c79..0f71a51dab25 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -12,7 +12,7 @@
#define _XTENSA_IRQ_H
#include <linux/init.h>
-#include <variant/core.h>
+#include <asm/core.h>
#ifdef CONFIG_PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index 9b5e8526afe5..12890681587b 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
#if XTENSA_FAKE_NMI
-#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+#if defined(CONFIG_DEBUG_MISC) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
unsigned long tmp;
asm volatile("rsr %0, ps\t\n"
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 0b68c76ec1e6..405526912d9a 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -11,8 +11,6 @@
#ifndef _XTENSA_PCI_BRIDGE_H
#define _XTENSA_PCI_BRIDGE_H
-#ifdef __KERNEL__
-
struct device_node;
struct pci_controller;
@@ -84,5 +82,4 @@ int early_write_config_byte(struct pci_controller*, int, int, int, u8);
int early_write_config_word(struct pci_controller*, int, int, int, u16);
int early_write_config_dword(struct pci_controller*, int, int, int, u32);
-#endif /* __KERNEL__ */
#endif /* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 883024054b05..8e2b48a268db 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -11,8 +11,6 @@
#ifndef _XTENSA_PCI_H
#define _XTENSA_PCI_H
-#ifdef __KERNEL__
-
/* Can be used to override the logic in pci_scan_bus for skipping
* already-configured bus numbers - to be used for buggy BIOSes
* or architectures with incomplete PCI setup by the loader
@@ -45,8 +43,6 @@
#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1
#define arch_can_pci_mmap_io() 1
-#endif /* __KERNEL__ */
-
/* Generic PCI */
#include <asm-generic/pci.h>
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index b3b388ff2f01..368284c972e7 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -11,8 +11,6 @@
#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H
-#ifdef __KERNEL__
-
#include <linux/highmem.h>
#include <linux/slab.h>
@@ -79,5 +77,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
}
#define pmd_pgtable(pmd) pmd_page(pmd)
-#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 0c14018d1c26..19f6b54e358b 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -10,7 +10,7 @@
#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
-#include <variant/core.h>
+#include <asm/core.h>
#include <linux/compiler.h>
#include <linux/stringify.h>
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 62a58d2567e9..b109416dc07e 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -80,7 +80,7 @@ struct pt_regs {
unsigned long areg[16];
};
-#include <variant/core.h>
+#include <asm/core.h>
# define arch_has_single_step() (1)
# define task_pt_regs(tsk) ((struct pt_regs*) \
diff --git a/arch/xtensa/include/asm/segment.h b/arch/xtensa/include/asm/segment.h
deleted file mode 100644
index 98964ad15ca2..000000000000
--- a/arch/xtensa/include/asm/segment.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/segment.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEGMENT_H
-#define _XTENSA_SEGMENT_H
-
-#include <linux/uaccess.h>
-
-#endif /* _XTENSA_SEGEMENT_H */
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 91dc06d58060..359ab40e935a 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -14,7 +14,7 @@
#include <asm/ptrace.h>
#include <uapi/linux/audit.h>
-static inline int syscall_get_arch(void)
+static inline int syscall_get_arch(struct task_struct *task)
{
return AUDIT_ARCH_XTENSA;
}
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 0d766f9c1083..50889935138a 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -14,32 +14,6 @@
#include <asm/cache.h>
#include <asm/page.h>
-#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
-
-/* Note, read http://lkml.org/lkml/2004/1/15/6 */
-
-# define tlb_start_vma(tlb,vma) do { } while (0)
-# define tlb_end_vma(tlb,vma) do { } while (0)
-
-#else
-
-# define tlb_start_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while(0)
-
-# define tlb_end_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
- } while(0)
-
-#endif
-
-#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
#include <asm-generic/tlb.h>
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 7111280c8842..79fe3007919e 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -18,7 +18,7 @@
#ifndef _XTENSA_VECTORS_H
#define _XTENSA_VECTORS_H
-#include <variant/core.h>
+#include <asm/core.h>
#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/uapi/asm/sockios.h b/arch/xtensa/include/uapi/asm/sockios.h
index fb8ac3607189..1a1f58f4b75a 100644
--- a/arch/xtensa/include/uapi/asm/sockios.h
+++ b/arch/xtensa/include/uapi/asm/sockios.h
@@ -26,7 +26,7 @@
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
#endif /* _XTENSA_SOCKIOS_H */
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
index 4f20416061fb..285fb2942b06 100644
--- a/arch/xtensa/kernel/hw_breakpoint.c
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -12,7 +12,7 @@
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
-#include <variant/core.h>
+#include <asm/core.h>
/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 4ec6fbb696bf..c0ec24349421 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -651,6 +651,9 @@ c_show(struct seq_file *f, void *slot)
#if XCHAL_HAVE_S32C1I
"s32c1i "
#endif
+#if XCHAL_HAVE_EXCLUSIVE
+ "exclusive "
+#endif
"\n");
/* Registers. */
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 3699d6d3e479..83b244ce61ee 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -126,7 +126,7 @@ void secondary_start_kernel(void)
init_mmu();
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_MISC
if (boot_secondary_processors == 0) {
pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
__func__, boot_secondary_processors, cpu);
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 30084eaf8422..5fa0ee1c8e00 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -398,3 +398,9 @@
425 common io_uring_setup sys_io_uring_setup
426 common io_uring_enter sys_io_uring_enter
427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index b80a430453b1..943f10639a93 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -18,8 +18,8 @@
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/core.h>
#include <asm/vectors.h>
-#include <variant/core.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 528fe0dd9339..d82c20c1fb7a 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -16,8 +16,8 @@
#include <linux/errno.h>
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index c0f6981719d6..efecfd7ed8cc 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -10,8 +10,8 @@
*/
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
/*
* void *memcpy(void *dst, const void *src, size_t len);
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 276747dec300..8632eacbdc80 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -12,8 +12,8 @@
*/
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
/*
* void *memset(void *dst, int c, size_t length)
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 5fce16b67dca..c4c6c8578d59 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -13,8 +13,8 @@
#include <linux/errno.h>
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
/*
* char *__strncpy_user(char *dst, const char *src, size_t len)
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 0b956ce7f386..1f2ca2bb2ab3 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -12,8 +12,8 @@
*/
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
/*
* size_t __strnlen_user(const char *s, size_t len)
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index 64ab1971324f..228607e30bc2 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -54,8 +54,8 @@
*/
#include <linux/linkage.h>
-#include <variant/core.h>
#include <asm/asmmacro.h>
+#include <asm/core.h>
.text
ENTRY(__xtensa_copy_user)
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index d49861099684..b51746f2b80b 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -216,11 +216,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
static void __init parse_memmap_one(char *p)
{
char *oldp;
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 026211e7ab09..f9cd45860bee 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -297,8 +297,7 @@ out_alloc_disk:
blk_cleanup_queue(dev->queue);
dev->queue = NULL;
out_alloc_queue:
- simc_close(dev->fd);
- return -EIO;
+ return -ENOMEM;
}
static int __init simdisk_init(void)
diff --git a/arch/xtensa/platforms/xt2000/include/platform/hardware.h b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
index 8e5e0d6a81ec..9f213f573330 100644
--- a/arch/xtensa/platforms/xt2000/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xt2000/include/platform/hardware.h
@@ -15,7 +15,7 @@
#ifndef _XTENSA_XT2000_HARDWARE_H
#define _XTENSA_XT2000_HARDWARE_H
-#include <variant/core.h>
+#include <asm/core.h>
/*
* On-board components.
diff --git a/arch/xtensa/platforms/xt2000/include/platform/serial.h b/arch/xtensa/platforms/xt2000/include/platform/serial.h
index 7226cf732b47..cde804827626 100644
--- a/arch/xtensa/platforms/xt2000/include/platform/serial.h
+++ b/arch/xtensa/platforms/xt2000/include/platform/serial.h
@@ -11,7 +11,7 @@
#ifndef _XTENSA_XT2000_SERIAL_H
#define _XTENSA_XT2000_SERIAL_H
-#include <variant/core.h>
+#include <asm/core.h>
#include <asm/io.h>
/* National-Semi PC16552D DUART: */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 820e8738af11..b1506376d502 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/kdev_t.h>
diff --git a/block/Kconfig b/block/Kconfig
index 028bc085dac8..1b220101a9cb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,30 +26,6 @@ menuconfig BLOCK
if BLOCK
-config LBDAF
- bool "Support for large (2TB+) block devices and files"
- depends on !64BIT
- default y
- help
- Enable block devices or files of size 2TB and larger.
-
- This option is required to support the full capacity of large
- (2TB+) block devices, including RAID, disk, Network Block Device,
- Logical Volume Manager (LVM) and loopback.
-
- This option also enables support for single files larger than
- 2TB.
-
- The ext4 filesystem requires that this feature be enabled in
- order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount in the read-write
- mode any filesystems that use the huge_file feature, which is
- enabled by default by mke2fs.ext4.
-
- The GFS2 filesystem also requires this feature.
-
- If unsure, say Y.
-
config BLK_SCSI_REQUEST
bool
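The removed help text describes what the option used to gate: 64-bit sector and file offsets on 32-bit builds. With LBDAF gone that support is always built in, so from userspace nothing changes except that it can no longer be configured away. Purely as an illustration (not part of the patch, and assuming a filesystem that supports sparse files), offsets past 2 TiB keep working through the ordinary large-file interfaces:

    #define _FILE_OFFSET_BITS 64
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* Write one byte just past 2 TiB into a sparse file. */
        off_t target = ((off_t)2 << 40) + 1;
        int fd = open("sparse.img", O_RDWR | O_CREAT, 0644);

        if (fd < 0) { perror("open"); return 1; }
        if (pwrite(fd, "x", 1, target) != 1) { perror("pwrite"); return 1; }
        printf("wrote 1 byte at offset %lld\n", (long long)target);
        close(fd);
        return 0;
    }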
diff --git a/block/badblocks.c b/block/badblocks.c
index 91f7bcf979d3..2e5f5697db35 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Bad block management
*
* - Heavily based on MD badblocks code from Neil Brown
*
* Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/badblocks.h>
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c6113af31960..b3796a40a61a 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cgroups support for the BFQ I/O scheduler.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
@@ -578,7 +569,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqg_and_blkg_get(bfqg);
if (bfq_bfqq_busy(bfqq)) {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
bfq_activate_bfqq(bfqd, bfqq);
}
@@ -1102,7 +1094,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
- /* the same statictics which cover the bfqg and its descendants */
+ /* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
.private = (unsigned long)&blkcg_policy_bfq,
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 5ba1e0d841b4..f8d430f88d25 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Budget Fair Queueing (BFQ) I/O scheduler.
*
@@ -12,16 +13,6 @@
*
* Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* BFQ is a proportional-share I/O scheduler, with some extra
* low-latency capabilities. BFQ also supports full hierarchical
* scheduling through cgroups. Next paragraphs provide an introduction
@@ -189,7 +180,7 @@ static const int bfq_default_max_budget = 16 * 1024;
/*
* When a sync request is dispatched, the queue that contains that
* request, and all the ancestor entities of that queue, are charged
- * with the number of sectors of the request. In constrast, if the
+ * with the number of sectors of the request. In contrast, if the
* request is async, then the queue and its ancestor entities are
* charged with the number of sectors of the request, multiplied by
* the factor below. This throttles the bandwidth for async I/O,
@@ -217,7 +208,7 @@ const int bfq_timeout = HZ / 8;
* queue merging.
*
* As can be deduced from the low time limit below, queue merging, if
- * successful, happens at the very beggining of the I/O of the involved
+ * successful, happens at the very beginning of the I/O of the involved
* cooperating processes, as a consequence of the arrival of the very
* first requests from each cooperator. After that, there is very
* little chance to find cooperators.
@@ -242,6 +233,14 @@ static struct kmem_cache *bfq_pool;
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+/*
+ * Sync random I/O is likely to be confused with soft real-time I/O,
+ * because it is characterized by limited throughput and apparently
+ * isochronous arrival pattern. To avoid false positives, queues
+ * containing only random (seeky) I/O are prevented from being tagged
+ * as soft real-time.
+ */
+#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
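BFQQ_TOTALLY_SEEKY() piggybacks on the existing 32-bit seek_history shift register: each completed request shifts in one bit, set when the request was seeky. BFQQ_SEEKY() already fires when more than 19 of the last 32 bits are set; the new soft-rt filter only triggers when all 32 are. A standalone model of that bookkeeping, with plain C types standing in for the bfq_queue fields:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of bfqq->seek_history: one bit per completed request,
     * 1 = the request was far from the previous position (seeky). */
    static uint32_t seek_history;

    static void record_request(bool seeky)
    {
        seek_history = (seek_history << 1) | (seeky ? 1 : 0);
    }

    /* Rough equivalents of BFQQ_SEEKY() and BFQQ_TOTALLY_SEEKY(). */
    static bool queue_seeky(void)         { return __builtin_popcount(seek_history) > 19; }
    static bool queue_totally_seeky(void) { return seek_history == UINT32_MAX; }

    int main(void)
    {
        for (int i = 0; i < 32; i++)
            record_request(true);          /* 32 seeky requests in a row */
        printf("seeky=%d totally_seeky=%d\n", queue_seeky(), queue_totally_seeky());

        record_request(false);             /* one sequential request */
        printf("seeky=%d totally_seeky=%d\n", queue_seeky(), queue_totally_seeky());
        return 0;
    }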
@@ -433,7 +432,7 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
/*
* Lifted from AS - choose which of rq1 and rq2 that is best served now.
- * We choose the request that is closesr to the head right now. Distance
+ * We choose the request that is closer to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
@@ -595,7 +594,16 @@ static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
bfq_merge_time_limit);
}
-void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+/*
+ * The following function is not marked as __cold because it is
+ * actually cold, but for the same performance goal described in the
+ * comments on the likely() at the beginning of
+ * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
+ * execution time for the case where this function is not invoked, we
+ * had to add an unlikely() in each involved if().
+ */
+void __cold
+bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
@@ -629,12 +637,19 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * The following function returns true if every queue must receive the
- * same share of the throughput (this condition is used when deciding
- * whether idling may be disabled, see the comments in the function
- * bfq_better_to_idle()).
+ * The following function returns false either if every active queue
+ * must receive the same share of the throughput (symmetric scenario),
+ * or, as a special case, if bfqq must receive a share of the
+ * throughput lower than or equal to the share that every other active
+ * queue must receive. If bfqq does sync I/O, then these are the only
+ * two cases where bfqq happens to be guaranteed its share of the
+ * throughput even if I/O dispatching is not plugged when bfqq remains
+ * temporarily empty (for more details, see the comments in the
+ * function bfq_better_to_idle()). For this reason, the return value
+ * of this function is used to check whether I/O-dispatch plugging can
+ * be avoided.
*
- * Such a scenario occurs when:
+ * The above first case (symmetric scenario) occurs when:
* 1) all active queues have the same weight,
* 2) all active queues belong to the same I/O-priority class,
* 3) all active groups at the same level in the groups tree have the same
@@ -654,30 +669,36 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* support or the cgroups interface are not enabled, thus no state
* needs to be maintained in this case.
*/
-static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
+ bool smallest_weight = bfqq &&
+ bfqq->weight_counter &&
+ bfqq->weight_counter ==
+ container_of(
+ rb_first_cached(&bfqd->queue_weights_tree),
+ struct bfq_weight_counter,
+ weights_node);
+
/*
* For queue weights to differ, queue_weights_tree must contain
* at least two nodes.
*/
- bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
- (bfqd->queue_weights_tree.rb_node->rb_left ||
- bfqd->queue_weights_tree.rb_node->rb_right);
+ bool varied_queue_weights = !smallest_weight &&
+ !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
+ (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
bool multiple_classes_busy =
(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
(bfqd->busy_queues[1] && bfqd->busy_queues[2]);
- /*
- * For queue weights to differ, queue_weights_tree must contain
- * at least two nodes.
- */
- return !(varied_queue_weights || multiple_classes_busy
+ return varied_queue_weights || multiple_classes_busy
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|| bfqd->num_groups_with_pending_reqs > 0
#endif
- );
+ ;
}
/*
@@ -694,10 +715,11 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* should be low too.
*/
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
struct bfq_entity *entity = &bfqq->entity;
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
/*
* Do not insert if the queue is already associated with a
@@ -726,8 +748,10 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
if (entity->weight < __counter->weight)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
@@ -736,7 +760,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/*
* In the unlucky event of an allocation failure, we just
* exit. This will cause the weight of queue to not be
- * considered in bfq_symmetric_scenario, which, in its turn,
+ * considered in bfq_asymmetric_scenario, which, in its turn,
* causes the scenario to be deemed wrongly symmetric in case
* bfqq's weight would have been the only weight making the
* scenario asymmetric. On the bright side, no unbalance will
@@ -750,7 +774,8 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->weight_counter->weight = entity->weight;
rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
- rb_insert_color(&bfqq->weight_counter->weights_node, root);
+ rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
+ leftmost);
inc_counter:
bfqq->weight_counter->num_active++;
@@ -765,7 +790,7 @@ inc_counter:
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
if (!bfqq->weight_counter)
return;
@@ -774,7 +799,7 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&bfqq->weight_counter->weights_node, root);
+ rb_erase_cached(&bfqq->weight_counter->weights_node, root);
kfree(bfqq->weight_counter);
reset_entity_pointer:
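Moving queue_weights_tree to rb_root_cached lets the new bfq_asymmetric_scenario() read the smallest active weight in O(1) through rb_first_cached(), instead of descending the tree. The insertion/removal pattern is the standard one for cached rbtrees; the sketch below shows it with a hypothetical item type rather than bfq_weight_counter, and is meant as in-kernel-style illustration only:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct item {
        struct rb_node node;
        unsigned int weight;
    };

    static void item_insert(struct rb_root_cached *root, struct item *it)
    {
        struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        while (*link) {
            struct item *cur = rb_entry(*link, struct item, node);

            parent = *link;
            if (it->weight < cur->weight) {
                link = &parent->rb_left;
            } else {
                link = &parent->rb_right;
                leftmost = false;    /* no longer left of everything */
            }
        }
        rb_link_node(&it->node, parent, link);
        rb_insert_color_cached(&it->node, root, leftmost);
    }

    /* Smallest weight in O(1), as bfq_asymmetric_scenario() now does. */
    static struct item *item_min(struct rb_root_cached *root)
    {
        struct rb_node *n = rb_first_cached(root);

        return n ? rb_entry(n, struct item, node) : NULL;
    }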
@@ -889,7 +914,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
- !bfq_symmetric_scenario(bfqq->bfqd))
+ bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
return blk_rq_sectors(rq);
return blk_rq_sectors(rq) * bfq_async_charge_factor;
@@ -955,7 +980,7 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
* of several files
* mplayer took 23 seconds to start, if constantly weight-raised.
*
- * As for higher values than that accomodating the above bad
+ * As for higher values than that accommodating the above bad
* scenario, tests show that higher values would often yield
* the opposite of the desired result, i.e., would worsen
* responsiveness by allowing non-interactive applications to
@@ -994,6 +1019,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
else
bfq_clear_bfqq_IO_bound(bfqq);
+ bfqq->entity.new_weight = bic->saved_weight;
bfqq->ttime = bic->saved_ttime;
bfqq->wr_coeff = bic->saved_wr_coeff;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
@@ -1041,8 +1067,18 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
hlist_del_init(&item->burst_list_node);
- hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
- bfqd->burst_size = 1;
+
+ /*
+ * Start the creation of a new burst list only if there is no
+ * active queue. See comments on the conditional invocation of
+ * bfq_handle_burst().
+ */
+ if (bfq_tot_busy_queues(bfqd) == 0) {
+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+ bfqd->burst_size = 1;
+ } else
+ bfqd->burst_size = 0;
+
bfqd->burst_parent_entity = bfqq->entity.parent;
}
@@ -1098,7 +1134,8 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* many parallel threads/processes. Examples are systemd during boot,
* or git grep. To help these processes get their job done as soon as
* possible, it is usually better to not grant either weight-raising
- * or device idling to their queues.
+ * or device idling to their queues, unless these queues must be
+ * protected from the I/O flowing through other active queues.
*
* In this comment we describe, firstly, the reasons why this fact
* holds, and, secondly, the next function, which implements the main
@@ -1110,7 +1147,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* cumulatively served, the sooner the target job of these queues gets
* completed. As a consequence, weight-raising any of these queues,
* which also implies idling the device for it, is almost always
- * counterproductive. In most cases it just lowers throughput.
+ * counterproductive, unless there are other active queues to isolate
+ * these new queues from. If there are no other active queues, then
+ * weight-raising these new queues just lowers throughput in most
+ * cases.
*
* On the other hand, a burst of queue creations may be caused also by
* the start of an application that does not consist of a lot of
@@ -1144,14 +1184,16 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* are very rare. They typically occur if some service happens to
* start doing I/O exactly when the interactive task starts.
*
- * Turning back to the next function, it implements all the steps
- * needed to detect the occurrence of a large burst and to properly
- * mark all the queues belonging to it (so that they can then be
- * treated in a different way). This goal is achieved by maintaining a
- * "burst list" that holds, temporarily, the queues that belong to the
- * burst in progress. The list is then used to mark these queues as
- * belonging to a large burst if the burst does become large. The main
- * steps are the following.
+ * Turning back to the next function, it is invoked only if there are
+ * no active queues (apart from active queues that would belong to the
+ * same, possible burst bfqq would belong to), and it implements all
+ * the steps needed to detect the occurrence of a large burst and to
+ * properly mark all the queues belonging to it (so that they can then
+ * be treated in a different way). This goal is achieved by
+ * maintaining a "burst list" that holds, temporarily, the queues that
+ * belong to the burst in progress. The list is then used to mark
+ * these queues as belonging to a large burst if the burst does become
+ * large. The main steps are the following.
*
* . when the very first queue is created, the queue is inserted into the
* list (as it could be the first queue in a possible burst)
@@ -1596,6 +1638,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
*/
in_burst = bfq_bfqq_in_large_burst(bfqq);
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+ !BFQQ_TOTALLY_SEEKY(bfqq) &&
!in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start) &&
bfqq->dispatched == 0;
@@ -1704,6 +1747,123 @@ static void bfq_add_request(struct request *rq)
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
+ /*
+ * Periodically reset inject limit, to make sure that
+ * the latter eventually drops in case workload
+ * changes, see step (3) in the comments on
+ * bfq_update_inject_limit().
+ */
+ if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(1000))) {
+ /* invalidate baseline total service time */
+ bfqq->last_serv_time_ns = 0;
+
+ /*
+ * Reset pointer in case we are waiting for
+ * some request completion.
+ */
+ bfqd->waited_rq = NULL;
+
+ /*
+ * If bfqq has a short think time, then start
+ * by setting the inject limit to 0
+ * prudentially, because the service time of
+ * an injected I/O request may be higher than
+ * the think time of bfqq, and therefore, if
+ * one request was injected when bfqq remains
+ * empty, this injected request might delay
+ * the service of the next I/O request for
+ * bfqq significantly. In case bfqq can
+ * actually tolerate some injection, then the
+ * adaptive update will however raise the
+ * limit soon. This lucky circumstance holds
+ * exactly because bfqq has a short think
+ * time, and thus, after remaining empty, is
+ * likely to get new I/O enqueued---and then
+ * completed---before being expired. This is
+ * the very pattern that gives the
+ * limit-update algorithm the chance to
+ * measure the effect of injection on request
+ * service times, and then to update the limit
+ * accordingly.
+ *
+ * On the opposite end, if bfqq has a long
+ * think time, then start directly with 1,
+ * because:
+ * a) on the bright side, keeping at most one
+ * request in service in the drive is unlikely
+ * to cause any harm to the latency of bfqq's
+ * requests, as the service time of a single
+ * request is likely to be lower than the
+ * think time of bfqq;
+ * b) on the downside, after becoming empty,
+ * bfqq is likely to expire before getting its
+ * next request. With this request arrival
+ * pattern, it is very hard to sample total
+ * service times and update the inject limit
+ * accordingly (see comments on
+ * bfq_update_inject_limit()). So the limit is
+ * likely to be never, or at least seldom,
+ * updated. As a consequence, by setting the
+ * limit to 1, we avoid that no injection ever
+ * occurs with bfqq. On the downside, this
+ * proactive step further reduces chances to
+ * actually compute the baseline total service
+ * time. Thus it reduces chances to execute the
+ * limit-update algorithm and possibly raise the
+ * limit to more than 1.
+ */
+ if (bfq_bfqq_has_short_ttime(bfqq))
+ bfqq->inject_limit = 0;
+ else
+ bfqq->inject_limit = 1;
+ bfqq->decrease_time_jif = jiffies;
+ }
+
+ /*
+ * The following conditions must hold to setup a new
+ * sampling of total service time, and then a new
+ * update of the inject limit:
+ * - bfqq is in service, because the total service
+ * time is evaluated only for the I/O requests of
+ * the queues in service;
+ * - this is the right occasion to compute or to
+ * lower the baseline total service time, because
+ * there are actually no requests in the drive,
+ * or
+ * the baseline total service time is available, and
+ * this is the right occasion to compute the other
+ * quantity needed to update the inject limit, i.e.,
+ * the total service time caused by the amount of
+ * injection allowed by the current value of the
+ * limit. It is the right occasion because injection
+ * has actually been performed during the service
+ * hole, and there are still in-flight requests,
+ * which are very likely to be exactly the injected
+ * requests, or part of them;
+ * - the minimum interval for sampling the total
+ * service time and updating the inject limit has
+ * elapsed.
+ */
+ if (bfqq == bfqd->in_service_queue &&
+ (bfqd->rq_in_driver == 0 ||
+ (bfqq->last_serv_time_ns > 0 &&
+ bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
+ time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(100))) {
+ bfqd->last_empty_occupied_ns = ktime_get_ns();
+ /*
+ * Start the state machine for measuring the
+ * total service time of rq: setting
+ * wait_dispatch will cause bfqd->waited_rq to
+ * be set when rq will be dispatched.
+ */
+ bfqd->wait_dispatch = true;
+ bfqd->rqs_injected = false;
+ }
+ }
+
elv_rb_add(&bfqq->sort_list, rq);
/*
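Stripped of the bfq_queue plumbing, the branch above implements two time gates on the empty-to-non-empty transition: roughly once per second the baseline and the limit are re-initialized (0 for short think times, 1 otherwise), and at most every 100 ms a new total-service-time sample may be armed, but only while the queue is in service and either the drive is idle or injection actually happened on top of a known baseline. A schematic userspace model of those gates, with milliseconds in place of jiffies and plain flags in place of bfqd state:

    #include <stdbool.h>
    #include <stdint.h>

    struct inject_state {
        uint64_t decrease_time_ms;   /* last reset/decrease of the limit */
        uint64_t last_serv_time_ns;  /* baseline total service time, 0 = unknown */
        unsigned int inject_limit;
        bool short_think_time;
    };

    /* Called when a queue goes from empty to non-empty (cf. bfq_add_request()).
     * Returns true when a new total-service-time sample should be armed. */
    bool maybe_reset_and_arm(struct inject_state *s, uint64_t now_ms,
                             bool in_service, unsigned int rq_in_driver,
                             bool rqs_injected)
    {
        /* Gate 1: periodic reset, step (3) of the limit-update algorithm. */
        if (now_ms - s->decrease_time_ms >= 1000) {
            s->last_serv_time_ns = 0;
            s->inject_limit = s->short_think_time ? 0 : 1;
            s->decrease_time_ms = now_ms;
        }

        /* Gate 2: arm a sample at most every 100 ms, and only when the
         * measurement would be meaningful. */
        if (in_service &&
            (rq_in_driver == 0 ||
             (s->last_serv_time_ns > 0 && rqs_injected && rq_in_driver > 0)) &&
            now_ms - s->decrease_time_ms >= 100)
            return true;    /* caller records the timestamp and waits for rq */

        return false;
    }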
@@ -1715,8 +1875,9 @@ static void bfq_add_request(struct request *rq)
/*
* Adjust priority tree position, if next_rq changes.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- if (prev != bfqq->next_rq)
+ if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
bfq_pos_tree_add_move(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
@@ -1856,7 +2017,9 @@ static void bfq_remove_request(struct request_queue *q,
bfqq->pos_root = NULL;
}
} else {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /* see comments on bfq_pos_tree_add_move() for the unlikely() */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1941,7 +2104,12 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
*/
if (prev != bfqq->next_rq) {
bfq_updated_next_req(bfqd, bfqq);
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /*
+ * See comments on bfq_pos_tree_add_move() for
+ * the unlikely().
+ */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
}
}
@@ -2224,6 +2392,46 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+ * device reaches a high speed through internal parallelism
+ * and pipelining. This means that, to reach a high
+ * throughput, it must have many requests enqueued at the same
+ * time. But, in this configuration, the internal scheduling
+ * algorithm of the device does exactly the job of queue
+ * merging: it reorders requests so as to obtain as much as
+ * possible a sequential I/O pattern. As a consequence, with
+ * the workload generated by processes doing interleaved I/O,
+ * the throughput reached by the device is likely to be the
+ * same, with and without queue merging.
+ *
+ * Disabling merging also provides a remarkable benefit in
+ * terms of throughput. Merging tends to make many workloads
+ * artificially more uneven, because of shared queues
+ * remaining non empty for incomparably more time than
+ * non-merged queues. This may accentuate workload
+ * asymmetries. For example, if one of the queues in a set of
+ * merged queues has a higher weight than a normal queue, then
+ * the shared queue may inherit such a high weight and, by
+ * staying almost always active, may force BFQ to perform I/O
+ * plugging most of the time. This evidently makes it harder
+ * for BFQ to let the device reach a high throughput.
+ *
+ * Finally, the likely() macro below is not used because one
+ * of the two branches is more likely than the other, but to
+ * have the code path after the following if() executed as
+ * fast as possible for the case of a non rotational device
+ * with queueing. We want it because this is the fastest kind
+ * of device. On the opposite end, the likely() may lengthen
+ * the execution time of BFQ for the case of slower devices
+ * (rotational or at least without queueing). But in this case
+ * the execution time of BFQ matters very little, if not at
+ * all.
+ */
+ if (likely(bfqd->nonrot_with_queueing))
+ return NULL;
+
+ /*
* Prevent bfqq from being merged if it has been created too
* long ago. The idea is that true cooperating processes, and
* thus their associated bfq_queues, are supposed to be
@@ -2286,6 +2494,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
if (!bic)
return;
+ bic->saved_weight = bfqq->entity.orig_weight;
bic->saved_ttime = bfqq->ttime;
bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
@@ -2374,6 +2583,16 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
* assignment causes no harm).
*/
new_bfqq->bic = NULL;
+ /*
+ * If the queue is shared, the pid is the pid of one of the associated
+ * processes. Which pid depends on the exact sequence of merge events
+ * the queue underwent. So printing such a pid is useless and confusing
+ * because it reports a random pid between those of the associated
+ * processes.
+ * We mark such a queue with a pid -1, and then print SHARED instead of
+ * a pid in logging messages.
+ */
+ new_bfqq->pid = -1;
bfqq->bic = NULL;
/* release process reference to bfqq */
bfq_put_queue(bfqq);
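Marking a merged queue with pid -1 pairs with the logging side of this patch: instead of one of several equally arbitrary pids, the log lines are meant to show a placeholder. A tiny userspace illustration of that convention (the helper name here is made up; the 12-byte bound mirrors the MAX_PID_STR_LENGTH definition added to bfq-iosched.h further down):

    #include <stdio.h>

    #define PID_STR_LEN 12    /* fits "-2147483648" plus the terminator */

    /* Hypothetical helper: shared (merged) queues carry pid == -1. */
    void pid_to_str(int pid, char *buf, size_t len)
    {
        if (pid != -1)
            snprintf(buf, len, "%d", pid);
        else
            snprintf(buf, len, "SHARED");
    }

    int main(void)
    {
        char a[PID_STR_LEN], b[PID_STR_LEN];

        pid_to_str(1234, a, sizeof(a));
        pid_to_str(-1, b, sizeof(b));
        printf("normal queue: %s, merged queue: %s\n", a, b);
        return 0;
    }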
@@ -2408,8 +2627,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
/*
* bic still points to bfqq, then it has not yet been
* redirected to some other bfq_queue, and a queue
- * merge beween bfqq and new_bfqq can be safely
- * fulfillled, i.e., bic can be redirected to new_bfqq
+ * merge between bfqq and new_bfqq can be safely
+ * fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
@@ -2543,10 +2762,14 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
* queue).
*/
if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- bfq_symmetric_scenario(bfqd))
+ !bfq_asymmetric_scenario(bfqd, bfqq))
sl = min_t(u64, sl, BFQ_MIN_TT);
+ else if (bfqq->wr_coeff > 1)
+ sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
bfqd->last_idling_start = ktime_get();
+ bfqd->last_idling_start_jiffies = jiffies;
+
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
HRTIMER_MODE_REL);
bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
@@ -2848,8 +3071,10 @@ static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_requeue_bfqq(bfqd, bfqq, true);
/*
* Resort priority tree of potential close cooperators.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
/*
@@ -3223,13 +3448,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
-static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
-{
- return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- blk_queue_nonrot(bfqq->bfqd->queue) &&
- bfqq->bfqd->hw_tag;
-}
-
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3344,6 +3562,14 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
/*
+ * bfqq expired, so no total service time needs to be computed
+ * any longer: reset state machine for measuring total service
+ * times.
+ */
+ bfqd->rqs_injected = bfqd->wait_dispatch = false;
+ bfqd->waited_rq = NULL;
+
+ /*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
@@ -3352,8 +3578,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
/* bfqq is gone, no more actions on it */
return;
- bfqq->injected_service = 0;
-
/* mark bfqq as waiting a request only if a bic still points to it */
if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
@@ -3497,8 +3721,9 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
}
/*
- * There is a case where idling must be performed not for
- * throughput concerns, but to preserve service guarantees.
+ * There is a case where idling does not have to be performed for
+ * throughput concerns, but to preserve the throughput share of
+ * the process associated with bfqq.
*
* To introduce this case, we can note that allowing the drive
* to enqueue more than one request at a time, and hence
@@ -3514,77 +3739,83 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
* concern about per-process throughput distribution, and
* makes its decisions only on a per-request basis. Therefore,
* the service distribution enforced by the drive's internal
- * scheduler is likely to coincide with the desired
- * device-throughput distribution only in a completely
- * symmetric scenario where:
- * (i) each of these processes must get the same throughput as
- * the others;
- * (ii) the I/O of each process has the same properties, in
- * terms of locality (sequential or random), direction
- * (reads or writes), request sizes, greediness
- * (from I/O-bound to sporadic), and so on.
- * In fact, in such a scenario, the drive tends to treat
- * the requests of each of these processes in about the same
- * way as the requests of the others, and thus to provide
- * each of these processes with about the same throughput
- * (which is exactly the desired throughput distribution). In
- * contrast, in any asymmetric scenario, device idling is
- * certainly needed to guarantee that bfqq receives its
- * assigned fraction of the device throughput (see [1] for
- * details).
- * The problem is that idling may significantly reduce
- * throughput with certain combinations of types of I/O and
- * devices. An important example is sync random I/O, on flash
- * storage with command queueing. So, unless bfqq falls in the
- * above cases where idling also boosts throughput, it would
- * be important to check conditions (i) and (ii) accurately,
- * so as to avoid idling when not strictly needed for service
- * guarantees.
+ * scheduler is likely to coincide with the desired throughput
+ * distribution only in a completely symmetric, or favorably
+ * skewed scenario where:
+ * (i-a) each of these processes must get the same throughput as
+ * the others,
+ * (i-b) in case (i-a) does not hold, it holds that the process
+ * associated with bfqq must receive a throughput lower than
+ * or equal to that of any of the other processes;
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on;
+
+ * In fact, in such a scenario, the drive tends to treat the requests
+ * of each process in about the same way as the requests of the
+ * others, and thus to provide each of these processes with about the
+ * same throughput. This is exactly the desired throughput
+ * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
+ * even more convenient distribution for (the process associated with)
+ * bfqq.
+ *
+ * In contrast, in any asymmetric or unfavorable scenario, device
+ * idling (I/O-dispatch plugging) is certainly needed to guarantee
+ * that bfqq receives its assigned fraction of the device throughput
+ * (see [1] for details).
*
- * Unfortunately, it is extremely difficult to thoroughly
- * check condition (ii). And, in case there are active groups,
- * it becomes very difficult to check condition (i) too. In
- * fact, if there are active groups, then, for condition (i)
- * to become false, it is enough that an active group contains
- * more active processes or sub-groups than some other active
- * group. More precisely, for condition (i) to hold because of
- * such a group, it is not even necessary that the group is
- * (still) active: it is sufficient that, even if the group
- * has become inactive, some of its descendant processes still
- * have some request already dispatched but still waiting for
- * completion. In fact, requests have still to be guaranteed
- * their share of the throughput even after being
- * dispatched. In this respect, it is easy to show that, if a
- * group frequently becomes inactive while still having
- * in-flight requests, and if, when this happens, the group is
- * not considered in the calculation of whether the scenario
- * is asymmetric, then the group may fail to be guaranteed its
- * fair share of the throughput (basically because idling may
- * not be performed for the descendant processes of the group,
- * but it had to be). We address this issue with the
- * following bi-modal behavior, implemented in the function
- * bfq_symmetric_scenario().
+ * The problem is that idling may significantly reduce throughput with
+ * certain combinations of types of I/O and devices. An important
+ * example is sync random I/O on flash storage with command
+ * queueing. So, unless bfqq falls in cases where idling also boosts
+ * throughput, it is important to check conditions (i-a), (i-b) and
+ * (ii) accurately, so as to avoid idling when not strictly needed for
+ * service guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly check
+ * condition (ii). And, in case there are active groups, it becomes
+ * very difficult to check conditions (i-a) and (i-b) too. In fact,
+ * if there are active groups, then, for conditions (i-a) or (i-b) to
+ * become false 'indirectly', it is enough that an active group
+ * contains more active processes or sub-groups than some other active
+ * group. More precisely, for conditions (i-a) or (i-b) to become
+ * false because of such a group, it is not even necessary that the
+ * group is (still) active: it is sufficient that, even if the group
+ * has become inactive, some of its descendant processes still have
+ * some request already dispatched but still waiting for
+ * completion. In fact, requests have still to be guaranteed their
+ * share of the throughput even after being dispatched. In this
+ * respect, it is easy to show that, if a group frequently becomes
+ * inactive while still having in-flight requests, and if, when this
+ * happens, the group is not considered in the calculation of whether
+ * the scenario is asymmetric, then the group may fail to be
+ * guaranteed its fair share of the throughput (basically because
+ * idling may not be performed for the descendant processes of the
+ * group, but it had to be). We address this issue with the following
+ * bi-modal behavior, implemented in the function
+ * bfq_asymmetric_scenario().
*
* If there are groups with requests waiting for completion
* (as commented above, some of these groups may even be
* already inactive), then the scenario is tagged as
* asymmetric, conservatively, without checking any of the
- * conditions (i) and (ii). So the device is idled for bfqq.
+ * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
* This behavior matches also the fact that groups are created
* exactly if controlling I/O is a primary concern (to
* preserve bandwidth and latency guarantees).
*
- * On the opposite end, if there are no groups with requests
- * waiting for completion, then only condition (i) is actually
- * controlled, i.e., provided that condition (i) holds, idling
- * is not performed, regardless of whether condition (ii)
- * holds. In other words, only if condition (i) does not hold,
- * then idling is allowed, and the device tends to be
- * prevented from queueing many requests, possibly of several
- * processes. Since there are no groups with requests waiting
- * for completion, then, to control condition (i) it is enough
- * to check just whether all the queues with requests waiting
- * for completion also have the same weight.
+ * On the opposite end, if there are no groups with requests waiting
+ * for completion, then only conditions (i-a) and (i-b) are actually
+ * controlled, i.e., provided that conditions (i-a) or (i-b) holds,
+ * idling is not performed, regardless of whether condition (ii)
+ * holds. In other words, only if conditions (i-a) and (i-b) do not
+ * hold, then idling is allowed, and the device tends to be prevented
+ * from queueing many requests, possibly of several processes. Since
+ * there are no groups with requests waiting for completion, then, to
+ * control conditions (i-a) and (i-b) it is enough to check just
+ * whether all the queues with requests waiting for completion also
+ * have the same weight.
*
* Not checking condition (ii) evidently exposes bfqq to the
* risk of getting less throughput than its fair share.
@@ -3636,7 +3867,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
* compound condition that is checked below for deciding
* whether the scenario is asymmetric. To explain this
* compound condition, we need to add that the function
- * bfq_symmetric_scenario checks the weights of only
+ * bfq_asymmetric_scenario checks the weights of only
* non-weight-raised queues, for efficiency reasons (see
* comments on bfq_weights_tree_add()). Then the fact that
* bfqq is weight-raised is checked explicitly here. More
@@ -3664,7 +3895,7 @@ static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
return (bfqq->wr_coeff > 1 &&
bfqd->wr_busy_queues <
bfq_tot_busy_queues(bfqd)) ||
- !bfq_symmetric_scenario(bfqd);
+ bfq_asymmetric_scenario(bfqd, bfqq);
}
/*
@@ -3740,26 +3971,98 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
-static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+/*
+ * This function chooses the queue from which to pick the next extra
+ * I/O request to inject, if it finds a compatible queue. See the
+ * comments on bfq_update_inject_limit() for details on the injection
+ * mechanism, and for the definitions of the quantities mentioned
+ * below.
+ */
+static struct bfq_queue *
+bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
{
- struct bfq_queue *bfqq;
+ struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
+ unsigned int limit = in_serv_bfqq->inject_limit;
+ /*
+ * If
+ * - bfqq is not weight-raised and therefore does not carry
+ * time-critical I/O,
+ * or
+ * - regardless of whether bfqq is weight-raised, bfqq has
+ * however a long think time, during which it can absorb the
+ * effect of an appropriate number of extra I/O requests
+ * from other queues (see bfq_update_inject_limit for
+ * details on the computation of this number);
+ * then injection can be performed without restrictions.
+ */
+ bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
+ !bfq_bfqq_has_short_ttime(in_serv_bfqq);
+
+ /*
+ * If
+ * - the baseline total service time could not be sampled yet,
+ * so the inject limit happens to be still 0, and
+ * - a lot of time has elapsed since the plugging of I/O
+ * dispatching started, so drive speed is being wasted
+ * significantly;
+ * then temporarily raise inject limit to one request.
+ */
+ if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
+ bfq_bfqq_wait_request(in_serv_bfqq) &&
+ time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
+ bfqd->bfq_slice_idle)
+ )
+ limit = 1;
+
+ if (bfqd->rq_in_driver >= limit)
+ return NULL;
/*
- * A linear search; but, with a high probability, very few
- * steps are needed to find a candidate queue, i.e., a queue
- * with enough budget left for its next request. In fact:
+ * Linear search of the source queue for injection; but, with
+ * a high probability, very few steps are needed to find a
+ * candidate queue, i.e., a queue with enough budget left for
+ * its next request. In fact:
* - BFQ dynamically updates the budget of every queue so as
* to accommodate the expected backlog of the queue;
* - if a queue gets all its requests dispatched as injected
* service, then the queue is removed from the active list
- * (and re-added only if it gets new requests, but with
- * enough budget for its new backlog).
+ * (and re-added only if it gets new requests, but then it
+ * is assigned again enough budget for its new backlog).
*/
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ (in_serv_always_inject || bfqq->wr_coeff > 1) &&
bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
- bfq_bfqq_budget_left(bfqq))
- return bfqq;
+ bfq_bfqq_budget_left(bfqq)) {
+ /*
+ * Allow for only one large in-flight request
+ * on non-rotational devices, for the
+ * following reason. On non-rotational drives,
+ * large requests take much longer than
+ * smaller requests to be served. In addition,
+ * the drive prefers to serve large requests
+ * w.r.t. small ones, if it can choose. So,
+ * having more than one large request queued
+ * in the drive may easily make the next first
+ * request of the in-service queue wait so
+ * long as to break bfqq's service guarantees. On
+ * the bright side, large requests let the
+ * drive reach a very high throughput, even if
+ * there is only one in-flight large request
+ * at a time.
+ */
+ if (blk_queue_nonrot(bfqd->queue) &&
+ blk_rq_sectors(bfqq->next_rq) >=
+ BFQQ_SECT_THR_NONROT)
+ limit = min_t(unsigned int, 1, limit);
+ else
+ limit = in_serv_bfqq->inject_limit;
+
+ if (bfqd->rq_in_driver < limit) {
+ bfqd->rqs_injected = true;
+ return bfqq;
+ }
+ }
return NULL;
}
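Condensed to its decision logic, the rewritten helper injects from the first active queue that has a request, is an allowed source (anything if the in-service queue tolerates injection, otherwise only weight-raised queues), can pay for its next request out of its remaining budget, and would not push the number of in-flight requests past the limit, which is clamped to one when a large request would go to a non-rotational drive. A userspace model with hypothetical field names (the temporary limit raise for a still-unsampled baseline is left out):

    #include <stdbool.h>
    #include <stddef.h>

    struct cand_queue {
        bool has_request;
        bool next_rq_large;      /* at/above the non-rotational size threshold */
        unsigned int wr_coeff;   /* >1 means weight-raised */
        unsigned int charge;     /* cost of serving its next request */
        unsigned int budget_left;
    };

    /* Pick an injection source, mirroring the gating in the hunk above. */
    struct cand_queue *choose_for_injection(struct cand_queue *q, size_t n,
                                            unsigned int inject_limit,
                                            unsigned int rq_in_driver,
                                            bool in_serv_always_inject,
                                            bool nonrot)
    {
        if (rq_in_driver >= inject_limit)
            return NULL;

        for (size_t i = 0; i < n; i++) {
            unsigned int limit = inject_limit;

            if (!q[i].has_request)
                continue;
            if (!in_serv_always_inject && q[i].wr_coeff <= 1)
                continue;
            if (q[i].charge > q[i].budget_left)
                continue;

            /* At most one in-flight large request on non-rotational drives. */
            if (nonrot && q[i].next_rq_large && limit > 1)
                limit = 1;

            if (rq_in_driver < limit)
                return &q[i];
        }
        return NULL;
    }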
@@ -3846,14 +4149,32 @@ check_queue:
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
*
- * Yet, to boost throughput, inject service from other queues if
- * possible.
+ * Yet, inject service from other queues if it boosts
+ * throughput and is possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- if (bfq_bfqq_injectable(bfqq) &&
- bfqq->injected_service * bfqq->inject_coeff <
- bfqq->entity.service * 10)
+ struct bfq_queue *async_bfqq =
+ bfqq->bic && bfqq->bic->bfqq[0] &&
+ bfq_bfqq_busy(bfqq->bic->bfqq[0]) ?
+ bfqq->bic->bfqq[0] : NULL;
+
+ /*
+ * If the process associated with bfqq has also async
+ * I/O pending, then inject it
+ * unconditionally. Injecting I/O from the same
+ * process can cause no harm to the process. On the
+ * contrary, it can only increase bandwidth and reduce
+ * latency for the process.
+ */
+ if (async_bfqq &&
+ icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
+ bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
+ bfq_bfqq_budget_left(async_bfqq))
+ bfqq = bfqq->bic->bfqq[0];
+ else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
+ !bfq_bfqq_has_short_ttime(bfqq)))
bfqq = bfq_choose_bfqq_for_injection(bfqd);
else
bfqq = NULL;
@@ -3945,15 +4266,15 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
bfq_bfqq_served(bfqq, service_to_charge);
- bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
+ bfqd->wait_dispatch = false;
+ bfqd->waited_rq = rq;
+ }
- if (bfqq != bfqd->in_service_queue) {
- if (likely(bfqd->in_service_queue))
- bfqd->in_service_queue->injected_service +=
- bfq_serv_to_charge(rq, bfqq);
+ bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue)
goto return_rq;
- }
/*
* If weight raising has to terminate for bfqq, then next
@@ -4384,13 +4705,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
- /*
- * Aggressively inject a lot of service: up to 90%.
- * This coefficient remains constant during bfqq life,
- * but this behavior might be changed, after enough
- * testing and tuning.
- */
- bfqq->inject_coeff = 1;
} else
bfq_clear_bfqq_sync(bfqq);
@@ -4529,6 +4843,11 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
bfqq->seek_history <<= 1;
bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
+
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
+ BFQQ_TOTALLY_SEEKY(bfqq))
+ bfq_bfqq_end_wr(bfqq);
}
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
@@ -4823,6 +5142,9 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
+
+ bfqd->nonrot_with_queueing =
+ blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -4950,6 +5272,147 @@ static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
}
/*
+ * The processes associated with bfqq may happen to generate their
+ * cumulative I/O at a lower rate than the rate at which the device
+ * could serve the same I/O. This is rather probable, e.g., if only
+ * one process is associated with bfqq and the device is an SSD. It
+ * results in bfqq becoming often empty while in service. In this
+ * respect, if BFQ is allowed to switch to another queue when bfqq
+ * remains empty, then the device goes on being fed with I/O requests,
+ * and the throughput is not affected. In contrast, if BFQ is not
+ * allowed to switch to another queue---because bfqq is sync and
+ * I/O-dispatch needs to be plugged while bfqq is temporarily
+ * empty---then, during the service of bfqq, there will be frequent
+ * "service holes", i.e., time intervals during which bfqq gets empty
+ * and the device can only consume the I/O already queued in its
+ * hardware queues. During service holes, the device may even get to
+ * remaining idle. In the end, during the service of bfqq, the device
+ * is driven at a lower speed than the one it can reach with the kind
+ * of I/O flowing through bfqq.
+ *
+ * To counter this loss of throughput, BFQ implements a "request
+ * injection mechanism", which tries to fill the above service holes
+ * with I/O requests taken from other queues. The hard part in this
+ * mechanism is finding the right amount of I/O to inject, so as to
+ * both boost throughput and not break bfqq's bandwidth and latency
+ * guarantees. In this respect, the mechanism maintains a per-queue
+ * inject limit, computed as below. While bfqq is empty, the injection
+ * mechanism dispatches extra I/O requests only until the total number
+ * of I/O requests in flight---i.e., already dispatched but not yet
+ * completed---remains lower than this limit.
+ *
+ * A first definition comes in handy to introduce the algorithm by
+ * which the inject limit is computed. We define as first request for
+ * bfqq, an I/O request for bfqq that arrives while bfqq is in
+ * service, and causes bfqq to switch from empty to non-empty. The
+ * algorithm updates the limit as a function of the effect of
+ * injection on the service times of only the first requests of
+ * bfqq. The reason for this restriction is that these are the
+ * requests whose service time is affected most, because they are the
+ * first to arrive after injection possibly occurred.
+ *
+ * To evaluate the effect of injection, the algorithm measures the
+ * "total service time" of first requests. We define as total service
+ * time of an I/O request, the time that elapses since when the
+ * request is enqueued into bfqq, to when it is completed. This
+ * quantity allows the whole effect of injection to be measured. It is
+ * easy to see why. Suppose that some requests of other queues are
+ * actually injected while bfqq is empty, and that a new request R
+ * then arrives for bfqq. If the device does start to serve all or
+ * part of the injected requests during the service hole, then,
+ * because of this extra service, it may delay the next invocation of
+ * the dispatch hook of BFQ. Then, even after R gets eventually
+ * dispatched, the device may delay the actual service of R if it is
+ * still busy serving the extra requests, or if it decides to serve,
+ * before R, some extra request still present in its queues. As a
+ * conclusion, the cumulative extra delay caused by injection can be
+ * easily evaluated by just comparing the total service time of first
+ * requests with and without injection.
+ *
+ * The limit-update algorithm works as follows. On the arrival of a
+ * first request of bfqq, the algorithm measures the total time of the
+ * request only if one of the three cases below holds, and, for each
+ * case, it updates the limit as described below:
+ *
+ * (1) If there is no in-flight request. This gives a baseline for the
+ * total service time of the requests of bfqq. If the baseline has
+ * not been computed yet, then, after computing it, the limit is
+ * set to 1, to start boosting throughput, and to prepare the
+ * ground for the next case. If the baseline has already been
+ * computed, then it is updated, in case it results to be lower
+ * than the previous value.
+ *
+ * (2) If the limit is higher than 0 and there are in-flight
+ * requests. By comparing the total service time in this case with
+ * the above baseline, it is possible to know at which extent the
+ * current value of the limit is inflating the total service
+ * time. If the inflation is below a certain threshold, then bfqq
+ * is assumed to be suffering from no perceivable loss of its
+ * service guarantees, and the limit is even tentatively
+ * increased. If the inflation is above the threshold, then the
+ * limit is decreased. Due to the lack of any hysteresis, this
+ * logic makes the limit oscillate even in steady workload
+ * conditions. Yet we opted for it, because it is fast in reaching
+ * the best value for the limit, as a function of the current I/O
+ * workload. To reduce oscillations, this step is disabled for a
+ * short time interval after the limit happens to be decreased.
+ *
+ * (3) Periodically, after resetting the limit, to make sure that the
+ * limit eventually drops in case the workload changes. This is
+ * needed because, after the limit has gone safely up for a
+ * certain workload, it is impossible to guess whether the
+ * baseline total service time may have changed, without measuring
+ * it again without injection. A more effective version of this
+ * step might be to just sample the baseline, by interrupting
+ * injection only once, and then to reset/lower the limit only if
+ * the total service time with the current limit does happen to be
+ * too large.
+ *
+ * More details on each step are provided in the comments on the
+ * pieces of code that implement these steps: the branch handling the
+ * transition from empty to non empty in bfq_add_request(), the branch
+ * handling injection in bfq_select_queue(), and the function
+ * bfq_choose_bfqq_for_injection(). These comments also explain some
+ * exceptions, made by the injection mechanism in some special cases.
+ */
+static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+ unsigned int old_limit = bfqq->inject_limit;
+
+ if (bfqq->last_serv_time_ns > 0) {
+ u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
+
+ if (tot_time_ns >= threshold && old_limit > 0) {
+ bfqq->inject_limit--;
+ bfqq->decrease_time_jif = jiffies;
+ } else if (tot_time_ns < threshold &&
+ old_limit < bfqd->max_rq_in_driver<<1)
+ bfqq->inject_limit++;
+ }
+
+ /*
+ * Either we still have to compute the base value for the
+ * total service time, and there seem to be the right
+ * conditions to do it, or we can lower the last base value
+ * computed.
+ */
+ if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 0) ||
+ tot_time_ns < bfqq->last_serv_time_ns) {
+ bfqq->last_serv_time_ns = tot_time_ns;
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
+
+ /* update complete, not waiting for any request completion any longer */
+ bfqd->waited_rq = NULL;
+}
+
+/*
* Handle either a requeue or a finish for rq. The things to do are
* the same in both cases: all references to rq are to be dropped. In
* particular, rq is considered completed from the point of view of
@@ -4993,6 +5456,9 @@ static void bfq_finish_requeue_request(struct request *rq)
spin_lock_irqsave(&bfqd->lock, flags);
+ if (rq == bfqd->waited_rq)
+ bfq_update_inject_limit(bfqd, bfqq);
+
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
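Taken together with the call site just above, the update rule applied to each sampled first request is small: compare the sampled total service time against 1.5x the baseline, step the limit down or up accordingly, and take or refresh the baseline whenever a cleaner sample shows up. A self-contained model of just that arithmetic, with uint64_t timestamps in place of ktime values and the 100 ms cool-down bookkeeping left out:

    #include <stdint.h>
    #include <stdio.h>

    struct limit_state {
        uint64_t last_serv_time_ns;    /* baseline, 0 = not sampled yet */
        unsigned int inject_limit;
        unsigned int max_rq_in_driver;
    };

    /* Mirror of the limit-update arithmetic shown above (simplified). */
    void update_inject_limit(struct limit_state *s, uint64_t tot_time_ns,
                             unsigned int rq_in_driver)
    {
        unsigned int old_limit = s->inject_limit;

        if (s->last_serv_time_ns > 0) {
            uint64_t threshold = (s->last_serv_time_ns * 3) >> 1;

            if (tot_time_ns >= threshold && old_limit > 0)
                s->inject_limit--;    /* injection visibly hurt: back off */
            else if (tot_time_ns < threshold &&
                     old_limit < s->max_rq_in_driver << 1)
                s->inject_limit++;    /* no visible harm: probe a higher limit */
        }

        /* Take or refresh the baseline when the sample is trustworthy. */
        if ((s->last_serv_time_ns == 0 && rq_in_driver == 0) ||
            tot_time_ns < s->last_serv_time_ns) {
            s->last_serv_time_ns = tot_time_ns;
            s->inject_limit = old_limit > 1 ? old_limit : 1;  /* start trying injection */
        }
    }

    int main(void)
    {
        struct limit_state s = { .max_rq_in_driver = 8 };

        update_inject_limit(&s, 500000, 0);    /* first sample: baseline 0.5 ms */
        update_inject_limit(&s, 600000, 2);    /* below 1.5x baseline: limit goes up */
        update_inject_limit(&s, 900000, 3);    /* above 1.5x baseline: limit goes down */
        printf("limit=%u baseline=%llu ns\n", s.inject_limit,
               (unsigned long long)s.last_serv_time_ns);
        return 0;
    }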
@@ -5156,7 +5622,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
* preparation is that, after the prepare_request hook is invoked for
* rq, rq may still be transformed into a request with no icq, i.e., a
* request not associated with any queue. No bfq hook is invoked to
- * signal this tranformation. As a consequence, should these
+ * signal this transformation. As a consequence, should these
* preparation operations be performed when the prepare_request hook
* is invoked, and should rq be transformed one moment later, bfq
* would end up in an inconsistent state, because it would have
@@ -5247,7 +5713,29 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
}
}
- if (unlikely(bfq_bfqq_just_created(bfqq)))
+ /*
+ * Consider bfqq as possibly belonging to a burst of newly
+ * created queues only if:
+ * 1) A burst is actually happening (bfqd->burst_size > 0)
+ * or
+ * 2) There is no other active queue. In fact, if, in
+ * contrast, there are active queues not belonging to the
+ * possible burst bfqq may belong to, then there is no gain
+ * in considering bfqq as belonging to a burst, and
+ * therefore in not weight-raising bfqq. See comments on
+ * bfq_handle_burst().
+ *
+ * This filtering also helps eliminating false positives,
+ * occurring when bfqq does not belong to an actual large
+ * burst, but some background task (e.g., a service) happens
+ * to trigger the creation of new queues very close to when
+ * bfqq and its possible companion queues are created. See
+ * comments on bfq_handle_burst() for further details also on
+ * this issue.
+ */
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+ (bfqd->burst_size > 0 ||
+ bfq_tot_busy_queues(bfqd) == 0)))
bfq_handle_burst(bfqd, bfqq);
return bfqq;
@@ -5507,7 +5995,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
HRTIMER_MODE_REL);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
- bfqd->queue_weights_tree = RB_ROOT;
+ bfqd->queue_weights_tree = RB_ROOT_CACHED;
bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
@@ -5515,6 +6003,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
+ bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
bfqd->bfq_max_budget = bfq_default_max_budget;
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 86394e503ca9..c2faa77824f8 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Header file for the BFQ I/O scheduler: data structures and
* prototypes of interface functions among BFQ components.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _BFQ_H
#define _BFQ_H
@@ -32,6 +23,8 @@
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
+#define MAX_PID_STR_LENGTH 12
+
/*
* Soft real-time applications are extremely more latency sensitive
* than interactive ones. Over-raise the weight of the former to
@@ -89,7 +82,7 @@ struct bfq_service_tree {
* expiration. This peculiar definition allows for the following
* optimization, not yet exploited: while a given entity is still in
* service, we already know which is the best candidate for next
- * service among the other active entitities in the same parent
+ * service among the other active entities in the same parent
* entity. We can then quickly compare the timestamps of the
* in-service entity with those of such best candidate.
*
@@ -140,7 +133,7 @@ struct bfq_weight_counter {
*
* Unless cgroups are used, the weight value is calculated from the
* ioprio to export the same interface as CFQ. When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
+ * "well-behaved" queues (i.e., queues that do not spend too much
* time to consume their budget and have true sequential behavior, and
* when there are no external factors breaking anticipation) the
* relative weights at each level of the cgroups hierarchy should be
@@ -240,6 +233,13 @@ struct bfq_queue {
/* next ioprio and ioprio class if a change is in progress */
unsigned short new_ioprio, new_ioprio_class;
+ /* last total-service-time sample, see bfq_update_inject_limit() */
+ u64 last_serv_time_ns;
+ /* limit for request injection */
+ unsigned int inject_limit;
+ /* last time the inject limit has been decreased, in jiffies */
+ unsigned long decrease_time_jif;
+
/*
* Shared bfq_queue if queue is cooperating with one or more
* other queues.
@@ -357,29 +357,6 @@ struct bfq_queue {
/* max service rate measured so far */
u32 max_service_rate;
- /*
- * Ratio between the service received by bfqq while it is in
- * service, and the cumulative service (of requests of other
- * queues) that may be injected while bfqq is empty but still
- * in service. To increase precision, the coefficient is
- * measured in tenths of unit. Here are some example of (1)
- * ratios, (2) resulting percentages of service injected
- * w.r.t. to the total service dispatched while bfqq is in
- * service, and (3) corresponding values of the coefficient:
- * 1 (50%) -> 10
- * 2 (33%) -> 20
- * 10 (9%) -> 100
- * 9.9 (9%) -> 99
- * 1.5 (40%) -> 15
- * 0.5 (66%) -> 5
- * 0.1 (90%) -> 1
- *
- * So, if the coefficient is lower than 10, then
- * injected service is more than bfqq service.
- */
- unsigned int inject_coeff;
- /* amount of service injected in current service slot */
- unsigned int injected_service;
};
/**
@@ -419,6 +396,15 @@ struct bfq_io_cq {
bool was_in_burst_list;
/*
+ * Save the weight when a merge occurs, to be able
+ * to restore it in case of split. If the weight is not
+ * correctly resumed when the queue is recycled,
+ * then the weight of the recycled queue could differ
+ * from the weight of the original queue.
+ */
+ unsigned int saved_weight;
+
+ /*
* Similar to previous fields: save wr information.
*/
unsigned long saved_wr_coeff;
@@ -450,7 +436,7 @@ struct bfq_data {
* weight-raised @bfq_queue (see the comments to the functions
* bfq_weights_tree_[add|remove] for further details).
*/
- struct rb_root queue_weights_tree;
+ struct rb_root_cached queue_weights_tree;
/*
* Number of groups with at least one descendant process that
@@ -513,6 +499,9 @@ struct bfq_data {
/* number of requests dispatched and waiting for completion */
int rq_in_driver;
+ /* true if the device is non rotational and performs queueing */
+ bool nonrot_with_queueing;
+
/*
* Maximum number of requests in driver in the last
* @hw_tag_samples completed requests.
@@ -544,6 +533,26 @@ struct bfq_data {
/* time of last request completion (ns) */
u64 last_completion;
+ /* time of last transition from empty to non-empty (ns) */
+ u64 last_empty_occupied_ns;
+
+ /*
+ * Flag set to activate the sampling of the total service time
+ * of a just-arrived first I/O request (see
+ * bfq_update_inject_limit()). This will cause the setting of
+ * waited_rq when the request is finally dispatched.
+ */
+ bool wait_dispatch;
+ /*
+ * If set, then bfq_update_inject_limit() is invoked when
+ * waited_rq is eventually completed.
+ */
+ struct request *waited_rq;
+ /*
+ * True if some request has been injected during the last service hole.
+ */
+ bool rqs_injected;
+
/* time of first rq dispatch in current observation interval (ns) */
u64 first_dispatch;
/* time of last rq dispatch in current observation interval (ns) */
@@ -553,6 +562,7 @@ struct bfq_data {
ktime_t last_budget_start;
/* beginning of the last idle slice */
ktime_t last_idling_start;
+ unsigned long last_idling_start_jiffies;
/* number of samples in current observation interval */
int peak_rate_samples;
@@ -898,10 +908,10 @@ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1008,13 +1018,23 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */
/* Logging facilities. */
+static inline void bfq_pid_to_str(int pid, char *str, int len)
+{
+ if (pid != -1)
+ snprintf(str, len, "%d", pid);
+ else
+ snprintf(str, len, "SHARED-");
+}
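
As a quick illustration of what this helper feeds into the "bfq%s%c" trace format, here is a standalone sketch that duplicates it in userspace; the pid values are made up, and pid == -1 stands for the shared marker used by merged queues.

#include <stdio.h>

#define MAX_PID_STR_LENGTH 12

/* Userspace copy of the helper above, for illustration only. */
static void pid_to_str(int pid, char *str, int len)
{
        if (pid != -1)
                snprintf(str, len, "%d", pid);
        else
                snprintf(str, len, "SHARED-");
}

int main(void)
{
        char a[MAX_PID_STR_LENGTH], b[MAX_PID_STR_LENGTH];

        pid_to_str(1234, a, sizeof(a)); /* "1234"    -> trace line "bfq1234S ..."    */
        pid_to_str(-1, b, sizeof(b));   /* "SHARED-" -> trace line "bfqSHARED-S ..." */
        printf("%s %s\n", a, b);
        return 0;
}
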
+
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
- "bfq%d%c " fmt, (bfqq)->pid, \
+ "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
} while (0)
@@ -1025,10 +1045,13 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#else /* CONFIG_BFQ_GROUP_IOSCHED */
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- ##args)
+ ##args); \
+} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ae4d000ac0af..c9ba225081ce 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hierarchical Budget Worst-case Fair Weighted Fair Queueing
* (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
* scheduler schedules generic entities. The latter can represent
* either single bfq queues (associated with processes) or groups of
* bfq queues (associated with cgroups).
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include "bfq-iosched.h"
@@ -59,7 +50,7 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
* bfq_update_next_in_service - update sd->next_in_service
* @sd: sched_data for which to perform the update.
* @new_entity: if not NULL, pointer to the entity whose activation,
- * requeueing or repositionig triggered the invocation of
+ * requeueing or repositioning triggered the invocation of
* this function.
* @expiration: if true, this function is being invoked after the
* expiration of the in-service entity
@@ -90,7 +81,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
/*
* If this update is triggered by the activation, requeueing
- * or repositiong of an entity that does not coincide with
+ * or repositioning of an entity that does not coincide with
* sd->next_in_service, then a full lookup in the active tree
* can be avoided. In fact, it is enough to check whether the
* just-modified entity has the same priority as
@@ -737,7 +728,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned int prev_weight, new_weight;
struct bfq_data *bfqd = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_sched_data *sd;
struct bfq_group *bfqg;
@@ -1396,7 +1387,7 @@ left:
* In this first case, update the virtual time in @st too (see the
* comments on this update inside the function).
*
- * In constrast, if there is an in-service entity, then return the
+ * In contrast, if there is an in-service entity, then return the
* entity that would be set in service if not only the above
* conditions, but also the next one held true: the currently
* in-service entity, on expiration,
@@ -1479,12 +1470,12 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
* is being invoked as a part of the expiration path
* of the in-service queue. In this case, even if
* sd->in_service_entity is not NULL,
- * sd->in_service_entiy at this point is actually not
+ * sd->in_service_entity at this point is actually not
* in service any more, and, if needed, has already
* been properly queued or requeued into the right
* tree. The reason why sd->in_service_entity is still
* not NULL here, even if expiration is true, is that
- * sd->in_service_entiy is reset as a last step in the
+ * sd->in_service_entity is reset as a last step in the
* expiration path. So, if expiration is true, tell
* __bfq_lookup_next_entity that there is no
* sd->in_service_entity.
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 1b633a3526d4..4db620849515 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bio-integrity.c - bio data integrity extensions
*
* Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -57,8 +43,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
unsigned inline_vecs;
if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
- bip = kmalloc(sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
inline_vecs = nr_vecs;
} else {
bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
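
struct_size(bip, bip_inline_vecs, nr_vecs) evaluates to the size of the struct plus nr_vecs trailing vector entries, saturating instead of silently wrapping on overflow. A rough userspace sketch of the equivalent (unchecked) arithmetic, using stand-in types rather than the real bio_integrity_payload:

#include <stdio.h>
#include <stdlib.h>

struct vec { void *page; unsigned int len, off; };

struct payload {                          /* stand-in for bio_integrity_payload */
        unsigned short vcnt;
        struct vec inline_vecs[];         /* flexible array, like bip_inline_vecs */
};

int main(void)
{
        unsigned int nr_vecs = 4;
        /* struct_size() computes this sum, but saturates on overflow. */
        size_t sz = sizeof(struct payload) + nr_vecs * sizeof(struct vec);
        struct payload *p = malloc(sz);

        printf("allocating %zu bytes\n", sz);
        free(p);
        return 0;
}
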
diff --git a/block/bio.c b/block/bio.c
index 716510ecd7ff..683cbb40f051 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/mm.h>
#include <linux/swap.h>
@@ -647,25 +634,72 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
}
EXPORT_SYMBOL(bio_clone_fast);
+static inline bool page_is_mergeable(const struct bio_vec *bv,
+ struct page *page, unsigned int len, unsigned int off,
+ bool same_page)
+{
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
+ bv->bv_offset + bv->bv_len - 1;
+ phys_addr_t page_addr = page_to_phys(page);
+
+ if (vec_end_addr + 1 != page_addr + off)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
+ return false;
+
+ if ((vec_end_addr & PAGE_MASK) != page_addr) {
+ if (same_page)
+ return false;
+ if (pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+ return false;
+ }
+
+ WARN_ON_ONCE(same_page && (len + off) > PAGE_SIZE);
+
+ return true;
+}
+
+/*
+ * Check if the @page can be added to the current segment (@bv), and make
+ * sure to call it only if page_is_mergeable(@bv, @page) is true
+ */
+static bool can_add_page_to_seg(struct request_queue *q,
+ struct bio_vec *bv, struct page *page, unsigned len,
+ unsigned offset)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+
+ if ((addr1 | mask) != (addr2 | mask))
+ return false;
+
+ if (bv->bv_len + len > queue_max_segment_size(q))
+ return false;
+
+ return true;
+}
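
Both helpers come down to address arithmetic: page_is_mergeable() requires the new data to start exactly one byte past the end of the current bvec, while can_add_page_to_seg() requires the merged range to stay inside one segment-boundary window, which the OR-with-mask comparison checks. A standalone sketch with invented addresses and a 64 KiB boundary:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = 0xffff;          /* queue_segment_boundary(): 64 KiB - 1 (assumed) */
        uint64_t seg_start = 0x1f000;    /* first byte of the current bvec */
        uint64_t vec_end = 0x1fffe;      /* last byte of the current bvec */
        uint64_t new_off = 0x1ffff;      /* first byte of the data being added */
        uint64_t new_end = 0x20010;      /* last byte of the data being added */

        /* page_is_mergeable() core test: physical contiguity */
        bool contiguous = (vec_end + 1 == new_off);
        /* can_add_page_to_seg() test: same segment-boundary window */
        bool same_window = ((seg_start | mask) == (new_end | mask));

        /* Here contiguous is true but same_window is false: the data is
         * physically adjacent yet crosses the 64 KiB boundary, so a new
         * segment is needed. */
        printf("contiguous=%d same_window=%d\n", contiguous, same_window);
        return 0;
}
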
+
/**
- * bio_add_pc_page - attempt to add page to bio
+ * __bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @put_same_page: put the page if it is the same as the last added page
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
* limitations. The target block device must allow bio's up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
- * This should only be used by REQ_PC bios.
+ * This should only be used by passthrough bios.
*/
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
+static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ bool put_same_page)
{
- int retried_segments = 0;
struct bio_vec *bvec;
/*
@@ -677,18 +711,14 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
return 0;
- /*
- * For filesystems with a blocksize smaller than the pagesize
- * we will often be called with the same page as last time and
- * a consecutive offset. Optimize this special case.
- */
if (bio->bi_vcnt > 0) {
- struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == prev->bv_page &&
- offset == prev->bv_offset + prev->bv_len) {
- prev->bv_len += len;
- bio->bi_iter.bi_size += len;
+ if (page == bvec->bv_page &&
+ offset == bvec->bv_offset + bvec->bv_len) {
+ if (put_same_page)
+ put_page(page);
+ bvec->bv_len += len;
goto done;
}
@@ -696,63 +726,47 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (bvec_gap_to_prev(q, prev, offset))
+ if (bvec_gap_to_prev(q, bvec, offset))
return 0;
+
+ if (page_is_mergeable(bvec, page, len, offset, false) &&
+ can_add_page_to_seg(q, bvec, page, len, offset)) {
+ bvec->bv_len += len;
+ goto done;
+ }
}
if (bio_full(bio))
return 0;
- /*
- * setup the new entry, we might clear it again later if we
- * cannot add the page
- */
+ if (bio->bi_phys_segments >= queue_max_segments(q))
+ return 0;
+
bvec = &bio->bi_io_vec[bio->bi_vcnt];
bvec->bv_page = page;
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
- bio->bi_phys_segments++;
- bio->bi_iter.bi_size += len;
-
- /*
- * Perform a recount if the number of segments is greater
- * than queue_max_segments(q).
- */
-
- while (bio->bi_phys_segments > queue_max_segments(q)) {
-
- if (retried_segments)
- goto failed;
-
- retried_segments = 1;
- blk_recount_segments(q, bio);
- }
-
- /* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
- bio_clear_flag(bio, BIO_SEG_VALID);
-
done:
+ bio->bi_iter.bi_size += len;
+ bio->bi_phys_segments = bio->bi_vcnt;
+ bio_set_flag(bio, BIO_SEG_VALID);
return len;
+}
- failed:
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- bio->bi_vcnt--;
- bio->bi_iter.bi_size -= len;
- blk_recount_segments(q, bio);
- return 0;
+int bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ return __bio_add_pc_page(q, bio, page, len, offset, false);
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
* __bio_try_merge_page - try appending data to an existing bvec.
* @bio: destination bio
- * @page: page to add
+ * @page: start page to add
* @len: length of the data to add
- * @off: offset of the data in @page
+ * @off: offset of the data relative to @page
* @same_page: if %true only merge if the new data is in the same physical
* page as the last segment of the bio.
*
@@ -760,6 +774,8 @@ EXPORT_SYMBOL(bio_add_pc_page);
* a useful optimisation for file systems with a block size smaller than the
* page size.
*
+ * Warn if (@len, @off) crosses pages when @same_page is true.
+ *
* Return %true on success or %false on failure.
*/
bool __bio_try_merge_page(struct bio *bio, struct page *page,
@@ -770,29 +786,23 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
- phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
- bv->bv_offset + bv->bv_len - 1;
- phys_addr_t page_addr = page_to_phys(page);
- if (vec_end_addr + 1 != page_addr + off)
- return false;
- if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
- return false;
-
- bv->bv_len += len;
- bio->bi_iter.bi_size += len;
- return true;
+ if (page_is_mergeable(bv, page, len, off, same_page)) {
+ bv->bv_len += len;
+ bio->bi_iter.bi_size += len;
+ return true;
+ }
}
return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);
/**
- * __bio_add_page - add page to a bio in a new segment
+ * __bio_add_page - add page(s) to a bio in a new segment
* @bio: destination bio
- * @page: page to add
- * @len: length of the data to add
- * @off: offset of the data in @page
+ * @page: start page to add
+ * @len: length of the data to add, may cross pages
+ * @off: offset of the data relative to @page, may cross pages
*
* Add the data at @page + @off to @bio as a new bvec. The caller must ensure
* that @bio has space for another bvec.
@@ -815,13 +825,13 @@ void __bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL_GPL(__bio_add_page);
/**
- * bio_add_page - attempt to add page to bio
+ * bio_add_page - attempt to add page(s) to bio
* @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
+ * @page: start page to add
+ * @len: vec entry length, may cross pages
+ * @offset: vec entry offset relative to @page, may cross pages
*
- * Attempt to add a page to the bio_vec maplist. This will only fail
+ * Attempt to add page(s) to the bio_vec maplist. This will only fail
* if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
*/
int bio_add_page(struct bio *bio, struct page *page,
@@ -836,6 +846,24 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+static void bio_get_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ get_page(bvec->bv_page);
+}
+
+static void bio_release_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ put_page(bvec->bv_page);
+}
+
static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
const struct bio_vec *bv = iter->bvec;
@@ -848,20 +876,10 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
size = bio_add_page(bio, bv->bv_page, len,
bv->bv_offset + iter->iov_offset);
- if (size == len) {
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct page *page;
- int i;
-
- mp_bvec_for_each_page(page, bv, i)
- get_page(page);
- }
-
- iov_iter_advance(iter, size);
- return 0;
- }
-
- return -EINVAL;
+ if (unlikely(size != len))
+ return -EINVAL;
+ iov_iter_advance(iter, size);
+ return 0;
}
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
@@ -934,29 +952,24 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
const bool is_bvec = iov_iter_is_bvec(iter);
- unsigned short orig_vcnt = bio->bi_vcnt;
+ int ret;
- /*
- * If this is a BVEC iter, then the pages are kernel pages. Don't
- * release them on IO completion, if the caller asked us to.
- */
- if (is_bvec && iov_iter_bvec_no_ref(iter))
- bio_set_flag(bio, BIO_NO_PAGE_REF);
+ if (WARN_ON_ONCE(bio->bi_vcnt))
+ return -EINVAL;
do {
- int ret;
-
if (is_bvec)
ret = __bio_iov_bvec_add_pages(bio, iter);
else
ret = __bio_iov_iter_get_pages(bio, iter);
+ } while (!ret && iov_iter_count(iter) && !bio_full(bio));
- if (unlikely(ret))
- return bio->bi_vcnt > orig_vcnt ? 0 : ret;
-
- } while (iov_iter_count(iter) && !bio_full(bio));
+ if (iov_iter_bvec_no_ref(iter))
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ else if (is_bvec)
+ bio_get_pages(bio);
- return 0;
+ return bio->bi_vcnt ? 0 : ret;
}
static void submit_bio_wait_endio(struct bio *bio)
@@ -1127,11 +1140,10 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
*/
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
- int i;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_from_iter(bvec->bv_page,
@@ -1159,11 +1171,10 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
*/
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
- int i;
struct bio_vec *bvec;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_to_iter(bvec->bv_page,
@@ -1184,10 +1195,9 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all)
+ bio_for_each_segment_all(bvec, bio, iter_all)
__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);
@@ -1388,21 +1398,14 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs))
+ if (!__bio_add_pc_page(q, bio, page, n, offs,
+ true))
break;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(page);
-
added += n;
bytes -= n;
offs = 0;
@@ -1432,7 +1435,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- bio_for_each_segment_all(bvec, bio, j, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
put_page(bvec->bv_page);
}
bio_put(bio);
@@ -1442,13 +1445,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
static void __bio_unmap_user(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
struct bvec_iter_all iter_all;
/*
* make sure we dirty pages we wrote to
*/
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
@@ -1539,10 +1541,9 @@ static void bio_copy_kern_endio_read(struct bio *bio)
{
char *p = bio->bi_private;
struct bio_vec *bvec;
- int i;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p += bvec->bv_len;
}
@@ -1650,25 +1651,14 @@ cleanup:
void bio_set_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
}
}
-static void bio_release_pages(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, i, iter_all)
- put_page(bvec->bv_page);
-}
-
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
* If they are, then fine. If, however, some pages are clean then they must
@@ -1712,10 +1702,9 @@ void bio_check_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
unsigned long flags;
- int i;
struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
goto defer;
}
@@ -2203,6 +2192,9 @@ static int __init init_bio(void)
bio_slab_nr = 0;
bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
GFP_KERNEL);
+
+ BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
+
if (!bio_slabs)
panic("bio: can't allocate bios\n");
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 617a2b3f7582..b97b479e4f64 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common Block IO controller cgroup interface
*
diff --git a/block/blk-core.c b/block/blk-core.c
index a55389ba8779..419d600e6637 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
@@ -232,15 +233,6 @@ void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
-
- if (queue_is_mq(q)) {
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- cancel_delayed_work_sync(&q->requeue_work);
- queue_for_each_hw_ctx(q, hctx, i)
- cancel_delayed_work_sync(&hctx->run_work);
- }
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -347,18 +339,6 @@ void blk_cleanup_queue(struct request_queue *q)
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
- /*
- * make sure all in-progress dispatch are completed because
- * blk_freeze_queue() can only complete all requests, and
- * dispatch may still be in-progress since we dispatch requests
- * from more than one contexts.
- *
- * We rely on driver to deal with the race in case that queue
- * initialization isn't done.
- */
- if (queue_is_mq(q) && blk_queue_init_done(q))
- blk_mq_quiesce_queue(q);
-
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
@@ -375,7 +355,7 @@ void blk_cleanup_queue(struct request_queue *q)
blk_exit_queue(q);
if (queue_is_mq(q))
- blk_mq_free_queue(q);
+ blk_mq_exit_queue(q);
percpu_ref_exit(&q->q_usage_counter);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index a34b7d918742..1db44ca0f4a6 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d95f94892015..aedd9320e605 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions to sequence PREFLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
- * This file is released under the GPLv2.
- *
* REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisted of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d1ab089e0919..825c9c070458 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-integrity.c - Block layer data integrity extensions
*
* Copyright (C) 2007, 2008 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -365,6 +351,7 @@ static struct attribute *integrity_attrs[] = {
&integrity_device_entry.attr,
NULL,
};
+ATTRIBUTE_GROUPS(integrity);
static const struct sysfs_ops integrity_ops = {
.show = &integrity_attr_show,
@@ -372,7 +359,7 @@ static const struct sysfs_ops integrity_ops = {
};
static struct kobj_type integrity_ktype = {
- .default_attrs = integrity_attrs,
+ .default_groups = integrity_groups,
.sysfs_ops = &integrity_ops,
};
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 507212d75ee2..d22e61bced86 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block rq-qos base io controller
*
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1c9d4f0f96ea..21e87a714a73 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -267,23 +267,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
goto split;
}
- if (bvprvp) {
- if (seg_size + bv.bv_len > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprvp, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- bvprvp = &bvprv;
- sectors += bv.bv_len >> 9;
-
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
-
- continue;
- }
-new_segment:
if (nsegs == max_segs)
goto split;
@@ -370,12 +353,12 @@ EXPORT_SYMBOL(blk_queue_split);
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec bv, bvprv = { NULL };
- int prev = 0;
+ struct bio_vec uninitialized_var(bv), bvprv = { NULL };
unsigned int seg_size, nr_phys_segs;
unsigned front_seg_size;
struct bio *fbio, *bbio;
struct bvec_iter iter;
+ bool new_bio = false;
if (!bio)
return 0;
@@ -396,7 +379,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
nr_phys_segs = 0;
for_each_bio(bio) {
bio_for_each_bvec(bv, bio, iter) {
- if (prev) {
+ if (new_bio) {
if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
@@ -404,7 +387,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
goto new_segment;
seg_size += bv.bv_len;
- bvprv = bv;
if (nr_phys_segs == 1 && seg_size >
front_seg_size)
@@ -413,12 +395,15 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
continue;
}
new_segment:
- bvprv = bv;
- prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
&front_seg_size, NULL, UINT_MAX);
+ new_bio = false;
}
bbio = bio;
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bv;
+ new_bio = true;
+ }
}
fbio->bi_seg_front_size = front_seg_size;
@@ -484,79 +469,97 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
struct scatterlist **sg)
{
unsigned nbytes = bvec->bv_len;
- unsigned nsegs = 0, total = 0, offset = 0;
+ unsigned nsegs = 0, total = 0;
while (nbytes > 0) {
- unsigned seg_size;
- struct page *pg;
- unsigned idx;
-
- *sg = blk_next_sg(sg, sglist);
+ unsigned offset = bvec->bv_offset + total;
+ unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ struct page *page = bvec->bv_page;
- seg_size = get_max_segment_size(q, bvec->bv_offset + total);
- seg_size = min(nbytes, seg_size);
-
- offset = (total + bvec->bv_offset) % PAGE_SIZE;
- idx = (total + bvec->bv_offset) / PAGE_SIZE;
- pg = bvec_nth_page(bvec->bv_page, idx);
+ /*
+ * Unfortunately a fair number of drivers barf on scatterlists
+ * that have an offset larger than PAGE_SIZE, despite other
+ * subsystems dealing with that invariant just fine. For now
+ * stick to the legacy format where we never present those from
+ * the block layer, but the code below should be removed once
+ * these offenders (mostly MMC/SD drivers) are fixed.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
- sg_set_page(*sg, pg, seg_size, offset);
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, page, len, offset);
- total += seg_size;
- nbytes -= seg_size;
+ total += len;
+ nbytes -= len;
nsegs++;
}
return nsegs;
}
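
The page/offset adjustment above only re-expresses a (page, large offset) pair as a later page plus an in-page offset, so no scatterlist element reports an offset of PAGE_SIZE or more. A standalone sketch of that arithmetic, assuming 4 KiB pages and made-up values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long page_idx = 10;       /* index of bvec->bv_page (hypothetical) */
        unsigned long offset = 9000;       /* bv_offset + total: spills past two pages */

        page_idx += offset >> PAGE_SHIFT;  /* advance two pages ...            */
        offset &= ~PAGE_MASK;              /* ... and keep the in-page remainder */

        /* prints page_idx=12 offset=808 */
        printf("page_idx=%lu offset=%lu\n", page_idx, offset);
        return 0;
}
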
-static inline void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec *bvprv,
- struct scatterlist **sg, int *nsegs)
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+ struct scatterlist *sglist, struct scatterlist **sg)
+{
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+ return 1;
+}
+
+/* only try to merge bvecs into one sg if they are from two bios */
+static inline bool
+__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
+ struct bio_vec *bvprv, struct scatterlist **sg)
{
int nbytes = bvec->bv_len;
- if (*sg) {
- if ((*sg)->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprv, bvec))
- goto new_segment;
+ if (!*sg)
+ return false;
- (*sg)->length += nbytes;
- } else {
-new_segment:
- if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
- *sg = blk_next_sg(sg, sglist);
- sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
- (*nsegs) += 1;
- } else
- (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
- }
- *bvprv = *bvec;
-}
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ return false;
-static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
- struct scatterlist *sglist, struct scatterlist **sg)
-{
- *sg = sglist;
- sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
- return 1;
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
+ return false;
+
+ (*sg)->length += nbytes;
+
+ return true;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
- struct bio_vec bvec, bvprv = { NULL };
+ struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
struct bvec_iter iter;
int nsegs = 0;
+ bool new_bio = false;
- for_each_bio(bio)
- bio_for_each_bvec(bvec, bio, iter)
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
- &nsegs);
+ for_each_bio(bio) {
+ bio_for_each_bvec(bvec, bio, iter) {
+ /*
+ * Only try to merge bvecs from two different bios, since
+ * bvecs within a single bio have already been merged when
+ * its pages were added
+ */
+ if (new_bio &&
+ __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
+ goto next_bvec;
+
+ if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+ nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
+ else
+ nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
+ next_bvec:
+ new_bio = false;
+ }
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bvec;
+ new_bio = true;
+ }
+ }
return nsegs;
}
@@ -572,9 +575,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs = 0;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+ nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
- nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+ nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 03a534820271..48bebf00a5f3 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPU <-> hardware queue mapping helpers
*
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ec1d18cb643c..6aea0ebc3a73 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 1dce18553984..ad4545a2a98b 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kobject.h>
#include <linux/blkdev.h>
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 45030a81a1ed..cc921e6ba709 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Sagi Grimberg.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index aa6bc5c02643..74c6bb871f7e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-mq scheduling framework
*
@@ -413,6 +414,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *list, bool run_queue_async)
{
struct elevator_queue *e;
+ struct request_queue *q = hctx->queue;
+
+ /*
+ * blk_mq_sched_insert_requests() is called from flush plug
+ * context only, and holds one usage counter to prevent the
+ * queue from being released.
+ */
+ percpu_ref_get(&q->q_usage_counter);
e = hctx->queue->elevator;
if (e && e->type->ops.insert_requests)
@@ -426,12 +435,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
if (!hctx->dispatch_busy && !e && !run_queue_async) {
blk_mq_try_issue_list_directly(hctx, list);
if (list_empty(list))
- return;
+ goto out;
}
blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
+ out:
+ percpu_ref_put(&q->q_usage_counter);
}
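
The new percpu_ref_get()/percpu_ref_put() pair is the usual pattern of pinning an object for the duration of a call that may outlive the submitter's own reference. A loose userspace sketch of the same idea with a plain atomic counter; the names are invented and this is not the percpu_ref API:

#include <stdatomic.h>
#include <stdio.h>

struct queue { atomic_int usage; };

static void insert_requests(struct queue *q)
{
        atomic_fetch_add(&q->usage, 1);   /* like percpu_ref_get(&q->q_usage_counter) */
        /* ... run dispatch paths that may sleep or drop other locks ... */
        atomic_fetch_sub(&q->usage, 1);   /* like percpu_ref_put(): release may now proceed */
}

int main(void)
{
        struct queue q = { .usage = 1 };

        insert_requests(&q);
        printf("usage back to %d\n", atomic_load(&q.usage));
        return 0;
}
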
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 3f9c3f4ac44c..d6e1a9bd7131 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -10,6 +11,7 @@
#include <linux/smp.h>
#include <linux/blk-mq.h>
+#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -33,6 +35,13 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
kobj);
+
+ cancel_delayed_work_sync(&hctx->run_work);
+
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ cleanup_srcu_struct(hctx->srcu);
+ blk_free_flush_queue(hctx->fq);
+ sbitmap_free(&hctx->ctx_map);
free_cpumask_var(hctx->cpumask);
kfree(hctx->ctxs);
kfree(hctx);
@@ -173,10 +182,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}
-static struct attribute *default_ctx_attrs[] = {
- NULL,
-};
-
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
.attr = {.name = "nr_tags", .mode = 0444 },
.show = blk_mq_hw_sysfs_nr_tags_show,
@@ -196,6 +201,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
NULL,
};
+ATTRIBUTE_GROUPS(default_hw_ctx);
static const struct sysfs_ops blk_mq_sysfs_ops = {
.show = blk_mq_sysfs_show,
@@ -214,13 +220,12 @@ static struct kobj_type blk_mq_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
- .default_attrs = default_ctx_attrs,
.release = blk_mq_ctx_sysfs_release,
};
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
- .default_attrs = default_hw_ctx_attrs,
+ .default_groups = default_hw_ctx_groups,
.release = blk_mq_hw_sysfs_release,
};
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index a4931fc7be8a..7513c8eaabee 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tag allocation using scalable bitmaps. Uses active queue tracking to support
* fairer distribution of tags between multiple submitters when a shared tag map
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 370827163835..75a52c18a8f6 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/device.h>
#include <linux/blk-mq.h>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fc60ed7e940e..08a6248d8536 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block multiqueue core code
*
@@ -2062,7 +2063,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
list_del_init(&page->lru);
/*
* Remove kmemleak object previously allocated in
- * blk_mq_init_rq_map().
+ * blk_mq_alloc_rqs().
*/
kmemleak_free(page_address(page));
__free_pages(page, page->private);
@@ -2267,12 +2268,11 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->srcu);
-
blk_mq_remove_cpuhp(hctx);
- blk_free_flush_queue(hctx->fq);
- sbitmap_free(&hctx->ctx_map);
+
+ spin_lock(&q->unused_hctx_lock);
+ list_add(&hctx->hctx_list, &q->unused_hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
}
static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2289,15 +2289,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
}
}
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
+ __alignof__(struct blk_mq_hw_ctx)) !=
+ sizeof(struct blk_mq_hw_ctx));
+
+ if (tag_set->flags & BLK_MQ_F_BLOCKING)
+ hw_ctx_size += sizeof(struct srcu_struct);
+
+ return hw_ctx_size;
+}
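
The BUILD_BUG_ON in the relocated helper asserts that srcu is the final member with no tail padding, so an optional srcu_struct can share the hctx allocation. A standalone sketch of that optional-trailing-member layout with stand-in types:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct srcu_like { long state[4]; };            /* stand-in for struct srcu_struct */

struct hw_ctx {                                 /* stand-in for blk_mq_hw_ctx */
        int queue_num;
        struct srcu_like srcu[];                /* must be last for this trick to work */
};

int main(void)
{
        bool blocking = true;                   /* stands in for BLK_MQ_F_BLOCKING */
        size_t sz = sizeof(struct hw_ctx);

        if (blocking)
                sz += sizeof(struct srcu_like); /* pay for srcu only when needed */

        struct hw_ctx *hctx = calloc(1, sz);    /* one allocation covers both */
        printf("allocated %zu bytes\n", sz);
        free(hctx);
        return 0;
}
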
+
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
- int node;
+ hctx->queue_num = hctx_idx;
+
+ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+
+ hctx->tags = set->tags[hctx_idx];
+
+ if (set->ops->init_hctx &&
+ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+ goto unregister_cpu_notifier;
+
+ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
+ hctx->numa_node))
+ goto exit_hctx;
+ return 0;
+
+ exit_hctx:
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+ unregister_cpu_notifier:
+ blk_mq_remove_cpuhp(hctx);
+ return -1;
+}
+
+static struct blk_mq_hw_ctx *
+blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
+ int node)
+{
+ struct blk_mq_hw_ctx *hctx;
+ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
+ if (!hctx)
+ goto fail_alloc_hctx;
- node = hctx->numa_node;
+ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
+ goto free_hctx;
+
+ atomic_set(&hctx->nr_active, 0);
if (node == NUMA_NO_NODE)
- node = hctx->numa_node = set->numa_node;
+ node = set->numa_node;
+ hctx->numa_node = node;
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
spin_lock_init(&hctx->lock);
@@ -2305,58 +2355,47 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
- hctx->tags = set->tags[hctx_idx];
+ INIT_LIST_HEAD(&hctx->hctx_list);
/*
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
+ gfp, node);
if (!hctx->ctxs)
- goto unregister_cpu_notifier;
+ goto free_cpumask;
if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
+ gfp, node))
goto free_ctxs;
-
hctx->nr_ctx = 0;
spin_lock_init(&hctx->dispatch_wait_lock);
init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
- if (set->ops->init_hctx &&
- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
- goto free_bitmap;
-
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+ gfp);
if (!hctx->fq)
- goto exit_hctx;
-
- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
- goto free_fq;
+ goto free_bitmap;
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(hctx->srcu);
+ blk_mq_hctx_kobj_init(hctx);
- return 0;
+ return hctx;
- free_fq:
- blk_free_flush_queue(hctx->fq);
- exit_hctx:
- if (set->ops->exit_hctx)
- set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
sbitmap_free(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
- unregister_cpu_notifier:
- blk_mq_remove_cpuhp(hctx);
- return -1;
+ free_cpumask:
+ free_cpumask_var(hctx->cpumask);
+ free_hctx:
+ kfree(hctx);
+ fail_alloc_hctx:
+ return NULL;
}
static void blk_mq_init_cpu_queues(struct request_queue *q,
@@ -2631,13 +2670,17 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
*/
void blk_mq_release(struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
+ struct blk_mq_hw_ctx *hctx, *next;
+ int i;
- /* hctx kobj stays in hctx */
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx)
- continue;
+ cancel_delayed_work_sync(&q->requeue_work);
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+ /* all hctx are in .unused_hctx_list now */
+ list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
+ list_del_init(&hctx->hctx_list);
kobject_put(&hctx->kobj);
}
@@ -2700,51 +2743,38 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
}
EXPORT_SYMBOL(blk_mq_init_sq_queue);
-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
-{
- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
-
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
- __alignof__(struct blk_mq_hw_ctx)) !=
- sizeof(struct blk_mq_hw_ctx));
-
- if (tag_set->flags & BLK_MQ_F_BLOCKING)
- hw_ctx_size += sizeof(struct srcu_struct);
-
- return hw_ctx_size;
-}
-
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
int hctx_idx, int node)
{
- struct blk_mq_hw_ctx *hctx;
-
- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- node);
- if (!hctx)
- return NULL;
+ struct blk_mq_hw_ctx *hctx = NULL, *tmp;
- if (!zalloc_cpumask_var_node(&hctx->cpumask,
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- node)) {
- kfree(hctx);
- return NULL;
+ /* reuse dead hctx first */
+ spin_lock(&q->unused_hctx_lock);
+ list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
+ if (tmp->numa_node == node) {
+ hctx = tmp;
+ break;
+ }
}
+ if (hctx)
+ list_del_init(&hctx->hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
- atomic_set(&hctx->nr_active, 0);
- hctx->numa_node = node;
- hctx->queue_num = hctx_idx;
+ if (!hctx)
+ hctx = blk_mq_alloc_hctx(q, set, node);
+ if (!hctx)
+ goto fail;
- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
- free_cpumask_var(hctx->cpumask);
- kfree(hctx);
- return NULL;
- }
- blk_mq_hctx_kobj_init(hctx);
+ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
+ goto free_hctx;
return hctx;
+
+ free_hctx:
+ kobject_put(&hctx->kobj);
+ fail:
+ return NULL;
}
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
@@ -2770,10 +2800,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
if (hctx) {
- if (hctxs[i]) {
+ if (hctxs[i])
blk_mq_exit_hctx(q, set, hctxs[i], i);
- kobject_put(&hctxs[i]->kobj);
- }
hctxs[i] = hctx;
} else {
if (hctxs[i])
@@ -2804,9 +2832,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
if (hctx->tags)
blk_mq_free_map_and_requests(set, j);
blk_mq_exit_hctx(q, set, hctx, j);
- kobject_put(&hctx->kobj);
hctxs[j] = NULL;
-
}
}
mutex_unlock(&q->sysfs_lock);
@@ -2849,6 +2875,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->queue_hw_ctx)
goto err_sys_init;
+ INIT_LIST_HEAD(&q->unused_hctx_list);
+ spin_lock_init(&q->unused_hctx_lock);
+
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
@@ -2905,7 +2934,8 @@ err_exit:
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 423ea88ab6fb..633a5a77ee8b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -37,7 +37,7 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index d169d7188fa6..3f55b56f24bc 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#include "blk-rq-qos.h"
/*
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 564851889550..2300e038b9fa 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6375afaedcec..3facc41476be 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
@@ -663,22 +664,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
EXPORT_SYMBOL(disk_stack_limits);
/**
- * blk_queue_dma_pad - set pad mask
- * @q: the request queue for the device
- * @mask: pad mask
- *
- * Set dma pad mask.
- *
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
-{
- q->dma_pad_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_pad);
-
-/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
* @mask: pad mask
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 696a04176e4d..940f15d600f8 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block stat tracking code
*
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 422327089e0f..a16a02c52a85 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -728,7 +728,7 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
};
#endif
-static struct attribute *default_attrs[] = {
+static struct attribute *queue_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -770,6 +770,25 @@ static struct attribute *default_attrs[] = {
NULL,
};
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+
+ if (attr == &queue_io_timeout_entry.attr &&
+ (!q->mq_ops || !q->mq_ops->timeout))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+ .attrs = queue_attrs,
+ .is_visible = queue_attr_visible,
+};
+
+
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
@@ -890,7 +909,6 @@ static const struct sysfs_ops queue_sysfs_ops = {
struct kobj_type blk_queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
- .default_attrs = default_attrs,
.release = blk_release_queue,
};
@@ -939,6 +957,14 @@ int blk_register_queue(struct gendisk *disk)
goto unlock;
}
+ ret = sysfs_create_group(&q->kobj, &queue_attr_group);
+ if (ret) {
+ blk_trace_remove_sysfs(dev);
+ kobject_del(&q->kobj);
+ kobject_put(&dev->kobj);
+ goto unlock;
+ }
+
if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
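
This hunk moves the queue attributes from the kobj_type's default_attrs to an attribute_group whose is_visible() callback returns 0 for io_timeout when the device has no mq_ops->timeout handler, so the sysfs file is simply not created for such queues. A small self-contained C sketch of the filter-by-callback idea; attr_visible, has_timeout_handler and the attribute table are local to the example, while the real code registers through sysfs_create_group().

#include <stdio.h>
#include <string.h>

struct attr { const char *name; int mode; };

static int has_timeout_handler;           /* stands in for q->mq_ops->timeout */

static int attr_visible(const struct attr *a)
{
        if (!strcmp(a->name, "io_timeout") && !has_timeout_handler)
                return 0;                 /* mode 0: entry is not created */
        return a->mode;                   /* otherwise expose as usual */
}

int main(void)
{
        const struct attr attrs[] = {
                { "nr_requests", 0644 },
                { "io_timeout",  0644 },
        };

        for (unsigned int i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                if (attr_visible(&attrs[i]))
                        printf("create %s (%o)\n", attrs[i].name, attrs[i].mode);
        return 0;
}
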
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 124c26128bf6..8aa68fae96ad 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to generic timeout handling of requests.
*/
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index fd166fbb0f65..313f45a37e9d 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* buffered writeback throttling. loosely based on CoDel. We can't drop
* packets for IO scheduling, so the logic is something like this:
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 2d98803faec2..ae7e91bd0618 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Zoned block device handling
*
diff --git a/block/blk.h b/block/blk.h
index 5d636ee41663..e27fd1512e4b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
if (addr1 + vec1->bv_len != addr2)
return false;
- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
return false;
if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
return false;
diff --git a/block/bounce.c b/block/bounce.c
index 47eb7e936e22..f8ed677a1bf7 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -163,14 +163,13 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, orig_vec;
- int i;
struct bvec_iter orig_iter = bio_orig->bi_iter;
struct bvec_iter_all iter_all;
/*
* free up bounce indirect pages used
*/
- bio_for_each_segment_all(bvec, bio, i, iter_all) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
if (bvec->bv_page != orig_vec.bv_page) {
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
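
The bounce_end_io() change reflects the reworked bio_for_each_segment_all() signature: the explicit index variable disappears because all loop state now lives in the bvec_iter_all cursor the caller supplies. The toy macro below has the same shape and is purely illustrative; for_each_vec, struct cursor and struct vec are invented here.

#include <stdio.h>

struct vec    { int value; };
struct cursor { unsigned int idx; };

#define for_each_vec(v, arr, n, it)                              \
        for ((it)->idx = 0;                                      \
             (it)->idx < (n) && ((v) = &(arr)[(it)->idx], 1);    \
             (it)->idx++)

int main(void)
{
        struct vec vecs[3] = { {1}, {2}, {3} };
        struct vec *v;
        struct cursor it;

        for_each_vec(v, vecs, 3, &it)     /* no separate 'int i' needed */
                printf("%d\n", v->value);
        return 0;
}
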
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 005e2b75d775..b898a1cdf872 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/slab.h>
#include <linux/blk-mq.h>
diff --git a/block/bsg.c b/block/bsg.c
index f306853c6b08..833c44b3d458 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1,13 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bsg.c - block layer implementation of the sg v4 interface
- *
- * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
- * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
*/
#include <linux/module.h>
#include <linux/init.h>
diff --git a/block/elevator.c b/block/elevator.c
index d6d835a08de6..ec55d5fc0b3e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block device elevator/IO-scheduler.
*
@@ -509,8 +510,6 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
- char *def = "";
-
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
@@ -535,8 +534,8 @@ int elv_register(struct elevator_type *e)
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
- def);
+ printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
+
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
diff --git a/block/genhd.c b/block/genhd.c
index 703267865f14..ad6826628e79 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gendisk handling
*/
@@ -531,6 +532,18 @@ void blk_free_devt(dev_t devt)
}
}
+/**
+ * We invalidate devt by assigning NULL pointer for devt in idr.
+ */
+void blk_invalidate_devt(dev_t devt)
+{
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+ spin_lock_bh(&ext_devt_lock);
+ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
+ spin_unlock_bh(&ext_devt_lock);
+ }
+}
+
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
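
blk_invalidate_devt() replaces the stored pointer with NULL via idr_replace() instead of removing the idr entry, so stale lookups fail immediately while the device number itself stays reserved until the gendisk is truly freed. A compact userspace sketch of that invalidate-now, release-later pattern; the array-backed slot/reserved bookkeeping merely stands in for the kernel idr.

#include <stdio.h>
#include <stdlib.h>

#define MAX_IDS 16

static void *slot[MAX_IDS];              /* stored object, or NULL */
static unsigned char reserved[MAX_IDS];  /* the number itself stays in use */

static void invalidate(int id)
{
        slot[id] = NULL;          /* like idr_replace(..., NULL, id):
                                     lookups fail from now on */
}

static void *lookup(int id)
{
        return reserved[id] ? slot[id] : NULL;
}

static void release(int id)
{
        reserved[id] = 0;         /* only now may the number be reused */
}

int main(void)
{
        void *disk = malloc(32);

        reserved[3] = 1;
        slot[3] = disk;

        invalidate(3);            /* at del_gendisk()-style teardown */
        printf("%p\n", lookup(3));/* (nil): stale lookups see nothing */
        free(disk);               /* the object is really freed here... */
        release(3);               /* ...and only then is the ID recycled */
        return 0;
}
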
@@ -793,6 +806,13 @@ void del_gendisk(struct gendisk *disk)
if (!(disk->flags & GENHD_FL_HIDDEN))
blk_unregister_region(disk_devt(disk), disk->minors);
+ /*
+ * Remove gendisk pointer from idr so that it cannot be looked up
+ * while RCU period before freeing gendisk is running to prevent
+ * use-after-free issues. Note that the device number stays
+ * "in-use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(disk_devt(disk));
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
@@ -1628,12 +1648,11 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
/*
* If device-specific poll interval is set, always use it. If
- * the default is being used, poll iff there are events which
- * can't be monitored asynchronously.
+ * the default is being used, poll if the POLL flag is set.
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
- else if (disk->events & ~disk->async_events)
+ else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
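
The polling decision now keys off an explicit DISK_EVENT_FLAG_POLL bit instead of comparing events against the soon-to-be-removed async_events mask: a per-disk interval always wins, otherwise the default interval is used only when the driver opted into polling. A standalone sketch of that selection logic; EV_FLAG_POLL and poll_interval_msecs are names local to the example.

#include <stdio.h>

#define EV_FLAG_POLL 0x1

static long poll_interval_msecs(long per_disk_msecs, unsigned int flags,
                                long default_msecs)
{
        if (per_disk_msecs >= 0)
                return per_disk_msecs;    /* explicit setting always wins */
        if (flags & EV_FLAG_POLL)
                return default_msecs;     /* driver asked to be polled */
        return 0;                         /* no polling */
}

int main(void)
{
        printf("%ld\n", poll_interval_msecs(-1, EV_FLAG_POLL, 2000)); /* 2000 */
        printf("%ld\n", poll_interval_msecs(-1, 0, 2000));            /* 0 */
        printf("%ld\n", poll_interval_msecs(500, 0, 2000));           /* 500 */
        return 0;
}
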
@@ -1843,11 +1862,13 @@ static void disk_check_events(struct disk_events *ev,
/*
* Tell userland about new events. Only the events listed in
- * @disk->events are reported. Unlisted events are processed the
- * same internally but never get reported to userland.
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & disk->events & (1 << i))
+ if ((events & disk->events & (1 << i)) &&
+ (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
@@ -1860,6 +1881,7 @@ static void disk_check_events(struct disk_events *ev,
*
* events : list of all supported events
* events_async : list of events which can be detected w/o polling
+ * (always empty, only for backwards compatibility)
* events_poll_msecs : polling interval, 0: disable, -1: system default
*/
static ssize_t __disk_events_show(unsigned int events, char *buf)
@@ -1884,15 +1906,16 @@ static ssize_t disk_events_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
return __disk_events_show(disk->events, buf);
}
static ssize_t disk_events_async_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gendisk *disk = dev_to_disk(dev);
-
- return __disk_events_show(disk->async_events, buf);
+ return 0;
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
@@ -1901,6 +1924,9 @@ static ssize_t disk_events_poll_msecs_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
@@ -1917,6 +1943,9 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
+ if (!disk->ev)
+ return -ENODEV;
+
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1981,7 +2010,7 @@ static void disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
- if (!disk->fops->check_events)
+ if (!disk->fops->check_events || !disk->events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -2003,14 +2032,14 @@ static void disk_alloc_events(struct gendisk *disk)
static void disk_add_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
+ if (!disk->ev)
+ return;
+
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
@@ -2024,14 +2053,13 @@ static void disk_add_events(struct gendisk *disk)
static void disk_del_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
- disk_block_events(disk);
+ if (disk->ev) {
+ disk_block_events(disk);
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
diff --git a/block/ioctl.c b/block/ioctl.c
index 4825c78a6baa..15a0eb80ada9 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/export.h>
diff --git a/block/ioprio.c b/block/ioprio.c
index f9821080c92c..2e0559f157c8 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/ioprio.c
*
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index ec6a04e01bc1..c3b05119cebd 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The Kyber I/O scheduler. Controls latency by throttling queue depths using
* scalable techniques.
*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 14288f864e94..1876f5712bfd 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
* for the blk-mq scheduling framework
diff --git a/block/opal_proto.h b/block/opal_proto.h
index e20be8258854..d9a05ad02eb5 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/types.h>
@@ -170,6 +162,8 @@ enum opal_token {
OPAL_READLOCKED = 0x07,
OPAL_WRITELOCKED = 0x08,
OPAL_ACTIVEKEY = 0x0A,
+ /* lockingsp table */
+ OPAL_LIFECYCLE = 0x06,
/* locking info table */
OPAL_MAXRANGES = 0x04,
/* mbr control */
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 8e596a8dff32..aee643ce13d1 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ /*
+ * Remove gendisk pointer from idr so that it cannot be looked up
+ * while RCU period before freeing gendisk is running to prevent
+ * use-after-free issues. Note that the device number stays
+ * "in-use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(part_devt(part));
hd_struct_kill(part);
}
diff --git a/block/partitions/acorn.c b/block/partitions/acorn.c
index fbeb697374d5..7587700fad4a 100644
--- a/block/partitions/acorn.c
+++ b/block/partitions/acorn.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/fs/partitions/acorn.c
- *
* Copyright (c) 1996-2000 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Scan ADFS partitions on hard disk drives. Unfortunately, there
* isn't a standard for partitioning drives on Acorn machines, so
* every single manufacturer of SCSI and IDE cards created their own
diff --git a/block/partitions/aix.h b/block/partitions/aix.h
index e0c66a987523..b4449f0b9f2b 100644
--- a/block/partitions/aix.h
+++ b/block/partitions/aix.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int aix_partition(struct parsed_partitions *state);
diff --git a/block/partitions/amiga.h b/block/partitions/amiga.h
index d094585cadaa..7e63f4d9d969 100644
--- a/block/partitions/amiga.h
+++ b/block/partitions/amiga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/amiga.h
*/
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 39f70d968754..db2fef7dfc47 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/************************************************************
* EFI GUID Partition Table handling
*
@@ -7,21 +8,6 @@
* efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
* Copyright 2000,2001,2002,2004 Dell Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* TODO:
*
* Changelog:
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index abd0b19288a6..3e8576157575 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/************************************************************
* EFI GUID Partition Table
* Per Intel EFI Specification v1.02
@@ -5,21 +6,6 @@
*
* By Matt Domsch <Matt_Domsch@dell.com> Fri Sep 22 22:15:56 CDT 2000
* Copyright 2000,2001 Dell Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
************************************************************/
#ifndef FS_PART_EFI_H_INCLUDED
diff --git a/block/partitions/ibm.h b/block/partitions/ibm.h
index 08fb0804a812..8bf13febb2b6 100644
--- a/block/partitions/ibm.h
+++ b/block/partitions/ibm.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
int ibm_partition(struct parsed_partitions *);
diff --git a/block/partitions/karma.h b/block/partitions/karma.h
index c764b2e9df21..48e074d417fb 100644
--- a/block/partitions/karma.h
+++ b/block/partitions/karma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/karma.h
*/
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 16766f267559..6db573f33219 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program (in the main directory of the source in the file COPYING); if
- * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
index f4c6055df956..1ca63e97bccc 100644
--- a/block/partitions/ldm.h
+++ b/block/partitions/ldm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Part of the Linux-NTFS project.
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program (in the main directory of the Linux-NTFS source
- * in the file COPYING); if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _FS_PT_LDM_H_
diff --git a/block/partitions/msdos.h b/block/partitions/msdos.h
index 38c781c490b3..fcacfc486092 100644
--- a/block/partitions/msdos.h
+++ b/block/partitions/msdos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/msdos.h
*/
diff --git a/block/partitions/osf.h b/block/partitions/osf.h
index 20ed2315ec16..4d8088e7ea8c 100644
--- a/block/partitions/osf.h
+++ b/block/partitions/osf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/osf.h
*/
diff --git a/block/partitions/sgi.h b/block/partitions/sgi.h
index b9553ebdd5a9..a5b77c3987cf 100644
--- a/block/partitions/sgi.h
+++ b/block/partitions/sgi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sgi.h
*/
diff --git a/block/partitions/sun.h b/block/partitions/sun.h
index 2424baa8319f..ae1b9eed3fd7 100644
--- a/block/partitions/sun.h
+++ b/block/partitions/sun.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sun.h
*/
diff --git a/block/partitions/sysv68.h b/block/partitions/sysv68.h
index bf2f5ffa97ac..4fb6b8ec78ae 100644
--- a/block/partitions/sysv68.h
+++ b/block/partitions/sysv68.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int sysv68_partition(struct parsed_partitions *state);
diff --git a/block/partitions/ultrix.h b/block/partitions/ultrix.h
index a3cc00b2bded..9f676cead222 100644
--- a/block/partitions/ultrix.h
+++ b/block/partitions/ultrix.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/ultrix.h
*/
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 533f4aee8567..f5e0ad65e86a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/block/sed-opal.c b/block/sed-opal.c
index e0de4dd448b3..a46e8d13e16d 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Scott Bauer <scott.bauer@intel.com>
* Rafael Antognolli <rafael.antognolli@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":OPAL: " fmt
@@ -85,7 +77,6 @@ struct opal_dev {
void *data;
sec_send_recv *send_recv;
- const struct opal_step *steps;
struct mutex dev_lock;
u16 comid;
u32 hsn;
@@ -157,7 +148,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
/* C_PIN_TABLE object ID's */
- [OPAL_C_PIN_MSID] =
+ [OPAL_C_PIN_MSID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x84, 0x02},
[OPAL_C_PIN_SID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01},
@@ -181,7 +172,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-static const u8 opalmethod[][OPAL_UID_LENGTH] = {
+static const u8 opalmethod[][OPAL_METHOD_LENGTH] = {
[OPAL_PROPERTIES] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01 },
[OPAL_STARTSESSION] =
@@ -217,6 +208,7 @@ static const u8 opalmethod[][OPAL_UID_LENGTH] = {
};
static int end_opal_session_error(struct opal_dev *dev);
+static int opal_discovery0_step(struct opal_dev *dev);
struct opal_suspend_data {
struct opal_lock_unlock unlk;
@@ -382,37 +374,50 @@ static void check_geometry(struct opal_dev *dev, const void *data)
dev->lowest_lba = geo->lowest_aligned_lba;
}
-static int next(struct opal_dev *dev)
+static int execute_step(struct opal_dev *dev,
+ const struct opal_step *step, size_t stepIndex)
{
- const struct opal_step *step;
- int state = 0, error = 0;
+ int error = step->fn(dev, step->data);
- do {
- step = &dev->steps[state];
- if (!step->fn)
- break;
+ if (error) {
+ pr_debug("Step %zu (%pS) failed with error %d: %s\n",
+ stepIndex, step->fn, error,
+ opal_error_to_human(error));
+ }
- error = step->fn(dev, step->data);
- if (error) {
- pr_debug("Error on step function: %d with error %d: %s\n",
- state, error,
- opal_error_to_human(error));
-
- /* For each OPAL command we do a discovery0 then we
- * start some sort of session.
- * If we haven't passed state 1 then there was an error
- * on discovery0 or during the attempt to start a
- * session. Therefore we shouldn't attempt to terminate
- * a session, as one has not yet been created.
- */
- if (state > 1) {
- end_opal_session_error(dev);
- return error;
- }
+ return error;
+}
- }
- state++;
- } while (!error);
+static int execute_steps(struct opal_dev *dev,
+ const struct opal_step *steps, size_t n_steps)
+{
+ size_t state = 0;
+ int error;
+
+ /* first do a discovery0 */
+ error = opal_discovery0_step(dev);
+ if (error)
+ return error;
+
+ for (state = 0; state < n_steps; state++) {
+ error = execute_step(dev, &steps[state], state);
+ if (error)
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ /*
+ * For each OPAL command the first step in steps starts some sort of
+ * session. If an error occurred in the initial discovery0 or if an
+ * error occurred in the first step (and thus stopping the loop with
+ * state == 0) then there was an error before or during the attempt to
+ * start a session. Therefore we shouldn't attempt to terminate a
+ * session, as one has not yet been created.
+ */
+ if (state > 0)
+ end_opal_session_error(dev);
return error;
}
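
The old next() state machine becomes execute_step()/execute_steps(): discovery0 always runs first, then the supplied step array, and end_opal_session_error() is called only when a step after the session-opening first step fails, exactly as the comment explains. A userspace sketch of that runner shape; struct ctx, run_steps and the dummy steps are invented for illustration.

#include <stdio.h>

struct ctx { int session_open; };
typedef int (*step_fn)(struct ctx *);

static int run_steps(struct ctx *c, const step_fn *steps, size_t n,
                     step_fn discovery, step_fn end_session)
{
        size_t i;
        int err = discovery(c);          /* always do discovery first */

        if (err)
                return err;

        for (i = 0; i < n; i++) {
                err = steps[i](c);
                if (err)
                        break;
        }

        /* steps[0] is what opens the session: only close it if we got
         * past it before failing. */
        if (err && i > 0)
                end_session(c);
        return err;
}

static int discovery(struct ctx *c)     { (void)c; return 0; }
static int open_session(struct ctx *c)  { c->session_open = 1; return 0; }
static int failing_step(struct ctx *c)  { (void)c; return -1; }
static int close_session(struct ctx *c) { c->session_open = 0; return 0; }

int main(void)
{
        struct ctx c = { 0 };
        const step_fn steps[] = { open_session, failing_step };

        run_steps(&c, steps, 2, discovery, close_session);
        printf("session open: %d\n", c.session_open);   /* 0: cleaned up */
        return 0;
}
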
@@ -510,15 +515,32 @@ static int opal_discovery0(struct opal_dev *dev, void *data)
return opal_discovery0_end(dev);
}
-static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+static int opal_discovery0_step(struct opal_dev *dev)
+{
+ const struct opal_step discovery0_step = {
+ opal_discovery0,
+ };
+ return execute_step(dev, &discovery0_step, 0);
+}
+
+static bool can_add(int *err, struct opal_dev *cmd, size_t len)
{
if (*err)
- return;
- if (cmd->pos >= IO_BUFFER_LENGTH - 1) {
- pr_debug("Error adding u8: end of buffer.\n");
+ return false;
+
+ if (len > IO_BUFFER_LENGTH || cmd->pos > IO_BUFFER_LENGTH - len) {
+ pr_debug("Error adding %zu bytes: end of buffer.\n", len);
*err = -ERANGE;
- return;
+ return false;
}
+
+ return true;
+}
+
+static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+{
+ if (!can_add(err, cmd, 1))
+ return;
cmd->cmd[cmd->pos++] = tok;
}
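
can_add() centralises the buffer bounds check and writes it as len > IO_BUFFER_LENGTH || pos > IO_BUFFER_LENGTH - len, which can never wrap, unlike summing pos and len first. A self-contained demonstration of why that form matters; BUF_LEN and fits() are local to the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_LEN 2048u

static bool fits(size_t pos, size_t len)
{
        /* equivalent to pos + len <= BUF_LEN, but wrap-proof */
        return len <= BUF_LEN && pos <= BUF_LEN - len;
}

int main(void)
{
        size_t pos = 100, len = SIZE_MAX;

        printf("wrap-proof: %d\n", fits(pos, len));         /* 0 */
        printf("naive:      %d\n", pos + len <= BUF_LEN);   /* 1 (wrapped!) */
        return 0;
}
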
@@ -551,7 +573,6 @@ static void add_medium_atom_header(struct opal_dev *cmd, bool bytestring,
static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
{
-
size_t len;
int msb;
@@ -563,9 +584,8 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
msb = fls64(number);
len = DIV_ROUND_UP(msb, 8);
- if (cmd->pos >= IO_BUFFER_LENGTH - len - 1) {
+ if (!can_add(err, cmd, len + 1)) {
pr_debug("Error adding u64: end of buffer.\n");
- *err = -ERANGE;
return;
}
add_short_atom_header(cmd, false, false, len);
@@ -573,24 +593,19 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
add_token_u8(err, cmd, number >> (len * 8));
}
-static void add_token_bytestring(int *err, struct opal_dev *cmd,
- const u8 *bytestring, size_t len)
+static u8 *add_bytestring_header(int *err, struct opal_dev *cmd, size_t len)
{
size_t header_len = 1;
bool is_short_atom = true;
- if (*err)
- return;
-
if (len & ~SHORT_ATOM_LEN_MASK) {
header_len = 2;
is_short_atom = false;
}
- if (len >= IO_BUFFER_LENGTH - cmd->pos - header_len) {
+ if (!can_add(err, cmd, header_len + len)) {
pr_debug("Error adding bytestring: end of buffer.\n");
- *err = -ERANGE;
- return;
+ return NULL;
}
if (is_short_atom)
@@ -598,9 +613,19 @@ static void add_token_bytestring(int *err, struct opal_dev *cmd,
else
add_medium_atom_header(cmd, true, false, len);
- memcpy(&cmd->cmd[cmd->pos], bytestring, len);
- cmd->pos += len;
+ return &cmd->cmd[cmd->pos];
+}
+
+static void add_token_bytestring(int *err, struct opal_dev *cmd,
+ const u8 *bytestring, size_t len)
+{
+ u8 *start;
+ start = add_bytestring_header(err, cmd, len);
+ if (!start)
+ return;
+ memcpy(start, bytestring, len);
+ cmd->pos += len;
}
static int build_locking_range(u8 *buffer, size_t length, u8 lr)
@@ -623,7 +648,7 @@ static int build_locking_range(u8 *buffer, size_t length, u8 lr)
static int build_locking_user(u8 *buffer, size_t length, u8 lr)
{
if (length > OPAL_UID_LENGTH) {
- pr_debug("Can't build locking range user, Length OOB\n");
+ pr_debug("Can't build locking range user. Length OOB\n");
return -ERANGE;
}
@@ -649,6 +674,9 @@ static int cmd_finalize(struct opal_dev *cmd, u32 hsn, u32 tsn)
struct opal_header *hdr;
int err = 0;
+ /* close the parameter list opened from cmd_start */
+ add_token_u8(&err, cmd, OPAL_ENDLIST);
+
add_token_u8(&err, cmd, OPAL_ENDOFDATA);
add_token_u8(&err, cmd, OPAL_STARTLIST);
add_token_u8(&err, cmd, 0);
@@ -687,6 +715,11 @@ static const struct opal_resp_tok *response_get_token(
{
const struct opal_resp_tok *tok;
+ if (!resp) {
+ pr_debug("Response is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (n >= resp->num) {
pr_debug("Token number doesn't exist: %d, resp: %d\n",
n, resp->num);
@@ -869,27 +902,19 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
const char **store)
{
u8 skip;
- const struct opal_resp_tok *token;
+ const struct opal_resp_tok *tok;
*store = NULL;
- if (!resp) {
- pr_debug("Response is NULL\n");
- return 0;
- }
-
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- token = &resp->toks[n];
- if (token->type != OPAL_DTA_TOKENID_BYTESTRING) {
+ if (tok->type != OPAL_DTA_TOKENID_BYTESTRING) {
pr_debug("Token is not a byte string!\n");
return 0;
}
- switch (token->width) {
+ switch (tok->width) {
case OPAL_WIDTH_TINY:
case OPAL_WIDTH_SHORT:
skip = 1;
@@ -905,37 +930,29 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
return 0;
}
- *store = token->pos + skip;
- return token->len - skip;
+ *store = tok->pos + skip;
+ return tok->len - skip;
}
static u64 response_get_u64(const struct parsed_resp *resp, int n)
{
- if (!resp) {
- pr_debug("Response is NULL\n");
- return 0;
- }
+ const struct opal_resp_tok *tok;
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- if (resp->toks[n].type != OPAL_DTA_TOKENID_UINT) {
- pr_debug("Token is not unsigned it: %d\n",
- resp->toks[n].type);
+ if (tok->type != OPAL_DTA_TOKENID_UINT) {
+ pr_debug("Token is not unsigned int: %d\n", tok->type);
return 0;
}
- if (!(resp->toks[n].width == OPAL_WIDTH_TINY ||
- resp->toks[n].width == OPAL_WIDTH_SHORT)) {
- pr_debug("Atom is not short or tiny: %d\n",
- resp->toks[n].width);
+ if (tok->width != OPAL_WIDTH_TINY && tok->width != OPAL_WIDTH_SHORT) {
+ pr_debug("Atom is not short or tiny: %d\n", tok->width);
return 0;
}
- return resp->toks[n].stored.u;
+ return tok->stored.u;
}
static bool response_token_matches(const struct opal_resp_tok *token, u8 match)
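
response_get_string() and response_get_u64() now funnel through response_get_token(), which validates resp and the index once and reports failure as ERR_PTR(-EINVAL) for callers to test with IS_ERR(). The toy below imitates that encode-the-error-in-the-pointer idiom in plain C; err_ptr/is_err/ptr_err and get_token are simplified stand-ins, not the <linux/err.h> definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)      { return (void *)(uintptr_t)err; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
static inline int   is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct tok { int type; long value; };

static struct tok toks[4] = { { 1, 42 } };

static struct tok *get_token(int n)
{
        if (n < 0 || n >= 4)
                return err_ptr(-EINVAL);  /* one place validates the index */
        return &toks[n];
}

int main(void)
{
        struct tok *t = get_token(7);

        if (is_err(t))
                printf("error %ld\n", ptr_err(t));  /* -22 */
        return 0;
}
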
@@ -991,6 +1008,27 @@ static void clear_opal_cmd(struct opal_dev *dev)
memset(dev->cmd, 0, IO_BUFFER_LENGTH);
}
+static int cmd_start(struct opal_dev *dev, const u8 *uid, const u8 *method)
+{
+ int err = 0;
+
+ clear_opal_cmd(dev);
+ set_comid(dev, dev->comid);
+
+ add_token_u8(&err, dev, OPAL_CALL);
+ add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
+ add_token_bytestring(&err, dev, method, OPAL_METHOD_LENGTH);
+
+ /*
+ * Every method call is followed by its parameters enclosed within
+ * OPAL_STARTLIST and OPAL_ENDLIST tokens. We automatically open the
+ * parameter list here and close it later in cmd_finalize.
+ */
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ return err;
+}
+
static int start_opal_session_cont(struct opal_dev *dev)
{
u32 hsn, tsn;
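
cmd_start() now emits the method-call preamble (OPAL_CALL, the invoking UID and the method UID) and opens the parameter list, while cmd_finalize() closes it, so each command builder only adds its own parameters between the two. A tiny sketch of that open-in-start, close-in-finalize pairing; cmd_begin/cmd_end and the token values are placeholders for this example.

#include <stdio.h>

enum { TOK_CALL = 0xF8, TOK_STARTLIST = 0xF0, TOK_ENDLIST = 0xF1,
       TOK_EOD = 0xF9 };

struct cmd { unsigned char buf[64]; unsigned int pos; };

static void put(struct cmd *c, unsigned char t) { c->buf[c->pos++] = t; }

static void cmd_begin(struct cmd *c)
{
        put(c, TOK_CALL);
        /* invoking UID and method UID would go here */
        put(c, TOK_STARTLIST);    /* opened once, for every command */
}

static void cmd_end(struct cmd *c)
{
        put(c, TOK_ENDLIST);      /* closed once, in the finalizer */
        put(c, TOK_EOD);
}

int main(void)
{
        struct cmd c = { .pos = 0 };

        cmd_begin(&c);
        /* per-command parameters would be added here */
        cmd_end(&c);
        printf("%u tokens\n", c.pos);
        return 0;
}
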
@@ -1050,24 +1088,47 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
return opal_send_recv(dev, cont);
}
+/*
+ * request @column from table @table on device @dev. On success, the column
+ * data will be available in dev->resp->tok[4]
+ */
+static int generic_get_column(struct opal_dev *dev, const u8 *table,
+ u64 column)
+{
+ int err;
+
+ err = cmd_start(dev, table, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err)
+ return err;
+
+ return finalize_and_send(dev, parse_and_check_status);
+}
+
static int gen_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
kfree(dev->prev_data);
dev->prev_data = NULL;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GENKEY],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_GENKEY]);
if (err) {
pr_debug("Error building gen key command\n");
@@ -1105,62 +1166,39 @@ static int get_active_key_cont(struct opal_dev *dev)
static int get_active_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
+ int err;
u8 *lr = data;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
err = build_locking_range(uid, sizeof(uid), *lr);
if (err)
return err;
- err = 0;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* startCloumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* endColumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- if (err) {
- pr_debug("Error building get active key command\n");
+ err = generic_get_column(dev, uid, OPAL_ACTIVEKEY);
+ if (err)
return err;
- }
- return finalize_and_send(dev, get_active_key_cont);
+ return get_active_key_cont(dev);
}
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
{
- int err = 0;
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /* ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u8(&err, dev, rle);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /* WriteLockEnabled */
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u8(&err, dev, wle);
add_token_u8(&err, dev, OPAL_ENDNAME);
@@ -1176,7 +1214,6 @@ static int generic_lr_enable_disable(struct opal_dev *dev,
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1197,10 +1234,7 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
u8 uid[OPAL_UID_LENGTH];
struct opal_user_lr_setup *setup = data;
u8 lr;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
lr = setup->session.opal_key.lr;
err = build_locking_range(uid, sizeof(uid), lr);
@@ -1210,40 +1244,34 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
if (lr == 0)
err = enable_global_lr(dev, uid, setup);
else {
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Ranges Start */
+ add_token_u8(&err, dev, OPAL_RANGESTART);
add_token_u64(&err, dev, setup->range_start);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* Ranges length */
+ add_token_u8(&err, dev, OPAL_RANGELENGTH);
add_token_u64(&err, dev, setup->range_length);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /*ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u64(&err, dev, !!setup->RLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /*WriteLockEnabled*/
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u64(&err, dev, !!setup->WLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
}
if (err) {
pr_debug("Error building Setup Locking range command.\n");
@@ -1261,29 +1289,21 @@ static int start_generic_opal_session(struct opal_dev *dev,
u8 key_len)
{
u32 hsn;
- int err = 0;
+ int err;
if (key == NULL && auth != OPAL_ANYBODY_UID)
return OPAL_INVAL_PARAM;
- clear_opal_cmd(dev);
-
- set_comid(dev, dev->comid);
hsn = GENERIC_HOST_SESSION_NUM;
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[sp_type], OPAL_UID_LENGTH);
add_token_u8(&err, dev, 1);
switch (auth) {
case OPAL_ANYBODY_UID:
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
case OPAL_ADMIN1_UID:
case OPAL_SID_UID:
@@ -1296,7 +1316,6 @@ static int start_generic_opal_session(struct opal_dev *dev,
add_token_bytestring(&err, dev, opaluid[auth],
OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
default:
pr_debug("Cannot start Admin SP session with auth %d\n", auth);
@@ -1324,6 +1343,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
if (!key) {
const struct opal_key *okey = data;
+
ret = start_generic_opal_session(dev, OPAL_SID_UID,
OPAL_ADMINSP_UID,
okey->key,
@@ -1341,6 +1361,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
static int start_admin1LSP_opal_session(struct opal_dev *dev, void *data)
{
struct opal_key *key = data;
+
return start_generic_opal_session(dev, OPAL_ADMIN1_UID,
OPAL_LOCKINGSP_UID,
key->key, key->key_len);
@@ -1356,30 +1377,21 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
u8 *key = session->opal_key.key;
u32 hsn = GENERIC_HOST_SESSION_NUM;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- if (session->sum) {
+ if (session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->opal_key.lr);
- if (err)
- return err;
-
- } else if (session->who != OPAL_ADMIN1 && !session->sum) {
+ else if (session->who != OPAL_ADMIN1 && !session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->who - 1);
- if (err)
- return err;
- } else
+ else
memcpy(lk_ul_user, opaluid[OPAL_ADMIN1_UID], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
+ if (err)
+ return err;
+
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
OPAL_UID_LENGTH);
@@ -1392,7 +1404,6 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, 3);
add_token_bytestring(&err, dev, lk_ul_user, OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building STARTSESSION command.\n");
@@ -1404,18 +1415,10 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
static int revert_tper(struct opal_dev *dev, void *data)
{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_ADMINSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_REVERT],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, opaluid[OPAL_ADMINSP_UID],
+ opalmethod[OPAL_REVERT]);
if (err) {
pr_debug("Error building REVERT TPER command.\n");
return err;
@@ -1428,18 +1431,12 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, opaluid[OPAL_USER1_UID], OPAL_UID_LENGTH);
uid[7] = session->who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1449,7 +1446,6 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building Activate UserN command.\n");
@@ -1463,20 +1459,12 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
if (build_locking_range(uid, sizeof(uid), session->opal_key.lr) < 0)
return -ERANGE;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ERASE],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_ERASE]);
if (err) {
pr_debug("Error building Erase Locking Range Command.\n");
@@ -1488,26 +1476,20 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
static int set_mbr_done(struct opal_dev *dev, void *data)
{
u8 *mbr_done_tf = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 2); /* Done */
+ add_token_u8(&err, dev, OPAL_MBRDONE);
add_token_u8(&err, dev, *mbr_done_tf); /* Done T or F */
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR Done command\n");
@@ -1520,26 +1502,20 @@ static int set_mbr_done(struct opal_dev *dev, void *data)
static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
{
u8 *mbr_en_dis = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 1);
+ add_token_u8(&err, dev, OPAL_MBRENABLE);
add_token_u8(&err, dev, *mbr_en_dis);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR done command\n");
@@ -1552,26 +1528,19 @@ static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
struct opal_dev *dev)
{
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, cpin_uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, cpin_uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* PIN */
+ add_token_u8(&err, dev, OPAL_PIN);
add_token_bytestring(&err, dev, key, key_len);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1619,10 +1588,7 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
u8 lr_buffer[OPAL_UID_LENGTH];
u8 user_uid[OPAL_UID_LENGTH];
struct opal_lock_unlock *lkul = data;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(lr_buffer, opaluid[OPAL_LOCKINGRANGE_ACE_RDLOCKED],
OPAL_UID_LENGTH);
@@ -1637,12 +1603,8 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
user_uid[7] = lkul->session.who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
@@ -1680,7 +1642,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building add user to locking range command.\n");
@@ -1697,9 +1658,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
u8 read_locked = 1, write_locked = 1;
int err = 0;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -1714,17 +1672,15 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state... returning to uland\n");
return OPAL_INVAL_PARAM;
}
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
+
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1741,7 +1697,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building SET command.\n");
@@ -1775,7 +1730,7 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state.\n");
@@ -1796,17 +1751,10 @@ static int activate_lsp(struct opal_dev *dev, void *data)
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
u8 uint_3 = 0x83;
- int err = 0, i;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ACTIVATE],
- OPAL_UID_LENGTH);
+ int err, i;
+ err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
+ opalmethod[OPAL_ACTIVATE]);
if (opal_act->sum) {
err = build_locking_range(user_lr, sizeof(user_lr),
@@ -1814,7 +1762,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
if (err)
return err;
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, uint_3);
add_token_u8(&err, dev, 6);
@@ -1829,11 +1776,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
}
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- } else {
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
}
if (err) {
@@ -1844,17 +1786,19 @@ static int activate_lsp(struct opal_dev *dev, void *data)
return finalize_and_send(dev, parse_and_check_status);
}
-static int get_lsp_lifecycle_cont(struct opal_dev *dev)
+/* Determine if we're in the Manufactured Inactive or Active state */
+static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
{
u8 lc_status;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_LOCKINGSP_UID],
+ OPAL_LIFECYCLE);
+ if (err)
+ return err;
lc_status = response_get_u64(&dev->parsed, 4);
- /* 0x08 is Manufacured Inactive */
+ /* 0x08 is Manufactured Inactive */
/* 0x09 is Manufactured */
if (lc_status != OPAL_MANUFACTURED_INACTIVE) {
pr_debug("Couldn't determine the status of the Lifecycle state\n");
@@ -1864,56 +1808,19 @@ static int get_lsp_lifecycle_cont(struct opal_dev *dev)
return 0;
}
-/* Determine if we're in the Manufactured Inactive or Active state */
-static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error Building GET Lifecycle Status command\n");
- return err;
- }
-
- return finalize_and_send(dev, get_lsp_lifecycle_cont);
-}
-
-static int get_msid_cpin_pin_cont(struct opal_dev *dev)
+static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
{
const char *msid_pin;
size_t strlen;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_C_PIN_MSID], OPAL_PIN);
+ if (err)
+ return err;
strlen = response_get_string(&dev->parsed, 4, &msid_pin);
if (!msid_pin) {
- pr_debug("%s: Couldn't extract PIN from response\n", __func__);
+ pr_debug("Couldn't extract MSID_CPIN from response\n");
return OPAL_INVAL_PARAM;
}
@@ -1926,42 +1833,6 @@ static int get_msid_cpin_pin_cont(struct opal_dev *dev)
return 0;
}
-static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_C_PIN_MSID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 3); /* PIN */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 3); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error building Get MSID CPIN PIN command.\n");
- return err;
- }
-
- return finalize_and_send(dev, get_msid_cpin_pin_cont);
-}
-
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -1977,18 +1848,14 @@ static int end_opal_session(struct opal_dev *dev, void *data)
static int end_opal_session_error(struct opal_dev *dev)
{
- const struct opal_step error_end_session[] = {
- { end_opal_session, },
- { NULL, }
+ const struct opal_step error_end_session = {
+ end_opal_session,
};
- dev->steps = error_end_session;
- return next(dev);
+ return execute_step(dev, &error_end_session, 0);
}
-static inline void setup_opal_dev(struct opal_dev *dev,
- const struct opal_step *steps)
+static inline void setup_opal_dev(struct opal_dev *dev)
{
- dev->steps = steps;
dev->tsn = 0;
dev->hsn = 0;
dev->prev_data = NULL;
@@ -1996,15 +1863,11 @@ static inline void setup_opal_dev(struct opal_dev *dev,
static int check_opal_support(struct opal_dev *dev)
{
- const struct opal_step steps[] = {
- { opal_discovery0, },
- { NULL, }
- };
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = opal_discovery0_step(dev);
dev->supported = !ret;
mutex_unlock(&dev->dev_lock);
return ret;
@@ -2057,18 +1920,16 @@ static int opal_secure_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ get_active_key, &opal_session->opal_key.lr },
{ gen_key, },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2077,17 +1938,15 @@ static int opal_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ erase_locking_range, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2095,15 +1954,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
struct opal_mbr_data *opal_mbr)
{
+ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+ OPAL_TRUE : OPAL_FALSE;
+
const struct opal_step mbr_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_done, &opal_mbr->enable_disable },
+ { set_mbr_done, &enable_disable },
{ end_opal_session, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_enable_disable, &opal_mbr->enable_disable },
- { end_opal_session, },
- { NULL, }
+ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, }
};
int ret;
@@ -2112,8 +1972,8 @@ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, mbr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2130,7 +1990,7 @@ static int opal_save(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk)
suspend->lr = lk_unlk->session.opal_key.lr;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
add_suspend_info(dev, suspend);
mutex_unlock(&dev->dev_lock);
return 0;
@@ -2140,11 +2000,9 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &lk_unlk->session.opal_key },
{ add_user_to_lr, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2166,8 +2024,8 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, steps, ARRAY_SIZE(steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2175,16 +2033,14 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step revert_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, opal },
- { revert_tper, }, /* controller will terminate session */
- { NULL, }
+ { revert_tper, } /* controller will terminate session */
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, revert_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, revert_steps, ARRAY_SIZE(revert_steps));
mutex_unlock(&dev->dev_lock);
/*
@@ -2201,37 +2057,34 @@ static int __opal_lock_unlock(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step unlock_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
const struct opal_step unlock_sum_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range_sum, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = lk_unlk->session.sum ? unlock_sum_steps : unlock_steps;
- return next(dev);
+ if (lk_unlk->session.sum)
+ return execute_steps(dev, unlock_sum_steps,
+ ARRAY_SIZE(unlock_sum_steps));
+ else
+ return execute_steps(dev, unlock_steps,
+ ARRAY_SIZE(unlock_steps));
}
static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
{
- u8 mbr_done_tf = 1;
- const struct opal_step mbrdone_step [] = {
- { opal_discovery0, },
+ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step[] = {
{ start_admin1LSP_opal_session, key },
{ set_mbr_done, &mbr_done_tf },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = mbrdone_step;
- return next(dev);
+ return execute_steps(dev, mbrdone_step, ARRAY_SIZE(mbrdone_step));
}
static int opal_lock_unlock(struct opal_dev *dev,
@@ -2252,14 +2105,12 @@ static int opal_lock_unlock(struct opal_dev *dev,
static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step owner_steps[] = {
- { opal_discovery0, },
{ start_anybodyASP_opal_session, },
{ get_msid_cpin_pin, },
{ end_opal_session, },
{ start_SIDASP_opal_session, opal },
{ set_sid_cpin_pin, opal },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2267,21 +2118,20 @@ static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
return -ENODEV;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, owner_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, owner_steps, ARRAY_SIZE(owner_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
-static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_act)
+static int opal_activate_lsp(struct opal_dev *dev,
+ struct opal_lr_act *opal_lr_act)
{
const struct opal_step active_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, &opal_lr_act->key },
{ get_lsp_lifecycle, },
{ activate_lsp, opal_lr_act },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2289,8 +2139,8 @@ static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_a
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, active_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, active_steps, ARRAY_SIZE(active_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2299,17 +2149,15 @@ static int opal_setup_locking_range(struct opal_dev *dev,
struct opal_user_lr_setup *opal_lrs)
{
const struct opal_step lr_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_lrs->session },
{ setup_locking_range, opal_lrs },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, lr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, lr_steps, ARRAY_SIZE(lr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2317,11 +2165,9 @@ static int opal_setup_locking_range(struct opal_dev *dev,
static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
{
const struct opal_step pw_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_pw->session },
{ set_new_pw, &opal_pw->new_user_pw },
- { end_opal_session, },
- { NULL }
+ { end_opal_session, }
};
int ret;
@@ -2332,8 +2178,8 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, pw_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, pw_steps, ARRAY_SIZE(pw_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2342,11 +2188,9 @@ static int opal_activate_user(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step act_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_session->opal_key },
{ internal_activate_user, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2358,8 +2202,8 @@ static int opal_activate_user(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, act_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, act_steps, ARRAY_SIZE(act_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2376,7 +2220,7 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
return false;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
list_for_each_entry(suspend, &dev->unlk_lst, node) {
dev->tsn = 0;
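
The sed-opal refactor above drops the NULL-terminated step tables and the shared dev->steps/next() walker: each opal_* entry point now builds a stack-local array, loses its { opal_discovery0, } entry and { NULL, } terminator, and hands the table to execute_steps() together with ARRAY_SIZE(). A minimal userspace sketch of that pattern follows; struct demo_step, run_steps() and the demo step functions are illustrative stand-ins, not the kernel code itself.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dev;				/* opaque device handle */

struct demo_step {
	int (*fn)(struct dev *d, void *data);
	void *data;
};

/* Run each step in order and stop at the first failure. */
static int run_steps(struct dev *d, const struct demo_step *steps, size_t n)
{
	size_t i;
	int err;

	for (i = 0; i < n; i++) {
		err = steps[i].fn(d, steps[i].data);
		if (err)
			return err;	/* caller decides how to clean up */
	}
	return 0;
}

static int step_a(struct dev *d, void *data) { puts("step a"); return 0; }
static int step_b(struct dev *d, void *data) { puts("step b"); return 0; }

int main(void)
{
	const struct demo_step demo[] = {
		{ step_a, },
		{ step_b, },		/* no { NULL, } terminator needed */
	};

	return run_steps(NULL, demo, ARRAY_SIZE(demo));
}

With the length coming from ARRAY_SIZE() at compile time, a forgotten sentinel entry can no longer truncate or overrun a table.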
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 62aed77d0bb9..0c0094609dd6 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -1,24 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* t10_pi.c - Functions for generating and verifying T10 Protection
* Information.
- *
- * Copyright (C) 2007, 2008, 2014 Oracle Corporation
- * Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/t10-pi.h>
diff --git a/crypto/842.c b/crypto/842.c
index bc26dc942821..5f98393b65d1 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -144,7 +144,7 @@ static int __init crypto842_mod_init(void)
return ret;
}
-module_init(crypto842_mod_init);
+subsys_initcall(crypto842_mod_init);
static void __exit crypto842_mod_exit(void)
{
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bbab6bf33519..3d056e7da65f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -27,8 +27,8 @@ config CRYPTO_FIPS
depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
depends on (MODULE_SIG || !MODULES)
help
- This options enables the fips boot option which is
- required if you want to system to operate in a FIPS 200
+ This option enables the fips boot option which is
+ required if you want the system to operate in a FIPS 200
certification. You should say no unless you know what
this is.
@@ -113,29 +113,6 @@ config CRYPTO_ACOMP
select CRYPTO_ALGAPI
select CRYPTO_ACOMP2
-config CRYPTO_RSA
- tristate "RSA algorithm"
- select CRYPTO_AKCIPHER
- select CRYPTO_MANAGER
- select MPILIB
- select ASN1
- help
- Generic implementation of the RSA public key algorithm.
-
-config CRYPTO_DH
- tristate "Diffie-Hellman algorithm"
- select CRYPTO_KPP
- select MPILIB
- help
- Generic implementation of the Diffie-Hellman algorithm.
-
-config CRYPTO_ECDH
- tristate "ECDH algorithm"
- select CRYPTO_KPP
- select CRYPTO_RNG_DEFAULT
- help
- Generic implementation of the ECDH algorithm
-
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_MANAGER2
@@ -253,6 +230,48 @@ config CRYPTO_GLUE_HELPER_X86
config CRYPTO_ENGINE
tristate
+comment "Public-key cryptography"
+
+config CRYPTO_RSA
+ tristate "RSA algorithm"
+ select CRYPTO_AKCIPHER
+ select CRYPTO_MANAGER
+ select MPILIB
+ select ASN1
+ help
+ Generic implementation of the RSA public key algorithm.
+
+config CRYPTO_DH
+ tristate "Diffie-Hellman algorithm"
+ select CRYPTO_KPP
+ select MPILIB
+ help
+ Generic implementation of the Diffie-Hellman algorithm.
+
+config CRYPTO_ECC
+ tristate
+
+config CRYPTO_ECDH
+ tristate "ECDH algorithm"
+ select CRYPTO_ECC
+ select CRYPTO_KPP
+ select CRYPTO_RNG_DEFAULT
+ help
+ Generic implementation of the ECDH algorithm
+
+config CRYPTO_ECRDSA
+ tristate "EC-RDSA (GOST 34.10) algorithm"
+ select CRYPTO_ECC
+ select CRYPTO_AKCIPHER
+ select CRYPTO_STREEBOG
+ select OID_REGISTRY
+ select ASN1
+ help
+ Elliptic Curve Russian Digital Signature Algorithm (GOST R 34.10-2012,
+ RFC 7091, ISO/IEC 14888-3:2018) is one of the Russian cryptographic
+ standard algorithms (called GOST algorithms). Only signature verification
+ is implemented.
+
comment "Authenticated Encryption with Associated Data"
config CRYPTO_CCM
@@ -310,25 +329,25 @@ config CRYPTO_AEGIS128_AESNI_SSE2
tristate "AEGIS-128 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-128 dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-128 dedicated AEAD algorithm.
config CRYPTO_AEGIS128L_AESNI_SSE2
tristate "AEGIS-128L AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-128L dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-128L dedicated AEAD algorithm.
config CRYPTO_AEGIS256_AESNI_SSE2
tristate "AEGIS-256 AEAD algorithm (x86_64 AESNI+SSE2 implementation)"
depends on X86 && 64BIT
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
- AESNI+SSE2 implementation of the AEGSI-256 dedicated AEAD algorithm.
+ AESNI+SSE2 implementation of the AEGIS-256 dedicated AEAD algorithm.
config CRYPTO_MORUS640
tristate "MORUS-640 AEAD algorithm"
@@ -340,7 +359,7 @@ config CRYPTO_MORUS640_GLUE
tristate
depends on X86
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
Common glue for SIMD optimizations of the MORUS-640 dedicated AEAD
algorithm.
@@ -363,7 +382,7 @@ config CRYPTO_MORUS1280_GLUE
tristate
depends on X86
select CRYPTO_AEAD
- select CRYPTO_CRYPTD
+ select CRYPTO_SIMD
help
Common glue for SIMD optimizations of the MORUS-1280 dedicated AEAD
algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index fb5bf2a3a666..266a4cdbb9e2 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -147,12 +147,20 @@ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
obj-$(CONFIG_CRYPTO_OFB) += ofb.o
+obj-$(CONFIG_CRYPTO_ECC) += ecc.o
-ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
ecdh_generic-y += ecdh_helper.o
obj-$(CONFIG_CRYPTO_ECDH) += ecdh_generic.o
+$(obj)/ecrdsa_params.asn1.o: $(obj)/ecrdsa_params.asn1.c $(obj)/ecrdsa_params.asn1.h
+$(obj)/ecrdsa_pub_key.asn1.o: $(obj)/ecrdsa_pub_key.asn1.c $(obj)/ecrdsa_pub_key.asn1.h
+$(obj)/ecrdsa.o: $(obj)/ecrdsa_params.asn1.h $(obj)/ecrdsa_pub_key.asn1.h
+ecrdsa_generic-y += ecrdsa.o
+ecrdsa_generic-y += ecrdsa_params.asn1.o
+ecrdsa_generic-y += ecrdsa_pub_key.asn1.o
+obj-$(CONFIG_CRYPTO_ECRDSA) += ecrdsa_generic.o
+
#
# generic algorithms and the async_tx api
#
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 5564e73266a6..395a3ddd3707 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -265,7 +265,6 @@ static int adiantum_hash_message(struct skcipher_request *req,
int err;
hash_desc->tfm = tctx->hash;
- hash_desc->flags = 0;
err = crypto_shash_init(hash_desc);
if (err)
@@ -659,7 +658,7 @@ static void __exit adiantum_module_exit(void)
crypto_unregister_template(&adiantum_tmpl);
}
-module_init(adiantum_module_init);
+subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);
MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index 3718a8341303..d78f77fc5dd1 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -448,7 +448,7 @@ static void __exit crypto_aegis128_module_exit(void)
crypto_unregister_aead(&crypto_aegis128_alg);
}
-module_init(crypto_aegis128_module_init);
+subsys_initcall(crypto_aegis128_module_init);
module_exit(crypto_aegis128_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index 275a8616d71b..9bca3d619a22 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -512,7 +512,7 @@ static void __exit crypto_aegis128l_module_exit(void)
crypto_unregister_aead(&crypto_aegis128l_alg);
}
-module_init(crypto_aegis128l_module_init);
+subsys_initcall(crypto_aegis128l_module_init);
module_exit(crypto_aegis128l_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index ecd6b7f34a2d..b47fd39595ad 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -463,7 +463,7 @@ static void __exit crypto_aegis256_module_exit(void)
crypto_unregister_aead(&crypto_aegis256_alg);
}
-module_init(crypto_aegis256_module_init);
+subsys_initcall(crypto_aegis256_module_init);
module_exit(crypto_aegis256_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 13df33aca463..f217568917e4 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -64,7 +64,7 @@ static inline u8 byte(const u32 x, const unsigned n)
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
/* cacheline-aligned to facilitate prefetching into cache */
-__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -328,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -592,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -856,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
}
};
-__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
+__visible const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
@@ -1470,7 +1470,7 @@ static void __exit aes_fini(void)
crypto_unregister_alg(&aes_alg);
}
-module_init(aes_init);
+subsys_initcall(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index 0cbeae137e0a..780daa436dac 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -119,10 +119,24 @@ static void akcipher_prepare_alg(struct akcipher_alg *alg)
base->cra_flags |= CRYPTO_ALG_TYPE_AKCIPHER;
}
+static int akcipher_default_op(struct akcipher_request *req)
+{
+ return -ENOSYS;
+}
+
int crypto_register_akcipher(struct akcipher_alg *alg)
{
struct crypto_alg *base = &alg->base;
+ if (!alg->sign)
+ alg->sign = akcipher_default_op;
+ if (!alg->verify)
+ alg->verify = akcipher_default_op;
+ if (!alg->encrypt)
+ alg->encrypt = akcipher_default_op;
+ if (!alg->decrypt)
+ alg->decrypt = akcipher_default_op;
+
akcipher_prepare_alg(alg);
return crypto_register_alg(base);
}
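
crypto_register_akcipher() above now points any missing sign/verify/encrypt/decrypt callback at a stub that returns -ENOSYS, so callers can invoke every operation without NULL checks. A hedged standalone sketch of the same fill-in-defaults-at-registration idea; the ops struct, register_ops() and my_encrypt() below are illustrative, not kernel API.

#include <errno.h>
#include <stdio.h>

struct req;				/* placeholder request type */

struct ops {
	int (*sign)(struct req *r);
	int (*verify)(struct req *r);
	int (*encrypt)(struct req *r);
	int (*decrypt)(struct req *r);
};

static int default_op(struct req *r)
{
	return -ENOSYS;			/* operation not provided */
}

/* Point every unset callback at the default before the ops go live. */
static void register_ops(struct ops *o)
{
	if (!o->sign)
		o->sign = default_op;
	if (!o->verify)
		o->verify = default_op;
	if (!o->encrypt)
		o->encrypt = default_op;
	if (!o->decrypt)
		o->decrypt = default_op;
}

static int my_encrypt(struct req *r) { return 0; }

int main(void)
{
	struct ops o = { .encrypt = my_encrypt };	/* only encrypt implemented */

	register_ops(&o);
	printf("sign -> %d, encrypt -> %d\n", o.sign(NULL), o.encrypt(NULL));
	return 0;
}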
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 527b44d0af21..bb97cfb38836 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -296,7 +296,13 @@ static void __exit cryptomgr_exit(void)
BUG_ON(err);
}
-subsys_initcall(cryptomgr_init);
+/*
+ * This is arch_initcall() so that the crypto self-tests are run on algorithms
+ * registered early by subsys_initcall(). subsys_initcall() is needed for
+ * generic implementations so that they're available for comparison tests when
+ * other implementations are registered later by module_init().
+ */
+arch_initcall(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index eff337ce9003..e7c43ea4ce9d 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -472,7 +472,7 @@ MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
module_param(dbg, int, 0);
MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
-module_init(prng_mod_init);
+subsys_initcall(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 4bb187c2a902..673927de0eb9 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -699,7 +699,7 @@ static void __exit anubis_mod_fini(void)
crypto_unregister_alg(&anubis_alg);
}
-module_init(anubis_mod_init);
+subsys_initcall(anubis_mod_init);
module_exit(anubis_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 6c93342e3405..2233d36456e2 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -163,7 +163,7 @@ static void __exit arc4_exit(void)
crypto_unregister_skcipher(&arc4_skcipher);
}
-module_init(arc4_init);
+subsys_initcall(arc4_init);
module_exit(arc4_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
index 5d4c270463f6..76d2ce3a1b5b 100644
--- a/crypto/asymmetric_keys/asym_tpm.c
+++ b/crypto/asymmetric_keys/asym_tpm.c
@@ -276,6 +276,10 @@ static int tpm_sign(struct tpm_buf *tb,
return datalen;
}
+
+/* Room to fit two u32 zeros for algo id and parameters length. */
+#define SETKEY_PARAMS_SIZE (sizeof(u32) * 2)
+
/*
* Maximum buffer size for the BER/DER encoded public key. The public key
* is of the form SEQUENCE { INTEGER n, INTEGER e } where n is a maximum 2048
@@ -286,8 +290,9 @@ static int tpm_sign(struct tpm_buf *tb,
* - 257 bytes of n
* - max 2 bytes for INTEGER e type/length
* - 3 bytes of e
+ * - 4+4 of zeros for set_pub_key parameters (SETKEY_PARAMS_SIZE)
*/
-#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
+#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3 + SETKEY_PARAMS_SIZE)
/*
* Provide a part of a description of the key for /proc/keys.
@@ -364,6 +369,8 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
cur = encode_tag_length(cur, 0x02, sizeof(e));
memcpy(cur, e, sizeof(e));
cur += sizeof(e);
+ /* Zero parameters to satisfy set_pub_key ABI. */
+ memset(cur, 0, SETKEY_PARAMS_SIZE);
return cur - buf;
}
@@ -744,12 +751,10 @@ static int tpm_key_verify_signature(const struct key *key,
struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
- struct scatterlist sig_sg, digest_sg;
+ struct scatterlist src_sg[2];
char alg_name[CRYPTO_MAX_ALG_NAME];
uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
uint32_t der_pub_key_len;
- void *output;
- unsigned int outlen;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -781,37 +786,17 @@ static int tpm_key_verify_signature(const struct key *key,
if (!req)
goto error_free_tfm;
- ret = -ENOMEM;
- outlen = crypto_akcipher_maxsize(tfm);
- output = kmalloc(outlen, GFP_KERNEL);
- if (!output)
- goto error_free_req;
-
- sg_init_one(&sig_sg, sig->s, sig->s_size);
- sg_init_one(&digest_sg, output, outlen);
- akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
- outlen);
+ sg_init_table(src_sg, 2);
+ sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+ sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+ akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+ sig->digest_size);
crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
-
- /* Perform the verification calculation. This doesn't actually do the
- * verification, but rather calculates the hash expected by the
- * signature and returns that to us.
- */
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
- if (ret)
- goto out_free_output;
-
- /* Do the actual verification step. */
- if (req->dst_len != sig->digest_size ||
- memcmp(sig->digest, output, sig->digest_size) != 0)
- ret = -EKEYREJECTED;
-out_free_output:
- kfree(output);
-error_free_req:
akcipher_request_free(req);
error_free_tfm:
crypto_free_akcipher(tfm);
diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
index 97c77f66b20d..f7b0980bf02d 100644
--- a/crypto/asymmetric_keys/pkcs7_verify.c
+++ b/crypto/asymmetric_keys/pkcs7_verify.c
@@ -56,7 +56,6 @@ static int pkcs7_digest(struct pkcs7_message *pkcs7,
goto error_no_desc;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
/* Digest the message [RFC2315 9.3] */
ret = crypto_shash_digest(desc, pkcs7->data, pkcs7->data_len,
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index f5d85b47fcc6..77e0ae7840ff 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -45,6 +45,7 @@ void public_key_free(struct public_key *key)
{
if (key) {
kfree(key->key);
+ kfree(key->params);
kfree(key);
}
}
@@ -94,6 +95,12 @@ int software_key_determine_akcipher(const char *encoding,
return -ENOPKG;
}
+static u8 *pkey_pack_u32(u8 *dst, u32 val)
+{
+ memcpy(dst, &val, sizeof(val));
+ return dst + sizeof(val);
+}
+
/*
* Query information about a key.
*/
@@ -103,6 +110,7 @@ static int software_key_query(const struct kernel_pkey_params *params,
struct crypto_akcipher *tfm;
struct public_key *pkey = params->key->payload.data[asym_crypto];
char alg_name[CRYPTO_MAX_ALG_NAME];
+ u8 *key, *ptr;
int ret, len;
ret = software_key_determine_akcipher(params->encoding,
@@ -115,14 +123,22 @@ static int software_key_query(const struct kernel_pkey_params *params,
if (IS_ERR(tfm))
return PTR_ERR(tfm);
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_tfm;
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret < 0)
- goto error_free_tfm;
+ goto error_free_key;
len = crypto_akcipher_maxsize(tfm);
info->key_size = len * 8;
@@ -137,6 +153,8 @@ static int software_key_query(const struct kernel_pkey_params *params,
KEYCTL_SUPPORTS_SIGN);
ret = 0;
+error_free_key:
+ kfree(key);
error_free_tfm:
crypto_free_akcipher(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);
@@ -155,6 +173,7 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
struct crypto_wait cwait;
struct scatterlist in_sg, out_sg;
char alg_name[CRYPTO_MAX_ALG_NAME];
+ char *key, *ptr;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -173,14 +192,23 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (!req)
goto error_free_tfm;
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_req;
+
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
- goto error_free_req;
+ goto error_free_key;
sg_init_one(&in_sg, in, params->in_len);
sg_init_one(&out_sg, out, params->out_len);
@@ -210,6 +238,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params,
if (ret == 0)
ret = req->dst_len;
+error_free_key:
+ kfree(key);
error_free_req:
akcipher_request_free(req);
error_free_tfm:
@@ -227,10 +257,9 @@ int public_key_verify_signature(const struct public_key *pkey,
struct crypto_wait cwait;
struct crypto_akcipher *tfm;
struct akcipher_request *req;
- struct scatterlist sig_sg, digest_sg;
+ struct scatterlist src_sg[2];
char alg_name[CRYPTO_MAX_ALG_NAME];
- void *output;
- unsigned int outlen;
+ char *key, *ptr;
int ret;
pr_devel("==>%s()\n", __func__);
@@ -254,45 +283,37 @@ int public_key_verify_signature(const struct public_key *pkey,
if (!req)
goto error_free_tfm;
+ key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+ GFP_KERNEL);
+ if (!key)
+ goto error_free_req;
+
+ memcpy(key, pkey->key, pkey->keylen);
+ ptr = key + pkey->keylen;
+ ptr = pkey_pack_u32(ptr, pkey->algo);
+ ptr = pkey_pack_u32(ptr, pkey->paramlen);
+ memcpy(ptr, pkey->params, pkey->paramlen);
+
if (pkey->key_is_private)
- ret = crypto_akcipher_set_priv_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
else
- ret = crypto_akcipher_set_pub_key(tfm,
- pkey->key, pkey->keylen);
+ ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
if (ret)
- goto error_free_req;
-
- ret = -ENOMEM;
- outlen = crypto_akcipher_maxsize(tfm);
- output = kmalloc(outlen, GFP_KERNEL);
- if (!output)
- goto error_free_req;
+ goto error_free_key;
- sg_init_one(&sig_sg, sig->s, sig->s_size);
- sg_init_one(&digest_sg, output, outlen);
- akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
- outlen);
+ sg_init_table(src_sg, 2);
+ sg_set_buf(&src_sg[0], sig->s, sig->s_size);
+ sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
+ akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+ sig->digest_size);
crypto_init_wait(&cwait);
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &cwait);
-
- /* Perform the verification calculation. This doesn't actually do the
- * verification, but rather calculates the hash expected by the
- * signature and returns that to us.
- */
ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
- if (ret)
- goto out_free_output;
-
- /* Do the actual verification step. */
- if (req->dst_len != sig->digest_size ||
- memcmp(sig->digest, output, sig->digest_size) != 0)
- ret = -EKEYREJECTED;
-out_free_output:
- kfree(output);
+error_free_key:
+ kfree(key);
error_free_req:
akcipher_request_free(req);
error_free_tfm:
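
In software_key_query(), software_key_eds_op() and public_key_verify_signature() above, the buffer handed to set_pub_key/set_priv_key now carries a trailer: the raw key bytes are followed by two u32 values written with pkey_pack_u32() (the key algorithm and the parameter length) and then the BER-encoded parameters, while the length argument stays pkey->keylen. A minimal sketch of that key || algo || paramlen || params layout, using fake data and an illustrative pack_u32() helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append a native-endian u32 and return the advanced pointer,
 * mirroring the pkey_pack_u32() helper shown in the hunk above. */
static uint8_t *pack_u32(uint8_t *dst, uint32_t val)
{
	memcpy(dst, &val, sizeof(val));
	return dst + sizeof(val);
}

int main(void)
{
	const uint8_t keydata[] = { 0x30, 0x82, 0x01, 0x0a };	/* fake DER key */
	const uint8_t params[]  = { 0x05, 0x00 };		/* fake parameters */
	uint32_t algo = 42;					/* fake OID enum value */
	size_t len = sizeof(keydata) + 2 * sizeof(uint32_t) + sizeof(params);
	uint8_t *buf = malloc(len), *p;

	if (!buf)
		return 1;

	/* Layout: key || algo || paramlen || params */
	memcpy(buf, keydata, sizeof(keydata));
	p = buf + sizeof(keydata);
	p = pack_u32(p, algo);
	p = pack_u32(p, (uint32_t)sizeof(params));
	memcpy(p, params, sizeof(params));

	printf("packed %zu bytes (keylen=%zu, paramlen=%zu)\n",
	       len, sizeof(keydata), sizeof(params));
	free(buf);
	return 0;
}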
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c
index d178650fd524..f8e4a932bcfb 100644
--- a/crypto/asymmetric_keys/verify_pefile.c
+++ b/crypto/asymmetric_keys/verify_pefile.c
@@ -354,7 +354,6 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
goto error_no_desc;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_init(desc);
if (ret < 0)
goto error;
diff --git a/crypto/asymmetric_keys/x509.asn1 b/crypto/asymmetric_keys/x509.asn1
index aae0cde414e2..5c9f4e4a5231 100644
--- a/crypto/asymmetric_keys/x509.asn1
+++ b/crypto/asymmetric_keys/x509.asn1
@@ -22,7 +22,7 @@ CertificateSerialNumber ::= INTEGER
AlgorithmIdentifier ::= SEQUENCE {
algorithm OBJECT IDENTIFIER ({ x509_note_OID }),
- parameters ANY OPTIONAL
+ parameters ANY OPTIONAL ({ x509_note_params })
}
Name ::= SEQUENCE OF RelativeDistinguishedName
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 991f4d735a4e..5b7bfd95c334 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -26,6 +26,9 @@ struct x509_parse_context {
const void *cert_start; /* Start of cert content */
const void *key; /* Key data */
size_t key_size; /* Size of key data */
+ const void *params; /* Key parameters */
+ size_t params_size; /* Size of key parameters */
+ enum OID key_algo; /* Public key algorithm */
enum OID last_oid; /* Last OID encountered */
enum OID algo_oid; /* Algorithm OID */
unsigned char nr_mpi; /* Number of MPIs stored */
@@ -109,6 +112,13 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
cert->pub->keylen = ctx->key_size;
+ cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
+ if (!cert->pub->params)
+ goto error_decode;
+
+ cert->pub->paramlen = ctx->params_size;
+ cert->pub->algo = ctx->key_algo;
+
/* Grab the signature bits */
ret = x509_get_sig_params(cert);
if (ret < 0)
@@ -220,6 +230,14 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
case OID_sha224WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+
+ case OID_gost2012Signature256:
+ ctx->cert->sig->hash_algo = "streebog256";
+ goto ecrdsa;
+
+ case OID_gost2012Signature512:
+ ctx->cert->sig->hash_algo = "streebog512";
+ goto ecrdsa;
}
rsa_pkcs1:
@@ -227,6 +245,11 @@ rsa_pkcs1:
ctx->cert->sig->encoding = "pkcs1";
ctx->algo_oid = ctx->last_oid;
return 0;
+ecrdsa:
+ ctx->cert->sig->pkey_algo = "ecrdsa";
+ ctx->cert->sig->encoding = "raw";
+ ctx->algo_oid = ctx->last_oid;
+ return 0;
}
/*
@@ -246,7 +269,8 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
- if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+ if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
+ strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
@@ -401,6 +425,27 @@ int x509_note_subject(void *context, size_t hdrlen,
}
/*
+ * Extract the parameters for the public key
+ */
+int x509_note_params(void *context, size_t hdrlen,
+ unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct x509_parse_context *ctx = context;
+
+ /*
+ * AlgorithmIdentifier is used three times in the x509, we should skip
+ * first and ignore third, using second one which is after subject and
+ * before subjectPublicKey.
+ */
+ if (!ctx->cert->raw_subject || ctx->key)
+ return 0;
+ ctx->params = value - hdrlen;
+ ctx->params_size = vlen + hdrlen;
+ return 0;
+}
+
+/*
* Extract the data for the public key algorithm
*/
int x509_extract_key_data(void *context, size_t hdrlen,
@@ -409,11 +454,15 @@ int x509_extract_key_data(void *context, size_t hdrlen,
{
struct x509_parse_context *ctx = context;
- if (ctx->last_oid != OID_rsaEncryption)
+ ctx->key_algo = ctx->last_oid;
+ if (ctx->last_oid == OID_rsaEncryption)
+ ctx->cert->pub->pkey_algo = "rsa";
+ else if (ctx->last_oid == OID_gost2012PKey256 ||
+ ctx->last_oid == OID_gost2012PKey512)
+ ctx->cert->pub->pkey_algo = "ecrdsa";
+ else
return -ENOPKG;
- ctx->cert->pub->pkey_algo = "rsa";
-
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
return -EBADMSG;
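
x509_note_params() above keeps only the middle of the three AlgorithmIdentifier occurrences by testing "subject already parsed, key not yet parsed", and it records the element together with its ASN.1 header by stepping back hdrlen bytes. A simplified sketch of that positional filter; the context struct, flags and sample bytes are illustrative, not the parser's real types.

#include <stddef.h>
#include <stdio.h>

struct parse_ctx {
	int have_subject;	/* set once the subject has been parsed */
	int have_key;		/* set once subjectPublicKey has been parsed */
	const void *params;
	size_t params_size;
};

/* Keep the parameters only from the AlgorithmIdentifier that sits between
 * the subject and the public key, including its tag/length header. */
static void note_params(struct parse_ctx *ctx, size_t hdrlen,
			const unsigned char *value, size_t vlen)
{
	if (!ctx->have_subject || ctx->have_key)
		return;				/* first or third occurrence */
	ctx->params = value - hdrlen;
	ctx->params_size = vlen + hdrlen;
}

int main(void)
{
	static const unsigned char der[] = { 0x06, 0x03, 0x2a, 0x85, 0x03 };
	struct parse_ctx ctx = { 0 };

	note_params(&ctx, 2, der + 2, 3);	/* 1st: before subject, skipped */
	ctx.have_subject = 1;
	note_params(&ctx, 2, der + 2, 3);	/* 2nd: recorded with header */
	ctx.have_key = 1;
	note_params(&ctx, 2, der + 2, 3);	/* 3rd: ignored */

	printf("captured %zu bytes starting at the header\n", ctx.params_size);
	return 0;
}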
diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
index 9338b4558cdc..bd96683d8cde 100644
--- a/crypto/asymmetric_keys/x509_public_key.c
+++ b/crypto/asymmetric_keys/x509_public_key.c
@@ -77,7 +77,6 @@ int x509_get_sig_params(struct x509_certificate *cert)
goto error;
desc->tfm = tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size, sig->digest);
if (ret < 0)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4be293a4b5f0..b3eddac7fa3a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -508,7 +508,7 @@ static void __exit crypto_authenc_module_exit(void)
crypto_unregister_template(&crypto_authenc_tmpl);
}
-module_init(crypto_authenc_module_init);
+subsys_initcall(crypto_authenc_module_init);
module_exit(crypto_authenc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4741fe89ba2c..58074308e535 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -523,7 +523,7 @@ static void __exit crypto_authenc_esn_module_exit(void)
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
-module_init(crypto_authenc_esn_module_init);
+subsys_initcall(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 87b392a77a93..8548ced8b074 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -133,7 +133,7 @@ static void __exit blowfish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(blowfish_mod_init);
+subsys_initcall(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 32ddd4836ff5..15ce1281f5d9 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1092,7 +1092,7 @@ static void __exit camellia_fini(void)
crypto_unregister_alg(&camellia_alg);
}
-module_init(camellia_init);
+subsys_initcall(camellia_init);
module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 66169c178314..24bc7d4e33be 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -543,7 +543,7 @@ static void __exit cast5_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(cast5_mod_init);
+subsys_initcall(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index c8e5ec69790e..edd59cc34991 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -285,7 +285,7 @@ static void __exit cast6_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(cast6_mod_init);
+subsys_initcall(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index d12efaac9230..129f79d03365 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -98,7 +98,7 @@ static void __exit crypto_cbc_module_exit(void)
crypto_unregister_template(&crypto_cbc_tmpl);
}
-module_init(crypto_cbc_module_init);
+subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 50df8f001c1c..c1ef9d0b4271 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
- const char *full_name,
const char *ctr_name,
const char *mac_name)
{
@@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
mac = __crypto_hash_alg_common(mac_alg);
err = -EINVAL;
- if (mac->digestsize != 16)
+ if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+ mac->digestsize != 16)
goto out_put_mac;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
- /* Not a stream cipher? */
+ /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
- if (ctr->base.cra_blocksize != 1)
+ if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+ crypto_skcipher_alg_ivsize(ctr) != 16 ||
+ ctr->base.cra_blocksize != 1)
goto err_drop_ctr;
- /* We want the real thing! */
- if (crypto_skcipher_alg_ivsize(ctr) != 16)
+ /* ctr and cbcmac must use the same underlying block cipher. */
+ if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
goto err_drop_ctr;
err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+ goto err_drop_ctr;
+
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
- memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
@@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
- char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -581,35 +584,24 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
- CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
- mac_name);
+ return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_ccm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
const char *ctr_name;
- const char *cipher_name;
- char full_name[CRYPTO_MAX_ALG_NAME];
+ const char *mac_name;
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return PTR_ERR(ctr_name);
- cipher_name = crypto_attr_alg_name(tb[2]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
- ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
+ mac_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(mac_name))
+ return PTR_ERR(mac_name);
- return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
- cipher_name);
+ return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1014,7 +1006,7 @@ static void __exit crypto_ccm_module_exit(void)
ARRAY_SIZE(crypto_ccm_tmpls));
}
-module_init(crypto_ccm_module_init);
+subsys_initcall(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
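
crypto_ccm_create_common() above now derives the instance name from the inner algorithms instead of a caller-supplied full_name: the skcipher must be named ctr(X), the hash cbcmac(X) with the same X, and the exposed name becomes ccm(X). A hedged userspace sketch of that check and derivation; the constants and build_ccm_name() are illustrative.

#include <stdio.h>
#include <string.h>

#define MAX_ALG_NAME 128

/* Derive "ccm(X)" from "ctr(X)" and "cbcmac(X)", refusing mismatched pairs,
 * along the lines of the checks added in the hunks above. */
static int build_ccm_name(char *out, size_t outlen,
			  const char *ctr_name, const char *mac_name)
{
	if (strncmp(ctr_name, "ctr(", 4) != 0 ||
	    strncmp(mac_name, "cbcmac(", 7) != 0)
		return -1;

	/* Both suffixes are "X)" and must match exactly. */
	if (strcmp(ctr_name + 4, mac_name + 7) != 0)
		return -1;

	if (snprintf(out, outlen, "ccm(%s", ctr_name + 4) >= (int)outlen)
		return -1;		/* name too long */
	return 0;
}

int main(void)
{
	char name[MAX_ALG_NAME];

	if (build_ccm_name(name, sizeof(name), "ctr(aes)", "cbcmac(aes)") == 0)
		printf("instance name: %s\n", name);	/* prints ccm(aes) */

	if (build_ccm_name(name, sizeof(name), "ctr(aes)", "cbcmac(sm4)") != 0)
		puts("mismatched underlying ciphers rejected");
	return 0;
}

Running the helper with ctr(aes)/cbcmac(sm4) shows a mismatched pair being rejected rather than silently named ccm(aes).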
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 03ac847f6d6a..7b68fbb61732 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -243,7 +243,7 @@ static void __exit crypto_cfb_module_exit(void)
crypto_unregister_template(&crypto_cfb_tmpl);
}
-module_init(crypto_cfb_module_init);
+subsys_initcall(crypto_cfb_module_init);
module_exit(crypto_cfb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index ed2e12e26dd8..e38a2d61819a 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -645,8 +645,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
- "%s(%s,%s)", name, chacha_name,
- poly_name) >= CRYPTO_MAX_ALG_NAME)
+ "%s(%s,%s)", name, chacha->base.cra_name,
+ poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,
@@ -725,7 +725,7 @@ static void __exit chacha20poly1305_module_exit(void)
ARRAY_SIZE(rfc7539_tmpls));
}
-module_init(chacha20poly1305_module_init);
+subsys_initcall(chacha20poly1305_module_init);
module_exit(chacha20poly1305_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 35b583101f4f..d2ec04997832 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -22,18 +22,16 @@ static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
/* aligned to potentially speed up crypto_xor() */
u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
- if (dst != src)
- memcpy(dst, src, bytes);
-
while (bytes >= CHACHA_BLOCK_SIZE) {
chacha_block(state, stream, nrounds);
- crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE);
bytes -= CHACHA_BLOCK_SIZE;
dst += CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
}
if (bytes) {
chacha_block(state, stream, nrounds);
- crypto_xor(dst, stream, bytes);
+ crypto_xor_cpy(dst, src, stream, bytes);
}
}
@@ -52,7 +50,7 @@ static int chacha_stream_xor(struct skcipher_request *req,
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
+ nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
nbytes, ctx->nrounds);
@@ -203,7 +201,7 @@ static void __exit chacha_generic_mod_fini(void)
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
-module_init(chacha_generic_mod_init);
+subsys_initcall(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);
MODULE_LICENSE("GPL");
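
chacha_docrypt() above no longer copies src into dst up front; crypto_xor_cpy() reads the source, xors the keystream and writes the destination in one pass, with src advanced alongside dst. A small standalone sketch of an xor-copy helper; xor_cpy() here is an illustrative stand-in, not the kernel's crypto_xor_cpy().

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* dst[i] = src[i] ^ keystream[i]: one pass instead of memcpy() plus in-place xor. */
static void xor_cpy(uint8_t *dst, const uint8_t *src,
		    const uint8_t *keystream, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}

int main(void)
{
	const uint8_t src[4]    = { 0x10, 0x20, 0x30, 0x40 };
	const uint8_t stream[4] = { 0xff, 0xff, 0xff, 0xff };	/* stand-in keystream */
	uint8_t dst[4];
	size_t i;

	xor_cpy(dst, src, stream, sizeof(dst));
	for (i = 0; i < sizeof(dst); i++)
		printf("%02x ", dst[i]);	/* ef df cf bf */
	printf("\n");
	return 0;
}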
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 16301f52858c..c60b6c011ec6 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,7 +313,7 @@ static void __exit crypto_cmac_module_exit(void)
crypto_unregister_template(&crypto_cmac_tmpl);
}
-module_init(crypto_cmac_module_init);
+subsys_initcall(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index 00facd27bcc2..9e97912280bd 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -146,7 +146,7 @@ static void __exit crc32_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crc32_mod_init);
+subsys_initcall(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 7283066ecc98..ad26f15d4c7b 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -165,7 +165,7 @@ static void __exit crc32c_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crc32c_mod_init);
+subsys_initcall(crc32c_mod_init);
module_exit(crc32c_mod_fini);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 8e94e29dc6fc..d90c0070710e 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
- u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
- *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+ *(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- return __chksum_finup(&ctx->crc, data, len, out);
+ return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
- struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
- return __chksum_finup(&ctx->crc, data, length, out);
+ return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
@@ -115,7 +112,7 @@ static void __exit crct10dif_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(crct10dif_mod_init);
+subsys_initcall(crct10dif_mod_init);
module_exit(crct10dif_mod_fini);
MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5640e5db7bdb..b3bb99390ae7 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -65,15 +65,6 @@ struct aead_instance_ctx {
struct cryptd_queue *queue;
};
-struct cryptd_blkcipher_ctx {
- atomic_t refcnt;
- struct crypto_blkcipher *child;
-};
-
-struct cryptd_blkcipher_request_ctx {
- crypto_completion_t complete;
-};
-
struct cryptd_skcipher_ctx {
atomic_t refcnt;
struct crypto_sync_skcipher *child;
@@ -216,129 +207,6 @@ static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}
-static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
- const u8 *key, unsigned int keylen)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
- struct crypto_blkcipher *child = ctx->child;
- int err;
-
- crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(child, key, keylen);
- crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
- struct crypto_blkcipher *child,
- int err,
- int (*crypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int len))
-{
- struct cryptd_blkcipher_request_ctx *rctx;
- struct cryptd_blkcipher_ctx *ctx;
- struct crypto_ablkcipher *tfm;
- struct blkcipher_desc desc;
- int refcnt;
-
- rctx = ablkcipher_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- desc.tfm = child;
- desc.info = req->info;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypt(&desc, req->dst, req->src, req->nbytes);
-
- req->base.complete = rctx->complete;
-
-out:
- tfm = crypto_ablkcipher_reqtfm(req);
- ctx = crypto_ablkcipher_ctx(tfm);
- refcnt = atomic_read(&ctx->refcnt);
-
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-
- if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
- crypto_free_ablkcipher(tfm);
-}
-
-static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
-
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->encrypt);
-}
-
-static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
-
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->decrypt);
-}
-
-static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
- crypto_completion_t compl)
-{
- struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cryptd_queue *queue;
-
- queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
- rctx->complete = req->base.complete;
- req->base.complete = compl;
-
- return cryptd_enqueue_request(queue, &req->base);
-}
-
-static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
-{
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
-}
-
-static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
-{
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
-}
-
-static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *cipher;
-
- cipher = crypto_spawn_blkcipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
- tfm->crt_ablkcipher.reqsize =
- sizeof(struct cryptd_blkcipher_request_ctx);
- return 0;
-}
-
-static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(ctx->child);
-}
-
static int cryptd_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
@@ -382,67 +250,6 @@ out_free_inst:
goto out;
}
-static int cryptd_create_blkcipher(struct crypto_template *tmpl,
- struct rtattr **tb,
- struct cryptd_queue *queue)
-{
- struct cryptd_instance_ctx *ctx;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
- u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
- int err;
-
- cryptd_check_internal(tb, &type, &mask);
-
- alg = crypto_get_attr_alg(tb, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
-
- inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = crypto_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.cra_flags = type;
- inst->alg.cra_type = &crypto_ablkcipher_type;
-
- inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
- inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
-
- inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
-
- inst->alg.cra_init = cryptd_blkcipher_init_tfm;
- inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
-
- inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
- inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
- inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
-
- err = crypto_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_spawn(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
@@ -738,7 +545,6 @@ static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
goto out;
desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_init(desc);
@@ -830,7 +636,6 @@ static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
goto out;
desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = shash_ahash_digest(req, desc);
@@ -859,7 +664,6 @@ static int cryptd_hash_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = cryptd_shash_desc(req);
desc->tfm = ctx->child;
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
@@ -1118,10 +922,6 @@ static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
- if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return cryptd_create_blkcipher(tmpl, tb, &queue);
-
return cryptd_create_skcipher(tmpl, tb, &queue);
case CRYPTO_ALG_TYPE_DIGEST:
return cryptd_create_hash(tmpl, tb, &queue);
@@ -1160,58 +960,6 @@ static struct crypto_template cryptd_tmpl = {
.module = THIS_MODULE,
};
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask)
-{
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct cryptd_blkcipher_ctx *ctx;
- struct crypto_tfm *tfm;
-
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- type = crypto_skcipher_type(type);
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_tfm(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- ctx = crypto_tfm_ctx(tfm);
- atomic_set(&ctx->refcnt, 1);
-
- return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
-}
-EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
-
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
-
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
- return atomic_read(&ctx->refcnt) - 1;
-}
-EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);
-
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
-{
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
-
- if (atomic_dec_and_test(&ctx->refcnt))
- crypto_free_ablkcipher(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
-
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 01630a9c7e01..9320d4eaa4a8 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -220,7 +220,7 @@ static void __exit crypto_null_mod_fini(void)
crypto_unregister_skcipher(&skcipher_null);
}
-module_init(crypto_null_mod_init);
+subsys_initcall(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index f25d3f32c9c2..e48da3b75c71 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -465,8 +465,8 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
- err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
- crypto_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
+ CRYPTOCFGA_MAX, crypto_policy, extack);
if (err < 0)
return err;
diff --git a/crypto/ctr.c b/crypto/ctr.c
index ec8f8b67473a..52cdf2c5605f 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -384,7 +384,7 @@ static void __exit crypto_ctr_module_exit(void)
ARRAY_SIZE(crypto_ctr_tmpls));
}
-module_init(crypto_ctr_module_init);
+subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/cts.c b/crypto/cts.c
index 4e28d83ae37d..6b6087dbb62a 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -152,12 +152,14 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
- int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
unsigned int offset;
skcipher_request_set_tfm(subreq, ctx->child);
- if (cbc_blocks <= 0) {
+ if (nbytes < bsize)
+ return -EINVAL;
+
+ if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
@@ -166,7 +168,7 @@ static int crypto_cts_encrypt(struct skcipher_request *req)
return crypto_skcipher_encrypt(subreq);
}
- offset = cbc_blocks * bsize;
+ offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
skcipher_request_set_callback(subreq, req->base.flags,
@@ -244,13 +246,15 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
struct skcipher_request *subreq = &rctx->subreq;
int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = req->cryptlen;
- int cbc_blocks = (nbytes + bsize - 1) / bsize - 1;
unsigned int offset;
u8 *space;
skcipher_request_set_tfm(subreq, ctx->child);
- if (cbc_blocks <= 0) {
+ if (nbytes < bsize)
+ return -EINVAL;
+
+ if (nbytes == bsize) {
skcipher_request_set_callback(subreq, req->base.flags,
req->base.complete,
req->base.data);
@@ -264,10 +268,10 @@ static int crypto_cts_decrypt(struct skcipher_request *req)
space = crypto_cts_reqctx_space(req);
- offset = cbc_blocks * bsize;
+ offset = rounddown(nbytes - 1, bsize);
rctx->offset = offset;
- if (cbc_blocks <= 1)
+ if (offset <= bsize)
memcpy(space, req->iv, bsize);
else
scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
@@ -419,7 +423,7 @@ static void __exit crypto_cts_module_exit(void)
crypto_unregister_template(&crypto_cts_tmpl);
}
-module_init(crypto_cts_module_init);
+subsys_initcall(crypto_cts_module_init);
module_exit(crypto_cts_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 94ec3b36a8e8..aab089cde1bf 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -334,7 +334,7 @@ static void __exit deflate_mod_fini(void)
crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
}
-module_init(deflate_mod_init);
+subsys_initcall(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 1e6621665dd9..d7a88b4fa611 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -862,14 +862,11 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
unsigned int keylen)
{
- const u32 *K = (const u32 *)key;
+ int err;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
des_ekey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
dkey(expkey, key); expkey += DES_EXPKEY_WORDS; key += DES_KEY_SIZE;
@@ -993,7 +990,7 @@ static void __exit des_generic_mod_fini(void)
crypto_unregister_algs(des_algs, ARRAY_SIZE(des_algs));
}
-module_init(des_generic_mod_init);
+subsys_initcall(des_generic_mod_init);
module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/dh.c b/crypto/dh.c
index 09a44de4209d..ce77fb4ee8b3 100644
--- a/crypto/dh.c
+++ b/crypto/dh.c
@@ -236,7 +236,7 @@ static void dh_exit(void)
crypto_unregister_kpp(&dh);
}
-module_init(dh_init);
+subsys_initcall(dh_init);
module_exit(dh_exit);
MODULE_ALIAS_CRYPTO("dh");
MODULE_LICENSE("GPL");
diff --git a/crypto/drbg.c b/crypto/drbg.c
index bc52d9562611..2a5b16bb000c 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -1587,7 +1587,6 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
}
sdesc->shash.tfm = tfm;
- sdesc->shash.flags = 0;
drbg->priv_data = sdesc;
return crypto_shash_alignmask(tfm);
@@ -2039,7 +2038,7 @@ static void __exit drbg_exit(void)
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
-module_init(drbg_init);
+subsys_initcall(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 0732715c8d91..de839129d151 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -101,7 +101,7 @@ static void __exit crypto_ecb_module_exit(void)
crypto_unregister_template(&crypto_ecb_tmpl);
}
-module_init(crypto_ecb_module_init);
+subsys_initcall(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecc.c b/crypto/ecc.c
index ed1237115066..dfe114bc0c4a 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2013, Kenneth MacKay
- * All rights reserved.
+ * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -24,12 +24,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
+#include <asm/unaligned.h>
+#include <linux/ratelimit.h>
#include "ecc.h"
#include "ecc_curve_defs.h"
@@ -112,7 +115,7 @@ static void vli_clear(u64 *vli, unsigned int ndigits)
}
/* Returns true if vli == 0, false otherwise. */
-static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
+bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
int i;
@@ -123,6 +126,7 @@ static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
return true;
}
+EXPORT_SYMBOL(vli_is_zero);
/* Returns nonzero if bit bit of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
@@ -130,6 +134,11 @@ static u64 vli_test_bit(const u64 *vli, unsigned int bit)
return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}
+static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
+{
+ return vli_test_bit(vli, ndigits * 64 - 1);
+}
+
/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
@@ -161,6 +170,27 @@ static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
return ((num_digits - 1) * 64 + i);
}
+/* Set dest from unaligned bit string src. */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
+{
+ int i;
+ const u64 *from = src;
+
+ for (i = 0; i < ndigits; i++)
+ dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
+}
+EXPORT_SYMBOL(vli_from_be64);
+
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
+{
+ int i;
+ const u64 *from = src;
+
+ for (i = 0; i < ndigits; i++)
+ dest[i] = get_unaligned_le64(&from[i]);
+}
+EXPORT_SYMBOL(vli_from_le64);
+
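A minimal user-space sketch of what vli_from_be64() above does, assuming only standard C (get_be64() below stands in for the kernel's get_unaligned_be64()): a big-endian byte string is loaded into an array of u64 digits stored least significant digit first, which is the layout the rest of ecc.c works with.

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const unsigned char *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static void vli_from_be64(uint64_t *dest, const void *src, unsigned int ndigits)
{
	const unsigned char *from = src;
	unsigned int i;

	for (i = 0; i < ndigits; i++)
		dest[i] = get_be64(from + 8 * (ndigits - 1 - i));
}

int main(void)
{
	/* 16-byte big-endian number 0x000102...0e0f */
	unsigned char buf[16];
	uint64_t vli[2];
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;

	vli_from_be64(vli, buf, 2);
	/* Least significant digit first: vli[0] holds bytes 8..15. */
	printf("vli[0]=%016llx vli[1]=%016llx\n",
	       (unsigned long long)vli[0], (unsigned long long)vli[1]);
	return 0;
}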
/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
@@ -171,7 +201,7 @@ static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
}
/* Returns sign of left - right. */
-static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
int i;
@@ -184,6 +214,7 @@ static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
return 0;
}
+EXPORT_SYMBOL(vli_cmp);
/* Computes result = in << c, returning carry. Can modify in place
* (if result == in). 0 < shift < 64.
@@ -239,8 +270,30 @@ static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
return carry;
}
+/* Computes result = left + right, returning carry. Can modify in place. */
+static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
+ unsigned int ndigits)
+{
+ u64 carry = right;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 sum;
+
+ sum = left[i] + carry;
+ if (sum != left[i])
+ carry = (sum < left[i]);
+ else
+ carry = !!carry;
+
+ result[i] = sum;
+ }
+
+ return carry;
+}
+
/* Computes result = left - right, returning borrow. Can modify in place. */
-static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
unsigned int ndigits)
{
u64 borrow = 0;
@@ -258,9 +311,37 @@ static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
return borrow;
}
+EXPORT_SYMBOL(vli_sub);
+
+/* Computes result = left - right, returning borrow. Can modify in place. */
+static u64 vli_usub(u64 *result, const u64 *left, u64 right,
+ unsigned int ndigits)
+{
+ u64 borrow = right;
+ int i;
+
+ for (i = 0; i < ndigits; i++) {
+ u64 diff;
+
+ diff = left[i] - borrow;
+ if (diff != left[i])
+ borrow = (diff > left[i]);
+
+ result[i] = diff;
+ }
+
+ return borrow;
+}
static uint128_t mul_64_64(u64 left, u64 right)
{
+ uint128_t result;
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+ unsigned __int128 m = (unsigned __int128)left * right;
+
+ result.m_low = m;
+ result.m_high = m >> 64;
+#else
u64 a0 = left & 0xffffffffull;
u64 a1 = left >> 32;
u64 b0 = right & 0xffffffffull;
@@ -269,7 +350,6 @@ static uint128_t mul_64_64(u64 left, u64 right)
u64 m1 = a0 * b1;
u64 m2 = a1 * b0;
u64 m3 = a1 * b1;
- uint128_t result;
m2 += (m0 >> 32);
m2 += m1;
@@ -280,7 +360,7 @@ static uint128_t mul_64_64(u64 left, u64 right)
result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
result.m_high = m3 + (m2 >> 32);
-
+#endif
return result;
}
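The same split can be exercised in user space; the sketch below (types and preprocessor test adapted for a standalone build) takes the __int128 path when the compiler offers it and otherwise assembles the product from four 32x32 partial products, exactly like the fallback above.

#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t lo, hi; };

static struct u128 mul_64_64(uint64_t left, uint64_t right)
{
	struct u128 result;
#ifdef __SIZEOF_INT128__
	unsigned __int128 m = (unsigned __int128)left * right;

	result.lo = (uint64_t)m;
	result.hi = (uint64_t)(m >> 64);
#else
	uint64_t a0 = left & 0xffffffffull, a1 = left >> 32;
	uint64_t b0 = right & 0xffffffffull, b1 = right >> 32;
	uint64_t m0 = a0 * b0, m1 = a0 * b1, m2 = a1 * b0, m3 = a1 * b1;

	m2 += (m0 >> 32);
	m2 += m1;
	if (m2 < m1)		/* overflow of the middle sum */
		m3 += 0x100000000ull;

	result.lo = (m0 & 0xffffffffull) | (m2 << 32);
	result.hi = m3 + (m2 >> 32);
#endif
	return result;
}

int main(void)
{
	struct u128 p = mul_64_64(0xffffffffffffffffull, 0xffffffffffffffffull);

	/* (2^64 - 1)^2 = 0xfffffffffffffffe_0000000000000001 */
	printf("hi=%016llx lo=%016llx\n",
	       (unsigned long long)p.hi, (unsigned long long)p.lo);
	return 0;
}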
@@ -330,6 +410,28 @@ static void vli_mult(u64 *result, const u64 *left, const u64 *right,
result[ndigits * 2 - 1] = r01.m_low;
}
+/* Compute product = left * right, for a small right value. */
+static void vli_umult(u64 *result, const u64 *left, u32 right,
+ unsigned int ndigits)
+{
+ uint128_t r01 = { 0 };
+ unsigned int k;
+
+ for (k = 0; k < ndigits; k++) {
+ uint128_t product;
+
+ product = mul_64_64(left[k], right);
+ r01 = add_128_128(r01, product);
+ /* no carry */
+ result[k] = r01.m_low;
+ r01.m_low = r01.m_high;
+ r01.m_high = 0;
+ }
+ result[k] = r01.m_low;
+ for (++k; k < ndigits * 2; k++)
+ result[k] = 0;
+}
+
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
uint128_t r01 = { 0, 0 };
@@ -402,6 +504,170 @@ static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
vli_add(result, result, mod, ndigits);
}
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^k-c, for small c (note the minus sign)
+ *
+ * References:
+ * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
+ * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
+ * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
+ */
+static void vli_mmod_special(u64 *result, const u64 *product,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 c = -mod[0];
+ u64 t[ECC_MAX_DIGITS * 2];
+ u64 r[ECC_MAX_DIGITS * 2];
+
+ vli_set(r, product, ndigits * 2);
+ while (!vli_is_zero(r + ndigits, ndigits)) {
+ vli_umult(t, r + ndigits, c, ndigits);
+ vli_clear(r + ndigits, ndigits);
+ vli_add(r, r, t, ndigits * 2);
+ }
+ vli_set(t, mod, ndigits);
+ vli_clear(t + ndigits, ndigits);
+ while (vli_cmp(r, t, ndigits * 2) >= 0)
+ vli_sub(r, r, t, ndigits * 2);
+ vli_set(result, r, ndigits);
+}
+
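A scalar toy of the folding idea behind vli_mmod_special(), using k = 16 and c = 15 instead of real curve sizes: since 2^k ≡ c (mod 2^k - c), the high half of a product can be multiplied by c and added back into the low half until it disappears, leaving at most a small final correction.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t c = 15;
	const uint32_t p = (1u << 16) - c;	/* 65521 */
	uint32_t a = 60000, b = 50000;
	uint32_t r = a * b % p;			/* reference result */
	uint32_t x = a * b;			/* fits in 32 bits here */

	/* Fold the high 16 bits back in until they are zero. */
	while (x >> 16)
		x = (x & 0xffff) + (x >> 16) * c;
	/* A final conditional subtraction finishes the reduction. */
	while (x >= p)
		x -= p;

	printf("special: %u, reference: %u\n", x, r);
	return 0;
}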
+/*
+ * Computes result = product % mod
+ * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign),
+ * where the exponent k-1 falls one bit short of a qword boundary (such as 255).
+ *
+ * References (loosely based on):
+ * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
+ * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
+ * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
+ *
+ * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
+ * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
+ * Algorithm 10.25 Fast reduction for special form moduli
+ */
+static void vli_mmod_special2(u64 *result, const u64 *product,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 c2 = mod[0] * 2;
+ u64 q[ECC_MAX_DIGITS];
+ u64 r[ECC_MAX_DIGITS * 2];
+ u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
+ int carry; /* last bit that doesn't fit into q */
+ int i;
+
+ vli_set(m, mod, ndigits);
+ vli_clear(m + ndigits, ndigits);
+
+ vli_set(r, product, ndigits);
+ /* q and carry are top bits */
+ vli_set(q, product + ndigits, ndigits);
+ vli_clear(r + ndigits, ndigits);
+ carry = vli_is_negative(r, ndigits);
+ if (carry)
+ r[ndigits - 1] &= (1ull << 63) - 1;
+ for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
+ u64 qc[ECC_MAX_DIGITS * 2];
+
+ vli_umult(qc, q, c2, ndigits);
+ if (carry)
+ vli_uadd(qc, qc, mod[0], ndigits * 2);
+ vli_set(q, qc + ndigits, ndigits);
+ vli_clear(qc + ndigits, ndigits);
+ carry = vli_is_negative(qc, ndigits);
+ if (carry)
+ qc[ndigits - 1] &= (1ull << 63) - 1;
+ if (i & 1)
+ vli_sub(r, r, qc, ndigits * 2);
+ else
+ vli_add(r, r, qc, ndigits * 2);
+ }
+ while (vli_is_negative(r, ndigits * 2))
+ vli_add(r, r, m, ndigits * 2);
+ while (vli_cmp(r, m, ndigits * 2) >= 0)
+ vli_sub(r, r, m, ndigits * 2);
+
+ vli_set(result, r, ndigits);
+}
+
+/*
+ * Computes result = product % mod, where product is 2N words long.
+ * Reference: Ken MacKay's micro-ecc.
+ * Currently only designed to work for curve_p or curve_n.
+ */
+static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
+ unsigned int ndigits)
+{
+ u64 mod_m[2 * ECC_MAX_DIGITS];
+ u64 tmp[2 * ECC_MAX_DIGITS];
+ u64 *v[2] = { tmp, product };
+ u64 carry = 0;
+ unsigned int i;
+ /* Shift mod so its highest set bit is at the maximum position. */
+ int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
+ int word_shift = shift / 64;
+ int bit_shift = shift % 64;
+
+ vli_clear(mod_m, word_shift);
+ if (bit_shift > 0) {
+ for (i = 0; i < ndigits; ++i) {
+ mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
+ carry = mod[i] >> (64 - bit_shift);
+ }
+ } else
+ vli_set(mod_m + word_shift, mod, ndigits);
+
+ for (i = 1; shift >= 0; --shift) {
+ u64 borrow = 0;
+ unsigned int j;
+
+ for (j = 0; j < ndigits * 2; ++j) {
+ u64 diff = v[i][j] - mod_m[j] - borrow;
+
+ if (diff != v[i][j])
+ borrow = (diff > v[i][j]);
+ v[1 - i][j] = diff;
+ }
+ i = !(i ^ borrow); /* Swap the index if there was no borrow */
+ vli_rshift1(mod_m, ndigits);
+ mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
+ vli_rshift1(mod_m + ndigits, ndigits);
+ }
+ vli_set(result, v[i], ndigits);
+}
+
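A scalar toy of the same shift-and-conditionally-subtract idea, with single u64 values instead of double-width vlis: the modulus is shifted up so its top bit aligns with bit 63, then one trial subtraction per shift position leaves only the remainder, just like binary long division that discards the quotient.

#include <stdint.h>
#include <stdio.h>

static unsigned int num_bits(uint64_t v)
{
	unsigned int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	uint64_t product = 0x123456789abcdefull;
	uint64_t mod = 0xfedcba9ull;
	int shift = 64 - (int)num_bits(mod);	/* align top bits */
	uint64_t m = mod << shift;
	uint64_t r = product;

	for (; shift >= 0; shift--) {
		if (r >= m)
			r -= m;
		m >>= 1;
	}

	printf("slow: %llx, reference: %llx\n",
	       (unsigned long long)r, (unsigned long long)(product % mod));
	return 0;
}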
+/* Computes result = product % mod using Barrett's reduction with precomputed
+ * value mu appended to the mod after ndigits, where mu = (2^{2w} / mod)
+ * has length ndigits + 1 and mu * (2^w - 1) must not overflow the ndigits
+ * boundary.
+ *
+ * Reference:
+ * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
+ * 2.4.1 Barrett's algorithm. Algorithm 2.5.
+ */
+static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
+ unsigned int ndigits)
+{
+ u64 q[ECC_MAX_DIGITS * 2];
+ u64 r[ECC_MAX_DIGITS * 2];
+ const u64 *mu = mod + ndigits;
+
+ vli_mult(q, product + ndigits, mu, ndigits);
+ if (mu[ndigits])
+ vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
+ vli_mult(r, mod, q + ndigits, ndigits);
+ vli_sub(r, product, r, ndigits * 2);
+ while (!vli_is_zero(r + ndigits, ndigits) ||
+ vli_cmp(r, mod, ndigits) != -1) {
+ u64 carry;
+
+ carry = vli_sub(r, r, mod, ndigits);
+ vli_usub(r + ndigits, r + ndigits, carry, ndigits);
+ }
+ vli_set(result, r, ndigits);
+}
+
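A scalar sketch of Barrett's reduction with a 16-bit modulus (the values below are chosen only for illustration): mu = floor(2^32 / m) is precomputed once, q = floor(x * mu / 2^32) approximates the true quotient to within two, and a couple of conditional subtractions finish the reduction.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t m = 64499;			/* modulus */
	const uint64_t mu = ((uint64_t)1 << 32) / m;	/* precomputed */
	uint32_t a = 60001, b = 59999;
	uint64_t x = (uint64_t)a * b;

	uint64_t q = (x * mu) >> 32;	/* quotient estimate */
	uint64_t r = x - q * m;

	while (r >= m)			/* at most two corrections */
		r -= m;

	printf("barrett: %llu, reference: %llu\n",
	       (unsigned long long)r, (unsigned long long)(x % m));
	return 0;
}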
/* Computes p_result = p_product % curve_p.
* See algorithm 5 and 6 from
* http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
@@ -509,14 +775,33 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
}
}
-/* Computes result = product % curve_prime
- * from http://www.nsa.gov/ia/_files/nist-routines.pdf
-*/
+/* Computes result = product % curve_prime for different curve_primes.
+ *
+ * Note that curve_primes are distinguished only by a heuristic check,
+ * not by a complete conformance check.
+ */
static bool vli_mmod_fast(u64 *result, u64 *product,
const u64 *curve_prime, unsigned int ndigits)
{
u64 tmp[2 * ECC_MAX_DIGITS];
+ /* Currently, both NIST primes have -1 in lowest qword. */
+ if (curve_prime[0] != -1ull) {
+ /* Try to handle Pseudo-Mersenne primes. */
+ if (curve_prime[ndigits - 1] == -1ull) {
+ vli_mmod_special(result, product, curve_prime,
+ ndigits);
+ return true;
+ } else if (curve_prime[ndigits - 1] == 1ull << 63 &&
+ curve_prime[ndigits - 2] == 0) {
+ vli_mmod_special2(result, product, curve_prime,
+ ndigits);
+ return true;
+ }
+ vli_mmod_barrett(result, product, curve_prime, ndigits);
+ return true;
+ }
+
switch (ndigits) {
case 3:
vli_mmod_fast_192(result, product, curve_prime, tmp);
@@ -525,13 +810,26 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
default:
- pr_err("unsupports digits size!\n");
+ pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
}
return true;
}
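The dispatch can be modelled in user space; the sketch below (enum names invented for the example) classifies a curve prime by its top and bottom digits the same way vli_mmod_fast() above does, using the cp256a and cp256b primes that appear later in this series in ecrdsa_defs.h.

#include <stdint.h>
#include <stdio.h>

enum reduction { NIST_FAST, SPECIAL, SPECIAL2, BARRETT };

static enum reduction classify(const uint64_t *p, unsigned int ndigits)
{
	if (p[0] == ~0ull)			/* NIST P-192 / P-256 */
		return NIST_FAST;
	if (p[ndigits - 1] == ~0ull)		/* p = 2^k - c */
		return SPECIAL;
	if (p[ndigits - 1] == 1ull << 63 && p[ndigits - 2] == 0)
		return SPECIAL2;		/* p = 2^{k-1} + c */
	return BARRETT;				/* e.g. cp256c */
}

int main(void)
{
	/* cp256a: p = 2^256 - 617, least significant digit first */
	uint64_t cp256a[4] = { 0xFFFFFFFFFFFFFD97ull, ~0ull, ~0ull, ~0ull };
	/* cp256b: p = 2^255 + 3225 */
	uint64_t cp256b[4] = { 0xC99, 0, 0, 0x8000000000000000ull };

	printf("cp256a -> %d, cp256b -> %d\n",
	       classify(cp256a, 4), classify(cp256b, 4));
	return 0;
}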
+/* Computes result = (left * right) % mod.
+ * Assumes that @mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits)
+{
+ u64 product[ECC_MAX_DIGITS * 2];
+
+ vli_mult(product, left, right, ndigits);
+ vli_mmod_slow(result, product, mod, ndigits);
+}
+EXPORT_SYMBOL(vli_mod_mult_slow);
+
/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
const u64 *curve_prime, unsigned int ndigits)
@@ -557,7 +855,7 @@ static void vli_mod_square_fast(u64 *result, const u64 *left,
* See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
* https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
*/
-static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
unsigned int ndigits)
{
u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
@@ -630,6 +928,7 @@ static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
vli_set(result, u, ndigits);
}
+EXPORT_SYMBOL(vli_mod_inv);
/* ------ Point operations ------ */
@@ -903,6 +1202,85 @@ static void ecc_point_mult(struct ecc_point *result,
vli_set(result->y, ry[0], ndigits);
}
+/* Computes R = P + Q mod p */
+static void ecc_point_add(const struct ecc_point *result,
+ const struct ecc_point *p, const struct ecc_point *q,
+ const struct ecc_curve *curve)
+{
+ u64 z[ECC_MAX_DIGITS];
+ u64 px[ECC_MAX_DIGITS];
+ u64 py[ECC_MAX_DIGITS];
+ unsigned int ndigits = curve->g.ndigits;
+
+ vli_set(result->x, q->x, ndigits);
+ vli_set(result->y, q->y, ndigits);
+ vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
+ vli_set(px, p->x, ndigits);
+ vli_set(py, p->y, ndigits);
+ xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+ vli_mod_inv(z, z, curve->p, ndigits);
+ apply_z(result->x, result->y, z, curve->p, ndigits);
+}
+
+/* Computes R = u1P + u2Q mod p using Shamir's trick.
+ * Based on: Kenneth MacKay's micro-ecc (2014).
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *u1, const struct ecc_point *p,
+ const u64 *u2, const struct ecc_point *q,
+ const struct ecc_curve *curve)
+{
+ u64 z[ECC_MAX_DIGITS];
+ u64 sump[2][ECC_MAX_DIGITS];
+ u64 *rx = result->x;
+ u64 *ry = result->y;
+ unsigned int ndigits = curve->g.ndigits;
+ unsigned int num_bits;
+ struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
+ const struct ecc_point *points[4];
+ const struct ecc_point *point;
+ unsigned int idx;
+ int i;
+
+ ecc_point_add(&sum, p, q, curve);
+ points[0] = NULL;
+ points[1] = p;
+ points[2] = q;
+ points[3] = &sum;
+
+ num_bits = max(vli_num_bits(u1, ndigits),
+ vli_num_bits(u2, ndigits));
+ i = num_bits - 1;
+ idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ point = points[idx];
+
+ vli_set(rx, point->x, ndigits);
+ vli_set(ry, point->y, ndigits);
+ vli_clear(z + 1, ndigits - 1);
+ z[0] = 1;
+
+ for (--i; i >= 0; i--) {
+ ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
+ idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
+ point = points[idx];
+ if (point) {
+ u64 tx[ECC_MAX_DIGITS];
+ u64 ty[ECC_MAX_DIGITS];
+ u64 tz[ECC_MAX_DIGITS];
+
+ vli_set(tx, point->x, ndigits);
+ vli_set(ty, point->y, ndigits);
+ apply_z(tx, ty, z, curve->p, ndigits);
+ vli_mod_sub(tz, rx, tx, curve->p, ndigits);
+ xycz_add(tx, ty, rx, ry, curve->p, ndigits);
+ vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
+ }
+ }
+ vli_mod_inv(z, z, curve->p, ndigits);
+ apply_z(rx, ry, z, curve->p, ndigits);
+}
+EXPORT_SYMBOL(ecc_point_mult_shamir);
+
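A toy model of the interleaved loop, with the group operations replaced by integer doubling and addition so the result can be checked against plain u1*p + u2*q; the idx computation mirrors the kernel loop, though this sketch simply starts at bit 63 instead of the highest set bit and folds the NULL case into a zero table entry.

#include <stdint.h>
#include <stdio.h>

static int test_bit(uint64_t v, int bit)
{
	return (v >> bit) & 1;
}

int main(void)
{
	uint64_t u1 = 0xB3, u2 = 0x5D;		/* scalars */
	uint64_t p = 17, q = 41;		/* stand-ins for points */
	uint64_t points[4] = { 0, p, q, p + q };/* precomputed table */
	uint64_t acc = 0;
	int i;

	for (i = 63; i >= 0; i--) {
		int idx = test_bit(u1, i) | (test_bit(u2, i) << 1);

		acc = acc * 2;			/* "point doubling" */
		acc += points[idx];		/* one "addition" per bit */
	}

	printf("shamir: %llu, reference: %llu\n",
	       (unsigned long long)acc, (unsigned long long)(u1 * p + u2 * q));
	return 0;
}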
static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits)
{
@@ -948,6 +1326,7 @@ int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
return __ecc_is_key_valid(curve, private_key, ndigits);
}
+EXPORT_SYMBOL(ecc_is_key_valid);
/*
* ECC private keys are generated using the method of extra random bits,
@@ -1000,6 +1379,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
return 0;
}
+EXPORT_SYMBOL(ecc_gen_privkey);
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, u64 *public_key)
@@ -1036,13 +1416,17 @@ err_free_point:
out:
return ret;
}
+EXPORT_SYMBOL(ecc_make_pub_key);
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
-static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
- struct ecc_point *pk)
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk)
{
u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
+ if (WARN_ON(pk->ndigits != curve->g.ndigits))
+ return -EINVAL;
+
/* Check 1: Verify key is not the zero point. */
if (ecc_point_is_zero(pk))
return -EINVAL;
@@ -1064,8 +1448,8 @@ static int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
return -EINVAL;
return 0;
-
}
+EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
@@ -1121,3 +1505,6 @@ err_alloc_product:
out:
return ret;
}
+EXPORT_SYMBOL(crypto_ecdh_shared_secret);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/ecc.h b/crypto/ecc.h
index f75a86baa3bd..ab0eb70b9c09 100644
--- a/crypto/ecc.h
+++ b/crypto/ecc.h
@@ -26,13 +26,51 @@
#ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H
+/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4
-#define ECC_MAX_DIGITS ECC_CURVE_NIST_P256_DIGITS
+#define ECC_MAX_DIGITS (512 / 64)
#define ECC_DIGITS_TO_BYTES_SHIFT 3
/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x: X coordinate in vli form.
+ * @y: Y coordinate in vli form.
+ * @ndigits: Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+ u64 *x;
+ u64 *y;
+ u8 ndigits;
+};
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name: Short name of the curve.
+ * @g: Generator point of the curve.
+ * @p: Prime number; if Barrett's reduction is used for this curve,
+ * the pre-calculated value 'mu' is appended to @p after ndigits.
+ * Use of Barrett's reduction is heuristically determined in
+ * vli_mmod_fast().
+ * @n: Order of the curve group.
+ * @a: Curve parameter a.
+ * @b: Curve parameter b.
+ */
+struct ecc_curve {
+ char *name;
+ struct ecc_point g;
+ u64 *p;
+ u64 *n;
+ u64 *a;
+ u64 *b;
+};
+
+/**
* ecc_is_key_valid() - Validate a given ECDH private key
*
* @curve_id: id representing the curve to use
@@ -91,4 +129,117 @@ int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation fails.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine if vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: borrow bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is a sufficiently large curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This is faster than doing the two multiplications separately and then
+ * adding the results.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
#endif
diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h
index 336ab1805639..69be6c7d228f 100644
--- a/crypto/ecc_curve_defs.h
+++ b/crypto/ecc_curve_defs.h
@@ -2,21 +2,6 @@
#ifndef _CRYTO_ECC_CURVE_DEFS_H
#define _CRYTO_ECC_CURVE_DEFS_H
-struct ecc_point {
- u64 *x;
- u64 *y;
- u8 ndigits;
-};
-
-struct ecc_curve {
- char *name;
- struct ecc_point g;
- u64 *p;
- u64 *n;
- u64 *a;
- u64 *b;
-};
-
/* NIST P-192: a = p - 3 */
static u64 nist_p192_g_x[] = { 0xF4FF0AFD82FF1012ull, 0x7CBF20EB43A18800ull,
0x188DA80EB03090F6ull };
diff --git a/crypto/ecdh.c b/crypto/ecdh.c
index bf6300175b9c..890092bd8989 100644
--- a/crypto/ecdh.c
+++ b/crypto/ecdh.c
@@ -166,7 +166,7 @@ static void ecdh_exit(void)
crypto_unregister_kpp(&ecdh);
}
-module_init(ecdh_init);
+subsys_initcall(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 77e607fdbfb7..e71d1bc8d850 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -174,7 +174,7 @@ static void __exit echainiv_module_exit(void)
crypto_unregister_template(&echainiv_tmpl);
}
-module_init(echainiv_module_init);
+subsys_initcall(echainiv_module_init);
module_exit(echainiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
new file mode 100644
index 000000000000..887ec21aee49
--- /dev/null
+++ b/crypto/ecrdsa.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Elliptic Curve (Russian) Digital Signature Algorithm for Cryptographic API
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * References:
+ * GOST 34.10-2018, GOST R 34.10-2012, RFC 7091, ISO/IEC 14888-3:2018.
+ *
+ * Historical references:
+ * GOST R 34.10-2001, RFC 4357, ISO/IEC 14888-3:2006/Amd 1:2010.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/streebog.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/oid_registry.h>
+#include "ecrdsa_params.asn1.h"
+#include "ecrdsa_pub_key.asn1.h"
+#include "ecc.h"
+#include "ecrdsa_defs.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+struct ecrdsa_ctx {
+ enum OID algo_oid; /* overall public key oid */
+ enum OID curve_oid; /* parameter */
+ enum OID digest_oid; /* parameter */
+ const struct ecc_curve *curve; /* curve from oid */
+ unsigned int digest_len; /* parameter (bytes) */
+ const char *digest; /* digest name from oid */
+ unsigned int key_len; /* @key length (bytes) */
+ const char *key; /* raw public key */
+ struct ecc_point pub_key;
+ u64 _pubp[2][ECRDSA_MAX_DIGITS]; /* point storage for @pub_key */
+};
+
+static const struct ecc_curve *get_curve_by_oid(enum OID oid)
+{
+ switch (oid) {
+ case OID_gostCPSignA:
+ case OID_gostTC26Sign256B:
+ return &gost_cp256a;
+ case OID_gostCPSignB:
+ case OID_gostTC26Sign256C:
+ return &gost_cp256b;
+ case OID_gostCPSignC:
+ case OID_gostTC26Sign256D:
+ return &gost_cp256c;
+ case OID_gostTC26Sign512A:
+ return &gost_tc512a;
+ case OID_gostTC26Sign512B:
+ return &gost_tc512b;
+ /* The following two aren't implemented: */
+ case OID_gostTC26Sign256A:
+ case OID_gostTC26Sign512C:
+ default:
+ return NULL;
+ }
+}
+
+static int ecrdsa_verify(struct akcipher_request *req)
+{
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ unsigned char sig[ECRDSA_MAX_SIG_SIZE];
+ unsigned char digest[STREEBOG512_DIGEST_SIZE];
+ unsigned int ndigits = req->dst_len / sizeof(u64);
+ u64 r[ECRDSA_MAX_DIGITS]; /* witness (r) */
+ u64 _r[ECRDSA_MAX_DIGITS]; /* -r */
+ u64 s[ECRDSA_MAX_DIGITS]; /* second part of sig (s) */
+ u64 e[ECRDSA_MAX_DIGITS]; /* h \mod q */
+ u64 *v = e; /* e^{-1} \mod q */
+ u64 z1[ECRDSA_MAX_DIGITS];
+ u64 *z2 = _r;
+ struct ecc_point cc = ECC_POINT_INIT(s, e, ndigits); /* reuse s, e */
+
+ /*
+ * Digest value, digest algorithm, and curve (modulus) should have the
+ * same length (256 or 512 bits); the public key and the signature should
+ * be twice that length.
+ */
+ if (!ctx->curve ||
+ !ctx->digest ||
+ !req->src ||
+ !ctx->pub_key.x ||
+ req->dst_len != ctx->digest_len ||
+ req->dst_len != ctx->curve->g.ndigits * sizeof(u64) ||
+ ctx->pub_key.ndigits != ctx->curve->g.ndigits ||
+ req->dst_len * 2 != req->src_len ||
+ WARN_ON(req->src_len > sizeof(sig)) ||
+ WARN_ON(req->dst_len > sizeof(digest)))
+ return -EBADMSG;
+
+ sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len),
+ sig, req->src_len);
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ req->src_len + req->dst_len),
+ digest, req->dst_len, req->src_len);
+
+ vli_from_be64(s, sig, ndigits);
+ vli_from_be64(r, sig + ndigits * sizeof(u64), ndigits);
+
+ /* Step 1: verify that 0 < r < q, 0 < s < q */
+ if (vli_is_zero(r, ndigits) ||
+ vli_cmp(r, ctx->curve->n, ndigits) == 1 ||
+ vli_is_zero(s, ndigits) ||
+ vli_cmp(s, ctx->curve->n, ndigits) == 1)
+ return -EKEYREJECTED;
+
+ /* Step 2: calculate hash (h) of the message (passed as input) */
+ /* Step 3: calculate e = h \mod q */
+ vli_from_le64(e, digest, ndigits);
+ if (vli_cmp(e, ctx->curve->n, ndigits) == 1)
+ vli_sub(e, e, ctx->curve->n, ndigits);
+ if (vli_is_zero(e, ndigits))
+ e[0] = 1;
+
+ /* Step 4: calculate v = e^{-1} \mod q */
+ vli_mod_inv(v, e, ctx->curve->n, ndigits);
+
+ /* Step 5: calculate z_1 = sv \mod q, z_2 = -rv \mod q */
+ vli_mod_mult_slow(z1, s, v, ctx->curve->n, ndigits);
+ vli_sub(_r, ctx->curve->n, r, ndigits);
+ vli_mod_mult_slow(z2, _r, v, ctx->curve->n, ndigits);
+
+ /* Step 6: calculate point C = z_1P + z_2Q, and R = x_c \mod q */
+ ecc_point_mult_shamir(&cc, z1, &ctx->curve->g, z2, &ctx->pub_key,
+ ctx->curve);
+ if (vli_cmp(cc.x, ctx->curve->n, ndigits) == 1)
+ vli_sub(cc.x, cc.x, ctx->curve->n, ndigits);
+
+ /* Step 7: if R == r signature is valid */
+ if (!vli_cmp(cc.x, r, ndigits))
+ return 0;
+ else
+ return -EKEYREJECTED;
+}
+
+int ecrdsa_param_curve(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+
+ ctx->curve_oid = look_up_OID(value, vlen);
+ if (!ctx->curve_oid)
+ return -EINVAL;
+ ctx->curve = get_curve_by_oid(ctx->curve_oid);
+ return 0;
+}
+
+/* Optional. If present should match expected digest algo OID. */
+int ecrdsa_param_digest(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+ int digest_oid = look_up_OID(value, vlen);
+
+ if (digest_oid != ctx->digest_oid)
+ return -EINVAL;
+ return 0;
+}
+
+int ecrdsa_parse_pub_key(void *context, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen)
+{
+ struct ecrdsa_ctx *ctx = context;
+
+ ctx->key = value;
+ ctx->key_len = vlen;
+ return 0;
+}
+
+static u8 *ecrdsa_unpack_u32(u32 *dst, void *src)
+{
+ memcpy(dst, src, sizeof(u32));
+ return src + sizeof(u32);
+}
+
+/* Parse BER encoded subjectPublicKey. */
+static int ecrdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen)
+{
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+ unsigned int ndigits;
+ u32 algo, paramlen;
+ u8 *params;
+ int err;
+
+ err = asn1_ber_decoder(&ecrdsa_pub_key_decoder, ctx, key, keylen);
+ if (err < 0)
+ return err;
+
+ /* Key parameters are appended to the key after keylen bytes. */
+ params = ecrdsa_unpack_u32(&paramlen,
+ ecrdsa_unpack_u32(&algo, (u8 *)key + keylen));
+
+ if (algo == OID_gost2012PKey256) {
+ ctx->digest = "streebog256";
+ ctx->digest_oid = OID_gost2012Digest256;
+ ctx->digest_len = 256 / 8;
+ } else if (algo == OID_gost2012PKey512) {
+ ctx->digest = "streebog512";
+ ctx->digest_oid = OID_gost2012Digest512;
+ ctx->digest_len = 512 / 8;
+ } else
+ return -ENOPKG;
+ ctx->algo_oid = algo;
+
+ /* Parse SubjectPublicKeyInfo.AlgorithmIdentifier.parameters. */
+ err = asn1_ber_decoder(&ecrdsa_params_decoder, ctx, params, paramlen);
+ if (err < 0)
+ return err;
+ /*
+ * Sizes of algo (set in digest_len) and curve should match
+ * each other.
+ */
+ if (!ctx->curve ||
+ ctx->curve->g.ndigits * sizeof(u64) != ctx->digest_len)
+ return -ENOPKG;
+ /*
+ * Key is two 256- or 512-bit coordinates which should match
+ * curve size.
+ */
+ if ((ctx->key_len != (2 * 256 / 8) &&
+ ctx->key_len != (2 * 512 / 8)) ||
+ ctx->key_len != ctx->curve->g.ndigits * sizeof(u64) * 2)
+ return -ENOPKG;
+
+ ndigits = ctx->key_len / sizeof(u64) / 2;
+ ctx->pub_key = ECC_POINT_INIT(ctx->_pubp[0], ctx->_pubp[1], ndigits);
+ vli_from_le64(ctx->pub_key.x, ctx->key, ndigits);
+ vli_from_le64(ctx->pub_key.y, ctx->key + ndigits * sizeof(u64),
+ ndigits);
+
+ if (ecc_is_pubkey_valid_partial(ctx->curve, &ctx->pub_key))
+ return -EKEYREJECTED;
+
+ return 0;
+}
+
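A standalone sketch of how ecrdsa_unpack_u32() is used above, under the assumption (taken from the comment in ecrdsa_set_pub_key()) that two native-endian u32 values, the algorithm OID enum and the parameter length, follow the raw key bytes; the blob layout and values below are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned char *unpack_u32(uint32_t *dst, unsigned char *src)
{
	memcpy(dst, src, sizeof(*dst));
	return src + sizeof(*dst);	/* points just past what was read */
}

int main(void)
{
	unsigned char blob[8];
	uint32_t algo = 42, paramlen = 16;	/* illustrative values */
	uint32_t a, l;
	unsigned char *p;

	memcpy(blob, &algo, 4);
	memcpy(blob + 4, &paramlen, 4);

	/* Peel off algo first, then paramlen, exactly like the nested call. */
	p = unpack_u32(&l, unpack_u32(&a, blob));
	printf("algo=%u paramlen=%u consumed=%zu\n", a, l, (size_t)(p - blob));
	return 0;
}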
+static unsigned int ecrdsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct ecrdsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ /*
+ * Verify doesn't need any output, so it's just informational
+ * for keyctl to determine the key bit size.
+ */
+ return ctx->pub_key.ndigits * sizeof(u64);
+}
+
+static void ecrdsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+}
+
+static struct akcipher_alg ecrdsa_alg = {
+ .verify = ecrdsa_verify,
+ .set_pub_key = ecrdsa_set_pub_key,
+ .max_size = ecrdsa_max_size,
+ .exit = ecrdsa_exit_tfm,
+ .base = {
+ .cra_name = "ecrdsa",
+ .cra_driver_name = "ecrdsa-generic",
+ .cra_priority = 100,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ecrdsa_ctx),
+ },
+};
+
+static int __init ecrdsa_mod_init(void)
+{
+ return crypto_register_akcipher(&ecrdsa_alg);
+}
+
+static void __exit ecrdsa_mod_fini(void)
+{
+ crypto_unregister_akcipher(&ecrdsa_alg);
+}
+
+module_init(ecrdsa_mod_init);
+module_exit(ecrdsa_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/crypto/ecrdsa_defs.h b/crypto/ecrdsa_defs.h
new file mode 100644
index 000000000000..170baf039007
--- /dev/null
+++ b/crypto/ecrdsa_defs.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Definitions of EC-RDSA Curve Parameters
+ *
+ * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYTO_ECRDSA_DEFS_H
+#define _CRYTO_ECRDSA_DEFS_H
+
+#include "ecc.h"
+
+#define ECRDSA_MAX_SIG_SIZE (2 * 512 / 8)
+#define ECRDSA_MAX_DIGITS (512 / 64)
+
+/*
+ * EC-RDSA uses its own set of curves.
+ *
+ * cp256{a,b,c} curves first defined for GOST R 34.10-2001 in RFC 4357 (as
+ * 256-bit {A,B,C}-ParamSet), but inherited for GOST R 34.10-2012 and
+ * proposed for use in R 50.1.114-2016 and RFC 7836 as the 256-bit curves.
+ */
+/* OID_gostCPSignA 1.2.643.2.2.35.1 */
+static u64 cp256a_g_x[] = {
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256a_g_y[] = {
+ 0x22ACC99C9E9F1E14ull, 0x35294F2DDF23E3B1ull,
+ 0x27DF505A453F2B76ull, 0x8D91E471E0989CDAull, };
+static u64 cp256a_p[] = { /* p = 2^256 - 617 */
+ 0xFFFFFFFFFFFFFD97ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_n[] = {
+ 0x45841B09B761B893ull, 0x6C611070995AD100ull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_a[] = { /* a = p - 3 */
+ 0xFFFFFFFFFFFFFD94ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
+static u64 cp256a_b[] = {
+ 0x00000000000000a6ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull };
+
+static struct ecc_curve gost_cp256a = {
+ .name = "cp256a",
+ .g = {
+ .x = cp256a_g_x,
+ .y = cp256a_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256a_p,
+ .n = cp256a_n,
+ .a = cp256a_a,
+ .b = cp256a_b
+};
+
+/* OID_gostCPSignB 1.2.643.2.2.35.2 */
+static u64 cp256b_g_x[] = {
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256b_g_y[] = {
+ 0x744BF8D717717EFCull, 0xC545C9858D03ECFBull,
+ 0xB83D1C3EB2C070E5ull, 0x3FA8124359F96680ull, };
+static u64 cp256b_p[] = { /* p = 2^255 + 3225 */
+ 0x0000000000000C99ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_n[] = {
+ 0xE497161BCC8A198Full, 0x5F700CFFF1A624E5ull,
+ 0x0000000000000001ull, 0x8000000000000000ull, };
+static u64 cp256b_a[] = { /* a = p - 3 */
+ 0x0000000000000C96ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 cp256b_b[] = {
+ 0x2F49D4CE7E1BBC8Bull, 0xE979259373FF2B18ull,
+ 0x66A7D3C25C3DF80Aull, 0x3E1AF419A269A5F8ull, };
+
+static struct ecc_curve gost_cp256b = {
+ .name = "cp256b",
+ .g = {
+ .x = cp256b_g_x,
+ .y = cp256b_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256b_p,
+ .n = cp256b_n,
+ .a = cp256b_a,
+ .b = cp256b_b
+};
+
+/* OID_gostCPSignC 1.2.643.2.2.35.3 */
+static u64 cp256c_g_x[] = {
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 cp256c_g_y[] = {
+ 0x366E550DFDB3BB67ull, 0x4D4DC440D4641A8Full,
+ 0x3CBF3783CD08C0EEull, 0x41ECE55743711A8Cull, };
+static u64 cp256c_p[] = {
+ 0x7998F7B9022D759Bull, 0xCF846E86789051D3ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull,
+ /* pre-computed value for Barrett's reduction */
+ 0xedc283cdd217b5a2ull, 0xbac48fc06398ae59ull,
+ 0x405384d55f9f3b73ull, 0xa51f176161f1d734ull,
+ 0x0000000000000001ull, };
+static u64 cp256c_n[] = {
+ 0xF02F3A6598980BB9ull, 0x582CA3511EDDFB74ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_a[] = { /* a = p - 3 */
+ 0x7998F7B9022D7598ull, 0xCF846E86789051D3ull,
+ 0xAB1EC85E6B41C8AAull, 0x9B9F605F5A858107ull, };
+static u64 cp256c_b[] = {
+ 0x000000000000805aull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+
+static struct ecc_curve gost_cp256c = {
+ .name = "cp256c",
+ .g = {
+ .x = cp256c_g_x,
+ .y = cp256c_g_y,
+ .ndigits = 256 / 64,
+ },
+ .p = cp256c_p,
+ .n = cp256c_n,
+ .a = cp256c_a,
+ .b = cp256c_b
+};
+
+/* tc512{a,b} curves first recommended in 2013 and then standardized in
+ * R 50.1.114-2016 and RFC 7836 for use with GOST R 34.10-2012 (as TC26
+ * 512-bit ParamSet{A,B}).
+ */
+/* OID_gostTC26Sign512A 1.2.643.7.1.2.1.2.1 */
+static u64 tc512a_g_x[] = {
+ 0x0000000000000003ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512a_g_y[] = {
+ 0x89A589CB5215F2A4ull, 0x8028FE5FC235F5B8ull,
+ 0x3D75E6A50E3A41E9ull, 0xDF1626BE4FD036E9ull,
+ 0x778064FDCBEFA921ull, 0xCE5E1C93ACF1ABC1ull,
+ 0xA61B8816E25450E6ull, 0x7503CFE87A836AE3ull, };
+static u64 tc512a_p[] = { /* p = 2^512 - 569 */
+ 0xFFFFFFFFFFFFFDC7ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_n[] = {
+ 0xCACDB1411F10B275ull, 0x9B4B38ABFAD2B85Dull,
+ 0x6FF22B8D4E056060ull, 0x27E69532F48D8911ull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_a[] = { /* a = p - 3 */
+ 0xFFFFFFFFFFFFFDC4ull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull,
+ 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull, };
+static u64 tc512a_b[] = {
+ 0x503190785A71C760ull, 0x862EF9D4EBEE4761ull,
+ 0x4CB4574010DA90DDull, 0xEE3CB090F30D2761ull,
+ 0x79BD081CFD0B6265ull, 0x34B82574761CB0E8ull,
+ 0xC1BD0B2B6667F1DAull, 0xE8C2505DEDFC86DDull, };
+
+static struct ecc_curve gost_tc512a = {
+ .name = "tc512a",
+ .g = {
+ .x = tc512a_g_x,
+ .y = tc512a_g_y,
+ .ndigits = 512 / 64,
+ },
+ .p = tc512a_p,
+ .n = tc512a_n,
+ .a = tc512a_a,
+ .b = tc512a_b
+};
+
+/* OID_gostTC26Sign512B 1.2.643.7.1.2.1.2.2 */
+static u64 tc512b_g_x[] = {
+ 0x0000000000000002ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull, };
+static u64 tc512b_g_y[] = {
+ 0x7E21340780FE41BDull, 0x28041055F94CEEECull,
+ 0x152CBCAAF8C03988ull, 0xDCB228FD1EDF4A39ull,
+ 0xBE6DD9E6C8EC7335ull, 0x3C123B697578C213ull,
+ 0x2C071E3647A8940Full, 0x1A8F7EDA389B094Cull, };
+static u64 tc512b_p[] = { /* p = 2^511 + 111 */
+ 0x000000000000006Full, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_n[] = {
+ 0xC6346C54374F25BDull, 0x8B996712101BEA0Eull,
+ 0xACFDB77BD9D40CFAull, 0x49A1EC142565A545ull,
+ 0x0000000000000001ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_a[] = { /* a = p - 3 */
+ 0x000000000000006Cull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x0000000000000000ull,
+ 0x0000000000000000ull, 0x8000000000000000ull, };
+static u64 tc512b_b[] = {
+ 0xFB8CCBC7C5140116ull, 0x50F78BEE1FA3106Eull,
+ 0x7F8B276FAD1AB69Cull, 0x3E965D2DB1416D21ull,
+ 0xBF85DC806C4B289Full, 0xB97C7D614AF138BCull,
+ 0x7E3E06CF6F5E2517ull, 0x687D1B459DC84145ull, };
+
+static struct ecc_curve gost_tc512b = {
+ .name = "tc512b",
+ .g = {
+ .x = tc512b_g_x,
+ .y = tc512b_g_y,
+ .ndigits = 512 / 64,
+ },
+ .p = tc512b_p,
+ .n = tc512b_n,
+ .a = tc512b_a,
+ .b = tc512b_b
+};
+
+#endif
diff --git a/crypto/ecrdsa_params.asn1 b/crypto/ecrdsa_params.asn1
new file mode 100644
index 000000000000..aba99c3763cf
--- /dev/null
+++ b/crypto/ecrdsa_params.asn1
@@ -0,0 +1,4 @@
+EcrdsaParams ::= SEQUENCE {
+ curve OBJECT IDENTIFIER ({ ecrdsa_param_curve }),
+ digest OBJECT IDENTIFIER OPTIONAL ({ ecrdsa_param_digest })
+}
diff --git a/crypto/ecrdsa_pub_key.asn1 b/crypto/ecrdsa_pub_key.asn1
new file mode 100644
index 000000000000..048cb646bce4
--- /dev/null
+++ b/crypto/ecrdsa_pub_key.asn1
@@ -0,0 +1 @@
+EcrdsaPubKey ::= OCTET STRING ({ ecrdsa_parse_pub_key })
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 77286ea28865..4e8704405a3b 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -414,7 +414,7 @@ static void __exit fcrypt_mod_fini(void)
crypto_unregister_alg(&fcrypt_alg);
}
-module_init(fcrypt_mod_init);
+subsys_initcall(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/fips.c b/crypto/fips.c
index 9d627c1cf8bc..9dfed122d6da 100644
--- a/crypto/fips.c
+++ b/crypto/fips.c
@@ -74,5 +74,5 @@ static void __exit fips_exit(void)
crypto_proc_fips_exit();
}
-module_init(fips_init);
+subsys_initcall(fips_init);
module_exit(fips_exit);
diff --git a/crypto/gcm.c b/crypto/gcm.c
index e1a11f529d25..33f45a980967 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
- const char *full_name,
const char *ctr_name,
const char *ghash_name)
{
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
err = -EINVAL;
- if (ghash->digestsize != 16)
+ if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+ ghash->digestsize != 16)
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
- /* We only support 16-byte blocks. */
+ /* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
- if (crypto_skcipher_alg_ivsize(ctr) != 16)
+ if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+ crypto_skcipher_alg_ivsize(ctr) != 16 ||
+ ctr->base.cra_blocksize != 1)
goto out_put_ctr;
- /* Not a stream cipher? */
- if (ctr->base.cra_blocksize != 1)
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
- memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
inst->alg.base.cra_flags = (ghash->base.cra_flags |
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
- char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
- CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, "ghash");
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
}
static int crypto_gcm_base_create(struct crypto_template *tmpl,
@@ -732,7 +726,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
{
const char *ctr_name;
const char *ghash_name;
- char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
@@ -742,12 +735,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
- if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
- ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
-
- return crypto_gcm_create_common(tmpl, tb, full_name,
- ctr_name, ghash_name);
+ return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
}
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
@@ -1258,7 +1246,7 @@ static void __exit crypto_gcm_module_exit(void)
ARRAY_SIZE(crypto_gcm_tmpls));
}
-module_init(crypto_gcm_module_init);
+subsys_initcall(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index d9f192b953b2..e6307935413c 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -149,7 +149,7 @@ static void __exit ghash_mod_exit(void)
crypto_unregister_shash(&ghash_alg);
}
-module_init(ghash_mod_init);
+subsys_initcall(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e74730224f0a..a68c1266121f 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -57,8 +57,6 @@ static int hmac_setkey(struct crypto_shash *parent,
unsigned int i;
shash->tfm = hash;
- shash->flags = crypto_shash_get_flags(parent)
- & CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
int err;
@@ -91,8 +89,6 @@ static int hmac_export(struct shash_desc *pdesc, void *out)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_export(desc, out);
}
@@ -102,7 +98,6 @@ static int hmac_import(struct shash_desc *pdesc, const void *in)
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
desc->tfm = ctx->hash;
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(desc, in);
}
@@ -117,8 +112,6 @@ static int hmac_update(struct shash_desc *pdesc,
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_update(desc, data, nbytes);
}
@@ -130,8 +123,6 @@ static int hmac_final(struct shash_desc *pdesc, u8 *out)
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_final(desc, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
@@ -147,8 +138,6 @@ static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
- desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-
return crypto_shash_finup(desc, data, nbytes, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
@@ -268,7 +257,7 @@ static void __exit hmac_module_exit(void)
crypto_unregister_template(&hmac_tmpl);
}
-module_init(hmac_module_init);
+subsys_initcall(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 787dccca3715..6ea1a270b8dc 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
crypto_unregister_rng(&jent_alg);
}
-module_init(jent_mod_init);
+subsys_initcall(jent_mod_init);
module_exit(jent_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index a5cfe610d8f4..a155c88105ea 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -310,7 +310,7 @@ static void __exit crypto_kw_exit(void)
crypto_unregister_template(&crypto_kw_tmpl);
}
-module_init(crypto_kw_init);
+subsys_initcall(crypto_kw_init);
module_exit(crypto_kw_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 873eb5ded6d7..b50aa8a3ab4c 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -875,7 +875,7 @@ static void __exit khazad_mod_fini(void)
}
-module_init(khazad_mod_init);
+subsys_initcall(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 08a0e458bc3e..fa302f3f161e 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -162,8 +162,10 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass)
}
err = skcipher_walk_virt(&w, req, false);
- iv = (__be32 *)w.iv;
+ if (err)
+ return err;
+ iv = (__be32 *)w.iv;
counter[0] = be32_to_cpu(iv[3]);
counter[1] = be32_to_cpu(iv[2]);
counter[2] = be32_to_cpu(iv[1]);
@@ -435,7 +437,7 @@ static void __exit crypto_module_exit(void)
crypto_unregister_template(&crypto_tmpl);
}
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index c160dfdbf2e0..1e35134d0a98 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -164,7 +164,7 @@ static void __exit lz4_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lz4_mod_init);
+subsys_initcall(lz4_mod_init);
module_exit(lz4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 583b5e013d7a..4a220b628fe7 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -165,7 +165,7 @@ static void __exit lz4hc_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lz4hc_mod_init);
+subsys_initcall(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c
index ea9c75b1db49..4c82bf18440f 100644
--- a/crypto/lzo-rle.c
+++ b/crypto/lzo-rle.c
@@ -167,7 +167,7 @@ static void __exit lzorle_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lzorle_mod_init);
+subsys_initcall(lzorle_mod_init);
module_exit(lzorle_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index 218567d717d6..4a6ac8f247d0 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -167,7 +167,7 @@ static void __exit lzo_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(lzo_mod_init);
+subsys_initcall(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md4.c b/crypto/md4.c
index 9965ec40d9f9..9a1a228a0c69 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -232,7 +232,7 @@ static void __exit md4_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(md4_mod_init);
+subsys_initcall(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/md5.c b/crypto/md5.c
index 94dd78144ba3..221c2c0932f8 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -244,7 +244,7 @@ static void __exit md5_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(md5_mod_init);
+subsys_initcall(md5_mod_init);
module_exit(md5_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 46195e0d0f4d..538ae7933795 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -178,7 +178,7 @@ static void __exit michael_mic_exit(void)
}
-module_init(michael_mic_init);
+subsys_initcall(michael_mic_init);
module_exit(michael_mic_exit);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 0747732d5b78..f8734c6576af 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -532,7 +532,7 @@ static void __exit crypto_morus1280_module_exit(void)
crypto_unregister_aead(&crypto_morus1280_alg);
}
-module_init(crypto_morus1280_module_init);
+subsys_initcall(crypto_morus1280_module_init);
module_exit(crypto_morus1280_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1617a1eb8be1..ae5aa9482cb4 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -523,7 +523,7 @@ static void __exit crypto_morus640_module_exit(void)
crypto_unregister_aead(&crypto_morus640_alg);
}
-module_init(crypto_morus640_module_init);
+subsys_initcall(crypto_morus640_module_init);
module_exit(crypto_morus640_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
index ec831a5594d8..9ab4e07cde4d 100644
--- a/crypto/nhpoly1305.c
+++ b/crypto/nhpoly1305.c
@@ -244,7 +244,7 @@ static void __exit nhpoly1305_mod_exit(void)
crypto_unregister_shash(&nhpoly1305_alg);
}
-module_init(nhpoly1305_mod_init);
+subsys_initcall(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);
MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
diff --git a/crypto/ofb.c b/crypto/ofb.c
index 34b6e1f426f7..133ff4c7f2c6 100644
--- a/crypto/ofb.c
+++ b/crypto/ofb.c
@@ -95,7 +95,7 @@ static void __exit crypto_ofb_module_exit(void)
crypto_unregister_template(&crypto_ofb_tmpl);
}
-module_init(crypto_ofb_module_init);
+subsys_initcall(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 2fa03fc576fe..31b3ce948474 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -191,7 +191,7 @@ static void __exit crypto_pcbc_module_exit(void)
crypto_unregister_template(&crypto_pcbc_tmpl);
}
-module_init(crypto_pcbc_module_init);
+subsys_initcall(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index d47cfc47b1b1..0e9ce329fd47 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -512,7 +512,7 @@ static void __exit pcrypt_exit(void)
crypto_unregister_template(&pcrypt_tmpl);
}
-module_init(pcrypt_init);
+subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 2a06874204e8..adc40298c749 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -318,7 +318,7 @@ static void __exit poly1305_mod_exit(void)
crypto_unregister_shash(&poly1305_alg);
}
-module_init(poly1305_mod_init);
+subsys_initcall(poly1305_mod_init);
module_exit(poly1305_mod_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 5f4472256e27..faf4252c4b85 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -318,7 +318,7 @@ static void __exit rmd128_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd128_mod_init);
+subsys_initcall(rmd128_mod_init);
module_exit(rmd128_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 737645344d1c..b33309916d4f 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -362,7 +362,7 @@ static void __exit rmd160_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd160_mod_init);
+subsys_initcall(rmd160_mod_init);
module_exit(rmd160_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 0e9d30676a01..2a643250c9a5 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -337,7 +337,7 @@ static void __exit rmd256_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd256_mod_init);
+subsys_initcall(rmd256_mod_init);
module_exit(rmd256_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 3ae1df5bb48c..2f062574fc8c 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -386,7 +386,7 @@ static void __exit rmd320_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(rmd320_mod_init);
+subsys_initcall(rmd320_mod_init);
module_exit(rmd320_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 0a6680ca8cb6..29c336068dc0 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -429,7 +429,7 @@ static int pkcs1pad_sign(struct akcipher_request *req)
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
req->dst, ctx->key_size - 1, req->dst_len);
- err = crypto_akcipher_sign(&req_ctx->child_req);
+ err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
@@ -488,14 +488,21 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
err = 0;
- if (req->dst_len < dst_len - pos)
- err = -EOVERFLOW;
- req->dst_len = dst_len - pos;
-
- if (!err)
- sg_copy_from_buffer(req->dst,
- sg_nents_for_len(req->dst, req->dst_len),
- out_buf + pos, req->dst_len);
+ if (req->dst_len != dst_len - pos) {
+ err = -EKEYREJECTED;
+ req->dst_len = dst_len - pos;
+ goto done;
+ }
+ /* Extract appended digest. */
+ sg_pcopy_to_buffer(req->src,
+ sg_nents_for_len(req->src,
+ req->src_len + req->dst_len),
+ req_ctx->out_buf + ctx->key_size,
+ req->dst_len, ctx->key_size);
+ /* Do the actual verification step. */
+ if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
+ req->dst_len) != 0)
+ err = -EKEYREJECTED;
done:
kzfree(req_ctx->out_buf);
@@ -532,10 +539,12 @@ static int pkcs1pad_verify(struct akcipher_request *req)
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
- if (!ctx->key_size || req->src_len < ctx->key_size)
+ if (WARN_ON(req->dst) ||
+ WARN_ON(!req->dst_len) ||
+ !ctx->key_size || req->src_len < ctx->key_size)
return -EINVAL;
- req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
+ req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
@@ -551,7 +560,7 @@ static int pkcs1pad_verify(struct akcipher_request *req)
req_ctx->out_sg, req->src_len,
ctx->key_size);
- err = crypto_akcipher_verify(&req_ctx->child_req);
+ err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
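
[Editorial note] In the rsa-pkcs1pad.c hunks above, sign now calls the child cipher's decrypt operation and verify calls its encrypt operation; the verify completion then compares the recovered, de-padded value against the digest the caller supplies, returning -EKEYREJECTED on mismatch. This works because, for RSA, the RSASP1 signing primitive is the same modular exponentiation as decryption and RSAVP1 the same as encryption. A standalone toy illustration of that identity (tiny parameters n = 3233, e = 17, d = 2753; real RSA uses multi-precision integers, MPI in the kernel):

/* Sketch only, not kernel code. */
#include <assert.h>
#include <stdint.h>

static uint64_t modexp(uint64_t base, uint64_t exp, uint64_t mod)
{
	uint64_t result = 1;

	base %= mod;
	while (exp) {
		if (exp & 1)
			result = (result * base) % mod;
		base = (base * base) % mod;
		exp >>= 1;
	}
	return result;
}

int main(void)
{
	const uint64_t n = 3233, e = 17, d = 2753;
	uint64_t digest = 65;			/* stand-in for the padded hash */
	uint64_t sig = modexp(digest, d, n);	/* "sign" == raw RSA decrypt */
	uint64_t recovered = modexp(sig, e, n);	/* "verify" == raw RSA encrypt */

	assert(recovered == digest);		/* pkcs1pad then memcmp()s this */
	return 0;
}
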
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 4167980c243d..dcbb03431778 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -50,34 +50,6 @@ static int _rsa_dec(const struct rsa_mpi_key *key, MPI m, MPI c)
return mpi_powm(m, c, key->d, key->n);
}
-/*
- * RSASP1 function [RFC3447 sec 5.2.1]
- * s = m^d mod n
- */
-static int _rsa_sign(const struct rsa_mpi_key *key, MPI s, MPI m)
-{
- /* (1) Validate 0 <= m < n */
- if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
- return -EINVAL;
-
- /* (2) s = m^d mod n */
- return mpi_powm(s, m, key->d, key->n);
-}
-
-/*
- * RSAVP1 function [RFC3447 sec 5.2.2]
- * m = s^e mod n;
- */
-static int _rsa_verify(const struct rsa_mpi_key *key, MPI m, MPI s)
-{
- /* (1) Validate 0 <= s < n */
- if (mpi_cmp_ui(s, 0) < 0 || mpi_cmp(s, key->n) >= 0)
- return -EINVAL;
-
- /* (2) m = s^e mod n */
- return mpi_powm(m, s, key->e, key->n);
-}
-
static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
{
return akcipher_tfm_ctx(tfm);
@@ -160,85 +132,6 @@ err_free_m:
return ret;
}
-static int rsa_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
- MPI m, s = mpi_alloc(0);
- int ret = 0;
- int sign;
-
- if (!s)
- return -ENOMEM;
-
- if (unlikely(!pkey->n || !pkey->d)) {
- ret = -EINVAL;
- goto err_free_s;
- }
-
- ret = -ENOMEM;
- m = mpi_read_raw_from_sgl(req->src, req->src_len);
- if (!m)
- goto err_free_s;
-
- ret = _rsa_sign(pkey, s, m);
- if (ret)
- goto err_free_m;
-
- ret = mpi_write_to_sgl(s, req->dst, req->dst_len, &sign);
- if (ret)
- goto err_free_m;
-
- if (sign < 0)
- ret = -EBADMSG;
-
-err_free_m:
- mpi_free(m);
-err_free_s:
- mpi_free(s);
- return ret;
-}
-
-static int rsa_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
- MPI s, m = mpi_alloc(0);
- int ret = 0;
- int sign;
-
- if (!m)
- return -ENOMEM;
-
- if (unlikely(!pkey->n || !pkey->e)) {
- ret = -EINVAL;
- goto err_free_m;
- }
-
- s = mpi_read_raw_from_sgl(req->src, req->src_len);
- if (!s) {
- ret = -ENOMEM;
- goto err_free_m;
- }
-
- ret = _rsa_verify(pkey, m, s);
- if (ret)
- goto err_free_s;
-
- ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
- if (ret)
- goto err_free_s;
-
- if (sign < 0)
- ret = -EBADMSG;
-
-err_free_s:
- mpi_free(s);
-err_free_m:
- mpi_free(m);
- return ret;
-}
-
static void rsa_free_mpi_key(struct rsa_mpi_key *key)
{
mpi_free(key->d);
@@ -353,8 +246,6 @@ static void rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = rsa_enc,
.decrypt = rsa_dec,
- .sign = rsa_sign,
- .verify = rsa_verify,
.set_priv_key = rsa_set_priv_key,
.set_pub_key = rsa_set_pub_key,
.max_size = rsa_max_size,
@@ -391,7 +282,7 @@ static void rsa_exit(void)
crypto_unregister_akcipher(&rsa);
}
-module_init(rsa_init);
+subsys_initcall(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 00fce32ae17a..c81a44404086 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -86,18 +86,17 @@ static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
{
__le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- if (dst != src)
- memcpy(dst, src, bytes);
-
while (bytes >= SALSA20_BLOCK_SIZE) {
salsa20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, (const u8 *)stream,
+ SALSA20_BLOCK_SIZE);
bytes -= SALSA20_BLOCK_SIZE;
dst += SALSA20_BLOCK_SIZE;
+ src += SALSA20_BLOCK_SIZE;
}
if (bytes) {
salsa20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, bytes);
+ crypto_xor_cpy(dst, src, (const u8 *)stream, bytes);
}
}
@@ -161,7 +160,7 @@ static int salsa20_crypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, false);
- salsa20_init(state, ctx, walk.iv);
+ salsa20_init(state, ctx, req->iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
@@ -204,7 +203,7 @@ static void __exit salsa20_generic_mod_fini(void)
crypto_unregister_skcipher(&alg);
}
-module_init(salsa20_generic_mod_init);
+subsys_initcall(salsa20_generic_mod_init);
module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 6f8305f8c300..712b4c2ea021 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -29,9 +29,17 @@
#include <crypto/internal/scompress.h>
#include "internal.h"
+struct scomp_scratch {
+ spinlock_t lock;
+ void *src;
+ void *dst;
+};
+
+static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
+ .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
+};
+
static const struct crypto_type crypto_scomp_type;
-static void * __percpu *scomp_src_scratches;
-static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);
@@ -62,76 +70,53 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : scomp\n");
}
-static void crypto_scomp_free_scratches(void * __percpu *scratches)
+static void crypto_scomp_free_scratches(void)
{
+ struct scomp_scratch *scratch;
int i;
- if (!scratches)
- return;
-
- for_each_possible_cpu(i)
- vfree(*per_cpu_ptr(scratches, i));
+ for_each_possible_cpu(i) {
+ scratch = per_cpu_ptr(&scomp_scratch, i);
- free_percpu(scratches);
+ vfree(scratch->src);
+ vfree(scratch->dst);
+ scratch->src = NULL;
+ scratch->dst = NULL;
+ }
}
-static void * __percpu *crypto_scomp_alloc_scratches(void)
+static int crypto_scomp_alloc_scratches(void)
{
- void * __percpu *scratches;
+ struct scomp_scratch *scratch;
int i;
- scratches = alloc_percpu(void *);
- if (!scratches)
- return NULL;
-
for_each_possible_cpu(i) {
- void *scratch;
-
- scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!scratch)
- goto error;
- *per_cpu_ptr(scratches, i) = scratch;
- }
-
- return scratches;
-
-error:
- crypto_scomp_free_scratches(scratches);
- return NULL;
-}
+ void *mem;
-static void crypto_scomp_free_all_scratches(void)
-{
- if (!--scomp_scratch_users) {
- crypto_scomp_free_scratches(scomp_src_scratches);
- crypto_scomp_free_scratches(scomp_dst_scratches);
- scomp_src_scratches = NULL;
- scomp_dst_scratches = NULL;
- }
-}
+ scratch = per_cpu_ptr(&scomp_scratch, i);
-static int crypto_scomp_alloc_all_scratches(void)
-{
- if (!scomp_scratch_users++) {
- scomp_src_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_src_scratches)
- return -ENOMEM;
- scomp_dst_scratches = crypto_scomp_alloc_scratches();
- if (!scomp_dst_scratches) {
- crypto_scomp_free_scratches(scomp_src_scratches);
- scomp_src_scratches = NULL;
- return -ENOMEM;
- }
+ mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!mem)
+ goto error;
+ scratch->src = mem;
+ mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!mem)
+ goto error;
+ scratch->dst = mem;
}
return 0;
+error:
+ crypto_scomp_free_scratches();
+ return -ENOMEM;
}
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
- int ret;
+ int ret = 0;
mutex_lock(&scomp_lock);
- ret = crypto_scomp_alloc_all_scratches();
+ if (!scomp_scratch_users++)
+ ret = crypto_scomp_alloc_scratches();
mutex_unlock(&scomp_lock);
return ret;
@@ -143,42 +128,41 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
- const int cpu = get_cpu();
- u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
- u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ struct scomp_scratch *scratch;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
- ret = -EINVAL;
- goto out;
- }
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ return -EINVAL;
- if (req->dst && !req->dlen) {
- ret = -EINVAL;
- goto out;
- }
+ if (req->dst && !req->dlen)
+ return -EINVAL;
if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;
- scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+ scratch = raw_cpu_ptr(&scomp_scratch);
+ spin_lock(&scratch->lock);
+
+ scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
if (dir)
- ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
- scratch_dst, &req->dlen, *ctx);
+ ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
+ scratch->dst, &req->dlen, *ctx);
else
- ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
- scratch_dst, &req->dlen, *ctx);
+ ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
+ scratch->dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst)
+ if (!req->dst) {
+ ret = -ENOMEM;
goto out;
+ }
}
- scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+ scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
1);
}
out:
- put_cpu();
+ spin_unlock(&scratch->lock);
return ret;
}
@@ -199,7 +183,8 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
crypto_free_scomp(*ctx);
mutex_lock(&scomp_lock);
- crypto_scomp_free_all_scratches();
+ if (!--scomp_scratch_users)
+ crypto_scomp_free_scratches();
mutex_unlock(&scomp_lock);
}
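
[Editorial note] The scompress.c hunks above replace the two per-CPU pointer arrays with a single per-CPU struct scomp_scratch whose src/dst buffers are guarded by a per-CPU spinlock, and make the scratch allocation refcounted under scomp_lock. Access is serialized by the lock rather than by holding the CPU with get_cpu()/put_cpu() for the whole operation. A user-space analogue of that locking scheme (not kernel code; scratch_pool, SLOTS and SCRATCH_SIZE are illustrative names, and error unwinding is omitted):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define SLOTS 4
#define SCRATCH_SIZE (128 * 1024)

struct scratch {
	pthread_mutex_t lock;
	void *src;
	void *dst;
};

static struct scratch scratch_pool[SLOTS];

static int scratch_alloc(void)
{
	int i;

	for (i = 0; i < SLOTS; i++) {
		pthread_mutex_init(&scratch_pool[i].lock, NULL);
		scratch_pool[i].src = malloc(SCRATCH_SIZE);
		scratch_pool[i].dst = malloc(SCRATCH_SIZE);
		if (!scratch_pool[i].src || !scratch_pool[i].dst)
			return -1;
	}
	return 0;
}

static void compress_via_scratch(int slot, const void *in, size_t len)
{
	struct scratch *s = &scratch_pool[slot];

	/* The per-slot lock keeps the scratch pair consistent. */
	pthread_mutex_lock(&s->lock);
	memcpy(s->src, in, len);
	/* ... run the actual (de)compression between s->src and s->dst ... */
	pthread_mutex_unlock(&s->lock);
}
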
diff --git a/crypto/seed.c b/crypto/seed.c
index c6ba8438be43..a75ac50fa4fd 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -470,7 +470,7 @@ static void __exit seed_fini(void)
crypto_unregister_alg(&seed_alg);
}
-module_init(seed_init);
+subsys_initcall(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ed1b0e9f2436..3f2fad615d26 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -211,7 +211,7 @@ static void __exit seqiv_module_exit(void)
crypto_unregister_template(&seqiv_tmpl);
}
-module_init(seqiv_module_init);
+subsys_initcall(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7c3382facc82..ec4ec89ad108 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -664,7 +664,7 @@ static void __exit serpent_mod_fini(void)
crypto_unregister_algs(srp_algs, ARRAY_SIZE(srp_algs));
}
-module_init(serpent_mod_init);
+subsys_initcall(serpent_mod_init);
module_exit(serpent_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 2af64ef81f40..1b806d4584b2 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -92,7 +92,7 @@ static void __exit sha1_generic_mod_fini(void)
crypto_unregister_shash(&alg);
}
-module_init(sha1_generic_mod_init);
+subsys_initcall(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 1e5ba6649e8d..5844e9a469e8 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -301,7 +301,7 @@ static void __exit sha256_generic_mod_fini(void)
crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
-module_init(sha256_generic_mod_init);
+subsys_initcall(sha256_generic_mod_init);
module_exit(sha256_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7ed98367d4fb..60fd2be609d8 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -294,7 +294,7 @@ static void __exit sha3_generic_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-module_init(sha3_generic_mod_init);
+subsys_initcall(sha3_generic_mod_init);
module_exit(sha3_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 4097cd555eb6..0193ecb8ae10 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -223,7 +223,7 @@ static void __exit sha512_generic_mod_fini(void)
crypto_unregister_shashes(sha512_algs, ARRAY_SIZE(sha512_algs));
}
-module_init(sha512_generic_mod_init);
+subsys_initcall(sha512_generic_mod_init);
module_exit(sha512_generic_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/shash.c b/crypto/shash.c
index 15b369c4745f..e55c1f558bc3 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -238,7 +238,6 @@ static int shash_async_init(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return crypto_shash_init(desc);
}
@@ -293,7 +292,6 @@ static int shash_async_finup(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return shash_ahash_finup(req, desc);
}
@@ -307,14 +305,13 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
if (nbytes &&
(sg = req->src, offset = sg->offset,
- nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+ nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_atomic(sg_page(sg));
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
kunmap_atomic(data);
- crypto_yield(desc->flags);
} else
err = crypto_shash_init(desc) ?:
shash_ahash_finup(req, desc);
@@ -329,7 +326,6 @@ static int shash_async_digest(struct ahash_request *req)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return shash_ahash_digest(req, desc);
}
@@ -345,7 +341,6 @@ static int shash_async_import(struct ahash_request *req, const void *in)
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
- desc->flags = req->base.flags;
return crypto_shash_import(desc, in);
}
diff --git a/crypto/simd.c b/crypto/simd.c
index 78e8d037ae2b..3e3b1d1a6b1f 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -3,6 +3,7 @@
*
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
* Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2019 Google LLC
*
* Based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
@@ -20,10 +21,26 @@
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Shared crypto SIMD helpers. These functions dynamically create and register
+ * an skcipher or AEAD algorithm that wraps another, internal algorithm. The
+ * wrapper ensures that the internal algorithm is only executed in a context
+ * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
+ * If SIMD is already usable, the wrapper directly calls the internal algorithm.
+ * Otherwise it defers execution to a workqueue via cryptd.
*
+ * This is an alternative to the internal algorithm implementing a fallback for
+ * the !may_use_simd() case itself.
+ *
+ * Note that the wrapper algorithm is asynchronous, i.e. it has the
+ * CRYPTO_ALG_ASYNC flag set. Therefore it won't be found by users who
+ * explicitly allocate a synchronous algorithm.
*/
#include <crypto/cryptd.h>
+#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
@@ -31,6 +48,8 @@
#include <linux/preempt.h>
#include <asm/simd.h>
+/* skcipher support */
+
struct simd_skcipher_alg {
const char *ialg_name;
struct skcipher_alg alg;
@@ -66,7 +85,7 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -87,7 +106,7 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!may_use_simd() ||
+ if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -272,4 +291,254 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
+/* AEAD support */
+
+struct simd_aead_alg {
+ const char *ialg_name;
+ struct aead_alg alg;
+};
+
+struct simd_aead_ctx {
+ struct cryptd_aead *cryptd_tfm;
+};
+
+static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_aead *child = &ctx->cryptd_tfm->base;
+ int err;
+
+ crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_aead_setkey(child, key, key_len);
+ crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_aead *child = &ctx->cryptd_tfm->base;
+
+ return crypto_aead_setauthsize(child, authsize);
+}
+
+static int simd_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_request *subreq;
+ struct crypto_aead *child;
+
+ subreq = aead_request_ctx(req);
+ *subreq = *req;
+
+ if (!crypto_simd_usable() ||
+ (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_aead_child(ctx->cryptd_tfm);
+
+ aead_request_set_tfm(subreq, child);
+
+ return crypto_aead_encrypt(subreq);
+}
+
+static int simd_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct aead_request *subreq;
+ struct crypto_aead *child;
+
+ subreq = aead_request_ctx(req);
+ *subreq = *req;
+
+ if (!crypto_simd_usable() ||
+ (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
+ child = &ctx->cryptd_tfm->base;
+ else
+ child = cryptd_aead_child(ctx->cryptd_tfm);
+
+ aead_request_set_tfm(subreq, child);
+
+ return crypto_aead_decrypt(subreq);
+}
+
+static void simd_aead_exit(struct crypto_aead *tfm)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ cryptd_free_aead(ctx->cryptd_tfm);
+}
+
+static int simd_aead_init(struct crypto_aead *tfm)
+{
+ struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct cryptd_aead *cryptd_tfm;
+ struct simd_aead_alg *salg;
+ struct aead_alg *alg;
+ unsigned reqsize;
+
+ alg = crypto_aead_alg(tfm);
+ salg = container_of(alg, struct simd_aead_alg, alg);
+
+ cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ ctx->cryptd_tfm = cryptd_tfm;
+
+ reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
+ reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
+ reqsize += sizeof(struct aead_request);
+
+ crypto_aead_set_reqsize(tfm, reqsize);
+
+ return 0;
+}
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename)
+{
+ struct simd_aead_alg *salg;
+ struct crypto_aead *tfm;
+ struct aead_alg *ialg;
+ struct aead_alg *alg;
+ int err;
+
+ tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return ERR_CAST(tfm);
+
+ ialg = crypto_aead_alg(tfm);
+
+ salg = kzalloc(sizeof(*salg), GFP_KERNEL);
+ if (!salg) {
+ salg = ERR_PTR(-ENOMEM);
+ goto out_put_tfm;
+ }
+
+ salg->ialg_name = basename;
+ alg = &salg->alg;
+
+ err = -ENAMETOOLONG;
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
+ CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ drvname) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_salg;
+
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC;
+ alg->base.cra_priority = ialg->base.cra_priority;
+ alg->base.cra_blocksize = ialg->base.cra_blocksize;
+ alg->base.cra_alignmask = ialg->base.cra_alignmask;
+ alg->base.cra_module = ialg->base.cra_module;
+ alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);
+
+ alg->ivsize = ialg->ivsize;
+ alg->maxauthsize = ialg->maxauthsize;
+ alg->chunksize = ialg->chunksize;
+
+ alg->init = simd_aead_init;
+ alg->exit = simd_aead_exit;
+
+ alg->setkey = simd_aead_setkey;
+ alg->setauthsize = simd_aead_setauthsize;
+ alg->encrypt = simd_aead_encrypt;
+ alg->decrypt = simd_aead_decrypt;
+
+ err = crypto_register_aead(alg);
+ if (err)
+ goto out_free_salg;
+
+out_put_tfm:
+ crypto_free_aead(tfm);
+ return salg;
+
+out_free_salg:
+ kfree(salg);
+ salg = ERR_PTR(err);
+ goto out_put_tfm;
+}
+EXPORT_SYMBOL_GPL(simd_aead_create_compat);
+
+struct simd_aead_alg *simd_aead_create(const char *algname,
+ const char *basename)
+{
+ char drvname[CRYPTO_MAX_ALG_NAME];
+
+ if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
+ CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ return simd_aead_create_compat(algname, drvname, basename);
+}
+EXPORT_SYMBOL_GPL(simd_aead_create);
+
+void simd_aead_free(struct simd_aead_alg *salg)
+{
+ crypto_unregister_aead(&salg->alg);
+ kfree(salg);
+}
+EXPORT_SYMBOL_GPL(simd_aead_free);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs)
+{
+ int err;
+ int i;
+ const char *algname;
+ const char *drvname;
+ const char *basename;
+ struct simd_aead_alg *simd;
+
+ err = crypto_register_aeads(algs, count);
+ if (err)
+ return err;
+
+ for (i = 0; i < count; i++) {
+ WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
+ WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
+ algname = algs[i].base.cra_name + 2;
+ drvname = algs[i].base.cra_driver_name + 2;
+ basename = algs[i].base.cra_driver_name;
+ simd = simd_aead_create_compat(algname, drvname, basename);
+ err = PTR_ERR(simd);
+ if (IS_ERR(simd))
+ goto err_unregister;
+ simd_algs[i] = simd;
+ }
+ return 0;
+
+err_unregister:
+ simd_unregister_aeads(algs, count, simd_algs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(simd_register_aeads_compat);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs)
+{
+ int i;
+
+ crypto_unregister_aeads(algs, count);
+
+ for (i = 0; i < count; i++) {
+ if (simd_algs[i]) {
+ simd_aead_free(simd_algs[i]);
+ simd_algs[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(simd_unregister_aeads);
+
MODULE_LICENSE("GPL");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bcf13d95f54a..2e66f312e2c4 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -131,8 +131,13 @@ unmap_src:
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
- if (WARN_ON(err)) {
- /* unexpected case; didn't process all bytes */
+ if (err) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+ * the message wasn't evenly divisible into blocks but
+ * the algorithm requires it.
+ */
err = -EINVAL;
goto finish;
}
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index c0cf87ae7ef6..e227bcada2a2 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -199,7 +199,7 @@ static void __exit sm3_generic_mod_fini(void)
crypto_unregister_shash(&sm3_alg);
}
-module_init(sm3_generic_mod_init);
+subsys_initcall(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index c18eebfd5edd..71ffb343709a 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -237,7 +237,7 @@ static void __exit sm4_fini(void)
crypto_unregister_alg(&sm4_alg);
}
-module_init(sm4_init);
+subsys_initcall(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
index 5a2eafed9c29..63663c3bab7e 100644
--- a/crypto/streebog_generic.c
+++ b/crypto/streebog_generic.c
@@ -996,7 +996,7 @@ static void streebog_add512(const struct streebog_uint512 *x,
static void streebog_g(struct streebog_uint512 *h,
const struct streebog_uint512 *N,
- const u8 *m)
+ const struct streebog_uint512 *m)
{
struct streebog_uint512 Ki, data;
unsigned int i;
@@ -1005,7 +1005,7 @@ static void streebog_g(struct streebog_uint512 *h,
/* Starting E() */
Ki = data;
- streebog_xlps(&Ki, (const struct streebog_uint512 *)&m[0], &data);
+ streebog_xlps(&Ki, m, &data);
for (i = 0; i < 11; i++)
streebog_round(i, &Ki, &data);
@@ -1015,16 +1015,19 @@ static void streebog_g(struct streebog_uint512 *h,
/* E() done */
streebog_xor(&data, h, &data);
- streebog_xor(&data, (const struct streebog_uint512 *)&m[0], h);
+ streebog_xor(&data, m, h);
}
static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
{
- streebog_g(&ctx->h, &ctx->N, data);
+ struct streebog_uint512 m;
+
+ memcpy(&m, data, sizeof(m));
+
+ streebog_g(&ctx->h, &ctx->N, &m);
streebog_add512(&ctx->N, &buffer512, &ctx->N);
- streebog_add512(&ctx->Sigma, (const struct streebog_uint512 *)data,
- &ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &m, &ctx->Sigma);
}
static void streebog_stage3(struct streebog_state *ctx)
@@ -1034,13 +1037,11 @@ static void streebog_stage3(struct streebog_state *ctx)
buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
streebog_pad(ctx);
- streebog_g(&ctx->h, &ctx->N, (const u8 *)&ctx->buffer);
+ streebog_g(&ctx->h, &ctx->N, &ctx->m);
streebog_add512(&ctx->N, &buf, &ctx->N);
- streebog_add512(&ctx->Sigma,
- (const struct streebog_uint512 *)&ctx->buffer[0],
- &ctx->Sigma);
- streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->N);
- streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->Sigma);
+ streebog_add512(&ctx->Sigma, &ctx->m, &ctx->Sigma);
+ streebog_g(&ctx->h, &buffer0, &ctx->N);
+ streebog_g(&ctx->h, &buffer0, &ctx->Sigma);
memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
}
@@ -1127,7 +1128,7 @@ static void __exit streebog_mod_fini(void)
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
-module_init(streebog_mod_init);
+subsys_initcall(streebog_mod_init);
module_exit(streebog_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ea2d5007ff5..798253f05203 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -3053,7 +3053,7 @@ err_free_tv:
*/
static void __exit tcrypt_mod_fini(void) { }
-module_init(tcrypt_mod_init);
+subsys_initcall(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);
diff --git a/crypto/tea.c b/crypto/tea.c
index b70b441c7d1e..786b589e1399 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -274,7 +274,7 @@ MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
-module_init(tea_mod_init);
+subsys_initcall(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8386038d67c7..c9e67c2bd725 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -37,6 +37,7 @@
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/acompress.h>
+#include <crypto/internal/simd.h>
#include "internal.h"
@@ -44,6 +45,9 @@ static bool notests;
module_param(notests, bool, 0644);
MODULE_PARM_DESC(notests, "disable crypto self-tests");
+static bool panic_on_fail;
+module_param(panic_on_fail, bool, 0444);
+
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
static bool noextratests;
module_param(noextratests, bool, 0644);
@@ -52,6 +56,9 @@ MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
static unsigned int fuzz_iterations = 100;
module_param(fuzz_iterations, uint, 0644);
MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
+
+DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
+EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
#endif
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
@@ -121,6 +128,7 @@ struct kpp_test_suite {
struct alg_test_desc {
const char *alg;
+ const char *generic_driver;
int (*test)(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask);
int fips_allowed; /* set if alg is allowed in fips mode */
@@ -230,12 +238,14 @@ enum finalization_type {
* @offset
* @flush_type: for hashes, whether an update() should be done now vs.
* continuing to accumulate data
+ * @nosimd: if doing the pending update(), do it with SIMD disabled?
*/
struct test_sg_division {
unsigned int proportion_of_total;
unsigned int offset;
bool offset_relative_to_alignmask;
enum flush_type flush_type;
+ bool nosimd;
};
/**
@@ -255,6 +265,7 @@ struct test_sg_division {
* @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
* the @iv_offset
* @finalization_type: what finalization function to use for hashes
+ * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
struct testvec_config {
const char *name;
@@ -265,6 +276,7 @@ struct testvec_config {
unsigned int iv_offset;
bool iv_offset_relative_to_alignmask;
enum finalization_type finalization_type;
+ bool nosimd;
};
#define TESTVEC_CONFIG_NAMELEN 192
@@ -416,8 +428,11 @@ static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
return ndivs;
}
+#define SGDIVS_HAVE_FLUSHES BIT(0)
+#define SGDIVS_HAVE_NOSIMD BIT(1)
+
static bool valid_sg_divisions(const struct test_sg_division *divs,
- unsigned int count, bool *any_flushes_ret)
+ unsigned int count, int *flags_ret)
{
unsigned int total = 0;
unsigned int i;
@@ -428,7 +443,9 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
return false;
total += divs[i].proportion_of_total;
if (divs[i].flush_type != FLUSH_TYPE_NONE)
- *any_flushes_ret = true;
+ *flags_ret |= SGDIVS_HAVE_FLUSHES;
+ if (divs[i].nosimd)
+ *flags_ret |= SGDIVS_HAVE_NOSIMD;
}
return total == TEST_SG_TOTAL &&
memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
@@ -441,19 +458,18 @@ static bool valid_sg_divisions(const struct test_sg_division *divs,
*/
static bool valid_testvec_config(const struct testvec_config *cfg)
{
- bool any_flushes = false;
+ int flags = 0;
if (cfg->name == NULL)
return false;
if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
- &any_flushes))
+ &flags))
return false;
if (cfg->dst_divs[0].proportion_of_total) {
if (!valid_sg_divisions(cfg->dst_divs,
- ARRAY_SIZE(cfg->dst_divs),
- &any_flushes))
+ ARRAY_SIZE(cfg->dst_divs), &flags))
return false;
} else {
if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
@@ -466,7 +482,12 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
MAX_ALGAPI_ALIGNMASK + 1)
return false;
- if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+ if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
+ cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
+ return false;
+
+ if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
+ (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
return false;
return true;
@@ -725,15 +746,101 @@ static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
}
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+
+/* Generate a random length in range [0, max_len], but prefer smaller values */
+static unsigned int generate_random_length(unsigned int max_len)
+{
+ unsigned int len = prandom_u32() % (max_len + 1);
+
+ switch (prandom_u32() % 4) {
+ case 0:
+ return len % 64;
+ case 1:
+ return len % 256;
+ case 2:
+ return len % 1024;
+ default:
+ return len;
+ }
+}
+
+/* Sometimes make some random changes to the given data buffer */
+static void mutate_buffer(u8 *buf, size_t count)
+{
+ size_t num_flips;
+ size_t i;
+ size_t pos;
+
+ /* Sometimes flip some bits */
+ if (prandom_u32() % 4 == 0) {
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count * 8);
+ for (i = 0; i < num_flips; i++) {
+ pos = prandom_u32() % (count * 8);
+ buf[pos / 8] ^= 1 << (pos % 8);
+ }
+ }
+
+ /* Sometimes flip some bytes */
+ if (prandom_u32() % 4 == 0) {
+ num_flips = min_t(size_t, 1 << (prandom_u32() % 8), count);
+ for (i = 0; i < num_flips; i++)
+ buf[prandom_u32() % count] ^= 0xff;
+ }
+}
+
+/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
+static void generate_random_bytes(u8 *buf, size_t count)
+{
+ u8 b;
+ u8 increment;
+ size_t i;
+
+ if (count == 0)
+ return;
+
+ switch (prandom_u32() % 8) { /* Choose a generation strategy */
+ case 0:
+ case 1:
+ /* All the same byte, plus optional mutations */
+ switch (prandom_u32() % 4) {
+ case 0:
+ b = 0x00;
+ break;
+ case 1:
+ b = 0xff;
+ break;
+ default:
+ b = (u8)prandom_u32();
+ break;
+ }
+ memset(buf, b, count);
+ mutate_buffer(buf, count);
+ break;
+ case 2:
+ /* Ascending or descending bytes, plus optional mutations */
+ increment = (u8)prandom_u32();
+ b = (u8)prandom_u32();
+ for (i = 0; i < count; i++, b += increment)
+ buf[i] = b;
+ mutate_buffer(buf, count);
+ break;
+ default:
+ /* Fully random bytes */
+ for (i = 0; i < count; i++)
+ buf[i] = (u8)prandom_u32();
+ }
+}
+
static char *generate_random_sgl_divisions(struct test_sg_division *divs,
size_t max_divs, char *p, char *end,
- bool gen_flushes)
+ bool gen_flushes, u32 req_flags)
{
struct test_sg_division *div = divs;
unsigned int remaining = TEST_SG_TOTAL;
do {
unsigned int this_len;
+ const char *flushtype_str;
if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
this_len = remaining;
@@ -762,11 +869,31 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
}
}
+ if (div->flush_type != FLUSH_TYPE_NONE &&
+ !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+ prandom_u32() % 2 == 0)
+ div->nosimd = true;
+
+ switch (div->flush_type) {
+ case FLUSH_TYPE_FLUSH:
+ if (div->nosimd)
+ flushtype_str = "<flush,nosimd>";
+ else
+ flushtype_str = "<flush>";
+ break;
+ case FLUSH_TYPE_REIMPORT:
+ if (div->nosimd)
+ flushtype_str = "<reimport,nosimd>";
+ else
+ flushtype_str = "<reimport>";
+ break;
+ default:
+ flushtype_str = "";
+ break;
+ }
+
BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
- p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s",
- div->flush_type == FLUSH_TYPE_NONE ? "" :
- div->flush_type == FLUSH_TYPE_FLUSH ?
- "<flush> " : "<reimport> ",
+ p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
this_len / 100, this_len % 100,
div->offset_relative_to_alignmask ?
"alignmask" : "",
@@ -816,18 +943,26 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break;
}
+ if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
+ prandom_u32() % 2 == 0) {
+ cfg->nosimd = true;
+ p += scnprintf(p, end - p, " nosimd");
+ }
+
p += scnprintf(p, end - p, " src_divs=[");
p = generate_random_sgl_divisions(cfg->src_divs,
ARRAY_SIZE(cfg->src_divs), p, end,
(cfg->finalization_type !=
- FINALIZATION_TYPE_DIGEST));
+ FINALIZATION_TYPE_DIGEST),
+ cfg->req_flags);
p += scnprintf(p, end - p, "]");
if (!cfg->inplace && prandom_u32() % 2 == 0) {
p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs),
- p, end, false);
+ p, end, false,
+ cfg->req_flags);
p += scnprintf(p, end - p, "]");
}
@@ -838,21 +973,100 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
WARN_ON_ONCE(!valid_testvec_config(cfg));
}
-#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static void crypto_disable_simd_for_test(void)
+{
+ preempt_disable();
+ __this_cpu_write(crypto_simd_disabled_for_test, true);
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+ __this_cpu_write(crypto_simd_disabled_for_test, false);
+ preempt_enable();
+}
+
+/*
+ * Given an algorithm name, build the name of the generic implementation of that
+ * algorithm, assuming the usual naming convention. Specifically, this appends
+ * "-generic" to every part of the name that is not a template name. Examples:
+ *
+ * aes => aes-generic
+ * cbc(aes) => cbc(aes-generic)
+ * cts(cbc(aes)) => cts(cbc(aes-generic))
+ * rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
+ *
+ * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
+ */
+static int build_generic_driver_name(const char *algname,
+ char driver_name[CRYPTO_MAX_ALG_NAME])
+{
+ const char *in = algname;
+ char *out = driver_name;
+ size_t len = strlen(algname);
+
+ if (len >= CRYPTO_MAX_ALG_NAME)
+ goto too_long;
+ do {
+ const char *in_saved = in;
+
+ while (*in && *in != '(' && *in != ')' && *in != ',')
+ *out++ = *in++;
+ if (*in != '(' && in > in_saved) {
+ len += 8;
+ if (len >= CRYPTO_MAX_ALG_NAME)
+ goto too_long;
+ memcpy(out, "-generic", 8);
+ out += 8;
+ }
+ } while ((*out++ = *in++) != '\0');
+ return 0;
+
+too_long:
+ pr_err("alg: generic driver name for \"%s\" would be too long\n",
+ algname);
+ return -ENAMETOOLONG;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static void crypto_disable_simd_for_test(void)
+{
+}
+
+static void crypto_reenable_simd_for_test(void)
+{
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
+static int do_ahash_op(int (*op)(struct ahash_request *req),
+ struct ahash_request *req,
+ struct crypto_wait *wait, bool nosimd)
+{
+ int err;
+
+ if (nosimd)
+ crypto_disable_simd_for_test();
+
+ err = op(req);
+
+ if (nosimd)
+ crypto_reenable_simd_for_test();
+
+ return crypto_wait_req(err, wait);
+}
static int check_nonfinal_hash_op(const char *op, int err,
u8 *result, unsigned int digestsize,
- const char *driver, unsigned int vec_num,
+ const char *driver, const char *vec_name,
const struct testvec_config *cfg)
{
if (err) {
- pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, op, err, vec_name, cfg->name);
return err;
}
if (!testmgr_is_poison(result, digestsize)) {
- pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: hash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
return 0;
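
[Editorial note] The testmgr hunk above adds the "nosimd" test hook: crypto_disable_simd_for_test() disables preemption and sets the per-CPU crypto_simd_disabled_for_test flag, do_ahash_op() brackets a single operation with it, and implementations that check crypto_simd_usable() then take their fallback path. A user-space analogue of the idea (not kernel code; a thread-local flag stands in for the per-CPU flag plus disabled preemption):

#include <stdbool.h>

static _Thread_local bool simd_disabled_for_test;

static bool simd_usable(void)
{
	/* Real code would also check CPU features and execution context. */
	return !simd_disabled_for_test;
}

static int do_op_nosimd(int (*op)(void *arg), void *arg, bool nosimd)
{
	int err;

	if (nosimd)
		simd_disabled_for_test = true;
	err = op(arg);
	if (nosimd)
		simd_disabled_for_test = false;
	return err;
}
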
@@ -860,7 +1074,7 @@ static int check_nonfinal_hash_op(const char *op, int err,
static int test_hash_vec_cfg(const char *driver,
const struct hash_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct ahash_request *req,
struct test_sglist *tsgl,
@@ -885,11 +1099,18 @@ static int test_hash_vec_cfg(const char *driver,
if (vec->ksize) {
err = crypto_ahash_setkey(tfm, vec->key, vec->ksize);
if (err) {
- pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num,
+ if (err == vec->setkey_error)
+ return 0;
+ pr_err("alg: hash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
crypto_ahash_get_flags(tfm));
return err;
}
+ if (vec->setkey_error) {
+ pr_err("alg: hash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
+ return -EINVAL;
+ }
}
/* Build the scatterlist for the source data */
@@ -899,8 +1120,8 @@ static int test_hash_vec_cfg(const char *driver,
err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
&input, divs);
if (err) {
- pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return err;
}
@@ -909,17 +1130,26 @@ static int test_hash_vec_cfg(const char *driver,
testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
- if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) {
+ if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
+ vec->digest_error) {
/* Just using digest() */
ahash_request_set_callback(req, req_flags, crypto_req_done,
&wait);
ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
- err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ if (err == vec->digest_error)
+ return 0;
+ pr_err("alg: hash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, vec_name, vec->digest_error, err,
+ cfg->name);
return err;
}
+ if (vec->digest_error) {
+ pr_err("alg: hash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, vec_name, vec->digest_error, cfg->name);
+ return -EINVAL;
+ }
goto result_ready;
}
@@ -927,9 +1157,9 @@ static int test_hash_vec_cfg(const char *driver,
ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
ahash_request_set_crypt(req, NULL, result, 0);
- err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
err = check_nonfinal_hash_op("init", err, result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
@@ -943,10 +1173,11 @@ static int test_hash_vec_cfg(const char *driver,
crypto_req_done, &wait);
ahash_request_set_crypt(req, pending_sgl, result,
pending_len);
- err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = do_ahash_op(crypto_ahash_update, req, &wait,
+ divs[i]->nosimd);
err = check_nonfinal_hash_op("update", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
pending_sgl = NULL;
@@ -959,13 +1190,13 @@ static int test_hash_vec_cfg(const char *driver,
err = crypto_ahash_export(req, hashstate);
err = check_nonfinal_hash_op("export", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
if (!testmgr_is_poison(hashstate + statesize,
TESTMGR_POISON_LEN)) {
- pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EOVERFLOW;
}
@@ -973,7 +1204,7 @@ static int test_hash_vec_cfg(const char *driver,
err = crypto_ahash_import(req, hashstate);
err = check_nonfinal_hash_op("import", err,
result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
}
@@ -986,23 +1217,23 @@ static int test_hash_vec_cfg(const char *driver,
ahash_request_set_crypt(req, pending_sgl, result, pending_len);
if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
/* finish with update() and final() */
- err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
err = check_nonfinal_hash_op("update", err, result, digestsize,
- driver, vec_num, cfg);
+ driver, vec_name, cfg);
if (err)
return err;
- err = crypto_wait_req(crypto_ahash_final(req), &wait);
+ err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, err, vec_name, cfg->name);
return err;
}
} else {
/* finish with finup() */
- err = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
if (err) {
- pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, err, vec_num, cfg->name);
+ pr_err("alg: hash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
+ driver, err, vec_name, cfg->name);
return err;
}
}
@@ -1010,13 +1241,13 @@ static int test_hash_vec_cfg(const char *driver,
result_ready:
/* Check that the algorithm produced the correct digest */
if (memcmp(result, vec->digest, digestsize) != 0) {
- pr_err("alg: hash: %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EINVAL;
}
if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
- pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n",
- driver, vec_num, cfg->name);
+ pr_err("alg: hash: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
+ driver, vec_name, cfg->name);
return -EOVERFLOW;
}
@@ -1027,11 +1258,14 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
unsigned int vec_num, struct ahash_request *req,
struct test_sglist *tsgl, u8 *hashstate)
{
+ char vec_name[16];
unsigned int i;
int err;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
- err = test_hash_vec_cfg(driver, vec, vec_num,
+ err = test_hash_vec_cfg(driver, vec, vec_name,
&default_hash_testvec_configs[i],
req, tsgl, hashstate);
if (err)
@@ -1046,7 +1280,7 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_hash_vec_cfg(driver, vec, vec_num, &cfg,
+ err = test_hash_vec_cfg(driver, vec, vec_name, &cfg,
req, tsgl, hashstate);
if (err)
return err;
@@ -1056,9 +1290,168 @@ static int test_hash_vec(const char *driver, const struct hash_testvec *vec,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a hash test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_hash_testvec(struct crypto_shash *tfm,
+ struct hash_testvec *vec,
+ unsigned int maxkeysize,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+
+ /* Data */
+ vec->psize = generate_random_length(maxdatasize);
+ generate_random_bytes((u8 *)vec->plaintext, vec->psize);
+
+ /*
+ * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
+ * If algorithm is unkeyed, then maxkeysize == 0 and set ksize = 0.
+ */
+ vec->setkey_error = 0;
+ vec->ksize = 0;
+ if (maxkeysize) {
+ vec->ksize = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->ksize = 1 + (prandom_u32() % maxkeysize);
+ generate_random_bytes((u8 *)vec->key, vec->ksize);
+
+ vec->setkey_error = crypto_shash_setkey(tfm, vec->key,
+ vec->ksize);
+ /* If the key couldn't be set, no need to continue to digest. */
+ if (vec->setkey_error)
+ goto done;
+ }
+
+ /* Digest */
+ desc->tfm = tfm;
+ vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
+ vec->psize, (u8 *)vec->digest);
+done:
+ snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
+ vec->psize, vec->ksize);
+}
+
+/*
+ * Test the hash algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_hash_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ unsigned int maxkeysize,
+ struct ahash_request *req,
+ struct test_sglist *tsgl,
+ u8 *hashstate)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ const unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ const unsigned int blocksize = crypto_ahash_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_shash *generic_tfm = NULL;
+ unsigned int i;
+ struct hash_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (digestsize != crypto_shash_digestsize(generic_tfm)) {
+ pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, digestsize,
+ crypto_shash_digestsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_shash_blocksize(generic_tfm)) {
+ pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize, crypto_shash_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+ vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.digest = kmalloc(digestsize, GFP_KERNEL);
+ if (!vec.key || !vec.plaintext || !vec.digest) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_hash_testvec(generic_tfm, &vec,
+ maxkeysize, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_hash_vec_cfg(driver, &vec, vec_name, &cfg,
+ req, tsgl, hashstate);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.plaintext);
+ kfree(vec.digest);
+ crypto_free_shash(generic_tfm);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_hash_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ unsigned int maxkeysize,
+ struct ahash_request *req,
+ struct test_sglist *tsgl,
+ u8 *hashstate)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int __alg_test_hash(const struct hash_testvec *vecs,
unsigned int num_vecs, const char *driver,
- u32 type, u32 mask)
+ u32 type, u32 mask,
+ const char *generic_driver, unsigned int maxkeysize)
{
struct crypto_ahash *tfm;
struct ahash_request *req = NULL;
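
[Editorial note] The hunk above adds test_hash_vs_generic_impl(): random test vectors are produced with the generic implementation (its driver name built by appending "-generic" per the documented convention) and the implementation under test must agree on every one. A standalone sketch of that comparison strategy (not kernel code; checksum_ref/checksum_fast are toy stand-ins for the generic and accelerated hash drivers):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Reference implementation: one byte at a time. */
static uint32_t checksum_ref(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];
	return sum;
}

/* "Optimized" implementation under test: four bytes per iteration. */
static uint32_t checksum_fast(const uint8_t *data, size_t len)
{
	uint32_t sum = 0;
	size_t i = 0;

	for (; i + 4 <= len; i += 4)
		sum += data[i] + data[i + 1] + data[i + 2] + data[i + 3];
	for (; i < len; i++)
		sum += data[i];
	return sum;
}

int main(void)
{
	uint8_t buf[1024];
	unsigned int iter;
	size_t i, len;

	for (iter = 0; iter < 1000; iter++) {
		len = (size_t)rand() % sizeof(buf);	/* random length */
		for (i = 0; i < len; i++)
			buf[i] = (uint8_t)rand();	/* random contents */
		assert(checksum_fast(buf, len) == checksum_ref(buf, len));
	}
	return 0;
}
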
@@ -1106,7 +1499,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
if (err)
goto out;
}
- err = 0;
+ err = test_hash_vs_generic_impl(driver, generic_driver, maxkeysize, req,
+ tsgl, hashstate);
out:
kfree(hashstate);
if (tsgl) {
@@ -1124,6 +1518,7 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
const struct hash_testvec *template = desc->suite.hash.vecs;
unsigned int tcount = desc->suite.hash.count;
unsigned int nr_unkeyed, nr_keyed;
+ unsigned int maxkeysize = 0;
int err;
/*
@@ -1142,23 +1537,27 @@ static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
"unkeyed ones must come first\n", desc->alg);
return -EINVAL;
}
+ maxkeysize = max_t(unsigned int, maxkeysize,
+ template[nr_unkeyed + nr_keyed].ksize);
}
err = 0;
if (nr_unkeyed) {
- err = __alg_test_hash(template, nr_unkeyed, driver, type, mask);
+ err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
+ desc->generic_driver, maxkeysize);
template += nr_unkeyed;
}
if (!err && nr_keyed)
- err = __alg_test_hash(template, nr_keyed, driver, type, mask);
+ err = __alg_test_hash(template, nr_keyed, driver, type, mask,
+ desc->generic_driver, maxkeysize);
return err;
}
static int test_aead_vec_cfg(const char *driver, int enc,
const struct aead_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
@@ -1175,6 +1574,7 @@ static int test_aead_vec_cfg(const char *driver, int enc,
cfg->iv_offset +
(cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
struct kvec input[2];
+ int expected_error;
int err;
/* Set the key */
@@ -1183,26 +1583,33 @@ static int test_aead_vec_cfg(const char *driver, int enc,
else
crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_aead_setkey(tfm, vec->key, vec->klen);
- if (err) {
- if (vec->fail) /* expectedly failed to set key? */
- return 0;
- pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num, crypto_aead_get_flags(tfm));
+ if (err && err != vec->setkey_error) {
+ pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
+ crypto_aead_get_flags(tfm));
return err;
}
- if (vec->fail) {
- pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n",
- driver, vec_num);
+ if (!err && vec->setkey_error) {
+ pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
return -EINVAL;
}
/* Set the authentication tag size */
err = crypto_aead_setauthsize(tfm, authsize);
- if (err) {
- pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n",
- driver, err, vec_num);
+ if (err && err != vec->setauthsize_error) {
+ pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
+ driver, vec_name, vec->setauthsize_error, err);
return err;
}
+ if (!err && vec->setauthsize_error) {
+ pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setauthsize_error);
+ return -EINVAL;
+ }
+
+ if (vec->setkey_error || vec->setauthsize_error)
+ return 0;
/* The IV must be copied to a buffer, as the algorithm may modify it */
if (WARN_ON(ivsize > MAX_IVLEN))
@@ -1224,8 +1631,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
vec->plen),
input, 2);
if (err) {
- pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1235,23 +1642,12 @@ static int test_aead_vec_cfg(const char *driver, int enc,
aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
enc ? vec->plen : vec->clen, iv);
aead_request_set_ad(req, vec->alen);
- err = crypto_wait_req(enc ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req), &wait);
-
- aead_request_set_tfm(req, tfm); /* TODO: get rid of this */
-
- if (err) {
- if (err == -EBADMSG && vec->novrfy)
- return 0;
- pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
- return err;
- }
- if (vec->novrfy) {
- pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
- return -EINVAL;
- }
+ if (cfg->nosimd)
+ crypto_disable_simd_for_test();
+ err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ if (cfg->nosimd)
+ crypto_reenable_simd_for_test();
+ err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
@@ -1263,8 +1659,8 @@ static int test_aead_vec_cfg(const char *driver, int enc,
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
- pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
if (req->cryptlen != (enc ? vec->plen : vec->clen))
pr_err("alg: aead: changed 'req->cryptlen'\n");
if (req->assoclen != vec->alen)
@@ -1286,14 +1682,29 @@ static int test_aead_vec_cfg(const char *driver, int enc,
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
- pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
- pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for success or failure */
+ expected_error = vec->novrfy ? -EBADMSG : vec->crypt_error;
+ if (err) {
+ if (err == expected_error)
+ return 0;
+ pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, expected_error, err, cfg->name);
+ return err;
+ }
+ if (expected_error) {
+ pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, expected_error, cfg->name);
return -EINVAL;
}
@@ -1302,13 +1713,13 @@ static int test_aead_vec_cfg(const char *driver, int enc,
enc ? vec->clen : vec->plen,
vec->alen, enc || !cfg->inplace);
if (err == -EOVERFLOW) {
- pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
if (err) {
- pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1320,14 +1731,17 @@ static int test_aead_vec(const char *driver, int enc,
struct aead_request *req,
struct cipher_test_sglists *tsgls)
{
+ char vec_name[16];
unsigned int i;
int err;
if (enc && vec->novrfy)
return 0;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
- err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ err = test_aead_vec_cfg(driver, enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@@ -1342,7 +1756,7 @@ static int test_aead_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_aead_vec_cfg(driver, enc, vec, vec_num,
+ err = test_aead_vec_cfg(driver, enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@@ -1352,6 +1766,226 @@ static int test_aead_vec(const char *driver, int enc,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate an AEAD test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_aead_testvec(struct aead_request *req,
+ struct aead_testvec *vec,
+ unsigned int maxkeysize,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+ unsigned int authsize;
+ unsigned int total_len;
+ int i;
+ struct scatterlist src[2], dst;
+ u8 iv[MAX_IVLEN];
+ DECLARE_CRYPTO_WAIT(wait);
+
+ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ vec->klen = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->klen = prandom_u32() % (maxkeysize + 1);
+ generate_random_bytes((u8 *)vec->key, vec->klen);
+ vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
+
+ /* IV */
+ generate_random_bytes((u8 *)vec->iv, ivsize);
+
+ /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
+ authsize = maxauthsize;
+ if (prandom_u32() % 4 == 0)
+ authsize = prandom_u32() % (maxauthsize + 1);
+ if (WARN_ON(authsize > maxdatasize))
+ authsize = maxdatasize;
+ maxdatasize -= authsize;
+ vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
+
+ /* Plaintext and associated data */
+ total_len = generate_random_length(maxdatasize);
+ if (prandom_u32() % 4 == 0)
+ vec->alen = 0;
+ else
+ vec->alen = generate_random_length(total_len);
+ vec->plen = total_len - vec->alen;
+ generate_random_bytes((u8 *)vec->assoc, vec->alen);
+ generate_random_bytes((u8 *)vec->ptext, vec->plen);
+
+ vec->clen = vec->plen + authsize;
+
+ /*
+ * If the key or authentication tag size couldn't be set, no need to
+ * continue to encrypt.
+ */
+ if (vec->setkey_error || vec->setauthsize_error)
+ goto done;
+
+ /* Ciphertext */
+ sg_init_table(src, 2);
+ i = 0;
+ if (vec->alen)
+ sg_set_buf(&src[i++], vec->assoc, vec->alen);
+ if (vec->plen)
+ sg_set_buf(&src[i++], vec->ptext, vec->plen);
+ sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
+ memcpy(iv, vec->iv, ivsize);
+ aead_request_set_callback(req, 0, crypto_req_done, &wait);
+ aead_request_set_crypt(req, src, &dst, vec->plen, iv);
+ aead_request_set_ad(req, vec->alen);
+ vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req), &wait);
+ if (vec->crypt_error == 0)
+ memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
+done:
+ snprintf(name, max_namelen,
+ "\"random: alen=%u plen=%u authsize=%u klen=%u\"",
+ vec->alen, vec->plen, authsize, vec->klen);
+}
+
+/*
+ * Test the AEAD algorithm represented by @req against the corresponding generic
+ * implementation, if one is available.
+ */
+static int test_aead_vs_generic_impl(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ const unsigned int ivsize = crypto_aead_ivsize(tfm);
+ const unsigned int maxauthsize = crypto_aead_alg(tfm)->maxauthsize;
+ const unsigned int blocksize = crypto_aead_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_aead_alg(tfm)->base.cra_name;
+ const char *generic_driver = test_desc->generic_driver;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_aead *generic_tfm = NULL;
+ struct aead_request *generic_req = NULL;
+ unsigned int maxkeysize;
+ unsigned int i;
+ struct aead_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (maxauthsize != crypto_aead_alg(generic_tfm)->maxauthsize) {
+ pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, maxauthsize,
+ crypto_aead_alg(generic_tfm)->maxauthsize);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ivsize != crypto_aead_ivsize(generic_tfm)) {
+ pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, ivsize, crypto_aead_ivsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_aead_blocksize(generic_tfm)) {
+ pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize, crypto_aead_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ maxkeysize = 0;
+ for (i = 0; i < test_desc->suite.aead.count; i++)
+ maxkeysize = max_t(unsigned int, maxkeysize,
+ test_desc->suite.aead.vecs[i].klen);
+
+ vec.key = kmalloc(maxkeysize, GFP_KERNEL);
+ vec.iv = kmalloc(ivsize, GFP_KERNEL);
+ vec.assoc = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+ if (!vec.key || !vec.iv || !vec.assoc || !vec.ptext || !vec.ctext) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_aead_testvec(generic_req, &vec,
+ maxkeysize, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_aead_vec_cfg(driver, ENCRYPT, &vec, vec_name, &cfg,
+ req, tsgls);
+ if (err)
+ goto out;
+ err = test_aead_vec_cfg(driver, DECRYPT, &vec, vec_name, &cfg,
+ req, tsgls);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.iv);
+ kfree(vec.assoc);
+ kfree(vec.ptext);
+ kfree(vec.ctext);
+ crypto_free_aead(generic_tfm);
+ aead_request_free(generic_req);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_aead_vs_generic_impl(const char *driver,
+ const struct alg_test_desc *test_desc,
+ struct aead_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int test_aead(const char *driver, int enc,
const struct aead_test_suite *suite,
struct aead_request *req,
@@ -1411,6 +2045,10 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
goto out;
err = test_aead(driver, DECRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_aead_vs_generic_impl(driver, desc, req, tsgls);
out:
free_cipher_test_sglists(tsgls);
aead_request_free(req);
@@ -1462,13 +2100,20 @@ static int test_cipher(struct crypto_cipher *tfm, int enc,
ret = crypto_cipher_setkey(tfm, template[i].key,
template[i].klen);
- if (template[i].fail == !ret) {
- printk(KERN_ERR "alg: cipher: setkey failed "
- "on test %d for %s: flags=%x\n", j,
- algo, crypto_cipher_get_flags(tfm));
+ if (ret) {
+ if (ret == template[i].setkey_error)
+ continue;
+ pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
+ algo, j, template[i].setkey_error, ret,
+ crypto_cipher_get_flags(tfm));
goto out;
- } else if (ret)
- continue;
+ }
+ if (template[i].setkey_error) {
+ pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
+ algo, j, template[i].setkey_error);
+ ret = -EINVAL;
+ goto out;
+ }
for (k = 0; k < template[i].len;
k += crypto_cipher_blocksize(tfm)) {
@@ -1500,7 +2145,7 @@ out_nobuf:
static int test_skcipher_vec_cfg(const char *driver, int enc,
const struct cipher_testvec *vec,
- unsigned int vec_num,
+ const char *vec_name,
const struct testvec_config *cfg,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
@@ -1526,15 +2171,16 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
err = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
if (err) {
- if (vec->fail) /* expectedly failed to set key? */
+ if (err == vec->setkey_error)
return 0;
- pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n",
- driver, err, vec_num, crypto_skcipher_get_flags(tfm));
+ pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
+ driver, vec_name, vec->setkey_error, err,
+ crypto_skcipher_get_flags(tfm));
return err;
}
- if (vec->fail) {
- pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n",
- driver, vec_num);
+ if (vec->setkey_error) {
+ pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
+ driver, vec_name, vec->setkey_error);
return -EINVAL;
}
@@ -1550,8 +2196,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
memset(iv, 0, ivsize);
} else {
if (vec->generates_iv) {
- pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n",
- driver, vec_num);
+ pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
+ driver, vec_name);
return -EINVAL;
}
iv = NULL;
@@ -1563,8 +2209,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
err = build_cipher_test_sglists(tsgls, cfg, alignmask,
vec->len, vec->len, &input, 1);
if (err) {
- pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
@@ -1573,13 +2219,12 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
vec->len, iv);
- err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) :
- crypto_skcipher_decrypt(req), &wait);
- if (err) {
- pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n",
- driver, op, err, vec_num, cfg->name);
- return err;
- }
+ if (cfg->nosimd)
+ crypto_disable_simd_for_test();
+ err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+ if (cfg->nosimd)
+ crypto_reenable_simd_for_test();
+ err = crypto_wait_req(err, &wait);
/* Check that the algorithm didn't overwrite things it shouldn't have */
if (req->cryptlen != vec->len ||
@@ -1590,8 +2235,8 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
req->base.complete != crypto_req_done ||
req->base.flags != req_flags ||
req->base.data != &wait) {
- pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
if (req->cryptlen != vec->len)
pr_err("alg: skcipher: changed 'req->cryptlen'\n");
if (req->iv != iv)
@@ -1611,14 +2256,28 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
return -EINVAL;
}
if (is_test_sglist_corrupted(&tsgls->src)) {
- pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return -EINVAL;
}
if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
is_test_sglist_corrupted(&tsgls->dst)) {
- pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
+ return -EINVAL;
+ }
+
+ /* Check for success or failure */
+ if (err) {
+ if (err == vec->crypt_error)
+ return 0;
+ pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, vec->crypt_error, err, cfg->name);
+ return err;
+ }
+ if (vec->crypt_error) {
+ pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
+ driver, op, vec_name, vec->crypt_error, cfg->name);
return -EINVAL;
}
@@ -1626,20 +2285,20 @@ static int test_skcipher_vec_cfg(const char *driver, int enc,
err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
vec->len, 0, true);
if (err == -EOVERFLOW) {
- pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
if (err) {
- pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
return err;
}
/* If applicable, check that the algorithm generated the correct IV */
if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
- pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n",
- driver, op, vec_num, cfg->name);
+ pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
+ driver, op, vec_name, cfg->name);
hexdump(iv, ivsize);
return -EINVAL;
}
@@ -1653,14 +2312,17 @@ static int test_skcipher_vec(const char *driver, int enc,
struct skcipher_request *req,
struct cipher_test_sglists *tsgls)
{
+ char vec_name[16];
unsigned int i;
int err;
if (fips_enabled && vec->fips_skip)
return 0;
+ sprintf(vec_name, "%u", vec_num);
+
for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
- err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
&default_cipher_testvec_configs[i],
req, tsgls);
if (err)
@@ -1675,7 +2337,7 @@ static int test_skcipher_vec(const char *driver, int enc,
for (i = 0; i < fuzz_iterations; i++) {
generate_random_testvec_config(&cfg, cfgname,
sizeof(cfgname));
- err = test_skcipher_vec_cfg(driver, enc, vec, vec_num,
+ err = test_skcipher_vec_cfg(driver, enc, vec, vec_name,
&cfg, req, tsgls);
if (err)
return err;
@@ -1685,6 +2347,186 @@ static int test_skcipher_vec(const char *driver, int enc,
return 0;
}
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+/*
+ * Generate a symmetric cipher test vector from the given implementation.
+ * Assumes the buffers in 'vec' were already allocated.
+ */
+static void generate_random_cipher_testvec(struct skcipher_request *req,
+ struct cipher_testvec *vec,
+ unsigned int maxdatasize,
+ char *name, size_t max_namelen)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int maxkeysize = tfm->keysize;
+ const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct scatterlist src, dst;
+ u8 iv[MAX_IVLEN];
+ DECLARE_CRYPTO_WAIT(wait);
+
+ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
+ vec->klen = maxkeysize;
+ if (prandom_u32() % 4 == 0)
+ vec->klen = prandom_u32() % (maxkeysize + 1);
+ generate_random_bytes((u8 *)vec->key, vec->klen);
+ vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
+
+ /* IV */
+ generate_random_bytes((u8 *)vec->iv, ivsize);
+
+ /* Plaintext */
+ vec->len = generate_random_length(maxdatasize);
+ generate_random_bytes((u8 *)vec->ptext, vec->len);
+
+ /* If the key couldn't be set, no need to continue to encrypt. */
+ if (vec->setkey_error)
+ goto done;
+
+ /* Ciphertext */
+ sg_init_one(&src, vec->ptext, vec->len);
+ sg_init_one(&dst, vec->ctext, vec->len);
+ memcpy(iv, vec->iv, ivsize);
+ skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
+ skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
+ vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+done:
+ snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
+ vec->len, vec->klen);
+}
+
+/*
+ * Test the skcipher algorithm represented by @req against the corresponding
+ * generic implementation, if one is available.
+ */
+static int test_skcipher_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
+ const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
+ const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
+ char _generic_driver[CRYPTO_MAX_ALG_NAME];
+ struct crypto_skcipher *generic_tfm = NULL;
+ struct skcipher_request *generic_req = NULL;
+ unsigned int i;
+ struct cipher_testvec vec = { 0 };
+ char vec_name[64];
+ struct testvec_config cfg;
+ char cfgname[TESTVEC_CONFIG_NAMELEN];
+ int err;
+
+ if (noextratests)
+ return 0;
+
+ /* Keywrap isn't supported here yet as it handles its IV differently. */
+ if (strncmp(algname, "kw(", 3) == 0)
+ return 0;
+
+ if (!generic_driver) { /* Use default naming convention? */
+ err = build_generic_driver_name(algname, _generic_driver);
+ if (err)
+ return err;
+ generic_driver = _generic_driver;
+ }
+
+ if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
+ return 0;
+
+ generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
+ if (IS_ERR(generic_tfm)) {
+ err = PTR_ERR(generic_tfm);
+ if (err == -ENOENT) {
+ pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
+ driver, generic_driver);
+ return 0;
+ }
+ pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
+ generic_driver, algname, err);
+ return err;
+ }
+
+ generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
+ if (!generic_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Check the algorithm properties for consistency. */
+
+ if (tfm->keysize != generic_tfm->keysize) {
+ pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, tfm->keysize, generic_tfm->keysize);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
+ pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
+ pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
+ driver, blocksize,
+ crypto_skcipher_blocksize(generic_tfm));
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Now generate test vectors using the generic implementation, and test
+ * the other implementation against them.
+ */
+
+ vec.key = kmalloc(tfm->keysize, GFP_KERNEL);
+ vec.iv = kmalloc(ivsize, GFP_KERNEL);
+ vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
+ vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
+ if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < fuzz_iterations * 8; i++) {
+ generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
+ vec_name, sizeof(vec_name));
+ generate_random_testvec_config(&cfg, cfgname, sizeof(cfgname));
+
+ err = test_skcipher_vec_cfg(driver, ENCRYPT, &vec, vec_name,
+ &cfg, req, tsgls);
+ if (err)
+ goto out;
+ err = test_skcipher_vec_cfg(driver, DECRYPT, &vec, vec_name,
+ &cfg, req, tsgls);
+ if (err)
+ goto out;
+ cond_resched();
+ }
+ err = 0;
+out:
+ kfree(vec.key);
+ kfree(vec.iv);
+ kfree(vec.ptext);
+ kfree(vec.ctext);
+ crypto_free_skcipher(generic_tfm);
+ skcipher_request_free(generic_req);
+ return err;
+}
+#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+static int test_skcipher_vs_generic_impl(const char *driver,
+ const char *generic_driver,
+ struct skcipher_request *req,
+ struct cipher_test_sglists *tsgls)
+{
+ return 0;
+}
+#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
+
static int test_skcipher(const char *driver, int enc,
const struct cipher_test_suite *suite,
struct skcipher_request *req,
@@ -1744,6 +2586,11 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
goto out;
err = test_skcipher(driver, DECRYPT, suite, req, tsgls);
+ if (err)
+ goto out;
+
+ err = test_skcipher_vs_generic_impl(driver, desc->generic_driver, req,
+ tsgls);
out:
free_cipher_test_sglists(tsgls);
skcipher_request_free(req);
@@ -2179,7 +3026,6 @@ static int alg_test_crc32c(const struct alg_test_desc *desc,
u32 *ctx = (u32 *)shash_desc_ctx(shash);
shash->tfm = tfm;
- shash->flags = 0;
*ctx = 420553207;
err = crypto_shash_final(shash, (u8 *)&val);
@@ -2493,6 +3339,12 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
return err;
}
+static u8 *test_pack_u32(u8 *dst, u32 val)
+{
+ memcpy(dst, &val, sizeof(val));
+ return dst + sizeof(val);
+}
+
static int test_akcipher_one(struct crypto_akcipher *tfm,
const struct akcipher_testvec *vecs)
{
@@ -2503,10 +3355,11 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
struct crypto_wait wait;
unsigned int out_len_max, out_len = 0;
int err = -ENOMEM;
- struct scatterlist src, dst, src_tab[2];
+ struct scatterlist src, dst, src_tab[3];
const char *m, *c;
unsigned int m_size, c_size;
const char *op;
+ u8 *key, *ptr;
if (testmgr_alloc_buf(xbuf))
return err;
@@ -2517,22 +3370,29 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
crypto_init_wait(&wait);
+ key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
+ GFP_KERNEL);
+ if (!key)
+ goto free_xbuf;
+ memcpy(key, vecs->key, vecs->key_len);
+ ptr = key + vecs->key_len;
+ ptr = test_pack_u32(ptr, vecs->algo);
+ ptr = test_pack_u32(ptr, vecs->param_len);
+ memcpy(ptr, vecs->params, vecs->param_len);
+
if (vecs->public_key_vec)
- err = crypto_akcipher_set_pub_key(tfm, vecs->key,
- vecs->key_len);
+ err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
else
- err = crypto_akcipher_set_priv_key(tfm, vecs->key,
- vecs->key_len);
+ err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
if (err)
goto free_req;
- err = -ENOMEM;
- out_len_max = crypto_akcipher_maxsize(tfm);
-
/*
* First run the tests which do not require a private key, such as
* encrypt or verify.
*/
+ err = -ENOMEM;
+ out_len_max = crypto_akcipher_maxsize(tfm);
outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
if (!outbuf_enc)
goto free_req;
@@ -2558,12 +3418,20 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
goto free_all;
memcpy(xbuf[0], m, m_size);
- sg_init_table(src_tab, 2);
+ sg_init_table(src_tab, 3);
sg_set_buf(&src_tab[0], xbuf[0], 8);
sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
- sg_init_one(&dst, outbuf_enc, out_len_max);
- akcipher_request_set_crypt(req, src_tab, &dst, m_size,
- out_len_max);
+ if (vecs->siggen_sigver_test) {
+ if (WARN_ON(c_size > PAGE_SIZE))
+ goto free_all;
+ memcpy(xbuf[1], c, c_size);
+ sg_set_buf(&src_tab[2], xbuf[1], c_size);
+ akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
+ } else {
+ sg_init_one(&dst, outbuf_enc, out_len_max);
+ akcipher_request_set_crypt(req, src_tab, &dst, m_size,
+ out_len_max);
+ }
akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
@@ -2576,18 +3444,21 @@ static int test_akcipher_one(struct crypto_akcipher *tfm,
pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
goto free_all;
}
- if (req->dst_len != c_size) {
- pr_err("alg: akcipher: %s test failed. Invalid output len\n",
- op);
- err = -EINVAL;
- goto free_all;
- }
- /* verify that encrypted message is equal to expected */
- if (memcmp(c, outbuf_enc, c_size)) {
- pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
- hexdump(outbuf_enc, c_size);
- err = -EINVAL;
- goto free_all;
+ if (!vecs->siggen_sigver_test) {
+ if (req->dst_len != c_size) {
+ pr_err("alg: akcipher: %s test failed. Invalid output len\n",
+ op);
+ err = -EINVAL;
+ goto free_all;
+ }
+ /* verify that encrypted message is equal to expected */
+ if (memcmp(c, outbuf_enc, c_size) != 0) {
+ pr_err("alg: akcipher: %s test failed. Invalid output\n",
+ op);
+ hexdump(outbuf_enc, c_size);
+ err = -EINVAL;
+ goto free_all;
+ }
}
/*
@@ -2642,6 +3513,7 @@ free_all:
kfree(outbuf_enc);
free_req:
akcipher_request_free(req);
+ kfree(key);
free_xbuf:
testmgr_free_buf(xbuf);
return err;
@@ -2699,12 +3571,14 @@ static int alg_test_null(const struct alg_test_desc *desc,
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "adiantum(xchacha12,aes)",
+ .generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
},
}, {
.alg = "adiantum(xchacha20,aes)",
+ .generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
@@ -2921,6 +3795,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+ /* Same as cbc(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cbc(psm4)",
+ .test = alg_test_null,
+ }, {
.alg = "cbc(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -2947,6 +3827,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "ccm(aes)",
+ .generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3055,6 +3936,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.test = alg_test_null,
.fips_allowed = 1,
}, {
+
+ /* Same as ctr(sm4) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "ctr(psm4)",
+ .test = alg_test_null,
+ }, {
.alg = "ctr(serpent)",
.test = alg_test_skcipher,
.suite = {
@@ -3080,6 +3968,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(cts_mode_tv_template)
}
}, {
+ /* Same as cts(cbc(aes)) except the key is stored in
+ * hardware secure memory which we reference by index
+ */
+ .alg = "cts(cbc(paes))",
+ .test = alg_test_null,
+ .fips_allowed = 1,
+ }, {
.alg = "deflate",
.test = alg_test_comp,
.fips_allowed = 1,
@@ -3358,7 +4253,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.kpp = __VECS(ecdh_tv_template)
}
}, {
+ .alg = "ecrdsa",
+ .test = alg_test_akcipher,
+ .suite = {
+ .akcipher = __VECS(ecrdsa_tv_template)
+ }
+ }, {
.alg = "gcm(aes)",
+ .generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3477,30 +4379,35 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "lrw(aes)",
+ .generic_driver = "lrw(ecb(aes-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(aes_lrw_tv_template)
}
}, {
.alg = "lrw(camellia)",
+ .generic_driver = "lrw(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_lrw_tv_template)
}
}, {
.alg = "lrw(cast6)",
+ .generic_driver = "lrw(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_lrw_tv_template)
}
}, {
.alg = "lrw(serpent)",
+ .generic_driver = "lrw(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_lrw_tv_template)
}
}, {
.alg = "lrw(twofish)",
+ .generic_driver = "lrw(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_lrw_tv_template)
@@ -3625,6 +4532,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4106(gcm(aes))",
+ .generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3632,6 +4540,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4309(ccm(aes))",
+ .generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
@@ -3639,6 +4548,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "rfc4543(gcm(aes))",
+ .generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
.test = alg_test_aead,
.suite = {
.aead = __VECS(aes_gcm_rfc4543_tv_template)
@@ -3835,6 +4745,7 @@ static const struct alg_test_desc alg_test_descs[] = {
},
}, {
.alg = "xts(aes)",
+ .generic_driver = "xts(ecb(aes-generic))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
@@ -3842,12 +4753,14 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}, {
.alg = "xts(camellia)",
+ .generic_driver = "xts(ecb(camellia-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(camellia_xts_tv_template)
}
}, {
.alg = "xts(cast6)",
+ .generic_driver = "xts(ecb(cast6-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(cast6_xts_tv_template)
@@ -3861,12 +4774,14 @@ static const struct alg_test_desc alg_test_descs[] = {
.fips_allowed = 1,
}, {
.alg = "xts(serpent)",
+ .generic_driver = "xts(ecb(serpent-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
.alg = "xts(twofish)",
+ .generic_driver = "xts(ecb(twofish-generic))",
.test = alg_test_skcipher,
.suite = {
.cipher = __VECS(tf_xts_tv_template)
@@ -4020,8 +4935,9 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
type, mask);
test_done:
- if (fips_enabled && rc)
- panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
+ if (rc && (fips_enabled || panic_on_fail))
+ panic("alg: self-tests for %s (%s) failed in %s mode!\n",
+ driver, alg, fips_enabled ? "fips" : "panic_on_fail");
if (fips_enabled && !rc)
pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d18a37629f05..b6daae1f6a1d 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -25,6 +25,8 @@
#ifndef _CRYPTO_TESTMGR_H
#define _CRYPTO_TESTMGR_H
+#include <linux/oid_registry.h>
+
#define MAX_IVLEN 32
/*
@@ -34,6 +36,8 @@
* @digest: Pointer to expected digest
* @psize: Length of source data in bytes
* @ksize: Length of @key in bytes (0 if no key)
+ * @setkey_error: Expected error from setkey()
+ * @digest_error: Expected error from digest()
*/
struct hash_testvec {
const char *key;
@@ -41,6 +45,8 @@ struct hash_testvec {
const char *digest;
unsigned short psize;
unsigned short ksize;
+ int setkey_error;
+ int digest_error;
};
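For illustration only (this vector is hypothetical, not part of the patch), the new field lets a hash test vector describe an expected setkey() failure, for example a cipher-based MAC whose underlying cipher rejects an undersized key; the assumption here is that no digest is checked once setkey() is expected to fail:

        static const struct hash_testvec example_bad_key_tv[] = {
                {
                        /* 4-byte key, assumed to be rejected with -EINVAL */
                        .key            = "\x01\x02\x03\x04",
                        .ksize          = 4,
                        .plaintext      = "abc",
                        .psize          = 3,
                        .setkey_error   = -EINVAL,
                },
        };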
/*
@@ -52,12 +58,13 @@ struct hash_testvec {
* @ptext: Pointer to plaintext
* @ctext: Pointer to ciphertext
* @len: Length of @ptext and @ctext in bytes
- * @fail: If set to one, the test need to fail
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* (e.g. the test needs to fail due to a weak key)
* @fips_skip: Skip the test vector in FIPS mode
* @generates_iv: Encryption should ignore the given IV, and output @iv_out.
* Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)").
+ * @setkey_error: Expected error from setkey()
+ * @crypt_error: Expected error from encrypt() and decrypt()
*/
struct cipher_testvec {
const char *key;
@@ -65,12 +72,13 @@ struct cipher_testvec {
const char *iv_out;
const char *ptext;
const char *ctext;
- bool fail;
unsigned char wk; /* weak key flag */
- unsigned char klen;
+ unsigned short klen;
unsigned short len;
bool fips_skip;
bool generates_iv;
+ int setkey_error;
+ int crypt_error;
};
/*
@@ -82,7 +90,6 @@ struct cipher_testvec {
* @ctext: Pointer to the full authenticated ciphertext. For AEADs that
* produce a separate "ciphertext" and "authentication tag", these
* two parts are concatenated: ciphertext || tag.
- * @fail: setkey() failure expected?
* @novrfy: Decryption verification failure expected?
* @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS?
* (e.g. setkey() needs to fail due to a weak key)
@@ -90,6 +97,9 @@ struct cipher_testvec {
* @plen: Length of @ptext in bytes
* @alen: Length of @assoc in bytes
* @clen: Length of @ctext in bytes
+ * @setkey_error: Expected error from setkey()
+ * @setauthsize_error: Expected error from setauthsize()
+ * @crypt_error: Expected error from encrypt() and decrypt()
*/
struct aead_testvec {
const char *key;
@@ -97,13 +107,15 @@ struct aead_testvec {
const char *ptext;
const char *assoc;
const char *ctext;
- bool fail;
unsigned char novrfy;
unsigned char wk;
unsigned char klen;
unsigned short plen;
unsigned short clen;
unsigned short alen;
+ int setkey_error;
+ int setauthsize_error;
+ int crypt_error;
};
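The same applies to AEADs; because test_aead_vec_cfg() above returns early once setkey_error or setauthsize_error is expected, such a vector only needs the key material. A hypothetical example (not from this patch), assuming a 5-byte key is invalid for an AES-based AEAD:

        static const struct aead_testvec example_bad_key_aead_tv[] = {
                {
                        /* truncated key, assumed to be rejected */
                        .key            = "\x01\x02\x03\x04\x05",
                        .klen           = 5,
                        .setkey_error   = -EINVAL,
                },
        };

Corrupted-ciphertext vectors keep using .novrfy; as the new check in test_aead_vec_cfg() shows, that now maps to an expected decryption error of -EBADMSG rather than a bare failure.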
struct cprng_testvec {
@@ -135,13 +147,16 @@ struct drbg_testvec {
struct akcipher_testvec {
const unsigned char *key;
+ const unsigned char *params;
const unsigned char *m;
const unsigned char *c;
unsigned int key_len;
+ unsigned int param_len;
unsigned int m_size;
unsigned int c_size;
bool public_key_vec;
bool siggen_sigver_test;
+ enum OID algo;
};
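The new .params/.param_len/.algo fields are consumed by test_akcipher_one() in the hunk above, which hands crypto_akcipher_set_pub_key()/crypto_akcipher_set_priv_key() a buffer containing the raw key followed by two packed u32 values and the parameter blob. A non-normative sketch of that layout, written as a hypothetical helper that mirrors the inline code:

        static u8 *example_pack_akcipher_key(const struct akcipher_testvec *vec)
        {
                /* Layout: [ key | algo (u32) | param_len (u32) | params ] */
                u8 *blob, *p;

                blob = kmalloc(vec->key_len + 2 * sizeof(u32) + vec->param_len,
                               GFP_KERNEL);
                if (!blob)
                        return NULL;
                p = blob;
                memcpy(p, vec->key, vec->key_len);
                p += vec->key_len;
                p = test_pack_u32(p, vec->algo); /* e.g. OID_gost2012PKey256 */
                p = test_pack_u32(p, vec->param_len);
                memcpy(p, vec->params, vec->param_len);
                return blob;
        }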
struct kpp_testvec {
@@ -551,6 +566,160 @@ static const struct akcipher_testvec rsa_tv_template[] = {
};
/*
+ * EC-RDSA test vectors are generated by gost-engine.
+ */
+static const struct akcipher_testvec ecrdsa_tv_template[] = {
+ {
+ .key =
+ "\x04\x40\xd5\xa7\x77\xf9\x26\x2f\x8c\xbd\xcc\xe3\x1f\x01\x94\x05"
+ "\x3d\x2f\xec\xb5\x00\x34\xf5\x51\x6d\x3b\x90\x4b\x23\x28\x6f\x1d"
+ "\xc8\x36\x61\x60\x36\xec\xbb\xb4\x0b\x95\x4e\x54\x4f\x15\x21\x05"
+ "\xd8\x52\x66\x44\x31\x7e\x5d\xc5\xd1\x26\x00\x5f\x60\xd8\xf0\xc7"
+ "\x27\xfc",
+ .key_len = 66,
+ .params = /* OID_gostCPSignA */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x01\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x41\x32\x09\x73\xa4\xc1\x38\xd6\x63\x7d\x8b\xf7\x50\x3f\xda\x9f"
+ "\x68\x48\xc1\x50\xe3\x42\x3a\x9b\x2b\x28\x12\x2a\xa7\xc2\x75\x31"
+ "\x65\x77\x8c\x3c\x9e\x0d\x56\xb2\xf9\xdc\x04\x33\x3e\xb0\x9e\xf9"
+ "\x74\x4e\x59\xb3\x83\xf2\x91\x27\xda\x5e\xc7\x33\xc0\xc1\x8f\x41",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\x75\x1b\x9b\x40\x25\xb9\x96\xd2\x9b\x00\x41\xb3\x58\xbf\x23\x14"
+ "\x79\xd2\x76\x64\xa3\xbd\x66\x10\x79\x05\x5a\x06\x42\xec\xb9\xc9",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x40\x66\x6f\xd6\xb7\x06\xd0\xf5\xa5\x6f\x69\x5c\xa5\x13\x45"
+ "\x14\xdd\xcb\x12\x9c\x1b\xf5\x28\x64\x7a\x49\x48\x29\x14\x66\x42"
+ "\xb8\x1b\x5c\xf9\x56\x6d\x08\x3b\xce\xbb\x62\x2f\xc2\x3c\xc5\x49"
+ "\x93\x27\x70\x20\xcc\x79\xeb\xdc\x76\x8e\x48\x6e\x04\x96\xc3\x29"
+ "\xa0\x73",
+ .key_len = 66,
+ .params = /* OID_gostCPSignB */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x02\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x45\x6d\x4a\x03\x1d\x5c\x0b\x17\x79\xe7\x19\xdb\xbf\x81\x9f\x82"
+ "\xae\x06\xda\xf5\x47\x00\x05\x80\xc3\x16\x06\x9a\x8e\x7c\xb2\x8e"
+ "\x7f\x74\xaa\xec\x6b\x7b\x7f\x8b\xc6\x0b\x10\x42\x4e\x91\x2c\xdf"
+ "\x7b\x8b\x15\xf4\x9e\x59\x0f\xc7\xa4\x68\x2e\xce\x89\xdf\x84\xe9",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\xd0\x54\x00\x27\x6a\xeb\xce\x6c\xf5\xf6\xfb\x57\x18\x18\x21\x13"
+ "\x11\x23\x4a\x70\x43\x52\x7a\x68\x11\x65\x45\x37\xbb\x25\xb7\x40",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x40\x05\x91\xa9\x7d\xcb\x87\xdc\x98\xa1\xbf\xff\xdd\x20\x61"
+ "\xaa\x58\x3b\x2d\x8e\x9c\x41\x9d\x4f\xc6\x23\x17\xf9\xca\x60\x65"
+ "\xbc\x97\x97\xf6\x6b\x24\xe8\xac\xb1\xa7\x61\x29\x3c\x71\xdc\xad"
+ "\xcb\x20\xbe\x96\xe8\xf4\x44\x2e\x49\xd5\x2c\xb9\xc9\x3b\x9c\xaa"
+ "\xba\x15",
+ .key_len = 66,
+ .params = /* OID_gostCPSignC */
+ "\x30\x13\x06\x07\x2a\x85\x03\x02\x02\x23\x03\x06\x08\x2a\x85\x03"
+ "\x07\x01\x01\x02\x02",
+ .param_len = 21,
+ .c =
+ "\x3b\x2e\x2e\x74\x74\x47\xda\xea\x93\x90\x6a\xe2\xf5\xf5\xe6\x46"
+ "\x11\xfc\xab\xdc\x52\xbc\x58\xdb\x45\x44\x12\x4a\xf7\xd0\xab\xc9"
+ "\x73\xba\x64\xab\x0d\xac\x4e\x72\x10\xa8\x04\xf6\x1e\xe0\x48\x6a"
+ "\xcd\xe8\xe3\x78\x73\x77\x82\x24\x8d\xf1\xd3\xeb\x4c\x25\x7e\xc0",
+ .c_size = 64,
+ .algo = OID_gost2012PKey256,
+ .m =
+ "\x52\x33\xf4\x3f\x7b\x5d\xcf\x20\xee\xe4\x5c\xab\x0b\x3f\x14\xd6"
+ "\x9f\x16\xc6\x1c\xb1\x3f\x84\x41\x69\xec\x34\xfd\xf1\xf9\xa3\x39",
+ .m_size = 32,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x81\x80\x85\x46\x8f\x16\xf8\x7a\x7e\x4a\xc3\x81\x9e\xf1\x6e"
+ "\x94\x1e\x5d\x02\x87\xea\xfa\xa0\x0a\x17\x70\x49\x64\xad\x95\x68"
+ "\x60\x0a\xf0\x57\x29\x41\x79\x30\x3c\x61\x69\xf2\xa6\x94\x87\x17"
+ "\x54\xfa\x97\x2c\xe6\x1e\x0a\xbb\x55\x10\x57\xbe\xf7\xc1\x77\x2b"
+ "\x11\x74\x0a\x50\x37\x14\x10\x2a\x45\xfc\x7a\xae\x1c\x4c\xce\x08"
+ "\x05\xb7\xa4\x50\xc8\x3d\x39\x3d\xdc\x5c\x8f\x96\x6c\xe7\xfc\x21"
+ "\xc3\x2d\x1e\x9f\x11\xb3\xec\x22\x18\x8a\x8c\x08\x6b\x8b\xed\xf5"
+ "\xc5\x47\x3c\x7e\x73\x59\x44\x1e\x77\x83\x84\x52\x9e\x3b\x7d\xff"
+ "\x9d\x86\x1a",
+ .key_len = 131,
+ .params = /* OID_gostTC26Sign512A */
+ "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x01",
+ .param_len = 13,
+ .c =
+ "\x92\x81\x74\x5f\x95\x48\x38\x87\xd9\x8f\x5e\xc8\x8a\xbb\x01\x4e"
+ "\xb0\x75\x3c\x2f\xc7\x5a\x08\x4c\x68\xab\x75\x01\x32\x75\x75\xb5"
+ "\x37\xe0\x74\x6d\x94\x84\x31\x2a\x6b\xf4\xf7\xb7\xa7\x39\x7b\x46"
+ "\x07\xf0\x98\xbd\x33\x18\xa1\x72\xb2\x6d\x54\xe3\xde\x91\xc2\x2e"
+ "\x4f\x6a\xf8\xb7\xec\xa8\x83\xc9\x8f\xd9\xce\x7c\x45\x06\x02\xf4"
+ "\x4f\x21\xb5\x24\x3d\xb4\xb5\xd8\x58\x42\xbe\x2d\x29\xae\x93\xc0"
+ "\x13\x41\x96\x35\x08\x69\xe8\x36\xc7\xd1\x83\x81\xd7\xca\xfb\xc0"
+ "\xd2\xb7\x78\x32\x3e\x30\x1a\x1e\xce\xdc\x34\x35\xc6\xad\x68\x24",
+ .c_size = 128,
+ .algo = OID_gost2012PKey512,
+ .m =
+ "\x1f\x70\xb5\xe9\x55\x12\xd6\x88\xcc\x55\xb9\x0c\x7f\xc4\x94\xf2"
+ "\x04\x77\x41\x12\x02\xd6\xf1\x1f\x83\x56\xe9\xd6\x5a\x6a\x72\xb9"
+ "\x6e\x8e\x24\x2a\x84\xf1\xba\x67\xe8\xbf\xff\xc1\xd3\xde\xfb\xc6"
+ "\xa8\xf6\x80\x01\xb9\x27\xac\xd8\x45\x96\x66\xa1\xee\x48\x08\x3f",
+ .m_size = 64,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+ {
+ .key =
+ "\x04\x81\x80\x28\xf3\x2b\x92\x04\x32\xea\x66\x20\xde\xa0\x2f\x74"
+ "\xbf\x2d\xf7\xb5\x30\x76\xb1\xc8\xee\x38\x9f\xea\xe5\xad\xc6\xa3"
+ "\x28\x1e\x51\x3d\x67\xa3\x41\xcc\x6b\x81\xe2\xe2\x9e\x82\xf3\x78"
+ "\x56\xd7\x2e\xb2\xb5\xbe\xb4\x50\x21\x05\xe5\x29\x82\xef\x15\x1b"
+ "\xc0\xd7\x30\xd6\x2f\x96\xe8\xff\x99\x4c\x25\xcf\x9a\xfc\x54\x30"
+ "\xce\xdf\x59\xe9\xc6\x45\xce\xe4\x22\xe8\x01\xd5\xcd\x2f\xaa\x78"
+ "\x99\xc6\x04\x1e\x6f\x4c\x25\x6a\x76\xad\xff\x48\xf3\xb3\xb4\xd6"
+ "\x14\x5c\x2c\x0e\xea\xa2\x4b\xb9\x7e\x89\x77\x02\x3a\x29\xc8\x16"
+ "\x8e\x78\x48",
+ .key_len = 131,
+ .params = /* OID_gostTC26Sign512B */
+ "\x30\x0b\x06\x09\x2a\x85\x03\x07\x01\x02\x01\x02\x02",
+ .param_len = 13,
+ .c =
+ "\x0a\xed\xb6\x27\xea\xa7\xa6\x7e\x2f\xc1\x02\x21\x74\xce\x27\xd2"
+ "\xee\x8a\x92\x4d\xa9\x43\x2d\xa4\x5b\xdc\x23\x02\xfc\x3a\xf3\xb2"
+ "\x10\x93\x0b\x40\x1b\x75\x95\x3e\x39\x41\x37\xb9\xab\x51\x09\xeb"
+ "\xf1\xb9\x49\x58\xec\x58\xc7\xf9\x2e\xb9\xc9\x40\xf2\x00\x39\x7e"
+ "\x3f\xde\x72\xe3\x85\x67\x06\xbe\xd8\xb8\xc1\x81\x1e\xe3\x0a\xfe"
+ "\xce\xd3\x77\x92\x56\x8c\x58\xf9\x37\x60\x2d\xe6\x8b\x66\xa3\xdd"
+ "\xd2\xf0\xf8\xda\x1b\x20\xbc\x9c\xec\x29\x5d\xd1\x8f\xcc\x37\xd1"
+ "\x3b\x8d\xb7\xc1\xe0\xb8\x3b\xef\x14\x1b\x87\xbc\xc1\x03\x9a\x93",
+ .c_size = 128,
+ .algo = OID_gost2012PKey512,
+ .m =
+ "\x11\x24\x21\x27\xf2\x42\x9f\xce\x5a\xf9\x01\x70\xe0\x07\x2b\x57"
+ "\xfb\x7d\x77\x5e\x74\x66\xe6\xa5\x40\x4c\x1a\x85\x18\xff\xd0\x63"
+ "\xe0\x39\xd3\xd6\xe5\x17\xf8\xc3\x4b\xc6\x1c\x33\x1a\xca\xa6\x66"
+ "\x6d\xf4\xd2\x45\xc2\x83\xa0\x42\x95\x05\x9d\x89\x8e\x0a\xca\xcc",
+ .m_size = 64,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ },
+};
+
+/*
* PKCS#1 RSA test vectors. Obtained from CAVS testing.
*/
static const struct akcipher_testvec pkcs1pad_rsa_tv_template[] = {
@@ -7084,7 +7253,7 @@ static const struct cipher_testvec des_tv_template[] = {
"\xb4\x99\x26\xf7\x1f\xe1\xd4\x90",
.len = 24,
}, { /* Weak key */
- .fail = true,
+ .setkey_error = -EINVAL,
.wk = 1,
.key = "\x01\x01\x01\x01\x01\x01\x01\x01",
.klen = 8,
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index f8e1d9f9938f..40020f8adc46 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -677,7 +677,7 @@ MODULE_ALIAS_CRYPTO("tgr192");
MODULE_ALIAS_CRYPTO("tgr160");
MODULE_ALIAS_CRYPTO("tgr128");
-module_init(tgr192_mod_init);
+subsys_initcall(tgr192_mod_init);
module_exit(tgr192_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 07e62433fbfb..dbac6e233285 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -205,7 +205,7 @@ static void __exit twofish_mod_fini(void)
crypto_unregister_alg(&alg);
}
-module_init(twofish_mod_init);
+subsys_initcall(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 5f436dfdfc61..f50a85060b39 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -690,7 +690,7 @@ static void __exit vmac_module_exit(void)
crypto_unregister_template(&vmac64_tmpl);
}
-module_init(vmac_module_init);
+subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 149e577fb772..1b8e502d999f 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1168,7 +1168,7 @@ MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
-module_init(wp512_mod_init);
+subsys_initcall(wp512_mod_init);
module_exit(wp512_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index c055f57fab11..94ca694ef091 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -282,7 +282,7 @@ static void __exit crypto_xcbc_module_exit(void)
crypto_unregister_template(&crypto_xcbc_tmpl);
}
-module_init(crypto_xcbc_module_init);
+subsys_initcall(crypto_xcbc_module_init);
module_exit(crypto_xcbc_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/xts.c b/crypto/xts.c
index 2f948328cabb..33cf726df4ac 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -363,7 +363,7 @@ static void __exit crypto_module_exit(void)
crypto_unregister_template(&crypto_tmpl);
}
-module_init(crypto_module_init);
+subsys_initcall(crypto_module_init);
module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
diff --git a/crypto/zstd.c b/crypto/zstd.c
index 9a76b3ed8b8b..2c04055e407f 100644
--- a/crypto/zstd.c
+++ b/crypto/zstd.c
@@ -257,7 +257,7 @@ static void __exit zstd_mod_fini(void)
crypto_unregister_scomp(&scomp);
}
-module_init(zstd_mod_init);
+subsys_initcall(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 45f9decb9848..e8231663f201 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -230,4 +230,6 @@ source "drivers/slimbus/Kconfig"
source "drivers/interconnect/Kconfig"
+source "drivers/counter/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index c61cde554340..28b030d7988d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -187,3 +187,4 @@ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/
obj-$(CONFIG_SIOX) += siox/
obj-$(CONFIG_GNSS) += gnss/
obj-$(CONFIG_INTERCONNECT) += interconnect/
+obj-$(CONFIG_COUNTER) += counter/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4e015c77e48e..283ee94224c6 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -475,6 +475,7 @@ config ACPI_REDUCED_HARDWARE_ONLY
If you are unsure what to do, do not enable this option.
source "drivers/acpi/nfit/Kconfig"
+source "drivers/acpi/hmat/Kconfig"
source "drivers/acpi/apei/Kconfig"
source "drivers/acpi/dptf/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index bb857421c2e8..5d361e4e3405 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_ACPI_PROCESSOR) += processor.o
obj-$(CONFIG_ACPI) += container.o
obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_NFIT) += nfit/
+obj-$(CONFIG_ACPI_HMAT) += hmat/
obj-$(CONFIG_ACPI) += acpi_memhotplug.o
obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
obj-$(CONFIG_ACPI_BATTERY) += battery.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ddf598ae8b6b..c16f9460c4a2 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -17,6 +17,7 @@
#include <linux/clkdev.h>
#include <linux/acpi.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/pm.h>
#include "internal.h"
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index 81bfc6197293..f92033661239 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -109,7 +109,7 @@ static ssize_t acpi_table_signature_show(struct config_item *cfg, char *str)
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->signature);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->signature);
}
static ssize_t acpi_table_length_show(struct config_item *cfg, char *str)
@@ -170,7 +170,7 @@ static ssize_t acpi_table_asl_compiler_id_show(struct config_item *cfg,
if (!h)
return -EINVAL;
- return sprintf(str, "%.*s\n", ACPI_NAME_SIZE, h->asl_compiler_id);
+ return sprintf(str, "%.*s\n", ACPI_NAMESEG_SIZE, h->asl_compiler_id);
}
static ssize_t acpi_table_asl_compiler_revision_show(struct config_item *cfg,
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 4a434c23a196..d18246a2a65e 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -390,7 +390,7 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_thread(void *unsed)
+static int acpi_aml_thread(void *unused)
{
acpi_osd_exec_callback function = NULL;
void *context;
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index 2cd9f738812b..43f1b99c86ca 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -22,7 +22,7 @@
* LPAT conversion table
*
* @lpat_table: the temperature_raw mapping table structure
- * @raw: the raw value, used as a key to get the temerature from the
+ * @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
* A positive converted temperature value will be returned on success,
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 1e2a10a06b9d..cf768608437e 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -1142,8 +1142,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
- .poweroff_noirq = acpi_subsys_suspend_noirq,
- .restore_noirq = acpi_subsys_resume_noirq,
+ .poweroff_noirq = acpi_lpss_suspend_noirq,
+ .restore_noirq = acpi_lpss_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index a2dfbf6b004e..13d513b81589 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -293,7 +293,7 @@ acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
* expected_return_btypes - Allowed type(s) for the return value
*/
struct acpi_name_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u16 argument_list;
u8 expected_btypes;
};
@@ -370,7 +370,7 @@ typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
converted_object);
struct acpi_simple_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u32 unexpected_btypes;
u32 package_index;
acpi_object_converter object_converter;
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index bb43305cb215..4027eaab18a4 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -453,7 +453,7 @@ acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
/* Dump a _PLD buffer if present */
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
((ACPI_CAST_PTR
(struct acpi_namespace_node,
acpi_gbl_db_method_info.method)->name.ascii),
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 004d34d9369b..63fe30e86807 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -354,7 +354,7 @@ acpi_status acpi_db_find_name_in_namespace(char *name_arg)
char acpi_name[5] = "____";
char *acpi_name_ptr = acpi_name;
- if (strlen(name_arg) > ACPI_NAME_SIZE) {
+ if (strlen(name_arg) > ACPI_NAMESEG_SIZE) {
acpi_os_printf("Name must be no longer than 4 characters\n");
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a4a24ffe5fae..4ebd23700bbc 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -200,7 +200,7 @@ acpi_ds_initialize_objects(u32 table_index,
/* DSDT is always the first AML table */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT)) {
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"\nInitializing Namespace objects:\n"));
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 5e9d7348c16f..62d3aa74277b 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,12 +81,8 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
- /* Clear the GPE status */
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status))
- return_ACPI_STATUS(status);
-
/* Enable the requested GPE */
+
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index c92d2f6ebe01..b04f982e59fa 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -292,7 +292,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
acpi_status status;
u32 gpe_number;
u8 temp_gpe_number;
- char name[ACPI_NAME_SIZE + 1];
+ char name[ACPI_NAMESEG_SIZE + 1];
u8 type;
ACPI_FUNCTION_TRACE(ev_match_gpe_method);
@@ -310,7 +310,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
* 1) Extract the method name and null terminate it
*/
ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
- name[ACPI_NAME_SIZE] = 0;
+ name[ACPI_NAMESEG_SIZE] = 0;
/* 2) Name must begin with an underscore */
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index bd68d66e89f0..6b76be5212a4 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -53,10 +53,10 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
/* Special case for root */
- size_needed = 1 + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ size_needed = 1 + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
} else {
size_needed =
- prefix_count + (ACPI_NAME_SIZE * num_name_segs) + 2 + 1;
+ prefix_count + (ACPI_NAMESEG_SIZE * num_name_segs) + 2 + 1;
}
/*
@@ -141,7 +141,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
}
for (index = 0;
- (index < ACPI_NAME_SIZE)
+ (index < ACPI_NAMESEG_SIZE)
&& (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
}
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 75192b958544..7b855603f81a 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -683,7 +683,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
/* Point to next name segment and make this node current */
- path += ACPI_NAME_SIZE;
+ path += ACPI_NAMESEG_SIZE;
current_node = this_node;
}
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 5470213b8e64..6eb63db72249 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -74,6 +74,10 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
ACPI_FUNCTION_NAME(ns_delete_node);
+ if (!node) {
+ return_VOID;
+ }
+
/* Detach an object if there is one */
acpi_ns_detach_object(node);
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 15070bd0c28a..1b12c172e115 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -70,7 +70,7 @@ void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
acpi_os_printf("?");
}
- pathname += ACPI_NAME_SIZE;
+ pathname += ACPI_NAMESEG_SIZE;
num_segments--;
if (num_segments) {
acpi_os_printf(".");
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 19fb8dda870f..53e5d00d3a5e 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -478,7 +478,7 @@ acpi_ns_find_ini_methods(acpi_handle obj_handle,
/* We are only looking for methods named _INI */
- if (!ACPI_COMPARE_NAME(node->name.ascii, METHOD_NAME__INI)) {
+ if (!ACPI_COMPARE_NAMESEG(node->name.ascii, METHOD_NAME__INI)) {
return (AE_OK);
}
@@ -641,7 +641,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
- if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+ if (!ACPI_COMPARE_NAMESEG(device_node->name.ascii, "_SB_") ||
device_node->parent != acpi_gbl_root_node) {
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_METHOD, device_node,
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 289c15bb8c6a..370bbc867745 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -108,8 +108,8 @@ acpi_ns_handle_to_name(acpi_handle target_handle, struct acpi_buffer *buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
node_name = acpi_ut_get_node_name(node);
- ACPI_MOVE_NAME(buffer->pointer, node_name);
- ((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
+ ACPI_COPY_NAMESEG(buffer->pointer, node_name);
+ ((char *)buffer->pointer)[ACPI_NAMESEG_SIZE] = 0;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%4.4s\n", (char *)buffer->pointer));
return_ACPI_STATUS(AE_OK);
@@ -198,7 +198,7 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *full_path, u32 path_size, u8 no_trailing)
{
u32 length = 0, i;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
u8 do_no_trailing;
char c, *left, *right;
struct acpi_namespace_node *next_node;
@@ -446,7 +446,7 @@ static void acpi_ns_normalize_pathname(char *original_path)
/* Do one nameseg at a time */
- for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ for (i = 0; (i < ACPI_NAMESEG_SIZE) && *input_path; i++) {
if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
*new_path = *input_path;
new_path++;
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index c0b4f7bedfab..f16cf5e4742c 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -203,7 +203,7 @@ acpi_ns_one_complete_parse(u32 pass_number,
/* Found OSDT table, enable the namespace override feature */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) &&
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) &&
pass_number == ACPI_IMODE_LOAD_PASS1) {
walk_state->namespace_override = TRUE;
}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 0aacfa48e20d..be86fea8e4d4 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -316,7 +316,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
this_name = acpi_object_repair_info;
while (this_name->object_converter) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
/* Check if we can actually repair this name/type combination */
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index d5804a6d1d65..8d776256b213 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -25,7 +25,7 @@ acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
return_object_ptr);
typedef struct acpi_repair_info {
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
acpi_repair_function repair_function;
} acpi_repair_info;
@@ -188,7 +188,7 @@ static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
this_name = acpi_ns_repairable_names;
while (this_name->repair_function) {
- if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
+ if (ACPI_COMPARE_NAMESEG(node->name.ascii, this_name->name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index e5cef1edf49f..6bc90d46db5c 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -178,7 +178,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
}
}
- info->length = (ACPI_NAME_SIZE * info->num_segments) +
+ info->length = (ACPI_NAMESEG_SIZE * info->num_segments) +
4 + info->num_carats;
info->next_external_char = next_external_char;
@@ -249,7 +249,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Build the name (minus path separators) */
for (; num_segments; num_segments--) {
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (ACPI_IS_PATH_SEPARATOR(*external_name) ||
(*external_name == 0)) {
@@ -274,7 +274,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
/* Move on the next segment */
external_name++;
- result += ACPI_NAME_SIZE;
+ result += ACPI_NAMESEG_SIZE;
}
/* Terminate the string */
@@ -489,12 +489,12 @@ acpi_ns_externalize_name(u32 internal_name_length,
/* Copy and validate the 4-char name segment */
- ACPI_MOVE_NAME(&(*converted_name)[j],
- &internal_name[names_index]);
+ ACPI_COPY_NAMESEG(&(*converted_name)[j],
+ &internal_name[names_index]);
acpi_ut_repair_name(&(*converted_name)[j]);
- j += ACPI_NAME_SIZE;
- names_index += ACPI_NAME_SIZE;
+ j += ACPI_NAMESEG_SIZE;
+ names_index += ACPI_NAMESEG_SIZE;
}
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index de2d3135d6a9..55b4a5b3331f 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -495,8 +495,8 @@ acpi_status acpi_install_method(u8 *buffer)
/* Table must be a DSDT or SSDT */
- if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
- !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+ if (!ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) &&
+ !ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT)) {
return (AE_BAD_HEADER);
}
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 9d9d442cd999..e62c7897fdf1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -150,21 +150,21 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
/* Two name segments */
- end += 1 + (2 * ACPI_NAME_SIZE);
+ end += 1 + (2 * ACPI_NAMESEG_SIZE);
break;
case AML_MULTI_NAME_PREFIX:
/* Multiple name segments, 4 chars each, count in next byte */
- end += 2 + (*(end + 1) * ACPI_NAME_SIZE);
+ end += 2 + (*(end + 1) * ACPI_NAMESEG_SIZE);
break;
default:
/* Single name segment */
- end += ACPI_NAME_SIZE;
+ end += ACPI_NAMESEG_SIZE;
break;
}
@@ -522,7 +522,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
ACPI_MOVE_32_TO_32(&name, parser_state->aml);
acpi_ps_set_name(field, name);
- parser_state->aml += ACPI_NAME_SIZE;
+ parser_state->aml += ACPI_NAMESEG_SIZE;
ASL_CV_CAPTURE_COMMENTS_ONLY(parser_state);
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 1d6f136e4068..c62be3d91712 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -603,10 +603,10 @@ acpi_walk_resources(acpi_handle device_handle,
/* Parameter validation */
if (!device_handle || !user_function || !name ||
- (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
+ (!ACPI_COMPARE_NAMESEG(name, METHOD_NAME__CRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAMESEG(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 0cecd0039acf..933f81316ad2 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -480,7 +480,8 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
/* If a particular signature is expected (DSDT/FACS), it must match */
- if (signature && !ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (signature &&
+ !ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
ACPI_BIOS_ERROR((AE_INFO,
"Invalid signature 0x%X for ACPI table, expected [%s]",
table_desc->signature.integer, signature));
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 951bd8e1c50a..b2abb40023a6 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -56,7 +56,7 @@ acpi_tb_find_table(char *signature,
/* Normalize the input strings */
memset(&header, 0, sizeof(struct acpi_table_header));
- ACPI_MOVE_NAME(header.signature, signature);
+ ACPI_COPY_NAMESEG(header.signature, signature);
strncpy(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
strncpy(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
@@ -65,7 +65,7 @@ acpi_tb_find_table(char *signature,
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
if (memcmp(&(acpi_gbl_root_table_list.tables[i].signature),
- header.signature, ACPI_NAME_SIZE)) {
+ header.signature, ACPI_NAMESEG_SIZE)) {
/* Not the requested table */
@@ -94,14 +94,14 @@ acpi_tb_find_table(char *signature,
if (!memcmp
(acpi_gbl_root_table_list.tables[i].pointer->signature,
- header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
- ||
- !memcmp
- (acpi_gbl_root_table_list.
- tables[i].pointer->
- oem_id,
- header.oem_id,
- ACPI_OEM_ID_SIZE))
+ header.signature, ACPI_NAMESEG_SIZE) && (!oem_id[0]
+ ||
+ !memcmp
+ (acpi_gbl_root_table_list.
+ tables[i].
+ pointer->oem_id,
+ header.oem_id,
+ ACPI_OEM_ID_SIZE))
&& (!oem_table_id[0]
|| !memcmp(acpi_gbl_root_table_list.tables[i].pointer->
oem_table_id, header.oem_table_id,
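The heavily wrapped condition in acpi_tb_find_table() above reduces to: the signature must match, and each OEM string is only checked when the caller supplied a non-empty one. A flattened sketch of that predicate, with stand-in struct and field names, assuming the usual 6-byte OEM ID and 8-byte OEM table ID widths:

#include <stdbool.h>
#include <string.h>

#define NAMESEG_SIZE		4	/* ACPI_NAMESEG_SIZE */
#define OEM_ID_SIZE		6	/* assumed ACPI_OEM_ID_SIZE */
#define OEM_TABLE_ID_SIZE	8	/* assumed ACPI_OEM_TABLE_ID_SIZE */

struct hdr {
	char signature[NAMESEG_SIZE];
	char oem_id[OEM_ID_SIZE];
	char oem_table_id[OEM_TABLE_ID_SIZE];
};

/* Signature must match; OEM fields act as wildcards when left empty. */
static bool table_matches(const struct hdr *t, const struct hdr *wanted)
{
	return !memcmp(t->signature, wanted->signature, NAMESEG_SIZE) &&
	       (!wanted->oem_id[0] ||
		!memcmp(t->oem_id, wanted->oem_id, OEM_ID_SIZE)) &&
	       (!wanted->oem_table_id[0] ||
		!memcmp(t->oem_table_id, wanted->oem_table_id, OEM_TABLE_ID_SIZE));
}

int main(void)
{
	struct hdr wanted = { { 0 } }, t = { { 0 } };

	memcpy(wanted.signature, "DSDT", NAMESEG_SIZE);	/* OEM fields left empty */
	memcpy(t.signature, "DSDT", NAMESEG_SIZE);
	memcpy(t.oem_id, "LINUX ", OEM_ID_SIZE);
	return table_matches(&t, &wanted) ? 0 : 1;
}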
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index be6642bf6366..ef1ffd36ab3f 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -120,7 +120,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
if (!reload &&
acpi_gbl_disable_ssdt_table_install &&
- ACPI_COMPARE_NAME(&new_table_desc.signature, ACPI_SIG_SSDT)) {
+ ACPI_COMPARE_NAMESEG(&new_table_desc.signature, ACPI_SIG_SSDT)) {
ACPI_INFO(("Ignoring installation of %4.4s at %8.8X%8.8X",
new_table_desc.signature.ascii,
ACPI_FORMAT_UINT64(address)));
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index 9b5df95d881b..4764f849cb78 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -69,10 +69,10 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
memcpy(out_header, header, sizeof(struct acpi_table_header));
- acpi_tb_fix_string(out_header->signature, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->signature, ACPI_NAMESEG_SIZE);
acpi_tb_fix_string(out_header->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(out_header->oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
- acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAME_SIZE);
+ acpi_tb_fix_string(out_header->asl_compiler_id, ACPI_NAMESEG_SIZE);
}
/*******************************************************************************
@@ -94,7 +94,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -158,8 +158,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
* They are the odd tables, have no standard ACPI header and no checksum
*/
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_S3PT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_FACS)) {
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 2469e01310e2..c5f0b8ec70cc 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -332,9 +332,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
&table_index);
if (ACPI_SUCCESS(status) &&
- ACPI_COMPARE_NAME(&acpi_gbl_root_table_list.
- tables[table_index].signature,
- ACPI_SIG_FADT)) {
+ ACPI_COMPARE_NAMESEG(&acpi_gbl_root_table_list.
+ tables[table_index].signature,
+ ACPI_SIG_FADT)) {
acpi_gbl_fadt_index = table_index;
acpi_tb_parse_fadt();
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 36592888f0e7..1640685bf4ae 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -230,7 +230,7 @@ acpi_get_table_header(char *signature,
for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
i++) {
- if (!ACPI_COMPARE_NAME
+ if (!ACPI_COMPARE_NAMESEG
(&(acpi_gbl_root_table_list.tables[i].signature),
signature)) {
continue;
@@ -323,7 +323,7 @@ acpi_get_table(char *signature,
i++) {
table_desc = &acpi_gbl_root_table_list.tables[i];
- if (!ACPI_COMPARE_NAME(&table_desc->signature, signature)) {
+ if (!ACPI_COMPARE_NAMESEG(&table_desc->signature, signature)) {
continue;
}
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 1a2592cc3245..4f30f06a6f78 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -118,7 +118,7 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
if (!acpi_gbl_root_table_list.current_table_count ||
- !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+ !ACPI_COMPARE_NAMESEG(table->signature.ascii, ACPI_SIG_DSDT) ||
ACPI_FAILURE(acpi_tb_validate_table(table))) {
status = AE_NO_ACPI_TABLES;
goto unlock_and_exit;
@@ -170,11 +170,12 @@ acpi_status acpi_tb_load_namespace(void)
table = &acpi_gbl_root_table_list.tables[i];
if (!table->address ||
- (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_PSDT)
- && !ACPI_COMPARE_NAME(table->signature.ascii,
- ACPI_SIG_OSDT))
+ (!ACPI_COMPARE_NAMESEG
+ (table->signature.ascii, ACPI_SIG_SSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAMESEG(table->signature.ascii,
+ ACPI_SIG_OSDT))
|| ACPI_FAILURE(acpi_tb_validate_table(table))) {
continue;
}
@@ -364,7 +365,7 @@ acpi_status acpi_unload_parent_table(acpi_handle object)
* only these types can contain AML and thus are the only types
* that can create namespace objects.
*/
- if (ACPI_COMPARE_NAME
+ if (ACPI_COMPARE_NAMESEG
(acpi_gbl_root_table_list.tables[i].signature.ascii,
ACPI_SIG_DSDT)) {
status = AE_TYPE;
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 79d7426fd7bf..f6cd7d4f698b 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -30,7 +30,7 @@ u8 acpi_ut_valid_nameseg(char *name)
/* Validate each character in the signature */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (!acpi_ut_valid_name_char(name[i], i)) {
return (FALSE);
}
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index ad9f77eb554f..65beaa237669 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"IPMI", /* 0x07 */
"GeneralPurposeIo", /* 0x08 */
"GenericSerialBus", /* 0x09 */
- "PCC" /* 0x0A */
+ "PlatformCommChannel" /* 0x0A */
};
const char *acpi_ut_get_region_name(u8 space_id)
@@ -239,7 +239,7 @@ const char *acpi_ut_get_node_name(void *object)
{
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
- /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
+ /* Must return a string of exactly 4 characters == ACPI_NAMESEG_SIZE */
if (!object) {
return ("NULL");
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index afaadc73196b..8638efacdbf4 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -59,10 +59,10 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
/* These are the only tables that contain executable AML */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) ||
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_DSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_PSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_SSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_OSDT) ||
ACPI_IS_OEM_SIG(table->signature)) {
return (TRUE);
}
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index a9f08f43c685..1b0f68f5ed8c 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -84,7 +84,7 @@ const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
this_name = acpi_gbl_predefined_methods;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
@@ -201,7 +201,7 @@ const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
this_name = acpi_gbl_resource_names;
while (this_name->info.name[0]) {
- if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+ if (ACPI_COMPARE_NAMESEG(name, this_name->info.name)) {
return (this_name);
}
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 5bef0b059406..c39b5483045d 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -141,15 +141,15 @@ void acpi_ut_repair_name(char *name)
* Special case for the root node. This can happen if we get an
* error during the execution of module-level code.
*/
- if (ACPI_COMPARE_NAME(name, ACPI_ROOT_PATHNAME)) {
+ if (ACPI_COMPARE_NAMESEG(name, ACPI_ROOT_PATHNAME)) {
return;
}
- ACPI_MOVE_NAME(&original_name, name);
+ ACPI_COPY_NAMESEG(&original_name, name);
/* Check each character in the name */
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ for (i = 0; i < ACPI_NAMESEG_SIZE; i++) {
if (acpi_ut_valid_name_char(name[i], i)) {
continue;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e48894e002ba..9058cb084b91 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -356,7 +356,8 @@ static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
- node->type == ACPI_IORT_NODE_SMMU_V3) {
+ node->type == ACPI_IORT_NODE_SMMU_V3 ||
+ node->type == ACPI_IORT_NODE_PMCG) {
*id_out = map->output_base;
return parent;
}
@@ -394,6 +395,8 @@ static int iort_get_id_mapping_index(struct acpi_iort_node *node)
}
return smmu->id_mapping_index;
+ case ACPI_IORT_NODE_PMCG:
+ return 0;
default:
return -EINVAL;
}
@@ -1028,6 +1031,14 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
+static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
+{
+ struct acpi_iort_root_complex *pci_rc;
+
+ pci_rc = (struct acpi_iort_root_complex *)node->node_data;
+ return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
+}
+
/**
* iort_iommu_configure - Set-up IOMMU configuration for a device.
*
@@ -1063,6 +1074,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
info.node = node;
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
+
+ if (!err && iort_pci_rc_supports_ats(node))
+ dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
@@ -1218,32 +1232,47 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_v3_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMUv3 specific data */
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
+ attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
}
#if defined(CONFIG_ACPI_NUMA)
/*
* set numa proximity domain for smmuv3 device
*/
-static void __init arm_smmu_v3_set_proximity(struct device *dev,
+static int __init arm_smmu_v3_set_proximity(struct device *dev,
struct acpi_iort_node *node)
{
struct acpi_iort_smmu_v3 *smmu;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ int node = acpi_map_pxm_to_node(smmu->pxm);
+
+ if (node != NUMA_NO_NODE && !node_online(node))
+ return -EINVAL;
+
+ set_dev_node(dev, node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
}
+ return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
@@ -1301,30 +1330,96 @@ static void __init arm_smmu_init_resources(struct resource *res,
}
}
-static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
+static void __init arm_smmu_dma_configure(struct device *dev,
+ struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
+ enum dev_dma_attr attr;
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
- return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
+ attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
+ DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
+
+ /* We expect the dma masks to be equivalent for SMMU set-ups */
+ dev->dma_mask = &dev->coherent_dma_mask;
+
+ /* Configure DMA for the page table walker */
+ acpi_dma_configure(dev, attr);
+}
+
+static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ /*
+ * There are always 2 memory resources.
+ * If the overflow_gsiv is present then add that for a total of 3.
+ */
+ return pmcg->overflow_gsiv ? 3 : 2;
+}
+
+static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_pmcg *pmcg;
+
+ /* Retrieve PMCG specific data */
+ pmcg = (struct acpi_iort_pmcg *)node->node_data;
+
+ res[0].start = pmcg->page0_base_address;
+ res[0].end = pmcg->page0_base_address + SZ_4K - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+
+ if (pmcg->overflow_gsiv)
+ acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
+ ACPI_EDGE_SENSITIVE, &res[2]);
+}
+
+static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ /* HiSilicon Hip08 Platform */
+ {"HISI ", "HIP08 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
+ { }
+};
+
+static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
+{
+ u32 model;
+ int idx;
+
+ idx = acpi_match_platform_list(pmcg_plat_info);
+ if (idx >= 0)
+ model = pmcg_plat_info[idx].data;
+ else
+ model = IORT_SMMU_V3_PMCG_GENERIC;
+
+ return platform_device_add_data(pdev, &model, sizeof(model));
}
struct iort_dev_config {
const char *name;
int (*dev_init)(struct acpi_iort_node *node);
- bool (*dev_is_coherent)(struct acpi_iort_node *node);
+ void (*dev_dma_configure)(struct device *dev,
+ struct acpi_iort_node *node);
int (*dev_count_resources)(struct acpi_iort_node *node);
void (*dev_init_resources)(struct resource *res,
struct acpi_iort_node *node);
- void (*dev_set_proximity)(struct device *dev,
+ int (*dev_set_proximity)(struct device *dev,
struct acpi_iort_node *node);
+ int (*dev_add_platdata)(struct platform_device *pdev);
};
static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
- .dev_is_coherent = arm_smmu_v3_is_coherent,
+ .dev_dma_configure = arm_smmu_v3_dma_configure,
.dev_count_resources = arm_smmu_v3_count_resources,
.dev_init_resources = arm_smmu_v3_init_resources,
.dev_set_proximity = arm_smmu_v3_set_proximity,
@@ -1332,9 +1427,16 @@ static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
.name = "arm-smmu",
- .dev_is_coherent = arm_smmu_is_coherent,
+ .dev_dma_configure = arm_smmu_dma_configure,
.dev_count_resources = arm_smmu_count_resources,
- .dev_init_resources = arm_smmu_init_resources
+ .dev_init_resources = arm_smmu_init_resources,
+};
+
+static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
+ .name = "arm-smmu-v3-pmcg",
+ .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
+ .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
+ .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};
static __init const struct iort_dev_config *iort_get_dev_cfg(
@@ -1345,6 +1447,8 @@ static __init const struct iort_dev_config *iort_get_dev_cfg(
return &iort_arm_smmu_v3_cfg;
case ACPI_IORT_NODE_SMMU:
return &iort_arm_smmu_cfg;
+ case ACPI_IORT_NODE_PMCG:
+ return &iort_arm_smmu_v3_pmcg_cfg;
default:
return NULL;
}
@@ -1362,15 +1466,17 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
struct fwnode_handle *fwnode;
struct platform_device *pdev;
struct resource *r;
- enum dev_dma_attr attr;
int ret, count;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
- if (ops->dev_set_proximity)
- ops->dev_set_proximity(&pdev->dev, node);
+ if (ops->dev_set_proximity) {
+ ret = ops->dev_set_proximity(&pdev->dev, node);
+ if (ret)
+ goto dev_put;
+ }
count = ops->dev_count_resources(node);
@@ -1393,19 +1499,19 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
goto dev_put;
/*
- * Add a copy of IORT node pointer to platform_data to
- * be used to retrieve IORT data information.
+ * Platform devices based on PMCG nodes use platform_data to
+ * pass the hardware model info to the driver. For others, add
+ * a copy of IORT node pointer to platform_data to be used to
+ * retrieve IORT data information.
*/
- ret = platform_device_add_data(pdev, &node, sizeof(node));
+ if (ops->dev_add_platdata)
+ ret = ops->dev_add_platdata(pdev);
+ else
+ ret = platform_device_add_data(pdev, &node, sizeof(node));
+
if (ret)
goto dev_put;
- /*
- * We expect the dma masks to be equivalent for
- * all SMMUs set-ups
- */
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
fwnode = iort_get_fwnode(node);
if (!fwnode) {
@@ -1415,11 +1521,8 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
pdev->dev.fwnode = fwnode;
- attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
- DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
-
- /* Configure DMA for the page table walker */
- acpi_dma_configure(&pdev->dev, attr);
+ if (ops->dev_dma_configure)
+ ops->dev_dma_configure(&pdev->dev, node);
iort_set_device_domain(&pdev->dev, node);
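The iort.c changes above replace the dev_is_coherent() boolean with a per-node-type dev_dma_configure() callback and add PMCG platform devices that expose two MMIO pages plus an optional overflow interrupt. The two small decisions involved look roughly like this; the types and names are hypothetical simplifications of the IORT structures:

#include <stdbool.h>

enum dma_attr { DMA_NON_COHERENT, DMA_COHERENT };

/* Coherency is derived from a firmware flag on the SMMU node (sketch). */
static enum dma_attr smmu_dma_attr(unsigned int flags, unsigned int coherent_bit)
{
	return (flags & coherent_bit) ? DMA_COHERENT : DMA_NON_COHERENT;
}

/* A PMCG has two memory resources, plus one IRQ if an overflow GSIV exists. */
static int pmcg_count_resources(bool has_overflow_gsiv)
{
	return has_overflow_gsiv ? 3 : 2;
}

int main(void)
{
	/* e.g. flag bit 0 = coherent table walk; PMCG with an overflow IRQ */
	return smmu_dma_attr(0x1, 0x1) == DMA_COHERENT &&
	       pmcg_count_resources(true) == 3 ? 0 : 1;
}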
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index a19ff3977ac4..623998a8d722 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -456,8 +456,11 @@ static int acpi_button_resume(struct device *dev)
struct acpi_button *button = acpi_driver_data(device);
button->suspended = false;
- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
+ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
+ button->last_state = !!acpi_lid_evaluate_state(device);
+ button->last_time = ktime_get();
acpi_lid_initialize_state(device);
+ }
return 0;
}
#endif
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index d4244e7d0e38..653642a4cbdd 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -81,9 +81,9 @@ struct cppc_pcc_data {
int refcount;
};
-/* Array to represent the PCC channel per subspace id */
+/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
-/* The cpu_pcc_subspace_idx containsper CPU subspace id */
+/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);
/*
@@ -436,7 +436,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
return -ENOMEM;
/*
- * Now that we have _PSD data from all CPUs, lets setup P-state
+ * Now that we have _PSD data from all CPUs, let's set up P-state
* domain info.
*/
for_each_possible_cpu(i) {
@@ -588,7 +588,7 @@ static int register_pcc_channel(int pcc_ss_idx)
return -ENOMEM;
}
- /* Set flag so that we dont come here for each CPU. */
+ /* Set flag so that we don't come here for each CPU. */
pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
}
@@ -613,7 +613,7 @@ bool __weak cpc_ffh_supported(void)
*
* Check and allocate the cppc_pcc_data memory.
* In some processor configurations it is possible that same subspace
- * is shared between multiple CPU's. This is seen especially in CPU's
+ * is shared between multiple CPUs. This is seen especially in CPUs
* with hardware multi-threading support.
*
* Return: 0 for success, errno for failure
@@ -711,7 +711,7 @@ static bool is_cppc_supported(int revision, int num_ent)
/**
* acpi_cppc_processor_probe - Search for per CPU _CPC objects.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: 0 for success or negative value for err.
*/
@@ -728,7 +728,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
acpi_status status;
int ret = -EFAULT;
- /* Parse the ACPI _CPC table for this cpu. */
+ /* Parse the ACPI _CPC table for this CPU. */
status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status)) {
@@ -840,7 +840,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (ret)
goto out_free;
- /* Register PCC channel once for all PCC subspace id. */
+ /* Register PCC channel once for all PCC subspace ID. */
if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
ret = register_pcc_channel(pcc_subspace_id);
if (ret)
@@ -860,7 +860,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- /* Plug PSD data into this CPUs CPC descriptor. */
+ /* Plug PSD data into this CPU's CPC descriptor. */
per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;
ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
@@ -891,7 +891,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
* acpi_cppc_processor_exit - Cleanup CPC structs.
- * @pr: Ptr to acpi_processor containing this CPUs logical Id.
+ * @pr: Ptr to acpi_processor containing this CPU's logical ID.
*
* Return: Void
*/
@@ -931,7 +931,7 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
* cpc_read_ffh() - Read FFH register
- * @cpunum: cpu number to read
+ * @cpunum: CPU number to read
* @reg: cppc register information
* @val: place holder for return value
*
@@ -946,7 +946,7 @@ int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
/**
* cpc_write_ffh() - Write FFH register
- * @cpunum: cpu number to write
+ * @cpunum: CPU number to write
* @reg: cppc register information
* @val: value to write
*
@@ -1093,7 +1093,7 @@ int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
/**
- * cppc_get_perf_caps - Get a CPUs performance capabilities.
+ * cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
*
@@ -1183,7 +1183,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
- * cppc_get_perf_ctrs - Read a CPUs performance feedback counters.
+ * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters.
* @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
*
@@ -1210,7 +1210,7 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
/*
- * If refernce perf register is not supported then we should
+ * If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
@@ -1263,7 +1263,7 @@ out_err:
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
/**
- * cppc_set_perf - Set a CPUs performance controls.
+ * cppc_set_perf - Set a CPU's performance controls.
* @cpu: CPU for which to set performance controls.
* @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
*
@@ -1344,7 +1344,7 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
* executing the Phase-II.
* 2. Some other CPU has beaten this CPU to successfully execute the
* write_trylock and has already acquired the write_lock. We know for a
- * fact it(other CPU acquiring the write_lock) couldn't have happened
+ * fact it (other CPU acquiring the write_lock) couldn't have happened
* before this CPU's Phase-I as we held the read_lock.
* 3. Some other CPU executing pcc CMD_READ has stolen the
* down_write, in which case, send_pcc_cmd will check for pending
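The locking comment above (Phase-I under the read lock, Phase-II only after a successful write_trylock) can be restated in user space with a pthread rwlock; this is a simplified analogue of the protocol described, not the kernel code:

#include <pthread.h>

static pthread_rwlock_t pcc_lock = PTHREAD_RWLOCK_INITIALIZER;

static void do_transaction(void)
{
	/* Phase-I: many threads may prepare their commands concurrently. */
	pthread_rwlock_rdlock(&pcc_lock);
	/* ... write this CPU's command into the shared channel ... */
	pthread_rwlock_unlock(&pcc_lock);

	/*
	 * Phase-II: only one thread rings the doorbell. Taking the write
	 * lock proves no Phase-I holder is still in flight; losing the
	 * race simply means another thread will ring it instead.
	 */
	if (pthread_rwlock_trywrlock(&pcc_lock) == 0) {
		/* ... ring doorbell, wait for completion ... */
		pthread_rwlock_unlock(&pcc_lock);
	}
}

int main(void)
{
	do_transaction();
	return 0;
}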
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 824ae985ad93..b859d75eaf9f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -414,7 +414,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
if (adev->wakeup.context.func) {
- acpi_handle_debug(handle, "Running %pF for %s\n",
+ acpi_handle_debug(handle, "Running %pS for %s\n",
adev->wakeup.context.func,
dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
@@ -728,6 +728,9 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
goto out;
}
+ acpi_handle_debug(adev->handle, "GPE%2X enabled for wakeup\n",
+ (unsigned int)wakeup->gpe_number);
+
inc:
wakeup->enable_count++;
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 8940054d6250..78c2653bf020 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -428,8 +428,10 @@ static ssize_t acpi_device_adr_show(struct device *dev,
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
- return sprintf(buf, "0x%08x\n",
- (unsigned int)(acpi_dev->pnp.bus_address));
+ if (acpi_dev->pnp.bus_address > U32_MAX)
+ return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address);
+ else
+ return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
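The adr_show() change above keeps the historical 8-hex-digit form for _ADR values that fit in 32 bits and switches to 16 digits otherwise. A small user-space illustration of that width rule:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Print an address with the same width rule as the hunk above (sketch). */
static void print_adr(uint64_t adr)
{
	if (adr > UINT32_MAX)
		printf("0x%016" PRIx64 "\n", adr);
	else
		printf("0x%08" PRIx64 "\n", adr);
}

int main(void)
{
	print_adr(0x001full);		/* -> 0x0000001f */
	print_adr(0x1000000000ull);	/* -> 0x0000001000000000 */
	return 0;
}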
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e1c242568341..0c081390930a 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -31,8 +31,7 @@ static ssize_t name##_show(struct device *dev,\
struct device_attribute *attr,\
char *buf)\
{\
- struct platform_device *pdev = to_platform_device(dev);\
- struct acpi_device *acpi_dev = platform_get_drvdata(pdev);\
+ struct acpi_device *acpi_dev = dev_get_drvdata(dev);\
unsigned long long val;\
acpi_status status;\
\
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5a127f3f2d5c..47f21599f2ab 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -131,8 +131,8 @@ int acpi_bus_generate_netlink_event(const char *device_class,
event = nla_data(attr);
memset(event, 0, sizeof(struct acpi_genl_event));
- strcpy(event->device_class, device_class);
- strcpy(event->bus_id, bus_id);
+ strscpy(event->device_class, device_class, sizeof(event->device_class));
+ strscpy(event->bus_id, bus_id, sizeof(event->bus_id));
event->type = type;
event->data = data;
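The event.c hunk replaces unbounded strcpy() calls with strscpy(), which copies at most the destination size and always NUL-terminates. A user-space stand-in with the same spirit, sketched under the assumption that truncation should be reported to the caller:

#include <stdio.h>
#include <string.h>

/* Bounded, always-NUL-terminating copy; returns length copied or -1 if truncated. */
static long copy_bounded(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {		/* source does not fit, truncate */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -1;
	}
	memcpy(dst, src, len + 1);	/* includes the terminating NUL */
	return (long)len;
}

int main(void)
{
	char device_class[20];

	printf("%ld %s\n",
	       copy_bounded(device_class, "battery", sizeof(device_class)),
	       device_class);
	return 0;
}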
diff --git a/drivers/acpi/hmat/Kconfig b/drivers/acpi/hmat/Kconfig
new file mode 100644
index 000000000000..95a29964dbea
--- /dev/null
+++ b/drivers/acpi/hmat/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+config ACPI_HMAT
+ bool "ACPI Heterogeneous Memory Attribute Table Support"
+ depends on ACPI_NUMA
+ select HMEM_REPORTING
+ help
+ If set, this option has the kernel parse and report the
+ platform's ACPI HMAT (Heterogeneous Memory Attributes Table),
+ register memory initiators with their targets, and export
+ performance attributes through the node's sysfs device if
+ provided.
diff --git a/drivers/acpi/hmat/Makefile b/drivers/acpi/hmat/Makefile
new file mode 100644
index 000000000000..e909051d3d00
--- /dev/null
+++ b/drivers/acpi/hmat/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ACPI_HMAT) := hmat.o
diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
new file mode 100644
index 000000000000..96b7d39a97c6
--- /dev/null
+++ b/drivers/acpi/hmat/hmat.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Intel Corporation.
+ *
+ * Heterogeneous Memory Attributes Table (HMAT) representation
+ *
+ * This program parses and reports the platform's HMAT tables, and registers
+ * the applicable attributes with the node's interfaces.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/node.h>
+#include <linux/sysfs.h>
+
+static __initdata u8 hmat_revision;
+
+static __initdata LIST_HEAD(targets);
+static __initdata LIST_HEAD(initiators);
+static __initdata LIST_HEAD(localities);
+
+/*
+ * The defined enum order is used to prioritize attributes to break ties when
+ * selecting the best performing node.
+ */
+enum locality_types {
+ WRITE_LATENCY,
+ READ_LATENCY,
+ WRITE_BANDWIDTH,
+ READ_BANDWIDTH,
+};
+
+static struct memory_locality *localities_types[4];
+
+struct memory_target {
+ struct list_head node;
+ unsigned int memory_pxm;
+ unsigned int processor_pxm;
+ struct node_hmem_attrs hmem_attrs;
+};
+
+struct memory_initiator {
+ struct list_head node;
+ unsigned int processor_pxm;
+};
+
+struct memory_locality {
+ struct list_head node;
+ struct acpi_hmat_locality *hmat_loc;
+};
+
+static __init struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
+{
+ struct memory_initiator *initiator;
+
+ list_for_each_entry(initiator, &initiators, node)
+ if (initiator->processor_pxm == cpu_pxm)
+ return initiator;
+ return NULL;
+}
+
+static __init struct memory_target *find_mem_target(unsigned int mem_pxm)
+{
+ struct memory_target *target;
+
+ list_for_each_entry(target, &targets, node)
+ if (target->memory_pxm == mem_pxm)
+ return target;
+ return NULL;
+}
+
+static __init void alloc_memory_initiator(unsigned int cpu_pxm)
+{
+ struct memory_initiator *initiator;
+
+ if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
+ return;
+
+ initiator = find_mem_initiator(cpu_pxm);
+ if (initiator)
+ return;
+
+ initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
+ if (!initiator)
+ return;
+
+ initiator->processor_pxm = cpu_pxm;
+ list_add_tail(&initiator->node, &initiators);
+}
+
+static __init void alloc_memory_target(unsigned int mem_pxm)
+{
+ struct memory_target *target;
+
+ if (pxm_to_node(mem_pxm) == NUMA_NO_NODE)
+ return;
+
+ target = find_mem_target(mem_pxm);
+ if (target)
+ return;
+
+ target = kzalloc(sizeof(*target), GFP_KERNEL);
+ if (!target)
+ return;
+
+ target->memory_pxm = mem_pxm;
+ target->processor_pxm = PXM_INVAL;
+ list_add_tail(&target->node, &targets);
+}
+
+static __init const char *hmat_data_type(u8 type)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ return "Access Latency";
+ case ACPI_HMAT_READ_LATENCY:
+ return "Read Latency";
+ case ACPI_HMAT_WRITE_LATENCY:
+ return "Write Latency";
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ return "Access Bandwidth";
+ case ACPI_HMAT_READ_BANDWIDTH:
+ return "Read Bandwidth";
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ return "Write Bandwidth";
+ default:
+ return "Reserved";
+ }
+}
+
+static __init const char *hmat_data_type_suffix(u8 type)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ return " nsec";
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ case ACPI_HMAT_READ_BANDWIDTH:
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ return " MB/s";
+ default:
+ return "";
+ }
+}
+
+static __init u32 hmat_normalize(u16 entry, u64 base, u8 type)
+{
+ u32 value;
+
+ /*
+ * Check for invalid and overflow values
+ */
+ if (entry == 0xffff || !entry)
+ return 0;
+ else if (base > (UINT_MAX / (entry)))
+ return 0;
+
+ /*
+ * Divide by the base unit for version 1, convert latency from
+ * picoseconds to nanoseconds if revision 2.
+ */
+ value = entry * base;
+ if (hmat_revision == 1) {
+ if (value < 10)
+ return 0;
+ value = DIV_ROUND_UP(value, 10);
+ } else if (hmat_revision == 2) {
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ value = DIV_ROUND_UP(value, 1000);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
+
+static __init void hmat_update_target_access(struct memory_target *target,
+ u8 type, u32 value)
+{
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ target->hmem_attrs.read_latency = value;
+ target->hmem_attrs.write_latency = value;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ target->hmem_attrs.read_latency = value;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ target->hmem_attrs.write_latency = value;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ target->hmem_attrs.read_bandwidth = value;
+ target->hmem_attrs.write_bandwidth = value;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ target->hmem_attrs.read_bandwidth = value;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ target->hmem_attrs.write_bandwidth = value;
+ break;
+ default:
+ break;
+ }
+}
+
+static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
+{
+ struct memory_locality *loc;
+
+ loc = kzalloc(sizeof(*loc), GFP_KERNEL);
+ if (!loc) {
+ pr_notice_once("Failed to allocate HMAT locality\n");
+ return;
+ }
+
+ loc->hmat_loc = hmat_loc;
+ list_add_tail(&loc->node, &localities);
+
+ switch (hmat_loc->data_type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ localities_types[READ_LATENCY] = loc;
+ localities_types[WRITE_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ localities_types[READ_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ localities_types[WRITE_LATENCY] = loc;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ localities_types[READ_BANDWIDTH] = loc;
+ localities_types[WRITE_BANDWIDTH] = loc;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ localities_types[READ_BANDWIDTH] = loc;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ localities_types[WRITE_BANDWIDTH] = loc;
+ break;
+ default:
+ break;
+ }
+}
+
+static __init int hmat_parse_locality(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_locality *hmat_loc = (void *)header;
+ struct memory_target *target;
+ unsigned int init, targ, total_size, ipds, tpds;
+ u32 *inits, *targs, value;
+ u16 *entries;
+ u8 type, mem_hier;
+
+ if (hmat_loc->header.length < sizeof(*hmat_loc)) {
+ pr_notice("HMAT: Unexpected locality header length: %d\n",
+ hmat_loc->header.length);
+ return -EINVAL;
+ }
+
+ type = hmat_loc->data_type;
+ mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
+ ipds = hmat_loc->number_of_initiator_Pds;
+ tpds = hmat_loc->number_of_target_Pds;
+ total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
+ sizeof(*inits) * ipds + sizeof(*targs) * tpds;
+ if (hmat_loc->header.length < total_size) {
+ pr_notice("HMAT: Unexpected locality header length:%d, minimum required:%d\n",
+ hmat_loc->header.length, total_size);
+ return -EINVAL;
+ }
+
+ pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%d Target Domains:%d Base:%lld\n",
+ hmat_loc->flags, hmat_data_type(type), ipds, tpds,
+ hmat_loc->entry_base_unit);
+
+ inits = (u32 *)(hmat_loc + 1);
+ targs = inits + ipds;
+ entries = (u16 *)(targs + tpds);
+ for (init = 0; init < ipds; init++) {
+ alloc_memory_initiator(inits[init]);
+ for (targ = 0; targ < tpds; targ++) {
+ value = hmat_normalize(entries[init * tpds + targ],
+ hmat_loc->entry_base_unit,
+ type);
+ pr_info(" Initiator-Target[%d-%d]:%d%s\n",
+ inits[init], targs[targ], value,
+ hmat_data_type_suffix(type));
+
+ if (mem_hier == ACPI_HMAT_MEMORY) {
+ target = find_mem_target(targs[targ]);
+ if (target && target->processor_pxm == inits[init])
+ hmat_update_target_access(target, type, value);
+ }
+ }
+ }
+
+ if (mem_hier == ACPI_HMAT_MEMORY)
+ hmat_add_locality(hmat_loc);
+
+ return 0;
+}
+
+static __init int hmat_parse_cache(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_cache *cache = (void *)header;
+ struct node_cache_attrs cache_attrs;
+ u32 attrs;
+
+ if (cache->header.length < sizeof(*cache)) {
+ pr_notice("HMAT: Unexpected cache header length: %d\n",
+ cache->header.length);
+ return -EINVAL;
+ }
+
+ attrs = cache->cache_attributes;
+ pr_info("HMAT: Cache: Domain:%d Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
+ cache->memory_PD, cache->cache_size, attrs,
+ cache->number_of_SMBIOShandles);
+
+ cache_attrs.size = cache->cache_size;
+ cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+ cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+
+ switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
+ case ACPI_HMAT_CA_DIRECT_MAPPED:
+ cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+ break;
+ case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
+ cache_attrs.indexing = NODE_CACHE_INDEXED;
+ break;
+ case ACPI_HMAT_CA_NONE:
+ default:
+ cache_attrs.indexing = NODE_CACHE_OTHER;
+ break;
+ }
+
+ switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
+ case ACPI_HMAT_CP_WB:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+ break;
+ case ACPI_HMAT_CP_WT:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+ break;
+ case ACPI_HMAT_CP_NONE:
+ default:
+ cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+ break;
+ }
+
+ node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
+ return 0;
+}
+
+static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_proximity_domain *p = (void *)header;
+ struct memory_target *target = NULL;
+
+ if (p->header.length != sizeof(*p)) {
+ pr_notice("HMAT: Unexpected address range header length: %d\n",
+ p->header.length);
+ return -EINVAL;
+ }
+
+ if (hmat_revision == 1)
+ pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ p->reserved3, p->reserved4, p->flags, p->processor_PD,
+ p->memory_PD);
+ else
+ pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
+ p->flags, p->processor_PD, p->memory_PD);
+
+ if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+ target = find_mem_target(p->memory_PD);
+ if (!target) {
+ pr_debug("HMAT: Memory Domain missing from SRAT\n");
+ return -EINVAL;
+ }
+ }
+ if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
+ int p_node = pxm_to_node(p->processor_PD);
+
+ if (p_node == NUMA_NO_NODE) {
+ pr_debug("HMAT: Invalid Processor Domain\n");
+ return -EINVAL;
+ }
+ target->processor_pxm = p_node;
+ }
+
+ return 0;
+}
+
+static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_hmat_structure *hdr = (void *)header;
+
+ if (!hdr)
+ return -EINVAL;
+
+ switch (hdr->type) {
+ case ACPI_HMAT_TYPE_PROXIMITY:
+ return hmat_parse_proximity_domain(header, end);
+ case ACPI_HMAT_TYPE_LOCALITY:
+ return hmat_parse_locality(header, end);
+ case ACPI_HMAT_TYPE_CACHE:
+ return hmat_parse_cache(header, end);
+ default:
+ return -EINVAL;
+ }
+}
+
+static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_srat_mem_affinity *ma = (void *)header;
+
+ if (!ma)
+ return -EINVAL;
+ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
+ return 0;
+ alloc_memory_target(ma->proximity_domain);
+ return 0;
+}
+
+static __init u32 hmat_initiator_perf(struct memory_target *target,
+ struct memory_initiator *initiator,
+ struct acpi_hmat_locality *hmat_loc)
+{
+ unsigned int ipds, tpds, i, idx = 0, tdx = 0;
+ u32 *inits, *targs;
+ u16 *entries;
+
+ ipds = hmat_loc->number_of_initiator_Pds;
+ tpds = hmat_loc->number_of_target_Pds;
+ inits = (u32 *)(hmat_loc + 1);
+ targs = inits + ipds;
+ entries = (u16 *)(targs + tpds);
+
+ for (i = 0; i < ipds; i++) {
+ if (inits[i] == initiator->processor_pxm) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (i == ipds)
+ return 0;
+
+ for (i = 0; i < tpds; i++) {
+ if (targs[i] == target->memory_pxm) {
+ tdx = i;
+ break;
+ }
+ }
+ if (i == tpds)
+ return 0;
+
+ return hmat_normalize(entries[idx * tpds + tdx],
+ hmat_loc->entry_base_unit,
+ hmat_loc->data_type);
+}
+
+static __init bool hmat_update_best(u8 type, u32 value, u32 *best)
+{
+ bool updated = false;
+
+ if (!value)
+ return false;
+
+ switch (type) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ case ACPI_HMAT_READ_LATENCY:
+ case ACPI_HMAT_WRITE_LATENCY:
+ if (!*best || *best > value) {
+ *best = value;
+ updated = true;
+ }
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ case ACPI_HMAT_READ_BANDWIDTH:
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ if (!*best || *best < value) {
+ *best = value;
+ updated = true;
+ }
+ break;
+ }
+
+ return updated;
+}
+
+static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct memory_initiator *ia;
+ struct memory_initiator *ib;
+ unsigned long *p_nodes = priv;
+
+ ia = list_entry(a, struct memory_initiator, node);
+ ib = list_entry(b, struct memory_initiator, node);
+
+ set_bit(ia->processor_pxm, p_nodes);
+ set_bit(ib->processor_pxm, p_nodes);
+
+ return ia->processor_pxm - ib->processor_pxm;
+}
+
+static __init void hmat_register_target_initiators(struct memory_target *target)
+{
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+ struct memory_initiator *initiator;
+ unsigned int mem_nid, cpu_nid;
+ struct memory_locality *loc = NULL;
+ u32 best = 0;
+ int i;
+
+ mem_nid = pxm_to_node(target->memory_pxm);
+ /*
+ * If the Address Range Structure provides a local processor pxm, link
+ * only that one. Otherwise, find the best performance attributes and
+ * register all initiators that match.
+ */
+ if (target->processor_pxm != PXM_INVAL) {
+ cpu_nid = pxm_to_node(target->processor_pxm);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ return;
+ }
+
+ if (list_empty(&localities))
+ return;
+
+ /*
+ * We need the initiator list sorted so we can use bitmap_clear for
+ * previously set initiators when we find a better memory accessor.
+ * We'll also use the sorting to prime the candidate nodes with known
+ * initiators.
+ */
+ bitmap_zero(p_nodes, MAX_NUMNODES);
+ list_sort(p_nodes, &initiators, initiator_cmp);
+ for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
+ loc = localities_types[i];
+ if (!loc)
+ continue;
+
+ best = 0;
+ list_for_each_entry(initiator, &initiators, node) {
+ u32 value;
+
+ if (!test_bit(initiator->processor_pxm, p_nodes))
+ continue;
+
+ value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
+ if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
+ bitmap_clear(p_nodes, 0, initiator->processor_pxm);
+ if (value != best)
+ clear_bit(initiator->processor_pxm, p_nodes);
+ }
+ if (best)
+ hmat_update_target_access(target, loc->hmat_loc->data_type, best);
+ }
+
+ for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
+ cpu_nid = pxm_to_node(i);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
+ }
+}
+
+static __init void hmat_register_target_perf(struct memory_target *target)
+{
+ unsigned mem_nid = pxm_to_node(target->memory_pxm);
+ node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
+}
+
+static __init void hmat_register_targets(void)
+{
+ struct memory_target *target;
+
+ list_for_each_entry(target, &targets, node) {
+ hmat_register_target_initiators(target);
+ hmat_register_target_perf(target);
+ }
+}
+
+static __init void hmat_free_structures(void)
+{
+ struct memory_target *target, *tnext;
+ struct memory_locality *loc, *lnext;
+ struct memory_initiator *initiator, *inext;
+
+ list_for_each_entry_safe(target, tnext, &targets, node) {
+ list_del(&target->node);
+ kfree(target);
+ }
+
+ list_for_each_entry_safe(initiator, inext, &initiators, node) {
+ list_del(&initiator->node);
+ kfree(initiator);
+ }
+
+ list_for_each_entry_safe(loc, lnext, &localities, node) {
+ list_del(&loc->node);
+ kfree(loc);
+ }
+}
+
+static __init int hmat_init(void)
+{
+ struct acpi_table_header *tbl;
+ enum acpi_hmat_type i;
+ acpi_status status;
+
+ if (srat_disabled())
+ return 0;
+
+ status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ if (acpi_table_parse_entries(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ srat_parse_mem_affinity, 0) < 0)
+ goto out_put;
+ acpi_put_table(tbl);
+
+ status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
+ if (ACPI_FAILURE(status))
+ goto out_put;
+
+ hmat_revision = tbl->revision;
+ switch (hmat_revision) {
+ case 1:
+ case 2:
+ break;
+ default:
+ pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
+ goto out_put;
+ }
+
+ for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
+ if (acpi_table_parse_entries(ACPI_SIG_HMAT,
+ sizeof(struct acpi_table_hmat), i,
+ hmat_parse_subtable, 0) < 0) {
+ pr_notice("Ignoring HMAT: Invalid table");
+ goto out_put;
+ }
+ }
+ hmat_register_targets();
+out_put:
+ hmat_free_structures();
+ acpi_put_table(tbl);
+ return 0;
+}
+subsys_initcall(hmat_init);
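hmat_normalize() above folds the entry base unit and the table revision into one value. The same arithmetic, restated compactly in user space so the revision 1 and revision 2 paths are easy to check by hand:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t normalize(uint16_t entry, uint64_t base, int revision, int is_latency)
{
	uint64_t value;

	/* 0xffff and 0 mean "not provided"; also guard the multiply. */
	if (entry == 0xffff || !entry || base > (UINT32_MAX / entry))
		return 0;

	value = (uint64_t)entry * base;
	if (revision == 1)
		return value < 10 ? 0 : (uint32_t)((value + 9) / 10);	/* DIV_ROUND_UP(value, 10) */
	if (revision == 2 && is_latency)
		return (uint32_t)((value + 999) / 1000);	/* picoseconds -> nanoseconds */
	return (uint32_t)value;
}

int main(void)
{
	/* e.g. base unit 100, raw entry 150, revision 2 latency: 15000 ps -> 15 ns */
	printf("%" PRIu32 "\n", normalize(150, 100, 2, 1));
	return 0;
}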
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 867f6e3f2b4f..30995834ad70 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -339,7 +339,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
}
static int __init
-acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
+acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
@@ -348,7 +348,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_x2apic_affinity_init(processor_affinity);
@@ -357,7 +357,7 @@ acpi_parse_x2apic_affinity(struct acpi_subtable_header *header,
}
static int __init
-acpi_parse_processor_affinity(struct acpi_subtable_header *header,
+acpi_parse_processor_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_cpu_affinity *processor_affinity;
@@ -366,7 +366,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_processor_affinity_init(processor_affinity);
@@ -375,7 +375,7 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
}
static int __init
-acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_gicc_affinity *processor_affinity;
@@ -384,7 +384,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
if (!processor_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
acpi_numa_gicc_affinity_init(processor_affinity);
@@ -395,7 +395,7 @@ acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
static int __initdata parsed_numa_memblks;
static int __init
-acpi_parse_memory_affinity(struct acpi_subtable_header * header,
+acpi_parse_memory_affinity(union acpi_subtable_headers * header,
const unsigned long end)
{
struct acpi_srat_mem_affinity *memory_affinity;
@@ -404,7 +404,7 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
if (!memory_affinity)
return -EINVAL;
- acpi_table_print_srat_entry(header);
+ acpi_table_print_srat_entry(&header->common);
/* let architecture-dependent part to do it */
if (!acpi_numa_memory_affinity_init(memory_affinity))
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c
index a4e8432fc2fb..b42be067fb83 100644
--- a/drivers/acpi/pci_mcfg.c
+++ b/drivers/acpi/pci_mcfg.c
@@ -52,6 +52,18 @@ struct mcfg_fixup {
static struct mcfg_fixup mcfg_quirks[] = {
/* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */
+#define AL_ECAM(table_id, rev, seg, ops) \
+ { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops }
+
+ AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops),
+ AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops),
+
#define QCOM_ECAM32(seg) \
{ "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 707aafc7c2aa..c36781a9b493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -145,6 +145,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = {
{ OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
{ OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
{ OSC_PCI_MSI_SUPPORT, "MSI" },
+ { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" },
};
static struct pci_osc_bit_struct pci_osc_control_bit[] = {
@@ -446,6 +447,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
* PCI domains, so we indicate this in _OSC support capabilities.
*/
support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+ support |= OSC_PCI_HPX_TYPE_3_SUPPORT;
if (pci_ext_cfg_avail())
support |= OSC_PCI_EXT_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled())
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 665e93ca0b40..87db3e124725 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -535,12 +535,12 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
/*
* Try to execute _DSW first.
*
- * Three agruments are needed for the _DSW object:
+ * Three arguments are needed for the _DSW object:
* Argument 0: enable/disable the wake capabilities
* Argument 1: target system state
* Argument 2: target device state
* When _DSW object is called to disable the wake capabilities, maybe
- * the first argument is filled. The values of the other two agruments
+ * the first argument is filled. The values of the other two arguments
* are meaningless.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
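The comment in this hunk spells out the three _DSW arguments. As a rough illustration, simplified relative to acpi_device_sleep_wake() itself and not part of the patch, packaging and evaluating them with the stock ACPICA calls looks like this:

#include <linux/acpi.h>

/* Sketch only: evaluate _DSW with the three integer arguments listed above. */
static acpi_status example_run_dsw(acpi_handle handle, bool enable,
				   int sleep_state, int device_state)
{
	union acpi_object in_arg[3];
	struct acpi_object_list arg_list = { 3, in_arg };

	in_arg[0].type = ACPI_TYPE_INTEGER;
	in_arg[0].integer.value = enable;        /* Arg0: wake on/off */
	in_arg[1].type = ACPI_TYPE_INTEGER;
	in_arg[1].integer.value = sleep_state;   /* Arg1: target system state */
	in_arg[2].type = ACPI_TYPE_INTEGER;
	in_arg[2].integer.value = device_state;  /* Arg2: target device state */

	/* _DSW has no return package, so no output buffer is supplied. */
	return acpi_evaluate_object(handle, "_DSW", &arg_list, NULL);
}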
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 065c4fc245d1..b72e6afaa8fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -164,7 +164,7 @@ static struct acpi_pptt_cache *acpi_find_cache_level(struct acpi_table_header *t
}
/**
- * acpi_count_levels() - Given a PPTT table, and a cpu node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
*
@@ -235,7 +235,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
/**
* acpi_find_processor_node() - Given a PPTT table find the requested processor
* @table_hdr: Pointer to the head of the PPTT table
- * @acpi_cpu_id: cpu we are searching for
+ * @acpi_cpu_id: CPU we are searching for
*
* Find the subtable entry describing the provided processor.
* This is done by iterating the PPTT table looking for processor nodes
@@ -456,21 +456,21 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta
static void acpi_pptt_warn_missing(void)
{
- pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n");
+ pr_warn_once("No PPTT table found, CPU and cache topology may be inaccurate\n");
}
/**
* topology_get_acpi_cpu_tag() - Find a unique topology value for a feature
* @table: Pointer to the head of the PPTT table
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: A level that terminates the search
* @flag: A flag which terminates the search
*
- * Get a unique value given a cpu, and a topology level, that can be
+ * Get a unique value given a CPU, and a topology level, that can be
* matched to determine which cpus share common topological features
* at that level.
*
- * Return: Unique value, or -ENOENT if unable to locate cpu
+ * Return: Unique value, or -ENOENT if unable to locate CPU
*/
static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
unsigned int cpu, int level, int flag)
@@ -510,7 +510,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
return -ENOENT;
}
retval = topology_get_acpi_cpu_tag(table, cpu, level, flag);
- pr_debug("Topology Setup ACPI cpu %d, level %d ret = %d\n",
+ pr_debug("Topology Setup ACPI CPU %d, level %d ret = %d\n",
cpu, level, retval);
acpi_put_table(table);
@@ -519,9 +519,9 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
/**
* acpi_find_last_cache_level() - Determines the number of cache levels for a PE
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
- * Given a logical cpu number, returns the number of levels of cache represented
+ * Given a logical CPU number, returns the number of levels of cache represented
* in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
* indicating we didn't find any cache levels.
*
@@ -534,7 +534,7 @@ int acpi_find_last_cache_level(unsigned int cpu)
int number_of_levels = 0;
acpi_status status;
- pr_debug("Cache Setup find last level cpu=%d\n", cpu);
+ pr_debug("Cache Setup find last level CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
@@ -551,14 +551,14 @@ int acpi_find_last_cache_level(unsigned int cpu)
/**
* cache_setup_acpi() - Override CPU cache topology with data from the PPTT
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
*
* Updates the global cache info provided by cpu_get_cacheinfo()
* when there are valid properties in the acpi_pptt_cache nodes. A
* successful parse may not result in any updates if none of the
- * cache levels have any valid flags set. Futher, a unique value is
+ * cache levels have any valid flags set. Further, a unique value is
* associated with each known CPU cache entry. This unique value
- * can be used to determine whether caches are shared between cpus.
+ * can be used to determine whether caches are shared between CPUs.
*
* Return: -ENOENT on failure to find table, or 0 on success
*/
@@ -567,7 +567,7 @@ int cache_setup_acpi(unsigned int cpu)
struct acpi_table_header *table;
acpi_status status;
- pr_debug("Cache Setup ACPI cpu %d\n", cpu);
+ pr_debug("Cache Setup ACPI CPU %d\n", cpu);
status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
if (ACPI_FAILURE(status)) {
@@ -582,8 +582,8 @@ int cache_setup_acpi(unsigned int cpu)
}
/**
- * find_acpi_cpu_topology() - Determine a unique topology value for a given cpu
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
+ * @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
*
* Determine a topology unique ID for each thread/core/cluster/mc_grouping
@@ -596,7 +596,7 @@ int cache_setup_acpi(unsigned int cpu)
* other levels beyond this use a generated value to uniquely identify
* a topological feature.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_topology(unsigned int cpu, int level)
@@ -606,12 +606,12 @@ int find_acpi_cpu_topology(unsigned int cpu, int level)
/**
* find_acpi_cpu_cache_topology() - Determine a unique cache topology value
- * @cpu: Kernel logical cpu number
+ * @cpu: Kernel logical CPU number
* @level: The cache level for which we would like a unique ID
*
* Determine a unique ID for each unified cache in the system
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
* Otherwise returns a value which represents a unique topological feature.
*/
int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
@@ -643,17 +643,17 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
/**
- * find_acpi_cpu_topology_package() - Determine a unique cpu package value
- * @cpu: Kernel logical cpu number
+ * find_acpi_cpu_topology_package() - Determine a unique CPU package value
+ * @cpu: Kernel logical CPU number
*
- * Determine a topology unique package ID for the given cpu.
+ * Determine a topology unique package ID for the given CPU.
* This ID can then be used to group peers, which will have matching ids.
*
* The search terminates when either a level is found with the PHYSICAL_PACKAGE
* flag set or we reach a root node.
*
- * Return: -ENOENT if the PPTT doesn't exist, or the cpu cannot be found.
- * Otherwise returns a value which represents the package for this cpu.
+ * Return: -ENOENT if the PPTT doesn't exist, or the CPU cannot be found.
+ * Otherwise returns a value which represents the package for this CPU.
*/
int find_acpi_cpu_topology_package(unsigned int cpu)
{
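The kerneldoc updates in this file describe the PPTT lookup helpers. The fragment below is a hedged usage sketch, with the loop and message invented, showing how a caller might group CPUs by physical package via find_acpi_cpu_topology_package():

#include <linux/acpi.h>
#include <linux/cpu.h>

/* Illustration only: report the physical package of every possible CPU. */
static void example_dump_packages(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int pkg = find_acpi_cpu_topology_package(cpu);

		if (pkg < 0)	/* -ENOENT: no PPTT, or CPU not described */
			continue;
		pr_info("CPU%u sits in package %d\n", cpu, pkg);
	}
}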
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a303fd0e108c..c73d3a62799a 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -181,7 +181,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
acpi_processor_ppc_ost(pr->handle, 0);
}
if (ret >= 0)
- cpufreq_update_policy(pr->id);
+ cpufreq_update_limits(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 77abe0ec4043..9d460a859be0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -44,6 +44,7 @@ static const guid_t prp_guids[] = {
0xbf, 0xf0, 0x76, 0x14, 0x38, 0x07, 0xc3, 0x89),
};
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
@@ -1031,6 +1032,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
+ /*
+ * We can have a combination of device and data nodes, e.g. with
+ * hierarchical _DSD properties. Make sure the adev pointer is
+ * restored before going through data nodes, otherwise we will
+ * be looking for data_nodes below the last device found instead
+ * of the common fwnode shared by device_nodes and data_nodes.
+ */
+ adev = to_acpi_device_node(fwnode);
if (adev)
head = &adev->data.subnodes;
else if (data)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 446c959a8f08..566270d0e91a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -763,18 +763,16 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
-static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
- struct acpi_device_wakeup *wakeup)
+static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
{
+ acpi_handle handle = dev->handle;
+ struct acpi_device_wakeup *wakeup = &dev->wakeup;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
int err = -ENODATA;
- if (!wakeup)
- return -EINVAL;
-
INIT_LIST_HEAD(&wakeup->resources);
/* _PRW */
@@ -848,9 +846,9 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
static bool acpi_wakeup_gpe_init(struct acpi_device *device)
{
static const struct acpi_device_id button_device_ids[] = {
- {"PNP0C0C", 0},
- {"PNP0C0D", 0},
- {"PNP0C0E", 0},
+ {"PNP0C0C", 0}, /* Power button */
+ {"PNP0C0D", 0}, /* Lid */
+ {"PNP0C0E", 0}, /* Sleep button */
{"", 0},
};
struct acpi_device_wakeup *wakeup = &device->wakeup;
@@ -883,8 +881,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
if (!acpi_has_method(device->handle, "_PRW"))
return;
- err = acpi_bus_extract_wakeup_device_power_package(device->handle,
- &device->wakeup);
+ err = acpi_bus_extract_wakeup_device_power_package(device);
if (err) {
dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
return;
@@ -895,7 +892,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
/*
* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
- * The _PSW object is depreciated in ACPI 3.0 and is replaced by _DSW.
+ * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
* So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object used.
*/
@@ -2241,10 +2238,10 @@ static struct acpi_probe_entry *ape;
static int acpi_probe_count;
static DEFINE_MUTEX(acpi_probe_mutex);
-static int __init acpi_match_madt(struct acpi_subtable_header *header,
+static int __init acpi_match_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
- if (!ape->subtable_valid || ape->subtable_valid(header, ape))
+ if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
if (!ape->probe_subtbl(header, end))
acpi_probe_count++;
@@ -2260,7 +2257,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
- if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
+ if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
count += acpi_probe_count;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 403c4ff15349..e52f1238d2d6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
+ acpi_enable_wakeup_devices(ACPI_STATE_S0);
+
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
@@ -1027,6 +1029,8 @@ static void acpi_s2idle_restore(void)
{
acpi_enable_all_runtime_gpes();
+ acpi_disable_wakeup_devices(ACPI_STATE_S0);
+
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index c336784d0bcb..b34d05e365b7 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(qdf2400_e44_present);
/*
* Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
- * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
+ * Detect them by examining the OEM fields in the SPCR header, similar to PCI
* quirk detection in pci_mcfg.c.
*/
static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index fa76f5e41b5c..75948a3f1a20 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -327,9 +327,9 @@ static struct kobject *hotplug_kobj;
struct acpi_table_attr {
struct bin_attribute attr;
- char name[ACPI_NAME_SIZE];
+ char name[ACPI_NAMESEG_SIZE];
int instance;
- char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE];
+ char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
struct list_head node;
};
@@ -368,10 +368,10 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
char instance_str[ACPI_INST_SIZE];
sysfs_attr_init(&table_attr->attr.attr);
- ACPI_MOVE_NAME(table_attr->name, table_header->signature);
+ ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);
list_for_each_entry(attr, &acpi_table_attr_list, node) {
- if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
+ if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
if (table_attr->instance < attr->instance)
table_attr->instance = attr->instance;
}
@@ -382,8 +382,8 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
return -ERANGE;
}
- ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
- table_attr->filename[ACPI_NAME_SIZE] = '\0';
+ ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
+ table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
if (table_attr->instance > 1 || (table_attr->instance == 1 &&
!acpi_get_table
(table_header->signature, 2, &header))) {
@@ -484,7 +484,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
int i;
for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
- if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
if (!data_attr)
return -ENOMEM;
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8fccbe49612a..3b5d04fd5e3e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -49,6 +49,16 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
static int acpi_apic_instance __initdata;
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON,
+ ACPI_SUBTABLE_HMAT,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
/*
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
@@ -217,6 +227,50 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
+static unsigned long __init
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.type;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.type;
+ }
+ return 0;
+}
+
+static unsigned long __init
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.length;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.length;
+ }
+ return 0;
+}
+
+static unsigned long __init
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return sizeof(entry->hdr->common);
+ case ACPI_SUBTABLE_HMAT:
+ return sizeof(entry->hdr->hmat);
+ }
+ return 0;
+}
+
+static enum acpi_subtable_type __init
+acpi_get_subtable_type(char *id)
+{
+ if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+ return ACPI_SUBTABLE_HMAT;
+ return ACPI_SUBTABLE_COMMON;
+}
+
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
@@ -240,14 +294,13 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-static int __init
-acpi_parse_entries_array(char *id, unsigned long table_size,
+static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
{
- struct acpi_subtable_header *entry;
- unsigned long table_end;
+ struct acpi_subtable_entry entry;
+ unsigned long table_end, subtable_len, entry_len;
int count = 0;
int errs = 0;
int i;
@@ -270,19 +323,20 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
/* Parse all entries looking for a match. */
- entry = (struct acpi_subtable_header *)
+ entry.type = acpi_get_subtable_type(id);
+ entry.hdr = (union acpi_subtable_headers *)
((unsigned long)table_header + table_size);
+ subtable_len = acpi_get_subtable_header_length(&entry);
- while (((unsigned long)entry) + sizeof(struct acpi_subtable_header) <
- table_end) {
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
if (max_entries && count >= max_entries)
break;
for (i = 0; i < proc_num; i++) {
- if (entry->type != proc[i].id)
+ if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
if (!proc[i].handler ||
- (!errs && proc[i].handler(entry, table_end))) {
+ (!errs && proc[i].handler(entry.hdr, table_end))) {
errs++;
continue;
}
@@ -297,13 +351,14 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
* If entry->length is 0, break from this loop to avoid
* infinite loop.
*/
- if (entry->length == 0) {
+ entry_len = acpi_get_entry_length(&entry);
+ if (entry_len == 0) {
pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
return -EINVAL;
}
- entry = (struct acpi_subtable_header *)
- ((unsigned long)entry + entry->length);
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)entry.hdr + entry_len);
}
if (max_entries && count > max_entries) {
@@ -314,8 +369,7 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
-int __init
-acpi_table_parse_entries_array(char *id,
+int __init acpi_table_parse_entries_array(char *id,
unsigned long table_size,
struct acpi_subtable_proc *proc, int proc_num,
unsigned int max_entries)
@@ -346,8 +400,7 @@ acpi_table_parse_entries_array(char *id,
return count;
}
-int __init
-acpi_table_parse_entries(char *id,
+int __init acpi_table_parse_entries(char *id,
unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -362,8 +415,7 @@ acpi_table_parse_entries(char *id,
max_entries);
}
-int __init
-acpi_table_parse_madt(enum acpi_madt_type id,
+int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
return acpi_table_parse_entries(ACPI_SIG_MADT,
@@ -670,8 +722,8 @@ static void __init acpi_table_initrd_scan(void)
table_length = table->length;
/* Skip RSDT/XSDT which should only be used for override */
- if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ if (ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAMESEG(table->signature, ACPI_SIG_XSDT)) {
acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
goto next_table;
}
@@ -725,8 +777,7 @@ static void *amlcode __attribute__ ((weakref("AmlCode")));
static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
#endif
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
+acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table)
{
if (!existing_table || !new_table)
@@ -788,7 +839,6 @@ static int __init acpi_parse_apic_instance(char *str)
return 0;
}
-
early_param("acpi_apic_instance", acpi_parse_apic_instance);
static int __init acpi_force_table_verification_setup(char *s)
@@ -797,7 +847,6 @@ static int __init acpi_force_table_verification_setup(char *s)
return 0;
}
-
early_param("acpi_force_table_verification", acpi_force_table_verification_setup);
static int __init acpi_force_32bit_fadt_addr(char *s)
@@ -807,5 +856,4 @@ static int __init acpi_force_32bit_fadt_addr(char *s)
return 0;
}
-
early_param("acpi_force_32bit_fadt_addr", acpi_force_32bit_fadt_addr);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index c4b06cc075f9..89363b245489 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -739,6 +739,7 @@ EXPORT_SYMBOL(acpi_dev_found);
struct acpi_dev_match_info {
const char *dev_name;
+ struct acpi_device *adev;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
@@ -759,6 +760,7 @@ static int acpi_dev_match_cb(struct device *dev, void *data)
return 0;
match->dev_name = acpi_dev_name(adev);
+ match->adev = adev;
if (match->hrv == -1)
return 1;
@@ -806,18 +808,20 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
EXPORT_SYMBOL(acpi_dev_present);
/**
- * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * acpi_dev_get_first_match_dev - Return the first matching ACPI device
* @hid: Hardware ID of the device.
* @uid: Unique ID of the device, pass NULL to not check _UID
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
- * Return device name if a matching device was present
+ * Return the first matching ACPI device if one was present
* at the moment of invocation, or NULL otherwise.
*
+ * The caller is responsible for calling put_device() on the returned device.
+ *
* See additional information in acpi_dev_present() as well.
*/
-const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
struct acpi_dev_match_info match = {};
struct device *dev;
@@ -827,9 +831,9 @@ acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
match.hrv = hrv;
dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
- return dev ? match.dev_name : NULL;
+ return dev ? match.adev : NULL;
}
-EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+EXPORT_SYMBOL(acpi_dev_get_first_match_dev);
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
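Since acpi_dev_get_first_match_dev() now hands back a referenced struct acpi_device, a typical caller looks roughly like the sketch below; the _HID string is a placeholder and the helper name is invented.

#include <linux/acpi.h>

/* Illustration only: "ABCD1234" is a made-up _HID. */
static void example_find_device(void)
{
	struct acpi_device *adev;

	adev = acpi_dev_get_first_match_dev("ABCD1234", NULL, -1);
	if (!adev)
		return;

	pr_info("found %s\n", acpi_dev_name(adev));

	/* The lookup took a reference via bus_find_device(); drop it. */
	put_device(&adev->dev);
}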
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 43587ac680e4..31014c7d3793 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -112,7 +112,7 @@ static int video_detect_force_none(const struct dmi_system_id *d)
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
- * the backlight interface (even the vendor interface) untill next
+ * the backlight interface (even the vendor interface) until next
* reboot. It's why we should prevent video.ko from being used here
* and we can't rely on a later call to acpi_video_unregister().
*/
@@ -141,6 +141,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "Sony VPCEH3U1E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCEH3U1E"),
+ },
+ },
/*
* These models have a working acpi_video backlight control, and using
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index b0b688c481e8..3751d811be39 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -170,8 +170,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
-#ifdef CONFIG_PM
-static int tegra_ahb_suspend(struct device *dev)
+static int __maybe_unused tegra_ahb_suspend(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -181,7 +180,7 @@ static int tegra_ahb_suspend(struct device *dev)
return 0;
}
-static int tegra_ahb_resume(struct device *dev)
+static int __maybe_unused tegra_ahb_resume(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -190,7 +189,6 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
-#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4b9c7ca492e6..6f0712f0767c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3121,6 +3121,7 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node && target_node->txn_security_ctx) {
u32 secid;
+ size_t added_size;
security_task_getsecid(proc->tsk, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
@@ -3130,7 +3131,15 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_get_secctx_failed;
}
- extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ added_size = ALIGN(secctx_sz, sizeof(u64));
+ extra_buffers_size += added_size;
+ if (extra_buffers_size < added_size) {
+ /* integer overflow of extra_buffers_size */
+ return_error = BR_FAILED_REPLY;
+ return_error_param = EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_extra_size;
+ }
}
trace_binder_transaction(reply, t, target_node);
@@ -3480,6 +3489,7 @@ err_copy_data_failed:
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+err_bad_extra_size:
if (secctx)
security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
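The binder change guards extra_buffers_size against unsigned wrap-around when the aligned security-context size is added. The same check in isolation, as a generic sketch with an invented helper name:

#include <linux/types.h>

/*
 * After total += added, total can only end up smaller than added if the
 * unsigned addition wrapped; returning true signals the overflow.
 */
static inline bool example_add_wraps(size_t *total, size_t added)
{
	*total += added;
	return *total < added;
}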
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 195f120c4e8c..bb929eb87116 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -931,8 +931,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
- if (!down_write_trylock(&mm->mmap_sem))
- goto err_down_write_mmap_sem_failed;
+ if (!down_read_trylock(&mm->mmap_sem))
+ goto err_down_read_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
list_lru_isolate(lru, item);
@@ -945,7 +945,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
- up_write(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);
@@ -959,7 +959,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
mutex_unlock(&alloc->mutex);
return LRU_REMOVED_RETRY;
-err_down_write_mmap_sem_failed:
+err_down_read_mmap_sem_failed:
mmput_async(mm);
err_mmget:
err_page_already_freed:
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index ce59253ec158..ea1175f7f147 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -53,11 +53,13 @@
enum ahci_qoriq_type {
AHCI_LS1021A,
+ AHCI_LS1028A,
AHCI_LS1043A,
AHCI_LS2080A,
AHCI_LS1046A,
AHCI_LS1088A,
AHCI_LS2088A,
+ AHCI_LX2160A,
};
struct ahci_qoriq_priv {
@@ -67,13 +69,17 @@ struct ahci_qoriq_priv {
bool is_dmacoherent;
};
+static bool ecc_initialized;
+
static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1021a-ahci", .data = (void *)AHCI_LS1021A},
+ { .compatible = "fsl,ls1028a-ahci", .data = (void *)AHCI_LS1028A},
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
{ .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
+ { .compatible = "fsl,lx2160a-ahci", .data = (void *)AHCI_LX2160A},
{},
};
MODULE_DEVICE_TABLE(of, ahci_qoriq_of_match);
@@ -165,9 +171,10 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
switch (qpriv->type) {
case AHCI_LS1021A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(SATA_ECC_DISABLE, qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(LS1021A_PORT_PHY2, reg_base + PORT_PHY2);
writel(LS1021A_PORT_PHY3, reg_base + PORT_PHY3);
@@ -180,10 +187,12 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
case AHCI_LS1043A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -202,10 +211,12 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
case AHCI_LS1046A:
- if (!qpriv->ecc_addr)
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -214,11 +225,15 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+ case AHCI_LS1028A:
case AHCI_LS1088A:
- if (!qpriv->ecc_addr)
+ case AHCI_LX2160A:
+ if (!(qpriv->ecc_addr || ecc_initialized))
return -EINVAL;
- writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
- qpriv->ecc_addr);
+ else if (qpriv->ecc_addr && !ecc_initialized)
+ writel(readl(qpriv->ecc_addr) |
+ ECC_DIS_LS1088A,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2);
writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3);
@@ -237,6 +252,7 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
break;
}
+ ecc_initialized = true;
return 0;
}
@@ -264,13 +280,18 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "sata-ecc");
- if (res) {
- qoriq_priv->ecc_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(qoriq_priv->ecc_addr))
- return PTR_ERR(qoriq_priv->ecc_addr);
+ if (unlikely(!ecc_initialized)) {
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ "sata-ecc");
+ if (res) {
+ qoriq_priv->ecc_addr =
+ devm_ioremap_resource(dev, res);
+ if (IS_ERR(qoriq_priv->ecc_addr))
+ return PTR_ERR(qoriq_priv->ecc_addr);
+ }
}
+
qoriq_priv->is_dmacoherent = of_dma_is_coherent(np);
rc = ahci_platform_enable_resources(hpriv);
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cc6d06c1b2c7..db271b705529 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,7 +44,7 @@
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/platform.h>
+#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 59b2317acea9..3495e1733a8e 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -909,7 +909,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
host = ata_host_alloc(dev, 1);
if (!host) {
- dev_err(dev, "ata_host_alloc failed\n");
ret = -ENOMEM;
goto err_pm_put;
}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 82532c299bb5..5278c57dce73 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2826,8 +2826,8 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
case 0x6:
{
ia_cmds.status = 0;
- printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
- printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
+ printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
+ printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
}
break;
case 0x8:
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 059700ea3521..dc404492381d 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -3,7 +3,6 @@ menu "Generic Driver Options"
config UEVENT_HELPER
bool "Support for uevent helper"
- default y
help
The uevent helper program is forked by the kernel for
every uevent.
@@ -149,6 +148,14 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+config HMEM_REPORTING
+ bool
+ default n
+ depends on NUMA
+ help
+ Enable reporting for heterogeneous memory access attributes under
+ their non-uniform memory nodes.
+
source "drivers/base/test/Kconfig"
config SYS_HYPERVISOR
@@ -174,7 +181,6 @@ source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
bool
default n
- select ANON_INODES
select IRQ_WORK
help
This option enables the framework for buffer-sharing between
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index edfcf8d982e4..1739d7e1952a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -7,7 +7,6 @@
*/
#include <linux/acpi.h>
-#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
@@ -31,7 +30,6 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
per_cpu(freq_scale, i) = scale;
}
-static DEFINE_MUTEX(cpu_scale_mutex);
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
@@ -51,37 +49,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
-static ssize_t cpu_capacity_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int this_cpu = cpu->dev.id;
- int i;
- unsigned long new_capacity;
- ssize_t ret;
-
- if (!count)
- return 0;
-
- ret = kstrtoul(buf, 0, &new_capacity);
- if (ret)
- return ret;
- if (new_capacity > SCHED_CAPACITY_SCALE)
- return -EINVAL;
-
- mutex_lock(&cpu_scale_mutex);
- for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
- topology_set_cpu_scale(i, new_capacity);
- mutex_unlock(&cpu_scale_mutex);
-
- schedule_work(&update_topology_flags_work);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
+static DEVICE_ATTR_RO(cpu_capacity);
static int register_cpu_capacity_sysctl(void)
{
@@ -141,7 +109,6 @@ void topology_normalize_cpu_scale(void)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
- mutex_lock(&cpu_scale_mutex);
for_each_possible_cpu(cpu) {
pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
cpu, raw_capacity[cpu]);
@@ -151,7 +118,6 @@ void topology_normalize_cpu_scale(void)
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
cpu, topology_get_cpu_scale(NULL, cpu));
}
- mutex_unlock(&cpu_scale_mutex);
}
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4aeaa0c92bda..fd7511e04e62 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1999,6 +1999,11 @@ static int device_private_init(struct device *dev)
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up your
* reference instead.
+ *
+ * Rule of thumb is: if device_add() succeeds, you should call
+ * device_del() when you want to get rid of it. If device_add() has
+ * *not* succeeded, use *only* put_device() to drop the reference
+ * count.
*/
int device_add(struct device *dev)
{
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 668139cfa664..cc37511de866 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -548,11 +548,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -560,6 +567,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
NULL
};
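cpu_show_mds() above is a __weak default that reports "Not affected"; an affected architecture overrides it with a strong definition along these lines. Sketch only: the status string is illustrative and not taken from any arch code in this merge.

#include <linux/cpu.h>
#include <linux/device.h>

/* Hypothetical arch-side override of the weak default declared in <linux/cpu.h>. */
ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	/* A real implementation derives this from the detected mitigation state. */
	return sprintf(buf, "Vulnerable\n");
}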
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a823f469e53f..0df9b4461766 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -490,7 +490,7 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto dma_failed;
+ goto probe_failed;
}
if (driver_sysfs_add(dev)) {
@@ -546,14 +546,13 @@ re_probe:
goto done;
probe_failed:
- arch_teardown_dma_ops(dev);
-dma_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
device_links_no_driver(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index eb15d976a9ea..38f2da6f5c2b 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
menu "Firmware loader"
config FW_LOADER
diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore
index 9c8bdb9fdcc3..166f76b43049 100644
--- a/drivers/base/firmware_loader/builtin/.gitignore
+++ b/drivers/base/firmware_loader/builtin/.gitignore
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
*.gen.S
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index b5c865fe263b..f962488546b6 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -674,8 +674,8 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
*
* This function is called if direct lookup for the firmware failed, it enables
* a fallback mechanism through userspace by exposing a sysfs loading
- * interface. Userspace is in charge of loading the firmware through the syfs
- * loading interface. This syfs fallback mechanism may be disabled completely
+ * interface. Userspace is in charge of loading the firmware through the sysfs
+ * loading interface. This sysfs fallback mechanism may be disabled completely
* on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
* If this false we check if the internal API caller set the @FW_OPT_NOFALLBACK
* flag, if so it would also disable the fallback mechanism. A system may want
@@ -693,7 +693,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
return ret;
if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device, "Falling back to syfs fallback for: %s\n",
+ dev_warn(device, "Falling back to sysfs fallback for: %s\n",
name);
else
dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index e49028a60429..f180427e48f4 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -231,13 +231,14 @@ static bool pages_correctly_probed(unsigned long start_pfn)
* OK to have direct references to sparsemem variables in here.
*/
static int
-memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
+memory_block_action(unsigned long start_section_nr, unsigned long action,
+ int online_type)
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
int ret;
- start_pfn = section_nr_to_pfn(phys_index);
+ start_pfn = section_nr_to_pfn(start_section_nr);
switch (action) {
case MEM_ONLINE:
@@ -251,7 +252,7 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
- "%ld\n", __func__, phys_index, action, action);
+ "%ld\n", __func__, start_section_nr, action, action);
ret = -EINVAL;
}
@@ -733,16 +734,18 @@ unregister_memory(struct memory_block *memory)
{
BUG_ON(memory->dev.bus != &memory_subsys);
- /* drop the ref. we got in remove_memory_section() */
+ /* drop the ref. we got via find_memory_block() */
put_device(&memory->dev);
device_unregister(&memory->dev);
}
-static int remove_memory_section(unsigned long node_id,
- struct mem_section *section, int phys_device)
+void unregister_memory_section(struct mem_section *section)
{
struct memory_block *mem;
+ if (WARN_ON_ONCE(!present_section(section)))
+ return;
+
mutex_lock(&mem_sysfs_mutex);
/*
@@ -763,15 +766,6 @@ static int remove_memory_section(unsigned long node_id,
out_unlock:
mutex_unlock(&mem_sysfs_mutex);
- return 0;
-}
-
-int unregister_memory_section(struct mem_section *section)
-{
- if (!present_section(section))
- return -EINVAL;
-
- return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 86d6cd92ce3d..8598fcbd2a17 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -17,6 +17,7 @@
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -59,6 +60,302 @@ static inline ssize_t node_read_cpulist(struct device *dev,
static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+/**
+ * struct node_access_nodes - Access class device to hold user visible
+ * relationships to other nodes.
+ * @dev: Device for this memory access class
+ * @list_node: List element in the node's access list
+ * @access: The access class rank
+ */
+struct node_access_nodes {
+ struct device dev;
+ struct list_head list_node;
+ unsigned access;
+#ifdef CONFIG_HMEM_REPORTING
+ struct node_hmem_attrs hmem_attrs;
+#endif
+};
+#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
+
+static struct attribute *node_init_access_node_attrs[] = {
+ NULL,
+};
+
+static struct attribute *node_targ_access_node_attrs[] = {
+ NULL,
+};
+
+static const struct attribute_group initiators = {
+ .name = "initiators",
+ .attrs = node_init_access_node_attrs,
+};
+
+static const struct attribute_group targets = {
+ .name = "targets",
+ .attrs = node_targ_access_node_attrs,
+};
+
+static const struct attribute_group *node_access_node_groups[] = {
+ &initiators,
+ &targets,
+ NULL,
+};
+
+static void node_remove_accesses(struct node *node)
+{
+ struct node_access_nodes *c, *cnext;
+
+ list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
+ list_del(&c->list_node);
+ device_unregister(&c->dev);
+ }
+}
+
+static void node_access_release(struct device *dev)
+{
+ kfree(to_access_nodes(dev));
+}
+
+static struct node_access_nodes *node_init_node_access(struct node *node,
+ unsigned access)
+{
+ struct node_access_nodes *access_node;
+ struct device *dev;
+
+ list_for_each_entry(access_node, &node->access_list, list_node)
+ if (access_node->access == access)
+ return access_node;
+
+ access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
+ if (!access_node)
+ return NULL;
+
+ access_node->access = access;
+ dev = &access_node->dev;
+ dev->parent = &node->dev;
+ dev->release = node_access_release;
+ dev->groups = node_access_node_groups;
+ if (dev_set_name(dev, "access%u", access))
+ goto free;
+
+ if (device_register(dev))
+ goto free_name;
+
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&access_node->list_node, &node->access_list);
+ return access_node;
+free_name:
+ kfree_const(dev->kobj.name);
+free:
+ kfree(access_node);
+ return NULL;
+}
+
+#ifdef CONFIG_HMEM_REPORTING
+#define ACCESS_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
+} \
+static DEVICE_ATTR_RO(name);
+
+ACCESS_ATTR(read_bandwidth)
+ACCESS_ATTR(read_latency)
+ACCESS_ATTR(write_bandwidth)
+ACCESS_ATTR(write_latency)
+
+static struct attribute *access_attrs[] = {
+ &dev_attr_read_bandwidth.attr,
+ &dev_attr_read_latency.attr,
+ &dev_attr_write_bandwidth.attr,
+ &dev_attr_write_latency.attr,
+ NULL,
+};
+
+/**
+ * node_set_perf_attrs - Set the performance values for given access class
+ * @nid: Node identifier to be set
+ * @hmem_attrs: Heterogeneous memory performance attributes
+ * @access: The access class for the given attributes
+ */
+void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+ unsigned access)
+{
+ struct node_access_nodes *c;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ c = node_init_node_access(node, access);
+ if (!c)
+ return;
+
+ c->hmem_attrs = *hmem_attrs;
+ for (i = 0; access_attrs[i] != NULL; i++) {
+ if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
+ "initiators")) {
+ pr_info("failed to add performance attribute to node %d\n",
+ nid);
+ break;
+ }
+ }
+}
+
+/**
+ * struct node_cache_info - Internal tracking for memory node caches
+ * @dev: Device representing the cache level
+ * @node: List element for tracking in the node
+ * @cache_attrs: Attributes for this cache level
+ */
+struct node_cache_info {
+ struct device dev;
+ struct list_head node;
+ struct node_cache_attrs cache_attrs;
+};
+#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
+
+#define CACHE_ATTR(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
+} \
+DEVICE_ATTR_RO(name);
+
+CACHE_ATTR(size, "%llu")
+CACHE_ATTR(line_size, "%u")
+CACHE_ATTR(indexing, "%u")
+CACHE_ATTR(write_policy, "%u")
+
+static struct attribute *cache_attrs[] = {
+ &dev_attr_indexing.attr,
+ &dev_attr_size.attr,
+ &dev_attr_line_size.attr,
+ &dev_attr_write_policy.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cache);
+
+static void node_cache_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static void node_cacheinfo_release(struct device *dev)
+{
+ struct node_cache_info *info = to_cache_info(dev);
+ kfree(info);
+}
+
+static void node_init_cache_dev(struct node *node)
+{
+ struct device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ dev->parent = &node->dev;
+ dev->release = node_cache_release;
+ if (dev_set_name(dev, "memory_side_cache"))
+ goto free_dev;
+
+ if (device_register(dev))
+ goto free_name;
+
+ pm_runtime_no_callbacks(dev);
+ node->cache_dev = dev;
+ return;
+free_name:
+ kfree_const(dev->kobj.name);
+free_dev:
+ kfree(dev);
+}
+
+/**
+ * node_add_cache() - add cache attribute to a memory node
+ * @nid: Node identifier that has new cache attributes
+ * @cache_attrs: Attributes for the cache being added
+ */
+void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
+{
+ struct node_cache_info *info;
+ struct device *dev;
+ struct node *node;
+
+ if (!node_online(nid) || !node_devices[nid])
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(info, &node->cache_attrs, node) {
+ if (info->cache_attrs.level == cache_attrs->level) {
+ dev_warn(&node->dev,
+ "attempt to add duplicate cache level:%d\n",
+ cache_attrs->level);
+ return;
+ }
+ }
+
+ if (!node->cache_dev)
+ node_init_cache_dev(node);
+ if (!node->cache_dev)
+ return;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dev = &info->dev;
+ dev->parent = node->cache_dev;
+ dev->release = node_cacheinfo_release;
+ dev->groups = cache_groups;
+ if (dev_set_name(dev, "index%d", cache_attrs->level))
+ goto free_cache;
+
+ info->cache_attrs = *cache_attrs;
+ if (device_register(dev)) {
+ dev_warn(&node->dev, "failed to add cache level:%d\n",
+ cache_attrs->level);
+ goto free_name;
+ }
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&info->node, &node->cache_attrs);
+ return;
+free_name:
+ kfree_const(dev->kobj.name);
+free_cache:
+ kfree(info);
+}
+
+static void node_remove_caches(struct node *node)
+{
+ struct node_cache_info *info, *next;
+
+ if (!node->cache_dev)
+ return;
+
+ list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
+ list_del(&info->node);
+ device_unregister(&info->dev);
+ }
+ device_unregister(node->cache_dev);
+}
+
+static void node_init_caches(unsigned int nid)
+{
+ INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
+}
+#else
+static void node_init_caches(unsigned int nid) { }
+static void node_remove_caches(struct node *node) { }
+#endif
+
#define K(x) ((x) << (PAGE_SHIFT - 10))
static ssize_t node_read_meminfo(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -340,7 +637,8 @@ static int register_node(struct node *node, int num)
void unregister_node(struct node *node)
{
hugetlb_unregister_node(node); /* no-op, if memoryless node */
-
+ node_remove_accesses(node);
+ node_remove_caches(node);
device_unregister(&node->dev);
}
@@ -372,6 +670,56 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
kobject_name(&node_devices[nid]->dev.kobj));
}
+/**
+ * register_memory_node_under_compute_node - link memory node to its compute
+ * node for a given access class.
+ * @mem_nid: Memory node number
+ * @cpu_nid: CPU node number
+ * @access: Access class to register
+ *
+ * Description:
+ * For use with platforms that may have separate memory and compute nodes.
+ * This function will export node relationships linking which memory
+ * initiator nodes can access memory targets at a given ranked access
+ * class.
+ */
+int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+ unsigned access)
+{
+ struct node *init_node, *targ_node;
+ struct node_access_nodes *initiator, *target;
+ int ret;
+
+ if (!node_online(cpu_nid) || !node_online(mem_nid))
+ return -ENODEV;
+
+ init_node = node_devices[cpu_nid];
+ targ_node = node_devices[mem_nid];
+ initiator = node_init_node_access(init_node, access);
+ target = node_init_node_access(targ_node, access);
+ if (!initiator || !target)
+ return -ENOMEM;
+
+ ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
+ &targ_node->dev.kobj,
+ dev_name(&targ_node->dev));
+ if (ret)
+ return ret;
+
+ ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
+ &init_node->dev.kobj,
+ dev_name(&init_node->dev));
+ if (ret)
+ goto err;
+
+ return 0;
+ err:
+ sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
+ dev_name(&targ_node->dev));
+ return ret;
+}
+
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
struct device *obj;
@@ -580,8 +928,10 @@ int __register_one_node(int nid)
register_cpu_under_node(cpu, nid);
}
+ INIT_LIST_HEAD(&node_devices[nid]->access_list);
/* initialize work queue for memory hot plug */
init_node_hugetlb_work(nid);
+ node_init_caches(nid);
return error;
}
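Tying the node.c additions back to the HMAT code earlier in this merge: a firmware parser fills struct node_hmem_attrs, publishes it for an access class, and links the memory node to its best initiator. The sketch below assumes the declarations are exposed through <linux/node.h> as in the rest of this series; the node numbers and attribute values are invented.

#include <linux/node.h>

/* Illustration only: publish performance data for node 1, initiated by node 0. */
static void example_publish_node_perf(void)
{
	struct node_hmem_attrs attrs = {
		.read_bandwidth  = 15000,
		.write_bandwidth = 10000,
		.read_latency    = 120,
		.write_latency   = 150,
	};

	/* Creates node1/access0/initiators/{read,write}_{bandwidth,latency}. */
	node_set_perf_attrs(1, &attrs, 0);

	/* Cross-links node1 under node0/access0/targets and node0 under node1/access0/initiators. */
	if (register_memory_node_under_compute_node(1, 0, 0))
		pr_warn("example: failed to link node1 to node0\n");
}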
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dab0a5abc391..4d1729853d1a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -84,7 +84,7 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
* device
*
* @pdev: platform device to use both for memory resource lookup as well as
- * resource managemend
+ * resource management
* @index: resource index
*/
#ifdef CONFIG_HAS_IOMEM
@@ -438,10 +438,12 @@ int platform_device_add(struct platform_device *pdev)
p = &ioport_resource;
}
- if (p && insert_resource(p, r)) {
- dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
- ret = -EBUSY;
- goto failed;
+ if (p) {
+ ret = insert_resource(p, r);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+ goto failed;
+ }
}
}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 365ad751ce0f..59d19dd64928 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
*
* Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
#include <linux/kernel.h>
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 22aedb28aad7..8db98a1f83dc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 96a6dc9d305c..33c30c1e6a30 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain.c - Common code related to device power domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/delay.h>
@@ -22,6 +20,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
+#include <linux/cpu.h>
#include "power.h"
@@ -128,6 +127,8 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -391,11 +392,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
if (unlikely(!genpd->set_performance_state))
return -EINVAL;
- if (unlikely(!dev->power.subsys_data ||
- !dev->power.subsys_data->domain_data)) {
- WARN_ON(1);
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
return -EINVAL;
- }
genpd_lock(genpd);
@@ -517,7 +516,9 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
* (1) The domain is configured as always on.
* (2) When the domain has a subdomain being powered on.
*/
- if (genpd_is_always_on(genpd) || atomic_read(&genpd->sd_count) > 0)
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
return -EBUSY;
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
@@ -1396,8 +1397,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#endif /* CONFIG_PM_SLEEP */
-static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
- struct gpd_timing_data *td)
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1412,9 +1412,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
goto err_put;
}
- if (td)
- gpd_data->td = *td;
-
gpd_data->base.dev = dev;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
@@ -1454,8 +1451,57 @@ static void genpd_free_dev_data(struct device *dev,
dev_pm_put_subsys_data(dev);
}
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ struct generic_pm_domain *master = link->master;
+
+ genpd_lock_nested(master, depth + 1);
+ genpd_update_cpumask(master, cpu, set, depth + 1);
+ genpd_unlock(master);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
- struct gpd_timing_data *td)
+ struct device *base_dev)
{
struct generic_pm_domain_data *gpd_data;
int ret;
@@ -1465,16 +1511,19 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = genpd_alloc_dev_data(dev, td);
+ gpd_data = genpd_alloc_dev_data(dev);
if (IS_ERR(gpd_data))
return PTR_ERR(gpd_data);
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+
ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
if (ret)
goto out;
genpd_lock(genpd);
+ genpd_set_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, &genpd->domain);
genpd->device_count++;
@@ -1502,7 +1551,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
int ret;
mutex_lock(&gpd_list_lock);
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
mutex_unlock(&gpd_list_lock);
return ret;
@@ -1532,6 +1581,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
dev_pm_domain_set(dev, NULL);
list_del_init(&pdd->list_node);
@@ -1686,6 +1736,12 @@ out:
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
struct genpd_power_state *state;
@@ -1696,7 +1752,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
genpd->states = state;
genpd->state_count = 1;
- genpd->free = state;
+ genpd->free_states = genpd_free_default_power_state;
return 0;
}
@@ -1759,14 +1815,22 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
}
/* Always-on domains must be powered on at initialization. */
- if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd))
return -EINVAL;
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
/* Use only one "off" state if there were no states declared */
if (genpd->state_count == 0) {
ret = genpd_set_default_power_state(genpd);
- if (ret)
+ if (ret) {
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
return ret;
+ }
} else if (!gov && genpd->state_count > 1) {
pr_warn("%s: no governor for states\n", genpd->name);
}
@@ -1812,7 +1876,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
list_del(&genpd->gpd_list_node);
genpd_unlock(genpd);
cancel_work_sync(&genpd->power_off_work);
- kfree(genpd->free);
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+
pr_debug("%s: removed %s\n", __func__, genpd->name);
return 0;
@@ -2190,7 +2258,7 @@ int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
goto out;
}
- ret = genpd_add_device(genpd, dev, NULL);
+ ret = genpd_add_device(genpd, dev, dev);
out:
mutex_unlock(&gpd_list_lock);
@@ -2274,6 +2342,7 @@ EXPORT_SYMBOL_GPL(of_genpd_remove_last);
static void genpd_release_dev(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(dev);
}
@@ -2335,14 +2404,14 @@ static void genpd_dev_pm_sync(struct device *dev)
genpd_queue_power_off_work(pd);
}
-static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
int ret;
- ret = of_parse_phandle_with_args(np, "power-domains",
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", index, &pd_args);
if (ret < 0)
return ret;
@@ -2354,12 +2423,12 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return driver_deferred_probe_check_state(dev);
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
- ret = genpd_add_device(pd, dev, NULL);
+ ret = genpd_add_device(pd, dev, base_dev);
mutex_unlock(&gpd_list_lock);
if (ret < 0) {
@@ -2410,7 +2479,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2440,10 +2509,10 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
if (!dev->of_node)
return NULL;
- /* Deal only with devices using multiple PM domains. */
+ /* Verify that the index is within a valid range. */
num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells");
- if (num_domains < 2 || index >= num_domains)
+ if (index >= num_domains)
return NULL;
/* Allocate and register device on the genpd bus. */
@@ -2454,15 +2523,16 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
virt_dev->bus = &genpd_bus_type;
virt_dev->release = genpd_release_dev;
+ virt_dev->of_node = of_node_get(dev->of_node);
ret = device_register(virt_dev);
if (ret) {
- kfree(virt_dev);
+ put_device(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
if (ret < 1) {
device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
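
Illustrative sketch (not part of the diff above): with the relaxed index check in genpd_dev_pm_attach_by_id(), a consumer whose device node references only a single "power-domains" entry can still attach through the by-id interface and tie its runtime PM to the returned virtual device. All names below are hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_domain.h>

static int example_attach_single_domain(struct device *dev)
{
	struct device *pd_dev;
	struct device_link *link;

	pd_dev = genpd_dev_pm_attach_by_id(dev, 0);
	if (IS_ERR_OR_NULL(pd_dev))
		return PTR_ERR_OR_ZERO(pd_dev);

	/* Let the consumer's runtime PM follow the virtual domain device. */
	link = device_link_add(dev, pd_dev,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link) {
		dev_pm_domain_detach(pd_dev, true);
		return -EINVAL;
	}

	return 0;
}
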
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 4d07e38a8247..3838045c9277 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -1,15 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/domain_governor.c - Governors for device PM domains.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -210,8 +211,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
struct generic_pm_domain *genpd = pd_to_genpd(pd);
struct gpd_link *link;
- if (!genpd->max_off_time_changed)
+ if (!genpd->max_off_time_changed) {
+ genpd->state_idx = genpd->cached_power_down_state_idx;
return genpd->cached_power_down_ok;
+ }
/*
* We have to invalidate the cached results for the masters, so
@@ -236,6 +239,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
genpd->state_idx--;
}
+ genpd->cached_power_down_state_idx = genpd->state_idx;
return genpd->cached_power_down_ok;
}
@@ -244,6 +248,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
return false;
}
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!default_power_down_ok(pd))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+	 * and its subdomains. Note that genpd->cpus is sufficient, as it
+	 * already contains a mask of all CPUs from the subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+	/* The minimum idle duration is from now until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+	 * Find the deepest idle state whose residency requirement is met,
+	 * also taking the power-off latency of the state into account.
+	 * Start at the state picked by the dev PM QoS constraint validation.
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
struct dev_power_governor simple_qos_governor = {
.suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
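
Illustrative sketch (not part of the diff above): a hypothetical provider creating a CPU cluster power domain. GENPD_FLAG_CPU_DOMAIN makes genpd track the CPUs attached below the domain, and the new pm_domain_cpu_gov governor (available when CONFIG_CPU_IDLE is set) then checks their next wakeup before allowing a power-off. Apart from the flag and governor introduced above, all names are made up.

#include <linux/pm_domain.h>
#include <linux/slab.h>

static struct generic_pm_domain *example_init_cluster_pd(void)
{
	struct generic_pm_domain *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return NULL;

	pd->name = "example-cpu-cluster";
	pd->flags = GENPD_FLAG_CPU_DOMAIN | GENPD_FLAG_IRQ_SAFE;

	/* Start powered on; the CPU-aware governor decides when to power off. */
	if (pm_genpd_init(pd, &pm_domain_cpu_gov, false)) {
		kfree(pd);
		return NULL;
	}

	return pd;
}
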
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index b2ed606265a8..4fa525668cb7 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f80d298de3fa..dcfc0a36c8f7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/main.c - Where the driver meets power management.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
- * This file is released under the GPLv2
- *
- *
* The driver model core calls device_pm_add() when a device is registered.
* This will initialize the embedded device_pm_info object in the device
* and add it to the list of power-controlled devices. sysfs entries for
@@ -207,7 +205,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -225,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
rettime = ktime_get();
nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
- dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
(unsigned long long)nsecs >> 10);
}
@@ -478,7 +476,7 @@ struct dpm_watchdog {
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that the PM watchdog depends on.
*
* Called when a driver has timed out suspending or resuming.
* There's not much we can do here to recover so panic() to
@@ -706,6 +704,19 @@ static bool is_async(struct device *dev)
&& !pm_trace_is_enabled();
}
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(func, dev);
+ return true;
+ }
+
+ return false;
+}
+
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = (struct device *)data;
@@ -732,13 +743,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_noirq, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+ dpm_async_fn(dev, async_resume_noirq);
while (!list_empty(&dpm_noirq_list)) {
dev = to_device(dpm_noirq_list.next);
@@ -889,13 +895,8 @@ void dpm_resume_early(pm_message_t state)
* in case the starting of async threads is
* delayed by non-async resuming devices.
*/
- list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume_early, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+ dpm_async_fn(dev, async_resume_early);
while (!list_empty(&dpm_late_early_list)) {
dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1054,8 @@ void dpm_resume(pm_message_t state)
pm_transition = state;
async_error = 0;
- list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
- reinit_completion(&dev->power.completion);
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_resume, dev);
- }
- }
+ list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+ dpm_async_fn(dev, async_resume);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1369,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)
static int device_suspend_noirq(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_noirq, dev);
+ if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
- }
+
return __device_suspend_noirq(dev, pm_transition, false);
}
@@ -1576,13 +1568,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)
static int device_suspend_late(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend_late, dev);
+ if (dpm_async_fn(dev, async_suspend_late))
return 0;
- }
return __device_suspend_late(dev, pm_transition, false);
}
@@ -1747,6 +1734,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->power.syscore)
goto Complete;
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || dev->power.wakeup_path)
+ dev->power.direct_complete = false;
+
if (dev->power.direct_complete) {
if (pm_runtime_status_suspended(dev)) {
pm_runtime_disable(dev);
@@ -1842,13 +1833,8 @@ static void async_suspend(void *data, async_cookie_t cookie)
static int device_suspend(struct device *dev)
{
- reinit_completion(&dev->power.completion);
-
- if (is_async(dev)) {
- get_device(dev);
- async_schedule_dev(async_suspend, dev);
+ if (dpm_async_fn(dev, async_suspend))
return 0;
- }
return __device_suspend(dev, pm_transition, false);
}
@@ -2063,14 +2049,14 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, void *fn, int ret)
{
if (ret)
- pr_err("%s(): %pF returns %d\n", function, fn, ret);
+ pr_err("%s(): %pS returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
/**
* device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
* @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
*/
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f80e402ef778..6c91f8df1d59 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 977db40378b0..952a1e7057c7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
- *
- * This file is released under the GPLv2.
*/
-
#include <linux/sched/mm.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 1226e441ddfe..1b9c281cbe41 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -1,7 +1,5 @@
-/*
- * drivers/base/power/sysfs.c - sysfs entries for device PM
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 2bd9d2c744ca..977d27bd1a22 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/trace.c
*
@@ -6,7 +7,6 @@
* Trace facility for suspend/resume problems, when none of the
* devices may be working.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/pm-trace.h>
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b8fa5c0f2d13..5ce77d1ef9fc 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -1,16 +1,5 @@
-/*
- * wakeirq.c - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index bb1ae175fae1..5b2b6a05a4f3 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* drivers/base/power/wakeup.c - System wakeup events framework
*
* Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
- *
- * This file is released under the GPLv2.
*/
-
#define pr_fmt(fmt) "PM: " fmt
#include <linux/device.h>
@@ -804,7 +802,7 @@ void pm_print_active_wakeup_sources(void)
srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
- pr_debug("active wakeup source: %s\n", ws->name);
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
active = 1;
} else if (!active &&
(!last_activity_ws ||
@@ -815,7 +813,7 @@ void pm_print_active_wakeup_sources(void)
}
if (!active && last_activity_ws)
- pr_debug("last active wakeup source: %s\n",
+ pm_pr_dbg("last active wakeup source: %s\n",
last_activity_ws->name);
srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -845,7 +843,7 @@ bool pm_wakeup_pending(void)
raw_spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_debug("Wakeup pending, aborting suspend\n");
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 8b91ab380d14..348b37e64944 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -984,6 +984,81 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
/**
+ * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
+ * @fwnode: parent fwnode_handle containing the graph
+ * @port: identifier of the port node
+ * @endpoint: identifier of the endpoint node under the port node
+ * @flags: fwnode lookup flags
+ *
+ * Return the fwnode handle of the local endpoint corresponding to the port and
+ * endpoint IDs or NULL if not found.
+ *
+ * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
+ * has not been found, look for the closest endpoint ID greater than the
+ * specified one and return the endpoint that corresponds to it, if present.
+ *
+ * Do not return endpoints that belong to disabled devices, unless
+ * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ *
+ * The returned endpoint needs to be released by calling fwnode_handle_put() on
+ * it when it is not needed any more.
+ */
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags)
+{
+ struct fwnode_handle *ep = NULL, *best_ep = NULL;
+ unsigned int best_ep_id = 0;
+ bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
+ bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
+
+ while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) {
+ struct fwnode_endpoint fwnode_ep = { 0 };
+ int ret;
+
+ if (enabled_only) {
+ struct fwnode_handle *dev_node;
+ bool available;
+
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
+ if (!available)
+ continue;
+ }
+
+ ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
+ if (ret < 0)
+ continue;
+
+ if (fwnode_ep.port != port)
+ continue;
+
+ if (fwnode_ep.id == endpoint)
+ return ep;
+
+ if (!endpoint_next)
+ continue;
+
+ /*
+ * If the endpoint that has just been found is not the first
+ * matching one and the ID of the one found previously is closer
+ * to the requested endpoint ID, skip it.
+ */
+ if (fwnode_ep.id < endpoint ||
+ (best_ep && best_ep_id < fwnode_ep.id))
+ continue;
+
+ fwnode_handle_put(best_ep);
+ best_ep = fwnode_handle_get(ep);
+ best_ep_id = fwnode_ep.id;
+ }
+
+ return best_ep;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
+
+/**
* fwnode_graph_parse_endpoint - parse common endpoint node properties
* @fwnode: pointer to endpoint fwnode_handle
* @endpoint: pointer to the fwnode endpoint data structure
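
Illustrative sketch (not part of the diff above): a hypothetical bridge driver using the new fwnode_graph_get_endpoint_by_id() helper to find the endpoint on port 0 with id 0, or the next higher endpoint id on that port if id 0 is not wired up. The function and flag are the ones added above; everything else is invented.

#include <linux/device.h>
#include <linux/property.h>

static struct fwnode_handle *example_find_sensor_ep(struct device *dev)
{
	struct fwnode_handle *ep;

	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!ep)
		dev_dbg(dev, "no suitable endpoint found on port 0\n");

	/* The caller owns the reference; release it with fwnode_handle_put(). */
	return ep;
}
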
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index a98fced9bff8..3d80c4b43f72 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Register map access API internal header
*
* Copyright 2011 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _REGMAP_INTERNAL_H
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index bc6cd88b8cc6..b7e4b2464102 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - flat caching support
- *
- * Copyright 2012 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - flat caching support
+//
+// Copyright 2012 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/seq_file.h>
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 4ff311374c4a..fc14e8b9344f 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - LZO caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - LZO caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/lzo.h>
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 9cbb4b0cd01b..cfa29dc89bbf 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API - rbtree caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - rbtree caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/debugfs.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 773560348337..a93cafd7be4f 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -1,14 +1,10 @@
-/*
- * Register cache access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
#include <linux/bsearch.h>
#include <linux/device.h>
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
index c03ebfd4c731..b9f76bdf74a9 100644
--- a/drivers/base/regmap/regmap-ac97.c
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - AC'97 support
- *
- * Copyright 2013 Linaro Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - AC'97 support
+//
+// Copyright 2013 Linaro Ltd. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 19eb454f26c3..263f82516ff4 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - debugfs
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - debugfs
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/slab.h>
#include <linux/mutex.h>
@@ -195,6 +191,28 @@ static inline void regmap_calc_tot_len(struct regmap *map,
}
}
+static int regmap_next_readable_reg(struct regmap *map, int reg)
+{
+ struct regmap_debugfs_off_cache *c;
+ int ret = -EINVAL;
+
+ if (regmap_printable(map, reg + map->reg_stride)) {
+ ret = reg + map->reg_stride;
+ } else {
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (reg > c->max_reg)
+ continue;
+ if (reg < c->base_reg) {
+ ret = c->base_reg;
+ break;
+ }
+ }
+ mutex_unlock(&map->cache_lock);
+ }
+ return ret;
+}
+
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
unsigned int to, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -218,12 +236,8 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
/* Work out which register we're starting at */
start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
- for (i = start_reg; i <= to; i += map->reg_stride) {
- if (!regmap_readable(map, i) && !regmap_cached(map, i))
- continue;
-
- if (regmap_precious(map, i))
- continue;
+ for (i = start_reg; i >= 0 && i <= to;
+ i = regmap_next_readable_reg(map, i)) {
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
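
Related configuration sketch (not part of the diff above): the readability and preciousness checks behind the new regmap_next_readable_reg() helper are ultimately driven by the callbacks a driver supplies in its regmap_config. A hypothetical device exposing registers 0x00-0x3f with one clear-on-read status register could describe that as follows; the register layout is invented.

#include <linux/regmap.h>

static bool example_readable_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x3f;
}

static bool example_precious_reg(struct device *dev, unsigned int reg)
{
	return reg == 0x10;	/* clear-on-read interrupt status */
}

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x3f,
	.readable_reg = example_readable_reg,
	.precious_reg = example_precious_reg,
	.cache_type = REGCACHE_RBTREE,
};
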
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 056acde5e7d3..ac9b31c57967 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - I2C support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - I2C support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/i2c.h>
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5059748afd4c..c9dc70ceca5f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -1,14 +1,10 @@
-/*
- * regmap based irq_chip
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// regmap based irq_chip
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/export.h>
@@ -761,9 +757,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
- if (!d->type_buf_def[i])
- continue;
-
reg = chip->type_base +
(i * map->reg_stride * d->type_reg_stride);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8741fb5f8f54..af967d8f975e 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -1,20 +1,8 @@
-/*
- * Register map access API - MMIO support
- *
- * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
#include <linux/clk.h>
#include <linux/err.h>
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c7150dd264d5..c1894e93c378 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API - SPI support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
index 0bfb8ed244d5..cdf12d2aa3a1 100644
--- a/drivers/base/regmap/regmap-spmi.c
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -1,22 +1,13 @@
-/*
- * Register map access API - SPMI support
- *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * Based on regmap-i2c.c:
- * Copyright 2011 Wolfson Microelectronics plc
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPMI support
+//
+// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+//
+// Based on regmap-i2c.c:
+// Copyright 2011 Wolfson Microelectronics plc
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
#include <linux/regmap.h>
#include <linux/spmi.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index e6c64b0be5b2..3a7d30b8c3ac 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,13 +1,9 @@
-/*
- * Register map access API - W1 (1-Wire) support
- *
- * Copyright (c) 2017 Radioavionica Corporation
- * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - W1 (1-Wire) support
+//
+// Copyright (c) 2017 Radioavionica Corporation
+// Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
#include <linux/regmap.h>
#include <linux/module.h>
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 4f822e087def..f1025452bb39 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1,14 +1,10 @@
-/*
- * Register map access API
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
#include <linux/device.h>
#include <linux/slab.h>
@@ -1493,11 +1489,10 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
WARN_ON(!map->bus);
/* Check for unwritable registers before we start */
- if (map->writeable_reg)
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!map->writeable_reg(map->dev,
- reg + regmap_get_offset(map, i)))
- return -EINVAL;
+ for (i = 0; i < val_len / map->format.val_bytes; i++)
+ if (!regmap_writeable(map,
+ reg + regmap_get_offset(map, i)))
+ return -EINVAL;
if (!map->cache_bypass && map->format.parse_val) {
unsigned int ival;
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 6e076f359dcc..0d346a307140 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -62,19 +62,19 @@ int syscore_suspend(void)
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->suspend) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->suspend);
+ pr_info("PM: Calling %pS\n", ops->suspend);
ret = ops->suspend();
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->suspend);
+ "Interrupts enabled after %pS\n", ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
list_for_each_entry_continue(ops, &syscore_ops_list, node)
if (ops->resume)
@@ -100,10 +100,10 @@ void syscore_resume(void)
list_for_each_entry(ops, &syscore_ops_list, node)
if (ops->resume) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->resume);
+ pr_info("PM: Calling %pS\n", ops->resume);
ops->resume();
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pF\n", ops->resume);
+ "Interrupts enabled after %pS\n", ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -122,7 +122,7 @@ void syscore_shutdown(void)
list_for_each_entry_reverse(ops, &syscore_ops_list, node)
if (ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pF\n", ops->shutdown);
+ pr_info("PM: Calling %pS\n", ops->shutdown);
ops->shutdown();
}
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 90477c5fd9f9..0f1f7277a013 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0903e0803ec8..92b930cb3b72 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1829,6 +1829,7 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index b0dbbdfeb33e..c7b5c4671f05 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2028,6 +2028,7 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
+ unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c18586fccb6f..2da615b45b31 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -96,13 +96,8 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
/*
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
- *
- * Cannot support DAX and highmem, because our ->direct_access
- * routine for DAX must return memory that is always addressable.
- * If DAX was reworked to use pfns and kmap throughout, this
- * restriction might be able to be lifted.
*/
- gfp_flags = GFP_NOIO | __GFP_ZERO;
+ gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
page = alloc_page(gfp_flags);
if (!page)
return NULL;
@@ -158,6 +153,12 @@ static void brd_free_pages(struct brd_device *brd)
pos++;
/*
+	 * Removing an 80 GiB ramdisk takes about 3.4 seconds, so call
+	 * cond_resched() to avoid stalling the CPU.
+ */
+ cond_resched();
+
+ /*
* This assumes radix_tree_gang_lookup always returns as
* many pages as possible. If the radix-tree code changes,
* so will this have to.
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 000a2f4c0e92..549c64df9708 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1317,10 +1317,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FIXED_BM \
((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
-#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
-#else
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
@@ -1333,7 +1329,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
-#endif
/* Estimate max bio size as 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
@@ -1778,7 +1773,7 @@ static inline void __drbd_chk_io_error_(struct drbd_device *device,
_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
+ /* fall through - for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
/* Remember whether we saw a READ or WRITE error.
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f2471172a961..1cb5a0b85fd9 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -114,7 +114,7 @@ static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
if (!info || !info[0])
return 0;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -135,7 +135,7 @@ static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
int err = -EMSGSIZE;
int len;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -3269,7 +3269,7 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
struct drbd_device *device)
{
struct nlattr *nla;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
if (device &&
@@ -3837,7 +3837,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (err)
goto nla_put_failure;
- nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
index 8e261cb5198b..6a09b0b98018 100644
--- a/drivers/block/drbd/drbd_nla.c
+++ b/drivers/block/drbd/drbd_nla.c
@@ -35,7 +35,8 @@ int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
err = drbd_nla_check_mandatory(maxtype, nla);
if (!err)
- err = nla_parse_nested(tb, maxtype, nla, policy, NULL);
+ err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
+ NULL);
return err;
}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7ad88d91a09..6a727df02889 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3094,7 +3094,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
rv = 1;
break;
}
- /* Else fall through to one of the other strategies... */
+ /* Else fall through - to one of the other strategies... */
case ASB_DISCARD_OLDER_PRI:
if (self == 0 && peer == 1) {
rv = 1;
@@ -3119,7 +3119,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold
}
if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
break;
- /* else: fall through */
+ /* else, fall through */
case ASB_DISCARD_LEAST_CHG:
if (ch_self < ch_peer)
rv = -1;
@@ -5443,7 +5443,6 @@ static int drbd_do_auth(struct drbd_connection *connection)
rcu_read_unlock();
desc->tfm = connection->cram_hmac_tfm;
- desc->flags = 0;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
if (rv) {
@@ -6116,7 +6115,7 @@ int drbd_ack_receiver(struct drbd_thread *thi)
err = cmd->fn(connection, &pi);
if (err) {
- drbd_err(connection, "%pf failed\n", cmd->fn);
+ drbd_err(connection, "%ps failed\n", cmd->fn);
goto reconnect;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 643a04af213b..3809c7e6be8c 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -866,7 +866,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
} /* else: FIXME can this happen? */
break;
}
- /* else, fall through to BARRIER_ACKED */
+ /* else, fall through - to BARRIER_ACKED */
case BARRIER_ACKED:
/* barrier ack for READ requests does not make sense */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 268ef0c5d4ab..6781bcf3ec26 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -304,7 +304,6 @@ void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req,
void *src;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
@@ -332,7 +331,6 @@ void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
struct bvec_iter iter;
desc->tfm = tfm;
- desc->flags = 0;
crypto_shash_init(desc);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 95f608d1a098..b8998abd86a5 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1693,7 +1693,7 @@ irqreturn_t floppy_interrupt(int irq, void *dev_id)
/* we don't even know which FDC is the culprit */
pr_info("DOR0=%x\n", fdc_state[0].dor);
pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
- pr_info("handler=%pf\n", handler);
+ pr_info("handler=%ps\n", handler);
is_alive(__func__, "bizarre fdc");
return IRQ_NONE;
}
@@ -1752,7 +1752,7 @@ static void reset_interrupt(void)
debugt(__func__, "");
result(); /* get the status ready for set_fdc */
if (FDCS->reset) {
- pr_info("reset set in interrupt, calling %pf\n", cont->error);
+ pr_info("reset set in interrupt, calling %ps\n", cont->error);
cont->error(); /* a reset just after a reset. BAD! */
}
cont->redo();
@@ -1793,7 +1793,7 @@ static void show_floppy(void)
pr_info("\n");
pr_info("floppy driver state\n");
pr_info("-------------------\n");
- pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n",
+ pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%ps\n",
jiffies, interruptjiffies, jiffies - interruptjiffies,
lasthandler);
@@ -1812,9 +1812,9 @@ static void show_floppy(void)
pr_info("status=%x\n", fd_inb(FD_STATUS));
pr_info("fdc_busy=%lu\n", fdc_busy);
if (do_floppy)
- pr_info("do_floppy=%pf\n", do_floppy);
+ pr_info("do_floppy=%ps\n", do_floppy);
if (work_pending(&floppy_work))
- pr_info("floppy_work.func=%pf\n", floppy_work.func);
+ pr_info("floppy_work.func=%ps\n", floppy_work.func);
if (delayed_work_pending(&fd_timer))
pr_info("delayed work.function=%p expires=%ld\n",
fd_timer.work.func,
@@ -4540,6 +4540,7 @@ static int __init do_floppy_init(void)
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
+ disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bf1c61cab8eb..102d79575895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -264,12 +264,20 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return ret;
}
+static inline void loop_iov_iter_bvec(struct iov_iter *i,
+ unsigned int direction, const struct bio_vec *bvec,
+ unsigned long nr_segs, size_t count)
+{
+ iov_iter_bvec(i, direction, bvec, nr_segs, count);
+ i->type |= ITER_BVEC_FLAG_NO_REF;
+}
+
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
+ loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +355,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
+ loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -388,7 +396,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
+ loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -555,7 +563,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
+ loop_iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
@@ -900,6 +908,24 @@ static int loop_prepare_queue(struct loop_device *lo)
return 0;
}
+static void loop_update_rotational(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *file_inode = file->f_mapping->host;
+ struct block_device *file_bdev = file_inode->i_sb->s_bdev;
+ struct request_queue *q = lo->lo_queue;
+ bool nonrot = true;
+
+ /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
+ if (file_bdev)
+ nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+
+ if (nonrot)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
@@ -963,6 +989,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 83302ecdc8db..f0105d118056 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1192,14 +1192,6 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
else
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
-#ifdef MTIP_TRIM /* Disabling TRIM support temporarily */
- /* Demux ID.DRAT & ID.RZAT to determine trim support */
- if (port->identify[69] & (1 << 14) && port->identify[69] & (1 << 5))
- port->dd->trim_supp = true;
- else
-#endif
- port->dd->trim_supp = false;
-
/* Set the identify buffer as valid. */
port->identify_valid = 1;
@@ -1387,77 +1379,6 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
}
/*
- * Trim unused sectors
- *
- * @dd pointer to driver_data structure
- * @lba starting lba
- * @len # of 512b sectors to trim
- */
-static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
- unsigned int len)
-{
- u64 tlba, tlen, sect_left;
- struct mtip_trim_entry *buf;
- dma_addr_t dma_addr;
- struct host_to_dev_fis fis;
- blk_status_t ret = BLK_STS_OK;
- int i;
-
- if (!len || dd->trim_supp == false)
- return BLK_STS_IOERR;
-
- /* Trim request too big */
- WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
-
- /* Trim request not aligned on 4k boundary */
- WARN_ON(len % 8 != 0);
-
- /* Warn if vu_trim structure is too big */
- WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE);
-
- /* Allocate a DMA buffer for the trim structure */
- buf = dma_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
- GFP_KERNEL);
- if (!buf)
- return BLK_STS_RESOURCE;
- memset(buf, 0, ATA_SECT_SIZE);
-
- for (i = 0, sect_left = len, tlba = lba;
- i < MTIP_MAX_TRIM_ENTRIES && sect_left;
- i++) {
- tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
- MTIP_MAX_TRIM_ENTRY_LEN :
- sect_left);
- buf[i].lba = cpu_to_le32(tlba);
- buf[i].range = cpu_to_le16(tlen);
- tlba += tlen;
- sect_left -= tlen;
- }
- WARN_ON(sect_left != 0);
-
- /* Build the fis */
- memset(&fis, 0, sizeof(struct host_to_dev_fis));
- fis.type = 0x27;
- fis.opts = 1 << 7;
- fis.command = 0xfb;
- fis.features = 0x60;
- fis.sect_count = 1;
- fis.device = ATA_DEVICE_OBS;
-
- if (mtip_exec_internal_command(dd->port,
- &fis,
- 5,
- dma_addr,
- ATA_SECT_SIZE,
- 0,
- MTIP_TRIM_TIMEOUT_MS) < 0)
- ret = BLK_STS_IOERR;
-
- dma_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
- return ret;
-}
-
-/*
* Get the drive capacity.
*
* @dd Pointer to the device data structure.
@@ -3590,8 +3511,6 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- if (req_op(rq) == REQ_OP_DISCARD)
- return mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
mtip_hw_submit_io(dd, rq, cmd, hctx);
return BLK_STS_OK;
}
@@ -3769,14 +3688,6 @@ skip_create_disk:
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
- /* Signal trim support */
- if (dd->trim_supp == true) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, dd->queue);
- dd->queue->limits.discard_granularity = 4096;
- blk_queue_max_discard_sectors(dd->queue,
- MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES);
- }
-
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index abce25f27f57..91c1cb5b1532 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -193,21 +193,6 @@ struct mtip_work {
mtip_workq_sdbfx(w->port, group, w->completed); \
}
-#define MTIP_TRIM_TIMEOUT_MS 240000
-#define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
-
-struct mtip_trim_entry {
- __le32 lba; /* starting lba of region */
- __le16 rsvd; /* unused */
- __le16 range; /* # of 512b blocks to trim */
-} __packed;
-
-struct mtip_trim {
- /* Array of regions to trim */
- struct mtip_trim_entry entry[MTIP_MAX_TRIM_ENTRIES];
-} __packed;
-
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
/*
@@ -474,8 +459,6 @@ struct driver_data {
struct dentry *dfs_node;
- bool trim_supp; /* flag indicating trim support */
-
bool sr;
int numa_node; /* NUMA support */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 90ba9f4c03f3..053958a8a2ba 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -44,6 +44,9 @@
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/nbd.h>
+
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
@@ -510,6 +513,10 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (sent) {
if (sent >= sizeof(request)) {
skip = sent - sizeof(request);
+
+ /* initialize handle for tracing purposes */
+ handle = nbd_cmd_handle(cmd);
+
goto send_pages;
}
iov_iter_advance(&from, sent);
@@ -526,11 +533,14 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
handle = nbd_cmd_handle(cmd);
memcpy(request.handle, &handle, sizeof(handle));
+ trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
+
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
+ trace_nbd_header_sent(req, handle);
if (result <= 0) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
@@ -603,6 +613,7 @@ send_pages:
bio = next;
}
out:
+ trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
return 0;
@@ -650,6 +661,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
tag, req);
return ERR_PTR(-ENOENT);
}
+ trace_nbd_header_received(req, handle);
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
@@ -703,6 +715,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
}
}
out:
+ trace_nbd_payload_received(req, handle);
mutex_unlock(&cmd->lock);
return ret ? ERR_PTR(ret) : cmd;
}
@@ -1797,8 +1810,10 @@ again:
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1968,8 +1983,10 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1999,22 +2016,22 @@ out:
static const struct genl_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_connect,
},
{
.cmd = NBD_CMD_DISCONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_disconnect,
},
{
.cmd = NBD_CMD_RECONFIGURE,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_reconfigure,
},
{
.cmd = NBD_CMD_STATUS,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_status,
},
};
@@ -2031,6 +2048,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.ops = nbd_connect_genl_ops,
.n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
+ .policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
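/*
 * Illustrative sketch (not part of this patch) of the pattern applied to
 * nbd above: the nla_policy moves from the individual genl_ops entries to
 * the genl_family, and ops that still rely on the old relaxed checking opt
 * out through .validate flags. All my_* names are hypothetical.
 */
#include <net/genetlink.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_INDEX, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)
enum { MY_CMD_UNSPEC, MY_CMD_STATUS };

static const struct nla_policy my_attr_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_INDEX] = { .type = NLA_U32 },
};

static int my_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_ops my_genl_ops[] = {
	{
		.cmd = MY_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = my_genl_status,
	},
};

static struct genl_family my_genl_family __ro_after_init = {
	.name = "my_family",
	.version = 1,
	.maxattr = MY_ATTR_MAX,
	.policy = my_attr_policy,	/* validated once, family wide */
	.ops = my_genl_ops,
	.n_ops = ARRAY_SIZE(my_genl_ops),
};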
@@ -2050,7 +2068,7 @@ static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
*/
if (refcount_read(&nbd->config_refs))
connected = 1;
- dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
+ dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
if (!dev_opt)
return -EMSGSIZE;
ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
@@ -2098,7 +2116,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
+ dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 6d415b20fb70..001dbdcbf355 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -343,6 +343,7 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 0ff9b12d0e35..6f9ad3fc716f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -897,6 +897,7 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 35e6e271b219..1e9c50a7256c 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -319,6 +319,7 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5a71023f76c..024060165afa 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2761,7 +2761,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- disk->async_events = pd->bdev->bd_disk->async_events;
add_disk(disk);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4e1d9b31f60c..cc61c5ce3ad5 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,7 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
__func__, __LINE__, i, bio_sectors(iter.bio),
iter.bio->bi_iter.bi_sector);
@@ -496,7 +496,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
- "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
+ "%s is a %s (%llu MiB total, %llu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2210c1b9491b..e5009a34f9c2 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -934,7 +934,7 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret;
- mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock(&client_mutex);
rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
ceph_destroy_options(ceph_opts);
@@ -1326,7 +1326,7 @@ static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
zero_bvecs(&obj_req->bvec_pos, off, bytes);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -1581,7 +1581,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
kfree(obj_request->bvec_pos.bvecs);
break;
default:
- rbd_assert(0);
+ BUG();
}
kfree(obj_request->img_extents);
@@ -1781,7 +1781,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2036,7 +2036,7 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
ret = rbd_obj_setup_zeroout(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
if (ret < 0)
return ret;
@@ -2383,7 +2383,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
} else {
ret = rbd_img_fill_from_bvecs(child_img_req,
@@ -2515,7 +2515,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
num_osd_ops += count_zeroout_ops(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
@@ -2542,7 +2542,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
__rbd_obj_setup_zeroout(obj_req, which);
break;
default:
- rbd_assert(0);
+ BUG();
}
ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
@@ -3842,8 +3842,12 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- rbd_assert(op_type == OBJ_OP_READ ||
- rbd_dev->spec->snap_id == CEPH_NOSNAP);
+ if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
+ rbd_warn(rbd_dev, "%s on read-only snapshot",
+ obj_op_name(op_type));
+ result = -EIO;
+ goto err;
+ }
/*
* Quit early if the mapped snapshot no longer exists. It's
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 0cf4509d575c..898d522e8338 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -439,6 +439,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
* Fall through so the DMA devices can be attached and
* the user can attempt to pull off their data.
*/
+ /* fall through */
case CARD_STATE_GOOD:
st = rsxx_get_card_size8(card, &card->size8);
if (st)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3fa6fcc34790..67b5ec281c6d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -862,6 +862,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1e2ae90d7715..cf42729c788e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1216,6 +1216,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
disk->private_data = fs;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2a7ca4a1e6f7..f1d90cd3dc47 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -693,7 +693,8 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+ vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 32a21b8d1d85..464c9092bc8b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1032,6 +1032,7 @@ static int ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
+ ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7b2e76e7f22f..b9c34ff9a0d3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -336,7 +336,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8987/8997.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -350,7 +350,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
@@ -379,6 +379,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKSDIO
+ tristate "MediaTek HCI SDIO driver"
+ depends on MMC
+ help
+ MediaTek Bluetooth HCI SDIO driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with SDIO interface.
+
+ Say Y here to compile support for MediaTek Bluetooth SDIO devices
+ into the kernel or say M to compile it as module (btmtksdio).
+
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b7e393cfc1e3..34887b9b3a85 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index d5d6e6e5da3b..8e17963ab65a 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,9 +34,11 @@
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}})
+#define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}})
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
+#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
@@ -69,6 +71,9 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
* The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller
* with no configured address.
*
+ * The address 20:76:A0:00:56:79 indicates a BCM2076B1 controller
+ * with no configured address.
+ *
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
* with waiting for configuration state.
*
@@ -80,9 +85,11 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -333,6 +340,7 @@ struct bcm_subver_table {
static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
+ { 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 047b75ce1deb..0f3a020703ab 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -235,6 +235,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
.fw_dump_end = 0xf8,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
+ .cfg = 0x00,
+ .host_int_mask = 0x08,
+ .host_intstatus = 0x0c,
+ .card_status = 0x5c,
+ .sq_read_base_addr_a0 = 0xf8,
+ .sq_read_base_addr_a1 = 0xf9,
+ .card_revision = 0xc8,
+ .card_fw_status0 = 0xe8,
+ .card_fw_status1 = 0xe9,
+ .card_rx_len = 0xea,
+ .card_rx_unit = 0xeb,
+ .io_port_0 = 0xe4,
+ .io_port_1 = 0xe5,
+ .io_port_2 = 0xe6,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x04,
+ .card_misc_cfg = 0xd8,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+};
+
static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
.cfg = 0x00,
.host_int_mask = 0x08,
@@ -312,6 +335,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.supports_fw_dump = true,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8987_uapsta.bin",
+ .reg = &btmrvl_reg_8987,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+ .supports_fw_dump = true,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
@@ -343,6 +375,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8977 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
.driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
+ /* Marvell SD8987 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A),
+ .driver_data = (unsigned long)&btmrvl_sdio_sd8987 },
/* Marvell SD8997 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
@@ -1797,4 +1832,5 @@ MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
new file mode 100644
index 000000000000..813338288453
--- /dev/null
+++ b/drivers/bluetooth/btmtksdio.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek SDIO devices
+ *
+ * This file is written based on btsdio.c and btmtkuart.c.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+
+#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
+
+static bool enable_autosuspend;
+
+struct btmtksdio_data {
+ const char *fwname;
+};
+
+static const struct btmtksdio_data mt7663_data = {
+ .fwname = FIRMWARE_MT7663,
+};
+
+static const struct btmtksdio_data mt7668_data = {
+ .fwname = FIRMWARE_MT7668,
+};
+
+static const struct sdio_device_id btmtksdio_table[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
+ .driver_data = (kernel_ulong_t)&mt7663_data },
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
+ .driver_data = (kernel_ulong_t)&mt7668_data },
+ { } /* Terminating entry */
+};
+
+#define MTK_REG_CHLPCR 0x4 /* W1S */
+#define C_INT_EN_SET BIT(0)
+#define C_INT_EN_CLR BIT(1)
+#define C_FW_OWN_REQ_SET BIT(8) /* For write */
+#define C_COM_DRV_OWN BIT(8) /* For read */
+#define C_FW_OWN_REQ_CLR BIT(9)
+
+#define MTK_REG_CSDIOCSR 0x8
+#define SDIO_RE_INIT_EN BIT(0)
+#define SDIO_INT_CTL BIT(2)
+
+#define MTK_REG_CHCR 0xc
+#define C_INT_CLR_CTRL BIT(1)
+
+/* CHISR has the same bit field definition as CHIER */
+#define MTK_REG_CHISR 0x10
+#define MTK_REG_CHIER 0x14
+#define FW_OWN_BACK_INT BIT(0)
+#define RX_DONE_INT BIT(1)
+#define TX_EMPTY BIT(2)
+#define TX_FIFO_OVERFLOW BIT(8)
+#define RX_PKT_LEN GENMASK(31, 16)
+
+#define MTK_REG_CTDR 0x18
+
+#define MTK_REG_CRDR 0x1c
+
+#define MTK_SDIO_BLOCK_SIZE 256
+
+#define BTMTKSDIO_TX_WAIT_VND_EVT 1
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_TEST = 0x2,
+ MTK_WMT_WAKEUP = 0x3,
+ MTK_WMT_HIF = 0x4,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7,
+ MTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct mtkbtsdio_hdr {
+ __le16 len;
+ __le16 reserved;
+ u8 bt_type;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct mtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
+struct btmtksdio_dev {
+ struct hci_dev *hdev;
+ struct sdio_func *func;
+ struct device *dev;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *evt_skb;
+
+ const struct btmtksdio_data *data;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ int err;
+
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return -ETIMEDOUT;
+ }
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case MTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case MTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+ return err;
+}
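/*
 * Illustrative sketch (not part of this patch) of the completion handshake
 * mtk_hci_wmt_sync() relies on: the sender sets a wait bit, transmits and
 * sleeps on that bit; the event receive path clears the bit and wakes the
 * waiter. All my_* names are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define MY_TX_WAIT_EVT	0

struct my_dev {
	unsigned long tx_state;
};

static int my_send_and_wait(struct my_dev *d)
{
	int err;

	set_bit(MY_TX_WAIT_EVT, &d->tx_state);

	/* ... transmit the command here ... */

	err = wait_on_bit_timeout(&d->tx_state, MY_TX_WAIT_EVT,
				  TASK_INTERRUPTIBLE,
				  msecs_to_jiffies(1000));
	if (err == -EINTR) {
		clear_bit(MY_TX_WAIT_EVT, &d->tx_state);
		return err;
	}
	if (err) {
		clear_bit(MY_TX_WAIT_EVT, &d->tx_state);
		return -ETIMEDOUT;
	}
	return 0;
}

/* Called from the receive path once the expected event has arrived. */
static void my_complete(struct my_dev *d)
{
	if (test_and_clear_bit(MY_TX_WAIT_EVT, &d->tx_state)) {
		smp_mb__after_atomic();
		wake_up_bit(&d->tx_state, MY_TX_WAIT_EVT);
	}
}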
+
+static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
+ struct sk_buff *skb)
+{
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err;
+
+ /* Make sure that there is enough room for the SDIO header */
+ if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
+ err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Prepend MediaTek SDIO Specific Header */
+ skb_push(skb, sizeof(*sdio_hdr));
+
+ sdio_hdr = (void *)skb->data;
+ sdio_hdr->len = cpu_to_le16(skb->len);
+ sdio_hdr->reserved = cpu_to_le16(0);
+ sdio_hdr->bt_type = hci_skb_pkt_type(skb);
+
+ err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
+ round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
+ if (err < 0)
+ goto err_skb_pull;
+
+ bdev->hdev->stat.byte_tx += skb->len;
+
+ kfree_skb(skb);
+
+ return 0;
+
+err_skb_pull:
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ return err;
+}
+
+static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
+{
+ return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
+}
+
+static void btmtksdio_tx_work(struct work_struct *work)
+{
+ struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
+ tx_work);
+ struct sk_buff *skb;
+ int err;
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ while ((skb = skb_dequeue(&bdev->txq))) {
+ err = btmtksdio_tx_packet(bdev, skb);
+ if (err < 0) {
+ bdev->hdev->stat.err_tx++;
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+ }
+
+ sdio_release_host(bdev->func);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id with 0xff for vendor specific instead
+ * of 0xe4 so that events sent via the monitoring socket can be parsed
+ * properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ /* When someone is waiting for the WMT event, the skb is cloned and
+ * the event is processed from the clone instead.
+ */
+ if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
+ bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
+ if (!bdev->evt_skb) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0)
+ goto err_free_skb;
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return 0;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+err_out:
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtksdio_recv_event },
+};
+
+static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
+{
+ const struct h4_recv_pkt *pkts = mtk_recv_pkts;
+ int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err, i, pad_size;
+ struct sk_buff *skb;
+ u16 dlen;
+
+ if (rx_size < sizeof(*sdio_hdr))
+ return -EILSEQ;
+
+ /* An SDIO packet contains exactly one Bluetooth packet */
+ skb = bt_skb_alloc(rx_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, rx_size);
+
+ err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
+ if (err < 0)
+ goto err_kfree_skb;
+
+ sdio_hdr = (void *)skb->data;
+
+ /* Default the error to -EILSEQ simply to keep the error path cleaner.
+ */
+ err = -EILSEQ;
+
+ if (rx_size != le16_to_cpu(sdio_hdr->len)) {
+ bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched");
+ goto err_kfree_skb;
+ }
+
+ hci_skb_pkt_type(skb) = sdio_hdr->bt_type;
+
+ /* Remove MediaTek SDIO header */
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ /* We have to dig into the packet to get the payload size and thus how
+ * many padding bytes sit at the tail; these padding bytes must be removed
+ * before the packet is handed to the core layer.
+ */
+ for (i = 0; i < pkts_count; i++) {
+ if (sdio_hdr->bt_type == (&pkts[i])->type)
+ break;
+ }
+
+ if (i >= pkts_count) {
+ bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
+ sdio_hdr->bt_type);
+ goto err_kfree_skb;
+ }
+
+ /* Remaining bytes cannot hold a header */
+ if (skb->len < (&pkts[i])->hlen) {
+ bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
+ goto err_kfree_skb;
+ }
+
+ switch ((&pkts[i])->lsize) {
+ case 1:
+ dlen = skb->data[(&pkts[i])->loff];
+ break;
+ case 2:
+ dlen = get_unaligned_le16(skb->data +
+ (&pkts[i])->loff);
+ break;
+ default:
+ goto err_kfree_skb;
+ }
+
+ pad_size = skb->len - (&pkts[i])->hlen - dlen;
+
+ /* Remaining bytes cannot hold a payload */
+ if (pad_size < 0) {
+ bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
+ goto err_kfree_skb;
+ }
+
+ /* Remove padding bytes */
+ skb_trim(skb, skb->len - pad_size);
+
+ /* Complete frame */
+ (&pkts[i])->recv(bdev->hdev, skb);
+
+ bdev->hdev->stat.byte_rx += rx_size;
+
+ return 0;
+
+err_kfree_skb:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static void btmtksdio_interrupt(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ u32 int_status;
+ u16 rx_size;
+
+ /* The host must get ownership from the device before accessing any
+ * register. However, if the SDIO host were not released here, a
+ * deadlock could occur as a circular wait between the SDIO IRQ work
+ * and the PM runtime work. So explicitly release the SDIO host here
+ * and claim it again once the PM runtime work is done.
+ */
+ sdio_release_host(bdev->func);
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+
+ int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
+
+ /* Ack the interrupt as soon as possible, before any operation on the
+ * hardware.
+ *
+ * Note that we don't ack any status during the operations themselves, to
+ * avoid a race between the host and the device: for example, RX_DONE for
+ * the next packet could mistakenly be acked, so no further interrupt is
+ * raised even though data is still pending in the hardware FIFO.
+ */
+ sdio_writel(func, int_status, MTK_REG_CHISR, NULL);
+
+ if (unlikely(!int_status))
+ bt_dev_err(bdev->hdev, "CHISR is 0");
+
+ if (int_status & FW_OWN_BACK_INT)
+ bt_dev_dbg(bdev->hdev, "Get fw own back");
+
+ if (int_status & TX_EMPTY)
+ schedule_work(&bdev->tx_work);
+ else if (unlikely(int_status & TX_FIFO_OVERFLOW))
+ bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+
+ if (int_status & RX_DONE_INT) {
+ rx_size = (int_status & RX_PKT_LEN) >> 16;
+
+ if (btmtksdio_rx_packet(bdev, rx_size) < 0)
+ bdev->hdev->stat.err_rx++;
+ }
+
+ /* Enable interrupt */
+ sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_open(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ int err;
+ u32 status;
+
+ sdio_claim_host(bdev->func);
+
+ err = sdio_enable_func(bdev->func);
+ if (err < 0)
+ goto err_release_host;
+
+ /* Get ownership from the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+ if (err < 0) {
+ bt_dev_err(bdev->hdev, "Cannot get ownership from device");
+ goto err_disable_func;
+ }
+
+ /* Disable interrupt & mask out all interrupt sources */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* SDIO CMD5 re-initialization allows the SDIO device to return to the
+ * idle state, and the synchronous interrupt is supported in SDIO 4-bit
+ * mode.
+ */
+ sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
+ MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup write-1-clear for CHISR register */
+ sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup interrupt sources */
+ sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
+ MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Enable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+
+err_release_irq:
+ sdio_release_irq(bdev->func);
+
+err_disable_func:
+ sdio_disable_func(bdev->func);
+
+err_release_host:
+ sdio_release_host(bdev->func);
+
+ return err;
+}
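/*
 * Illustrative sketch (not part of this patch) of the readx_poll_timeout()
 * pattern used for the ownership handshake above: the helper re-reads a
 * status word through the supplied accessor until the condition holds or
 * the timeout (in microseconds) expires. All my_* names are hypothetical.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_DRV_OWN	BIT(8)

static u32 my_read_status(void __iomem *reg)
{
	return readl(reg);
}

static int my_wait_for_driver_own(void __iomem *reg)
{
	u32 status;

	/* poll every 2 ms, give up after one second */
	return readx_poll_timeout(my_read_status, reg, status,
				  status & MY_DRV_OWN, 2000, 1000000);
}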
+
+static int btmtksdio_close(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status;
+ int err;
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
+
+ sdio_release_irq(bdev->func);
+
+ /* Return ownership to the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+ if (err < 0)
+ bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+
+ sdio_disable_func(bdev->func);
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+}
+
+static int btmtksdio_flush(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The patch header is 30 bytes and should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = MTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell device the position in sequence */
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto free_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = MTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ /* Activate the function that the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto free_fw;
+ }
+
+ /* Wait a few moments for the firmware activation to complete */
+ usleep_range(10000, 12000);
+
+free_fw:
+ release_firmware(fw);
+ return err;
+}
+
+static int btmtksdio_setup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ int err, status;
+ u8 param = 0x1;
+
+ calltime = ktime_get();
+
+ /* Query whether the firmware has already been downloaded */
+ wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "Firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+ /* Setup a firmware which the device definitely requires */
+ err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ /* Query whether the device is already enabled */
+ err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* -ETIMEDOUT happens */
+ if (err < 0)
+ return err;
+
+ /* The other errors happen in btmtksdio_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ pm_runtime_set_autosuspend_delay(bdev->dev,
+ MTKBTSDIO_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(bdev->dev);
+
+ err = pm_runtime_set_active(bdev->dev);
+ if (err < 0)
+ return err;
+
+ /* Runtime autosuspend is forbidden by default; it can be allowed via
+ * the enable_autosuspend module parameter or the PM runtime entry under
+ * sysfs.
+ */
+ pm_runtime_forbid(bdev->dev);
+ pm_runtime_enable(bdev->dev);
+
+ if (enable_autosuspend)
+ pm_runtime_allow(bdev->dev);
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+
+static int btmtksdio_shutdown(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0x0;
+ int err;
+
+ /* Get back the state to be consistent with the state
+ * in btmtksdio_setup.
+ */
+ pm_runtime_get_sync(bdev->dev);
+
+ /* Disable the device */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ pm_runtime_put_noidle(bdev->dev);
+ pm_runtime_disable(bdev->dev);
+
+ return 0;
+}
+
+static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+
+ default:
+ return -EILSEQ;
+ }
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ schedule_work(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct btmtksdio_dev *bdev;
+ struct hci_dev *hdev;
+ int err;
+
+ bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->data = (void *)id->driver_data;
+ if (!bdev->data)
+ return -ENODEV;
+
+ bdev->dev = &func->dev;
+ bdev->func = func;
+
+ INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&func->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_SDIO;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtksdio_open;
+ hdev->close = btmtksdio_close;
+ hdev->flush = btmtksdio_flush;
+ hdev->setup = btmtksdio_setup;
+ hdev->shutdown = btmtksdio_shutdown;
+ hdev->send = btmtksdio_send_frame;
+ SET_HCIDEV_DEV(hdev, &func->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
+ dev_err(&func->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return err;
+ }
+
+ sdio_set_drvdata(func, bdev);
+
+ /* pm_runtime_enable is done after the firmware is downloaded because
+ * the core layer may already have enabled runtime PM for this func,
+ * e.g. when host->caps has MMC_CAP_POWER_OFF_CARD set.
+ */
+ if (pm_runtime_enabled(bdev->dev))
+ pm_runtime_disable(bdev->dev);
+
+ /* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
+ * Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ *
+ * So, call pm_runtime_put_noidle() here.
+ */
+ pm_runtime_put_noidle(bdev->dev);
+
+ return 0;
+}
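/*
 * Illustrative sketch (not part of this patch) of the runtime PM balance
 * described above: the SDIO core probes a function with an elevated usage
 * count, so a driver managing runtime PM itself drops that reference in
 * probe and takes it back in remove. my_probe()/my_remove() are
 * hypothetical.
 */
#include <linux/mmc/sdio_func.h>
#include <linux/pm_runtime.h>

static int my_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	/* the core took a usage reference before calling probe */
	pm_runtime_put_noidle(&func->dev);
	return 0;
}

static void my_remove(struct sdio_func *func)
{
	/* re-take the reference so the core's teardown stays balanced */
	pm_runtime_get_noresume(&func->dev);
}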
+
+static void btmtksdio_remove(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ struct hci_dev *hdev;
+
+ if (!bdev)
+ return;
+
+ /* Be consistent with the state in btmtksdio_probe */
+ pm_runtime_get_noresume(bdev->dev);
+
+ hdev = bdev->hdev;
+
+ sdio_set_drvdata(func, NULL);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_PM
+static int btmtksdio_runtime_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static int btmtksdio_runtime_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
+ btmtksdio_runtime_resume, NULL);
+#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
+#else /* CONFIG_PM */
+#define BTMTKSDIO_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct sdio_driver btmtksdio_driver = {
+ .name = "btmtksdio",
+ .probe = btmtksdio_probe,
+ .remove = btmtksdio_remove,
+ .id_table = btmtksdio_table,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = BTMTKSDIO_PM_OPS,
+ }
+};
+
+module_sdio_driver(btmtksdio_driver);
+
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index b0b680dd69f4..f5dbeec8e274 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
- u32 baudrate;
+ __le32 baudrate;
u8 param;
int err;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 612268574fc7..cc12eecd9e4d 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -336,7 +336,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
{
struct rome_config config;
int err;
- u8 rom_ver;
+ u8 rom_ver = 0;
bt_dev_dbg(hdev, "QCA setup on UART");
@@ -344,7 +344,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- if (soc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver.
*/
@@ -365,7 +365,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- if (soc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
else
@@ -410,6 +410,7 @@ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);
+
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index c72c56ea7480..4c4fe2b5b7b7 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,7 +41,7 @@
#define QCA_WCN3990_POWERON_PULSE 0xFC
#define QCA_WCN3990_POWEROFF_PULSE 0xC0
-enum qca_bardrate {
+enum qca_baudrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
QCA_BAUDRATE_38400,
@@ -132,7 +132,8 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
- QCA_WCN3990
+ QCA_WCN3990,
+ QCA_WCN3998,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -142,6 +143,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+}
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -165,4 +170,8 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return false;
+}
#endif
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 282d1af1d3ba..4cfa9abe03c8 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -376,20 +376,7 @@ static struct sdio_driver btsdio_driver = {
.id_table = btsdio_table,
};
-static int __init btsdio_init(void)
-{
- BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION);
-
- return sdio_register_driver(&btsdio_driver);
-}
-
-static void __exit btsdio_exit(void)
-{
- sdio_unregister_driver(&btsdio_driver);
-}
-
-module_init(btsdio_init);
-module_exit(btsdio_exit);
+module_sdio_driver(btsdio_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbe518c3e5b..b5d31d583d60 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -228,9 +228,15 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
int err;
if (powered && !dev->res_enabled) {
- err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies);
- if (err)
- return err;
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply) {
+ err = regulator_bulk_enable(BCM_NUM_SUPPLIES,
+ dev->supplies);
+ if (err)
+ return err;
+ }
/* LPO clock needs to be 32.768 kHz */
err = clk_set_rate(dev->lpo_clk, 32768);
@@ -259,7 +265,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
if (!powered && dev->res_enabled) {
clk_disable_unprepare(dev->txco_clk);
clk_disable_unprepare(dev->lpo_clk);
- regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies);
+
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply)
+ regulator_bulk_disable(BCM_NUM_SUPPLIES,
+ dev->supplies);
}
/* wait for device to power on and come out of reset */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 069d1c8fde73..3f02ae560120 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -536,7 +536,7 @@ static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
skb_put_data(h5->rx_skb, byte, 1);
h5->rx_pending--;
- BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+ BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
static void h5_reset_rx(struct h5 *h5)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 237aea34b69f..57322c42bb2d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -54,9 +54,6 @@
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10
-/* Controller states */
-#define STATE_IN_BAND_SLEEP_ENABLED 1
-
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
@@ -67,6 +64,10 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+enum qca_flags {
+ QCA_IBS_ENABLED,
+};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -174,6 +175,21 @@ static int qca_power_setup(struct hci_uart *hu, bool on);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
+{
+ enum qca_btsoc_type soc_type;
+
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ soc_type = qsd->btsoc_type;
+ } else {
+ soc_type = QCA_ROME;
+ }
+
+ return soc_type;
+}
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -506,8 +522,10 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(qcadev->btsoc_type)) {
gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to boot up. */
+ msleep(150);
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
@@ -612,7 +630,7 @@ static int qca_close(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(hu);
else
gpiod_set_value_cansleep(qcadev->bt_en, 0);
@@ -775,7 +793,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
*/
- if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -963,7 +981,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_serdev *qcadev;
struct sk_buff *skb;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
@@ -985,18 +1002,17 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
skb_queue_tail(&qca->txq, skb);
hci_uart_tx_wakeup(hu);
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
/* Wait for the baudrate change request to be sent */
while (!skb_queue_empty(&qca->txq))
usleep_range(100, 200);
- serdev_device_wait_until_sent(hu->serdev,
+ if (hu->serdev)
+ serdev_device_wait_until_sent(hu->serdev,
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
/* Give the controller time to process the request */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qca_soc_type(hu)))
msleep(10);
else
msleep(300);
@@ -1072,10 +1088,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
-
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(qca_soc_type(hu))) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1091,7 +1104,6 @@ static int qca_check_speeds(struct hci_uart *hu)
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
- struct qca_serdev *qcadev;
int ret = 0;
if (speed_type == QCA_INIT_SPEED) {
@@ -1099,6 +1111,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
if (speed)
host_set_baudrate(hu, speed);
} else {
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+
speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (!speed)
return 0;
@@ -1106,8 +1120,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
/* Disable flow control for wcn3990 to deassert RTS while
* changing the baudrate of chip and host.
*/
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, true);
qca_baudrate = qca_get_baudrate_value(speed);
@@ -1119,7 +1132,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
host_set_baudrate(hu, speed);
error:
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, false);
}
@@ -1181,20 +1194,18 @@ static int qca_setup(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
- struct qca_serdev *qcadev;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
int ret;
int soc_ver = 0;
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
ret = qca_check_speeds(hu);
if (ret)
return ret;
/* Patch downloading has to be done without IBS mode */
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
bt_dev_info(hdev, "setting up wcn3990");
/* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
@@ -1225,7 +1236,7 @@ static int qca_setup(struct hci_uart *hu)
qca_baudrate = qca_get_baudrate_value(speed);
}
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &soc_ver);
if (ret)
@@ -1234,9 +1245,9 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
+ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver);
if (!ret) {
- set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
@@ -1250,7 +1261,7 @@ static int qca_setup(struct hci_uart *hu)
}
/* Setup bdaddr */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hu->hdev->set_bdaddr = qca_set_bdaddr;
else
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
@@ -1273,7 +1284,7 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_vreg_data qca_soc_data = {
+static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 1800000, 1900000, 15000 },
@@ -1284,6 +1295,17 @@ static const struct qca_vreg_data qca_soc_data = {
.num_vregs = 4,
};
+static const struct qca_vreg_data qca_soc_data_wcn3998 = {
+ .soc_type = QCA_WCN3998,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 10000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1352000, 300000 },
+ { "vddch0", 3300000, 3300000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_data *qca = hu->priv;
@@ -1294,7 +1316,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
* data in skb's.
*/
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
@@ -1417,8 +1439,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- if (data && data->soc_type == QCA_WCN3990) {
- qcadev->btsoc_type = QCA_WCN3990;
+ if (data && qca_is_wcn399x(data->soc_type)) {
+ qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
@@ -1482,7 +1504,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(&qcadev->serdev_hu);
else
clk_disable_unprepare(qcadev->susclk);
@@ -1492,7 +1514,8 @@ static void qca_serdev_remove(struct serdev_device *serdev)
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
- { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
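Nearly every branch in the hci_qca hunks above now goes through qca_is_wcn399x(), whose definition lives in btqca.h and is not part of this diff. Given the two qca_vreg_data tables just above, it is presumably a trivial predicate along these lines (sketch only, not taken from the patch):

/* Hedged sketch of the helper used above; the real definition is in
 * btqca.h and may differ in detail.
 */
static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
{
	return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
}

Treating the wcn3990/wcn3998 pair through one predicate is what lets the new qca_soc_data_wcn3998 table reuse all of the existing wcn3990 power-on and baudrate paths.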
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 084ae286fa23..ac58142301f4 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -12,28 +12,38 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
+struct tegra_aconnect {
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+};
+
static int tegra_aconnect_probe(struct platform_device *pdev)
{
- int ret;
+ struct tegra_aconnect *aconnect;
if (!pdev->dev.of_node)
return -EINVAL;
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
+ aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
+ GFP_KERNEL);
+ if (!aconnect)
+ return -ENOMEM;
- ret = of_pm_clk_add_clk(&pdev->dev, "ape");
- if (ret)
- goto clk_destroy;
+ aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
+ if (IS_ERR(aconnect->ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve ape clock\n");
+ return PTR_ERR(aconnect->ape_clk);
+ }
- ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
- if (ret)
- goto clk_destroy;
+ aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
+ if (IS_ERR(aconnect->apb2ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
+ return PTR_ERR(aconnect->apb2ape_clk);
+ }
+ dev_set_drvdata(&pdev->dev, aconnect);
pm_runtime_enable(&pdev->dev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
@@ -41,35 +51,51 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
return 0;
-
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
-
- return ret;
}
static int tegra_aconnect_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
-
return 0;
}
static int tegra_aconnect_runtime_resume(struct device *dev)
{
- return pm_clk_resume(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(aconnect->ape_clk);
+ if (ret) {
+ dev_err(dev, "ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aconnect->apb2ape_clk);
+ if (ret) {
+ clk_disable_unprepare(aconnect->ape_clk);
+ dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
static int tegra_aconnect_runtime_suspend(struct device *dev)
{
- return pm_clk_suspend(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aconnect->ape_clk);
+ clk_disable_unprepare(aconnect->apb2ape_clk);
+
+ return 0;
}
static const struct dev_pm_ops tegra_aconnect_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
tegra_aconnect_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id tegra_aconnect_of_match[] = {
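The tegra-aconnect conversion above drops the pm_clk_*() helpers in favour of explicitly managed clocks; one payoff, visible in the new dev_pm_ops, is that the same runtime PM callbacks can back system sleep through pm_runtime_force_suspend()/pm_runtime_force_resume(). A minimal sketch of that pattern for a hypothetical single-clock driver (the foo_* names are illustrative, not from the patch):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct foo {
	struct clk *clk;
};

static int foo_runtime_suspend(struct device *dev)
{
	struct foo *f = dev_get_drvdata(dev);

	clk_disable_unprepare(f->clk);		/* clock off while idle */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo *f = dev_get_drvdata(dev);

	return clk_prepare_enable(f->clk);	/* clock back on */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* reuse the runtime callbacks for system suspend/resume */
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
};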
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d299ec79e4c3..308475ed4b32 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -47,7 +47,10 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
-static const char * const clock_names[SYSC_ICK + 1] = { "fck", "ick", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
@@ -75,6 +78,7 @@ struct sysc {
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
@@ -94,7 +98,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
-void sysc_write(struct sysc *ddata, int offset, u32 value)
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
writel_relaxed(value, ddata->module_va + offset);
}
@@ -128,6 +132,81 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
@@ -199,6 +278,12 @@ static int sysc_get_clocks(struct sysc *ddata)
if (ddata->nr_clocks < 1)
return 0;
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
@@ -231,39 +316,125 @@ static int sysc_get_clocks(struct sysc *ddata)
return 0;
}
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
/**
- * sysc_init_resets - reset module on init
+ * sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
+ * See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
- int error;
-
ddata->rsts =
devm_reset_control_array_get_optional_exclusive(ddata->dev);
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- goto deassert;
-
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
-
-deassert:
- error = reset_control_deassert(ddata->rsts);
- if (error)
- return error;
-
return 0;
}
@@ -622,91 +793,239 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
-static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+
+static int sysc_enable_module(struct device *dev)
{
- struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int error = 0, i;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
- if (!ddata->enabled)
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
return 0;
- if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
- if (!pdata->idle_module)
- return -ENODEV;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- error = pdata->idle_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not idle: %i\n",
- __func__, error);
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
- goto idled;
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
}
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
- clk_disable(ddata->clocks[i]);
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
}
-idled:
- ddata->enabled = false;
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- return error;
+ return 0;
}
-static int __maybe_unused sysc_runtime_resume(struct device *dev)
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
struct sysc *ddata;
- int error = 0, i;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->enabled)
+ if (!ddata->enabled)
return 0;
if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ return error;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ return error;
+ }
- if (!pdata->enable_module)
- return -ENODEV;
+ sysc_disable_main_clocks(ddata);
- error = pdata->enable_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not enable: %i\n",
- __func__, error);
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
- goto awake;
- }
+ ddata->enabled = false;
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ return error;
+}
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
- error = clk_enable(ddata->clocks[i]);
+ if (ddata->enabled)
+ return 0;
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
}
-awake:
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->enabled = true;
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
return error;
}
@@ -788,12 +1107,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -805,6 +1129,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
@@ -853,6 +1178,42 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#endif
};
+/*
+ * Early quirks based on module base and register offsets only that are
+ * needed before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
@@ -885,6 +1246,55 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/**
+ * sysc_rstctrl_reset_deassert - deassert rstctrl reset
+ * @ddata: device driver data
+ * @reset: reset before deassert
+ *
+ * A module can have both OCP softreset control and external rstctrl.
+ * If more complicated rstctrl resets are needed, please handle these
+ * directly from the child device driver and map only the module reset
+ * for the parent interconnect target module device.
+ *
+ * Automatic reset of the module on init can be skipped with the
+ * "ti,no-reset-on-init" device tree property.
+ */
+static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
+{
+ int error;
+
+ if (!ddata->rsts)
+ return 0;
+
+ if (reset) {
+ error = reset_control_assert(ddata->rsts);
+ if (error)
+ return error;
+ }
+
+ return reset_control_deassert(ddata->rsts);
+}
+
static int sysc_reset(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
@@ -915,38 +1325,58 @@ static int sysc_reset(struct sysc *ddata)
100, MAX_MODULE_SOFTRESET_WAIT);
}
-/* At this point the module is configured enough to read the revision */
+/*
+ * At this point the module is configured enough to read the revision, but
+ * it may not yet be configured completely enough to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
static int sysc_init_module(struct sysc *ddata)
{
- int error;
+ int error = 0;
+ bool manage_clocks = true;
+ bool reset = true;
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
- ddata->revision = sysc_read_revision(ddata);
- goto rev_quirks;
- }
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ reset = false;
- error = pm_runtime_get_sync(ddata->dev);
- if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
+ error = sysc_rstctrl_reset_deassert(ddata, reset);
+ if (error)
+ return error;
- return 0;
- }
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
+ manage_clocks = false;
- error = sysc_reset(ddata);
- if (error) {
- dev_err(ddata->dev, "Reset failed with %d\n", error);
- pm_runtime_put_sync(ddata->dev);
+ if (manage_clocks) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- return error;
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
}
ddata->revision = sysc_read_revision(ddata);
- pm_runtime_put_sync(ddata->dev);
-
-rev_quirks:
sysc_init_revision_quirks(ddata);
- return 0;
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+err_main_clocks:
+ if (manage_clocks)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (manage_clocks)
+ sysc_disable_opt_clocks(ddata);
+
+ return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
@@ -1208,7 +1638,7 @@ static int sysc_child_resume_noirq(struct device *dev)
}
#endif
-struct dev_pm_domain sysc_child_pm_domain = {
+static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
@@ -1281,6 +1711,8 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -1331,6 +1763,9 @@ static void sysc_unprepare(struct sysc *ddata)
{
int i;
+ if (!ddata->clocks)
+ return;
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
@@ -1576,28 +2011,26 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
- struct ti_sysc_module_data mdata;
- int error = 0;
+ struct ti_sysc_module_data *mdata;
if (!pdata || !ddata->legacy_mode)
return 0;
- mdata.name = ddata->legacy_mode;
- mdata.module_pa = ddata->module_pa;
- mdata.module_size = ddata->module_size;
- mdata.offsets = ddata->offsets;
- mdata.nr_offsets = SYSC_MAX_REGS;
- mdata.cap = ddata->cap;
- mdata.cfg = &ddata->cfg;
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
- if (!pdata->init_module)
- return -ENODEV;
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
- error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
- if (error == -EEXIST)
- error = 0;
+ ddata->mdata = mdata;
- return error;
+ return 0;
}
static int sysc_init_match(struct sysc *ddata)
@@ -1651,10 +2084,6 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
- error = sysc_get_clocks(ddata);
- if (error)
- return error;
-
error = sysc_map_and_check_registers(ddata);
if (error)
goto unprepare;
@@ -1675,15 +2104,21 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
error = sysc_init_resets(ddata);
if (error)
return error;
- pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
if (error)
goto unprepare;
+ pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
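In the ti-sysc rework above, sysc_enable_module() programs the highest idle mode present in the capability bitmask (fls(idlemodes) - 1), while sysc_disable_module() asks sysc_best_idle_mode() for the deepest supported mode in the order smart-wakeup, smart, force. A hedged worked example, assuming the usual TI ordering SYSC_IDLE_FORCE = 0, SYSC_IDLE_NO = 1, SYSC_IDLE_SMART = 2, SYSC_IDLE_SMART_WKUP = 3 (the enum values are not shown in this diff):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative only: with smart and no-idle supported but not
 * smart-wakeup, both selection paths above end up on smart idle.
 */
static u32 example_pick_enable_mode(void)
{
	u32 idlemodes = BIT(2) | BIT(1);	/* SMART | NO = 0x6 */

	return fls(idlemodes) - 1;		/* fls(0x6) - 1 = 2, i.e. SMART */
}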
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index f8b7345fe1cb..5cf3bade0d57 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -786,6 +786,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
+ gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c
index a5ecf6dae02e..373f549525fe 100644
--- a/drivers/char/ds1620.c
+++ b/drivers/char/ds1620.c
@@ -212,7 +212,7 @@ static void ds1620_read_state(struct therm *therm)
static int ds1620_open(struct inode *inode, struct file *file)
{
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
}
static ssize_t
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index f882460b5a44..4fed8fafa0f0 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -298,12 +298,11 @@ static int dtlk_open(struct inode *inode, struct file *file)
{
TRACE_TEXT("(dtlk_open");
- nonseekable_open(inode, file);
switch (iminor(inode)) {
case DTLK_MINOR:
if (dtlk_busy)
return -EBUSY;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
default:
return -ENXIO;
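Several hunks in this series (ds1620 and dtlk here, plus the ipmi_watchdog, cm4000_cs, scr24x and tb0219 changes further down) switch open() from nonseekable_open() to stream_open(), which marks the file as a stream with no file position so reads and writes no longer serialize on f_pos. A minimal sketch of the pattern with hypothetical foo_* names:

#include <linux/fs.h>
#include <linux/module.h>

static int foo_open(struct inode *inode, struct file *file)
{
	/* FMODE_STREAM: no f_pos, read and write may run concurrently */
	return stream_open(inode, file);
}

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.open	= foo_open,
	.llseek	= no_llseek,
};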
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index d0ad85900b79..3a1e6b3ccd10 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -973,6 +973,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
if (ACPI_SUCCESS(status)) {
hdp->hd_phys_address = addr.address.minimum;
hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index b65ff6962899..e9b6ac61fb7f 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
priv->rng.read = omap_rng_do_read;
priv->rng.init = omap_rng_init;
priv->rng.cleanup = omap_rng_cleanup;
+ priv->rng.quality = 900;
priv->rng.priv = (unsigned long)priv;
platform_set_drvdata(pdev, priv);
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 042860d97b15..0ef5b6a3f560 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -161,6 +161,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
#endif
priv->rng.read = stm32_rng_read,
priv->rng.priv = (unsigned long) dev;
+ priv->rng.quality = 900;
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
@@ -169,6 +170,13 @@ static int stm32_rng_probe(struct platform_device *ofdev)
return devm_hwrng_register(dev, &priv->rng);
}
+static int stm32_rng_remove(struct platform_device *ofdev)
+{
+ pm_runtime_disable(&ofdev->dev);
+
+ return 0;
+}
+
#ifdef CONFIG_PM
static int stm32_rng_runtime_suspend(struct device *dev)
{
@@ -210,6 +218,7 @@ static struct platform_driver stm32_rng_driver = {
.of_match_table = stm32_rng_match,
},
.probe = stm32_rng_probe,
+ .remove = stm32_rng_remove,
};
module_platform_driver(stm32_rng_driver);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 99c9f581a1f3..f7b1c004a12b 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -29,7 +29,6 @@ struct ipmi_file_private
struct ipmi_user *user;
spinlock_t recv_msg_lock;
struct list_head recv_msgs;
- struct file *file;
struct fasync_struct *fasync_queue;
wait_queue_head_t wait;
struct mutex recv_mutex;
@@ -95,8 +94,6 @@ static int ipmi_open(struct inode *inode, struct file *file)
if (!priv)
return -ENOMEM;
- priv->file = file;
-
rv = ipmi_create_user(if_num,
&ipmi_hndlrs,
priv,
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index f2411468f33f..f38e651dd1b5 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -47,9 +47,11 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
memset(&p, 0, sizeof(p));
name = "dmi-ipmi-si";
+ p.iftype = IPMI_PLAT_IF_SI;
switch (type) {
case IPMI_DMI_TYPE_SSIF:
name = "dmi-ipmi-ssif";
+ p.iftype = IPMI_PLAT_IF_SSIF;
p.type = SI_TYPE_INVALID;
break;
case IPMI_DMI_TYPE_BT:
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 00bf4b17edbf..1dc10740fc0f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -635,7 +635,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
-struct srcu_struct ipmi_interfaces_srcu;
+static struct srcu_struct ipmi_interfaces_srcu;
/*
* List of watchers that want to know when smi's are added and deleted.
@@ -5179,7 +5179,7 @@ static void __exit cleanup_ipmi(void)
* avoids problems with race conditions removing the timer
* here.
*/
- atomic_inc(&stop_operation);
+ atomic_set(&stop_operation, 1);
del_timer_sync(&ipmi_timer);
initialized = false;
diff --git a/drivers/char/ipmi/ipmi_plat_data.c b/drivers/char/ipmi/ipmi_plat_data.c
index 8f0ca2a848eb..28471ff2a3a3 100644
--- a/drivers/char/ipmi/ipmi_plat_data.c
+++ b/drivers/char/ipmi/ipmi_plat_data.c
@@ -12,7 +12,7 @@ struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
struct ipmi_plat_data *p)
{
struct platform_device *pdev;
- unsigned int num_r = 1, size, pidx = 0;
+ unsigned int num_r = 1, size = 0, pidx = 0;
struct resource r[4];
struct property_entry pr[6];
u32 flags;
@@ -21,19 +21,22 @@ struct platform_device *ipmi_platform_add(const char *name, unsigned int inst,
memset(pr, 0, sizeof(pr));
memset(r, 0, sizeof(r));
- if (p->type == SI_BT)
- size = 3;
- else if (p->type == SI_TYPE_INVALID)
- size = 0;
- else
- size = 2;
+ if (p->iftype == IPMI_PLAT_IF_SI) {
+ if (p->type == SI_BT)
+ size = 3;
+ else if (p->type != SI_TYPE_INVALID)
+ size = 2;
+
+ if (p->regsize == 0)
+ p->regsize = DEFAULT_REGSIZE;
+ if (p->regspacing == 0)
+ p->regspacing = p->regsize;
- if (p->regsize == 0)
- p->regsize = DEFAULT_REGSIZE;
- if (p->regspacing == 0)
- p->regspacing = p->regsize;
+ pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
+ } else if (p->iftype == IPMI_PLAT_IF_SSIF) {
+ pr[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", p->addr);
+ }
- pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type);
if (p->slave_addr)
pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr);
pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source);
diff --git a/drivers/char/ipmi/ipmi_plat_data.h b/drivers/char/ipmi/ipmi_plat_data.h
index 567cfcec8ada..9ba744ea9571 100644
--- a/drivers/char/ipmi/ipmi_plat_data.h
+++ b/drivers/char/ipmi/ipmi_plat_data.h
@@ -6,7 +6,10 @@
#include <linux/ipmi.h>
+enum ipmi_plat_interface_type { IPMI_PLAT_IF_SI, IPMI_PLAT_IF_SSIF };
+
struct ipmi_plat_data {
+ enum ipmi_plat_interface_type iftype;
unsigned int type; /* si_type for si, SI_INVALID for others */
unsigned int space; /* addr_space for si, intf# for ssif. */
unsigned long addr;
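With the new iftype field, ipmi_platform_add() emits the ipmi-type and register-layout properties only for IPMI_PLAT_IF_SI, and an i2c-addr property for IPMI_PLAT_IF_SSIF, which is why every caller in this series now sets iftype before anything else. A hypothetical SSIF registration using the fields shown above (the address value and function name are made up; headers as in the ipmi platform code):

#include <linux/string.h>

#include "ipmi_plat_data.h"

static void example_register_ssif(void)
{
	struct ipmi_plat_data p;

	memset(&p, 0, sizeof(p));
	p.iftype = IPMI_PLAT_IF_SSIF;	/* only the i2c-addr property is added */
	p.type = SI_TYPE_INVALID;	/* no SI state machine for SSIF */
	p.addr = 0x42;			/* hypothetical SMBus slave address */

	ipmi_platform_add("dmi-ipmi-ssif", 0, &p);
}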
diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c
index 682221eebd66..f6ece7569504 100644
--- a/drivers/char/ipmi/ipmi_si_hardcode.c
+++ b/drivers/char/ipmi/ipmi_si_hardcode.c
@@ -83,6 +83,7 @@ static void __init ipmi_hardcode_init_one(const char *si_type_str,
memset(&p, 0, sizeof(p));
+ p.iftype = IPMI_PLAT_IF_SI;
if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) {
p.type = SI_KCS;
} else if (strcmp(si_type_str, "smic") == 0) {
diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c
index 03140f6cdf6f..42a925f8cf69 100644
--- a/drivers/char/ipmi/ipmi_si_hotmod.c
+++ b/drivers/char/ipmi/ipmi_si_hotmod.c
@@ -108,6 +108,7 @@ static int parse_hotmod_str(const char *curr, enum hotmod_op *op,
int rv;
unsigned int ival;
+ h->iftype = IPMI_PLAT_IF_SI;
rv = parse_str(hotmod_ops, &ival, "operation", &curr);
if (rv)
return rv;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b1732882b97e..f124a2d2bb9f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1931,7 +1931,6 @@ static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
- char *init_name = NULL;
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2073,7 +2072,6 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->io.io_cleanup = NULL;
}
- kfree(init_name);
return rv;
}
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 54c7ded2a1ff..f2a91c4d8cab 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -188,12 +188,10 @@ static int platform_ipmi_probe(struct platform_device *pdev)
return -EINVAL;
rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
- if (rv) {
- dev_warn(&pdev->dev, "device has no slave-addr property\n");
+ if (rv)
io.slave_addr = 0x20;
- } else {
+ else
io.slave_addr = slave_addr;
- }
io.irq = platform_get_irq(pdev, 0);
if (io.irq > 0)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8b5aec5430f1..cf8156d6bc07 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -727,12 +727,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum != ssif_info->multi_pos) {
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
* but multi_pos starts at one, so the +1.
*/
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ dev_dbg(&ssif_info->client->dev,
+ "Received message out of sequence, expected %u, got %u\n",
+ ssif_info->multi_pos - 1, blocknum);
result = -EIO;
} else {
ssif_inc_stat(ssif_info, received_message_parts);
@@ -1991,7 +1995,7 @@ static int dmi_ipmi_probe(struct platform_device *pdev)
rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr);
if (rv)
- dev_warn(&pdev->dev, "device has no slave-addr property");
+ slave_addr = 0x20;
return new_ssif_client(i2c_addr, NULL, 0,
slave_addr, SI_SMBIOS, &pdev->dev);
@@ -2107,7 +2111,8 @@ static void cleanup_ipmi_ssif(void)
kfree(ssif_i2c_driver.address_list);
- platform_driver_unregister(&ipmi_driver);
+ if (ssif_trydmi)
+ platform_driver_unregister(&ipmi_driver);
free_ssif_clients();
}
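The msg_done_handler() change above both tightens the multi-part read check and logs the mismatch; the off-by-one noted in the comment is easier to follow with concrete numbers (illustrative sketch only, not driver code):

/*
 * Middle and end blocks are numbered from 0, while multi_pos counts
 * received parts from 1, hence the blocknum + 1 != multi_pos test:
 *
 *   part arriving    blocknum   multi_pos   blocknum + 1   outcome
 *   2nd              0          1           1              in sequence
 *   3rd              1          2           2              in sequence
 *   duplicate 3rd    1          3           2              -EIO, now logged
 */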
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 2924a4bc4a32..74c6d1f34132 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -837,7 +837,7 @@ static int ipmi_open(struct inode *ino, struct file *filep)
* first heartbeat.
*/
ipmi_start_timer_on_heartbeat = 1;
- return nonseekable_open(ino, filep);
+ return stream_open(ino, filep);
default:
return (-ENODEV);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 7a4eb86aedac..15bf585af5d3 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1682,7 +1682,7 @@ static int cmm_open(struct inode *inode, struct file *filp)
link->open = 1; /* only one open per device */
DEBUGP(2, dev, "<- cmm_open\n");
- ret = nonseekable_open(inode, filp);
+ ret = stream_open(inode, filp);
out:
mutex_unlock(&cmm_mutex);
return ret;
diff --git a/drivers/char/pcmcia/scr24x_cs.c b/drivers/char/pcmcia/scr24x_cs.c
index f6b43d9350f0..04b39c3596cc 100644
--- a/drivers/char/pcmcia/scr24x_cs.c
+++ b/drivers/char/pcmcia/scr24x_cs.c
@@ -92,7 +92,7 @@ static int scr24x_open(struct inode *inode, struct file *filp)
kref_get(&dev->refcnt);
filp->private_data = dev;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int scr24x_release(struct inode *inode, struct file *filp)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38c6d1af6d1c..a42b3d764da8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -101,15 +101,13 @@
* Exported interfaces ---- output
* ===============================
*
- * There are three exported interfaces; the first is one designed to
- * be used from within the kernel:
+ * There are four exported interfaces; two for use within the kernel,
+ * and two for use from userspace.
*
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer.
+ * Exported interfaces ---- userspace output
+ * -----------------------------------------
*
- * The two other interfaces are two character devices /dev/random and
+ * The userspace interfaces are two character devices /dev/random and
* /dev/urandom. /dev/random is suitable for use when very high
* quality randomness is desired (for example, for key generation or
* one-time pads), as it will only return a maximum of the number of
@@ -122,6 +120,77 @@
* this will result in random numbers that are merely cryptographically
* strong. For many applications, however, this is acceptable.
*
+ * Exported interfaces ---- kernel output
+ * --------------------------------------
+ *
+ * The primary kernel interface is
+ *
+ * void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer. This is equivalent to a
+ * read from /dev/urandom.
+ *
+ * For less critical applications, there are the functions:
+ *
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
+ *
+ * These are produced by a cryptographic RNG seeded from get_random_bytes,
+ * and so do not deplete the entropy pool as much. These are recommended
+ * for most in-kernel operations *if the result is going to be stored in
+ * the kernel*.
+ *
+ * Specifically, the get_random_int() family do not attempt to do
+ * "anti-backtracking". If you capture the state of the kernel (e.g.
+ * by snapshotting the VM), you can figure out previous get_random_int()
+ * return values. But if the value is stored in the kernel anyway,
+ * this is not a problem.
+ *
+ * It *is* safe to expose get_random_int() output to attackers (e.g. as
+ * network cookies); given outputs 1..n, it's not feasible to predict
+ * outputs 0 or n+1. The only concern is an attacker who breaks into
+ * the kernel later; the get_random_int() engine is not reseeded as
+ * often as the get_random_bytes() one.
+ *
+ * get_random_bytes() is needed for keys that need to stay secret after
+ * they are erased from the kernel. For example, any key that will
+ * be wrapped and stored encrypted. And session encryption keys: we'd
+ * like to know that after the session is closed and the keys erased,
+ * the plaintext is unrecoverable to someone who recorded the ciphertext.
+ *
+ * But for network ports/cookies, stack canaries, PRNG seeds, address
+ * space layout randomization, session *authentication* keys, or other
+ * applications where the sensitive data is stored in the kernel in
+ * plaintext for as long as it's sensitive, the get_random_int() family
+ * is just fine.
+ *
+ * Consider ASLR. We want to keep the address space secret from an
+ * outside attacker while the process is running, but once the address
+ * space is torn down, it's of no use to an attacker any more. And it's
+ * stored in kernel data structures as long as it's alive, so worrying
+ * about an attacker's ability to extrapolate it from the get_random_int()
+ * CRNG is silly.
+ *
+ * Even some cryptographic keys are safe to generate with get_random_int().
+ * In particular, keys for SipHash are generally fine. Here, knowledge
+ * of the key authorizes you to do something to a kernel object (inject
+ * packets to a network connection, or flood a hash table), and the
+ * key is stored with the object being protected. Once it goes away,
+ * we no longer care if anyone knows the key.
+ *
+ * prandom_u32()
+ * -------------
+ *
+ * For even weaker applications, see the pseudorandom generator
+ * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
+ * numbers aren't security-critical at all, these are *far* cheaper.
+ * Useful for self-tests, random error simulation, randomized backoffs,
+ * and any other application where you trust that nobody is trying to
+ * maliciously mess with you by guessing the "random" numbers.
+ *
* Exported interfaces ---- input
* ==============================
*
@@ -295,7 +364,7 @@
* To allow fractional bits to be tracked, the entropy_count field is
* denominated in units of 1/8th bits.
*
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+ * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
* credit_entropy_bits() needs to be 64 bits wide.
*/
#define ENTROPY_SHIFT 3
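The constraint restated in the hunk above is easy to sanity-check. For the 4096-bit input pool the S() macro further down gives poolbitshift = ilog2(128) + 5 = 12, so 2 * (ENTROPY_SHIFT + poolbitshift) = 2 * (3 + 12) = 30, which stays under 31 and keeps the multiply in credit_entropy_bits() within 32 bits. (The 128-word input pool size is the long-standing default and is assumed here; it is not shown in this hunk.)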
@@ -359,9 +428,9 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here.
*/
-static struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+static const struct poolinfo {
+ int poolbitshift, poolwords, poolbytes, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
@@ -415,7 +484,7 @@ struct crng_state {
spinlock_t lock;
};
-struct crng_state primary_crng = {
+static struct crng_state primary_crng = {
.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};
@@ -470,7 +539,6 @@ struct entropy_store {
unsigned short add_ptr;
unsigned short input_rotate;
int entropy_count;
- int entropy_total;
unsigned int initialized:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
@@ -643,7 +711,7 @@ static void process_random_ready_list(void)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- int entropy_count, orig;
+ int entropy_count, orig, has_initialized = 0;
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
@@ -698,23 +766,25 @@ retry:
entropy_count = 0;
} else if (entropy_count > pool_size)
entropy_count = pool_size;
+ if ((r == &blocking_pool) && !r->initialized &&
+ (entropy_count >> ENTROPY_SHIFT) > 128)
+ has_initialized = 1;
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
- r->entropy_total += nbits;
- if (!r->initialized && r->entropy_total > 128) {
+ if (has_initialized)
r->initialized = 1;
- r->entropy_total = 0;
- }
trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT,
- r->entropy_total, _RET_IP_);
+ entropy_count >> ENTROPY_SHIFT, _RET_IP_);
if (r == &input_pool) {
int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ struct entropy_store *other = &blocking_pool;
- if (crng_init < 2 && entropy_bits >= 128) {
+ if (crng_init < 2) {
+ if (entropy_bits < 128)
+ return;
crng_reseed(&primary_crng, r);
entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
}
@@ -725,20 +795,14 @@ retry:
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
- /* If the input pool is getting full, send some
- * entropy to the blocking pool until it is 75% full.
+ /* If the input pool is getting full, and the blocking
+ * pool has room, send some entropy to the blocking
+ * pool.
*/
- if (entropy_bits > random_write_wakeup_bits &&
- r->initialized &&
- r->entropy_total >= 2*random_read_wakeup_bits) {
- struct entropy_store *other = &blocking_pool;
-
- if (other->entropy_count <=
- 3 * other->poolinfo->poolfracbits / 4) {
- schedule_work(&other->push_work);
- r->entropy_total = 0;
- }
- }
+ if (!work_pending(&other->push_work) &&
+ (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
+ (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
+ schedule_work(&other->push_work);
}
}
@@ -777,6 +841,7 @@ static struct crng_state **crng_node_pool __read_mostly;
#endif
static void invalidate_batched_entropy(void);
+static void numa_crng_init(void);
static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
@@ -805,7 +870,9 @@ static void crng_initialize(struct crng_state *crng)
}
crng->state[i] ^= rv;
}
- if (trust_cpu && arch_init) {
+ if (trust_cpu && arch_init && crng == &primary_crng) {
+ invalidate_batched_entropy();
+ numa_crng_init();
crng_init = 2;
pr_notice("random: crng done (trusting CPU's manufacturer)\n");
}
@@ -1553,6 +1620,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
int large_request = (nbytes > 256);
trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+ if (!r->initialized && r->pull) {
+ xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
+ if (!r->initialized)
+ return 0;
+ }
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -1783,7 +1855,7 @@ EXPORT_SYMBOL(get_random_bytes_arch);
* data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool.
*/
-static void init_std_data(struct entropy_store *r)
+static void __init init_std_data(struct entropy_store *r)
{
int i;
ktime_t now = ktime_get_real();
@@ -1810,7 +1882,7 @@ static void init_std_data(struct entropy_store *r)
* take care not to overwrite the precious per platform data
* we were given.
*/
-static int rand_initialize(void)
+int __init rand_initialize(void)
{
init_std_data(&input_pool);
init_std_data(&blocking_pool);
@@ -1822,7 +1894,6 @@ static int rand_initialize(void)
}
return 0;
}
-early_initcall(rand_initialize);
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
@@ -2211,8 +2282,8 @@ struct batched_entropy {
u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
+ spinlock_t batch_lock;
};
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
/*
* Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
*/
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
u64 get_random_u64(void)
{
u64 ret;
- bool use_lock;
- unsigned long flags = 0;
+ unsigned long flags;
struct batched_entropy *batch;
static void *previous;
@@ -2242,28 +2315,25 @@ u64 get_random_u64(void)
warn_unseeded_randomness(&previous);
- use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u64);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+ spin_lock_irqsave(&batch->batch_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u64);
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
return ret;
}
EXPORT_SYMBOL(get_random_u64);
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
u32 get_random_u32(void)
{
u32 ret;
- bool use_lock;
- unsigned long flags = 0;
+ unsigned long flags;
struct batched_entropy *batch;
static void *previous;
@@ -2272,18 +2342,14 @@ u32 get_random_u32(void)
warn_unseeded_randomness(&previous);
- use_lock = READ_ONCE(crng_init) < 2;
- batch = &get_cpu_var(batched_entropy_u32);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+ spin_lock_irqsave(&batch->batch_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
- put_cpu_var(batched_entropy_u32);
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2363,19 @@ static void invalidate_batched_entropy(void)
int cpu;
unsigned long flags;
- write_lock_irqsave(&batched_entropy_reset_lock, flags);
for_each_possible_cpu (cpu) {
- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+ struct batched_entropy *batched_entropy;
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock(&batched_entropy->batch_lock);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+ spin_lock(&batched_entropy->batch_lock);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
}
- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
}
/**
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
index 7c19d9b22785..e8614ea843e2 100644
--- a/drivers/char/tb0219.c
+++ b/drivers/char/tb0219.c
@@ -243,7 +243,7 @@ static int tanbac_tb0219_open(struct inode *inode, struct file *file)
case 16 ... 23:
case 32 ... 39:
case 48 ... 55:
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
default:
break;
}
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 536e55d3919f..f3e4bc490cf0 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -157,7 +157,6 @@ config TCG_CRB
config TCG_VTPM_PROXY
tristate "VTPM Proxy Interface"
depends on TCG_TPM
- select ANON_INODES
---help---
This driver proxies for an emulated TPM (vTPM) running in userspace.
A device /dev/vtpmx is provided that creates a device pair
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index fbeb71953526..05dbfdb9f4af 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -75,7 +75,7 @@ struct ports_driver_data {
/* All the console devices handled by this driver */
struct list_head consoles;
};
-static struct ports_driver_data pdrvdata;
+static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
static DEFINE_SPINLOCK(pdrvdata_lock);
static DECLARE_COMPLETION(early_console_added);
@@ -1394,6 +1394,7 @@ static int add_port(struct ports_device *portdev, u32 id)
port->async_queue = NULL;
port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
+ port->cons.vtermno = 0;
port->host_connected = port->guest_connected = false;
port->stats = (struct port_stats) { 0 };
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e705aab9e38b..fc1e0cf44995 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config CLKDEV_LOOKUP
bool
@@ -219,6 +220,13 @@ config COMMON_CLK_XGENE
---help---
Support for the APM X-Gene SoC reference, PLL, and device clocks.
+config COMMON_CLK_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar clock driver"
+ depends on MFD_LOCHNAGAR
+ help
+ This driver supports the clocking features of the Cirrus Logic
+ Lochnagar audio development board.
+
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
@@ -297,6 +305,7 @@ config COMMON_CLK_FIXED_MMIO
Support for Memory Mapped IO Fixed clocks
source "drivers/clk/actions/Kconfig"
+source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/imgtec/Kconfig"
@@ -309,7 +318,9 @@ source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
source "drivers/clk/samsung/Kconfig"
+source "drivers/clk/sifive/Kconfig"
source "drivers/clk/sprd/Kconfig"
+source "drivers/clk/sunxi/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1db133652f0c..9ef4305d55e0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
+obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
+obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
@@ -64,6 +66,7 @@ obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
# please keep this section sorted lexicographically by directory path name
obj-y += actions/
+obj-y += analogbits/
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
@@ -93,6 +96,7 @@ obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
+obj-$(CONFIG_CLK_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 5a866a8b913d..c000a431471e 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL common clock driver
//
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
index b410ed5bf308..bca38bf8f218 100644
--- a/drivers/clk/actions/owl-composite.h
+++ b/drivers/clk/actions/owl-composite.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL composite clock driver
//
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
index 92d3e3d23967..083be6d80954 100644
--- a/drivers/clk/actions/owl-divider.h
+++ b/drivers/clk/actions/owl-divider.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL divider clock driver
//
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
index f1a7ffe896e1..04b89cbfdccb 100644
--- a/drivers/clk/actions/owl-factor.h
+++ b/drivers/clk/actions/owl-factor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL factor clock driver
//
diff --git a/drivers/clk/actions/owl-fixed-factor.h b/drivers/clk/actions/owl-fixed-factor.h
index cc9fe36c0964..3dfd7fd7d292 100644
--- a/drivers/clk/actions/owl-fixed-factor.h
+++ b/drivers/clk/actions/owl-fixed-factor.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL fixed factor clock driver
//
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
index c2d61ceebce2..c2f161c93fda 100644
--- a/drivers/clk/actions/owl-gate.h
+++ b/drivers/clk/actions/owl-gate.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL gate clock driver
//
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
index 834284c8c3ae..53b9ab665294 100644
--- a/drivers/clk/actions/owl-mux.h
+++ b/drivers/clk/actions/owl-mux.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL mux clock driver
//
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 6fb0d45bb088..78e5fc360b03 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
//
// OWL pll clock driver
//
diff --git a/drivers/clk/actions/owl-reset.h b/drivers/clk/actions/owl-reset.h
index 10f5774979a6..a947ffcb5a02 100644
--- a/drivers/clk/actions/owl-reset.h
+++ b/drivers/clk/actions/owl-reset.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
+/* SPDX-License-Identifier: GPL-2.0-or-later */
//
// Actions Semi Owl SoCs Reset Management Unit driver
//
diff --git a/drivers/clk/analogbits/Kconfig b/drivers/clk/analogbits/Kconfig
new file mode 100644
index 000000000000..b5fd60c7f136
--- /dev/null
+++ b/drivers/clk/analogbits/Kconfig
@@ -0,0 +1,2 @@
+config CLK_ANALOGBITS_WRPLL_CLN28HPC
+ bool
diff --git a/drivers/clk/analogbits/Makefile b/drivers/clk/analogbits/Makefile
new file mode 100644
index 000000000000..bf017447451e
--- /dev/null
+++ b/drivers/clk/analogbits/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC) += wrpll-cln28hpc.o
diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c
new file mode 100644
index 000000000000..776ead319ae9
--- /dev/null
+++ b/drivers/clk/analogbits/wrpll-cln28hpc.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ *
+ * This library supports configuration parsing and reprogramming of
+ * the CLN28HPC variant of the Analog Bits Wide Range PLL. The
+ * intention is for this library to be reusable for any device that
+ * integrates this PLL; thus the register structure and programming
+ * details are expected to be provided by a separate IP block driver.
+ *
+ * The bulk of this code is primarily useful for clock configurations
+ * that must operate at arbitrary rates, as opposed to clock configurations
+ * that are restricted by software or manufacturer guidance to a small,
+ * pre-determined set of performance points.
+ *
+ * References:
+ * - Analog Bits "Wide Range PLL Datasheet", version 2015.10.01
+ * - SiFive FU540-C000 Manual v1p0, Chapter 7 "Clocking and Reset"
+ * https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+
+/* MIN_INPUT_FREQ: minimum input clock frequency, in Hz (Fref_min) */
+#define MIN_INPUT_FREQ 7000000
+
+/* MAX_INPUT_FREQ: maximum input clock frequency, in Hz (Fref_max) */
+#define MAX_INPUT_FREQ 600000000
+
+/* MIN_POST_DIVR_FREQ: minimum post-divider reference frequency, in Hz */
+#define MIN_POST_DIVR_FREQ 7000000
+
+/* MAX_POST_DIVR_FREQ: maximum post-divider reference frequency, in Hz */
+#define MAX_POST_DIVR_FREQ 200000000
+
+/* MIN_VCO_FREQ: minimum VCO frequency, in Hz (Fvco_min) */
+#define MIN_VCO_FREQ 2400000000UL
+
+/* MAX_VCO_FREQ: maximum VCO frequency, in Hz (Fvco_max) */
+#define MAX_VCO_FREQ 4800000000ULL
+
+/* MAX_DIVQ_DIVISOR: maximum output divisor. Selected by DIVQ = 6 */
+#define MAX_DIVQ_DIVISOR 64
+
+/* MAX_DIVR_DIVISOR: maximum reference divisor. Selected by DIVR = 63 */
+#define MAX_DIVR_DIVISOR 64
+
+/* MAX_LOCK_US: maximum PLL lock time, in microseconds (tLOCK_max) */
+#define MAX_LOCK_US 70
+
+/*
+ * ROUND_SHIFT: number of bits to shift to avoid precision loss in the rounding
+ * algorithm
+ */
+#define ROUND_SHIFT 20
+
+/*
+ * Private functions
+ */
+
+/**
+ * __wrpll_calc_filter_range() - determine PLL loop filter bandwidth
+ * @post_divr_freq: input clock rate after the R divider
+ *
+ * Select the value to be presented to the PLL RANGE input signals, based
+ * on the input clock frequency after the post-R-divider @post_divr_freq.
+ * This code follows the recommendations in the PLL datasheet for filter
+ * range selection.
+ *
+ * Return: The RANGE value to be presented to the PLL configuration inputs,
+ * or a negative return code upon error.
+ */
+static int __wrpll_calc_filter_range(unsigned long post_divr_freq)
+{
+ if (post_divr_freq < MIN_POST_DIVR_FREQ ||
+ post_divr_freq > MAX_POST_DIVR_FREQ) {
+ WARN(1, "%s: post-divider reference freq out of range: %lu",
+ __func__, post_divr_freq);
+ return -ERANGE;
+ }
+
+ switch (post_divr_freq) {
+ case 0 ... 10999999:
+ return 1;
+ case 11000000 ... 17999999:
+ return 2;
+ case 18000000 ... 29999999:
+ return 3;
+ case 30000000 ... 49999999:
+ return 4;
+ case 50000000 ... 79999999:
+ return 5;
+ case 80000000 ... 129999999:
+ return 6;
+ }
+
+ return 7;
+}
+
+/**
+ * __wrpll_calc_fbdiv() - return feedback fixed divide value
+ * @c: ptr to a struct wrpll_cfg record to read from
+ *
+ * The internal feedback path includes a fixed by-two divider; the
+ * external feedback path does not. Return the appropriate divider
+ * value (2 or 1) depending on whether internal or external feedback
+ * is enabled. This code doesn't test for invalid configurations
+ * (e.g. both or neither of WRPLL_FLAGS_*_FEEDBACK are set); it relies
+ * on the caller to do so.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by
+ * @c from simultaneous modification.
+ *
+ * Return: 2 if internal feedback is enabled or 1 if external feedback
+ * is enabled.
+ */
+static u8 __wrpll_calc_fbdiv(const struct wrpll_cfg *c)
+{
+ return (c->flags & WRPLL_FLAGS_INT_FEEDBACK_MASK) ? 2 : 1;
+}
+
+/**
+ * __wrpll_calc_divq() - determine DIVQ based on target PLL output clock rate
+ * @target_rate: target PLL output clock rate
+ * @vco_rate: pointer to a u64 to store the computed VCO rate into
+ *
+ * Determine a reasonable value for the PLL Q post-divider, based on the
+ * target output rate @target_rate for the PLL. Along with returning the
+ * computed Q divider value as the return value, this function stores the
+ * desired target VCO rate into the variable pointed to by @vco_rate.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by
+ * @vco_rate from simultaneous access or modification.
+ *
+ * Return: a positive integer DIVQ value to be programmed into the hardware
+ * upon success, or 0 upon error (since 0 is an invalid DIVQ value)
+ */
+static u8 __wrpll_calc_divq(u32 target_rate, u64 *vco_rate)
+{
+ u64 s;
+ u8 divq = 0;
+
+ if (!vco_rate) {
+ WARN_ON(1);
+ goto wcd_out;
+ }
+
+ s = div_u64(MAX_VCO_FREQ, target_rate);
+ if (s <= 1) {
+ divq = 1;
+ *vco_rate = MAX_VCO_FREQ;
+ } else if (s > MAX_DIVQ_DIVISOR) {
+ divq = ilog2(MAX_DIVQ_DIVISOR);
+ *vco_rate = MIN_VCO_FREQ;
+ } else {
+ divq = ilog2(s);
+ *vco_rate = (u64)target_rate << divq;
+ }
+
+wcd_out:
+ return divq;
+}
+
+/**
+ * __wrpll_update_parent_rate() - update PLL data when parent rate changes
+ * @c: ptr to a struct wrpll_cfg record to write PLL data to
+ * @parent_rate: PLL input refclk rate (pre-R-divider)
+ *
+ * Pre-compute some data used by the PLL configuration algorithm when
+ * the PLL's reference clock rate changes. The intention is to avoid
+ * computation when the parent rate remains constant - expected to be
+ * the common case.
+ *
+ * Returns: 0 upon success or -ERANGE if the reference clock rate is
+ * out of range.
+ */
+static int __wrpll_update_parent_rate(struct wrpll_cfg *c,
+ unsigned long parent_rate)
+{
+ u8 max_r_for_parent;
+
+ if (parent_rate > MAX_INPUT_FREQ || parent_rate < MIN_POST_DIVR_FREQ)
+ return -ERANGE;
+
+ c->parent_rate = parent_rate;
+ max_r_for_parent = div_u64(parent_rate, MIN_POST_DIVR_FREQ);
+ c->max_r = min_t(u8, MAX_DIVR_DIVISOR, max_r_for_parent);
+
+ c->init_r = DIV_ROUND_UP_ULL(parent_rate, MAX_POST_DIVR_FREQ);
+
+ return 0;
+}
+
+/**
+ * wrpll_configure_for_rate() - compute PLL configuration for a target rate
+ * @c: ptr to a struct wrpll_cfg record to write into
+ * @target_rate: target PLL output clock rate (post-Q-divider)
+ * @parent_rate: PLL input refclk rate (pre-R-divider)
+ *
+ * Compute the appropriate PLL signal configuration values and store
+ * in PLL context @c. PLL reprogramming is not glitchless, so the
+ * caller should switch any downstream logic to a different clock
+ * source or clock-gate it before presenting these values to the PLL
+ * configuration signals.
+ *
+ * The caller must pass this function a pre-initialized struct
+ * wrpll_cfg record: either initialized to zero (with the
+ * exception of the .name and .flags fields) or read from the PLL.
+ *
+ * Context: Any context. Caller must protect the memory pointed to by @c
+ * from simultaneous access or modification.
+ *
+ * Return: 0 upon success; anything else upon failure.
+ */
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ unsigned long parent_rate)
+{
+ unsigned long ratio;
+ u64 target_vco_rate, delta, best_delta, f_pre_div, vco, vco_pre;
+ u32 best_f, f, post_divr_freq;
+ u8 fbdiv, divq, best_r, r;
+ int range;
+
+ if (c->flags == 0) {
+ WARN(1, "%s called with uninitialized PLL config", __func__);
+ return -EINVAL;
+ }
+
+ /* Initialize rounding data if it hasn't been initialized already */
+ if (parent_rate != c->parent_rate) {
+ if (__wrpll_update_parent_rate(c, parent_rate)) {
+ pr_err("%s: PLL input rate is out of range\n",
+ __func__);
+ return -ERANGE;
+ }
+ }
+
+ c->flags &= ~WRPLL_FLAGS_RESET_MASK;
+
+ /* Put the PLL into bypass if the user requests the parent clock rate */
+ if (target_rate == parent_rate) {
+ c->flags |= WRPLL_FLAGS_BYPASS_MASK;
+ return 0;
+ }
+
+ c->flags &= ~WRPLL_FLAGS_BYPASS_MASK;
+
+ /* Calculate the Q shift and target VCO rate */
+ divq = __wrpll_calc_divq(target_rate, &target_vco_rate);
+ if (!divq)
+ return -1;
+ c->divq = divq;
+
+ /* Precalculate the pre-Q divider target ratio */
+ ratio = div64_u64((target_vco_rate << ROUND_SHIFT), parent_rate);
+
+ fbdiv = __wrpll_calc_fbdiv(c);
+ best_r = 0;
+ best_f = 0;
+ best_delta = MAX_VCO_FREQ;
+
+ /*
+ * Consider all values for R which land within
+ * [MIN_POST_DIVR_FREQ, MAX_POST_DIVR_FREQ]; prefer smaller R
+ */
+ for (r = c->init_r; r <= c->max_r; ++r) {
+ f_pre_div = ratio * r;
+ f = (f_pre_div + (1 << ROUND_SHIFT)) >> ROUND_SHIFT;
+ f >>= (fbdiv - 1);
+
+ post_divr_freq = div_u64(parent_rate, r);
+ vco_pre = fbdiv * post_divr_freq;
+ vco = vco_pre * f;
+
+ /* Ensure rounding didn't take us out of range */
+ if (vco > target_vco_rate) {
+ --f;
+ vco = vco_pre * f;
+ } else if (vco < MIN_VCO_FREQ) {
+ ++f;
+ vco = vco_pre * f;
+ }
+
+ delta = abs(target_rate - vco);
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_r = r;
+ best_f = f;
+ }
+ }
+
+ c->divr = best_r - 1;
+ c->divf = best_f - 1;
+
+ post_divr_freq = div_u64(parent_rate, best_r);
+
+ /* Pick the best PLL jitter filter */
+ range = __wrpll_calc_filter_range(post_divr_freq);
+ if (range < 0)
+ return range;
+ c->range = range;
+
+ return 0;
+}
+
+/**
+ * wrpll_calc_output_rate() - calculate the PLL's target output rate
+ * @c: ptr to a struct wrpll_cfg record to read from
+ * @parent_rate: PLL refclk rate
+ *
+ * Given a pointer to the PLL's current input configuration @c and the
+ * PLL's input reference clock rate @parent_rate (before the R
+ * pre-divider), calculate the PLL's output clock rate (after the Q
+ * post-divider).
+ *
+ * Context: Any context. Caller must protect the memory pointed to by @c
+ * from simultaneous modification.
+ *
+ * Return: the PLL's output clock rate, in Hz. The return value from
+ * this function is intended to be convenient to pass directly
+ * to the Linux clock framework; thus there is no explicit
+ * error return value.
+ */
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+ unsigned long parent_rate)
+{
+ u8 fbdiv;
+ u64 n;
+
+ if (c->flags & WRPLL_FLAGS_EXT_FEEDBACK_MASK) {
+ WARN(1, "external feedback mode not yet supported");
+ return ULONG_MAX;
+ }
+
+ fbdiv = __wrpll_calc_fbdiv(c);
+ n = parent_rate * fbdiv * (c->divf + 1);
+ n = div_u64(n, c->divr + 1);
+ n >>= c->divq;
+
+ return n;
+}
+
+/**
+ * wrpll_calc_max_lock_us() - return the time for the PLL to lock
+ * @c: ptr to a struct wrpll_cfg record to read from
+ *
+ * Return the minimum amount of time (in microseconds) that the caller
+ * must wait after reprogramming the PLL to ensure that it is locked
+ * to the input frequency and stable. This is likely to depend on the DIVR
+ * value; this is under discussion with the manufacturer.
+ *
+ * Return: the minimum amount of time the caller must wait for the PLL
+ * to lock (in microseconds)
+ */
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c)
+{
+ return MAX_LOCK_US;
+}
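
The three entry points above are meant to be driven by an SoC-specific clock driver that owns the actual PLL registers (drivers/clk/sifive/, wired into the build earlier in this diff, is the expected consumer). A minimal sketch of that call sequence, assuming a hypothetical my_pll_write_cfg() helper that pushes the computed divr/divf/divq/range values into the device's registers; my_pll_set_rate() is likewise illustrative only:

    #include <linux/clk/analogbits-wrpll-cln28hpc.h>
    #include <linux/delay.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    /* Hypothetical helper: programs c->divr/divf/divq/range into the PLL. */
    int my_pll_write_cfg(const struct wrpll_cfg *c);

    static int my_pll_set_rate(struct wrpll_cfg *c, u32 target_rate,
                               unsigned long parent_rate)
    {
            int ret;

            /*
             * External feedback is not yet supported by the library (see
             * wrpll_calc_output_rate() above), so select internal feedback.
             */
            c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;

            ret = wrpll_configure_for_rate(c, target_rate, parent_rate);
            if (ret)
                    return ret;

            ret = my_pll_write_cfg(c);   /* device-specific register writes */
            if (ret)
                    return ret;

            /* Give the PLL time to relock before exposing the new rate. */
            udelay(wrpll_calc_max_lock_us(c));

            pr_debug("PLL output is now %lu Hz\n",
                     wrpll_calc_output_rate(c, parent_rate));

            return 0;
    }
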
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index c75df1cad60e..3732241352ce 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o
obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o
obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o
+obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o
obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o
+obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o
obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o
obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index b1af5a395423..0aabe49aed09 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -41,7 +41,7 @@ static u8 sam9260_plla_out[] = { 0, 2 };
static u16 sam9260_plla_icpll[] = { 1, 1 };
-static struct clk_range sam9260_plla_outputs[] = {
+static const struct clk_range sam9260_plla_outputs[] = {
{ .min = 80000000, .max = 160000000 },
{ .min = 150000000, .max = 240000000 },
};
@@ -58,7 +58,7 @@ static u8 sam9260_pllb_out[] = { 1 };
static u16 sam9260_pllb_icpll[] = { 1 };
-static struct clk_range sam9260_pllb_outputs[] = {
+static const struct clk_range sam9260_pllb_outputs[] = {
{ .min = 70000000, .max = 130000000 },
};
@@ -128,7 +128,7 @@ static u8 sam9g20_plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
static u16 sam9g20_plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
-static struct clk_range sam9g20_plla_outputs[] = {
+static const struct clk_range sam9g20_plla_outputs[] = {
{ .min = 745000000, .max = 800000000 },
{ .min = 695000000, .max = 750000000 },
{ .min = 645000000, .max = 700000000 },
@@ -151,7 +151,7 @@ static u8 sam9g20_pllb_out[] = { 0 };
static u16 sam9g20_pllb_icpll[] = { 0 };
-static struct clk_range sam9g20_pllb_outputs[] = {
+static const struct clk_range sam9g20_pllb_outputs[] = {
{ .min = 30000000, .max = 100000000 },
};
@@ -182,7 +182,7 @@ static const struct clk_master_characteristics sam9261_mck_characteristics = {
.divisors = { 1, 2, 4, 0 },
};
-static struct clk_range sam9261_plla_outputs[] = {
+static const struct clk_range sam9261_plla_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
@@ -199,7 +199,7 @@ static u8 sam9261_pllb_out[] = { 1 };
static u16 sam9261_pllb_icpll[] = { 1 };
-static struct clk_range sam9261_pllb_outputs[] = {
+static const struct clk_range sam9261_pllb_outputs[] = {
{ .min = 70000000, .max = 130000000 },
};
@@ -262,7 +262,7 @@ static const struct clk_master_characteristics sam9263_mck_characteristics = {
.divisors = { 1, 2, 4, 0 },
};
-static struct clk_range sam9263_pll_outputs[] = {
+static const struct clk_range sam9263_pll_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index 5aeef68b4bdd..0ac34cdaa106 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -14,7 +14,7 @@ static const struct clk_master_characteristics sam9rl_mck_characteristics = {
static u8 sam9rl_plla_out[] = { 0, 2 };
-static struct clk_range sam9rl_plla_outputs[] = {
+static const struct clk_range sam9rl_plla_outputs[] = {
{ .min = 80000000, .max = 200000000 },
{ .min = 190000000, .max = 240000000 },
};
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 3487e03d4bc6..0855f3a80cc7 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -17,7 +17,7 @@ static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 };
static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 745000000, .max = 800000000 },
{ .min = 695000000, .max = 750000000 },
{ .min = 645000000, .max = 700000000 },
@@ -49,6 +49,13 @@ static const struct {
{ .n = "pck1", .p = "prog1", .id = 9 },
};
+static const struct clk_pcr_layout at91sam9x5_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+};
+
struct pck {
char *n;
u8 id;
@@ -242,6 +249,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; i < ARRAY_SIZE(at91sam9x5_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9x5_pcr_layout,
at91sam9x5_periphck[i].n,
"masterck",
at91sam9x5_periphck[i].id,
@@ -254,6 +262,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
for (i = 0; extra_pcks[i].id; i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &at91sam9x5_pcr_layout,
extra_pcks[i].n,
"masterck",
extra_pcks[i].id,
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 66e7f7baf958..5f18847965c1 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -31,6 +32,7 @@ struct clk_generated {
spinlock_t *lock;
u32 id;
u32 gckdiv;
+ const struct clk_pcr_layout *layout;
u8 parent_id;
bool audio_pll_allowed;
};
@@ -47,14 +49,14 @@ static int clk_generated_enable(struct clk_hw *hw)
__func__, gck->gckdiv, gck->parent_id);
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(gck->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK |
- AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
- AT91_PMC_PCR_GCKCSS(gck->parent_id) |
- AT91_PMC_PCR_CMD |
- AT91_PMC_PCR_GCKDIV(gck->gckdiv) |
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_update_bits(gck->regmap, gck->layout->offset,
+ AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
+ gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ field_prep(gck->layout->gckcss_mask, gck->parent_id) |
+ gck->layout->cmd |
+ FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
AT91_PMC_PCR_GCKEN);
spin_unlock_irqrestore(gck->lock, flags);
return 0;
@@ -66,11 +68,11 @@ static void clk_generated_disable(struct clk_hw *hw)
unsigned long flags;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(gck->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN,
- AT91_PMC_PCR_CMD);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_update_bits(gck->regmap, gck->layout->offset,
+ gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ gck->layout->cmd);
spin_unlock_irqrestore(gck->lock, flags);
}
@@ -81,9 +83,9 @@ static int clk_generated_is_enabled(struct clk_hw *hw)
unsigned int status;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(gck->regmap, AT91_PMC_PCR, &status);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_read(gck->regmap, gck->layout->offset, &status);
spin_unlock_irqrestore(gck->lock, flags);
return status & AT91_PMC_PCR_GCKEN ? 1 : 0;
@@ -259,19 +261,18 @@ static void clk_generated_startup(struct clk_generated *gck)
unsigned long flags;
spin_lock_irqsave(gck->lock, flags);
- regmap_write(gck->regmap, AT91_PMC_PCR,
- (gck->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(gck->regmap, AT91_PMC_PCR, &tmp);
+ regmap_write(gck->regmap, gck->layout->offset,
+ (gck->id & gck->layout->pid_mask));
+ regmap_read(gck->regmap, gck->layout->offset, &tmp);
spin_unlock_irqrestore(gck->lock, flags);
- gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
- >> AT91_PMC_PCR_GCKCSS_OFFSET;
- gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
- >> AT91_PMC_PCR_GCKDIV_OFFSET;
+ gck->parent_id = field_get(gck->layout->gckcss_mask, tmp);
+ gck->gckdiv = FIELD_GET(AT91_PMC_PCR_GCKDIV_MASK, tmp);
}
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
u8 num_parents, u8 id, bool pll_audio,
const struct clk_range *range)
@@ -298,6 +299,7 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
gck->lock = lock;
gck->range = *range;
gck->audio_pll_allowed = pll_audio;
+ gck->layout = layout;
clk_generated_startup(gck);
hw = &gck->hw;
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index eb53b4a8fab6..12b5bf4cc7bb 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -29,6 +29,7 @@ struct clk_master {
struct regmap *regmap;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
+ u32 mckr;
};
static inline bool clk_master_ready(struct regmap *regmap)
@@ -69,7 +70,7 @@ static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
master->characteristics;
unsigned int mckr;
- regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
mckr &= layout->mask;
pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
@@ -95,7 +96,7 @@ static u8 clk_master_get_parent(struct clk_hw *hw)
struct clk_master *master = to_clk_master(hw);
unsigned int mckr;
- regmap_read(master->regmap, AT91_PMC_MCKR, &mckr);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
return mckr & AT91_PMC_CSS;
}
@@ -147,13 +148,14 @@ at91_clk_register_master(struct regmap *regmap,
return hw;
}
-
const struct clk_master_layout at91rm9200_master_layout = {
.mask = 0x31F,
.pres_shift = 2,
+ .offset = AT91_PMC_MCKR,
};
const struct clk_master_layout at91sam9x5_master_layout = {
.mask = 0x373,
.pres_shift = 4,
+ .offset = AT91_PMC_MCKR,
};
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 65c1defa78e4..6b7748b9588a 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -23,9 +24,6 @@ DEFINE_SPINLOCK(pmc_pcr_lock);
#define PERIPHERAL_ID_MAX 31
#define PERIPHERAL_MASK(id) (1 << ((id) & PERIPHERAL_ID_MAX))
-#define PERIPHERAL_RSHIFT_MASK 0x3
-#define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
-
#define PERIPHERAL_MAX_SHIFT 3
struct clk_peripheral {
@@ -43,6 +41,7 @@ struct clk_sam9x5_peripheral {
spinlock_t *lock;
u32 id;
u32 div;
+ const struct clk_pcr_layout *layout;
bool auto_div;
};
@@ -169,13 +168,13 @@ static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
return 0;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(periph->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD |
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_update_bits(periph->regmap, periph->layout->offset,
+ periph->layout->div_mask | periph->layout->cmd |
AT91_PMC_PCR_EN,
- AT91_PMC_PCR_DIV(periph->div) |
- AT91_PMC_PCR_CMD |
+ field_prep(periph->layout->div_mask, periph->div) |
+ periph->layout->cmd |
AT91_PMC_PCR_EN);
spin_unlock_irqrestore(periph->lock, flags);
@@ -191,11 +190,11 @@ static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
return;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_update_bits(periph->regmap, AT91_PMC_PCR,
- AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD,
- AT91_PMC_PCR_CMD);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_update_bits(periph->regmap, periph->layout->offset,
+ AT91_PMC_PCR_EN | periph->layout->cmd,
+ periph->layout->cmd);
spin_unlock_irqrestore(periph->lock, flags);
}
@@ -209,9 +208,9 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw)
return 1;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_read(periph->regmap, periph->layout->offset, &status);
spin_unlock_irqrestore(periph->lock, flags);
return status & AT91_PMC_PCR_EN ? 1 : 0;
@@ -229,13 +228,13 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw,
return parent_rate;
spin_lock_irqsave(periph->lock, flags);
- regmap_write(periph->regmap, AT91_PMC_PCR,
- (periph->id & AT91_PMC_PCR_PID_MASK));
- regmap_read(periph->regmap, AT91_PMC_PCR, &status);
+ regmap_write(periph->regmap, periph->layout->offset,
+ (periph->id & periph->layout->pid_mask));
+ regmap_read(periph->regmap, periph->layout->offset, &status);
spin_unlock_irqrestore(periph->lock, flags);
if (status & AT91_PMC_PCR_EN) {
- periph->div = PERIPHERAL_RSHIFT(status);
+ periph->div = field_get(periph->layout->div_mask, status);
periph->auto_div = false;
} else {
clk_sam9x5_peripheral_autodiv(periph);
@@ -328,6 +327,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range)
{
@@ -354,7 +354,9 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
periph->div = 0;
periph->regmap = regmap;
periph->lock = lock;
- periph->auto_div = true;
+ if (layout->div_mask)
+ periph->auto_div = true;
+ periph->layout = layout;
periph->range = *range;
hw = &periph->hw;
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
new file mode 100644
index 000000000000..34b817825b22
--- /dev/null
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Microchip Technology Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pmc.h"
+
+#define PMC_PLL_CTRL0 0xc
+#define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0)
+#define PMC_PLL_CTRL0_ENPLL BIT(28)
+#define PMC_PLL_CTRL0_ENPLLCK BIT(29)
+#define PMC_PLL_CTRL0_ENLOCK BIT(31)
+
+#define PMC_PLL_CTRL1 0x10
+#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0)
+#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24)
+
+#define PMC_PLL_ACR 0x18
+#define PMC_PLL_ACR_DEFAULT 0x1b040010UL
+#define PMC_PLL_ACR_UTMIVR BIT(12)
+#define PMC_PLL_ACR_UTMIBG BIT(13)
+#define PMC_PLL_ACR_LOOP_FILTER_MSK GENMASK(31, 24)
+
+#define PMC_PLL_UPDT 0x1c
+#define PMC_PLL_UPDT_UPDATE BIT(8)
+
+#define PMC_PLL_ISR0 0xec
+
+#define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1)
+#define UPLL_DIV 2
+#define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1)
+
+#define PLL_MAX_ID 1
+
+struct sam9x60_pll {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ spinlock_t *lock;
+ const struct clk_pll_characteristics *characteristics;
+ u32 frac;
+ u8 id;
+ u8 div;
+ u16 mul;
+};
+
+#define to_sam9x60_pll(hw) container_of(hw, struct sam9x60_pll, hw)
+
+static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
+{
+ unsigned int status;
+
+ regmap_read(regmap, PMC_PLL_ISR0, &status);
+
+ return !!(status & BIT(id));
+}
+
+static int sam9x60_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ struct regmap *regmap = pll->regmap;
+ unsigned long flags;
+ u8 div;
+ u16 mul;
+ u32 val;
+
+ spin_lock_irqsave(pll->lock, flags);
+ regmap_write(regmap, PMC_PLL_UPDT, pll->id);
+
+ regmap_read(regmap, PMC_PLL_CTRL0, &val);
+ div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
+
+ regmap_read(regmap, PMC_PLL_CTRL1, &val);
+ mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val);
+
+ if (sam9x60_pll_ready(regmap, pll->id) &&
+ (div == pll->div && mul == pll->mul)) {
+ spin_unlock_irqrestore(pll->lock, flags);
+ return 0;
+ }
+
+ /* Recommended value for PMC_PLL_ACR */
+ val = PMC_PLL_ACR_DEFAULT;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ regmap_write(regmap, PMC_PLL_CTRL1,
+ FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul));
+
+ if (pll->characteristics->upll) {
+ /* Enable the UTMI internal bandgap */
+ val |= PMC_PLL_ACR_UTMIBG;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ udelay(10);
+
+ /* Enable the UTMI internal regulator */
+ val |= PMC_PLL_ACR_UTMIVR;
+ regmap_write(regmap, PMC_PLL_ACR, val);
+
+ udelay(10);
+ }
+
+ regmap_update_bits(regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ regmap_write(regmap, PMC_PLL_CTRL0,
+ PMC_PLL_CTRL0_ENLOCK | PMC_PLL_CTRL0_ENPLL |
+ PMC_PLL_CTRL0_ENPLLCK | pll->div);
+
+ regmap_update_bits(regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ while (!sam9x60_pll_ready(regmap, pll->id))
+ cpu_relax();
+
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static int sam9x60_pll_is_prepared(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_ready(pll->regmap, pll->id);
+}
+
+static void sam9x60_pll_unprepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(pll->lock, flags);
+
+ regmap_write(pll->regmap, PMC_PLL_UPDT, pll->id);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_CTRL0,
+ PMC_PLL_CTRL0_ENPLLCK, 0);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_CTRL0, PMC_PLL_CTRL0_ENPLL, 0);
+
+ if (pll->characteristics->upll)
+ regmap_update_bits(pll->regmap, PMC_PLL_ACR,
+ PMC_PLL_ACR_UTMIBG | PMC_PLL_ACR_UTMIVR, 0);
+
+ regmap_update_bits(pll->regmap, PMC_PLL_UPDT,
+ PMC_PLL_UPDT_UPDATE, PMC_PLL_UPDT_UPDATE);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static unsigned long sam9x60_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return (parent_rate * (pll->mul + 1)) / (pll->div + 1);
+}
+
+static long sam9x60_pll_get_best_div_mul(struct sam9x60_pll *pll,
+ unsigned long rate,
+ unsigned long parent_rate,
+ bool update)
+{
+ const struct clk_pll_characteristics *characteristics =
+ pll->characteristics;
+ unsigned long bestremainder = ULONG_MAX;
+ unsigned long maxdiv, mindiv, tmpdiv;
+ long bestrate = -ERANGE;
+ unsigned long bestdiv = 0;
+ unsigned long bestmul = 0;
+ unsigned long bestfrac = 0;
+
+ if (rate < characteristics->output[0].min ||
+ rate > characteristics->output[0].max)
+ return -ERANGE;
+
+ if (!pll->characteristics->upll) {
+ mindiv = parent_rate / rate;
+ if (mindiv < 2)
+ mindiv = 2;
+
+ maxdiv = DIV_ROUND_UP(parent_rate * PLL_MUL_MAX, rate);
+ if (maxdiv > PLL_DIV_MAX)
+ maxdiv = PLL_DIV_MAX;
+ } else {
+ mindiv = maxdiv = UPLL_DIV;
+ }
+
+ for (tmpdiv = mindiv; tmpdiv <= maxdiv; tmpdiv++) {
+ unsigned long remainder;
+ unsigned long tmprate;
+ unsigned long tmpmul;
+ unsigned long tmpfrac = 0;
+
+ /*
+ * Calculate the multiplier associated with the current
+ * divider that provides the closest rate to the requested one.
+ */
+ tmpmul = mult_frac(rate, tmpdiv, parent_rate);
+ tmprate = mult_frac(parent_rate, tmpmul, tmpdiv);
+ remainder = rate - tmprate;
+
+ if (remainder) {
+ tmpfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * tmpdiv * (1 << 22),
+ parent_rate);
+
+ tmprate += DIV_ROUND_CLOSEST_ULL((u64)tmpfrac * parent_rate,
+ tmpdiv * (1 << 22));
+
+ if (tmprate > rate)
+ remainder = tmprate - rate;
+ else
+ remainder = rate - tmprate;
+ }
+
+ /*
+ * Compare the remainder with the best remainder found until
+ * now and elect a new best multiplier/divider pair if the
+ * current remainder is smaller than the best one.
+ */
+ if (remainder < bestremainder) {
+ bestremainder = remainder;
+ bestdiv = tmpdiv;
+ bestmul = tmpmul;
+ bestrate = tmprate;
+ bestfrac = tmpfrac;
+ }
+
+ /* We've found a perfect match! */
+ if (!remainder)
+ break;
+ }
+
+ /* Check if bestrate is a valid output rate */
+ if (bestrate < characteristics->output[0].min ||
+ bestrate > characteristics->output[0].max)
+ return -ERANGE;
+
+ if (update) {
+ pll->div = bestdiv - 1;
+ pll->mul = bestmul - 1;
+ pll->frac = bestfrac;
+ }
+
+ return bestrate;
+}
+
+static long sam9x60_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_get_best_div_mul(pll, rate, *parent_rate, false);
+}
+
+static int sam9x60_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll *pll = to_sam9x60_pll(hw);
+
+ return sam9x60_pll_get_best_div_mul(pll, rate, parent_rate, true);
+}
+
+static const struct clk_ops pll_ops = {
+ .prepare = sam9x60_pll_prepare,
+ .unprepare = sam9x60_pll_unprepare,
+ .is_prepared = sam9x60_pll_is_prepared,
+ .recalc_rate = sam9x60_pll_recalc_rate,
+ .round_rate = sam9x60_pll_round_rate,
+ .set_rate = sam9x60_pll_set_rate,
+};
+
+struct clk_hw * __init
+sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics)
+{
+ struct sam9x60_pll *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ unsigned int pllr;
+ int ret;
+
+ if (id > PLL_MAX_ID)
+ return ERR_PTR(-EINVAL);
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &pll_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->id = id;
+ pll->hw.init = &init;
+ pll->characteristics = characteristics;
+ pll->regmap = regmap;
+ pll->lock = lock;
+
+ regmap_write(regmap, PMC_PLL_UPDT, id);
+ regmap_read(regmap, PMC_PLL_CTRL0, &pllr);
+ pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr);
+ regmap_read(regmap, PMC_PLL_CTRL1, &pllr);
+ pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr);
+
+ hw = &pll->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
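
For reference, the rate computed by sam9x60_pll_recalc_rate() above reduces to parent_rate * (mul + 1) / (div + 1) for the integer part of the PLL. As a worked example with illustrative values: a 24 MHz main clock with the stored fields mul = 24 and div = 1 yields 24 MHz * 25 / 2 = 300 MHz, the bottom of the 300-600 MHz PLLA output range that the sam9x60 PMC driver later in this series declares for this PLL.
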
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 79ee1c760f2a..ebc37ee33518 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -23,9 +23,13 @@
#define RM9200_USB_DIV_SHIFT 28
#define RM9200_USB_DIV_TAB_SIZE 4
+#define SAM9X5_USBS_MASK GENMASK(0, 0)
+#define SAM9X60_USBS_MASK GENMASK(1, 0)
+
struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
+ u32 usbs_mask;
};
#define to_at91sam9x5_clk_usb(hw) \
@@ -111,8 +115,7 @@ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
if (index > 1)
return -EINVAL;
- regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS,
- index ? AT91_PMC_USBS : 0);
+ regmap_update_bits(usb->regmap, AT91_PMC_USB, usb->usbs_mask, index);
return 0;
}
@@ -124,7 +127,7 @@ static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw)
regmap_read(usb->regmap, AT91_PMC_USB, &usbr);
- return usbr & AT91_PMC_USBS;
+ return usbr & usb->usbs_mask;
}
static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -190,9 +193,10 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.set_rate = at91sam9x5_clk_usb_set_rate,
};
-struct clk_hw * __init
-at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
- const char **parent_names, u8 num_parents)
+static struct clk_hw * __init
+_at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents,
+ u32 usbs_mask)
{
struct at91sam9x5_clk_usb *usb;
struct clk_hw *hw;
@@ -212,6 +216,7 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
usb->hw.init = &init;
usb->regmap = regmap;
+ usb->usbs_mask = usbs_mask;
hw = &usb->hw;
ret = clk_hw_register(NULL, &usb->hw);
@@ -224,6 +229,22 @@ at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
}
struct clk_hw * __init
+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ return _at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents, SAM9X5_USBS_MASK);
+}
+
+struct clk_hw * __init
+sam9x60_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents)
+{
+ return _at91sam9x5_clk_register_usb(regmap, name, parent_names,
+ num_parents, SAM9X60_USBS_MASK);
+}
+
+struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name)
{
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index b95bb4e2a927..aa1754eac59f 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -93,6 +93,14 @@ CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
of_sama5d2_clk_audio_pll_pmc_setup);
#endif /* CONFIG_HAVE_AT91_AUDIO_PLL */
+static const struct clk_pcr_layout dt_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(5, 0),
+ .div_mask = GENMASK(17, 16),
+ .gckcss_mask = GENMASK(10, 8),
+};
+
#ifdef CONFIG_HAVE_AT91_GENERATED_CLK
#define GENERATED_SOURCE_MAX 6
@@ -146,7 +154,8 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
id == GCK_ID_CLASSD))
pll_audio = true;
- hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &dt_pcr_layout, name,
parent_names, num_parents,
id, pll_audio, &range);
if (IS_ERR(hw))
@@ -448,6 +457,7 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type)
hw = at91_clk_register_sam9x5_peripheral(regmap,
&pmc_pcr_lock,
+ &dt_pcr_layout,
name,
parent_name,
id, &range);
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index a0e5ce9c9b9e..2311204948be 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -38,6 +38,7 @@ struct clk_range {
#define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,}
struct clk_master_layout {
+ u32 offset;
u32 mask;
u8 pres_shift;
};
@@ -65,9 +66,10 @@ extern const struct clk_pll_layout sama5d3_pll_layout;
struct clk_pll_characteristics {
struct clk_range input;
int num_output;
- struct clk_range *output;
+ const struct clk_range *output;
u16 *icpll;
u8 *out;
+ u8 upll : 1;
};
struct clk_programmable_layout {
@@ -82,6 +84,17 @@ extern const struct clk_programmable_layout at91rm9200_programmable_layout;
extern const struct clk_programmable_layout at91sam9g45_programmable_layout;
extern const struct clk_programmable_layout at91sam9x5_programmable_layout;
+struct clk_pcr_layout {
+ u32 offset;
+ u32 cmd;
+ u32 div_mask;
+ u32 gckcss_mask;
+ u32 pid_mask;
+};
+
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
+
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
@@ -107,6 +120,7 @@ at91_clk_register_audio_pll_pmc(struct regmap *regmap, const char *name,
struct clk_hw * __init
at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char **parent_names,
u8 num_parents, u8 id, bool pll_audio,
const struct clk_range *range);
@@ -145,6 +159,7 @@ at91_clk_register_peripheral(struct regmap *regmap, const char *name,
const char *parent_name, u32 id);
struct clk_hw * __init
at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
+ const struct clk_pcr_layout *layout,
const char *name, const char *parent_name,
u32 id, const struct clk_range *range);
@@ -158,6 +173,11 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name,
const char *parent_name);
struct clk_hw * __init
+sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock,
+ const char *name, const char *parent_name, u8 id,
+ const struct clk_pll_characteristics *characteristics);
+
+struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
const char **parent_names, u8 num_parents, u8 id,
const struct clk_programmable_layout *layout);
@@ -183,6 +203,9 @@ struct clk_hw * __init
at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name);
struct clk_hw * __init
+sam9x60_clk_register_usb(struct regmap *regmap, const char *name,
+ const char **parent_names, u8 num_parents);
+struct clk_hw * __init
at91rm9200_clk_register_usb(struct regmap *regmap, const char *name,
const char *parent_name, const u32 *divisors);
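
The field_get()/field_prep() macros added to pmc.h above serve the same purpose as FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, but accept masks taken from a runtime clk_pcr_layout rather than compile-time constants, which is what the layout-driven accessors in clk-generated.c and clk-peripheral.c need. A small self-contained illustration using the GENMASK(17, 16) DIV field from the at91sam9x5 PCR layout shown earlier (the demo function itself is hypothetical):

    #include <linux/bits.h>
    #include <linux/bug.h>
    #include <linux/types.h>

    #include "pmc.h"   /* field_get() / field_prep() */

    static void pcr_field_demo(void)
    {
            u32 div_mask = GENMASK(17, 16);  /* ffs(div_mask) - 1 == 16 */
            u32 pcr;

            pcr = field_prep(div_mask, 2);            /* 2 << 16 == 0x20000 */
            WARN_ON(field_get(div_mask, pcr) != 2);   /* recovers the 2 */
    }
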
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
new file mode 100644
index 000000000000..9790ddfa5b3c
--- /dev/null
+++ b/drivers/clk/at91/sam9x60.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/at91.h>
+
+#include "pmc.h"
+
+static DEFINE_SPINLOCK(pmc_pll_lock);
+
+static const struct clk_master_characteristics mck_characteristics = {
+ .output = { .min = 140000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3 },
+ .have_div3_pres = 1,
+};
+
+static const struct clk_master_layout sam9x60_master_layout = {
+ .mask = 0x373,
+ .pres_shift = 4,
+ .offset = 0x28,
+};
+
+static const struct clk_range plla_outputs[] = {
+ { .min = 300000000, .max = 600000000 },
+};
+
+static const struct clk_pll_characteristics plla_characteristics = {
+ .input = { .min = 12000000, .max = 48000000 },
+ .num_output = ARRAY_SIZE(plla_outputs),
+ .output = plla_outputs,
+};
+
+static const struct clk_range upll_outputs[] = {
+ { .min = 300000000, .max = 500000000 },
+};
+
+static const struct clk_pll_characteristics upll_characteristics = {
+ .input = { .min = 12000000, .max = 48000000 },
+ .num_output = ARRAY_SIZE(upll_outputs),
+ .output = upll_outputs,
+ .upll = true,
+};
+
+static const struct clk_programmable_layout sam9x60_programmable_layout = {
+ .pres_shift = 8,
+ .css_mask = 0x1f,
+ .have_slck_mck = 0,
+};
+
+static const struct clk_pcr_layout sam9x60_pcr_layout = {
+ .offset = 0x88,
+ .cmd = BIT(31),
+ .gckcss_mask = GENMASK(12, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
+static const struct {
+ char *n;
+ char *p;
+ u8 id;
+} sam9x60_systemck[] = {
+ { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "qspick", .p = "masterck", .id = 19 },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+} sam9x60_periphck[] = {
+ { .n = "pioA_clk", .id = 2, },
+ { .n = "pioB_clk", .id = 3, },
+ { .n = "pioC_clk", .id = 4, },
+ { .n = "flex0_clk", .id = 5, },
+ { .n = "flex1_clk", .id = 6, },
+ { .n = "flex2_clk", .id = 7, },
+ { .n = "flex3_clk", .id = 8, },
+ { .n = "flex6_clk", .id = 9, },
+ { .n = "flex7_clk", .id = 10, },
+ { .n = "flex8_clk", .id = 11, },
+ { .n = "sdmmc0_clk", .id = 12, },
+ { .n = "flex4_clk", .id = 13, },
+ { .n = "flex5_clk", .id = 14, },
+ { .n = "flex9_clk", .id = 15, },
+ { .n = "flex10_clk", .id = 16, },
+ { .n = "tcb0_clk", .id = 17, },
+ { .n = "pwm_clk", .id = 18, },
+ { .n = "adc_clk", .id = 19, },
+ { .n = "dma0_clk", .id = 20, },
+ { .n = "matrix_clk", .id = 21, },
+ { .n = "uhphs_clk", .id = 22, },
+ { .n = "udphs_clk", .id = 23, },
+ { .n = "macb0_clk", .id = 24, },
+ { .n = "lcd_clk", .id = 25, },
+ { .n = "sdmmc1_clk", .id = 26, },
+ { .n = "macb1_clk", .id = 27, },
+ { .n = "ssc_clk", .id = 28, },
+ { .n = "can0_clk", .id = 29, },
+ { .n = "can1_clk", .id = 30, },
+ { .n = "flex11_clk", .id = 32, },
+ { .n = "flex12_clk", .id = 33, },
+ { .n = "i2s_clk", .id = 34, },
+ { .n = "qspi_clk", .id = 35, },
+ { .n = "gfx2d_clk", .id = 36, },
+ { .n = "pit64b_clk", .id = 37, },
+ { .n = "trng_clk", .id = 38, },
+ { .n = "aes_clk", .id = 39, },
+ { .n = "tdes_clk", .id = 40, },
+ { .n = "sha_clk", .id = 41, },
+ { .n = "classd_clk", .id = 42, },
+ { .n = "isi_clk", .id = 43, },
+ { .n = "pioD_clk", .id = 44, },
+ { .n = "tcb1_clk", .id = 45, },
+ { .n = "dbgu_clk", .id = 47, },
+ { .n = "mpddr_clk", .id = 49, },
+};
+
+static const struct {
+ char *n;
+ u8 id;
+ struct clk_range r;
+ bool pll;
+} sam9x60_gck[] = {
+ { .n = "flex0_gclk", .id = 5, },
+ { .n = "flex1_gclk", .id = 6, },
+ { .n = "flex2_gclk", .id = 7, },
+ { .n = "flex3_gclk", .id = 8, },
+ { .n = "flex6_gclk", .id = 9, },
+ { .n = "flex7_gclk", .id = 10, },
+ { .n = "flex8_gclk", .id = 11, },
+ { .n = "sdmmc0_gclk", .id = 12, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "flex4_gclk", .id = 13, },
+ { .n = "flex5_gclk", .id = 14, },
+ { .n = "flex9_gclk", .id = 15, },
+ { .n = "flex10_gclk", .id = 16, },
+ { .n = "tcb0_gclk", .id = 17, },
+ { .n = "adc_gclk", .id = 19, },
+ { .n = "lcd_gclk", .id = 25, .r = { .min = 0, .max = 140000000 }, },
+ { .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, },
+ { .n = "flex11_gclk", .id = 32, },
+ { .n = "flex12_gclk", .id = 33, },
+ { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 },
+ .pll = true, },
+ { .n = "pit64b_gclk", .id = 37, },
+ { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 },
+ .pll = true, },
+ { .n = "tcb1_gclk", .id = 45, },
+ { .n = "dbgu_gclk", .id = 47, },
+};
+
+static void __init sam9x60_pmc_setup(struct device_node *np)
+{
+ struct clk_range range = CLK_RANGE(0, 0);
+ const char *td_slck_name, *md_slck_name, *mainxtal_name;
+ struct pmc_data *sam9x60_pmc;
+ const char *parent_names[6];
+ struct regmap *regmap;
+ struct clk_hw *hw;
+ int i;
+ bool bypass;
+
+ i = of_property_match_string(np, "clock-names", "td_slck");
+ if (i < 0)
+ return;
+
+ td_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "md_slck");
+ if (i < 0)
+ return;
+
+ md_slck_name = of_clk_get_parent_name(np, i);
+
+ i = of_property_match_string(np, "clock-names", "main_xtal");
+ if (i < 0)
+ return;
+ mainxtal_name = of_clk_get_parent_name(np, i);
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return;
+
+ sam9x60_pmc = pmc_data_allocate(PMC_MAIN + 1,
+ nck(sam9x60_systemck),
+ nck(sam9x60_periphck),
+ nck(sam9x60_gck));
+ if (!sam9x60_pmc)
+ return;
+
+ hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 24000000,
+ 50000000);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
+ bypass);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = "main_rc_osc";
+ parent_names[1] = "main_osc";
+ hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_MAIN] = hw;
+
+ hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "pllack",
+ "mainck", 0, &plla_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck",
+ "main_osc", 1, &upll_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_UTMI] = hw;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = "mainck";
+ parent_names[2] = "pllack";
+ hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
+ &sam9x60_master_layout,
+ &mck_characteristics);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->chws[PMC_MCK] = hw;
+
+ parent_names[0] = "pllack";
+ parent_names[1] = "upllck";
+ parent_names[2] = "mainck";
+ parent_names[3] = "mainck";
+ hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ parent_names[0] = md_slck_name;
+ parent_names[1] = td_slck_name;
+ parent_names[2] = "mainck";
+ parent_names[3] = "masterck";
+ parent_names[4] = "pllack";
+ parent_names[5] = "upllck";
+ for (i = 0; i < 8; i++) {
+ char name[6];
+
+ snprintf(name, sizeof(name), "prog%d", i);
+
+ hw = at91_clk_register_programmable(regmap, name,
+ parent_names, 6, i,
+ &sam9x60_programmable_layout);
+ if (IS_ERR(hw))
+ goto err_free;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_systemck); i++) {
+ hw = at91_clk_register_system(regmap, sam9x60_systemck[i].n,
+ sam9x60_systemck[i].p,
+ sam9x60_systemck[i].id);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->shws[sam9x60_systemck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_periphck); i++) {
+ hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sam9x60_pcr_layout,
+ sam9x60_periphck[i].n,
+ "masterck",
+ sam9x60_periphck[i].id,
+ &range);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->phws[sam9x60_periphck[i].id] = hw;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sam9x60_gck); i++) {
+ hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sam9x60_pcr_layout,
+ sam9x60_gck[i].n,
+ parent_names, 6,
+ sam9x60_gck[i].id,
+ sam9x60_gck[i].pll,
+ &sam9x60_gck[i].r);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sam9x60_pmc->ghws[sam9x60_gck[i].id] = hw;
+ }
+
+ of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sam9x60_pmc);
+
+ return;
+
+err_free:
+ pmc_data_free(sam9x60_pmc);
+}
+/* Some clks are used for a clocksource */
+CLK_OF_DECLARE(sam9x60_pmc, "microchip,sam9x60-pmc", sam9x60_pmc_setup);
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 81943fac4537..6509d0934804 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -16,7 +16,7 @@ static u8 plla_out[] = { 0 };
static u16 plla_icpll[] = { 0 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 600000000, .max = 1200000000 },
};
@@ -28,6 +28,13 @@ static const struct clk_pll_characteristics plla_characteristics = {
.out = plla_out,
};
+static const struct clk_pcr_layout sama5d2_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .gckcss_mask = GENMASK(10, 8),
+ .pid_mask = GENMASK(6, 0),
+};
+
static const struct {
char *n;
char *p;
@@ -274,6 +281,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_periphck[i].n,
"masterck",
sama5d2_periphck[i].id,
@@ -286,6 +294,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d2_periph32ck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_periph32ck[i].n,
"h32mxck",
sama5d2_periph32ck[i].id,
@@ -304,6 +313,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
+ &sama5d2_pcr_layout,
sama5d2_gck[i].n,
parent_names, 6,
sama5d2_gck[i].id,
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index b645a9d59cdb..25b156d4e645 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -16,7 +16,7 @@ static u8 plla_out[] = { 0 };
static u16 plla_icpll[] = { 0 };
-static struct clk_range plla_outputs[] = {
+static const struct clk_range plla_outputs[] = {
{ .min = 600000000, .max = 1200000000 },
};
@@ -28,6 +28,12 @@ static const struct clk_pll_characteristics plla_characteristics = {
.out = plla_out,
};
+static const struct clk_pcr_layout sama5d4_pcr_layout = {
+ .offset = 0x10c,
+ .cmd = BIT(12),
+ .pid_mask = GENMASK(6, 0),
+};
+
static const struct {
char *n;
char *p;
@@ -232,6 +238,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_periphck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d4_pcr_layout,
sama5d4_periphck[i].n,
"masterck",
sama5d4_periphck[i].id,
@@ -244,6 +251,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(sama5d4_periph32ck); i++) {
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
+ &sama5d4_pcr_layout,
sama5d4_periph32ck[i].n,
"h32mxck",
sama5d4_periph32ck[i].id,
diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c
index ab6ecefc49ad..e76b1d64e905 100644
--- a/drivers/clk/at91/sckc.c
+++ b/drivers/clk/at91/sckc.c
@@ -152,28 +152,6 @@ at91_clk_register_slow_osc(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, void __iomem *sckcr)
-{
- struct clk_hw *hw;
- const char *parent_name;
- const char *name = np->name;
- u32 startup;
- bool bypass;
-
- parent_name = of_clk_get_parent_name(np, 0);
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
-
- hw = at91_clk_register_slow_osc(sckcr, name, parent_name, startup,
- bypass);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -266,28 +244,6 @@ at91_clk_register_slow_rc_osc(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, void __iomem *sckcr)
-{
- struct clk_hw *hw;
- u32 frequency = 0;
- u32 accuracy = 0;
- u32 startup = 0;
- const char *name = np->name;
-
- of_property_read_string(np, "clock-output-names", &name);
- of_property_read_u32(np, "clock-frequency", &frequency);
- of_property_read_u32(np, "clock-accuracy", &accuracy);
- of_property_read_u32(np, "atmel,startup-time-usec", &startup);
-
- hw = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy,
- startup);
- if (IS_ERR(hw))
- return;
-
- of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-
static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw);
@@ -365,68 +321,72 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
return hw;
}
-static void __init
-of_at91sam9x5_clk_slow_setup(struct device_node *np, void __iomem *sckcr)
+static void __init at91sam9x5_sckc_register(struct device_node *np,
+ unsigned int rc_osc_startup_us)
{
+ const char *parent_names[2] = { "slow_rc_osc", "slow_osc" };
+ void __iomem *regbase = of_iomap(np, 0);
+ struct device_node *child = NULL;
+ const char *xtal_name;
struct clk_hw *hw;
- const char *parent_names[2];
- unsigned int num_parents;
- const char *name = np->name;
+ bool bypass;
- num_parents = of_clk_get_parent_count(np);
- if (num_parents == 0 || num_parents > 2)
+ if (!regbase)
+ return;
+
+ hw = at91_clk_register_slow_rc_osc(regbase, parent_names[0], 32768,
+ 50000000, rc_osc_startup_us);
+ if (IS_ERR(hw))
return;
- of_clk_parent_fill(np, parent_names, num_parents);
+ xtal_name = of_clk_get_parent_name(np, 0);
+ if (!xtal_name) {
+ /* DT backward compatibility */
+ child = of_get_compatible_child(np, "atmel,at91sam9x5-clk-slow-osc");
+ if (!child)
+ return;
+
+ xtal_name = of_clk_get_parent_name(child, 0);
+ bypass = of_property_read_bool(child, "atmel,osc-bypass");
+
+ child = of_get_compatible_child(np, "atmel,at91sam9x5-clk-slow");
+ } else {
+ bypass = of_property_read_bool(np, "atmel,osc-bypass");
+ }
+
+ if (!xtal_name)
+ return;
- of_property_read_string(np, "clock-output-names", &name);
+ hw = at91_clk_register_slow_osc(regbase, parent_names[1], xtal_name,
+ 1200000, bypass);
+ if (IS_ERR(hw))
+ return;
- hw = at91_clk_register_sam9x5_slow(sckcr, name, parent_names,
- num_parents);
+ hw = at91_clk_register_sam9x5_slow(regbase, "slowck", parent_names, 2);
if (IS_ERR(hw))
return;
of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
-}
-static const struct of_device_id sckc_clk_ids[] __initconst = {
- /* Slow clock */
- {
- .compatible = "atmel,at91sam9x5-clk-slow-osc",
- .data = of_at91sam9x5_clk_slow_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-slow-rc-osc",
- .data = of_at91sam9x5_clk_slow_rc_osc_setup,
- },
- {
- .compatible = "atmel,at91sam9x5-clk-slow",
- .data = of_at91sam9x5_clk_slow_setup,
- },
- { /*sentinel*/ }
-};
+ /* DT backward compatibility */
+ if (child)
+ of_clk_add_hw_provider(child, of_clk_hw_simple_get, hw);
+}
static void __init of_at91sam9x5_sckc_setup(struct device_node *np)
{
- struct device_node *childnp;
- void (*clk_setup)(struct device_node *, void __iomem *);
- const struct of_device_id *clk_id;
- void __iomem *regbase = of_iomap(np, 0);
-
- if (!regbase)
- return;
-
- for_each_child_of_node(np, childnp) {
- clk_id = of_match_node(sckc_clk_ids, childnp);
- if (!clk_id)
- continue;
- clk_setup = clk_id->data;
- clk_setup(childnp, regbase);
- }
+ at91sam9x5_sckc_register(np, 75);
}
CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc",
of_at91sam9x5_sckc_setup);
+static void __init of_sama5d3_sckc_setup(struct device_node *np)
+{
+ at91sam9x5_sckc_register(np, 500);
+}
+CLK_OF_DECLARE(sama5d3_clk_sckc, "atmel,sama5d3-sckc",
+ of_sama5d3_sckc_setup);
+
static int clk_sama5d4_slow_osc_prepare(struct clk_hw *hw)
{
struct clk_sama5d4_slow_osc *osc = to_clk_sama5d4_slow_osc(hw);
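
The three per-child-node setup routines for the slow clock controller are folded into a single at91sam9x5_sckc_register() helper that registers the RC oscillator, the crystal oscillator and the slow-clock mux directly from the sckc node, while the compatible-child lookups above keep the old child-node binding working. The only per-SoC difference left is the RC oscillator startup time, so supporting another variant is a one-line wrapper; a hypothetical example (the compatible string and the 100 us value are made up for illustration):

    static void __init of_example_sckc_setup(struct device_node *np)
    {
        /* hypothetical SoC with a 100 us slow-RC startup time */
        at91sam9x5_sckc_register(np, 100);
    }
    CLK_OF_DECLARE(example_clk_sckc, "vendor,example-sckc",
               of_example_sckc_setup);
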
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 02d3bcd6216c..71c2e9519ca8 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index c68dada97316..aba787b2e771 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 2a2c7569336a..b6d07ca0164f 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 9fcae932e082..770bb01f523e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index eee64b9e5d10..cc3b1e1bc087 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,8 +15,9 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 4d0be66aa6a8..eb14a5bc0507 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 0b4b44a2579e..bccdfa00fd37 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 9b9db743df25..e9518d35f262 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 596136793fc4..42b4df6ba249 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -87,10 +87,10 @@ struct aspeed_clk_gate {
/* TODO: ask Aspeed about the actual parent data */
static const struct aspeed_gate_data aspeed_gates[] = {
/* clk rst name parent flags */
- [ASPEED_CLK_GATE_ECLK] = { 0, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */
+ [ASPEED_CLK_GATE_ECLK] = { 0, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
- [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
+ [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
@@ -113,6 +113,24 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
+static const char * const eclk_parent_names[] = {
+ "mpll",
+ "hpll",
+ "dpll",
+};
+
+static const struct clk_div_table ast2500_eclk_div_table[] = {
+ { 0x0, 2 },
+ { 0x1, 2 },
+ { 0x2, 3 },
+ { 0x3, 4 },
+ { 0x4, 5 },
+ { 0x5, 6 },
+ { 0x6, 7 },
+ { 0x7, 8 },
+ { 0 }
+};
+
static const struct clk_div_table ast2500_mac_div_table[] = {
{ 0x0, 4 }, /* Yep, really. Aspeed confirmed this is correct */
{ 0x1, 4 },
@@ -192,18 +210,21 @@ static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val)
struct aspeed_clk_soc_data {
const struct clk_div_table *div_table;
+ const struct clk_div_table *eclk_div_table;
const struct clk_div_table *mac_div_table;
struct clk_hw *(*calc_pll)(const char *name, u32 val);
};
static const struct aspeed_clk_soc_data ast2500_data = {
.div_table = ast2500_div_table,
+ .eclk_div_table = ast2500_eclk_div_table,
.mac_div_table = ast2500_mac_div_table,
.calc_pll = aspeed_ast2500_calc_pll,
};
static const struct aspeed_clk_soc_data ast2400_data = {
.div_table = ast2400_div_table,
+ .eclk_div_table = ast2400_div_table,
.mac_div_table = ast2400_div_table,
.calc_pll = aspeed_ast2400_calc_pll,
};
@@ -522,6 +543,22 @@ static int aspeed_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_24M] = hw;
+ hw = clk_hw_register_mux(dev, "eclk-mux", eclk_parent_names,
+ ARRAY_SIZE(eclk_parent_names), 0,
+ scu_base + ASPEED_CLK_SELECTION, 2, 0x3, 0,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_ECLK_MUX] = hw;
+
+ hw = clk_hw_register_divider_table(dev, "eclk", "eclk-mux", 0,
+ scu_base + ASPEED_CLK_SELECTION, 28,
+ 3, 0, soc_data->eclk_div_table,
+ &aspeed_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_clk_data->hws[ASPEED_CLK_ECLK] = hw;
+
/*
* TODO: There are a number of clocks that are not included in this driver
* as more information is required:
@@ -531,7 +568,6 @@ static int aspeed_clk_probe(struct platform_device *pdev)
* RGMII
* RMII
* UART[1..5] clock source mux
- * Video Engine (ECLK) mux and clock divider
*/
for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) {
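
Besides moving reset line 6 from the VCLK gate to the ECLK gate, the Aspeed driver now registers the previously missing ECLK mux and divider, so the video engine clock becomes controllable through the common clock framework and the corresponding TODO entry goes away. A hedged consumer-side sketch (the "eclk" lookup name and the target rate are assumptions, not part of this patch):

    #include <linux/clk.h>

    /* Request, rate-set and enable the video engine clock. */
    static int example_enable_eclk(struct device *dev)
    {
        struct clk *eclk = devm_clk_get(dev, "eclk");

        if (IS_ERR(eclk))
            return PTR_ERR(eclk);
        clk_set_rate(eclk, 200000000);  /* illustrative rate */
        return clk_prepare_enable(eclk);
    }
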
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 46604214bba0..b06038b8f658 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -218,7 +218,7 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
hw = &composite->hw;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index e5a17265cfaf..3f9ff78c4a2a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -25,6 +25,22 @@
* parent - fixed parent. No clk_set_parent support
*/
+static inline u32 clk_div_readl(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
+ return ioread32be(divider->reg);
+
+ return readl(divider->reg);
+}
+
+static inline void clk_div_writel(struct clk_divider *divider, u32 val)
+{
+ if (divider->flags & CLK_DIVIDER_BIG_ENDIAN)
+ iowrite32be(val, divider->reg);
+ else
+ writel(val, divider->reg);
+}
+
static unsigned int _get_table_maxdiv(const struct clk_div_table *table,
u8 width)
{
@@ -135,7 +151,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
@@ -370,7 +386,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_READ_ONLY) {
u32 val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = clk_div_readl(divider) >> divider->shift;
val &= clk_div_mask(divider->width);
return divider_ro_round_rate(hw, rate, prate, divider->table,
@@ -420,11 +436,11 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = clk_div_mask(divider->width) << (divider->shift + 16);
} else {
- val = clk_readl(divider->reg);
+ val = clk_div_readl(divider);
val &= ~(clk_div_mask(divider->width) << divider->shift);
}
val |= (u32)value << divider->shift;
- clk_writel(val, divider->reg);
+ clk_div_writel(divider, val);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
@@ -475,7 +491,7 @@ static struct clk_hw *_register_divider(struct device *dev, const char *name,
init.ops = &clk_divider_ro_ops;
else
init.ops = &clk_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
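
The basic divider stops using the global clk_readl()/clk_writel() helpers in favour of local accessors that honour a new CLK_DIVIDER_BIG_ENDIAN flag, and it no longer forces CLK_IS_BASIC; the same conversion is applied below to the fractional divider, gate, multiplier and mux types. A minimal sketch of how a big-endian platform would opt in when registering a divider (the register offset, field position and clock names are illustrative assumptions):

    #include <linux/clk-provider.h>
    #include <linux/err.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    /* 4-bit divider at bits [3:0] of a big-endian register at +0x10. */
    static int example_register_div(struct device *dev, void __iomem *base)
    {
        struct clk_hw *hw;

        hw = clk_hw_register_divider(dev, "example-div", "example-parent",
                                     0, base + 0x10, 0, 4,
                                     CLK_DIVIDER_BIG_ENDIAN,
                                     &example_lock);
        return PTR_ERR_OR_ZERO(hw);
    }
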
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 241b3f8c61a9..8b343e59dc61 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -64,12 +64,14 @@ const struct clk_ops clk_fixed_factor_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
-struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned int mult, unsigned int div)
+static struct clk_hw *
+__clk_hw_register_fixed_factor(struct device *dev, struct device_node *np,
+ const char *name, const char *parent_name, int index,
+ unsigned long flags, unsigned int mult, unsigned int div)
{
struct clk_fixed_factor *fix;
- struct clk_init_data init;
+ struct clk_init_data init = { };
+ struct clk_parent_data pdata = { .index = index };
struct clk_hw *hw;
int ret;
@@ -84,12 +86,18 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
init.name = name;
init.ops = &clk_fixed_factor_ops;
- init.flags = flags | CLK_IS_BASIC;
- init.parent_names = &parent_name;
+ init.flags = flags;
+ if (parent_name)
+ init.parent_names = &parent_name;
+ else
+ init.parent_data = &pdata;
init.num_parents = 1;
hw = &fix->hw;
- ret = clk_hw_register(dev, hw);
+ if (dev)
+ ret = clk_hw_register(dev, hw);
+ else
+ ret = of_clk_hw_register(np, hw);
if (ret) {
kfree(fix);
hw = ERR_PTR(ret);
@@ -97,6 +105,14 @@ struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
return hw;
}
+
+struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ return __clk_hw_register_fixed_factor(dev, NULL, name, parent_name, -1,
+ flags, mult, div);
+}
EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
@@ -143,11 +159,10 @@ static const struct of_device_id set_rate_parent_matches[] = {
{ /* Sentinel */ },
};
-static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
+static struct clk_hw *_of_fixed_factor_clk_setup(struct device_node *node)
{
- struct clk *clk;
+ struct clk_hw *hw;
const char *clk_name = node->name;
- const char *parent_name;
unsigned long flags = 0;
u32 div, mult;
int ret;
@@ -165,30 +180,28 @@ static struct clk *_of_fixed_factor_clk_setup(struct device_node *node)
}
of_property_read_string(node, "clock-output-names", &clk_name);
- parent_name = of_clk_get_parent_name(node, 0);
if (of_match_node(set_rate_parent_matches, node))
flags |= CLK_SET_RATE_PARENT;
- clk = clk_register_fixed_factor(NULL, clk_name, parent_name, flags,
- mult, div);
- if (IS_ERR(clk)) {
+ hw = __clk_hw_register_fixed_factor(NULL, node, clk_name, NULL, 0,
+ flags, mult, div);
+ if (IS_ERR(hw)) {
/*
- * If parent clock is not registered, registration would fail.
* Clear OF_POPULATED flag so that clock registration can be
* attempted again from probe function.
*/
of_node_clear_flag(node, OF_POPULATED);
- return clk;
+ return ERR_CAST(hw);
}
- ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
if (ret) {
- clk_unregister(clk);
+ clk_hw_unregister_fixed_factor(hw);
return ERR_PTR(ret);
}
- return clk;
+ return hw;
}
/**
@@ -203,17 +216,17 @@ CLK_OF_DECLARE(fixed_factor_clk, "fixed-factor-clock",
static int of_fixed_factor_clk_remove(struct platform_device *pdev)
{
- struct clk *clk = platform_get_drvdata(pdev);
+ struct clk_hw *clk = platform_get_drvdata(pdev);
of_clk_del_provider(pdev->dev.of_node);
- clk_unregister_fixed_factor(clk);
+ clk_hw_unregister_fixed_factor(clk);
return 0;
}
static int of_fixed_factor_clk_probe(struct platform_device *pdev)
{
- struct clk *clk;
+ struct clk_hw *clk;
/*
* This function is not executed when of_fixed_factor_clk_setup
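
The fixed-factor code gains an internal __clk_hw_register_fixed_factor() that can register against a device_node and reference its parent by index through struct clk_parent_data, so the DT setup path no longer has to resolve the parent's name via of_clk_get_parent_name() before registering. The exported helper keeps its by-name behaviour, roughly as in this sketch (clock names are illustrative):

    /* Output runs at half the parent rate: mult = 1, div = 2. */
    static struct clk_hw *example_register_div2(void)
    {
        return clk_hw_register_fixed_factor(NULL, "osc-div2", "osc",
                                            0, 1, 2);
    }
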
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index d1a97d971183..51f26619b6a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -10,8 +10,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 00ef4f5e53fe..a7e4aef7a376 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -68,7 +68,7 @@ struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
init.name = name;
init.ops = &clk_fixed_rate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index fdfe2e423d15..b1e556f20911 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -8,11 +8,28 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/rational.h>
+static inline u32 clk_fd_readl(struct clk_fractional_divider *fd)
+{
+ if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
+ return ioread32be(fd->reg);
+
+ return readl(fd->reg);
+}
+
+static inline void clk_fd_writel(struct clk_fractional_divider *fd, u32 val)
+{
+ if (fd->flags & CLK_FRAC_DIVIDER_BIG_ENDIAN)
+ iowrite32be(val, fd->reg);
+ else
+ writel(val, fd->reg);
+}
+
static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -27,7 +44,7 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = clk_fd_readl(fd);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -115,10 +132,10 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = clk_fd_readl(fd);
val &= ~(fd->mmask | fd->nmask);
val |= (m << fd->mshift) | (n << fd->nshift);
- clk_writel(val, fd->reg);
+ clk_fd_writel(fd, val);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -151,7 +168,7 @@ struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
init.name = name;
init.ops = &clk_fractional_divider_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index f05823cd9b21..1b99fc962745 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -23,6 +23,22 @@
* parent - fixed parent. No clk_set_parent support
*/
+static inline u32 clk_gate_readl(struct clk_gate *gate)
+{
+ if (gate->flags & CLK_GATE_BIG_ENDIAN)
+ return ioread32be(gate->reg);
+
+ return readl(gate->reg);
+}
+
+static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
+{
+ if (gate->flags & CLK_GATE_BIG_ENDIAN)
+ iowrite32be(val, gate->reg);
+ else
+ writel(val, gate->reg);
+}
+
/*
* It works on the following logic:
*
@@ -55,7 +71,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
if (set)
reg |= BIT(gate->bit_idx);
} else {
- reg = clk_readl(gate->reg);
+ reg = clk_gate_readl(gate);
if (set)
reg |= BIT(gate->bit_idx);
@@ -63,7 +79,7 @@ static void clk_gate_endisable(struct clk_hw *hw, int enable)
reg &= ~BIT(gate->bit_idx);
}
- clk_writel(reg, gate->reg);
+ clk_gate_writel(gate, reg);
if (gate->lock)
spin_unlock_irqrestore(gate->lock, flags);
@@ -88,7 +104,7 @@ int clk_gate_is_enabled(struct clk_hw *hw)
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
- reg = clk_readl(gate->reg);
+ reg = clk_gate_readl(gate);
/* if a set bit disables this clk, flip it before masking */
if (gate->flags & CLK_GATE_SET_TO_DISABLE)
@@ -142,7 +158,7 @@ struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_gate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index c2f07f0d077c..9d930edd6516 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -137,7 +137,7 @@ static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
init.name = name;
init.ops = clk_gpio_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 8e4581004695..bd328b0eb243 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -272,7 +271,7 @@ static const struct clk_ops periclk_ops = {
.set_rate = clk_periclk_set_rate,
};
-static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
+static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
{
u32 reg;
struct hb_clk *hb_clk;
@@ -284,11 +283,11 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
rc = of_property_read_u32(node, "reg", &reg);
if (WARN_ON(rc))
- return NULL;
+ return;
hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
if (WARN_ON(!hb_clk))
- return NULL;
+ return;
/* Map system registers */
srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
@@ -301,7 +300,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
init.name = clk_name;
init.ops = ops;
- init.flags = 0;
+ init.flags = clkflags;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -311,33 +310,31 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
rc = clk_hw_register(NULL, &hb_clk->hw);
if (WARN_ON(rc)) {
kfree(hb_clk);
- return NULL;
+ return;
}
- rc = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
- return hb_clk->hw.clk;
+ of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
}
static void __init hb_pll_init(struct device_node *node)
{
- hb_clk_init(node, &clk_pll_ops);
+ hb_clk_init(node, &clk_pll_ops, 0);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);
static void __init hb_a9periph_init(struct device_node *node)
{
- hb_clk_init(node, &a9periphclk_ops);
+ hb_clk_init(node, &a9periphclk_ops, 0);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);
static void __init hb_a9bus_init(struct device_node *node)
{
- struct clk *clk = hb_clk_init(node, &a9bclk_ops);
- clk_prepare_enable(clk);
+ hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);
static void __init hb_emmc_init(struct device_node *node)
{
- hb_clk_init(node, &periclk_ops);
+ hb_clk_init(node, &periclk_ops, 0);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
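
hb_clk_init() no longer returns the struct clk just so hb_a9bus_init() could clk_prepare_enable() it by hand; callers now pass the clock flags, and the A9 bus clock is simply marked CLK_IS_CRITICAL so the core keeps it running. Registering another always-on clock with this helper would look like the hypothetical sketch below (the compatible string and names are made up):

    static void __init hb_example_init(struct device_node *node)
    {
        /* CLK_IS_CRITICAL: enabled by the core and never gated */
        hb_clk_init(node, &periclk_ops, CLK_IS_CRITICAL);
    }
    CLK_OF_DECLARE(hb_example, "calxeda,hb-example-clock", hb_example_init);
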
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index a47c2b600f20..97d1e8c35b71 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/clk-lochnagar.c b/drivers/clk/clk-lochnagar.c
new file mode 100644
index 000000000000..a2f31e58ee48
--- /dev/null
+++ b/drivers/clk/clk-lochnagar.c
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar clock control
+ *
+ * Copyright (c) 2017-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.cirrus.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar1_regs.h>
+#include <linux/mfd/lochnagar2_regs.h>
+
+#include <dt-bindings/clk/lochnagar.h>
+
+#define LOCHNAGAR_NUM_CLOCKS (LOCHNAGAR_SPDIF_CLKOUT + 1)
+
+struct lochnagar_clk {
+ const char * const name;
+ struct clk_hw hw;
+
+ struct lochnagar_clk_priv *priv;
+
+ u16 cfg_reg;
+ u16 ena_mask;
+
+ u16 src_reg;
+ u16 src_mask;
+};
+
+struct lochnagar_clk_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ enum lochnagar_type type;
+
+ const char **parents;
+ unsigned int nparents;
+
+ struct lochnagar_clk lclks[LOCHNAGAR_NUM_CLOCKS];
+};
+
+static const char * const lochnagar1_clk_parents[] = {
+ "ln-none",
+ "ln-spdif-mclk",
+ "ln-psia1-mclk",
+ "ln-psia2-mclk",
+ "ln-cdc-clkout",
+ "ln-dsp-clkout",
+ "ln-pmic-32k",
+ "ln-gf-mclk1",
+ "ln-gf-mclk3",
+ "ln-gf-mclk2",
+ "ln-gf-mclk4",
+};
+
+static const char * const lochnagar2_clk_parents[] = {
+ "ln-none",
+ "ln-cdc-clkout",
+ "ln-dsp-clkout",
+ "ln-pmic-32k",
+ "ln-spdif-mclk",
+ "ln-clk-12m",
+ "ln-clk-11m",
+ "ln-clk-24m",
+ "ln-clk-22m",
+ "ln-clk-8m",
+ "ln-usb-clk-24m",
+ "ln-gf-mclk1",
+ "ln-gf-mclk3",
+ "ln-gf-mclk2",
+ "ln-psia1-mclk",
+ "ln-psia2-mclk",
+ "ln-spdif-clkout",
+ "ln-adat-mclk",
+ "ln-usb-clk-12m",
+};
+
+#define LN1_CLK(ID, NAME, REG) \
+ [LOCHNAGAR_##ID] = { \
+ .name = NAME, \
+ .cfg_reg = LOCHNAGAR1_##REG, \
+ .ena_mask = LOCHNAGAR1_##ID##_ENA_MASK, \
+ .src_reg = LOCHNAGAR1_##ID##_SEL, \
+ .src_mask = LOCHNAGAR1_SRC_MASK, \
+ }
+
+#define LN2_CLK(ID, NAME) \
+ [LOCHNAGAR_##ID] = { \
+ .name = NAME, \
+ .cfg_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .src_reg = LOCHNAGAR2_##ID##_CTRL, \
+ .ena_mask = LOCHNAGAR2_CLK_ENA_MASK, \
+ .src_mask = LOCHNAGAR2_CLK_SRC_MASK, \
+ }
+
+static const struct lochnagar_clk lochnagar1_clks[LOCHNAGAR_NUM_CLOCKS] = {
+ LN1_CLK(CDC_MCLK1, "ln-cdc-mclk1", CDC_AIF_CTRL2),
+ LN1_CLK(CDC_MCLK2, "ln-cdc-mclk2", CDC_AIF_CTRL2),
+ LN1_CLK(DSP_CLKIN, "ln-dsp-clkin", DSP_AIF),
+ LN1_CLK(GF_CLKOUT1, "ln-gf-clkout1", GF_AIF1),
+};
+
+static const struct lochnagar_clk lochnagar2_clks[LOCHNAGAR_NUM_CLOCKS] = {
+ LN2_CLK(CDC_MCLK1, "ln-cdc-mclk1"),
+ LN2_CLK(CDC_MCLK2, "ln-cdc-mclk2"),
+ LN2_CLK(DSP_CLKIN, "ln-dsp-clkin"),
+ LN2_CLK(GF_CLKOUT1, "ln-gf-clkout1"),
+ LN2_CLK(GF_CLKOUT2, "ln-gf-clkout2"),
+ LN2_CLK(PSIA1_MCLK, "ln-psia1-mclk"),
+ LN2_CLK(PSIA2_MCLK, "ln-psia2-mclk"),
+ LN2_CLK(SPDIF_MCLK, "ln-spdif-mclk"),
+ LN2_CLK(ADAT_MCLK, "ln-adat-mclk"),
+ LN2_CLK(SOUNDCARD_MCLK, "ln-soundcard-mclk"),
+};
+
+static inline struct lochnagar_clk *lochnagar_hw_to_lclk(struct clk_hw *hw)
+{
+ return container_of(hw, struct lochnagar_clk, hw);
+}
+
+static int lochnagar_clk_prepare(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->cfg_reg,
+ lclk->ena_mask, lclk->ena_mask);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to prepare %s: %d\n",
+ lclk->name, ret);
+
+ return ret;
+}
+
+static void lochnagar_clk_unprepare(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->cfg_reg, lclk->ena_mask, 0);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to unprepare %s: %d\n",
+ lclk->name, ret);
+}
+
+static int lochnagar_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, lclk->src_reg, lclk->src_mask, index);
+ if (ret < 0)
+ dev_dbg(priv->dev, "Failed to reparent %s: %d\n",
+ lclk->name, ret);
+
+ return ret;
+}
+
+static u8 lochnagar_clk_get_parent(struct clk_hw *hw)
+{
+ struct lochnagar_clk *lclk = lochnagar_hw_to_lclk(hw);
+ struct lochnagar_clk_priv *priv = lclk->priv;
+ struct regmap *regmap = priv->regmap;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(regmap, lclk->src_reg, &val);
+ if (ret < 0) {
+ dev_dbg(priv->dev, "Failed to read parent of %s: %d\n",
+ lclk->name, ret);
+ return priv->nparents;
+ }
+
+ val &= lclk->src_mask;
+
+ return val;
+}
+
+static const struct clk_ops lochnagar_clk_ops = {
+ .prepare = lochnagar_clk_prepare,
+ .unprepare = lochnagar_clk_unprepare,
+ .set_parent = lochnagar_clk_set_parent,
+ .get_parent = lochnagar_clk_get_parent,
+};
+
+static int lochnagar_init_parents(struct lochnagar_clk_priv *priv)
+{
+ struct device_node *np = priv->dev->of_node;
+ int i, j;
+
+ switch (priv->type) {
+ case LOCHNAGAR1:
+ memcpy(priv->lclks, lochnagar1_clks, sizeof(lochnagar1_clks));
+
+ priv->nparents = ARRAY_SIZE(lochnagar1_clk_parents);
+ priv->parents = devm_kmemdup(priv->dev, lochnagar1_clk_parents,
+ sizeof(lochnagar1_clk_parents),
+ GFP_KERNEL);
+ break;
+ case LOCHNAGAR2:
+ memcpy(priv->lclks, lochnagar2_clks, sizeof(lochnagar2_clks));
+
+ priv->nparents = ARRAY_SIZE(lochnagar2_clk_parents);
+ priv->parents = devm_kmemdup(priv->dev, lochnagar2_clk_parents,
+ sizeof(lochnagar2_clk_parents),
+ GFP_KERNEL);
+ break;
+ default:
+ dev_err(priv->dev, "Unknown Lochnagar type: %d\n", priv->type);
+ return -EINVAL;
+ }
+
+ if (!priv->parents)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->nparents; i++) {
+ j = of_property_match_string(np, "clock-names",
+ priv->parents[i]);
+ if (j >= 0)
+ priv->parents[i] = of_clk_get_parent_name(np, j);
+ }
+
+ return 0;
+}
+
+static struct clk_hw *
+lochnagar_of_clk_hw_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct lochnagar_clk_priv *priv = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= ARRAY_SIZE(priv->lclks)) {
+ dev_err(priv->dev, "Invalid index %u\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &priv->lclks[idx].hw;
+}
+
+static int lochnagar_init_clks(struct lochnagar_clk_priv *priv)
+{
+ struct clk_init_data clk_init = {
+ .ops = &lochnagar_clk_ops,
+ .parent_names = priv->parents,
+ .num_parents = priv->nparents,
+ };
+ struct lochnagar_clk *lclk;
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->lclks); i++) {
+ lclk = &priv->lclks[i];
+
+ if (!lclk->name)
+ continue;
+
+ clk_init.name = lclk->name;
+
+ lclk->priv = priv;
+ lclk->hw.init = &clk_init;
+
+ ret = devm_clk_hw_register(priv->dev, &lclk->hw);
+ if (ret) {
+ dev_err(priv->dev, "Failed to register %s: %d\n",
+ lclk->name, ret);
+ return ret;
+ }
+ }
+
+ ret = devm_of_clk_add_hw_provider(priv->dev, lochnagar_of_clk_hw_get,
+ priv);
+ if (ret < 0)
+ dev_err(priv->dev, "Failed to register provider: %d\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar1-clk", .data = (void *)LOCHNAGAR1 },
+ { .compatible = "cirrus,lochnagar2-clk", .data = (void *)LOCHNAGAR2 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static int lochnagar_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct lochnagar_clk_priv *priv;
+ const struct of_device_id *of_id;
+ int ret;
+
+ of_id = of_match_device(lochnagar_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ priv->type = (enum lochnagar_type)of_id->data;
+
+ ret = lochnagar_init_parents(priv);
+ if (ret)
+ return ret;
+
+ return lochnagar_init_clks(priv);
+}
+
+static struct platform_driver lochnagar_clk_driver = {
+ .driver = {
+ .name = "lochnagar-clk",
+ .of_match_table = lochnagar_of_match,
+ },
+ .probe = lochnagar_clk_probe,
+};
+module_platform_driver(lochnagar_clk_driver);
+
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Clock driver for Cirrus Logic Lochnagar Board");
+MODULE_LICENSE("GPL v2");
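
The new Lochnagar driver exposes the audio development board's output clocks as regmap-backed gate-plus-mux clocks; the ln-* parent names above are placeholders that lochnagar_init_parents() overrides with whatever clocks the DT actually supplies under matching clock-names entries. A hedged consumer-side sketch of routing and enabling one output (the lookup names are assumptions; a real consumer takes them from its own clocks and clock-names properties):

    #include <linux/clk.h>

    static int example_route_mclk(struct device *dev)
    {
        struct clk *mclk = devm_clk_get(dev, "ln-cdc-mclk1");
        struct clk *src = devm_clk_get(dev, "ln-cdc-clkout");
        int ret;

        if (IS_ERR(mclk) || IS_ERR(src))
            return -ENODEV;     /* illustrative error handling */

        ret = clk_set_parent(mclk, src);
        if (ret)
            return ret;

        return clk_prepare_enable(mclk);
    }
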
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
new file mode 100644
index 000000000000..5fc78faf820c
--- /dev/null
+++ b/drivers/clk/clk-milbeaut.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Socionext Inc.
+ * Copyright (C) 2016 Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define M10V_CLKSEL1 0x0
+#define CLKSEL(n) (((n) - 1) * 4 + M10V_CLKSEL1)
+
+#define M10V_PLL1 "pll1"
+#define M10V_PLL1DIV2 "pll1-2"
+#define M10V_PLL2 "pll2"
+#define M10V_PLL2DIV2 "pll2-2"
+#define M10V_PLL6 "pll6"
+#define M10V_PLL6DIV2 "pll6-2"
+#define M10V_PLL6DIV3 "pll6-3"
+#define M10V_PLL7 "pll7"
+#define M10V_PLL7DIV2 "pll7-2"
+#define M10V_PLL7DIV5 "pll7-5"
+#define M10V_PLL9 "pll9"
+#define M10V_PLL10 "pll10"
+#define M10V_PLL10DIV2 "pll10-2"
+#define M10V_PLL11 "pll11"
+
+#define M10V_SPI_PARENT0 "spi-parent0"
+#define M10V_SPI_PARENT1 "spi-parent1"
+#define M10V_SPI_PARENT2 "spi-parent2"
+#define M10V_UHS1CLK2_PARENT0 "uhs1clk2-parent0"
+#define M10V_UHS1CLK2_PARENT1 "uhs1clk2-parent1"
+#define M10V_UHS1CLK2_PARENT2 "uhs1clk2-parent2"
+#define M10V_UHS1CLK1_PARENT0 "uhs1clk1-parent0"
+#define M10V_UHS1CLK1_PARENT1 "uhs1clk1-parent1"
+#define M10V_NFCLK_PARENT0 "nfclk-parent0"
+#define M10V_NFCLK_PARENT1 "nfclk-parent1"
+#define M10V_NFCLK_PARENT2 "nfclk-parent2"
+#define M10V_NFCLK_PARENT3 "nfclk-parent3"
+#define M10V_NFCLK_PARENT4 "nfclk-parent4"
+#define M10V_NFCLK_PARENT5 "nfclk-parent5"
+
+#define M10V_DCHREQ 1
+#define M10V_UPOLL_RATE 1
+#define M10V_UTIMEOUT 250
+
+#define M10V_EMMCCLK_ID 0
+#define M10V_ACLK_ID 1
+#define M10V_HCLK_ID 2
+#define M10V_PCLK_ID 3
+#define M10V_RCLK_ID 4
+#define M10V_SPICLK_ID 5
+#define M10V_NFCLK_ID 6
+#define M10V_UHS1CLK2_ID 7
+#define M10V_NUM_CLKS 8
+
+#define to_m10v_div(_hw) container_of(_hw, struct m10v_clk_divider, hw)
+
+static struct clk_hw_onecell_data *m10v_clk_data;
+
+static DEFINE_SPINLOCK(m10v_crglock);
+
+struct m10v_clk_div_factors {
+ const char *name;
+ const char *parent_name;
+ u32 offset;
+ u8 shift;
+ u8 width;
+ const struct clk_div_table *table;
+ unsigned long div_flags;
+ int onecell_idx;
+};
+
+struct m10v_clk_div_fixed_data {
+ const char *name;
+ const char *parent_name;
+ u8 div;
+ u8 mult;
+ int onecell_idx;
+};
+
+struct m10v_clk_mux_factors {
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ u32 offset;
+ u8 shift;
+ u8 mask;
+ u32 *table;
+ unsigned long mux_flags;
+ int onecell_idx;
+};
+
+static const struct clk_div_table emmcclk_table[] = {
+ { .val = 0, .div = 8 },
+ { .val = 1, .div = 9 },
+ { .val = 2, .div = 10 },
+ { .val = 3, .div = 15 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table mclk400_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table mclk200_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk400_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 3, .div = 4 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk300_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 3 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclk_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table aclkexs_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 4, .div = 5 },
+ { .val = 5, .div = 6 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table hclk_table[] = {
+ { .val = 7, .div = 8 },
+ { .val = 15, .div = 16 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table hclkbmh_table[] = {
+ { .val = 3, .div = 4 },
+ { .val = 7, .div = 8 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table pclk_table[] = {
+ { .val = 15, .div = 16 },
+ { .val = 31, .div = 32 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table rclk_table[] = {
+ { .val = 0, .div = 8 },
+ { .val = 1, .div = 16 },
+ { .val = 2, .div = 24 },
+ { .val = 3, .div = 32 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table uhs1clk0_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 3 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { .val = 4, .div = 16 },
+ { .div = 0 },
+};
+
+static const struct clk_div_table uhs2clk_table[] = {
+ { .val = 0, .div = 9 },
+ { .val = 1, .div = 10 },
+ { .val = 2, .div = 11 },
+ { .val = 3, .div = 12 },
+ { .val = 4, .div = 13 },
+ { .val = 5, .div = 14 },
+ { .val = 6, .div = 16 },
+ { .val = 7, .div = 18 },
+ { .div = 0 },
+};
+
+static u32 spi_mux_table[] = {0, 1, 2};
+static const char * const spi_mux_names[] = {
+ M10V_SPI_PARENT0, M10V_SPI_PARENT1, M10V_SPI_PARENT2
+};
+
+static u32 uhs1clk2_mux_table[] = {2, 3, 4, 8};
+static const char * const uhs1clk2_mux_names[] = {
+ M10V_UHS1CLK2_PARENT0, M10V_UHS1CLK2_PARENT1,
+ M10V_UHS1CLK2_PARENT2, M10V_PLL6DIV2
+};
+
+static u32 uhs1clk1_mux_table[] = {3, 4, 8};
+static const char * const uhs1clk1_mux_names[] = {
+ M10V_UHS1CLK1_PARENT0, M10V_UHS1CLK1_PARENT1, M10V_PLL6DIV2
+};
+
+static u32 nfclk_mux_table[] = {0, 1, 2, 3, 4, 8};
+static const char * const nfclk_mux_names[] = {
+ M10V_NFCLK_PARENT0, M10V_NFCLK_PARENT1, M10V_NFCLK_PARENT2,
+ M10V_NFCLK_PARENT3, M10V_NFCLK_PARENT4, M10V_NFCLK_PARENT5
+};
+
+static const struct m10v_clk_div_fixed_data m10v_pll_fixed_data[] = {
+ {M10V_PLL1, NULL, 1, 40, -1},
+ {M10V_PLL2, NULL, 1, 30, -1},
+ {M10V_PLL6, NULL, 1, 35, -1},
+ {M10V_PLL7, NULL, 1, 40, -1},
+ {M10V_PLL9, NULL, 1, 33, -1},
+ {M10V_PLL10, NULL, 5, 108, -1},
+ {M10V_PLL10DIV2, M10V_PLL10, 2, 1, -1},
+ {M10V_PLL11, NULL, 2, 75, -1},
+};
+
+static const struct m10v_clk_div_fixed_data m10v_div_fixed_data[] = {
+ {"usb2", NULL, 2, 1, -1},
+ {"pcisuppclk", NULL, 20, 1, -1},
+ {M10V_PLL1DIV2, M10V_PLL1, 2, 1, -1},
+ {M10V_PLL2DIV2, M10V_PLL2, 2, 1, -1},
+ {M10V_PLL6DIV2, M10V_PLL6, 2, 1, -1},
+ {M10V_PLL6DIV3, M10V_PLL6, 3, 1, -1},
+ {M10V_PLL7DIV2, M10V_PLL7, 2, 1, -1},
+ {M10V_PLL7DIV5, M10V_PLL7, 5, 1, -1},
+ {"ca7wd", M10V_PLL2DIV2, 12, 1, -1},
+ {"pclkca7wd", M10V_PLL1DIV2, 16, 1, -1},
+ {M10V_SPI_PARENT0, M10V_PLL10DIV2, 2, 1, -1},
+ {M10V_SPI_PARENT1, M10V_PLL10DIV2, 4, 1, -1},
+ {M10V_SPI_PARENT2, M10V_PLL7DIV2, 8, 1, -1},
+ {M10V_UHS1CLK2_PARENT0, M10V_PLL7, 4, 1, -1},
+ {M10V_UHS1CLK2_PARENT1, M10V_PLL7, 8, 1, -1},
+ {M10V_UHS1CLK2_PARENT2, M10V_PLL7, 16, 1, -1},
+ {M10V_UHS1CLK1_PARENT0, M10V_PLL7, 8, 1, -1},
+ {M10V_UHS1CLK1_PARENT1, M10V_PLL7, 16, 1, -1},
+ {M10V_NFCLK_PARENT0, M10V_PLL7DIV2, 8, 1, -1},
+ {M10V_NFCLK_PARENT1, M10V_PLL7DIV2, 10, 1, -1},
+ {M10V_NFCLK_PARENT2, M10V_PLL7DIV2, 13, 1, -1},
+ {M10V_NFCLK_PARENT3, M10V_PLL7DIV2, 16, 1, -1},
+ {M10V_NFCLK_PARENT4, M10V_PLL7DIV2, 40, 1, -1},
+ {M10V_NFCLK_PARENT5, M10V_PLL7DIV5, 10, 1, -1},
+};
+
+static const struct m10v_clk_div_factors m10v_div_factor_data[] = {
+ {"emmc", M10V_PLL11, CLKSEL(1), 28, 3, emmcclk_table, 0,
+ M10V_EMMCCLK_ID},
+ {"mclk400", M10V_PLL1DIV2, CLKSEL(10), 7, 3, mclk400_table, 0, -1},
+ {"mclk200", M10V_PLL1DIV2, CLKSEL(10), 3, 4, mclk200_table, 0, -1},
+ {"aclk400", M10V_PLL1DIV2, CLKSEL(10), 0, 3, aclk400_table, 0, -1},
+ {"aclk300", M10V_PLL2DIV2, CLKSEL(12), 0, 2, aclk300_table, 0, -1},
+ {"aclk", M10V_PLL1DIV2, CLKSEL(9), 20, 4, aclk_table, 0, M10V_ACLK_ID},
+ {"aclkexs", M10V_PLL1DIV2, CLKSEL(9), 16, 4, aclkexs_table, 0, -1},
+ {"hclk", M10V_PLL1DIV2, CLKSEL(9), 7, 5, hclk_table, 0, M10V_HCLK_ID},
+ {"hclkbmh", M10V_PLL1DIV2, CLKSEL(9), 12, 4, hclkbmh_table, 0, -1},
+ {"pclk", M10V_PLL1DIV2, CLKSEL(9), 0, 7, pclk_table, 0, M10V_PCLK_ID},
+ {"uhs1clk0", M10V_PLL7, CLKSEL(1), 3, 5, uhs1clk0_table, 0, -1},
+ {"uhs2clk", M10V_PLL6DIV3, CLKSEL(1), 18, 4, uhs2clk_table, 0, -1},
+};
+
+static const struct m10v_clk_mux_factors m10v_mux_factor_data[] = {
+ {"spi", spi_mux_names, ARRAY_SIZE(spi_mux_names),
+ CLKSEL(8), 3, 7, spi_mux_table, 0, M10V_SPICLK_ID},
+ {"uhs1clk2", uhs1clk2_mux_names, ARRAY_SIZE(uhs1clk2_mux_names),
+ CLKSEL(1), 13, 31, uhs1clk2_mux_table, 0, M10V_UHS1CLK2_ID},
+ {"uhs1clk1", uhs1clk1_mux_names, ARRAY_SIZE(uhs1clk1_mux_names),
+ CLKSEL(1), 8, 31, uhs1clk1_mux_table, 0, -1},
+ {"nfclk", nfclk_mux_names, ARRAY_SIZE(nfclk_mux_names),
+ CLKSEL(1), 22, 127, nfclk_mux_table, 0, M10V_NFCLK_ID},
+};
+
+static u8 m10v_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+
+ val = readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
+}
+
+static int m10v_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
+ unsigned long flags = 0;
+ u32 reg;
+ u32 write_en = BIT(fls(mux->mask) - 1);
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ reg = readl(mux->reg);
+ reg &= ~(mux->mask << mux->shift);
+
+ val = (val | write_en) << mux->shift;
+ reg |= val;
+ writel(reg, mux->reg);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
+
+static const struct clk_ops m10v_mux_ops = {
+ .get_parent = m10v_mux_get_parent,
+ .set_parent = m10v_mux_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_hw *m10v_clk_hw_register_mux(struct device *dev,
+ const char *name, const char * const *parent_names,
+ u8 num_parents, unsigned long flags, void __iomem *reg,
+ u8 shift, u32 mask, u8 clk_mux_flags, u32 *table,
+ spinlock_t *lock)
+{
+ struct clk_mux *mux;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &m10v_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ mux->reg = reg;
+ mux->shift = shift;
+ mux->mask = mask;
+ mux->flags = clk_mux_flags;
+ mux->lock = lock;
+ mux->table = table;
+ mux->hw.init = &init;
+
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(mux);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+
+}
+
+struct m10v_clk_divider {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock;
+ void __iomem *write_valid_reg;
+};
+
+static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+ unsigned int val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+
+ /* if read only, just return current value */
+ if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+ u32 val;
+
+ val = readl(divider->reg) >> divider->shift;
+ val &= clk_div_mask(divider->width);
+
+ return divider_ro_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags,
+ val);
+ }
+
+ return divider_round_rate(hw, rate, prate, divider->table,
+ divider->width, divider->flags);
+}
+
+static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct m10v_clk_divider *divider = to_m10v_div(hw);
+ int value;
+ unsigned long flags = 0;
+ u32 val;
+ u32 write_en = BIT(divider->width - 1);
+
+ value = divider_get_val(rate, parent_rate, divider->table,
+ divider->width, divider->flags);
+ if (value < 0)
+ return value;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ val = readl(divider->reg);
+ val &= ~(clk_div_mask(divider->width) << divider->shift);
+
+ val |= ((u32)value | write_en) << divider->shift;
+ writel(val, divider->reg);
+
+ if (divider->write_valid_reg) {
+ writel(M10V_DCHREQ, divider->write_valid_reg);
+ if (readl_poll_timeout(divider->write_valid_reg, val,
+ !val, M10V_UPOLL_RATE, M10V_UTIMEOUT))
+ pr_err("%s:%s couldn't stabilize\n",
+ __func__, divider->hw.init->name);
+ }
+
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return 0;
+}
+
+static const struct clk_ops m10v_clk_divider_ops = {
+ .recalc_rate = m10v_clk_divider_recalc_rate,
+ .round_rate = m10v_clk_divider_round_rate,
+ .set_rate = m10v_clk_divider_set_rate,
+};
+
+static struct clk_hw *m10v_clk_hw_register_divider(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock, void __iomem *write_valid_reg)
+{
+ struct m10v_clk_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &m10v_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ div->reg = reg;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+ div->write_valid_reg = write_valid_reg;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+
+static void m10v_reg_div_pre(const struct m10v_clk_div_factors *factors,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base)
+{
+ struct clk_hw *hw;
+ void __iomem *write_valid_reg;
+
+ /*
+ * The divider fields in CLKSEL(9) and CLKSEL(10) need an additional
+ * write to CLKSEL(11) before a new value takes effect.
+ */
+ if ((factors->offset == CLKSEL(9)) || (factors->offset == CLKSEL(10)))
+ write_valid_reg = base + CLKSEL(11);
+ else
+ write_valid_reg = NULL;
+
+ hw = m10v_clk_hw_register_divider(NULL, factors->name,
+ factors->parent_name,
+ CLK_SET_RATE_PARENT,
+ base + factors->offset,
+ factors->shift,
+ factors->width, factors->div_flags,
+ factors->table,
+ &m10v_crglock, write_valid_reg);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static void m10v_reg_fixed_pre(const struct m10v_clk_div_fixed_data *factors,
+ struct clk_hw_onecell_data *clk_data,
+ const char *parent_name)
+{
+ struct clk_hw *hw;
+ const char *pn = factors->parent_name ?
+ factors->parent_name : parent_name;
+
+ hw = clk_hw_register_fixed_factor(NULL, factors->name, pn, 0,
+ factors->mult, factors->div);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static void m10v_reg_mux_pre(const struct m10v_clk_mux_factors *factors,
+ struct clk_hw_onecell_data *clk_data,
+ void __iomem *base)
+{
+ struct clk_hw *hw;
+
+ hw = m10v_clk_hw_register_mux(NULL, factors->name,
+ factors->parent_names,
+ factors->num_parents,
+ CLK_SET_RATE_PARENT,
+ base + factors->offset, factors->shift,
+ factors->mask, factors->mux_flags,
+ factors->table, &m10v_crglock);
+
+ if (factors->onecell_idx >= 0)
+ clk_data->hws[factors->onecell_idx] = hw;
+}
+
+static int m10v_clk_probe(struct platform_device *pdev)
+{
+ int id;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ const char *parent_name;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ parent_name = of_clk_get_parent_name(np, 0);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_div_factor_data); ++id)
+ m10v_reg_div_pre(&m10v_div_factor_data[id],
+ m10v_clk_data, base);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_div_fixed_data); ++id)
+ m10v_reg_fixed_pre(&m10v_div_fixed_data[id],
+ m10v_clk_data, parent_name);
+
+ for (id = 0; id < ARRAY_SIZE(m10v_mux_factor_data); ++id)
+ m10v_reg_mux_pre(&m10v_mux_factor_data[id],
+ m10v_clk_data, base);
+
+ for (id = 0; id < M10V_NUM_CLKS; id++) {
+ if (IS_ERR(m10v_clk_data->hws[id]))
+ return PTR_ERR(m10v_clk_data->hws[id]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id m10v_clk_dt_ids[] = {
+ { .compatible = "socionext,milbeaut-m10v-ccu", },
+ { }
+};
+
+static struct platform_driver m10v_clk_driver = {
+ .probe = m10v_clk_probe,
+ .driver = {
+ .name = "m10v-ccu",
+ .of_match_table = m10v_clk_dt_ids,
+ },
+};
+builtin_platform_driver(m10v_clk_driver);
+
+static void __init m10v_cc_init(struct device_node *np)
+{
+ int id;
+ void __iomem *base;
+ const char *parent_name;
+ struct clk_hw *hw;
+
+ m10v_clk_data = kzalloc(struct_size(m10v_clk_data, hws,
+ M10V_NUM_CLKS),
+ GFP_KERNEL);
+
+ if (!m10v_clk_data)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ kfree(m10v_clk_data);
+ return;
+ }
+
+ parent_name = of_clk_get_parent_name(np, 0);
+ if (!parent_name) {
+ kfree(m10v_clk_data);
+ iounmap(base);
+ return;
+ }
+
+ /*
+ * This way all clocks fetched before the platform device probes,
+ * except those we assign here for early use, will be deferred.
+ */
+ for (id = 0; id < M10V_NUM_CLKS; id++)
+ m10v_clk_data->hws[id] = ERR_PTR(-EPROBE_DEFER);
+
+ /*
+ * The PLLs are set up by the bootloader, so this driver registers them
+ * as fixed-factor clocks.
+ */
+ for (id = 0; id < ARRAY_SIZE(m10v_pll_fixed_data); ++id)
+ m10v_reg_fixed_pre(&m10v_pll_fixed_data[id],
+ m10v_clk_data, parent_name);
+
+ /*
+ * The timer consumes "rclk", so it needs to be registered here.
+ */
+ hw = m10v_clk_hw_register_divider(NULL, "rclk", M10V_PLL10DIV2, 0,
+ base + CLKSEL(1), 0, 3, 0, rclk_table,
+ &m10v_crglock, NULL);
+ m10v_clk_data->hws[M10V_RCLK_ID] = hw;
+
+ m10v_clk_data->num = M10V_NUM_CLKS;
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, m10v_clk_data);
+}
+CLK_OF_DECLARE_DRIVER(m10v_cc, "socionext,milbeaut-m10v-ccu", m10v_cc_init);
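
The Milbeaut M10V driver registers in two stages: the CLK_OF_DECLARE_DRIVER hook above maps the CRG early, registers the bootloader-configured PLLs as fixed factors plus the timer's "rclk" divider, and fills every other onecell slot with -EPROBE_DEFER so consumers retry until the platform driver's probe has registered the remaining dividers and muxes. The shape of that early-init trick, reduced to its essentials (reusing the driver's own globals; illustrative only, not a drop-in replacement):

    static void __init example_early_provider(struct device_node *np)
    {
        int id;

        /* hand out -EPROBE_DEFER until probe fills the real clk_hws */
        for (id = 0; id < M10V_NUM_CLKS; id++)
            m10v_clk_data->hws[id] = ERR_PTR(-EPROBE_DEFER);

        m10v_clk_data->num = M10V_NUM_CLKS;
        of_clk_add_hw_provider(np, of_clk_hw_onecell_get, m10v_clk_data);
    }
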
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 3c86f859c199..e507aa958da9 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -7,10 +7,27 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>
+static inline u32 clk_mult_readl(struct clk_multiplier *mult)
+{
+ if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
+ return ioread32be(mult->reg);
+
+ return readl(mult->reg);
+}
+
+static inline void clk_mult_writel(struct clk_multiplier *mult, u32 val)
+{
+ if (mult->flags & CLK_MULTIPLIER_BIG_ENDIAN)
+ iowrite32be(val, mult->reg);
+ else
+ writel(val, mult->reg);
+}
+
static unsigned long __get_mult(struct clk_multiplier *mult,
unsigned long rate,
unsigned long parent_rate)
@@ -27,7 +44,7 @@ static unsigned long clk_multiplier_recalc_rate(struct clk_hw *hw,
struct clk_multiplier *mult = to_clk_multiplier(hw);
unsigned long val;
- val = clk_readl(mult->reg) >> mult->shift;
+ val = clk_mult_readl(mult) >> mult->shift;
val &= GENMASK(mult->width - 1, 0);
if (!val && mult->flags & CLK_MULTIPLIER_ZERO_BYPASS)
@@ -118,10 +135,10 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(mult->lock);
- val = clk_readl(mult->reg);
+ val = clk_mult_readl(mult);
val &= ~GENMASK(mult->width + mult->shift - 1, mult->shift);
val |= factor << mult->shift;
- clk_writel(val, mult->reg);
+ clk_mult_writel(mult, val);
if (mult->lock)
spin_unlock_irqrestore(mult->lock, flags);
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 2ad2df2e8909..66e91f740508 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -23,6 +23,22 @@
* parent - parent is adjustable through clk_set_parent
*/
+static inline u32 clk_mux_readl(struct clk_mux *mux)
+{
+ if (mux->flags & CLK_MUX_BIG_ENDIAN)
+ return ioread32be(mux->reg);
+
+ return readl(mux->reg);
+}
+
+static inline void clk_mux_writel(struct clk_mux *mux, u32 val)
+{
+ if (mux->flags & CLK_MUX_BIG_ENDIAN)
+ iowrite32be(val, mux->reg);
+ else
+ writel(val, mux->reg);
+}
+
int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
unsigned int val)
{
@@ -73,7 +89,7 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- val = clk_readl(mux->reg) >> mux->shift;
+ val = clk_mux_readl(mux) >> mux->shift;
val &= mux->mask;
return clk_mux_val_to_index(hw, mux->table, mux->flags, val);
@@ -94,12 +110,12 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
if (mux->flags & CLK_MUX_HIWORD_MASK) {
reg = mux->mask << (mux->shift + 16);
} else {
- reg = clk_readl(mux->reg);
+ reg = clk_mux_readl(mux);
reg &= ~(mux->mask << mux->shift);
}
val = val << mux->shift;
reg |= val;
- clk_writel(reg, mux->reg);
+ clk_mux_writel(mux, reg);
if (mux->lock)
spin_unlock_irqrestore(mux->lock, flags);
@@ -159,7 +175,7 @@ struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
init.ops = &clk_mux_ro_ops;
else
init.ops = &clk_mux_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 8cb9d117fdbf..02b472a1f9b0 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -101,7 +101,7 @@ static int clk_pwm_probe(struct platform_device *pdev)
init.name = clk_name;
init.ops = &clk_pwm_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.num_parents = 0;
clk_pwm->pwm = pwm;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 1212a9be7e80..4739a47ec8bd 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -34,6 +34,7 @@
#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1 4
#define CGB_PLL2 5
+#define MAX_PLL_DIV 16
struct clockgen_pll_div {
struct clk *clk;
@@ -41,7 +42,7 @@ struct clockgen_pll_div {
};
struct clockgen_pll {
- struct clockgen_pll_div div[8];
+ struct clockgen_pll_div div[MAX_PLL_DIV];
};
#define CLKSEL_VALID 1
@@ -79,7 +80,7 @@ struct clockgen_chipinfo {
const struct clockgen_muxinfo *cmux_groups[2];
const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
void (*init_periph)(struct clockgen *cg);
- int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
+ int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
u32 pll_mask; /* 1 << n bit set if PLL n is valid */
u32 flags; /* CG_xxx */
};
@@ -245,6 +246,58 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
},
};
+static const struct clockgen_muxinfo ls1028a_hwa1 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa2 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa3 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1028a_hwa4 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
static const struct clockgen_muxinfo ls1043a_hwa1 = {
{
{},
@@ -508,6 +561,21 @@ static const struct clockgen_chipinfo chipinfo[] = {
.pll_mask = 0x03,
},
{
+ .compat = "fsl,ls1028a-clockgen",
+ .cmux_groups = {
+ &clockgen2_cmux_cga12
+ },
+ .hwaccel = {
+ &ls1028a_hwa1, &ls1028a_hwa2,
+ &ls1028a_hwa3, &ls1028a_hwa4
+ },
+ .cmux_to_group = {
+ 0, 0, 0, 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+ },
+ {
.compat = "fsl,ls1043a-clockgen",
.init_periph = t2080_init_periph,
.cmux_groups = {
@@ -601,7 +669,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
&p4080_cmux_grp1, &p4080_cmux_grp2
},
.cmux_to_group = {
- 0, 0, 0, 0, 1, 1, 1, 1
+ 0, 0, 0, 0, 1, 1, 1, 1, -1
},
.pll_mask = 0x1f,
},
@@ -1128,7 +1196,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
int ret;
/*
- * For platform PLL, there are 8 divider clocks.
+ * For platform PLL, there are MAX_PLL_DIV divider clocks.
* For core PLL, there are 4 divider clocks at most.
*/
if (idx != PLATFORM_PLL && i >= 4)
@@ -1423,6 +1491,7 @@ CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
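
The clockgen changes above size cmux_to_group[] as NUM_CMUX + 1 so that even a fully populated table, like the p4080 entry that gains a trailing -1, still has room for the terminator the iteration depends on. A rough sketch of that convention; the helper and the local NUM_CMUX value are assumptions made for illustration, and the real walk lives in the driver's mux setup code:

#include <linux/printk.h>

#define NUM_CMUX	8	/* assumed to match the driver's definition */

static void sketch_walk_cmux_groups(const int *cmux_to_group)
{
	int i;

	for (i = 0; i < NUM_CMUX; i++) {
		if (cmux_to_group[i] == -1)	/* terminator, not a group id */
			break;
		pr_debug("cmux %d is clocked from PLL group %d\n",
			 i, cmux_to_group[i]);
	}
}
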
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index cdaa567c8042..fdac33a9be2f 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -300,6 +300,85 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
+static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
+ { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 20, "dtcmram", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" },
+ { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" },
+
+ { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 1, "jpeg", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" },
+ { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" },
+ { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" },
+
+ { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div",
+ CLK_IGNORE_UNUSED },
+ { STM32F4_RCC_AHB3ENR, 1, "qspi", "ahb_div",
+ CLK_IGNORE_UNUSED },
+
+ { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 10, "rtcapb", "apb1_mul" },
+ { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 13, "can3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 16, "spdifrx", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 27, "cec", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" },
+ { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" },
+
+ { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 7, "sdmmc2", "sdmux2" },
+ { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 11, "sdmmc1", "sdmux1" },
+ { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" },
+ { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
+ { STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
+};
+
/*
* This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx
* have gate bits associated with them. Its combined hweight is 71.
@@ -318,6 +397,10 @@ static const u64 stm32f746_gate_map[MAX_GATE_MAP] = { 0x000000f17ef417ffull,
0x0000000000000003ull,
0x04f77f833e01c9ffull };
+static const u64 stm32f769_gate_map[MAX_GATE_MAP] = { 0x000000f37ef417ffull,
+ 0x0000000000000003ull,
+ 0x44F77F833E01EDFFull };
+
static const u64 *stm32f4_gate_map;
static struct clk_hw **clks;
@@ -1048,6 +1131,10 @@ static const char *rtc_parents[4] = {
"no-clock", "lse", "lsi", "hse-rtc"
};
+static const char *pll_src = "pll-src";
+
+static const char *pllsrc_parent[2] = { "hsi", NULL };
+
static const char *dsi_parent[2] = { NULL, "pll-r" };
static const char *lcd_parent[1] = { "pllsai-r-div" };
@@ -1072,6 +1159,9 @@ static const char *uart_parents2[4] = { "apb1_div", "sys", "hsi", "lse" };
static const char *i2c_parents[4] = { "apb1_div", "sys", "hsi", "no-clock" };
+static const char * const dfsdm1_src[] = { "apb2_div", "sys" };
+static const char * const adsfdm1_parent[] = { "sai1_clk", "sai2_clk" };
+
struct stm32_aux_clk {
int idx;
const char *name;
@@ -1313,6 +1403,177 @@ static const struct stm32_aux_clk stm32f746_aux_clk[] = {
},
};
+static const struct stm32_aux_clk stm32f769_aux_clk[] = {
+ {
+ CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent),
+ NO_MUX, 0, 0,
+ STM32F4_RCC_APB2ENR, 26,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents),
+ STM32F4_RCC_CFGR, 23, 1,
+ NO_GATE, 0,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_SAI1, "sai1_clk", sai_parents, ARRAY_SIZE(sai_parents),
+ STM32F4_RCC_DCKCFGR, 20, 3,
+ STM32F4_RCC_APB2ENR, 22,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_SAI2, "sai2_clk", sai_parents, ARRAY_SIZE(sai_parents),
+ STM32F4_RCC_DCKCFGR, 22, 3,
+ STM32F4_RCC_APB2ENR, 23,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents),
+ STM32F7_RCC_DCKCFGR2, 27, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ NO_IDX, "sdmux1", sdmux_parents, ARRAY_SIZE(sdmux_parents),
+ STM32F7_RCC_DCKCFGR2, 28, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ NO_IDX, "sdmux2", sdmux_parents, ARRAY_SIZE(sdmux_parents),
+ STM32F7_RCC_DCKCFGR2, 29, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ CLK_HDMI_CEC, "hdmi-cec",
+ hdmi_parents, ARRAY_SIZE(hdmi_parents),
+ STM32F7_RCC_DCKCFGR2, 26, 1,
+ NO_GATE, 0,
+ 0
+ },
+ {
+ CLK_SPDIF, "spdif-rx",
+ spdif_parent, ARRAY_SIZE(spdif_parent),
+ STM32F7_RCC_DCKCFGR2, 22, 3,
+ STM32F4_RCC_APB2ENR, 23,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_USART1, "usart1",
+ uart_parents1, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 0, 3,
+ STM32F4_RCC_APB2ENR, 4,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART2, "usart2",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 2, 3,
+ STM32F4_RCC_APB1ENR, 17,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART3, "usart3",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 4, 3,
+ STM32F4_RCC_APB1ENR, 18,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART4, "uart4",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 6, 3,
+ STM32F4_RCC_APB1ENR, 19,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART5, "uart5",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 8, 3,
+ STM32F4_RCC_APB1ENR, 20,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_USART6, "usart6",
+ uart_parents1, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 10, 3,
+ STM32F4_RCC_APB2ENR, 5,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART7, "uart7",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 12, 3,
+ STM32F4_RCC_APB1ENR, 30,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_UART8, "uart8",
+ uart_parents2, ARRAY_SIZE(uart_parents1),
+ STM32F7_RCC_DCKCFGR2, 14, 3,
+ STM32F4_RCC_APB1ENR, 31,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C1, "i2c1",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 16, 3,
+ STM32F4_RCC_APB1ENR, 21,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C2, "i2c2",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 18, 3,
+ STM32F4_RCC_APB1ENR, 22,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C3, "i2c3",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 20, 3,
+ STM32F4_RCC_APB1ENR, 23,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_I2C4, "i2c4",
+ i2c_parents, ARRAY_SIZE(i2c_parents),
+ STM32F7_RCC_DCKCFGR2, 22, 3,
+ STM32F4_RCC_APB1ENR, 24,
+ CLK_SET_RATE_PARENT,
+ },
+ {
+ CLK_LPTIMER, "lptim1",
+ lptim_parent, ARRAY_SIZE(lptim_parent),
+ STM32F7_RCC_DCKCFGR2, 24, 3,
+ STM32F4_RCC_APB1ENR, 9,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_F769_DSI, "dsi",
+ dsi_parent, ARRAY_SIZE(dsi_parent),
+ STM32F7_RCC_DCKCFGR2, 0, 1,
+ STM32F4_RCC_APB2ENR, 27,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_DFSDM1, "dfsdm1",
+ dfsdm1_src, ARRAY_SIZE(dfsdm1_src),
+ STM32F4_RCC_DCKCFGR, 25, 1,
+ STM32F4_RCC_APB2ENR, 29,
+ CLK_SET_RATE_PARENT
+ },
+ {
+ CLK_ADFSDM1, "adfsdm1",
+ adsfdm1_parent, ARRAY_SIZE(adsfdm1_parent),
+ STM32F4_RCC_DCKCFGR, 26, 1,
+ STM32F4_RCC_APB2ENR, 29,
+ CLK_SET_RATE_PARENT
+ },
+};
+
static const struct stm32f4_clk_data stm32f429_clk_data = {
.end_primary = END_PRIMARY_CLK,
.gates_data = stm32f429_gates,
@@ -1343,6 +1604,16 @@ static const struct stm32f4_clk_data stm32f746_clk_data = {
.aux_clk_num = ARRAY_SIZE(stm32f746_aux_clk),
};
+static const struct stm32f4_clk_data stm32f769_clk_data = {
+ .end_primary = END_PRIMARY_CLK_F7,
+ .gates_data = stm32f769_gates,
+ .gates_map = stm32f769_gate_map,
+ .gates_num = ARRAY_SIZE(stm32f769_gates),
+ .pll_data = stm32f469_pll,
+ .aux_clk = stm32f769_aux_clk,
+ .aux_clk_num = ARRAY_SIZE(stm32f769_aux_clk),
+};
+
static const struct of_device_id stm32f4_of_match[] = {
{
.compatible = "st,stm32f42xx-rcc",
@@ -1356,6 +1627,10 @@ static const struct of_device_id stm32f4_of_match[] = {
.compatible = "st,stm32f746-rcc",
.data = &stm32f746_clk_data
},
+ {
+ .compatible = "st,stm32f769-rcc",
+ .data = &stm32f769_clk_data
+ },
{}
};
@@ -1427,9 +1702,8 @@ static void __init stm32f4_rcc_init(struct device_node *np)
int n;
const struct of_device_id *match;
const struct stm32f4_clk_data *data;
- unsigned long pllcfgr;
- const char *pllsrc;
unsigned long pllm;
+ struct clk_hw *pll_src_hw;
base = of_iomap(np, 0);
if (!base) {
@@ -1460,21 +1734,33 @@ static void __init stm32f4_rcc_init(struct device_node *np)
hse_clk = of_clk_get_parent_name(np, 0);
dsi_parent[0] = hse_clk;
+ pllsrc_parent[1] = hse_clk;
i2s_in_clk = of_clk_get_parent_name(np, 1);
i2s_parents[1] = i2s_in_clk;
sai_parents[2] = i2s_in_clk;
+ if (of_device_is_compatible(np, "st,stm32f769-rcc")) {
+ clk_hw_register_gate(NULL, "dfsdm1_apb", "apb2_div", 0,
+ base + STM32F4_RCC_APB2ENR, 29,
+ CLK_IGNORE_UNUSED, &stm32f4_clk_lock);
+ dsi_parent[0] = pll_src;
+ sai_parents[3] = pll_src;
+ }
+
clks[CLK_HSI] = clk_hw_register_fixed_rate_with_accuracy(NULL, "hsi",
NULL, 0, 16000000, 160000);
- pllcfgr = readl(base + STM32F4_RCC_PLLCFGR);
- pllsrc = pllcfgr & BIT(22) ? hse_clk : "hsi";
- pllm = pllcfgr & 0x3f;
+ pll_src_hw = clk_hw_register_mux(NULL, pll_src, pllsrc_parent,
+ ARRAY_SIZE(pllsrc_parent), 0,
+ base + STM32F4_RCC_PLLCFGR, 22, 1, 0,
+ &stm32f4_clk_lock);
+
+ pllm = readl(base + STM32F4_RCC_PLLCFGR) & 0x3f;
- clk_hw_register_fixed_factor(NULL, "vco_in", pllsrc,
- 0, 1, pllm);
+ clk_hw_register_fixed_factor(NULL, "vco_in", pll_src,
+ 0, 1, pllm);
stm32f4_rcc_register_pll("vco_in", &data->pll_data[0],
&stm32f4_clk_lock);
@@ -1612,12 +1898,16 @@ static void __init stm32f4_rcc_init(struct device_node *np)
clks[aux_clk->idx] = hw;
}
- if (of_device_is_compatible(np, "st,stm32f746-rcc"))
+ if (of_device_is_compatible(np, "st,stm32f746-rcc")) {
clk_hw_register_fixed_factor(NULL, "hsi_div488", "hsi", 0,
1, 488);
+ clks[CLK_PLL_SRC] = pll_src_hw;
+ }
+
of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL);
+
return;
fail:
kfree(clks);
@@ -1626,3 +1916,4 @@ fail:
CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init);
CLK_OF_DECLARE_DRIVER(stm32f746_rcc, "st,stm32f746-rcc", stm32f4_rcc_init);
+CLK_OF_DECLARE_DRIVER(stm32f769_rcc, "st,stm32f769-rcc", stm32f4_rcc_init);
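
With the PLL source now modelled as a real 1-bit mux on PLLCFGR bit 22, only the PLLM divider is still sampled once at init and the resulting rates are unchanged. As a hypothetical worked example, with an 8 MHz HSE selected as "pll-src" and PLLM programmed to 8, "vco_in" still comes out at 8 MHz / 8 = 1 MHz; the difference is that the source selection can be inspected and reparented through the clock framework, and on the F769 it is reused as a parent for the DSI and SAI clocks, instead of being decided by a one-shot readl() at boot.
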
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index a0ae8dc16909..a875649df8b8 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -1402,6 +1402,7 @@ enum {
G_CRYP1,
G_HASH1,
G_BKPSRAM,
+ G_DDRPERFM,
G_LAST
};
@@ -1488,6 +1489,7 @@ static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
K_GATE(G_STGENRO, RCC_APB4ENSETR, 20, 0),
K_MGATE(G_USBPHY, RCC_APB4ENSETR, 16, 0),
K_GATE(G_IWDG2, RCC_APB4ENSETR, 15, 0),
+ K_GATE(G_DDRPERFM, RCC_APB4ENSETR, 8, 0),
K_MGATE(G_DSI, RCC_APB4ENSETR, 4, 0),
K_MGATE(G_LTDC, RCC_APB4ENSETR, 0, 0),
@@ -1899,6 +1901,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
+ PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
/* Kernel clocks */
KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 531b030d4d4e..d975465fe2a8 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -262,7 +262,7 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = readl(fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
@@ -333,10 +333,10 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
else
__acquire(fd->lock);
- val = clk_readl(fd->reg);
+ val = readl(fd->reg);
val &= ~fd->mask;
val |= (scale << fd->shift);
- clk_writel(val, fd->reg);
+ writel(val, fd->reg);
if (fd->lock)
spin_unlock_irqrestore(fd->lock, flags);
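
These clk_readl()/clk_writel() conversions to readl()/writel(), here and in the hisilicon and i.MX hunks below, are behaviour-neutral for the ARM and arm64 platforms involved: the wrappers only ever deviated from plain little-endian MMIO accessors on PowerPC builds, and that case is now covered by the explicit big-endian flags being added to the basic clk types (see CLK_MUX_BIG_ENDIAN above).
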
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 96053a96fe2f..aa51756fd4d6 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -39,15 +39,23 @@ static LIST_HEAD(clk_notifier_list);
/*** private data structures ***/
+struct clk_parent_map {
+ const struct clk_hw *hw;
+ struct clk_core *core;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
struct clk_core {
const char *name;
const struct clk_ops *ops;
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct device_node *of_node;
struct clk_core *parent;
- const char **parent_names;
- struct clk_core **parents;
+ struct clk_parent_map *parents;
u8 num_parents;
u8 new_parent_index;
unsigned long rate;
@@ -316,17 +324,102 @@ static struct clk_core *clk_core_lookup(const char *name)
return NULL;
}
+/**
+ * clk_core_get - Find the clk_core parent of a clk
+ * @core: clk to find parent of
+ * @p_index: parent index to search for
+ *
+ * This is the preferred method for clk providers to find the parent of a
+ * clk when that parent is external to the clk controller. The parent_names
+ * array is indexed and treated as a local name matching a string in the device
+ * node's 'clock-names' property or as the 'con_id' matching the device's
+ * dev_name() in a clk_lookup. This allows clk providers to use their own
+ * namespace instead of looking for a globally unique parent string.
+ *
+ * For example the following DT snippet would allow a clock registered by the
+ * clock-controller@c001 that has a clk_init_data::parent_data array
+ * with 'xtal' in the 'name' member to find the clock provided by the
+ * clock-controller@f00abcd without needing to get the globally unique name of
+ * the xtal clk.
+ *
+ * parent: clock-controller@f00abcd {
+ * reg = <0xf00abcd 0xabcd>;
+ * #clock-cells = <0>;
+ * };
+ *
+ * clock-controller@c001 {
+ * reg = <0xc001 0xf00d>;
+ * clocks = <&parent>;
+ * clock-names = "xtal";
+ * #clock-cells = <1>;
+ * };
+ *
+ * Returns: -ENOENT when the provider can't be found or the clk doesn't
+ * exist in the provider. -EINVAL when the name can't be found. NULL when the
+ * provider knows about the clk but it isn't provided on this system.
+ * A valid clk_core pointer when the clk can be found in the provider.
+ */
+static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
+{
+ const char *name = core->parents[p_index].fw_name;
+ int index = core->parents[p_index].index;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
+ struct device *dev = core->dev;
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct device_node *np = core->of_node;
+
+ if (np && index >= 0)
+ hw = of_clk_get_hw(np, index, name);
+
+ /*
+ * If the DT search above couldn't find the provider or the provider
+ * didn't know about this clk, fallback to looking up via clkdev based
+ * clk_lookups
+ */
+ if (PTR_ERR(hw) == -ENOENT && name)
+ hw = clk_find_hw(dev_id, name);
+
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+
+ return hw->core;
+}
+
+static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
+{
+ struct clk_parent_map *entry = &core->parents[index];
+ struct clk_core *parent = ERR_PTR(-ENOENT);
+
+ if (entry->hw) {
+ parent = entry->hw->core;
+ /*
+ * We have a direct reference but it isn't registered yet?
+ * Orphan it and let clk_reparent() update the orphan status
+ * when the parent is registered.
+ */
+ if (!parent)
+ parent = ERR_PTR(-EPROBE_DEFER);
+ } else {
+ parent = clk_core_get(core, index);
+ if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+ parent = clk_core_lookup(entry->name);
+ }
+
+ /* Only cache it if it's not an error */
+ if (!IS_ERR(parent))
+ entry->core = parent;
+}
+
static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
u8 index)
{
- if (!core || index >= core->num_parents)
+ if (!core || index >= core->num_parents || !core->parents)
return NULL;
- if (!core->parents[index])
- core->parents[index] =
- clk_core_lookup(core->parent_names[index]);
+ if (!core->parents[index].core)
+ clk_core_fill_parent_index(core, index);
- return core->parents[index];
+ return core->parents[index].core;
}
struct clk_hw *
@@ -347,23 +440,18 @@ unsigned int __clk_get_enable_count(struct clk *clk)
static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
- unsigned long ret;
-
- if (!core) {
- ret = 0;
- goto out;
- }
-
- ret = core->rate;
-
- if (!core->num_parents)
- goto out;
+ if (!core)
+ return 0;
- if (!core->parent)
- ret = 0;
+ if (!core->num_parents || core->parent)
+ return core->rate;
-out:
- return ret;
+ /*
+ * Clk must have a parent because num_parents > 0 but the parent isn't
+ * known yet. Best to return 0 as the rate of this clk until we can
+ * properly recalc the rate based on the parent's rate.
+ */
+ return 0;
}
unsigned long clk_hw_get_rate(const struct clk_hw *hw)
@@ -524,9 +612,15 @@ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
/*
+ * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
+ * @hw: mux type clk to determine rate on
+ * @req: rate request, also used to return preferred parent and frequencies
+ *
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
+ *
+ * Returns: 0 on success, -EERROR value on error
*/
int __clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
@@ -1519,20 +1613,37 @@ static int clk_fetch_parent_index(struct clk_core *core,
return -EINVAL;
for (i = 0; i < core->num_parents; i++) {
- if (core->parents[i] == parent)
+ /* Found it first try! */
+ if (core->parents[i].core == parent)
return i;
- if (core->parents[i])
+ /* Something else is here, so keep looking */
+ if (core->parents[i].core)
continue;
- /* Fallback to comparing globally unique names */
- if (!strcmp(parent->name, core->parent_names[i])) {
- core->parents[i] = parent;
- return i;
+ /* Maybe core hasn't been cached but the hw is all we know? */
+ if (core->parents[i].hw) {
+ if (core->parents[i].hw == parent->hw)
+ break;
+
+ /* Didn't match, but we're expecting a clk_hw */
+ continue;
}
+
+ /* Maybe it hasn't been cached (clk_set_parent() path) */
+ if (parent == clk_core_get(core, i))
+ break;
+
+ /* Fallback to comparing globally unique names */
+ if (!strcmp(parent->name, core->parents[i].name))
+ break;
}
- return -EINVAL;
+ if (i == core->num_parents)
+ return -EINVAL;
+
+ core->parents[i].core = parent;
+ return i;
}
/*
@@ -2293,6 +2404,7 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
struct clk_core *core, *parent_core;
+ int i;
/* NULL clocks should be nops, so return success if either is NULL. */
if (!clk || !parent)
@@ -2305,8 +2417,11 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
if (core->parent == parent_core)
return true;
- return match_string(core->parent_names, core->num_parents,
- parent_core->name) >= 0;
+ for (i = 0; i < core->num_parents; i++)
+ if (!strcmp(core->parents[i].name, parent_core->name))
+ return true;
+
+ return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);
@@ -2850,7 +2965,6 @@ static const struct {
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
- ENTRY(CLK_IS_BASIC),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
@@ -2889,9 +3003,9 @@ static int possible_parents_show(struct seq_file *s, void *data)
int i;
for (i = 0; i < core->num_parents - 1; i++)
- seq_printf(s, "%s ", core->parent_names[i]);
+ seq_printf(s, "%s ", core->parents[i].name);
- seq_printf(s, "%s\n", core->parent_names[i]);
+ seq_printf(s, "%s\n", core->parents[i].name);
return 0;
}
@@ -3025,7 +3139,7 @@ static inline void clk_debug_unregister(struct clk_core *core)
*/
static int __clk_core_init(struct clk_core *core)
{
- int i, ret;
+ int ret;
struct clk_core *orphan;
struct hlist_node *tmp2;
unsigned long rate;
@@ -3079,12 +3193,6 @@ static int __clk_core_init(struct clk_core *core)
goto out;
}
- /* throw a WARN if any entries in parent_names are NULL */
- for (i = 0; i < core->num_parents; i++)
- WARN(!core->parent_names[i],
- "%s: invalid NULL in %s's .parent_names\n",
- __func__, core->name);
-
core->parent = __clk_init_parent(core);
/*
@@ -3313,20 +3421,104 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
return clk;
}
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
+{
+ const char *dst;
+
+ if (!src) {
+ if (must_exist)
+ return -EINVAL;
+ return 0;
+ }
+
+ *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int clk_core_populate_parent_map(struct clk_core *core)
+{
+ const struct clk_init_data *init = core->hw->init;
+ u8 num_parents = init->num_parents;
+ const char * const *parent_names = init->parent_names;
+ const struct clk_hw **parent_hws = init->parent_hws;
+ const struct clk_parent_data *parent_data = init->parent_data;
+ int i, ret = 0;
+ struct clk_parent_map *parents, *parent;
+
+ if (!num_parents)
+ return 0;
+
+ /*
+ * Avoid unnecessary string look-ups of clk_core's possible parents by
+ * having a cache of names/clk_hw pointers to clk_core pointers.
+ */
+ parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
+ core->parents = parents;
+ if (!parents)
+ return -ENOMEM;
+
+ /* Copy everything over because it might be __initdata */
+ for (i = 0, parent = parents; i < num_parents; i++, parent++) {
+ parent->index = -1;
+ if (parent_names) {
+ /* throw a WARN if any entries are NULL */
+ WARN(!parent_names[i],
+ "%s: invalid NULL in %s's .parent_names\n",
+ __func__, core->name);
+ ret = clk_cpy_name(&parent->name, parent_names[i],
+ true);
+ } else if (parent_data) {
+ parent->hw = parent_data[i].hw;
+ parent->index = parent_data[i].index;
+ ret = clk_cpy_name(&parent->fw_name,
+ parent_data[i].fw_name, false);
+ if (!ret)
+ ret = clk_cpy_name(&parent->name,
+ parent_data[i].name,
+ false);
+ } else if (parent_hws) {
+ parent->hw = parent_hws[i];
+ } else {
+ ret = -EINVAL;
+ WARN(1, "Must specify parents if num_parents > 0\n");
+ }
+
+ if (ret) {
+ do {
+ kfree_const(parents[i].name);
+ kfree_const(parents[i].fw_name);
+ } while (--i >= 0);
+ kfree(parents);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void clk_core_free_parent_map(struct clk_core *core)
+{
+ int i = core->num_parents;
+
+ if (!core->num_parents)
+ return;
+
+ while (--i >= 0) {
+ kfree_const(core->parents[i].name);
+ kfree_const(core->parents[i].fw_name);
+ }
+
+ kfree(core->parents);
+}
+
+static struct clk *
+__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
- int i, ret;
+ int ret;
struct clk_core *core;
core = kzalloc(sizeof(*core), GFP_KERNEL);
@@ -3350,6 +3542,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
+ core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
core->hw = hw;
@@ -3359,33 +3552,9 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
core->max_rate = ULONG_MAX;
hw->core = core;
- /* allocate local copy in case parent_names is __initdata */
- core->parent_names = kcalloc(core->num_parents, sizeof(char *),
- GFP_KERNEL);
-
- if (!core->parent_names) {
- ret = -ENOMEM;
- goto fail_parent_names;
- }
-
-
- /* copy each string name in case parent_names is __initdata */
- for (i = 0; i < core->num_parents; i++) {
- core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
- GFP_KERNEL);
- if (!core->parent_names[i]) {
- ret = -ENOMEM;
- goto fail_parent_names_copy;
- }
- }
-
- /* avoid unnecessary string look-ups of clk_core's possible parents. */
- core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
- GFP_KERNEL);
- if (!core->parents) {
- ret = -ENOMEM;
+ ret = clk_core_populate_parent_map(core);
+ if (ret)
goto fail_parents;
- };
INIT_HLIST_HEAD(&core->clks);
@@ -3396,7 +3565,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
hw->clk = alloc_clk(core, NULL, NULL);
if (IS_ERR(hw->clk)) {
ret = PTR_ERR(hw->clk);
- goto fail_parents;
+ goto fail_create_clk;
}
clk_core_link_consumer(hw->core, hw->clk);
@@ -3412,13 +3581,9 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
free_clk(hw->clk);
hw->clk = NULL;
+fail_create_clk:
+ clk_core_free_parent_map(core);
fail_parents:
- kfree(core->parents);
-fail_parent_names_copy:
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
- kfree(core->parent_names);
-fail_parent_names:
fail_ops:
kfree_const(core->name);
fail_name:
@@ -3426,6 +3591,24 @@ fail_name:
fail_out:
return ERR_PTR(ret);
}
+
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the *deprecated* interface for populating the clock tree with
+ * new clock nodes. Use clk_hw_register() instead.
+ *
+ * Returns: a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
+{
+ return __clk_register(dev, dev_of_node(dev), hw);
+}
EXPORT_SYMBOL_GPL(clk_register);
/**
@@ -3440,23 +3623,35 @@ EXPORT_SYMBOL_GPL(clk_register);
*/
int clk_hw_register(struct device *dev, struct clk_hw *hw)
{
- return PTR_ERR_OR_ZERO(clk_register(dev, hw));
+ return PTR_ERR_OR_ZERO(__clk_register(dev, dev_of_node(dev), hw));
}
EXPORT_SYMBOL_GPL(clk_hw_register);
+/*
+ * of_clk_hw_register - register a clk_hw and return an error code
+ * @node: device_node of device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * of_clk_hw_register() is the primary interface for populating the clock tree
+ * with new clock nodes when a struct device is not available, but a struct
+ * device_node is. It returns an integer equal to zero indicating success or
+ * less than zero indicating failure. Drivers must test for an error code after
+ * calling of_clk_hw_register().
+ */
+int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
+{
+ return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_register);
+
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
- int i = core->num_parents;
lockdep_assert_held(&prepare_lock);
- kfree(core->parents);
- while (--i >= 0)
- kfree_const(core->parent_names[i]);
-
- kfree(core->parent_names);
+ clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
@@ -3575,9 +3770,10 @@ static void devm_clk_hw_release(struct device *dev, void *res)
* @dev: device that is registering this clock
* @hw: link to hardware-specific clock data
*
- * Managed clk_register(). Clocks returned from this function are
- * automatically clk_unregister()ed on driver detach. See clk_register() for
- * more information.
+ * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
+ *
+ * Clocks returned from this function are automatically clk_unregister()ed on
+ * driver detach. See clk_register() for more information.
*/
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
@@ -3895,6 +4091,8 @@ EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
* @np: Device node pointer associated with clock provider
* @clk_src_get: callback for decoding clock
* @data: context pointer for @clk_src_get callback.
+ *
+ * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
*/
int of_clk_add_provider(struct device_node *np,
struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
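
The clk.c rework above replaces the flat parent_names cache with a per-parent struct clk_parent_map that can be filled from parent_names, parent_hws or the new clk_init_data::parent_data, with clk_core_get() resolving fw_name/index through the registering node's clocks/clock-names properties and falling back to a clkdev lookup via clk_find_hw(). A hypothetical provider-side sketch of the new description style; the names and the choice of clk_divider_ops are invented for illustration:

#include <linux/kernel.h>
#include <linux/clk-provider.h>

static const struct clk_parent_data sketch_parent_data[] = {
	/* resolved through this node's clocks/clock-names ("xtal"), else clkdev */
	{ .fw_name = "xtal", .index = 0 },
};

static const struct clk_init_data sketch_init = {
	.name = "sketch_div",
	.ops = &clk_divider_ops,
	.parent_data = sketch_parent_data,
	.num_parents = ARRAY_SIZE(sketch_parent_data),
};

A provider hangs such an init off its clk_hw and registers it with clk_hw_register(), or with the new of_clk_hw_register() when only a device_node is available; the updated kernel-doc marks clk_register() and devm_clk_register() as deprecated in favour of the clk_hw variants.
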
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 553f531cc232..d8400d623b34 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -19,6 +19,8 @@ static inline struct clk_hw *of_clk_get_hw(struct device_node *np,
}
#endif
+struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id);
+
#ifdef CONFIG_COMMON_CLK
struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
const char *dev_id, const char *con_id);
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 8c4435c53f09..2afc8df8acff 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -46,6 +46,8 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
if (con_id)
best_possible += 1;
+ lockdep_assert_held(&clocks_mutex);
+
list_for_each_entry(p, &clocks, node) {
match = 0;
if (p->dev_id) {
@@ -70,25 +72,26 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
return cl;
}
-static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
- const char *con_id)
+struct clk_hw *clk_find_hw(const char *dev_id, const char *con_id)
{
struct clk_lookup *cl;
- struct clk *clk = NULL;
+ struct clk_hw *hw = ERR_PTR(-ENOENT);
mutex_lock(&clocks_mutex);
-
cl = clk_find(dev_id, con_id);
- if (!cl)
- goto out;
-
- clk = clk_hw_create_clk(dev, cl->clk_hw, dev_id, con_id);
- if (IS_ERR(clk))
- cl = NULL;
-out:
+ if (cl)
+ hw = cl->clk_hw;
mutex_unlock(&clocks_mutex);
- return cl ? clk : ERR_PTR(-ENOENT);
+ return hw;
+}
+
+static struct clk *__clk_get_sys(struct device *dev, const char *dev_id,
+ const char *con_id)
+{
+ struct clk_hw *hw = clk_find_hw(dev_id, con_id);
+
+ return clk_hw_create_clk(dev, hw, dev_id, con_id);
}
struct clk *clk_get_sys(const char *dev_id, const char *con_id)
@@ -402,7 +405,10 @@ void devm_clk_release_clkdev(struct device *dev, const char *con_id,
struct clk_lookup *cl;
int rval;
+ mutex_lock(&clocks_mutex);
cl = clk_find(dev_id, con_id);
+ mutex_unlock(&clocks_mutex);
+
WARN_ON(!cl);
rval = devres_release(dev, devm_clkdev_release,
devm_clk_match_clkdev, cl);
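
clk_find() has always relied on the caller holding clocks_mutex and now lockdep-asserts it; the new clk_find_hw() takes the mutex around the lookup and returns the bare clk_hw (used by clk_core_get() as its clkdev fallback), __clk_get_sys() becomes a thin wrapper around it, and devm_clk_release_clkdev() now takes the same mutex around its lookup, which previously ran unlocked.
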
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index d1bbee19ed0f..bdc52364b421 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -160,10 +160,8 @@ static int __init da8xx_cfgchip_register_div4p5(struct device *dev,
struct da8xx_cfgchip_gate_clk *gate;
gate = da8xx_cfgchip_gate_clk_register(dev, &da8xx_div4p5ena_info, regmap);
- if (IS_ERR(gate))
- return PTR_ERR(gate);
- return 0;
+ return PTR_ERR_OR_ZERO(gate);
}
static int __init
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
index 0f7198191ea2..bf120bec59ae 100644
--- a/drivers/clk/davinci/pll-da850.c
+++ b/drivers/clk/davinci/pll-da850.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 7cc354dd29e2..c2a453caa131 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Clock driver for TI Davinci PSC controllers
*
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index cc5614567a70..69070f834391 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Clock driver for TI Davinci PSC controllers
*
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d413ade95c99..376be03bb546 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index c7ae653c8a16..67c495b67c18 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index e8b2c43b1bb8..89934bee7c9e 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index f40419959656..794eeff0d5d2 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -163,8 +163,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
"clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
{ HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
"clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
+ /*
+ * clk_gate_ufs_subsys is a system bus clock, mark it as critical
+ * clock and keep it on for system suspend and resume.
+ */
{ HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
- CLK_SET_RATE_PARENT, 0x50, 21, 0, },
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0x50, 21, 0, },
{ HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
CLK_SET_RATE_PARENT, 0x50, 28, 0, },
{ HI3660_PCLK_GATE_DSI1, "pclk_gate_dsi1", "clk_div_cfgbus",
diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c
index 5fdc267bb2da..ba6afad66a2b 100644
--- a/drivers/clk/hisilicon/clk-hisi-phase.c
+++ b/drivers/clk/hisilicon/clk-hisi-phase.c
@@ -75,10 +75,10 @@ static int hisi_clk_set_phase(struct clk_hw *hw, int degrees)
spin_lock_irqsave(phase->lock, flags);
- val = clk_readl(phase->reg);
+ val = readl(phase->reg);
val &= ~phase->mask;
val |= regval << phase->shift;
- clk_writel(val, phase->reg);
+ writel(val, phase->reg);
spin_unlock_irqrestore(phase->lock, flags);
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 0d5180fbe988..05641c64b317 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -35,7 +35,7 @@ obj-$(CONFIG_SOC_IMX25) += clk-imx25.o
obj-$(CONFIG_SOC_IMX27) += clk-imx27.o
obj-$(CONFIG_SOC_IMX31) += clk-imx31.o
obj-$(CONFIG_SOC_IMX35) += clk-imx35.o
-obj-$(CONFIG_SOC_IMX5) += clk-imx51-imx53.o
+obj-$(CONFIG_SOC_IMX5) += clk-imx5.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 574fac1a169f..388bdb94f841 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -3,9 +3,10 @@
* Copyright 2018 NXP
*/
+#include <linux/clk-provider.h>
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/clk-provider.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
index df1f8429fe16..2a8352a316c7 100644
--- a/drivers/clk/imx/clk-divider-gate.c
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -29,7 +29,7 @@ static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
struct clk_divider *div = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
if (!val)
return 0;
@@ -51,7 +51,7 @@ static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
if (!clk_hw_is_enabled(hw)) {
val = div_gate->cached_val;
} else {
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
}
@@ -87,10 +87,10 @@ static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(div->lock, flags);
if (clk_hw_is_enabled(hw)) {
- val = clk_readl(div->reg);
+ val = readl(div->reg);
val &= ~(clk_div_mask(div->width) << div->shift);
val |= (u32)value << div->shift;
- clk_writel(val, div->reg);
+ writel(val, div->reg);
} else {
div_gate->cached_val = value;
}
@@ -114,9 +114,9 @@ static int clk_divider_enable(struct clk_hw *hw)
spin_lock_irqsave(div->lock, flags);
/* restore div val */
- val = clk_readl(div->reg);
+ val = readl(div->reg);
val |= div_gate->cached_val << div->shift;
- clk_writel(val, div->reg);
+ writel(val, div->reg);
spin_unlock_irqrestore(div->lock, flags);
@@ -133,10 +133,10 @@ static void clk_divider_disable(struct clk_hw *hw)
spin_lock_irqsave(div->lock, flags);
/* store the current div val */
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
div_gate->cached_val = val;
- clk_writel(0, div->reg);
+ writel(0, div->reg);
spin_unlock_irqrestore(div->lock, flags);
}
@@ -146,7 +146,7 @@ static int clk_divider_is_enabled(struct clk_hw *hw)
struct clk_divider *div = to_clk_divider(hw);
u32 val;
- val = clk_readl(div->reg) >> div->shift;
+ val = readl(div->reg) >> div->shift;
val &= clk_div_mask(div->width);
return val ? 1 : 0;
@@ -206,7 +206,7 @@ struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
div_gate->divider.hw.init = &init;
div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
/* cache gate status */
- val = clk_readl(reg) >> shift;
+ val = readl(reg) >> shift;
val &= clk_div_mask(width);
div_gate->cached_val = val;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 76b9eb15604e..fece503e3610 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index e63188eb08ac..6e93284c397b 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx21-clock.h>
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 0a0ab95d16fe..a3753067fc12 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -3,6 +3,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx5.c
index e91c826bce70..c85ebd74a8a5 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx5.c
@@ -164,10 +164,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_CKIH1] = imx_obtain_fixed_clock("ckih1", 0);
clk[IMX5_CLK_CKIH2] = imx_obtain_fixed_clock("ckih2", 0);
- clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
- periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
- clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
- main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_PER_LP_APM] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
clk[IMX5_CLK_PER_PRED1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
@@ -191,16 +187,10 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_UART_PRED] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
clk[IMX5_CLK_UART_ROOT] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
- clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
- standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
- clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
- standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
clk[IMX5_CLK_ESDHC_A_PRED] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
clk[IMX5_CLK_ESDHC_A_PODF] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
clk[IMX5_CLK_ESDHC_B_PRED] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
clk[IMX5_CLK_ESDHC_B_PODF] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
- clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
- clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_EMI_SEL] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
@@ -311,10 +301,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk_register_clkdev(clk[IMX5_CLK_CPU_PODF], NULL, "cpu0");
clk_register_clkdev(clk[IMX5_CLK_GPC_DVFS], "gpc_dvfs", NULL);
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
- clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
-
/* move usb phy clk to 24MHz */
clk_set_parent(clk[IMX5_CLK_USB_PHY_SEL], clk[IMX5_CLK_OSC]);
}
@@ -342,8 +328,21 @@ static void __init mx50_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ /*
+ * This clock is called periph_clk in the i.MX50 Reference Manual, but
+ * it comes closest in scope to the main_bus_clk of i.MX51 and i.MX53
+ */
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 21, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 20, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
@@ -372,6 +371,10 @@ static void __init mx50_clocks_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 200MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);
@@ -410,6 +413,10 @@ static void __init mx51_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
clk[IMX5_CLK_IPU_DI0_SEL] = imx_clk_mux_flags("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
@@ -422,6 +429,12 @@ static void __init mx51_clocks_init(struct device_node *np)
mx51_tve_sel, ARRAY_SIZE(mx51_tve_sel));
clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
@@ -452,6 +465,10 @@ static void __init mx51_clocks_init(struct device_node *np)
/* set the usboh3 parent to pll2_sw */
clk_set_parent(clk[IMX5_CLK_USBOH3_SEL], clk[IMX5_CLK_PLL2_SW]);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 166.25MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 166250000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 166250000);
@@ -506,6 +523,10 @@ static void __init mx53_clocks_init(struct device_node *np)
mx5_clocks_common_init(ccm_base);
+ clk[IMX5_CLK_PERIPH_APM] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[IMX5_CLK_MAIN_BUS] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
clk[IMX5_CLK_LP_APM] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 10, 1,
lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
clk[IMX5_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
@@ -527,6 +548,12 @@ static void __init mx53_clocks_init(struct device_node *np)
mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel), CLK_SET_RATE_PARENT);
clk[IMX5_CLK_TVE_GATE] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
clk[IMX5_CLK_TVE_PRED] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+ clk[IMX5_CLK_ESDHC_A_SEL] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_B_SEL] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[IMX5_CLK_ESDHC_C_SEL] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[IMX5_CLK_ESDHC_D_SEL] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
clk[IMX5_CLK_ESDHC1_PER_GATE] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
clk[IMX5_CLK_ESDHC2_PER_GATE] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
clk[IMX5_CLK_ESDHC3_PER_GATE] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
@@ -589,6 +616,10 @@ static void __init mx53_clocks_init(struct device_node *np)
clk_data.clk_num = ARRAY_SIZE(clk);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[IMX5_CLK_ESDHC_A_SEL], clk[IMX5_CLK_PLL2_SW]);
+ clk_set_parent(clk[IMX5_CLK_ESDHC_B_SEL], clk[IMX5_CLK_PLL2_SW]);
+
/* set SDHC root clock to 200MHZ*/
clk_set_rate(clk[IMX5_CLK_ESDHC_A_PODF], 200000000);
clk_set_rate(clk[IMX5_CLK_ESDHC_B_PODF], 200000000);
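
The ESDHC and main-bus/periph-apm muxes move out of mx5_clocks_common_init() because their CCM layouts are not actually common: on i.MX50 the main_bus mux is two bits wide and selects among the standard PLLs, and esdhc_a/c/d_sel sit one bit higher than on i.MX51/i.MX53, so each SoC init now registers these muxes against its own register map and repeats the "SDHC parents to PLL2" reparenting after they exist.
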
diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
index 3bd2044cf25c..7eea448cb9a9 100644
--- a/drivers/clk/imx/clk-imx6sll.c
+++ b/drivers/clk/imx/clk-imx6sll.c
@@ -76,6 +76,20 @@ static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
+static struct clk ** const uart_clks[] __initconst = {
+ &clks[IMX6SLL_CLK_UART1_IPG],
+ &clks[IMX6SLL_CLK_UART1_SERIAL],
+ &clks[IMX6SLL_CLK_UART2_IPG],
+ &clks[IMX6SLL_CLK_UART2_SERIAL],
+ &clks[IMX6SLL_CLK_UART3_IPG],
+ &clks[IMX6SLL_CLK_UART3_SERIAL],
+ &clks[IMX6SLL_CLK_UART4_IPG],
+ &clks[IMX6SLL_CLK_UART4_SERIAL],
+ &clks[IMX6SLL_CLK_UART5_IPG],
+ &clks[IMX6SLL_CLK_UART5_SERIAL],
+ NULL
+};
+
static void __init imx6sll_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -268,7 +282,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clks[IMX6SLL_CLK_GPT_BUS] = imx_clk_gate2("gpt1_bus", "perclk", base + 0x6c, 20);
clks[IMX6SLL_CLK_GPT_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6SLL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
- clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serail", "uart_podf", base + 0x6c, 24);
+ clks[IMX6SLL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
clks[IMX6SLL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
clks[IMX6SLL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
@@ -334,6 +348,8 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
clk_data.clk_num = ARRAY_SIZE(clks);
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ imx_register_uart_clocks(uart_clks);
+
/* Lower the AHB clock rate before changing the clock source. */
clk_set_rate(clks[IMX6SLL_CLK_AHB], 99000000);
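
The new NULL-terminated uart_clks[] table is handed to imx_register_uart_clocks(), the helper the other i.MX clock drivers already use to keep the UART ipg/serial gates enabled across early boot (for example when earlycon is active); the NULL sentinel is what lets the helper walk the list without a separate count. The same file also gets the "uart4_serail" gate name corrected to "uart4_serial".
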
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index cfbd8d4edb85..5b8a0c729f90 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -417,8 +417,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f);
clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1);
clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0);
- clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f);
- clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_video_main", "osc", base + 0x130, 0x7f);
+ clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_audio_main", "osc", base + 0xf0, 0x7f);
+ clks[IMX7D_PLL_VIDEO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV_IMX7, "pll_video_main", "osc", base + 0x130, 0x7f);
clks[IMX7D_PLL_ARM_MAIN_BYPASS] = imx_clk_mux_flags("pll_arm_main_bypass", base + 0x60, 16, 1, pll_arm_bypass_sel, ARRAY_SIZE(pll_arm_bypass_sel), CLK_SET_RATE_PARENT);
clks[IMX7D_PLL_DRAM_MAIN_BYPASS] = imx_clk_mux_flags("pll_dram_main_bypass", base + 0x70, 16, 1, pll_dram_bypass_sel, ARRAY_SIZE(pll_dram_bypass_sel), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index ce306631e844..66682100f14c 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -151,7 +151,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
clks[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
clks[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
clks[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
- clks[IMX7ULP_CLK_SNVS] = imx_clk_hw_gate("snvs", "nic1_bus_clk", base + 0x8c, 30);
clks[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
clks[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
clks[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index a9b3888aef0c..daf1841b2adb 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -458,6 +458,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+ clks[IMX8MQ_CLK_DSI_IPG_DIV] = imx_clk_divider2("dsi_ipg_div", "dsi_ahb", base + 0x9280, 0, 6);
clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index 7e9134b205ab..a03bbed662c6 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -43,7 +44,7 @@ static int clk_pfdv2_wait(struct clk_pfdv2 *pfd)
{
u32 val;
- return readl_poll_timeout(pfd->reg, val, val & pfd->vld_bit,
+ return readl_poll_timeout(pfd->reg, val, val & (1 << pfd->vld_bit),
0, LOCK_TIMEOUT_US);
}
@@ -55,7 +56,7 @@ static int clk_pfdv2_enable(struct clk_hw *hw)
spin_lock_irqsave(&pfd_lock, flags);
val = readl_relaxed(pfd->reg);
- val &= ~pfd->gate_bit;
+ val &= ~(1 << pfd->gate_bit);
writel_relaxed(val, pfd->reg);
spin_unlock_irqrestore(&pfd_lock, flags);
@@ -70,7 +71,7 @@ static void clk_pfdv2_disable(struct clk_hw *hw)
spin_lock_irqsave(&pfd_lock, flags);
val = readl_relaxed(pfd->reg);
- val |= pfd->gate_bit;
+ val |= (1 << pfd->gate_bit);
writel_relaxed(val, pfd->reg);
spin_unlock_irqrestore(&pfd_lock, flags);
}
@@ -123,7 +124,7 @@ static int clk_pfdv2_is_enabled(struct clk_hw *hw)
{
struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
- if (readl_relaxed(pfd->reg) & pfd->gate_bit)
+ if (readl_relaxed(pfd->reg) & (1 << pfd->gate_bit))
return 0;
return 1;
@@ -180,7 +181,7 @@ struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
return ERR_PTR(-ENOMEM);
pfd->reg = reg;
- pfd->gate_bit = 1 << ((idx + 1) * 8 - 1);
+ pfd->gate_bit = (idx + 1) * 8 - 1;
pfd->vld_bit = pfd->gate_bit - 1;
pfd->frac_off = idx * 8;
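
The clk-pfdv2.c hunk above stops storing pre-shifted masks in gate_bit/vld_bit and keeps plain bit indices instead, building the mask with (1 << bit) at each register access. A minimal, self-contained sketch of that pattern follows; struct pfd_bits and its helpers are invented stand-ins for illustration, not the kernel driver's types.

#include <stdint.h>
#include <stdio.h>

/* Store bit *indices* and build the masks at the point of use,
 * as the hunk above now does. */
struct pfd_bits {
	uint8_t gate_bit;	/* gate flag: MSB of the per-PFD byte */
	uint8_t vld_bit;	/* "valid/locked" flag, one bit below it */
};

static void pfd_bits_init(struct pfd_bits *b, unsigned int idx)
{
	b->gate_bit = (idx + 1) * 8 - 1;	/* idx 0..3 -> bit 7/15/23/31 */
	b->vld_bit = b->gate_bit - 1;		/* idx 0..3 -> bit 6/14/22/30 */
}

static int pfd_is_enabled(const struct pfd_bits *b, uint32_t reg)
{
	/* a set gate bit means the PFD output is gated off */
	return !(reg & (1u << b->gate_bit));
}

int main(void)
{
	struct pfd_bits b;

	pfd_bits_init(&b, 2);	/* third PFD slice */
	printf("gate=%d vld=%d enabled_when_reg_is_0=%d\n",
	       b.gate_bit, b.vld_bit, pfd_is_enabled(&b, 0));
	return 0;
}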
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 113d71042199..b7213023b238 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -74,10 +74,9 @@ static unsigned long clk_pll1416x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
- u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div;
+ u32 mdiv, pdiv, sdiv, pll_div;
u64 fvco = parent_rate;
- pll_gnrl = readl_relaxed(pll->base);
pll_div = readl_relaxed(pll->base + 4);
mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
@@ -93,11 +92,10 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pll14xx *pll = to_clk_pll14xx(hw);
- u32 mdiv, pdiv, sdiv, pll_gnrl, pll_div_ctl0, pll_div_ctl1;
+ u32 mdiv, pdiv, sdiv, pll_div_ctl0, pll_div_ctl1;
short int kdiv;
u64 fvco = parent_rate;
- pll_gnrl = readl_relaxed(pll->base);
pll_div_ctl0 = readl_relaxed(pll->base + 4);
pll_div_ctl1 = readl_relaxed(pll->base + 8);
mdiv = (pll_div_ctl0 & MDIV_MASK) >> MDIV_SHIFT;
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 9af62ee8f347..4110e713d259 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -20,6 +20,8 @@
#define PLL_NUM_OFFSET 0x10
#define PLL_DENOM_OFFSET 0x20
+#define PLL_IMX7_NUM_OFFSET 0x20
+#define PLL_IMX7_DENOM_OFFSET 0x30
#define PLL_VF610_NUM_OFFSET 0x20
#define PLL_VF610_DENOM_OFFSET 0x30
@@ -49,6 +51,8 @@ struct clk_pllv3 {
u32 div_mask;
u32 div_shift;
unsigned long ref_clock;
+ u32 num_offset;
+ u32 denom_offset;
};
#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
@@ -219,8 +223,8 @@ static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv3 *pll = to_clk_pllv3(hw);
- u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
- u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 mfn = readl_relaxed(pll->base + pll->num_offset);
+ u32 mfd = readl_relaxed(pll->base + pll->denom_offset);
u32 div = readl_relaxed(pll->base) & pll->div_mask;
u64 temp64 = (u64)parent_rate;
@@ -289,8 +293,8 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
val &= ~pll->div_mask;
val |= div;
writel_relaxed(val, pll->base);
- writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+ writel_relaxed(mfn, pll->base + pll->num_offset);
+ writel_relaxed(mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
@@ -352,8 +356,8 @@ static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw,
struct clk_pllv3 *pll = to_clk_pllv3(hw);
struct clk_pllv3_vf610_mf mf;
- mf.mfn = readl_relaxed(pll->base + PLL_VF610_NUM_OFFSET);
- mf.mfd = readl_relaxed(pll->base + PLL_VF610_DENOM_OFFSET);
+ mf.mfn = readl_relaxed(pll->base + pll->num_offset);
+ mf.mfd = readl_relaxed(pll->base + pll->denom_offset);
mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20;
return clk_pllv3_vf610_mf_to_rate(parent_rate, mf);
@@ -382,8 +386,8 @@ static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate,
val |= pll->div_mask; /* set bit for mfi=22 */
writel_relaxed(val, pll->base);
- writel_relaxed(mf.mfn, pll->base + PLL_VF610_NUM_OFFSET);
- writel_relaxed(mf.mfd, pll->base + PLL_VF610_DENOM_OFFSET);
+ writel_relaxed(mf.mfn, pll->base + pll->num_offset);
+ writel_relaxed(mf.mfd, pll->base + pll->denom_offset);
return clk_pllv3_wait_lock(pll);
}
@@ -426,6 +430,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
return ERR_PTR(-ENOMEM);
pll->power_bit = BM_PLL_POWER;
+ pll->num_offset = PLL_NUM_OFFSET;
+ pll->denom_offset = PLL_DENOM_OFFSET;
switch (type) {
case IMX_PLLV3_SYS:
@@ -433,13 +439,20 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_SYS_VF610:
ops = &clk_pllv3_vf610_ops;
+ pll->num_offset = PLL_VF610_NUM_OFFSET;
+ pll->denom_offset = PLL_VF610_DENOM_OFFSET;
break;
case IMX_PLLV3_USB_VF610:
pll->div_shift = 1;
+ /* fall through */
case IMX_PLLV3_USB:
ops = &clk_pllv3_ops;
pll->powerup_set = true;
break;
+ case IMX_PLLV3_AV_IMX7:
+ pll->num_offset = PLL_IMX7_NUM_OFFSET;
+ pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
+ /* fall through */
case IMX_PLLV3_AV:
ops = &clk_pllv3_av_ops;
break;
@@ -454,6 +467,8 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_DDR_IMX7:
pll->power_bit = IMX7_DDR_PLL_POWER;
+ pll->num_offset = PLL_IMX7_NUM_OFFSET;
+ pll->denom_offset = PLL_IMX7_DENOM_OFFSET;
ops = &clk_pllv3_av_ops;
break;
default:
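
The pllv3 rework above keeps per-instance num_offset/denom_offset fields: the common defaults are assigned before the switch, and the VF610/i.MX7 cases only override them, with IMX_PLLV3_AV_IMX7 falling through to reuse the normal AV ops. A compressed illustration of that default-then-override shape is below; the enum, struct and function names are invented, while the offsets are the ones defined in the hunk.

#include <stdio.h>

enum pll_type { PLL_GENERIC, PLL_AV, PLL_AV_IMX7, PLL_SYS_VF610 };

struct offsets { unsigned num, denom; };

static struct offsets pick_offsets(enum pll_type type)
{
	struct offsets o = { 0x10, 0x20 };	/* PLL_NUM/DENOM_OFFSET defaults */

	switch (type) {
	case PLL_SYS_VF610:
		o.num = 0x20; o.denom = 0x30;	/* PLL_VF610_* offsets */
		break;
	case PLL_AV_IMX7:
		o.num = 0x20; o.denom = 0x30;	/* PLL_IMX7_* offsets */
		/* fall through: otherwise behaves like a plain AV PLL */
	case PLL_AV:
		break;
	default:
		break;
	}
	return o;
}

int main(void)
{
	struct offsets o = pick_offsets(PLL_AV_IMX7);

	printf("num=0x%x denom=0x%x\n", o.num, o.denom);
	return 0;
}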
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index d38bc9f87c1d..8155b12cf0e1 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -30,6 +31,9 @@
/* PLL Denominator Register (xPLLDENOM) */
#define PLL_DENOM_OFFSET 0x14
+#define MAX_MFD 0x3fffffff
+#define DEFAULT_MFD 1000000
+
struct clk_pllv4 {
struct clk_hw hw;
void __iomem *base;
@@ -64,13 +68,20 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- u32 div;
+ u32 mult, mfn, mfd;
+ u64 temp64;
+
+ mult = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ mult &= BM_PLL_MULT;
+ mult >>= BP_PLL_MULT;
- div = readl_relaxed(pll->base + PLL_CFG_OFFSET);
- div &= BM_PLL_MULT;
- div >>= BP_PLL_MULT;
+ mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ temp64 = parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
- return parent_rate * div;
+ return (parent_rate * mult) + (u32)temp64;
}
static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -78,14 +89,46 @@ static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
{
unsigned long parent_rate = *prate;
unsigned long round_rate, i;
+ u32 mfn, mfd = DEFAULT_MFD;
+ bool found = false;
+ u64 temp64;
for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
round_rate = parent_rate * pllv4_mult_table[i];
- if (rate >= round_rate)
- return round_rate;
+ if (rate >= round_rate) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_warn("%s: unable to round rate %lu, parent rate %lu\n",
+ clk_hw_get_name(hw), rate, parent_rate);
+ return 0;
}
- return round_rate;
+ if (parent_rate <= MAX_MFD)
+ mfd = parent_rate;
+
+ temp64 = (u64)(rate - round_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ /*
+ * NOTE: The value of numerator must always be configured to be
+ * less than the value of the denominator. If we can't get a proper
+ * pair of mfn/mfd, we simply return the round_rate without using
+ * the frac part.
+ */
+ if (mfn >= mfd)
+ return round_rate;
+
+ temp64 = (u64)parent_rate;
+ temp64 *= mfn;
+ do_div(temp64, mfd);
+
+ return round_rate + (u32)temp64;
}
static bool clk_pllv4_is_valid_mult(unsigned int mult)
@@ -105,18 +148,30 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pllv4 *pll = to_clk_pllv4(hw);
- u32 val, mult;
+ u32 val, mult, mfn, mfd = DEFAULT_MFD;
+ u64 temp64;
mult = rate / parent_rate;
if (!clk_pllv4_is_valid_mult(mult))
return -EINVAL;
+ if (parent_rate <= MAX_MFD)
+ mfd = parent_rate;
+
+ temp64 = (u64)(rate - mult * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
val = readl_relaxed(pll->base + PLL_CFG_OFFSET);
val &= ~BM_PLL_MULT;
val |= mult << BP_PLL_MULT;
writel_relaxed(val, pll->base + PLL_CFG_OFFSET);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+ writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
return 0;
}
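
The pllv4 changes above add an mfn/mfd fractional term on top of the integer multiplier, i.e. rate = parent * mult + parent * mfn / mfd, with mfn derived from the leftover (rate - parent * mult) scaled by mfd. A rough host-side model of that arithmetic is sketched below in plain C with no kernel helpers; the MAX_MFD and DEFAULT_MFD values are the ones from the hunk, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_MFD		0x3fffffffu
#define DEFAULT_MFD	1000000u

/* Approximate a target rate as parent*mult + parent*mfn/mfd, assuming
 * mult was already chosen as target/parent (so target >= parent*mult). */
static uint64_t pllv4_approx(uint64_t parent, uint64_t target, uint32_t mult,
			     uint32_t *mfn_out, uint32_t *mfd_out)
{
	uint32_t mfd = DEFAULT_MFD, mfn;
	uint64_t base = parent * mult;

	if (parent <= MAX_MFD)
		mfd = (uint32_t)parent;	/* exact mfn when the parent fits */

	mfn = (uint32_t)(((target - base) * mfd) / parent);
	if (mfn >= mfd)			/* frac part unusable, integer only */
		mfn = 0;

	*mfn_out = mfn;
	*mfd_out = mfd;
	return base + (parent * mfn) / mfd;
}

int main(void)
{
	uint32_t mfn, mfd;
	uint64_t got = pllv4_approx(24000000ull, 491520000ull, 20, &mfn, &mfd);

	printf("mfn=%u mfd=%u rate=%llu\n", mfn, mfd,
	       (unsigned long long)got);
	return 0;
}

Choosing mfd equal to the parent rate whenever it fits below MAX_MFD makes the mfn computation exact, which is presumably why the hunk prefers it over DEFAULT_MFD.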
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index 9dfd03a95557..5d65f65c512e 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
@@ -348,7 +349,7 @@ static unsigned long clk_sccg_pll_recalc_rate(struct clk_hw *hw,
temp64 = parent_rate;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
if (val & SSCG_PLL_BYPASS2_MASK) {
temp64 = parent_rate;
} else if (val & SSCG_PLL_BYPASS1_MASK) {
@@ -371,10 +372,10 @@ static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
/* set bypass here too since the parent might be the same */
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
val &= ~SSCG_PLL_BYPASS_MASK;
val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
- clk_writel(val, pll->base + PLL_CFG0);
+ writel(val, pll->base + PLL_CFG0);
val = readl_relaxed(pll->base + PLL_CFG2);
val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
@@ -395,7 +396,7 @@ static u8 clk_sccg_pll_get_parent(struct clk_hw *hw)
u32 val;
u8 ret = pll->parent;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
if (val & SSCG_PLL_BYPASS2_MASK)
ret = pll->bypass2;
else if (val & SSCG_PLL_BYPASS1_MASK)
@@ -408,10 +409,10 @@ static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
u32 val;
- val = clk_readl(pll->base + PLL_CFG0);
+ val = readl(pll->base + PLL_CFG0);
val &= ~SSCG_PLL_BYPASS_MASK;
val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
- clk_writel(val, pll->base + PLL_CFG0);
+ writel(val, pll->base + PLL_CFG0);
return clk_sccg_pll_wait_lock(pll);
}
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 5748ec8673e4..8639a8f2153e 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -77,6 +77,7 @@ enum imx_pllv3_type {
IMX_PLLV3_ENET_IMX7,
IMX_PLLV3_SYS_VF610,
IMX_PLLV3_DDR_IMX7,
+ IMX_PLLV3_AV_IMX7,
};
struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
@@ -138,11 +139,6 @@ static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
}
-static inline struct clk_hw *imx_get_clk_hw_fixed(const char *name, int rate)
-{
- return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
-}
-
static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
int num_parents)
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 510b685212d3..b80af61dc1f3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 584ff4ff81c7..8901ea0295b7 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -205,6 +205,12 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
.parents = { JZ4725B_CLK_EXT512, JZ4725B_CLK_OSC32K, -1, -1 },
.mux = { CGU_REG_OPCR, 2, 1},
},
+
+ [JZ4725B_CLK_UDC_PHY] = {
+ "udc_phy", CGU_CLK_GATE,
+ .parents = { JZ4725B_CLK_EXT, -1, -1, -1 },
+ .gate = { CGU_REG_OPCR, 6, true },
+ },
};
static void __init jz4725b_cgu_init(struct device_node *np)
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index b86edd328249..25f7df028e67 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4740-cgu.h>
#include <asm/mach-jz4740/clock.h>
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index bf46a0df2004..dfce740c25a8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/jz4770-cgu.h>
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 6427be117ff1..d03b7fcfd82b 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
index 3466f7320b40..22a165ef65cf 100644
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -9,6 +9,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <loongson1.h>
#include "clk.h"
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 53edade25a1d..4d8a9aef95f6 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -216,4 +216,87 @@ config COMMON_CLK_MT8173
default ARCH_MEDIATEK
---help---
This driver supports MediaTek MT8173 clocks.
+
+config COMMON_CLK_MT8183
+ bool "Clock driver for MediaTek MT8183"
+ depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM64
+ help
+ This driver supports MediaTek MT8183 basic clocks.
+
+config COMMON_CLK_MT8183_AUDIOSYS
+ bool "Clock driver for MediaTek MT8183 audiosys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 audiosys clocks.
+
+config COMMON_CLK_MT8183_CAMSYS
+ bool "Clock driver for MediaTek MT8183 camsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 camsys clocks.
+
+config COMMON_CLK_MT8183_IMGSYS
+ bool "Clock driver for MediaTek MT8183 imgsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 imgsys clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE0
+ bool "Clock driver for MediaTek MT8183 ipu_core0"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_core0 clocks.
+
+config COMMON_CLK_MT8183_IPU_CORE1
+ bool "Clock driver for MediaTek MT8183 ipu_core1"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_core1 clocks.
+
+config COMMON_CLK_MT8183_IPU_ADL
+ bool "Clock driver for MediaTek MT8183 ipu_adl"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_adl clocks.
+
+config COMMON_CLK_MT8183_IPU_CONN
+ bool "Clock driver for MediaTek MT8183 ipu_conn"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 ipu_conn clocks.
+
+config COMMON_CLK_MT8183_MFGCFG
+ bool "Clock driver for MediaTek MT8183 mfgcfg"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 mfgcfg clocks.
+
+config COMMON_CLK_MT8183_MMSYS
+ bool "Clock driver for MediaTek MT8183 mmsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 mmsys clocks.
+
+config COMMON_CLK_MT8183_VDECSYS
+ bool "Clock driver for MediaTek MT8183 vdecsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 vdecsys clocks.
+
+config COMMON_CLK_MT8183_VENCSYS
+ bool "Clock driver for MediaTek MT8183 vencsys"
+ depends on COMMON_CLK_MT8183
+ help
+ This driver supports MediaTek MT8183 vencsys clocks.
+
+config COMMON_CLK_MT8516
+ bool "Clock driver for MediaTek MT8516"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8516 clocks.
+
endmenu
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index ee4410ff43ab..f74937b35f68 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o
+obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o clk-mux.o
+
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o
@@ -31,3 +32,16 @@ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+obj-$(CONFIG_COMMON_CLK_MT8183_AUDIOSYS) += clk-mt8183-audio.o
+obj-$(CONFIG_COMMON_CLK_MT8183_CAMSYS) += clk-mt8183-cam.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IMGSYS) += clk-mt8183-img.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE0) += clk-mt8183-ipu0.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CORE1) += clk-mt8183-ipu1.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_ADL) += clk-mt8183-ipu_adl.o
+obj-$(CONFIG_COMMON_CLK_MT8183_IPU_CONN) += clk-mt8183-ipu_conn.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o
+obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 9f766dfe1d57..ab240163f9f8 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -50,4 +50,18 @@ struct clk *mtk_clk_register_gate(
const struct clk_ops *ops,
unsigned long flags);
+#define GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, \
+ _ops, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = _regs, \
+ .shift = _shift, \
+ .ops = _ops, \
+ .flags = _flags, \
+ }
+
+#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops) \
+ GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
+
#endif /* __DRV_CLK_GATE_H */
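
GATE_MTK()/GATE_MTK_FLAGS() above are plain designated-initializer shorthands used by all of the new MT8183 gate tables that follow. A toy stand-alone version is shown below to make the expansion concrete; gate_stub and the dummy reg/ops objects are invented cut-down stand-ins, not the real struct mtk_gate from clk-mtk.h.

#include <stdio.h>

struct gate_stub {
	int id;
	const char *name;
	const char *parent_name;
	const void *regs;
	int shift;
	const void *ops;
	unsigned long flags;
};

#define GATE_STUB_FLAGS(_id, _name, _parent, _regs, _shift, _ops, _flags) { \
		.id = _id,						\
		.name = _name,						\
		.parent_name = _parent,					\
		.regs = _regs,						\
		.shift = _shift,					\
		.ops = _ops,						\
		.flags = _flags,					\
	}

#define GATE_STUB(_id, _name, _parent, _regs, _shift, _ops) \
	GATE_STUB_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)

static const int dummy_regs, dummy_ops;

/* Two entries in the style of the audio gate table further down. */
static const struct gate_stub demo[] = {
	GATE_STUB(1, "aud_afe", "audio_sel", &dummy_regs, 2, &dummy_ops),
	GATE_STUB(2, "aud_22m", "aud_eng1_sel", &dummy_regs, 8, &dummy_ops),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(demo) / sizeof(demo[0]); i++)
		printf("%s: parent=%s shift=%d\n",
		       demo[i].name, demo[i].parent_name, demo[i].shift);
	return 0;
}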
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
new file mode 100644
index 000000000000..c87450180b7b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs audio0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs audio1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_AUDIO0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_AUDIO1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &audio1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate audio_clks[] = {
+ /* AUDIO0 */
+ GATE_AUDIO0(CLK_AUDIO_AFE, "aud_afe", "audio_sel",
+ 2),
+ GATE_AUDIO0(CLK_AUDIO_22M, "aud_22m", "aud_eng1_sel",
+ 8),
+ GATE_AUDIO0(CLK_AUDIO_24M, "aud_24m", "aud_eng2_sel",
+ 9),
+ GATE_AUDIO0(CLK_AUDIO_APLL2_TUNER, "aud_apll2_tuner", "aud_eng2_sel",
+ 18),
+ GATE_AUDIO0(CLK_AUDIO_APLL_TUNER, "aud_apll_tuner", "aud_eng1_sel",
+ 19),
+ GATE_AUDIO0(CLK_AUDIO_TDM, "aud_tdm", "apll12_divb",
+ 20),
+ GATE_AUDIO0(CLK_AUDIO_ADC, "aud_adc", "audio_sel",
+ 24),
+ GATE_AUDIO0(CLK_AUDIO_DAC, "aud_dac", "audio_sel",
+ 25),
+ GATE_AUDIO0(CLK_AUDIO_DAC_PREDIS, "aud_dac_predis", "audio_sel",
+ 26),
+ GATE_AUDIO0(CLK_AUDIO_TML, "aud_tml", "audio_sel",
+ 27),
+ /* AUDIO1 */
+ GATE_AUDIO1(CLK_AUDIO_I2S1, "aud_i2s1", "audio_sel",
+ 4),
+ GATE_AUDIO1(CLK_AUDIO_I2S2, "aud_i2s2", "audio_sel",
+ 5),
+ GATE_AUDIO1(CLK_AUDIO_I2S3, "aud_i2s3", "audio_sel",
+ 6),
+ GATE_AUDIO1(CLK_AUDIO_I2S4, "aud_i2s4", "audio_sel",
+ 7),
+ GATE_AUDIO1(CLK_AUDIO_PDN_ADDA6_ADC, "aud_pdn_adda6_adc", "audio_sel",
+ 20),
+};
+
+static int clk_mt8183_audio_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
+
+ mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ r = devm_of_platform_populate(&pdev->dev);
+ if (r)
+ of_clk_del_provider(node);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt8183_audio[] = {
+ { .compatible = "mediatek,mt8183-audiosys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_audio_drv = {
+ .probe = clk_mt8183_audio_probe,
+ .driver = {
+ .name = "clk-mt8183-audio",
+ .of_match_table = of_match_clk_mt8183_audio,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_audio_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c
new file mode 100644
index 000000000000..8643802c4471
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-cam.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB6, "cam_larb6", "cam_sel", 0),
+ GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "cam_sel", 1),
+ GATE_CAM(CLK_CAM_LARB3, "cam_larb3", "cam_sel", 2),
+ GATE_CAM(CLK_CAM_CAM, "cam_cam", "cam_sel", 6),
+ GATE_CAM(CLK_CAM_CAMTG, "cam_camtg", "cam_sel", 7),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "cam_sel", 8),
+ GATE_CAM(CLK_CAM_CAMSV0, "cam_camsv0", "cam_sel", 9),
+ GATE_CAM(CLK_CAM_CAMSV1, "cam_camsv1", "cam_sel", 10),
+ GATE_CAM(CLK_CAM_CAMSV2, "cam_camsv2", "cam_sel", 11),
+ GATE_CAM(CLK_CAM_CCU, "cam_ccu", "cam_sel", 12),
+};
+
+static int clk_mt8183_cam_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK);
+
+ mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_cam[] = {
+ { .compatible = "mediatek,mt8183-camsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_cam_drv = {
+ .probe = clk_mt8183_cam_probe,
+ .driver = {
+ .name = "clk-mt8183-cam",
+ .of_match_table = of_match_clk_mt8183_cam,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c
new file mode 100644
index 000000000000..470d676a4a10
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-img.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB5, "img_larb5", "img_sel", 0),
+ GATE_IMG(CLK_IMG_LARB2, "img_larb2", "img_sel", 1),
+ GATE_IMG(CLK_IMG_DIP, "img_dip", "img_sel", 2),
+ GATE_IMG(CLK_IMG_FDVT, "img_fdvt", "img_sel", 3),
+ GATE_IMG(CLK_IMG_DPE, "img_dpe", "img_sel", 4),
+ GATE_IMG(CLK_IMG_RSC, "img_rsc", "img_sel", 5),
+ GATE_IMG(CLK_IMG_MFB, "img_mfb", "img_sel", 6),
+ GATE_IMG(CLK_IMG_WPE_A, "img_wpe_a", "img_sel", 7),
+ GATE_IMG(CLK_IMG_WPE_B, "img_wpe_b", "img_sel", 8),
+ GATE_IMG(CLK_IMG_OWE, "img_owe", "img_sel", 9),
+};
+
+static int clk_mt8183_img_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
+
+ mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_img[] = {
+ { .compatible = "mediatek,mt8183-imgsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_img_drv = {
+ .probe = clk_mt8183_img_probe,
+ .driver = {
+ .name = "clk-mt8183-img",
+ .of_match_table = of_match_clk_mt8183_img,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c
new file mode 100644
index 000000000000..c5cb76fc9e5e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core0_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_core0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core0_clks[] = {
+ GATE_IPU_CORE0(CLK_IPU_CORE0_JTAG, "ipu_core0_jtag", "dsp_sel", 0),
+ GATE_IPU_CORE0(CLK_IPU_CORE0_AXI, "ipu_core0_axi", "dsp_sel", 1),
+ GATE_IPU_CORE0(CLK_IPU_CORE0_IPU, "ipu_core0_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = {
+ { .compatible = "mediatek,mt8183-ipu_core0", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_core0_drv = {
+ .probe = clk_mt8183_ipu_core0_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_core0",
+ .of_match_table = of_match_clk_mt8183_ipu_core0,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c
new file mode 100644
index 000000000000..8fd5fe002890
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_core1_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPU_CORE1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_core1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ipu_core1_clks[] = {
+ GATE_IPU_CORE1(CLK_IPU_CORE1_JTAG, "ipu_core1_jtag", "dsp_sel", 0),
+ GATE_IPU_CORE1(CLK_IPU_CORE1_AXI, "ipu_core1_axi", "dsp_sel", 1),
+ GATE_IPU_CORE1(CLK_IPU_CORE1_IPU, "ipu_core1_ipu", "dsp_sel", 2),
+};
+
+static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = {
+ { .compatible = "mediatek,mt8183-ipu_core1", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_core1_drv = {
+ .probe = clk_mt8183_ipu_core1_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_core1",
+ .of_match_table = of_match_clk_mt8183_ipu_core1,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_core1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
new file mode 100644
index 000000000000..3f37d0ef1df1
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_adl_cg_regs = {
+ .set_ofs = 0x204,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x204,
+};
+
+#define GATE_IPU_ADL_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_adl_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_adl_clks[] = {
+ GATE_IPU_ADL_I(CLK_IPU_ADL_CABGEN, "ipu_adl_cabgen", "dsp_sel", 24),
+};
+
+static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = {
+ { .compatible = "mediatek,mt8183-ipu_adl", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_adl_drv = {
+ .probe = clk_mt8183_ipu_adl_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_adl",
+ .of_match_table = of_match_clk_mt8183_ipu_adl,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_adl_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
new file mode 100644
index 000000000000..7e0eef79c461
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs ipu_conn_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ipu_conn_apb_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x18,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi1_cg_regs = {
+ .set_ofs = 0x1c,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x1c,
+};
+
+static const struct mtk_gate_regs ipu_conn_axi2_cg_regs = {
+ .set_ofs = 0x20,
+ .clr_ofs = 0x20,
+ .sta_ofs = 0x20,
+};
+
+#define GATE_IPU_CONN(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_IPU_CONN_APB(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_apb_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr)
+
+#define GATE_IPU_CONN_AXI_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_IPU_CONN_AXI2_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipu_conn_axi2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate ipu_conn_clks[] = {
+ GATE_IPU_CONN(CLK_IPU_CONN_IPU,
+ "ipu_conn_ipu", "dsp_sel", 0),
+ GATE_IPU_CONN(CLK_IPU_CONN_AHB,
+ "ipu_conn_ahb", "dsp_sel", 1),
+ GATE_IPU_CONN(CLK_IPU_CONN_AXI,
+ "ipu_conn_axi", "dsp_sel", 2),
+ GATE_IPU_CONN(CLK_IPU_CONN_ISP,
+ "ipu_conn_isp", "dsp_sel", 3),
+ GATE_IPU_CONN(CLK_IPU_CONN_CAM_ADL,
+ "ipu_conn_cam_adl", "dsp_sel", 4),
+ GATE_IPU_CONN(CLK_IPU_CONN_IMG_ADL,
+ "ipu_conn_img_adl", "dsp_sel", 5),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_DAP_RX,
+ "ipu_conn_dap_rx", "dsp1_sel", 0),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AXI,
+ "ipu_conn_apb2axi", "dsp1_sel", 3),
+ GATE_IPU_CONN_APB(CLK_IPU_CONN_APB2AHB,
+ "ipu_conn_apb2ahb", "dsp1_sel", 20),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU_CAB1TO2,
+ "ipu_conn_ipu_cab1to2", "dsp1_sel", 6),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU1_CAB1TO2,
+ "ipu_conn_ipu1_cab1to2", "dsp1_sel", 13),
+ GATE_IPU_CONN_AXI_I(CLK_IPU_CONN_IPU2_CAB1TO2,
+ "ipu_conn_ipu2_cab1to2", "dsp1_sel", 20),
+ GATE_IPU_CONN_AXI1_I(CLK_IPU_CONN_CAB3TO3,
+ "ipu_conn_cab3to3", "dsp1_sel", 0),
+ GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB2TO1,
+ "ipu_conn_cab2to1", "dsp1_sel", 14),
+ GATE_IPU_CONN_AXI2_I(CLK_IPU_CONN_CAB3TO1_SLICE,
+ "ipu_conn_cab3to1_slice", "dsp1_sel", 17),
+};
+
+static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK);
+
+ mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = {
+ { .compatible = "mediatek,mt8183-ipu_conn", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_ipu_conn_drv = {
+ .probe = clk_mt8183_ipu_conn_probe,
+ .driver = {
+ .name = "clk-mt8183-ipu_conn",
+ .of_match_table = of_match_clk_mt8183_ipu_conn,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_ipu_conn_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
new file mode 100644
index 000000000000..99a6b020833e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "mfg_sel", 0)
+};
+
+static int clk_mt8183_mfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
+
+ mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mfg[] = {
+ { .compatible = "mediatek,mt8183-mfgcfg", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_mfg_drv = {
+ .probe = clk_mt8183_mfg_probe,
+ .driver = {
+ .name = "clk-mt8183-mfg",
+ .of_match_table = of_match_clk_mt8183_mfg,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
new file mode 100644
index 000000000000..720c696b506d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_MM1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 0),
+ GATE_MM0(CLK_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 1),
+ GATE_MM0(CLK_MM_SMI_LARB1, "mm_smi_larb1", "mm_sel", 2),
+ GATE_MM0(CLK_MM_GALS_COMM0, "mm_gals_comm0", "mm_sel", 3),
+ GATE_MM0(CLK_MM_GALS_COMM1, "mm_gals_comm1", "mm_sel", 4),
+ GATE_MM0(CLK_MM_GALS_CCU2MM, "mm_gals_ccu2mm", "mm_sel", 5),
+ GATE_MM0(CLK_MM_GALS_IPU12MM, "mm_gals_ipu12mm", "mm_sel", 6),
+ GATE_MM0(CLK_MM_GALS_IMG2MM, "mm_gals_img2mm", "mm_sel", 7),
+ GATE_MM0(CLK_MM_GALS_CAM2MM, "mm_gals_cam2mm", "mm_sel", 8),
+ GATE_MM0(CLK_MM_GALS_IPU2MM, "mm_gals_ipu2mm", "mm_sel", 9),
+ GATE_MM0(CLK_MM_MDP_DL_TXCK, "mm_mdp_dl_txck", "mm_sel", 10),
+ GATE_MM0(CLK_MM_IPU_DL_TXCK, "mm_ipu_dl_txck", "mm_sel", 11),
+ GATE_MM0(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "mm_sel", 12),
+ GATE_MM0(CLK_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 13),
+ GATE_MM0(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "mm_sel", 14),
+ GATE_MM0(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "mm_sel", 15),
+ GATE_MM0(CLK_MM_MDP_TDSHP, "mm_mdp_tdshp", "mm_sel", 16),
+ GATE_MM0(CLK_MM_MDP_WROT0, "mm_mdp_wrot0", "mm_sel", 17),
+ GATE_MM0(CLK_MM_MDP_WDMA0, "mm_mdp_wdma0", "mm_sel", 18),
+ GATE_MM0(CLK_MM_FAKE_ENG, "mm_fake_eng", "mm_sel", 19),
+ GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "mm_sel", 20),
+ GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "mm_sel", 21),
+ GATE_MM0(CLK_MM_DISP_OVL1_2L, "mm_disp_ovl1_2l", "mm_sel", 22),
+ GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "mm_sel", 23),
+ GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 24),
+ GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "mm_sel", 25),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "mm_sel", 26),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "mm_sel", 27),
+ GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "mm_sel", 28),
+ GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "mm_sel", 29),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "mm_sel", 30),
+ GATE_MM0(CLK_MM_DISP_SPLIT, "mm_disp_split", "mm_sel", 31),
+ /* MM1 */
+ GATE_MM1(CLK_MM_DSI0_MM, "mm_dsi0_mm", "mm_sel", 0),
+ GATE_MM1(CLK_MM_DSI0_IF, "mm_dsi0_if", "mm_sel", 1),
+ GATE_MM1(CLK_MM_DPI_MM, "mm_dpi_mm", "mm_sel", 2),
+ GATE_MM1(CLK_MM_DPI_IF, "mm_dpi_if", "dpi0_sel", 3),
+ GATE_MM1(CLK_MM_FAKE_ENG2, "mm_fake_eng2", "mm_sel", 4),
+ GATE_MM1(CLK_MM_MDP_DL_RX, "mm_mdp_dl_rx", "mm_sel", 5),
+ GATE_MM1(CLK_MM_IPU_DL_RX, "mm_ipu_dl_rx", "mm_sel", 6),
+ GATE_MM1(CLK_MM_26M, "mm_26m", "f_f26m_ck", 7),
+ GATE_MM1(CLK_MM_MMSYS_R2Y, "mm_mmsys_r2y", "mm_sel", 8),
+ GATE_MM1(CLK_MM_DISP_RSZ, "mm_disp_rsz", "mm_sel", 9),
+ GATE_MM1(CLK_MM_MDP_AAL, "mm_mdp_aal", "mm_sel", 10),
+ GATE_MM1(CLK_MM_MDP_CCORR, "mm_mdp_ccorr", "mm_sel", 11),
+ GATE_MM1(CLK_MM_DBI_MM, "mm_dbi_mm", "mm_sel", 12),
+ GATE_MM1(CLK_MM_DBI_IF, "mm_dbi_if", "dpi0_sel", 13),
+};
+
+static int clk_mt8183_mm_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
+
+ mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_mm[] = {
+ { .compatible = "mediatek,mt8183-mmsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_mm_drv = {
+ .probe = clk_mt8183_mm_probe,
+ .driver = {
+ .name = "clk-mt8183-mm",
+ .of_match_table = of_match_clk_mt8183_mm,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_mm_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c
new file mode 100644
index 000000000000..6250fd1e0edc
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-vdec.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0_I(CLK_VDEC_VDEC, "vdec_vdec", "mm_sel", 0),
+ /* VDEC1 */
+ GATE_VDEC1_I(CLK_VDEC_LARB1, "vdec_larb1", "mm_sel", 0),
+};
+
+static int clk_mt8183_vdec_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
+
+ mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_vdec[] = {
+ { .compatible = "mediatek,mt8183-vdecsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_vdec_drv = {
+ .probe = clk_mt8183_vdec_probe,
+ .driver = {
+ .name = "clk-mt8183-vdec",
+ .of_match_table = of_match_clk_mt8183_vdec,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c
new file mode 100644
index 000000000000..6678ef03fab2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183-venc.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC_I(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC_I(CLK_VENC_LARB, "venc_larb",
+ "mm_sel", 0),
+ GATE_VENC_I(CLK_VENC_VENC, "venc_venc",
+ "mm_sel", 4),
+ GATE_VENC_I(CLK_VENC_JPGENC, "venc_jpgenc",
+ "mm_sel", 8),
+};
+
+static int clk_mt8183_venc_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
+
+ mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183_venc[] = {
+ { .compatible = "mediatek,mt8183-vencsys", },
+ {}
+};
+
+static struct platform_driver clk_mt8183_venc_drv = {
+ .probe = clk_mt8183_venc_probe,
+ .driver = {
+ .name = "clk-mt8183-venc",
+ .of_match_table = of_match_clk_mt8183_venc,
+ },
+};
+
+builtin_platform_driver(clk_mt8183_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
new file mode 100644
index 000000000000..9d8651033ae9
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2018 MediaTek Inc.
+// Author: Weiyi Lu <weiyi.lu@mediatek.com>
+
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8183-clk.h>
+
+static DEFINE_SPINLOCK(mt8183_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_CLK26M, "f_f26m_ck", "clk26m", 26000000),
+ FIXED_CLK(CLK_TOP_ULPOSC, "osc", NULL, 250000),
+ FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
+ 2),
+ FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
+ 1),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "syspll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D2_D2, "syspll_d2_d2", "syspll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D2_D4, "syspll_d2_d4", "syspll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D2_D8, "syspll_d2_d8", "syspll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL_D2_D16, "syspll_d2_d16", "syspll_d2", 1,
+ 16),
+ FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll", 1,
+ 3),
+ FACTOR(CLK_TOP_SYSPLL_D3_D2, "syspll_d3_d2", "syspll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D3_D4, "syspll_d3_d4", "syspll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D3_D8, "syspll_d3_d8", "syspll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1,
+ 5),
+ FACTOR(CLK_TOP_SYSPLL_D5_D2, "syspll_d5_d2", "syspll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D5_D4, "syspll_d5_d4", "syspll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1,
+ 7),
+ FACTOR(CLK_TOP_SYSPLL_D7_D2, "syspll_d7_d2", "syspll_d7", 1,
+ 2),
+ FACTOR(CLK_TOP_SYSPLL_D7_D4, "syspll_d7_d4", "syspll_d7", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_CK, "univpll_ck", "univpll", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D2_D8, "univpll_d2_d8", "univpll_d2", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1,
+ 3),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1,
+ 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1,
+ 7),
+ FACTOR(CLK_TOP_UNIVP_192M_CK, "univ_192m_ck", "univpll_192m", 1,
+ 1),
+ FACTOR(CLK_TOP_UNIVP_192M_D2, "univ_192m_d2", "univ_192m_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVP_192M_D4, "univ_192m_d4", "univ_192m_ck", 1,
+ 4),
+ FACTOR(CLK_TOP_UNIVP_192M_D8, "univ_192m_d8", "univ_192m_ck", 1,
+ 8),
+ FACTOR(CLK_TOP_UNIVP_192M_D16, "univ_192m_d16", "univ_192m_ck", 1,
+ 16),
+ FACTOR(CLK_TOP_UNIVP_192M_D32, "univ_192m_d32", "univ_192m_ck", 1,
+ 32),
+ FACTOR(CLK_TOP_APLL1_CK, "apll1_ck", "apll1", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1,
+ 8),
+ FACTOR(CLK_TOP_APLL2_CK, "apll2_ck", "apll2", 1,
+ 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1,
+ 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1,
+ 4),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_CK, "tvdpll_ck", "tvdpll", 1,
+ 1),
+ FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_ck", 1,
+ 2),
+ FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1,
+ 4),
+ FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1,
+ 8),
+ FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1,
+ 16),
+ FACTOR(CLK_TOP_MMPLL_CK, "mmpll_ck", "mmpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1,
+ 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1,
+ 2),
+ FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1,
+ 4),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1,
+ 6),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1,
+ 7),
+ FACTOR(CLK_TOP_MFGPLL_CK, "mfgpll_ck", "mfgpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_CK, "msdcpll_ck", "msdcpll", 1,
+ 1),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1,
+ 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1,
+ 4),
+ FACTOR(CLK_TOP_MSDCPLL_D8, "msdcpll_d8", "msdcpll", 1,
+ 8),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1,
+ 16),
+ FACTOR(CLK_TOP_AD_OSC_CK, "ad_osc_ck", "osc", 1,
+ 1),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "osc", 1,
+ 2),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "osc", 1,
+ 4),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "osc", 1,
+ 8),
+ FACTOR(CLK_TOP_OSC_D16, "osc_d16", "osc", 1,
+ 16),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1,
+ 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3_D16, "univpll_d3_d16", "univpll_d3", 1,
+ 16),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d7",
+ "osc_d4"
+};
+
+static const char * const mm_parents[] = {
+ "clk26m",
+ "mmpll_d7",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const img_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "syspll_d2",
+ "mmpll_d6",
+ "syspll_d3",
+ "mmpll_d7",
+ "univpll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "syspll_d3_d2",
+ "univpll_d3_d2"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const dsp1_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const dsp2_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const ipu_if_parents[] = {
+ "clk26m",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d3",
+ "syspll_d3",
+ "univpll_d2_d2",
+ "syspll_d2_d2",
+ "univpll_d3_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mfgpll_ck",
+ "univpll_d3",
+ "syspll_d3"
+};
+
+static const char * const f52m_mfg_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "univpll_d3_d4",
+ "univpll_d3_d8"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg2_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg3_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const camtg4_parents[] = {
+ "clk26m",
+ "univ_192m_d8",
+ "univpll_d3_d8",
+ "univ_192m_d4",
+ "univpll_d3_d16",
+ "csw_f26m_ck_d2",
+ "univ_192m_d16",
+ "univ_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d3_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "syspll_d5_d2",
+ "syspll_d3_d4",
+ "msdcpll_d4"
+};
+
+static const char * const msdc50_hclk_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d3_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll_ck",
+ "msdcpll_d2",
+ "univpll_d2_d4",
+ "syspll_d3_d2",
+ "univpll_d2_d2"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "syspll_d3_d2",
+ "syspll_d7",
+ "msdcpll_d2"
+};
+
+static const char * const msdc30_2_parents[] = {
+ "clk26m",
+ "univpll_d3_d2",
+ "syspll_d3_d2",
+ "syspll_d7",
+ "msdcpll_d2"
+};
+
+static const char * const audio_parents[] = {
+ "clk26m",
+ "syspll_d5_d4",
+ "syspll_d7_d4",
+ "syspll_d2_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d7_d2"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clk26m",
+ "syspll_d2_d8",
+ "osc_d8"
+};
+
+static const char * const fpwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d16",
+ "osc_d4",
+ "osc_d8"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d5"
+};
+
+static const char * const sspm_parents[] = {
+ "clk26m",
+ "univpll_d2_d4",
+ "syspll_d2_d2",
+ "univpll_d2_d2",
+ "syspll_d3"
+};
+
+static const char * const dpi0_parents[] = {
+ "clk26m",
+ "tvdpll_d2",
+ "tvdpll_d4",
+ "tvdpll_d8",
+ "tvdpll_d16",
+ "univpll_d5_d2",
+ "univpll_d3_d4",
+ "syspll_d3_d4",
+ "univpll_d3_d8"
+};
+
+static const char * const scam_parents[] = {
+ "clk26m",
+ "syspll_d5_d2"
+};
+
+static const char * const disppwm_parents[] = {
+ "clk26m",
+ "univpll_d3_d4",
+ "osc_d2",
+ "osc_d4",
+ "osc_d16"
+};
+
+static const char * const usb_top_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d3_d4",
+ "univpll_d5_d2"
+};
+
+
+static const char * const ssusb_top_xhci_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d3_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "syspll_d2_d8"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "syspll_d2_d8",
+ "univpll_d5_d2"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d2_d8",
+ "syspll_d5",
+ "syspll_d2_d2",
+ "univpll_d2_d2",
+ "syspll_d3",
+ "univpll_d3"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d2_d2",
+ "univpll_d3_d2",
+ "univpll_d2_d4"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "syspll_d2_d2",
+ "syspll_d2_d4",
+ "syspll_d2_d8"
+};
+
+static const char * const aud_engen1_parents[] = {
+ "clk26m",
+ "apll1_d2",
+ "apll1_d4",
+ "apll1_d8"
+};
+
+static const char * const aud_engen2_parents[] = {
+ "clk26m",
+ "apll2_d2",
+ "apll2_d4",
+ "apll2_d8"
+};
+
+static const char * const faes_ufsfde_parents[] = {
+ "clk26m",
+ "syspll_d2",
+ "syspll_d2_d2",
+ "syspll_d3",
+ "syspll_d2_d4",
+ "univpll_d3"
+};
+
+static const char * const fufs_parents[] = {
+ "clk26m",
+ "syspll_d2_d4",
+ "syspll_d2_d8",
+ "syspll_d2_d16"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "apll1_ck"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "apll2_ck"
+};
+
+/*
+ * CRITICAL CLOCK:
+ * axi_sel is the main bus clock of the whole SoC.
+ * spm_sel is the clock of the always-on co-processor.
+ */
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_AXI, "axi_sel",
+ axi_parents, 0x40,
+ 0x44, 0x48, 0, 2, 7, 0x004, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MM, "mm_sel",
+ mm_parents, 0x40,
+ 0x44, 0x48, 8, 3, 15, 0x004, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IMG, "img_sel",
+ img_parents, 0x40,
+ 0x44, 0x48, 16, 3, 23, 0x004, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAM, "cam_sel",
+ cam_parents, 0x40,
+ 0x44, 0x48, 24, 4, 31, 0x004, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP, "dsp_sel",
+ dsp_parents, 0x50,
+ 0x54, 0x58, 0, 4, 7, 0x004, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP1, "dsp1_sel",
+ dsp1_parents, 0x50,
+ 0x54, 0x58, 8, 4, 15, 0x004, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DSP2, "dsp2_sel",
+ dsp2_parents, 0x50,
+ 0x54, 0x58, 16, 4, 23, 0x004, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_IPU_IF, "ipu_if_sel",
+ ipu_if_parents, 0x50,
+ 0x54, 0x58, 24, 4, 31, 0x004, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MFG, "mfg_sel",
+ mfg_parents, 0x60,
+ 0x64, 0x68, 0, 2, 7, 0x004, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_F52M_MFG, "f52m_mfg_sel",
+ f52m_mfg_parents, 0x60,
+ 0x64, 0x68, 8, 2, 15, 0x004, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG, "camtg_sel",
+ camtg_parents, 0x60,
+ 0x64, 0x68, 16, 3, 23, 0x004, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG2, "camtg2_sel",
+ camtg2_parents, 0x60,
+ 0x64, 0x68, 24, 3, 31, 0x004, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG3, "camtg3_sel",
+ camtg3_parents, 0x70,
+ 0x74, 0x78, 0, 3, 7, 0x004, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_CAMTG4, "camtg4_sel",
+ camtg4_parents, 0x70,
+ 0x74, 0x78, 8, 3, 15, 0x004, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_UART, "uart_sel",
+ uart_parents, 0x70,
+ 0x74, 0x78, 16, 1, 23, 0x004, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SPI, "spi_sel",
+ spi_parents, 0x70,
+ 0x74, 0x78, 24, 2, 31, 0x004, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0_HCLK, "msdc50_hclk_sel",
+ msdc50_hclk_parents, 0x80,
+ 0x84, 0x88, 0, 2, 7, 0x004, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC50_0, "msdc50_0_sel",
+ msdc50_0_parents, 0x80,
+ 0x84, 0x88, 8, 3, 15, 0x004, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_1, "msdc30_1_sel",
+ msdc30_1_parents, 0x80,
+ 0x84, 0x88, 16, 3, 23, 0x004, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_MSDC30_2, "msdc30_2_sel",
+ msdc30_2_parents, 0x80,
+ 0x84, 0x88, 24, 3, 31, 0x004, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUDIO, "audio_sel",
+ audio_parents, 0x90,
+ 0x94, 0x98, 0, 2, 7, 0x004, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_INTBUS, "aud_intbus_sel",
+ aud_intbus_parents, 0x90,
+ 0x94, 0x98, 8, 2, 15, 0x004, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_PMICSPI, "pmicspi_sel",
+ pmicspi_parents, 0x90,
+ 0x94, 0x98, 16, 2, 23, 0x004, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FPWRAP_ULPOSC, "fpwrap_ulposc_sel",
+ fpwrap_ulposc_parents, 0x90,
+ 0x94, 0x98, 24, 2, 31, 0x004, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_ATB, "atb_sel",
+ atb_parents, 0xa0,
+ 0xa4, 0xa8, 0, 2, 7, 0x004, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSPM, "sspm_sel",
+ sspm_parents, 0xa0,
+ 0xa4, 0xa8, 8, 3, 15, 0x004, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DPI0, "dpi0_sel",
+ dpi0_parents, 0xa0,
+ 0xa4, 0xa8, 16, 4, 23, 0x004, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCAM, "scam_sel",
+ scam_parents, 0xa0,
+ 0xa4, 0xa8, 24, 1, 31, 0x004, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DISP_PWM, "disppwm_sel",
+ disppwm_parents, 0xb0,
+ 0xb4, 0xb8, 0, 3, 7, 0x004, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_USB_TOP, "usb_top_sel",
+ usb_top_parents, 0xb0,
+ 0xb4, 0xb8, 8, 2, 15, 0x004, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SSUSB_TOP_XHCI, "ssusb_top_xhci_sel",
+ ssusb_top_xhci_parents, 0xb0,
+ 0xb4, 0xb8, 16, 2, 23, 0x004, 30),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MUX_SPM, "spm_sel",
+ spm_parents, 0xb0,
+ 0xb4, 0xb8, 24, 1, 31, 0x008, 0, CLK_IS_CRITICAL),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_I2C, "i2c_sel",
+ i2c_parents, 0xc0,
+ 0xc4, 0xc8, 0, 2, 7, 0x008, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SCP, "scp_sel",
+ scp_parents, 0xc0,
+ 0xc4, 0xc8, 8, 3, 15, 0x008, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_SENINF, "seninf_sel",
+ seninf_parents, 0xc0,
+ 0xc4, 0xc8, 16, 2, 23, 0x008, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_DXCC, "dxcc_sel",
+ dxcc_parents, 0xc0,
+ 0xc4, 0xc8, 24, 2, 31, 0x008, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG1, "aud_eng1_sel",
+ aud_engen1_parents, 0xd0,
+ 0xd4, 0xd8, 0, 2, 7, 0x008, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_ENG2, "aud_eng2_sel",
+ aud_engen2_parents, 0xd0,
+ 0xd4, 0xd8, 8, 2, 15, 0x008, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FAES_UFSFDE, "faes_ufsfde_sel",
+ faes_ufsfde_parents, 0xd0,
+ 0xd4, 0xd8, 16, 3, 23, 0x008, 7),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_FUFS, "fufs_sel",
+ fufs_parents, 0xd0,
+ 0xd4, 0xd8, 24, 2, 31, 0x008, 8),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_1, "aud_1_sel",
+ aud_1_parents, 0xe0,
+ 0xe4, 0xe8, 0, 1, 7, 0x008, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MUX_AUD_2, "aud_2_sel",
+ aud_2_parents, 0xe0,
+ 0xe4, 0xe8, 8, 1, 15, 0x008, 10),
+};
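+
+/*
+ * Editorial note (illustrative, not part of the original driver): the
+ * MUX_GATE_CLR_SET_UPD(_FLAGS) arguments map onto struct mtk_mux as
+ * defined by clk-mux.h later in this patch.  Taking the first entry,
+ * "axi_sel", as a worked example:
+ *
+ *   .mux_ofs = 0x40, .set_ofs = 0x44, .clr_ofs = 0x48  (CLK_CFG_0)
+ *   .mux_shift = 0, .mux_width = 2    selector field, bits [1:0]
+ *   .gate_shift = 7                   gate bit, cleared to enable
+ *   .upd_ofs = 0x004, .upd_shift = 0  update trigger bit
+ */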
+
+static const char * const apll_i2s0_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s1_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s2_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s3_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s4_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static const char * const apll_i2s5_parents[] = {
+ "aud_1_sel",
+ "aud_2_sel"
+};
+
+static struct mtk_composite top_aud_muxes[] = {
+ MUX(CLK_TOP_MUX_APLL_I2S0, "apll_i2s0_sel", apll_i2s0_parents,
+ 0x320, 8, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S1, "apll_i2s1_sel", apll_i2s1_parents,
+ 0x320, 9, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S2, "apll_i2s2_sel", apll_i2s2_parents,
+ 0x320, 10, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S3, "apll_i2s3_sel", apll_i2s3_parents,
+ 0x320, 11, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S4, "apll_i2s4_sel", apll_i2s4_parents,
+ 0x320, 12, 1),
+ MUX(CLK_TOP_MUX_APLL_I2S5, "apll_i2s5_sel", apll_i2s5_parents,
+ 0x328, 20, 1),
+};
+
+static const char * const mcu_mp0_parents[] = {
+ "clk26m",
+ "armpll_ll",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static const char * const mcu_mp2_parents[] = {
+ "clk26m",
+ "armpll_l",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static const char * const mcu_bus_parents[] = {
+ "clk26m",
+ "ccipll",
+ "armpll_div_pll1",
+ "armpll_div_pll2"
+};
+
+static struct mtk_composite mcu_muxes[] = {
+ /* mp0_pll_divider_cfg */
+ MUX(CLK_MCU_MP0_SEL, "mcu_mp0_sel", mcu_mp0_parents, 0x7A0, 9, 2),
+ /* mp2_pll_divider_cfg */
+ MUX(CLK_MCU_MP2_SEL, "mcu_mp2_sel", mcu_mp2_parents, 0x7A8, 9, 2),
+ /* bus_pll_divider_cfg */
+ MUX(CLK_MCU_BUS_SEL, "mcu_bus_sel", mcu_bus_parents, 0x7C0, 9, 2),
+};
+
+static struct mtk_composite top_aud_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll_i2s0_sel",
+ 0x320, 2, 0x324, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll_i2s1_sel",
+ 0x320, 3, 0x324, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll_i2s2_sel",
+ 0x320, 4, 0x324, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll_i2s3_sel",
+ 0x320, 5, 0x324, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll_i2s4_sel",
+ 0x320, 6, 0x328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIVB, "apll12_divb", "apll12_div4",
+ 0x320, 7, 0x328, 8, 8),
+};
+
+static const struct mtk_gate_regs top_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x104,
+ .sta_ofs = 0x104,
+};
+
+#define GATE_TOP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP */
+ GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL1, "armpll_div_pll1", "mainpll", 4),
+ GATE_TOP(CLK_TOP_ARMPLL_DIV_PLL2, "armpll_div_pll2", "univpll", 5),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra2_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_PMIC_TMR, "infra_pmic_tmr",
+ "axi_sel", 0),
+ GATE_INFRA0(CLK_INFRA_PMIC_AP, "infra_pmic_ap",
+ "axi_sel", 1),
+ GATE_INFRA0(CLK_INFRA_PMIC_MD, "infra_pmic_md",
+ "axi_sel", 2),
+ GATE_INFRA0(CLK_INFRA_PMIC_CONN, "infra_pmic_conn",
+ "axi_sel", 3),
+ GATE_INFRA0(CLK_INFRA_SCPSYS, "infra_scp",
+ "scp_sel", 4),
+ GATE_INFRA0(CLK_INFRA_SEJ, "infra_sej",
+ "f_f26m_ck", 5),
+ GATE_INFRA0(CLK_INFRA_APXGPT, "infra_apxgpt",
+ "axi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_ICUSB, "infra_icusb",
+ "axi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_GCE, "infra_gce",
+ "axi_sel", 9),
+ GATE_INFRA0(CLK_INFRA_THERM, "infra_therm",
+ "axi_sel", 10),
+ GATE_INFRA0(CLK_INFRA_I2C0, "infra_i2c0",
+ "i2c_sel", 11),
+ GATE_INFRA0(CLK_INFRA_I2C1, "infra_i2c1",
+ "i2c_sel", 12),
+ GATE_INFRA0(CLK_INFRA_I2C2, "infra_i2c2",
+ "i2c_sel", 13),
+ GATE_INFRA0(CLK_INFRA_I2C3, "infra_i2c3",
+ "i2c_sel", 14),
+ GATE_INFRA0(CLK_INFRA_PWM_HCLK, "infra_pwm_hclk",
+ "axi_sel", 15),
+ GATE_INFRA0(CLK_INFRA_PWM1, "infra_pwm1",
+ "i2c_sel", 16),
+ GATE_INFRA0(CLK_INFRA_PWM2, "infra_pwm2",
+ "i2c_sel", 17),
+ GATE_INFRA0(CLK_INFRA_PWM3, "infra_pwm3",
+ "i2c_sel", 18),
+ GATE_INFRA0(CLK_INFRA_PWM4, "infra_pwm4",
+ "i2c_sel", 19),
+ GATE_INFRA0(CLK_INFRA_PWM, "infra_pwm",
+ "i2c_sel", 21),
+ GATE_INFRA0(CLK_INFRA_UART0, "infra_uart0",
+ "uart_sel", 22),
+ GATE_INFRA0(CLK_INFRA_UART1, "infra_uart1",
+ "uart_sel", 23),
+ GATE_INFRA0(CLK_INFRA_UART2, "infra_uart2",
+ "uart_sel", 24),
+ GATE_INFRA0(CLK_INFRA_UART3, "infra_uart3",
+ "uart_sel", 25),
+ GATE_INFRA0(CLK_INFRA_GCE_26M, "infra_gce_26m",
+ "axi_sel", 27),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_FPC, "infra_cqdma_fpc",
+ "axi_sel", 28),
+ GATE_INFRA0(CLK_INFRA_BTIF, "infra_btif",
+ "axi_sel", 31),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_SPI0, "infra_spi0",
+ "spi_sel", 1),
+ GATE_INFRA1(CLK_INFRA_MSDC0, "infra_msdc0",
+ "msdc50_hclk_sel", 2),
+ GATE_INFRA1(CLK_INFRA_MSDC1, "infra_msdc1",
+ "axi_sel", 4),
+ GATE_INFRA1(CLK_INFRA_MSDC2, "infra_msdc2",
+ "axi_sel", 5),
+ GATE_INFRA1(CLK_INFRA_MSDC0_SCK, "infra_msdc0_sck",
+ "msdc50_0_sel", 6),
+ GATE_INFRA1(CLK_INFRA_DVFSRC, "infra_dvfsrc",
+ "f_f26m_ck", 7),
+ GATE_INFRA1(CLK_INFRA_GCPU, "infra_gcpu",
+ "axi_sel", 8),
+ GATE_INFRA1(CLK_INFRA_TRNG, "infra_trng",
+ "axi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_AUXADC, "infra_auxadc",
+ "f_f26m_ck", 10),
+ GATE_INFRA1(CLK_INFRA_CPUM, "infra_cpum",
+ "axi_sel", 11),
+ GATE_INFRA1(CLK_INFRA_CCIF1_AP, "infra_ccif1_ap",
+ "axi_sel", 12),
+ GATE_INFRA1(CLK_INFRA_CCIF1_MD, "infra_ccif1_md",
+ "axi_sel", 13),
+ GATE_INFRA1(CLK_INFRA_AUXADC_MD, "infra_auxadc_md",
+ "f_f26m_ck", 14),
+ GATE_INFRA1(CLK_INFRA_MSDC1_SCK, "infra_msdc1_sck",
+ "msdc30_1_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC2_SCK, "infra_msdc2_sck",
+ "msdc30_2_sel", 17),
+ GATE_INFRA1(CLK_INFRA_AP_DMA, "infra_apdma",
+ "axi_sel", 18),
+ GATE_INFRA1(CLK_INFRA_XIU, "infra_xiu",
+ "axi_sel", 19),
+ GATE_INFRA1(CLK_INFRA_DEVICE_APC, "infra_device_apc",
+ "axi_sel", 20),
+ GATE_INFRA1(CLK_INFRA_CCIF_AP, "infra_ccif_ap",
+ "axi_sel", 23),
+ GATE_INFRA1(CLK_INFRA_DEBUGSYS, "infra_debugsys",
+ "axi_sel", 24),
+ GATE_INFRA1(CLK_INFRA_AUDIO, "infra_audio",
+ "axi_sel", 25),
+ GATE_INFRA1(CLK_INFRA_CCIF_MD, "infra_ccif_md",
+ "axi_sel", 26),
+ GATE_INFRA1(CLK_INFRA_DXCC_SEC_CORE, "infra_dxcc_sec_core",
+ "dxcc_sel", 27),
+ GATE_INFRA1(CLK_INFRA_DXCC_AO, "infra_dxcc_ao",
+ "dxcc_sel", 28),
+ GATE_INFRA1(CLK_INFRA_DEVMPU_BCLK, "infra_devmpu_bclk",
+ "axi_sel", 30),
+ GATE_INFRA1(CLK_INFRA_DRAMC_F26M, "infra_dramc_f26m",
+ "f_f26m_ck", 31),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IRTX, "infra_irtx",
+ "f_f26m_ck", 0),
+ GATE_INFRA2(CLK_INFRA_USB, "infra_usb",
+ "usb_top_sel", 1),
+ GATE_INFRA2(CLK_INFRA_DISP_PWM, "infra_disppwm",
+ "axi_sel", 2),
+ GATE_INFRA2(CLK_INFRA_CLDMA_BCLK, "infra_cldma_bclk",
+ "axi_sel", 3),
+ GATE_INFRA2(CLK_INFRA_AUDIO_26M_BCLK, "infra_audio_26m_bclk",
+ "f_f26m_ck", 4),
+ GATE_INFRA2(CLK_INFRA_SPI1, "infra_spi1",
+ "spi_sel", 6),
+ GATE_INFRA2(CLK_INFRA_I2C4, "infra_i2c4",
+ "i2c_sel", 7),
+ GATE_INFRA2(CLK_INFRA_MODEM_TEMP_SHARE, "infra_md_tmp_share",
+ "f_f26m_ck", 8),
+ GATE_INFRA2(CLK_INFRA_SPI2, "infra_spi2",
+ "spi_sel", 9),
+ GATE_INFRA2(CLK_INFRA_SPI3, "infra_spi3",
+ "spi_sel", 10),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_SCK, "infra_unipro_sck",
+ "ssusb_top_xhci_sel", 11),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_TICK, "infra_unipro_tick",
+ "fufs_sel", 12),
+ GATE_INFRA2(CLK_INFRA_UFS_MP_SAP_BCLK, "infra_ufs_mp_sap_bck",
+ "fufs_sel", 13),
+ GATE_INFRA2(CLK_INFRA_MD32_BCLK, "infra_md32_bclk",
+ "axi_sel", 14),
+ GATE_INFRA2(CLK_INFRA_SSPM, "infra_sspm",
+ "sspm_sel", 15),
+ GATE_INFRA2(CLK_INFRA_UNIPRO_MBIST, "infra_unipro_mbist",
+ "axi_sel", 16),
+ GATE_INFRA2(CLK_INFRA_SSPM_BUS_HCLK, "infra_sspm_bus_hclk",
+ "axi_sel", 17),
+ GATE_INFRA2(CLK_INFRA_I2C5, "infra_i2c5",
+ "i2c_sel", 18),
+ GATE_INFRA2(CLK_INFRA_I2C5_ARBITER, "infra_i2c5_arbiter",
+ "i2c_sel", 19),
+ GATE_INFRA2(CLK_INFRA_I2C5_IMM, "infra_i2c5_imm",
+ "i2c_sel", 20),
+ GATE_INFRA2(CLK_INFRA_I2C1_ARBITER, "infra_i2c1_arbiter",
+ "i2c_sel", 21),
+ GATE_INFRA2(CLK_INFRA_I2C1_IMM, "infra_i2c1_imm",
+ "i2c_sel", 22),
+ GATE_INFRA2(CLK_INFRA_I2C2_ARBITER, "infra_i2c2_arbiter",
+ "i2c_sel", 23),
+ GATE_INFRA2(CLK_INFRA_I2C2_IMM, "infra_i2c2_imm",
+ "i2c_sel", 24),
+ GATE_INFRA2(CLK_INFRA_SPI4, "infra_spi4",
+ "spi_sel", 25),
+ GATE_INFRA2(CLK_INFRA_SPI5, "infra_spi5",
+ "spi_sel", 26),
+ GATE_INFRA2(CLK_INFRA_CQ_DMA, "infra_cqdma",
+ "axi_sel", 27),
+ GATE_INFRA2(CLK_INFRA_UFS, "infra_ufs",
+ "fufs_sel", 28),
+ GATE_INFRA2(CLK_INFRA_AES_UFSFDE, "infra_aes_ufsfde",
+ "faes_ufsfde_sel", 29),
+ GATE_INFRA2(CLK_INFRA_UFS_TICK, "infra_ufs_tick",
+ "fufs_sel", 30),
+ /* INFRA3 */
+ GATE_INFRA3(CLK_INFRA_MSDC0_SELF, "infra_msdc0_self",
+ "msdc50_0_sel", 0),
+ GATE_INFRA3(CLK_INFRA_MSDC1_SELF, "infra_msdc1_self",
+ "msdc50_0_sel", 1),
+ GATE_INFRA3(CLK_INFRA_MSDC2_SELF, "infra_msdc2_self",
+ "msdc50_0_sel", 2),
+ GATE_INFRA3(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self",
+ "f_f26m_ck", 3),
+ GATE_INFRA3(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self",
+ "f_f26m_ck", 4),
+ GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi",
+ "axi_sel", 5),
+ GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6",
+ "i2c_sel", 6),
+ GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0",
+ "msdc50_hclk_sel", 7),
+ GATE_INFRA3(CLK_INFRA_MD_MSDC0, "infra_md_msdc0",
+ "msdc50_hclk_sel", 8),
+ GATE_INFRA3(CLK_INFRA_CCIF2_AP, "infra_ccif2_ap",
+ "axi_sel", 16),
+ GATE_INFRA3(CLK_INFRA_CCIF2_MD, "infra_ccif2_md",
+ "axi_sel", 17),
+ GATE_INFRA3(CLK_INFRA_CCIF3_AP, "infra_ccif3_ap",
+ "axi_sel", 18),
+ GATE_INFRA3(CLK_INFRA_CCIF3_MD, "infra_ccif3_md",
+ "axi_sel", 19),
+ GATE_INFRA3(CLK_INFRA_SEJ_F13M, "infra_sej_f13m",
+ "f_f26m_ck", 20),
+ GATE_INFRA3(CLK_INFRA_AES_BCLK, "infra_aes_bclk",
+ "axi_sel", 21),
+ GATE_INFRA3(CLK_INFRA_I2C7, "infra_i2c7",
+ "i2c_sel", 22),
+ GATE_INFRA3(CLK_INFRA_I2C8, "infra_i2c8",
+ "i2c_sel", 23),
+ GATE_INFRA3(CLK_INFRA_FBIST2FPC, "infra_fbist2fpc",
+ "msdc50_0_sel", 24),
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x20,
+ .clr_ofs = 0x20,
+ .sta_ofs = 0x20,
+};
+
+#define GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, _flags) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &apmixed_cg_regs, \
+ _shift, &mtk_clk_gate_ops_no_setclr_inv, _flags)
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_APMIXED_FLAGS(_id, _name, _parent, _shift, 0)
+
+/*
+ * CRITICAL CLOCK:
+ * apmixed_appll26m is the top-most clock gate of all PLLs.
+ */
+static const struct mtk_gate apmixed_clks[] = {
+ /* AUDIO0 */
+ GATE_APMIXED(CLK_APMIXED_SSUSB_26M, "apmixed_ssusb26m",
+ "f_f26m_ck", 4),
+ GATE_APMIXED_FLAGS(CLK_APMIXED_APPLL_26M, "apmixed_appll26m",
+ "f_f26m_ck", 5, CLK_IS_CRITICAL),
+ GATE_APMIXED(CLK_APMIXED_MIPIC0_26M, "apmixed_mipic026m",
+ "f_f26m_ck", 6),
+ GATE_APMIXED(CLK_APMIXED_MDPLLGP_26M, "apmixed_mdpll26m",
+ "f_f26m_ck", 7),
+ GATE_APMIXED(CLK_APMIXED_MMSYS_26M, "apmixed_mmsys26m",
+ "f_f26m_ck", 8),
+ GATE_APMIXED(CLK_APMIXED_UFS_26M, "apmixed_ufs26m",
+ "f_f26m_ck", 9),
+ GATE_APMIXED(CLK_APMIXED_MIPIC1_26M, "apmixed_mipic126m",
+ "f_f26m_ck", 11),
+ GATE_APMIXED(CLK_APMIXED_MEMPLL_26M, "apmixed_mempll26m",
+ "f_f26m_ck", 13),
+ GATE_APMIXED(CLK_APMIXED_CLKSQ_LVPLL_26M, "apmixed_lvpll26m",
+ "f_f26m_ck", 14),
+ GATE_APMIXED(CLK_APMIXED_MIPID0_26M, "apmixed_mipid026m",
+ "f_f26m_ck", 16),
+ GATE_APMIXED(CLK_APMIXED_MIPID1_26M, "apmixed_mipid126m",
+ "f_f26m_ck", 17),
+};
+
+#define MT8183_PLL_FMAX (3800UL * MHZ)
+#define MT8183_PLL_FMIN (1500UL * MHZ)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8183_PLL_FMAX, \
+ .fmin = MT8183_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = _pcwibits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pcwibits, _pd_reg, \
+ _pd_shift, _tuner_reg, _tuner_en_reg, \
+ _tuner_en_bit, _pcw_reg, _pcw_shift, \
+ _pcw_chg_reg, NULL)
+
+static const struct mtk_pll_div_table armpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1500 * MHZ },
+ { .div = 2, .freq = 750 * MHZ },
+ { .div = 3, .freq = 375 * MHZ },
+ { .div = 4, .freq = 187500000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_div_table mfgpll_div_table[] = {
+ { .div = 0, .freq = MT8183_PLL_FMAX },
+ { .div = 1, .freq = 1600 * MHZ },
+ { .div = 2, .freq = 800 * MHZ },
+ { .div = 3, .freq = 400 * MHZ },
+ { .div = 4, .freq = 200 * MHZ },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0,
+ 0x0204, 0, 0, armpll_div_table),
+ PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0,
+ 0x0214, 0, 0, armpll_div_table),
+ PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001,
+ HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0,
+ 0x0294, 0, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0,
+ 0x0224, 0, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001,
+ HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0,
+ 0x0234, 0, 0),
+ PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001,
+ 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0,
+ mfgpll_div_table),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001,
+ 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0),
+ PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001,
+ 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001,
+ HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0,
+ 0x0274, 0, 0),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001,
+ 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001,
+ 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4),
+};
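+
+/*
+ * Editorial note (illustrative): in the PLL()/PLL_B() argument order
+ * above, the 8th and 9th values are pcwbits and pcwibits.  All MT8183
+ * PLLs pass pcwibits = 8, so the feedback divider carries
+ * pcwbits - pcwibits fractional bits: 22 - 8 = 14 for the system PLLs
+ * and 32 - 8 = 24 for apll1/apll2.  This matches the
+ * __mtk_pll_recalc_rate() change in clk-pll.c further down, where a
+ * zero pcwibits falls back to the legacy 7-bit integer part.
+ */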
+
+static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_top_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ void __iomem *base;
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+ node, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
+ base, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
+ base, &mt8183_clk_lock, clk_data);
+
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_infra_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int clk_mt8183_mcu_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ void __iomem *base;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
+
+ mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ &mt8183_clk_lock, clk_data);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8183[] = {
+ {
+ .compatible = "mediatek,mt8183-apmixedsys",
+ .data = clk_mt8183_apmixed_probe,
+ }, {
+ .compatible = "mediatek,mt8183-topckgen",
+ .data = clk_mt8183_top_probe,
+ }, {
+ .compatible = "mediatek,mt8183-infracfg",
+ .data = clk_mt8183_infra_probe,
+ }, {
+ .compatible = "mediatek,mt8183-mcucfg",
+ .data = clk_mt8183_mcu_probe,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt8183_probe(struct platform_device *pdev)
+{
+ int (*clk_probe)(struct platform_device *pdev);
+ int r;
+
+ clk_probe = of_device_get_match_data(&pdev->dev);
+ if (!clk_probe)
+ return -EINVAL;
+
+ r = clk_probe(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt8183_drv = {
+ .probe = clk_mt8183_probe,
+ .driver = {
+ .name = "clk-mt8183",
+ .of_match_table = of_match_clk_mt8183,
+ },
+};
+
+static int __init clk_mt8183_init(void)
+{
+ return platform_driver_register(&clk_mt8183_drv);
+}
+
+arch_initcall(clk_mt8183_init);
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
new file mode 100644
index 000000000000..26fe43cc9ea2
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ * Fabien Parent <fparent@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt8516-clk.h>
+
+static DEFINE_SPINLOCK(mt8516_clk_lock);
+
+static const struct mtk_fixed_clk fixed_clks[] __initconst = {
+ FIXED_CLK(CLK_TOP_CLK_NULL, "clk_null", NULL, 0),
+ FIXED_CLK(CLK_TOP_I2S_INFRA_BCK, "i2s_infra_bck", "clk_null", 26000000),
+ FIXED_CLK(CLK_TOP_MEMPLL, "mempll", "clk26m", 800000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] __initconst = {
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D8, "mainpll_d8", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D16, "mainpll_d16", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D11, "mainpll_d11", "mainpll", 1, 11),
+ FACTOR(CLK_TOP_MAINPLL_D22, "mainpll_d22", "mainpll", 1, 22),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D12, "mainpll_d12", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D10, "mainpll_d10", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D20, "mainpll_d20", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D40, "mainpll_d40", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D14, "mainpll_d14", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D8, "univpll_d8", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D16, "univpll_d16", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D12, "univpll_d12", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D24, "univpll_d24", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D20, "univpll_d20", "univpll", 1, 20),
+ FACTOR(CLK_TOP_MMPLL380M, "mmpll380m", "mmpll", 1, 1),
+ FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_200M, "mmpll_200m", "mmpll", 1, 3),
+ FACTOR(CLK_TOP_USB_PHY48M, "usb_phy48m_ck", "univpll", 1, 26),
+ FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1),
+ FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "rg_apll1_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "rg_apll1_d4_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1),
+ FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2_ck", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "rg_apll2_d2_en", 1, 2),
+ FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "rg_apll2_d4_en", 1, 2),
+ FACTOR(CLK_TOP_CLK26M, "clk26m_ck", "clk26m", 1, 1),
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_AHB_INFRA_D2, "ahb_infra_d2", "ahb_infra_sel", 1, 2),
+ FACTOR(CLK_TOP_NFI1X, "nfi1x_ck", "nfi2x_pad_sel", 1, 2),
+ FACTOR(CLK_TOP_ETH_D2, "eth_d2_ck", "eth_sel", 1, 2),
+};
+
+static const char * const uart0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const ahb_infra_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d11",
+ "clk_null",
+ "mainpll_d12",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10"
+};
+
+static const char * const msdc0_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const uart1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const msdc1_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const pmicspi_parents[] __initconst = {
+ "univpll_d20",
+ "usb_phy48m_ck",
+ "univpll_d16",
+ "clk26m_ck"
+};
+
+static const char * const qaxi_aud26m_parents[] __initconst = {
+ "clk26m_ck",
+ "ahb_infra_sel"
+};
+
+static const char * const aud_intbus_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d22",
+ "clk_null",
+ "mainpll_d11"
+};
+
+static const char * const nfi2x_pad_parents[] __initconst = {
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk26m_ck",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d12",
+ "mainpll_d8",
+ "clk_null",
+ "mainpll_d6",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d4",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "clk_null",
+ "mainpll_d10",
+ "mainpll_d7",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfi1x_pad_parents[] __initconst = {
+ "ahb_infra_sel",
+ "nfi1x_ck"
+};
+
+static const char * const ddrphycfg_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d16"
+};
+
+static const char * const usb_78m_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "univpll_d16",
+ "clk_null",
+ "mainpll_d20"
+};
+
+static const char * const spinor_parents[] __initconst = {
+ "clk26m_d2",
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20",
+ "mainpll_d16",
+ "univpll_d12"
+};
+
+static const char * const msdc2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d6",
+ "mainpll_d8",
+ "univpll_d8",
+ "mainpll_d16",
+ "mmpll_200m",
+ "mainpll_d12",
+ "mmpll_d2"
+};
+
+static const char * const eth_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d40",
+ "univpll_d24",
+ "univpll_d20",
+ "mainpll_d20"
+};
+
+static const char * const aud1_parents[] __initconst = {
+ "clk26m_ck",
+ "apll1_ck"
+};
+
+static const char * const aud2_parents[] __initconst = {
+ "clk26m_ck",
+ "apll2_ck"
+};
+
+static const char * const aud_engen1_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll1_d2_en",
+ "rg_apll1_d4_en",
+ "rg_apll1_d8_en"
+};
+
+static const char * const aud_engen2_parents[] __initconst = {
+ "clk26m_ck",
+ "rg_apll2_d2_en",
+ "rg_apll2_d4_en",
+ "rg_apll2_d8_en"
+};
+
+static const char * const i2c_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d20",
+ "univpll_d16",
+ "univpll_d12"
+};
+
+static const char * const aud_i2s0_m_parents[] __initconst = {
+ "rg_aud1",
+ "rg_aud2"
+};
+
+static const char * const pwm_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12"
+};
+
+static const char * const spi_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d12",
+ "univpll_d8",
+ "univpll_d6"
+};
+
+static const char * const aud_spdifin_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d2"
+};
+
+static const char * const uart2_parents[] __initconst = {
+ "clk26m_ck",
+ "univpll_d24"
+};
+
+static const char * const bsi_parents[] __initconst = {
+ "clk26m_ck",
+ "mainpll_d10",
+ "mainpll_d12",
+ "mainpll_d20"
+};
+
+static const char * const dbg_atclk_parents[] __initconst = {
+ "clk_null",
+ "clk26m_ck",
+ "mainpll_d5",
+ "clk_null",
+ "univpll_d5"
+};
+
+static const char * const csw_nfiecc_parents[] __initconst = {
+ "clk_null",
+ "mainpll_d7",
+ "mainpll_d6",
+ "clk_null",
+ "mainpll_d5"
+};
+
+static const char * const nfiecc_parents[] __initconst = {
+ "clk_null",
+ "nfi2x_pad_sel",
+ "mainpll_d4",
+ "clk_null",
+ "csw_nfiecc_sel"
+};
+
+static struct mtk_composite top_muxes[] __initdata = {
+ /* CLK_MUX_SEL0 */
+ MUX(CLK_TOP_UART0_SEL, "uart0_sel", uart0_parents,
+ 0x000, 0, 1),
+ MUX(CLK_TOP_AHB_INFRA_SEL, "ahb_infra_sel", ahb_infra_parents,
+ 0x000, 4, 4),
+ MUX(CLK_TOP_MSDC0_SEL, "msdc0_sel", msdc0_parents,
+ 0x000, 11, 3),
+ MUX(CLK_TOP_UART1_SEL, "uart1_sel", uart1_parents,
+ 0x000, 19, 1),
+ MUX(CLK_TOP_MSDC1_SEL, "msdc1_sel", msdc1_parents,
+ 0x000, 20, 3),
+ MUX(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x000, 24, 2),
+ MUX(CLK_TOP_QAXI_AUD26M_SEL, "qaxi_aud26m_sel", qaxi_aud26m_parents,
+ 0x000, 26, 1),
+ MUX(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x000, 27, 3),
+ /* CLK_MUX_SEL1 */
+ MUX(CLK_TOP_NFI2X_PAD_SEL, "nfi2x_pad_sel", nfi2x_pad_parents,
+ 0x004, 0, 7),
+ MUX(CLK_TOP_NFI1X_PAD_SEL, "nfi1x_pad_sel", nfi1x_pad_parents,
+ 0x004, 7, 1),
+ MUX(CLK_TOP_USB_78M_SEL, "usb_78m_sel", usb_78m_parents,
+ 0x004, 20, 3),
+ /* CLK_MUX_SEL8 */
+ MUX(CLK_TOP_SPINOR_SEL, "spinor_sel", spinor_parents,
+ 0x040, 0, 3),
+ MUX(CLK_TOP_MSDC2_SEL, "msdc2_sel", msdc2_parents,
+ 0x040, 3, 3),
+ MUX(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 6, 3),
+ MUX(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x040, 22, 1),
+ MUX(CLK_TOP_AUD2_SEL, "aud2_sel", aud2_parents,
+ 0x040, 23, 1),
+ MUX(CLK_TOP_AUD_ENGEN1_SEL, "aud_engen1_sel", aud_engen1_parents,
+ 0x040, 24, 2),
+ MUX(CLK_TOP_AUD_ENGEN2_SEL, "aud_engen2_sel", aud_engen2_parents,
+ 0x040, 26, 2),
+ MUX(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents,
+ 0x040, 28, 2),
+ /* CLK_SEL_9 */
+ MUX(CLK_TOP_AUD_I2S0_M_SEL, "aud_i2s0_m_sel", aud_i2s0_m_parents,
+ 0x044, 12, 1),
+ MUX(CLK_TOP_AUD_I2S1_M_SEL, "aud_i2s1_m_sel", aud_i2s0_m_parents,
+ 0x044, 13, 1),
+ MUX(CLK_TOP_AUD_I2S2_M_SEL, "aud_i2s2_m_sel", aud_i2s0_m_parents,
+ 0x044, 14, 1),
+ MUX(CLK_TOP_AUD_I2S3_M_SEL, "aud_i2s3_m_sel", aud_i2s0_m_parents,
+ 0x044, 15, 1),
+ MUX(CLK_TOP_AUD_I2S4_M_SEL, "aud_i2s4_m_sel", aud_i2s0_m_parents,
+ 0x044, 16, 1),
+ MUX(CLK_TOP_AUD_I2S5_M_SEL, "aud_i2s5_m_sel", aud_i2s0_m_parents,
+ 0x044, 17, 1),
+ MUX(CLK_TOP_AUD_SPDIF_B_SEL, "aud_spdif_b_sel", aud_i2s0_m_parents,
+ 0x044, 18, 1),
+ /* CLK_MUX_SEL13 */
+ MUX(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x07c, 0, 1),
+ MUX(CLK_TOP_SPI_SEL, "spi_sel", spi_parents,
+ 0x07c, 1, 2),
+ MUX(CLK_TOP_AUD_SPDIFIN_SEL, "aud_spdifin_sel", aud_spdifin_parents,
+ 0x07c, 3, 1),
+ MUX(CLK_TOP_UART2_SEL, "uart2_sel", uart2_parents,
+ 0x07c, 4, 1),
+ MUX(CLK_TOP_BSI_SEL, "bsi_sel", bsi_parents,
+ 0x07c, 5, 2),
+ MUX(CLK_TOP_DBG_ATCLK_SEL, "dbg_atclk_sel", dbg_atclk_parents,
+ 0x07c, 7, 3),
+ MUX(CLK_TOP_CSW_NFIECC_SEL, "csw_nfiecc_sel", csw_nfiecc_parents,
+ 0x07c, 10, 3),
+ MUX(CLK_TOP_NFIECC_SEL, "nfiecc_sel", nfiecc_parents,
+ 0x07c, 13, 3),
+};
+
+static const char * const ifr_mux1_parents[] __initconst = {
+ "clk26m_ck",
+ "armpll",
+ "univpll",
+ "mainpll_d2"
+};
+
+static const char * const ifr_eth_25m_parents[] __initconst = {
+ "eth_d2_ck",
+ "rg_eth"
+};
+
+static const char * const ifr_i2c0_parents[] __initconst = {
+ "ahb_infra_d2",
+ "rg_i2c"
+};
+
+static const struct mtk_composite ifr_muxes[] __initconst = {
+ MUX(CLK_IFR_MUX1_SEL, "ifr_mux1_sel", ifr_mux1_parents, 0x000,
+ 2, 2),
+ MUX(CLK_IFR_ETH_25M_SEL, "ifr_eth_25m_sel", ifr_eth_25m_parents, 0x080,
+ 0, 1),
+ MUX(CLK_IFR_I2C0_SEL, "ifr_i2c0_sel", ifr_i2c0_parents, 0x080,
+ 1, 1),
+ MUX(CLK_IFR_I2C1_SEL, "ifr_i2c1_sel", ifr_i2c0_parents, 0x080,
+ 2, 1),
+ MUX(CLK_IFR_I2C2_SEL, "ifr_i2c2_sel", ifr_i2c0_parents, 0x080,
+ 3, 1),
+};
+
+#define DIV_ADJ(_id, _name, _parent, _reg, _shift, _width) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .div_reg = _reg, \
+ .div_shift = _shift, \
+ .div_width = _width, \
+}
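+
+/*
+ * Editorial note (illustrative): DIV_ADJ() only fills a
+ * struct mtk_clk_divider, so the first entry below is equivalent to
+ *
+ *   { .id = CLK_TOP_APLL12_CK_DIV0, .name = "apll12_ck_div0",
+ *     .parent_name = "aud_i2s0_m_sel",
+ *     .div_reg = 0x0048, .div_shift = 0, .div_width = 8 }
+ */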
+
+static const struct mtk_clk_divider top_adj_divs[] = {
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV0, "apll12_ck_div0", "aud_i2s0_m_sel",
+ 0x0048, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV1, "apll12_ck_div1", "aud_i2s1_m_sel",
+ 0x0048, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV2, "apll12_ck_div2", "aud_i2s2_m_sel",
+ 0x0048, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV3, "apll12_ck_div3", "aud_i2s3_m_sel",
+ 0x0048, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4, "apll12_ck_div4", "aud_i2s4_m_sel",
+ 0x004c, 0, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV4B, "apll12_ck_div4b", "apll12_div4",
+ 0x004c, 8, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5, "apll12_ck_div5", "aud_i2s5_m_sel",
+ 0x004c, 16, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV5B, "apll12_ck_div5b", "apll12_div5",
+ 0x004c, 24, 8),
+ DIV_ADJ(CLK_TOP_APLL12_CK_DIV6, "apll12_ck_div6", "aud_spdif_b_sel",
+ 0x0078, 0, 8),
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x54,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x24,
+};
+
+static const struct mtk_gate_regs top2_cg_regs = {
+ .set_ofs = 0x6c,
+ .clr_ofs = 0x9c,
+ .sta_ofs = 0x3c,
+};
+
+static const struct mtk_gate_regs top3_cg_regs = {
+ .set_ofs = 0xa0,
+ .clr_ofs = 0xb0,
+ .sta_ofs = 0x70,
+};
+
+static const struct mtk_gate_regs top4_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xb4,
+ .sta_ofs = 0x74,
+};
+
+static const struct mtk_gate_regs top5_cg_regs = {
+ .set_ofs = 0x44,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x44,
+};
+
+#define GATE_TOP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP2_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP3(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top3_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_TOP4_I(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top4_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_TOP5(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &top5_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr, \
+ }
+
+static const struct mtk_gate top_clks[] __initconst = {
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_THEM, "them", "ahb_infra_sel", 1),
+ GATE_TOP1(CLK_TOP_APDMA, "apdma", "ahb_infra_sel", 2),
+ GATE_TOP1(CLK_TOP_I2C0, "i2c0", "ifr_i2c0_sel", 3),
+ GATE_TOP1(CLK_TOP_I2C1, "i2c1", "ifr_i2c1_sel", 4),
+ GATE_TOP1(CLK_TOP_AUXADC1, "auxadc1", "ahb_infra_sel", 5),
+ GATE_TOP1(CLK_TOP_NFI, "nfi", "nfi1x_pad_sel", 6),
+ GATE_TOP1(CLK_TOP_NFIECC, "nfiecc", "rg_nfiecc", 7),
+ GATE_TOP1(CLK_TOP_DEBUGSYS, "debugsys", "rg_dbg_atclk", 8),
+ GATE_TOP1(CLK_TOP_PWM, "pwm", "ahb_infra_sel", 9),
+ GATE_TOP1(CLK_TOP_UART0, "uart0", "uart0_sel", 10),
+ GATE_TOP1(CLK_TOP_UART1, "uart1", "uart1_sel", 11),
+ GATE_TOP1(CLK_TOP_BTIF, "btif", "ahb_infra_sel", 12),
+ GATE_TOP1(CLK_TOP_USB, "usb", "usb_78m", 13),
+ GATE_TOP1(CLK_TOP_FLASHIF_26M, "flashif_26m", "clk26m_ck", 14),
+ GATE_TOP1(CLK_TOP_AUXADC2, "auxadc2", "ahb_infra_sel", 15),
+ GATE_TOP1(CLK_TOP_I2C2, "i2c2", "ifr_i2c2_sel", 16),
+ GATE_TOP1(CLK_TOP_MSDC0, "msdc0", "msdc0_sel", 17),
+ GATE_TOP1(CLK_TOP_MSDC1, "msdc1", "msdc1_sel", 18),
+ GATE_TOP1(CLK_TOP_NFI2X, "nfi2x", "nfi2x_pad_sel", 19),
+ GATE_TOP1(CLK_TOP_PMICWRAP_AP, "pwrap_ap", "clk26m_ck", 20),
+ GATE_TOP1(CLK_TOP_SEJ, "sej", "ahb_infra_sel", 21),
+ GATE_TOP1(CLK_TOP_MEMSLP_DLYER, "memslp_dlyer", "clk26m_ck", 22),
+ GATE_TOP1(CLK_TOP_SPI, "spi", "spi_sel", 23),
+ GATE_TOP1(CLK_TOP_APXGPT, "apxgpt", "clk26m_ck", 24),
+ GATE_TOP1(CLK_TOP_AUDIO, "audio", "clk26m_ck", 25),
+ GATE_TOP1(CLK_TOP_PMICWRAP_MD, "pwrap_md", "clk26m_ck", 27),
+ GATE_TOP1(CLK_TOP_PMICWRAP_CONN, "pwrap_conn", "clk26m_ck", 28),
+ GATE_TOP1(CLK_TOP_PMICWRAP_26M, "pwrap_26m", "clk26m_ck", 29),
+ GATE_TOP1(CLK_TOP_AUX_ADC, "aux_adc", "clk26m_ck", 30),
+ GATE_TOP1(CLK_TOP_AUX_TP, "aux_tp", "clk26m_ck", 31),
+ /* TOP2 */
+ GATE_TOP2(CLK_TOP_MSDC2, "msdc2", "ahb_infra_sel", 0),
+ GATE_TOP2(CLK_TOP_RBIST, "rbist", "univpll_d12", 1),
+ GATE_TOP2(CLK_TOP_NFI_BUS, "nfi_bus", "ahb_infra_sel", 2),
+ GATE_TOP2(CLK_TOP_GCE, "gce", "ahb_infra_sel", 4),
+ GATE_TOP2(CLK_TOP_TRNG, "trng", "ahb_infra_sel", 5),
+ GATE_TOP2(CLK_TOP_SEJ_13M, "sej_13m", "clk26m_ck", 6),
+ GATE_TOP2(CLK_TOP_AES, "aes", "ahb_infra_sel", 7),
+ GATE_TOP2(CLK_TOP_PWM_B, "pwm_b", "rg_pwm_infra", 8),
+ GATE_TOP2(CLK_TOP_PWM1_FB, "pwm1_fb", "rg_pwm_infra", 9),
+ GATE_TOP2(CLK_TOP_PWM2_FB, "pwm2_fb", "rg_pwm_infra", 10),
+ GATE_TOP2(CLK_TOP_PWM3_FB, "pwm3_fb", "rg_pwm_infra", 11),
+ GATE_TOP2(CLK_TOP_PWM4_FB, "pwm4_fb", "rg_pwm_infra", 12),
+ GATE_TOP2(CLK_TOP_PWM5_FB, "pwm5_fb", "rg_pwm_infra", 13),
+ GATE_TOP2(CLK_TOP_USB_1P, "usb_1p", "usb_78m", 14),
+ GATE_TOP2(CLK_TOP_FLASHIF_FREERUN, "flashif_freerun", "ahb_infra_sel",
+ 15),
+ GATE_TOP2(CLK_TOP_66M_ETH, "eth_66m", "ahb_infra_d2", 19),
+ GATE_TOP2(CLK_TOP_133M_ETH, "eth_133m", "ahb_infra_sel", 20),
+ GATE_TOP2(CLK_TOP_FETH_25M, "feth_25m", "ifr_eth_25m_sel", 21),
+ GATE_TOP2(CLK_TOP_FETH_50M, "feth_50m", "rg_eth", 22),
+ GATE_TOP2(CLK_TOP_FLASHIF_AXI, "flashif_axi", "ahb_infra_sel", 23),
+ GATE_TOP2(CLK_TOP_USBIF, "usbif", "ahb_infra_sel", 24),
+ GATE_TOP2(CLK_TOP_UART2, "uart2", "rg_uart2", 25),
+ GATE_TOP2(CLK_TOP_BSI, "bsi", "ahb_infra_sel", 26),
+ GATE_TOP2_I(CLK_TOP_MSDC0_INFRA, "msdc0_infra", "msdc0", 28),
+ GATE_TOP2_I(CLK_TOP_MSDC1_INFRA, "msdc1_infra", "msdc1", 29),
+ GATE_TOP2_I(CLK_TOP_MSDC2_INFRA, "msdc2_infra", "rg_msdc2", 30),
+ GATE_TOP2(CLK_TOP_USB_78M, "usb_78m", "usb_78m_sel", 31),
+ /* TOP3 */
+ GATE_TOP3(CLK_TOP_RG_SPINOR, "rg_spinor", "spinor_sel", 0),
+ GATE_TOP3(CLK_TOP_RG_MSDC2, "rg_msdc2", "msdc2_sel", 1),
+ GATE_TOP3(CLK_TOP_RG_ETH, "rg_eth", "eth_sel", 2),
+ GATE_TOP3(CLK_TOP_RG_AUD1, "rg_aud1", "aud1_sel", 8),
+ GATE_TOP3(CLK_TOP_RG_AUD2, "rg_aud2", "aud2_sel", 9),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN1, "rg_aud_engen1", "aud_engen1_sel", 10),
+ GATE_TOP3(CLK_TOP_RG_AUD_ENGEN2, "rg_aud_engen2", "aud_engen2_sel", 11),
+ GATE_TOP3(CLK_TOP_RG_I2C, "rg_i2c", "i2c_sel", 12),
+ GATE_TOP3(CLK_TOP_RG_PWM_INFRA, "rg_pwm_infra", "pwm_sel", 13),
+ GATE_TOP3(CLK_TOP_RG_AUD_SPDIF_IN, "rg_aud_spdif_in", "aud_spdifin_sel",
+ 14),
+ GATE_TOP3(CLK_TOP_RG_UART2, "rg_uart2", "uart2_sel", 15),
+ GATE_TOP3(CLK_TOP_RG_BSI, "rg_bsi", "bsi_sel", 16),
+ GATE_TOP3(CLK_TOP_RG_DBG_ATCLK, "rg_dbg_atclk", "dbg_atclk_sel", 17),
+ GATE_TOP3(CLK_TOP_RG_NFIECC, "rg_nfiecc", "nfiecc_sel", 18),
+ /* TOP4 */
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D2_EN, "rg_apll1_d2_en", "apll1_d2", 8),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D4_EN, "rg_apll1_d4_en", "apll1_d4", 9),
+ GATE_TOP4_I(CLK_TOP_RG_APLL1_D8_EN, "rg_apll1_d8_en", "apll1_d8", 10),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D2_EN, "rg_apll2_d2_en", "apll2_d2", 11),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D4_EN, "rg_apll2_d4_en", "apll2_d4", 12),
+ GATE_TOP4_I(CLK_TOP_RG_APLL2_D8_EN, "rg_apll2_d8_en", "apll2_d8", 13),
+ /* TOP5 */
+ GATE_TOP5(CLK_TOP_APLL12_DIV0, "apll12_div0", "apll12_ck_div0", 0),
+ GATE_TOP5(CLK_TOP_APLL12_DIV1, "apll12_div1", "apll12_ck_div1", 1),
+ GATE_TOP5(CLK_TOP_APLL12_DIV2, "apll12_div2", "apll12_ck_div2", 2),
+ GATE_TOP5(CLK_TOP_APLL12_DIV3, "apll12_div3", "apll12_ck_div3", 3),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4, "apll12_div4", "apll12_ck_div4", 4),
+ GATE_TOP5(CLK_TOP_APLL12_DIV4B, "apll12_div4b", "apll12_ck_div4b", 5),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5, "apll12_div5", "apll12_ck_div5", 6),
+ GATE_TOP5(CLK_TOP_APLL12_DIV5B, "apll12_div5b", "apll12_ck_div5b", 7),
+ GATE_TOP5(CLK_TOP_APLL12_DIV6, "apll12_div6", "apll12_ck_div6", 8),
+};
+
+static void __init mtk_topckgen_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
+ clk_data);
+ mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8516_clk_lock, clk_data);
+ mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
+ base, &mt8516_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init);
+
+static void __init mtk_infracfg_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+ void __iomem *base;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
+
+ mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
+ &mt8516_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_infracfg, "mediatek,mt8516-infracfg", mtk_infracfg_init);
+
+#define MT8516_PLL_FMAX (1502UL * MHZ)
+
+#define CON0_MT8516_RST_BAR BIT(27)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT8516_RST_BAR, \
+ .fmax = MT8516_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL)
+
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+ { .div = 0, .freq = MT8516_PLL_FMAX },
+ { .div = 1, .freq = 1000000000 },
+ { .div = 2, .freq = 604500000 },
+ { .div = 3, .freq = 253500000 },
+ { .div = 4, .freq = 126750000 },
+ { } /* sentinel */
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0,
+ 21, 0x0104, 24, 0, 0x0104, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001,
+ HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0),
+ PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0,
+ 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0,
+ 31, 0x0180, 1, 0x0194, 0x0184, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0,
+ 31, 0x01A0, 1, 0x01B4, 0x01A4, 0),
+};
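+
+/*
+ * Editorial note (illustrative): unlike the MT8183 tables earlier in
+ * this patch, the MT8516 PLL_B()/PLL() macros take no pcwibits
+ * argument, so .pcwibits stays zero and __mtk_pll_recalc_rate() keeps
+ * the default 7-bit integer part (e.g. 21 pcwbits leaves 14
+ * fractional bits).
+ */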
+
+static void __init mtk_apmixedsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return;
+ }
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+
+}
+CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8516-apmixedsys",
+ mtk_apmixedsys_init);
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index fb27b5bf30d9..33ab1731482f 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -227,10 +227,13 @@ struct mtk_pll_data {
unsigned int flags;
const struct clk_ops *ops;
u32 rst_bar_mask;
+ unsigned long fmin;
unsigned long fmax;
int pcwbits;
+ int pcwibits;
uint32_t pcw_reg;
int pcw_shift;
+ uint32_t pcw_chg_reg;
const struct mtk_pll_div_table *div_table;
const char *parent_name;
};
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
new file mode 100644
index 000000000000..76f9cd039195
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_clk_mux, hw);
+}
+
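+/*
+ * The gate bit in these muxes disables the clock when set: the enable
+ * paths below clear it (via update_bits or the CLR register), and
+ * is_enabled() reports the clock as running while the bit reads 0.
+ */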
+static int mtk_clk_mux_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = BIT(mux->data->gate_shift);
+
+ return regmap_update_bits(mux->regmap, mux->data->mux_ofs,
+ mask, ~mask);
+}
+
+static void mtk_clk_mux_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = BIT(mux->data->gate_shift);
+
+ regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask, mask);
+}
+
+static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ return regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+}
+
+static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+
+ regmap_write(mux->regmap, mux->data->set_ofs,
+ BIT(mux->data->gate_shift));
+}
+
+static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+
+ return (val & BIT(mux->data->gate_shift)) == 0;
+}
+
+static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &val);
+ val = (val >> mux->data->mux_shift) & mask;
+
+ return val;
+}
+
+static int mtk_clk_mux_set_parent_lock(struct clk_hw *hw, u8 index)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_update_bits(mux->regmap, mux->data->mux_ofs, mask,
+ index << mux->data->mux_shift);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
+
+static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 mask = GENMASK(mux->data->mux_width - 1, 0);
+ u32 val, orig;
+ unsigned long flags = 0;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
+ val = (orig & ~(mask << mux->data->mux_shift))
+ | (index << mux->data->mux_shift);
+
+ if (val != orig) {
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ mask << mux->data->mux_shift);
+ regmap_write(mux->regmap, mux->data->set_ofs,
+ index << mux->data->mux_shift);
+
+ if (mux->data->upd_shift >= 0)
+ regmap_write(mux->regmap, mux->data->upd_ofs,
+ BIT(mux->data->upd_shift));
+ }
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return 0;
+}
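+
+/*
+ * Note: the set/clr variant above only reads mux_ofs to detect a
+ * change; the selector itself is rewritten by clearing the whole
+ * field through clr_ofs, writing the new index through set_ofs and,
+ * when upd_shift is valid (>= 0), writing the update bit at upd_ofs
+ * so the hardware applies the new selection.
+ */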
+
+const struct clk_ops mtk_mux_ops = {
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_clr_set_upd_ops = {
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+const struct clk_ops mtk_mux_gate_ops = {
+ .enable = mtk_clk_mux_enable,
+ .disable = mtk_clk_mux_disable,
+ .is_enabled = mtk_clk_mux_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_lock,
+};
+
+const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+};
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock)
+{
+ struct mtk_clk_mux *clk_mux;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
+ if (!clk_mux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = mux->name;
+ init.flags = mux->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = mux->parent_names;
+ init.num_parents = mux->num_parents;
+ init.ops = mux->ops;
+
+ clk_mux->regmap = regmap;
+ clk_mux->data = mux;
+ clk_mux->lock = lock;
+ clk_mux->hw.init = &init;
+
+ clk = clk_register(NULL, &clk_mux->hw);
+ if (IS_ERR(clk)) {
+ kfree(clk_mux);
+ return clk;
+ }
+
+ return clk;
+}
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+ int num, struct device_node *node,
+ spinlock_t *lock,
+ struct clk_onecell_data *clk_data)
+{
+ struct regmap *regmap;
+ struct clk *clk;
+ int i;
+
+ regmap = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap)) {
+ pr_err("Cannot find regmap for %pOF: %ld\n", node,
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_mux *mux = &muxes[i];
+
+ if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
+ clk = mtk_clk_register_mux(mux, regmap, lock);
+
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ mux->name, PTR_ERR(clk));
+ continue;
+ }
+
+ clk_data->clks[mux->id] = clk;
+ }
+ }
+
+ return 0;
+}
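+
+/*
+ * Usage example (taken from clk-mt8183.c earlier in this patch):
+ *
+ *   mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
+ *                          node, &mt8183_clk_lock, clk_data);
+ *
+ * The regmap is looked up from the syscon node itself, so callers
+ * pass a device node rather than an ioremapped base.
+ */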
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
new file mode 100644
index 000000000000..f5625f4d9e6c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Owen Chen <owen.chen@mediatek.com>
+ */
+
+#ifndef __DRV_CLK_MTK_MUX_H
+#define __DRV_CLK_MTK_MUX_H
+
+#include <linux/clk-provider.h>
+
+struct mtk_clk_mux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ const struct mtk_mux *data;
+ spinlock_t *lock;
+};
+
+struct mtk_mux {
+ int id;
+ const char *name;
+ const char * const *parent_names;
+ unsigned int flags;
+
+ u32 mux_ofs;
+ u32 set_ofs;
+ u32 clr_ofs;
+ u32 upd_ofs;
+
+ u8 mux_shift;
+ u8 mux_width;
+ u8 gate_shift;
+ s8 upd_shift;
+
+ const struct clk_ops *ops;
+
+ signed char num_parents;
+};
+
+extern const struct clk_ops mtk_mux_ops;
+extern const struct clk_ops mtk_mux_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_ops;
+extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+
+#define GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags, _ops) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &_ops, \
+ }
+
+#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags) \
+ GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd, _flags, \
+ mtk_mux_gate_clr_set_upd_ops)
+
+#define MUX_GATE_CLR_SET_UPD(_id, _name, _parents, _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, _width, \
+ _gate, _upd_ofs, _upd) \
+ MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ CLK_SET_RATE_PARENT)
+
+struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock);
+
+int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+ int num, struct device_node *node,
+ spinlock_t *lock,
+ struct clk_onecell_data *clk_data);
+
+#endif /* __DRV_CLK_MTK_MUX_H */
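
The two files above give MediaTek SoC clock drivers a table-driven way to describe "clr/set/upd" muxes. A minimal sketch of how a hypothetical SoC driver could use them follows; the clock IDs (CLK_TOP_UART_SEL, CLK_TOP_NR_CLK), register offsets, bit positions and parent names are invented for illustration, and mtk_alloc_clk_data()/of_clk_add_provider() come from the existing MediaTek and common clock code.

static DEFINE_SPINLOCK(hypo_clk_lock);

static const char * const hypo_uart_parents[] = { "clk26m", "univpll_d8" };

static const struct mtk_mux hypo_top_muxes[] = {
	/* id, name, parents, mux/set/clr offsets, shift, width, gate bit, upd offset, upd bit */
	MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", hypo_uart_parents,
			     0x40, 0x44, 0x48, 0, 2, 7, 0x04, 8),
};

static int hypo_topck_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct clk_onecell_data *clk_data;
	int ret;

	clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
	if (!clk_data)
		return -ENOMEM;

	/* Registers every mux in the table and fills clk_data->clks[] */
	ret = mtk_clk_register_muxes(hypo_top_muxes, ARRAY_SIZE(hypo_top_muxes),
				     node, &hypo_clk_lock, clk_data);
	if (ret)
		return ret;

	return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}

Because mtk_clk_register_muxes() only registers entries whose clk_data slot is still unset, the same table can be passed again on a deferred probe without creating duplicates.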
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index f54e4015b0b1..8d556fc99fed 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -27,11 +27,13 @@
#define CON0_BASE_EN BIT(0)
#define CON0_PWR_ON BIT(0)
#define CON0_ISO_EN BIT(1)
-#define CON0_PCW_CHG BIT(31)
+#define PCW_CHG_MASK BIT(31)
#define AUDPLL_TUNER_EN BIT(31)
#define POSTDIV_MASK 0x7
+
+/* 7-bit integer part by default; can be overridden with pcwibits. */
#define INTEGER_BITS 7
/*
@@ -49,6 +51,7 @@ struct mtk_clk_pll {
void __iomem *tuner_addr;
void __iomem *tuner_en_addr;
void __iomem *pcw_addr;
+ void __iomem *pcw_chg_addr;
const struct mtk_pll_data *data;
};
@@ -68,12 +71,15 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
int pcwbits = pll->data->pcwbits;
- int pcwfbits;
+ int pcwfbits = 0;
+ int ibits;
u64 vco;
u8 c = 0;
/* The fractional part of the PLL divider. */
- pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0;
+ ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+ if (pcwbits > ibits)
+ pcwfbits = pcwbits - ibits;
vco = (u64)fin * pcw;
@@ -88,13 +94,39 @@ static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
return ((unsigned long)vco + postdiv - 1) / postdiv;
}
+static void __mtk_pll_tuner_enable(struct mtk_clk_pll *pll)
+{
+ u32 r;
+
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
+ r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
+ writel(r, pll->tuner_addr);
+ }
+}
+
+static void __mtk_pll_tuner_disable(struct mtk_clk_pll *pll)
+{
+ u32 r;
+
+ if (pll->tuner_en_addr) {
+ r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
+ writel(r, pll->tuner_en_addr);
+ } else if (pll->tuner_addr) {
+ r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
+ writel(r, pll->tuner_addr);
+ }
+}
+
static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
int postdiv)
{
- u32 con1, val;
- int pll_en;
+ u32 chg, val;
- pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
+ /* disable tuner */
+ __mtk_pll_tuner_disable(pll);
/* set postdiv */
val = readl(pll->pd_addr);
@@ -112,18 +144,15 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
pll->data->pcw_shift);
val |= pcw << pll->data->pcw_shift;
writel(val, pll->pcw_addr);
-
- con1 = readl(pll->base_addr + REG_CON1);
-
- if (pll_en)
- con1 |= CON0_PCW_CHG;
-
- writel(con1, pll->base_addr + REG_CON1);
+ chg = readl(pll->pcw_chg_addr) | PCW_CHG_MASK;
+ writel(chg, pll->pcw_chg_addr);
if (pll->tuner_addr)
- writel(con1 + 1, pll->tuner_addr);
+ writel(val + 1, pll->tuner_addr);
+
+ /* restore tuner_en */
+ __mtk_pll_tuner_enable(pll);
- if (pll_en)
- udelay(20);
+ udelay(20);
}
/*
@@ -138,9 +167,10 @@ static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin)
{
- unsigned long fmin = 1000 * MHZ;
+ unsigned long fmin = pll->data->fmin ? pll->data->fmin : (1000 * MHZ);
const struct mtk_pll_div_table *div_table = pll->data->div_table;
u64 _pcw;
+ int ibits;
u32 val;
if (freq > pll->data->fmax)
@@ -164,7 +194,8 @@ static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
}
/* _pcw = freq * postdiv / fin * 2^pcwfbits */
- _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS);
+ ibits = pll->data->pcwibits ? pll->data->pcwibits : INTEGER_BITS;
+ _pcw = ((u64)freq << val) << (pll->data->pcwbits - ibits);
do_div(_pcw, fin);
*pcw = (u32)_pcw;
@@ -228,13 +259,7 @@ static int mtk_pll_prepare(struct clk_hw *hw)
r |= pll->data->en_mask;
writel(r, pll->base_addr + REG_CON0);
- if (pll->tuner_en_addr) {
- r = readl(pll->tuner_en_addr) | BIT(pll->data->tuner_en_bit);
- writel(r, pll->tuner_en_addr);
- } else if (pll->tuner_addr) {
- r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN;
- writel(r, pll->tuner_addr);
- }
+ __mtk_pll_tuner_enable(pll);
udelay(20);
@@ -258,13 +283,7 @@ static void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->base_addr + REG_CON0);
}
- if (pll->tuner_en_addr) {
- r = readl(pll->tuner_en_addr) & ~BIT(pll->data->tuner_en_bit);
- writel(r, pll->tuner_en_addr);
- } else if (pll->tuner_addr) {
- r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN;
- writel(r, pll->tuner_addr);
- }
+ __mtk_pll_tuner_disable(pll);
r = readl(pll->base_addr + REG_CON0);
r &= ~CON0_BASE_EN;
@@ -302,6 +321,10 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pwr_addr = base + data->pwr_reg;
pll->pd_addr = base + data->pd_reg;
pll->pcw_addr = base + data->pcw_reg;
+ if (data->pcw_chg_reg)
+ pll->pcw_chg_addr = base + data->pcw_chg_reg;
+ else
+ pll->pcw_chg_addr = pll->base_addr + REG_CON1;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
if (data->tuner_en_reg)
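
The recalc and set-rate paths above treat the PCW as a fixed-point value: the integer width defaults to INTEGER_BITS (7) and can now be overridden per PLL through pcwibits, with the remaining low bits forming the fractional part. A standalone sketch of that arithmetic, using purely hypothetical values, is given here; it simplifies the kernel helper by ignoring the rounding of the dropped fractional bits.

#include <stdint.h>

/* rate = fin * pcw / 2^pcwfbits / postdiv, rounded up on the postdiv */
static unsigned long pll_rate_sketch(uint64_t fin, uint32_t pcw,
				     int pcwbits, int ibits, int postdiv)
{
	int pcwfbits = pcwbits > ibits ? pcwbits - ibits : 0;
	uint64_t vco = fin * pcw;

	vco >>= pcwfbits;		/* drop the fractional bits */
	return (vco + postdiv - 1) / postdiv;
}

For example, fin = 26 MHz, pcwbits = 32, ibits = 8 and pcw = 0x10000000 give pcwfbits = 24, an integer multiplier of 16 and a VCO of 416 MHz; postdiv = 2 would then yield 208 MHz.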
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 7ab200b6c3bf..8028ff6f6610 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -20,18 +20,18 @@
#include "clk-phase.h"
#include "sclk-div.h"
-#define AXG_MST_IN_COUNT 8
-#define AXG_SLV_SCLK_COUNT 10
-#define AXG_SLV_LRCLK_COUNT 10
+#define AUD_MST_IN_COUNT 8
+#define AUD_SLV_SCLK_COUNT 10
+#define AUD_SLV_LRCLK_COUNT 10
-#define AXG_AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_GATE(_name, _reg, _bit, _pname, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_gate_data){ \
.offset = (_reg), \
.bit_idx = (_bit), \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_gate_ops, \
.parent_names = (const char *[]){ _pname }, \
.num_parents = 1, \
@@ -39,8 +39,8 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_MUX(_name, _reg, _mask, _shift, _dflags, _pnames, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_mux_data){ \
.offset = (_reg), \
.mask = (_mask), \
@@ -48,7 +48,7 @@ struct clk_regmap axg_##_name = { \
.flags = (_dflags), \
}, \
.hw.init = &(struct clk_init_data){ \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_mux_ops, \
.parent_names = (_pnames), \
.num_parents = ARRAY_SIZE(_pnames), \
@@ -56,8 +56,8 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+#define AUD_DIV(_name, _reg, _shift, _width, _dflags, _pname, _iflags) \
+struct clk_regmap aud_##_name = { \
.data = &(struct clk_regmap_div_data){ \
.offset = (_reg), \
.shift = (_shift), \
@@ -65,7 +65,7 @@ struct clk_regmap axg_##_name = { \
.flags = (_dflags), \
}, \
.hw.init = &(struct clk_init_data){ \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &clk_regmap_divider_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -73,109 +73,113 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_PCLK_GATE(_name, _bit) \
- AXG_AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "axg_audio_pclk", 0)
+#define AUD_PCLK_GATE(_name, _bit) \
+ AUD_GATE(_name, AUDIO_CLK_GATE_EN, _bit, "audio_pclk", 0)
/* Audio peripheral clocks */
-static AXG_PCLK_GATE(ddr_arb, 0);
-static AXG_PCLK_GATE(pdm, 1);
-static AXG_PCLK_GATE(tdmin_a, 2);
-static AXG_PCLK_GATE(tdmin_b, 3);
-static AXG_PCLK_GATE(tdmin_c, 4);
-static AXG_PCLK_GATE(tdmin_lb, 5);
-static AXG_PCLK_GATE(tdmout_a, 6);
-static AXG_PCLK_GATE(tdmout_b, 7);
-static AXG_PCLK_GATE(tdmout_c, 8);
-static AXG_PCLK_GATE(frddr_a, 9);
-static AXG_PCLK_GATE(frddr_b, 10);
-static AXG_PCLK_GATE(frddr_c, 11);
-static AXG_PCLK_GATE(toddr_a, 12);
-static AXG_PCLK_GATE(toddr_b, 13);
-static AXG_PCLK_GATE(toddr_c, 14);
-static AXG_PCLK_GATE(loopback, 15);
-static AXG_PCLK_GATE(spdifin, 16);
-static AXG_PCLK_GATE(spdifout, 17);
-static AXG_PCLK_GATE(resample, 18);
-static AXG_PCLK_GATE(power_detect, 19);
+static AUD_PCLK_GATE(ddr_arb, 0);
+static AUD_PCLK_GATE(pdm, 1);
+static AUD_PCLK_GATE(tdmin_a, 2);
+static AUD_PCLK_GATE(tdmin_b, 3);
+static AUD_PCLK_GATE(tdmin_c, 4);
+static AUD_PCLK_GATE(tdmin_lb, 5);
+static AUD_PCLK_GATE(tdmout_a, 6);
+static AUD_PCLK_GATE(tdmout_b, 7);
+static AUD_PCLK_GATE(tdmout_c, 8);
+static AUD_PCLK_GATE(frddr_a, 9);
+static AUD_PCLK_GATE(frddr_b, 10);
+static AUD_PCLK_GATE(frddr_c, 11);
+static AUD_PCLK_GATE(toddr_a, 12);
+static AUD_PCLK_GATE(toddr_b, 13);
+static AUD_PCLK_GATE(toddr_c, 14);
+static AUD_PCLK_GATE(loopback, 15);
+static AUD_PCLK_GATE(spdifin, 16);
+static AUD_PCLK_GATE(spdifout, 17);
+static AUD_PCLK_GATE(resample, 18);
+static AUD_PCLK_GATE(power_detect, 19);
+static AUD_PCLK_GATE(spdifout_b, 21);
/* Audio Master Clocks */
static const char * const mst_mux_parent_names[] = {
- "axg_mst_in0", "axg_mst_in1", "axg_mst_in2", "axg_mst_in3",
- "axg_mst_in4", "axg_mst_in5", "axg_mst_in6", "axg_mst_in7",
+ "aud_mst_in0", "aud_mst_in1", "aud_mst_in2", "aud_mst_in3",
+ "aud_mst_in4", "aud_mst_in5", "aud_mst_in6", "aud_mst_in7",
};
-#define AXG_MST_MUX(_name, _reg, _flag) \
- AXG_AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
- mst_mux_parent_names, CLK_SET_RATE_PARENT)
-
-#define AXG_MST_MCLK_MUX(_name, _reg) \
- AXG_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
-
-#define AXG_MST_SYS_MUX(_name, _reg) \
- AXG_MST_MUX(_name, _reg, 0)
-
-static AXG_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-
-#define AXG_MST_DIV(_name, _reg, _flag) \
- AXG_AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
- "axg_"#_name"_sel", CLK_SET_RATE_PARENT) \
-
-#define AXG_MST_MCLK_DIV(_name, _reg) \
- AXG_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
-
-#define AXG_MST_SYS_DIV(_name, _reg) \
- AXG_MST_DIV(_name, _reg, 0)
-
-static AXG_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
-
-#define AXG_MST_MCLK_GATE(_name, _reg) \
- AXG_AUD_GATE(_name, _reg, 31, "axg_"#_name"_div", \
- CLK_SET_RATE_PARENT)
-
-static AXG_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
-static AXG_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
-static AXG_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
-static AXG_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
-static AXG_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
-static AXG_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
-static AXG_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
-static AXG_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
-static AXG_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
-static AXG_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+#define AUD_MST_MUX(_name, _reg, _flag) \
+ AUD_MUX(_name##_sel, _reg, 0x7, 24, _flag, \
+ mst_mux_parent_names, CLK_SET_RATE_PARENT)
+
+#define AUD_MST_MCLK_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, CLK_MUX_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_MUX(_name, _reg) \
+ AUD_MST_MUX(_name, _reg, 0)
+
+static AUD_MST_MCLK_MUX(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_MUX(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_MUX(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_MUX(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_MUX(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_MUX(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_MUX(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_MUX(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_SYS_MUX(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_SYS_MUX(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_MUX(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+#define AUD_MST_DIV(_name, _reg, _flag) \
+ AUD_DIV(_name##_div, _reg, 0, 16, _flag, \
+ "aud_"#_name"_sel", CLK_SET_RATE_PARENT) \
+
+#define AUD_MST_MCLK_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, CLK_DIVIDER_ROUND_CLOSEST)
+
+#define AUD_MST_SYS_DIV(_name, _reg) \
+ AUD_MST_DIV(_name, _reg, 0)
+
+static AUD_MST_MCLK_DIV(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_DIV(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_DIV(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_DIV(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_DIV(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_DIV(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_DIV(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_DIV(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_SYS_DIV(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_SYS_DIV(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_DIV(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
+
+#define AUD_MST_MCLK_GATE(_name, _reg) \
+ AUD_GATE(_name, _reg, 31, "aud_"#_name"_div", \
+ CLK_SET_RATE_PARENT)
+
+static AUD_MST_MCLK_GATE(mst_a_mclk, AUDIO_MCLK_A_CTRL);
+static AUD_MST_MCLK_GATE(mst_b_mclk, AUDIO_MCLK_B_CTRL);
+static AUD_MST_MCLK_GATE(mst_c_mclk, AUDIO_MCLK_C_CTRL);
+static AUD_MST_MCLK_GATE(mst_d_mclk, AUDIO_MCLK_D_CTRL);
+static AUD_MST_MCLK_GATE(mst_e_mclk, AUDIO_MCLK_E_CTRL);
+static AUD_MST_MCLK_GATE(mst_f_mclk, AUDIO_MCLK_F_CTRL);
+static AUD_MST_MCLK_GATE(spdifout_clk, AUDIO_CLK_SPDIFOUT_CTRL);
+static AUD_MST_MCLK_GATE(spdifin_clk, AUDIO_CLK_SPDIFIN_CTRL);
+static AUD_MST_MCLK_GATE(pdm_dclk, AUDIO_CLK_PDMIN_CTRL0);
+static AUD_MST_MCLK_GATE(pdm_sysclk, AUDIO_CLK_PDMIN_CTRL1);
+static AUD_MST_MCLK_GATE(spdifout_b_clk, AUDIO_CLK_SPDIFOUT_B_CTRL);
/* Sample Clocks */
-#define AXG_MST_SCLK_PRE_EN(_name, _reg) \
- AXG_AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
- "axg_mst_"#_name"_mclk", 0)
-
-static AXG_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
+#define AUD_MST_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_pre_en, _reg, 31, \
+ "aud_mst_"#_name"_mclk", 0)
+
+static AUD_MST_SCLK_PRE_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_PRE_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_SCLK_DIV(_name, _reg, _div_shift, _div_width, \
_hi_shift, _hi_width, _pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+struct clk_regmap aud_##_name = { \
.data = &(struct meson_sclk_div_data) { \
.div = { \
.reg_off = (_reg), \
@@ -189,7 +193,7 @@ struct clk_regmap axg_##_name = { \
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &meson_sclk_div_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -197,32 +201,32 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_MST_SCLK_DIV(_name, _reg) \
- AXG_AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
- "axg_mst_"#_name"_sclk_pre_en", \
- CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_MST_SCLK_POST_EN(_name, _reg) \
- AXG_AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
- "axg_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
+#define AUD_MST_SCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_sclk_div, _reg, 20, 10, 0, 0, \
+ "aud_mst_"#_name"_sclk_pre_en", \
+ CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_MST_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(mst_##_name##_sclk_post_en, _reg, 30, \
+ "aud_mst_"#_name"_sclk_div", CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK_POST_EN(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_SCLK_POST_EN(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_TRIPHASE(_name, _reg, _width, _shift0, _shift1, _shift2, \
_pname, _iflags) \
-struct clk_regmap axg_##_name = { \
+struct clk_regmap aud_##_name = { \
.data = &(struct meson_clk_triphase_data) { \
.ph0 = { \
.reg_off = (_reg), \
@@ -241,7 +245,7 @@ struct clk_regmap axg_##_name = { \
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_"#_name, \
+ .name = "aud_"#_name, \
.ops = &meson_clk_triphase_ops, \
.parent_names = (const char *[]) { _pname }, \
.num_parents = 1, \
@@ -249,87 +253,87 @@ struct clk_regmap axg_##_name = { \
}, \
}
-#define AXG_MST_SCLK(_name, _reg) \
- AXG_AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
- "axg_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
-
-static AXG_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AXG_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AXG_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AXG_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AXG_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AXG_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
-
-#define AXG_MST_LRCLK_DIV(_name, _reg) \
- AXG_AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
- "axg_mst_"#_name"_sclk_post_en", 0) \
-
-static AXG_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
-static AXG_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
-
-#define AXG_MST_LRCLK(_name, _reg) \
- AXG_AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
- "axg_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
-
-static AXG_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
-static AXG_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
-static AXG_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
-static AXG_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
-static AXG_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
-static AXG_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+#define AUD_MST_SCLK(_name, _reg) \
+ AUD_TRIPHASE(mst_##_name##_sclk, _reg, 1, 0, 2, 4, \
+ "aud_mst_"#_name"_sclk_post_en", CLK_SET_RATE_PARENT)
+
+static AUD_MST_SCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AUD_MST_SCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AUD_MST_SCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AUD_MST_SCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AUD_MST_SCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AUD_MST_SCLK(f, AUDIO_MST_F_SCLK_CTRL1);
+
+#define AUD_MST_LRCLK_DIV(_name, _reg) \
+ AUD_SCLK_DIV(mst_##_name##_lrclk_div, _reg, 0, 10, 10, 10, \
+ "aud_mst_"#_name"_sclk_post_en", 0) \
+
+static AUD_MST_LRCLK_DIV(a, AUDIO_MST_A_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(b, AUDIO_MST_B_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(c, AUDIO_MST_C_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(d, AUDIO_MST_D_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(e, AUDIO_MST_E_SCLK_CTRL0);
+static AUD_MST_LRCLK_DIV(f, AUDIO_MST_F_SCLK_CTRL0);
+
+#define AUD_MST_LRCLK(_name, _reg) \
+ AUD_TRIPHASE(mst_##_name##_lrclk, _reg, 1, 1, 3, 5, \
+ "aud_mst_"#_name"_lrclk_div", CLK_SET_RATE_PARENT)
+
+static AUD_MST_LRCLK(a, AUDIO_MST_A_SCLK_CTRL1);
+static AUD_MST_LRCLK(b, AUDIO_MST_B_SCLK_CTRL1);
+static AUD_MST_LRCLK(c, AUDIO_MST_C_SCLK_CTRL1);
+static AUD_MST_LRCLK(d, AUDIO_MST_D_SCLK_CTRL1);
+static AUD_MST_LRCLK(e, AUDIO_MST_E_SCLK_CTRL1);
+static AUD_MST_LRCLK(f, AUDIO_MST_F_SCLK_CTRL1);
static const char * const tdm_sclk_parent_names[] = {
- "axg_mst_a_sclk", "axg_mst_b_sclk", "axg_mst_c_sclk",
- "axg_mst_d_sclk", "axg_mst_e_sclk", "axg_mst_f_sclk",
- "axg_slv_sclk0", "axg_slv_sclk1", "axg_slv_sclk2",
- "axg_slv_sclk3", "axg_slv_sclk4", "axg_slv_sclk5",
- "axg_slv_sclk6", "axg_slv_sclk7", "axg_slv_sclk8",
- "axg_slv_sclk9"
+ "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
+ "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
+ "aud_slv_sclk0", "aud_slv_sclk1", "aud_slv_sclk2",
+ "aud_slv_sclk3", "aud_slv_sclk4", "aud_slv_sclk5",
+ "aud_slv_sclk6", "aud_slv_sclk7", "aud_slv_sclk8",
+ "aud_slv_sclk9"
};
-#define AXG_TDM_SCLK_MUX(_name, _reg) \
- AXG_AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
+#define AUD_TDM_SCLK_MUX(_name, _reg) \
+ AUD_MUX(tdm##_name##_sclk_sel, _reg, 0xf, 24, \
CLK_MUX_ROUND_CLOSEST, \
tdm_sclk_parent_names, 0)
-static AXG_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK_PRE_EN(_name, _reg) \
- AXG_AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
- "axg_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
-
-static AXG_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK_POST_EN(_name, _reg) \
- AXG_AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
- "axg_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
-
-static AXG_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
-
-#define AXG_TDM_SCLK(_name, _reg) \
- struct clk_regmap axg_tdm##_name##_sclk = { \
+static AUD_TDM_SCLK_MUX(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_MUX(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_MUX(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_MUX(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_MUX(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_MUX(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_MUX(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK_PRE_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_pre_en, _reg, 31, \
+ "aud_tdm"#_name"_sclk_sel", CLK_SET_RATE_PARENT)
+
+static AUD_TDM_SCLK_PRE_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_PRE_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_PRE_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK_POST_EN(_name, _reg) \
+ AUD_GATE(tdm##_name##_sclk_post_en, _reg, 30, \
+ "aud_tdm"#_name"_sclk_pre_en", CLK_SET_RATE_PARENT)
+
+static AUD_TDM_SCLK_POST_EN(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK_POST_EN(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+#define AUD_TDM_SCLK(_name, _reg) \
+ struct clk_regmap aud_tdm##_name##_sclk = { \
.data = &(struct meson_clk_phase_data) { \
.ph = { \
.reg_off = (_reg), \
@@ -338,44 +342,83 @@ static AXG_TDM_SCLK_POST_EN(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
}, \
}, \
.hw.init = &(struct clk_init_data) { \
- .name = "axg_tdm"#_name"_sclk", \
+ .name = "aud_tdm"#_name"_sclk", \
.ops = &meson_clk_phase_ops, \
.parent_names = (const char *[]) \
- { "axg_tdm"#_name"_sclk_post_en" }, \
+ { "aud_tdm"#_name"_sclk_post_en" }, \
.num_parents = 1, \
.flags = CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT, \
}, \
}
-static AXG_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+static AUD_TDM_SCLK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_SCLK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_SCLK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_SCLK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_SCLK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_SCLK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_SCLK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
static const char * const tdm_lrclk_parent_names[] = {
- "axg_mst_a_lrclk", "axg_mst_b_lrclk", "axg_mst_c_lrclk",
- "axg_mst_d_lrclk", "axg_mst_e_lrclk", "axg_mst_f_lrclk",
- "axg_slv_lrclk0", "axg_slv_lrclk1", "axg_slv_lrclk2",
- "axg_slv_lrclk3", "axg_slv_lrclk4", "axg_slv_lrclk5",
- "axg_slv_lrclk6", "axg_slv_lrclk7", "axg_slv_lrclk8",
- "axg_slv_lrclk9"
+ "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
+ "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
+ "aud_slv_lrclk0", "aud_slv_lrclk1", "aud_slv_lrclk2",
+ "aud_slv_lrclk3", "aud_slv_lrclk4", "aud_slv_lrclk5",
+ "aud_slv_lrclk6", "aud_slv_lrclk7", "aud_slv_lrclk8",
+ "aud_slv_lrclk9"
};
-#define AXG_TDM_LRLCK(_name, _reg) \
- AXG_AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
- CLK_MUX_ROUND_CLOSEST, \
- tdm_lrclk_parent_names, 0)
+#define AUD_TDM_LRLCK(_name, _reg) \
+ AUD_MUX(tdm##_name##_lrclk, _reg, 0xf, 20, \
+ CLK_MUX_ROUND_CLOSEST, \
+ tdm_lrclk_parent_names, 0)
+
+static AUD_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
+static AUD_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
+static AUD_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
+static AUD_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
+static AUD_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
+static AUD_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
+static AUD_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+
+/* G12A pad control */
+#define AUD_TDM_PAD_CTRL(_name, _reg, _shift, _parents) \
+ AUD_MUX(tdm_##_name, _reg, 0x7, _shift, 0, _parents, \
+ CLK_SET_RATE_NO_REPARENT)
+
+static const char * const mclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_mclk", "aud_mst_b_mclk", "aud_mst_c_mclk",
+ "aud_mst_d_mclk", "aud_mst_e_mclk", "aud_mst_f_mclk",
+};
+
+static AUD_TDM_PAD_CTRL(mclk_pad_0, AUDIO_MST_PAD_CTRL0, 0,
+ mclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(mclk_pad_1, AUDIO_MST_PAD_CTRL0, 4,
+ mclk_pad_ctrl_parent_names);
+
+static const char * const lrclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_lrclk", "aud_mst_b_lrclk", "aud_mst_c_lrclk",
+ "aud_mst_d_lrclk", "aud_mst_e_lrclk", "aud_mst_f_lrclk",
+};
-static AXG_TDM_LRLCK(in_a, AUDIO_CLK_TDMIN_A_CTRL);
-static AXG_TDM_LRLCK(in_b, AUDIO_CLK_TDMIN_B_CTRL);
-static AXG_TDM_LRLCK(in_c, AUDIO_CLK_TDMIN_C_CTRL);
-static AXG_TDM_LRLCK(in_lb, AUDIO_CLK_TDMIN_LB_CTRL);
-static AXG_TDM_LRLCK(out_a, AUDIO_CLK_TDMOUT_A_CTRL);
-static AXG_TDM_LRLCK(out_b, AUDIO_CLK_TDMOUT_B_CTRL);
-static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
+static AUD_TDM_PAD_CTRL(lrclk_pad_0, AUDIO_MST_PAD_CTRL1, 16,
+ lrclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(lrclk_pad_1, AUDIO_MST_PAD_CTRL1, 20,
+ lrclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(lrclk_pad_2, AUDIO_MST_PAD_CTRL1, 24,
+ lrclk_pad_ctrl_parent_names);
+
+static const char * const sclk_pad_ctrl_parent_names[] = {
+ "aud_mst_a_sclk", "aud_mst_b_sclk", "aud_mst_c_sclk",
+ "aud_mst_d_sclk", "aud_mst_e_sclk", "aud_mst_f_sclk",
+};
+
+static AUD_TDM_PAD_CTRL(sclk_pad_0, AUDIO_MST_PAD_CTRL1, 0,
+ sclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(sclk_pad_1, AUDIO_MST_PAD_CTRL1, 4,
+ sclk_pad_ctrl_parent_names);
+static AUD_TDM_PAD_CTRL(sclk_pad_2, AUDIO_MST_PAD_CTRL1, 8,
+ sclk_pad_ctrl_parent_names);
/*
* Array of all clocks provided by this provider
@@ -383,255 +426,416 @@ static AXG_TDM_LRLCK(out_c, AUDIO_CLK_TDMOUT_C_CTRL);
*/
static struct clk_hw_onecell_data axg_audio_hw_onecell_data = {
.hws = {
- [AUD_CLKID_DDR_ARB] = &axg_ddr_arb.hw,
- [AUD_CLKID_PDM] = &axg_pdm.hw,
- [AUD_CLKID_TDMIN_A] = &axg_tdmin_a.hw,
- [AUD_CLKID_TDMIN_B] = &axg_tdmin_b.hw,
- [AUD_CLKID_TDMIN_C] = &axg_tdmin_c.hw,
- [AUD_CLKID_TDMIN_LB] = &axg_tdmin_lb.hw,
- [AUD_CLKID_TDMOUT_A] = &axg_tdmout_a.hw,
- [AUD_CLKID_TDMOUT_B] = &axg_tdmout_b.hw,
- [AUD_CLKID_TDMOUT_C] = &axg_tdmout_c.hw,
- [AUD_CLKID_FRDDR_A] = &axg_frddr_a.hw,
- [AUD_CLKID_FRDDR_B] = &axg_frddr_b.hw,
- [AUD_CLKID_FRDDR_C] = &axg_frddr_c.hw,
- [AUD_CLKID_TODDR_A] = &axg_toddr_a.hw,
- [AUD_CLKID_TODDR_B] = &axg_toddr_b.hw,
- [AUD_CLKID_TODDR_C] = &axg_toddr_c.hw,
- [AUD_CLKID_LOOPBACK] = &axg_loopback.hw,
- [AUD_CLKID_SPDIFIN] = &axg_spdifin.hw,
- [AUD_CLKID_SPDIFOUT] = &axg_spdifout.hw,
- [AUD_CLKID_RESAMPLE] = &axg_resample.hw,
- [AUD_CLKID_POWER_DETECT] = &axg_power_detect.hw,
- [AUD_CLKID_MST_A_MCLK_SEL] = &axg_mst_a_mclk_sel.hw,
- [AUD_CLKID_MST_B_MCLK_SEL] = &axg_mst_b_mclk_sel.hw,
- [AUD_CLKID_MST_C_MCLK_SEL] = &axg_mst_c_mclk_sel.hw,
- [AUD_CLKID_MST_D_MCLK_SEL] = &axg_mst_d_mclk_sel.hw,
- [AUD_CLKID_MST_E_MCLK_SEL] = &axg_mst_e_mclk_sel.hw,
- [AUD_CLKID_MST_F_MCLK_SEL] = &axg_mst_f_mclk_sel.hw,
- [AUD_CLKID_MST_A_MCLK_DIV] = &axg_mst_a_mclk_div.hw,
- [AUD_CLKID_MST_B_MCLK_DIV] = &axg_mst_b_mclk_div.hw,
- [AUD_CLKID_MST_C_MCLK_DIV] = &axg_mst_c_mclk_div.hw,
- [AUD_CLKID_MST_D_MCLK_DIV] = &axg_mst_d_mclk_div.hw,
- [AUD_CLKID_MST_E_MCLK_DIV] = &axg_mst_e_mclk_div.hw,
- [AUD_CLKID_MST_F_MCLK_DIV] = &axg_mst_f_mclk_div.hw,
- [AUD_CLKID_MST_A_MCLK] = &axg_mst_a_mclk.hw,
- [AUD_CLKID_MST_B_MCLK] = &axg_mst_b_mclk.hw,
- [AUD_CLKID_MST_C_MCLK] = &axg_mst_c_mclk.hw,
- [AUD_CLKID_MST_D_MCLK] = &axg_mst_d_mclk.hw,
- [AUD_CLKID_MST_E_MCLK] = &axg_mst_e_mclk.hw,
- [AUD_CLKID_MST_F_MCLK] = &axg_mst_f_mclk.hw,
- [AUD_CLKID_SPDIFOUT_CLK_SEL] = &axg_spdifout_clk_sel.hw,
- [AUD_CLKID_SPDIFOUT_CLK_DIV] = &axg_spdifout_clk_div.hw,
- [AUD_CLKID_SPDIFOUT_CLK] = &axg_spdifout_clk.hw,
- [AUD_CLKID_SPDIFIN_CLK_SEL] = &axg_spdifin_clk_sel.hw,
- [AUD_CLKID_SPDIFIN_CLK_DIV] = &axg_spdifin_clk_div.hw,
- [AUD_CLKID_SPDIFIN_CLK] = &axg_spdifin_clk.hw,
- [AUD_CLKID_PDM_DCLK_SEL] = &axg_pdm_dclk_sel.hw,
- [AUD_CLKID_PDM_DCLK_DIV] = &axg_pdm_dclk_div.hw,
- [AUD_CLKID_PDM_DCLK] = &axg_pdm_dclk.hw,
- [AUD_CLKID_PDM_SYSCLK_SEL] = &axg_pdm_sysclk_sel.hw,
- [AUD_CLKID_PDM_SYSCLK_DIV] = &axg_pdm_sysclk_div.hw,
- [AUD_CLKID_PDM_SYSCLK] = &axg_pdm_sysclk.hw,
- [AUD_CLKID_MST_A_SCLK_PRE_EN] = &axg_mst_a_sclk_pre_en.hw,
- [AUD_CLKID_MST_B_SCLK_PRE_EN] = &axg_mst_b_sclk_pre_en.hw,
- [AUD_CLKID_MST_C_SCLK_PRE_EN] = &axg_mst_c_sclk_pre_en.hw,
- [AUD_CLKID_MST_D_SCLK_PRE_EN] = &axg_mst_d_sclk_pre_en.hw,
- [AUD_CLKID_MST_E_SCLK_PRE_EN] = &axg_mst_e_sclk_pre_en.hw,
- [AUD_CLKID_MST_F_SCLK_PRE_EN] = &axg_mst_f_sclk_pre_en.hw,
- [AUD_CLKID_MST_A_SCLK_DIV] = &axg_mst_a_sclk_div.hw,
- [AUD_CLKID_MST_B_SCLK_DIV] = &axg_mst_b_sclk_div.hw,
- [AUD_CLKID_MST_C_SCLK_DIV] = &axg_mst_c_sclk_div.hw,
- [AUD_CLKID_MST_D_SCLK_DIV] = &axg_mst_d_sclk_div.hw,
- [AUD_CLKID_MST_E_SCLK_DIV] = &axg_mst_e_sclk_div.hw,
- [AUD_CLKID_MST_F_SCLK_DIV] = &axg_mst_f_sclk_div.hw,
- [AUD_CLKID_MST_A_SCLK_POST_EN] = &axg_mst_a_sclk_post_en.hw,
- [AUD_CLKID_MST_B_SCLK_POST_EN] = &axg_mst_b_sclk_post_en.hw,
- [AUD_CLKID_MST_C_SCLK_POST_EN] = &axg_mst_c_sclk_post_en.hw,
- [AUD_CLKID_MST_D_SCLK_POST_EN] = &axg_mst_d_sclk_post_en.hw,
- [AUD_CLKID_MST_E_SCLK_POST_EN] = &axg_mst_e_sclk_post_en.hw,
- [AUD_CLKID_MST_F_SCLK_POST_EN] = &axg_mst_f_sclk_post_en.hw,
- [AUD_CLKID_MST_A_SCLK] = &axg_mst_a_sclk.hw,
- [AUD_CLKID_MST_B_SCLK] = &axg_mst_b_sclk.hw,
- [AUD_CLKID_MST_C_SCLK] = &axg_mst_c_sclk.hw,
- [AUD_CLKID_MST_D_SCLK] = &axg_mst_d_sclk.hw,
- [AUD_CLKID_MST_E_SCLK] = &axg_mst_e_sclk.hw,
- [AUD_CLKID_MST_F_SCLK] = &axg_mst_f_sclk.hw,
- [AUD_CLKID_MST_A_LRCLK_DIV] = &axg_mst_a_lrclk_div.hw,
- [AUD_CLKID_MST_B_LRCLK_DIV] = &axg_mst_b_lrclk_div.hw,
- [AUD_CLKID_MST_C_LRCLK_DIV] = &axg_mst_c_lrclk_div.hw,
- [AUD_CLKID_MST_D_LRCLK_DIV] = &axg_mst_d_lrclk_div.hw,
- [AUD_CLKID_MST_E_LRCLK_DIV] = &axg_mst_e_lrclk_div.hw,
- [AUD_CLKID_MST_F_LRCLK_DIV] = &axg_mst_f_lrclk_div.hw,
- [AUD_CLKID_MST_A_LRCLK] = &axg_mst_a_lrclk.hw,
- [AUD_CLKID_MST_B_LRCLK] = &axg_mst_b_lrclk.hw,
- [AUD_CLKID_MST_C_LRCLK] = &axg_mst_c_lrclk.hw,
- [AUD_CLKID_MST_D_LRCLK] = &axg_mst_d_lrclk.hw,
- [AUD_CLKID_MST_E_LRCLK] = &axg_mst_e_lrclk.hw,
- [AUD_CLKID_MST_F_LRCLK] = &axg_mst_f_lrclk.hw,
- [AUD_CLKID_TDMIN_A_SCLK_SEL] = &axg_tdmin_a_sclk_sel.hw,
- [AUD_CLKID_TDMIN_B_SCLK_SEL] = &axg_tdmin_b_sclk_sel.hw,
- [AUD_CLKID_TDMIN_C_SCLK_SEL] = &axg_tdmin_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &axg_tdmin_lb_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &axg_tdmout_a_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &axg_tdmout_b_sclk_sel.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &axg_tdmout_c_sclk_sel.hw,
- [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &axg_tdmin_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &axg_tdmin_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &axg_tdmin_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &axg_tdmin_lb_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &axg_tdmout_a_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &axg_tdmout_b_sclk_pre_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &axg_tdmout_c_sclk_pre_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &axg_tdmin_a_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &axg_tdmin_b_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &axg_tdmin_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &axg_tdmin_lb_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &axg_tdmout_a_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &axg_tdmout_b_sclk_post_en.hw,
- [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &axg_tdmout_c_sclk_post_en.hw,
- [AUD_CLKID_TDMIN_A_SCLK] = &axg_tdmin_a_sclk.hw,
- [AUD_CLKID_TDMIN_B_SCLK] = &axg_tdmin_b_sclk.hw,
- [AUD_CLKID_TDMIN_C_SCLK] = &axg_tdmin_c_sclk.hw,
- [AUD_CLKID_TDMIN_LB_SCLK] = &axg_tdmin_lb_sclk.hw,
- [AUD_CLKID_TDMOUT_A_SCLK] = &axg_tdmout_a_sclk.hw,
- [AUD_CLKID_TDMOUT_B_SCLK] = &axg_tdmout_b_sclk.hw,
- [AUD_CLKID_TDMOUT_C_SCLK] = &axg_tdmout_c_sclk.hw,
- [AUD_CLKID_TDMIN_A_LRCLK] = &axg_tdmin_a_lrclk.hw,
- [AUD_CLKID_TDMIN_B_LRCLK] = &axg_tdmin_b_lrclk.hw,
- [AUD_CLKID_TDMIN_C_LRCLK] = &axg_tdmin_c_lrclk.hw,
- [AUD_CLKID_TDMIN_LB_LRCLK] = &axg_tdmin_lb_lrclk.hw,
- [AUD_CLKID_TDMOUT_A_LRCLK] = &axg_tdmout_a_lrclk.hw,
- [AUD_CLKID_TDMOUT_B_LRCLK] = &axg_tdmout_b_lrclk.hw,
- [AUD_CLKID_TDMOUT_C_LRCLK] = &axg_tdmout_c_lrclk.hw,
+ [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &aud_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
-/* Convenience table to populate regmap in .probe() */
-static struct clk_regmap *const axg_audio_clk_regmaps[] = {
- &axg_ddr_arb,
- &axg_pdm,
- &axg_tdmin_a,
- &axg_tdmin_b,
- &axg_tdmin_c,
- &axg_tdmin_lb,
- &axg_tdmout_a,
- &axg_tdmout_b,
- &axg_tdmout_c,
- &axg_frddr_a,
- &axg_frddr_b,
- &axg_frddr_c,
- &axg_toddr_a,
- &axg_toddr_b,
- &axg_toddr_c,
- &axg_loopback,
- &axg_spdifin,
- &axg_spdifout,
- &axg_resample,
- &axg_power_detect,
- &axg_mst_a_mclk_sel,
- &axg_mst_b_mclk_sel,
- &axg_mst_c_mclk_sel,
- &axg_mst_d_mclk_sel,
- &axg_mst_e_mclk_sel,
- &axg_mst_f_mclk_sel,
- &axg_mst_a_mclk_div,
- &axg_mst_b_mclk_div,
- &axg_mst_c_mclk_div,
- &axg_mst_d_mclk_div,
- &axg_mst_e_mclk_div,
- &axg_mst_f_mclk_div,
- &axg_mst_a_mclk,
- &axg_mst_b_mclk,
- &axg_mst_c_mclk,
- &axg_mst_d_mclk,
- &axg_mst_e_mclk,
- &axg_mst_f_mclk,
- &axg_spdifout_clk_sel,
- &axg_spdifout_clk_div,
- &axg_spdifout_clk,
- &axg_spdifin_clk_sel,
- &axg_spdifin_clk_div,
- &axg_spdifin_clk,
- &axg_pdm_dclk_sel,
- &axg_pdm_dclk_div,
- &axg_pdm_dclk,
- &axg_pdm_sysclk_sel,
- &axg_pdm_sysclk_div,
- &axg_pdm_sysclk,
- &axg_mst_a_sclk_pre_en,
- &axg_mst_b_sclk_pre_en,
- &axg_mst_c_sclk_pre_en,
- &axg_mst_d_sclk_pre_en,
- &axg_mst_e_sclk_pre_en,
- &axg_mst_f_sclk_pre_en,
- &axg_mst_a_sclk_div,
- &axg_mst_b_sclk_div,
- &axg_mst_c_sclk_div,
- &axg_mst_d_sclk_div,
- &axg_mst_e_sclk_div,
- &axg_mst_f_sclk_div,
- &axg_mst_a_sclk_post_en,
- &axg_mst_b_sclk_post_en,
- &axg_mst_c_sclk_post_en,
- &axg_mst_d_sclk_post_en,
- &axg_mst_e_sclk_post_en,
- &axg_mst_f_sclk_post_en,
- &axg_mst_a_sclk,
- &axg_mst_b_sclk,
- &axg_mst_c_sclk,
- &axg_mst_d_sclk,
- &axg_mst_e_sclk,
- &axg_mst_f_sclk,
- &axg_mst_a_lrclk_div,
- &axg_mst_b_lrclk_div,
- &axg_mst_c_lrclk_div,
- &axg_mst_d_lrclk_div,
- &axg_mst_e_lrclk_div,
- &axg_mst_f_lrclk_div,
- &axg_mst_a_lrclk,
- &axg_mst_b_lrclk,
- &axg_mst_c_lrclk,
- &axg_mst_d_lrclk,
- &axg_mst_e_lrclk,
- &axg_mst_f_lrclk,
- &axg_tdmin_a_sclk_sel,
- &axg_tdmin_b_sclk_sel,
- &axg_tdmin_c_sclk_sel,
- &axg_tdmin_lb_sclk_sel,
- &axg_tdmout_a_sclk_sel,
- &axg_tdmout_b_sclk_sel,
- &axg_tdmout_c_sclk_sel,
- &axg_tdmin_a_sclk_pre_en,
- &axg_tdmin_b_sclk_pre_en,
- &axg_tdmin_c_sclk_pre_en,
- &axg_tdmin_lb_sclk_pre_en,
- &axg_tdmout_a_sclk_pre_en,
- &axg_tdmout_b_sclk_pre_en,
- &axg_tdmout_c_sclk_pre_en,
- &axg_tdmin_a_sclk_post_en,
- &axg_tdmin_b_sclk_post_en,
- &axg_tdmin_c_sclk_post_en,
- &axg_tdmin_lb_sclk_post_en,
- &axg_tdmout_a_sclk_post_en,
- &axg_tdmout_b_sclk_post_en,
- &axg_tdmout_c_sclk_post_en,
- &axg_tdmin_a_sclk,
- &axg_tdmin_b_sclk,
- &axg_tdmin_c_sclk,
- &axg_tdmin_lb_sclk,
- &axg_tdmout_a_sclk,
- &axg_tdmout_b_sclk,
- &axg_tdmout_c_sclk,
- &axg_tdmin_a_lrclk,
- &axg_tdmin_b_lrclk,
- &axg_tdmin_c_lrclk,
- &axg_tdmin_lb_lrclk,
- &axg_tdmout_a_lrclk,
- &axg_tdmout_b_lrclk,
- &axg_tdmout_c_lrclk,
+/*
+ * Array of all G12A clocks provided by this provider
+ * The input clocks of the controller will be populated at runtime
+ */
+static struct clk_hw_onecell_data g12a_audio_hw_onecell_data = {
+ .hws = {
+ [AUD_CLKID_DDR_ARB] = &aud_ddr_arb.hw,
+ [AUD_CLKID_PDM] = &aud_pdm.hw,
+ [AUD_CLKID_TDMIN_A] = &aud_tdmin_a.hw,
+ [AUD_CLKID_TDMIN_B] = &aud_tdmin_b.hw,
+ [AUD_CLKID_TDMIN_C] = &aud_tdmin_c.hw,
+ [AUD_CLKID_TDMIN_LB] = &aud_tdmin_lb.hw,
+ [AUD_CLKID_TDMOUT_A] = &aud_tdmout_a.hw,
+ [AUD_CLKID_TDMOUT_B] = &aud_tdmout_b.hw,
+ [AUD_CLKID_TDMOUT_C] = &aud_tdmout_c.hw,
+ [AUD_CLKID_FRDDR_A] = &aud_frddr_a.hw,
+ [AUD_CLKID_FRDDR_B] = &aud_frddr_b.hw,
+ [AUD_CLKID_FRDDR_C] = &aud_frddr_c.hw,
+ [AUD_CLKID_TODDR_A] = &aud_toddr_a.hw,
+ [AUD_CLKID_TODDR_B] = &aud_toddr_b.hw,
+ [AUD_CLKID_TODDR_C] = &aud_toddr_c.hw,
+ [AUD_CLKID_LOOPBACK] = &aud_loopback.hw,
+ [AUD_CLKID_SPDIFIN] = &aud_spdifin.hw,
+ [AUD_CLKID_SPDIFOUT] = &aud_spdifout.hw,
+ [AUD_CLKID_RESAMPLE] = &aud_resample.hw,
+ [AUD_CLKID_POWER_DETECT] = &aud_power_detect.hw,
+ [AUD_CLKID_SPDIFOUT_B] = &aud_spdifout_b.hw,
+ [AUD_CLKID_MST_A_MCLK_SEL] = &aud_mst_a_mclk_sel.hw,
+ [AUD_CLKID_MST_B_MCLK_SEL] = &aud_mst_b_mclk_sel.hw,
+ [AUD_CLKID_MST_C_MCLK_SEL] = &aud_mst_c_mclk_sel.hw,
+ [AUD_CLKID_MST_D_MCLK_SEL] = &aud_mst_d_mclk_sel.hw,
+ [AUD_CLKID_MST_E_MCLK_SEL] = &aud_mst_e_mclk_sel.hw,
+ [AUD_CLKID_MST_F_MCLK_SEL] = &aud_mst_f_mclk_sel.hw,
+ [AUD_CLKID_MST_A_MCLK_DIV] = &aud_mst_a_mclk_div.hw,
+ [AUD_CLKID_MST_B_MCLK_DIV] = &aud_mst_b_mclk_div.hw,
+ [AUD_CLKID_MST_C_MCLK_DIV] = &aud_mst_c_mclk_div.hw,
+ [AUD_CLKID_MST_D_MCLK_DIV] = &aud_mst_d_mclk_div.hw,
+ [AUD_CLKID_MST_E_MCLK_DIV] = &aud_mst_e_mclk_div.hw,
+ [AUD_CLKID_MST_F_MCLK_DIV] = &aud_mst_f_mclk_div.hw,
+ [AUD_CLKID_MST_A_MCLK] = &aud_mst_a_mclk.hw,
+ [AUD_CLKID_MST_B_MCLK] = &aud_mst_b_mclk.hw,
+ [AUD_CLKID_MST_C_MCLK] = &aud_mst_c_mclk.hw,
+ [AUD_CLKID_MST_D_MCLK] = &aud_mst_d_mclk.hw,
+ [AUD_CLKID_MST_E_MCLK] = &aud_mst_e_mclk.hw,
+ [AUD_CLKID_MST_F_MCLK] = &aud_mst_f_mclk.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_SEL] = &aud_spdifout_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_CLK_DIV] = &aud_spdifout_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_CLK] = &aud_spdifout_clk.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_SEL] = &aud_spdifout_b_clk_sel.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK_DIV] = &aud_spdifout_b_clk_div.hw,
+ [AUD_CLKID_SPDIFOUT_B_CLK] = &aud_spdifout_b_clk.hw,
+ [AUD_CLKID_SPDIFIN_CLK_SEL] = &aud_spdifin_clk_sel.hw,
+ [AUD_CLKID_SPDIFIN_CLK_DIV] = &aud_spdifin_clk_div.hw,
+ [AUD_CLKID_SPDIFIN_CLK] = &aud_spdifin_clk.hw,
+ [AUD_CLKID_PDM_DCLK_SEL] = &aud_pdm_dclk_sel.hw,
+ [AUD_CLKID_PDM_DCLK_DIV] = &aud_pdm_dclk_div.hw,
+ [AUD_CLKID_PDM_DCLK] = &aud_pdm_dclk.hw,
+ [AUD_CLKID_PDM_SYSCLK_SEL] = &aud_pdm_sysclk_sel.hw,
+ [AUD_CLKID_PDM_SYSCLK_DIV] = &aud_pdm_sysclk_div.hw,
+ [AUD_CLKID_PDM_SYSCLK] = &aud_pdm_sysclk.hw,
+ [AUD_CLKID_MST_A_SCLK_PRE_EN] = &aud_mst_a_sclk_pre_en.hw,
+ [AUD_CLKID_MST_B_SCLK_PRE_EN] = &aud_mst_b_sclk_pre_en.hw,
+ [AUD_CLKID_MST_C_SCLK_PRE_EN] = &aud_mst_c_sclk_pre_en.hw,
+ [AUD_CLKID_MST_D_SCLK_PRE_EN] = &aud_mst_d_sclk_pre_en.hw,
+ [AUD_CLKID_MST_E_SCLK_PRE_EN] = &aud_mst_e_sclk_pre_en.hw,
+ [AUD_CLKID_MST_F_SCLK_PRE_EN] = &aud_mst_f_sclk_pre_en.hw,
+ [AUD_CLKID_MST_A_SCLK_DIV] = &aud_mst_a_sclk_div.hw,
+ [AUD_CLKID_MST_B_SCLK_DIV] = &aud_mst_b_sclk_div.hw,
+ [AUD_CLKID_MST_C_SCLK_DIV] = &aud_mst_c_sclk_div.hw,
+ [AUD_CLKID_MST_D_SCLK_DIV] = &aud_mst_d_sclk_div.hw,
+ [AUD_CLKID_MST_E_SCLK_DIV] = &aud_mst_e_sclk_div.hw,
+ [AUD_CLKID_MST_F_SCLK_DIV] = &aud_mst_f_sclk_div.hw,
+ [AUD_CLKID_MST_A_SCLK_POST_EN] = &aud_mst_a_sclk_post_en.hw,
+ [AUD_CLKID_MST_B_SCLK_POST_EN] = &aud_mst_b_sclk_post_en.hw,
+ [AUD_CLKID_MST_C_SCLK_POST_EN] = &aud_mst_c_sclk_post_en.hw,
+ [AUD_CLKID_MST_D_SCLK_POST_EN] = &aud_mst_d_sclk_post_en.hw,
+ [AUD_CLKID_MST_E_SCLK_POST_EN] = &aud_mst_e_sclk_post_en.hw,
+ [AUD_CLKID_MST_F_SCLK_POST_EN] = &aud_mst_f_sclk_post_en.hw,
+ [AUD_CLKID_MST_A_SCLK] = &aud_mst_a_sclk.hw,
+ [AUD_CLKID_MST_B_SCLK] = &aud_mst_b_sclk.hw,
+ [AUD_CLKID_MST_C_SCLK] = &aud_mst_c_sclk.hw,
+ [AUD_CLKID_MST_D_SCLK] = &aud_mst_d_sclk.hw,
+ [AUD_CLKID_MST_E_SCLK] = &aud_mst_e_sclk.hw,
+ [AUD_CLKID_MST_F_SCLK] = &aud_mst_f_sclk.hw,
+ [AUD_CLKID_MST_A_LRCLK_DIV] = &aud_mst_a_lrclk_div.hw,
+ [AUD_CLKID_MST_B_LRCLK_DIV] = &aud_mst_b_lrclk_div.hw,
+ [AUD_CLKID_MST_C_LRCLK_DIV] = &aud_mst_c_lrclk_div.hw,
+ [AUD_CLKID_MST_D_LRCLK_DIV] = &aud_mst_d_lrclk_div.hw,
+ [AUD_CLKID_MST_E_LRCLK_DIV] = &aud_mst_e_lrclk_div.hw,
+ [AUD_CLKID_MST_F_LRCLK_DIV] = &aud_mst_f_lrclk_div.hw,
+ [AUD_CLKID_MST_A_LRCLK] = &aud_mst_a_lrclk.hw,
+ [AUD_CLKID_MST_B_LRCLK] = &aud_mst_b_lrclk.hw,
+ [AUD_CLKID_MST_C_LRCLK] = &aud_mst_c_lrclk.hw,
+ [AUD_CLKID_MST_D_LRCLK] = &aud_mst_d_lrclk.hw,
+ [AUD_CLKID_MST_E_LRCLK] = &aud_mst_e_lrclk.hw,
+ [AUD_CLKID_MST_F_LRCLK] = &aud_mst_f_lrclk.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_SEL] = &aud_tdmin_a_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_SEL] = &aud_tdmin_b_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_SEL] = &aud_tdmin_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_SEL] = &aud_tdmin_lb_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_SEL] = &aud_tdmout_a_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_SEL] = &aud_tdmout_b_sclk_sel.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_SEL] = &aud_tdmout_c_sclk_sel.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_PRE_EN] = &aud_tdmin_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_PRE_EN] = &aud_tdmin_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_PRE_EN] = &aud_tdmin_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_PRE_EN] = &aud_tdmin_lb_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_PRE_EN] = &aud_tdmout_a_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_PRE_EN] = &aud_tdmout_b_sclk_pre_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_PRE_EN] = &aud_tdmout_c_sclk_pre_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK_POST_EN] = &aud_tdmin_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_B_SCLK_POST_EN] = &aud_tdmin_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_C_SCLK_POST_EN] = &aud_tdmin_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK_POST_EN] = &aud_tdmin_lb_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK_POST_EN] = &aud_tdmout_a_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK_POST_EN] = &aud_tdmout_b_sclk_post_en.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK_POST_EN] = &aud_tdmout_c_sclk_post_en.hw,
+ [AUD_CLKID_TDMIN_A_SCLK] = &aud_tdmin_a_sclk.hw,
+ [AUD_CLKID_TDMIN_B_SCLK] = &aud_tdmin_b_sclk.hw,
+ [AUD_CLKID_TDMIN_C_SCLK] = &aud_tdmin_c_sclk.hw,
+ [AUD_CLKID_TDMIN_LB_SCLK] = &aud_tdmin_lb_sclk.hw,
+ [AUD_CLKID_TDMOUT_A_SCLK] = &aud_tdmout_a_sclk.hw,
+ [AUD_CLKID_TDMOUT_B_SCLK] = &aud_tdmout_b_sclk.hw,
+ [AUD_CLKID_TDMOUT_C_SCLK] = &aud_tdmout_c_sclk.hw,
+ [AUD_CLKID_TDMIN_A_LRCLK] = &aud_tdmin_a_lrclk.hw,
+ [AUD_CLKID_TDMIN_B_LRCLK] = &aud_tdmin_b_lrclk.hw,
+ [AUD_CLKID_TDMIN_C_LRCLK] = &aud_tdmin_c_lrclk.hw,
+ [AUD_CLKID_TDMIN_LB_LRCLK] = &aud_tdmin_lb_lrclk.hw,
+ [AUD_CLKID_TDMOUT_A_LRCLK] = &aud_tdmout_a_lrclk.hw,
+ [AUD_CLKID_TDMOUT_B_LRCLK] = &aud_tdmout_b_lrclk.hw,
+ [AUD_CLKID_TDMOUT_C_LRCLK] = &aud_tdmout_c_lrclk.hw,
+ [AUD_CLKID_TDM_MCLK_PAD0] = &aud_tdm_mclk_pad_0.hw,
+ [AUD_CLKID_TDM_MCLK_PAD1] = &aud_tdm_mclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD0] = &aud_tdm_lrclk_pad_0.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD1] = &aud_tdm_lrclk_pad_1.hw,
+ [AUD_CLKID_TDM_LRCLK_PAD2] = &aud_tdm_lrclk_pad_2.hw,
+ [AUD_CLKID_TDM_SCLK_PAD0] = &aud_tdm_sclk_pad_0.hw,
+ [AUD_CLKID_TDM_SCLK_PAD1] = &aud_tdm_sclk_pad_1.hw,
+ [AUD_CLKID_TDM_SCLK_PAD2] = &aud_tdm_sclk_pad_2.hw,
+ [NR_CLKS] = NULL,
+ },
+ .num = NR_CLKS,
+};
+
+/*
+ * Convenience table to populate regmap in .probe().
+ * Note that this table is shared between both AXG and G12A,
+ * with the spdifout_b clocks being exclusive to G12A. Since those
+ * clocks are not declared within the AXG onecell table, there is
+ * no need for separate AXG/G12A regmap tables.
+ */
+static struct clk_regmap *const aud_clk_regmaps[] = {
+ &aud_ddr_arb,
+ &aud_pdm,
+ &aud_tdmin_a,
+ &aud_tdmin_b,
+ &aud_tdmin_c,
+ &aud_tdmin_lb,
+ &aud_tdmout_a,
+ &aud_tdmout_b,
+ &aud_tdmout_c,
+ &aud_frddr_a,
+ &aud_frddr_b,
+ &aud_frddr_c,
+ &aud_toddr_a,
+ &aud_toddr_b,
+ &aud_toddr_c,
+ &aud_loopback,
+ &aud_spdifin,
+ &aud_spdifout,
+ &aud_resample,
+ &aud_power_detect,
+ &aud_spdifout_b,
+ &aud_mst_a_mclk_sel,
+ &aud_mst_b_mclk_sel,
+ &aud_mst_c_mclk_sel,
+ &aud_mst_d_mclk_sel,
+ &aud_mst_e_mclk_sel,
+ &aud_mst_f_mclk_sel,
+ &aud_mst_a_mclk_div,
+ &aud_mst_b_mclk_div,
+ &aud_mst_c_mclk_div,
+ &aud_mst_d_mclk_div,
+ &aud_mst_e_mclk_div,
+ &aud_mst_f_mclk_div,
+ &aud_mst_a_mclk,
+ &aud_mst_b_mclk,
+ &aud_mst_c_mclk,
+ &aud_mst_d_mclk,
+ &aud_mst_e_mclk,
+ &aud_mst_f_mclk,
+ &aud_spdifout_clk_sel,
+ &aud_spdifout_clk_div,
+ &aud_spdifout_clk,
+ &aud_spdifin_clk_sel,
+ &aud_spdifin_clk_div,
+ &aud_spdifin_clk,
+ &aud_pdm_dclk_sel,
+ &aud_pdm_dclk_div,
+ &aud_pdm_dclk,
+ &aud_pdm_sysclk_sel,
+ &aud_pdm_sysclk_div,
+ &aud_pdm_sysclk,
+ &aud_mst_a_sclk_pre_en,
+ &aud_mst_b_sclk_pre_en,
+ &aud_mst_c_sclk_pre_en,
+ &aud_mst_d_sclk_pre_en,
+ &aud_mst_e_sclk_pre_en,
+ &aud_mst_f_sclk_pre_en,
+ &aud_mst_a_sclk_div,
+ &aud_mst_b_sclk_div,
+ &aud_mst_c_sclk_div,
+ &aud_mst_d_sclk_div,
+ &aud_mst_e_sclk_div,
+ &aud_mst_f_sclk_div,
+ &aud_mst_a_sclk_post_en,
+ &aud_mst_b_sclk_post_en,
+ &aud_mst_c_sclk_post_en,
+ &aud_mst_d_sclk_post_en,
+ &aud_mst_e_sclk_post_en,
+ &aud_mst_f_sclk_post_en,
+ &aud_mst_a_sclk,
+ &aud_mst_b_sclk,
+ &aud_mst_c_sclk,
+ &aud_mst_d_sclk,
+ &aud_mst_e_sclk,
+ &aud_mst_f_sclk,
+ &aud_mst_a_lrclk_div,
+ &aud_mst_b_lrclk_div,
+ &aud_mst_c_lrclk_div,
+ &aud_mst_d_lrclk_div,
+ &aud_mst_e_lrclk_div,
+ &aud_mst_f_lrclk_div,
+ &aud_mst_a_lrclk,
+ &aud_mst_b_lrclk,
+ &aud_mst_c_lrclk,
+ &aud_mst_d_lrclk,
+ &aud_mst_e_lrclk,
+ &aud_mst_f_lrclk,
+ &aud_tdmin_a_sclk_sel,
+ &aud_tdmin_b_sclk_sel,
+ &aud_tdmin_c_sclk_sel,
+ &aud_tdmin_lb_sclk_sel,
+ &aud_tdmout_a_sclk_sel,
+ &aud_tdmout_b_sclk_sel,
+ &aud_tdmout_c_sclk_sel,
+ &aud_tdmin_a_sclk_pre_en,
+ &aud_tdmin_b_sclk_pre_en,
+ &aud_tdmin_c_sclk_pre_en,
+ &aud_tdmin_lb_sclk_pre_en,
+ &aud_tdmout_a_sclk_pre_en,
+ &aud_tdmout_b_sclk_pre_en,
+ &aud_tdmout_c_sclk_pre_en,
+ &aud_tdmin_a_sclk_post_en,
+ &aud_tdmin_b_sclk_post_en,
+ &aud_tdmin_c_sclk_post_en,
+ &aud_tdmin_lb_sclk_post_en,
+ &aud_tdmout_a_sclk_post_en,
+ &aud_tdmout_b_sclk_post_en,
+ &aud_tdmout_c_sclk_post_en,
+ &aud_tdmin_a_sclk,
+ &aud_tdmin_b_sclk,
+ &aud_tdmin_c_sclk,
+ &aud_tdmin_lb_sclk,
+ &aud_tdmout_a_sclk,
+ &aud_tdmout_b_sclk,
+ &aud_tdmout_c_sclk,
+ &aud_tdmin_a_lrclk,
+ &aud_tdmin_b_lrclk,
+ &aud_tdmin_c_lrclk,
+ &aud_tdmin_lb_lrclk,
+ &aud_tdmout_a_lrclk,
+ &aud_tdmout_b_lrclk,
+ &aud_tdmout_c_lrclk,
+ &aud_spdifout_b_clk_sel,
+ &aud_spdifout_b_clk_div,
+ &aud_spdifout_b_clk,
+ &aud_tdm_mclk_pad_0,
+ &aud_tdm_mclk_pad_1,
+ &aud_tdm_lrclk_pad_0,
+ &aud_tdm_lrclk_pad_1,
+ &aud_tdm_lrclk_pad_2,
+ &aud_tdm_sclk_pad_0,
+ &aud_tdm_sclk_pad_1,
+ &aud_tdm_sclk_pad_2,
};
static int devm_clk_get_enable(struct device *dev, char *id)
@@ -665,14 +869,13 @@ static int devm_clk_get_enable(struct device *dev, char *id)
}
static int axg_register_clk_hw_input(struct device *dev,
- const char *name,
- unsigned int clkid)
+ const char *name)
{
char *clk_name;
struct clk_hw *hw;
int err = 0;
- clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ clk_name = kasprintf(GFP_KERNEL, "aud_%s", name);
if (!clk_name)
return -ENOMEM;
@@ -686,8 +889,6 @@ static int axg_register_clk_hw_input(struct device *dev,
if (err != -EPROBE_DEFER)
dev_err(dev, "failed to get %s clock", name);
}
- } else {
- axg_audio_hw_onecell_data.hws[clkid] = hw;
}
kfree(clk_name);
@@ -696,8 +897,7 @@ static int axg_register_clk_hw_input(struct device *dev,
static int axg_register_clk_hw_inputs(struct device *dev,
const char *basename,
- unsigned int count,
- unsigned int clkid)
+ unsigned int count)
{
char *name;
int i, ret;
@@ -707,7 +907,7 @@ static int axg_register_clk_hw_inputs(struct device *dev,
if (!name)
return -ENOMEM;
- ret = axg_register_clk_hw_input(dev, name, clkid + i);
+ ret = axg_register_clk_hw_input(dev, name);
kfree(name);
if (ret)
return ret;
@@ -723,15 +923,24 @@ static const struct regmap_config axg_audio_regmap_cfg = {
.max_register = AUDIO_CLK_PDMIN_CTRL1,
};
+struct audioclk_data {
+ struct clk_hw_onecell_data *hw_onecell_data;
+};
+
static int axg_audio_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct audioclk_data *data;
struct regmap *map;
struct resource *res;
void __iomem *regs;
struct clk_hw *hw;
int ret, i;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs))
@@ -755,40 +964,35 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Register the peripheral input clock */
- hw = meson_clk_hw_register_input(dev, "pclk", "axg_audio_pclk", 0);
+ hw = meson_clk_hw_register_input(dev, "pclk", "audio_pclk", 0);
if (IS_ERR(hw))
return PTR_ERR(hw);
- axg_audio_hw_onecell_data.hws[AUD_CLKID_PCLK] = hw;
-
/* Register optional input master clocks */
ret = axg_register_clk_hw_inputs(dev, "mst_in",
- AXG_MST_IN_COUNT,
- AUD_CLKID_MST0);
+ AUD_MST_IN_COUNT);
if (ret)
return ret;
/* Register optional input slave sclks */
ret = axg_register_clk_hw_inputs(dev, "slv_sclk",
- AXG_SLV_SCLK_COUNT,
- AUD_CLKID_SLV_SCLK0);
+ AUD_SLV_SCLK_COUNT);
if (ret)
return ret;
/* Register optional input slave lrclks */
ret = axg_register_clk_hw_inputs(dev, "slv_lrclk",
- AXG_SLV_LRCLK_COUNT,
- AUD_CLKID_SLV_LRCLK0);
+ AUD_SLV_LRCLK_COUNT);
if (ret)
return ret;
/* Populate regmap for the regmap backed clocks */
- for (i = 0; i < ARRAY_SIZE(axg_audio_clk_regmaps); i++)
- axg_audio_clk_regmaps[i]->map = map;
+ for (i = 0; i < ARRAY_SIZE(aud_clk_regmaps); i++)
+ aud_clk_regmaps[i]->map = map;
/* Take care to skip the registered input clocks */
- for (i = AUD_CLKID_DDR_ARB; i < axg_audio_hw_onecell_data.num; i++) {
- hw = axg_audio_hw_onecell_data.hws[i];
+ for (i = AUD_CLKID_DDR_ARB; i < data->hw_onecell_data->num; i++) {
+ hw = data->hw_onecell_data->hws[i];
/* array might be sparse */
if (!hw)
continue;
@@ -802,12 +1006,25 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &axg_audio_hw_onecell_data);
+ data->hw_onecell_data);
}
+static const struct audioclk_data axg_audioclk_data = {
+ .hw_onecell_data = &axg_audio_hw_onecell_data,
+};
+
+static const struct audioclk_data g12a_audioclk_data = {
+ .hw_onecell_data = &g12a_audio_hw_onecell_data,
+};
+
static const struct of_device_id clkc_match_table[] = {
- { .compatible = "amlogic,axg-audio-clkc" },
- {}
+ {
+ .compatible = "amlogic,axg-audio-clkc",
+ .data = &axg_audioclk_data
+ }, {
+ .compatible = "amlogic,g12a-audio-clkc",
+ .data = &g12a_audioclk_data
+ }, {}
};
MODULE_DEVICE_TABLE(of, clkc_match_table);
@@ -820,6 +1037,6 @@ static struct platform_driver axg_audio_driver = {
};
module_platform_driver(axg_audio_driver);
-MODULE_DESCRIPTION("Amlogic A113x Audio Clock driver");
+MODULE_DESCRIPTION("Amlogic AXG/G12A Audio Clock driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");
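Both compatibles now hand their own clk_hw_onecell_data table to devm_of_clk_add_hw_provider(), so a consumer's clock-cells index is resolved by the generic onecell callback. The sketch below is not part of the patch; it only mirrors what of_clk_hw_onecell_get() does, to show why the tables are sized to NR_CLKS and why the sparse NULL entries are harmless.

#include <linux/clk-provider.h>
#include <linux/err.h>

/* Simplified view of the onecell lookup behind the provider above */
static struct clk_hw *onecell_lookup_sketch(struct clk_hw_onecell_data *data,
					    unsigned int idx)
{
	/* NR_CLKS bounds the table, so out-of-range indices are rejected */
	if (idx >= data->num)
		return ERR_PTR(-EINVAL);

	/* unused indices were left NULL ("array might be sparse") */
	return data->hws[idx];
}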
diff --git a/drivers/clk/meson/axg-audio.h b/drivers/clk/meson/axg-audio.h
index 7191b39c9d65..5d972d55d6c7 100644
--- a/drivers/clk/meson/axg-audio.h
+++ b/drivers/clk/meson/axg-audio.h
@@ -20,6 +20,8 @@
#define AUDIO_MCLK_D_CTRL 0x010
#define AUDIO_MCLK_E_CTRL 0x014
#define AUDIO_MCLK_F_CTRL 0x018
+#define AUDIO_MST_PAD_CTRL0 0x01c
+#define AUDIO_MST_PAD_CTRL1 0x020
#define AUDIO_MST_A_SCLK_CTRL0 0x040
#define AUDIO_MST_A_SCLK_CTRL1 0x044
#define AUDIO_MST_B_SCLK_CTRL0 0x048
@@ -45,21 +47,13 @@
#define AUDIO_CLK_LOCKER_CTRL 0x0A8
#define AUDIO_CLK_PDMIN_CTRL0 0x0AC
#define AUDIO_CLK_PDMIN_CTRL1 0x0B0
+#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4
/*
* CLKID index values
* These indices are entirely contrived and do not map onto the hardware.
*/
-#define AUD_CLKID_PCLK 0
-#define AUD_CLKID_MST0 1
-#define AUD_CLKID_MST1 2
-#define AUD_CLKID_MST2 3
-#define AUD_CLKID_MST3 4
-#define AUD_CLKID_MST4 5
-#define AUD_CLKID_MST5 6
-#define AUD_CLKID_MST6 7
-#define AUD_CLKID_MST7 8
#define AUD_CLKID_MST_A_MCLK_SEL 59
#define AUD_CLKID_MST_B_MCLK_SEL 60
#define AUD_CLKID_MST_C_MCLK_SEL 61
@@ -118,10 +112,12 @@
#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
/* include the CLKIDs which are part of the DT bindings */
#include <dt-bindings/clock/axg-audio-clkc.h>
-#define NR_CLKS 151
+#define NR_CLKS 163
#endif /*__AXG_AUDIO_CLKC_H */
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index 7a14ac9b2fec..ddb1e5634739 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -303,6 +303,16 @@ static int meson_clk_pll_is_enabled(struct clk_hw *hw)
return 1;
}
+static int meson_clk_pcie_pll_enable(struct clk_hw *hw)
+{
+ meson_clk_pll_init(hw);
+
+ if (meson_clk_pll_wait_lock(hw))
+ return -EIO;
+
+ return 0;
+}
+
static int meson_clk_pll_enable(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
@@ -387,6 +397,22 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ * To simplify, re-use the _init() op to enable the PLL and keep
+ * the other ops except set_rate since the rate is fixed.
+ */
+const struct clk_ops meson_clk_pcie_pll_ops = {
+ .recalc_rate = meson_clk_pll_recalc_rate,
+ .round_rate = meson_clk_pll_round_rate,
+ .is_enabled = meson_clk_pll_is_enabled,
+ .enable = meson_clk_pcie_pll_enable,
+ .disable = meson_clk_pll_disable
+};
+EXPORT_SYMBOL_GPL(meson_clk_pcie_pll_ops);
+
const struct clk_ops meson_clk_pll_ops = {
.init = meson_clk_pll_init,
.recalc_rate = meson_clk_pll_recalc_rate,
diff --git a/drivers/clk/meson/clk-pll.h b/drivers/clk/meson/clk-pll.h
index 55af2e285b1b..367efd0f6410 100644
--- a/drivers/clk/meson/clk-pll.h
+++ b/drivers/clk/meson/clk-pll.h
@@ -45,5 +45,6 @@ struct meson_clk_pll_data {
extern const struct clk_ops meson_clk_pll_ro_ops;
extern const struct clk_ops meson_clk_pll_ops;
+extern const struct clk_ops meson_clk_pcie_pll_ops;
#endif /* __MESON_CLK_PLL_H */
diff --git a/drivers/clk/meson/g12a-aoclk.h b/drivers/clk/meson/g12a-aoclk.h
index 04b0d5506641..a67c8a7cd7c4 100644
--- a/drivers/clk/meson/g12a-aoclk.h
+++ b/drivers/clk/meson/g12a-aoclk.h
@@ -16,9 +16,7 @@
* to expose, such as the internal muxes and dividers of composite clocks,
* will remain defined here.
*/
-#define CLKID_AO_SAR_ADC_SEL 16
#define CLKID_AO_SAR_ADC_DIV 17
-#define CLKID_AO_CTS_OSCIN 19
#define CLKID_AO_32K_PRE 20
#define CLKID_AO_32K_DIV 21
#define CLKID_AO_32K_SEL 22
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index f7b11e1eeebe..739f64fdf1e3 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -150,6 +150,318 @@ static struct clk_regmap g12a_sys_pll = {
},
};
+static struct clk_regmap g12a_sys_pll_div16_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "sys_pll_div16_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "sys_pll" },
+ .num_parents = 1,
+ /*
+ * This clock is used to debug the sys_pll range
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_fixed_factor g12a_sys_pll_div16 = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "sys_pll_div16",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "sys_pll_div16_en" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_premux0 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "fclk_div2",
+ "fclk_div3" },
+ .num_parents = 3,
+ },
+};
+
+/* Datasheet names this field as "mux0_divn_tcnt" */
+static struct clk_regmap g12a_cpu_clk_mux0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 4,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0_sel" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "postmux0" */
+static struct clk_regmap g12a_cpu_clk_postmux0 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn0",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0_sel",
+ "cpu_clk_dyn0_div" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_premux1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal",
+ "fclk_div2",
+ "fclk_div3" },
+ .num_parents = 3,
+ },
+};
+
+/* Datasheet names this field as "Mux1_divn_tcnt" */
+static struct clk_regmap g12a_cpu_clk_mux1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .shift = 20,
+ .width = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn1_sel" },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "postmux1" */
+static struct clk_regmap g12a_cpu_clk_postmux1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn1_sel",
+ "cpu_clk_dyn1_div" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "Final_dyn_mux_sel" */
+static struct clk_regmap g12a_cpu_clk_dyn = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 10,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn0",
+ "cpu_clk_dyn1" },
+ .num_parents = 2,
+ },
+};
+
+/* Datasheet names this field as "Final_mux_sel" */
+static struct clk_regmap g12a_cpu_clk = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x1,
+ .shift = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_dyn",
+ "sys_pll" },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_div16_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_div16_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ /*
+ * This clock is used to debug the cpu_clk range
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_fixed_factor g12a_cpu_clk_div16 = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div16",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div16_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_apb_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 3,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_apb_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_apb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_apb",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_apb_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_atb_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 6,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_atb_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_atb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 17,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_atb",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_atb_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_axi_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 9,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_axi_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_axi = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 18,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_axi",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_axi_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_trace_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .shift = 20,
+ .width = 3,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_trace_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_cpu_clk_trace = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cpu_clk_trace",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cpu_clk_trace_div" },
+ .num_parents = 1,
+ /*
+ * This clock is set by the ROM monitor code,
+ * Linux should not change it at runtime
+ */
+ },
+};
+
static const struct pll_mult_range g12a_gp0_pll_mult_range = {
.min = 55,
.max = 255,
@@ -302,6 +614,118 @@ static struct clk_regmap g12a_hifi_pll = {
},
};
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single-entry table for recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_names = (const char *[]){ IN_PREFIX "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "pcie_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "pcie_pll_dco_div2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "pcie_pll_od" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap g12a_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -1071,6 +1495,151 @@ static struct clk_regmap g12a_vpu = {
},
};
+/* VDEC clocks */
+
+static const char * const g12a_vdec_parent_names[] = {
+ "fclk_div2p5", "fclk_div3", "fclk_div4", "fclk_div5", "fclk_div7",
+ "hifi_pll", "gp0_pll",
+};
+
+static struct clk_regmap g12a_vdec_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevcf_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevcf_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevcf_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevcf = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevcf",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevcf_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = g12a_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_vdec_hevc = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevc",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* VAPB Clock */
static const char * const g12a_vapb_parent_names[] = {
@@ -2167,6 +2736,39 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_MALI] = &g12a_mali.hw,
[CLKID_MPLL_5OM_DIV] = &g12a_mpll_50m_div.hw,
[CLKID_MPLL_5OM] = &g12a_mpll_50m.hw,
+ [CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
+ [CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
+ [CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
+ [CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
+ [CLKID_CPU_CLK_DIV16] = &g12a_cpu_clk_div16.hw,
+ [CLKID_CPU_CLK_APB_DIV] = &g12a_cpu_clk_apb_div.hw,
+ [CLKID_CPU_CLK_APB] = &g12a_cpu_clk_apb.hw,
+ [CLKID_CPU_CLK_ATB_DIV] = &g12a_cpu_clk_atb_div.hw,
+ [CLKID_CPU_CLK_ATB] = &g12a_cpu_clk_atb.hw,
+ [CLKID_CPU_CLK_AXI_DIV] = &g12a_cpu_clk_axi_div.hw,
+ [CLKID_CPU_CLK_AXI] = &g12a_cpu_clk_axi.hw,
+ [CLKID_CPU_CLK_TRACE_DIV] = &g12a_cpu_clk_trace_div.hw,
+ [CLKID_CPU_CLK_TRACE] = &g12a_cpu_clk_trace.hw,
+ [CLKID_PCIE_PLL_DCO] = &g12a_pcie_pll_dco.hw,
+ [CLKID_PCIE_PLL_DCO_DIV2] = &g12a_pcie_pll_dco_div2.hw,
+ [CLKID_PCIE_PLL_OD] = &g12a_pcie_pll_od.hw,
+ [CLKID_PCIE_PLL] = &g12a_pcie_pll.hw,
+ [CLKID_VDEC_1_SEL] = &g12a_vdec_1_sel.hw,
+ [CLKID_VDEC_1_DIV] = &g12a_vdec_1_div.hw,
+ [CLKID_VDEC_1] = &g12a_vdec_1.hw,
+ [CLKID_VDEC_HEVC_SEL] = &g12a_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &g12a_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC] = &g12a_vdec_hevc.hw,
+ [CLKID_VDEC_HEVCF_SEL] = &g12a_vdec_hevcf_sel.hw,
+ [CLKID_VDEC_HEVCF_DIV] = &g12a_vdec_hevcf_div.hw,
+ [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2335,6 +2937,35 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&g12a_mali_1,
&g12a_mali,
&g12a_mpll_50m,
+ &g12a_sys_pll_div16_en,
+ &g12a_cpu_clk_premux0,
+ &g12a_cpu_clk_mux0_div,
+ &g12a_cpu_clk_postmux0,
+ &g12a_cpu_clk_premux1,
+ &g12a_cpu_clk_mux1_div,
+ &g12a_cpu_clk_postmux1,
+ &g12a_cpu_clk_dyn,
+ &g12a_cpu_clk,
+ &g12a_cpu_clk_div16_en,
+ &g12a_cpu_clk_apb_div,
+ &g12a_cpu_clk_apb,
+ &g12a_cpu_clk_atb_div,
+ &g12a_cpu_clk_atb,
+ &g12a_cpu_clk_axi_div,
+ &g12a_cpu_clk_axi,
+ &g12a_cpu_clk_trace_div,
+ &g12a_cpu_clk_trace,
+ &g12a_pcie_pll_od,
+ &g12a_pcie_pll_dco,
+ &g12a_vdec_1_sel,
+ &g12a_vdec_1_div,
+ &g12a_vdec_1,
+ &g12a_vdec_hevc_sel,
+ &g12a_vdec_hevc_div,
+ &g12a_vdec_hevc,
+ &g12a_vdec_hevcf_sel,
+ &g12a_vdec_hevcf_div,
+ &g12a_vdec_hevcf,
};
static const struct meson_eeclkc_data g12a_clkc_data = {
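A quick sanity check of the "very precise 100MHz" claim above: assuming the usual 24 MHz xtal input (not stated in the patch), the PLL_PARAMS(150, 1) entry, the OD value of 9 programmed by g12a_pcie_pll_init_regs (bits [20:16] of HHI_PCIE_PLL_CNTL0) and the two fixed /2 stages land exactly on 100 MHz. This is a standalone back-of-the-envelope check, not kernel code.

#include <assert.h>

int main(void)
{
	unsigned long long xtal = 24000000ULL;    /* assumed 24 MHz crystal      */
	unsigned long long dco  = xtal * 150 / 1; /* PLL_PARAMS(150, 1): 3.6 GHz */
	unsigned long long half = dco / 2;        /* pcie_pll_dco_div2: 1.8 GHz  */
	unsigned long long od   = half / 9;       /* pcie_pll_od, one-based /9   */
	unsigned long long out  = od / 2;         /* pcie_pll fixed /2: 100 MHz  */

	assert(out == 100000000ULL);
	return 0;
}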
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index f399dfe1401c..39c41af70804 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -50,6 +50,7 @@
#define HHI_GCLK_MPEG2 0x148
#define HHI_GCLK_OTHER 0x150
#define HHI_GCLK_OTHER2 0x154
+#define HHI_SYS_CPU_CLK_CNTL1 0x15c
#define HHI_VID_CLK_DIV 0x164
#define HHI_MPEG_CLK_CNTL 0x174
#define HHI_AUD_CLK_CNTL 0x178
@@ -166,8 +167,36 @@
#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_1_DIV 173
#define CLKID_MPLL_5OM_DIV 176
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
-#define NR_CLKS 178
+#define NR_CLKS 211
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
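The new cpu_clk_apb/atb/axi/trace dividers above all carry CLK_DIVIDER_POWER_OF_TWO, so their 3-bit register fields hold an exponent rather than the divisor itself. A minimal illustration of that mapping follows; the helper name is made up and the real work is done by the common clk divider code.

/* CLK_DIVIDER_POWER_OF_TWO: a field value n selects a divide ratio of 2^n */
static unsigned long pot_div_rate_sketch(unsigned long parent_rate,
					 unsigned int field_val)
{
	/* 3-bit field: 0 -> /1, 1 -> /2, ..., 7 -> /128 */
	return parent_rate >> field_val;
}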
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 576ad42252d0..37cf0f01bb5d 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1703,6 +1703,456 @@ static struct clk_regmap meson8b_mali = {
},
};
+static const struct pll_params_table meson8m2_gp_pll_params_table[] = {
+ PLL_PARAMS(182, 3),
+ { /* sentinel */ },
+};
+
+static struct clk_regmap meson8m2_gp_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ .l = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP_PLL_CNTL,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = meson8m2_gp_pll_params_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap meson8m2_gp_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP_PLL_CNTL,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "gp_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const meson8b_vpu_0_1_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7"
+};
+
+static const char * const meson8m2_vpu_0_1_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "gp_pll"
+};
+
+static struct clk_regmap meson8b_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8m2_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8m2_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_0_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8m2_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8m2_vpu_0_1_parent_names,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_0_1_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vpu_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vpu_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "vpu_0", "vpu_1" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static const char * const meson8b_vdec_parent_names[] = {
+ "fclk_div4", "fclk_div3", "fclk_div5", "fclk_div7", "mpll2", "mpll1"
+};
+
+static struct clk_regmap meson8b_vdec_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_1_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1_2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_1_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1_2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_1_2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_1_2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_1 = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC3_CLK_CNTL,
+ .mask = 0x1,
+ .shift = 15,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_1",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "vdec_1_1", "vdec_1_2" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hcodec_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hcodec_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hcodec_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hcodec = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hcodec",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hcodec_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_2_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = meson8b_vdec_parent_names,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parent_names),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdec_hevc_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vdec_hevc_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vdec_hevc = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDEC2_CLK_CNTL,
+ .mask = 0x1,
+ .shift = 31,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdec_hevc",
+ .ops = &clk_regmap_mux_ops,
+ /* TODO: The second parent is currently unknown */
+ .parent_names = (const char *[]){ "vdec_hevc_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -1966,6 +2416,22 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
[CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
[CLKID_MALI] = &meson8b_mali_0.hw,
+ [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU] = &meson8b_vpu_0.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -2152,6 +2618,240 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
[CLKID_MALI_1] = &meson8b_mali_1.hw,
[CLKID_MALI] = &meson8b_mali.hw,
+ [CLKID_VPU_0_SEL] = &meson8b_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU_0] = &meson8b_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &meson8b_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw,
+ [CLKID_VPU_1] = &meson8b_vpu_1.hw,
+ [CLKID_VPU] = &meson8b_vpu.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw,
+ [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
+ [CLK_NR_CLKS] = NULL,
+ },
+ .num = CLK_NR_CLKS,
+};
+
+static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
+ .hws = {
+ [CLKID_XTAL] = &meson8b_xtal.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_DDR] = &meson8b_ddr.hw,
+ [CLKID_DOS] = &meson8b_dos.hw,
+ [CLKID_ISA] = &meson8b_isa.hw,
+ [CLKID_PL301] = &meson8b_pl301.hw,
+ [CLKID_PERIPHS] = &meson8b_periphs.hw,
+ [CLKID_SPICC] = &meson8b_spicc.hw,
+ [CLKID_I2C] = &meson8b_i2c.hw,
+ [CLKID_SAR_ADC] = &meson8b_sar_adc.hw,
+ [CLKID_SMART_CARD] = &meson8b_smart_card.hw,
+ [CLKID_RNG0] = &meson8b_rng0.hw,
+ [CLKID_UART0] = &meson8b_uart0.hw,
+ [CLKID_SDHC] = &meson8b_sdhc.hw,
+ [CLKID_STREAM] = &meson8b_stream.hw,
+ [CLKID_ASYNC_FIFO] = &meson8b_async_fifo.hw,
+ [CLKID_SDIO] = &meson8b_sdio.hw,
+ [CLKID_ABUF] = &meson8b_abuf.hw,
+ [CLKID_HIU_IFACE] = &meson8b_hiu_iface.hw,
+ [CLKID_ASSIST_MISC] = &meson8b_assist_misc.hw,
+ [CLKID_SPI] = &meson8b_spi.hw,
+ [CLKID_I2S_SPDIF] = &meson8b_i2s_spdif.hw,
+ [CLKID_ETH] = &meson8b_eth.hw,
+ [CLKID_DEMUX] = &meson8b_demux.hw,
+ [CLKID_AIU_GLUE] = &meson8b_aiu_glue.hw,
+ [CLKID_IEC958] = &meson8b_iec958.hw,
+ [CLKID_I2S_OUT] = &meson8b_i2s_out.hw,
+ [CLKID_AMCLK] = &meson8b_amclk.hw,
+ [CLKID_AIFIFO2] = &meson8b_aififo2.hw,
+ [CLKID_MIXER] = &meson8b_mixer.hw,
+ [CLKID_MIXER_IFACE] = &meson8b_mixer_iface.hw,
+ [CLKID_ADC] = &meson8b_adc.hw,
+ [CLKID_BLKMV] = &meson8b_blkmv.hw,
+ [CLKID_AIU] = &meson8b_aiu.hw,
+ [CLKID_UART1] = &meson8b_uart1.hw,
+ [CLKID_G2D] = &meson8b_g2d.hw,
+ [CLKID_USB0] = &meson8b_usb0.hw,
+ [CLKID_USB1] = &meson8b_usb1.hw,
+ [CLKID_RESET] = &meson8b_reset.hw,
+ [CLKID_NAND] = &meson8b_nand.hw,
+ [CLKID_DOS_PARSER] = &meson8b_dos_parser.hw,
+ [CLKID_USB] = &meson8b_usb.hw,
+ [CLKID_VDIN1] = &meson8b_vdin1.hw,
+ [CLKID_AHB_ARB0] = &meson8b_ahb_arb0.hw,
+ [CLKID_EFUSE] = &meson8b_efuse.hw,
+ [CLKID_BOOT_ROM] = &meson8b_boot_rom.hw,
+ [CLKID_AHB_DATA_BUS] = &meson8b_ahb_data_bus.hw,
+ [CLKID_AHB_CTRL_BUS] = &meson8b_ahb_ctrl_bus.hw,
+ [CLKID_HDMI_INTR_SYNC] = &meson8b_hdmi_intr_sync.hw,
+ [CLKID_HDMI_PCLK] = &meson8b_hdmi_pclk.hw,
+ [CLKID_USB1_DDR_BRIDGE] = &meson8b_usb1_ddr_bridge.hw,
+ [CLKID_USB0_DDR_BRIDGE] = &meson8b_usb0_ddr_bridge.hw,
+ [CLKID_MMC_PCLK] = &meson8b_mmc_pclk.hw,
+ [CLKID_DVIN] = &meson8b_dvin.hw,
+ [CLKID_UART2] = &meson8b_uart2.hw,
+ [CLKID_SANA] = &meson8b_sana.hw,
+ [CLKID_VPU_INTR] = &meson8b_vpu_intr.hw,
+ [CLKID_SEC_AHB_AHB3_BRIDGE] = &meson8b_sec_ahb_ahb3_bridge.hw,
+ [CLKID_CLK81_A9] = &meson8b_clk81_a9.hw,
+ [CLKID_VCLK2_VENCI0] = &meson8b_vclk2_venci0.hw,
+ [CLKID_VCLK2_VENCI1] = &meson8b_vclk2_venci1.hw,
+ [CLKID_VCLK2_VENCP0] = &meson8b_vclk2_vencp0.hw,
+ [CLKID_VCLK2_VENCP1] = &meson8b_vclk2_vencp1.hw,
+ [CLKID_GCLK_VENCI_INT] = &meson8b_gclk_venci_int.hw,
+ [CLKID_GCLK_VENCP_INT] = &meson8b_gclk_vencp_int.hw,
+ [CLKID_DAC_CLK] = &meson8b_dac_clk.hw,
+ [CLKID_AOCLK_GATE] = &meson8b_aoclk_gate.hw,
+ [CLKID_IEC958_GATE] = &meson8b_iec958_gate.hw,
+ [CLKID_ENC480P] = &meson8b_enc480p.hw,
+ [CLKID_RNG1] = &meson8b_rng1.hw,
+ [CLKID_GCLK_VENCL_INT] = &meson8b_gclk_vencl_int.hw,
+ [CLKID_VCLK2_VENCLMCC] = &meson8b_vclk2_venclmcc.hw,
+ [CLKID_VCLK2_VENCL] = &meson8b_vclk2_vencl.hw,
+ [CLKID_VCLK2_OTHER] = &meson8b_vclk2_other.hw,
+ [CLKID_EDP] = &meson8b_edp.hw,
+ [CLKID_AO_MEDIA_CPU] = &meson8b_ao_media_cpu.hw,
+ [CLKID_AO_AHB_SRAM] = &meson8b_ao_ahb_sram.hw,
+ [CLKID_AO_AHB_BUS] = &meson8b_ao_ahb_bus.hw,
+ [CLKID_AO_IFACE] = &meson8b_ao_iface.hw,
+ [CLKID_MPLL0] = &meson8b_mpll0.hw,
+ [CLKID_MPLL1] = &meson8b_mpll1.hw,
+ [CLKID_MPLL2] = &meson8b_mpll2.hw,
+ [CLKID_MPLL0_DIV] = &meson8b_mpll0_div.hw,
+ [CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
+ [CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
+ [CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
+ [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
+ [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
+ [CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
+ [CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
+ [CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
+ [CLKID_FCLK_DIV2_DIV] = &meson8b_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &meson8b_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV4_DIV] = &meson8b_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &meson8b_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
+ [CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
+ [CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
+ [CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
+ [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
+ [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
+ [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
+ [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
+ [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
+ [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
+ [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
+ [CLKID_APB] = &meson8b_apb_clk_gate.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
+ [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
+ [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
+ [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
+ [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
+ [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
+ [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
+ [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
+ [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
+ [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
+ [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
+ [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
+ [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
+ [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
+ [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
+ [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
+ [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
+ [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
+ [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
+ [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
+ [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
+ [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
+ [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
+ [CLKID_MALI_0_SEL] = &meson8b_mali_0_sel.hw,
+ [CLKID_MALI_0_DIV] = &meson8b_mali_0_div.hw,
+ [CLKID_MALI_0] = &meson8b_mali_0.hw,
+ [CLKID_MALI_1_SEL] = &meson8b_mali_1_sel.hw,
+ [CLKID_MALI_1_DIV] = &meson8b_mali_1_div.hw,
+ [CLKID_MALI_1] = &meson8b_mali_1.hw,
+ [CLKID_MALI] = &meson8b_mali.hw,
+ [CLKID_GP_PLL_DCO] = &meson8m2_gp_pll_dco.hw,
+ [CLKID_GP_PLL] = &meson8m2_gp_pll.hw,
+ [CLKID_VPU_0_SEL] = &meson8m2_vpu_0_sel.hw,
+ [CLKID_VPU_0_DIV] = &meson8b_vpu_0_div.hw,
+ [CLKID_VPU_0] = &meson8b_vpu_0.hw,
+ [CLKID_VPU_1_SEL] = &meson8m2_vpu_1_sel.hw,
+ [CLKID_VPU_1_DIV] = &meson8b_vpu_1_div.hw,
+ [CLKID_VPU_1] = &meson8b_vpu_1.hw,
+ [CLKID_VPU] = &meson8b_vpu.hw,
+ [CLKID_VDEC_1_SEL] = &meson8b_vdec_1_sel.hw,
+ [CLKID_VDEC_1_1_DIV] = &meson8b_vdec_1_1_div.hw,
+ [CLKID_VDEC_1_1] = &meson8b_vdec_1_1.hw,
+ [CLKID_VDEC_1_2_DIV] = &meson8b_vdec_1_2_div.hw,
+ [CLKID_VDEC_1_2] = &meson8b_vdec_1_2.hw,
+ [CLKID_VDEC_1] = &meson8b_vdec_1.hw,
+ [CLKID_VDEC_HCODEC_SEL] = &meson8b_vdec_hcodec_sel.hw,
+ [CLKID_VDEC_HCODEC_DIV] = &meson8b_vdec_hcodec_div.hw,
+ [CLKID_VDEC_HCODEC] = &meson8b_vdec_hcodec.hw,
+ [CLKID_VDEC_2_SEL] = &meson8b_vdec_2_sel.hw,
+ [CLKID_VDEC_2_DIV] = &meson8b_vdec_2_div.hw,
+ [CLKID_VDEC_2] = &meson8b_vdec_2.hw,
+ [CLKID_VDEC_HEVC_SEL] = &meson8b_vdec_hevc_sel.hw,
+ [CLKID_VDEC_HEVC_DIV] = &meson8b_vdec_hevc_div.hw,
+ [CLKID_VDEC_HEVC_EN] = &meson8b_vdec_hevc_en.hw,
+ [CLKID_VDEC_HEVC] = &meson8b_vdec_hevc.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -2314,6 +3014,33 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_mali_1_div,
&meson8b_mali_1,
&meson8b_mali,
+ &meson8m2_gp_pll_dco,
+ &meson8m2_gp_pll,
+ &meson8b_vpu_0_sel,
+ &meson8m2_vpu_0_sel,
+ &meson8b_vpu_0_div,
+ &meson8b_vpu_0,
+ &meson8b_vpu_1_sel,
+ &meson8m2_vpu_1_sel,
+ &meson8b_vpu_1_div,
+ &meson8b_vpu_1,
+ &meson8b_vpu,
+ &meson8b_vdec_1_sel,
+ &meson8b_vdec_1_1_div,
+ &meson8b_vdec_1_1,
+ &meson8b_vdec_1_2_div,
+ &meson8b_vdec_1_2,
+ &meson8b_vdec_1,
+ &meson8b_vdec_hcodec_sel,
+ &meson8b_vdec_hcodec_div,
+ &meson8b_vdec_hcodec,
+ &meson8b_vdec_2_sel,
+ &meson8b_vdec_2_div,
+ &meson8b_vdec_2,
+ &meson8b_vdec_hevc_sel,
+ &meson8b_vdec_hevc_div,
+ &meson8b_vdec_hevc_en,
+ &meson8b_vdec_hevc,
};
static const struct meson8b_clk_reset_line {
@@ -2558,9 +3285,14 @@ static void __init meson8b_clkc_init(struct device_node *np)
return meson8b_clkc_init_common(np, &meson8b_hw_onecell_data);
}
+static void __init meson8m2_clkc_init(struct device_node *np)
+{
+ return meson8b_clkc_init_common(np, &meson8m2_hw_onecell_data);
+}
+
CLK_OF_DECLARE_DRIVER(meson8_clkc, "amlogic,meson8-clkc",
meson8_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8b_clkc, "amlogic,meson8b-clkc",
meson8b_clkc_init);
CLK_OF_DECLARE_DRIVER(meson8m2_clkc, "amlogic,meson8m2-clkc",
- meson8b_clkc_init);
+ meson8m2_clkc_init);
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index b8c58faeae52..ed37196187e6 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -19,6 +19,7 @@
*
* [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
*/
+#define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */
#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
@@ -34,7 +35,11 @@
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
#define HHI_MALI_CLK_CNTL 0x1b0 /* 0x6c offset in data sheet */
+#define HHI_VPU_CLK_CNTL 0x1bc /* 0x6f offset in data sheet */
#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
+#define HHI_VDEC_CLK_CNTL 0x1e0 /* 0x78 offset in data sheet */
+#define HHI_VDEC2_CLK_CNTL 0x1e4 /* 0x79 offset in data sheet */
+#define HHI_VDEC3_CLK_CNTL 0x1e8 /* 0x7a offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
@@ -146,8 +151,28 @@
#define CLKID_MALI_1_SEL 178
#define CLKID_MALI_1_DIV 179
#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
-#define CLK_NR_CLKS 181
+#define CLK_NR_CLKS 207
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index c3b301463425..4680064f1951 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/mach-pic32/pic32.h>
#include <asm/traps.h>
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 9f734779be92..e6c05df2d47f 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index 7355595c42e2..1755916ddef2 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -108,7 +108,7 @@ struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &mmp_clk_gate_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 1f1cff428d78..5fc6d486a381 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index ee272d4d8c24..585a02e0b330 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 1fc84b0e72ee..818b175391fa 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 6ab3c2e627c7..785dbede4835 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -240,7 +240,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
int n;
if (ctrl) {
- pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
+ pr_err("mvebu-clk-gating: cannot instantiate more than one gateable clock device\n");
return;
}
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 9235a331b588..b6de283f45e3 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -21,7 +21,7 @@
* - Equal to SDIO clock
* - 2/5 PLL0
*
- * CP110 has 32 gatable clocks, for the various peripherals in the IP.
+ * CP110 has 32 gateable clocks, for the various peripherals in the IP.
*/
#define pr_fmt(fmt) "cp110-system-controller: " fmt
@@ -57,7 +57,7 @@ enum {
#define CP110_CORE_NAND 4
#define CP110_CORE_SDIO 5
-/* A number of gatable clocks need special handling */
+/* A number of gateable clocks need special handling */
#define CP110_GATE_AUDIO 0
#define CP110_GATE_COMM_UNIT 1
#define CP110_GATE_NAND 2
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 27781b49eb82..f2e171a01fb4 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -142,7 +143,7 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
* Divider field is write only, so divider stat field must
* be read so divider field can be set accordingly.
*/
- val = clk_readl(gate->reg);
+ val = readl(gate->reg);
if (val & LPC18XX_CCU_DIVSTAT)
val |= LPC18XX_CCU_DIV;
@@ -155,12 +156,12 @@ static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable)
* and the next write should clear the RUN bit.
*/
val |= LPC18XX_CCU_AUTO;
- clk_writel(val, gate->reg);
+ writel(val, gate->reg);
val &= ~LPC18XX_CCU_RUN;
}
- clk_writel(val, gate->reg);
+ writel(val, gate->reg);
return 0;
}
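The clk_readl()/clk_writel() conversions here (and in the Renesas, Rockchip and ddrclk hunks further down) are mechanical: the helpers being removed from <linux/clk-provider.h> were thin wrappers around the generic MMIO accessors, differing only on big-endian PowerPC, so readl()/writel() is a drop-in replacement on these platforms. Roughly what the removed helpers looked like, as a sketch of the non-PowerPC definitions:

static inline u32 clk_readl(u32 __iomem *reg)
{
	return readl(reg);
}

static inline void clk_writel(u32 val, u32 __iomem *reg)
{
	writel(val, reg);
}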
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 2531174b399e..8b686da5577b 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -352,9 +353,9 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
struct lpc18xx_pll *pll = to_lpc_pll(hw);
u32 ctrl, mdiv, msel, npdiv;
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
- mdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
- npdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ mdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+ npdiv = readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
if (ctrl & LPC18XX_PLL0_CTRL_BYPASS)
return parent_rate;
@@ -415,25 +416,25 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
m |= lpc18xx_pll0_msel2seli(m) << LPC18XX_PLL0_MDIV_SELI_SHIFT;
/* Power down PLL, disable clk output and dividers */
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
ctrl |= LPC18XX_PLL0_CTRL_PD;
ctrl &= ~(LPC18XX_PLL0_CTRL_BYPASS | LPC18XX_PLL0_CTRL_DIRECTI |
LPC18XX_PLL0_CTRL_DIRECTO | LPC18XX_PLL0_CTRL_CLKEN);
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
/* Configure new PLL settings */
- clk_writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
- clk_writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
+ writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV);
+ writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV);
/* Power up PLL and wait for lock */
ctrl &= ~LPC18XX_PLL0_CTRL_PD;
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
do {
udelay(10);
- stat = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT);
+ stat = readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT);
if (stat & LPC18XX_PLL0_STAT_LOCK) {
ctrl |= LPC18XX_PLL0_CTRL_CLKEN;
- clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
+ writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
return 0;
}
@@ -458,8 +459,8 @@ static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw,
bool direct, fbsel;
u32 stat, ctrl;
- stat = clk_readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
- ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
+ stat = readl(pll->reg + LPC18XX_CGU_PLL1_STAT);
+ ctrl = readl(pll->reg + LPC18XX_CGU_PLL1_CTRL);
direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? true : false;
fbsel = (ctrl & LPC18XX_PLL1_CTRL_FBSEL) ? true : false;
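The PLL0 set_rate path above keeps its explicit udelay(10) polling loop for the lock bit and only swaps the accessors. For reference, the same wait could be expressed with readl_poll_timeout_atomic() from <linux/iopoll.h>; this is an illustrative alternative, not what the driver does, and the 500 µs budget is an assumption standing in for the original 50-iteration loop:

	/* fragment intended for lpc18xx_pll0_set_rate(), after the power-up write */
	u32 stat;
	int err;

	err = readl_poll_timeout_atomic(pll->reg + LPC18XX_CGU_PLL0USB_STAT,
					stat, stat & LPC18XX_PLL0_STAT_LOCK,
					10, 500);
	if (!err) {
		ctrl |= LPC18XX_PLL0_CTRL_CLKEN;
		writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL);
	}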
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 5eeecee17b69..7f67c1036ff9 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
@@ -1085,13 +1086,12 @@ struct clk_hw_proto {
};
};
-#define LPC32XX_DEFINE_FIXED(_idx, _rate, _flags) \
+#define LPC32XX_DEFINE_FIXED(_idx, _rate) \
[CLK_PREFIX(_idx)] = { \
.type = CLK_FIXED, \
{ \
.f = { \
.fixed_rate = (_rate), \
- .flags = (_flags), \
}, \
}, \
}
@@ -1225,7 +1225,7 @@ struct clk_hw_proto {
}
static struct clk_hw_proto clk_hw_proto[LPC32XX_CLK_HW_MAX] = {
- LPC32XX_DEFINE_FIXED(RTC, 32768, 0),
+ LPC32XX_DEFINE_FIXED(RTC, 32768),
LPC32XX_DEFINE_PLL(PLL397X, pll_397x, HCLKPLL_CTRL, BIT(1)),
LPC32XX_DEFINE_PLL(HCLK_PLL, hclk_pll, HCLKPLL_CTRL, PLL_CTRL_ENABLE),
LPC32XX_DEFINE_PLL(USB_PLL, usb_pll, USB_CTRL, PLL_CTRL_ENABLE),
@@ -1468,7 +1468,7 @@ static struct clk * __init lpc32xx_clk_register(u32 id)
struct clk_fixed_rate *fixed = &clk_hw->f;
clk = clk_register_fixed_rate(NULL, lpc32xx_clk->name,
- parents[0], fixed->flags, fixed->fixed_rate);
+ parents[0], 0, fixed->fixed_rate);
break;
}
default:
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 42627bf8a09e..5326f77eb35a 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/pxa-clock.h>
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1c04575c118f..18bdf34d5e64 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -243,6 +243,12 @@ config SDM_GCC_660
Say Y if you want to use peripheral devices such as UART, SPI,
i2C, USB, UFS, SDDC, PCIe, etc.
+config QCS_TURING_404
+ tristate "QCS404 Turing Clock Controller"
+ help
+ Support for the Turing Clock Controller on QCS404, provides clocks
+ and resets for the Turing subsystem.
+
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ee8d0698e370..f0768fb1f037 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
+obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 99446bf630aa..f869fc6aaed6 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -146,6 +146,12 @@ const struct clk_ops clk_branch2_ops = {
};
EXPORT_SYMBOL_GPL(clk_branch2_ops);
+const struct clk_ops clk_branch2_aon_ops = {
+ .enable = clk_branch2_enable,
+ .is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_branch2_aon_ops);
+
const struct clk_ops clk_branch_simple_ops = {
.enable = clk_enable_regmap,
.disable = clk_disable_regmap,
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index b3561e0a3984..17a58119165e 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -40,6 +40,7 @@ struct clk_branch {
extern const struct clk_ops clk_branch_ops;
extern const struct clk_ops clk_branch2_ops;
extern const struct clk_ops clk_branch_simple_ops;
+extern const struct clk_ops clk_branch2_aon_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
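The clk_branch2_aon_ops added above is the always-on variant of the branch ops: it fills in .enable and .is_enabled but deliberately leaves out .disable, so once the framework has turned such a branch on it can never gate it again. For comparison, a sketch of the regular ops already in clk-branch.c; the Turing wrapper and Q6 clocks registered further down use the aon variant presumably because gating them would take down the DSP's bus interface:

const struct clk_ops clk_branch2_ops = {
	.enable = clk_branch2_enable,
	.disable = clk_branch2_disable,
	.is_enabled = clk_is_enabled_regmap,
};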
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
index 6cd6261be7ac..4df6c8d24c24 100644
--- a/drivers/clk/qcom/clk-regmap-mux-div.h
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index c240fba794c7..033688264c7b 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -2161,7 +2161,7 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
static struct clk_branch gcc_pcie_0_pipe_clk = {
.halt_reg = 0x6b018,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x6b018,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 5a62f64ada93..a54807eb3b28 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -260,6 +260,20 @@ static const char * const gcc_parent_names_15[] = {
"core_bi_pll_test_se",
};
+static const struct parent_map gcc_parent_map_16[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_AUX, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_16[] = {
+ "cxo",
+ "gpll0_out_main",
+ "gpll0_out_aux",
+ "core_bi_pll_test_se",
+};
+
static struct clk_fixed_factor cxo = {
.mult = 1,
.div = 1,
@@ -1194,6 +1208,28 @@ static struct clk_rcg2 vsync_clk_src = {
},
};
+static const struct freq_tbl ftbl_cdsp_bimc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cdsp_bimc_clk_src = {
+ .cmd_rcgr = 0x5e010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_16,
+ .freq_tbl = ftbl_cdsp_bimc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cdsp_bimc_clk_src",
+ .parent_names = gcc_parent_names_16,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_apss_ahb_clk = {
.halt_reg = 0x4601c,
.halt_check = BRANCH_HALT_VOTED,
@@ -1255,6 +1291,24 @@ static struct clk_branch gcc_bimc_gpu_clk = {
},
};
+static struct clk_branch gcc_bimc_cdsp_clk = {
+ .halt_reg = 0x31030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x31030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_cdsp_clk",
+ .parent_names = (const char *[]) {
+ "cdsp_bimc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_bimc_mdss_clk = {
.halt_reg = 0x31038,
.halt_check = BRANCH_HALT,
@@ -1792,6 +1846,24 @@ static struct clk_branch gcc_gfx_tbu_clk = {
},
};
+static struct clk_branch gcc_cdsp_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x13020,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cdsp_tbu_clk",
+ .parent_names = (const char *[]) {
+ "cdsp_bimc_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x8000,
.halt_check = BRANCH_HALT,
@@ -2304,6 +2376,19 @@ static struct clk_branch gcc_sdcc1_ice_core_clk = {
},
};
+static struct clk_branch gcc_cdsp_cfg_ahb_clk = {
+ .halt_reg = 0x5e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_cdsp_cfg_ahb_cbcr",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_sdcc2_ahb_clk = {
.halt_reg = 0x4301c,
.halt_check = BRANCH_HALT,
@@ -2548,6 +2633,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_ESC0_CLK_SRC] = &esc0_clk_src.clkr,
[GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
[GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_CDSP_CLK] = &gcc_bimc_cdsp_clk.clkr,
[GCC_BIMC_MDSS_CLK] = &gcc_bimc_mdss_clk.clkr,
[GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
[GCC_BLSP1_QUP0_I2C_APPS_CLK] = &gcc_blsp1_qup0_i2c_apps_clk.clkr,
@@ -2605,6 +2691,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
[GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
[GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_CDSP_CFG_AHB_CLK] = &gcc_cdsp_cfg_ahb_clk.clkr,
[GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
[GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
[GCC_SYS_NOC_USB3_CLK] = &gcc_sys_noc_usb3_clk.clkr,
@@ -2645,6 +2732,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
[GCC_USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
[GCC_VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_CDSP_BIMC_CLK_SRC] = &cdsp_bimc_clk_src.clkr,
[GCC_USB_HS_INACTIVITY_TIMERS_CLK] =
&gcc_usb_hs_inactivity_timers_clk.clkr,
[GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
@@ -2653,6 +2741,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_GFX_TBU_CLK] = &gcc_gfx_tbu_clk.clkr,
[GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
[GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_CDSP_TBU_CLK] = &gcc_cdsp_tbu_clk.clkr,
[GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
[GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
[GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
@@ -2664,6 +2753,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
static const struct qcom_reset_map gcc_qcs404_resets[] = {
[GCC_GENI_IR_BCR] = { 0x0F000 },
+ [GCC_CDSP_RESTART] = { 0x18000 },
[GCC_USB_HS_BCR] = { 0x41000 },
[GCC_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
[GCC_QUSB2_PHY_BCR] = { 0x4103c },
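In the cdsp_bimc_clk_src frequency table above, the 2.5 pre-divider is legal because the qcom F() macro (from clk-rcg.h) stores the divider as 2*h - 1, so half-integer values still land on an integer register field. Assuming GPLL0 runs at 800 MHz on this SoC, which is what the listed rates imply, the entries work out as in this sketch:

/* sketch of the existing encoding, not a new definition */
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

/*   800 MHz / 6   = 133.33 MHz  -> pre_div field 2 * 6   - 1 = 11
 *   800 MHz / 3   = 266.67 MHz  -> pre_div field 2 * 3   - 1 = 5
 *   800 MHz / 2.5 = 320 MHz     -> pre_div field 2 * 2.5 - 1 = 4
 */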
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
new file mode 100644
index 000000000000..aa859e6ec9bd
--- /dev/null
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Ltd.
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,turingcc-qcs404.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+#include "reset.h"
+
+static struct clk_branch turing_wrapper_aon_cbcr = {
+ .halt_reg = 0x5098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_wrapper_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_ahbm_aon_cbcr = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_ahbm_aon_cbcr",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_q6_axim_clk = {
+ .halt_reg = 0xb000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_q6_axim_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_q6ss_ahbs_aon_cbcr = {
+ .halt_reg = 0x10000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_q6ss_ahbs_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_branch turing_wrapper_qos_ahbs_aon_cbcr = {
+ .halt_reg = 0x11014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "turing_wrapper_qos_ahbs_aon_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
+static struct clk_regmap *turingcc_clocks[] = {
+ [TURING_WRAPPER_AON_CLK] = &turing_wrapper_aon_cbcr.clkr,
+ [TURING_Q6SS_AHBM_AON_CLK] = &turing_q6ss_ahbm_aon_cbcr.clkr,
+ [TURING_Q6SS_Q6_AXIM_CLK] = &turing_q6ss_q6_axim_clk.clkr,
+ [TURING_Q6SS_AHBS_AON_CLK] = &turing_q6ss_ahbs_aon_cbcr.clkr,
+ [TURING_WRAPPER_QOS_AHBS_AON_CLK] = &turing_wrapper_qos_ahbs_aon_cbcr.clkr,
+};
+
+static const struct regmap_config turingcc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc turingcc_desc = {
+ .config = &turingcc_regmap_config,
+ .clks = turingcc_clocks,
+ .num_clks = ARRAY_SIZE(turingcc_clocks),
+};
+
+static int turingcc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, NULL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ ret = qcom_cc_probe(pdev, &turingcc_desc);
+ if (ret < 0)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int turingcc_remove(struct platform_device *pdev)
+{
+ pm_clk_destroy(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops turingcc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id turingcc_match_table[] = {
+ { .compatible = "qcom,qcs404-turingcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, turingcc_match_table);
+
+static struct platform_driver turingcc_driver = {
+ .probe = turingcc_probe,
+ .remove = turingcc_remove,
+ .driver = {
+ .name = "qcs404-turingcc",
+ .of_match_table = turingcc_match_table,
+ .pm = &turingcc_pm_ops,
+ },
+};
+
+module_platform_driver(turingcc_driver);
+
+MODULE_DESCRIPTION("Qualcomm QCS404 Turing Clock Controller");
+MODULE_LICENSE("GPL v2");
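turingcc_probe() above wires the block into runtime PM before registering anything: pm_clk_create()/pm_clk_add() attach the (unnamed) interface clock to the device so pm_clk_suspend()/pm_clk_resume() gate it with runtime PM, and only then does qcom_cc_probe() register the regmap-backed branches and the OF clock provider. A hypothetical consumer-side sketch, assuming a DT node that references the provider; the clock name and function are illustrative, not part of this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* assumes:  clocks = <&turingcc TURING_Q6SS_Q6_AXIM_CLK>;  clock-names = "axim"; */
static int example_cdsp_enable_bus(struct device *dev)
{
	struct clk *axim;
	int ret;

	axim = devm_clk_get(dev, "axim");
	if (IS_ERR(axim))
		return PTR_ERR(axim);

	ret = clk_prepare_enable(axim);
	if (ret)
		return ret;

	/* ... bring the Turing/CDSP Q6 out of reset ... */
	return 0;
}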
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 2719c248c67b..cfed11c659d9 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 5967656c13cc..d8190f007a81 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 2913b4148157..da9fe3f032eb 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 3cda53a97f4e..fbc34beafc78 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index dc8ffc7c727a..5f25a70bc61c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index 57c49fe88295..cf65d4e0e116 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <dt-bindings/clock/r7s9210-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
@@ -119,7 +120,7 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
if (clk_get_rate(extal_clk) > 12000000)
cpg_mode = 1;
- frqcr = clk_readl(base + CPG_FRQCR) & 0xFFF;
+ frqcr = readl(base + CPG_FRQCR) & 0xFFF;
if (frqcr == 0x012)
index = 0;
else if (frqcr == 0x112)
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 4d92b27a6153..76ed7d1bae36 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -71,8 +71,8 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A774A1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A774A1_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A774A1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A774A1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A774A1_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -123,8 +123,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A774A1_CLK_MSO),
DEF_MOD("msiof1", 210, R8A774A1_CLK_MSO),
DEF_MOD("msiof0", 211, R8A774A1_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A774A1_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A774A1_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A774A1_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A774A1_CLK_R),
DEF_MOD("cmt2", 301, R8A774A1_CLK_R),
@@ -143,8 +143,8 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A774A1_CLK_R),
DEF_MOD("intc-ex", 407, R8A774A1_CLK_CP),
DEF_MOD("intc-ap", 408, R8A774A1_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A774A1_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A774A1_CLK_S0D3),
+ DEF_MOD("audmac1", 501, R8A774A1_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A774A1_CLK_S1D2),
DEF_MOD("hscif4", 516, R8A774A1_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A774A1_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A774A1_CLK_S3D1),
@@ -165,9 +165,9 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("vspd0", 623, R8A774A1_CLK_S0D2),
DEF_MOD("vspb", 626, R8A774A1_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A774A1_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A774A1_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A774A1_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774A1_CLK_S3D2),
DEF_MOD("csi20", 714, R8A774A1_CLK_CSI0),
DEF_MOD("csi40", 716, R8A774A1_CLK_CSI0),
DEF_MOD("du2", 722, R8A774A1_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 34e274f2a273..f91e7a484753 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_FIXED("za2", R8A774C0_CLK_ZA2, CLK_PLL0D24, 1, 1),
DEF_FIXED("za8", R8A774C0_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_GEN3_Z("z2", R8A774C0_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8),
DEF_FIXED("ztr", R8A774C0_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A774C0_CLK_ZT, CLK_PLL1, 4, 1),
DEF_FIXED("zx", R8A774C0_CLK_ZX, CLK_PLL1, 3, 1),
@@ -157,7 +158,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A774C0_CLK_CP),
DEF_MOD("intc-ap", 408, R8A774C0_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A774C0_CLK_S3D4),
+ DEF_MOD("audmac0", 502, R8A774C0_CLK_S1D2),
DEF_MOD("hscif4", 516, R8A774C0_CLK_S3D1C),
DEF_MOD("hscif3", 517, R8A774C0_CLK_S3D1C),
DEF_MOD("hscif2", 518, R8A774C0_CLK_S3D1C),
@@ -177,8 +178,8 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A774C0_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A774C0_CLK_S0D1),
- DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A774C0_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A774C0_CLK_S3D2),
DEF_MOD("csi40", 716, R8A774C0_CLK_CSI0),
DEF_MOD("du1", 723, R8A774C0_CLK_S1D1),
DEF_MOD("du0", 724, R8A774C0_CLK_S1D1),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 86842c9fd314..9e9a6f2c31e8 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a7795 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on clk-rcar-gen3.c
*
@@ -73,8 +74,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A7795_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A7795_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7795_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7795_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -129,8 +130,8 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("msiof2", 209, R8A7795_CLK_MSO),
DEF_MOD("msiof1", 210, R8A7795_CLK_MSO),
DEF_MOD("msiof0", 211, R8A7795_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A7795_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A7795_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7795_CLK_S0D3),
DEF_MOD("sceg-pub", 229, R8A7795_CLK_CR),
DEF_MOD("cmt3", 300, R8A7795_CLK_R),
@@ -153,16 +154,16 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("rwdt", 402, R8A7795_CLK_R),
DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A7795_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A7795_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A7795_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A7795_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A7795_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A7795_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A7795_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A7795_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A7795_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A7795_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A7795_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A7795_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A7795_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A7795_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A7795_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A7795_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A7795_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A7795_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A7795_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A7795_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A7795_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7795_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
@@ -194,12 +195,12 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
- DEF_MOD("ehci3", 700, R8A7795_CLK_S3D4),
- DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4),
- DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4),
- DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D4),
+ DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci2", 701, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci1", 702, R8A7795_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A7795_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A7795_CLK_S3D2),
+ DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D2),
DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 12c455859f2c..d8e9af5d9ae9 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a7796 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2016 Glider bvba
+ * Copyright (C) 2018 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -73,8 +74,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
- DEF_BASE("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z2, CLK_PLL2),
+ DEF_GEN3_Z("z", R8A7796_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
+ DEF_GEN3_Z("z2", R8A7796_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0),
DEF_FIXED("ztr", R8A7796_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A7796_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -126,8 +127,8 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A7796_CLK_MSO),
DEF_MOD("msiof1", 210, R8A7796_CLK_MSO),
DEF_MOD("msiof0", 211, R8A7796_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A7796_CLK_R),
DEF_MOD("cmt2", 301, R8A7796_CLK_R),
@@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A7796_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A7796_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A7796_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A7796_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A7796_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A7796_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A7796_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A7796_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A7796_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A7796_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A7796_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A7796_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A7796_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A7796_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A7796_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A7796_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A7796_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A7796_CLK_S3D1),
@@ -176,9 +177,9 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2),
DEF_MOD("vspb", 626, R8A7796_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A7796_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A7796_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A7796_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A7796_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A7796_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A7796_CLK_S3D2),
DEF_MOD("csi20", 714, R8A7796_CLK_CSI0),
DEF_MOD("csi40", 716, R8A7796_CLK_CSI0),
DEF_MOD("du2", 722, R8A7796_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index eb1cca58a1e1..8f87e314d949 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -3,6 +3,7 @@
* r8a77965 Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ * Copyright (C) 2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -71,7 +72,7 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
/* Core Clock Outputs */
- DEF_BASE("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0),
+ DEF_GEN3_Z("z", R8A77965_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8),
DEF_FIXED("ztr", R8A77965_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED("ztrd2", R8A77965_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
DEF_FIXED("zt", R8A77965_CLK_ZT, CLK_PLL1_DIV2, 4, 1),
@@ -123,8 +124,8 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("msiof2", 209, R8A77965_CLK_MSO),
DEF_MOD("msiof1", 210, R8A77965_CLK_MSO),
DEF_MOD("msiof0", 211, R8A77965_CLK_MSO),
- DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S0D3),
- DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S0D3),
+ DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1),
+ DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1),
DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3),
DEF_MOD("cmt3", 300, R8A77965_CLK_R),
@@ -146,16 +147,16 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A77965_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77965_CLK_S0D3),
- DEF_MOD("audmac1", 501, R8A77965_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A77965_CLK_S0D3),
- DEF_MOD("drif7", 508, R8A77965_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A77965_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A77965_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A77965_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A77965_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A77965_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A77965_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A77965_CLK_S3D2),
+ DEF_MOD("audmac1", 501, R8A77965_CLK_S1D2),
+ DEF_MOD("audmac0", 502, R8A77965_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A77965_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A77965_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A77965_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A77965_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A77965_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A77965_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A77965_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A77965_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A77965_CLK_S3D1),
DEF_MOD("hscif3", 517, R8A77965_CLK_S3D1),
DEF_MOD("hscif2", 518, R8A77965_CLK_S3D1),
@@ -175,9 +176,9 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A77965_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A77965_CLK_S0D1),
- DEF_MOD("ehci1", 702, R8A77965_CLK_S3D4),
- DEF_MOD("ehci0", 703, R8A77965_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A77965_CLK_S3D4),
+ DEF_MOD("ehci1", 702, R8A77965_CLK_S3D2),
+ DEF_MOD("ehci0", 703, R8A77965_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A77965_CLK_S3D2),
DEF_MOD("csi20", 714, R8A77965_CLK_CSI0),
DEF_MOD("csi40", 716, R8A77965_CLK_CSI0),
DEF_MOD("du3", 721, R8A77965_CLK_S2D1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index f9e07fcc0d96..7227f675e61f 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -171,7 +171,7 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = {
DEF_MOD("gpio1", 911, R8A77980_CLK_CP),
DEF_MOD("gpio0", 912, R8A77980_CLK_CP),
DEF_MOD("can-fd", 914, R8A77980_CLK_S3D2),
- DEF_MOD("rpc-if", 917, R8A77980_CLK_RPC),
+ DEF_MOD("rpc-if", 917, R8A77980_CLK_RPCD2),
DEF_MOD("i2c4", 927, R8A77980_CLK_S0D6),
DEF_MOD("i2c3", 928, R8A77980_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A77980_CLK_S3D2),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 9a278c75c918..9570404baa58 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -2,7 +2,7 @@
/*
* r8a77990 Clock Pulse Generator / Module Standby and Software Reset
*
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
*
* Based on r8a7795-cpg-mssr.c
*
@@ -81,6 +81,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_FIXED("za2", R8A77990_CLK_ZA2, CLK_PLL0D24, 1, 1),
DEF_FIXED("za8", R8A77990_CLK_ZA8, CLK_PLL0D8, 1, 1),
+ DEF_GEN3_Z("z2", R8A77990_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL0, 4, 8),
DEF_FIXED("ztr", R8A77990_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A77990_CLK_ZT, CLK_PLL1, 4, 1),
DEF_FIXED("zx", R8A77990_CLK_ZX, CLK_PLL1, 3, 1),
@@ -152,15 +153,15 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("intc-ex", 407, R8A77990_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77990_CLK_S0D3),
- DEF_MOD("audmac0", 502, R8A77990_CLK_S3D4),
- DEF_MOD("drif7", 508, R8A77990_CLK_S3D2),
- DEF_MOD("drif6", 509, R8A77990_CLK_S3D2),
- DEF_MOD("drif5", 510, R8A77990_CLK_S3D2),
- DEF_MOD("drif4", 511, R8A77990_CLK_S3D2),
- DEF_MOD("drif3", 512, R8A77990_CLK_S3D2),
- DEF_MOD("drif2", 513, R8A77990_CLK_S3D2),
- DEF_MOD("drif1", 514, R8A77990_CLK_S3D2),
- DEF_MOD("drif0", 515, R8A77990_CLK_S3D2),
+ DEF_MOD("audmac0", 502, R8A77990_CLK_S1D2),
+ DEF_MOD("drif31", 508, R8A77990_CLK_S3D2),
+ DEF_MOD("drif30", 509, R8A77990_CLK_S3D2),
+ DEF_MOD("drif21", 510, R8A77990_CLK_S3D2),
+ DEF_MOD("drif20", 511, R8A77990_CLK_S3D2),
+ DEF_MOD("drif11", 512, R8A77990_CLK_S3D2),
+ DEF_MOD("drif10", 513, R8A77990_CLK_S3D2),
+ DEF_MOD("drif01", 514, R8A77990_CLK_S3D2),
+ DEF_MOD("drif00", 515, R8A77990_CLK_S3D2),
DEF_MOD("hscif4", 516, R8A77990_CLK_S3D1C),
DEF_MOD("hscif3", 517, R8A77990_CLK_S3D1C),
DEF_MOD("hscif2", 518, R8A77990_CLK_S3D1C),
@@ -180,8 +181,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("vspb", 626, R8A77990_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A77990_CLK_S0D1),
- DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4),
- DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A77990_CLK_S3D2),
+ DEF_MOD("hsusb", 704, R8A77990_CLK_S3D2),
DEF_MOD("csi40", 716, R8A77990_CLK_CSI0),
DEF_MOD("du1", 723, R8A77990_CLK_S1D1),
DEF_MOD("du0", 724, R8A77990_CLK_S1D1),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index eee3874865a9..68707277b17b 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -133,7 +133,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("rwdt", 402, R8A77995_CLK_R),
DEF_MOD("intc-ex", 407, R8A77995_CLK_CP),
DEF_MOD("intc-ap", 408, R8A77995_CLK_S1D2),
- DEF_MOD("audmac0", 502, R8A77995_CLK_S3D1),
+ DEF_MOD("audmac0", 502, R8A77995_CLK_S1D2),
DEF_MOD("hscif3", 517, R8A77995_CLK_S3D1C),
DEF_MOD("hscif0", 520, R8A77995_CLK_S3D1C),
DEF_MOD("thermal", 522, R8A77995_CLK_CP),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 658cb11b6f55..7d042183aa37 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
@@ -170,6 +171,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] __initconst = {
D_GATE(CLK_P6_PG2, "clk_p6_pg2", DIV_P6_PG, 0x8a3, 0x8a4, 0x8a5, 0, 0xb61, 0, 0),
D_GATE(CLK_P6_PG3, "clk_p6_pg3", DIV_P6_PG, 0x8a6, 0x8a7, 0x8a8, 0, 0xb62, 0, 0),
D_GATE(CLK_P6_PG4, "clk_p6_pg4", DIV_P6_PG, 0x8a9, 0x8aa, 0x8ab, 0, 0xb63, 0, 0),
+ D_GATE(CLK_PCI_USB, "clk_pci_usb", CLKOUT_D40, 0xe6, 0, 0, 0, 0, 0, 0),
D_GATE(CLK_QSPI0, "clk_qspi0", DIV_QSPI0, 0x2a4, 0x2a5, 0, 0, 0, 0, 0),
D_GATE(CLK_QSPI1, "clk_qspi1", DIV_QSPI1, 0x484, 0x485, 0, 0, 0, 0, 0),
D_GATE(CLK_RGMII_REF, "clk_rgmii_ref", CLKOUT_D8, 0x340, 0, 0, 0, 0, 0, 0),
diff --git a/drivers/clk/renesas/rcar-gen2-cpg.h b/drivers/clk/renesas/rcar-gen2-cpg.h
index bff9551c7a38..db2f57ef2f99 100644
--- a/drivers/clk/renesas/rcar-gen2-cpg.h
+++ b/drivers/clk/renesas/rcar-gen2-cpg.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* R-Car Gen2 Clock Pulse Generator
*
* Copyright (C) 2016 Cogent Embedded Inc.
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 9a8071a8114d..d25c8ba00a65 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -3,6 +3,7 @@
* R-Car Gen3 Clock Pulse Generator
*
* Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
*
* Based on clk-rcar-gen3.c
*
@@ -88,14 +89,13 @@ static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
#define CPG_FRQCRB 0x00000004
#define CPG_FRQCRB_KICK BIT(31)
#define CPG_FRQCRC 0x000000e0
-#define CPG_FRQCRC_ZFC_MASK GENMASK(12, 8)
-#define CPG_FRQCRC_Z2FC_MASK GENMASK(4, 0)
struct cpg_z_clk {
struct clk_hw hw;
void __iomem *reg;
void __iomem *kick_reg;
unsigned long mask;
+ unsigned int fixed_div;
};
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
@@ -110,17 +110,18 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
val = readl(zclk->reg) & zclk->mask;
mult = 32 - (val >> __ffs(zclk->mask));
- /* Factor of 2 is for fixed divider */
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
}
static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
- /* Factor of 2 is for fixed divider */
- unsigned long prate = *parent_rate / 2;
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned long prate;
unsigned int mult;
+ prate = *parent_rate / zclk->fixed_div;
mult = div_u64(rate * 32ULL, prate);
mult = clamp(mult, 1U, 32U);
@@ -134,8 +135,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned int mult;
unsigned int i;
- /* Factor of 2 is for fixed divider */
- mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
mult = clamp(mult, 1U, 32U);
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
@@ -178,7 +179,8 @@ static const struct clk_ops cpg_z_clk_ops = {
static struct clk * __init cpg_z_clk_register(const char *name,
const char *parent_name,
void __iomem *reg,
- unsigned long mask)
+ unsigned int div,
+ unsigned int offset)
{
struct clk_init_data init;
struct cpg_z_clk *zclk;
@@ -197,7 +199,8 @@ static struct clk * __init cpg_z_clk_register(const char *name,
zclk->reg = reg + CPG_FRQCRC;
zclk->kick_reg = reg + CPG_FRQCRB;
zclk->hw.init = &init;
- zclk->mask = mask;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
clk = clk_register(NULL, &zclk->hw);
if (IS_ERR(clk))
@@ -234,8 +237,6 @@ struct sd_clock {
const struct sd_div_table *div_table;
struct cpg_simple_notifier csn;
unsigned int div_num;
- unsigned int div_min;
- unsigned int div_max;
unsigned int cur_div_idx;
};
@@ -312,14 +313,20 @@ static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
unsigned long rate,
unsigned long parent_rate)
{
- unsigned int div;
-
- if (!rate)
- rate = 1;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ unsigned long calc_rate, diff, diff_min = ULONG_MAX;
+ unsigned int i, best_div = 0;
+
+ for (i = 0; i < clock->div_num; i++) {
+ calc_rate = DIV_ROUND_CLOSEST(parent_rate,
+ clock->div_table[i].div);
+ diff = calc_rate > rate ? calc_rate - rate : rate - calc_rate;
+ if (diff < diff_min) {
+ best_div = clock->div_table[i].div;
+ diff_min = diff;
+ }
+ }
- return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
+ return best_div;
}
static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
@@ -369,27 +376,26 @@ static u32 cpg_quirks __initdata;
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
-static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
- void __iomem *base, const char *parent_name,
+static struct clk * __init cpg_sd_clk_register(const char *name,
+ void __iomem *base, unsigned int offset, const char *parent_name,
struct raw_notifier_head *notifiers)
{
struct clk_init_data init;
struct sd_clock *clock;
struct clk *clk;
- unsigned int i;
u32 val;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
if (!clock)
return ERR_PTR(-ENOMEM);
- init.name = core->name;
+ init.name = name;
init.ops = &cpg_sd_clock_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
- clock->csn.reg = base + core->offset;
+ clock->csn.reg = base + offset;
clock->hw.init = &init;
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
@@ -403,13 +409,6 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
writel(val, clock->csn.reg);
- clock->div_max = clock->div_table[0].div;
- clock->div_min = clock->div_max;
- for (i = 1; i < clock->div_num; i++) {
- clock->div_max = max(clock->div_max, clock->div_table[i].div);
- clock->div_min = min(clock->div_min, clock->div_table[i].div);
- }
-
clk = clk_register(NULL, &clock->hw);
if (IS_ERR(clk))
goto free_clock;
@@ -606,8 +605,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core, base, __clk_get_name(parent),
- notifiers);
+ return cpg_sd_clk_register(core->name, base, core->offset,
+ __clk_get_name(parent), notifiers);
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
@@ -658,11 +657,7 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_Z:
return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, CPG_FRQCRC_ZFC_MASK);
-
- case CLK_TYPE_GEN3_Z2:
- return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, CPG_FRQCRC_Z2FC_MASK);
+ base, core->div, core->offset);
case CLK_TYPE_GEN3_OSC:
/*
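Two behavioural points in the rcar-gen3-cpg.c rework above: the Z and Z2 clocks now share CLK_TYPE_GEN3_Z, with the fixed post-divider and FRQCRC bit offset passed per clock through DEF_GEN3_Z, and the SD divider is now chosen as the table entry whose rate lands closest to the request instead of clamping a computed divider. The Z rate follows parent * mult / (32 * fixed_div) with mult between 1 and 32; a standalone sketch with made-up numbers (the 3 GHz PLL figure is illustrative, not taken from the patch):

#include <linux/kernel.h>
#include <linux/math64.h>

static unsigned long z_clk_rate(unsigned long parent_rate, unsigned int mult,
				unsigned int fixed_div)
{
	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * fixed_div);
}

/* e.g. z_clk_rate(3000000000UL, 32, 2) == 1500000000 (1.5 GHz ceiling)
 *      z_clk_rate(3000000000UL, 16, 2) ==  750000000                  */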
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index eac1b057455a..c4ac80cac6a0 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -1,8 +1,9 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* R-Car Gen3 Clock Pulse Generator
*
* Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2018 Renesas Electronics Corp.
*
*/
@@ -20,7 +21,6 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_GEN3_Z,
- CLK_TYPE_GEN3_Z2,
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
CLK_TYPE_GEN3_RPCSRC,
@@ -51,6 +51,9 @@ enum rcar_gen3_clk_types {
DEF_BASE(_name, _id, CLK_TYPE_GEN3_RCKSEL, \
(_parent0) << 16 | (_parent1), .div = (_div0) << 16 | (_div1))
+#define DEF_GEN3_Z(_name, _id, _type, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b241f9ca3d71..cc90b11a9c25 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 30df0dc853f0..0201809bbd37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index c4ec9df146fd..4ddcdf3bfb95 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
* Renesas Clock Pulse Generator / Module Standby and Software Reset
*
* Copyright (C) 2015 Glider bvba
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index ebce5260068b..09ede6920593 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -82,7 +82,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
struct rockchip_ddrclk *ddrclk = to_rockchip_ddrclk_hw(hw);
u32 val;
- val = clk_readl(ddrclk->reg_base +
+ val = readl(ddrclk->reg_base +
ddrclk->mux_offset) >> ddrclk->mux_shift;
val &= GENMASK(ddrclk->mux_width - 1, 0);
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index b8da6e799423..ba9f00dc9740 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -3,8 +3,9 @@
* Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
#include "clk.h"
#define div_mask(width) ((1 << (width)) - 1)
@@ -24,7 +25,7 @@ static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
struct clk_divider *divider = to_clk_divider(hw);
unsigned int val;
- val = clk_readl(divider->reg) >> divider->shift;
+ val = readl(divider->reg) >> divider->shift;
val &= div_mask(divider->width);
val = val * 2 + 3;
@@ -124,11 +125,11 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
val = div_mask(divider->width) << (divider->shift + 16);
} else {
- val = clk_readl(divider->reg);
+ val = readl(divider->reg);
val &= ~(div_mask(divider->width) << divider->shift);
}
val |= value << divider->shift;
- clk_writel(val, divider->reg);
+ writel(val, divider->reg);
if (divider->lock)
spin_unlock_irqrestore(divider->lock, flags);
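The half-divider hunks above keep the existing arithmetic and only swap accessors: a register value n selects a divider of n + 1.5, which is why recalc expands it to 2n + 3 and works on parent_rate * 2. A small sketch with illustrative numbers (the rounding shown here is up, which may differ in detail from the driver):

#include <linux/kernel.h>

static unsigned long half_div_rate(unsigned long parent_rate, unsigned int n)
{
	/* parent_rate / (n + 1.5), kept in integer math */
	return DIV_ROUND_UP_ULL((u64)parent_rate * 2, 2 * n + 3);
}

/* half_div_rate(300000000, 0) == 200000000  (divide by 1.5)
 * half_div_rate(300000000, 1) == 120000000  (divide by 2.5) */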
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 601a77f1af78..68d23be18cbc 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index c3001980dbdc..3bf919b6c6e3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 5970a50671b9..8278a54db343 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 5ecf28854876..378420b8835a 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/rk3188-cru-common.h>
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7af48184b022..7176003b5c7c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 5a67b7869960..85907f31c63f 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
@@ -200,8 +201,8 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
-PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usbphy480m_src" };
-PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "usbphy480m_src", "npll" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "unstable:usbphy480m_src" };
+PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "unstable:usbphy480m_src", "npll" };
PNAME(mux_mmc_src_p) = { "cpll", "gpll", "xin24m", "xin24m" };
PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
@@ -219,7 +220,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
+PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
"sclk_otgphy0_480m" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
@@ -313,13 +314,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 6, GFLAGS),
- COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
+ COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 7, GFLAGS),
COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 8, GFLAGS),
- GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
+ GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
RK3288_CLKGATE_CON(12), 9, GFLAGS),
GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(12), 10, GFLAGS),
@@ -420,7 +421,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
+ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
@@ -647,7 +648,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
RK3288_CLKSEL_CON(22), 7, IFLAGS),
- GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
+ GATE(0, "jtag", "ext_jtag", 0,
RK3288_CLKGATE_CON(4), 14, GFLAGS),
COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
@@ -656,7 +657,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
RK3288_CLKGATE_CON(3), 6, GFLAGS),
- GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
+ GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
RK3288_CLKGATE_CON(13), 9, GFLAGS),
DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
@@ -697,7 +698,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
- GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
/* ddrctrl [DDR Controller PHY clock] gates */
GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
@@ -837,12 +838,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
"pclk_alive_niu",
"pclk_pd_pmu",
"pclk_pmu_niu",
- "pclk_core_niu",
- "pclk_ddrupctl0",
- "pclk_publ0",
- "pclk_ddrupctl1",
- "pclk_publ1",
"pmu_hclk_otg0",
+ /* pwm-regulators on some boards, so handoff-critical later */
+ "pclk_rkpwm",
};
static void __iomem *rk3288_cru_base;
@@ -859,6 +857,9 @@ static const int rk3288_saved_cru_reg_ids[] = {
RK3288_CLKSEL_CON(10),
RK3288_CLKSEL_CON(33),
RK3288_CLKSEL_CON(37),
+
+ /* We turn aclk_dmac1 on for suspend; this will restore it */
+ RK3288_CLKGATE_CON(10),
};
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
@@ -875,6 +876,14 @@ static int rk3288_clk_suspend(void)
}
/*
+ * Going into deep sleep (specifically setting PMU_CLR_DMA in
+ * RK3288_PMU_PWRMODE_CON1) appears to fail unless
+ * "aclk_dmac1" is on.
+ */
+ writel_relaxed(1 << (12 + 16),
+ rk3288_cru_base + RK3288_CLKGATE_CON(10));
+
+ /*
* Switch PLLs other than DPLL (for SDRAM) to slow mode to
* avoid crashes on resume. The Mask ROM on the system will
* put APLL, CPLL, and GPLL into slow mode at resume time
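For context on the suspend hunk above: Rockchip CRU registers use a "HIWORD mask" layout, where the upper 16 bits of a write select which of the lower 16 bits may change, so no read-modify-write is needed. Writing 1 << (12 + 16) therefore means "allow gate bit 12 to change, and write it as 0"; the gates are set-to-disable, so this turns aclk_dmac1 on, and the newly saved RK3288_CLKGATE_CON(10) entry restores the original gate state on resume. A minimal sketch of that convention, with helper names invented here (the in-tree code expresses the same pattern via its HIWORD_UPDATE() macro) and assuming linux/io.h plus the Rockchip clk header are included:

static void rk_hiword_write(void __iomem *reg, u32 val, u32 mask, u32 shift)
{
	/* upper half = write-enable mask, lower half = new field value */
	writel_relaxed((val << shift) | (mask << (shift + 16)), reg);
}

static void example_ungate_aclk_dmac1(void __iomem *cru_base)
{
	/* same effect as the writel_relaxed(1 << (12 + 16), ...) above */
	rk_hiword_write(cru_base + RK3288_CLKGATE_CON(10), 0, 0x1, 12);
}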
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 65ab5c2f48b0..9b03c1abf19c 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
@@ -458,7 +459,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3328_CLKGATE_CON(2), 12, GFLAGS),
COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
@@ -550,15 +551,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", 0,
RK3328_CLKGATE_CON(25), 1, GFLAGS),
GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 2, GFLAGS),
GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 3, GFLAGS),
GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 4, GFLAGS),
GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 5, GFLAGS),
GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 6, GFLAGS),
COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
@@ -663,7 +664,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_GMAC */
COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 2, GFLAGS),
COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
@@ -733,7 +734,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_PERI */
GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
@@ -913,7 +914,7 @@ static void __init rk3328_clk_init(struct device_node *np)
&rk3328_cpuclk_data, rk3328_cpuclk_rates,
ARRAY_SIZE(rk3328_cpuclk_rates));
- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
+ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7c4d242f19c1..d239bbc2fc77 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 5a628148f3f0..2322d712ba88 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c
index 089cb17925e5..6c051aa04e59 100644
--- a/drivers/clk/rockchip/clk-rv1108.c
+++ b/drivers/clk/rockchip/clk-rv1108.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index c3ad92965823..d5fac5a8a3d7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
@@ -46,7 +47,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *base,
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
- u8 div_shift, u8 div_width, u8 div_flags,
+ int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
struct clk_div_table *div_table, int gate_offset,
u8 gate_shift, u8 gate_flags, unsigned long flags,
spinlock_t *lock)
@@ -95,7 +96,10 @@ static struct clk *rockchip_clk_register_branch(const char *name,
}
div->flags = div_flags;
- div->reg = base + muxdiv_offset;
+ if (div_offset)
+ div->reg = base + div_offset;
+ else
+ div->reg = base + muxdiv_offset;
div->shift = div_shift;
div->width = div_width;
div->lock = lock;
@@ -516,7 +520,7 @@ void __init rockchip_clk_register_branches(
ctx->reg_base, list->muxdiv_offset,
list->mux_shift,
list->mux_width, list->mux_flags,
- list->div_shift, list->div_width,
+ list->div_offset, list->div_shift, list->div_width,
list->div_flags, list->div_table,
list->gate_offset, list->gate_shift,
list->gate_flags, flags, &ctx->lock);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 6b53fff4cc96..1b5270755431 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -407,6 +407,7 @@ struct rockchip_clk_branch {
u8 mux_shift;
u8 mux_width;
u8 mux_flags;
+ int div_offset;
u8 div_shift;
u8 div_width;
u8 div_flags;
@@ -438,6 +439,28 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
+#define COMPOSITE_DIV_OFFSET(_id, cname, pnames, f, mo, ms, mw, \
+ mf, do, ds, dw, df, go, gs, gf) \
+ { \
+ .id = _id, \
+ .branch_type = branch_composite, \
+ .name = cname, \
+ .parent_names = pnames, \
+ .num_parents = ARRAY_SIZE(pnames), \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .mux_shift = ms, \
+ .mux_width = mw, \
+ .mux_flags = mf, \
+ .div_offset = do, \
+ .div_shift = ds, \
+ .div_width = dw, \
+ .div_flags = df, \
+ .gate_offset = go, \
+ .gate_shift = gs, \
+ .gate_flags = gf, \
+ }
+
#define COMPOSITE_NOMUX(_id, cname, pname, f, mo, ds, dw, df, \
go, gs, gf) \
{ \
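The div_offset plumbing above lets a composite branch keep its mux and its divider in different CLKSEL registers; when div_offset is zero, rockchip_clk_register_branch() falls back to muxdiv_offset, so existing branch tables behave exactly as before. A hypothetical table entry showing the argument order of the new macro (the clock ID, bit positions and register offsets below are invented purely for illustration):

static struct rockchip_clk_branch example_branches[] __initdata = {
	/* mux in CLKSEL_CON(1)[7:6], divider in CLKSEL_CON(2)[4:0],
	 * gate in CLKGATE_CON(3) bit 5 -- all values illustrative */
	COMPOSITE_DIV_OFFSET(SCLK_EXAMPLE, "sclk_example",
			mux_pll_src_cpll_gpll_p, 0,
			RK3288_CLKSEL_CON(1), 6, 2, MFLAGS,
			RK3288_CLKSEL_CON(2), 0, 5, DFLAGS,
			RK3288_CLKGATE_CON(3), 5, GFLAGS),
};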
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index a5fddebbe530..3f80bcd46074 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 9c95390d2d77..ce41f36a0e29 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 0e9a41a4cac8..facaad3c56a1 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 54066e6508d3..d2a68a792a21 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 8ae44b5db4c2..91db7894125d 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -4,6 +4,7 @@
// Author: Marek Szyprowski <m.szyprowski@samsung.com>
// Common Clock Framework support for Exynos5 power-domain dependent clocks
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f14139bcb0c1..c8265c4cbc4f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -12,6 +12,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 0a0b09591e6f..b2da2c8fa0c7 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -209,6 +209,7 @@ static const struct samsung_gate_clock exynos5410_gate_clks[] __initconst = {
GATE(CLK_USI1, "usi1", "aclk66", GATE_IP_PERIC, 11, 0, 0),
GATE(CLK_USI2, "usi2", "aclk66", GATE_IP_PERIC, 12, 0, 0),
GATE(CLK_USI3, "usi3", "aclk66", GATE_IP_PERIC, 13, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "aclk66", GATE_IP_PERIC, 15, 0, 0),
GATE(CLK_PWM, "pwm", "aclk66", GATE_IP_PERIC, 24, 0, 0),
GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 1c4c7a3039f1..0c6782ceac48 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -13,7 +13,8 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 82f8ae22fd34..0117e40c1d0a 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index dd1159050a5a..ce21b89d1eb1 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index f38f0e24e3b6..b2ea4dfb5b8c 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1f6e47cd327d..9ad546a5f74c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
new file mode 100644
index 000000000000..8db4a3eb4782
--- /dev/null
+++ b/drivers/clk/sifive/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig CLK_SIFIVE
+ bool "SiFive SoC driver support"
+ help
+ SoC drivers for SiFive Linux-capable SoCs.
+
+if CLK_SIFIVE
+
+config CLK_SIFIVE_FU540_PRCI
+ bool "PRCI driver for SiFive FU540 SoCs"
+ select CLK_ANALOGBITS_WRPLL_CLN28HPC
+ help
+ Supports the Power Reset Clock interface (PRCI) IP block found in
+ FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
+ enable this driver.
+
+endif
diff --git a/drivers/clk/sifive/Makefile b/drivers/clk/sifive/Makefile
new file mode 100644
index 000000000000..74d58a4c0756
--- /dev/null
+++ b/drivers/clk/sifive/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
new file mode 100644
index 000000000000..6282ee2f361c
--- /dev/null
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The FU540 PRCI implements clock and reset control for the SiFive
+ * FU540-C000 chip. This driver assumes that it has sole control
+ * over all PRCI resources.
+ *
+ * This driver is based on the PRCI driver written by Wesley Terpstra:
+ * https://github.com/riscv/riscv-linux/commit/999529edf517ed75b56659d456d221b2ee56bb60
+ *
+ * References:
+ * - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
+ */
+
+#include <dt-bindings/clock/sifive-fu540-prci.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_clk.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
+ * hfclk and rtcclk
+ */
+#define EXPECTED_CLK_PARENT_COUNT 2
+
+/*
+ * Register offsets and bitmasks
+ */
+
+/* COREPLLCFG0 */
+#define PRCI_COREPLLCFG0_OFFSET 0x4
+# define PRCI_COREPLLCFG0_DIVR_SHIFT 0
+# define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
+# define PRCI_COREPLLCFG0_DIVF_SHIFT 6
+# define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
+# define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
+# define PRCI_COREPLLCFG0_RANGE_SHIFT 18
+# define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
+# define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
+# define PRCI_COREPLLCFG0_FSE_SHIFT 25
+# define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
+# define PRCI_COREPLLCFG0_LOCK_SHIFT 31
+# define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG0 */
+#define PRCI_DDRPLLCFG0_OFFSET 0xc
+# define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
+# define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
+# define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
+# define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
+# define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
+# define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
+# define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
+# define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
+# define PRCI_DDRPLLCFG0_FSE_SHIFT 25
+# define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
+# define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
+# define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG1 */
+#define PRCI_DDRPLLCFG1_OFFSET 0x10
+# define PRCI_DDRPLLCFG1_CKE_SHIFT 24
+# define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+
+/* GEMGXLPLLCFG0 */
+#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
+# define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
+# define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
+# define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
+# define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
+# define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
+# define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
+# define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
+# define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
+# define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
+
+/* GEMGXLPLLCFG1 */
+#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
+# define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
+# define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
+
+/* CORECLKSEL */
+#define PRCI_CORECLKSEL_OFFSET 0x24
+# define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
+# define PRCI_CORECLKSEL_CORECLKSEL_MASK (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
+
+/* DEVICESRESETREG */
+#define PRCI_DEVICESRESETREG_OFFSET 0x28
+# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
+# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
+# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
+# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
+# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
+# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
+# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+
+/* CLKMUXSTATUSREG */
+#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
+# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
+# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+
+/*
+ * Private structures
+ */
+
+/**
+ * struct __prci_data - per-device-instance data
+ * @va: base virtual address of the PRCI IP block
+ * @hw_clks: encapsulates struct clk_hw records
+ *
+ * PRCI per-device instance data
+ */
+struct __prci_data {
+ void __iomem *va;
+ struct clk_hw_onecell_data hw_clks;
+};
+
+/**
+ * struct __prci_wrpll_data - WRPLL configuration and integration data
+ * @c: WRPLL current configuration record
+ * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
+ * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
+ * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
+ *
+ * @enable_bypass and @disable_bypass are used for WRPLL instances
+ * that contain a separate external glitchless clock mux downstream
+ * from the PLL. The WRPLL internal bypass mux is not glitchless.
+ */
+struct __prci_wrpll_data {
+ struct wrpll_cfg c;
+ void (*enable_bypass)(struct __prci_data *pd);
+ void (*disable_bypass)(struct __prci_data *pd);
+ u8 cfg0_offs;
+};
+
+/**
+ * struct __prci_clock - describes a clock device managed by PRCI
+ * @name: user-readable clock name string - should match the manual
+ * @parent_name: parent name for this clock
+ * @ops: struct clk_ops for the Linux clock framework to use for control
+ * @hw: Linux-private clock data
+ * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
+ * @pd: PRCI-specific data associated with this clock (if not NULL)
+ *
+ * PRCI clock data. Used by the PRCI driver to register PRCI-provided
+ * clocks to the Linux clock infrastructure.
+ */
+struct __prci_clock {
+ const char *name;
+ const char *parent_name;
+ const struct clk_ops *ops;
+ struct clk_hw hw;
+ struct __prci_wrpll_data *pwd;
+ struct __prci_data *pd;
+};
+
+#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
+
+/*
+ * Private functions
+ */
+
+/**
+ * __prci_readl() - read from a PRCI register
+ * @pd: PRCI context
+ * @offs: register offset to read from (in bytes, from PRCI base address)
+ *
+ * Read the register located at offset @offs from the base virtual
+ * address of the PRCI register target described by @pd, and return
+ * the value to the caller.
+ *
+ * Context: Any context.
+ *
+ * Return: the contents of the register described by @pd and @offs.
+ */
+static u32 __prci_readl(struct __prci_data *pd, u32 offs)
+{
+ return readl_relaxed(pd->va + offs);
+}
+
+static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
+{
+ writel_relaxed(v, pd->va + offs);
+}
+
+/* WRPLL-related private functions */
+
+/**
+ * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
+ * @c: ptr to a struct wrpll_cfg record to write config into
+ * @r: value read from the PRCI PLL configuration register
+ *
+ * Given a value @r read from an FU540 PRCI PLL configuration register,
+ * split it into fields and populate it into the WRPLL configuration record
+ * pointed to by @c.
+ *
+ * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
+ * have the same register layout.
+ *
+ * Context: Any context.
+ */
+static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
+{
+ u32 v;
+
+ v = r & PRCI_COREPLLCFG0_DIVR_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
+ c->divr = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVF_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
+ c->divf = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ c->divq = v;
+
+ v = r & PRCI_COREPLLCFG0_RANGE_MASK;
+ v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
+ c->range = v;
+
+ c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
+ WRPLL_FLAGS_EXT_FEEDBACK_MASK);
+
+ /* external feedback mode not supported */
+ c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
+}
+
+/**
+ * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
+ * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
+ *
+ * Using a set of WRPLL configuration values pointed to by @c,
+ * assemble a PRCI PLL configuration register value, and return it to
+ * the caller.
+ *
+ * Context: Any context. Caller must ensure that the contents of the
+ * record pointed to by @c do not change during the execution
+ * of this function.
+ *
+ * Returns: a value suitable for writing into a PRCI PLL configuration
+ * register
+ */
+static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
+{
+ u32 r = 0;
+
+ r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
+ r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
+ r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
+
+ /* external feedback mode not supported */
+ r |= PRCI_COREPLLCFG0_FSE_MASK;
+
+ return r;
+}
+
+/**
+ * __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ *
+ * Read the current configuration of the PLL identified by @pwd from
+ * the PRCI identified by @pd, and store it into the local configuration
+ * cache in @pwd.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_read_cfg(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd)
+{
+ __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
+}
+
+/**
+ * __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @c: WRPLL configuration record to write
+ *
+ * Write the WRPLL configuration described by @c into the WRPLL
+ * configuration register identified by @pwd in the PRCI instance
+ * described by @c. Make a cached copy of the WRPLL's current
+ * configuration so it can be used by other code.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_write_cfg(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ struct wrpll_cfg *c)
+{
+ __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
+
+ memcpy(&pwd->c, c, sizeof(*c));
+}
+
+/* Core clock mux control */
+
+/**
+ * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the PLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/*
+ * Linux clock framework integration
+ *
+ * See the Linux clock framework documentation for more information on
+ * these functions.
+ */
+
+static unsigned long sifive_fu540_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+
+ return wrpll_calc_output_rate(&pwd->c, parent_rate);
+}
+
+static long sifive_fu540_prci_wrpll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct wrpll_cfg c;
+
+ memcpy(&c, &pwd->c, sizeof(c));
+
+ wrpll_configure_for_rate(&c, rate, *parent_rate);
+
+ return wrpll_calc_output_rate(&c, *parent_rate);
+}
+
+static int sifive_fu540_prci_wrpll_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ int r;
+
+ r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
+ if (r)
+ return r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ __prci_wrpll_write_cfg(pd, pwd, &pwd->c);
+
+ udelay(wrpll_calc_max_lock_us(&pwd->c));
+
+ if (pwd->disable_bypass)
+ pwd->disable_bypass(pd);
+
+ return 0;
+}
+
+static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
+ .set_rate = sifive_fu540_prci_wrpll_set_rate,
+ .round_rate = sifive_fu540_prci_wrpll_round_rate,
+ .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
+ .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+};
+
+/* TLCLKSEL clock integration */
+
+static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 v;
+ u8 div;
+
+ v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
+ v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
+ div = v ? 1 : 2;
+
+ return div_u64(parent_rate, div);
+}
+
+static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
+ .recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
+};
+
+/*
+ * PRCI integration data for each WRPLL instance
+ */
+
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .enable_bypass = __prci_coreclksel_use_hfclk,
+ .disable_bypass = __prci_coreclksel_use_corepll,
+};
+
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+};
+
+/*
+ * List of clock controls provided by the PRCI
+ */
+
+static struct __prci_clock __prci_init_clocks[] = {
+ [PRCI_CLK_COREPLL] = {
+ .name = "corepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_clk_ops,
+ .pwd = &__prci_corepll_data,
+ },
+ [PRCI_CLK_DDRPLL] = {
+ .name = "ddrpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_ro_clk_ops,
+ .pwd = &__prci_ddrpll_data,
+ },
+ [PRCI_CLK_GEMGXLPLL] = {
+ .name = "gemgxlpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu540_prci_wrpll_clk_ops,
+ .pwd = &__prci_gemgxlpll_data,
+ },
+ [PRCI_CLK_TLCLK] = {
+ .name = "tlclk",
+ .parent_name = "corepll",
+ .ops = &sifive_fu540_prci_tlclksel_clk_ops,
+ },
+};
+
+/**
+ * __prci_register_clocks() - register clock controls in the PRCI with Linux
+ * @dev: Linux struct device *
+ *
+ * Register the list of clock controls described in __prci_init_plls[] with
+ * the Linux clock framework.
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int __prci_register_clocks(struct device *dev, struct __prci_data *pd)
+{
+ struct clk_init_data init = { };
+ struct __prci_clock *pic;
+ int parent_count, i, r;
+
+ parent_count = of_clk_get_parent_count(dev->of_node);
+ if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
+ dev_err(dev, "expected only two parent clocks, found %d\n",
+ parent_count);
+ return -EINVAL;
+ }
+
+ /* Register PLLs */
+ for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
+ pic = &__prci_init_clocks[i];
+
+ init.name = pic->name;
+ init.parent_names = &pic->parent_name;
+ init.num_parents = 1;
+ init.ops = pic->ops;
+ pic->hw.init = &init;
+
+ pic->pd = pd;
+
+ if (pic->pwd)
+ __prci_wrpll_read_cfg(pd, pic->pwd);
+
+ r = devm_clk_hw_register(dev, &pic->hw);
+ if (r) {
+ dev_warn(dev, "Failed to register clock %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+ if (r) {
+ dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ pd->hw_clks.hws[i] = &pic->hw;
+ }
+
+ pd->hw_clks.num = i;
+
+ r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &pd->hw_clks);
+ if (r) {
+ dev_err(dev, "could not add hw_provider: %d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Linux device model integration
+ *
+ * See the Linux device model documentation for more information about
+ * these functions.
+ */
+static int sifive_fu540_prci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct __prci_data *pd;
+ int r;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pd->va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd->va))
+ return PTR_ERR(pd->va);
+
+ r = __prci_register_clocks(dev, pd);
+ if (r) {
+ dev_err(dev, "could not register clocks: %d\n", r);
+ return r;
+ }
+
+ dev_dbg(dev, "SiFive FU540 PRCI probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sifive_fu540_prci_of_match[] = {
+ { .compatible = "sifive,fu540-c000-prci", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sifive_fu540_prci_of_match);
+
+static struct platform_driver sifive_fu540_prci_driver = {
+ .driver = {
+ .name = "sifive-fu540-prci",
+ .of_match_table = sifive_fu540_prci_of_match,
+ },
+ .probe = sifive_fu540_prci_probe,
+};
+
+static int __init sifive_fu540_prci_init(void)
+{
+ return platform_driver_register(&sifive_fu540_prci_driver);
+}
+core_initcall(sifive_fu540_prci_init);
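Because the new driver exports its clocks through clk_hw_onecell_data with of_clk_hw_onecell_get, device-tree consumers reference them by the indices from dt-bindings/clock/sifive-fu540-prci.h (clocks = <&prci PRCI_CLK_GEMGXLPLL> and so on); the clk_hw_register_clkdev() calls additionally allow name-based lookup. A minimal consumer sketch, with the consuming device and the 125 MHz rate chosen purely for illustration:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* first entry of this device's "clocks" property, e.g.
	 * clocks = <&prci PRCI_CLK_GEMGXLPLL>; */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* routed by the clk core to sifive_fu540_prci_wrpll_set_rate() */
	return clk_set_rate(clk, 125000000);
}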
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index eee2d48ab656..54a464fa63e0 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017, Intel Corporation
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b58ddf..5c50e723ecae 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index c4d0b6f6abf2..4705eb544f01 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/sprd/common.h b/drivers/clk/sprd/common.h
index abd9ff5ef448..1d077b39cef6 100644
--- a/drivers/clk/sprd/common.h
+++ b/drivers/clk/sprd/common.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum clock infrastructure
//
diff --git a/drivers/clk/sprd/composite.h b/drivers/clk/sprd/composite.h
index 0984e9e252dc..04ab3f587ee2 100644
--- a/drivers/clk/sprd/composite.h
+++ b/drivers/clk/sprd/composite.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum composite clock driver
//
diff --git a/drivers/clk/sprd/div.h b/drivers/clk/sprd/div.h
index b3033d24d431..87510e3d0e14 100644
--- a/drivers/clk/sprd/div.h
+++ b/drivers/clk/sprd/div.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum divider clock driver
//
diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h
index 2e582c68a08b..dc352ea55e1f 100644
--- a/drivers/clk/sprd/gate.h
+++ b/drivers/clk/sprd/gate.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum gate clock driver
//
diff --git a/drivers/clk/sprd/mux.h b/drivers/clk/sprd/mux.h
index 548cfa0f145c..892e4191cc7f 100644
--- a/drivers/clk/sprd/mux.h
+++ b/drivers/clk/sprd/mux.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum multiplexer clock driver
//
diff --git a/drivers/clk/sprd/pll.h b/drivers/clk/sprd/pll.h
index 514175621099..e95f11e91ffe 100644
--- a/drivers/clk/sprd/pll.h
+++ b/drivers/clk/sprd/pll.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
//
// Spreadtrum pll clock driver
//
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index c514d39760cb..23497f07ad89 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 129ebd2588fd..2bbfb3343311 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 932836d26e2b..d3fc1f5bf396 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -531,7 +532,8 @@ static SUNXI_CCU_GATE(dram_ts_clk, "dram-ts", "dram",
static const char * const de_parents[] = { "pll-periph0-2x", "pll-de" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 3, BIT(31), 0);
+ 0x104, 0, 4, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon0_parents[] = { "pll-mipi", "pll-video0-2x" };
static const u8 tcon0_table[] = { 0, 2, };
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 139e8389615c..9d3f98962779 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -266,7 +267,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents, 0x600,
0, 4, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "psi-ahb1-ahb2",
0x60c, BIT(0), 0);
@@ -311,7 +312,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
0, 3, /* M */
24, 1, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "psi-ahb1-ahb2",
0x69c, BIT(0), 0);
@@ -656,6 +657,8 @@ static const char * const hdmi_cec_parents[] = { "osc32k", "pll-periph0-2x" };
static const struct ccu_mux_fixed_prediv hdmi_cec_predivs[] = {
{ .index = 1, .div = 36621 },
};
+
+#define SUN50I_H6_HDMI_CEC_CLK_REG 0xb10
static struct ccu_mux hdmi_cec_clk = {
.enable = BIT(31),
@@ -689,7 +692,7 @@ static SUNXI_CCU_MUX_WITH_GATE(tcon_lcd0_clk, "tcon-lcd0",
tcon_lcd0_parents, 0xb60,
24, 3, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_tcon_lcd0_clk, "bus-tcon-lcd0", "ahb3",
0xb7c, BIT(0), 0);
@@ -704,7 +707,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(tcon_tv0_clk, "tcon-tv0",
8, 2, /* P */
24, 3, /* mux */
BIT(31), /* gate */
- 0);
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(bus_tcon_tv0_clk, "bus-tcon-tv0", "ahb3",
0xb9c, BIT(0), 0);
@@ -1200,6 +1203,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val &= ~(GENMASK(21, 16) | BIT(0));
writel(val | (7 << 16), reg + SUN50I_H6_PLL_AUDIO_REG);
+ /*
+ * First clock parent (osc32K) is unusable for CEC. But since there
+ * is no good way to force parent switch (both run with same frequency),
+ * just set second clock parent here.
+ */
+ val = readl(reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+ val |= BIT(24);
+ writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
+
return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
}
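The forced mux write above exists because rate-based reparenting cannot tell the two CEC parents apart. Assuming the usual 600 MHz pll-periph0 (so 1.2 GHz for pll-periph0-2x), the second input behind its fixed pre-divider works out to:

	pll-periph0-2x / prediv = 1200000000 Hz / 36621 ≈ 32768 Hz ≈ osc32k

With both candidates at the same nominal 32768 Hz, the clock framework has no frequency criterion for choosing a parent, so the probe routine simply sets bit 24 (mux field value 1) once.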
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
index 2ccfe4428260..9406f9a6a8aa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2016 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index fa2c2dd77102..813e9bf73cbf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.h b/drivers/clk/sunxi-ng/ccu-sun5i.h
index 93a275fbd9a9..b66abd4fd0bf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.h
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.h
@@ -60,10 +60,6 @@
/* The rest of the module clocks are exported */
-#define CLK_MBUS 99
-
-/* And finally the IEP clock */
-
#define CLK_NUMBER (CLK_IEP + 1)
#endif /* _CCU_SUN5I_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 609970c0b666..b494c4fe0b2c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 4b5f8f4e4ab8..a9c0c5406b85 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index c7bf814dfd2b..25bcf3fd2dfc 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 2d6555d73170..be5920e8a9ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -513,8 +514,9 @@ static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0);
static SUNXI_CCU_GATE(mipi_csi_clk, "mipi-csi", "osc24M", 0x130, BIT(31), 0);
-static const char * const csi_mclk_parents[] = { "pll-de", "osc24M" };
-static const u8 csi_mclk_table[] = { 3, 5 };
+static const char * const csi_mclk_parents[] = { "pll-video0", "pll-de",
+ "osc24M" };
+static const u8 csi_mclk_table[] = { 0, 3, 5 };
static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
csi_mclk_parents, csi_mclk_table,
0x134,
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e71e2451c2e3..0f3df565c6c1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index a22d11aa38ba..f9625f7b9ec2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index ac12f261f8ca..ec64eb692ecf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
@@ -325,7 +326,8 @@ static SUNXI_CCU_GATE(dram_ohci_clk, "dram-ohci", "dram",
static const char * const de_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
- 0x104, 0, 4, 24, 2, BIT(31), 0);
+ 0x104, 0, 4, 24, 2, BIT(31),
+ CLK_SET_RATE_PARENT);
static const char * const tcon_parents[] = { "pll-video" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 8936ef87652c..0e23583e4f58 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index a09dfbe36402..e748b8a6f3c5 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
@@ -240,7 +241,7 @@ static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_spdif_parents,
/* The BSP header file has a CIR_CFG, but no mod clock uses this definition */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
- 0x0cc, BIT(8), 0);
+ 0x0cc, BIT(1), 0);
static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr",
0x100, BIT(0), 0);
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
index 39d06fed55b2..b22484f1bb9a 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
*
*/
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 302a18efd39f..6d407a8a61ee 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_div.h"
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index d1d168d4c4f0..1842603f8f11 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_frac.h"
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index cd069d5da215..9c81644e9dfe 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index f9869f7353c0..b23410682088 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/sunxi-ng.h>
+#include <linux/io.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 0357349eb767..e17fb4c9fcfe 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 12e0783caee6..c2a672797a74 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mult.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 312664155a54..f9b409c3a89c 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mux.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 2485bda87a9a..50c7e6b1ba13 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 841840e35e61..aa5beaabc292 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index 9b49adb20d07..53ec4fb59880 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
@@ -167,7 +168,7 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
- u32 n_mask, k_mask, m_mask, p_mask;
+ u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
struct _ccu_nkmp _nkmp;
unsigned long flags;
u32 reg;
@@ -186,10 +187,24 @@ static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
ccu_nkmp_find_best(parent_rate, rate, &_nkmp);
- n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
- k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
- m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
- p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);
+ /*
+ * If width is 0, GENMASK() macro may not generate expected mask (0)
+ * as it falls under undefined behaviour by C standard due to shifts
+ * which are equal or greater than width of left operand. This can
+ * be easily avoided by explicitly checking if width is 0.
+ */
+ if (nkmp->n.width)
+ n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
+ nkmp->n.shift);
+ if (nkmp->k.width)
+ k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
+ nkmp->k.shift);
+ if (nkmp->m.width)
+ m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
+ nkmp->m.shift);
+ if (nkmp->p.width)
+ p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
+ nkmp->p.shift);
spin_lock_irqsave(nkmp->common.lock, flags);
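The zero-width guards above matter because an absent field would otherwise produce GENMASK(shift - 1, shift); in the degenerate case that expands to a right shift by the full width of the operand (BITS_PER_LONG), which the C standard leaves undefined instead of guaranteeing 0. A small helper expressing the same guard (the name is invented for illustration; the patch open-codes the four checks):

#include <linux/bits.h>

/* Mask for a register field, or 0 when the field is absent (width == 0).
 * Guarding on width avoids evaluating GENMASK(shift - 1, shift). */
static u32 ccu_field_mask(u8 width, u8 shift)
{
	return width ? GENMASK(width + shift - 1, shift) : 0;
}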
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 424d8635b053..e15413174aa7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 400c58ad72fd..0a4a6fd13f5b 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_phase.h"
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 3b3dc9bdf2b0..e510467ea24c 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_sdm.h"
diff --git a/drivers/clk/sunxi/Kconfig b/drivers/clk/sunxi/Kconfig
new file mode 100644
index 000000000000..2b6207cc4eda
--- /dev/null
+++ b/drivers/clk/sunxi/Kconfig
@@ -0,0 +1,43 @@
+menuconfig CLK_SUNXI
+ bool "Legacy clock support for Allwinner SoCs"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+
+if CLK_SUNXI
+
+config CLK_SUNXI_CLOCKS
+ bool "Legacy clock drivers"
+ default y
+ help
+ Legacy clock drivers being used on older (A10, A13, A20,
+ A23, A31, A80) SoCs. These drivers are kept around for
+ Device Tree backward compatibility issues, in case one would
+ still use a Device Tree with one clock provider by
+ node. Newer Device Trees and newer SoCs use the drivers
+ controlled by CONFIG_SUNXI_CCU.
+
+config CLK_SUNXI_PRCM_SUN6I
+ bool "Legacy A31 PRCM driver"
+ select MFD_SUN6I_PRCM
+ default y
+ help
+ Legacy clock driver for the A31 PRCM clocks. Those are
+ usually needed for the PMIC communication, mostly.
+
+config CLK_SUNXI_PRCM_SUN8I
+ bool "Legacy sun8i PRCM driver"
+ select MFD_SUN6I_PRCM
+ default y
+ help
+ Legacy clock driver for the sun8i family PRCM clocks.
+ Those are usually needed for the PMIC communication,
+ mostly.
+
+config CLK_SUNXI_PRCM_SUN9I
+ bool "Legacy A80 PRCM driver"
+ default y
+ help
+ Legacy clock driver for the A80 PRCM clocks. Those are
+ usually needed for the PMIC communication, mostly.
+
+endif
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index be88368b48a1..e10824c76ae9 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -3,27 +3,32 @@
# Makefile for sunxi specific clk
#
-obj-y += clk-sunxi.o clk-factors.o
-obj-y += clk-a10-codec.o
-obj-y += clk-a10-hosc.o
-obj-y += clk-a10-mod1.o
-obj-y += clk-a10-pll2.o
-obj-y += clk-a10-ve.o
-obj-y += clk-a20-gmac.o
-obj-y += clk-mod0.o
-obj-y += clk-simple-gates.o
-obj-y += clk-sun4i-display.o
-obj-y += clk-sun4i-pll3.o
-obj-y += clk-sun4i-tcon-ch1.o
-obj-y += clk-sun8i-bus-gates.o
-obj-y += clk-sun8i-mbus.o
-obj-y += clk-sun9i-core.o
-obj-y += clk-sun9i-mmc.o
-obj-y += clk-usb.o
+obj-$(CONFIG_CLK_SUNXI) += clk-factors.o
-obj-$(CONFIG_MACH_SUN9I) += clk-sun8i-apb0.o
-obj-$(CONFIG_MACH_SUN9I) += clk-sun9i-cpus.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sunxi.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-codec.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-hosc.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-mod1.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-pll2.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a10-ve.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-a20-gmac.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-mod0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-simple-gates.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-display.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-pll3.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun4i-tcon-ch1.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-bus-gates.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-mbus.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-core.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-mmc.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-usb.o
-obj-$(CONFIG_MFD_SUN6I_PRCM) += \
- clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
- clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_CLOCKS) += clk-sun9i-cpus.o
+
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-apb0-gates.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN6I) += clk-sun6i-ar100.o
+
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun8i-apb0.o
+obj-$(CONFIG_CLK_SUNXI_PRCM_SUN8I) += clk-sun6i-apb0-gates.o
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
index e2819fa09637..9e6796a7b4c4 100644
--- a/drivers/clk/sunxi/clk-a10-mod1.c
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index d8eab90ae661..a709b6a551af 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index d9ea22ec4e25..d119b453dccd 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 3437f734c9bf..e6d639d9ea70 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index fc0278a1acc7..915954507d0a 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index a085c3bc127c..8130467d647a 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 9780fac6d029..bb2dc83fc697 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-pll3.c b/drivers/clk/sunxi/clk-sun4i-pll3.c
index f66267e77d9c..c879d7e25ca0 100644
--- a/drivers/clk/sunxi/clk-sun4i-pll3.c
+++ b/drivers/clk/sunxi/clk-sun4i-pll3.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index b6d29d1bedca..af8ca5019639 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index d5c31804ee54..5a7d4dd09e85 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index bee305bdddbe..bfbcd71b225d 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 56db89b6979f..0e924c9cbd5c 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 4d5e14142e15..01255d827fc9 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index f00d8758ba24..da264d0f7f4b 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 892c29030b7b..f5b1c0067365 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 917fc27a33dd..7d15e0432ed4 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 205fe8ff63f0..2a1822a22740 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -175,6 +175,7 @@ struct clk *tegra_clk_register_mc(const char *name, const char *parent_name,
void __iomem *reg, spinlock_t *lock)
{
return clk_register_divider_table(NULL, name, parent_name,
- CLK_IS_CRITICAL, reg, 16, 1, 0,
+ CLK_IS_CRITICAL,
+ reg, 16, 1, CLK_DIVIDER_READ_ONLY,
mc_div_table, lock);
}
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 0621a3a82ea6..b7f763f0ecd8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -121,18 +122,28 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
struct tegra_clk_emc *tegra;
u8 ram_code = tegra_read_ram_code();
struct emc_timing *timing = NULL;
- int i;
+ int i, k, t;
tegra = container_of(hw, struct tegra_clk_emc, hw);
- for (i = 0; i < tegra->num_timings; i++) {
- if (tegra->timings[i].ram_code != ram_code)
- continue;
+ for (k = 0; k < tegra->num_timings; k++) {
+ if (tegra->timings[k].ram_code == ram_code)
+ break;
+ }
+
+ for (t = k; t < tegra->num_timings; t++) {
+ if (tegra->timings[t].ram_code != ram_code)
+ break;
+ }
+ for (i = k; i < t; i++) {
timing = tegra->timings + i;
+ if (timing->rate < req->rate && i != t - 1)
+ continue;
+
if (timing->rate > req->max_rate) {
- i = max(i, 1);
+ i = max(i, k + 1);
req->rate = tegra->timings[i - 1].rate;
return 0;
}
@@ -140,10 +151,8 @@ static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
if (timing->rate < req->min_rate)
continue;
- if (timing->rate >= req->rate) {
- req->rate = timing->rate;
- return 0;
- }
+ req->rate = timing->rate;
+ return 0;
}
if (timing) {
@@ -214,7 +223,10 @@ static int emc_set_timing(struct tegra_clk_emc *tegra,
if (emc_get_parent(&tegra->hw) == timing->parent_index &&
clk_get_rate(timing->parent) != timing->parent_rate) {
- BUG();
+ WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
+ __clk_get_name(timing->parent),
+ clk_get_rate(timing->parent),
+ timing->parent_rate);
return -EINVAL;
}
@@ -282,7 +294,7 @@ static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
for (i = timing_index+1; i < tegra->num_timings; i++) {
timing = tegra->timings + i;
if (timing->ram_code != ram_code)
- continue;
+ break;
if (emc_parent_clk_sources[timing->parent_index] !=
emc_parent_clk_sources[
@@ -293,7 +305,7 @@ static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
for (i = timing_index-1; i >= 0; --i) {
timing = tegra->timings + i;
if (timing->ram_code != ram_code)
- continue;
+ break;
if (emc_parent_clk_sources[timing->parent_index] !=
emc_parent_clk_sources[
@@ -433,19 +445,23 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
struct device_node *node,
u32 ram_code)
{
+ struct emc_timing *timings_ptr;
struct device_node *child;
int child_count = of_get_child_count(node);
int i = 0, err;
+ size_t size;
- tegra->timings = kcalloc(child_count, sizeof(struct emc_timing),
- GFP_KERNEL);
+ size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);
+
+ tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
if (!tegra->timings)
return -ENOMEM;
- tegra->num_timings = child_count;
+ timings_ptr = tegra->timings + tegra->num_timings;
+ tegra->num_timings += child_count;
for_each_child_of_node(node, child) {
- struct emc_timing *timing = tegra->timings + (i++);
+ struct emc_timing *timing = timings_ptr + (i++);
err = load_one_timing_from_dt(tegra, timing, child);
if (err) {
@@ -456,7 +472,7 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
timing->ram_code = ram_code;
}
- sort(tegra->timings, tegra->num_timings, sizeof(struct emc_timing),
+ sort(timings_ptr, child_count, sizeof(struct emc_timing),
cmp_timings, NULL);
return 0;
@@ -499,10 +515,10 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
* fuses until the apbmisc driver is loaded.
*/
err = load_timings_from_dt(tegra, node, node_ram_code);
- of_node_put(node);
- if (err)
+ if (err) {
+ of_node_put(node);
return ERR_PTR(err);
- break;
+ }
}
if (tegra->num_timings == 0)
@@ -532,7 +548,5 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
/* Allow debugging tools to see the EMC clock */
clk_register_clkdev(clk, "emc", "tegra-clk-debug");
- clk_prepare_enable(clk);
-
return clk;
};
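
The reworked emc_determine_rate() above first finds the contiguous run of timings whose ram_code matches the board (indices k to t-1; the table is kept sorted), then returns the first entry in that run that is at least the requested rate, falling back to the last entry of the run. The real code additionally honours req->min_rate and req->max_rate; the standalone sketch below (plain arrays, illustrative names) shows only the core selection:

    /* Sketch of the rate selection: pick the first timing with
     * rate >= requested inside the [k, e) run matching ram_code,
     * falling back to the last entry of the run.
     */
    #include <stdio.h>

    struct timing { unsigned int ram_code; unsigned long rate; };

    static long pick_rate(const struct timing *t, int n,
                          unsigned int ram_code, unsigned long req)
    {
        int k, e, i;

        for (k = 0; k < n && t[k].ram_code != ram_code; k++)
            ;                          /* start of the matching run */
        for (e = k; e < n && t[e].ram_code == ram_code; e++)
            ;                          /* one past the end of the run */
        if (k == e)
            return -1;                 /* no timings for this ram_code */

        for (i = k; i < e; i++)
            if (t[i].rate >= req || i == e - 1)
                return t[i].rate;      /* first big-enough rate, or the max */
        return -1;                     /* not reached */
    }

    int main(void)
    {
        const struct timing table[] = {
            { 0, 12750000 }, { 1, 20400000 }, { 1, 40800000 }, { 1, 204000000 },
        };

        printf("%ld\n", pick_rate(table, 4, 1, 30000000));  /* 40800000 */
        printf("%ld\n", pick_rate(table, 4, 1, 999000000)); /* 204000000 */
        return 0;
    }
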
diff --git a/drivers/clk/tegra/clk-periph-fixed.c b/drivers/clk/tegra/clk-periph-fixed.c
index c57dfb037b10..956f2215c733 100644
--- a/drivers/clk/tegra/clk-periph-fixed.c
+++ b/drivers/clk/tegra/clk-periph-fixed.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index b50b7460014b..6b976b2514f7 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -444,6 +444,9 @@ static int clk_pll_enable(struct clk_hw *hw)
unsigned long flags = 0;
int ret;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
@@ -663,8 +666,8 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
pll_override_writel(val, params->pmc_divp_reg, pll);
val = pll_override_readl(params->pmc_divnm_reg, pll);
- val &= ~(divm_mask(pll) << div_nmp->override_divm_shift) |
- ~(divn_mask(pll) << div_nmp->override_divn_shift);
+ val &= ~((divm_mask(pll) << div_nmp->override_divm_shift) |
+ (divn_mask(pll) << div_nmp->override_divn_shift));
val |= (cfg->m << div_nmp->override_divm_shift) |
(cfg->n << div_nmp->override_divn_shift);
pll_override_writel(val, params->pmc_divnm_reg, pll);
@@ -940,11 +943,16 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
static int clk_plle_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
struct tegra_clk_pll_freq_table sel;
+ unsigned long input_rate;
u32 val;
int err;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -1355,6 +1363,9 @@ static int clk_pllc_enable(struct clk_hw *hw)
int ret;
unsigned long flags = 0;
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
@@ -1567,7 +1578,12 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
u32 val;
int ret;
unsigned long flags = 0;
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+ unsigned long input_rate;
+
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -1704,6 +1720,9 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
return -EINVAL;
}
+ if (clk_pll_is_enabled(hw))
+ return 0;
+
input_rate = clk_hw_get_rate(__clk_get_hw(osc));
if (pll->lock)
@@ -2379,6 +2398,16 @@ struct clk *tegra_clk_register_pllre_tegra210(const char *name,
return clk;
}
+static int clk_plle_tegra210_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_pll *pll = to_clk_pll(hw);
+ u32 val;
+
+ val = pll_readl_base(pll);
+
+ return val & PLLE_BASE_ENABLE ? 1 : 0;
+}
+
static int clk_plle_tegra210_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
@@ -2386,7 +2415,12 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
u32 val;
int ret = 0;
unsigned long flags = 0;
- unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
+ unsigned long input_rate;
+
+ if (clk_plle_tegra210_is_enabled(hw))
+ return 0;
+
+ input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -2497,16 +2531,6 @@ out:
spin_unlock_irqrestore(pll->lock, flags);
}
-static int clk_plle_tegra210_is_enabled(struct clk_hw *hw)
-{
- struct tegra_clk_pll *pll = to_clk_pll(hw);
- u32 val;
-
- val = pll_readl_base(pll);
-
- return val & PLLE_BASE_ENABLE ? 1 : 0;
-}
-
static const struct clk_ops tegra_clk_plle_tegra210_ops = {
.is_enabled = clk_plle_tegra210_is_enabled,
.enable = clk_plle_tegra210_enable,
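
Besides the is-enabled early returns, the clk-pll.c hunk above fixes a masking bug: ~(m << s1) | ~(n << s2) is, by De Morgan, ~((m << s1) & (n << s2)), which clears almost nothing when the two fields do not overlap, so divm/divn were never zeroed before the new values were ORed in. The replacement ~((m << s1) | (n << s2)) clears both fields. A tiny standalone check with illustrative field positions:

    /* Shows why "~a | ~b" cannot clear two bit fields: it equals
     * ~(a & b), which leaves the field bits set when a and b do
     * not overlap.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t divm_mask = 0x1fu << 0;   /* illustrative field positions */
        uint32_t divn_mask = 0xffu << 8;
        uint32_t val = 0xffffffffu;

        uint32_t wrong = val & (~divm_mask | ~divn_mask);
        uint32_t right = val & ~(divm_mask | divn_mask);

        printf("wrong: 0x%08x\n", wrong);  /* 0xffffffff - nothing cleared */
        printf("right: 0x%08x\n", right);  /* 0xffff00e0 - both fields cleared */
        return 0;
    }
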
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index 473d418533cb..a5cd3e31dbae 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/types.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 84267cfc4433..b5ff76c663f8 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -121,7 +121,7 @@ out:
return err;
}
-const struct clk_ops tegra_clk_super_mux_ops = {
+static const struct clk_ops tegra_clk_super_mux_ops = {
.get_parent = clk_super_get_parent,
.set_parent = clk_super_set_parent,
};
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index df0018f7bf7e..d7bee144f4b7 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -413,7 +413,6 @@ static struct tegra_clk_pll_params pll_m_params = {
.base_reg = PLLM_BASE,
.misc_reg = PLLM_MISC,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLL_MISC_LOCK_ENABLE,
.lock_delay = 300,
.max_p = 5,
.pdiv_tohw = pllm_p,
@@ -421,7 +420,7 @@ static struct tegra_clk_pll_params pll_m_params = {
.pmc_divnm_reg = PMC_PLLM_WB0_OVERRIDE,
.pmc_divp_reg = PMC_PLLM_WB0_OVERRIDE_2,
.freq_table = pll_m_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table pll_e_freq_table[] = {
@@ -1466,9 +1465,9 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
- plld_base = clk_readl(clk_base + PLLD_BASE);
+ plld_base = readl(clk_base + PLLD_BASE);
plld_base &= ~BIT(25);
- clk_writel(plld_base, clk_base + PLLD_BASE);
+ writel(plld_base, clk_base + PLLD_BASE);
}
/**
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 7545af763d7a..ed3c7df75d1e 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3557,7 +3557,7 @@ static void __init tegra210_clock_init(struct device_node *np)
if (!clks)
return;
- value = clk_readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT;
+ value = readl(clk_base + SPARE_REG0) >> CLK_M_DIVISOR_SHIFT;
clk_m_div = (value & CLK_M_DIVISOR_MASK) + 1;
if (tegra_osc_clk_init(clk_base, tegra210_clks, tegra210_input_freq,
@@ -3574,9 +3574,9 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_pmc_clk_init(pmc_base, tegra210_clks);
/* For Tegra210, PLLD is the only source for DSIA & DSIB */
- value = clk_readl(clk_base + PLLD_BASE);
+ value = readl(clk_base + PLLD_BASE);
value &= ~BIT(25);
- clk_writel(value, clk_base + PLLD_BASE);
+ writel(value, clk_base + PLLD_BASE);
tegra_clk_apply_init_table = tegra210_clock_apply_init_table;
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index ffaf17f71860..6f2862eddad7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 0c210984765a..fdfb90058504 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -14,6 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/ti/clk-7xx-compat.c b/drivers/clk/ti/clk-7xx-compat.c
index e3cb7f0b03ae..b3cd2296f84b 100644
--- a/drivers/clk/ti/clk-7xx-compat.c
+++ b/drivers/clk/ti/clk-7xx-compat.c
@@ -362,7 +362,7 @@ static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst
{ DRA7_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init_cm:clk:0010:25" },
{ DRA7_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
{ DRA7_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
{ DRA7_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
{ DRA7_PCIE1_CLKCTRL, dra7_pcie1_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
{ DRA7_PCIE2_CLKCTRL, dra7_pcie2_bit_data, CLKF_SW_SUP, "l4_root_clk_div", "pcie_clkdm" },
@@ -662,7 +662,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per_clkctrl_regs[] __initconst
{ DRA7_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
- { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
+ { DRA7_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div", "l4sec_clkdm" },
{ DRA7_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01d0:24", "l4per2_clkdm" },
{ DRA7_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per_cm:clk:01e0:24", "l4per2_clkdm" },
@@ -704,7 +704,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ DRA7_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
{ DRA7_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0020:24" },
- { DRA7_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
{ DRA7_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0060:24" },
{ DRA7_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon_cm:clk:0068:24" },
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 597fb4a59318..79186b918d87 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -348,7 +348,7 @@ static const struct omap_clkctrl_reg_data dra7_l3init_clkctrl_regs[] __initconst
{ DRA7_L3INIT_MMC2_CLKCTRL, dra7_mmc2_bit_data, CLKF_SW_SUP, "l3init-clkctrl:0010:25" },
{ DRA7_L3INIT_USB_OTG_SS2_CLKCTRL, dra7_usb_otg_ss2_bit_data, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
{ DRA7_L3INIT_USB_OTG_SS3_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
- { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_core_h13x2_ck" },
+ { DRA7_L3INIT_USB_OTG_SS4_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_DRA74 | CLKF_SOC_DRA76, "dpll_core_h13x2_ck" },
{ DRA7_L3INIT_SATA_CLKCTRL, dra7_sata_bit_data, CLKF_SW_SUP, "func_48m_fclk" },
{ DRA7_L3INIT_OCP2SCP1_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
{ DRA7_L3INIT_OCP2SCP3_CLKCTRL, NULL, CLKF_HW_SUP, "l4_root_clk_div" },
@@ -590,7 +590,7 @@ static const struct omap_clkctrl_reg_data dra7_l4sec_clkctrl_regs[] __initconst
{ DRA7_L4SEC_AES1_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_AES2_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ DRA7_L4SEC_DES_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
- { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP, "" },
+ { DRA7_L4SEC_RNG_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_SOC_NONSEC, "" },
{ DRA7_L4SEC_SHAM_CLKCTRL, NULL, CLKF_HW_SUP, "l3_iclk_div" },
{ 0 },
};
@@ -757,7 +757,7 @@ static const struct omap_clkctrl_reg_data dra7_wkupaon_clkctrl_regs[] __initcons
{ DRA7_WKUPAON_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
{ DRA7_WKUPAON_GPIO1_CLKCTRL, dra7_gpio1_bit_data, CLKF_HW_SUP, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_TIMER1_CLKCTRL, dra7_timer1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0020:24" },
- { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, 0, "secure_32k_clk_src_ck" },
+ { DRA7_WKUPAON_TIMER12_CLKCTRL, NULL, CLKF_SOC_NONSEC, "secure_32k_clk_src_ck" },
{ DRA7_WKUPAON_COUNTER_32K_CLKCTRL, NULL, 0, "wkupaon_iclk_mux" },
{ DRA7_WKUPAON_UART10_CLKCTRL, dra7_uart10_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0060:24" },
{ DRA7_WKUPAON_DCAN1_CLKCTRL, dra7_dcan1_bit_data, CLKF_SW_SUP, "wkupaon-clkctrl:0068:24" },
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ba17cc5bd04b..e0b8ed3a1e80 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 639f515e08f0..96d65a1cf7be 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -446,6 +446,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
u32 addr;
int ret;
char *c;
+ u16 soc_mask = 0;
if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
of_node_name_eq(node, "clk"))
@@ -469,6 +470,13 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
else
data = dra7_clkctrl_data;
}
+
+ if (of_machine_is_compatible("ti,dra72"))
+ soc_mask = CLKF_SOC_DRA72;
+ if (of_machine_is_compatible("ti,dra74"))
+ soc_mask = CLKF_SOC_DRA74;
+ if (of_machine_is_compatible("ti,dra76"))
+ soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
if (of_machine_is_compatible("ti,am33xx")) {
@@ -501,6 +509,9 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
data = dm816_clkctrl_data;
#endif
+ if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
+ soc_mask |= CLKF_SOC_NONSEC;
+
while (data->addr) {
if (addr == data->addr)
break;
@@ -562,6 +573,12 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
reg_data = data->regs;
while (reg_data->parent) {
+ if ((reg_data->flags & CLKF_SOC_MASK) &&
+ (reg_data->flags & soc_mask) == 0) {
+ reg_data++;
+ continue;
+ }
+
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 1c0fac59d809..e4b8392ff63c 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -83,6 +83,13 @@ enum {
#define CLKF_HW_SUP BIT(6)
#define CLKF_NO_IDLEST BIT(7)
+#define CLKF_SOC_MASK GENMASK(11, 8)
+
+#define CLKF_SOC_NONSEC BIT(8)
+#define CLKF_SOC_DRA72 BIT(9)
+#define CLKF_SOC_DRA74 BIT(10)
+#define CLKF_SOC_DRA76 BIT(11)
+
#define CLK(dev, con, ck) \
{ \
.lk = { \
@@ -303,7 +310,6 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
-bool omap2_clk_is_hw_omap(struct clk_hw *hw);
extern struct ti_clk_ll_ops *ti_clk_ll_ops;
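
The new CLKF_SOC_* bits above, combined with the soc_mask built in _ti_omap4_clkctrl_setup(), let a single clkctrl table serve several DRA7 variants: an entry carrying any bit inside CLKF_SOC_MASK is only registered when the detected SoC (or, for CLKF_SOC_NONSEC, a GP device) contributed a matching bit. A standalone sketch of that filter, with the masks redefined locally so it builds on its own:

    /* Sketch of the SoC-variant filter applied while walking the clkctrl
     * table: entries tagged with CLKF_SOC_* bits are skipped unless the
     * detected SoC contributed one of those bits to soc_mask.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n)          (1u << (n))
    #define CLKF_SOC_MASK   (0xfu << 8)    /* GENMASK(11, 8) */
    #define CLKF_SOC_NONSEC BIT(8)
    #define CLKF_SOC_DRA74  BIT(10)
    #define CLKF_SOC_DRA76  BIT(11)

    static int entry_applies(uint32_t flags, uint32_t soc_mask)
    {
        if ((flags & CLKF_SOC_MASK) && !(flags & soc_mask))
            return 0;                  /* tagged for a different SoC: skip */
        return 1;
    }

    int main(void)
    {
        uint32_t soc_mask = CLKF_SOC_DRA74 | CLKF_SOC_NONSEC; /* a GP DRA74x */

        /* USB_OTG_SS4 exists on DRA74/76 only; TIMER12 only on GP devices */
        printf("%d\n", entry_applies(CLKF_SOC_DRA74 | CLKF_SOC_DRA76, soc_mask)); /* 1 */
        printf("%d\n", entry_applies(CLKF_SOC_NONSEC, soc_mask));                 /* 1 */
        printf("%d\n", entry_applies(CLKF_SOC_DRA74 | CLKF_SOC_DRA76,
                                     CLKF_SOC_NONSEC));                           /* 0 */
        return 0;
    }
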
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index ed24f20f63c7..95e36ba64acc 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index 7c0403b733ae..698306f4801f 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -42,7 +42,8 @@ static int clk_sysctrl_prepare(struct clk_hw *hw)
clk->reg_bits[0]);
if (!ret && clk->enable_delay_us)
- usleep_range(clk->enable_delay_us, clk->enable_delay_us);
+ usleep_range(clk->enable_delay_us, clk->enable_delay_us +
+ (clk->enable_delay_us >> 2));
return ret;
}
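
The usleep_range() change above turns a degenerate range (min == max) into a real one: the upper bound becomes the delay plus a quarter of itself (the >> 2), roughly 25% slack, which gives the timer subsystem room to coalesce wakeups. A quick arithmetic check with values chosen purely for illustration:

    /* usleep_range(min, max) wants max > min so wakeups can be batched;
     * "delay + (delay >> 2)" adds roughly 25% headroom to the delay.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned long delays[] = { 100, 400, 1000 };

        for (int i = 0; i < 3; i++) {
            unsigned long min = delays[i];
            unsigned long max = min + (min >> 2);
            printf("usleep_range(%lu, %lu)\n", min, max); /* 400 -> 400..500 */
        }
        return 0;
    }
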
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index c2b6bb814742..4fa0cd951d2e 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 19174835693b..5970edb6d334 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index d7b53ac8ad11..4b9d5c14c400 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -158,7 +158,7 @@ static void __init zynq_clk_register_fclk(enum zynq_clk fclk,
clks[fclk] = clk_register_gate(NULL, clk_name,
div1_name, CLK_SET_RATE_PARENT, fclk_gate_reg,
0, CLK_GATE_SET_TO_DISABLE, fclk_gate_lock);
- enable_reg = clk_readl(fclk_gate_reg) & 1;
+ enable_reg = readl(fclk_gate_reg) & 1;
if (enable && !enable_reg) {
if (clk_prepare_enable(clks[fclk]))
pr_warn("%s: FCLK%u enable failed\n", __func__,
@@ -287,7 +287,7 @@ static void __init zynq_clk_setup(struct device_node *np)
SLCR_IOPLL_CTRL, 4, 1, 0, &iopll_lock);
/* CPU clocks */
- tmp = clk_readl(SLCR_621_TRUE) & 1;
+ tmp = readl(SLCR_621_TRUE) & 1;
clk = clk_register_mux(NULL, "cpu_mux", cpu_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_ARM_CLK_CTRL, 4, 2, 0,
&armclk_lock);
@@ -510,7 +510,7 @@ static void __init zynq_clk_setup(struct device_node *np)
&dbgclk_lock);
/* leave debug clocks in the state the bootloader set them up to */
- tmp = clk_readl(SLCR_DBG_CLK_CTRL);
+ tmp = readl(SLCR_DBG_CLK_CTRL);
if (tmp & DBG_CLK_CTRL_CLKACT_TRC)
if (clk_prepare_enable(clks[dbg_trc]))
pr_warn("%s: trace clk enable failed\n", __func__);
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 00d72fb5c036..800b70ee19b3 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -90,7 +90,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
* probably makes sense to redundantly save fbdiv in the struct
* zynq_pll to save the IO access.
*/
- fbdiv = (clk_readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
+ fbdiv = (readl(clk->pll_ctrl) & PLLCTRL_FBDIV_MASK) >>
PLLCTRL_FBDIV_SHIFT;
return parent_rate * fbdiv;
@@ -112,7 +112,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
@@ -138,10 +138,10 @@ static int zynq_pll_enable(struct clk_hw *hw)
/* Power up PLL and wait for lock */
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
reg &= ~(PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK);
- clk_writel(reg, clk->pll_ctrl);
- while (!(clk_readl(clk->pll_status) & (1 << clk->lockbit)))
+ writel(reg, clk->pll_ctrl);
+ while (!(readl(clk->pll_status) & (1 << clk->lockbit)))
;
spin_unlock_irqrestore(clk->lock, flags);
@@ -168,9 +168,9 @@ static void zynq_pll_disable(struct clk_hw *hw)
/* shut down PLL */
spin_lock_irqsave(clk->lock, flags);
- reg = clk_readl(clk->pll_ctrl);
+ reg = readl(clk->pll_ctrl);
reg |= PLLCTRL_RESET_MASK | PLLCTRL_PWRDWN_MASK;
- clk_writel(reg, clk->pll_ctrl);
+ writel(reg, clk->pll_ctrl);
spin_unlock_irqrestore(clk->lock, flags);
}
@@ -223,9 +223,9 @@ struct clk *clk_register_zynq_pll(const char *name, const char *parent,
spin_lock_irqsave(pll->lock, flags);
- reg = clk_readl(pll->pll_ctrl);
+ reg = readl(pll->pll_ctrl);
reg &= ~PLLCTRL_BPQUAL_MASK;
- clk_writel(reg, pll->pll_ctrl);
+ writel(reg, pll->pll_ctrl);
spin_unlock_irqrestore(pll->lock, flags);
diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c
index 4143f560c28d..0af8f74c5fa5 100644
--- a/drivers/clk/zynqmp/clk-mux-zynqmp.c
+++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c
@@ -138,4 +138,3 @@ struct clk_hw *zynqmp_clk_register_mux(const char *name, u32 clk_id,
return hw;
}
-EXPORT_SYMBOL_GPL(zynqmp_clk_register_mux);
diff --git a/drivers/clk/zynqmp/clk-zynqmp.h b/drivers/clk/zynqmp/clk-zynqmp.h
index 7ab163b67249..fec9a15c8786 100644
--- a/drivers/clk/zynqmp/clk-zynqmp.h
+++ b/drivers/clk/zynqmp/clk-zynqmp.h
@@ -10,12 +10,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
-/* Clock APIs payload parameters */
-#define CLK_GET_NAME_RESP_LEN 16
-#define CLK_GET_TOPOLOGY_RESP_WORDS 3
-#define CLK_GET_PARENTS_RESP_WORDS 3
-#define CLK_GET_ATTR_RESP_WORDS 1
-
enum topology_type {
TYPE_INVALID,
TYPE_MUX,
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index b0908ec62f73..a11f93ecbf34 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -21,24 +21,6 @@
#define MAX_NODES 6
#define MAX_NAME_LEN 50
-#define CLK_TYPE_SHIFT 2
-
-#define PM_API_PAYLOAD_LEN 3
-
-#define NA_PARENT 0xFFFFFFFF
-#define DUMMY_PARENT 0xFFFFFFFE
-
-#define CLK_TYPE_FIELD_LEN 4
-#define CLK_TOPOLOGY_NODE_OFFSET 16
-#define NODES_PER_RESP 3
-
-#define CLK_TYPE_FIELD_MASK 0xF
-#define CLK_FLAG_FIELD_MASK GENMASK(21, 8)
-#define CLK_TYPE_FLAG_FIELD_MASK GENMASK(31, 24)
-
-#define CLK_PARENTS_ID_LEN 16
-#define CLK_PARENTS_ID_MASK 0xFFFF
-
/* Flags for parents */
#define PARENT_CLK_SELF 0
#define PARENT_CLK_NODE1 1
@@ -52,7 +34,10 @@
#define END_OF_PARENTS 1
#define RESERVED_CLK_NAME ""
-#define CLK_VALID_MASK 0x1
+#define CLK_GET_NAME_RESP_LEN 16
+#define CLK_GET_TOPOLOGY_RESP_WORDS 3
+#define CLK_GET_PARENTS_RESP_WORDS 3
+#define CLK_GET_ATTR_RESP_WORDS 1
enum clk_type {
CLK_TYPE_OUTPUT,
@@ -80,6 +65,7 @@ struct clock_parent {
* @num_nodes: Number of nodes present in topology
* @parent: Parent of clock
* @num_parents: Number of parents of clock
+ * @clk_id: Clock id
*/
struct zynqmp_clock {
char clk_name[MAX_NAME_LEN];
@@ -89,6 +75,36 @@ struct zynqmp_clock {
u32 num_nodes;
struct clock_parent parent[MAX_PARENT];
u32 num_parents;
+ u32 clk_id;
+};
+
+struct name_resp {
+ char name[CLK_GET_NAME_RESP_LEN];
+};
+
+struct topology_resp {
+#define CLK_TOPOLOGY_TYPE GENMASK(3, 0)
+#define CLK_TOPOLOGY_FLAGS GENMASK(23, 8)
+#define CLK_TOPOLOGY_TYPE_FLAGS GENMASK(31, 24)
+ u32 topology[CLK_GET_TOPOLOGY_RESP_WORDS];
+};
+
+struct parents_resp {
+#define NA_PARENT 0xFFFFFFFF
+#define DUMMY_PARENT 0xFFFFFFFE
+#define CLK_PARENTS_ID GENMASK(15, 0)
+#define CLK_PARENTS_FLAGS GENMASK(31, 16)
+ u32 parents[CLK_GET_PARENTS_RESP_WORDS];
+};
+
+struct attr_resp {
+#define CLK_ATTR_VALID BIT(0)
+#define CLK_ATTR_TYPE BIT(2)
+#define CLK_ATTR_NODE_INDEX GENMASK(13, 0)
+#define CLK_ATTR_NODE_TYPE GENMASK(19, 14)
+#define CLK_ATTR_NODE_SUBCLASS GENMASK(25, 20)
+#define CLK_ATTR_NODE_CLASS GENMASK(31, 26)
+ u32 attr[CLK_GET_ATTR_RESP_WORDS];
};
static const char clk_type_postfix[][10] = {
@@ -199,14 +215,15 @@ static int zynqmp_pm_clock_get_num_clocks(u32 *nclocks)
/**
* zynqmp_pm_clock_get_name() - Get the name of clock for given id
* @clock_id: ID of the clock to be queried
- * @name: Name of given clock
+ * @response: Name of the clock with the given id
*
* This function is used to get name of clock specified by given
* clock ID.
*
- * Return: Returns 0, in case of error name would be 0
+ * Return: Returns 0
*/
-static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
+static int zynqmp_pm_clock_get_name(u32 clock_id,
+ struct name_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -215,7 +232,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
qdata.arg1 = clock_id;
eemi_ops->query_data(qdata, ret_payload);
- memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
+ memcpy(response, ret_payload, sizeof(*response));
return 0;
}
@@ -224,7 +241,7 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
* zynqmp_pm_clock_get_topology() - Get the topology of clock for given id
* @clock_id: ID of the clock to be queried
* @index: Node index of clock topology
- * @topology: Buffer to store nodes in topology and flags
+ * @response: Buffer used for the topology response
*
* This function is used to get topology information for the clock
* specified by given clock ID.
@@ -237,7 +254,8 @@ static int zynqmp_pm_clock_get_name(u32 clock_id, char *name)
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
+static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index,
+ struct topology_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -248,7 +266,7 @@ static int zynqmp_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
qdata.arg2 = index;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -297,7 +315,7 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
* zynqmp_pm_clock_get_parents() - Get the first 3 parents of clock for given id
* @clock_id: Clock ID
* @index: Parent index
- * @parents: 3 parents of the given clock
+ * @response: Parents of the given clock
*
* This function is used to get 3 parents for the clock specified by
* given clock ID.
@@ -310,7 +328,8 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
+static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index,
+ struct parents_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -321,7 +340,7 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
qdata.arg2 = index;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -329,13 +348,14 @@ static int zynqmp_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
/**
* zynqmp_pm_clock_get_attributes() - Get the attributes of clock for given id
* @clock_id: Clock ID
- * @attr: Clock attributes
+ * @response: Clock attributes response
*
* This function is used to get clock's attributes(e.g. valid, clock type, etc).
*
* Return: 0 on success else error+reason
*/
-static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
+static int zynqmp_pm_clock_get_attributes(u32 clock_id,
+ struct attr_resp *response)
{
struct zynqmp_pm_query_data qdata = {0};
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -345,7 +365,7 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
qdata.arg1 = clock_id;
ret = eemi_ops->query_data(qdata, ret_payload);
- memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
+ memcpy(response, &ret_payload[1], sizeof(*response));
return ret;
}
@@ -354,24 +374,28 @@ static int zynqmp_pm_clock_get_attributes(u32 clock_id, u32 *attr)
* __zynqmp_clock_get_topology() - Get topology data of clock from firmware
* response data
* @topology: Clock topology
- * @data: Clock topology data received from firmware
+ * @response: Clock topology data received from firmware
* @nnodes: Number of nodes
*
* Return: 0 on success else error+reason
*/
static int __zynqmp_clock_get_topology(struct clock_topology *topology,
- u32 *data, u32 *nnodes)
+ struct topology_resp *response,
+ u32 *nnodes)
{
int i;
+ u32 type;
- for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
- if (!(data[i] & CLK_TYPE_FIELD_MASK))
+ for (i = 0; i < ARRAY_SIZE(response->topology); i++) {
+ type = FIELD_GET(CLK_TOPOLOGY_TYPE, response->topology[i]);
+ if (type == TYPE_INVALID)
return END_OF_TOPOLOGY_NODE;
- topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
- topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
- data[i]);
+ topology[*nnodes].type = type;
+ topology[*nnodes].flag = FIELD_GET(CLK_TOPOLOGY_FLAGS,
+ response->topology[i]);
topology[*nnodes].type_flag =
- FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
+ FIELD_GET(CLK_TOPOLOGY_TYPE_FLAGS,
+ response->topology[i]);
(*nnodes)++;
}
@@ -392,14 +416,16 @@ static int zynqmp_clock_get_topology(u32 clk_id,
u32 *num_nodes)
{
int j, ret;
- u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
+ struct topology_resp response = { };
*num_nodes = 0;
- for (j = 0; j <= MAX_NODES; j += 3) {
- ret = zynqmp_pm_clock_get_topology(clk_id, j, pm_resp);
+ for (j = 0; j <= MAX_NODES; j += ARRAY_SIZE(response.topology)) {
+ ret = zynqmp_pm_clock_get_topology(clock[clk_id].clk_id, j,
+ &response);
if (ret)
return ret;
- ret = __zynqmp_clock_get_topology(topology, pm_resp, num_nodes);
+ ret = __zynqmp_clock_get_topology(topology, &response,
+ num_nodes);
if (ret == END_OF_TOPOLOGY_NODE)
return 0;
}
@@ -408,31 +434,33 @@ static int zynqmp_clock_get_topology(u32 clk_id,
}
/**
- * __zynqmp_clock_get_topology() - Get parents info of clock from firmware
+ * __zynqmp_clock_get_parents() - Get parents info of clock from firmware
* response data
* @parents: Clock parents
- * @data: Clock parents data received from firmware
+ * @response: Clock parents data received from firmware
* @nparent: Number of parent
*
* Return: 0 on success else error+reason
*/
-static int __zynqmp_clock_get_parents(struct clock_parent *parents, u32 *data,
+static int __zynqmp_clock_get_parents(struct clock_parent *parents,
+ struct parents_resp *response,
u32 *nparent)
{
int i;
struct clock_parent *parent;
- for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
- if (data[i] == NA_PARENT)
+ for (i = 0; i < ARRAY_SIZE(response->parents); i++) {
+ if (response->parents[i] == NA_PARENT)
return END_OF_PARENTS;
parent = &parents[i];
- parent->id = data[i] & CLK_PARENTS_ID_MASK;
- if (data[i] == DUMMY_PARENT) {
+ parent->id = FIELD_GET(CLK_PARENTS_ID, response->parents[i]);
+ if (response->parents[i] == DUMMY_PARENT) {
strcpy(parent->name, "dummy_name");
parent->flag = 0;
} else {
- parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
+ parent->flag = FIELD_GET(CLK_PARENTS_FLAGS,
+ response->parents[i]);
if (zynqmp_get_clock_name(parent->id, parent->name))
continue;
}
@@ -454,20 +482,21 @@ static int zynqmp_clock_get_parents(u32 clk_id, struct clock_parent *parents,
u32 *num_parents)
{
int j = 0, ret;
- u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
+ struct parents_resp response = { };
*num_parents = 0;
do {
/* Get parents from firmware */
- ret = zynqmp_pm_clock_get_parents(clk_id, j, pm_resp);
+ ret = zynqmp_pm_clock_get_parents(clock[clk_id].clk_id, j,
+ &response);
if (ret)
return ret;
- ret = __zynqmp_clock_get_parents(&parents[j], pm_resp,
+ ret = __zynqmp_clock_get_parents(&parents[j], &response,
num_parents);
if (ret == END_OF_PARENTS)
return 0;
- j += PM_API_PAYLOAD_LEN;
+ j += ARRAY_SIZE(response.parents);
} while (*num_parents <= MAX_PARENT);
return 0;
@@ -528,13 +557,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
const char **parent_names)
{
int j;
- u32 num_nodes;
+ u32 num_nodes, clk_dev_id;
char *clk_out = NULL;
struct clock_topology *nodes;
struct clk_hw *hw = NULL;
nodes = clock[clk_id].node;
num_nodes = clock[clk_id].num_nodes;
+ clk_dev_id = clock[clk_id].clk_id;
for (j = 0; j < num_nodes; j++) {
/*
@@ -551,13 +581,14 @@ static struct clk_hw *zynqmp_register_clk_topology(int clk_id, char *clk_name,
if (!clk_topology[nodes[j].type])
continue;
- hw = (*clk_topology[nodes[j].type])(clk_out, clk_id,
+ hw = (*clk_topology[nodes[j].type])(clk_out, clk_dev_id,
parent_names,
num_parents,
&nodes[j]);
if (IS_ERR(hw))
- pr_warn_once("%s() %s register fail with %ld\n",
- __func__, clk_name, PTR_ERR(hw));
+ pr_warn_once("%s() 0x%x: %s register fail with %ld\n",
+ __func__, clk_dev_id, clk_name,
+ PTR_ERR(hw));
parent_names[0] = clk_out;
}
@@ -621,20 +652,33 @@ static int zynqmp_register_clocks(struct device_node *np)
static void zynqmp_get_clock_info(void)
{
int i, ret;
- u32 attr, type = 0;
+ u32 type = 0;
+ u32 nodetype, subclass, class;
+ struct attr_resp attr;
+ struct name_resp name;
for (i = 0; i < clock_max_idx; i++) {
- zynqmp_pm_clock_get_name(i, clock[i].clk_name);
- if (!strcmp(clock[i].clk_name, RESERVED_CLK_NAME))
- continue;
-
ret = zynqmp_pm_clock_get_attributes(i, &attr);
if (ret)
continue;
- clock[i].valid = attr & CLK_VALID_MASK;
- clock[i].type = attr >> CLK_TYPE_SHIFT ? CLK_TYPE_EXTERNAL :
- CLK_TYPE_OUTPUT;
+ clock[i].valid = FIELD_GET(CLK_ATTR_VALID, attr.attr[0]);
+ clock[i].type = FIELD_GET(CLK_ATTR_TYPE, attr.attr[0]) ?
+ CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
+
+ nodetype = FIELD_GET(CLK_ATTR_NODE_TYPE, attr.attr[0]);
+ subclass = FIELD_GET(CLK_ATTR_NODE_SUBCLASS, attr.attr[0]);
+ class = FIELD_GET(CLK_ATTR_NODE_CLASS, attr.attr[0]);
+
+ clock[i].clk_id = FIELD_PREP(CLK_ATTR_NODE_CLASS, class) |
+ FIELD_PREP(CLK_ATTR_NODE_SUBCLASS, subclass) |
+ FIELD_PREP(CLK_ATTR_NODE_TYPE, nodetype) |
+ FIELD_PREP(CLK_ATTR_NODE_INDEX, i);
+
+ zynqmp_pm_clock_get_name(clock[i].clk_id, &name);
+ if (!strcmp(name.name, RESERVED_CLK_NAME))
+ continue;
+ strncpy(clock[i].clk_name, name.name, MAX_NAME_LEN);
}
/* Get topology of all clock */
@@ -695,8 +739,8 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops)
- return -ENXIO;
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
ret = zynqmp_clk_setup(dev->of_node);
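
The clkc.c rework above replaces open-coded shifts and ad-hoc length macros with GENMASK()-defined fields decoded via FIELD_GET()/FIELD_PREP(), and wraps each firmware reply in a small response struct sized from those definitions. A userspace approximation of decoding one attribute word is sketched below; GENMASK and FIELD_GET are reimplemented only so the snippet compiles on its own (kernel code takes them from <linux/bits.h> and <linux/bitfield.h>), and the attribute value is made up:

    /* Decode a clock attribute word the way the patch does, via
     * mask-derived field accessors. GENMASK/FIELD_GET are simplified
     * stand-ins for the <linux/bits.h>/<linux/bitfield.h> versions.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))
    #define FIELD_GET(m, v) (((v) & (m)) >> __builtin_ctz(m))

    #define CLK_ATTR_VALID      GENMASK(0, 0)   /* BIT(0) */
    #define CLK_ATTR_TYPE       GENMASK(2, 2)   /* BIT(2) */
    #define CLK_ATTR_NODE_INDEX GENMASK(13, 0)
    #define CLK_ATTR_NODE_TYPE  GENMASK(19, 14)

    int main(void)
    {
        uint32_t attr = 0x00008025;    /* made-up firmware reply */

        printf("valid=%u type=%u index=%u nodetype=%u\n",
               FIELD_GET(CLK_ATTR_VALID, attr),
               FIELD_GET(CLK_ATTR_TYPE, attr),
               FIELD_GET(CLK_ATTR_NODE_INDEX, attr),
               FIELD_GET(CLK_ATTR_NODE_TYPE, attr));
        return 0;
    }
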
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index a371c66e72ef..d8f5b70d2709 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -31,12 +31,14 @@
* struct zynqmp_clk_divider - adjustable divider clock
* @hw: handle between common and hardware-specific interfaces
* @flags: Hardware specific flags
+ * @is_frac: The divider is a fractional divider
* @clk_id: Id of clock
* @div_type: divisor type (TYPE_DIV1 or TYPE_DIV2)
*/
struct zynqmp_clk_divider {
struct clk_hw hw;
u8 flags;
+ bool is_frac;
u32 clk_id;
u32 div_type;
};
@@ -76,6 +78,13 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
else
value = div >> 16;
+ if (!value) {
+ WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+ "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
+ clk_name);
+ return parent_rate;
+ }
+
return DIV_ROUND_UP_ULL(parent_rate, value);
}
@@ -116,8 +125,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = zynqmp_divider_get_val(*prate, rate);
- if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
- (divider->flags & CLK_FRAC))
+ if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac)
bestdiv = rate % *prate ? 1 : bestdiv;
*prate = rate * bestdiv;
@@ -195,11 +203,13 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
init.name = name;
init.ops = &zynqmp_clk_divider_ops;
- init.flags = nodes->flag;
+ /* CLK_FRAC is not defined in the common clk framework */
+ init.flags = nodes->flag & ~CLK_FRAC;
init.parent_names = parents;
init.num_parents = 1;
/* struct clk_divider assignments */
+ div->is_frac = !!(nodes->flag & CLK_FRAC);
div->flags = nodes->type_flag;
div->hw.init = &init;
div->clk_id = clk_id;
@@ -214,4 +224,3 @@ struct clk_hw *zynqmp_clk_register_divider(const char *name,
return hw;
}
-EXPORT_SYMBOL_GPL(zynqmp_clk_register_divider);
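
The divider hunk above also guards recalc_rate() against a zero divisor read back from hardware: unless CLK_DIVIDER_ALLOW_ZERO is set it warns, and in either case it returns the parent rate instead of dividing by zero. A trivial userspace rendering of that guard, with DIV_ROUND_UP redefined locally:

    /* Guard a rate calculation against a zero divisor, as the divider
     * patch does: warn, then pass the parent rate through instead of
     * dividing by zero. DIV_ROUND_UP is redefined for this sketch.
     */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long recalc_rate(unsigned long parent_rate,
                                     unsigned int divisor, int allow_zero)
    {
        if (!divisor) {
            if (!allow_zero)
                fprintf(stderr, "zero divisor and ALLOW_ZERO not set\n");
            return parent_rate;        /* pass-through, no division by zero */
        }
        return DIV_ROUND_UP(parent_rate, divisor);
    }

    int main(void)
    {
        printf("%lu\n", recalc_rate(100000000, 4, 0)); /* 25000000 */
        printf("%lu\n", recalc_rate(100000000, 0, 0)); /* warns, 100000000 */
        return 0;
    }
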
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4b3d143f0f8a..6bcaa4e2e72c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
@@ -380,7 +387,7 @@ config ARM_GLOBAL_TIMER
This options enables support for the ARM global timer unit
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module"
+ bool "Support for Dual Timer SP804 module" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -400,8 +407,11 @@ config ARMV7M_SYSTICK
This options enables support for the ARMv7M system timer unit
config ATMEL_PIT
+ bool "Atmel PIT support" if COMPILE_TEST
+ depends on HAS_IOMEM
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
+ Support for the Periodic Interval Timer found on Atmel SoCs.
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
@@ -411,6 +421,13 @@ config ATMEL_ST
help
Support for the Atmel ST timer.
+config ATMEL_TCB_CLKSRC
+ bool "Atmel TC Block timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF if OF
+ help
+ Support for Timer Counter Blocks on Atmel SoCs.
+
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..236858fa7fbf 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index ea373cfbcecb..b2a951a798e2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -149,6 +149,26 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
+static u64 arch_counter_get_cntpct_stable(void)
+{
+ return __arch_counter_get_cntpct_stable();
+}
+
+static u64 arch_counter_get_cntpct(void)
+{
+ return __arch_counter_get_cntpct();
+}
+
+static u64 arch_counter_get_cntvct_stable(void)
+{
+ return __arch_counter_get_cntvct_stable();
+}
+
+static u64 arch_counter_get_cntvct(void)
+{
+ return __arch_counter_get_cntvct();
+}
+
/*
* Default to cp15 based access because arm64 uses this function for
* sched_clock() before DT is probed and the cp15 method is guaranteed
@@ -316,13 +336,6 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static u64 notrace arm64_1188873_read_cntvct_el0(void)
-{
- return read_sysreg(cntvct_el0);
-}
-#endif
-
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
/*
* The low bits of the counter registers are indeterminate while bit 10 or
@@ -369,8 +382,7 @@ static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
-DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
-EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
@@ -454,14 +466,6 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
- {
- .match_type = ate_match_local_cap_id,
- .id = (void *)ARM64_WORKAROUND_1188873,
- .desc = "ARM erratum 1188873",
- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
- },
-#endif
#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
{
.match_type = ate_match_dt,
@@ -549,11 +553,8 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
per_cpu(timer_unstable_counter_workaround, i) = wa;
}
- /*
- * Use the locked version, as we're called from the CPU
- * hotplug framework. Otherwise, we end-up in deadlock-land.
- */
- static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+ if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+ atomic_set(&timer_unstable_counter_workaround_in_use, 1);
/*
* Don't use the vdso fastpath if errata require using the
@@ -570,7 +571,7 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
void *arg)
{
- const struct arch_timer_erratum_workaround *wa;
+ const struct arch_timer_erratum_workaround *wa, *__wa;
ate_match_fn_t match_fn = NULL;
bool local = false;
@@ -594,53 +595,32 @@ static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type t
if (!wa)
return;
- if (needs_unstable_timer_counter_workaround()) {
- const struct arch_timer_erratum_workaround *__wa;
- __wa = __this_cpu_read(timer_unstable_counter_workaround);
- if (__wa && wa != __wa)
- pr_warn("Can't enable workaround for %s (clashes with %s\n)",
- wa->desc, __wa->desc);
+ __wa = __this_cpu_read(timer_unstable_counter_workaround);
+ if (__wa && wa != __wa)
+ pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+ wa->desc, __wa->desc);
- if (__wa)
- return;
- }
+ if (__wa)
+ return;
arch_timer_enable_workaround(wa, local);
pr_info("Enabling %s workaround for %s\n",
local ? "local" : "global", wa->desc);
}
-#define erratum_handler(fn, r, ...) \
-({ \
- bool __val; \
- if (needs_unstable_timer_counter_workaround()) { \
- const struct arch_timer_erratum_workaround *__wa; \
- __wa = __this_cpu_read(timer_unstable_counter_workaround); \
- if (__wa && __wa->fn) { \
- r = __wa->fn(__VA_ARGS__); \
- __val = true; \
- } else { \
- __val = false; \
- } \
- } else { \
- __val = false; \
- } \
- __val; \
-})
-
static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
- const struct arch_timer_erratum_workaround *wa;
+ return has_erratum_handler(read_cntvct_el0);
+}
- wa = __this_cpu_read(timer_unstable_counter_workaround);
- return wa && wa->read_cntvct_el0;
+static bool arch_timer_counter_has_wa(void)
+{
+ return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a) do { } while(0)
-#define erratum_set_next_event_tval_virt(...) ({BUG(); 0;})
-#define erratum_set_next_event_tval_phys(...) ({BUG(); 0;})
-#define erratum_handler(fn, r, ...) ({false;})
#define arch_timer_this_cpu_has_cntvct_wa() ({false;})
+#define arch_timer_counter_has_wa() ({false;})
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
static __always_inline irqreturn_t timer_handler(const int access,
@@ -733,11 +713,6 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
static int arch_timer_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_virt, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
@@ -745,11 +720,6 @@ static int arch_timer_set_next_event_virt(unsigned long evt,
static int arch_timer_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- int ret;
-
- if (erratum_handler(set_next_event_phys, ret, evt, clk))
- return ret;
-
set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -774,6 +744,10 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
+ typeof(clk->set_next_event) sne;
+
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+
if (arch_timer_c3stop)
clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
@@ -784,20 +758,20 @@ static void __arch_timer_setup(unsigned type,
case ARCH_TIMER_VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ sne = erratum_handler(set_next_event_virt);
break;
case ARCH_TIMER_PHYS_SECURE_PPI:
case ARCH_TIMER_PHYS_NONSECURE_PPI:
case ARCH_TIMER_HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ sne = erratum_handler(set_next_event_phys);
break;
default:
BUG();
}
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+ clk->set_next_event = sne;
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -830,7 +804,11 @@ static void arch_timer_evtstrm_enable(int divider)
cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
| ARCH_TIMER_VIRT_EVT_EN;
arch_timer_set_cntkctl(cntkctl);
+#ifdef CONFIG_ARM64
+ cpu_set_named_feature(EVTSTRM);
+#else
elf_hwcap |= HWCAP_EVTSTRM;
+#endif
#ifdef CONFIG_COMPAT
compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
@@ -995,12 +973,22 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
+ u64 (*rd)(void);
+
if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntvct_stable;
+ else
+ rd = arch_counter_get_cntvct;
+ } else {
+ if (arch_timer_counter_has_wa())
+ rd = arch_counter_get_cntpct_stable;
+ else
+ rd = arch_counter_get_cntpct;
+ }
+ arch_timer_read_counter = rd;
clocksource_counter.archdata.vdso_direct = vdso_default;
} else {
arch_timer_read_counter = arch_counter_get_cntvct_mem;
@@ -1052,7 +1040,11 @@ static int arch_timer_cpu_pm_notify(struct notifier_block *self,
} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
+#ifdef CONFIG_ARM64
+ if (cpu_have_named_feature(EVTSTRM))
+#else
if (elf_hwcap & HWCAP_EVTSTRM)
+#endif
cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}
return NOTIFY_OK;
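
The arch timer rework above drops the static key and the erratum_handler() macro in favour of choosing the counter accessor once, at registration time: if any CPU registered an unstable-counter workaround, arch_timer_read_counter points at the *_stable variant, otherwise at the plain one, so the hot read path carries no per-call test. A minimal sketch of that select-the-reader-once pattern, with invented names and a fake counter:

    /* Sketch of choosing a counter accessor once at init instead of
     * testing a workaround flag on every read. Names and the fake
     * counter are illustrative, not taken from the driver.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_ticks = 1000;

    static uint64_t read_counter_plain(void)
    {
        return fake_ticks;             /* one raw read */
    }

    static uint64_t read_counter_stable(void)
    {
        uint64_t a = fake_ticks, b = fake_ticks;
        return a == b ? a : b;         /* stand-in for an erratum-safe sequence */
    }

    static uint64_t (*read_counter)(void);

    static void counter_register(bool has_workaround)
    {
        read_counter = has_workaround ? read_counter_stable
                                      : read_counter_plain;
    }

    int main(void)
    {
        counter_register(false);
        printf("%llu\n", (unsigned long long)read_counter());
        counter_register(true);
        printf("%llu\n", (unsigned long long)read_counter());
        return 0;
    }
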
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/timer-atmel-tcb.c
index f987027ca566..6ed31f9def7e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -9,9 +9,11 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
-#include <linux/atmel_tc.h>
+#include <soc/at91/atmel_tcb.h>
/*
@@ -28,13 +30,6 @@
* source, used in either periodic or oneshot mode. This runs
* at 32 KiHZ, and can handle delays of up to two seconds.
*
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
- * this code can be used when init_timers() is called, well before most
- * devices are set up. (Some low end AT91 parts, which can run uClinux,
- * have only the timers in one TC block... they currently don't support
- * the tclib code, because of that initialization issue.)
- *
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
* but clocksources won't necessarily get the needed notifications.
@@ -112,7 +107,6 @@ static void tc_clksrc_resume(struct clocksource *cs)
}
static struct clocksource clksrc = {
- .name = "tcb_clksrc",
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
@@ -121,6 +115,16 @@ static struct clocksource clksrc = {
.resume = tc_clksrc_resume,
};
+static u64 notrace tc_sched_clock_read(void)
+{
+ return tc_get_cycles(&clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+ return tc_get_cycles32(&clksrc);
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -214,7 +218,6 @@ static int tc_next_event(unsigned long delta, struct clock_event_device *d)
static struct tc_clkevt_device clkevt = {
.clkevt = {
- .name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
@@ -330,39 +333,74 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
-static int __init tcb_clksrc_init(void)
-{
- static char bootinfo[] __initdata
- = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+ { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
+ { /* sentinel */ }
+};
- struct platform_device *pdev;
- struct atmel_tc *tc;
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+ struct atmel_tc tc;
struct clk *t0_clk;
+ const struct of_device_id *match;
+ u64 (*tc_sched_clock)(void);
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
+ int bits;
int i;
int ret;
- tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
- if (!tc) {
- pr_debug("can't alloc TC for clocksource\n");
- return -ENODEV;
+ /* Protect against multiple calls */
+ if (tcaddr)
+ return 0;
+
+ tc.regs = of_iomap(node->parent, 0);
+ if (!tc.regs)
+ return -ENXIO;
+
+ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
+ if (IS_ERR(t0_clk))
+ return PTR_ERR(t0_clk);
+
+ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+ if (IS_ERR(tc.slow_clk))
+ return PTR_ERR(tc.slow_clk);
+
+ tc.clk[0] = t0_clk;
+ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
+ if (IS_ERR(tc.clk[1]))
+ tc.clk[1] = t0_clk;
+ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
+ if (IS_ERR(tc.clk[2]))
+ tc.clk[2] = t0_clk;
+
+ tc.irq[2] = of_irq_get(node->parent, 2);
+ if (tc.irq[2] <= 0) {
+ tc.irq[2] = of_irq_get(node->parent, 0);
+ if (tc.irq[2] <= 0)
+ return -EINVAL;
}
- tcaddr = tc->regs;
- pdev = tc->pdev;
- t0_clk = tc->clk[0];
+ match = of_match_node(atmel_tcb_of_match, node->parent);
+ bits = (uintptr_t)match->data;
+
+ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
+ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+
ret = clk_prepare_enable(t0_clk);
if (ret) {
pr_debug("can't enable T0 clk\n");
- goto err_free_tc;
+ return ret;
}
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
- for (i = 0; i < 5; i++) {
- unsigned divisor = atmel_tc_divisors[i];
+ for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
+ unsigned divisor = atmel_tcb_divisors[i];
unsigned tmp;
/* remember 32 KiHz clock for later */
@@ -381,27 +419,31 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
-
- printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
- divided_rate / 1000000,
+ clksrc.name = kbasename(node->parent->full_name);
+ clkevt.clkevt.name = kbasename(node->parent->full_name);
+ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
((divided_rate % 1000000) + 500) / 1000);
- if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
+ tcaddr = tc.regs;
+
+ if (bits == 32) {
		/* use appropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
		/* setup only channel 0 */
- tcb_setup_single_chan(tc, best_divisor_idx);
+ tcb_setup_single_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read32;
} else {
- /* tclib will give us three clocks no matter what the
+ /* we have three clocks no matter what the
* underlying platform supports.
*/
- ret = clk_prepare_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc.clk[1]);
if (ret) {
pr_debug("can't enable T1 clk\n");
goto err_disable_t0;
}
/* setup both channel 0 & 1 */
- tcb_setup_dual_chan(tc, best_divisor_idx);
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read;
}
/* and away we go! */
@@ -410,24 +452,26 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- ret = setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
if (ret)
goto err_unregister_clksrc;
+ sched_clock_register(tc_sched_clock, 32, divided_rate);
+
return 0;
err_unregister_clksrc:
clocksource_unregister(&clksrc);
err_disable_t1:
- if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
- clk_disable_unprepare(tc->clk[1]);
+ if (bits != 32)
+ clk_disable_unprepare(tc.clk[1]);
err_disable_t0:
clk_disable_unprepare(t0_clk);
-err_free_tc:
- atmel_tc_free(tc);
+ tcaddr = NULL;
+
return ret;
}
-arch_initcall(tcb_clksrc_init);
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
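
Editorial note: the converted init function keeps the original divisor search ("Pick something over 5 MHz") but walks the new local atmel_tcb_divisors table. A small standalone C illustration of that selection follows (not the driver itself); the 132 MHz master clock and the exact tie-breaking are assumptions, the table and the 5 MHz floor come from the code above.

/* Sketch: keep the largest TC divisor whose divided rate stays above 5 MHz. */
#include <stdio.h>

int main(void)
{
	const unsigned int divisors[] = { 2, 8, 32, 128, 0 }; /* 0 = 32 kHz slow clock */
	unsigned int rate = 132000000;		/* assumed master clock */
	unsigned int best_rate = 0;
	int best_idx = -1;
	int i;

	for (i = 0; i < 5; i++) {
		unsigned int tmp;

		if (!divisors[i])
			continue;		/* slow-clock entry, handled separately */
		tmp = rate / divisors[i];
		if (best_idx >= 0 && tmp < 5000000)
			continue;		/* keep at least ~5 MHz once we have a candidate */
		best_rate = tmp;
		best_idx = i;
	}
	printf("divisor %u -> %u.%03u MHz\n", divisors[best_idx],
	       best_rate / 1000000, ((best_rate % 1000000) + 500) / 1000);
	return 0;
}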
diff --git a/drivers/clocksource/timer-fsl-ftm.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..e1c34b2f53a5 100644
--- a/drivers/clocksource/timer-fsl-ftm.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
@@ -19,20 +19,9 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
+#include <linux/fsl/ftm.h>
-#define FTM_SC 0x00
-#define FTM_SC_CLK_SHIFT 3
-#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT)
-#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT)
-#define FTM_SC_PS_MASK 0x7
-#define FTM_SC_TOIE BIT(6)
-#define FTM_SC_TOF BIT(7)
-
-#define FTM_CNT 0x04
-#define FTM_MOD 0x08
-#define FTM_CNTIN 0x4C
-
-#define FTM_PS_MAX 7
+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT)
struct ftm_clock_device {
void __iomem *clksrc_base;
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4 timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow to specify the two least
+	 * The timer register doesn't allow specifying the two least
+	 * significant bits of the timeout value and assumes they are zero.
+ * significant bits unset.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
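
Editorial note: the new IXP4xx driver rounds its periodic latch to a multiple of four because the reload register ignores its two low bits, and set_next_event merges a new count with the enable/one-shot bits that live in those positions. The standalone C check below reproduces that arithmetic (not kernel code); HZ and the sample cycle count are assumptions, the mask and the 66666000 Hz frequency come from the code above.

/* Sketch: latch rounding and reload-register merge for the IXP4xx timer. */
#include <stdint.h>
#include <stdio.h>

#define RELOAD_MASK	0x3u		/* IXP4XX_OST_RELOAD_MASK */
#define HZ		100u		/* assumed tick rate */

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	uint32_t freq = 66666000;	/* fixed frequency used by the OF path */
	uint32_t latch, reg, cycles = 1234567;

	latch = DIV_ROUND_CLOSEST(freq, (RELOAD_MASK + 1) * HZ) * (RELOAD_MASK + 1);
	printf("latch = %u (low bits %u)\n", latch, latch & RELOAD_MASK);

	/* what ixp4xx_set_next_event does: keep enable/one-shot, replace count */
	reg = 0x3;			/* pretend both control bits are set */
	reg = (cycles & ~RELOAD_MASK) | (reg & RELOAD_MASK);
	printf("reload register = 0x%08x\n", reg);
	return 0;
}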
diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c
index f2019a88e3ee..fa9fb4eacade 100644
--- a/drivers/clocksource/timer-milbeaut.c
+++ b/drivers/clocksource/timer-milbeaut.c
@@ -26,8 +26,8 @@
#define MLB_TMR_TMCSR_CSL_DIV2 0
#define MLB_TMR_DIV_CNT 2
-#define MLB_TMR_SRC_CH (1)
-#define MLB_TMR_EVT_CH (0)
+#define MLB_TMR_SRC_CH 1
+#define MLB_TMR_EVT_CH 0
#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
@@ -43,6 +43,8 @@
#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
#define MLB_TIMER_RATING 500
+#define MLB_TIMER_ONESHOT 0
+#define MLB_TIMER_PERIODIC 1
static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
{
@@ -59,27 +61,53 @@ static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mlb_set_state_periodic(struct clock_event_device *clk)
+static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
{
- struct timer_of *to = to_timer_of(clk);
u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+ val |= MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+ if (periodic)
+ val |= MLB_TMR_TMCSR_RELD;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
- writel_relaxed(to->of_clk.period, timer_of_base(to) +
- MLB_TMR_EVT_TMRLR1_OFS);
- val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
- MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+static void mlb_evt_timer_stop(struct timer_of *to)
+{
+ u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ val &= ~MLB_TMR_TMCSR_CNTE;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
+
+static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
+{
+ writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, to->of_clk.period);
+ mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
return 0;
}
static int mlb_set_state_oneshot(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
- u32 val = MLB_TMR_TMCSR_CSL_DIV2;
- writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
+ return 0;
+}
+
+static int mlb_set_state_shutdown(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
return 0;
}
@@ -88,22 +116,21 @@ static int mlb_clkevt_next_event(unsigned long event,
{
struct timer_of *to = to_timer_of(clk);
- writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
- writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
- MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
- MLB_TMR_TMCSR_TRG, timer_of_base(to) +
- MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, event);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
return 0;
}
static int mlb_config_clock_source(struct timer_of *to)
{
- writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
- writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
- writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
- MLB_TMR_SRC_TMCSR_OFS);
+ val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
return 0;
}
@@ -123,6 +150,7 @@ static struct timer_of to = {
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
.set_state_oneshot = mlb_set_state_oneshot,
.set_state_periodic = mlb_set_state_periodic,
+ .set_state_shutdown = mlb_set_state_shutdown,
.set_next_event = mlb_clkevt_next_event,
},
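
Editorial note: the Milbeaut refactor folds the periodic and one-shot paths into one start helper that differs only by the reload bit. The sketch below shows that shape in standalone C; the bit positions are placeholders, not the real MLB_TMR_TMCSR_* values, which are not visible in this hunk.

/* Sketch: build the event-timer control word, adding RELD only for periodic. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TMCSR_CSL_DIV2	0x0u		/* placeholder values, illustration only */
#define TMCSR_TRG	(1u << 0)
#define TMCSR_CNTE	(1u << 1)
#define TMCSR_INTE	(1u << 2)
#define TMCSR_RELD	(1u << 3)

static uint32_t evt_timer_start_word(bool periodic)
{
	uint32_t val = TMCSR_CSL_DIV2 | TMCSR_CNTE | TMCSR_TRG | TMCSR_INTE;

	if (periodic)
		val |= TMCSR_RELD;	/* reload automatically when reaching zero */
	return val;
}

int main(void)
{
	printf("oneshot  TMCSR = 0x%02x\n", evt_timer_start_word(false));
	printf("periodic TMCSR = 0x%02x\n", evt_timer_start_word(true));
	return 0;
}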
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 6e0180aaf784..65f38f6ca714 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -186,7 +186,8 @@ static int __init sun4i_timer_init(struct device_node *node)
*/
if (of_machine_is_compatible("allwinner,sun4i-a10") ||
of_machine_is_compatible("allwinner,sun5i-a13") ||
- of_machine_is_compatible("allwinner,sun5i-a10s"))
+ of_machine_is_compatible("allwinner,sun5i-a10s") ||
+ of_machine_is_compatible("allwinner,suniv-f1c100s"))
sched_clock_register(sun4i_timer_sched_read, 32,
timer_of_rate(&to));
@@ -218,3 +219,5 @@ static int __init sun4i_timer_init(struct device_node *node)
}
TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
+TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
+ sun4i_timer_init);
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index fdb3d795a409..919b3568c495 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -60,9 +60,6 @@
static u32 usec_config;
static void __iomem *timer_reg_base;
#ifdef CONFIG_ARM
-static void __iomem *rtc_base;
-static struct timespec64 persistent_ts;
-static u64 persistent_ms, last_persistent_ms;
static struct delay_timer tegra_delay_timer;
#endif
@@ -199,40 +196,30 @@ static unsigned long tegra_delay_timer_read_counter_long(void)
return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
+static struct timer_of suspend_rtc_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
/*
* tegra_rtc_read - Reads the Tegra RTC registers
 * Care must be taken that this function is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
-static u64 tegra_rtc_read_ms(void)
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
- u32 ms = readl(rtc_base + RTC_MILLISECONDS);
- u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
+ u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
-/*
- * tegra_read_persistent_clock64 - Return time from a persistent clock.
- *
- * Reads the time from a source which isn't disabled during PM, the
- * 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec64.
- * Care must be taken that this funciton is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static void tegra_read_persistent_clock64(struct timespec64 *ts)
-{
- u64 delta;
-
- last_persistent_ms = persistent_ms;
- persistent_ms = tegra_rtc_read_ms();
- delta = persistent_ms - last_persistent_ms;
-
- timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
- *ts = persistent_ts;
-}
+static struct clocksource suspend_rtc_clocksource = {
+ .name = "tegra_suspend_timer",
+ .rating = 200,
+ .read = tegra_rtc_read_ms,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
#endif
static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
@@ -385,25 +372,15 @@ out:
static int __init tegra20_init_rtc(struct device_node *np)
{
- struct clk *clk;
+ int ret;
- rtc_base = of_iomap(np, 0);
- if (!rtc_base) {
- pr_err("Can't map RTC registers\n");
- return -ENXIO;
- }
+ ret = timer_of_init(np, &suspend_rtc_to);
+ if (ret)
+ return ret;
- /*
- * rtc registers are used by read_persistent_clock, keep the rtc clock
- * enabled
- */
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- pr_warn("Unable to get rtc-tegra clock\n");
- else
- clk_prepare_enable(clk);
+ clocksource_register_hz(&suspend_rtc_clocksource, 1000);
- return register_persistent_clock(tegra_read_persistent_clock64);
+ return 0;
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#endif
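
Editorial note: after this change the Tegra suspend path reads the RTC directly as a clocksource registered at 1000 Hz, so one tick is one millisecond and the value is simply seconds * 1000 + milliseconds from the shadowed registers. A standalone C illustration of that read follows (register values invented for the example).

/* Sketch: assemble a millisecond timestamp from the Tegra RTC registers. */
#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC 1000ULL

int main(void)
{
	uint32_t shadow_seconds = 42;	/* RTC_SHADOW_SECONDS */
	uint32_t milliseconds = 137;	/* RTC_MILLISECONDS */
	uint64_t ms = (uint64_t)shadow_seconds * MSEC_PER_SEC + milliseconds;

	printf("suspend clocksource reads %llu ms\n", (unsigned long long)ms);
	return 0;
}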
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index ee8ec5a8cb16..e40b55a7086f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -970,5 +970,4 @@ module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
new file mode 100644
index 000000000000..4fa2931dcb7b
--- /dev/null
+++ b/drivers/counter/104-quad-8.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Counter driver for the ACCES 104-QUAD-8
+ * Copyright (C) 2016 William Breathitt Gray
+ *
+ * This driver supports the ACCES 104-QUAD-8 and ACCES 104-QUAD-4.
+ */
+#include <linux/bitops.h>
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+
+#define QUAD8_EXTENT 32
+
+static unsigned int base[max_num_isa_dev(QUAD8_EXTENT)];
+static unsigned int num_quad8;
+module_param_array(base, uint, &num_quad8, 0);
+MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
+
+#define QUAD8_NUM_COUNTERS 8
+
+/**
+ * struct quad8_iio - IIO device private data structure
+ * @counter: instance of the counter_device
+ * @preset: array of preset values
+ * @count_mode: array of count mode configurations
+ * @quadrature_mode: array of quadrature mode configurations
+ * @quadrature_scale: array of quadrature mode scale configurations
+ * @ab_enable: array of A and B inputs enable configurations
+ * @preset_enable: array of set_to_preset_on_index attribute configurations
+ * @synchronous_mode: array of index function synchronous mode configurations
+ * @index_polarity: array of index function polarity configurations
+ * @base: base port address of the IIO device
+ */
+struct quad8_iio {
+ struct counter_device counter;
+ unsigned int preset[QUAD8_NUM_COUNTERS];
+ unsigned int count_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_mode[QUAD8_NUM_COUNTERS];
+ unsigned int quadrature_scale[QUAD8_NUM_COUNTERS];
+ unsigned int ab_enable[QUAD8_NUM_COUNTERS];
+ unsigned int preset_enable[QUAD8_NUM_COUNTERS];
+ unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
+ unsigned int index_polarity[QUAD8_NUM_COUNTERS];
+ unsigned int base;
+};
+
+#define QUAD8_REG_CHAN_OP 0x11
+#define QUAD8_REG_INDEX_INPUT_LEVELS 0x16
+/* Borrow Toggle flip-flop */
+#define QUAD8_FLAG_BT BIT(0)
+/* Carry Toggle flip-flop */
+#define QUAD8_FLAG_CT BIT(1)
+/* Error flag */
+#define QUAD8_FLAG_E BIT(4)
+/* Up/Down flag */
+#define QUAD8_FLAG_UD BIT(5)
+/* Reset and Load Signal Decoders */
+#define QUAD8_CTR_RLD 0x00
+/* Counter Mode Register */
+#define QUAD8_CTR_CMR 0x20
+/* Input / Output Control Register */
+#define QUAD8_CTR_IOR 0x40
+/* Index Control Register */
+#define QUAD8_CTR_IDR 0x60
+/* Reset Byte Pointer (three byte data pointer) */
+#define QUAD8_RLD_RESET_BP 0x01
+/* Reset Counter */
+#define QUAD8_RLD_RESET_CNTR 0x02
+/* Reset Borrow Toggle, Carry Toggle, Compare Toggle, and Sign flags */
+#define QUAD8_RLD_RESET_FLAGS 0x04
+/* Reset Error flag */
+#define QUAD8_RLD_RESET_E 0x06
+/* Preset Register to Counter */
+#define QUAD8_RLD_PRESET_CNTR 0x08
+/* Transfer Counter to Output Latch */
+#define QUAD8_RLD_CNTR_OUT 0x10
+#define QUAD8_CHAN_OP_ENABLE_COUNTERS 0x00
+#define QUAD8_CHAN_OP_RESET_COUNTERS 0x01
+#define QUAD8_CMR_QUADRATURE_X1 0x08
+#define QUAD8_CMR_QUADRATURE_X2 0x10
+#define QUAD8_CMR_QUADRATURE_X4 0x18
+
+
+static int quad8_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX) {
+ *val = !!(inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(chan->channel));
+ return IIO_VAL_INT;
+ }
+
+ flags = inb(base_offset + 1);
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
+
+ /* Borrow XOR Carry effectively doubles count range */
+ *val = (borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ *val |= (unsigned int)inb(base_offset) << (8 * i);
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = priv->ab_enable[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1;
+ *val2 = priv->quadrature_scale[chan->channel];
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
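
Editorial note: quad8_read_raw() above assembles the count from three latched bytes (LSB first) and uses Borrow XOR Carry as bit 24, giving an effective 25-bit range of 0..33554431. The standalone C check below reproduces that assembly (not the driver); the flag and byte values are invented for the example.

/* Sketch: rebuild a 104-QUAD-8 count value from its flags and output latch. */
#include <stdint.h>
#include <stdio.h>

#define FLAG_BT	(1u << 0)	/* Borrow Toggle */
#define FLAG_CT	(1u << 1)	/* Carry Toggle */

int main(void)
{
	uint8_t flags = FLAG_CT;			/* pretend one carry occurred */
	uint8_t latch[3] = { 0x87, 0xD6, 0x12 };	/* LSB first: 0x12D687 */
	unsigned int borrow = flags & FLAG_BT;
	unsigned int carry = !!(flags & FLAG_CT);
	unsigned long count = (unsigned long)(borrow ^ carry) << 24;
	int i;

	for (i = 0; i < 3; i++)
		count |= (unsigned long)latch[i] << (8 * i);

	printf("count = %lu (max %lu)\n", count, (1UL << 25) - 1);
	return 0;
}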
+static int quad8_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ int i;
+ unsigned int ior_cfg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_INDEX)
+ return -EINVAL;
+
+ /* Only 24-bit values are supported */
+ if ((unsigned int)val > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ val = priv->preset[chan->channel];
+ for (i = 0; i < 3; i++)
+ outb(val >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ /* only boolean values accepted */
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ priv->ab_enable[chan->channel] = val;
+
+ ior_cfg = val | priv->preset_enable[chan->channel] << 1;
+
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
+ return 0;
+ case IIO_CHAN_INFO_SCALE:
+ /* Quadrature scaling only available in quadrature mode */
+ if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ return -EINVAL;
+
+ /* Only three gain states (1, 0.5, 0.25) */
+ if (val == 1 && !val2)
+ priv->quadrature_scale[chan->channel] = 0;
+ else if (!val)
+ switch (val2) {
+ case 500000:
+ priv->quadrature_scale[chan->channel] = 1;
+ break;
+ case 250000:
+ priv->quadrature_scale[chan->channel] = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
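
Editorial note: the scale handling above reports IIO_VAL_FRACTIONAL_LOG2 with val = 1 and val2 = scale index, i.e. 1 / 2^index, and on write accepts only 1, 0.5 and 0.25. A minimal standalone mapping of those accepted values follows; it is a sketch of the same checks, not the driver code.

/* Sketch: map an IIO (val, val2) scale write to the 104-QUAD-8 scale index. */
#include <stdio.h>

static int scale_index_from_val(int val, int val2)
{
	if (val == 1 && !val2)
		return 0;			/* x1 */
	if (!val && val2 == 500000)
		return 1;			/* x2, scale 0.5 */
	if (!val && val2 == 250000)
		return 2;			/* x4, scale 0.25 */
	return -1;				/* -EINVAL in the driver */
}

int main(void)
{
	int idx = scale_index_from_val(0, 250000);

	if (idx < 0)
		return 1;
	printf("index %d -> scale 1/%d\n", idx, 1 << idx);
	return 0;
}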
+static const struct iio_info quad8_info = {
+ .read_raw = quad8_read_raw,
+ .write_raw = quad8_write_raw
+};
+
+static ssize_t quad8_read_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->preset[chan->channel]);
+}
+
+static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[chan->channel] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_read_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ !priv->preset_enable[chan->channel]);
+}
+
+static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
+ size_t len)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
+ priv->preset_enable[chan->channel] = preset_enable;
+
+ ior_cfg = priv->ab_enable[chan->channel] |
+ (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const char *const quad8_noise_error_states[] = {
+ "No excessive noise is present at the count inputs",
+ "Excessive noise is present at the count inputs"
+};
+
+static int quad8_get_noise_error(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & QUAD8_FLAG_E);
+}
+
+static const struct iio_enum quad8_noise_error_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_get_noise_error
+};
+
+static const char *const quad8_count_direction_states[] = {
+ "down",
+ "up"
+};
+
+static int quad8_get_count_direction(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ return !!(inb(base_offset) & QUAD8_FLAG_UD);
+}
+
+static const struct iio_enum quad8_count_direction_enum = {
+ .items = quad8_count_direction_states,
+ .num_items = ARRAY_SIZE(quad8_count_direction_states),
+ .get = quad8_get_count_direction
+};
+
+static const char *const quad8_count_modes[] = {
+ "normal",
+ "range limit",
+ "non-recycle",
+ "modulo-n"
+};
+
+static int quad8_set_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int cnt_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = cnt_mode << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->count_mode[chan->channel] = cnt_mode;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[chan->channel])
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_count_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->count_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_count_mode_enum = {
+ .items = quad8_count_modes,
+ .num_items = ARRAY_SIZE(quad8_count_modes),
+ .set = quad8_set_count_mode,
+ .get = quad8_get_count_mode
+};
+
+static const char *const quad8_synchronous_modes[] = {
+ "non-synchronous",
+ "synchronous"
+};
+
+static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int synchronous_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ return -EINVAL;
+
+ priv->synchronous_mode[chan->channel] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_synchronous_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->synchronous_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_synchronous_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .set = quad8_set_synchronous_mode,
+ .get = quad8_get_synchronous_mode
+};
+
+static const char *const quad8_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature"
+};
+
+static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int quadrature_mode)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ if (quadrature_mode)
+ mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
+ else {
+ /* Quadrature scaling only available in quadrature mode */
+ priv->quadrature_scale[chan->channel] = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (priv->synchronous_mode[chan->channel])
+ quad8_set_synchronous_mode(indio_dev, chan, 0);
+ }
+
+ priv->quadrature_mode[chan->channel] = quadrature_mode;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode[chan->channel];
+}
+
+static const struct iio_enum quad8_quadrature_mode_enum = {
+ .items = quad8_quadrature_modes,
+ .num_items = ARRAY_SIZE(quad8_quadrature_modes),
+ .set = quad8_set_quadrature_mode,
+ .get = quad8_get_quadrature_mode
+};
+
+static const char *const quad8_index_polarity_modes[] = {
+ "negative",
+ "positive"
+};
+
+static int quad8_set_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int index_polarity)
+{
+ struct quad8_iio *const priv = iio_priv(indio_dev);
+ const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * chan->channel + 1;
+
+ priv->index_polarity[chan->channel] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static int quad8_get_index_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct quad8_iio *const priv = iio_priv(indio_dev);
+
+ return priv->index_polarity[chan->channel];
+}
+
+static const struct iio_enum quad8_index_polarity_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .set = quad8_set_index_polarity,
+ .get = quad8_get_index_polarity
+};
+
+static const struct iio_chan_spec_ext_info quad8_count_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_preset,
+ .write = quad8_write_preset
+ },
+ {
+ .name = "set_to_preset_on_index",
+ .shared = IIO_SEPARATE,
+ .read = quad8_read_set_to_preset_on_index,
+ .write = quad8_write_set_to_preset_on_index
+ },
+ IIO_ENUM("noise_error", IIO_SEPARATE, &quad8_noise_error_enum),
+ IIO_ENUM_AVAILABLE("noise_error", &quad8_noise_error_enum),
+ IIO_ENUM("count_direction", IIO_SEPARATE, &quad8_count_direction_enum),
+ IIO_ENUM_AVAILABLE("count_direction", &quad8_count_direction_enum),
+ IIO_ENUM("count_mode", IIO_SEPARATE, &quad8_count_mode_enum),
+ IIO_ENUM_AVAILABLE("count_mode", &quad8_count_mode_enum),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE, &quad8_quadrature_mode_enum),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &quad8_quadrature_mode_enum),
+ {}
+};
+
+static const struct iio_chan_spec_ext_info quad8_index_ext_info[] = {
+ IIO_ENUM("synchronous_mode", IIO_SEPARATE,
+ &quad8_synchronous_mode_enum),
+ IIO_ENUM_AVAILABLE("synchronous_mode", &quad8_synchronous_mode_enum),
+ IIO_ENUM("index_polarity", IIO_SEPARATE, &quad8_index_polarity_enum),
+ IIO_ENUM_AVAILABLE("index_polarity", &quad8_index_polarity_enum),
+ {}
+};
+
+#define QUAD8_COUNT_CHAN(_chan) { \
+ .type = IIO_COUNT, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = quad8_count_ext_info, \
+ .indexed = 1 \
+}
+
+#define QUAD8_INDEX_CHAN(_chan) { \
+ .type = IIO_INDEX, \
+ .channel = (_chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .ext_info = quad8_index_ext_info, \
+ .indexed = 1 \
+}
+
+static const struct iio_chan_spec quad8_channels[] = {
+ QUAD8_COUNT_CHAN(0), QUAD8_INDEX_CHAN(0),
+ QUAD8_COUNT_CHAN(1), QUAD8_INDEX_CHAN(1),
+ QUAD8_COUNT_CHAN(2), QUAD8_INDEX_CHAN(2),
+ QUAD8_COUNT_CHAN(3), QUAD8_INDEX_CHAN(3),
+ QUAD8_COUNT_CHAN(4), QUAD8_INDEX_CHAN(4),
+ QUAD8_COUNT_CHAN(5), QUAD8_INDEX_CHAN(5),
+ QUAD8_COUNT_CHAN(6), QUAD8_INDEX_CHAN(6),
+ QUAD8_COUNT_CHAN(7), QUAD8_INDEX_CHAN(7)
+};
+
+static int quad8_signal_read(struct counter_device *counter,
+ struct counter_signal *signal, struct counter_signal_read_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ unsigned int state;
+ enum counter_signal_level level;
+
+ /* Only Index signal levels can be read */
+ if (signal->id < 16)
+ return -EINVAL;
+
+ state = inb(priv->base + QUAD8_REG_INDEX_INPUT_LEVELS)
+ & BIT(signal->id - 16);
+
+ level = (state) ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
+
+ counter_signal_read_value_set(val, COUNTER_SIGNAL_LEVEL, &level);
+
+ return 0;
+}
+
+static int quad8_count_read(struct counter_device *counter,
+ struct counter_count *count, struct counter_count_read_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ unsigned int flags;
+ unsigned int borrow;
+ unsigned int carry;
+ unsigned long position;
+ int i;
+
+ flags = inb(base_offset + 1);
+ borrow = flags & QUAD8_FLAG_BT;
+ carry = !!(flags & QUAD8_FLAG_CT);
+
+ /* Borrow XOR Carry effectively doubles count range */
+ position = (unsigned long)(borrow ^ carry) << 24;
+
+ /* Reset Byte Pointer; transfer Counter to Output Latch */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
+ base_offset + 1);
+
+ for (i = 0; i < 3; i++)
+ position |= (unsigned long)inb(base_offset) << (8 * i);
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+
+ return 0;
+}
+
+static int quad8_count_write(struct counter_device *counter,
+ struct counter_count *count, struct counter_count_write_value *val)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ int err;
+ unsigned long position;
+ int i;
+
+ err = counter_count_write_value_get(&position, COUNTER_COUNT_POSITION,
+ val);
+ if (err)
+ return err;
+
+ /* Only 24-bit values are supported */
+ if (position > 0xFFFFFF)
+ return -EINVAL;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Counter can only be set via Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(position >> (8 * i), base_offset);
+
+ /* Transfer Preset Register to Counter */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_PRESET_CNTR, base_offset + 1);
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register back to original value */
+ position = priv->preset[count->id];
+ for (i = 0; i < 3; i++)
+ outb(position >> (8 * i), base_offset);
+
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+
+ return 0;
+}
+
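
Editorial note: writes in quad8_count_write() go through the Preset Register one byte at a time, LSB first, after resetting the byte pointer, and the original preset is then written back. The standalone C sketch below shows only the byte split performed by the outb() loop; the position value is invented for the example.

/* Sketch: split a 24-bit count into the three bytes sent to the board. */
#include <stdint.h>
#include <stdio.h>

static void split24(uint32_t value, uint8_t bytes[3])
{
	int i;

	for (i = 0; i < 3; i++)
		bytes[i] = (value >> (8 * i)) & 0xFF;	/* what each outb() sends */
}

int main(void)
{
	uint32_t position = 0x12D687;	/* must fit in 24 bits */
	uint8_t out[3];
	int i;

	split24(position, out);
	for (i = 0; i < 3; i++)
		printf("byte %d: 0x%02X\n", i, out[i]);
	return 0;
}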
+enum quad8_count_function {
+ QUAD8_COUNT_FUNCTION_PULSE_DIRECTION = 0,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X1,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X2,
+ QUAD8_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static enum counter_count_function quad8_count_functions_list[] = {
+ [QUAD8_COUNT_FUNCTION_PULSE_DIRECTION] = COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ [QUAD8_COUNT_FUNCTION_QUADRATURE_X4] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static int quad8_function_get(struct counter_device *counter,
+ struct counter_count *count, size_t *function)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int id = count->id;
+ const unsigned int quadrature_mode = priv->quadrature_mode[id];
+ const unsigned int scale = priv->quadrature_scale[id];
+
+ if (quadrature_mode)
+ switch (scale) {
+ case 0:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
+ break;
+ case 1:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X2;
+ break;
+ case 2:
+ *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X4;
+ break;
+ }
+ else
+ *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+
+ return 0;
+}
+
+static int quad8_function_set(struct counter_device *counter,
+ struct counter_count *count, size_t function)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int id = count->id;
+ unsigned int *const quadrature_mode = priv->quadrature_mode + id;
+ unsigned int *const scale = priv->quadrature_scale + id;
+ unsigned int mode_cfg = priv->count_mode[id] << 1;
+ unsigned int *const synchronous_mode = priv->synchronous_mode + id;
+ const unsigned int idr_cfg = priv->index_polarity[id] << 1;
+ const int base_offset = priv->base + 2 * id + 1;
+
+ if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
+ *quadrature_mode = 0;
+
+ /* Quadrature scaling only available in quadrature mode */
+ *scale = 0;
+
+ /* Synchronous function not supported in non-quadrature mode */
+ if (*synchronous_mode) {
+ *synchronous_mode = 0;
+ /* Disable synchronous function mode */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ }
+ } else {
+ *quadrature_mode = 1;
+
+ switch (function) {
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ *scale = 0;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X1;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ *scale = 1;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X2;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ *scale = 2;
+ mode_cfg |= QUAD8_CMR_QUADRATURE_X4;
+ break;
+ }
+ }
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
+static void quad8_direction_get(struct counter_device *counter,
+ struct counter_count *count, enum counter_count_direction *direction)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ unsigned int ud_flag;
+ const unsigned int flag_addr = priv->base + 2 * count->id + 1;
+
+ /* U/D flag: nonzero = up, zero = down */
+ ud_flag = inb(flag_addr) & QUAD8_FLAG_UD;
+
+ *direction = (ud_flag) ? COUNTER_COUNT_DIRECTION_FORWARD :
+ COUNTER_COUNT_DIRECTION_BACKWARD;
+}
+
+enum quad8_synapse_action {
+ QUAD8_SYNAPSE_ACTION_NONE = 0,
+ QUAD8_SYNAPSE_ACTION_RISING_EDGE,
+ QUAD8_SYNAPSE_ACTION_FALLING_EDGE,
+ QUAD8_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static enum counter_synapse_action quad8_index_actions_list[] = {
+ [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE
+};
+
+static enum counter_synapse_action quad8_synapse_actions_list[] = {
+ [QUAD8_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [QUAD8_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [QUAD8_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ [QUAD8_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static int quad8_action_get(struct counter_device *counter,
+ struct counter_count *count, struct counter_synapse *synapse,
+ size_t *action)
+{
+ struct quad8_iio *const priv = counter->priv;
+ int err;
+ size_t function = 0;
+ const size_t signal_a_id = count->synapses[0].signal->id;
+ enum counter_count_direction direction;
+
+ /* Handle Index signals */
+ if (synapse->signal->id >= 16) {
+ if (priv->preset_enable[count->id])
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ else
+ *action = QUAD8_SYNAPSE_ACTION_NONE;
+
+ return 0;
+ }
+
+ err = quad8_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ /* Default action mode */
+ *action = QUAD8_SYNAPSE_ACTION_NONE;
+
+ /* Determine action mode based on current count function mode */
+ switch (function) {
+ case QUAD8_COUNT_FUNCTION_PULSE_DIRECTION:
+ if (synapse->signal->id == signal_a_id)
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X1:
+ if (synapse->signal->id == signal_a_id) {
+ quad8_direction_get(counter, count, &direction);
+
+ if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
+ *action = QUAD8_SYNAPSE_ACTION_RISING_EDGE;
+ else
+ *action = QUAD8_SYNAPSE_ACTION_FALLING_EDGE;
+ }
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X2:
+ if (synapse->signal->id == signal_a_id)
+ *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case QUAD8_COUNT_FUNCTION_QUADRATURE_X4:
+ *action = QUAD8_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ }
+
+ return 0;
+}
+
+const struct counter_ops quad8_ops = {
+ .signal_read = quad8_signal_read,
+ .count_read = quad8_count_read,
+ .count_write = quad8_count_write,
+ .function_get = quad8_function_get,
+ .function_set = quad8_function_set,
+ .action_get = quad8_action_get
+};
+
+static int quad8_index_polarity_get(struct counter_device *counter,
+ struct counter_signal *signal, size_t *index_polarity)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+
+ *index_polarity = priv->index_polarity[channel_id];
+
+ return 0;
+}
+
+static int quad8_index_polarity_set(struct counter_device *counter,
+ struct counter_signal *signal, size_t index_polarity)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+ const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
+ index_polarity << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
+
+ priv->index_polarity[channel_id] = index_polarity;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static struct counter_signal_enum_ext quad8_index_pol_enum = {
+ .items = quad8_index_polarity_modes,
+ .num_items = ARRAY_SIZE(quad8_index_polarity_modes),
+ .get = quad8_index_polarity_get,
+ .set = quad8_index_polarity_set
+};
+
+static int quad8_synchronous_mode_get(struct counter_device *counter,
+ struct counter_signal *signal, size_t *synchronous_mode)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+
+ *synchronous_mode = priv->synchronous_mode[channel_id];
+
+ return 0;
+}
+
+static int quad8_synchronous_mode_set(struct counter_device *counter,
+ struct counter_signal *signal, size_t synchronous_mode)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const size_t channel_id = signal->id - 16;
+ const unsigned int idr_cfg = synchronous_mode |
+ priv->index_polarity[channel_id] << 1;
+ const int base_offset = priv->base + 2 * channel_id + 1;
+
+ /* Index function must be non-synchronous in non-quadrature mode */
+ if (synchronous_mode && !priv->quadrature_mode[channel_id])
+ return -EINVAL;
+
+ priv->synchronous_mode[channel_id] = synchronous_mode;
+
+ /* Load Index Control configuration to Index Control Register */
+ outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+
+ return 0;
+}
+
+static struct counter_signal_enum_ext quad8_syn_mode_enum = {
+ .items = quad8_synchronous_modes,
+ .num_items = ARRAY_SIZE(quad8_synchronous_modes),
+ .get = quad8_synchronous_mode_get,
+ .set = quad8_synchronous_mode_set
+};
+
+static ssize_t quad8_count_floor_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ /* Only a floor of 0 is supported */
+ return sprintf(buf, "0\n");
+}
+
+static int quad8_count_mode_get(struct counter_device *counter,
+ struct counter_count *count, size_t *cnt_mode)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ /* Map 104-QUAD-8 count mode to Generic Counter count mode */
+ switch (priv->count_mode[count->id]) {
+ case 0:
+ *cnt_mode = COUNTER_COUNT_MODE_NORMAL;
+ break;
+ case 1:
+ *cnt_mode = COUNTER_COUNT_MODE_RANGE_LIMIT;
+ break;
+ case 2:
+ *cnt_mode = COUNTER_COUNT_MODE_NON_RECYCLE;
+ break;
+ case 3:
+ *cnt_mode = COUNTER_COUNT_MODE_MODULO_N;
+ break;
+ }
+
+ return 0;
+}
+
+static int quad8_count_mode_set(struct counter_device *counter,
+ struct counter_count *count, size_t cnt_mode)
+{
+ struct quad8_iio *const priv = counter->priv;
+ unsigned int mode_cfg;
+ const int base_offset = priv->base + 2 * count->id + 1;
+
+ /* Map Generic Counter count mode to 104-QUAD-8 count mode */
+ switch (cnt_mode) {
+ case COUNTER_COUNT_MODE_NORMAL:
+ cnt_mode = 0;
+ break;
+ case COUNTER_COUNT_MODE_RANGE_LIMIT:
+ cnt_mode = 1;
+ break;
+ case COUNTER_COUNT_MODE_NON_RECYCLE:
+ cnt_mode = 2;
+ break;
+ case COUNTER_COUNT_MODE_MODULO_N:
+ cnt_mode = 3;
+ break;
+ }
+
+ priv->count_mode[count->id] = cnt_mode;
+
+ /* Set count mode configuration value */
+ mode_cfg = cnt_mode << 1;
+
+ /* Add quadrature mode configuration */
+ if (priv->quadrature_mode[count->id])
+ mode_cfg |= (priv->quadrature_scale[count->id] + 1) << 3;
+
+ /* Load mode configuration to Counter Mode Register */
+ outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+
+ return 0;
+}
+
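
Editorial note: quad8_count_mode_set() composes the Counter Mode Register byte from the count mode in bits 2..1 and, when quadrature is enabled, scale + 1 in bits 4..3, OR'd with the QUAD8_CTR_CMR selector (0x20) defined earlier in this file. The standalone C check below reproduces that composition; the chosen mode and scale are just example inputs.

/* Sketch: compute the 104-QUAD-8 Counter Mode Register value. */
#include <stdio.h>

#define CTR_CMR		0x20

static unsigned int cmr_value(unsigned int cnt_mode, int quadrature,
			      unsigned int scale)
{
	unsigned int mode_cfg = cnt_mode << 1;

	if (quadrature)
		mode_cfg |= (scale + 1) << 3;	/* 0x08 = X1, 0x10 = X2, 0x18 = X4 */
	return CTR_CMR | mode_cfg;
}

int main(void)
{
	/* modulo-n (3) count mode, quadrature X4 (scale index 2) */
	printf("CMR byte = 0x%02X\n", cmr_value(3, 1, 2));
	return 0;
}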
+static struct counter_count_enum_ext quad8_cnt_mode_enum = {
+ .items = counter_count_mode_str,
+ .num_items = ARRAY_SIZE(counter_count_mode_str),
+ .get = quad8_count_mode_get,
+ .set = quad8_count_mode_set
+};
+
+static ssize_t quad8_count_direction_read(struct counter_device *counter,
+ struct counter_count *count, void *priv, char *buf)
+{
+ enum counter_count_direction dir;
+
+ quad8_direction_get(counter, count, &dir);
+
+ return sprintf(buf, "%s\n", counter_count_direction_str[dir]);
+}
+
+static ssize_t quad8_count_enable_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", priv->ab_enable[count->id]);
+}
+
+static ssize_t quad8_count_enable_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ int err;
+ bool ab_enable;
+ unsigned int ior_cfg;
+
+ err = kstrtobool(buf, &ab_enable);
+ if (err)
+ return err;
+
+ priv->ab_enable[count->id] = ab_enable;
+
+ ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
+
+ /* Load I/O control configuration */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+
+ return len;
+}
+
+static int quad8_error_noise_get(struct counter_device *counter,
+ struct counter_count *count, size_t *noise_error)
+{
+ const struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id + 1;
+
+ *noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
+
+ return 0;
+}
+
+static struct counter_count_enum_ext quad8_error_noise_enum = {
+ .items = quad8_noise_error_states,
+ .num_items = ARRAY_SIZE(quad8_noise_error_states),
+ .get = quad8_error_noise_get
+};
+
+static ssize_t quad8_count_preset_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", priv->preset[count->id]);
+}
+
+static ssize_t quad8_count_preset_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id;
+ unsigned int preset;
+ int ret;
+ int i;
+
+ ret = kstrtouint(buf, 0, &preset);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (preset > 0xFFFFFF)
+ return -EINVAL;
+
+ priv->preset[count->id] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+
+ return len;
+}
+
+static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+ return quad8_count_preset_read(counter, count, private, buf);
+ }
+
+ /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
+ return sprintf(buf, "33554431\n");
+}
+
+static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+
+ /* Range Limit and Modulo-N count modes use preset value as ceiling */
+ switch (priv->count_mode[count->id]) {
+ case 1:
+ case 3:
+ return quad8_count_preset_write(counter, count, private, buf,
+ len);
+ }
+
+ return len;
+}
+
+static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
+ struct counter_count *count, void *private, char *buf)
+{
+ const struct quad8_iio *const priv = counter->priv;
+
+ return sprintf(buf, "%u\n", !priv->preset_enable[count->id]);
+}
+
+static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
+ struct counter_count *count, void *private, const char *buf, size_t len)
+{
+ struct quad8_iio *const priv = counter->priv;
+ const int base_offset = priv->base + 2 * count->id + 1;
+ bool preset_enable;
+ int ret;
+ unsigned int ior_cfg;
+
+ ret = kstrtobool(buf, &preset_enable);
+ if (ret)
+ return ret;
+
+ /* Preset enable is active low in Input/Output Control register */
+ preset_enable = !preset_enable;
+
+ priv->preset_enable[count->id] = preset_enable;
+
+ ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
+
+ /* Load I/O control configuration to Input / Output Control Register */
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+
+ return len;
+}
+
+static const struct counter_signal_ext quad8_index_ext[] = {
+ COUNTER_SIGNAL_ENUM("index_polarity", &quad8_index_pol_enum),
+ COUNTER_SIGNAL_ENUM_AVAILABLE("index_polarity", &quad8_index_pol_enum),
+ COUNTER_SIGNAL_ENUM("synchronous_mode", &quad8_syn_mode_enum),
+ COUNTER_SIGNAL_ENUM_AVAILABLE("synchronous_mode", &quad8_syn_mode_enum)
+};
+
+#define QUAD8_QUAD_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name) \
+}
+
+#define QUAD8_INDEX_SIGNAL(_id, _name) { \
+ .id = (_id), \
+ .name = (_name), \
+ .ext = quad8_index_ext, \
+ .num_ext = ARRAY_SIZE(quad8_index_ext) \
+}
+
+static struct counter_signal quad8_signals[] = {
+ QUAD8_QUAD_SIGNAL(0, "Channel 1 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(1, "Channel 1 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(2, "Channel 2 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(3, "Channel 2 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(4, "Channel 3 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(5, "Channel 3 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(6, "Channel 4 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(7, "Channel 4 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(8, "Channel 5 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(9, "Channel 5 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(10, "Channel 6 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(11, "Channel 6 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(12, "Channel 7 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(13, "Channel 7 Quadrature B"),
+ QUAD8_QUAD_SIGNAL(14, "Channel 8 Quadrature A"),
+ QUAD8_QUAD_SIGNAL(15, "Channel 8 Quadrature B"),
+ QUAD8_INDEX_SIGNAL(16, "Channel 1 Index"),
+ QUAD8_INDEX_SIGNAL(17, "Channel 2 Index"),
+ QUAD8_INDEX_SIGNAL(18, "Channel 3 Index"),
+ QUAD8_INDEX_SIGNAL(19, "Channel 4 Index"),
+ QUAD8_INDEX_SIGNAL(20, "Channel 5 Index"),
+ QUAD8_INDEX_SIGNAL(21, "Channel 6 Index"),
+ QUAD8_INDEX_SIGNAL(22, "Channel 7 Index"),
+ QUAD8_INDEX_SIGNAL(23, "Channel 8 Index")
+};
+
+#define QUAD8_COUNT_SYNAPSES(_id) { \
+ { \
+ .actions_list = quad8_synapse_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
+ .signal = quad8_signals + 2 * (_id) \
+ }, \
+ { \
+ .actions_list = quad8_synapse_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_synapse_actions_list), \
+ .signal = quad8_signals + 2 * (_id) + 1 \
+ }, \
+ { \
+ .actions_list = quad8_index_actions_list, \
+ .num_actions = ARRAY_SIZE(quad8_index_actions_list), \
+ .signal = quad8_signals + 2 * (_id) + 16 \
+ } \
+}
+
+static struct counter_synapse quad8_count_synapses[][3] = {
+ QUAD8_COUNT_SYNAPSES(0), QUAD8_COUNT_SYNAPSES(1),
+ QUAD8_COUNT_SYNAPSES(2), QUAD8_COUNT_SYNAPSES(3),
+ QUAD8_COUNT_SYNAPSES(4), QUAD8_COUNT_SYNAPSES(5),
+ QUAD8_COUNT_SYNAPSES(6), QUAD8_COUNT_SYNAPSES(7)
+};
+
+static const struct counter_count_ext quad8_count_ext[] = {
+ {
+ .name = "ceiling",
+ .read = quad8_count_ceiling_read,
+ .write = quad8_count_ceiling_write
+ },
+ {
+ .name = "floor",
+ .read = quad8_count_floor_read
+ },
+ COUNTER_COUNT_ENUM("count_mode", &quad8_cnt_mode_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &quad8_cnt_mode_enum),
+ {
+ .name = "direction",
+ .read = quad8_count_direction_read
+ },
+ {
+ .name = "enable",
+ .read = quad8_count_enable_read,
+ .write = quad8_count_enable_write
+ },
+ COUNTER_COUNT_ENUM("error_noise", &quad8_error_noise_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("error_noise", &quad8_error_noise_enum),
+ {
+ .name = "preset",
+ .read = quad8_count_preset_read,
+ .write = quad8_count_preset_write
+ },
+ {
+ .name = "preset_enable",
+ .read = quad8_count_preset_enable_read,
+ .write = quad8_count_preset_enable_write
+ }
+};
+
+#define QUAD8_COUNT(_id, _cntname) { \
+ .id = (_id), \
+ .name = (_cntname), \
+ .functions_list = quad8_count_functions_list, \
+ .num_functions = ARRAY_SIZE(quad8_count_functions_list), \
+ .synapses = quad8_count_synapses[(_id)], \
+ .num_synapses = 2, \
+ .ext = quad8_count_ext, \
+ .num_ext = ARRAY_SIZE(quad8_count_ext) \
+}
+
+static struct counter_count quad8_counts[] = {
+ QUAD8_COUNT(0, "Channel 1 Count"),
+ QUAD8_COUNT(1, "Channel 2 Count"),
+ QUAD8_COUNT(2, "Channel 3 Count"),
+ QUAD8_COUNT(3, "Channel 4 Count"),
+ QUAD8_COUNT(4, "Channel 5 Count"),
+ QUAD8_COUNT(5, "Channel 6 Count"),
+ QUAD8_COUNT(6, "Channel 7 Count"),
+ QUAD8_COUNT(7, "Channel 8 Count")
+};
+
+static int quad8_probe(struct device *dev, unsigned int id)
+{
+ struct iio_dev *indio_dev;
+ struct quad8_iio *quad8iio;
+ int i, j;
+ unsigned int base_offset;
+ int err;
+
+ if (!devm_request_region(dev, base[id], QUAD8_EXTENT, dev_name(dev))) {
+ dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
+ base[id], base[id] + QUAD8_EXTENT);
+ return -EBUSY;
+ }
+
+ /* Allocate IIO device; this also allocates driver data structure */
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*quad8iio));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ /* Initialize IIO device */
+ indio_dev->info = &quad8_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = ARRAY_SIZE(quad8_channels);
+ indio_dev->channels = quad8_channels;
+ indio_dev->name = dev_name(dev);
+ indio_dev->dev.parent = dev;
+
+ /* Initialize Counter device and driver data */
+ quad8iio = iio_priv(indio_dev);
+ quad8iio->counter.name = dev_name(dev);
+ quad8iio->counter.parent = dev;
+ quad8iio->counter.ops = &quad8_ops;
+ quad8iio->counter.counts = quad8_counts;
+ quad8iio->counter.num_counts = ARRAY_SIZE(quad8_counts);
+ quad8iio->counter.signals = quad8_signals;
+ quad8iio->counter.num_signals = ARRAY_SIZE(quad8_signals);
+ quad8iio->counter.priv = quad8iio;
+ quad8iio->base = base[id];
+
+ /* Reset all counters and disable interrupt function */
+ outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+ /* Set initial configuration for all counters */
+ for (i = 0; i < QUAD8_NUM_COUNTERS; i++) {
+ base_offset = base[id] + 2 * i;
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ /* Reset Preset Register */
+ for (j = 0; j < 3; j++)
+ outb(0x00, base_offset);
+ /* Reset Borrow, Carry, Compare, and Sign flags */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_FLAGS, base_offset + 1);
+ /* Reset Error flag */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ /* Binary encoding; Normal count; non-quadrature mode */
+ outb(QUAD8_CTR_CMR, base_offset + 1);
+ /* Disable A and B inputs; preset on index; FLG1 as Carry */
+ outb(QUAD8_CTR_IOR, base_offset + 1);
+ /* Disable index function; negative index polarity */
+ outb(QUAD8_CTR_IDR, base_offset + 1);
+ }
+ /* Enable all counters */
+ outb(QUAD8_CHAN_OP_ENABLE_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+
+ /* Register IIO device */
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err)
+ return err;
+
+ /* Register Counter device */
+ return devm_counter_register(dev, &quad8iio->counter);
+}
+
+static struct isa_driver quad8_driver = {
+ .probe = quad8_probe,
+ .driver = {
+ .name = "104-quad-8"
+ }
+};
+
+module_isa_driver(quad8_driver, num_quad8);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES 104-QUAD-8 IIO driver");
+MODULE_LICENSE("GPL v2");
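For context, here is a minimal sketch of how the base[] array and num_quad8 count consumed by quad8_probe() and module_isa_driver() above are conventionally declared for an ISA-style driver. The driver's real declarations sit earlier in 104-quad-8.c, outside this excerpt, so the array bound and description string below are assumptions for illustration only.

/* Sketch only -- the real declarations live earlier in 104-quad-8.c. */
#include <linux/moduleparam.h>

#define QUAD8_MAX_NUM_QUAD8 8	/* assumed upper bound for this sketch */

static unsigned int base[QUAD8_MAX_NUM_QUAD8];	/* I/O port base addresses */
static unsigned int num_quad8;			/* number of base addresses supplied */
module_param_hw_array(base, uint, ioport, &num_quad8, 0);
MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");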
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
new file mode 100644
index 000000000000..233ac305d878
--- /dev/null
+++ b/drivers/counter/Kconfig
@@ -0,0 +1,60 @@
+#
+# Counter devices
+#
+
+menuconfig COUNTER
+ tristate "Counter support"
+ help
+ This enables counter device support through the Generic Counter
+ interface. You only need to enable this, if you also want to enable
+ one or more of the counter device drivers below.
+
+if COUNTER
+
+config 104_QUAD_8
+ tristate "ACCES 104-QUAD-8 driver"
+ depends on PC104 && X86 && IIO
+ select ISA_BUS_API
+ help
+ Say yes here to build support for the ACCES 104-QUAD-8 quadrature
+ encoder counter/interface device family (104-QUAD-8, 104-QUAD-4).
+
+ A counter's error flag may be cleared by performing a write
+ operation on its count value attribute. Although the
+ 104-QUAD-8 counters have a 25-bit range, only the lower 24 bits may be
+ set, either directly or via the counter's preset attribute. Interrupts
+ are not supported by this driver.
+
+ The base port addresses for the devices may be configured via the base
+ array module parameter.
+
+config STM32_TIMER_CNT
+ tristate "STM32 Timer encoder counter driver"
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
+ help
+ Select this option to enable the STM32 Timer quadrature encoder
+ and counter driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-timer-cnt.
+
+config STM32_LPTIMER_CNT
+ tristate "STM32 LP Timer encoder counter driver"
+ depends on (MFD_STM32_LPTIMER || COMPILE_TEST) && IIO
+ help
+ Select this option to enable the STM32 Low-Power Timer quadrature encoder
+ and counter driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stm32-lptimer-cnt.
+
+config FTM_QUADDEC
+ tristate "Flex Timer Module Quadrature decoder driver"
+ help
+ Select this option to enable the Flex Timer Quadrature decoder
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ftm-quaddec.
+
+endif # COUNTER
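The 104_QUAD_8 help text above describes clearing a channel's error flag by writing its count value attribute. Based on the sysfs layout established by drivers/counter/counter.c below (a "counter" bus, counter<N> device names, and per-count count<N> attribute directories), a minimal user-space sketch might look like the following; the counter0/count0 path is an assumption about how the card enumerates on a particular system.

/* User-space sketch only: clears a latched error flag by rewriting the count. */
#include <stdio.h>

int main(void)
{
	/* Path assumes the card enumerated as counter0; adjust as needed. */
	FILE *f = fopen("/sys/bus/counter/devices/counter0/count0/count", "w");

	if (!f) {
		perror("open count attribute");
		return 1;
	}
	/* Writing the count value also clears the channel's error flag. */
	fprintf(f, "0\n");
	fclose(f);
	return 0;
}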
diff --git a/drivers/counter/Makefile b/drivers/counter/Makefile
new file mode 100644
index 000000000000..0c9e622a6bea
--- /dev/null
+++ b/drivers/counter/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Counter devices
+#
+
+obj-$(CONFIG_COUNTER) += counter.o
+
+obj-$(CONFIG_104_QUAD_8) += 104-quad-8.o
+obj-$(CONFIG_STM32_TIMER_CNT) += stm32-timer-cnt.o
+obj-$(CONFIG_STM32_LPTIMER_CNT) += stm32-lptimer-cnt.o
+obj-$(CONFIG_FTM_QUADDEC) += ftm-quaddec.o
diff --git a/drivers/counter/counter.c b/drivers/counter/counter.c
new file mode 100644
index 000000000000..106bc7180cd8
--- /dev/null
+++ b/drivers/counter/counter.c
@@ -0,0 +1,1567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic Counter interface
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#include <linux/counter.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+const char *const counter_count_direction_str[2] = {
+ [COUNTER_COUNT_DIRECTION_FORWARD] = "forward",
+ [COUNTER_COUNT_DIRECTION_BACKWARD] = "backward"
+};
+EXPORT_SYMBOL_GPL(counter_count_direction_str);
+
+const char *const counter_count_mode_str[4] = {
+ [COUNTER_COUNT_MODE_NORMAL] = "normal",
+ [COUNTER_COUNT_MODE_RANGE_LIMIT] = "range limit",
+ [COUNTER_COUNT_MODE_NON_RECYCLE] = "non-recycle",
+ [COUNTER_COUNT_MODE_MODULO_N] = "modulo-n"
+};
+EXPORT_SYMBOL_GPL(counter_count_mode_str);
+
+ssize_t counter_signal_enum_read(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ char *buf)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, signal, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_read);
+
+ssize_t counter_signal_enum_write(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, signal, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_write);
+
+ssize_t counter_signal_enum_available_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ void *priv, char *buf)
+{
+ const struct counter_signal_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_signal_enum_available_read);
+
+ssize_t counter_count_enum_read(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ char *buf)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, count, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_read);
+
+ssize_t counter_count_enum_write(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, count, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_write);
+
+ssize_t counter_count_enum_available_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf)
+{
+ const struct counter_count_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_count_enum_available_read);
+
+ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
+ char *buf)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ int err;
+ size_t index;
+
+ if (!e->get)
+ return -EINVAL;
+
+ err = e->get(counter, &index);
+ if (err)
+ return err;
+
+ if (index >= e->num_items)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", e->items[index]);
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_read);
+
+ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
+ const char *buf, size_t len)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ ssize_t index;
+ int err;
+
+ if (!e->set)
+ return -EINVAL;
+
+ index = __sysfs_match_string(e->items, e->num_items, buf);
+ if (index < 0)
+ return index;
+
+ err = e->set(counter, index);
+ if (err)
+ return err;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_write);
+
+ssize_t counter_device_enum_available_read(struct counter_device *counter,
+ void *priv, char *buf)
+{
+ const struct counter_device_enum_ext *const e = priv;
+ size_t i;
+ size_t len = 0;
+
+ if (!e->num_items)
+ return 0;
+
+ for (i = 0; i < e->num_items; i++)
+ len += sprintf(buf + len, "%s\n", e->items[i]);
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(counter_device_enum_available_read);
+
+static const char *const counter_signal_level_str[] = {
+ [COUNTER_SIGNAL_LEVEL_LOW] = "low",
+ [COUNTER_SIGNAL_LEVEL_HIGH] = "high"
+};
+
+/**
+ * counter_signal_read_value_set - set counter_signal_read_value data
+ * @val: counter_signal_read_value structure to set
+ * @type: property Signal data represents
+ * @data: Signal data
+ *
+ * This function sets an opaque counter_signal_read_value structure with the
+ * provided Signal data.
+ */
+void counter_signal_read_value_set(struct counter_signal_read_value *const val,
+ const enum counter_signal_value_type type,
+ void *const data)
+{
+ if (type == COUNTER_SIGNAL_LEVEL)
+ val->len = sprintf(val->buf, "%s\n",
+ counter_signal_level_str[*(enum counter_signal_level *)data]);
+ else
+ val->len = 0;
+}
+EXPORT_SYMBOL_GPL(counter_signal_read_value_set);
+
+/**
+ * counter_count_read_value_set - set counter_count_read_value data
+ * @val: counter_count_read_value structure to set
+ * @type: property Count data represents
+ * @data: Count data
+ *
+ * This function sets an opaque counter_count_read_value structure with the
+ * provided Count data.
+ */
+void counter_count_read_value_set(struct counter_count_read_value *const val,
+ const enum counter_count_value_type type,
+ void *const data)
+{
+ switch (type) {
+ case COUNTER_COUNT_POSITION:
+ val->len = sprintf(val->buf, "%lu\n", *(unsigned long *)data);
+ break;
+ default:
+ val->len = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(counter_count_read_value_set);
+
+/**
+ * counter_count_write_value_get - get counter_count_write_value data
+ * @data: Count data
+ * @type: property Count data represents
+ * @val: counter_count_write_value structure containing data
+ *
+ * This function extracts Count data from the provided opaque
+ * counter_count_write_value structure and stores it at the address provided by
+ * @data.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int counter_count_write_value_get(void *const data,
+ const enum counter_count_value_type type,
+ const struct counter_count_write_value *const val)
+{
+ int err;
+
+ switch (type) {
+ case COUNTER_COUNT_POSITION:
+ err = kstrtoul(val->buf, 0, data);
+ if (err)
+ return err;
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(counter_count_write_value_get);
+
+struct counter_attr_parm {
+ struct counter_device_attr_group *group;
+ const char *prefix;
+ const char *name;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+ void *component;
+};
+
+struct counter_device_attr {
+ struct device_attribute dev_attr;
+ struct list_head l;
+ void *component;
+};
+
+static int counter_attribute_create(const struct counter_attr_parm *const parm)
+{
+ struct counter_device_attr *counter_attr;
+ struct device_attribute *dev_attr;
+ int err;
+ struct list_head *const attr_list = &parm->group->attr_list;
+
+ /* Allocate a Counter device attribute */
+ counter_attr = kzalloc(sizeof(*counter_attr), GFP_KERNEL);
+ if (!counter_attr)
+ return -ENOMEM;
+ dev_attr = &counter_attr->dev_attr;
+
+ sysfs_attr_init(&dev_attr->attr);
+
+ /* Configure device attribute */
+ dev_attr->attr.name = kasprintf(GFP_KERNEL, "%s%s", parm->prefix,
+ parm->name);
+ if (!dev_attr->attr.name) {
+ err = -ENOMEM;
+ goto err_free_counter_attr;
+ }
+ if (parm->show) {
+ dev_attr->attr.mode |= 0444;
+ dev_attr->show = parm->show;
+ }
+ if (parm->store) {
+ dev_attr->attr.mode |= 0200;
+ dev_attr->store = parm->store;
+ }
+
+ /* Store associated Counter component with attribute */
+ counter_attr->component = parm->component;
+
+ /* Keep track of the attribute for later cleanup */
+ list_add(&counter_attr->l, attr_list);
+ parm->group->num_attr++;
+
+ return 0;
+
+err_free_counter_attr:
+ kfree(counter_attr);
+ return err;
+}
+
+#define to_counter_attr(_dev_attr) \
+ container_of(_dev_attr, struct counter_device_attr, dev_attr)
+
+struct counter_signal_unit {
+ struct counter_signal *signal;
+};
+
+static ssize_t counter_signal_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_unit *const component = devattr->component;
+ struct counter_signal *const signal = component->signal;
+ int err;
+ struct counter_signal_read_value val = { .buf = buf };
+
+ err = counter->ops->signal_read(counter, signal, &val);
+ if (err)
+ return err;
+
+ return val.len;
+}
+
+struct counter_name_unit {
+ const char *name;
+};
+
+static ssize_t counter_device_attr_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_name_unit *const comp = to_counter_attr(attr)->component;
+
+ return sprintf(buf, "%s\n", comp->name);
+}
+
+static int counter_name_attribute_create(
+ struct counter_device_attr_group *const group,
+ const char *const name)
+{
+ struct counter_name_unit *name_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Skip if no name */
+ if (!name)
+ return 0;
+
+ /* Allocate name attribute component */
+ name_comp = kmalloc(sizeof(*name_comp), GFP_KERNEL);
+ if (!name_comp)
+ return -ENOMEM;
+ name_comp->name = name;
+
+ /* Allocate Signal name attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "name";
+ parm.show = counter_device_attr_name_show;
+ parm.store = NULL;
+ parm.component = name_comp;
+ err = counter_attribute_create(&parm);
+ if (err)
+ goto err_free_name_comp;
+
+ return 0;
+
+err_free_name_comp:
+ kfree(name_comp);
+ return err;
+}
+
+struct counter_signal_ext_unit {
+ struct counter_signal *signal;
+ const struct counter_signal_ext *ext;
+};
+
+static ssize_t counter_signal_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_ext_unit *const comp = devattr->component;
+ const struct counter_signal_ext *const ext = comp->ext;
+
+ return ext->read(dev_get_drvdata(dev), comp->signal, ext->priv, buf);
+}
+
+static ssize_t counter_signal_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_signal_ext_unit *const comp = devattr->component;
+ const struct counter_signal_ext *const ext = comp->ext;
+
+ return ext->write(dev_get_drvdata(dev), comp->signal, ext->priv, buf,
+ len);
+}
+
+static void counter_device_attr_list_free(struct list_head *attr_list)
+{
+ struct counter_device_attr *p, *n;
+
+ list_for_each_entry_safe(p, n, attr_list, l) {
+ /* free attribute name and associated component memory */
+ kfree(p->dev_attr.attr.name);
+ kfree(p->component);
+ list_del(&p->l);
+ kfree(p);
+ }
+}
+
+static int counter_signal_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_signal *const signal)
+{
+ const size_t num_ext = signal->num_ext;
+ size_t i;
+ const struct counter_signal_ext *ext;
+ struct counter_signal_ext_unit *signal_ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < num_ext; i++) {
+ ext = signal->ext + i;
+
+ /* Allocate signal_ext attribute component */
+ signal_ext_comp = kmalloc(sizeof(*signal_ext_comp), GFP_KERNEL);
+ if (!signal_ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ signal_ext_comp->signal = signal;
+ signal_ext_comp->ext = ext;
+
+ /* Allocate a Counter device attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = ext->name;
+ parm.show = (ext->read) ? counter_signal_ext_show : NULL;
+ parm.store = (ext->write) ? counter_signal_ext_store : NULL;
+ parm.component = signal_ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(signal_ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_signal_attributes_create(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_signal *const signal)
+{
+ struct counter_signal_unit *signal_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Allocate Signal attribute component */
+ signal_comp = kmalloc(sizeof(*signal_comp), GFP_KERNEL);
+ if (!signal_comp)
+ return -ENOMEM;
+ signal_comp->signal = signal;
+
+ /* Create main Signal attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "signal";
+ parm.show = (counter->ops->signal_read) ? counter_signal_show : NULL;
+ parm.store = NULL;
+ parm.component = signal_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(signal_comp);
+ return err;
+ }
+
+ /* Create Signal name attribute */
+ err = counter_name_attribute_create(group, signal->name);
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Signal extension attributes */
+ err = counter_signal_ext_register(group, signal);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_signals_register(
+ struct counter_device_attr_group *const groups_list,
+ const struct counter_device *const counter)
+{
+ const size_t num_signals = counter->num_signals;
+ size_t i;
+ struct counter_signal *signal;
+ const char *name;
+ int err;
+
+ /* Register each Signal */
+ for (i = 0; i < num_signals; i++) {
+ signal = counter->signals + i;
+
+ /* Generate Signal attribute directory name */
+ name = kasprintf(GFP_KERNEL, "signal%d", signal->id);
+ if (!name) {
+ err = -ENOMEM;
+ goto err_free_attr_groups;
+ }
+ groups_list[i].attr_group.name = name;
+
+ /* Create all attributes associated with Signal */
+ err = counter_signal_attributes_create(groups_list + i, counter,
+ signal);
+ if (err)
+ goto err_free_attr_groups;
+ }
+
+ return 0;
+
+err_free_attr_groups:
+ do {
+ kfree(groups_list[i].attr_group.name);
+ counter_device_attr_list_free(&groups_list[i].attr_list);
+ } while (i--);
+ return err;
+}
+
+static const char *const counter_synapse_action_str[] = {
+ [COUNTER_SYNAPSE_ACTION_NONE] = "none",
+ [COUNTER_SYNAPSE_ACTION_RISING_EDGE] = "rising edge",
+ [COUNTER_SYNAPSE_ACTION_FALLING_EDGE] = "falling edge",
+ [COUNTER_SYNAPSE_ACTION_BOTH_EDGES] = "both edges"
+};
+
+struct counter_action_unit {
+ struct counter_synapse *synapse;
+ struct counter_count *count;
+};
+
+static ssize_t counter_action_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_action_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ struct counter_synapse *const synapse = component->synapse;
+ size_t action_index;
+ enum counter_synapse_action action;
+
+ err = counter->ops->action_get(counter, count, synapse, &action_index);
+ if (err)
+ return err;
+
+ synapse->action = action_index;
+
+ action = synapse->actions_list[action_index];
+ return sprintf(buf, "%s\n", counter_synapse_action_str[action]);
+}
+
+static ssize_t counter_action_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_action_unit *const component = devattr->component;
+ struct counter_synapse *const synapse = component->synapse;
+ size_t action_index;
+ const size_t num_actions = synapse->num_actions;
+ enum counter_synapse_action action;
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_count *const count = component->count;
+
+ /* Find requested action mode */
+ for (action_index = 0; action_index < num_actions; action_index++) {
+ action = synapse->actions_list[action_index];
+ if (sysfs_streq(buf, counter_synapse_action_str[action]))
+ break;
+ }
+ /* If requested action mode not found */
+ if (action_index >= num_actions)
+ return -EINVAL;
+
+ err = counter->ops->action_set(counter, count, synapse, action_index);
+ if (err)
+ return err;
+
+ synapse->action = action_index;
+
+ return len;
+}
+
+struct counter_action_avail_unit {
+ const enum counter_synapse_action *actions_list;
+ size_t num_actions;
+};
+
+static ssize_t counter_synapse_action_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_action_avail_unit *const component = devattr->component;
+ size_t i;
+ enum counter_synapse_action action;
+ ssize_t len = 0;
+
+ for (i = 0; i < component->num_actions; i++) {
+ action = component->actions_list[i];
+ len += sprintf(buf + len, "%s\n",
+ counter_synapse_action_str[action]);
+ }
+
+ return len;
+}
+
+static int counter_synapses_register(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_count *const count, const char *const count_attr_name)
+{
+ size_t i;
+ struct counter_synapse *synapse;
+ const char *prefix;
+ struct counter_action_unit *action_comp;
+ struct counter_attr_parm parm;
+ int err;
+ struct counter_action_avail_unit *avail_comp;
+
+ /* Register each Synapse */
+ for (i = 0; i < count->num_synapses; i++) {
+ synapse = count->synapses + i;
+
+ /* Generate attribute prefix */
+ prefix = kasprintf(GFP_KERNEL, "signal%d_",
+ synapse->signal->id);
+ if (!prefix) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+
+ /* Allocate action attribute component */
+ action_comp = kmalloc(sizeof(*action_comp), GFP_KERNEL);
+ if (!action_comp) {
+ err = -ENOMEM;
+ goto err_free_prefix;
+ }
+ action_comp->synapse = synapse;
+ action_comp->count = count;
+
+ /* Create action attribute */
+ parm.group = group;
+ parm.prefix = prefix;
+ parm.name = "action";
+ parm.show = (counter->ops->action_get) ? counter_action_show : NULL;
+ parm.store = (counter->ops->action_set) ? counter_action_store : NULL;
+ parm.component = action_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(action_comp);
+ goto err_free_prefix;
+ }
+
+ /* Allocate action available attribute component */
+ avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
+ if (!avail_comp) {
+ err = -ENOMEM;
+ goto err_free_prefix;
+ }
+ avail_comp->actions_list = synapse->actions_list;
+ avail_comp->num_actions = synapse->num_actions;
+
+ /* Create action_available attribute */
+ parm.group = group;
+ parm.prefix = prefix;
+ parm.name = "action_available";
+ parm.show = counter_synapse_action_available_show;
+ parm.store = NULL;
+ parm.component = avail_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(avail_comp);
+ goto err_free_prefix;
+ }
+
+ kfree(prefix);
+ }
+
+ return 0;
+
+err_free_prefix:
+ kfree(prefix);
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+struct counter_count_unit {
+ struct counter_count *count;
+};
+
+static ssize_t counter_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ int err;
+ struct counter_count_read_value val = { .buf = buf };
+
+ err = counter->ops->count_read(counter, count, &val);
+ if (err)
+ return err;
+
+ return val.len;
+}
+
+static ssize_t counter_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ int err;
+ struct counter_count_write_value val = { .buf = buf };
+
+ err = counter->ops->count_write(counter, count, &val);
+ if (err)
+ return err;
+
+ return len;
+}
+
+static const char *const counter_count_function_str[] = {
+ [COUNTER_COUNT_FUNCTION_INCREASE] = "increase",
+ [COUNTER_COUNT_FUNCTION_DECREASE] = "decrease",
+ [COUNTER_COUNT_FUNCTION_PULSE_DIRECTION] = "pulse-direction",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A] = "quadrature x1 a",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B] = "quadrature x1 b",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A] = "quadrature x2 a",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B] = "quadrature x2 b",
+ [COUNTER_COUNT_FUNCTION_QUADRATURE_X4] = "quadrature x4"
+};
+
+static ssize_t counter_function_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ size_t func_index;
+ enum counter_count_function function;
+
+ err = counter->ops->function_get(counter, count, &func_index);
+ if (err)
+ return err;
+
+ count->function = func_index;
+
+ function = count->functions_list[func_index];
+ return sprintf(buf, "%s\n", counter_count_function_str[function]);
+}
+
+static ssize_t counter_function_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_unit *const component = devattr->component;
+ struct counter_count *const count = component->count;
+ const size_t num_functions = count->num_functions;
+ size_t func_index;
+ enum counter_count_function function;
+ int err;
+ struct counter_device *const counter = dev_get_drvdata(dev);
+
+ /* Find requested Count function mode */
+ for (func_index = 0; func_index < num_functions; func_index++) {
+ function = count->functions_list[func_index];
+ if (sysfs_streq(buf, counter_count_function_str[function]))
+ break;
+ }
+ /* Return error if requested Count function mode not found */
+ if (func_index >= num_functions)
+ return -EINVAL;
+
+ err = counter->ops->function_set(counter, count, func_index);
+ if (err)
+ return err;
+
+ count->function = func_index;
+
+ return len;
+}
+
+struct counter_count_ext_unit {
+ struct counter_count *count;
+ const struct counter_count_ext *ext;
+};
+
+static ssize_t counter_count_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_ext_unit *const comp = devattr->component;
+ const struct counter_count_ext *const ext = comp->ext;
+
+ return ext->read(dev_get_drvdata(dev), comp->count, ext->priv, buf);
+}
+
+static ssize_t counter_count_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_count_ext_unit *const comp = devattr->component;
+ const struct counter_count_ext *const ext = comp->ext;
+
+ return ext->write(dev_get_drvdata(dev), comp->count, ext->priv, buf,
+ len);
+}
+
+static int counter_count_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_count *const count)
+{
+ size_t i;
+ const struct counter_count_ext *ext;
+ struct counter_count_ext_unit *count_ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < count->num_ext; i++) {
+ ext = count->ext + i;
+
+ /* Allocate count_ext attribute component */
+ count_ext_comp = kmalloc(sizeof(*count_ext_comp), GFP_KERNEL);
+ if (!count_ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ count_ext_comp->count = count;
+ count_ext_comp->ext = ext;
+
+ /* Allocate count_ext attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = ext->name;
+ parm.show = (ext->read) ? counter_count_ext_show : NULL;
+ parm.store = (ext->write) ? counter_count_ext_store : NULL;
+ parm.component = count_ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(count_ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+struct counter_func_avail_unit {
+ const enum counter_count_function *functions_list;
+ size_t num_functions;
+};
+
+static ssize_t counter_count_function_available_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_func_avail_unit *const component = devattr->component;
+ const enum counter_count_function *const func_list = component->functions_list;
+ const size_t num_functions = component->num_functions;
+ size_t i;
+ enum counter_count_function function;
+ ssize_t len = 0;
+
+ for (i = 0; i < num_functions; i++) {
+ function = func_list[i];
+ len += sprintf(buf + len, "%s\n",
+ counter_count_function_str[function]);
+ }
+
+ return len;
+}
+
+static int counter_count_attributes_create(
+ struct counter_device_attr_group *const group,
+ const struct counter_device *const counter,
+ struct counter_count *const count)
+{
+ struct counter_count_unit *count_comp;
+ struct counter_attr_parm parm;
+ int err;
+ struct counter_count_unit *func_comp;
+ struct counter_func_avail_unit *avail_comp;
+
+ /* Allocate count attribute component */
+ count_comp = kmalloc(sizeof(*count_comp), GFP_KERNEL);
+ if (!count_comp)
+ return -ENOMEM;
+ count_comp->count = count;
+
+ /* Create main Count attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "count";
+ parm.show = (counter->ops->count_read) ? counter_count_show : NULL;
+ parm.store = (counter->ops->count_write) ? counter_count_store : NULL;
+ parm.component = count_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(count_comp);
+ return err;
+ }
+
+ /* Allocate function attribute component */
+ func_comp = kmalloc(sizeof(*func_comp), GFP_KERNEL);
+ if (!func_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ func_comp->count = count;
+
+ /* Create Count function attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "function";
+ parm.show = (counter->ops->function_get) ? counter_function_show : NULL;
+ parm.store = (counter->ops->function_set) ? counter_function_store : NULL;
+ parm.component = func_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(func_comp);
+ goto err_free_attr_list;
+ }
+
+ /* Allocate function available attribute component */
+ avail_comp = kmalloc(sizeof(*avail_comp), GFP_KERNEL);
+ if (!avail_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+ avail_comp->functions_list = count->functions_list;
+ avail_comp->num_functions = count->num_functions;
+
+ /* Create Count function_available attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = "function_available";
+ parm.show = counter_count_function_available_show;
+ parm.store = NULL;
+ parm.component = avail_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(avail_comp);
+ goto err_free_attr_list;
+ }
+
+ /* Create Count name attribute */
+ err = counter_name_attribute_create(group, count->name);
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Count extension attributes */
+ err = counter_count_ext_register(group, count);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_counts_register(
+ struct counter_device_attr_group *const groups_list,
+ const struct counter_device *const counter)
+{
+ size_t i;
+ struct counter_count *count;
+ const char *name;
+ int err;
+
+ /* Register each Count */
+ for (i = 0; i < counter->num_counts; i++) {
+ count = counter->counts + i;
+
+ /* Generate Count attribute directory name */
+ name = kasprintf(GFP_KERNEL, "count%d", count->id);
+ if (!name) {
+ err = -ENOMEM;
+ goto err_free_attr_groups;
+ }
+ groups_list[i].attr_group.name = name;
+
+ /* Register the Synapses associated with each Count */
+ err = counter_synapses_register(groups_list + i, counter, count,
+ name);
+ if (err)
+ goto err_free_attr_groups;
+
+ /* Create all attributes associated with Count */
+ err = counter_count_attributes_create(groups_list + i, counter,
+ count);
+ if (err)
+ goto err_free_attr_groups;
+ }
+
+ return 0;
+
+err_free_attr_groups:
+ do {
+ kfree(groups_list[i].attr_group.name);
+ counter_device_attr_list_free(&groups_list[i].attr_list);
+ } while (i--);
+ return err;
+}
+
+struct counter_size_unit {
+ size_t size;
+};
+
+static ssize_t counter_device_attr_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ const struct counter_size_unit *const comp = to_counter_attr(attr)->component;
+
+ return sprintf(buf, "%zu\n", comp->size);
+}
+
+static int counter_size_attribute_create(
+ struct counter_device_attr_group *const group,
+ const size_t size, const char *const name)
+{
+ struct counter_size_unit *size_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Allocate size attribute component */
+ size_comp = kmalloc(sizeof(*size_comp), GFP_KERNEL);
+ if (!size_comp)
+ return -ENOMEM;
+ size_comp->size = size;
+
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = name;
+ parm.show = counter_device_attr_size_show;
+ parm.store = NULL;
+ parm.component = size_comp;
+ err = counter_attribute_create(&parm);
+ if (err)
+ goto err_free_size_comp;
+
+ return 0;
+
+err_free_size_comp:
+ kfree(size_comp);
+ return err;
+}
+
+struct counter_ext_unit {
+ const struct counter_device_ext *ext;
+};
+
+static ssize_t counter_device_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_ext_unit *const component = devattr->component;
+ const struct counter_device_ext *const ext = component->ext;
+
+ return ext->read(dev_get_drvdata(dev), ext->priv, buf);
+}
+
+static ssize_t counter_device_ext_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ const struct counter_device_attr *const devattr = to_counter_attr(attr);
+ const struct counter_ext_unit *const component = devattr->component;
+ const struct counter_device_ext *const ext = component->ext;
+
+ return ext->write(dev_get_drvdata(dev), ext->priv, buf, len);
+}
+
+static int counter_device_ext_register(
+ struct counter_device_attr_group *const group,
+ struct counter_device *const counter)
+{
+ size_t i;
+ struct counter_ext_unit *ext_comp;
+ struct counter_attr_parm parm;
+ int err;
+
+ /* Create an attribute for each extension */
+ for (i = 0 ; i < counter->num_ext; i++) {
+ /* Allocate extension attribute component */
+ ext_comp = kmalloc(sizeof(*ext_comp), GFP_KERNEL);
+ if (!ext_comp) {
+ err = -ENOMEM;
+ goto err_free_attr_list;
+ }
+
+ ext_comp->ext = counter->ext + i;
+
+ /* Allocate extension attribute */
+ parm.group = group;
+ parm.prefix = "";
+ parm.name = counter->ext[i].name;
+ parm.show = (counter->ext[i].read) ? counter_device_ext_show : NULL;
+ parm.store = (counter->ext[i].write) ? counter_device_ext_store : NULL;
+ parm.component = ext_comp;
+ err = counter_attribute_create(&parm);
+ if (err) {
+ kfree(ext_comp);
+ goto err_free_attr_list;
+ }
+ }
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static int counter_global_attr_register(
+ struct counter_device_attr_group *const group,
+ struct counter_device *const counter)
+{
+ int err;
+
+ /* Create name attribute */
+ err = counter_name_attribute_create(group, counter->name);
+ if (err)
+ return err;
+
+ /* Create num_counts attribute */
+ err = counter_size_attribute_create(group, counter->num_counts,
+ "num_counts");
+ if (err)
+ goto err_free_attr_list;
+
+ /* Create num_signals attribute */
+ err = counter_size_attribute_create(group, counter->num_signals,
+ "num_signals");
+ if (err)
+ goto err_free_attr_list;
+
+ /* Register Counter device extension attributes */
+ err = counter_device_ext_register(group, counter);
+ if (err)
+ goto err_free_attr_list;
+
+ return 0;
+
+err_free_attr_list:
+ counter_device_attr_list_free(&group->attr_list);
+ return err;
+}
+
+static void counter_device_groups_list_free(
+ struct counter_device_attr_group *const groups_list,
+ const size_t num_groups)
+{
+ struct counter_device_attr_group *group;
+ size_t i;
+
+ /* loop through all attribute groups (signals, counts, global, etc.) */
+ for (i = 0; i < num_groups; i++) {
+ group = groups_list + i;
+
+ /* free all attribute group and associated attributes memory */
+ kfree(group->attr_group.name);
+ kfree(group->attr_group.attrs);
+ counter_device_attr_list_free(&group->attr_list);
+ }
+
+ kfree(groups_list);
+}
+
+static int counter_device_groups_list_prepare(
+ struct counter_device *const counter)
+{
+ const size_t total_num_groups =
+ counter->num_signals + counter->num_counts + 1;
+ struct counter_device_attr_group *groups_list;
+ size_t i;
+ int err;
+ size_t num_groups = 0;
+
+ /* Allocate space for attribute groups (signals, counts, and ext) */
+ groups_list = kcalloc(total_num_groups, sizeof(*groups_list),
+ GFP_KERNEL);
+ if (!groups_list)
+ return -ENOMEM;
+
+ /* Initialize attribute lists */
+ for (i = 0; i < total_num_groups; i++)
+ INIT_LIST_HEAD(&groups_list[i].attr_list);
+
+ /* Register Signals */
+ err = counter_signals_register(groups_list, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups += counter->num_signals;
+
+ /* Register Counts and respective Synapses */
+ err = counter_counts_register(groups_list + num_groups, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups += counter->num_counts;
+
+ /* Register Counter global attributes */
+ err = counter_global_attr_register(groups_list + num_groups, counter);
+ if (err)
+ goto err_free_groups_list;
+ num_groups++;
+
+ /* Store groups_list in device_state */
+ counter->device_state->groups_list = groups_list;
+ counter->device_state->num_groups = num_groups;
+
+ return 0;
+
+err_free_groups_list:
+ counter_device_groups_list_free(groups_list, num_groups);
+ return err;
+}
+
+static int counter_device_groups_prepare(
+ struct counter_device_state *const device_state)
+{
+ size_t i, j;
+ struct counter_device_attr_group *group;
+ int err;
+ struct counter_device_attr *p;
+
+ /* Allocate attribute groups for association with device */
+ device_state->groups = kcalloc(device_state->num_groups + 1,
+ sizeof(*device_state->groups),
+ GFP_KERNEL);
+ if (!device_state->groups)
+ return -ENOMEM;
+
+ /* Prepare each group of attributes for association */
+ for (i = 0; i < device_state->num_groups; i++) {
+ group = device_state->groups_list + i;
+
+ /* Allocate space for attribute pointers in attribute group */
+ group->attr_group.attrs = kcalloc(group->num_attr + 1,
+ sizeof(*group->attr_group.attrs), GFP_KERNEL);
+ if (!group->attr_group.attrs) {
+ err = -ENOMEM;
+ goto err_free_groups;
+ }
+
+ /* Add attribute pointers to attribute group */
+ j = 0;
+ list_for_each_entry(p, &group->attr_list, l)
+ group->attr_group.attrs[j++] = &p->dev_attr.attr;
+
+ /* Group attributes in attribute group */
+ device_state->groups[i] = &group->attr_group;
+ }
+ /* Associate attributes with device */
+ device_state->dev.groups = device_state->groups;
+
+ return 0;
+
+err_free_groups:
+ do {
+ group = device_state->groups_list + i;
+ kfree(group->attr_group.attrs);
+ group->attr_group.attrs = NULL;
+ } while (i--);
+ kfree(device_state->groups);
+ return err;
+}
+
+/* Provides a unique ID for each counter device */
+static DEFINE_IDA(counter_ida);
+
+static void counter_device_release(struct device *dev)
+{
+ struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device_state *const device_state = counter->device_state;
+
+ kfree(device_state->groups);
+ counter_device_groups_list_free(device_state->groups_list,
+ device_state->num_groups);
+ ida_simple_remove(&counter_ida, device_state->id);
+ kfree(device_state);
+}
+
+static struct device_type counter_device_type = {
+ .name = "counter_device",
+ .release = counter_device_release
+};
+
+static struct bus_type counter_bus_type = {
+ .name = "counter"
+};
+
+/**
+ * counter_register - register Counter to the system
+ * @counter: pointer to Counter to register
+ *
+ * This function registers a Counter to the system. A sysfs "counter" directory
+ * will be created and populated with sysfs attributes correlating with the
+ * Counter Signals, Synapses, and Counts respectively.
+ */
+int counter_register(struct counter_device *const counter)
+{
+ struct counter_device_state *device_state;
+ int err;
+
+ /* Allocate internal state container for Counter device */
+ device_state = kzalloc(sizeof(*device_state), GFP_KERNEL);
+ if (!device_state)
+ return -ENOMEM;
+ counter->device_state = device_state;
+
+ /* Acquire unique ID */
+ device_state->id = ida_simple_get(&counter_ida, 0, 0, GFP_KERNEL);
+ if (device_state->id < 0) {
+ err = device_state->id;
+ goto err_free_device_state;
+ }
+
+ /* Configure device structure for Counter */
+ device_state->dev.type = &counter_device_type;
+ device_state->dev.bus = &counter_bus_type;
+ if (counter->parent) {
+ device_state->dev.parent = counter->parent;
+ device_state->dev.of_node = counter->parent->of_node;
+ }
+ dev_set_name(&device_state->dev, "counter%d", device_state->id);
+ device_initialize(&device_state->dev);
+ dev_set_drvdata(&device_state->dev, counter);
+
+ /* Prepare device attributes */
+ err = counter_device_groups_list_prepare(counter);
+ if (err)
+ goto err_free_id;
+
+ /* Organize device attributes to groups and match to device */
+ err = counter_device_groups_prepare(device_state);
+ if (err)
+ goto err_free_groups_list;
+
+ /* Add device to system */
+ err = device_add(&device_state->dev);
+ if (err)
+ goto err_free_groups;
+
+ return 0;
+
+err_free_groups:
+ kfree(device_state->groups);
+err_free_groups_list:
+ counter_device_groups_list_free(device_state->groups_list,
+ device_state->num_groups);
+err_free_id:
+ ida_simple_remove(&counter_ida, device_state->id);
+err_free_device_state:
+ kfree(device_state);
+ return err;
+}
+EXPORT_SYMBOL_GPL(counter_register);
+
+/**
+ * counter_unregister - unregister Counter from the system
+ * @counter: pointer to Counter to unregister
+ *
+ * The Counter is unregistered from the system; all allocated memory is freed.
+ */
+void counter_unregister(struct counter_device *const counter)
+{
+ if (counter)
+ device_del(&counter->device_state->dev);
+}
+EXPORT_SYMBOL_GPL(counter_unregister);
+
+static void devm_counter_unreg(struct device *dev, void *res)
+{
+ counter_unregister(*(struct counter_device **)res);
+}
+
+/**
+ * devm_counter_register - Resource-managed counter_register
+ * @dev: device to allocate counter_device for
+ * @counter: pointer to Counter to register
+ *
+ * Managed counter_register. The Counter registered with this function is
+ * automatically unregistered on driver detach. This function calls
+ * counter_register internally. Refer to that function for more information.
+ *
+ * If a Counter registered with this function needs to be unregistered
+ * separately, devm_counter_unregister must be used.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_counter_register(struct device *dev,
+ struct counter_device *const counter)
+{
+ struct counter_device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_counter_unreg, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = counter_register(counter);
+ if (!ret) {
+ *ptr = counter;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_counter_register);
+
+static int devm_counter_match(struct device *dev, void *res, void *data)
+{
+ struct counter_device **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+/**
+ * devm_counter_unregister - Resource-managed counter_unregister
+ * @dev: device this counter_device belongs to
+ * @counter: pointer to Counter associated with the device
+ *
+ * Unregister Counter registered with devm_counter_register.
+ */
+void devm_counter_unregister(struct device *dev,
+ struct counter_device *const counter)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_counter_unreg, devm_counter_match,
+ counter);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_counter_unregister);
+
+static int __init counter_init(void)
+{
+ return bus_register(&counter_bus_type);
+}
+
+static void __exit counter_exit(void)
+{
+ bus_unregister(&counter_bus_type);
+}
+
+subsys_initcall(counter_init);
+module_exit(counter_exit);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("Generic Counter interface");
+MODULE_LICENSE("GPL v2");
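As a rough end-to-end illustration of the interface defined above, here is a minimal, hypothetical client driver sketch. Everything prefixed foo_ is invented for illustration; it uses only structures and helpers exercised by the two drivers in this patch (counter_ops, counter_signal, counter_synapse, counter_count, counter_count_read_value_set() and devm_counter_register()), and it omits the platform_driver registration, locking, and error paths a real driver would need.

/* Hypothetical "foo" driver: a sketch only, not part of this patch. */
#include <linux/counter.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

struct foo_priv {
	struct counter_device counter;
	unsigned long position;	/* stand-in for a hardware position register */
};

static int foo_count_read(struct counter_device *counter,
			  struct counter_count *count,
			  struct counter_count_read_value *val)
{
	struct foo_priv *const priv = counter->priv;

	/* Hand the position to the core through the opaque value helper */
	counter_count_read_value_set(val, COUNTER_COUNT_POSITION,
				     &priv->position);
	return 0;
}

static int foo_function_get(struct counter_device *counter,
			    struct counter_count *count, size_t *function)
{
	*function = 0;	/* only one function mode in this sketch */
	return 0;
}

static int foo_action_get(struct counter_device *counter,
			  struct counter_count *count,
			  struct counter_synapse *synapse, size_t *action)
{
	*action = 0;	/* only one action mode in this sketch */
	return 0;
}

static const struct counter_ops foo_ops = {
	.count_read = foo_count_read,
	.function_get = foo_function_get,
	.action_get = foo_action_get,
};

static const enum counter_count_function foo_functions[] = {
	COUNTER_COUNT_FUNCTION_INCREASE,
};

static enum counter_synapse_action foo_actions[] = {
	COUNTER_SYNAPSE_ACTION_RISING_EDGE,
};

static struct counter_signal foo_signals[] = {
	{ .id = 0, .name = "Foo Clock" },
};

static struct counter_synapse foo_synapses[] = {
	{
		.actions_list = foo_actions,
		.num_actions = ARRAY_SIZE(foo_actions),
		.signal = &foo_signals[0],
	},
};

static struct counter_count foo_counts[] = {
	{
		.id = 0,
		.name = "Foo Count",
		.functions_list = foo_functions,
		.num_functions = ARRAY_SIZE(foo_functions),
		.synapses = foo_synapses,
		.num_synapses = ARRAY_SIZE(foo_synapses),
	},
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->counter.name = dev_name(&pdev->dev);
	priv->counter.parent = &pdev->dev;
	priv->counter.ops = &foo_ops;
	priv->counter.counts = foo_counts;
	priv->counter.num_counts = ARRAY_SIZE(foo_counts);
	priv->counter.signals = foo_signals;
	priv->counter.num_signals = ARRAY_SIZE(foo_signals);
	priv->counter.priv = priv;

	/* The counter<N> sysfs device is removed automatically on detach */
	return devm_counter_register(&pdev->dev, &priv->counter);
}

Registering through devm_counter_register() means the counter<N> sysfs directory is torn down automatically when the device detaches, which is why neither driver in this patch needs an explicit counter_unregister() call in its normal teardown path (ftm-quaddec below uses the non-managed counter_register() and therefore does).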
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
new file mode 100644
index 000000000000..c83c8875bf82
--- /dev/null
+++ b/drivers/counter/ftm-quaddec.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Flex Timer Module Quadrature decoder
+ *
+ * This module implements a driver for the FTM quadrature decoder
+ * found on, for example, the LS1021A.
+ */
+
+#include <linux/fsl/ftm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/counter.h>
+#include <linux/bitfield.h>
+
+#define FTM_FIELD_UPDATE(ftm, offset, mask, val) \
+ ({ \
+ uint32_t flags; \
+ ftm_read(ftm, offset, &flags); \
+ flags &= ~mask; \
+ flags |= FIELD_PREP(mask, val); \
+ ftm_write(ftm, offset, flags); \
+ })
+
+struct ftm_quaddec {
+ struct counter_device counter;
+ struct platform_device *pdev;
+ void __iomem *ftm_base;
+ bool big_endian;
+ struct mutex ftm_quaddec_mutex;
+};
+
+static void ftm_read(struct ftm_quaddec *ftm, uint32_t offset, uint32_t *data)
+{
+ if (ftm->big_endian)
+ *data = ioread32be(ftm->ftm_base + offset);
+ else
+ *data = ioread32(ftm->ftm_base + offset);
+}
+
+static void ftm_write(struct ftm_quaddec *ftm, uint32_t offset, uint32_t data)
+{
+ if (ftm->big_endian)
+ iowrite32be(data, ftm->ftm_base + offset);
+ else
+ iowrite32(data, ftm->ftm_base + offset);
+}
+
+/* Hold mutex before modifying write protection state */
+static void ftm_clear_write_protection(struct ftm_quaddec *ftm)
+{
+ uint32_t flag;
+
+ /* First see if it is enabled */
+ ftm_read(ftm, FTM_FMS, &flag);
+
+ if (flag & FTM_FMS_WPEN)
+ FTM_FIELD_UPDATE(ftm, FTM_MODE, FTM_MODE_WPDIS, 1);
+}
+
+static void ftm_set_write_protection(struct ftm_quaddec *ftm)
+{
+ FTM_FIELD_UPDATE(ftm, FTM_FMS, FTM_FMS_WPEN, 1);
+}
+
+static void ftm_reset_counter(struct ftm_quaddec *ftm)
+{
+ /* Reset hardware counter to CNTIN */
+ ftm_write(ftm, FTM_CNT, 0x0);
+}
+
+static void ftm_quaddec_init(struct ftm_quaddec *ftm)
+{
+ ftm_clear_write_protection(ftm);
+
+ /*
+ * Do not write in the region from the CNTIN register through the
+ * PWMLOAD register when FTMEN = 0.
+ * Also reset other fields to zero
+ */
+ ftm_write(ftm, FTM_MODE, FTM_MODE_FTMEN);
+ ftm_write(ftm, FTM_CNTIN, 0x0000);
+ ftm_write(ftm, FTM_MOD, 0xffff);
+ ftm_write(ftm, FTM_CNT, 0x0);
+ /* Set prescaler, reset other fields to zero */
+ ftm_write(ftm, FTM_SC, FTM_SC_PS_1);
+
+ /* Select quad mode, reset other fields to zero */
+ ftm_write(ftm, FTM_QDCTRL, FTM_QDCTRL_QUADEN);
+
+ /* Unused features and reset to default section */
+ ftm_write(ftm, FTM_POL, 0x0);
+ ftm_write(ftm, FTM_FLTCTRL, 0x0);
+ ftm_write(ftm, FTM_SYNCONF, 0x0);
+ ftm_write(ftm, FTM_SYNC, 0xffff);
+
+ /* Lock the FTM */
+ ftm_set_write_protection(ftm);
+}
+
+static void ftm_quaddec_disable(struct ftm_quaddec *ftm)
+{
+ ftm_clear_write_protection(ftm);
+ ftm_write(ftm, FTM_MODE, 0);
+ ftm_write(ftm, FTM_QDCTRL, 0);
+ /*
+ * This is enough to disable the counter. No clock has been
+ * selected by writing to FTM_SC in init()
+ */
+ ftm_set_write_protection(ftm);
+}
+
+static int ftm_quaddec_get_prescaler(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *cnt_mode)
+{
+ struct ftm_quaddec *ftm = counter->priv;
+ uint32_t scflags;
+
+ ftm_read(ftm, FTM_SC, &scflags);
+
+ *cnt_mode = FIELD_GET(FTM_SC_PS_MASK, scflags);
+
+ return 0;
+}
+
+static int ftm_quaddec_set_prescaler(struct counter_device *counter,
+ struct counter_count *count,
+ size_t cnt_mode)
+{
+ struct ftm_quaddec *ftm = counter->priv;
+
+ mutex_lock(&ftm->ftm_quaddec_mutex);
+
+ ftm_clear_write_protection(ftm);
+ FTM_FIELD_UPDATE(ftm, FTM_SC, FTM_SC_PS_MASK, cnt_mode);
+ ftm_set_write_protection(ftm);
+
+ /* Also resets the counter as it is undefined anyway now */
+ ftm_reset_counter(ftm);
+
+ mutex_unlock(&ftm->ftm_quaddec_mutex);
+ return 0;
+}
+
+static const char * const ftm_quaddec_prescaler[] = {
+ "1", "2", "4", "8", "16", "32", "64", "128"
+};
+
+static struct counter_count_enum_ext ftm_quaddec_prescaler_enum = {
+ .items = ftm_quaddec_prescaler,
+ .num_items = ARRAY_SIZE(ftm_quaddec_prescaler),
+ .get = ftm_quaddec_get_prescaler,
+ .set = ftm_quaddec_set_prescaler
+};
+
+enum ftm_quaddec_synapse_action {
+ FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES,
+};
+
+static enum counter_synapse_action ftm_quaddec_synapse_actions[] = {
+ [FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES] =
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+enum ftm_quaddec_count_function {
+ FTM_QUADDEC_COUNT_ENCODER_MODE_1,
+};
+
+static const enum counter_count_function ftm_quaddec_count_functions[] = {
+ [FTM_QUADDEC_COUNT_ENCODER_MODE_1] =
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+static int ftm_quaddec_count_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct ftm_quaddec *const ftm = counter->priv;
+ uint32_t cntval;
+
+ ftm_read(ftm, FTM_CNT, &cntval);
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cntval);
+
+ return 0;
+}
+
+static int ftm_quaddec_count_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_write_value *val)
+{
+ struct ftm_quaddec *const ftm = counter->priv;
+ u32 cnt;
+ int err;
+
+ err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
+ if (err)
+ return err;
+
+ if (cnt != 0) {
+ dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
+ return -EINVAL;
+ }
+
+ ftm_reset_counter(ftm);
+
+ return 0;
+}
+
+static int ftm_quaddec_count_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ *function = FTM_QUADDEC_COUNT_ENCODER_MODE_1;
+
+ return 0;
+}
+
+static int ftm_quaddec_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ *action = FTM_QUADDEC_SYNAPSE_ACTION_BOTH_EDGES;
+
+ return 0;
+}
+
+static const struct counter_ops ftm_quaddec_cnt_ops = {
+ .count_read = ftm_quaddec_count_read,
+ .count_write = ftm_quaddec_count_write,
+ .function_get = ftm_quaddec_count_function_get,
+ .action_get = ftm_quaddec_action_get,
+};
+
+static struct counter_signal ftm_quaddec_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Phase A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Phase B"
+ }
+};
+
+static struct counter_synapse ftm_quaddec_count_synapses[] = {
+ {
+ .actions_list = ftm_quaddec_synapse_actions,
+ .num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
+ .signal = &ftm_quaddec_signals[0]
+ },
+ {
+ .actions_list = ftm_quaddec_synapse_actions,
+ .num_actions = ARRAY_SIZE(ftm_quaddec_synapse_actions),
+ .signal = &ftm_quaddec_signals[1]
+ }
+};
+
+static const struct counter_count_ext ftm_quaddec_count_ext[] = {
+ COUNTER_COUNT_ENUM("prescaler", &ftm_quaddec_prescaler_enum),
+ COUNTER_COUNT_ENUM_AVAILABLE("prescaler", &ftm_quaddec_prescaler_enum),
+};
+
+static struct counter_count ftm_quaddec_counts = {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = ftm_quaddec_count_functions,
+ .num_functions = ARRAY_SIZE(ftm_quaddec_count_functions),
+ .synapses = ftm_quaddec_count_synapses,
+ .num_synapses = ARRAY_SIZE(ftm_quaddec_count_synapses),
+ .ext = ftm_quaddec_count_ext,
+ .num_ext = ARRAY_SIZE(ftm_quaddec_count_ext)
+};
+
+static int ftm_quaddec_probe(struct platform_device *pdev)
+{
+ struct ftm_quaddec *ftm;
+
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *io;
+ int ret;
+
+ ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL);
+ if (!ftm)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ftm);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!io) {
+ dev_err(&pdev->dev, "Failed to get memory region\n");
+ return -ENODEV;
+ }
+
+ ftm->pdev = pdev;
+ ftm->big_endian = of_property_read_bool(node, "big-endian");
+ ftm->ftm_base = devm_ioremap(&pdev->dev, io->start, resource_size(io));
+
+ if (!ftm->ftm_base) {
+ dev_err(&pdev->dev, "Failed to map memory region\n");
+ return -EINVAL;
+ }
+ ftm->counter.name = dev_name(&pdev->dev);
+ ftm->counter.parent = &pdev->dev;
+ ftm->counter.ops = &ftm_quaddec_cnt_ops;
+ ftm->counter.counts = &ftm_quaddec_counts;
+ ftm->counter.num_counts = 1;
+ ftm->counter.signals = ftm_quaddec_signals;
+ ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals);
+ ftm->counter.priv = ftm;
+
+ mutex_init(&ftm->ftm_quaddec_mutex);
+
+ ftm_quaddec_init(ftm);
+
+ ret = counter_register(&ftm->counter);
+ if (ret)
+ ftm_quaddec_disable(ftm);
+
+ return ret;
+}
+
+static int ftm_quaddec_remove(struct platform_device *pdev)
+{
+ struct ftm_quaddec *ftm = platform_get_drvdata(pdev);
+
+ counter_unregister(&ftm->counter);
+
+ ftm_quaddec_disable(ftm);
+
+ return 0;
+}
+
+static const struct of_device_id ftm_quaddec_match[] = {
+ { .compatible = "fsl,ftm-quaddec" },
+ {},
+};
+
+static struct platform_driver ftm_quaddec_driver = {
+ .driver = {
+ .name = "ftm-quaddec",
+ .of_match_table = ftm_quaddec_match,
+ },
+ .probe = ftm_quaddec_probe,
+ .remove = ftm_quaddec_remove,
+};
+
+module_platform_driver(ftm_quaddec_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kjeld Flarup <kfa@deif.com");
+MODULE_AUTHOR("Patrick Havelange <patrick.havelange@essensium.com");
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
new file mode 100644
index 000000000000..bbc930a5962c
--- /dev/null
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Low-Power Timer Encoder and Counter driver
+ *
+ * Copyright (C) STMicroelectronics 2017
+ *
+ * Author: Fabrice Gasnier <fabrice.gasnier@st.com>
+ *
+ * Inspired by 104-quad-8 and stm32-timer-trigger drivers.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/counter.h>
+#include <linux/iio/iio.h>
+#include <linux/mfd/stm32-lptimer.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
+struct stm32_lptim_cnt {
+ struct counter_device counter;
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *clk;
+ u32 ceiling;
+ u32 polarity;
+ u32 quadrature_mode;
+ bool enabled;
+};
+
+static int stm32_lptim_is_enabled(struct stm32_lptim_cnt *priv)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CR, &val);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(STM32_LPTIM_ENABLE, val);
+}
+
+static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv,
+ int enable)
+{
+ int ret;
+ u32 val;
+
+ val = FIELD_PREP(STM32_LPTIM_ENABLE, enable);
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CR, val);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ clk_disable(priv->clk);
+ priv->enabled = false;
+ return 0;
+ }
+
+ /* LP timer must be enabled before writing CMP & ARR */
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0);
+ if (ret)
+ return ret;
+
+ /* ensure CMP & ARR registers are properly written */
+ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val,
+ (val & STM32_LPTIM_CMPOK_ARROK),
+ 100, 1000);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(priv->regmap, STM32_LPTIM_ICR,
+ STM32_LPTIM_CMPOKCF_ARROKCF);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(priv->clk);
+ if (ret) {
+ regmap_write(priv->regmap, STM32_LPTIM_CR, 0);
+ return ret;
+ }
+ priv->enabled = true;
+
+ /* Start LP timer in continuous mode */
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CR,
+ STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT);
+}
+
+static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable)
+{
+ u32 mask = STM32_LPTIM_ENC | STM32_LPTIM_COUNTMODE |
+ STM32_LPTIM_CKPOL | STM32_LPTIM_PRESC;
+ u32 val;
+
+ /* Setup LP timer encoder/counter and polarity, without prescaler */
+ if (priv->quadrature_mode)
+ val = enable ? STM32_LPTIM_ENC : 0;
+ else
+ val = enable ? STM32_LPTIM_COUNTMODE : 0;
+ val |= FIELD_PREP(STM32_LPTIM_CKPOL, enable ? priv->polarity : 0);
+
+ return regmap_update_bits(priv->regmap, STM32_LPTIM_CFGR, mask, val);
+}
+
+static int stm32_lptim_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_ENABLE:
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ /* Check that nobody else uses the timer and it isn't already in the requested state */
+ ret = stm32_lptim_is_enabled(priv);
+ if ((ret < 0) || (!ret && !val))
+ return ret;
+ if (val && ret)
+ return -EBUSY;
+
+ ret = stm32_lptim_setup(priv, val);
+ if (ret)
+ return ret;
+ return stm32_lptim_set_enable_state(priv, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int stm32_lptim_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+ u32 dat;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &dat);
+ if (ret)
+ return ret;
+ *val = dat;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_ENABLE:
+ ret = stm32_lptim_is_enabled(priv);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ /* Non-quadrature mode: scale = 1 */
+ *val = 1;
+ *val2 = 0;
+ if (priv->quadrature_mode) {
+ /*
+ * Quadrature encoder mode:
+ * - both edges, quarter cycle, scale is 0.25
+ * - either rising/falling edge scale is 0.5
+ */
+ if (priv->polarity > 1)
+ *val2 = 2;
+ else
+ *val2 = 1;
+ }
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info stm32_lptim_cnt_iio_info = {
+ .read_raw = stm32_lptim_read_raw,
+ .write_raw = stm32_lptim_write_raw,
+};
+
+static const char *const stm32_lptim_quadrature_modes[] = {
+ "non-quadrature",
+ "quadrature",
+};
+
+static int stm32_lptim_get_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->quadrature_mode;
+}
+
+static int stm32_lptim_set_quadrature_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->quadrature_mode = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_quadrature_mode_en = {
+ .items = stm32_lptim_quadrature_modes,
+ .num_items = ARRAY_SIZE(stm32_lptim_quadrature_modes),
+ .get = stm32_lptim_get_quadrature_mode,
+ .set = stm32_lptim_set_quadrature_mode,
+};
+
+static const char * const stm32_lptim_cnt_polarity[] = {
+ "rising-edge", "falling-edge", "both-edges",
+};
+
+static int stm32_lptim_cnt_get_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return priv->polarity;
+}
+
+static int stm32_lptim_cnt_set_polarity(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int type)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ priv->polarity = type;
+
+ return 0;
+}
+
+static const struct iio_enum stm32_lptim_cnt_polarity_en = {
+ .items = stm32_lptim_cnt_polarity,
+ .num_items = ARRAY_SIZE(stm32_lptim_cnt_polarity),
+ .get = stm32_lptim_cnt_get_polarity,
+ .set = stm32_lptim_cnt_set_polarity,
+};
+
+static ssize_t stm32_lptim_cnt_get_ceiling(struct stm32_lptim_cnt *priv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", priv->ceiling);
+}
+
+static ssize_t stm32_lptim_cnt_set_ceiling(struct stm32_lptim_cnt *priv,
+ const char *buf, size_t len)
+{
+ int ret;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ ret = kstrtouint(buf, 0, &priv->ceiling);
+ if (ret)
+ return ret;
+
+ if (priv->ceiling > STM32_LPTIM_MAX_ARR)
+ return -EINVAL;
+
+ return len;
+}
+
+static ssize_t stm32_lptim_cnt_get_preset_iio(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return stm32_lptim_cnt_get_ceiling(priv, buf);
+}
+
+static ssize_t stm32_lptim_cnt_set_preset_iio(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *priv = iio_priv(indio_dev);
+
+ return stm32_lptim_cnt_set_ceiling(priv, buf, len);
+}
+
+/* LP timer with encoder */
+static const struct iio_chan_spec_ext_info stm32_lptim_enc_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset_iio,
+ .write = stm32_lptim_cnt_set_preset_iio,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM("quadrature_mode", IIO_SEPARATE,
+ &stm32_lptim_quadrature_mode_en),
+ IIO_ENUM_AVAILABLE("quadrature_mode", &stm32_lptim_quadrature_mode_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_enc_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_enc_ext_info,
+ .indexed = 1,
+};
+
+/* LP timer without encoder (counter only) */
+static const struct iio_chan_spec_ext_info stm32_lptim_cnt_ext_info[] = {
+ {
+ .name = "preset",
+ .shared = IIO_SEPARATE,
+ .read = stm32_lptim_cnt_get_preset_iio,
+ .write = stm32_lptim_cnt_set_preset_iio,
+ },
+ IIO_ENUM("polarity", IIO_SEPARATE, &stm32_lptim_cnt_polarity_en),
+ IIO_ENUM_AVAILABLE("polarity", &stm32_lptim_cnt_polarity_en),
+ {}
+};
+
+static const struct iio_chan_spec stm32_lptim_cnt_channels = {
+ .type = IIO_COUNT,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_ENABLE) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .ext_info = stm32_lptim_cnt_ext_info,
+ .indexed = 1,
+};
+
+/**
+ * stm32_lptim_cnt_function - enumerates stm32 LPTimer counter & encoder modes
+ * @STM32_LPTIM_COUNTER_INCREASE: up count on IN1 rising, falling or both edges
+ * @STM32_LPTIM_ENCODER_BOTH_EDGE: count on both edges (IN1 & IN2 quadrature)
+ */
+enum stm32_lptim_cnt_function {
+ STM32_LPTIM_COUNTER_INCREASE,
+ STM32_LPTIM_ENCODER_BOTH_EDGE,
+};
+
+static enum counter_count_function stm32_lptim_cnt_functions[] = {
+ [STM32_LPTIM_COUNTER_INCREASE] = COUNTER_COUNT_FUNCTION_INCREASE,
+ [STM32_LPTIM_ENCODER_BOTH_EDGE] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+};
+
+enum stm32_lptim_synapse_action {
+ STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE,
+ STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE,
+ STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES,
+ STM32_LPTIM_SYNAPSE_ACTION_NONE,
+};
+
+static enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
+ /* Index must match with stm32_lptim_cnt_polarity[] (priv->polarity) */
+ [STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE] = COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ [STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE] = COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ [STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
+ [STM32_LPTIM_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+};
+
+static int stm32_lptim_cnt_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(priv->regmap, STM32_LPTIM_CNT, &cnt);
+ if (ret)
+ return ret;
+
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+
+ return 0;
+}
+
+static int stm32_lptim_cnt_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ if (!priv->quadrature_mode) {
+ *function = STM32_LPTIM_COUNTER_INCREASE;
+ return 0;
+ }
+
+ if (priv->polarity == STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES) {
+ *function = STM32_LPTIM_ENCODER_BOTH_EDGE;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_lptim_cnt_function_set(struct counter_device *counter,
+ struct counter_count *count,
+ size_t function)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ switch (function) {
+ case STM32_LPTIM_COUNTER_INCREASE:
+ priv->quadrature_mode = 0;
+ return 0;
+ case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ priv->quadrature_mode = 1;
+ priv->polarity = STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t stm32_lptim_cnt_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ int ret;
+
+ ret = stm32_lptim_is_enabled(priv);
+ if (ret < 0)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ret);
+}
+
+static ssize_t stm32_lptim_cnt_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret)
+ return ret;
+
+ /* Check that nobody else uses the timer and it isn't already in the requested state */
+ ret = stm32_lptim_is_enabled(priv);
+ if ((ret < 0) || (!ret && !enable))
+ return ret;
+ if (enable && ret)
+ return -EBUSY;
+
+ ret = stm32_lptim_setup(priv, enable);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, enable);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ return stm32_lptim_cnt_get_ceiling(priv, buf);
+}
+
+static ssize_t stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+
+ return stm32_lptim_cnt_set_ceiling(priv, buf, len);
+}
+
+static const struct counter_count_ext stm32_lptim_cnt_ext[] = {
+ {
+ .name = "enable",
+ .read = stm32_lptim_cnt_enable_read,
+ .write = stm32_lptim_cnt_enable_write
+ },
+ {
+ .name = "ceiling",
+ .read = stm32_lptim_cnt_ceiling_read,
+ .write = stm32_lptim_cnt_ceiling_write
+ },
+};
+
+static int stm32_lptim_cnt_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ size_t function;
+ int err;
+
+ err = stm32_lptim_cnt_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ switch (function) {
+ case STM32_LPTIM_COUNTER_INCREASE:
+ /* LP Timer acts as up-counter on input 1 */
+ if (synapse->signal->id == count->synapses[0].signal->id)
+ *action = priv->polarity;
+ else
+ *action = STM32_LPTIM_SYNAPSE_ACTION_NONE;
+ return 0;
+ case STM32_LPTIM_ENCODER_BOTH_EDGE:
+ *action = priv->polarity;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_lptim_cnt_action_set(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t action)
+{
+ struct stm32_lptim_cnt *const priv = counter->priv;
+ size_t function;
+ int err;
+
+ if (stm32_lptim_is_enabled(priv))
+ return -EBUSY;
+
+ err = stm32_lptim_cnt_function_get(counter, count, &function);
+ if (err)
+ return err;
+
+ /* only set polarity when in counter mode (on input 1) */
+ if (function == STM32_LPTIM_COUNTER_INCREASE
+ && synapse->signal->id == count->synapses[0].signal->id) {
+ switch (action) {
+ case STM32_LPTIM_SYNAPSE_ACTION_RISING_EDGE:
+ case STM32_LPTIM_SYNAPSE_ACTION_FALLING_EDGE:
+ case STM32_LPTIM_SYNAPSE_ACTION_BOTH_EDGES:
+ priv->polarity = action;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static const struct counter_ops stm32_lptim_cnt_ops = {
+ .count_read = stm32_lptim_cnt_read,
+ .function_get = stm32_lptim_cnt_function_get,
+ .function_set = stm32_lptim_cnt_function_set,
+ .action_get = stm32_lptim_cnt_action_get,
+ .action_set = stm32_lptim_cnt_action_set,
+};
+
+static struct counter_signal stm32_lptim_cnt_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Quadrature A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Quadrature B"
+ }
+};
+
+static struct counter_synapse stm32_lptim_cnt_synapses[] = {
+ {
+ .actions_list = stm32_lptim_cnt_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
+ .signal = &stm32_lptim_cnt_signals[0]
+ },
+ {
+ .actions_list = stm32_lptim_cnt_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_lptim_cnt_synapse_actions),
+ .signal = &stm32_lptim_cnt_signals[1]
+ }
+};
+
+/* LP timer with encoder */
+static struct counter_count stm32_lptim_enc_counts = {
+ .id = 0,
+ .name = "LPTimer Count",
+ .functions_list = stm32_lptim_cnt_functions,
+ .num_functions = ARRAY_SIZE(stm32_lptim_cnt_functions),
+ .synapses = stm32_lptim_cnt_synapses,
+ .num_synapses = ARRAY_SIZE(stm32_lptim_cnt_synapses),
+ .ext = stm32_lptim_cnt_ext,
+ .num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
+};
+
+/* LP timer without encoder (counter only) */
+static struct counter_count stm32_lptim_in1_counts = {
+ .id = 0,
+ .name = "LPTimer Count",
+ .functions_list = stm32_lptim_cnt_functions,
+ .num_functions = 1,
+ .synapses = stm32_lptim_cnt_synapses,
+ .num_synapses = 1,
+ .ext = stm32_lptim_cnt_ext,
+ .num_ext = ARRAY_SIZE(stm32_lptim_cnt_ext)
+};
+
+static int stm32_lptim_cnt_probe(struct platform_device *pdev)
+{
+ struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct stm32_lptim_cnt *priv;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ddata))
+ return -EINVAL;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->dev = &pdev->dev;
+ priv->regmap = ddata->regmap;
+ priv->clk = ddata->clk;
+ priv->ceiling = STM32_LPTIM_MAX_ARR;
+
+ /* Initialize IIO device */
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->dev.of_node = pdev->dev.of_node;
+ indio_dev->info = &stm32_lptim_cnt_iio_info;
+ if (ddata->has_encoder)
+ indio_dev->channels = &stm32_lptim_enc_channels;
+ else
+ indio_dev->channels = &stm32_lptim_cnt_channels;
+ indio_dev->num_channels = 1;
+
+ /* Initialize Counter device */
+ priv->counter.name = dev_name(&pdev->dev);
+ priv->counter.parent = &pdev->dev;
+ priv->counter.ops = &stm32_lptim_cnt_ops;
+ if (ddata->has_encoder) {
+ priv->counter.counts = &stm32_lptim_enc_counts;
+ priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
+ } else {
+ priv->counter.counts = &stm32_lptim_in1_counts;
+ priv->counter.num_signals = 1;
+ }
+ priv->counter.num_counts = 1;
+ priv->counter.signals = stm32_lptim_cnt_signals;
+ priv->counter.priv = priv;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_counter_register(&pdev->dev, &priv->counter);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_lptim_cnt_suspend(struct device *dev)
+{
+ struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
+ int ret;
+
+ /* Only take care of an enabled counter: don't disturb other MFD children */
+ if (priv->enabled) {
+ ret = stm32_lptim_setup(priv, 0);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, 0);
+ if (ret)
+ return ret;
+
+ /* Force enable state for later resume */
+ priv->enabled = true;
+ }
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_lptim_cnt_resume(struct device *dev)
+{
+ struct stm32_lptim_cnt *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret)
+ return ret;
+
+ if (priv->enabled) {
+ priv->enabled = false;
+ ret = stm32_lptim_setup(priv, 1);
+ if (ret)
+ return ret;
+
+ ret = stm32_lptim_set_enable_state(priv, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(stm32_lptim_cnt_pm_ops, stm32_lptim_cnt_suspend,
+ stm32_lptim_cnt_resume);
+
+static const struct of_device_id stm32_lptim_cnt_of_match[] = {
+ { .compatible = "st,stm32-lptimer-counter", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_lptim_cnt_of_match);
+
+static struct platform_driver stm32_lptim_cnt_driver = {
+ .probe = stm32_lptim_cnt_probe,
+ .driver = {
+ .name = "stm32-lptimer-counter",
+ .of_match_table = stm32_lptim_cnt_of_match,
+ .pm = &stm32_lptim_cnt_pm_ops,
+ },
+};
+module_platform_driver(stm32_lptim_cnt_driver);
+
+MODULE_AUTHOR("Fabrice Gasnier <fabrice.gasnier@st.com>");
+MODULE_ALIAS("platform:stm32-lptimer-counter");
+MODULE_DESCRIPTION("STMicroelectronics STM32 LPTIM counter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
new file mode 100644
index 000000000000..644ba18a72ad
--- /dev/null
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * STM32 Timer Encoder and Counter driver
+ *
+ * Copyright (C) STMicroelectronics 2018
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
+ *
+ */
+#include <linux/counter.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/mfd/stm32-timers.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define TIM_CCMR_CCXS (BIT(8) | BIT(0))
+#define TIM_CCMR_MASK (TIM_CCMR_CC1S | TIM_CCMR_CC2S | \
+ TIM_CCMR_IC1F | TIM_CCMR_IC2F)
+#define TIM_CCER_MASK (TIM_CCER_CC1P | TIM_CCER_CC1NP | \
+ TIM_CCER_CC2P | TIM_CCER_CC2NP)
+
+struct stm32_timer_cnt {
+ struct counter_device counter;
+ struct regmap *regmap;
+ struct clk *clk;
+ u32 ceiling;
+};
+
+/**
+ * stm32_count_function - enumerates stm32 timer counter encoder modes
+ * @STM32_COUNT_SLAVE_MODE_DISABLED: counts on internal clock when CEN=1
+ * @STM32_COUNT_ENCODER_MODE_1: counts TI1FP1 edges, depending on TI2FP2 level
+ * @STM32_COUNT_ENCODER_MODE_2: counts TI2FP2 edges, depending on TI1FP1 level
+ * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
+ */
+enum stm32_count_function {
+ STM32_COUNT_SLAVE_MODE_DISABLED = -1,
+ STM32_COUNT_ENCODER_MODE_1,
+ STM32_COUNT_ENCODER_MODE_2,
+ STM32_COUNT_ENCODER_MODE_3,
+};
+
+static enum counter_count_function stm32_count_functions[] = {
+ [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
+ [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
+};
+
+static int stm32_count_read(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cnt;
+
+ regmap_read(priv->regmap, TIM_CNT, &cnt);
+ counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &cnt);
+
+ return 0;
+}
+
+static int stm32_count_write(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_write_value *val)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cnt;
+ int err;
+
+ err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
+ if (err)
+ return err;
+
+ if (cnt > priv->ceiling)
+ return -EINVAL;
+
+ return regmap_write(priv->regmap, TIM_CNT, cnt);
+}
+
+static int stm32_count_function_get(struct counter_device *counter,
+ struct counter_count *count,
+ size_t *function)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 smcr;
+
+ regmap_read(priv->regmap, TIM_SMCR, &smcr);
+
+ switch (smcr & TIM_SMCR_SMS) {
+ case 1:
+ *function = STM32_COUNT_ENCODER_MODE_1;
+ return 0;
+ case 2:
+ *function = STM32_COUNT_ENCODER_MODE_2;
+ return 0;
+ case 3:
+ *function = STM32_COUNT_ENCODER_MODE_3;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_count_function_set(struct counter_device *counter,
+ struct counter_count *count,
+ size_t function)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cr1, sms;
+
+ switch (function) {
+ case STM32_COUNT_ENCODER_MODE_1:
+ sms = 1;
+ break;
+ case STM32_COUNT_ENCODER_MODE_2:
+ sms = 2;
+ break;
+ case STM32_COUNT_ENCODER_MODE_3:
+ sms = 3;
+ break;
+ default:
+ sms = 0;
+ break;
+ }
+
+ /* Store enable status */
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
+
+ regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
+
+ /* Make sure that registers are updated */
+ regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
+
+ /* Restore the enable status */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, cr1);
+
+ return 0;
+}
+
+static ssize_t stm32_count_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ const char *direction;
+ u32 cr1;
+
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ direction = (cr1 & TIM_CR1_DIR) ? "backward" : "forward";
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", direction);
+}
+
+static ssize_t stm32_count_ceiling_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 arr;
+
+ regmap_read(priv->regmap, TIM_ARR, &arr);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", arr);
+}
+
+static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ unsigned int ceiling;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &ceiling);
+ if (ret)
+ return ret;
+
+ /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
+ regmap_write(priv->regmap, TIM_ARR, ceiling);
+
+ priv->ceiling = ceiling;
+ return len;
+}
+
+static ssize_t stm32_count_enable_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *private, char *buf)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ u32 cr1;
+
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)(cr1 & TIM_CR1_CEN));
+}
+
+static ssize_t stm32_count_enable_write(struct counter_device *counter,
+ struct counter_count *count,
+ void *private,
+ const char *buf, size_t len)
+{
+ struct stm32_timer_cnt *const priv = counter->priv;
+ int err;
+ u32 cr1;
+ bool enable;
+
+ err = kstrtobool(buf, &enable);
+ if (err)
+ return err;
+
+ if (enable) {
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ if (!(cr1 & TIM_CR1_CEN))
+ clk_enable(priv->clk);
+
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+ TIM_CR1_CEN);
+ } else {
+ regmap_read(priv->regmap, TIM_CR1, &cr1);
+ regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
+ if (cr1 & TIM_CR1_CEN)
+ clk_disable(priv->clk);
+ }
+
+ return len;
+}
+
+static const struct counter_count_ext stm32_count_ext[] = {
+ {
+ .name = "direction",
+ .read = stm32_count_direction_read,
+ },
+ {
+ .name = "enable",
+ .read = stm32_count_enable_read,
+ .write = stm32_count_enable_write
+ },
+ {
+ .name = "ceiling",
+ .read = stm32_count_ceiling_read,
+ .write = stm32_count_ceiling_write
+ },
+};
+
+enum stm32_synapse_action {
+ STM32_SYNAPSE_ACTION_NONE,
+ STM32_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static enum counter_synapse_action stm32_synapse_actions[] = {
+ [STM32_SYNAPSE_ACTION_NONE] = COUNTER_SYNAPSE_ACTION_NONE,
+ [STM32_SYNAPSE_ACTION_BOTH_EDGES] = COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+static int stm32_action_get(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ size_t *action)
+{
+ size_t function;
+ int err;
+
+ /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
+ *action = STM32_SYNAPSE_ACTION_NONE;
+
+ err = stm32_count_function_get(counter, count, &function);
+ if (err)
+ return 0;
+
+ switch (function) {
+ case STM32_COUNT_ENCODER_MODE_1:
+ /* counts up/down on TI1FP1 edge depending on TI2FP2 level */
+ if (synapse->signal->id == count->synapses[0].signal->id)
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case STM32_COUNT_ENCODER_MODE_2:
+ /* counts up/down on TI2FP2 edge depending on TI1FP1 level */
+ if (synapse->signal->id == count->synapses[1].signal->id)
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ case STM32_COUNT_ENCODER_MODE_3:
+ /* counts up/down on both TI1FP1 and TI2FP2 edges */
+ *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct counter_ops stm32_timer_cnt_ops = {
+ .count_read = stm32_count_read,
+ .count_write = stm32_count_write,
+ .function_get = stm32_count_function_get,
+ .function_set = stm32_count_function_set,
+ .action_get = stm32_action_get,
+};
+
+static struct counter_signal stm32_signals[] = {
+ {
+ .id = 0,
+ .name = "Channel 1 Quadrature A"
+ },
+ {
+ .id = 1,
+ .name = "Channel 1 Quadrature B"
+ }
+};
+
+static struct counter_synapse stm32_count_synapses[] = {
+ {
+ .actions_list = stm32_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_synapse_actions),
+ .signal = &stm32_signals[0]
+ },
+ {
+ .actions_list = stm32_synapse_actions,
+ .num_actions = ARRAY_SIZE(stm32_synapse_actions),
+ .signal = &stm32_signals[1]
+ }
+};
+
+static struct counter_count stm32_counts = {
+ .id = 0,
+ .name = "Channel 1 Count",
+ .functions_list = stm32_count_functions,
+ .num_functions = ARRAY_SIZE(stm32_count_functions),
+ .synapses = stm32_count_synapses,
+ .num_synapses = ARRAY_SIZE(stm32_count_synapses),
+ .ext = stm32_count_ext,
+ .num_ext = ARRAY_SIZE(stm32_count_ext)
+};
+
+static int stm32_timer_cnt_probe(struct platform_device *pdev)
+{
+ struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct stm32_timer_cnt *priv;
+
+ if (IS_ERR_OR_NULL(ddata))
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = ddata->regmap;
+ priv->clk = ddata->clk;
+ priv->ceiling = ddata->max_arr;
+
+ priv->counter.name = dev_name(dev);
+ priv->counter.parent = dev;
+ priv->counter.ops = &stm32_timer_cnt_ops;
+ priv->counter.counts = &stm32_counts;
+ priv->counter.num_counts = 1;
+ priv->counter.signals = stm32_signals;
+ priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
+ priv->counter.priv = priv;
+
+ /* Register Counter device */
+ return devm_counter_register(dev, &priv->counter);
+}
+
+static const struct of_device_id stm32_timer_cnt_of_match[] = {
+ { .compatible = "st,stm32-timer-counter", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_timer_cnt_of_match);
+
+static struct platform_driver stm32_timer_cnt_driver = {
+ .probe = stm32_timer_cnt_probe,
+ .driver = {
+ .name = "stm32-timer-counter",
+ .of_match_table = stm32_timer_cnt_of_match,
+ },
+};
+module_platform_driver(stm32_timer_cnt_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_ALIAS("platform:stm32-timer-counter");
+MODULE_DESCRIPTION("STMicroelectronics STM32 TIMER counter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index b22e6bba71f1..4d2b33a30292 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -26,10 +26,6 @@ config CPU_FREQ_GOV_COMMON
select IRQ_WORK
bool
-config CPU_FREQ_BOOST_SW
- bool
- depends on THERMAL
-
config CPU_FREQ_STAT
bool "CPU frequency transition statistics"
help
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index c72258a44ba4..73bb2aafb1a8 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -366,7 +366,7 @@ static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *dat
val = drv_read(data, mask);
- pr_debug("get_cur_val = %u\n", val);
+ pr_debug("%s = %u\n", __func__, val);
return val;
}
@@ -378,7 +378,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
unsigned int freq;
unsigned int cached_freq;
- pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
+ pr_debug("%s (%d)\n", __func__, cpu);
policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
@@ -458,8 +458,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
if (acpi_pstate_strict) {
if (!check_freqs(policy, mask,
policy->freq_table[index].frequency)) {
- pr_debug("acpi_cpufreq_target failed (%d)\n",
- policy->cpu);
+ pr_debug("%s (%d)\n", __func__, policy->cpu);
result = -EAGAIN;
}
}
@@ -573,7 +572,7 @@ static int cpufreq_boost_down_prep(unsigned int cpu)
static int __init acpi_cpufreq_early_init(void)
{
unsigned int i;
- pr_debug("acpi_cpufreq_early_init\n");
+ pr_debug("%s\n", __func__);
acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
if (!acpi_perf_data) {
@@ -657,7 +656,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
static int blacklisted;
#endif
- pr_debug("acpi_cpufreq_cpu_init\n");
+ pr_debug("%s\n", __func__);
#ifdef CONFIG_SMP
if (blacklisted)
@@ -856,7 +855,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_cpu_exit\n");
+ pr_debug("%s\n", __func__);
policy->fast_switch_possible = false;
policy->driver_data = NULL;
@@ -881,7 +880,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = policy->driver_data;
- pr_debug("acpi_cpufreq_resume\n");
+ pr_debug("%s\n", __func__);
data->resume = 1;
@@ -954,7 +953,7 @@ static int __init acpi_cpufreq_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
- pr_debug("acpi_cpufreq_init\n");
+ pr_debug("%s\n", __func__);
ret = acpi_cpufreq_early_init();
if (ret)
@@ -991,7 +990,7 @@ static int __init acpi_cpufreq_init(void)
static void __exit acpi_cpufreq_exit(void)
{
- pr_debug("acpi_cpufreq_exit\n");
+ pr_debug("%s\n", __func__);
acpi_cpufreq_boost_exit();
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 4ac7c3cf34be..6927a8c0e748 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -124,7 +124,7 @@ static int __init amd_freq_sensitivity_init(void)
PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
if (!pcidev) {
- if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ if (!boot_cpu_has(X86_FEATURE_PROC_FEEDBACK))
return -ENODEV;
}
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index 75491fc841a6..0df16eb1eb3c 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -359,11 +359,11 @@ static int __init armada37xx_cpufreq_driver_init(void)
struct armada_37xx_dvfs *dvfs;
struct platform_device *pdev;
unsigned long freq;
- unsigned int cur_frequency;
+ unsigned int cur_frequency, base_frequency;
struct regmap *nb_pm_base, *avs_base;
struct device *cpu_dev;
int load_lvl, ret;
- struct clk *clk;
+ struct clk *clk, *parent;
nb_pm_base =
syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
@@ -399,6 +399,22 @@ static int __init armada37xx_cpufreq_driver_init(void)
return PTR_ERR(clk);
}
+ parent = clk_get_parent(clk);
+ if (IS_ERR(parent)) {
+ dev_err(cpu_dev, "Cannot get parent clock for CPU0\n");
+ clk_put(clk);
+ return PTR_ERR(parent);
+ }
+
+ /* Get parent CPU frequency */
+ base_frequency = clk_get_rate(parent);
+
+ if (!base_frequency) {
+ dev_err(cpu_dev, "Failed to get parent clock rate for CPU\n");
+ clk_put(clk);
+ return -EINVAL;
+ }
+
/* Get nominal (current) CPU frequency */
cur_frequency = clk_get_rate(clk);
if (!cur_frequency) {
@@ -431,7 +447,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
load_lvl++) {
unsigned long u_volt = avs_map[dvfs->avs[load_lvl]] * 1000;
- freq = cur_frequency / dvfs->divider[load_lvl];
+ freq = base_frequency / dvfs->divider[load_lvl];
ret = dev_pm_opp_add(cpu_dev, freq, u_volt);
if (ret)
goto remove_opp;
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index b3f4bd647e9b..988ebc326bdb 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -132,6 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
of_node_put(node);
return -ENODEV;
}
+ of_node_put(node);
nb_cpus = num_possible_cpus();
freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e10922709d13..85ff958e01f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -34,11 +34,6 @@
static LIST_HEAD(cpufreq_policy_list);
-static inline bool policy_is_inactive(struct cpufreq_policy *policy)
-{
- return cpumask_empty(policy->cpus);
-}
-
/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active) \
list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
@@ -250,6 +245,51 @@ void cpufreq_cpu_put(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+/**
+ * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
+ * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
+ */
+void cpufreq_cpu_release(struct cpufreq_policy *policy)
+{
+ if (WARN_ON(!policy))
+ return;
+
+ lockdep_assert_held(&policy->rwsem);
+
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+}
+
+/**
+ * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
+ * if the policy returned by it is not NULL, acquire its rwsem for writing.
+ * Return the policy if it is active or release it and return NULL otherwise.
+ *
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_release() in order to release its rwsem and balance its usage
+ * counter properly.
+ */
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ return NULL;
+
+ down_write(&policy->rwsem);
+
+ if (policy_is_inactive(policy)) {
+ cpufreq_cpu_release(policy);
+ return NULL;
+ }
+
+ return policy;
+}
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -300,11 +340,14 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs,
unsigned int state)
{
+ int cpu;
+
BUG_ON(irqs_disabled());
if (cpufreq_disabled())
return;
+ freqs->policy = policy;
freqs->flags = cpufreq_driver->flags;
pr_debug("notification %u of frequency transition to %u kHz\n",
state, freqs->new);
@@ -324,10 +367,8 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
}
}
- for_each_cpu(freqs->cpu, policy->cpus) {
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_PRECHANGE, freqs);
- }
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_PRECHANGE, freqs);
adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
break;
@@ -337,11 +378,11 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
cpumask_pr_args(policy->cpus));
- for_each_cpu(freqs->cpu, policy->cpus) {
- trace_cpu_frequency(freqs->new, freqs->cpu);
- srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
- CPUFREQ_POSTCHANGE, freqs);
- }
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_frequency(freqs->new, cpu);
+
+ srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
+ CPUFREQ_POSTCHANGE, freqs);
cpufreq_stats_record_transition(policy, freqs->new);
policy->cur = freqs->new;
@@ -426,7 +467,7 @@ static void cpufreq_list_transition_notifiers(void)
mutex_lock(&cpufreq_transition_notifier_list.mutex);
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
- pr_info("%pF\n", nb->notifier_call);
+ pr_info("%pS\n", nb->notifier_call);
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
@@ -578,50 +619,52 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
return NULL;
}
+static int cpufreq_parse_policy(char *str_governor,
+ struct cpufreq_policy *policy)
+{
+ if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
+ }
+ return -EINVAL;
+}
+
/**
- * cpufreq_parse_governor - parse a governor string
+ * cpufreq_parse_governor - parse a governor string only for !setpolicy
*/
static int cpufreq_parse_governor(char *str_governor,
struct cpufreq_policy *policy)
{
- if (cpufreq_driver->setpolicy) {
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_PERFORMANCE;
- return 0;
- }
-
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
- policy->policy = CPUFREQ_POLICY_POWERSAVE;
- return 0;
- }
- } else {
- struct cpufreq_governor *t;
+ struct cpufreq_governor *t;
- mutex_lock(&cpufreq_governor_mutex);
+ mutex_lock(&cpufreq_governor_mutex);
- t = find_governor(str_governor);
- if (!t) {
- int ret;
+ t = find_governor(str_governor);
+ if (!t) {
+ int ret;
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- ret = request_module("cpufreq_%s", str_governor);
- if (ret)
- return -EINVAL;
+ ret = request_module("cpufreq_%s", str_governor);
+ if (ret)
+ return -EINVAL;
- mutex_lock(&cpufreq_governor_mutex);
+ mutex_lock(&cpufreq_governor_mutex);
- t = find_governor(str_governor);
- }
- if (t && !try_module_get(t->owner))
- t = NULL;
+ t = find_governor(str_governor);
+ }
+ if (t && !try_module_get(t->owner))
+ t = NULL;
- mutex_unlock(&cpufreq_governor_mutex);
+ mutex_unlock(&cpufreq_governor_mutex);
- if (t) {
- policy->governor = t;
- return 0;
- }
+ if (t) {
+ policy->governor = t;
+ return 0;
}
return -EINVAL;
@@ -669,9 +712,6 @@ static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
return ret;
}
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
-
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
*/
@@ -746,8 +786,13 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy))
- return -EINVAL;
+ if (cpufreq_driver->setpolicy) {
+ if (cpufreq_parse_policy(str_governor, &new_policy))
+ return -EINVAL;
+ } else {
+ if (cpufreq_parse_governor(str_governor, &new_policy))
+ return -EINVAL;
+ }
ret = cpufreq_set_policy(policy, &new_policy);
@@ -857,11 +902,9 @@ static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
unsigned int limit;
int ret;
- if (cpufreq_driver->bios_limit) {
- ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
- if (!ret)
- return sprintf(buf, "%u\n", limit);
- }
+ ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
+ if (!ret)
+ return sprintf(buf, "%u\n", limit);
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
@@ -1015,32 +1058,39 @@ __weak struct cpufreq_governor *cpufreq_default_governor(void)
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
- struct cpufreq_governor *gov = NULL;
+ struct cpufreq_governor *gov = NULL, *def_gov = NULL;
struct cpufreq_policy new_policy;
memcpy(&new_policy, policy, sizeof(*policy));
- /* Update governor of new_policy to the governor used before hotplug */
- gov = find_governor(policy->last_governor);
- if (gov) {
- pr_debug("Restoring governor %s for cpu %d\n",
+ def_gov = cpufreq_default_governor();
+
+ if (has_target()) {
+ /*
+ * Update governor of new_policy to the governor used before
+ * hotplug
+ */
+ gov = find_governor(policy->last_governor);
+ if (gov) {
+ pr_debug("Restoring governor %s for cpu %d\n",
policy->governor->name, policy->cpu);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ gov = def_gov;
+ }
+ new_policy.governor = gov;
} else {
- gov = cpufreq_default_governor();
- if (!gov)
- return -ENODATA;
- }
-
- new_policy.governor = gov;
-
- /* Use the default policy if there is no last_policy. */
- if (cpufreq_driver->setpolicy) {
- if (policy->last_policy)
+ /* Use the default policy if there is no last_policy. */
+ if (policy->last_policy) {
new_policy.policy = policy->last_policy;
- else
- cpufreq_parse_governor(gov->name, &new_policy);
+ } else {
+ if (!def_gov)
+ return -ENODATA;
+ cpufreq_parse_policy(def_gov->name, &new_policy);
+ }
}
- /* set default policy */
+
return cpufreq_set_policy(policy, &new_policy);
}
@@ -1098,6 +1148,12 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
cpufreq_global_kobject, "policy%u", cpu);
if (ret) {
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
+ /*
+ * The entire policy object will be freed below, but the extra
+ * memory allocated for the kobject name needs to be freed by
+ * releasing the kobject.
+ */
+ kobject_put(&policy->kobj);
goto err_free_real_cpus;
}
@@ -1550,7 +1606,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)))
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
@@ -1588,7 +1644,8 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
- ret_freq = __cpufreq_get(policy);
+ if (cpufreq_driver->get)
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
@@ -2229,8 +2286,8 @@ EXPORT_SYMBOL(cpufreq_get_policy);
*
* The cpuinfo part of @policy is not updated by this function.
*/
-static int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy)
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
struct cpufreq_governor *old_gov;
int ret;
@@ -2337,17 +2394,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
struct cpufreq_policy new_policy;
if (!policy)
return;
- down_write(&policy->rwsem);
-
- if (policy_is_inactive(policy))
- goto unlock;
-
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
@@ -2364,12 +2416,26 @@ void cpufreq_update_policy(unsigned int cpu)
cpufreq_set_policy(policy, &new_policy);
unlock:
- up_write(&policy->rwsem);
-
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
+/**
+ * cpufreq_update_limits - Update policy limits for a given CPU.
+ * @cpu: CPU to update the policy limits for.
+ *
+ * Invoke the driver's ->update_limits callback if present or call
+ * cpufreq_update_policy() for @cpu.
+ */
+void cpufreq_update_limits(unsigned int cpu)
+{
+ if (cpufreq_driver->update_limits)
+ cpufreq_driver->update_limits(cpu);
+ else
+ cpufreq_update_policy(cpu);
+}
+EXPORT_SYMBOL_GPL(cpufreq_update_limits);
+
/*********************************************************************
* BOOST *
*********************************************************************/
@@ -2426,7 +2492,7 @@ int cpufreq_boost_trigger_state(int state)
static bool cpufreq_boost_supported(void)
{
- return likely(cpufreq_driver) && cpufreq_driver->set_boost;
+ return cpufreq_driver->set_boost;
}
static int create_boost_sysfs_file(void)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index ffa9adeaba31..9d1d9bf02710 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
/* Failure, so roll back. */
pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
+ kobject_put(&dbs_data->attr_set.kobj);
+
policy->governor_data = NULL;
if (!have_governor_per_policy())
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index e2db5581489a..08b192eb22c6 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/slab.h>
-static DEFINE_SPINLOCK(cpufreq_stats_lock);
struct cpufreq_stats {
unsigned int total_trans;
@@ -23,6 +22,7 @@ struct cpufreq_stats {
unsigned int state_num;
unsigned int last_index;
u64 *time_in_state;
+ spinlock_t lock;
unsigned int *freq_table;
unsigned int *trans_table;
};
@@ -39,12 +39,12 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
{
unsigned int count = stats->max_state;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
memset(stats->time_in_state, 0, count * sizeof(u64));
memset(stats->trans_table, 0, count * count * sizeof(int));
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
@@ -62,9 +62,9 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
if (policy->fast_switch_enabled)
return 0;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -211,6 +211,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)
stats->state_num = i;
stats->last_time = get_jiffies_64();
stats->last_index = freq_table_get_index(stats, policy->cur);
+ spin_lock_init(&stats->lock);
policy->stats = stats;
ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
@@ -242,11 +243,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
if (old_index == -1 || new_index == -1 || old_index == new_index)
return;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock(&stats->lock);
cpufreq_stats_update(stats);
stats->last_index = new_index;
stats->trans_table[old_index * stats->max_state + new_index]++;
stats->total_trans++;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock(&stats->lock);
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3a8cc99e6815..e7be0af3199f 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -290,9 +290,6 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
-#ifdef CONFIG_CPU_FREQ_BOOST_SW
- &cpufreq_freq_attr_scaling_boost_freqs,
-#endif
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index a4ff09f91c8f..3e17560b1efe 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -388,11 +388,11 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
ret = imx6ul_opp_check_speed_grading(cpu_dev);
if (ret) {
if (ret == -EPROBE_DEFER)
- return ret;
+ goto put_node;
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
- return ret;
+ goto put_node;
}
} else {
imx6q_opp_check_speed_grading(cpu_dev);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2986119dd31f..34b54df41aaa 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct vid_data {
* based on the MSR_IA32_MISC_ENABLE value and whether or
* not the maximum reported turbo P-state is different from
* the maximum reported non-turbo one.
+ * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq.
* @min_perf_pct: Minimum capacity limit in percent of the maximum turbo
* P-state capacity.
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo
@@ -187,6 +188,7 @@ struct vid_data {
struct global_params {
bool no_turbo;
bool turbo_disabled;
+ bool turbo_disabled_mf;
int max_perf_pct;
int min_perf_pct;
};
@@ -525,7 +527,7 @@ static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -539,7 +541,7 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
s16 epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* When hwp_req_data is 0, means that caller didn't read
* MSR_HWP_REQUEST, so need to read and get EPP.
@@ -564,7 +566,7 @@ static int intel_pstate_set_epb(int cpu, s16 pref)
u64 epb;
int ret;
- if (!static_cpu_has(X86_FEATURE_EPB))
+ if (!boot_cpu_has(X86_FEATURE_EPB))
return -ENXIO;
ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
@@ -612,7 +614,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
if (epp < 0)
return epp;
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if (epp == HWP_EPP_PERFORMANCE)
return 1;
if (epp <= HWP_EPP_BALANCE_PERFORMANCE)
@@ -621,7 +623,7 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
return 3;
else
return 4;
- } else if (static_cpu_has(X86_FEATURE_EPB)) {
+ } else if (boot_cpu_has(X86_FEATURE_EPB)) {
/*
* Range:
* 0x00-0x03 : Performance
@@ -649,7 +651,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
mutex_lock(&intel_pstate_limits_lock);
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
u64 value;
ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
@@ -824,7 +826,7 @@ static void intel_pstate_hwp_set(unsigned int cpu)
epp = cpu_data->epp_powersave;
}
update_epp:
- if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
} else {
@@ -849,7 +851,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
value |= HWP_MIN_PERF(min_perf);
/* Set EPP/EPB to min */
- if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
else
intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
@@ -897,6 +899,48 @@ static void intel_pstate_update_policies(void)
cpufreq_update_policy(cpu);
}
+static void intel_pstate_update_max_freq(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
+ struct cpufreq_policy new_policy;
+ struct cpudata *cpudata;
+
+ if (!policy)
+ return;
+
+ cpudata = all_cpu_data[cpu];
+ policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
+ cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.max = min(policy->user_policy.max, policy->cpuinfo.max_freq);
+ new_policy.min = min(policy->user_policy.min, new_policy.max);
+
+ cpufreq_set_policy(policy, &new_policy);
+
+ cpufreq_cpu_release(policy);
+}
+
+static void intel_pstate_update_limits(unsigned int cpu)
+{
+ mutex_lock(&intel_pstate_driver_lock);
+
+ update_turbo_state();
+ /*
+ * If turbo has been turned on or off globally, policy limits for
+ * all CPUs need to be updated to reflect that.
+ */
+ if (global.turbo_disabled_mf != global.turbo_disabled) {
+ global.turbo_disabled_mf = global.turbo_disabled;
+ for_each_possible_cpu(cpu)
+ intel_pstate_update_max_freq(cpu);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
+
+ mutex_unlock(&intel_pstate_driver_lock);
+}
+
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
@@ -1197,7 +1241,7 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
/* First disable HWP notification interrupt as we don't process them */
- if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
@@ -2138,6 +2182,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
update_turbo_state();
+ global.turbo_disabled_mf = global.turbo_disabled;
policy->cpuinfo.max_freq = global.turbo_disabled ?
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
@@ -2182,6 +2227,7 @@ static struct cpufreq_driver intel_pstate = {
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
@@ -2316,6 +2362,7 @@ static struct cpufreq_driver intel_cpufreq = {
.init = intel_cpufreq_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_cpufreq_stop_cpu,
+ .update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
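
The update_limits path added above follows a simple rule: a cached mirror of the global turbo state (turbo_disabled_mf) is compared against a freshly sampled value, and the expensive per-CPU cpuinfo.max_freq refresh runs only when the two disagree; otherwise only the requesting CPU's policy is re-evaluated. Below is a minimal user-space sketch of that pattern, with hypothetical helpers standing in for the driver calls; it is not the driver code itself.

/*
 * Mirror-flag pattern: refresh every CPU only when the cached copy of
 * the global condition no longer matches the freshly sampled value.
 * update_max_freq()/update_policy() are made-up stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool turbo_disabled;     /* freshly sampled global state */
static bool turbo_disabled_mf;  /* value currently reflected in policies */

static void update_max_freq(int cpu)
{
	printf("cpu%d: refresh cpuinfo.max_freq\n", cpu);
}

static void update_policy(int cpu)
{
	printf("cpu%d: re-evaluate policy only\n", cpu);
}

static void update_limits(int cpu)
{
	if (turbo_disabled_mf != turbo_disabled) {
		turbo_disabled_mf = turbo_disabled;
		for (int c = 0; c < NR_CPUS; c++)
			update_max_freq(c);	/* global change: touch every CPU */
	} else {
		update_policy(cpu);		/* local change: just this CPU */
	}
}

int main(void)
{
	update_limits(1);		/* no global change yet */
	turbo_disabled = true;		/* turbo flipped off globally */
	update_limits(1);		/* now every CPU is refreshed */
	return 0;
}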
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c2dd43f3f5d8..8d63a6dc8383 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk\n");
- return PTR_ERR(priv.cpu_clk);
+ err = PTR_ERR(priv.cpu_clk);
+ goto out_node;
}
err = clk_prepare_enable(priv.cpu_clk);
if (err) {
dev_err(priv.dev, "Unable to prepare cpuclk\n");
- return err;
+ goto out_node;
}
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
@@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
goto out_ddr;
}
- of_node_put(np);
- np = NULL;
-
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
- if (!err)
- return 0;
+ if (err) {
+ dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ goto out_powersave;
+ }
- dev_err(priv.dev, "Failed to register cpufreq driver\n");
+ of_node_put(np);
+ return 0;
+out_powersave:
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
+out_node:
of_node_put(np);
return err;
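
The reworked kirkwood probe above funnels every failure through goto labels so that each resource acquired so far (clock enables, the OF node reference) is released in reverse order, and the node reference is also dropped on the success path. A minimal sketch of that cleanup pattern, assuming made-up acquire()/release() helpers rather than the real clk/OF APIs:

/*
 * Reverse-order cleanup via goto labels: a failure jumps to the label
 * that undoes everything acquired up to that point.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name)
{
	printf("acquire %s\n", name);
	return malloc(1);
}

static void release(void *res, const char *name)
{
	printf("release %s\n", name);
	free(res);
}

static int probe(void)
{
	void *node, *clk, *drv;
	int err = -1;

	node = acquire("node");
	if (!node)
		return err;

	clk = acquire("clk");
	if (!clk)
		goto out_node;

	drv = acquire("driver");
	if (!drv)
		goto out_clk;

	/* success: the "driver" stays live, only the node ref is dropped */
	release(node, "node");
	return 0;

out_clk:
	release(clk, "clk");
out_node:
	release(node, "node");
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}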
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index be89416e2358..21c9ce8526c0 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d9df89392b84..a94355723ef8 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -210,7 +210,7 @@ static int __init maple_cpufreq_init(void)
*/
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp)
- return -ENODEV;
+ goto bail_noprops;
max_freq = (*valp)/1000;
maple_cpu_freqs[0].frequency = max_freq;
maple_cpu_freqs[1].frequency = max_freq/2;
@@ -231,10 +231,6 @@ static int __init maple_cpufreq_init(void)
rc = cpufreq_register_driver(&maple_cpufreq_driver);
- of_node_put(cpunode);
-
- return rc;
-
bail_noprops:
of_node_put(cpunode);
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 75dfbd2a58ea..c7710c149de8 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpu = of_get_cpu_node(policy->cpu, NULL);
+ of_node_put(cpu);
if (!cpu)
goto out;
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 52f0d91d30c1..9b4ce2eb8222 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
if (!voltage_gpio){
pr_err("missing cpu-vcore-select gpio\n");
return 1;
@@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
+ of_node_put(volt_gpio_np);
pvr = mfspr(SPRN_PVR);
has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index fb77b39a4ce3..3c12e03fa343 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1178,7 +1178,7 @@ static int powernowk8_init(void)
unsigned int i, supported_cpus = 0;
int ret;
- if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+ if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) {
__request_acpi_cpufreq();
return -ENODEV;
}
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 41a0f0be3f9f..8414c3a4ea08 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
!cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
+ of_node_put(cpu);
return -EINVAL;
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4295e5476264..71b640c8c1a5 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -280,10 +280,12 @@ static const struct of_device_id node_matches[] __initconst = {
{ .compatible = "fsl,ls1012a-clockgen", },
{ .compatible = "fsl,ls1021a-clockgen", },
+ { .compatible = "fsl,ls1028a-clockgen", },
{ .compatible = "fsl,ls1043a-clockgen", },
{ .compatible = "fsl,ls1046a-clockgen", },
{ .compatible = "fsl,ls1088a-clockgen", },
{ .compatible = "fsl,ls2080a-clockgen", },
+ { .compatible = "fsl,lx2160a-clockgen", },
{ .compatible = "fsl,p4080-clockgen", },
{ .compatible = "fsl,qoriq-clockgen-1.0", },
{ .compatible = "fsl,qoriq-clockgen-2.0", },
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index a1fb735685db..e086b2dd4072 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -412,7 +412,7 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
}
/**
- * centrino_setpolicy - set a new CPUFreq policy
+ * centrino_target - set a new CPUFreq policy
* @policy: new policy
* @index: index of target frequency
*
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 0171a6e190d7..f7199a35cbb6 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
[1] = {
.enter = exynos_enter_lowpower,
.exit_latency = 300,
- .target_residency = 100000,
+ .target_residency = 10000,
.name = "C1",
.desc = "ARM power down",
},
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7f108309e871..0f4b7c45df3e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int index)
{
+ int ret = 0;
+
+ /*
+ * Store the next hrtimer, which becomes either the next tick or the
+ * next timer event, whichever expires first. Additionally, to make
+ * this data useful for consumers outside cpuidle, we rely on the
+ * governor's ->select() callback having decided whether to stop the
+ * tick or not.
+ */
+ WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());
+
if (cpuidle_state_is_coupled(drv, index))
- return cpuidle_enter_state_coupled(dev, drv, index);
- return cpuidle_enter_state(dev, drv, index);
+ ret = cpuidle_enter_state_coupled(dev, drv, index);
+ else
+ ret = cpuidle_enter_state(dev, drv, index);
+
+ WRITE_ONCE(dev->next_hrtimer, 0);
+ return ret;
}
/**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
{
memset(dev->states_usage, 0, sizeof(dev->states_usage));
dev->last_residency = 0;
+ dev->next_hrtimer = 0;
}
/**
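
The cpuidle_enter() change above publishes dev->next_hrtimer with WRITE_ONCE() before entering the state and clears it on the way out, so lockless readers on other CPUs see either the published expiry or zero, never a torn value. As a rough user-space analogue (an illustrative sketch, not kernel code), a relaxed C11 atomic store plays the role of WRITE_ONCE():

/*
 * Publish-before-blocking, clear-after pattern using relaxed atomics
 * as a stand-in for WRITE_ONCE()/READ_ONCE().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t next_hrtimer;

static uint64_t peek_next_event(void)	/* what a remote observer would read */
{
	return atomic_load_explicit(&next_hrtimer, memory_order_relaxed);
}

static void enter_idle(uint64_t next_event_ns)
{
	atomic_store_explicit(&next_hrtimer, next_event_ns,
			      memory_order_relaxed);	/* publish */

	/* ... the CPU would idle here ... */
	printf("observer sees %llu\n",
	       (unsigned long long)peek_next_event());

	atomic_store_explicit(&next_hrtimer, 0,
			      memory_order_relaxed);	/* clear on exit */
}

int main(void)
{
	enter_idle(123456789);
	return 0;
}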
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 0be55fcc19ba..177b7713bd2d 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -404,15 +404,6 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
-config CRYPTO_DEV_MXC_SCC
- tristate "Support for Freescale Security Controller (SCC)"
- depends on ARCH_MXC && OF
- select CRYPTO_BLKCIPHER
- select CRYPTO_DES
- help
- This option enables support for the Security Controller (SCC)
- found in Freescale i.MX25 chips.
-
config CRYPTO_DEV_EXYNOS_RNG
tristate "EXYNOS HW pseudo random number generator support"
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 8e7e225d2446..a23a7197fcd7 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
-obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index 4092c2aad8e2..307f5cfa9ba4 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
/* Setup SA */
sa = ctx->sa_in;
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
- SA_SAVE_IV : SA_NOT_SAVE_IV),
- SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_NOT_SAVE_IV : SA_SAVE_IV),
+ SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+ SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ /*
+ * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+ * It's the DIR_(IN|OUT)BOUND that matters.
+ */
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
return 0;
}
@@ -258,10 +264,10 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
* overflow.
*/
if (counter + nblks < counter) {
- struct skcipher_request *subreq = skcipher_request_ctx(req);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher.cipher);
int ret;
- skcipher_request_set_tfm(subreq, ctx->sw_cipher.cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher.cipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -283,14 +289,14 @@ static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
{
int rc;
- crypto_skcipher_clear_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher.cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher.cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher.cipher,
crypto_skcipher_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
- rc = crypto_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
+ rc = crypto_sync_skcipher_setkey(ctx->sw_cipher.cipher, key, keylen);
crypto_skcipher_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
crypto_skcipher_set_flags(cipher,
- crypto_skcipher_get_flags(ctx->sw_cipher.cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher.cipher) &
CRYPTO_TFM_RES_MASK);
return rc;
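
The CTR fallback in crypto4xx_ctr_crypt() is gated on a plain unsigned-overflow test: if adding the block count wraps the 32-bit counter, the sum comes out smaller than the original value. The same idiom in isolation (input values are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when counter + nblks would wrap around 32 bits. */
static int ctr_would_wrap(uint32_t counter, uint32_t nblks)
{
	return (uint32_t)(counter + nblks) < counter;
}

int main(void)
{
	printf("%d\n", ctr_would_wrap(0xFFFFFFF0u, 0x20u));	/* 1: wraps */
	printf("%d\n", ctr_would_wrap(0x00000010u, 0x20u));	/* 0: fits  */
	return 0;
}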
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 06574a884715..3934c2523762 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -539,7 +539,7 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req = skcipher_request_cast(pd_uinfo->async_req);
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
req->cryptlen, req->dst);
} else {
@@ -593,7 +593,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
u32 icv[AES_BLOCK_SIZE];
int err = 0;
- if (pd_uinfo->using_sd) {
+ if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
dst);
@@ -714,7 +714,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
int tmp;
- bool is_busy;
+ bool is_busy, force_sd;
+
+ /*
+ * There's a very subtle/disguised "bug" in the hardware that
+ * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+ * of the hardware spec:
+ * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+ * operation modes for >>> "Block ciphers" <<<.
+ *
+ * To work around this issue and stop the hardware from causing
+ * "overran dst buffer" errors on ciphertexts that are not a
+ * multiple of 16 (AES_BLOCK_SIZE), we force the driver to use
+ * the scatter buffers.
+ */
+ force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+ || req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+ && (datalen % AES_BLOCK_SIZE);
/* figure how many gd are needed */
tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -732,7 +748,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
/* figure how many sd are needed */
- if (sg_is_last(dst)) {
+ if (sg_is_last(dst) && force_sd == false) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -807,9 +823,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
pd->sa_len = sa_len;
pd_uinfo = &dev->pdr_uinfo[pd_entry];
- pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
+ pd_uinfo->dest_va = dst;
+ pd_uinfo->async_req = req;
if (iv_len)
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -828,7 +845,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
- pd_uinfo->num_gd = num_gd;
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
pd->src = gd_dma;
/* enable gather */
@@ -865,17 +881,13 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
* Indicate gather array is not used
*/
pd_uinfo->first_gd = 0xffffffff;
- pd_uinfo->num_gd = 0;
}
- if (sg_is_last(dst)) {
+ if (!num_sd) {
/*
* we know the application gave us dst as a whole piece of memory,
* no need to use scatter ring.
*/
- pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
- pd_uinfo->num_sd = 0;
- pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
pd->dest = (u32)dma_map_page(dev->core_dev->device,
sg_page(dst), dst->offset,
@@ -888,10 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
u32 sd_idx = fst_sd;
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
- pd_uinfo->using_sd = 1;
- pd_uinfo->dest_va = dst;
pd_uinfo->first_sd = fst_sd;
- pd_uinfo->num_sd = num_sd;
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
pd->dest = sd_dma;
/* setup scatter descriptor */
@@ -954,15 +963,10 @@ static int crypto4xx_sk_init(struct crypto_skcipher *sk)
if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
ctx->sw_cipher.cipher =
- crypto_alloc_skcipher(alg->base.cra_name, 0,
- CRYPTO_ALG_NEED_FALLBACK |
- CRYPTO_ALG_ASYNC);
+ crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher.cipher))
return PTR_ERR(ctx->sw_cipher.cipher);
-
- crypto_skcipher_set_reqsize(sk,
- sizeof(struct skcipher_request) + 32 +
- crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
}
amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
@@ -981,7 +985,7 @@ static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
crypto4xx_common_exit(ctx);
if (ctx->sw_cipher.cipher)
- crypto_free_skcipher(ctx->sw_cipher.cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
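
The force_sd workaround above reduces to one predicate: use the scatter buffers whenever CFB or OFB is selected and the data length is not a whole number of AES blocks. A standalone version of that check (the mode enum here is illustrative, not the hardware SA encoding):

#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16

enum mode { MODE_ECB, MODE_CBC, MODE_CFB, MODE_OFB, MODE_CTR };

/* Force the scatter path for CFB/OFB with a partial last block. */
static bool must_force_scatter(enum mode m, unsigned int datalen)
{
	return (m == MODE_CFB || m == MODE_OFB) &&
	       (datalen % AES_BLOCK_SIZE) != 0;
}

int main(void)
{
	printf("%d\n", must_force_scatter(MODE_CFB, 30));	/* 1 */
	printf("%d\n", must_force_scatter(MODE_CBC, 30));	/* 0 */
	return 0;
}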
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 18df695ca6b1..c624f8cd3d2e 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -64,7 +64,6 @@ union shadow_sa_buf {
struct pd_uinfo {
struct crypto4xx_device *dev;
u32 state;
- u32 using_sd;
u32 first_gd; /* first gather descriptor
used by this packet */
u32 num_gd; /* number of gather descriptors
@@ -131,7 +130,7 @@ struct crypto4xx_ctx {
__le32 iv_nonce;
u32 sa_len;
union {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
struct crypto_aead *aead;
} sw_cipher;
};
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 65bf1a299562..fa76620281e8 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -800,20 +800,14 @@ static int atmel_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct atmel_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- const char *alg_name;
-
- alg_name = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
+ u32 flags;
+ int err;
- /*
- * HW bug in cfb 3-keys mode.
- */
- if (!ctx->dd->caps.has_cfb_3keys && strstr(alg_name, "cfb")
- && (keylen != 2*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- } else if ((keylen != 2*DES_KEY_SIZE) && (keylen != 3*DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
@@ -1060,7 +1054,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2 * DES_KEY_SIZE,
+ .min_keysize = 3 * DES_KEY_SIZE,
.max_keysize = 3 * DES_KEY_SIZE,
.setkey = atmel_tdes_setkey,
.encrypt = atmel_tdes_ecb_encrypt,
@@ -1079,7 +1073,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
@@ -1088,86 +1082,6 @@ static struct crypto_alg tdes_algs[] = {
}
},
{
- .cra_name = "cfb(des3_ede)",
- .cra_driver_name = "atmel-cfb-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x7,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
- }
-},
-{
- .cra_name = "cfb8(des3_ede)",
- .cra_driver_name = "atmel-cfb8-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB8_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
- }
-},
-{
- .cra_name = "cfb16(des3_ede)",
- .cra_driver_name = "atmel-cfb16-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB16_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x1,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
- }
-},
-{
- .cra_name = "cfb32(des3_ede)",
- .cra_driver_name = "atmel-cfb32-tdes",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = CFB32_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct atmel_tdes_ctx),
- .cra_alignmask = 0x3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = atmel_tdes_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
- .max_keysize = 2*DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
- }
-},
-{
.cra_name = "ofb(des3_ede)",
.cra_driver_name = "atmel-ofb-tdes",
.cra_priority = 100,
@@ -1179,7 +1093,7 @@ static struct crypto_alg tdes_algs[] = {
.cra_module = THIS_MODULE,
.cra_init = atmel_tdes_cra_init,
.cra_u.ablkcipher = {
- .min_keysize = 2*DES_KEY_SIZE,
+ .min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = atmel_tdes_setkey,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 57e5dca3253f..d2fb72811442 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2247,8 +2247,6 @@ artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
hdesc->tfm = tfm_ctx->child_hash;
- hdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
tfm_ctx->hmac_key_length = blocksize;
ret = crypto_shash_digest(hdesc, key, keylen,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 28f592f7e1b7..25f8d3913ceb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1840,13 +1840,14 @@ static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
if (keylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)key;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
+ int ret;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_ablkcipher_get_flags(cipher);
+ ret = __des3_verify_key(&flags, key);
+ if (unlikely(ret)) {
crypto_ablkcipher_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
@@ -2139,7 +2140,6 @@ static int ahash_init(struct ahash_request *req)
goto err_hash;
}
ctx->shash->tfm = hash;
- ctx->shash->flags = 0;
/* Set the key using data we already have from setkey */
if (ctx->authkeylen > 0) {
@@ -2885,13 +2885,13 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
break;
case CIPHER_ALG_3DES:
if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
- const u32 *K = (const u32 *)keys.enckey;
- u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
+ u32 flags;
- if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
+ flags = crypto_aead_get_flags(cipher);
+ ret = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(ret)) {
crypto_aead_set_flags(cipher, flags);
- return -EINVAL;
+ return ret;
}
ctx->cipher_type = CIPHER_TYPE_3DES;
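
The open-coded test removed above rejects a three-key 3DES key whose first and second, or second and third, 64-bit halves are identical, which would degrade 3DES towards single DES; __des3_verify_key() centralizes that kind of check. The same XOR trick as a standalone sketch (helper name and test vectors are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE 8

/* Returns 1 when K1 == K2 or K2 == K3 in a 24-byte 3DES key. */
static int des3_key_is_degenerate(const uint8_t *key /* 24 bytes */)
{
	uint32_t K[6];

	memcpy(K, key, sizeof(K));
	return !((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
	       !((K[2] ^ K[4]) | (K[3] ^ K[5]));
}

int main(void)
{
	uint8_t bad[3 * DES_KEY_SIZE] = { 0 };	/* K1 == K2 == K3 */
	uint8_t good[3 * DES_KEY_SIZE] = { 0 };

	good[0] = 1;				/* make K1 differ from K2 */
	good[8] = 2;				/* make K2 differ from K3 */

	printf("%d %d\n", des3_key_is_degenerate(bad),
	       des3_key_is_degenerate(good));	/* prints: 1 0 */
	return 0;
}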
diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c
index dbb5c03dde49..2baf6d7f2c1d 100644
--- a/drivers/crypto/bcm/spu.c
+++ b/drivers/crypto/bcm/spu.c
@@ -22,9 +22,6 @@
#include "spum.h"
#include "cipher.h"
-/* This array is based on the hash algo type supported in spu.h */
-char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };
-
char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };
diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
index d8cda5fb75ad..91ec56399d84 100644
--- a/drivers/crypto/bcm/util.c
+++ b/drivers/crypto/bcm/util.c
@@ -242,7 +242,6 @@ int do_shash(unsigned char *name, unsigned char *result,
goto do_shash_err;
}
sdesc->shash.tfm = hash;
- sdesc->shash.flags = 0x0;
if (key_len > 0) {
rc = crypto_shash_setkey(hash, key, key_len);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 579578498deb..c0ece44f303b 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -89,6 +89,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -638,6 +639,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
@@ -2019,6 +2053,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -2037,6 +2072,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -2056,6 +2092,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* single-pass ipsec_esp descriptor */
@@ -2457,7 +2494,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2479,7 +2516,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2502,7 +2539,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2525,7 +2562,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2548,7 +2585,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2571,7 +2608,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2594,7 +2631,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2617,7 +2654,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2640,7 +2677,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2663,7 +2700,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2686,7 +2723,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2709,7 +2746,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -3301,6 +3338,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -3323,6 +3361,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
};
@@ -3384,8 +3423,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -3460,7 +3498,7 @@ static int __init caam_algapi_init(void)
u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
u32 arc4_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- bool registered = false;
+ bool registered = false, gcm_support;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -3493,7 +3531,7 @@ static int __init caam_algapi_init(void)
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
if (priv->era < 10) {
- u32 cha_vid, cha_inst;
+ u32 cha_vid, cha_inst, aes_rn;
cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
@@ -3508,6 +3546,10 @@ static int __init caam_algapi_init(void)
CHA_ID_LS_ARC4_SHIFT;
ccha_inst = 0;
ptha_inst = 0;
+
+ aes_rn = rd_reg32(&priv->ctrl->perfmon.cha_rev_ls) &
+ CHA_ID_LS_AES_MASK;
+ gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8);
} else {
u32 aesa, mdha;
@@ -3523,6 +3565,8 @@ static int __init caam_algapi_init(void)
ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
+
+ gcm_support = aesa & CHA_VER_MISC_AES_GCM;
}
/* If MD is present, limit digest size based on LP256 */
@@ -3595,11 +3639,9 @@ static int __init caam_algapi_init(void)
if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
continue;
- /*
- * Check support for AES algorithms not available
- * on LP devices.
- */
- if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ /* Skip GCM algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_AES &&
+ alg_aai == OP_ALG_AAI_GCM && !gcm_support)
continue;
/*
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c61921d32489..d290d6b41825 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -36,6 +36,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -292,6 +293,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -667,6 +701,13 @@ badkey:
return -EINVAL;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1382,7 +1423,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1483,6 +1524,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1501,6 +1543,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1520,6 +1563,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1798,7 +1842,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1820,7 +1864,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1843,7 +1887,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1866,7 +1910,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1889,7 +1933,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1912,7 +1956,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1935,7 +1979,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1958,7 +2002,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1981,7 +2025,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2004,7 +2048,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2027,7 +2071,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2050,7 +2094,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2393,8 +2437,7 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam,
- alg->setkey == aead_setkey);
+ return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
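
des3_skcipher_setkey() above uses the GNU C "x ?: y" shorthand: x is evaluated once and returned if non-zero, otherwise y is returned, i.e. "return the verification error if there is one, else the normal setkey result". A tiny standalone demonstration (gcc/clang extension; the helpers are made up):

#include <stdio.h>

static int verify(int bad)  { return bad ? -22 : 0; }	/* -EINVAL-like */
static int do_setkey(void)  { return 0; }

/* "a ?: b" returns a when a is non-zero, otherwise b. */
static int setkey(int bad_key)
{
	return verify(bad_key) ?: do_setkey();
}

int main(void)
{
	printf("%d %d\n", setkey(1), setkey(0));	/* prints: -22 0 */
	return 0;
}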
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index c2c1abc68f81..2b2980a8a9b9 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -42,6 +42,7 @@ struct caam_alg_entry {
int class2_alg_type;
bool rfc3686;
bool geniv;
+ bool nodkp;
};
struct caam_aead_alg {
@@ -323,6 +324,39 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
bool encrypt)
{
@@ -938,6 +972,13 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key, unsigned int keylen)
+{
+ return unlikely(des3_verify_key(skcipher, key)) ?:
+ skcipher_setkey(skcipher, key, keylen);
+}
+
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{
@@ -1440,7 +1481,7 @@ static int caam_cra_init_aead(struct crypto_aead *tfm)
crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
- alg->setkey == aead_setkey);
+ !caam_alg->caam.nodkp);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -1484,7 +1525,7 @@ static struct caam_skcipher_alg driver_algs[] = {
.cra_driver_name = "cbc-3des-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = skcipher_setkey,
+ .setkey = des3_skcipher_setkey,
.encrypt = skcipher_encrypt,
.decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1601,6 +1642,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
{
@@ -1619,6 +1661,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
},
},
/* Galois Counter Mode */
@@ -1638,6 +1681,7 @@ static struct caam_aead_alg driver_aeads[] = {
},
.caam = {
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ .nodkp = true,
}
},
/* single-pass ipsec_esp descriptor */
@@ -1916,7 +1960,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1938,7 +1982,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1961,7 +2005,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -1984,7 +2028,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2007,7 +2051,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2030,7 +2074,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2053,7 +2097,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2076,7 +2120,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2099,7 +2143,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2122,7 +2166,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2145,7 +2189,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2168,7 +2212,7 @@ static struct caam_aead_alg driver_aeads[] = {
"cbc-des3_ede-caam-qi2",
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
},
- .setkey = aead_setkey,
+ .setkey = des3_aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
.decrypt = aead_decrypt,
@@ -2715,6 +2759,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2737,6 +2782,7 @@ static struct caam_aead_alg driver_aeads[] = {
OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
OP_ALG_AAI_AEAD,
+ .nodkp = true,
},
},
{
@@ -2854,6 +2900,7 @@ struct caam_hash_state {
struct caam_request caam_req;
dma_addr_t buf_dma;
dma_addr_t ctx_dma;
+ int ctx_dma_len;
u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
int buflen_0;
u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
@@ -2927,6 +2974,7 @@ static inline int ctx_map_to_qm_sg(struct device *dev,
struct caam_hash_state *state, int ctx_len,
struct dpaa2_sg_entry *qm_sg, u32 flag)
{
+ state->ctx_dma_len = ctx_len;
state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
if (dma_mapping_error(dev, state->ctx_dma)) {
dev_err(dev, "unable to map ctx\n");
@@ -3018,13 +3066,13 @@ static void split_key_sh_done(void *cbk_ctx, u32 err)
}
/* Digest the key if it is too large */
-static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
- u32 *keylen, u8 *key_out, u32 digestsize)
+static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
+ u32 digestsize)
{
struct caam_request *req_ctx;
u32 *desc;
struct split_key_sh_result result;
- dma_addr_t src_dma, dst_dma;
+ dma_addr_t key_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
int ret = -ENOMEM;
@@ -3041,17 +3089,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
if (!flc)
goto err_flc;
- src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
- DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, src_dma)) {
- dev_err(ctx->dev, "unable to map key input memory\n");
- goto err_src_dma;
- }
- dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, dst_dma)) {
- dev_err(ctx->dev, "unable to map key output memory\n");
- goto err_dst_dma;
+ key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(ctx->dev, key_dma)) {
+ dev_err(ctx->dev, "unable to map key memory\n");
+ goto err_key_dma;
}
desc = flc->sh_desc;
@@ -3076,14 +3117,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_addr(in_fle, key_dma);
dpaa2_fl_set_len(in_fle, *keylen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_addr(out_fle, key_dma);
dpaa2_fl_set_len(out_fle, digestsize);
print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
1);
@@ -3103,17 +3144,15 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
wait_for_completion(&result.completion);
ret = result.err;
print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ DUMP_PREFIX_ADDRESS, 16, 4, key,
digestsize, 1);
}
dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
DMA_TO_DEVICE);
err_flc_dma:
- dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
-err_dst_dma:
- dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
-err_src_dma:
+ dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
+err_key_dma:
kfree(flc);
err_flc:
kfree(req_ctx);
@@ -3135,12 +3174,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
- GFP_KERNEL | GFP_DMA);
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
if (!hashed_key)
return -ENOMEM;
- ret = hash_digest_key(ctx, key, &keylen, hashed_key,
- digestsize);
+ ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
if (ret)
goto bad_free_key;
key = hashed_key;
@@ -3165,14 +3202,12 @@ bad_free_key:
}
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len)
+ struct ahash_request *req)
{
struct caam_hash_state *state = ahash_request_ctx(req);
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
- if (edesc->dst_dma)
- dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
if (edesc->qm_sg_bytes)
dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
@@ -3187,18 +3222,15 @@ static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
static inline void ahash_unmap_ctx(struct device *dev,
struct ahash_edesc *edesc,
- struct ahash_request *req, int dst_len,
- u32 flag)
+ struct ahash_request *req, u32 flag)
{
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
- struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
if (state->ctx_dma) {
- dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
state->ctx_dma = 0;
}
- ahash_unmap(dev, edesc, req, dst_len);
+ ahash_unmap(dev, edesc, req);
}
static void ahash_done(void *cbk_ctx, u32 status)
@@ -3219,16 +3251,13 @@ static void ahash_done(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3250,7 +3279,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
switch_buf(state);
qi_cache_free(edesc);
@@ -3283,16 +3312,13 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
+ memcpy(req->result, state->caam_ctx, digestsize);
qi_cache_free(edesc);
print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- if (req->result)
- print_hex_dump_debug("result@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->result,
- digestsize, 1);
req->base.complete(&req->base, ecode);
}
@@ -3314,7 +3340,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
ecode = -EIO;
}
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
switch_buf(state);
qi_cache_free(edesc);
@@ -3452,7 +3478,7 @@ static int ahash_update_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3484,7 +3510,7 @@ static int ahash_final_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3503,22 +3529,13 @@ static int ahash_final_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3533,7 +3550,7 @@ static int ahash_final_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3586,7 +3603,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
sg_table = &edesc->sgt[0];
ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
@@ -3605,22 +3622,13 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
- ret = -ENOMEM;
- goto unmap_ctx;
- }
-
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[FINALIZE];
@@ -3635,7 +3643,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
qi_cache_free(edesc);
return ret;
}
@@ -3704,18 +3712,19 @@ static int ahash_digest(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
dpaa2_fl_set_final(in_fle, true);
dpaa2_fl_set_len(in_fle, req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3729,7 +3738,7 @@ static int ahash_digest(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3755,27 +3764,39 @@ static int ahash_final_no_ctx(struct ahash_request *req)
if (!edesc)
return ret;
- state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
- if (dma_mapping_error(ctx->dev, state->buf_dma)) {
- dev_err(ctx->dev, "unable to map src\n");
- goto unmap;
+ if (buflen) {
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
}
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
goto unmap;
}
memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
dpaa2_fl_set_final(in_fle, true);
- dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(in_fle, state->buf_dma);
- dpaa2_fl_set_len(in_fle, buflen);
+ /*
+ * The crypto engine requires an input entry to be present when a
+ * "frame list" FD is used.
+ * Since the engine does not support FMT=2'b11 (the unused entry type),
+ * leaving in_fle zeroized (except for the "Final" flag) is the best option.
+ */
+ if (buflen) {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ }
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -3790,7 +3811,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3870,6 +3891,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -3918,7 +3940,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -3983,11 +4005,12 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ state->ctx_dma_len = digestsize;
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
- dev_err(ctx->dev, "unable to map dst\n");
- edesc->dst_dma = 0;
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
ret = -ENOMEM;
goto unmap;
}
@@ -3998,7 +4021,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
- dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
dpaa2_fl_set_len(out_fle, digestsize);
req_ctx->flc = &ctx->flc[DIGEST];
@@ -4013,7 +4036,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
return ret;
unmap:
- ahash_unmap(ctx->dev, edesc, req, digestsize);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
qi_cache_free(edesc);
return -ENOMEM;
}
@@ -4100,6 +4123,7 @@ static int ahash_update_first(struct ahash_request *req)
scatterwalk_map_and_copy(next_buf, req->src, to_hash,
*next_buflen, 0);
+ state->ctx_dma_len = ctx->ctx_len;
state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
ctx->ctx_len, DMA_FROM_DEVICE);
if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
@@ -4143,7 +4167,7 @@ static int ahash_update_first(struct ahash_request *req)
return ret;
unmap_ctx:
- ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
qi_cache_free(edesc);
return ret;
}
@@ -4162,6 +4186,7 @@ static int ahash_init(struct ahash_request *req)
state->final = ahash_final_no_ctx;
state->ctx_dma = 0;
+ state->ctx_dma_len = 0;
state->current_buf = 0;
state->buf_dma = 0;
state->buflen_0 = 0;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index 20890780fb82..be5085451053 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -162,14 +162,12 @@ struct skcipher_edesc {
/*
* ahash_edesc - s/w-extended ahash descriptor
- * @dst_dma: I/O virtual address of req->result
* @qm_sg_dma: I/O virtual address of h/w link table
* @src_nents: number of segments in input scatterlist
* @qm_sg_bytes: length of dma mapped qm_sg space
* @sgt: pointer to h/w link table
*/
struct ahash_edesc {
- dma_addr_t dst_dma;
dma_addr_t qm_sg_dma;
int src_nents;
int qm_sg_bytes;
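
The hunks above stop DMA-mapping req->result directly (the removed dst_dma field) and instead point the output frame-list entry at the driver-owned HW context buffer, tracked by state->ctx_dma and the new ctx_dma_len. The counterpart, not shown in this excerpt, is that the completion path copies the digest out of that DMA-safe buffer into req->result after unmapping. A minimal sketch of that pattern, assuming the field names used above (function name and parameter passing are illustrative, not the driver's actual callback):

#include <crypto/hash.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

/*
 * Sketch only: the digest lands in the driver's DMA-safe context buffer
 * and is copied into req->result after the mapping is torn down.
 */
static void ahash_done_copy_sketch(struct device *dev,
				   struct ahash_request *req,
				   void *caam_ctx, dma_addr_t ctx_dma,
				   u32 ctx_dma_len, int digestsize)
{
	/* release the mapping set up before the job was issued */
	dma_unmap_single(dev, ctx_dma, ctx_dma_len, DMA_FROM_DEVICE);

	/* hand the digest back from driver-owned memory */
	memcpy(req->result, caam_ctx, digestsize);
}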
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 58285642306e..fe24485274e1 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -994,8 +994,6 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg caam_rsa = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
- .sign = caam_rsa_dec,
- .verify = caam_rsa_enc,
.set_pub_key = caam_rsa_set_pub_key,
.set_priv_key = caam_rsa_set_priv_key,
.max_size = caam_rsa_max_size,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 858bdc9ab4a3..e2ba3d202da5 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -468,6 +468,24 @@ static int caam_get_era(struct caam_ctrl __iomem *ctrl)
return caam_get_era_from_hw(ctrl);
}
+/*
+ * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6DQ)
+ * have an issue wherein AXI bus transactions may not occur in the correct
+ * order. This isn't a problem running single descriptors, but can be if
+ * running multiple concurrent descriptors. Reworking the driver to throttle
+ * to single requests is impractical, thus the workaround is to limit the AXI
+ * pipeline to a depth of 1 (from its default of 4) to preclude this situation
+ * from occurring.
+ */
+static void handle_imx6_err005766(u32 *mcr)
+{
+ if (of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl") ||
+ of_machine_is_compatible("fsl,imx6qp"))
+ clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
+ 1 << MCFGR_AXIPIPE_SHIFT);
+}
+
static const struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
@@ -640,6 +658,8 @@ static int caam_probe(struct platform_device *pdev)
(sizeof(dma_addr_t) == sizeof(u64) ?
MCFGR_LONG_PTR : 0));
+ handle_imx6_err005766(&ctrl->mcr);
+
/*
* Read the Compile Time parameters and SCFGR to determine
* if Virtualization is enabled for this platform
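
The workaround above boils down to a read-modify-write of the MCFGR register: clear the AXIPIPE field and program a depth of 1. A stand-alone sketch of that clear-then-set operation (the shift and mask values are assumptions for illustration, not copied from regs.h):

#include <linux/types.h>

/* illustrative constants -- see the MCFGR_AXIPIPE_* definitions in the driver */
#define AXIPIPE_SHIFT	4
#define AXIPIPE_MASK	(0xf << AXIPIPE_SHIFT)

static void limit_axi_pipeline_sketch(u32 *mcr)
{
	u32 v = *mcr;			/* read MCFGR                  */

	v &= ~AXIPIPE_MASK;		/* clear the AXIPIPE field     */
	v |= 1 << AXIPIPE_SHIFT;	/* depth of 1, per the erratum */
	*mcr = v;			/* write back                  */
}

The real clrsetbits_32() performs the same clear-then-set through the driver's MMIO accessors rather than a plain pointer dereference.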
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 21a70fd32f5d..4da844e4b61d 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -22,7 +22,7 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
size_t len;
void *buf;
- for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
+ for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
/*
* make sure the scatterlist's page
* has a valid virtual memory mapping
@@ -138,7 +138,7 @@ static const struct {
{ 0x46, "Annotation length exceeds offset (reuse mode)"},
{ 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
{ 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
- { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
+ { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
{ 0x51, "Unsupported IF reuse mode"},
{ 0x52, "Unsupported FL use mode"},
{ 0x53, "Unsupported RJD use mode"},
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 5869ad58d497..3392615dc91b 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -49,13 +49,11 @@ struct caam_drv_private_jr {
atomic_t tfm_count ____cacheline_aligned;
/* Job ring info */
- int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
- int inp_ring_write_index; /* Input index "tail" */
+ u32 inpring_avail; /* Number of free entries in input ring */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
- spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d50085a03597..1de2562d0982 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -170,13 +170,13 @@ static void caam_jr_dequeue(unsigned long devarg)
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
+ u32 outring_used = 0;
- while (rd_reg32(&jrp->rregs->outring_used)) {
+ while (outring_used ||
+ (outring_used = rd_reg32(&jrp->rregs->outring_used))) {
head = READ_ONCE(jrp->head);
- spin_lock(&jrp->outlock);
-
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
@@ -199,7 +199,7 @@ static void caam_jr_dequeue(unsigned long devarg)
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;
- /* Stash callback params for use outside of lock */
+ /* Stash callback params */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
@@ -232,10 +232,9 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}
- spin_unlock(&jrp->outlock);
-
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
+ outring_used--;
}
/* reenable / unmask IRQs */
@@ -345,7 +344,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head = jrp->head;
tail = READ_ONCE(jrp->tail);
- if (!rd_reg32(&jrp->rregs->inpring_avail) ||
+ if (!jrp->inpring_avail ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
@@ -359,7 +358,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
- jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);
+ jrp->inpring[head] = cpu_to_caam_dma(desc_dma);
/*
* Guarantee that the descriptor's DMA address has been written to
@@ -368,18 +367,22 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
*/
smp_wmb();
- jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
- (JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
/*
* Ensure that all job information has been written before
- * notifying CAAM that a new job was added to the input ring.
+ * notifying CAAM that a new job was added to the input ring.
+ * No explicit barrier is needed here: wr_reg32() uses the
+ * iowrite32() API, and iowrite32() issues a memory barrier
+ * before the write operation.
*/
- wmb();
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+ jrp->inpring_avail--;
+ if (!jrp->inpring_avail)
+ jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);
+
spin_unlock_bh(&jrp->inplock);
return 0;
@@ -431,7 +434,6 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
- jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
@@ -441,10 +443,9 @@ static int caam_jr_init(struct device *dev)
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
- jrp->ringsize = JOBR_DEPTH;
+ jrp->inpring_avail = JOBR_DEPTH;
spin_lock_init(&jrp->inplock);
- spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
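
With the hunks above, the enqueue path no longer reads the inpring_avail register on every submission: a cached count is decremented locally and refreshed from hardware only once it drains to zero, and the descriptor slot is indexed by the existing software head instead of a separate write index. A self-contained sketch of that producer pattern, with illustrative names and plain pointers standing in for the MMIO accessors:

#include <linux/types.h>

struct ring_sketch {
	u32 avail;	/* cached free entries      */
	u32 head;	/* software write index     */
	u32 depth;	/* ring size, power of two  */
};

static int ring_enqueue_sketch(struct ring_sketch *r, dma_addr_t desc_dma,
			       dma_addr_t *inpring, u32 *hw_avail_reg,
			       u32 *hw_jobadd_reg)
{
	if (!r->avail)
		return -EBUSY;			/* ring looks full        */

	inpring[r->head] = desc_dma;		/* publish the descriptor */
	r->head = (r->head + 1) & (r->depth - 1);
	*hw_jobadd_reg = 1;			/* tell hardware: +1 job  */

	/* re-read the hardware count only when the cache runs out */
	if (!--r->avail)
		r->avail = *hw_avail_reg;

	return 0;
}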
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 7cb8b1755e57..9f08f84cca59 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
/* Create a new req FQ in parked state */
new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
drv_ctx->context_a, 0);
- if (IS_ERR_OR_NULL(new_fq)) {
+ if (IS_ERR(new_fq)) {
dev_err(qidev, "FQ allocation for shdesc update failed\n");
return PTR_ERR(new_fq);
}
@@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
/* Attach request FQ */
drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
QMAN_INITFQ_FLAG_SCHED);
- if (IS_ERR_OR_NULL(drv_ctx->req_fq)) {
+ if (IS_ERR(drv_ctx->req_fq)) {
dev_err(qidev, "create_caam_req_fq failed\n");
dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
kfree(drv_ctx);
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 3cd0822ea819..8591914d5c51 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -253,6 +253,9 @@ struct version_regs {
#define CHA_VER_VID_SHIFT 24
#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+/* CHA Miscellaneous Information - AESA_MISC specific */
+#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 600336d169a9..9810ad8ac519 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -10,7 +10,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
-#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
@@ -327,27 +326,36 @@ static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_CBC);
}
static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
u32 keylen)
{
+ u32 flags = crypto_ablkcipher_get_flags(cipher);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
-
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
- sizeof(struct ablkcipher_request);
- /* Additional memory for ablkcipher_request is
- * allocated since the cryptd daemon uses
- * this memory for request_ctx information
- */
-
+ tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
return 0;
}
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 2ca431ed1db8..88a0166f5477 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -641,7 +641,7 @@ static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}
-void cptvf_device_init(struct cpt_vf *cptvf)
+static void cptvf_device_init(struct cpt_vf *cptvf)
{
u64 base_addr = 0;
diff --git a/drivers/crypto/cavium/cpt/cptvf_mbox.c b/drivers/crypto/cavium/cpt/cptvf_mbox.c
index d5ec3b8a9e61..4f438eceb506 100644
--- a/drivers/crypto/cavium/cpt/cptvf_mbox.c
+++ b/drivers/crypto/cavium/cpt/cptvf_mbox.c
@@ -17,23 +17,6 @@ static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
mbx->data);
}
-/* ACKs PF's mailbox message
- */
-void cptvf_mbox_send_ack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
-/* NACKs PF's mailbox message that VF is not able to
- * complete the action
- */
-void cptvf_mbox_send_nack(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
-{
- mbx->msg = CPT_MBOX_MSG_TYPE_NACK;
- cptvf_send_msg_to_pf(cptvf, mbx);
-}
-
/* Interrupt handler to handle mailbox messages from VFs */
void cptvf_handle_mbox_intr(struct cpt_vf *cptvf)
{
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index ca549c5dc08e..f16f61504241 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -223,7 +223,7 @@ scatter_gather_clean:
return ret;
}
-int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
+static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
u32 qno)
{
struct pci_dev *pdev = cptvf->pdev;
@@ -270,7 +270,7 @@ int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
return ret;
}
-void do_request_cleanup(struct cpt_vf *cptvf,
+static void do_request_cleanup(struct cpt_vf *cptvf,
struct cpt_info_buffer *info)
{
int i;
@@ -316,7 +316,7 @@ void do_request_cleanup(struct cpt_vf *cptvf,
kzfree(info);
}
-void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
+static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
struct pci_dev *pdev = cptvf->pdev;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 4f43eacd2557..e4841eb2a09f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -18,26 +18,6 @@
#define GCM_AES_SALT_SIZE 4
-/**
- * struct nitrox_crypt_params - Params to set nitrox crypto request.
- * @cryptlen: Encryption/Decryption data length
- * @authlen: Assoc data length + Cryptlen
- * @srclen: Input buffer length
- * @dstlen: Output buffer length
- * @iv: IV data
- * @ivsize: IV data length
- * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
- */
-struct nitrox_crypt_params {
- unsigned int cryptlen;
- unsigned int authlen;
- unsigned int srclen;
- unsigned int dstlen;
- u8 *iv;
- int ivsize;
- u8 ctrl_arg;
-};
-
union gph_p3 {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -94,36 +74,40 @@ static int nitrox_aead_setauthsize(struct crypto_aead *aead,
return 0;
}
-static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *src, char *iv, int ivsize,
int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->src, buflen) + 1;
+ int nents = sg_nents_for_len(src, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV entry */
+ nents += 1;
/* Allocate buffer to hold IV and input scatterlist array */
ret = alloc_src_req_buf(nkreq, nents, ivsize);
if (ret)
return ret;
nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
- nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen);
return 0;
}
-static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq,
+ struct scatterlist *dst, int ivsize, int buflen)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+ int nents = sg_nents_for_len(dst, buflen);
int ret;
if (nents < 0)
return nents;
+ /* IV, ORH, COMPLETION entries */
+ nents += 3;
/* Allocate buffer to hold ORH, COMPLETION and output scatterlist
* array
*/
@@ -133,61 +117,54 @@ static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
nitrox_creq_set_orh(nkreq);
nitrox_creq_set_comp(nkreq);
- nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen);
return 0;
}
-static void free_src_sglist(struct aead_request *areq)
+static void free_src_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->src);
}
-static void free_dst_sglist(struct aead_request *areq)
+static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
-
kfree(nkreq->dst);
}
-static int nitrox_set_creq(struct aead_request *areq,
- struct nitrox_crypt_params *params)
+static int nitrox_set_creq(struct nitrox_aead_rctx *rctx)
{
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
union gph_p3 param3;
- struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
int ret;
- creq->flags = areq->base.flags;
- creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
+ creq->flags = rctx->flags;
+ creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
+ GFP_ATOMIC;
creq->ctrl.value = 0;
creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
- creq->ctrl.s.arg = params->ctrl_arg;
+ creq->ctrl.s.arg = rctx->ctrl_arg;
- creq->gph.param0 = cpu_to_be16(params->cryptlen);
- creq->gph.param1 = cpu_to_be16(params->authlen);
- creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+ creq->gph.param0 = cpu_to_be16(rctx->cryptlen);
+ creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen);
+ creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen);
param3.iv_offset = 0;
- param3.auth_offset = params->ivsize;
+ param3.auth_offset = rctx->ivsize;
creq->gph.param3 = cpu_to_be16(param3.param);
- creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctx_handle = rctx->ctx_handle;
creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
- ret = alloc_src_sglist(areq, params->iv, params->ivsize,
- params->srclen);
+ ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize,
+ rctx->srclen);
if (ret)
return ret;
- ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+ ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize,
+ rctx->dstlen);
if (ret) {
- free_src_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
return ret;
}
@@ -197,9 +174,10 @@ static int nitrox_set_creq(struct aead_request *areq,
static void nitrox_aead_callback(void *arg, int err)
{
struct aead_request *areq = arg;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
- free_src_sglist(areq);
- free_dst_sglist(areq);
+ free_src_sglist(&rctx->nkreq);
+ free_dst_sglist(&rctx->nkreq);
if (err) {
pr_err_ratelimited("request failed status 0x%0x\n", err);
err = -EINVAL;
@@ -212,23 +190,25 @@ static int nitrox_aes_gcm_enc(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = params.authlen;
- params.dstlen = params.srclen + aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = ENCRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->assoclen + areq->cryptlen;
+ rctx->dstlen = rctx->srclen + aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = ENCRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -241,23 +221,25 @@ static int nitrox_aes_gcm_dec(struct aead_request *areq)
{
struct crypto_aead *aead = crypto_aead_reqtfm(areq);
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
- struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
- struct se_crypto_request *creq = &nkreq->creq;
+ struct nitrox_aead_rctx *rctx = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &rctx->nkreq.creq;
struct flexi_crypto_context *fctx = nctx->u.fctx;
- struct nitrox_crypt_params params;
int ret;
memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
- memset(&params, 0, sizeof(params));
- params.cryptlen = areq->cryptlen - aead->authsize;
- params.authlen = areq->assoclen + params.cryptlen;
- params.srclen = areq->cryptlen + areq->assoclen;
- params.dstlen = params.srclen - aead->authsize;
- params.iv = &areq->iv[GCM_AES_SALT_SIZE];
- params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
- params.ctrl_arg = DECRYPT;
- ret = nitrox_set_creq(areq, &params);
+ rctx->cryptlen = areq->cryptlen - aead->authsize;
+ rctx->assoclen = areq->assoclen;
+ rctx->srclen = areq->cryptlen + areq->assoclen;
+ rctx->dstlen = rctx->srclen - aead->authsize;
+ rctx->iv = &areq->iv[GCM_AES_SALT_SIZE];
+ rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ rctx->flags = areq->base.flags;
+ rctx->ctx_handle = nctx->u.ctx_handle;
+ rctx->src = areq->src;
+ rctx->dst = areq->dst;
+ rctx->ctrl_arg = DECRYPT;
+ ret = nitrox_set_creq(rctx);
if (ret)
return ret;
@@ -290,7 +272,7 @@ static int nitrox_aead_init(struct crypto_aead *aead)
return 0;
}
-static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+static int nitrox_gcm_common_init(struct crypto_aead *aead)
{
int ret;
struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
@@ -308,8 +290,20 @@ static int nitrox_aes_gcm_init(struct crypto_aead *aead)
flags->w0.auth_input_type = 1;
flags->f = be64_to_cpu(flags->f);
- crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
- sizeof(struct nitrox_kcrypt_request));
+ return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead,
+ sizeof(struct aead_request) +
+ sizeof(struct nitrox_aead_rctx));
return 0;
}
@@ -332,6 +326,166 @@ static void nitrox_aead_exit(struct crypto_aead *aead)
nctx->ndev = NULL;
}
+static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ int ret;
+
+ if (keylen < GCM_AES_SALT_SIZE)
+ return -EINVAL;
+
+ keylen -= GCM_AES_SALT_SIZE;
+ ret = nitrox_aes_gcm_setkey(aead, key, keylen);
+ if (ret)
+ return ret;
+
+ memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE);
+ return 0;
+}
+
+static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return nitrox_aead_setauthsize(aead, authsize);
+}
+
+static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq)
+{
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ struct scatterlist *sg;
+
+ if (areq->assoclen != 16 && areq->assoclen != 20)
+ return -EINVAL;
+
+ scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0);
+ sg_init_table(rctx->src, 3);
+ sg_set_buf(rctx->src, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen);
+ if (sg != rctx->src + 1)
+ sg_chain(rctx->src, 2, sg);
+
+ if (areq->src != areq->dst) {
+ sg_init_table(rctx->dst, 3);
+ sg_set_buf(rctx->dst, rctx->assoc, assoclen);
+ sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen);
+ if (sg != rctx->dst + 1)
+ sg_chain(rctx->dst, 2, sg);
+ }
+
+ aead_rctx->src = rctx->src;
+ aead_rctx->dst = (areq->src == areq->dst) ? rctx->src : rctx->dst;
+
+ return 0;
+}
+
+static void nitrox_rfc4106_callback(void *arg, int err)
+{
+ struct aead_request *areq = arg;
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq;
+
+ free_src_sglist(nkreq);
+ free_dst_sglist(nkreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_rfc4106_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen;
+ aead_rctx->dstlen = aead_rctx->srclen + aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = ENCRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq);
+ struct nitrox_aead_rctx *aead_rctx = &rctx->base;
+ struct se_crypto_request *creq = &aead_rctx->nkreq.creq;
+ int ret;
+
+ aead_rctx->cryptlen = areq->cryptlen - aead->authsize;
+ aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE;
+ aead_rctx->srclen =
+ areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen;
+ aead_rctx->dstlen = aead_rctx->srclen - aead->authsize;
+ aead_rctx->iv = areq->iv;
+ aead_rctx->ivsize = GCM_RFC4106_IV_SIZE;
+ aead_rctx->flags = areq->base.flags;
+ aead_rctx->ctx_handle = nctx->u.ctx_handle;
+ aead_rctx->ctrl_arg = DECRYPT;
+
+ ret = nitrox_rfc4106_set_aead_rctx_sglist(areq);
+ if (ret)
+ return ret;
+
+ ret = nitrox_set_creq(aead_rctx);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_rfc4106_callback, areq);
+}
+
+static int nitrox_rfc4106_init(struct crypto_aead *aead)
+{
+ int ret;
+
+ ret = nitrox_gcm_common_init(aead);
+ if (ret)
+ return ret;
+
+ crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_rfc4106_rctx));
+
+ return 0;
+}
+
static struct aead_alg nitrox_aeads[] = { {
.base = {
.cra_name = "gcm(aes)",
@@ -351,6 +505,25 @@ static struct aead_alg nitrox_aeads[] = { {
.exit = nitrox_aead_exit,
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
+}, {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "n5_rfc4106",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .setkey = nitrox_rfc4106_setkey,
+ .setauthsize = nitrox_rfc4106_setauthsize,
+ .encrypt = nitrox_rfc4106_enc,
+ .decrypt = nitrox_rfc4106_dec,
+ .init = nitrox_rfc4106_init,
+ .exit = nitrox_aead_exit,
+ .ivsize = GCM_RFC4106_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
} };
int nitrox_register_aeads(void)
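
For the new rfc4106(gcm(aes)) entry, the code above follows the usual RFC 4106 conventions: setkey strips the trailing 4-byte salt into the context, each request carries an 8-byte explicit IV (GCM_RFC4106_IV_SIZE), and assoclen is reduced accordingly, so the effective 12-byte GCM nonce is the salt followed by the explicit IV. A small sketch of that nonce assembly (helper name and constants are illustrative):

#include <linux/string.h>
#include <linux/types.h>

#define RFC4106_SALT_SIZE	4	/* taken from the end of the key */
#define RFC4106_IV_SIZE		8	/* explicit IV sent per request  */

/* sketch: assemble the 12-byte nonce the engine actually uses */
static void rfc4106_build_nonce(u8 *nonce, const u8 *salt, const u8 *iv)
{
	memcpy(nonce, salt, RFC4106_SALT_SIZE);
	memcpy(nonce + RFC4106_SALT_SIZE, iv, RFC4106_IV_SIZE);
}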
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index c08d9f33a3b1..3f0df60267a9 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -437,6 +437,45 @@ void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
}
+static const char *get_core_option(u8 se_cores, u8 ae_cores)
+{
+ const char *option = "";
+
+ if (ae_cores == AE_MAX_CORES) {
+ switch (se_cores) {
+ case SE_MAX_CORES:
+ option = "60";
+ break;
+ case 40:
+ option = "60s";
+ break;
+ }
+ } else if (ae_cores == (AE_MAX_CORES / 2)) {
+ option = "30";
+ } else {
+ option = "60i";
+ }
+
+ return option;
+}
+
+static const char *get_feature_option(u8 zip_cores, int core_freq)
+{
+ if (zip_cores == 0)
+ return "";
+ else if (zip_cores < ZIP_MAX_CORES)
+ return "-C15";
+
+ if (core_freq >= 850)
+ return "-C45";
+ else if (core_freq >= 750)
+ return "-C35";
+ else if (core_freq >= 550)
+ return "-C25";
+
+ return "";
+}
+
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
union emu_fuse_map emu_fuse;
@@ -469,24 +508,14 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
}
- /* determine the partname CNN55<cores>-<freq><pincount>-<rev>*/
- if (ndev->hw.ae_cores == AE_MAX_CORES) {
- switch (ndev->hw.se_cores) {
- case SE_MAX_CORES:
- i = snprintf(name, sizeof(name), "CNN5560");
- break;
- case 40:
- i = snprintf(name, sizeof(name), "CNN5560s");
- break;
- }
- } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
- i = snprintf(name, sizeof(name), "CNN5530");
- } else {
- i = snprintf(name, sizeof(name), "CNN5560i");
- }
-
- snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
- ndev->hw.freq, ndev->hw.revision_id);
+ /* determine the partname
+ * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
+ */
+ snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
+ get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
+ ndev->hw.freq,
+ get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
+ ndev->hw.revision_id);
/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
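
As a worked example of the new format string ("CNN55%s-%3dBG676%s-1.%u"): a fully populated part (maximum SE, AE and ZIP cores) running at 800 MHz with revision_id 1 would be reported as "CNN5560-800BG676-C35-1.1", since get_core_option() yields "60" and get_feature_option() selects "-C35" for a core frequency of at least 750 MHz.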
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index 76c0f0be7233..efdbd0fc3e3b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -212,6 +212,50 @@ struct nitrox_kcrypt_request {
};
/**
+ * struct nitrox_aead_rctx - AEAD request context
+ * @nkreq: Base request context
+ * @cryptlen: Encryption/Decryption data length
+ * @assoclen: AAD length
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @flags: AEAD req flags
+ * @ctx_handle: Device context handle
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_aead_rctx {
+ struct nitrox_kcrypt_request nkreq;
+ unsigned int cryptlen;
+ unsigned int assoclen;
+ unsigned int srclen;
+ unsigned int dstlen;
+ u8 *iv;
+ int ivsize;
+ u32 flags;
+ u64 ctx_handle;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ u8 ctrl_arg;
+};
+
+/**
+ * struct nitrox_rfc4106_rctx - rfc4106 cipher request context
+ * @base: AEAD request context
+ * @src: Source sglist
+ * @dst: Destination sglist
+ * @assoc: AAD
+ */
+struct nitrox_rfc4106_rctx {
+ struct nitrox_aead_rctx base;
+ struct scatterlist src[3];
+ struct scatterlist dst[3];
+ u8 assoc[20];
+};
+
+/**
* struct pkt_instr_hdr - Packet Instruction Header
* @g: Gather used
* When [G] is set and [GSZ] != 0, the instruction is
@@ -512,7 +556,7 @@ static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
struct scatterlist *sg = to_sg;
unsigned int sglen;
- for (; buflen; buflen -= sglen) {
+ for (; buflen && from_sg; buflen -= sglen) {
sglen = from_sg->length;
if (sglen > buflen)
sglen = buflen;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4c97478d44bd..5826c2c98a50 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -303,8 +303,6 @@ static void post_se_instr(struct nitrox_softreq *sr,
/* Ring doorbell with count 1 */
writeq(1, cmdq->dbell_csr_addr);
- /* orders the doorbell rings */
- mmiowb();
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
@@ -599,8 +597,6 @@ void pkt_slc_resp_tasklet(unsigned long data)
* MSI-X interrupt generates if Completion count > Threshold
*/
writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
- /* order the writes */
- mmiowb();
if (atomic_read(&cmdq->backlog_count))
schedule_work(&cmdq->backlog_qflush);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index d4935d6cefdd..7e4a5e69085e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -257,12 +257,8 @@ static int nitrox_aes_decrypt(struct skcipher_request *skreq)
static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+ return unlikely(des3_verify_key(cipher, key)) ?:
+ nitrox_skcipher_setkey(cipher, 0, key, keylen);
}
static int nitrox_3des_encrypt(struct skcipher_request *skreq)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index b92b6e7e100f..4985bc812b0e 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -69,7 +69,7 @@ static void zip_static_init_zip_ops(struct zip_operation *zip_ops,
zip_ops->csum = 1; /* Adler checksum desired */
}
-int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
+static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp;
@@ -107,7 +107,7 @@ err_comp_input:
return -ENOMEM;
}
-void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
+static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *comp_ctx = &zip_ctx->zip_comp;
struct zip_operation *dec_ctx = &zip_ctx->zip_decomp;
@@ -119,7 +119,7 @@ void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx)
zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE);
}
-int zip_compress(const u8 *src, unsigned int slen,
+static int zip_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
@@ -155,7 +155,7 @@ int zip_compress(const u8 *src, unsigned int slen,
return ret;
}
-int zip_decompress(const u8 *src, unsigned int slen,
+static int zip_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen,
struct zip_kernel_ctx *zip_ctx)
{
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index c2ff551d215b..91482ffcac59 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -43,24 +43,11 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
struct ccp_crypto_ablkcipher_alg *alg =
ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
u32 *flags = &tfm->base.crt_flags;
+ int err;
-
- /* From des_generic.c:
- *
- * RFC2451:
- * If the first two or last two independent 64-bit keys are
- * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
- * same as DES. Implementers MUST reject keys that exhibit this
- * property.
- */
- const u32 *K = (const u32 *)key;
-
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ err = __des3_verify_key(flags, key);
+ if (unlikely(err))
+ return err;
/* It's not clear that there is any support for a keysize of 112.
* If needed, the caller should make K1 == K3
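
The comparison this hunk deletes is the RFC 2451 degenerate-key test: if K1 == K2 or K2 == K3, 3DES collapses to single DES and the key must be rejected when weak keys are forbidden. The same test, now performed inside __des3_verify_key(), can be sketched in isolation (flag handling and the helper's additional checks are omitted):

#include <linux/types.h>

/* sketch of the K1==K2 / K2==K3 test from RFC 2451 */
static bool des3_key_is_degenerate(const u32 *K)
{
	return !((K[0] ^ K[2]) | (K[1] ^ K[3])) ||	/* K1 == K2 */
	       !((K[2] ^ K[4]) | (K[3] ^ K[5]));	/* K2 == K3 */
}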
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 05850dfd7940..a2570c0c8cdc 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -37,10 +37,9 @@ static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen,
if (buf[nskip])
break;
*kplen = sz - nskip;
- *kpbuf = kzalloc(*kplen, GFP_KERNEL);
+ *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL);
if (!*kpbuf)
return -ENOMEM;
- memcpy(*kpbuf, buf + nskip, *kplen);
return 0;
}
@@ -214,8 +213,6 @@ static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg ccp_rsa_defaults = {
.encrypt = ccp_rsa_encrypt,
.decrypt = ccp_rsa_decrypt,
- .sign = ccp_rsa_decrypt,
- .verify = ccp_rsa_encrypt,
.set_pub_key = ccp_rsa_setpubkey,
.set_priv_key = ccp_rsa_setprivkey,
.max_size = ccp_rsa_maxsize,
@@ -248,7 +245,8 @@ static struct ccp_rsa_def rsa_algs[] = {
}
};
-int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def)
+static int ccp_register_rsa_alg(struct list_head *head,
+ const struct ccp_rsa_def *def)
{
struct ccp_crypto_akcipher_alg *ccp_alg;
struct akcipher_alg *alg;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 10a61cd54fce..3e10573f589e 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -293,8 +293,6 @@ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
if (key_len > block_size) {
/* Must hash the input key */
sdesc->tfm = shash;
- sdesc->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
ret = crypto_shash_digest(sdesc, key, key_len,
ctx->u.sha.key);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index fadf859a14b8..656838433f2f 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -583,6 +583,69 @@ e_free:
return ret;
}
+static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_get_id2 input;
+ struct sev_data_get_id *data;
+ void *id_blob = NULL;
+ int ret;
+
+ /* SEV GET_ID is available from SEV API v0.16 and up */
+ if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ /* Check if we have write access to the userspace buffer */
+ if (input.address &&
+ input.length &&
+ !access_ok(input.address, input.length))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (input.address && input.length) {
+ id_blob = kmalloc(input.length, GFP_KERNEL);
+ if (!id_blob) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ data->address = __psp_pa(id_blob);
+ data->len = input.length;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
+
+ /*
+ * Firmware will return the length of the ID value (either the minimum
+ * required length or the actual length written); return it to the user.
+ */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ if (id_blob) {
+ if (copy_to_user((void __user *)input.address,
+ id_blob, data->len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+e_free:
+ kfree(id_blob);
+ kfree(data);
+
+ return ret;
+}
+
static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
struct sev_data_get_id *data;
@@ -761,8 +824,12 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
ret = sev_ioctl_do_pdh_export(&input);
break;
case SEV_GET_ID:
+ pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
ret = sev_ioctl_do_get_id(&input);
break;
+ case SEV_GET_ID2:
+ ret = sev_ioctl_do_get_id2(&input);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -997,7 +1064,7 @@ void psp_pci_init(void)
rc = sev_platform_init(&error);
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
- goto err;
+ return;
}
dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index bdc27970f95f..145e50bdbf16 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2012-2019 ARM Limited (or its affiliates).
obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index a3527c00b29a..7aa4cbe19a86 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,12 +23,8 @@
#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
-#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-/* Value of each ICV_CMP byte (of 8) in case of success */
-#define ICV_VERIF_OK 0x01
-
struct cc_aead_handle {
cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
@@ -220,6 +216,10 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ /* BACKLOG notification */
+ if (err == -EINPROGRESS)
+ goto done;
+
cc_unmap_aead_request(dev, areq);
/* Restore ordinary iv pointer */
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can be passed to the hmac processing
* (copy to internal buffer, or hash in case the key is longer than the block size)
*/
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
+ u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
+
+ key = kmemdup(authkey, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
+ kzfree(key);
+
return rc;
}
@@ -650,6 +659,39 @@ setkey_error:
return rc;
}
+static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(aead);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(aead, flags);
+ goto out;
+ }
+
+ err = cc_aead_setkey(aead, key, keylen);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
@@ -731,7 +773,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
dev_dbg(dev, "ASSOC buffer type DLLI\n");
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
- areq->assoclen, NS_BIT);
+ areq_ctx->assoclen, NS_BIT);
set_flow_mode(&desc[idx], flow_mode);
if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
areq_ctx->cryptlen > 0)
@@ -1080,9 +1122,11 @@ static void cc_proc_header_desc(struct aead_request *req,
struct cc_hw_desc desc[],
unsigned int *seq_size)
{
+ struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int idx = *seq_size;
+
/* Hash associated data */
- if (req->assoclen > 0)
+ if (areq_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
/* Hash IV */
@@ -1159,9 +1203,9 @@ static void cc_mlli_to_sram(struct aead_request *req,
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
+ if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
- !req_ctx->is_single_pass) {
+ !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
(unsigned int)ctx->drvdata->mlli_sram_addr,
req_ctx->mlli_params.mlli_len);
@@ -1310,7 +1354,7 @@ static int validate_data_size(struct cc_aead_ctx *ctx,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- unsigned int assoclen = req->assoclen;
+ unsigned int assoclen = areq_ctx->assoclen;
unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
(req->cryptlen - ctx->authsize) : req->cryptlen;
@@ -1469,7 +1513,7 @@ static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
idx++;
/* process assoc data */
- if (req->assoclen > 0) {
+ if (req_ctx->assoclen > 0) {
cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
} else {
hw_desc_init(&desc[idx]);
@@ -1561,7 +1605,7 @@ static int config_ccm_adata(struct aead_request *req)
* NIST Special Publication 800-38C
*/
*b0 |= (8 * ((m - 2) / 2));
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
@@ -1572,7 +1616,7 @@ static int config_ccm_adata(struct aead_request *req)
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
- req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
+ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
req->iv[15] = 1;
@@ -1604,7 +1648,7 @@ static void cc_proc_rfc4309_ccm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
CCM_BLOCK_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= CCM_BLOCK_IV_SIZE;
+ areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
static void cc_set_ghash_desc(struct aead_request *req,
@@ -1812,7 +1856,7 @@ static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
// for gcm and rfc4106.
cc_set_ghash_desc(req, desc, seq_size);
/* process(ghash) assoc data */
- if (req->assoclen > 0)
+ if (req_ctx->assoclen > 0)
cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
cc_set_gctr_desc(req, desc, seq_size);
/* process(gctr+ghash) */
@@ -1836,8 +1880,8 @@ static int config_gcm_context(struct aead_request *req)
(req->cryptlen - ctx->authsize);
__be32 counter = cpu_to_be32(2);
- dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
- __func__, cryptlen, req->assoclen, ctx->authsize);
+ dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
+ __func__, cryptlen, req_ctx->assoclen, ctx->authsize);
memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
@@ -1853,7 +1897,7 @@ static int config_gcm_context(struct aead_request *req)
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
- temp64 = cpu_to_be64(req->assoclen * 8);
+ temp64 = cpu_to_be64(req_ctx->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1863,8 +1907,8 @@ static int config_gcm_context(struct aead_request *req)
*/
__be64 temp64;
- temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
- cryptlen) * 8);
+ temp64 = cpu_to_be64((req_ctx->assoclen +
+ GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
temp64 = 0;
memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
@@ -1884,7 +1928,7 @@ static void cc_proc_rfc4_gcm(struct aead_request *req)
memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
GCM_BLOCK_RFC4_IV_SIZE);
req->iv = areq_ctx->ctr_iv;
- req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
+ areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
static int cc_proc_aead(struct aead_request *req,
@@ -1909,7 +1953,7 @@ static int cc_proc_aead(struct aead_request *req,
/* Check data length according to mode */
if (validate_data_size(ctx, direct, req)) {
dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
- req->cryptlen, req->assoclen);
+ req->cryptlen, areq_ctx->assoclen);
crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
return -EINVAL;
}
@@ -2058,8 +2102,11 @@ static int cc_aead_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2087,8 +2134,11 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2106,8 +2156,11 @@ static int cc_aead_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = false;
@@ -2133,8 +2186,11 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->is_gcm4543 = true;
@@ -2250,8 +2306,11 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2273,11 +2332,14 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not encrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2305,8 +2367,11 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
goto out;
}
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
areq_ctx->plaintext_authenticate_only = false;
@@ -2328,11 +2393,14 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc;
+ memset(areq_ctx, 0, sizeof(*areq_ctx));
+
//plaintext is not decrypted with rfc4543
areq_ctx->plaintext_authenticate_only = true;
/* No generated IV required */
areq_ctx->backup_iv = req->iv;
+ areq_ctx->assoclen = req->assoclen;
areq_ctx->backup_giv = NULL;
cc_proc_rfc4_gcm(req);
@@ -2372,7 +2440,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
@@ -2412,7 +2480,7 @@ static struct cc_alg_template aead_algs[] = {
.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
.blocksize = DES3_EDE_BLOCK_SIZE,
.template_aead = {
- .setkey = cc_aead_setkey,
+ .setkey = cc_des3_aead_setkey,
.setauthsize = cc_aead_setauthsize,
.encrypt = cc_aead_encrypt,
.decrypt = cc_aead_decrypt,
diff --git a/drivers/crypto/ccree/cc_aead.h b/drivers/crypto/ccree/cc_aead.h
index 5edf3b351fa4..e51724b96c56 100644
--- a/drivers/crypto/ccree/cc_aead.h
+++ b/drivers/crypto/ccree/cc_aead.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_aead.h
* ARM CryptoCell AEAD Crypto API
@@ -67,6 +67,7 @@ struct aead_req_ctx {
u8 backup_mac[MAX_MAC_SIZE];
u8 *backup_iv; /*store iv for generated IV flow*/
u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
+ u32 assoclen; /* internal assoclen */
dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
/* buffer for internal ccm configurations */
dma_addr_t ccm_iv0_dma_addr;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 0ee1c52da0a4..c81ad33f9115 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
@@ -65,7 +65,7 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- u32 skip = req->assoclen + req->cryptlen;
+ u32 skip = areq_ctx->assoclen + req->cryptlen;
if (areq_ctx->is_gcm4543)
skip += crypto_aead_ivsize(tfm);
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
- unsigned int nbytes, u32 *lbytes,
- bool *is_chained)
+ unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
- if (sg_list->length) {
- nents++;
- /* get the number of bytes in the last entry */
- *lbytes = nbytes;
- nbytes -= (sg_list->length > nbytes) ?
- nbytes : sg_list->length;
- sg_list = sg_next(sg_list);
- } else {
- sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained)
- *is_chained = true;
- }
+ nents++;
+ /* get the number of bytes in the last entry */
+ *lbytes = nbytes;
+ nbytes -= (sg_list->length > nbytes) ?
+ nbytes : sg_list->length;
+ sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
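
With the chained-scatterlist special case gone, counting entries is a plain walk over sg_next() until nbytes are covered. A standalone model of the same loop, using a hypothetical toy_sg struct instead of the kernel's scatterlist:

#include <stdio.h>

/* Hypothetical stand-in for a scatterlist entry; only the fields the
 * counting loop cares about.
 */
struct toy_sg {
	unsigned int length;
	struct toy_sg *next;
};

/* Mirrors the simplified cc_get_sgl_nents(): count entries until nbytes
 * of payload are covered and remember how many bytes land in the last one.
 */
static unsigned int toy_sgl_nents(struct toy_sg *sg, unsigned int nbytes,
				  unsigned int *lbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg) {
		nents++;
		*lbytes = nbytes;	/* bytes used from the last entry */
		nbytes -= (sg->length > nbytes) ? nbytes : sg->length;
		sg = sg->next;
	}
	return nents;
}

int main(void)
{
	struct toy_sg c = { 16, NULL }, b = { 64, &c }, a = { 32, &b };
	unsigned int last = 0;
	unsigned int n = toy_sgl_nents(&a, 100, &last);

	printf("nents=%u last_bytes=%u\n", n, last); /* nents=3 last_bytes=4 */
	return 0;
}
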
@@ -140,9 +133,9 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
- u32 nents, lbytes;
+ u32 nents;
- nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+ nents = sg_nents_for_len(sg, end);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->num_of_buffers++;
}
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
- enum dma_data_direction direction)
-{
- u32 i, j;
- struct scatterlist *l_sg = sg;
-
- for (i = 0; i < nents; i++) {
- if (!l_sg)
- break;
- if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
- dev_err(dev, "dma_map_page() sg buffer failed\n");
- goto err;
- }
- l_sg = sg_next(l_sg);
- }
- return nents;
-
-err:
- /* Restore mapped parts */
- for (j = 0; j < i; j++) {
- if (!sg)
- break;
- dma_unmap_sg(dev, sg, 1, direction);
- sg = sg_next(sg);
- }
- return 0;
-}
-
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
- bool is_chained = false;
-
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
- *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
- &is_chained);
+ *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
- if (!is_chained) {
- /* In case of mmu the number of mapped nents might
- * be changed from the original sgl nents
- */
- *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
- if (*mapped_nents == 0) {
- *nents = 0;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
- } else {
- /*In this case the driver maps entry by entry so it
- * must have the same nents before and after map
- */
- *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
- direction);
- if (*mapped_nents != *nents) {
- *nents = *mapped_nents;
- dev_err(dev, "dma_map_sg() sg buffer failed\n");
- return -ENOMEM;
- }
+ /* In case of mmu the number of mapped nents might
+ * be changed from the original sgl nents
+ */
+ *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+ if (*mapped_nents == 0) {
+ *nents = 0;
+ dev_err(dev, "dma_map_sg() sg buffer failed\n");
+ return -ENOMEM;
}
}
@@ -457,7 +406,7 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
&req_ctx->gen_ctx.iv_dma_addr, ivsize);
dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
}
/* Release pool */
if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
@@ -499,7 +448,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
dump_byte_array("iv", (u8 *)info, ivsize);
req_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, (void *)info,
- ivsize, DMA_TO_DEVICE);
+ ivsize, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
ivsize, info);
@@ -568,11 +517,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
- bool chained;
- u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -612,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
+ kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -628,23 +574,13 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
- req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
+ areq_ctx->assoclen, req->cryptlen);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -658,55 +594,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
}
}
-static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
- unsigned int sgl_nents, unsigned int authsize,
- u32 last_entry_data_size,
- bool *is_icv_fragmented)
+static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
+ u32 last_entry_data_size)
{
- unsigned int icv_max_size = 0;
- unsigned int icv_required_size = authsize > last_entry_data_size ?
- (authsize - last_entry_data_size) :
- authsize;
- unsigned int nents;
- unsigned int i;
-
- if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
- *is_icv_fragmented = false;
- return 0;
- }
-
- for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (!sgl)
- break;
- sgl = sg_next(sgl);
- }
-
- if (sgl)
- icv_max_size = sgl->length;
-
- if (last_entry_data_size > authsize) {
- /* ICV attached to data in last entry (not fragmented!) */
- nents = 0;
- *is_icv_fragmented = false;
- } else if (last_entry_data_size == authsize) {
- /* ICV placed in whole last entry (not fragmented!) */
- nents = 1;
- *is_icv_fragmented = false;
- } else if (icv_max_size > icv_required_size) {
- nents = 1;
- *is_icv_fragmented = true;
- } else if (icv_max_size == icv_required_size) {
- nents = 2;
- *is_icv_fragmented = true;
- } else {
- dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
- MAX_ICV_NENTS_SUPPORTED);
- nents = -1; /*unsupported*/
- }
- dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
- (*is_icv_fragmented ? "true" : "false"), nents);
-
- return nents;
+ return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
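
cc_is_icv_frag() boils the old nents bookkeeping down to one question: does the ICV fit entirely inside the last data entry? A minimal standalone check, exercised with assumed sample values:

#include <stdbool.h>
#include <stdio.h>

/* Same predicate as cc_is_icv_frag() above: the ICV spills over a
 * scatterlist boundary iff there is more than one entry and the last
 * entry holds fewer bytes than the ICV itself.
 */
static bool icv_is_fragmented(unsigned int sgl_nents, unsigned int authsize,
			      unsigned int last_entry_data_size)
{
	return sgl_nents > 1 && last_entry_data_size < authsize;
}

int main(void)
{
	/* 16-byte tag, last entry carries only 4 bytes -> fragmented */
	printf("%d\n", icv_is_fragmented(3, 16, 4));	/* 1 */
	/* a single-entry list can never fragment the tag */
	printf("%d\n", icv_is_fragmented(1, 16, 4));	/* 0 */
	return 0;
}
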
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
@@ -717,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
+ gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
+ areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
- areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
- hw_iv_size,
- DMA_BIDIRECTIONAL);
+ areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+ if (!areq_ctx->gen_ctx.iv)
+ return -ENOMEM;
+
+ areq_ctx->gen_ctx.iv_dma_addr =
+ dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
+ kzfree(areq_ctx->gen_ctx.iv);
+ areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
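
The IV is now duplicated before mapping, so the engine writes the next IV into driver-owned memory that is kzfree()d on unmap instead of scribbling on the caller's buffer. A userspace sketch of the same copy/map/unwind shape; malloc()/memcpy()/free() stand in for kmemdup()/dma_map_single()/kzfree(), and IV_SIZE and fake_map() are assumptions for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IV_SIZE 16

static int fake_map(const void *buf)
{
	return buf != NULL;	/* pretend the mapping step succeeds */
}

static int chain_iv(const unsigned char *req_iv, unsigned char **priv_iv)
{
	if (!req_iv) {
		*priv_iv = NULL;	/* nothing to map */
		return 0;
	}

	*priv_iv = malloc(IV_SIZE);	/* kmemdup() in the real driver */
	if (!*priv_iv)
		return -ENOMEM;
	memcpy(*priv_iv, req_iv, IV_SIZE);

	if (!fake_map(*priv_iv)) {	/* dma_map_single() in the driver */
		free(*priv_iv);		/* kzfree() on the error path */
		*priv_iv = NULL;
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	unsigned char iv[IV_SIZE] = { 0 };
	unsigned char *copy = NULL;

	printf("rc=%d copied=%p\n", chain_iv(iv, &copy), (void *)copy);
	free(copy);
	return 0;
}
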
@@ -760,11 +659,9 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
int rc = 0;
- u32 mapped_nents = 0;
- struct scatterlist *current_sg = req->src;
+ int mapped_nents = 0;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned int sg_index = 0;
- u32 size_of_assoc = req->assoclen;
+ unsigned int size_of_assoc = areq_ctx->assoclen;
struct device *dev = drvdata_to_dev(drvdata);
if (areq_ctx->is_gcm4543)
@@ -775,7 +672,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- if (req->assoclen == 0) {
+ if (areq_ctx->assoclen == 0) {
areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
areq_ctx->assoc.nents = 0;
areq_ctx->assoc.mlli_nents = 0;
@@ -785,26 +682,10 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
goto chain_assoc_exit;
}
- //iterate over the sgl to see how many entries are for associated data
- //it is assumed that if we reach here , the sgl is already mapped
- sg_index = current_sg->length;
- //the first entry in the scatter list contains all the associated data
- if (sg_index > size_of_assoc) {
- mapped_nents++;
- } else {
- while (sg_index <= size_of_assoc) {
- current_sg = sg_next(current_sg);
- /* if have reached the end of the sgl, then this is
- * unexpected
- */
- if (!current_sg) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
- sg_index += current_sg->length;
- mapped_nents++;
- }
- }
+ mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
+ if (mapped_nents < 0)
+ return mapped_nents;
+
if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
@@ -835,7 +716,7 @@ static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
cc_dma_buf_type(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
- req->assoclen, 0, is_last,
+ areq_ctx->assoclen, 0, is_last,
&areq_ctx->assoc.mlli_nents);
areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
}
@@ -850,39 +731,32 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
+ struct scatterlist *sg;
+ ssize_t offset;
areq_ctx->is_icv_fragmented = false;
- if (req->src == req->dst) {
- /*INPLACE*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
- /*NON-INPLACE and DECRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
- (*src_last_bytes - authsize);
+
+ if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ sg = areq_ctx->src_sgl;
+ offset = *src_last_bytes - authsize;
} else {
- /*NON-INPLACE and ENCRYPT*/
- areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
- areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
- (*dst_last_bytes - authsize);
+ sg = areq_ctx->dst_sgl;
+ offset = *dst_last_bytes - authsize;
}
+
+ areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
+ areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
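
The DLLI case now keeps a single copy of the ICV address arithmetic: pick the source list for in-place or decrypt, the destination list otherwise, then step back authsize bytes from the end of the last entry. A standalone model with made-up buffer sizes:

#include <stdio.h>

/* Toy model of the DLLI ICV placement above: the ICV lives in the last
 * authsize bytes of whichever buffer holds the ciphertext.
 */
struct toy_buf {
	unsigned char *base;
	unsigned int last_bytes;	/* payload bytes in the last entry */
};

static unsigned char *icv_addr(int in_place_or_decrypt,
			       struct toy_buf *src, struct toy_buf *dst,
			       unsigned int authsize)
{
	struct toy_buf *b = in_place_or_decrypt ? src : dst;

	return b->base + (b->last_bytes - authsize);
}

int main(void)
{
	unsigned char s[64], d[64];
	struct toy_buf src = { s, 48 }, dst = { d, 48 };

	/* out-of-place encrypt: ICV sits at dst + 48 - 16 */
	printf("offset=%ld\n", (long)(icv_addr(0, &src, &dst, 16) - d));
	return 0;
}
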
-static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
- struct aead_request *req,
- struct buffer_array *sg_data,
- u32 *src_last_bytes, u32 *dst_last_bytes,
- bool is_last_table)
+static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
+ struct aead_request *req,
+ struct buffer_array *sg_data,
+ u32 *src_last_bytes, u32 *dst_last_bytes,
+ bool is_last_table)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
unsigned int authsize = areq_ctx->req_authsize;
- int rc = 0, icv_nents;
struct device *dev = drvdata_to_dev(drvdata);
struct scatterlist *sg;
@@ -893,14 +767,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
if (areq_ctx->is_icv_fragmented) {
/* Backup happens only when ICV is fragmented, ICV
@@ -942,16 +811,11 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->dst_offset, is_last_table,
&areq_ctx->dst.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
- areq_ctx->src.nents,
- authsize, *src_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
-
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->src.nents, authsize,
+ *src_last_bytes);
+
/* Backup happens only when ICV is fragmented, ICV
* verification is made by CPU compare in order to simplify
* MAC verification upon request completion
*/
@@ -979,14 +843,9 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->src_offset, is_last_table,
&areq_ctx->src.mlli_nents);
- icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
- areq_ctx->dst.nents,
- authsize, *dst_last_bytes,
- &areq_ctx->is_icv_fragmented);
- if (icv_nents < 0) {
- rc = -ENOTSUPP;
- goto prepare_data_mlli_exit;
- }
+ areq_ctx->is_icv_fragmented =
+ cc_is_icv_frag(areq_ctx->dst.nents, authsize,
+ *dst_last_bytes);
if (!areq_ctx->is_icv_fragmented) {
sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
@@ -1000,9 +859,6 @@ static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
}
}
-
-prepare_data_mlli_exit:
- return rc;
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
@@ -1019,12 +875,12 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
u32 src_mapped_nents = 0, dst_mapped_nents = 0;
u32 offset = 0;
/* non-inplace mode */
- unsigned int size_for_map = req->assoclen + req->cryptlen;
+ unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
- bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
- u32 size_to_skip = req->assoclen;
+ u32 size_to_skip = areq_ctx->assoclen;
+ struct scatterlist *sgl;
if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
@@ -1043,19 +899,17 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
- &src_last_bytes, &chained);
+ &src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
+ src_mapped_nents--;
offset -= areq_ctx->src_sgl->length;
- areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->src_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->src_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->src_sgl = sgl;
sg_index += areq_ctx->src_sgl->length;
- src_mapped_nents--;
}
if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1068,7 +922,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
areq_ctx->src_offset = offset;
if (req->src != req->dst) {
- size_for_map = req->assoclen + req->cryptlen;
+ size_for_map = areq_ctx->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
if (is_gcm4543)
@@ -1083,21 +937,19 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
- &dst_last_bytes, &chained);
+ &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
//check where the data starts
while (sg_index <= size_to_skip) {
+ dst_mapped_nents--;
offset -= areq_ctx->dst_sgl->length;
- areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
- //if have reached the end of the sgl, then this is unexpected
- if (!areq_ctx->dst_sgl) {
- dev_err(dev, "reached end of sg list. unexpected\n");
- return -EINVAL;
- }
+ sgl = sg_next(areq_ctx->dst_sgl);
+ if (!sgl)
+ break;
+ areq_ctx->dst_sgl = sgl;
sg_index += areq_ctx->dst_sgl->length;
- dst_mapped_nents--;
}
if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
dev_err(dev, "Too many fragments. current %d max %d\n",
@@ -1110,9 +962,9 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
dst_mapped_nents > 1 ||
do_chain) {
areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
- rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
- &src_last_bytes,
- &dst_last_bytes, is_last_table);
+ cc_prepare_aead_data_mlli(drvdata, req, sg_data,
+ &src_last_bytes, &dst_last_bytes,
+ is_last_table);
} else {
areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
cc_prepare_aead_data_dlli(req, &src_last_bytes,
@@ -1234,7 +1086,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->ccm_iv0_dma_addr = dma_addr;
rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
- &sg_data, req->assoclen);
+ &sg_data, areq_ctx->assoclen);
if (rc)
goto aead_map_failure;
}
@@ -1285,7 +1137,7 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
}
- size_to_map = req->cryptlen + req->assoclen;
+ size_to_map = req->cryptlen + areq_ctx->assoclen;
if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
@@ -1483,8 +1335,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents =
- cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+ areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 3ec4b4db5247..a726016bdbc1 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_buffer_mgr.h
* Buffer Manager
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d9c17078517b..5b58226ea24d 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -34,6 +34,18 @@ struct cc_hw_key_info {
enum cc_hw_crypto_key key2_slot;
};
+struct cc_cpp_key_info {
+ u8 slot;
+ enum cc_cpp_alg alg;
+};
+
+enum cc_key_type {
+ CC_UNPROTECTED_KEY, /* User key */
+ CC_HW_PROTECTED_KEY, /* HW (FDE) key */
+ CC_POLICY_PROTECTED_KEY, /* CPP key */
+ CC_INVALID_PROTECTED_KEY /* Invalid key */
+};
+
struct cc_cipher_ctx {
struct cc_drvdata *drvdata;
int keylen;
@@ -41,19 +53,22 @@ struct cc_cipher_ctx {
int cipher_mode;
int flow_mode;
unsigned int flags;
- bool hw_key;
+ enum cc_key_type key_type;
struct cc_user_key_info user;
- struct cc_hw_key_info hw;
+ union {
+ struct cc_hw_key_info hw;
+ struct cc_cpp_key_info cpp;
+ };
struct crypto_shash *shash_tfm;
};
static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
-static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- return ctx_p->hw_key;
+ return ctx_p->key_type;
}
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
@@ -232,7 +247,7 @@ struct tdes_keys {
u8 key3[DES_KEY_SIZE];
};
-static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
+static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {
case 0:
@@ -247,6 +262,22 @@ static enum cc_hw_crypto_key cc_slot_to_hw_key(int slot_num)
return END_OF_KEYS;
}
+static u8 cc_slot_to_cpp_key(u8 slot_num)
+{
+ return (slot_num - CC_FIRST_CPP_KEY_SLOT);
+}
+
+static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num)
+{
+ if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT)
+ return CC_HW_PROTECTED_KEY;
+ else if (slot_num >= CC_FIRST_CPP_KEY_SLOT &&
+ slot_num <= CC_LAST_CPP_KEY_SLOT)
+ return CC_POLICY_PROTECTED_KEY;
+ else
+ return CC_INVALID_PROTECTED_KEY;
+}
+
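
cc_slot_to_key_type() classifies the incoming slot purely by range. A standalone model; the slot bounds below are assumptions, the real ones come from CC_FIRST_HW_KEY_SLOT/CC_LAST_HW_KEY_SLOT and their CPP counterparts:

#include <stdio.h>

enum toy_key_type { TOY_HW_KEY, TOY_CPP_KEY, TOY_INVALID_KEY };

/* Assumed slot layout, for illustration only */
#define TOY_FIRST_HW_SLOT	1
#define TOY_LAST_HW_SLOT	3
#define TOY_FIRST_CPP_SLOT	16
#define TOY_LAST_CPP_SLOT	23

static enum toy_key_type toy_slot_to_key_type(unsigned int slot)
{
	if (slot >= TOY_FIRST_HW_SLOT && slot <= TOY_LAST_HW_SLOT)
		return TOY_HW_KEY;
	if (slot >= TOY_FIRST_CPP_SLOT && slot <= TOY_LAST_CPP_SLOT)
		return TOY_CPP_KEY;
	return TOY_INVALID_KEY;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_slot_to_key_type(2),		/* HW protected */
	       toy_slot_to_key_type(18),	/* CPP protected */
	       toy_slot_to_key_type(9));	/* invalid */
	return 0;
}
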
static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
unsigned int keylen)
{
@@ -261,18 +292,13 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
/* STAT_PHASE_0: Init and sanity checks */
- /* This check the size of the hardware key token */
+	/* This checks the size of the protected key token */
if (keylen != sizeof(hki)) {
- dev_err(dev, "Unsupported HW key size %d.\n", keylen);
+ dev_err(dev, "Unsupported protected key size %d.\n", keylen);
crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- if (ctx_p->flow_mode != S_DIN_to_AES) {
- dev_err(dev, "HW key not supported for non-AES flows\n");
- return -EINVAL;
- }
-
memcpy(&hki, key, keylen);
/* The real key len for crypto op is the size of the HW key
@@ -286,31 +312,70 @@ static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
- if (ctx_p->hw.key1_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1);
- return -EINVAL;
- }
+ ctx_p->keylen = keylen;
- if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
- ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
- ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
- if (hki.hw_key1 == hki.hw_key2) {
- dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
- hki.hw_key1, hki.hw_key2);
+ switch (cc_slot_to_key_type(hki.hw_key1)) {
+ case CC_HW_PROTECTED_KEY:
+ if (ctx_p->flow_mode == S_DIN_to_SM4) {
+ dev_err(dev, "Only AES HW protected keys are supported\n");
return -EINVAL;
}
- ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
- if (ctx_p->hw.key2_slot == END_OF_KEYS) {
- dev_err(dev, "Unsupported hw key2 number (%d)\n",
- hki.hw_key2);
+
+ ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1);
+ if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key1 number (%d)\n",
+ hki.hw_key1);
return -EINVAL;
}
- }
- ctx_p->keylen = keylen;
- ctx_p->hw_key = true;
- dev_dbg(dev, "cc_is_hw_key ret 0");
+ if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+ ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+ ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+ if (hki.hw_key1 == hki.hw_key2) {
+ dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+ hki.hw_key1, hki.hw_key2);
+ return -EINVAL;
+ }
+
+ ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2);
+ if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+ dev_err(dev, "Unsupported hw key2 number (%d)\n",
+ hki.hw_key2);
+ return -EINVAL;
+ }
+ }
+
+ ctx_p->key_type = CC_HW_PROTECTED_KEY;
+	dev_dbg(dev, "HW protected key %d/%d set.\n",
+ ctx_p->hw.key1_slot, ctx_p->hw.key2_slot);
+ break;
+
+ case CC_POLICY_PROTECTED_KEY:
+ if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) {
+ dev_err(dev, "CPP keys not supported in this hardware revision.\n");
+ return -EINVAL;
+ }
+
+ if (ctx_p->cipher_mode != DRV_CIPHER_CBC &&
+ ctx_p->cipher_mode != DRV_CIPHER_CTR) {
+ dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n");
+ return -EINVAL;
+ }
+
+ ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1);
+ if (ctx_p->flow_mode == S_DIN_to_AES)
+ ctx_p->cpp.alg = CC_CPP_AES;
+		else /* Must be SM4 due to sethkey registration */
+ ctx_p->cpp.alg = CC_CPP_SM4;
+ ctx_p->key_type = CC_POLICY_PROTECTED_KEY;
+ dev_dbg(dev, "policy protected key alg: %d slot: %d.\n",
+ ctx_p->cpp.alg, ctx_p->cpp.slot);
+ break;
+
+ default:
+ dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1);
+ return -EINVAL;
+ }
return 0;
}
@@ -321,7 +386,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
struct cc_crypto_alg *cc_alg =
container_of(tfm->__crt_alg, struct cc_crypto_alg,
skcipher_alg.base);
@@ -339,7 +403,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return -EINVAL;
}
- ctx_p->hw_key = false;
+ ctx_p->key_type = CC_UNPROTECTED_KEY;
/*
* Verify DES weak keys
@@ -347,6 +411,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
* HW does the expansion on its own.
*/
if (ctx_p->flow_mode == S_DIN_to_DES) {
+ u32 tmp[DES3_EDE_EXPKEY_WORDS];
if (keylen == DES3_EDE_KEY_SIZE &&
__des3_ede_setkey(tmp, &tfm->crt_flags, key,
DES3_EDE_KEY_SIZE)) {
@@ -399,7 +464,77 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
return 0;
}
-static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
+{
+ switch (ctx_p->flow_mode) {
+ case S_DIN_to_AES:
+ return S_AES_to_DOUT;
+ case S_DIN_to_DES:
+ return S_DES_to_DOUT;
+ case S_DIN_to_SM4:
+ return S_SM4_to_DOUT;
+ default:
+ return ctx_p->flow_mode;
+ }
+}
+
+static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int ivsize, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = cc_out_setup_mode(ctx_p);
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
+ return;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ /* Read next IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ if (cipher_mode == DRV_CIPHER_CTR ||
+ cipher_mode == DRV_CIPHER_OFB) {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ } else {
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0);
+ }
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* IV */
+ hw_desc_init(&desc[*seq_size]);
+ set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE,
+ NS_BIT, 1);
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
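
cc_out_setup_mode() here (and cc_out_flow_mode() further down) derive the engine's output mode from the configured input flow rather than carrying a second field around. A standalone sketch of that mapping with illustrative enum names:

#include <stdio.h>

enum toy_flow { DIN_AES, DIN_DES, DIN_SM4, FLOW_OTHER };
enum toy_out  { AES_DOUT, DES_DOUT, SM4_DOUT, OUT_PASSTHRU };

/* Input flow selects the matching engine-to-DOUT mode */
static enum toy_out toy_out_mode(enum toy_flow in)
{
	switch (in) {
	case DIN_AES:
		return AES_DOUT;
	case DIN_DES:
		return DES_DOUT;
	case DIN_SM4:
		return SM4_DOUT;
	default:
		return OUT_PASSTHRU;	/* leave unknown modes untouched */
	}
}

int main(void)
{
	printf("%d %d\n", toy_out_mode(DIN_AES), toy_out_mode(DIN_SM4));
	return 0;
}
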
+static void cc_setup_state_desc(struct crypto_tfm *tfm,
struct cipher_req_ctx *req_ctx,
unsigned int ivsize, unsigned int nbytes,
struct cc_hw_desc desc[],
@@ -423,11 +558,13 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
du_size = cc_alg->data_unit;
switch (cipher_mode) {
+ case DRV_CIPHER_ECB:
+ break;
case DRV_CIPHER_CBC:
case DRV_CIPHER_CBC_CTS:
case DRV_CIPHER_CTR:
case DRV_CIPHER_OFB:
- /* Load cipher state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
NS_BIT);
@@ -441,57 +578,15 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
}
(*seq_size)++;
- /*FALLTHROUGH*/
- case DRV_CIPHER_ECB:
- /* Load key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (flow_mode == S_DIN_to_AES) {
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI,
- key_dma_addr, ((key_len == 24) ?
- AES_MAX_KEY_SIZE :
- key_len), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], key_len);
- } else {
- /*des*/
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- key_len, NS_BIT);
- set_key_size_des(&desc[*seq_size], key_len);
- }
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
break;
case DRV_CIPHER_XTS:
case DRV_CIPHER_ESSIV:
case DRV_CIPHER_BITLOCKER:
- /* Load AES key */
- hw_desc_init(&desc[*seq_size]);
- set_cipher_mode(&desc[*seq_size], cipher_mode);
- set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
- set_hw_crypto_key(&desc[*seq_size],
- ctx_p->hw.key1_slot);
- } else {
- set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
- (key_len / 2), NS_BIT);
- }
- set_key_size_aes(&desc[*seq_size], (key_len / 2));
- set_flow_mode(&desc[*seq_size], flow_mode);
- set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
- (*seq_size)++;
-
/* load XEX key */
hw_desc_init(&desc[*seq_size]);
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
- if (cc_is_hw_key(tfm)) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key2_slot);
} else {
@@ -505,7 +600,7 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
(*seq_size)++;
- /* Set state */
+ /* Load IV */
hw_desc_init(&desc[*seq_size]);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
set_cipher_mode(&desc[*seq_size], cipher_mode);
@@ -521,48 +616,113 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
}
}
-static void cc_setup_cipher_data(struct crypto_tfm *tfm,
- struct cipher_req_ctx *req_ctx,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- void *areq, struct cc_hw_desc desc[],
- unsigned int *seq_size)
+static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
{
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
- struct device *dev = drvdata_to_dev(ctx_p->drvdata);
- unsigned int flow_mode = ctx_p->flow_mode;
-
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
- flow_mode = DIN_AES_DOUT;
- break;
+ return DIN_AES_DOUT;
case S_DIN_to_DES:
- flow_mode = DIN_DES_DOUT;
- break;
+ return DIN_DES_DOUT;
case S_DIN_to_SM4:
- flow_mode = DIN_SM4_DOUT;
- break;
+ return DIN_SM4_DOUT;
default:
- dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
- return;
+ return ctx_p->flow_mode;
}
- /* Process */
- if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(src), nbytes);
- dev_dbg(dev, " data params addr %pad length 0x%X\n",
- &sg_dma_address(dst), nbytes);
+}
+
+static void cc_setup_key_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ int cipher_mode = ctx_p->cipher_mode;
+ int flow_mode = ctx_p->flow_mode;
+ int direction = req_ctx->gen_ctx.op_type;
+ dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+ unsigned int key_len = ctx_p->keylen;
+ unsigned int din_size;
+
+ switch (cipher_mode) {
+ case DRV_CIPHER_CBC:
+ case DRV_CIPHER_CBC_CTS:
+ case DRV_CIPHER_CTR:
+ case DRV_CIPHER_OFB:
+ case DRV_CIPHER_ECB:
+ /* Load key */
hw_desc_init(&desc[*seq_size]);
- set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
- nbytes, NS_BIT);
- set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
- nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq)
- set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) {
+ /* We use the AES key size coding for all CPP algs */
+ set_key_size_aes(&desc[*seq_size], key_len);
+ set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot);
+ flow_mode = cc_out_flow_mode(ctx_p);
+ } else {
+ if (flow_mode == S_DIN_to_AES) {
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+				/* CC_UNPROTECTED_KEY
+ * Invalid keys are filtered out in
+ * sethkey()
+ */
+ din_size = (key_len == 24) ?
+ AES_MAX_KEY_SIZE : key_len;
+
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, din_size,
+ NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], key_len);
+ } else {
+ /*des*/
+ set_din_type(&desc[*seq_size], DMA_DLLI,
+ key_dma_addr, key_len, NS_BIT);
+ set_key_size_des(&desc[*seq_size], key_len);
+ }
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ }
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
- } else {
+ break;
+ case DRV_CIPHER_XTS:
+ case DRV_CIPHER_ESSIV:
+ case DRV_CIPHER_BITLOCKER:
+ /* Load AES key */
+ hw_desc_init(&desc[*seq_size]);
+ set_cipher_mode(&desc[*seq_size], cipher_mode);
+ set_cipher_config0(&desc[*seq_size], direction);
+ if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) {
+ set_hw_crypto_key(&desc[*seq_size],
+ ctx_p->hw.key1_slot);
+ } else {
+ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+ (key_len / 2), NS_BIT);
+ }
+ set_key_size_aes(&desc[*seq_size], (key_len / 2));
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+ (*seq_size)++;
+ break;
+ default:
+ dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+ }
+}
+
+static void cc_setup_mlli_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, void *areq,
+ struct cc_hw_desc desc[], unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
/* bypass */
dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
&req_ctx->mlli_params.mlli_dma_addr,
@@ -577,7 +737,38 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
req_ctx->mlli_params.mlli_len);
set_flow_mode(&desc[*seq_size], BYPASS);
(*seq_size)++;
+ }
+}
+
+static void cc_setup_flow_desc(struct crypto_tfm *tfm,
+ struct cipher_req_ctx *req_ctx,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes, struct cc_hw_desc desc[],
+ unsigned int *seq_size)
+{
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+ unsigned int flow_mode = cc_out_flow_mode(ctx_p);
+ bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY ||
+ ctx_p->cipher_mode == DRV_CIPHER_ECB);
+ /* Process */
+ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(src), nbytes);
+ dev_dbg(dev, " data params addr %pad length 0x%X\n",
+ &sg_dma_address(dst), nbytes);
+ hw_desc_init(&desc[*seq_size]);
+ set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+ nbytes, NS_BIT);
+ set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+ nbytes, NS_BIT, (!last_desc ? 0 : 1));
+ if (last_desc)
+ set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
+
+ set_flow_mode(&desc[*seq_size], flow_mode);
+ (*seq_size)++;
+ } else {
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_MLLI,
ctx_p->drvdata->mlli_sram_addr,
@@ -589,7 +780,7 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
set_dout_mlli(&desc[*seq_size],
ctx_p->drvdata->mlli_sram_addr,
req_ctx->in_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
} else {
dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
(unsigned int)ctx_p->drvdata->mlli_sram_addr,
@@ -600,9 +791,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
(LLI_ENTRY_BYTE_SIZE *
req_ctx->in_mlli_nents)),
req_ctx->out_mlli_nents, NS_BIT,
- (!areq ? 0 : 1));
+ (!last_desc ? 0 : 1));
}
- if (areq)
+ if (last_desc)
set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);
set_flow_mode(&desc[*seq_size], flow_mode);
@@ -610,38 +801,6 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
-/*
- * Update a CTR-AES 128 bit counter
- */
-static void cc_update_ctr(u8 *ctr, unsigned int increment)
-{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- IS_ALIGNED((unsigned long)ctr, 8)) {
-
- __be64 *high_be = (__be64 *)ctr;
- __be64 *low_be = high_be + 1;
- u64 orig_low = __be64_to_cpu(*low_be);
- u64 new_low = orig_low + (u64)increment;
-
- *low_be = __cpu_to_be64(new_low);
-
- if (new_low < orig_low)
- *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
- } else {
- u8 *pos = (ctr + AES_BLOCK_SIZE);
- u8 val;
- unsigned int size;
-
- for (; increment; increment--)
- for (size = AES_BLOCK_SIZE; size; size--) {
- val = *--pos + 1;
- *pos = val;
- if (val)
- break;
- }
- }
-}
-
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
@@ -649,44 +808,15 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
-
- switch (ctx_p->cipher_mode) {
- case DRV_CIPHER_CBC:
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req->iv, req->dst, len,
- ivsize, 0);
- }
- break;
-
- case DRV_CIPHER_CTR:
- /* Compute the counter of the last block */
- len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
- cc_update_ctr((u8 *)req->iv, len);
- break;
-
- default:
- break;
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ memcpy(req->iv, req_ctx->iv, ivsize);
+ kzfree(req_ctx->iv);
}
- kzfree(req_ctx->iv);
-
skcipher_request_complete(req, err);
}
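
With backlogging, the callback can fire once with -EINPROGRESS purely as a backlog-to-queue notification, so unmapping and IV copy-back must wait for the real completion. A standalone model of that filter:

#include <errno.h>
#include <stdio.h>

/* Models the err != -EINPROGRESS check above: -EINPROGRESS is only a
 * backlog notification, so resources are released and the IV copied
 * back solely on the real completion.
 */
static void toy_complete(int err, int *cleaned_up)
{
	if (err != -EINPROGRESS)
		*cleaned_up = 1;	/* unmap buffers, copy IV, free it */

	printf("notified err=%d cleaned_up=%d\n", err, *cleaned_up);
}

int main(void)
{
	int cleaned_up = 0;

	toy_complete(-EINPROGRESS, &cleaned_up);	/* backlog notification */
	toy_complete(0, &cleaned_up);			/* real completion */
	return 0;
}
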
@@ -741,6 +871,13 @@ static int cc_cipher_process(struct skcipher_request *req,
cc_req.user_cb = (void *)cc_cipher_complete;
cc_req.user_arg = (void *)req;
+ /* Setup CPP operation details */
+ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) {
+ cc_req.cpp.is_cpp = true;
+ cc_req.cpp.alg = ctx_p->cpp.alg;
+ cc_req.cpp.slot = ctx_p->cpp.slot;
+ }
+
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
@@ -755,11 +892,16 @@ static int cc_cipher_process(struct skcipher_request *req,
/* STAT_PHASE_2: Create sequence */
- /* Setup processing */
- cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup IV and XEX key used */
+ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+ /* Setup MLLI line, if needed */
+ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
+ /* Setup key */
+ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
/* Data processing */
- cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
- &seq_len);
+ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
+ /* Read next IV */
+ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
/* STAT_PHASE_3: Lock HW and push sequence */
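
The single cc_setup_cipher_desc() call is split into phase helpers that each append descriptors and bump seq_len, so the order of the calls above is the order of the HW sequence. A toy model of that append-and-count pattern, using labels instead of real descriptors:

#include <stdio.h>

#define MAX_DESC 8

/* Each helper appends zero or more entries and advances *seq_size */
static void push(const char *what, const char *desc[], unsigned int *seq_size)
{
	if (*seq_size < MAX_DESC)
		desc[(*seq_size)++] = what;
}

int main(void)
{
	const char *desc[MAX_DESC];
	unsigned int seq_len = 0;
	unsigned int i;

	push("state/IV", desc, &seq_len);
	push("mlli",     desc, &seq_len);
	push("key",      desc, &seq_len);
	push("flow",     desc, &seq_len);
	push("read IV",  desc, &seq_len);

	for (i = 0; i < seq_len; i++)
		printf("desc[%u] = %s\n", i, desc[i]);
	return 0;
}
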
@@ -774,7 +916,6 @@ static int cc_cipher_process(struct skcipher_request *req,
exit_process:
if (rc != -EINPROGRESS && rc != -EBUSY) {
- kzfree(req_ctx->backup_info);
kzfree(req_ctx->iv);
}
@@ -792,31 +933,10 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
- struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
- struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
- struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
- gfp_t flags = cc_gfp_flags(&req->base);
- unsigned int len;
memset(req_ctx, 0, sizeof(*req_ctx));
- if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) &&
- (req->cryptlen >= ivsize)) {
-
- /* Allocate and save the last IV sized bytes of the source,
- * which will be lost in case of in-place decryption.
- */
- req_ctx->backup_info = kzalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
-
- len = req->cryptlen - ivsize;
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
- ivsize, 0);
- }
-
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
@@ -838,6 +958,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts512(paes)",
@@ -856,6 +977,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts4096(paes)",
@@ -874,6 +996,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv(paes)",
@@ -891,6 +1014,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv512(paes)",
@@ -909,6 +1033,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "essiv4096(paes)",
@@ -927,6 +1052,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker(paes)",
@@ -944,6 +1070,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker512(paes)",
@@ -962,6 +1089,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "bitlocker4096(paes)",
@@ -980,6 +1108,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ecb(paes)",
@@ -997,6 +1126,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cbc(paes)",
@@ -1014,6 +1144,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ofb(paes)",
@@ -1031,6 +1162,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "cts(cbc(paes))",
@@ -1048,6 +1180,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "ctr(paes)",
@@ -1065,6 +1198,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
.std_body = CC_STD_NIST,
+ .sec_func = true,
},
{
.name = "xts(aes)",
@@ -1429,6 +1563,42 @@ static const struct cc_alg_template skcipher_algs[] = {
.min_hw_rev = CC_HW_REV_713,
.std_body = CC_STD_OSCCA,
},
+ {
+ .name = "cbc(psm4)",
+ .driver_name = "cbc-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
+ {
+ .name = "ctr(psm4)",
+ .driver_name = "ctr-psm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_sethkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = CC_HW_KEY_SIZE,
+ .max_keysize = CC_HW_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ .sec_func = true,
+ },
};
static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
@@ -1504,7 +1674,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
- !(drvdata->std_bodies & skcipher_algs[alg].std_body))
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body) ||
+ (drvdata->sec_disabled && skcipher_algs[alg].sec_func))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
index 4dbc0a1e6d5c..da3a38707fae 100644
--- a/drivers/crypto/ccree/cc_cipher.h
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_cipher.h
* ARM CryptoCell Cipher Crypto API
@@ -20,7 +20,6 @@ struct cipher_req_ctx {
u32 in_mlli_nents;
u32 out_nents;
u32 out_mlli_nents;
- u8 *backup_info; /*store iv for generated IV flow*/
u8 *iv;
struct mlli_params mlli_params;
};
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index c8dac273c563..ccf960a0d989 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_CRYPTO_CTX_H_
#define _CC_CRYPTO_CTX_H_
@@ -55,6 +55,14 @@
#define CC_DRV_ALG_MAX_BLOCK_SIZE CC_HASH_BLOCK_SIZE_MAX
+#define CC_CPP_NUM_SLOTS 8
+#define CC_CPP_NUM_ALGS 2
+
+enum cc_cpp_alg {
+ CC_CPP_SM4 = 1,
+ CC_CPP_AES = 0
+};
+
enum drv_engine_type {
DRV_ENGINE_NULL = 0,
DRV_ENGINE_AES = 1,
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c
index 5fa05a7bcf36..566999738698 100644
--- a/drivers/crypto/ccree/cc_debugfs.c
+++ b/drivers/crypto/ccree/cc_debugfs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/debugfs.h>
@@ -25,9 +25,24 @@ struct cc_debugfs_ctx {
*/
static struct dentry *cc_debugfs_dir;
-static struct debugfs_reg32 debug_regs[] = {
+static struct debugfs_reg32 ver_sig_regs[] = {
{ .name = "SIGNATURE" }, /* Must be 0th */
{ .name = "VERSION" }, /* Must be 1st */
+};
+
+static struct debugfs_reg32 pid_cid_regs[] = {
+ CC_DEBUG_REG(PERIPHERAL_ID_0),
+ CC_DEBUG_REG(PERIPHERAL_ID_1),
+ CC_DEBUG_REG(PERIPHERAL_ID_2),
+ CC_DEBUG_REG(PERIPHERAL_ID_3),
+ CC_DEBUG_REG(PERIPHERAL_ID_4),
+ CC_DEBUG_REG(COMPONENT_ID_0),
+ CC_DEBUG_REG(COMPONENT_ID_1),
+ CC_DEBUG_REG(COMPONENT_ID_2),
+ CC_DEBUG_REG(COMPONENT_ID_3),
+};
+
+static struct debugfs_reg32 debug_regs[] = {
CC_DEBUG_REG(HOST_IRR),
CC_DEBUG_REG(HOST_POWER_DOWN_EN),
CC_DEBUG_REG(AXIM_MON_ERR),
@@ -53,10 +68,7 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
struct cc_debugfs_ctx *ctx;
- struct debugfs_regset32 *regset;
-
- debug_regs[0].offset = drvdata->sig_offset;
- debug_regs[1].offset = drvdata->ver_offset;
+ struct debugfs_regset32 *regset, *verset;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -75,8 +87,26 @@ int cc_debugfs_init(struct cc_drvdata *drvdata)
debugfs_create_regset32("regs", 0400, ctx->dir, regset);
debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent);
- drvdata->debugfs = ctx;
+ verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL);
+ /* Failing here is not important enough to fail the module load */
+ if (!verset)
+ goto out;
+
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ ver_sig_regs[0].offset = drvdata->sig_offset;
+ ver_sig_regs[1].offset = drvdata->ver_offset;
+ verset->regs = ver_sig_regs;
+ verset->nregs = ARRAY_SIZE(ver_sig_regs);
+ } else {
+ verset->regs = pid_cid_regs;
+ verset->nregs = ARRAY_SIZE(pid_cid_regs);
+ }
+ verset->base = drvdata->cc_base;
+ debugfs_create_regset32("version", 0400, ctx->dir, verset);
+
+out:
+ drvdata->debugfs = ctx;
return 0;
}
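
The new 'version' regset is picked per hardware revision: SIGNATURE/VERSION for CC712 and older, the PID/CID block for newer parts. A standalone sketch of the selection, with illustrative offsets and a plain integer standing in for the cc_hw_rev enum:

#include <stdio.h>

struct toy_reg {
	const char *name;
	unsigned int offset;
};

/* Illustrative register tables; offsets are made up */
static const struct toy_reg ver_sig_regs[] = {
	{ "SIGNATURE", 0x0 }, { "VERSION", 0x4 },
};

static const struct toy_reg pid_cid_regs[] = {
	{ "PERIPHERAL_ID_0", 0xFE0 }, { "COMPONENT_ID_0", 0xFF0 },
};

int main(void)
{
	unsigned int hw_rev = 713;	/* assumed revision encoding */
	const struct toy_reg *regs =
		(hw_rev <= 712) ? ver_sig_regs : pid_cid_regs;

	printf("first version register: %s\n", regs[0].name);
	return 0;
}
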
diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h
index 01cbd9a95659..664ff402e0e9 100644
--- a/drivers/crypto/ccree/cc_debugfs.h
+++ b/drivers/crypto/ccree/cc_debugfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_DEBUGFS_H__
#define __CC_DEBUGFS_H__
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 3bcc6c76e090..86ac7b443355 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -30,27 +30,47 @@
bool cc_dump_desc;
module_param_named(dump_desc, cc_dump_desc, bool, 0600);
MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
-
bool cc_dump_bytes;
module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
+static bool cc_sec_disable;
+module_param_named(sec_disable, cc_sec_disable, bool, 0600);
+MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
+
struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ u32 cidr_0123;
+ u32 pidr_0124;
int std_bodies;
};
+#define CC_NUM_IDRS 4
+
+/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
+static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
+ CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
+ CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
+};
+
+static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
+ CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
+ CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
+};
+
/* Hardware revisions defs. */
/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
- .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+ .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};
static const struct cc_hw_data cc713_hw = {
- .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+ .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
+ .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc712_hw = {
@@ -78,6 +98,20 @@ static const struct of_device_id arm_ccree_dev_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
+static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
+{
+ int i;
+ union {
+ u8 regs[CC_NUM_IDRS];
+ __le32 val;
+ } idr;
+
+ for (i = 0; i < CC_NUM_IDRS; ++i)
+ idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
+
+ return le32_to_cpu(idr.val);
+}
+
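
cc_read_idr() packs four byte-wide ID registers into one little-endian word before comparing it against the expected pidr_0124/cidr_0123 values. A standalone model; le32toh() is the userspace stand-in for le32_to_cpu(), and reg_read()/fake_regs are illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_IDRS 4

/* Fake register file holding one ID byte per register */
static const uint8_t fake_regs[NUM_IDRS] = { 0x0D, 0xF0, 0x05, 0xB1 };

static uint8_t reg_read(unsigned int i)
{
	return fake_regs[i];	/* cc_ioread() in the real driver */
}

static uint32_t read_idr(void)
{
	union {
		uint8_t regs[NUM_IDRS];
		uint32_t val;
	} idr;
	unsigned int i;

	for (i = 0; i < NUM_IDRS; i++)
		idr.regs[i] = reg_read(i);

	return le32toh(idr.val);
}

int main(void)
{
	printf("idr=0x%08X\n", (unsigned int)read_idr());	/* 0xB105F00D */
	return 0;
}
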
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
char prefix[64];
@@ -114,12 +148,12 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
drvdata->irq = irr;
/* Completion interrupt - most probable */
- if (irr & CC_COMP_IRQ_MASK) {
- /* Mask AXI completion interrupt - will be unmasked in
- * Deferred service handler
+ if (irr & drvdata->comp_mask) {
+ /* Mask all completion interrupts - will be unmasked in
+ * deferred service handler
*/
- cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_COMP_IRQ_MASK);
- irr &= ~CC_COMP_IRQ_MASK;
+ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
+ irr &= ~drvdata->comp_mask;
complete_request(drvdata);
}
#ifdef CONFIG_CRYPTO_FIPS
@@ -159,11 +193,14 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
- /* Unmask all AXI interrupt sources AXI_CFG1 register */
- val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
- cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
- dev_dbg(dev, "AXIM_CFG=0x%08X\n",
- cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+	/* Unmask all AXI interrupt sources in the AXI_CFG1 register */
+	/* AXI interrupt config is obsolete starting at cc7x3 */
+ if (drvdata->hw_rev <= CC_HW_REV_712) {
+ val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
+ cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
+ dev_dbg(dev, "AXIM_CFG=0x%08X\n",
+ cc_ioread(drvdata, CC_REG(AXIM_CFG)));
+ }
/* Clear all pending interrupts */
val = cc_ioread(drvdata, CC_REG(HOST_IRR));
@@ -171,7 +208,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
cc_iowrite(drvdata, CC_REG(HOST_ICR), val);
/* Unmask relevant interrupt cause */
- val = CC_COMP_IRQ_MASK | CC_AXI_ERR_IRQ_MASK;
+ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;
if (drvdata->hw_rev >= CC_HW_REV_712)
val |= CC_GPR0_IRQ_MASK;
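
The unmask value is assembled from the per-device completion mask (which may now carry CPP abort bits), the AXI error cause, and GPR0 where the revision has it. A standalone sketch with made-up bit positions:

#include <stdio.h>

/* Illustrative bit positions only */
#define TOY_COMP_IRQ	(1u << 2)
#define TOY_CPP_ABORT	(1u << 4)
#define TOY_AXI_ERR	(1u << 8)
#define TOY_GPR0	(1u << 11)

/* Start from the device's completion mask, always add the AXI error
 * cause, and add GPR0 only when the revision supports it.
 */
static unsigned int toy_irq_causes(unsigned int comp_mask, int has_gpr0)
{
	unsigned int val = comp_mask | TOY_AXI_ERR;

	if (has_gpr0)
		val |= TOY_GPR0;
	return val;
}

int main(void)
{
	printf("0x%08X\n", toy_irq_causes(TOY_COMP_IRQ | TOY_CPP_ABORT, 1));
	return 0;
}
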
@@ -201,7 +238,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
- u32 signature_val;
+ u32 val, hw_rev_pidr, sig_cidr;
u64 dma_mask;
const struct cc_hw_data *hw_rev;
const struct of_device_id *dev_id;
@@ -231,6 +268,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
}
+ new_drvdata->comp_mask = CC_COMP_IRQ_MASK;
+
platform_set_drvdata(plat_dev, new_drvdata);
new_drvdata->plat_dev = plat_dev;
@@ -311,22 +350,57 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
+ new_drvdata->sec_disabled = cc_sec_disable;
+
if (hw_rev->rev <= CC_HW_REV_712) {
/* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
+ val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (val != hw_rev->sig) {
dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
+ val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ sig_cidr = val;
+ hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
+ } else {
+ /* Verify correct mapping */
+ val = cc_read_idr(new_drvdata, pidr_0124_offsets);
+ if (val != hw_rev->pidr_0124) {
+ dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
+ val, hw_rev->pidr_0124);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ hw_rev_pidr = val;
+
+ val = cc_read_idr(new_drvdata, cidr_0123_offsets);
+ if (val != hw_rev->cidr_0123) {
+ dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
+ val, hw_rev->cidr_0123);
rc = -EINVAL;
goto post_clk_err;
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
+ sig_cidr = val;
+
+ /* Check security disable state */
+ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
+ val &= CC_SECURITY_DISABLED_MASK;
+ new_drvdata->sec_disabled |= !!val;
+
+ if (!new_drvdata->sec_disabled) {
+ new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
+ if (new_drvdata->std_bodies & CC_STD_NIST)
+ new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
+ }
}
+ if (new_drvdata->sec_disabled)
+ dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");
+
/* Display HW versions */
- dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
- hw_rev->name, cc_ioread(new_drvdata, new_drvdata->ver_offset),
- DRV_MODULE_VERSION);
+ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
+ hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
rc = init_cc_regs(new_drvdata, true);
if (rc) {
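
The cc_read_idr() helper added above assembles four byte-wide ID registers into a single 32-bit identification word, reading one byte per register and interpreting the result as little-endian. Below is a minimal standalone sketch of that assembly in plain C (not driver code): shift-based packing replaces the union/le32_to_cpu pair, and read_reg() is a hypothetical stand-in for cc_ioread().

#include <stdint.h>
#include <stdio.h>

#define NUM_IDRS 4

/* Hypothetical register read: returns the byte held by the given "register" */
static uint8_t read_reg(const uint8_t *regs, unsigned int offset)
{
	return regs[offset];
}

static uint32_t read_idr(const uint8_t *regs, const unsigned int *offsets)
{
	uint32_t val = 0;
	int i;

	/* Pack byte i into bit position 8*i, i.e. little-endian assembly */
	for (i = 0; i < NUM_IDRS; i++)
		val |= (uint32_t)read_reg(regs, offsets[i]) << (8 * i);

	return val;
}

int main(void)
{
	/* Fake register file holding the four CIDR bytes of the ARM ID word */
	uint8_t regs[NUM_IDRS] = { 0x0D, 0xF0, 0x05, 0xB1 };
	unsigned int offsets[NUM_IDRS] = { 0, 1, 2, 3 };

	/* Prints 0xB105F00D, the cidr_0123 value expected by the probe code */
	printf("CIDR0123 = 0x%08X\n", (unsigned int)read_idr(regs, offsets));
	return 0;
}
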
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index 33dbf3e6d15d..b76181335c08 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_driver.h
* ARM CryptoCell Linux Crypto Driver
@@ -65,10 +65,32 @@ enum cc_std_body {
#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)
+#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)
+
#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)
+#define CC_CPP_AES_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))
+
+#define CC_CPP_SM4_ABORT_MASK ( \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
+ BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))
+
/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
@@ -81,7 +103,6 @@ enum cc_std_body {
#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
-#define MAX_ICV_NENTS_SUPPORTED 2
/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
@@ -90,6 +111,12 @@ enum cc_std_body {
* field in the HW descriptor. The DMA engine +8 that value.
*/
+struct cc_cpp_req {
+ bool is_cpp;
+ enum cc_cpp_alg alg;
+ u8 slot;
+};
+
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req, int err);
@@ -104,6 +131,7 @@ struct cc_crypto_req {
/* The generated IV size required, 8/16 B allowed. */
unsigned int ivgen_size;
struct completion seq_compl; /* request completion */
+ struct cc_cpp_req cpp;
};
/**
@@ -136,6 +164,8 @@ struct cc_drvdata {
u32 sig_offset;
u32 ver_offset;
int std_bodies;
+ bool sec_disabled;
+ u32 comp_mask;
};
struct cc_crypto_alg {
@@ -162,12 +192,14 @@ struct cc_alg_template {
int auth_mode;
u32 min_hw_rev;
enum cc_std_body std_body;
+ bool sec_func;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
+ u8 *iv;
enum drv_crypto_direction op_type;
};
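
The CC_CPP_AES_ABORT_MASK and CC_CPP_SM4_ABORT_MASK definitions above simply OR together the per-slot abort interrupt bits, and the probe code in cc_driver.c widens drvdata->comp_mask with them when security functions are enabled. The plain-C sketch below shows that composition and how an IRR snapshot is then tested against the widened mask; only a few representative bit positions are used, and BIT() is redefined locally.

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Representative shifts only; the full set lives in cc_host_regs.h */
enum { AES_ABORT_0 = 1, AES_ABORT_1 = 3, SM_ABORT_0 = 12, AXIM_COMP = 23 };

#define AES_ABORT_MASK (BIT(AES_ABORT_0) | BIT(AES_ABORT_1))
#define SM_ABORT_MASK  BIT(SM_ABORT_0)
#define COMP_IRQ_MASK  BIT(AXIM_COMP)

int main(void)
{
	/* Probe time: start from the basic completion bit, then widen it */
	unsigned long comp_mask = COMP_IRQ_MASK;
	int sec_disabled = 0;

	if (!sec_disabled)
		comp_mask |= SM_ABORT_MASK | AES_ABORT_MASK;

	/* ISR time: does this IRR snapshot carry any completion source? */
	unsigned long irr = BIT(AES_ABORT_1);

	printf("completion pending: %s\n", (irr & comp_mask) ? "yes" : "no");
	return 0;
}
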
diff --git a/drivers/crypto/ccree/cc_fips.c b/drivers/crypto/ccree/cc_fips.c
index b4d0a6d983e0..5ad3ffb7acaa 100644
--- a/drivers/crypto/ccree/cc_fips.c
+++ b/drivers/crypto/ccree/cc_fips.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/fips.h>
@@ -49,8 +49,6 @@ void cc_fips_fini(struct cc_drvdata *drvdata)
/* Kill tasklet */
tasklet_kill(&fips_h->tasklet);
-
- kfree(fips_h);
drvdata->fips_handle = NULL;
}
@@ -72,20 +70,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}
+/*
+ * This function checks if a CryptoCell TEE FIPS error has occurred
+ * and, if so, triggers a system error
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+ struct device *dev = drvdata_to_dev(p_drvdata);
+
+ if (!cc_get_tee_fips_status(p_drvdata))
+ tee_fips_error(dev);
+}
+
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
- struct device *dev = drvdata_to_dev(drvdata);
- u32 irq, state, val;
+ u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
- state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
- if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(drvdata);
}
/* after verifying that there is nothing to do,
@@ -104,7 +110,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
if (p_drvdata->hw_rev < CC_HW_REV_712)
return 0;
- fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
+ fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL);
if (!fips_h)
return -ENOMEM;
@@ -113,8 +119,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
- if (!cc_get_tee_fips_status(p_drvdata))
- tee_fips_error(dev);
+ cc_tee_handle_fips_error(p_drvdata);
return 0;
}
diff --git a/drivers/crypto/ccree/cc_fips.h b/drivers/crypto/ccree/cc_fips.h
index 645e096a7a82..fc33eeb4d566 100644
--- a/drivers/crypto/ccree/cc_fips.h
+++ b/drivers/crypto/ccree/cc_fips.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_FIPS_H__
#define __CC_FIPS_H__
@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 2c4ddc8fb76b..a6abe4e3bb0e 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/module.h>
@@ -69,6 +69,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
+ u8 *key;
};
/* hash per-session context */
@@ -280,9 +281,13 @@ static void cc_update_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_digest_complete(struct device *dev, void *cc_req, int err)
@@ -295,10 +300,14 @@ static void cc_digest_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static void cc_hash_complete(struct device *dev, void *cc_req, int err)
@@ -311,10 +320,14 @@ static void cc_hash_complete(struct device *dev, void *cc_req, int err)
dev_dbg(dev, "req=%pK\n", req);
- cc_unmap_hash_request(dev, state, req->src, false);
- cc_unmap_result(dev, state, digestsize, req->result);
- cc_unmap_req(dev, state, ctx);
- req->base.complete(&req->base, err);
+ if (err != -EINPROGRESS) {
+ /* Not a BACKLOG notification */
+ cc_unmap_hash_request(dev, state, req->src, false);
+ cc_unmap_result(dev, state, digestsize, req->result);
+ cc_unmap_req(dev, state, ctx);
+ }
+
+ ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
@@ -730,13 +743,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
+ ctx->key_params.key = NULL;
if (keylen) {
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+ DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
- key, keylen);
+ ctx->key_params.key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -887,6 +907,9 @@ out:
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
+
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -913,11 +936,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
+ ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
+ if (!ctx->key_params.key)
+ return -ENOMEM;
+
ctx->key_params.key_dma_addr =
- dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
+ dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
+ kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -969,6 +997,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
+ kzfree(ctx->key_params.key);
+
return rc;
}
@@ -1621,7 +1651,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1648,7 +1678,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
- .statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
+ .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
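
The setkey hunks above stop DMA-mapping the caller's key buffer directly: the key is first duplicated with kmemdup() into memory the driver owns, and the copy is scrubbed and released with kzfree() once the operation is done. The userspace sketch below illustrates that ownership pattern with libc only; malloc()/memset()/free() stand in for kmemdup()/kzfree(), and the asynchronous engine hand-off is elided.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define KEYLEN 16

static void *dup_key(const void *key, size_t len)
{
	void *copy = malloc(len);

	if (copy)
		memcpy(copy, key, len);	/* like kmemdup(key, len, GFP_KERNEL) */
	return copy;
}

static void free_key(void *key, size_t len)
{
	if (!key)
		return;
	/* Like kzfree(): scrub before freeing (real code should use a scrub
	 * the compiler cannot optimize away, e.g. explicit_bzero).
	 */
	memset(key, 0, len);
	free(key);
}

int main(void)
{
	const unsigned char user_key[] = "0123456789abcdef";
	unsigned char *own = dup_key(user_key, KEYLEN);

	if (!own)
		return 1;
	/* ... hand 'own' (not user_key) to the DMA/async engine here ... */
	printf("key duplicated, first byte 0x%02x\n", own[0]);
	free_key(own, KEYLEN);
	return 0;
}
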
diff --git a/drivers/crypto/ccree/cc_hash.h b/drivers/crypto/ccree/cc_hash.h
index 2e5bf8b0bbb6..0d6dc61484d7 100644
--- a/drivers/crypto/ccree/cc_hash.h
+++ b/drivers/crypto/ccree/cc_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_hash.h
* ARM CryptoCell Hash Crypto API
diff --git a/drivers/crypto/ccree/cc_host_regs.h b/drivers/crypto/ccree/cc_host_regs.h
index 616b2e1c41ba..d0764147573f 100644
--- a/drivers/crypto/ccree/cc_host_regs.h
+++ b/drivers/crypto/ccree/cc_host_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
#ifndef __CC_HOST_H__
#define __CC_HOST_H__
@@ -7,33 +7,102 @@
// --------------------------------------
// BLOCK: HOST_P
// --------------------------------------
+
+
+/* IRR */
#define CC_HOST_IRR_REG_OFFSET 0xA00UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SHIFT 0x2UL
#define CC_HOST_IRR_DSCRPTR_COMPLETION_LOW_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT 0x3UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT 0x4UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT 0x5UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT 0x6UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT 0x7UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT 0x8UL
#define CC_HOST_IRR_AXI_ERR_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT 0x9UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT 0xAUL
+#define CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IRR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT 0xCUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT 0xDUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT 0xEUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT 0xFUL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT 0x10UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT 0x11UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT 0x12UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SHIFT 0x13UL
#define CC_HOST_IRR_DSCRPTR_WATERMARK_INT_BIT_SIZE 0x1UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT 0x14UL
+#define CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SIZE 0x1UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT 0x17UL
#define CC_HOST_IRR_AXIM_COMP_INT_BIT_SIZE 0x1UL
#define CC_HOST_SEP_SRAM_THRESHOLD_REG_OFFSET 0xA10UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_SEP_SRAM_THRESHOLD_VALUE_BIT_SIZE 0xCUL
-#define CC_HOST_IMR_REG_OFFSET 0xA04UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SHIFT 0x1UL
-#define CC_HOST_IMR_NOT_USED_MASK_BIT_SIZE 0x1UL
+
+/* IMR */
+#define CC_HOST_IMR_REG_OFFSET 0x0A04UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SHIFT 0x2UL
#define CC_HOST_IMR_DSCRPTR_COMPLETION_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT 0x3UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT 0x4UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT 0x5UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT 0x6UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT 0x7UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SHIFT 0x8UL
#define CC_HOST_IMR_AXI_ERR_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT 0x9UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT 0xAUL
+#define CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_GPR0_BIT_SHIFT 0xBUL
#define CC_HOST_IMR_GPR0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT 0xCUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT 0xDUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT 0xEUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT 0xFUL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT 0x10UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT 0x11UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT 0x12UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SHIFT 0x13UL
#define CC_HOST_IMR_DSCRPTR_WATERMARK_MASK0_BIT_SIZE 0x1UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT 0x14UL
+#define CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SIZE 0x1UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SHIFT 0x17UL
#define CC_HOST_IMR_AXIM_COMP_INT_MASK_BIT_SIZE 0x1UL
+
+/* ICR */
#define CC_HOST_ICR_REG_OFFSET 0xA08UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SHIFT 0x2UL
#define CC_HOST_ICR_DSCRPTR_COMPLETION_BIT_SIZE 0x1UL
@@ -45,6 +114,9 @@
#define CC_HOST_ICR_DSCRPTR_WATERMARK_QUEUE0_CLEAR_BIT_SIZE 0x1UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SHIFT 0x17UL
#define CC_HOST_ICR_AXIM_COMP_INT_CLEAR_BIT_SIZE 0x1UL
+#define CC_SECURITY_DISABLED_REG_OFFSET 0x0A1CUL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SHIFT 0x0UL
+#define CC_SECURITY_DISABLED_VALUE_BIT_SIZE 0x1UL
#define CC_HOST_SIGNATURE_712_REG_OFFSET 0xA24UL
#define CC_HOST_SIGNATURE_630_REG_OFFSET 0xAC8UL
#define CC_HOST_SIGNATURE_VALUE_BIT_SHIFT 0x0UL
@@ -132,6 +204,49 @@
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SHIFT 0x0UL
#define CC_HOST_POWER_DOWN_EN_VALUE_BIT_SIZE 0x1UL
// --------------------------------------
+// BLOCK: ID_REGISTERS
+// --------------------------------------
+#define CC_PERIPHERAL_ID_4_REG_OFFSET 0x0FD0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_4_VALUE_BIT_SIZE 0x4UL
+#define CC_PIDRESERVED0_REG_OFFSET 0x0FD4UL
+#define CC_PIDRESERVED1_REG_OFFSET 0x0FD8UL
+#define CC_PIDRESERVED2_REG_OFFSET 0x0FDCUL
+#define CC_PERIPHERAL_ID_0_REG_OFFSET 0x0FE0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_PERIPHERAL_ID_1_REG_OFFSET 0x0FE4UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_1_PART_1_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_1_DES_0_JEP106_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_2_REG_OFFSET 0x0FE8UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_2_DES_1_JEP106_BIT_SIZE 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SHIFT 0x3UL
+#define CC_PERIPHERAL_ID_2_JEDEC_BIT_SIZE 0x1UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_2_REVISION_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REG_OFFSET 0x0FECUL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SHIFT 0x0UL
+#define CC_PERIPHERAL_ID_3_CMOD_BIT_SIZE 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SHIFT 0x4UL
+#define CC_PERIPHERAL_ID_3_REVAND_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_0_REG_OFFSET 0x0FF0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_0_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_1_REG_OFFSET 0x0FF4UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_1_PRMBL_1_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SHIFT 0x4UL
+#define CC_COMPONENT_ID_1_CLASS_BIT_SIZE 0x4UL
+#define CC_COMPONENT_ID_2_REG_OFFSET 0x0FF8UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_2_VALUE_BIT_SIZE 0x8UL
+#define CC_COMPONENT_ID_3_REG_OFFSET 0x0FFCUL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SHIFT 0x0UL
+#define CC_COMPONENT_ID_3_VALUE_BIT_SIZE 0x8UL
+// --------------------------------------
// BLOCK: HOST_SRAM
// --------------------------------------
#define CC_SRAM_DATA_REG_OFFSET 0xF00UL
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 7a9b90db7db7..9f4db9956e91 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_HW_QUEUE_DEFS_H__
#define __CC_HW_QUEUE_DEFS_H__
@@ -28,11 +28,13 @@
GENMASK(CC_REG_HIGH(word, name), CC_REG_LOW(word, name))
#define WORD0_VALUE CC_GENMASK(0, VALUE)
+#define WORD0_CPP_CIPHER_MODE CC_GENMASK(0, CPP_CIPHER_MODE)
#define WORD1_DIN_CONST_VALUE CC_GENMASK(1, DIN_CONST_VALUE)
#define WORD1_DIN_DMA_MODE CC_GENMASK(1, DIN_DMA_MODE)
#define WORD1_DIN_SIZE CC_GENMASK(1, DIN_SIZE)
#define WORD1_NOT_LAST CC_GENMASK(1, NOT_LAST)
#define WORD1_NS_BIT CC_GENMASK(1, NS_BIT)
+#define WORD1_LOCK_QUEUE CC_GENMASK(1, LOCK_QUEUE)
#define WORD2_VALUE CC_GENMASK(2, VALUE)
#define WORD3_DOUT_DMA_MODE CC_GENMASK(3, DOUT_DMA_MODE)
#define WORD3_DOUT_LAST_IND CC_GENMASK(3, DOUT_LAST_IND)
@@ -176,6 +178,15 @@ enum cc_hw_crypto_key {
END_OF_KEYS = S32_MAX,
};
+#define CC_NUM_HW_KEY_SLOTS 4
+#define CC_FIRST_HW_KEY_SLOT 0
+#define CC_LAST_HW_KEY_SLOT (CC_FIRST_HW_KEY_SLOT + CC_NUM_HW_KEY_SLOTS - 1)
+
+#define CC_NUM_CPP_KEY_SLOTS 8
+#define CC_FIRST_CPP_KEY_SLOT 16
+#define CC_LAST_CPP_KEY_SLOT (CC_FIRST_CPP_KEY_SLOT + \
+ CC_NUM_CPP_KEY_SLOTS - 1)
+
enum cc_hw_aes_key_size {
AES_128_KEY = 0,
AES_192_KEY = 1,
@@ -189,6 +200,9 @@ enum cc_hash_cipher_pad {
HASH_CIPHER_DO_PADDING_RESERVE32 = S32_MAX,
};
+#define CC_CPP_DIN_ADDR 0xFF00FF00UL
+#define CC_CPP_DIN_SIZE 0xFF00FFUL
+
/*****************************/
/* Descriptor packing macros */
/*****************************/
@@ -249,6 +263,25 @@ static inline void set_din_no_dma(struct cc_hw_desc *pdesc, u32 addr, u32 size)
}
/*
+ * Set up the special CPP descriptor
+ *
+ * @pdesc: pointer to the HW descriptor struct
+ * @slot: CPP key slot number
+ */
+static inline void set_cpp_crypto_key(struct cc_hw_desc *pdesc, u8 slot)
+{
+ pdesc->word[0] |= CC_CPP_DIN_ADDR;
+
+ pdesc->word[1] |= FIELD_PREP(WORD1_DIN_SIZE, CC_CPP_DIN_SIZE);
+ pdesc->word[1] |= FIELD_PREP(WORD1_LOCK_QUEUE, 1);
+
+ pdesc->word[4] |= FIELD_PREP(WORD4_SETUP_OPERATION, slot);
+}
+
+/*
* Set the DIN field of a HW descriptors to SRAM mode.
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
diff --git a/drivers/crypto/ccree/cc_ivgen.c b/drivers/crypto/ccree/cc_ivgen.c
index 769458323394..99dc69383e20 100644
--- a/drivers/crypto/ccree/cc_ivgen.c
+++ b/drivers/crypto/ccree/cc_ivgen.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <crypto/ctr.h>
#include "cc_driver.h"
@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
-
- /* release "this" context */
- kfree(ivgen_ctx);
}
/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;
/* Allocate "this" context */
- ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+ ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;
+ drvdata->ivgen_handle = ivgen_ctx;
+
/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}
- drvdata->ivgen_handle = ivgen_ctx;
-
return cc_init_iv_sram(drvdata);
out:
diff --git a/drivers/crypto/ccree/cc_ivgen.h b/drivers/crypto/ccree/cc_ivgen.h
index b6ac16903dda..a9f5e8bba4f1 100644
--- a/drivers/crypto/ccree/cc_ivgen.h
+++ b/drivers/crypto/ccree/cc_ivgen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_IVGEN_H__
#define __CC_IVGEN_H__
diff --git a/drivers/crypto/ccree/cc_kernel_regs.h b/drivers/crypto/ccree/cc_kernel_regs.h
index 8d7262a35156..582bae450596 100644
--- a/drivers/crypto/ccree/cc_kernel_regs.h
+++ b/drivers/crypto/ccree/cc_kernel_regs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_CRYS_KERNEL_H__
#define __CC_CRYS_KERNEL_H__
diff --git a/drivers/crypto/ccree/cc_lli_defs.h b/drivers/crypto/ccree/cc_lli_defs.h
index 64b15ac9f1d3..f891ab813f41 100644
--- a/drivers/crypto/ccree/cc_lli_defs.h
+++ b/drivers/crypto/ccree/cc_lli_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef _CC_LLI_DEFS_H_
#define _CC_LLI_DEFS_H_
@@ -14,7 +14,7 @@
#define CC_MAX_MLLI_ENTRY_SIZE 0xFFFF
#define LLI_MAX_NUM_OF_DATA_ENTRIES 128
-#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 4
+#define LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES 8
#define MLLI_TABLE_MIN_ALIGNMENT 4 /* 32 bit alignment */
#define MAX_NUM_OF_BUFFERS_IN_MLLI 4
#define MAX_NUM_OF_TOTAL_MLLI_ENTRIES \
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index 6ff7e75ad90e..2dad9c9543c6 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
#include <linux/interrupt.h>
@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
+#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
- cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+ /* Enable the device source clock */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
+ cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
+ /* check if tee fips error occurred during power down */
+ cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 907a6db4d6c0..6190cdba5dad 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_pm.h
*/
diff --git a/drivers/crypto/ccree/cc_request_mgr.c b/drivers/crypto/ccree/cc_request_mgr.c
index 83a8aaae61c7..0bc6ccb0b899 100644
--- a/drivers/crypto/ccree/cc_request_mgr.c
+++ b/drivers/crypto/ccree/cc_request_mgr.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include <linux/kernel.h>
+#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
@@ -52,11 +53,38 @@ struct cc_bl_item {
bool notif;
};
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+ { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+ alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+ slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+ return cc_cpp_int_masks[alg][slot];
+}
+
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
@@ -336,10 +364,12 @@ static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
struct cc_bl_item *bli)
{
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+ struct device *dev = drvdata_to_dev(drvdata);
spin_lock_bh(&mgr->bl_lock);
list_add_tail(&bli->list, &mgr->backlog);
++mgr->bl_len;
+ dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
spin_unlock_bh(&mgr->bl_lock);
tasklet_schedule(&mgr->comptask);
}
@@ -349,7 +379,7 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
struct cc_bl_item *bli;
struct cc_crypto_req *creq;
- struct crypto_async_request *req;
+ void *req;
bool ivgen;
unsigned int total_len;
struct device *dev = drvdata_to_dev(drvdata);
@@ -359,17 +389,20 @@ static void cc_proc_backlog(struct cc_drvdata *drvdata)
while (mgr->bl_len) {
bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+ dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
spin_unlock(&mgr->bl_lock);
+
creq = &bli->creq;
- req = (struct crypto_async_request *)creq->user_arg;
+ req = creq->user_arg;
/*
* Notify the request we're moving out of the backlog
* but only if we haven't done so already.
*/
if (!bli->notif) {
- req->complete(req, -EINPROGRESS);
+ creq->user_cb(dev, req, -EINPROGRESS);
bli->notif = true;
}
@@ -579,6 +612,8 @@ static void proc_completions(struct cc_drvdata *drvdata)
drvdata->request_mgr_handle;
unsigned int *tail = &request_mgr_handle->req_queue_tail;
unsigned int *head = &request_mgr_handle->req_queue_head;
+ int rc;
+ u32 mask;
while (request_mgr_handle->axi_completed) {
request_mgr_handle->axi_completed--;
@@ -596,8 +631,22 @@ static void proc_completions(struct cc_drvdata *drvdata)
cc_req = &request_mgr_handle->req_queue[*tail];
+ if (cc_req->cpp.is_cpp) {
+
+ dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+ cc_req->cpp.slot, cc_req->cpp.alg);
+ mask = cc_cpp_int_mask(cc_req->cpp.alg,
+ cc_req->cpp.slot);
+ rc = (drvdata->irq & mask ? -EPERM : 0);
+ dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+ drvdata->irq, rc);
+ } else {
+ dev_dbg(dev, "None CPP request completion\n");
+ rc = 0;
+ }
+
if (cc_req->user_cb)
- cc_req->user_cb(dev, cc_req->user_arg, 0);
+ cc_req->user_cb(dev, cc_req->user_arg, rc);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,47 +667,50 @@ static void comp_handler(unsigned long devarg)
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
-
+ struct device *dev = drvdata_to_dev(drvdata);
u32 irq;
- irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+ dev_dbg(dev, "Completion handler called!\n");
+ irq = (drvdata->irq & drvdata->comp_mask);
- if (irq & CC_COMP_IRQ_MASK) {
- /* To avoid the interrupt from firing as we unmask it,
- * we clear it now
- */
- cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+ /* To avoid the interrupt firing as we unmask it,
+ * we clear it now
+ */
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
- /* Avoid race with above clear: Test completion counter
- * once more
- */
- request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
-
- while (request_mgr_handle->axi_completed) {
- do {
- proc_completions(drvdata);
- /* At this point (after proc_completions()),
- * request_mgr_handle->axi_completed is 0.
- */
- request_mgr_handle->axi_completed =
- cc_axi_comp_count(drvdata);
- } while (request_mgr_handle->axi_completed > 0);
+ /* Avoid race with above clear: Test completion counter once more */
- cc_iowrite(drvdata, CC_REG(HOST_ICR),
- CC_COMP_IRQ_MASK);
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
+
+ dev_dbg(dev, "AXI completion after updated: %d\n",
+ request_mgr_handle->axi_completed);
+
+ while (request_mgr_handle->axi_completed) {
+ do {
+ drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+ irq = (drvdata->irq & drvdata->comp_mask);
+ proc_completions(drvdata);
+ /* At this point (after proc_completions()),
+ * request_mgr_handle->axi_completed is 0.
+ */
request_mgr_handle->axi_completed +=
- cc_axi_comp_count(drvdata);
- }
+ cc_axi_comp_count(drvdata);
+ } while (request_mgr_handle->axi_completed > 0);
+
+ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
}
+
/* after verifying that there is nothing to do,
* unmask AXI completion interrupt
*/
cc_iowrite(drvdata, CC_REG(HOST_IMR),
- cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+ cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
cc_proc_backlog(drvdata);
+ dev_dbg(dev, "Comp. handler done.\n");
}
/*
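
cc_cpp_int_mask() above clamps both indices with array_index_nospec() before the two-dimensional table lookup, so a mispredicted out-of-range algorithm or slot value cannot be used to speculatively read past the table. The sketch below shows the shape of that lookup in plain C, with an ordinary bounds check standing in for the nospec helper; the bit positions mirror the REE_OP_ABORTED shifts defined earlier in cc_host_regs.h.

#include <stdint.h>
#include <stdio.h>

#define NUM_ALGS  2
#define NUM_SLOTS 8
#define BIT(n)    (1U << (n))

static const uint32_t int_masks[NUM_ALGS][NUM_SLOTS] = {
	/* AES abort bits, slots 0..7 */
	{ BIT(1), BIT(3), BIT(4), BIT(5), BIT(6), BIT(7), BIT(9), BIT(10) },
	/* SM4 abort bits, slots 0..7 */
	{ BIT(12), BIT(13), BIT(14), BIT(15), BIT(16), BIT(17), BIT(18), BIT(20) },
};

static uint32_t cpp_int_mask(unsigned int alg, unsigned int slot)
{
	/* Plain bounds check; the kernel additionally uses
	 * array_index_nospec() so the check also holds under speculation.
	 */
	if (alg >= NUM_ALGS || slot >= NUM_SLOTS)
		return 0;
	return int_masks[alg][slot];
}

int main(void)
{
	printf("AES slot 2 mask:   0x%x\n", (unsigned int)cpp_int_mask(0, 2));
	printf("out-of-range mask: 0x%x\n", (unsigned int)cpp_int_mask(5, 99));
	return 0;
}
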
diff --git a/drivers/crypto/ccree/cc_request_mgr.h b/drivers/crypto/ccree/cc_request_mgr.h
index 573cb97af085..f46cf766fe4d 100644
--- a/drivers/crypto/ccree/cc_request_mgr.h
+++ b/drivers/crypto/ccree/cc_request_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
/* \file cc_request_mgr.h
* Request Manager
diff --git a/drivers/crypto/ccree/cc_sram_mgr.c b/drivers/crypto/ccree/cc_sram_mgr.c
index c8c276f6dee9..62c885e6e791 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.c
+++ b/drivers/crypto/ccree/cc_sram_mgr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#include "cc_driver.h"
#include "cc_sram_mgr.h"
@@ -19,8 +19,7 @@ struct cc_sram_ctx {
*/
void cc_sram_mgr_fini(struct cc_drvdata *drvdata)
{
- /* Free "this" context */
- kfree(drvdata->sram_mgr_handle);
+ /* Nothing needed */
}
/**
@@ -48,7 +47,7 @@ int cc_sram_mgr_init(struct cc_drvdata *drvdata)
}
/* Allocate "this" context */
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_sram_mgr.h b/drivers/crypto/ccree/cc_sram_mgr.h
index d48649fb3323..1d14de9ee8c3 100644
--- a/drivers/crypto/ccree/cc_sram_mgr.h
+++ b/drivers/crypto/ccree/cc_sram_mgr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
#ifndef __CC_SRAM_MGR_H__
#define __CC_SRAM_MGR_H__
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 8d8cf80b9294..177f572b9589 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -200,17 +200,10 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
- int err = 0;
-
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH)
- err = 1;
- else
- atomic_inc(&dev->inflight);
-
- spin_unlock_bh(&dev->lock_chcr_dev);
-
- return err;
+ return 1;
+ atomic_inc(&dev->inflight);
+ return 0;
}
static inline void chcr_dec_wrcount(struct chcr_dev *dev)
@@ -1101,8 +1094,8 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
int ret = 0;
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
- ctr_add_iv(iv, req->info, (reqctx->processed /
- AES_BLOCK_SIZE));
+ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
+ AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
@@ -2130,7 +2123,6 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
shash->tfm = hmacctx->base_hash;
- shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen,
hmacctx->ipad);
@@ -3517,7 +3509,6 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
SHASH_DESC_ON_STACK(shash, base_hash);
shash->tfm = base_hash;
- shash->flags = crypto_shash_get_flags(base_hash);
bs = crypto_shash_blocksize(base_hash);
align = KEYCTX_ALIGN_PAD(max_authsize);
o_ptr = actx->h_iopad + param.result_size + align;
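
The chcr_final_cipher_iv() hunk above replaces a truncating division with DIV_ROUND_UP because a CTR counter must advance by the number of blocks consumed, and a partial trailing block still consumes a whole counter value. A small arithmetic sketch of the difference (DIV_ROUND_UP open-coded, block size assumed to be AES's 16 bytes):

#include <stdio.h>

#define AES_BLOCK_SIZE 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int processed = 40;	/* 2.5 blocks of data handled */

	printf("truncating division: %u blocks\n", processed / AES_BLOCK_SIZE);
	printf("round-up division:   %u blocks\n",
	       DIV_ROUND_UP(processed, AES_BLOCK_SIZE));
	return 0;
}
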
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 239b933d6df6..029a7354f541 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -243,15 +243,11 @@ static void chcr_detach_device(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev = &u_ctx->dev;
- spin_lock_bh(&dev->lock_chcr_dev);
if (dev->state == CHCR_DETACH) {
- spin_unlock_bh(&dev->lock_chcr_dev);
pr_debug("Detached Event received for already detach device\n");
return;
}
dev->state = CHCR_DETACH;
- spin_unlock_bh(&dev->lock_chcr_dev);
-
if (atomic_read(&dev->inflight) != 0) {
schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
wait_for_completion(&dev->detach_comp);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 2f60049361ef..f429aae72542 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -575,7 +575,8 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
q->q.stops++;
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (!q->dbqt)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr_mid |= FW_ULPTX_WR_DATA_F;
wr->wreq.flowid_len16 = htonl(wr_mid);
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index dad212cabe63..d656be0a142b 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1976,6 +1976,29 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int hifn_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct hifn_context *ctx = crypto_ablkcipher_ctx(cipher);
+ struct hifn_device *dev = ctx->dev;
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ dev->flags &= ~HIFN_FLAG_OLD_KEY;
+
+ memcpy(ctx->key, key, len);
+ ctx->keysize = len;
+
+ return 0;
+}
+
static int hifn_handle_req(struct ablkcipher_request *req)
{
struct hifn_context *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -2240,7 +2263,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cfb,
.decrypt = hifn_decrypt_3des_cfb,
},
@@ -2250,7 +2273,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ofb,
.decrypt = hifn_decrypt_3des_ofb,
},
@@ -2261,7 +2284,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ivsize = HIFN_IV_LENGTH,
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_cbc,
.decrypt = hifn_decrypt_3des_cbc,
},
@@ -2271,7 +2294,7 @@ static struct hifn_alg_template hifn_alg_templates[] = {
.ablkcipher = {
.min_keysize = HIFN_3DES_KEY_LENGTH,
.max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_setkey,
+ .setkey = hifn_des3_setkey,
.encrypt = hifn_encrypt_3des_ecb,
.decrypt = hifn_decrypt_3des_ecb,
},
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index adc0cd8ae97b..02768af0dccd 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -365,20 +365,16 @@ static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES_KEY_SIZE * 3)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_ECB_192_3KEY);
}
static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE)
- return -EINVAL;
-
- return sec_alg_skcipher_setkey(tfm, key, keylen,
+ return unlikely(des3_verify_key(tfm, key)) ?:
+ sec_alg_skcipher_setkey(tfm, key, keylen,
SEC_C_3DES_CBC_192_3KEY);
}
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7ef30a98cb24..de4be10b172f 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1039,13 +1039,12 @@ static int safexcel_cbc_des3_ede_decrypt(struct skcipher_request *req)
static int safexcel_des3_ede_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(ctfm, key);
+ if (unlikely(err))
+ return err;
/* if context exits and key changed, need to invalidate it */
if (ctx->base.ctxr_dma) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 5c4659b04d70..f5414b6dfb55 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -30,8 +30,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32
@@ -758,14 +758,6 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
return -EINVAL;
}
cipher_cfg |= keylen_cfg;
- } else if (cipher_cfg & MOD_3DES) {
- const u32 *K = (const u32 *)key;
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
- {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
- return -EINVAL;
- }
} else {
u32 tmp[DES_EXPKEY_WORDS];
if (des_ekey(tmp, key) == 0) {
@@ -859,6 +851,19 @@ out:
return ret;
}
+static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ u32 flags = crypto_ablkcipher_get_flags(tfm);
+ int err;
+
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err))
+ crypto_ablkcipher_set_flags(tfm, flags);
+
+ return ablk_setkey(tfm, key, key_len);
+}
+
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
@@ -1175,6 +1180,43 @@ badkey:
return -EINVAL;
}
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+ u32 flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
+ struct crypto_authenc_keys keys;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen > sizeof(ctx->authkey))
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(tfm);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err))
+ goto badkey;
+
+ memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+ memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+ ctx->authkey_len = keys.authkeylen;
+ ctx->enckey_len = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+ crypto_aead_set_flags(tfm, flags);
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+}
+
static int aead_encrypt(struct aead_request *req)
{
return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
@@ -1220,6 +1262,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1232,6 +1275,7 @@ static struct ixp_alg ixp4xx_algos[] = {
.cra_u = { .ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = ablk_des3_setkey,
}
}
},
@@ -1313,6 +1357,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_md5,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1337,6 +1382,7 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = des3_aead_setkey,
},
.hash = &hash_alg_sha1,
.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
@@ -1443,7 +1489,7 @@ static int __init ixp_module_init(void)
/* authenc */
cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_ASYNC;
- cra->setkey = aead_setkey;
+ cra->setkey = cra->setkey ?: aead_setkey;
cra->setauthsize = aead_setauthsize;
cra->encrypt = aead_encrypt;
cra->decrypt = aead_decrypt;
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index fb279b3a1ca1..2fd936b19c6d 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -299,13 +299,12 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int len)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
+ int err;
- if (len != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ err = des3_verify_key(cipher, key);
+ if (unlikely(err))
+ return err;
memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 99ff54cc8a15..fd456dd703bf 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -135,11 +135,10 @@ static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
- unsigned int index, padlen;
+ unsigned int padlen;
buf[0] = 0x80;
/* Pad out to 56 mod 64 */
- index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
padlen = mv_cesa_ahash_pad_len(creq);
memset(buf + 1, 0, padlen - 1);
diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c
index 5f4f845adbb8..a0806ba40c68 100644
--- a/drivers/crypto/mediatek/mtk-sha.c
+++ b/drivers/crypto/mediatek/mtk-sha.c
@@ -365,7 +365,6 @@ static int mtk_sha_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, ctx->bs) ?:
@@ -810,8 +809,6 @@ static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = crypto_shash_get_flags(bctx->shash) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
if (keylen > bs) {
err = crypto_shash_digest(shash, key, keylen, bctx->ipad);
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
deleted file mode 100644
index 519086730791..000000000000
--- a/drivers/crypto/mxc-scc.c
+++ /dev/null
@@ -1,767 +0,0 @@
-/*
- * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
- *
- * The driver is based on information gathered from
- * drivers/mxc/security/mxc_scc.c which can be found in
- * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-
-#include <crypto/algapi.h>
-#include <crypto/des.h>
-
-/* Secure Memory (SCM) registers */
-#define SCC_SCM_RED_START 0x0000
-#define SCC_SCM_BLACK_START 0x0004
-#define SCC_SCM_LENGTH 0x0008
-#define SCC_SCM_CTRL 0x000C
-#define SCC_SCM_STATUS 0x0010
-#define SCC_SCM_ERROR_STATUS 0x0014
-#define SCC_SCM_INTR_CTRL 0x0018
-#define SCC_SCM_CFG 0x001C
-#define SCC_SCM_INIT_VECTOR_0 0x0020
-#define SCC_SCM_INIT_VECTOR_1 0x0024
-#define SCC_SCM_RED_MEMORY 0x0400
-#define SCC_SCM_BLACK_MEMORY 0x0800
-
-/* Security Monitor (SMN) Registers */
-#define SCC_SMN_STATUS 0x1000
-#define SCC_SMN_COMMAND 0x1004
-#define SCC_SMN_SEQ_START 0x1008
-#define SCC_SMN_SEQ_END 0x100C
-#define SCC_SMN_SEQ_CHECK 0x1010
-#define SCC_SMN_BIT_COUNT 0x1014
-#define SCC_SMN_BITBANK_INC_SIZE 0x1018
-#define SCC_SMN_BITBANK_DECREMENT 0x101C
-#define SCC_SMN_COMPARE_SIZE 0x1020
-#define SCC_SMN_PLAINTEXT_CHECK 0x1024
-#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
-#define SCC_SMN_TIMER_IV 0x102C
-#define SCC_SMN_TIMER_CONTROL 0x1030
-#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
-#define SCC_SMN_TIMER 0x1038
-
-#define SCC_SCM_CTRL_START_CIPHER BIT(2)
-#define SCC_SCM_CTRL_CBC_MODE BIT(1)
-#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
-
-#define SCC_SCM_STATUS_LEN_ERR BIT(12)
-#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
-#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
-#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
-#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
-#define SCC_SCM_STATUS_SEC_KEY BIT(7)
-#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
-#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
-#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
-#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
-#define SCC_SCM_STATUS_CIPHERING BIT(2)
-#define SCC_SCM_STATUS_ZEROIZING BIT(1)
-#define SCC_SCM_STATUS_BUSY BIT(0)
-
-#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
-#define SCC_SMN_STATE_START 0x0
-/* The SMN is zeroizing its RAM during reset */
-#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
-/* SMN has passed internal checks */
-#define SCC_SMN_STATE_HEALTH_CHECK 0x6
-/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
-#define SCC_SMN_STATE_FAIL 0x9
-/* SCC is in secure state. SCM is using secret key. */
-#define SCC_SMN_STATE_SECURE 0xA
-/* SCC is not secure. SCM is using default key. */
-#define SCC_SMN_STATE_NON_SECURE 0xC
-
-#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
-#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
-#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
-
-/* Size, in blocks, of Red memory. */
-#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
-#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
-/* Size, in blocks, of Black memory. */
-#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
-#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
-/* Number of bytes per block. */
-#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
-
-#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
-#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
-#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
-#define SCC_SMN_COMMAND_EN_INTR BIT(1)
-#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
-
-#define SCC_KEY_SLOTS 20
-#define SCC_MAX_KEY_SIZE 32
-#define SCC_KEY_SLOT_SIZE 32
-
-#define SCC_CRC_CCITT_START 0xFFFF
-
-/*
- * Offset into each RAM of the base of the area which is not
- * used for Stored Keys.
- */
-#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
-
-/* Fixed padding for appending to plaintext to fill out a block */
-static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
-
-enum mxc_scc_state {
- SCC_STATE_OK,
- SCC_STATE_UNIMPLEMENTED,
- SCC_STATE_FAILED
-};
-
-struct mxc_scc {
- struct device *dev;
- void __iomem *base;
- struct clk *clk;
- bool hw_busy;
- spinlock_t lock;
- struct crypto_queue queue;
- struct crypto_async_request *req;
- int block_size_bytes;
- int black_ram_size_blocks;
- int memory_size_bytes;
- int bytes_remaining;
-
- void __iomem *red_memory;
- void __iomem *black_memory;
-};
-
-struct mxc_scc_ctx {
- struct mxc_scc *scc;
- struct scatterlist *sg_src;
- size_t src_nents;
- struct scatterlist *sg_dst;
- size_t dst_nents;
- unsigned int offset;
- unsigned int size;
- unsigned int ctrl;
-};
-
-struct mxc_scc_crypto_tmpl {
- struct mxc_scc *scc;
- struct crypto_alg alg;
-};
-
-static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- size_t len;
- void __iomem *from;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- from = scc->red_memory;
- else
- from = scc->black_memory;
-
- dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
- ctx->dst_nents * 8);
- len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
- from, ctx->size, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
- return -EINVAL;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- ctx->offset += len;
-
- if (ctx->offset < ablkreq->nbytes)
- return -EINPROGRESS;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
- struct mxc_scc_ctx *ctx)
-{
- struct mxc_scc *scc = ctx->scc;
- int nents;
-
- nents = sg_nents_for_len(req->src, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of src SC");
- return nents;
- }
- ctx->src_nents = nents;
-
- nents = sg_nents_for_len(req->dst, req->nbytes);
- if (nents < 0) {
- dev_err(scc->dev, "Invalid number of dst SC");
- return nents;
- }
- ctx->dst_nents = nents;
-
- ctx->size = 0;
- ctx->offset = 0;
-
- return 0;
-}
-
-static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
- struct mxc_scc_ctx *ctx,
- int result)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
-
- scc->req = NULL;
- scc->bytes_remaining = scc->memory_size_bytes;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
- memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
- scc->block_size_bytes);
-
- req->complete(req, result);
- scc->hw_busy = false;
-
- return 0;
-}
-
-static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
- size_t len = min_t(size_t, req->nbytes - ctx->offset,
- ctx->scc->bytes_remaining);
- unsigned int padding_byte_count = 0;
- struct mxc_scc *scc = ctx->scc;
- void __iomem *to;
-
- if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
- to = scc->black_memory;
- else
- to = scc->red_memory;
-
- if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
- memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
- scc->block_size_bytes);
-
- len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
- to, len, ctx->offset);
- if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
- return -EINVAL;
- }
-
- ctx->size = len;
-
-#ifdef DEBUG
- dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
- print_hex_dump(KERN_ERR,
- "init vector0@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
- 1);
- print_hex_dump(KERN_ERR,
- "red memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->red_memory, ctx->size, 1);
- print_hex_dump(KERN_ERR,
- "black memory@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- scc->black_memory, ctx->size, 1);
-#endif
-
- scc->bytes_remaining -= len;
-
- padding_byte_count = len % scc->block_size_bytes;
-
- if (padding_byte_count) {
- memcpy(padding_buffer, scc_block_padding, padding_byte_count);
- memcpy(to + len, padding_buffer, padding_byte_count);
- ctx->size += padding_byte_count;
- }
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "data to encrypt@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4,
- to, ctx->size, 1);
-#endif
-
- return 0;
-}
-
-static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
- struct mxc_scc *scc = ctx->scc;
- int err;
-
- dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
- ablkreq->nbytes, ablkreq->src, ablkreq->dst);
-
- writel(0, scc->base + SCC_SCM_ERROR_STATUS);
-
- err = mxc_scc_put_data(ctx, ablkreq);
- if (err) {
- mxc_scc_ablkcipher_req_complete(req, ctx, err);
- return;
- }
-
- dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
- readl(scc->base + SCC_SCM_RED_START),
- readl(scc->base + SCC_SCM_BLACK_START));
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel((ctx->size / ctx->scc->block_size_bytes) - 1,
- scc->base + SCC_SCM_LENGTH);
-
- dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
- ctx->size / ctx->scc->block_size_bytes,
- (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
- scc->red_memory);
-
- writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
-}
-
-static irqreturn_t mxc_scc_int(int irq, void *priv)
-{
- struct crypto_async_request *req;
- struct mxc_scc_ctx *ctx;
- struct mxc_scc *scc = priv;
- int status;
- int ret;
-
- status = readl(scc->base + SCC_SCM_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
-
- if (status & SCC_SCM_STATUS_BUSY)
- return IRQ_NONE;
-
- req = scc->req;
- if (req) {
- ctx = crypto_tfm_ctx(req->tfm);
- ret = mxc_scc_get_data(ctx, req);
- if (ret != -EINPROGRESS)
- mxc_scc_ablkcipher_req_complete(req, ctx, ret);
- else
- mxc_scc_ablkcipher_next(ctx, req);
- }
-
- return IRQ_HANDLED;
-}
-
-static int mxc_scc_cra_init(struct crypto_tfm *tfm)
-{
- struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct mxc_scc_crypto_tmpl *algt;
-
- algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
-
- ctx->scc = algt->scc;
- return 0;
-}
-
-static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
-{
- struct crypto_async_request *req, *backlog;
-
- if (ctx->scc->hw_busy)
- return;
-
- spin_lock_bh(&ctx->scc->lock);
- backlog = crypto_get_backlog(&ctx->scc->queue);
- req = crypto_dequeue_request(&ctx->scc->queue);
- ctx->scc->req = req;
- ctx->scc->hw_busy = true;
- spin_unlock_bh(&ctx->scc->lock);
-
- if (!req)
- return;
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
- mxc_scc_ablkcipher_next(ctx, req);
-}
-
-static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
- struct crypto_async_request *req)
-{
- int ret;
-
- spin_lock_bh(&ctx->scc->lock);
- ret = crypto_enqueue_request(&ctx->scc->queue, req);
- spin_unlock_bh(&ctx->scc->lock);
-
- if (ret != -EINPROGRESS)
- return ret;
-
- mxc_scc_dequeue_req_unlocked(ctx);
-
- return -EINPROGRESS;
-}
-
-static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
- struct ablkcipher_request *req)
-{
- int err;
-
- err = mxc_scc_ablkcipher_req_init(req, ctx);
- if (err)
- return err;
-
- return mxc_scc_queue_req(ctx, &req->base);
-}
-
-static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
- struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
-
- ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
- ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
- ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
-
- return mxc_scc_des3_op(ctx, req);
-}
-
-static void mxc_scc_hw_init(struct mxc_scc *scc)
-{
- int offset;
-
- offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
-
- /* Fill the RED_START register */
- writel(offset, scc->base + SCC_SCM_RED_START);
-
- /* Fill the BLACK_START register */
- writel(offset, scc->base + SCC_SCM_BLACK_START);
-
- scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
- SCC_NON_RESERVED_OFFSET;
-
- scc->bytes_remaining = scc->memory_size_bytes;
-}
-
-static int mxc_scc_get_config(struct mxc_scc *scc)
-{
- int config;
-
- config = readl(scc->base + SCC_SCM_CFG);
-
- scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
-
- scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
-
- scc->memory_size_bytes = (scc->block_size_bytes *
- scc->black_ram_size_blocks) -
- SCC_NON_RESERVED_OFFSET;
-
- return 0;
-}
-
-static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
-{
- enum mxc_scc_state state;
- int status;
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
-
- /* If in Health Check, try to bringup to secure state */
- if (status & SCC_SMN_STATE_HEALTH_CHECK) {
- /*
- * Write a simple algorithm to the Algorithm Sequence
- * Checker (ASC)
- */
- writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
- writel(0x5555, scc->base + SCC_SMN_SEQ_END);
- writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
-
- status = readl(scc->base + SCC_SMN_STATUS) &
- SCC_SMN_STATUS_STATE_MASK;
- }
-
- switch (status) {
- case SCC_SMN_STATE_NON_SECURE:
- case SCC_SMN_STATE_SECURE:
- state = SCC_STATE_OK;
- break;
- case SCC_SMN_STATE_FAIL:
- state = SCC_STATE_FAILED;
- break;
- default:
- state = SCC_STATE_UNIMPLEMENTED;
- break;
- }
-
- return state;
-}
-
-static struct mxc_scc_crypto_tmpl scc_ecb_des = {
- .alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ecb-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_ecb_des_encrypt,
- .decrypt = mxc_scc_ecb_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl scc_cbc_des = {
- .alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "cbc-des3-scc",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct mxc_scc_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_init = mxc_scc_cra_init,
- .cra_u.ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .encrypt = mxc_scc_cbc_des_encrypt,
- .decrypt = mxc_scc_cbc_des_decrypt,
- }
- }
-};
-
-static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
- &scc_ecb_des,
- &scc_cbc_des,
-};
-
-static int mxc_scc_crypto_register(struct mxc_scc *scc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
- scc_crypto_algs[i]->scc = scc;
- err = crypto_register_alg(&scc_crypto_algs[i]->alg);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- while (--i >= 0)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-
- return err;
-}
-
-static void mxc_scc_crypto_unregister(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
- crypto_unregister_alg(&scc_crypto_algs[i]->alg);
-}
-
-static int mxc_scc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct mxc_scc *scc;
- enum mxc_scc_state state;
- int irq;
- int ret;
- int i;
-
- scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
- if (!scc)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scc->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(scc->base))
- return PTR_ERR(scc->base);
-
- scc->clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(scc->clk)) {
- dev_err(dev, "Could not get ipg clock\n");
- return PTR_ERR(scc->clk);
- }
-
- ret = clk_prepare_enable(scc->clk);
- if (ret)
- return ret;
-
- /* clear error status register */
- writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
-
- /* clear interrupt control registers */
- writel(SCC_SCM_INTR_CTRL_CLR_INTR |
- SCC_SCM_INTR_CTRL_MASK_INTR,
- scc->base + SCC_SCM_INTR_CTRL);
-
- writel(SCC_SMN_COMMAND_CLR_INTR |
- SCC_SMN_COMMAND_EN_INTR,
- scc->base + SCC_SMN_COMMAND);
-
- scc->dev = dev;
- platform_set_drvdata(pdev, scc);
-
- ret = mxc_scc_get_config(scc);
- if (ret)
- goto err_out;
-
- state = mxc_scc_get_state(scc);
-
- if (state != SCC_STATE_OK) {
- dev_err(dev, "SCC in unusable state %d\n", state);
- ret = -EINVAL;
- goto err_out;
- }
-
- mxc_scc_hw_init(scc);
-
- spin_lock_init(&scc->lock);
- /* FIXME: calculate queue from RAM slots */
- crypto_init_queue(&scc->queue, 50);
-
- for (i = 0; i < 2; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0) {
- dev_err(dev, "failed to get irq resource: %d\n", irq);
- ret = irq;
- goto err_out;
- }
-
- ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
- IRQF_ONESHOT, dev_name(dev), scc);
- if (ret)
- goto err_out;
- }
-
- ret = mxc_scc_crypto_register(scc);
- if (ret) {
- dev_err(dev, "could not register algorithms");
- goto err_out;
- }
-
- dev_info(dev, "registered successfully.\n");
-
- return 0;
-
-err_out:
- clk_disable_unprepare(scc->clk);
-
- return ret;
-}
-
-static int mxc_scc_remove(struct platform_device *pdev)
-{
- struct mxc_scc *scc = platform_get_drvdata(pdev);
-
- mxc_scc_crypto_unregister();
-
- clk_disable_unprepare(scc->clk);
-
- return 0;
-}
-
-static const struct of_device_id mxc_scc_dt_ids[] = {
- { .compatible = "fsl,imx25-scc", .data = NULL, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
-
-static struct platform_driver mxc_scc_driver = {
- .probe = mxc_scc_probe,
- .remove = mxc_scc_remove,
- .driver = {
- .name = "mxc-scc",
- .of_match_table = mxc_scc_dt_ids,
- },
-};
-
-module_platform_driver(mxc_scc_driver);
-MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
-MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a2105cf33abb..b4429891e368 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -471,7 +471,7 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
wake_up_process(sdcp->thread[actx->chan]);
- return -EINPROGRESS;
+ return ret;
}
static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
@@ -700,11 +700,7 @@ static int dcp_chan_thread_sha(void *data)
struct crypto_async_request *backlog;
struct crypto_async_request *arq;
-
- struct dcp_sha_req_ctx *rctx;
-
- struct ahash_request *req;
- int ret, fini;
+ int ret;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -725,11 +721,7 @@ static int dcp_chan_thread_sha(void *data)
backlog->complete(backlog, -EINPROGRESS);
if (arq) {
- req = ahash_request_cast(arq);
- rctx = ahash_request_ctx(req);
-
ret = dcp_sha_req_to_buf(arq);
- fini = rctx->fini;
arq->complete(arq, ret);
}
}
@@ -797,7 +789,7 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
- return -EINPROGRESS;
+ return ret;
}
static int dcp_sha_update(struct ahash_request *req)
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 9450c41211b2..0d5d3d8eb680 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -469,8 +469,6 @@ static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
return err;
shash->tfm = child_shash;
- shash->flags = crypto_ahash_get_flags(tfm) &
- CRYPTO_TFM_REQ_MAY_SLEEP;
bs = crypto_shash_blocksize(child_shash);
ds = crypto_shash_digestsize(child_shash);
@@ -788,13 +786,18 @@ static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
ctx->enc_type = n2alg->enc_type;
- if (keylen != (3 * DES_KEY_SIZE)) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
ctx->key_len = keylen;
memcpy(ctx->key.des3, key, keylen);
return 0;
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 66869976cfa2..57932848361b 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -296,7 +296,7 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -319,7 +319,6 @@ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
/* Init scatterlist */
@@ -427,7 +426,7 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
struct nx842_workmem *workmem;
struct nx842_scatterlist slin, slout;
struct nx_csbcpb *csbcpb;
- int ret = 0, max_sync_size;
+ int ret = 0;
unsigned long inbuf, outbuf;
struct vio_pfo_op op = {
.done = NULL,
@@ -451,7 +450,6 @@ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
rcu_read_unlock();
return -ENODEV;
}
- max_sync_size = local_devdata->max_sync_size;
dev = local_devdata->dev;
workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index d94e25df503b..f06565df2a12 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -353,7 +353,7 @@ static int decompress(struct nx842_crypto_ctx *ctx,
unsigned int adj_slen = slen;
u8 *src = p->in, *dst = p->out;
u16 padding = be16_to_cpu(g->padding);
- int ret, spadding = 0, dpadding = 0;
+ int ret, spadding = 0;
ktime_t timeout;
if (!slen || !required_len)
@@ -413,7 +413,6 @@ usesw:
spadding = 0;
dst = p->out;
dlen = p->oremain;
- dpadding = 0;
if (dlen < required_len) { /* have ignore bytes */
dst = ctx->dbounce;
dlen = BOUNCE_BUFFER_SIZE;
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index ad3358e74f5c..8f5820b78a83 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -105,8 +105,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -134,8 +133,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
atomic_inc(&(nx_ctx->stats->aes_ops));
@@ -279,8 +277,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -361,8 +358,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index a6764af83c6d..e06f0431dee5 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -162,8 +162,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -243,8 +242,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 92956bc6e45e..0293b17903d0 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -166,8 +166,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
@@ -249,8 +248,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
}
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
if (rc)
goto out;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 1ba2633e90d6..3d82d18ff810 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -656,9 +656,6 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- if (keylen != DES_KEY_SIZE && keylen != (3*DES_KEY_SIZE))
- return -EINVAL;
-
pr_debug("enter, keylen: %d\n", keylen);
/* Do we need to test against weak key? */
@@ -678,6 +675,28 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
return 0;
}
+static int omap_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ pr_debug("enter, keylen: %d\n", keylen);
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
static int omap_des_ecb_encrypt(struct ablkcipher_request *req)
{
return omap_des_crypt(req, FLAGS_ENCRYPT);
@@ -788,7 +807,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.cra_u.ablkcipher = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_ecb_encrypt,
.decrypt = omap_des_ecb_decrypt,
}
@@ -811,7 +830,7 @@ static struct crypto_alg algs_ecb_cbc[] = {
.min_keysize = 3*DES_KEY_SIZE,
.max_keysize = 3*DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = omap_des_setkey,
+ .setkey = omap_des3_setkey,
.encrypt = omap_des_cbc_encrypt,
.decrypt = omap_des_cbc_decrypt,
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 0641185bd82f..51b20abac464 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1055,7 +1055,6 @@ static int omap_sham_finish_hmac(struct ahash_request *req)
SHASH_DESC_ON_STACK(shash, bctx->shash);
shash->tfm = bctx->shash;
- shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, bctx->opad, bs) ?:
@@ -1226,7 +1225,6 @@ static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 21e5cae0a1e0..e641481a3cd9 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -39,7 +39,6 @@ static int padlock_sha_init(struct shash_desc *desc)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_init(&dctx->fallback);
}
@@ -48,7 +47,6 @@ static int padlock_sha_update(struct shash_desc *desc,
{
struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_update(&dctx->fallback, data, length);
}
@@ -65,7 +63,6 @@ static int padlock_sha_import(struct shash_desc *desc, const void *in)
struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
dctx->fallback.tfm = ctx->fallback;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_import(&dctx->fallback, in);
}
@@ -91,7 +88,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
@@ -153,7 +149,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
unsigned int leftover;
int err;
- dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_shash_export(&dctx->fallback, &state);
if (err)
goto out;
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 1b3acdeffede..05b89e703903 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -753,11 +753,6 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (len > DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
if (unlikely(!des_ekey(tmp, key)) &&
(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
@@ -772,6 +767,30 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
}
/*
+ * Set the 3DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+/*
* Set the key for an AES block cipher. Some key lengths are not supported in
* hardware so this must also check whether a fallback is needed.
*/
@@ -1196,7 +1215,7 @@ static const struct dev_pm_ops spacc_pm_ops = {
static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
- return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+ return dev ? dev_get_drvdata(dev) : NULL;
}
static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
@@ -1353,7 +1372,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
@@ -1380,7 +1399,7 @@ static struct spacc_alg ipsec_engine_algs[] = {
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_ablkcipher = {
- .setkey = spacc_des_setkey,
+ .setkey = spacc_des3_setkey,
.encrypt = spacc_ablk_encrypt,
.decrypt = spacc_ablk_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 975c75198f56..c8d401646902 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -164,7 +164,6 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
memset(ctx->ipad, 0, block_size);
memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
- shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index c9f324730d71..692a7aaee749 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -1300,8 +1300,6 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct akcipher_alg rsa = {
.encrypt = qat_rsa_enc,
.decrypt = qat_rsa_dec,
- .sign = qat_rsa_dec,
- .verify = qat_rsa_enc,
.set_pub_key = qat_rsa_setpubkey,
.set_priv_key = qat_rsa_setprivkey,
.max_size = qat_rsa_max_size,
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 154b6baa124e..8d3493855a70 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -198,6 +198,25 @@ weakkey:
return -EINVAL;
}
+static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+ unsigned int keylen)
+{
+ struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(ablk);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(ablk, flags);
+ return err;
+ }
+
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ return 0;
+}
+
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
struct crypto_tfm *tfm =
@@ -363,7 +382,8 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_ablkcipher.ivsize = def->ivsize;
alg->cra_ablkcipher.min_keysize = def->min_keysize;
alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ?
+ qce_des3_setkey : qce_ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
index 02dac6ae7e53..313759521a0f 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c
@@ -46,24 +46,36 @@ static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
return 0;
}
-static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
- const u8 *key, unsigned int keylen)
+static int rk_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
u32 tmp[DES_EXPKEY_WORDS];
- if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (!des_ekey(tmp, key) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
- if (keylen == DES_KEY_SIZE) {
- if (!des_ekey(tmp, key) &&
- (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
- return -EINVAL;
- }
+ ctx->keylen = keylen;
+ memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
+ return 0;
+}
+
+static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
ctx->keylen = keylen;
@@ -250,9 +262,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize;
- /* store the iv that need to be updated in chain mode */
- if (ctx->mode & RK_CRYPTO_DEC)
+ /* Store the iv that need to be updated in chain mode.
+ * And update the IV buffer to contain the next IV for decryption mode.
+ */
+ if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize);
+ sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
+ ivsize, dev->total - ivsize);
+ }
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
@@ -288,13 +305,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
- if (ivsize == DES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
- ivsize);
- else if (ivsize == AES_BLOCK_SIZE)
- memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
+ /* Update the IV buffer to contain the next IV for encryption mode. */
+ if (!(ctx->mode & RK_CRYPTO_DEC)) {
+ if (dev->aligned) {
+ memcpy(req->info, sg_virt(dev->sg_dst) +
+ dev->sg_dst->length - ivsize, ivsize);
+ } else {
+ memcpy(req->info, dev->addr_vir +
+ dev->count - ivsize, ivsize);
+ }
+ }
}
static void rk_update_iv(struct rk_crypto_info *dev)
@@ -457,7 +480,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
.cra_u.ablkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_ecb_encrypt,
.decrypt = rk_des_ecb_decrypt,
}
@@ -483,7 +506,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .setkey = rk_tdes_setkey,
+ .setkey = rk_des_setkey,
.encrypt = rk_des_cbc_encrypt,
.decrypt = rk_des_cbc_decrypt,
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 1afdcb81d8ed..9ef25230c199 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1534,7 +1534,6 @@ static int s5p_hash_shash_digest(struct crypto_shash *tfm, u32 flags,
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
- shash->flags = flags & ~CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_shash_digest(shash, data, len, out);
}
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8c32a3059b4a..fd11162a915e 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -354,7 +354,7 @@ static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
u8 state;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
state = SAHARA_STATUS_GET_STATE(status);
@@ -406,7 +406,7 @@ static void sahara_dump_descriptors(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
@@ -427,7 +427,7 @@ static void sahara_dump_links(struct sahara_dev *dev)
{
int i;
- if (!IS_ENABLED(DEBUG))
+ if (!__is_defined(DEBUG))
return;
for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 63aa78c0b12b..4491e2197d9f 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -24,6 +24,7 @@ config CRYPTO_DEV_STM32_CRYP
depends on ARCH_STM32
select CRYPTO_HASH
select CRYPTO_ENGINE
+ select CRYPTO_DES
help
This enables support for the CRYP (AES/DES/TDES) hw accelerator which
can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 23b0b7bd64c7..cddcc97875b2 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -137,7 +137,6 @@ struct stm32_cryp {
struct crypto_engine *engine;
- struct mutex lock; /* protects req / areq */
struct ablkcipher_request *req;
struct aead_request *areq;
@@ -394,6 +393,23 @@ static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
}
}
+static void stm32_cryp_get_iv(struct stm32_cryp *cryp)
+{
+ struct ablkcipher_request *req = cryp->req;
+ u32 *tmp = req->info;
+
+ if (!tmp)
+ return;
+
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV0RR));
+
+ if (is_aes(cryp)) {
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1LR));
+ *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, CRYP_IV1RR));
+ }
+}
+
static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
{
unsigned int i;
@@ -623,6 +639,9 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
/* Phase 4 : output tag */
err = stm32_cryp_read_auth_tag(cryp);
+ if (!err && (!(is_gcm(cryp) || is_ccm(cryp))))
+ stm32_cryp_get_iv(cryp);
+
if (cryp->sgs_copied) {
void *buf_in, *buf_out;
int pages, len;
@@ -645,18 +664,13 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
pm_runtime_mark_last_busy(cryp->dev);
pm_runtime_put_autosuspend(cryp->dev);
- if (is_gcm(cryp) || is_ccm(cryp)) {
+ if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
- cryp->areq = NULL;
- } else {
+ else
crypto_finalize_ablkcipher_request(cryp->engine, cryp->req,
err);
- cryp->req = NULL;
- }
memset(cryp->ctx->key, 0, cryp->ctx->keylen);
-
- mutex_unlock(&cryp->lock);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
@@ -753,19 +767,35 @@ static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ u32 tmp[DES_EXPKEY_WORDS];
+
if (keylen != DES_KEY_SIZE)
return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+
+ if ((crypto_ablkcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
+ unlikely(!des_ekey(tmp, key))) {
+ crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen)
{
- if (keylen != (3 * DES_KEY_SIZE))
- return -EINVAL;
- else
- return stm32_cryp_setkey(tfm, key, keylen);
+ u32 flags;
+ int err;
+
+ flags = crypto_ablkcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(tfm, flags);
+ return err;
+ }
+
+ return stm32_cryp_setkey(tfm, key, keylen);
}
static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
@@ -917,8 +947,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (!cryp)
return -ENODEV;
- mutex_lock(&cryp->lock);
-
rctx = req ? ablkcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
@@ -930,6 +958,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (req) {
cryp->req = req;
+ cryp->areq = NULL;
cryp->total_in = req->nbytes;
cryp->total_out = cryp->total_in;
} else {
@@ -955,6 +984,7 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
* <---------- total_out ----------------->
*/
cryp->areq = areq;
+ cryp->req = NULL;
cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
cryp->total_in = areq->assoclen + areq->cryptlen;
if (is_encrypt(cryp))
@@ -976,19 +1006,19 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
if (cryp->in_sg_len < 0) {
dev_err(cryp->dev, "Cannot get in_sg_len\n");
ret = cryp->in_sg_len;
- goto out;
+ return ret;
}
cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
if (cryp->out_sg_len < 0) {
dev_err(cryp->dev, "Cannot get out_sg_len\n");
ret = cryp->out_sg_len;
- goto out;
+ return ret;
}
ret = stm32_cryp_copy_sgs(cryp);
if (ret)
- goto out;
+ return ret;
scatterwalk_start(&cryp->in_walk, cryp->in_sg);
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
@@ -1000,10 +1030,6 @@ static int stm32_cryp_prepare_req(struct ablkcipher_request *req,
}
ret = stm32_cryp_hw_init(cryp);
-out:
- if (ret)
- mutex_unlock(&cryp->lock);
-
return ret;
}
@@ -1943,8 +1969,6 @@ static int stm32_cryp_probe(struct platform_device *pdev)
cryp->dev = dev;
- mutex_init(&cryp->lock);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cryp->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(cryp->regs))
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 4a6cc8a3045d..bfc49e67124b 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -181,8 +181,6 @@ struct stm32_hash_dev {
u32 dma_mode;
u32 dma_maxburst;
- spinlock_t lock; /* lock to protect queue */
-
struct ahash_request *req;
struct crypto_engine *engine;
@@ -977,7 +975,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
pm_runtime_get_sync(hdev->dev);
- while (!(stm32_hash_read(hdev, HASH_SR) & HASH_SR_DATA_INPUT_READY))
+ while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index 54fd714d53ca..b060a0810934 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -41,11 +41,6 @@ static int sun4i_ss_opti_poll(struct skcipher_request *areq)
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
@@ -134,6 +129,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
struct scatterlist *out_sg = areq->dst;
unsigned int ivsize = crypto_skcipher_ivsize(tfm);
struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct sun4i_ss_alg_template *algt;
u32 mode = ctx->mode;
/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
u32 rx_cnt = SS_RX_DEFAULT;
@@ -153,20 +150,20 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
unsigned int obo = 0; /* offset in bufo*/
unsigned int obl = 0; /* length of data in bufo */
unsigned long flags;
+ bool need_fallback;
if (!areq->cryptlen)
return 0;
- if (!areq->iv) {
- dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
- return -EINVAL;
- }
-
if (!areq->src || !areq->dst) {
dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
return -EINVAL;
}
+ algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+ if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
+ need_fallback = true;
+
/*
* if we have only SGs with size multiple of 4,
* we can use the SS optimized function
@@ -182,9 +179,24 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
out_sg = sg_next(out_sg);
}
- if (no_chunk == 1)
+ if (no_chunk == 1 && !need_fallback)
return sun4i_ss_opti_poll(areq);
+ if (need_fallback) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
+ skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
+ skcipher_request_set_callback(subreq, areq->base.flags, NULL,
+ NULL);
+ skcipher_request_set_crypt(subreq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ if (ctx->mode & SS_DECRYPTION)
+ err = crypto_skcipher_decrypt(subreq);
+ else
+ err = crypto_skcipher_encrypt(subreq);
+ skcipher_request_zero(subreq);
+ return err;
+ }
+
spin_lock_irqsave(&ss->slock, flags);
for (i = 0; i < op->keylen; i += 4)
@@ -458,6 +470,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct sun4i_ss_alg_template *algt;
+ const char *name = crypto_tfm_alg_name(tfm);
memset(op, 0, sizeof(struct sun4i_tfm_ctx));
@@ -468,9 +481,22 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
sizeof(struct sun4i_cipher_req_ctx));
+ op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback_tfm)) {
+ dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(op->fallback_tfm));
+ return PTR_ERR(op->fallback_tfm);
+ }
+
return 0;
}
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
+{
+ struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+ crypto_free_sync_skcipher(op->fallback_tfm);
+}
+
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
@@ -495,7 +521,11 @@ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
@@ -525,7 +555,11 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
@@ -533,14 +567,18 @@ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun4i_ss_ctx *ss = op->ss;
+ int err;
+
+ err = des3_verify_key(tfm, key);
+ if (unlikely(err))
+ return err;
- if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
- dev_err(ss->dev, "Invalid keylen %u\n", keylen);
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
op->keylen = keylen;
memcpy(op->key, key, keylen);
- return 0;
+
+ crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
+
}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
index 89adf9e0fed2..05b3d3c32f6d 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -92,11 +92,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -107,17 +108,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_aes_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -134,11 +135,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -154,11 +156,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "ecb-des-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -175,11 +178,12 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_driver_name = "cbc-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
@@ -190,16 +194,17 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.decrypt = sun4i_ss_ecb_des3_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_init = sun4i_ss_cipher_init,
+ .cra_exit = sun4i_ss_cipher_exit,
}
}
},
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
index a4b5ff2b72f8..f6936bb3b7be 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
}
} else {
/* Since we have the flag final, we can go up to modulo 4 */
- end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
+ if (areq->nbytes < 4)
+ end = 0;
+ else
+ end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
}
/* TODO if SGlen % 4 and !op->len then DMA */
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
index f3ac90692ac6..8c4ec9e93565 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -161,6 +161,7 @@ struct sun4i_tfm_ctx {
u32 keylen;
u32 keymode;
struct sun4i_ss_ctx *ss;
+ struct crypto_sync_skcipher *fallback_tfm;
};
struct sun4i_cipher_req_ctx {
@@ -203,6 +204,7 @@ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq);
int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq);
int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+void sun4i_ss_cipher_exit(struct crypto_tfm *tfm);
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index de78b54bcfb1..1d429fc073d1 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -913,6 +913,54 @@ badkey:
return -EINVAL;
}
+static int aead_des3_setkey(struct crypto_aead *authenc,
+ const u8 *key, unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+ u32 flags;
+ int err;
+
+ err = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (unlikely(err))
+ goto badkey;
+
+ err = -EINVAL;
+ if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
+ goto badkey;
+
+ if (keys.enckeylen != DES3_EDE_KEY_SIZE)
+ goto badkey;
+
+ flags = crypto_aead_get_flags(authenc);
+ err = __des3_verify_key(&flags, keys.enckey);
+ if (unlikely(err)) {
+ crypto_aead_set_flags(authenc, flags);
+ goto out;
+ }
+
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
+
+ ctx->keylen = keys.authkeylen + keys.enckeylen;
+ ctx->enckeylen = keys.enckeylen;
+ ctx->authkeylen = keys.authkeylen;
+ ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
+ DMA_TO_DEVICE);
+
+out:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+
+badkey:
+ crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ goto out;
+}
+
/*
* talitos_edesc - s/w-extended descriptor
* @src_nents: number of segments in input scatterlist
@@ -1527,12 +1575,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
struct device *dev = ctx->dev;
- u32 tmp[DES_EXPKEY_WORDS];
- if (keylen > TALITOS_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
+ if (ctx->keylen)
+ dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+
+ memcpy(&ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 tmp[DES_EXPKEY_WORDS];
if (unlikely(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
@@ -1541,15 +1599,23 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
return -EINVAL;
}
- if (ctx->keylen)
- dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
+ return ablkcipher_setkey(cipher, key, keylen);
+}
- memcpy(&ctx->key, key, keylen);
- ctx->keylen = keylen;
+static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ u32 flags;
+ int err;
- ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
+ }
- return 0;
+ return ablkcipher_setkey(cipher, key, keylen);
}
static void common_nonsnoop_unmap(struct device *dev,
@@ -2313,6 +2379,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2336,6 +2403,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2399,6 +2467,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2422,6 +2491,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2485,6 +2555,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2508,6 +2579,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2550,6 +2622,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2592,6 +2665,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2654,6 +2728,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
DESC_HDR_SEL0_DEU |
@@ -2676,6 +2751,7 @@ static struct talitos_alg_template driver_algs[] = {
},
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
+ .setkey = aead_des3_setkey,
},
.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
DESC_HDR_SEL0_DEU |
@@ -2748,6 +2824,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2764,6 +2841,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
+ .setkey = ablkcipher_des_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2781,6 +2859,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2798,6 +2877,7 @@ static struct talitos_alg_template driver_algs[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = ablkcipher_des3_setkey,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -3144,7 +3224,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_init = talitos_cra_init;
alg->cra_exit = talitos_cra_exit;
alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher.setkey = ablkcipher_setkey;
+ alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
+ ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
break;
@@ -3152,7 +3233,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg = &t_alg->algt.alg.aead.base;
alg->cra_exit = talitos_cra_exit;
t_alg->algt.alg.aead.init = talitos_cra_init_aead;
- t_alg->algt.alg.aead.setkey = aead_setkey;
+ t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
+ aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
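
The two hunks above let talitos_alg_alloc() keep a template-supplied setkey and only fall back to the generic handler when none was set, using GNU C's binary "?:" operator. A minimal standalone sketch of that default-callback pattern follows (userspace C built with the GNU extension; the function and type names are illustrative, not the driver's):

/* build: gcc -std=gnu11 elvis_setkey.c */
#include <stdio.h>

typedef int (*setkey_fn)(const unsigned char *key, unsigned int keylen);

static int generic_setkey(const unsigned char *key, unsigned int keylen)
{
    (void)key;
    printf("generic setkey, keylen=%u\n", keylen);
    return 0;
}

static int des3_setkey(const unsigned char *key, unsigned int keylen)
{
    (void)key;
    printf("des3-aware setkey, keylen=%u\n", keylen);
    return 0;
}

int main(void)
{
    setkey_fn from_template = des3_setkey; /* template provided its own hook */
    setkey_fn unset = NULL;                /* template left the hook empty   */

    /* GNU "a ?: b" evaluates to a when a is non-NULL, otherwise to b. */
    setkey_fn chosen_des3 = from_template ?: generic_setkey;
    setkey_fn chosen_other = unset ?: generic_setkey;

    chosen_des3(NULL, 24);
    chosen_other(NULL, 16);
    return 0;
}

The same idea carries the new aead_des3_setkey/ablkcipher_des3_setkey hooks: only the DES3 templates install a key-verifying setkey, every other template keeps the generic one.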
diff --git a/drivers/crypto/ux500/cryp/Makefile b/drivers/crypto/ux500/cryp/Makefile
index b497ae3dde07..dc8fff29c4b3 100644
--- a/drivers/crypto/ux500/cryp/Makefile
+++ b/drivers/crypto/ux500/cryp/Makefile
@@ -3,11 +3,7 @@
# * Author: shujuan.chen@stericsson.com for ST-Ericsson.
# * License terms: GNU General Public License (GPL) version 2 */
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_cryp_core.o := -DDEBUG
-CFLAGS_cryp.o := -DDEBUG
-CFLAGS_cryp_irq.o := -DDEBUG
-endif
+ccflags-$(CONFIG_CRYPTO_DEV_UX500_DEBUG) += -DDEBUG
obj-$(CONFIG_CRYPTO_DEV_UX500_CRYP) += ux500_cryp.o
ux500_cryp-objs := cryp.o cryp_irq.o cryp_core.o
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3235611928f2..7a93cba0877f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1019,37 +1019,16 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- u32 *flags = &cipher->base.crt_flags;
- const u32 *K = (const u32 *)key;
- u32 tmp[DES3_EDE_EXPKEY_WORDS];
- int i, ret;
+ u32 flags;
+ int err;
pr_debug(DEV_DBG_NAME " [%s]", __func__);
- if (keylen != DES3_EDE_KEY_SIZE) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_BAD_KEY_LEN",
- __func__);
- return -EINVAL;
- }
- /* Checking key interdependency for weak key detection. */
- if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
- !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
- for (i = 0; i < 3; i++) {
- ret = des_ekey(tmp, key + i*DES_KEY_SIZE);
- if (unlikely(ret == 0) &&
- (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
- pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY",
- __func__);
- return -EINVAL;
- }
+ flags = crypto_ablkcipher_get_flags(cipher);
+ err = __des3_verify_key(&flags, key);
+ if (unlikely(err)) {
+ crypto_ablkcipher_set_flags(cipher, flags);
+ return err;
}
memcpy(ctx->key, key, keylen);
@@ -1219,57 +1198,6 @@ static struct cryp_algo_template cryp_algs[] = {
{
.algomode = CRYP_ALGO_DES_ECB,
.crypto = {
- .cra_name = "des",
- .cra_driver_name = "des-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
-
- },
- {
- .algomode = CRYP_ALGO_TDES_ECB,
- .crypto = {
- .cra_name = "des3_ede",
- .cra_driver_name = "des3_ede-ux500",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
- CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct cryp_ctx),
- .cra_alignmask = 3,
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = cryp_cra_init,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .ablkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des_ablkcipher_setkey,
- .encrypt = cryp_blk_encrypt,
- .decrypt = cryp_blk_decrypt
- }
- }
- }
- },
- {
- .algomode = CRYP_ALGO_DES_ECB,
- .crypto = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-ux500",
.cra_priority = 300,
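
Replacing the open-coded 3DES checks with __des3_verify_key() (here and in the talitos hunks earlier) centralises one policy: reject keys whose halves coincide, since K1 == K2 or K2 == K3 collapses EDE to single DES. A rough standalone illustration of that interdependency test, not the kernel helper itself:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DES_KEY_SIZE       8
#define DES3_EDE_KEY_SIZE  (3 * DES_KEY_SIZE)

/* True when K1 == K2 or K2 == K3, i.e. the 3DES key degrades to single DES. */
static int des3_key_degenerate(const uint8_t *key)
{
    return !memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
           !memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE, DES_KEY_SIZE);
}

int main(void)
{
    uint8_t weak[DES3_EDE_KEY_SIZE] = { 0 };   /* all three halves equal */
    uint8_t ok[DES3_EDE_KEY_SIZE];
    int i;

    for (i = 0; i < DES3_EDE_KEY_SIZE; i++)
        ok[i] = (uint8_t)i;                    /* three distinct halves */

    printf("degenerate key rejected: %d\n", des3_key_degenerate(weak));
    printf("distinct key rejected:   %d\n", des3_key_degenerate(ok));
    return 0;
}

As the callers above show, the kernel helper is also handed the tfm flags, so rejection of such keys can stay conditional on CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, exactly as the removed open-coded version behaved.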
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index d7316f7a3a69..603a62081994 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include "aesp8-ppc.h"
@@ -78,20 +79,21 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_encrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
@@ -108,7 +110,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
crypto_cipher_decrypt_one(ctx->fallback, dst, src);
} else {
preempt_disable();
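
The ret += to ret |= change (repeated in the CBC, CTR and XTS glue below) stops setkey from returning an arithmetic sum of unrelated error codes; any non-zero result is now collapsed to -EINVAL. A small standalone sketch of the difference, with invented helper names standing in for the key-expansion routines:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for key-expansion helpers that return 0 on success. */
static int expand_enc_key(int fail) { return fail ? -1 : 0; }
static int expand_dec_key(int fail) { return fail ? -2 : 0; }

static int setkey_by_sum(int fail_enc, int fail_dec)
{
    int ret = expand_enc_key(fail_enc);

    ret += expand_dec_key(fail_dec);   /* -1 + -2 = -3, not a meaningful errno */
    return ret;
}

static int setkey_by_or(int fail_enc, int fail_dec)
{
    int ret = expand_enc_key(fail_enc);

    ret |= expand_dec_key(fail_dec);   /* only "did anything fail" is kept */
    return ret ? -EINVAL : 0;          /* one well-defined error code */
}

int main(void)
{
    printf("both fail: sum=%d or=%d\n", setkey_by_sum(1, 1), setkey_by_or(1, 1));
    printf("both ok:   sum=%d or=%d\n", setkey_by_sum(0, 0), setkey_by_or(0, 0));
    return 0;
}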
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index c5c5ff82b52e..a1a9a6f0d42c 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -81,13 +82,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
@@ -99,7 +101,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
@@ -138,7 +140,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
struct p8_aes_cbc_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 8a2fe092cb8e..192a53512f5e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
@@ -83,8 +84,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
@@ -118,7 +120,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
struct p8_aes_ctr_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index ecd64e5cc5bb..00d412d811ae 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -23,9 +23,10 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
+#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/skcipher.h>
@@ -86,14 +87,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
@@ -108,7 +110,7 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
struct p8_aes_xts_ctx *ctx =
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
- if (in_interrupt()) {
+ if (!crypto_simd_usable()) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index d6a9f63d65ba..de78282b8f44 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
stvx_u $out1,$x10,$out
stvx_u $out2,$x20,$out
addi $out,$out,0x30
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
stvx_u $out0,$x00,$out
stvx_u $out1,$x10,$out
addi $out,$out,0x20
- b Lcbc_dec8x_done
+ b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_one:
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index dd8b8716467a..b5a6883bb09e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -23,16 +23,15 @@
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
-#include <linux/hardirq.h>
+#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/ghash.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
#include <crypto/b128ops.h>
-#define IN_INTERRUPT in_interrupt()
-
void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
@@ -102,7 +101,6 @@ static int p8_ghash_init(struct shash_desc *desc)
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
return crypto_shash_init(&dctx->fallback_desc);
}
@@ -131,7 +129,7 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_update(&dctx->fallback_desc, src,
srclen);
} else {
@@ -182,7 +180,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
+ if (!crypto_simd_usable()) {
return crypto_shash_final(&dctx->fallback_desc, out);
} else {
if (dctx->bytes) {
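
Throughout the vmx glue, in_interrupt()/IN_INTERRUPT gives way to crypto_simd_usable(): the question becomes "may the vector unit be touched in this context" rather than only "am I in interrupt context", with the generic fallback used otherwise. A trivial sketch of that dispatch shape; the predicate and both paths are placeholders, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for a crypto_simd_usable()-style predicate. */
static bool simd_usable(void) { return true; }

static void encrypt_simd(const char *msg)     { printf("accelerated path: %s\n", msg); }
static void encrypt_fallback(const char *msg) { printf("generic path: %s\n", msg); }

/* Prefer the accelerated path, fall back when SIMD must not be used. */
static void encrypt(const char *msg)
{
    if (!simd_usable())
        encrypt_fallback(msg);
    else
        encrypt_simd(msg);
}

int main(void)
{
    encrypt("one block");
    return 0;
}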
diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c
index 31a98dc6f849..a9f519830615 100644
--- a/drivers/crypto/vmx/vmx.c
+++ b/drivers/crypto/vmx/vmx.c
@@ -41,7 +41,7 @@ static struct crypto_alg *algs[] = {
NULL,
};
-int __init p8_init(void)
+static int __init p8_init(void)
{
int ret = 0;
struct crypto_alg **alg_it;
@@ -67,7 +67,7 @@ int __init p8_init(void)
return ret;
}
-void __exit p8_exit(void)
+static void __exit p8_exit(void)
{
struct crypto_alg **alg_it;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5ef624fe3934..a59f338f520f 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,7 +23,6 @@ config DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
- depends on m # until we can kill DEV_DAX_PMEM_COMPAT
default DEV_DAX
help
Support raw access to persistent memory. Note that this
@@ -50,7 +49,7 @@ config DEV_DAX_KMEM
config DEV_DAX_PMEM_COMPAT
tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on DEV_DAX_PMEM
+ depends on m && DEV_DAX_PMEM=m
default DEV_DAX_PMEM
help
Older versions of the libdaxctl library expect to find all
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index e428468ab661..996d68ff992a 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -184,8 +184,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -235,8 +234,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
- return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
- vmf->flags & FAULT_FLAG_WRITE);
+ return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index f71019ce0647..f9f51786d556 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -37,13 +37,13 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
devm_nsio_disable(dev, nsio);
/* reserve the metadata area, device-dax will reserve the data */
- pfn_sb = nd_pfn->pfn_sb;
+ pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve metadata\n");
+ dev_warn(dev, "could not reserve metadata\n");
return ERR_PTR(-EBUSY);
- }
+ }
rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
if (rc != 2)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0a339b85133e..bbd57ca0634a 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -412,11 +412,9 @@ static struct dax_device *to_dax_dev(struct inode *inode)
return container_of(inode, struct dax_device, inode);
}
-static void dax_i_callback(struct rcu_head *head)
+static void dax_free_inode(struct inode *inode)
{
- struct inode *inode = container_of(head, struct inode, i_rcu);
struct dax_device *dax_dev = to_dax_dev(inode);
-
kfree(dax_dev->host);
dax_dev->host = NULL;
if (inode->i_rdev)
@@ -427,16 +425,15 @@ static void dax_i_callback(struct rcu_head *head)
static void dax_destroy_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
-
WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n");
- call_rcu(&inode->i_rcu, dax_i_callback);
}
static const struct super_operations dax_sops = {
.statfs = simple_statfs,
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
+ .free_inode = dax_free_inode,
.drop_inode = generic_delete_inode,
};
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index d67242d87744..87e93406d7cd 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -240,7 +240,7 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
}
list_for_each_entry(edev, &devfreq_event_list, node) {
- if (!strcmp(edev->desc->name, node->name))
+ if (of_node_name_eq(node, edev->desc->name))
goto out;
}
edev = NULL;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 0ae3de76833b..6b6991f0e873 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -29,6 +29,9 @@
#include <linux/of.h>
#include "governor.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/devfreq.h>
+
static struct class *devfreq_class;
/*
@@ -228,7 +231,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
* if is not found. This can happen when both drivers (the governor driver
* and the driver that call devfreq_add_device) are built as modules.
* devfreq_list_lock should be held by the caller. Returns the matched
- * governor's pointer.
+ * governor's pointer or an error pointer.
*/
static struct devfreq_governor *try_then_request_governor(const char *name)
{
@@ -254,7 +257,7 @@ static struct devfreq_governor *try_then_request_governor(const char *name)
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
- return NULL;
+ return ERR_PTR(err);
governor = find_devfreq_governor(name);
}
@@ -394,6 +397,8 @@ static void devfreq_monitor(struct work_struct *work)
queue_delayed_work(devfreq_wq, &devfreq->work,
msecs_to_jiffies(devfreq->profile->polling_ms));
mutex_unlock(&devfreq->lock);
+
+ trace_devfreq_monitor(devfreq);
}
/**
@@ -528,7 +533,7 @@ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
mutex_lock(&devfreq->lock);
if (!devfreq->stop_polling)
queue_delayed_work(devfreq_wq, &devfreq->work,
- msecs_to_jiffies(devfreq->profile->polling_ms));
+ msecs_to_jiffies(devfreq->profile->polling_ms));
}
out:
mutex_unlock(&devfreq->lock);
@@ -537,7 +542,7 @@ EXPORT_SYMBOL(devfreq_interval_update);
/**
* devfreq_notifier_call() - Notify that the device frequency requirements
- * has been changed out of devfreq framework.
+ * has been changed out of devfreq framework.
* @nb: the notifier_block (supposed to be devfreq->nb)
* @type: not used
* @devp: not used
@@ -651,7 +656,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
if (err < 0)
- goto err_out;
+ goto err_dev;
mutex_lock(&devfreq->lock);
}
@@ -683,16 +688,27 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
- devfreq->trans_table =
- devm_kzalloc(&devfreq->dev,
- array3_size(sizeof(unsigned int),
- devfreq->profile->max_state,
- devfreq->profile->max_state),
- GFP_KERNEL);
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev,
+ array3_size(sizeof(unsigned int),
+ devfreq->profile->max_state,
+ devfreq->profile->max_state),
+ GFP_KERNEL);
+ if (!devfreq->trans_table) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->time_in_state = devm_kcalloc(&devfreq->dev,
- devfreq->profile->max_state,
- sizeof(unsigned long),
- GFP_KERNEL);
+ devfreq->profile->max_state,
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!devfreq->time_in_state) {
+ mutex_unlock(&devfreq->lock);
+ err = -ENOMEM;
+ goto err_devfreq;
+ }
+
devfreq->last_stat_updated = jiffies;
srcu_init_notifier_head(&devfreq->transition_notifier_list);
@@ -726,7 +742,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
mutex_unlock(&devfreq_list_lock);
-
+err_devfreq:
devfreq_remove_device(devfreq);
devfreq = NULL;
err_dev:
@@ -1113,7 +1129,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
- struct devfreq_governor *governor;
+ const struct devfreq_governor *governor, *prev_governor;
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
@@ -1142,12 +1158,24 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ prev_governor = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
- if (ret)
+ if (ret) {
dev_warn(dev, "%s: Governor %s not started(%d)\n",
__func__, df->governor->name, ret);
+ df->governor = prev_governor;
+ strncpy(df->governor_name, prev_governor->name,
+ DEVFREQ_NAME_LEN);
+ ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ if (ret) {
+ dev_err(dev,
+ "%s: reverting to Governor %s failed (%d)\n",
+ __func__, df->governor_name, ret);
+ df->governor = NULL;
+ }
+ }
out:
mutex_unlock(&devfreq_list_lock);
@@ -1172,7 +1200,7 @@ static ssize_t available_governors_show(struct device *d,
*/
if (df->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
- "%s ", df->governor_name);
+ "%s ", df->governor_name);
/*
* The devfreq device shows the registered governor except for
* immutable governors such as passive governor .
@@ -1485,8 +1513,8 @@ EXPORT_SYMBOL(devfreq_recommended_opp);
/**
* devfreq_register_opp_notifier() - Helper function to get devfreq notified
- * for any changes in the OPP availability
- * changes
+ * for any changes in the OPP availability
+ * changes
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1498,8 +1526,8 @@ EXPORT_SYMBOL(devfreq_register_opp_notifier);
/**
* devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
- * notified for any changes in the OPP
- * availability changes anymore.
+ * notified for any changes in the OPP
+ * availability changes anymore.
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*
@@ -1518,8 +1546,8 @@ static void devm_devfreq_opp_release(struct device *dev, void *res)
}
/**
- * devm_ devfreq_register_opp_notifier()
- * - Resource-managed devfreq_register_opp_notifier()
+ * devm_devfreq_register_opp_notifier() - Resource-managed
+ * devfreq_register_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1547,8 +1575,8 @@ int devm_devfreq_register_opp_notifier(struct device *dev,
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
/**
- * devm_devfreq_unregister_opp_notifier()
- * - Resource-managed devfreq_unregister_opp_notifier()
+ * devm_devfreq_unregister_opp_notifier() - Resource-managed
+ * devfreq_unregister_opp_notifier()
* @dev: The devfreq user device. (parent of devfreq)
* @devfreq: The devfreq object.
*/
@@ -1567,8 +1595,8 @@ EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct notifier_block *nb,
+ unsigned int list)
{
int ret = 0;
@@ -1674,9 +1702,9 @@ EXPORT_SYMBOL(devm_devfreq_register_notifier);
* @list: DEVFREQ_TRANSITION_NOTIFIER.
*/
void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
devm_devfreq_dev_match, devfreq));
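
The governor_store() change above remembers the previous governor and restarts it if the newly selected one fails at DEVFREQ_GOV_START, rather than leaving the device with a half-switched governor. A compact sketch of that revert pattern (plain C, all names invented for illustration):

#include <stdio.h>

struct governor {
    const char *name;
    int (*start)(void);
};

static int start_ok(void)   { return 0; }
static int start_fail(void) { return -22; }

/* Switch governors, restoring the previous one if the new governor
 * fails to start; clear the pointer if even the revert fails. */
static int switch_governor(const struct governor **cur,
                           const struct governor *next)
{
    const struct governor *prev = *cur;
    int ret;

    *cur = next;
    ret = next->start();
    if (ret) {
        fprintf(stderr, "governor %s not started (%d), reverting to %s\n",
                next->name, ret, prev->name);
        *cur = prev;
        ret = prev->start();
        if (ret)
            *cur = NULL;
    }
    return ret;
}

int main(void)
{
    const struct governor simple = { "simple_ondemand", start_ok };
    const struct governor broken = { "userspace", start_fail };
    const struct governor *cur = &simple;

    switch_governor(&cur, &broken);
    printf("active governor: %s\n", cur ? cur->name : "(none)");
    return 0;
}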
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index c61de0bdf053..c2ea94957501 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -529,7 +529,7 @@ static int of_get_devfreq_events(struct device_node *np,
if (!ppmu_events[i].name)
continue;
- if (!of_node_cmp(node->name, ppmu_events[i].name))
+ if (of_node_name_eq(node, ppmu_events[i].name))
break;
}
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 22b113363ffc..a436ec4901bb 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -26,6 +26,8 @@
#include <linux/list.h>
#include <linux/of.h>
+#include <soc/rockchip/rk3399_grf.h>
+
#define RK3399_DMC_NUM_CH 2
/* DDRMON_CTRL */
@@ -43,18 +45,6 @@
#define DDRMON_CH1_COUNT_NUM 0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM 0x40
-/* pmu grf */
-#define PMUGRF_OS_REG2 0x308
-#define DDRTYPE_SHIFT 13
-#define DDRTYPE_MASK 7
-
-enum {
- DDR3 = 3,
- LPDDR3 = 6,
- LPDDR4 = 7,
- UNUSED = 0xFF
-};
-
struct dmc_usage {
u32 access;
u32 total;
@@ -83,16 +73,17 @@ static void rockchip_dfi_start_hardware_counter(struct devfreq_event_dev *edev)
u32 ddr_type;
/* get ddr type */
- regmap_read(info->regmap_pmu, PMUGRF_OS_REG2, &val);
- ddr_type = (val >> DDRTYPE_SHIFT) & DDRTYPE_MASK;
+ regmap_read(info->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
/* clear DDRMON_CTRL setting */
writel_relaxed(CLR_DDRMON_CTRL, dfi_regs + DDRMON_CTRL);
/* set ddr type to dfi */
- if (ddr_type == LPDDR3)
+ if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR3)
writel_relaxed(LPDDR3_EN, dfi_regs + DDRMON_CTRL);
- else if (ddr_type == LPDDR4)
+ else if (ddr_type == RK3399_PMUGRF_DDRTYPE_LPDDR4)
writel_relaxed(LPDDR4_EN, dfi_regs + DDRMON_CTRL);
/* enable count, use software mode */
@@ -211,7 +202,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
if (IS_ERR(data->clk)) {
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->clk);
- };
+ }
/* try to find the optional reference to the pmu syscon */
node = of_parse_phandle(np, "rockchip,pmu", 0);
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
index c25658b26598..486cc5b422f1 100644
--- a/drivers/devfreq/exynos-bus.c
+++ b/drivers/devfreq/exynos-bus.c
@@ -514,6 +514,13 @@ err:
return ret;
}
+static void exynos_bus_shutdown(struct platform_device *pdev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(&pdev->dev);
+
+ devfreq_suspend_device(bus->devfreq);
+}
+
#ifdef CONFIG_PM_SLEEP
static int exynos_bus_resume(struct device *dev)
{
@@ -556,6 +563,7 @@ MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
static struct platform_driver exynos_bus_platdrv = {
.probe = exynos_bus_probe,
+ .shutdown = exynos_bus_shutdown,
.driver = {
.name = "exynos-bus",
.pm = &exynos_bus_pm,
diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
index e795ad2b3f6b..567c034d0301 100644
--- a/drivers/devfreq/rk3399_dmc.c
+++ b/drivers/devfreq/rk3399_dmc.c
@@ -18,14 +18,17 @@
#include <linux/devfreq.h>
#include <linux/devfreq-event.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/rwsem.h>
#include <linux/suspend.h>
+#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rockchip_sip.h>
struct dram_timing {
@@ -69,8 +72,11 @@ struct rk3399_dmcfreq {
struct mutex lock;
struct dram_timing timing;
struct regulator *vdd_center;
+ struct regmap *regmap_pmu;
unsigned long rate, target_rate;
unsigned long volt, target_volt;
+ unsigned int odt_dis_freq;
+ int odt_pd_arg0, odt_pd_arg1;
};
static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
@@ -80,6 +86,8 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
struct dev_pm_opp *opp;
unsigned long old_clk_rate = dmcfreq->rate;
unsigned long target_volt, target_rate;
+ struct arm_smccc_res res;
+ bool odt_enable = false;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
@@ -95,6 +103,19 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
mutex_lock(&dmcfreq->lock);
+ if (target_rate >= dmcfreq->odt_dis_freq)
+ odt_enable = true;
+
+ /*
+ * This makes a SMC call to the TF-A to set the DDR PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination)
+ * resistors.
+ */
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
+ dmcfreq->odt_pd_arg1,
+ ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
+ odt_enable, 0, 0, 0, &res);
+
/*
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
@@ -294,11 +315,13 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
{
struct arm_smccc_res res;
struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node, *node;
struct rk3399_dmcfreq *data;
int ret, index, size;
uint32_t *timing;
struct dev_pm_opp *opp;
+ u32 ddr_type;
+ u32 val;
data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL);
if (!data)
@@ -322,7 +345,7 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
dev_err(dev, "Cannot get the clk dmc_clk\n");
return PTR_ERR(data->dmc_clk);
- };
+ }
data->edev = devfreq_event_get_edev_by_phandle(dev, 0);
if (IS_ERR(data->edev))
@@ -354,11 +377,57 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
}
}
+ node = of_parse_phandle(np, "rockchip,pmu", 0);
+ if (node) {
+ data->regmap_pmu = syscon_node_to_regmap(node);
+ if (IS_ERR(data->regmap_pmu))
+ return PTR_ERR(data->regmap_pmu);
+ }
+
+ regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
+ ddr_type = (val >> RK3399_PMUGRF_DDRTYPE_SHIFT) &
+ RK3399_PMUGRF_DDRTYPE_MASK;
+
+ switch (ddr_type) {
+ case RK3399_PMUGRF_DDRTYPE_DDR3:
+ data->odt_dis_freq = data->timing.ddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR3:
+ data->odt_dis_freq = data->timing.lpddr3_odt_dis_freq;
+ break;
+ case RK3399_PMUGRF_DDRTYPE_LPDDR4:
+ data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
+ break;
+ default:
+ return -EINVAL;
+ };
+
arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
ROCKCHIP_SIP_CONFIG_DRAM_INIT,
0, 0, 0, 0, &res);
/*
+ * In TF-A there is a platform SIP call to set the PD (power-down)
+ * timings and to enable or disable the ODT (on-die termination).
+ * This call needs three arguments as follows:
+ *
+ * arg0:
+ * bit[0-7] : sr_idle
+ * bit[8-15] : sr_mc_gate_idle
+ * bit[16-31] : standby idle
+ * arg1:
+ * bit[0-11] : pd_idle
+ * bit[16-27] : srpd_lite_idle
+ * arg2:
+ * bit[0] : odt enable
+ */
+ data->odt_pd_arg0 = (data->timing.sr_idle & 0xff) |
+ ((data->timing.sr_mc_gate_idle & 0xff) << 8) |
+ ((data->timing.standby_idle & 0xffff) << 16);
+ data->odt_pd_arg1 = (data->timing.pd_idle & 0xfff) |
+ ((data->timing.srpd_lite_idle & 0xfff) << 16);
+
+ /*
* We add a devfreq driver to our parent since it has a device tree node
* with operating points.
*/
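
The bit layout documented in the comment above is packed into the two SiP arguments with plain masking and shifting. The following standalone snippet reproduces just that packing so the field boundaries can be checked in isolation; the struct and function names are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* arg0 = sr_idle[7:0] | sr_mc_gate_idle[15:8] | standby_idle[31:16]
 * arg1 = pd_idle[11:0] | srpd_lite_idle[27:16]
 */
struct dram_pd_timing {
    uint32_t sr_idle, sr_mc_gate_idle, standby_idle;
    uint32_t pd_idle, srpd_lite_idle;
};

static void pack_odt_pd_args(const struct dram_pd_timing *t,
                             uint32_t *arg0, uint32_t *arg1)
{
    *arg0 = (t->sr_idle & 0xff) |
            ((t->sr_mc_gate_idle & 0xff) << 8) |
            ((t->standby_idle & 0xffff) << 16);
    *arg1 = (t->pd_idle & 0xfff) |
            ((t->srpd_lite_idle & 0xfff) << 16);
}

int main(void)
{
    struct dram_pd_timing t = { 0x12, 0x34, 0x5678, 0x9ab, 0xcde };
    uint32_t arg0, arg1;

    pack_odt_pd_args(&t, &arg0, &arg1);
    printf("arg0=0x%08x arg1=0x%08x\n", arg0, arg1); /* 0x56783412 0x0cde09ab */
    return 0;
}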
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index c59d2eee5d30..c89ba7b834ff 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -573,10 +573,7 @@ static int tegra_governor_get_target(struct devfreq *devfreq,
static int tegra_governor_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
- struct tegra_devfreq *tegra;
- int ret = 0;
-
- tegra = dev_get_drvdata(devfreq->dev.parent);
+ struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
switch (event) {
case DEVFREQ_GOV_START:
@@ -600,7 +597,7 @@ static int tegra_governor_event_handler(struct devfreq *devfreq,
break;
}
- return ret;
+ return 0;
}
static struct devfreq_governor tegra_devfreq_governor = {
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 2e5a0faa2cb1..3fc9c2efc583 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -3,7 +3,6 @@ menu "DMABUF options"
config SYNC_FILE
bool "Explicit Synchronization Framework"
default n
- select ANON_INODES
select DMA_SHARED_BUFFER
---help---
The Sync File Framework adds explicit syncronization via
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 0913a6ccab5a..1f006e083eb9 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,4 +1,5 @@
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+ reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 000000000000..93c42078cb57
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,242 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+ struct dma_fence *prev;
+
+ rcu_read_lock();
+ prev = dma_fence_get_rcu_safe(&chain->prev);
+ rcu_read_unlock();
+ return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain, *prev_chain;
+ struct dma_fence *prev, *replacement, *tmp;
+
+ chain = to_dma_fence_chain(fence);
+ if (!chain) {
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+
+ prev_chain = to_dma_fence_chain(prev);
+ if (prev_chain) {
+ if (!dma_fence_is_signaled(prev_chain->fence))
+ break;
+
+ replacement = dma_fence_chain_get_prev(prev_chain);
+ } else {
+ if (!dma_fence_is_signaled(prev))
+ break;
+
+ replacement = NULL;
+ }
+
+ tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+ if (tmp == prev)
+ dma_fence_put(tmp);
+ else
+ dma_fence_put(replacement);
+ dma_fence_put(prev);
+ }
+
+ dma_fence_put(fence);
+ return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+ struct dma_fence_chain *chain;
+
+ if (!seqno)
+ return 0;
+
+ chain = to_dma_fence_chain(*pfence);
+ if (!chain || chain->base.seqno < seqno)
+ return -EINVAL;
+
+ dma_fence_chain_for_each(*pfence, &chain->base) {
+ if ((*pfence)->context != chain->base.context ||
+ to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+ break;
+ }
+ dma_fence_put(&chain->base);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+ return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(work, typeof(*chain), work);
+
+ /* Try to rearm the callback */
+ if (!dma_fence_chain_enable_signaling(&chain->base))
+ /* Ok, we are done. No more unsignaled fences left */
+ dma_fence_signal(&chain->base);
+ dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(cb, typeof(*chain), cb);
+ irq_work_queue(&chain->work);
+ dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+ struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+ dma_fence_get(&head->base);
+ dma_fence_chain_for_each(fence, &head->base) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *f = chain ? chain->fence : fence;
+
+ dma_fence_get(f);
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+ dma_fence_put(fence);
+ return true;
+ }
+ dma_fence_put(f);
+ }
+ dma_fence_put(&head->base);
+ return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *f = chain ? chain->fence : fence;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_put(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ dma_fence_put(rcu_dereference_protected(chain->prev, true));
+ dma_fence_put(chain->fence);
+ dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = dma_fence_chain_get_driver_name,
+ .get_timeline_name = dma_fence_chain_get_timeline_name,
+ .enable_signaling = dma_fence_chain_enable_signaling,
+ .signaled = dma_fence_chain_signaled,
+ .release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno)
+{
+ struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+ uint64_t context;
+
+ spin_lock_init(&chain->lock);
+ rcu_assign_pointer(chain->prev, prev);
+ chain->fence = fence;
+ chain->prev_seqno = 0;
+ init_irq_work(&chain->work, dma_fence_chain_irq_work);
+
+ /* Try to reuse the context of the previous chain node. */
+ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ context = prev->context;
+ chain->prev_seqno = prev->seqno;
+ } else {
+ context = dma_fence_context_alloc(1);
+ /* Make sure that we always have a valid sequence number. */
+ if (prev_chain)
+ seqno = max(prev->seqno, seqno);
+ }
+
+ dma_fence_init(&chain->base, &dma_fence_chain_ops,
+ &chain->lock, context, seqno);
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
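
dma_fence_chain_find_seqno() advances a caller-held fence pointer to the chain node that will signal a given sequence number and rejects requests beyond the head's seqno. The toy below models only that lookup on a plain backwards-linked list; it is not the kernel API and deliberately ignores contexts, RCU and the garbage collection done by dma_fence_chain_walk():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy chain node: each link carries a monotonically increasing seqno. */
struct chain_node {
    uint64_t seqno;
    struct chain_node *prev;
};

/* Walk backwards to the earliest node whose seqno still covers 'seqno';
 * return NULL when the chain has not advanced that far yet. */
static struct chain_node *find_seqno(struct chain_node *head, uint64_t seqno)
{
    if (!head || head->seqno < seqno)
        return NULL;

    while (head->prev && head->prev->seqno >= seqno)
        head = head->prev;
    return head;
}

int main(void)
{
    struct chain_node n1 = { 1, NULL }, n2 = { 2, &n1 }, n3 = { 5, &n2 };
    struct chain_node *hit = find_seqno(&n3, 2);

    printf("seqno 2 covered by node with seqno %llu\n",
           hit ? (unsigned long long)hit->seqno : 0ULL);
    printf("seqno 9 found: %s\n", find_seqno(&n3, 9) ? "yes" : "no");
    return 0;
}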
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c1618335ca99..4d32e2c67862 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -73,6 +73,8 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
struct reservation_object_list *old, *new;
unsigned int i, j, k, max;
+ reservation_object_assert_held(obj);
+
old = reservation_object_get_list(obj);
if (old && old->shared_max) {
@@ -151,6 +153,8 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
dma_fence_get(fence);
+ reservation_object_assert_held(obj);
+
fobj = reservation_object_get_list(obj);
count = fobj->shared_count;
@@ -196,6 +200,8 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
struct reservation_object_list *old;
u32 i = 0;
+ reservation_object_assert_held(obj);
+
old = reservation_object_get_list(obj);
if (old)
i = old->shared_count;
@@ -236,6 +242,8 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
+ reservation_object_assert_held(dst);
+
rcu_read_lock();
src_list = rcu_dereference(src->fence);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 32dcf7b4c935..119b2ffbc2c9 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -161,7 +161,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value);
+ return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
}
static bool timeline_fence_enable_signaling(struct dma_fence *fence)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 4f6305ca52c8..ed3fb6e5224c 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -258,7 +258,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
i_b++;
} else {
- if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno))
+ if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
+ pt_a->ops))
add_fence(fences, &i, pt_a);
else
add_fence(fences, &i, pt_b);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0b1dfb5bf2d9..eaf78f4e07ce 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -99,7 +99,7 @@ config AT_XDMAC
config AXI_DMAC
tristate "Analog Devices AXI-DMAC DMA support"
- depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+ depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index fc8c2bab563c..8cfc753ad4b0 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
* @slave: whether this channel is a device (slave) or for memcpy
* @signal: the physical DMA request signal which this channel is using
* @mux_use: count of descriptors using this DMA request signal setting
+ * @waiting_at: time in jiffies when this channel moved to waiting state
*/
struct pl08x_dma_chan {
struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
bool slave;
int signal;
unsigned mux_use;
+ unsigned long waiting_at;
};
/**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
if (!ch) {
dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
plchan->state = PL08X_CHAN_WAITING;
+ plchan->waiting_at = jiffies;
return;
}
@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_dma_chan *p, *next;
-
+ unsigned long waiting_at;
retry:
next = NULL;
+ waiting_at = jiffies;
- /* Find a waiting virtual channel for the next transfer. */
+ /*
+ * Find a waiting virtual channel for the next transfer.
+ * To be fair, time when each channel reached waiting state is compared
+ * to select channel that is waiting for the longest time.
+ */
list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
- if (p->state == PL08X_CHAN_WAITING) {
+ if (p->state == PL08X_CHAN_WAITING &&
+ p->waiting_at <= waiting_at) {
next = p;
- break;
+ waiting_at = p->waiting_at;
}
if (!next && pl08x->has_slave) {
list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
- if (p->state == PL08X_CHAN_WAITING) {
+ if (p->state == PL08X_CHAN_WAITING &&
+ p->waiting_at <= waiting_at) {
next = p;
- break;
+ waiting_at = p->waiting_at;
}
}
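
With waiting_at recorded when a virtual channel enters PL08X_CHAN_WAITING, pl08x_phy_free() can hand the freed physical channel to the claimant that has waited longest instead of the first one found in list order. A self-contained sketch of that selection, using an array in place of the driver's channel lists and pseudo-jiffies timestamps:

#include <stddef.h>
#include <stdio.h>

struct chan {
    const char *name;
    int waiting;              /* non-zero if in the WAITING state */
    unsigned long waiting_at; /* pseudo-jiffies when waiting began */
};

/* Pick the waiting channel with the smallest (oldest) timestamp. */
static struct chan *pick_longest_waiter(struct chan *chans, size_t n,
                                        unsigned long now)
{
    struct chan *next = NULL;
    unsigned long best = now;
    size_t i;

    for (i = 0; i < n; i++) {
        if (chans[i].waiting && chans[i].waiting_at <= best) {
            next = &chans[i];
            best = chans[i].waiting_at;
        }
    }
    return next;
}

int main(void)
{
    struct chan chans[] = {
        { "memcpy0", 1, 120 },
        { "memcpy1", 1, 80 },   /* waiting the longest */
        { "slave0",  0, 10 },   /* not waiting, ignored */
    };
    struct chan *next = pick_longest_waiter(chans, 3, 200);

    printf("next channel: %s\n", next ? next->name : "(none)");
    return 0;
}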
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index fe69dccfa0c0..e4ae2ee46d3f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst)
return csize;
};
+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+ return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
- reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+ reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+ /*
+ * Request Overflow Error is only for peripheral synchronized transfers
+ */
+ if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+ reg |= AT_XDMAC_CIE_ROIE;
+
/*
* There is no end of list when doing cyclic dma, we need to get
* an interrupt after each periods.
@@ -1575,6 +1586,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
dmaengine_desc_get_callback_invoke(txd, NULL);
}
+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *bad_desc;
+
+ /*
+ * The descriptor currently at the head of the active list is
+ * broken. Since we don't have any way to report errors, we'll
+ * just have to scream loudly and try to continue with other
+ * descriptors queued (if any).
+ */
+ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+ dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+ dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+ dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+ spin_lock_bh(&atchan->lock);
+
+ /* Channel must be disabled first as it's not done automatically */
+ at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+ while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+ cpu_relax();
+
+ bad_desc = list_first_entry(&atchan->xfers_list,
+ struct at_xdmac_desc,
+ xfer_node);
+
+ spin_unlock_bh(&atchan->lock);
+
+ /* Print bad descriptor's details if needed */
+ dev_dbg(chan2dev(&atchan->chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+ bad_desc->lld.mbr_ubc);
+
+ /* Then continue with usual descriptor management */
+}
+
static void at_xdmac_tasklet(unsigned long data)
{
struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
@@ -1594,19 +1645,19 @@ static void at_xdmac_tasklet(unsigned long data)
|| (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
- if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
- dev_err(chan2dev(&atchan->chan), "read bus error!!!");
- if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
- dev_err(chan2dev(&atchan->chan), "write bus error!!!");
- if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
- dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+ if (atchan->irq_status & error_mask)
+ at_xdmac_handle_error(atchan);
spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- BUG_ON(!desc->active_xfer);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+ spin_unlock(&atchan->lock);
+ return;
+ }
txd = &desc->tx_dma_desc;
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 72878ac5c78d..fa81d0177765 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1459,8 +1459,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
- struct platform_device *pdev = to_platform_device(file->private);
- struct sba_device *sba = platform_get_drvdata(pdev);
+ struct sba_device *sba = dev_get_drvdata(file->private);
/* Write stats in file */
sba_write_stats_in_seqfile(sba, file);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 54093ffd0aef..8101ff2f05c1 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -891,7 +891,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
- dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index ffc0adc2f6ce..f32fdf21edbd 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
- if (len == 0 || len > chan->max_length)
+ if (len == 0)
return false;
if ((len & chan->align_mask) != 0) /* Not aligned */
return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
return desc;
}
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+ enum dma_transfer_direction direction, dma_addr_t addr,
+ unsigned int num_periods, unsigned int period_len,
+ struct axi_dmac_sg *sg)
+{
+ unsigned int num_segments, i;
+ unsigned int segment_size;
+ unsigned int len;
+
+ /* Split into multiple equally sized segments if necessary */
+ num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+ segment_size = DIV_ROUND_UP(period_len, num_segments);
+ /* Take care of alignment */
+ segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+ for (i = 0; i < num_periods; i++) {
+ len = period_len;
+
+ while (len > segment_size) {
+ if (direction == DMA_DEV_TO_MEM)
+ sg->dest_addr = addr;
+ else
+ sg->src_addr = addr;
+ sg->x_len = segment_size;
+ sg->y_len = 1;
+ sg++;
+ addr += segment_size;
+ len -= segment_size;
+ }
+
+ if (direction == DMA_DEV_TO_MEM)
+ sg->dest_addr = addr;
+ else
+ sg->src_addr = addr;
+ sg->x_len = len;
+ sg->y_len = 1;
+ sg++;
+ addr += len;
+ }
+
+ return sg;
+}
+
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
struct dma_chan *c, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
+ struct axi_dmac_sg *dsg;
struct scatterlist *sg;
+ unsigned int num_sgs;
unsigned int i;
if (direction != chan->direction)
return NULL;
- desc = axi_dmac_alloc_desc(sg_len);
+ num_sgs = 0;
+ for_each_sg(sgl, sg, sg_len, i)
+ num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+ desc = axi_dmac_alloc_desc(num_sgs);
if (!desc)
return NULL;
+ dsg = desc->sg;
+
for_each_sg(sgl, sg, sg_len, i) {
if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
!axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
return NULL;
}
- if (direction == DMA_DEV_TO_MEM)
- desc->sg[i].dest_addr = sg_dma_address(sg);
- else
- desc->sg[i].src_addr = sg_dma_address(sg);
- desc->sg[i].x_len = sg_dma_len(sg);
- desc->sg[i].y_len = 1;
+ dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+ sg_dma_len(sg), dsg);
}
desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
{
struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
struct axi_dmac_desc *desc;
- unsigned int num_periods, i;
+ unsigned int num_periods, num_segments;
if (direction != chan->direction)
return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
return NULL;
num_periods = buf_len / period_len;
+ num_segments = DIV_ROUND_UP(period_len, chan->max_length);
- desc = axi_dmac_alloc_desc(num_periods);
+ desc = axi_dmac_alloc_desc(num_periods * num_segments);
if (!desc)
return NULL;
- for (i = 0; i < num_periods; i++) {
- if (direction == DMA_DEV_TO_MEM)
- desc->sg[i].dest_addr = buf_addr;
- else
- desc->sg[i].src_addr = buf_addr;
- desc->sg[i].x_len = period_len;
- desc->sg[i].y_len = 1;
- buf_addr += period_len;
- }
+ axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+ period_len, desc->sg);
desc->cyclic = true;
@@ -485,7 +526,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
if (chan->hw_2d) {
if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
- !axi_dmac_check_len(chan, xt->numf))
+ xt->numf == 0)
return NULL;
if (xt->sgl[0].size + dst_icg > chan->max_length ||
xt->sgl[0].size + src_icg > chan->max_length)
@@ -577,15 +618,6 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
return ret;
chan->dest_width = val / 8;
- ret = of_property_read_u32(of_chan, "adi,length-width", &val);
- if (ret)
- return ret;
-
- if (val >= 32)
- chan->max_length = UINT_MAX;
- else
- chan->max_length = (1ULL << val) - 1;
-
chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
@@ -597,12 +629,27 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
else
chan->direction = DMA_DEV_TO_DEV;
- chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
- chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
-
return 0;
}
+static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+{
+ struct axi_dmac_chan *chan = &dmac->chan;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
+ chan->hw_cyclic = true;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
+ if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
+ chan->hw_2d = true;
+
+ axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
+ chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+ if (chan->max_length != UINT_MAX)
+ chan->max_length++;
+}
+
static int axi_dmac_probe(struct platform_device *pdev)
{
struct device_node *of_channels, *of_chan;
@@ -647,11 +694,12 @@ static int axi_dmac_probe(struct platform_device *pdev)
of_node_put(of_channels);
pdev->dev.dma_parms = &dmac->dma_parms;
- dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
dma_dev = &dmac->dma_dev;
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
dma_dev->device_tx_status = dma_cookie_status;
dma_dev->device_issue_pending = axi_dmac_issue_pending;
@@ -675,6 +723,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ axi_dmac_detect_caps(dmac);
+
axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
ret = dma_async_device_register(dma_dev);
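
Instead of reading "adi,length-width", "adi,cyclic" and "adi,2d" from the device tree, the axi-dmac changes above probe the synthesized IP core directly: write a value to a register and read it back, and treat a value that sticks as proof the feature is implemented; writing all-ones to X_LENGTH likewise reveals how many length bits exist. Each period is then split into DIV_ROUND_UP(period_len, max_length) segments so no single descriptor exceeds that limit. Below is a minimal, standalone sketch of the probe-by-readback idea, with invented register names and a simulated register file rather than the driver's accessors.

/*
 * Hedged sketch (not the driver itself): capability probing by
 * write-then-readback, as axi_dmac_detect_caps() does above. The
 * "hardware" is simulated by a register array plus a per-register
 * implemented-bits mask; names and masks are made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

enum { REG_FLAGS, REG_X_LENGTH, REG_Y_LENGTH, NUM_REGS };
#define FLAG_CYCLIC 0x1u

static uint32_t regs[NUM_REGS];
/* Bits the simulated IP core actually implements. */
static const uint32_t impl_mask[NUM_REGS] = {
	[REG_FLAGS]    = FLAG_CYCLIC,	/* cyclic supported */
	[REG_X_LENGTH] = 0x00ffffff,	/* 24-bit length    */
	[REG_Y_LENGTH] = 0x0,		/* no 2D support    */
};

static void mmio_write(int reg, uint32_t val) { regs[reg] = val & impl_mask[reg]; }
static uint32_t mmio_read(int reg) { return regs[reg]; }

int main(void)
{
	bool hw_cyclic, hw_2d;
	unsigned int max_length;

	/* Write a feature flag; if it reads back, the core supports it. */
	mmio_write(REG_FLAGS, FLAG_CYCLIC);
	hw_cyclic = (mmio_read(REG_FLAGS) == FLAG_CYCLIC);

	/* A writable Y_LENGTH register implies 2D-transfer support. */
	mmio_write(REG_Y_LENGTH, 1);
	hw_2d = (mmio_read(REG_Y_LENGTH) == 1);

	/* Unimplemented length bits read back as zero; reg holds len - 1. */
	mmio_write(REG_X_LENGTH, 0xffffffff);
	max_length = mmio_read(REG_X_LENGTH);
	if (max_length != UINT_MAX)
		max_length++;

	printf("cyclic=%d 2d=%d max_length=%u\n", hw_cyclic, hw_2d, max_length);
	return 0;
}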
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index b435d8e1e3a1..c53f76eeb4d3 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -136,7 +136,7 @@ struct fsl_edma_desc {
};
enum edma_version {
- v1, /* 32ch, Vybdir, mpc57x, etc */
+ v1, /* 32ch, Vybrid, mpc57x, etc */
v2, /* 64ch Coldfire */
};
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 75e8a7ba3a22..d641ef85a634 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -144,21 +144,21 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
- return ret;
+ return ret;
}
} else {
ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
- return ret;
+ return ret;
}
ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
if (ret) {
dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
- return ret;
+ return ret;
}
}
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 0baf9797cc09..07fd4f25cdd8 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -19,10 +19,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include "idma64.h"
+#include <linux/dma/idma64.h>
-/* Platform driver name */
-#define DRV_NAME "idma64"
+#include "idma64.h"
/* For now we support only two channels */
#define IDMA64_NR_CHAN 2
@@ -592,7 +591,7 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- idma64->dma.dev = chip->dev;
+ idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -632,6 +631,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
{
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
+ struct device *sysdev = dev->parent;
struct resource *mem;
int ret;
@@ -648,11 +648,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
chip->dev = dev;
+ chip->sysdev = sysdev;
ret = idma64_probe(chip);
if (ret)
@@ -697,7 +698,7 @@ static struct platform_driver idma64_platform_driver = {
.probe = idma64_platform_probe,
.remove = idma64_platform_remove,
.driver = {
- .name = DRV_NAME,
+ .name = LPSS_IDMA64_DRIVER_NAME,
.pm = &idma64_dev_pm_ops,
},
};
@@ -707,4 +708,4 @@ module_platform_driver(idma64_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("iDMA64 core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 6b816878e5e7..baa32e1425de 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
/**
* struct idma64_chip - representation of iDMA 64-bit controller hardware
* @dev: struct device of the DMA controller
+ * @sysdev: struct device of the physical device that does DMA
* @irq: irq line
* @regs: memory mapped I/O space
* @idma64: struct idma64 that is filed by idma64_probe()
*/
struct idma64_chip {
struct device *dev;
+ struct device *sysdev;
int irq;
void __iomem *regs;
struct idma64 *idma64;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 5f3c1378b90e..99d9f431ae2c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -419,6 +419,7 @@ struct sdma_driver_data {
int chnenbl0;
int num_events;
struct sdma_script_start_addrs *script_addrs;
+ bool check_ratio;
};
struct sdma_engine {
@@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = {
.script_addrs = &sdma_script_imx7d,
};
+static struct sdma_driver_data sdma_imx8mq = {
+ .chnenbl0 = SDMA_CHNENBL0_IMX35,
+ .num_events = 48,
+ .script_addrs = &sdma_script_imx7d,
+ .check_ratio = 1,
+};
+
static const struct platform_device_id sdma_devtypes[] = {
{
.name = "imx25-sdma",
@@ -580,6 +588,9 @@ static const struct platform_device_id sdma_devtypes[] = {
.name = "imx7d-sdma",
.driver_data = (unsigned long)&sdma_imx7d,
}, {
+ .name = "imx8mq-sdma",
+ .driver_data = (unsigned long)&sdma_imx8mq,
+ }, {
/* sentinel */
}
};
@@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+ { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma)
if (ret)
goto disable_clk_ipg;
- if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+ if (sdma->drvdata->check_ratio &&
+ (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
sdma->clk_ratio = 1;
/* Be sure SDMA has not started yet */
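
The imx-sdma hunks above add an i.MX8MQ variant whose driver data carries a check_ratio flag, so the AHB/IPG clock-ratio shortcut is only taken on SoCs that opted in. Below is a small standalone sketch of that per-compatible quirk-flag pattern; names and clock rates are invented for illustration.

/*
 * Hedged sketch: each compatible string carries a data struct, and
 * optional behaviour (here, trusting a 1:1 AHB/IPG ratio) is gated on a
 * flag in that struct.
 */
#include <stdbool.h>
#include <stdio.h>

struct soc_data {
	const char *compatible;
	bool check_ratio;
};

static const struct soc_data socs[] = {
	{ "fsl,imx7d-sdma",  false },
	{ "fsl,imx8mq-sdma", true  },
};

static bool use_ratio(const struct soc_data *d,
		      unsigned long ahb_hz, unsigned long ipg_hz)
{
	/* Only take the shortcut on SoCs that opted into the check. */
	return d->check_ratio && ahb_hz == ipg_hz;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("%s -> clk_ratio=%d\n", socs[i].compatible,
		       use_ratio(&socs[i], 133000000UL, 133000000UL));
	return 0;
}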
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index a67b292190f4..594409a6e975 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1491,14 +1491,14 @@ MODULE_DEVICE_TABLE(platform, nbpf_ids);
#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
- struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ struct nbpf_device *nbpf = dev_get_drvdata(dev);
clk_disable_unprepare(nbpf->clk);
return 0;
}
static int nbpf_runtime_resume(struct device *dev)
{
- struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ struct nbpf_device *nbpf = dev_get_drvdata(dev);
return clk_prepare_enable(nbpf->clk);
}
#endif
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index eec79fdf27a5..6e6837214210 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -11,6 +11,7 @@
* (at your option) any later version.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
@@ -966,6 +967,7 @@ static void _stop(struct pl330_thread *thrd)
{
void __iomem *regs = thrd->dmac->base;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
+ u32 inten = readl(regs + INTEN);
if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -978,10 +980,13 @@ static void _stop(struct pl330_thread *thrd)
_emit_KILL(0, insn);
- /* Stop generating interrupts for SEV */
- writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+ /* clear the event */
+ if (inten & (1 << thrd->ev))
+ writel(1 << thrd->ev, regs + INTCLR);
+ /* Stop generating interrupts for SEV */
+ writel(inten & ~(1 << thrd->ev), regs + INTEN);
}
/* Start doing req 'idx' of thread 'thrd' */
@@ -2896,6 +2901,55 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+ struct pl330_dmac *pl330 = s->private;
+ int chans, pchs, ch, pr;
+
+ chans = pl330->pcfg.num_chan;
+ pchs = pl330->num_peripherals;
+
+ seq_puts(s, "PL330 physical channels:\n");
+ seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+ seq_puts(s, "--------\t-----\n");
+ for (ch = 0; ch < chans; ch++) {
+ struct pl330_thread *thrd = &pl330->channels[ch];
+ int found = -1;
+
+ for (pr = 0; pr < pchs; pr++) {
+ struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+ if (!pch->thread || thrd->id != pch->thread->id)
+ continue;
+
+ found = pr;
+ }
+
+ seq_printf(s, "%d\t\t", thrd->id);
+ if (found == -1)
+ seq_puts(s, "--\n");
+ else
+ seq_printf(s, "%d\n", found);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+ debugfs_create_file(dev_name(pl330->ddma.dev),
+ S_IFREG | 0444, NULL, pl330,
+ &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
/*
* Runtime PM callbacks are provided by amba/bus.c driver.
*
@@ -3082,6 +3136,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
dev_err(&adev->dev, "unable to set the seg size\n");
+ init_pl330_debugfs(pl330);
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
dev_info(&adev->dev,
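
The pl330 addition above exposes the thread-to-channel mapping through a single debugfs file built with DEFINE_SHOW_ATTRIBUTE(). Below is a minimal sketch of that pattern with an invented file name and payload; the macro and the debugfs call are the real kernel APIs.

/*
 * Hedged sketch: one seq_file "show" callback plus DEFINE_SHOW_ATTRIBUTE()
 * yields read-only file_operations named <name>_fops.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct my_state {
	int nr_threads;
};

static int my_state_show(struct seq_file *s, void *unused)
{
	struct my_state *st = s->private;

	seq_printf(s, "threads: %d\n", st->nr_threads);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_state);	/* generates my_state_fops */

static void my_state_debugfs_init(struct my_state *st)
{
	/* 0444: world-readable, no write path. */
	debugfs_create_file("my_state", 0444, NULL, st, &my_state_fops);
}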
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index e2a5398f89b5..33ab1b607e2b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Renesas R-Car Gen2 DMA Controller Driver
+ * Renesas R-Car Gen2/Gen3 DMA Controller Driver
*
- * Copyright (C) 2014 Renesas Electronics Inc.
+ * Copyright (C) 2014-2019 Renesas Electronics Inc.
*
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index ba239b529fa9..88d9c6c4389f 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1042,33 +1042,97 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
return ndtr << width;
}
+/**
+ * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * This function, called while IRQs are disabled, checks that the
+ * hardware has not switched to the next transfer in double-buffer mode.
+ * The test is done by comparing the next_sg memory address with the
+ * related hardware register (based on the CT bit value).
+ *
+ * Returns true if the expected current transfer is still running, or if
+ * double-buffer mode is not activated.

+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+ struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+ struct stm32_dma_sg_req *sg_req;
+ u32 dma_scr, dma_smar, id;
+
+ id = chan->id;
+ dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+ if (!(dma_scr & STM32_DMA_SCR_DBM))
+ return true;
+
+ sg_req = &chan->desc->sg_req[chan->next_sg];
+
+ if (dma_scr & STM32_DMA_SCR_CT) {
+ dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+ return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+ }
+
+ dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+ return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
struct stm32_dma_desc *desc,
u32 next_sg)
{
u32 modulo, burst_size;
- u32 residue = 0;
+ u32 residue;
+ u32 n_sg = next_sg;
+ struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
int i;
/*
- * In cyclic mode, for the last period, residue = remaining bytes from
- * NDTR
+ * Calculating the residue means computing two pieces of descriptor
+ * information:
+ * - the sg_req currently being transferred
+ * - the hardware's remaining position in this sg (the NDTR bit field).
+ *
+ * A race condition may occur if the DMA is running in cyclic or double
+ * buffer mode, since the DMA registers are automatically reloaded at the
+ * end of a period transfer. The hardware may have switched to the next
+ * transfer (CT bit updated) just before the position (SxNDTR register)
+ * is read.
+ * In that case the SxNDTR register may or may not correspond to the new
+ * transfer position rather than the expected one.
+ * The strategy implemented in the stm32 driver is to:
+ * - read the SxNDTR register
+ * - crosscheck that the hardware is still on the current transfer.
+ * If a switch is detected, we can assume that the DMA is at the
+ * beginning of the next transfer, so the residue is approximated
+ * accordingly by pointing at the beginning of that next transfer.
+ *
+ * This race condition does not apply in non-cyclic mode, as double
+ * buffering is not used. In that case the registers are updated by
+ * software.
*/
- if (chan->desc->cyclic && next_sg == 0) {
- residue = stm32_dma_get_remaining_bytes(chan);
- goto end;
+
+ residue = stm32_dma_get_remaining_bytes(chan);
+
+ if (!stm32_dma_is_current_sg(chan)) {
+ n_sg++;
+ if (n_sg == chan->desc->num_sgs)
+ n_sg = 0;
+ residue = sg_req->len;
}
/*
- * For all other periods in cyclic mode, and in sg mode,
- * residue = remaining bytes from NDTR + remaining periods/sg to be
- * transferred
+ * In cyclic mode, for the last period, residue = remaining bytes
+ * from NDTR;
+ * for all other periods in cyclic mode, and in sg mode,
+ * residue = remaining bytes from NDTR + remaining periods/sgs
+ * still to be transferred.
*/
- for (i = next_sg; i < desc->num_sgs; i++)
- residue += desc->sg_req[i].len;
- residue += stm32_dma_get_remaining_bytes(chan);
+ if (!chan->desc->cyclic || n_sg != 0)
+ for (i = n_sg; i < desc->num_sgs; i++)
+ residue += desc->sg_req[i].len;
-end:
if (!chan->mem_burst)
return residue;
@@ -1302,13 +1366,16 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
- res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
- if (!res) {
- ret = -EINVAL;
- dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+ chan->irq = platform_get_irq(pdev, i);
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "No irq resource for chan %d\n", i);
goto err_unregister;
}
- chan->irq = res->start;
+ chan->irq = ret;
+
ret = devm_request_irq(&pdev->dev, chan->irq,
stm32_dma_chan_irq, 0,
dev_name(chan2dev(chan)), chan);
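
The stm32-dma residue rework above boils down to: sample the remaining-byte counter, then verify the hardware is still on the segment you think it is; if it has already flipped buffers, assume the next segment just started. Below is a loose, userspace-style sketch of that bookkeeping with invented segment lengths; it mirrors the idea, not the driver's exact indexing.

/*
 * Hedged sketch of the residue bookkeeping described above: sample first,
 * cross-check second, and fall back to "whole next segment pending" when
 * the hardware raced ahead.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_SGS 3

static const unsigned int sg_len[NUM_SGS] = { 4096, 4096, 2048 };

/*
 * cur_sg:          segment we believe is running
 * ndtr_bytes:      remaining-byte counter sampled from the hardware
 * still_on_cur_sg: result of the cross-check (comparing the programmed
 *                  buffer address against the expected one, as the patch does)
 */
static unsigned int residue(unsigned int cur_sg, unsigned int ndtr_bytes,
			    bool still_on_cur_sg)
{
	unsigned int run = cur_sg, res, i;

	if (still_on_cur_sg) {
		res = ndtr_bytes;		/* bytes left in the running segment */
	} else {
		run = (cur_sg + 1) % NUM_SGS;	/* hardware already moved on */
		res = sg_len[run];		/* assume the new segment just started */
	}

	for (i = run + 1; i < NUM_SGS; i++)	/* segments not yet started */
		res += sg_len[i];
	return res;
}

int main(void)
{
	printf("%u\n", residue(1, 1000, true));		/* 1000 + 2048 = 3048 */
	printf("%u\n", residue(1, 1000, false));	/* 2048: stale counter ignored */
	return 0;
}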
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 5ec0dd97b397..21f6be16d013 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -22,7 +22,6 @@
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -31,35 +30,33 @@
#define ADMA_CH_CMD 0x00
#define ADMA_CH_STATUS 0x0c
#define ADMA_CH_STATUS_XFER_EN BIT(0)
+#define ADMA_CH_STATUS_XFER_PAUSED BIT(1)
#define ADMA_CH_INT_STATUS 0x10
#define ADMA_CH_INT_STATUS_XFER_DONE BIT(0)
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
-#define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28)
-#define ADMA_CH_CTRL_TX_REQ_MAX 10
-#define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24)
-#define ADMA_CH_CTRL_RX_REQ_MAX 10
#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
+#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
#define ADMA_CH_CONFIG 0x28
#define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28)
#define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24)
-#define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20)
-#define ADMA_CH_CONFIG_BURST_16 5
+#define ADMA_CH_CONFIG_BURST_SIZE_SHIFT 20
+#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8)
-#define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf)
+#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8
+#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -69,25 +66,41 @@
#define ADMA_CH_XFER_STATUS 0x54
#define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff
-#define ADMA_GLOBAL_CMD 0xc00
-#define ADMA_GLOBAL_SOFT_RESET 0xc04
-#define ADMA_GLOBAL_INT_CLEAR 0xc20
-#define ADMA_GLOBAL_CTRL 0xc24
+#define ADMA_GLOBAL_CMD 0x00
+#define ADMA_GLOBAL_SOFT_RESET 0x04
-#define ADMA_CH_REG_OFFSET(a) (a * 0x80)
+#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
- ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
- ADMA_CH_FIFO_CTRL_TX_SIZE(3) | \
- ADMA_CH_FIFO_CTRL_RX_SIZE(3))
+ ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+
+#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
+
struct tegra_adma;
/*
* struct tegra_adma_chip_data - Tegra chip specific data
+ * @global_reg_offset: Register offset of DMA global register.
+ * @global_int_clear: Register offset of DMA global interrupt clear.
+ * @ch_req_tx_shift: Bit shift of the AHUB transmit channel select field.
+ * @ch_req_rx_shift: Bit shift of the AHUB receive channel select field.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_req_mask: Mask for Tx or Rx channel select.
+ * @ch_req_max: Maximum number of Tx or Rx channels available.
+ * @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
*/
struct tegra_adma_chip_data {
- int nr_channels;
+ unsigned int (*adma_get_burst_config)(unsigned int burst_size);
+ unsigned int global_reg_offset;
+ unsigned int global_int_clear;
+ unsigned int ch_req_tx_shift;
+ unsigned int ch_req_rx_shift;
+ unsigned int ch_base_offset;
+ unsigned int ch_req_mask;
+ unsigned int ch_req_max;
+ unsigned int ch_reg_size;
+ unsigned int nr_channels;
};
/*
@@ -99,6 +112,7 @@ struct tegra_adma_chan_regs {
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
+ unsigned int cmd;
unsigned int tc;
};
@@ -128,6 +142,7 @@ struct tegra_adma_chan {
enum dma_transfer_direction sreq_dir;
unsigned int sreq_index;
bool sreq_reserved;
+ struct tegra_adma_chan_regs ch_regs;
/* Transfer count and position info */
unsigned int tx_buf_count;
@@ -141,6 +156,7 @@ struct tegra_adma {
struct dma_device dma_dev;
struct device *dev;
void __iomem *base_addr;
+ struct clk *ahub_clk;
unsigned int nr_channels;
unsigned long rx_requests_reserved;
unsigned long tx_requests_reserved;
@@ -148,18 +164,20 @@ struct tegra_adma {
/* Used to store global command register state when suspending */
unsigned int global_cmd;
+ const struct tegra_adma_chip_data *cdata;
+
/* Last member of the structure */
struct tegra_adma_chan channels[0];
};
static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
{
- writel(val, tdma->base_addr + reg);
+ writel(val, tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
{
- return readl(tdma->base_addr + reg);
+ return readl(tdma->base_addr + tdma->cdata->global_reg_offset + reg);
}
static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
@@ -209,14 +227,16 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
- tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);
+ tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
/* Wait for reset to clear */
ret = readx_poll_timeout(readl,
- tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
+ tdma->base_addr +
+ tdma->cdata->global_reg_offset +
+ ADMA_GLOBAL_SOFT_RESET,
status, status == 0, 20, 10000);
if (ret)
return ret;
@@ -236,13 +256,13 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
if (tdc->sreq_reserved)
return tdc->sreq_dir == direction ? 0 : -EINVAL;
+ if (sreq_index > tdma->cdata->ch_req_max) {
+ dev_err(tdma->dev, "invalid DMA request\n");
+ return -EINVAL;
+ }
+
switch (direction) {
case DMA_MEM_TO_DEV:
- if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
- dev_err(tdma->dev, "invalid DMA request\n");
- return -EINVAL;
- }
-
if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
dev_err(tdma->dev, "DMA request reserved\n");
return -EINVAL;
@@ -250,11 +270,6 @@ static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
break;
case DMA_DEV_TO_MEM:
- if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
- dev_err(tdma->dev, "invalid DMA request\n");
- return -EINVAL;
- }
-
if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
dev_err(tdma->dev, "DMA request reserved\n");
return -EINVAL;
@@ -428,6 +443,51 @@ static void tegra_adma_issue_pending(struct dma_chan *dc)
spin_unlock_irqrestore(&tdc->vc.lock, flags);
}
+static bool tegra_adma_is_paused(struct tegra_adma_chan *tdc)
+{
+ u32 csts;
+
+ csts = tdma_ch_read(tdc, ADMA_CH_STATUS);
+ csts &= ADMA_CH_STATUS_XFER_PAUSED;
+
+ return csts ? true : false;
+}
+
+static int tegra_adma_pause(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc = tdc->desc;
+ struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+ int dcnt = 10;
+
+ ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_regs->ctrl |= (1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+ while (dcnt-- && !tegra_adma_is_paused(tdc))
+ udelay(TEGRA_ADMA_BURST_COMPLETE_TIME);
+
+ if (dcnt < 0) {
+ dev_err(tdc2dev(tdc), "unable to pause DMA channel\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int tegra_adma_resume(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc = tdc->desc;
+ struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+
+ ch_regs->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_regs->ctrl &= ~(1 << ADMA_CH_CTRL_XFER_PAUSE_SHIFT);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+
+ return 0;
+}
+
static int tegra_adma_terminate_all(struct dma_chan *dc)
{
struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
@@ -481,12 +541,29 @@ static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
return ret;
}
+static unsigned int tegra210_adma_get_burst_config(unsigned int burst_size)
+{
+ if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+ burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+ return fls(burst_size) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
+
+static unsigned int tegra186_adma_get_burst_config(unsigned int burst_size)
+{
+ if (!burst_size || burst_size > ADMA_CH_CONFIG_MAX_BURST_SIZE)
+ burst_size = ADMA_CH_CONFIG_MAX_BURST_SIZE;
+
+ return (burst_size - 1) << ADMA_CH_CONFIG_BURST_SIZE_SHIFT;
+}
+
static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
struct tegra_adma_desc *desc,
dma_addr_t buf_addr,
enum dma_transfer_direction direction)
{
struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+ const struct tegra_adma_chip_data *cdata = tdc->tdma->cdata;
unsigned int burst_size, adma_dir;
if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
@@ -495,17 +572,21 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
switch (direction) {
case DMA_MEM_TO_DEV:
adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
- burst_size = fls(tdc->sconfig.dst_maxburst);
+ burst_size = tdc->sconfig.dst_maxburst;
ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
- ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index);
+ ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
+ cdata->ch_req_mask,
+ cdata->ch_req_tx_shift);
ch_regs->src_addr = buf_addr;
break;
case DMA_DEV_TO_MEM:
adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
- burst_size = fls(tdc->sconfig.src_maxburst);
+ burst_size = tdc->sconfig.src_maxburst;
ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
- ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index);
+ ch_regs->ctrl = ADMA_CH_REG_FIELD_VAL(tdc->sreq_index,
+ cdata->ch_req_mask,
+ cdata->ch_req_rx_shift);
ch_regs->trg_addr = buf_addr;
break;
@@ -514,13 +595,10 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
- if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
- burst_size = ADMA_CH_CONFIG_BURST_16;
-
ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
ADMA_CH_CTRL_MODE_CONTINUOUS |
ADMA_CH_CTRL_FLOWCTRL_EN;
- ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size);
+ ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
@@ -635,32 +713,99 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
static int tegra_adma_runtime_suspend(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int i;
tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+ if (!tdma->global_cmd)
+ goto clk_disable;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
+ /* skip if channel is not active */
+ if (!ch_reg->cmd)
+ continue;
+ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
+ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
+ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
+ ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
+ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
+ ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
+ }
+
+clk_disable:
+ clk_disable_unprepare(tdma->ahub_clk);
- return pm_clk_suspend(dev);
+ return 0;
}
static int tegra_adma_runtime_resume(struct device *dev)
{
struct tegra_adma *tdma = dev_get_drvdata(dev);
- int ret;
+ struct tegra_adma_chan_regs *ch_reg;
+ struct tegra_adma_chan *tdc;
+ int ret, i;
- ret = pm_clk_resume(dev);
- if (ret)
+ ret = clk_prepare_enable(tdma->ahub_clk);
+ if (ret) {
+ dev_err(dev, "ahub clk_enable failed: %d\n", ret);
return ret;
-
+ }
tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+ if (!tdma->global_cmd)
+ return 0;
+
+ for (i = 0; i < tdma->nr_channels; i++) {
+ tdc = &tdma->channels[i];
+ ch_reg = &tdc->ch_regs;
+ /* skip if channel was not active earlier */
+ if (!ch_reg->cmd)
+ continue;
+ tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
+ tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
+ }
+
return 0;
}
static const struct tegra_adma_chip_data tegra210_chip_data = {
- .nr_channels = 22,
+ .adma_get_burst_config = tegra210_adma_get_burst_config,
+ .global_reg_offset = 0xc00,
+ .global_int_clear = 0x20,
+ .ch_req_tx_shift = 28,
+ .ch_req_rx_shift = 24,
+ .ch_base_offset = 0,
+ .ch_req_mask = 0xf,
+ .ch_req_max = 10,
+ .ch_reg_size = 0x80,
+ .nr_channels = 22,
+};
+
+static const struct tegra_adma_chip_data tegra186_chip_data = {
+ .adma_get_burst_config = tegra186_adma_get_burst_config,
+ .global_reg_offset = 0,
+ .global_int_clear = 0x402c,
+ .ch_req_tx_shift = 27,
+ .ch_req_rx_shift = 22,
+ .ch_base_offset = 0x10000,
+ .ch_req_mask = 0x1f,
+ .ch_req_max = 20,
+ .ch_reg_size = 0x100,
+ .nr_channels = 32,
};
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
+ { .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@@ -685,6 +830,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
return -ENOMEM;
tdma->dev = &pdev->dev;
+ tdma->cdata = cdata;
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
@@ -693,13 +839,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
-
- ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
- if (ret)
- goto clk_destroy;
+ tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
+ if (IS_ERR(tdma->ahub_clk)) {
+ dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
+ return PTR_ERR(tdma->ahub_clk);
+ }
pm_runtime_enable(&pdev->dev);
@@ -715,7 +859,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
- tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
+ tdc->chan_addr = tdma->base_addr + cdata->ch_base_offset
+ + (cdata->ch_reg_size * i);
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
@@ -746,6 +891,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ tdma->dma_dev.device_pause = tegra_adma_pause;
+ tdma->dma_dev.device_resume = tegra_adma_resume;
ret = dma_async_device_register(&tdma->dma_dev);
if (ret < 0) {
@@ -776,8 +923,6 @@ rpm_put:
pm_runtime_put_sync(&pdev->dev);
rpm_disable:
pm_runtime_disable(&pdev->dev);
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
return ret;
}
@@ -787,6 +932,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
struct tegra_adma *tdma = platform_get_drvdata(pdev);
int i;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
for (i = 0; i < tdma->nr_channels; ++i)
@@ -794,22 +940,15 @@ static int tegra_adma_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_adma_pm_suspend(struct device *dev)
-{
- return pm_runtime_suspended(dev) == false;
-}
-#endif
-
static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
tegra_adma_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra_admac_driver = {
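
A large part of the tegra210-adma rework above is the per-SoC burst-size encoding: Tegra210 stores fls(burst) while Tegra186 stores burst - 1, both shifted into the same CONFIG field. Below is a standalone comparison of the two encodings; the shift and maximum mirror the defines in the patch, everything else is illustration.

/*
 * Hedged sketch of the two burst encodings factored out into
 * adma_get_burst_config() callbacks above.
 */
#include <stdio.h>

#define BURST_SIZE_SHIFT	20
#define MAX_BURST_SIZE		16

static unsigned int fls32(unsigned int x)	/* find last set bit, 1-based */
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int clamp_burst(unsigned int burst)
{
	return (!burst || burst > MAX_BURST_SIZE) ? MAX_BURST_SIZE : burst;
}

static unsigned int tegra210_burst_cfg(unsigned int burst)
{
	return fls32(clamp_burst(burst)) << BURST_SIZE_SHIFT;	/* log2-style code */
}

static unsigned int tegra186_burst_cfg(unsigned int burst)
{
	return (clamp_burst(burst) - 1) << BURST_SIZE_SHIFT;	/* burst - 1 */
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("burst %2u -> t210 0x%08x  t186 0x%08x\n", bursts[i],
		       tegra210_burst_cfg(bursts[i]),
		       tegra186_burst_cfg(bursts[i]));
	return 0;
}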
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index eb45af71d3a3..e8d0881b64d8 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -327,7 +327,6 @@ static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
channel_writel(dc, SAIR, 0);
channel_writel(dc, DAIR, 0);
channel_writel(dc, CCR, 0);
- mmiowb();
}
/* Called with dc->lock held and bh disabled */
@@ -954,7 +953,6 @@ static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
dma_sync_single_for_device(chan2parent(&dc->chan),
prev->txd.phys, ddev->descsize,
DMA_TO_DEVICE);
- mmiowb();
if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
channel_read_CHAR(dc) == prev->txd.phys)
/* Restart chain DMA */
@@ -1080,7 +1078,6 @@ static void txx9dmac_free_chan_resources(struct dma_chan *chan)
static void txx9dmac_off(struct txx9dmac_dev *ddev)
{
dma_writel(ddev, MCR, 0);
- mmiowb();
}
static int __init txx9dmac_chan_probe(struct platform_device *pdev)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index eafd6c4b90fe..8d174dc5dccd 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -703,7 +703,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
INIT_LIST_HEAD(&ld_completed);
- spin_lock_bh(&chan->lock);
+ spin_lock(&chan->lock);
/* Clean already completed and acked descriptors */
xgene_dma_clean_completed_descriptor(chan);
@@ -772,7 +772,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
*/
xgene_chan_xfer_ld_pending(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock(&chan->lock);
/* Run the callback for each descriptor, in order */
list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
@@ -797,7 +797,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}
- chan_dbg(chan, "Allocate descripto pool\n");
+ chan_dbg(chan, "Allocate descriptor pool\n");
return 1;
}
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 47eb4d13ed5f..5e2e0348d460 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,8 +263,8 @@ config EDAC_PND2
micro-server but may appear on others in the future.
config EDAC_MPC85XX
- tristate "Freescale MPC83xx / MPC85xx"
- depends on FSL_SOC
+ bool "Freescale MPC83xx / MPC85xx"
+ depends on FSL_SOC && EDAC=y
help
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, T4240
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 1bcf9aea0cdf..8816f74a22b4 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -9,6 +9,7 @@
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/edac.h>
+#include <linux/firmware/intel/stratix10-smc.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -1361,8 +1362,19 @@ static const struct edac_device_prv_data a10_l2ecc_data = {
#ifdef CONFIG_EDAC_ALTERA_ETHERNET
+static int __init socfpga_init_ethernet_ecc(struct altr_edac_device_dev *dev)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(dev);
+}
+
static const struct edac_device_prv_data a10_enetecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_ethernet_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1374,21 +1386,25 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_ethernet_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-eth-mac-ecc");
-}
-
-early_initcall(socfpga_init_ethernet_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
/********************** NAND Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_NAND
+static int __init socfpga_init_nand_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_nandecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_nand_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1400,21 +1416,25 @@ static const struct edac_device_prv_data a10_nandecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_nand_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-nand-ecc");
-}
-
-early_initcall(socfpga_init_nand_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_NAND */
/********************** DMA Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_DMA
+static int __init socfpga_init_dma_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_dmaecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_dma_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1426,21 +1446,25 @@ static const struct edac_device_prv_data a10_dmaecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_dma_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-dma-ecc");
-}
-
-early_initcall(socfpga_init_dma_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_DMA */
/********************** USB Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_USB
+static int __init socfpga_init_usb_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_usbecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_usb_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1452,21 +1476,25 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.inject_fops = &altr_edac_a10_device_inject2_fops,
};
-static int __init socfpga_init_usb_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-usb-ecc");
-}
-
-early_initcall(socfpga_init_usb_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_USB */
/********************** QSPI Device Functions **********************/
#ifdef CONFIG_EDAC_ALTERA_QSPI
+static int __init socfpga_init_qspi_ecc(struct altr_edac_device_dev *device)
+{
+ int ret;
+
+ ret = altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
+ if (ret)
+ return ret;
+
+ return altr_check_ecc_deps(device);
+}
+
static const struct edac_device_prv_data a10_qspiecc_data = {
- .setup = altr_check_ecc_deps,
+ .setup = socfpga_init_qspi_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1478,13 +1506,6 @@ static const struct edac_device_prv_data a10_qspiecc_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_qspi_ecc(void)
-{
- return altr_init_a10_ecc_device_type("altr,socfpga-qspi-ecc");
-}
-
-early_initcall(socfpga_init_qspi_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_QSPI */
/********************* SDMMC Device Functions **********************/
@@ -1593,6 +1614,35 @@ err_release_group_1:
return rc;
}
+static int __init socfpga_init_sdmmc_ecc(struct altr_edac_device_dev *device)
+{
+ int rc = -ENODEV;
+ struct device_node *child;
+
+ child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
+ if (!child)
+ return -ENODEV;
+
+ if (!of_device_is_available(child))
+ goto exit;
+
+ if (validate_parent_available(child))
+ goto exit;
+
+ /* Init portB */
+ rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
+ a10_sdmmceccb_data.ecc_enable_mask, 1);
+ if (rc)
+ goto exit;
+
+ /* Setup portB */
+ return altr_portb_setup(device);
+
+exit:
+ of_node_put(child);
+ return rc;
+}
+
static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
{
struct altr_edac_device_dev *ad = dev_id;
@@ -1617,7 +1667,7 @@ static irqreturn_t altr_edac_a10_ecc_irq_portb(int irq, void *dev_id)
}
static const struct edac_device_prv_data a10_sdmmcecca_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENA,
.ue_clear_mask = ALTR_A10_ECC_DERRPENA,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1630,7 +1680,7 @@ static const struct edac_device_prv_data a10_sdmmcecca_data = {
};
static const struct edac_device_prv_data a10_sdmmceccb_data = {
- .setup = altr_portb_setup,
+ .setup = socfpga_init_sdmmc_ecc,
.ce_clear_mask = ALTR_A10_ECC_SERRPENB,
.ue_clear_mask = ALTR_A10_ECC_DERRPENB,
.ecc_enable_mask = ALTR_A10_COMMON_ECC_EN_CTL,
@@ -1642,35 +1692,6 @@ static const struct edac_device_prv_data a10_sdmmceccb_data = {
.inject_fops = &altr_edac_a10_device_inject_fops,
};
-static int __init socfpga_init_sdmmc_ecc(void)
-{
- int rc = -ENODEV;
- struct device_node *child;
-
- if (!socfpga_is_a10() && !socfpga_is_s10())
- return -ENODEV;
-
- child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
- if (!child) {
- edac_printk(KERN_WARNING, EDAC_DEVICE, "SDMMC node not found\n");
- return -ENODEV;
- }
-
- if (!of_device_is_available(child))
- goto exit;
-
- if (validate_parent_available(child))
- goto exit;
-
- rc = altr_init_a10_ecc_block(child, ALTR_A10_SDMMC_IRQ_MASK,
- a10_sdmmcecca_data.ecc_enable_mask, 1);
-exit:
- of_node_put(child);
- return rc;
-}
-
-early_initcall(socfpga_init_sdmmc_ecc);
-
#endif /* CONFIG_EDAC_ALTERA_SDMMC */
/********************* Arria10 EDAC Device Functions *************************/
@@ -1762,28 +1783,24 @@ static ssize_t altr_edac_a10_device_trig2(struct file *file,
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
writel(priv->ue_set_mask, set_addr);
} else {
- /* Setup write of 0 to first 4 bytes */
- writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
- /* Setup write of 4 bytes */
+ /* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
/* Setup Address to 0 */
- writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
- /* Setup accctrl to write & data override */
- writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
- /* Kick it. */
- writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
- /* Setup accctrl to read & ecc override */
- writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ writel(0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to read & ecc & data override */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
/* Kick it. */
writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
/* Setup write for single bit change */
- writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
- writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA0_OFST) ^ 0x1,
+ drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA1_OFST),
+ drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA2_OFST),
+ drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RDATA3_OFST),
+ drvdata->base + ECC_BLK_WDATA3_OFST);
+
/* Copy Read ECC to Write ECC */
writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
drvdata->base + ECC_BLK_WECC0_OFST);
@@ -1930,6 +1947,15 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
goto err_release_group1;
}
+#ifdef CONFIG_ARCH_STRATIX10
+ /* Use IRQ to determine SError origin instead of assigning IRQ */
+ rc = of_property_read_u32_index(np, "interrupts", 0, &altdev->db_irq);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to parse DB IRQ index\n");
+ goto err_release_group1;
+ }
+#else
altdev->db_irq = irq_of_parse_and_map(np, 1);
if (!altdev->db_irq) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Error allocating DBIRQ\n");
@@ -1943,6 +1969,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
goto err_release_group1;
}
+#endif
rc = edac_device_add_device(dci);
if (rc) {
@@ -2005,6 +2032,10 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
/************** Stratix 10 EDAC Double Bit Error Handler ************/
#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+#ifdef CONFIG_ARCH_STRATIX10
+/* panic routine issues reboot on non-zero panic_timeout */
+extern int panic_timeout;
+
/*
* The double bit error is handled through SError which is fatal. This is
* called as a panic notifier to printout ECC error info as part of the panic.
@@ -2018,17 +2049,37 @@ static int s10_edac_dberr_handler(struct notifier_block *this,
regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
&dberror);
regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
- regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
+ if (dberror & S10_DBE_IRQ_MASK) {
+ struct list_head *position;
+ struct altr_edac_device_dev *ed;
+ struct arm_smccc_res result;
+
+ /* Find the matching DBE in the list of devices */
+ list_for_each(position, &edac->a10_ecc_devices) {
+ ed = list_entry(position, struct altr_edac_device_dev,
+ next);
+ if (!(BIT(ed->db_irq) & dberror))
+ continue;
+
+ writel(ALTR_A10_ECC_DERRPENA,
+ ed->base + ALTR_A10_ECC_INTSTAT_OFST);
+ err_addr = readl(ed->base + ALTR_S10_DERR_ADDRA_OFST);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "EDAC: [Fatal DBE on %s @ 0x%08X]\n",
+ ed->edac_dev_name, err_addr);
+ break;
+ }
+ /* Notify the System through SMC. Reboot delay = 1 second */
+ panic_timeout = 1;
+ arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dberror, 0, 0, 0, 0,
+ 0, 0, &result);
}
return NOTIFY_DONE;
}
+#endif
/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
@@ -2098,16 +2149,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- if (socfpga_is_a10()) {
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
- }
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
- } else {
+#ifdef CONFIG_ARCH_STRATIX10
+ {
int dberror, err_addr;
edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
@@ -2130,6 +2173,15 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
S10_SYSMGR_UE_ADDR_OFST, 0);
}
}
+#else
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler, edac);
+#endif
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
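
The altr_edac_a10_device_trig2() rework above changes single-bit injection from "write zeroes, then write 1" to a read-modify-write: read the word and its stored check bits, flip exactly one data bit, and write the data back under the original ECC so the next read reports a correctable error. Below is a hedged sketch of just that data/ECC juggling; the types are placeholders, and the real sequence also programs byte enables, the address and the kick register as in the patch.

/*
 * Minimal sketch, assuming a 128-bit data word with separately stored
 * check bits: one flipped data bit plus unchanged ECC guarantees a
 * single-bit (correctable) error on the next read.
 */
#include <stdint.h>

struct ecc_word {
	uint32_t data[4];	/* 128-bit data word */
	uint32_t ecc[2];	/* stored check bits */
};

static void inject_single_bit_error(const struct ecc_word *rd, struct ecc_word *wr)
{
	int i;

	/* Flip one bit in the first data word ... */
	wr->data[0] = rd->data[0] ^ 0x1;
	for (i = 1; i < 4; i++)
		wr->data[i] = rd->data[i];

	/* ... but write back the ECC computed for the original data. */
	wr->ecc[0] = rd->ecc[0];
	wr->ecc[1] = rd->ecc[1];
}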
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index f8664bac9fa8..55654cc4bcdf 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -289,6 +289,7 @@ struct altr_sdram_mc_data {
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000
/************* Stratix10 Defines **************/
+#define ALTR_S10_DERR_ADDRA_OFST 0x2C
/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
@@ -299,6 +300,7 @@ struct altr_sdram_mc_data {
#define S10_SYSMGR_UE_ADDR_OFST 0x224
#define S10_DDR0_IRQ_MASK BIT(16)
+#define S10_DBE_IRQ_MASK 0x3FE
/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST 0x40
@@ -319,7 +321,7 @@ struct altr_sdram_mc_data {
#define ECC_BLK_STARTACC_OFST 0x7C
#define ECC_XACT_KICK 0x10000
-#define ECC_WORD_WRITE 0xF
+#define ECC_WORD_WRITE 0xFF
#define ECC_WRITE_DOVR 0x101
#define ECC_WRITE_EDOVR 0x103
#define ECC_READ_EOVR 0x2
@@ -370,69 +372,4 @@ struct altr_arria10_edac {
struct notifier_block panic_notifier;
};
-/*
- * Functions specified by ARM SMC Calling convention:
- *
- * FAST call executes atomic operations, returns when the requested operation
- * has completed.
- * STD call starts a operation which can be preempted by a non-secure
- * interrupt. The call can return before the requested operation has
- * completed.
- *
- * a0..a7 is used as register names in the descriptions below, on arm32
- * that translates to r0..r7 and on arm64 to w0..w7.
- */
-
-#define INTEL_SIP_SMC_STD_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_FAST_CALL_VAL(func_num) \
- ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
- ARM_SMCCC_OWNER_SIP, (func_num))
-
-#define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
-#define INTEL_SIP_SMC_STATUS_OK 0x0
-#define INTEL_SIP_SMC_REG_ERROR 0x5
-
-/*
- * Request INTEL_SIP_SMC_REG_READ
- *
- * Read a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_READ.
- * a1: register address.
- * a2-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1: Value in the register
- * a2-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_READ 7
-#define INTEL_SIP_SMC_REG_READ \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_READ)
-
-/*
- * Request INTEL_SIP_SMC_REG_WRITE
- *
- * Write a protected register using SMCCC
- *
- * Call register usage:
- * a0: INTEL_SIP_SMC_REG_WRITE.
- * a1: register address
- * a2: value to program into register.
- * a3-7: not used.
- *
- * Return status:
- * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_REG_ERROR, or
- * INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION
- * a1-3: not used.
- */
-#define INTEL_SIP_SMC_FUNCID_REG_WRITE 8
-#define INTEL_SIP_SMC_REG_WRITE \
- INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 6ea98575a402..e2a99466faaa 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -18,6 +18,9 @@ static struct msr __percpu *msrs;
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
+/* Number of Unified Memory Controllers */
+static u8 num_umcs;
+
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -449,6 +452,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
#define for_each_chip_select_mask(i, dct, pvt) \
for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+#define for_each_umc(i) \
+ for (i = 0; i < num_umcs; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -722,7 +728,7 @@ static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
continue;
@@ -781,6 +787,22 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
(dclr & BIT(15)) ? "yes" : "no");
}
+/*
+ * The Address Mask should be a contiguous set of bits in the non-interleaved
+ * case. So to check for CS interleaving, find the most- and least-significant
+ * bits of the mask, generate a contiguous bitmask, and compare the two.
+ */
+static bool f17_cs_interleaved(struct amd64_pvt *pvt, u8 ctrl, int cs)
+{
+ u32 mask = pvt->csels[ctrl].csmasks[cs >> 1];
+ u32 msb = fls(mask) - 1, lsb = ffs(mask) - 1;
+ u32 test_mask = GENMASK(msb, lsb);
+
+ edac_dbg(1, "mask=0x%08x test_mask=0x%08x\n", mask, test_mask);
+
+ return mask ^ test_mask;
+}
+
static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, cs0, cs1;
@@ -797,8 +819,19 @@ static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
size1 = 0;
cs1 = dimm * 2 + 1;
- if (csrow_enabled(cs1, ctrl, pvt))
- size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ if (csrow_enabled(cs1, ctrl, pvt)) {
+ /*
+ * CS interleaving is only supported if both CSes have
+ * the same amount of memory. Because they are
+ * interleaved, it will look like both CSes have the
+ * full amount of memory. Save the size for both as
+ * half the amount we found on CS0, if interleaved.
+ */
+ if (f17_cs_interleaved(pvt, ctrl, cs1))
+ size1 = size0 = (size0 >> 1);
+ else
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1);
+ }
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
cs0, size0,
@@ -811,7 +844,7 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
struct amd64_umc *umc;
u32 i, tmp, umc_base;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -894,8 +927,7 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_info("using %s syndromes.\n",
- ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
+ amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
@@ -1388,7 +1420,7 @@ static int f17_early_channel_count(struct amd64_pvt *pvt)
int i, channels = 0;
/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for (i = 0; i < NUM_UMCS; i++)
+ for_each_umc(i)
channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
amd64_info("MCT channel count: %d\n", channels);
@@ -2211,6 +2243,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M30H_CPUS] = {
+ .ctl_name = "F17h_M30h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2464,18 +2505,14 @@ static inline void decode_bus_error(int node_id, struct mce *m)
* To find the UMC channel represented by this bank we need to match on its
* instance_id. The instance_id of a bank is held in the lower 32 bits of its
* IPID.
+ *
+ * Currently, we can derive the channel number by looking at the 6th nibble in
+ * the instance_id. For example, instance_id=0xYXXXXX where Y is the channel
+ * number.
*/
-static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
+static int find_umc_channel(struct mce *m)
{
- u32 umc_instance_id[] = {0x50f00, 0x150f00};
- u32 instance_id = m->ipid & GENMASK(31, 0);
- int i, channel = -1;
-
- for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
- if (umc_instance_id[i] == instance_id)
- channel = i;
-
- return channel;
+ return (m->ipid & GENMASK(31, 0)) >> 20;
}
static void decode_umc_error(int node_id, struct mce *m)
@@ -2497,11 +2534,7 @@ static void decode_umc_error(int node_id, struct mce *m)
if (m->status & MCI_STATUS_DEFERRED)
ecc_type = 3;
- err.channel = find_umc_channel(pvt, m);
- if (err.channel < 0) {
- err.err_code = ERR_CHANNEL;
- goto log_error;
- }
+ err.channel = find_umc_channel(m);
if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
err.err_code = ERR_NORM_ADDR;
@@ -2603,19 +2636,19 @@ static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
if (pvt->umc) {
u8 i;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
/* Check enabled channels only: */
- if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
- (pvt->umc[i].ecc_ctrl & BIT(7))) {
- pvt->ecc_sym_sz = 8;
- break;
+ if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
+ if (pvt->umc[i].ecc_ctrl & BIT(9)) {
+ pvt->ecc_sym_sz = 16;
+ return;
+ } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
+ pvt->ecc_sym_sz = 8;
+ return;
+ }
}
}
-
- return;
- }
-
- if (pvt->fam >= 0x10) {
+ } else if (pvt->fam >= 0x10) {
u32 tmp;
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
@@ -2639,7 +2672,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
u32 i, umc_base;
/* Read registers from each UMC */
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
@@ -3052,7 +3085,7 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
if (boot_cpu_data.x86 >= 0x17) {
u8 umc_en_mask = 0, ecc_en_mask = 0;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
u32 base = get_umc_base(i);
/* Only check enabled UMCs. */
@@ -3105,7 +3138,7 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
u8 i, ecc_en = 1, cpk_en = 1;
- for (i = 0; i < NUM_UMCS; i++) {
+ for_each_umc(i) {
if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
@@ -3203,6 +3236,10 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M10H_CPUS];
pvt->ops = &family_types[F17_M10H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
+ fam_type = &family_types[F17_M30H_CPUS];
+ pvt->ops = &family_types[F17_M30H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
@@ -3236,6 +3273,22 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
+/* Set the number of Unified Memory Controllers in the system. */
+static void compute_num_umcs(void)
+{
+ u8 model = boot_cpu_data.x86_model;
+
+ if (boot_cpu_data.x86 < 0x17)
+ return;
+
+ if (model >= 0x30 && model <= 0x3f)
+ num_umcs = 8;
+ else
+ num_umcs = 2;
+
+ edac_dbg(1, "Number of UMCs: %x", num_umcs);
+}
+
static int init_one_instance(unsigned int nid)
{
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
@@ -3260,7 +3313,7 @@ static int init_one_instance(unsigned int nid)
goto err_free;
if (pvt->fam >= 0x17) {
- pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
+ pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc) {
ret = -ENOMEM;
goto err_free;
@@ -3299,8 +3352,14 @@ static int init_one_instance(unsigned int nid)
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
+ *
+ * On Fam17h+, the number of controllers may be greater than two. So set
+ * the size equal to the maximum number of UMCs.
*/
- layers[1].size = 2;
+ if (pvt->fam >= 0x17)
+ layers[1].size = num_umcs;
+ else
+ layers[1].size = 2;
layers[1].is_virt_csrow = false;
mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
@@ -3481,6 +3540,8 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
+ compute_num_umcs();
+
for (i = 0; i < amd_nb_num(); i++) {
err = probe_one_instance(i);
if (err) {
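
The new f17_cs_interleaved() helper above relies on the fact that a non-interleaved chip-select address mask is one contiguous run of bits, so rebuilding GENMASK(msb, lsb) from the outermost set bits and XOR-ing against the mask exposes any holes left by interleave bits. Below is a standalone illustration with stand-ins for the kernel's fls()/ffs()/GENMASK(); the sample masks are invented.

/*
 * Hedged sketch of the contiguous-mask test: a hole in the run of set
 * bits means chip-select interleaving is in effect.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int fls32(uint32_t x) { int r = 0; while (x) { x >>= 1; r++; } return r; }
static int ffs32(uint32_t x) { return x ? fls32(x & -x) : 0; }
static uint32_t genmask(int h, int l) { return (~0u >> (31 - h)) & (~0u << l); }

static bool cs_interleaved(uint32_t mask)
{
	int msb = fls32(mask) - 1, lsb = ffs32(mask) - 1;

	return (mask ^ genmask(msb, lsb)) != 0;
}

int main(void)
{
	printf("0x0007f000 -> %d\n", cs_interleaved(0x0007f000));	/* 0: contiguous */
	printf("0x0007e800 -> %d\n", cs_interleaved(0x0007e800));	/* 1: hole => interleaved */
	return 0;
}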
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4242f8e39c18..8f66472f7adc 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -117,6 +117,8 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
/*
* Function 1 - Address Map
@@ -272,8 +274,6 @@
#define UMC_SDP_INIT BIT(31)
-#define NUM_UMCS 2
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
@@ -284,6 +284,7 @@ enum amd_families {
F16_M30H_CPUS,
F17_CPUS,
F17_M10H_CPUS,
+ F17_M30H_CPUS,
NUM_FAMILIES,
};
@@ -363,7 +364,7 @@ struct amd64_pvt {
u32 dct_sel_hi; /* DRAM Controller Select High */
u32 online_spare; /* On-Line spare Reg */
- /* x4 or x8 syndromes in use */
+ /* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
@@ -396,8 +397,8 @@ struct err_info {
static inline u32 get_umc_base(u8 channel)
{
- /* ch0: 0x50000, ch1: 0x150000 */
- return 0x50000 + (!!channel << 20);
+ /* chY: 0xY50000 */
+ return 0x50000 + (channel << 20);
}
static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 13594ffadcb3..64922c8fa7e3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -679,22 +679,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
struct mem_ctl_info *edac_mc_find(int idx)
{
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx) {
- goto unlock;
- }
- break;
- }
+ if (mci->mc_idx == idx)
+ goto unlock;
}
+ mci = NULL;
unlock:
mutex_unlock(&mem_ctls_mutex);
return mci;
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index c334fb7c63df..6f06aec4877c 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -181,6 +181,54 @@ static struct notifier_block i10nm_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/i10nm_test/addr.
+ */
+static struct dentry *i10nm_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_i10nm_debug(void)
+{
+ i10nm_test = edac_debugfs_create_dir("i10nm_test");
+ if (!i10nm_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(i10nm_test);
+ i10nm_test = NULL;
+ }
+}
+
+static void teardown_i10nm_debug(void)
+{
+ debugfs_remove_recursive(i10nm_test);
+}
+#else
+static inline void setup_i10nm_debug(void) {}
+static inline void teardown_i10nm_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
static int __init i10nm_init(void)
{
u8 mc = 0, src_id = 0, node_id = 0;
@@ -249,7 +297,7 @@ static int __init i10nm_init(void)
opstate_init();
mce_register_decode_chain(&i10nm_mce_dec);
- setup_skx_debug("i10nm_test");
+ setup_i10nm_debug();
i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);
@@ -262,7 +310,7 @@ fail:
static void __exit i10nm_exit(void)
{
edac_dbg(2, "\n");
- teardown_skx_debug();
+ teardown_i10nm_debug();
mce_unregister_decode_chain(&i10nm_mce_dec);
skx_adxl_put();
skx_remove();
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 0a1814dad6cf..bb0202ad7a13 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1004,7 +1004,7 @@ static inline void amd_decode_err_code(u16 ec)
/*
* Filter out unwanted MCE signatures here.
*/
-static bool amd_filter_mce(struct mce *m)
+static bool ignore_mce(struct mce *m)
{
/*
* NB GART TLB error reporting is disabled by default.
@@ -1038,7 +1038,7 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
unsigned int fam = x86_family(m->cpuid);
int ecc;
- if (amd_filter_mce(m))
+ if (ignore_mce(m))
return NOTIFY_STOP;
pr_emerg(HW_ERR "%s\n", decode_error_status(m));
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index adae4c848ca1..a5c8fa3a249a 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -540,6 +540,54 @@ static struct notifier_block skx_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/skx_test/addr.
+ */
+static struct dentry *skx_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_skx_debug(void)
+{
+ skx_test = edac_debugfs_create_dir("skx_test");
+ if (!skx_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, skx_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(skx_test);
+ skx_test = NULL;
+ }
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static inline void setup_skx_debug(void) {}
+static inline void teardown_skx_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
/*
* skx_init:
* make sure we are running on the correct cpu model
@@ -619,7 +667,7 @@ static int __init skx_init(void)
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- setup_skx_debug("skx_test");
+ setup_skx_debug();
mce_register_decode_chain(&skx_mce_dec);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 0e96e7b5b0a7..b0dddcfa9baa 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -1,7 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver.
- * Originally split out from the skx_edac driver.
+ *
+ * Code shared by the skx_edac and i10nm_edac drivers. Originally split
+ * out from the skx_edac driver.
+ *
+ * This file is linked into both the skx_edac and i10nm_edac drivers. To
+ * avoid link errors, it must behave like a pure library, without symbols
+ * or defines that would conflict when it is linked into a module and
+ * into a built-in object at the same time. One example is __this_module,
+ * which must not be referenced when this file is linked into a built-in
+ * object.
*
* Copyright (c) 2018, Intel Corporation.
*/
@@ -644,48 +652,3 @@ void skx_remove(void)
kfree(d);
}
}
-
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature.
- * Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/dirname/addr.
- */
-static struct dentry *skx_test;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct mce m;
-
- pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
-
- memset(&m, 0, sizeof(m));
- /* ADDRV + MemRd + Unknown channel */
- m.status = MCI_STATUS_ADDRV + 0x90;
- /* One corrected error */
- m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
- m.addr = val;
- skx_mce_check_error(NULL, 0, &m);
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-void setup_skx_debug(const char *dirname)
-{
- skx_test = edac_debugfs_create_dir(dirname);
- if (!skx_test)
- return;
-
- if (!edac_debugfs_create_file("addr", 0200, skx_test,
- NULL, &fops_u64_wo)) {
- debugfs_remove(skx_test);
- skx_test = NULL;
- }
-}
-
-void teardown_skx_debug(void)
-{
- debugfs_remove_recursive(skx_test);
-}
-#endif /*CONFIG_EDAC_DEBUG*/
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index d25374e34d4f..d18fa98669af 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -141,12 +141,4 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
void skx_remove(void);
-#ifdef CONFIG_EDAC_DEBUG
-void setup_skx_debug(const char *dirname);
-void teardown_skx_debug(void);
-#else
-static inline void setup_skx_debug(const char *dirname) {}
-static inline void teardown_skx_debug(void) {}
-#endif /*CONFIG_EDAC_DEBUG*/
-
#endif /* _SKX_COMM_EDAC_H */
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 540e8cd16ee6..de06fafb52ff 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -30,7 +30,7 @@ config EXTCON_ARIZONA
config EXTCON_AXP288
tristate "X-Power AXP288 EXTCON support"
- depends on MFD_AXP20X && USB_SUPPORT && X86
+ depends on MFD_AXP20X && USB_SUPPORT && X86 && ACPI
select USB_ROLE_SWITCH
help
Say Y here to enable support for USB peripheral detection
@@ -60,6 +60,13 @@ config EXTCON_INTEL_CHT_WC
Say Y here to enable extcon support for charger detection / control
on the Intel Cherrytrail Whiskey Cove PMIC.
+config EXTCON_INTEL_MRFLD
+ tristate "Intel Merrifield Basin Cove PMIC extcon driver"
+ depends on INTEL_SOC_PMIC_MRFLD
+ help
+ Say Y here to enable extcon support for charger detection / control
+ on the Intel Merrifield Basin Cove PMIC.
+
config EXTCON_MAX14577
tristate "Maxim MAX14577/77836 EXTCON Support"
depends on MFD_MAX14577
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 261ce4cfe209..d3941a735df3 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EXTCON_AXP288) += extcon-axp288.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
obj-$(CONFIG_EXTCON_INTEL_CHT_WC) += extcon-intel-cht-wc.o
+obj-$(CONFIG_EXTCON_INTEL_MRFLD) += extcon-intel-mrfld.o
obj-$(CONFIG_EXTCON_MAX14577) += extcon-max14577.o
obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
index f599aeddf8e5..f487d877ab5d 100644
--- a/drivers/extcon/devres.c
+++ b/drivers/extcon/devres.c
@@ -205,7 +205,7 @@ EXPORT_SYMBOL(devm_extcon_register_notifier);
/**
* devm_extcon_unregister_notifier()
- - Resource-managed extcon_unregister_notifier()
+ * - Resource-managed extcon_unregister_notifier()
* @dev: the device owning the extcon device being created
* @edev: the extcon device
* @id: the unique id among the extcon enumeration
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index da0e9bc4262f..9327479c719c 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
struct arizona_extcon_info *info = platform_get_drvdata(pdev);
struct arizona *arizona = info->arizona;
int jack_irq_rise, jack_irq_fall;
+ bool change;
+
+ regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
+ ARIZONA_MICD_ENA, 0,
+ &change);
+
+ if (change) {
+ regulator_disable(info->micvdd);
+ pm_runtime_put(info->dev);
+ }
gpiod_put(info->micd_pol_gpio);
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index a983708b77a6..50f9402fb325 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -333,7 +333,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
- const char *name;
+ struct acpi_device *adev;
int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -357,9 +357,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = acpi_dev_get_first_match_name("INT3496", NULL, -1);
- if (name) {
- info->id_extcon = extcon_get_extcon_dev(name);
+ adev = acpi_dev_get_first_match_dev("INT3496", NULL, -1);
+ if (adev) {
+ info->id_extcon = extcon_get_extcon_dev(acpi_dev_name(adev));
+ put_device(&adev->dev);
if (!info->id_extcon)
return -EPROBE_DEFER;
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5ef215297101..9d32150e68db 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -17,6 +17,8 @@
#include <linux/regmap.h>
#include <linux/slab.h>
+#include "extcon-intel.h"
+
#define CHT_WC_PHYCTRL 0x5e07
#define CHT_WC_CHGRCTRL0 0x5e16
@@ -29,7 +31,15 @@
#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
-#define CHT_WC_CHGRCTRL1 0x5e17
+#define CHT_WC_CHGRCTRL1 0x5e17
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_100 BIT(0)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_150 BIT(1)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_500 BIT(2)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_900 BIT(3)
+#define CHT_WC_CHGRCTRL1_FUSB_INLMT_1500 BIT(4)
+#define CHT_WC_CHGRCTRL1_FTEMP_EVENT BIT(5)
+#define CHT_WC_CHGRCTRL1_OTGMODE BIT(6)
+#define CHT_WC_CHGRCTRL1_DBPEN BIT(7)
#define CHT_WC_USBSRC 0x5e29
#define CHT_WC_USBSRC_STS_MASK GENMASK(1, 0)
@@ -48,6 +58,13 @@
#define CHT_WC_USBSRC_TYPE_OTHER 8
#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
+#define CHT_WC_CHGDISCTRL 0x5e2f
+#define CHT_WC_CHGDISCTRL_OUT BIT(0)
+/* 0 - open drain, 1 - regular push-pull output */
+#define CHT_WC_CHGDISCTRL_DRV BIT(4)
+/* 0 - pin is controlled by SW, 1 - by HW */
+#define CHT_WC_CHGDISCTRL_FN BIT(6)
+
#define CHT_WC_PWRSRC_IRQ 0x6e03
#define CHT_WC_PWRSRC_IRQ_MASK 0x6e0f
#define CHT_WC_PWRSRC_STS 0x6e1e
@@ -65,15 +82,6 @@
#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4)
#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5)
-enum cht_wc_usb_id {
- USB_ID_OTG,
- USB_ID_GND,
- USB_ID_FLOAT,
- USB_RID_A,
- USB_RID_B,
- USB_RID_C,
-};
-
enum cht_wc_mux_select {
MUX_SEL_PMIC = 0,
MUX_SEL_SOC,
@@ -101,9 +109,9 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
{
switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
case CHT_WC_PWRSRC_RID_GND:
- return USB_ID_GND;
+ return INTEL_USB_ID_GND;
case CHT_WC_PWRSRC_RID_FLOAT:
- return USB_ID_FLOAT;
+ return INTEL_USB_ID_FLOAT;
case CHT_WC_PWRSRC_RID_ACA:
default:
/*
@@ -111,7 +119,7 @@ static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
* the USBID GPADC channel here and determine ACA role
* based on that.
*/
- return USB_ID_FLOAT;
+ return INTEL_USB_ID_FLOAT;
}
}
@@ -198,6 +206,30 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
}
+static void cht_wc_extcon_set_otgmode(struct cht_wc_extcon_data *ext,
+ bool enable)
+{
+ unsigned int val = enable ? CHT_WC_CHGRCTRL1_OTGMODE : 0;
+ int ret;
+
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL1,
+ CHT_WC_CHGRCTRL1_OTGMODE, val);
+ if (ret)
+ dev_err(ext->dev, "Error updating CHGRCTRL1 reg: %d\n", ret);
+}
+
+static void cht_wc_extcon_enable_charging(struct cht_wc_extcon_data *ext,
+ bool enable)
+{
+ unsigned int val = enable ? 0 : CHT_WC_CHGDISCTRL_OUT;
+ int ret;
+
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+ CHT_WC_CHGDISCTRL_OUT, val);
+ if (ret)
+ dev_err(ext->dev, "Error updating CHGDISCTRL reg: %d\n", ret);
+}
+
/* Small helper to sync EXTCON_CHG_USB_SDP and EXTCON_USB state */
static void cht_wc_extcon_set_state(struct cht_wc_extcon_data *ext,
unsigned int cable, bool state)
@@ -221,11 +253,17 @@ static void cht_wc_extcon_pwrsrc_event(struct cht_wc_extcon_data *ext)
}
id = cht_wc_extcon_get_id(ext, pwrsrc_sts);
- if (id == USB_ID_GND) {
+ if (id == INTEL_USB_ID_GND) {
+ cht_wc_extcon_enable_charging(ext, false);
+ cht_wc_extcon_set_otgmode(ext, true);
+
/* The 5v boost causes a false VBUS / SDP detect, skip */
goto charger_det_done;
}
+ cht_wc_extcon_set_otgmode(ext, false);
+ cht_wc_extcon_enable_charging(ext, true);
+
/* Plugged into a host/charger or not connected? */
if (!(pwrsrc_sts & CHT_WC_PWRSRC_VBUS)) {
/* Route D+ and D- to PMIC for future charger detection */
@@ -248,7 +286,7 @@ set_state:
ext->previous_cable = cable;
}
- ext->usb_host = ((id == USB_ID_GND) || (id == USB_RID_A));
+ ext->usb_host = ((id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A));
extcon_set_state_sync(ext->edev, EXTCON_USB_HOST, ext->usb_host);
}
@@ -278,6 +316,14 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
{
int ret, mask, val;
+ val = enable ? 0 : CHT_WC_CHGDISCTRL_FN;
+ ret = regmap_update_bits(ext->regmap, CHT_WC_CHGDISCTRL,
+ CHT_WC_CHGDISCTRL_FN, val);
+ if (ret)
+ dev_err(ext->dev,
+ "Error setting sw control for CHGDIS pin: %d\n",
+ ret);
+
mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
val = enable ? mask : 0;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
@@ -329,7 +375,10 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
/* Enable sw control */
ret = cht_wc_extcon_sw_control(ext, true);
if (ret)
- return ret;
+ goto disable_sw_control;
+
+ /* Disable charging by external battery charger */
+ cht_wc_extcon_enable_charging(ext, false);
/* Register extcon device */
ret = devm_extcon_dev_register(ext->dev, ext->edev);
diff --git a/drivers/extcon/extcon-intel-mrfld.c b/drivers/extcon/extcon-intel-mrfld.c
new file mode 100644
index 000000000000..f47016fb28a8
--- /dev/null
+++ b/drivers/extcon/extcon-intel-mrfld.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * extcon driver for Basin Cove PMIC
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#include <linux/extcon-provider.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mfd/intel_soc_pmic_mrfld.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "extcon-intel.h"
+
+#define BCOVE_USBIDCTRL 0x19
+#define BCOVE_USBIDCTRL_ID BIT(0)
+#define BCOVE_USBIDCTRL_ACA BIT(1)
+#define BCOVE_USBIDCTRL_ALL (BCOVE_USBIDCTRL_ID | BCOVE_USBIDCTRL_ACA)
+
+#define BCOVE_USBIDSTS 0x1a
+#define BCOVE_USBIDSTS_GND BIT(0)
+#define BCOVE_USBIDSTS_RARBRC_MASK GENMASK(2, 1)
+#define BCOVE_USBIDSTS_RARBRC_SHIFT 1
+#define BCOVE_USBIDSTS_NO_ACA 0
+#define BCOVE_USBIDSTS_R_ID_A 1
+#define BCOVE_USBIDSTS_R_ID_B 2
+#define BCOVE_USBIDSTS_R_ID_C 3
+#define BCOVE_USBIDSTS_FLOAT BIT(3)
+#define BCOVE_USBIDSTS_SHORT BIT(4)
+
+#define BCOVE_CHGRIRQ_ALL (BCOVE_CHGRIRQ_VBUSDET | BCOVE_CHGRIRQ_DCDET | \
+ BCOVE_CHGRIRQ_BATTDET | BCOVE_CHGRIRQ_USBIDDET)
+
+#define BCOVE_CHGRCTRL0 0x4b
+#define BCOVE_CHGRCTRL0_CHGRRESET BIT(0)
+#define BCOVE_CHGRCTRL0_EMRGCHREN BIT(1)
+#define BCOVE_CHGRCTRL0_EXTCHRDIS BIT(2)
+#define BCOVE_CHGRCTRL0_SWCONTROL BIT(3)
+#define BCOVE_CHGRCTRL0_TTLCK BIT(4)
+#define BCOVE_CHGRCTRL0_BIT_5 BIT(5)
+#define BCOVE_CHGRCTRL0_BIT_6 BIT(6)
+#define BCOVE_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
+
+struct mrfld_extcon_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct extcon_dev *edev;
+ unsigned int status;
+ unsigned int id;
+};
+
+static const unsigned int mrfld_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_SDP,
+ EXTCON_CHG_USB_CDP,
+ EXTCON_CHG_USB_DCP,
+ EXTCON_CHG_USB_ACA,
+ EXTCON_NONE,
+};
+
+static int mrfld_extcon_clear(struct mrfld_extcon_data *data, unsigned int reg,
+ unsigned int mask)
+{
+ return regmap_update_bits(data->regmap, reg, mask, 0x00);
+}
+
+static int mrfld_extcon_set(struct mrfld_extcon_data *data, unsigned int reg,
+ unsigned int mask)
+{
+ return regmap_update_bits(data->regmap, reg, mask, 0xff);
+}
+
+static int mrfld_extcon_sw_control(struct mrfld_extcon_data *data, bool enable)
+{
+ unsigned int mask = BCOVE_CHGRCTRL0_SWCONTROL;
+ struct device *dev = data->dev;
+ int ret;
+
+ if (enable)
+ ret = mrfld_extcon_set(data, BCOVE_CHGRCTRL0, mask);
+ else
+ ret = mrfld_extcon_clear(data, BCOVE_CHGRCTRL0, mask);
+ if (ret)
+ dev_err(dev, "can't set SW control: %d\n", ret);
+ return ret;
+}
+
+static int mrfld_extcon_get_id(struct mrfld_extcon_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int id;
+ bool ground;
+ int ret;
+
+ ret = regmap_read(regmap, BCOVE_USBIDSTS, &id);
+ if (ret)
+ return ret;
+
+ if (id & BCOVE_USBIDSTS_FLOAT)
+ return INTEL_USB_ID_FLOAT;
+
+ switch ((id & BCOVE_USBIDSTS_RARBRC_MASK) >> BCOVE_USBIDSTS_RARBRC_SHIFT) {
+ case BCOVE_USBIDSTS_R_ID_A:
+ return INTEL_USB_RID_A;
+ case BCOVE_USBIDSTS_R_ID_B:
+ return INTEL_USB_RID_B;
+ case BCOVE_USBIDSTS_R_ID_C:
+ return INTEL_USB_RID_C;
+ }
+
+ /*
+ * PMIC A0 reports USBIDSTS_GND = 1 for ID_GND,
+ * but PMIC B0 reports USBIDSTS_GND = 0 for ID_GND.
+	 * Thus we must check this bit last.
+ */
+ ground = id & BCOVE_USBIDSTS_GND;
+ switch ('A' + BCOVE_MAJOR(data->id)) {
+ case 'A':
+ return ground ? INTEL_USB_ID_GND : INTEL_USB_ID_FLOAT;
+ case 'B':
+ return ground ? INTEL_USB_ID_FLOAT : INTEL_USB_ID_GND;
+ }
+
+ /* Unknown or unsupported type */
+ return INTEL_USB_ID_FLOAT;
+}
+
+static int mrfld_extcon_role_detect(struct mrfld_extcon_data *data)
+{
+ unsigned int id;
+ bool usb_host;
+ int ret;
+
+ ret = mrfld_extcon_get_id(data);
+ if (ret < 0)
+ return ret;
+
+ id = ret;
+
+ usb_host = (id == INTEL_USB_ID_GND) || (id == INTEL_USB_RID_A);
+ extcon_set_state_sync(data->edev, EXTCON_USB_HOST, usb_host);
+
+ return 0;
+}
+
+static int mrfld_extcon_cable_detect(struct mrfld_extcon_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ unsigned int status, change;
+ int ret;
+
+ /*
+	 * It seems the SCU firmware clears the content of BCOVE_CHGRIRQ1
+	 * and makes it useless for the OS. Instead, we compare a previously
+	 * stored status with the current one, provided by BCOVE_SCHGRIRQ1.
+ */
+ ret = regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
+ if (ret)
+ return ret;
+
+ change = status ^ data->status;
+ if (!change)
+ return -ENODATA;
+
+ if (change & BCOVE_CHGRIRQ_USBIDDET) {
+ ret = mrfld_extcon_role_detect(data);
+ if (ret)
+ return ret;
+ }
+
+ data->status = status;
+
+ return 0;
+}
+
+static irqreturn_t mrfld_extcon_interrupt(int irq, void *dev_id)
+{
+ struct mrfld_extcon_data *data = dev_id;
+ int ret;
+
+ ret = mrfld_extcon_cable_detect(data);
+
+ mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+
+	return ret ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int mrfld_extcon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct regmap *regmap = pmic->regmap;
+ struct mrfld_extcon_data *data;
+ unsigned int id;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ data->regmap = regmap;
+
+ data->edev = devm_extcon_dev_allocate(dev, mrfld_extcon_cable);
+ if (IS_ERR(data->edev))
+ return -ENOMEM;
+
+ ret = devm_extcon_dev_register(dev, data->edev);
+ if (ret < 0) {
+ dev_err(dev, "can't register extcon device: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_extcon_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, pdev->name,
+ data);
+ if (ret) {
+ dev_err(dev, "can't register IRQ handler: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(regmap, BCOVE_ID, &id);
+ if (ret) {
+ dev_err(dev, "can't read PMIC ID: %d\n", ret);
+ return ret;
+ }
+
+ data->id = id;
+
+ ret = mrfld_extcon_sw_control(data, true);
+ if (ret)
+ return ret;
+
+ /* Get initial state */
+ mrfld_extcon_role_detect(data);
+
+ mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
+ mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);
+
+ mrfld_extcon_set(data, BCOVE_USBIDCTRL, BCOVE_USBIDCTRL_ALL);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int mrfld_extcon_remove(struct platform_device *pdev)
+{
+ struct mrfld_extcon_data *data = platform_get_drvdata(pdev);
+
+ mrfld_extcon_sw_control(data, false);
+
+ return 0;
+}
+
+static const struct platform_device_id mrfld_extcon_id_table[] = {
+ { .name = "mrfld_bcove_pwrsrc" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, mrfld_extcon_id_table);
+
+static struct platform_driver mrfld_extcon_driver = {
+ .driver = {
+ .name = "mrfld_bcove_pwrsrc",
+ },
+ .probe = mrfld_extcon_probe,
+ .remove = mrfld_extcon_remove,
+ .id_table = mrfld_extcon_id_table,
+};
+module_platform_driver(mrfld_extcon_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_DESCRIPTION("extcon driver for Intel Merrifield Basin Cove PMIC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-intel.h b/drivers/extcon/extcon-intel.h
new file mode 100644
index 000000000000..0ad645ec7b33
--- /dev/null
+++ b/drivers/extcon/extcon-intel.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for Intel extcon hardware
+ *
+ * Copyright (C) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __EXTCON_INTEL_H__
+#define __EXTCON_INTEL_H__
+
+enum extcon_intel_usb_id {
+ INTEL_USB_ID_OTG,
+ INTEL_USB_ID_GND,
+ INTEL_USB_ID_FLOAT,
+ INTEL_USB_RID_A,
+ INTEL_USB_RID_B,
+ INTEL_USB_RID_C,
+};
+
+#endif /* __EXTCON_INTEL_H__ */
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 35e784cffc23..5414eb1306aa 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -107,19 +107,8 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
struct vm_area_struct *vma)
{
- unsigned long uaddr;
- int i, err;
-
- uaddr = vma->vm_start;
- for (i = 0; i < buffer->page_count; i++) {
- err = vm_insert_page(vma, uaddr, buffer->pages[i]);
- if (err)
- return err;
-
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages_zero(vma, buffer->pages,
+ buffer->page_count);
}
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index a128dd1126ae..515e96db4391 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -303,7 +303,7 @@ nosy_open(struct inode *inode, struct file *file)
file->private_data = client;
- return nonseekable_open(inode, file);
+ return stream_open(inode, file);
fail:
kfree(client);
lynx_put(lynx);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 45c048751f3b..7183ab34269e 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2939,7 +2939,6 @@ static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
- mmiowb();
ohci->mc_channels = channels;
}
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cac16c4b0df3..11fda9eb2466 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,20 +5,6 @@
menu "Firmware Drivers"
-config ARM_PSCI_FW
- bool
-
-config ARM_PSCI_CHECKER
- bool "ARM PSCI checker"
- depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
- help
- Run the PSCI checker during startup. This checks that hotplug and
- suspend operations work correctly when using PSCI.
-
- The torture tests may interfere with the PSCI checker by turning CPUs
- on and off through hotplug, so for now torture tests and PSCI checker
- are mutually exclusive.
-
config ARM_SCMI_PROTOCOL
bool "ARM System Control and Management Interface (SCMI) Message Protocol"
depends on ARM || ARM64 || COMPILE_TEST
@@ -267,9 +253,26 @@ config TI_SCI_PROTOCOL
This protocol library is used by client drivers to use the features
provided by the system controller.
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
config HAVE_ARM_SMCCC
bool
+source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 80feb635120f..3fa0b34eb72f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -2,8 +2,6 @@
#
# Makefile for the linux kernel.
#
-obj-$(CONFIG_ARM_PSCI_FW) += psci.o
-obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
@@ -23,8 +21,10 @@ obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
+obj-y += psci/
obj-y += broadcom/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8f952f2f1a29..b5bc4c7a8fab 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -654,9 +654,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
static int scmi_mailbox_check(struct device_node *np)
{
- struct of_phandle_args arg;
-
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
@@ -798,7 +796,9 @@ static int scmi_probe(struct platform_device *pdev)
return -EINVAL;
}
- desc = of_match_device(scmi_of_match, dev)->data;
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index e6376f985ef7..9cd70d1a5622 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -165,6 +165,7 @@ static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
return err;
}
+NOKPROBE_SYMBOL(invoke_sdei_fn);
static struct sdei_event *sdei_event_find(u32 event_num)
{
@@ -879,6 +880,7 @@ static void sdei_smccc_smc(unsigned long function_id,
{
arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_smc);
static void sdei_smccc_hvc(unsigned long function_id,
unsigned long arg0, unsigned long arg1,
@@ -887,6 +889,7 @@ static void sdei_smccc_hvc(unsigned long function_id,
{
arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
+NOKPROBE_SYMBOL(sdei_smccc_hvc);
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
sdei_event_callback *critical_cb)
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 099d83e4e910..fae2d5c43314 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -416,11 +416,8 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
nr++;
}
-void __init dmi_memdev_walk(void)
+static void __init dmi_memdev_walk(void)
{
- if (!dmi_available)
- return;
-
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
@@ -614,7 +611,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
return 1;
}
-void __init dmi_scan_machine(void)
+static void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
@@ -769,15 +766,20 @@ static int __init dmi_init(void)
subsys_initcall(dmi_init);
/**
- * dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
+ * dmi_setup - scan and setup DMI system information
*
- * Invoke dump_stack_set_arch_desc() with DMI system information so that
- * DMI identifiers are printed out on task dumps. Arch boot code should
- * call this function after dmi_scan_machine() if it wants to print out DMI
- * identifiers on task dumps.
+ * Scan the DMI system information. This sets up DMI identifiers
+ * (dmi_system_id) for printing on task dumps and prepares DIMM entry
+ * information (dmi_memdev_info) from the SMBIOS table for use when
+ * reporting memory errors.
*/
-void __init dmi_set_dump_stack_arch_desc(void)
+void __init dmi_setup(void)
{
+ dmi_scan_machine();
+ if (!dmi_available)
+ return;
+
+ dmi_memdev_walk();
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
@@ -841,7 +843,7 @@ static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
* returns non zero or we hit the end. Callback function is called for
* each successful match. Returns the number of matches.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
@@ -871,7 +873,7 @@ EXPORT_SYMBOL(dmi_check_system);
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*
- * dmi_scan_machine must be called before this function is called.
+ * dmi_setup must be called before this function is called.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 0c1af675c338..e2ac5fa5531b 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -162,13 +162,11 @@ void efi_virtmap_unload(void)
static int __init arm_dmi_init(void)
{
/*
- * On arm64/ARM, DMI depends on UEFI, and dmi_scan_machine() needs to
+ * On arm64/ARM, DMI depends on UEFI, and dmi_setup() needs to
* be called early because dmi_id_init(), which is an arch_initcall
* itself, depends on dmi_scan_machine() having been called already.
*/
- dmi_scan_machine();
- if (dmi_available)
- dmi_set_dump_stack_arch_desc();
+ dmi_setup();
return 0;
}
core_initcall(arm_dmi_init);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index b0103e16fc1b..0460c7581220 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -16,9 +16,9 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM64) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+ -fpie $(DISABLE_STACKLEAK_PLUGIN)
+cflags-$(CONFIG_ARM) := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base)
@@ -71,7 +71,6 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y)
lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y))
-STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab*
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
@@ -86,12 +85,13 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
# this time, use objcopy and leave all sections in place.
#
quiet_cmd_stubcopy = STUBCPY $@
- cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
- then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
- then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
- rm -f $@; /bin/false); \
- else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \
- else /bin/false; fi
+ cmd_stubcopy = \
+ $(STRIP) --strip-debug -o $@ $<; \
+ if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then \
+ echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
+ /bin/false; \
+ fi; \
+ $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
#
# ARM discards the .data section because it disallows r/w data in the
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index c0c0b4e4e281..f240946ed701 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -254,7 +254,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
static int vpd_sections_init(phys_addr_t physaddr)
{
- struct vpd_cbmem __iomem *temp;
+ struct vpd_cbmem *temp;
struct vpd_cbmem header;
int ret = 0;
@@ -262,7 +262,7 @@ static int vpd_sections_init(phys_addr_t physaddr)
if (!temp)
return -ENOMEM;
- memcpy_fromio(&header, temp, sizeof(struct vpd_cbmem));
+ memcpy(&header, temp, sizeof(struct vpd_cbmem));
memunmap(temp);
if (header.magic != VPD_CBMEM_MAGIC)
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 1b2e15b3c9ca..802c4ad8e8f9 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 000000000000..043833ad3c1a
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/mailbox_client.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE 1
+#define IMX_SC_IRQ_FUNC_STATUS 2
+#define IMX_SC_IRQ_NUM_GROUP 4
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ u8 group;
+ u8 reserved;
+ } __packed req;
+ struct {
+ u32 status;
+ } resp;
+ } data;
+};
+
+struct imx_sc_msg_irq_enable {
+ struct imx_sc_rpc_msg hdr;
+ u32 mask;
+ u16 resource;
+ u8 group;
+ u8 enable;
+} __packed;
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+ return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
+ status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+ struct imx_sc_msg_irq_get_status msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ u32 irq_status;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+ hdr->size = 2;
+
+ msg.data.req.resource = mu_resource_id;
+ msg.data.req.group = i;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("get irq group %d status failed, ret %d\n",
+ i, ret);
+ return;
+ }
+
+ irq_status = msg.data.resp.status;
+ if (!irq_status)
+ continue;
+
+ imx_scu_irq_notifier_call_chain(irq_status, &i);
+ }
+}
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ struct imx_sc_msg_irq_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = mu_resource_id;
+ msg.group = group;
+ msg.mask = mask;
+ msg.enable = enable;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+ group, mask, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+ schedule_work(&imx_sc_irq_work);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ struct of_phandle_args spec;
+ struct mbox_client *cl;
+ struct mbox_chan *ch;
+ int ret = 0, i = 0;
+
+ ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+ if (ret)
+ return ret;
+
+ cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = dev;
+ cl->rx_callback = imx_scu_irq_callback;
+
+ /* SCU general IRQ uses general interrupt channel 3 */
+ ch = mbox_request_channel_byname(cl, "gip3");
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+ devm_kfree(dev, cl);
+ return ret;
+ }
+
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec))
+ i = of_alias_get_id(spec.np, "mu");
+
+	/* use mu1 as the general MU IRQ channel if the lookup failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 2bb1a19c413f..04a24a863d6e 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -246,6 +247,11 @@ static int imx_scu_probe(struct platform_device *pdev)
imx_sc_ipc_handle = sc_ipc;
+ ret = imx_scu_enable_general_irq_channel(dev);
+ if (ret)
+ dev_warn(dev,
+ "failed to enable general irq channel: %d\n", ret);
+
dev_info(dev, "NXP i.MX SCU Initialized\n");
return devm_of_platform_populate(dev);
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 39a94c7177fc..480cec69e2c9 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -74,7 +74,10 @@ struct imx_sc_pd_range {
char *name;
u32 rsrc;
u8 num;
+
+ /* add domain index */
bool postfix;
+ u8 start_from;
};
struct imx_sc_pd_soc {
@@ -84,71 +87,75 @@ struct imx_sc_pd_soc {
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
- { "lsio-pwm", IMX_SC_R_PWM_0, 8, 1 },
- { "lsio-gpio", IMX_SC_R_GPIO_0, 8, 1 },
- { "lsio-gpt", IMX_SC_R_GPT_0, 5, 1 },
- { "lsio-kpp", IMX_SC_R_KPP, 1, 0 },
- { "lsio-fspi", IMX_SC_R_FSPI_0, 2, 1 },
- { "lsio-mu", IMX_SC_R_MU_0A, 14, 1 },
+ { "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
+ { "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
+ { "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
+ { "kpp", IMX_SC_R_KPP, 1, false, 0 },
+ { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
+ { "mu", IMX_SC_R_MU_0A, 14, true, 0 },
/* CONN SS */
- { "con-usb", IMX_SC_R_USB_0, 2, 1 },
- { "con-usb0phy", IMX_SC_R_USB_0_PHY, 1, 0 },
- { "con-usb2", IMX_SC_R_USB_2, 1, 0 },
- { "con-usb2phy", IMX_SC_R_USB_2_PHY, 1, 0 },
- { "con-sdhc", IMX_SC_R_SDHC_0, 3, 1 },
- { "con-enet", IMX_SC_R_ENET_0, 2, 1 },
- { "con-nand", IMX_SC_R_NAND, 1, 0 },
- { "con-mlb", IMX_SC_R_MLB_0, 1, 1 },
-
- /* Audio DMA SS */
- { "adma-audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, 0 },
- { "adma-audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, 0 },
- { "adma-audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, 0 },
- { "adma-dma0-ch", IMX_SC_R_DMA_0_CH0, 16, 1 },
- { "adma-dma1-ch", IMX_SC_R_DMA_1_CH0, 16, 1 },
- { "adma-dma2-ch", IMX_SC_R_DMA_2_CH0, 5, 1 },
- { "adma-asrc0", IMX_SC_R_ASRC_0, 1, 0 },
- { "adma-asrc1", IMX_SC_R_ASRC_1, 1, 0 },
- { "adma-esai0", IMX_SC_R_ESAI_0, 1, 0 },
- { "adma-spdif0", IMX_SC_R_SPDIF_0, 1, 0 },
- { "adma-sai", IMX_SC_R_SAI_0, 3, 1 },
- { "adma-amix", IMX_SC_R_AMIX, 1, 0 },
- { "adma-mqs0", IMX_SC_R_MQS_0, 1, 0 },
- { "adma-dsp", IMX_SC_R_DSP, 1, 0 },
- { "adma-dsp-ram", IMX_SC_R_DSP_RAM, 1, 0 },
- { "adma-can", IMX_SC_R_CAN_0, 3, 1 },
- { "adma-ftm", IMX_SC_R_FTM_0, 2, 1 },
- { "adma-lpi2c", IMX_SC_R_I2C_0, 4, 1 },
- { "adma-adc", IMX_SC_R_ADC_0, 1, 1 },
- { "adma-lcd", IMX_SC_R_LCD_0, 1, 1 },
- { "adma-lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, 1 },
- { "adma-lpuart", IMX_SC_R_UART_0, 4, 1 },
- { "adma-lpspi", IMX_SC_R_SPI_0, 4, 1 },
-
- /* VPU SS */
- { "vpu", IMX_SC_R_VPU, 1, 0 },
- { "vpu-pid", IMX_SC_R_VPU_PID0, 8, 1 },
- { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, 0 },
- { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, 0 },
+ { "usb", IMX_SC_R_USB_0, 2, true, 0 },
+ { "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
+ { "usb2", IMX_SC_R_USB_2, 1, false, 0 },
+ { "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
+ { "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
+ { "enet", IMX_SC_R_ENET_0, 2, true, 0 },
+ { "nand", IMX_SC_R_NAND, 1, false, 0 },
+ { "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
+
+ /* AUDIO SS */
+ { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
+ { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
+ { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
+ { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
+ { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+ { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
+ { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
+ { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
+ { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "amix", IMX_SC_R_AMIX, 1, false, 0 },
+ { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
+ { "dsp", IMX_SC_R_DSP, 1, false, 0 },
+ { "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
+
+ /* DMA SS */
+ { "can", IMX_SC_R_CAN_0, 3, true, 0 },
+ { "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
+ { "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
+ { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
+ { "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
+ { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+
+ /* VPU SS */
+ { "vpu", IMX_SC_R_VPU, 1, false, 0 },
+ { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
+ { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
+ { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
/* GPU SS */
- { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, 1 },
+ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
/* HSIO SS */
- { "hsio-pcie-b", IMX_SC_R_PCIE_B, 1, 0 },
- { "hsio-serdes-1", IMX_SC_R_SERDES_1, 1, 0 },
- { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, 0 },
+ { "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
+ { "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
+ { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
+
+ /* MIPI SS */
+ { "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
+ { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
+ { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
- /* MIPI/LVDS SS */
- { "mipi0", IMX_SC_R_MIPI_0, 1, 0 },
- { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, 0 },
- { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, 1 },
- { "lvds0", IMX_SC_R_LVDS_0, 1, 0 },
+ /* LVDS SS */
+ { "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
/* DC SS */
- { "dc0", IMX_SC_R_DC_0, 1, 0 },
- { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, 1 },
+ { "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+ { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
@@ -236,7 +243,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
if (pd_ranges->postfix)
snprintf(sc_pd->name, sizeof(sc_pd->name),
- "%s%i", pd_ranges->name, idx);
+ "%s%i", pd_ranges->name, pd_ranges->start_from + idx);
else
snprintf(sc_pd->name, sizeof(sc_pd->name),
"%s", pd_ranges->name);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f5aa1e..a5dc0629f225 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -425,7 +425,7 @@ static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ACPITBL_SIGNATURE:
- str += sprintf_string(str, ACPI_NAME_SIZE,
+ str += sprintf_string(str, ACPI_NAMESEG_SIZE,
entry->header->header.signature);
break;
case ISCSI_BOOT_ACPITBL_OEM_ID:
diff --git a/drivers/firmware/psci/Kconfig b/drivers/firmware/psci/Kconfig
new file mode 100644
index 000000000000..26a3b32bf7ab
--- /dev/null
+++ b/drivers/firmware/psci/Kconfig
@@ -0,0 +1,13 @@
+config ARM_PSCI_FW
+ bool
+
+config ARM_PSCI_CHECKER
+ bool "ARM PSCI checker"
+ depends on ARM_PSCI_FW && HOTPLUG_CPU && CPU_IDLE && !TORTURE_TEST
+ help
+ Run the PSCI checker during startup. This checks that hotplug and
+ suspend operations work correctly when using PSCI.
+
+ The torture tests may interfere with the PSCI checker by turning CPUs
+ on and off through hotplug, so for now torture tests and PSCI checker
+ are mutually exclusive.
diff --git a/drivers/firmware/psci/Makefile b/drivers/firmware/psci/Makefile
new file mode 100644
index 000000000000..1956b882470f
--- /dev/null
+++ b/drivers/firmware/psci/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_ARM_PSCI_FW) += psci.o
+obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci/psci.c
index c80ec1d03274..fe090ef43d28 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -88,6 +88,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK)
static u32 psci_cpu_suspend_feature;
+static bool psci_system_reset2_supported;
static inline bool psci_has_ext_power_state(void)
{
@@ -95,6 +96,11 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
+static inline bool psci_has_osi_support(void)
+{
+ return psci_cpu_suspend_feature & PSCI_1_0_OS_INITIATED;
+}
+
static inline bool psci_power_state_loses_context(u32 state)
{
const u32 mask = psci_has_ext_power_state() ?
@@ -253,7 +259,17 @@ static int get_set_conduit_method(struct device_node *np)
static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
{
- invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
+ psci_system_reset2_supported) {
+ /*
+ * reset_type[31] = 0 (architectural)
+ * reset_type[30:0] = 0 (SYSTEM_WARM_RESET)
+ * cookie = 0 (ignored by the implementation)
+ */
+ invoke_psci_fn(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2), 0, 0, 0);
+ } else {
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+ }
}
static void psci_sys_poweroff(void)
@@ -270,9 +286,26 @@ static int __init psci_features(u32 psci_func_id)
#ifdef CONFIG_CPU_IDLE
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
+{
+ int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
+
+ if (err) {
+ pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
+ return err;
+ }
+
+ if (!psci_power_state_is_valid(*state)) {
+ pr_warn("Invalid PSCI power state %#x\n", *state);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
{
- int i, ret, count = 0;
+ int i, ret = 0, count = 0;
u32 *psci_states;
struct device_node *state_node;
@@ -291,29 +324,16 @@ static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
return -ENOMEM;
for (i = 0; i < count; i++) {
- u32 state;
-
state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+ ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
+ of_node_put(state_node);
- ret = of_property_read_u32(state_node,
- "arm,psci-suspend-param",
- &state);
- if (ret) {
- pr_warn(" * %pOF missing arm,psci-suspend-param property\n",
- state_node);
- of_node_put(state_node);
+ if (ret)
goto free_mem;
- }
- of_node_put(state_node);
- pr_debug("psci-power-state %#x index %d\n", state, i);
- if (!psci_power_state_is_valid(state)) {
- pr_warn("Invalid PSCI power state %#x\n", state);
- ret = -EINVAL;
- goto free_mem;
- }
- psci_states[i] = state;
+ pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
}
+
/* Idle states parsed correctly, initialize per-cpu pointer */
per_cpu(psci_power_state, cpu) = psci_states;
return 0;
@@ -451,6 +471,16 @@ static const struct platform_suspend_ops psci_suspend_ops = {
.enter = psci_system_suspend_enter,
};
+static void __init psci_init_system_reset2(void)
+{
+ int ret;
+
+ ret = psci_features(PSCI_FN_NATIVE(1_1, SYSTEM_RESET2));
+
+ if (ret != PSCI_RET_NOT_SUPPORTED)
+ psci_system_reset2_supported = true;
+}
+
static void __init psci_init_system_suspend(void)
{
int ret;
@@ -588,6 +618,7 @@ static int __init psci_probe(void)
psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
+ psci_init_system_reset2();
}
return 0;
@@ -605,9 +636,9 @@ static int __init psci_0_2_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
+
/*
* Starting with v0.2, the PSCI specification introduced a call
* (PSCI_VERSION) that allows probing the firmware version, so
@@ -615,11 +646,7 @@ static int __init psci_0_2_init(struct device_node *np)
* can be carried out according to the specific version reported
* by firmware
*/
- err = psci_probe();
-
-out_put_node:
- of_node_put(np);
- return err;
+ return psci_probe();
}
/*
@@ -631,9 +658,8 @@ static int __init psci_0_1_init(struct device_node *np)
int err;
err = get_set_conduit_method(np);
-
if (err)
- goto out_put_node;
+ return err;
pr_info("Using PSCI v0.1 Function IDs from DT\n");
@@ -657,15 +683,27 @@ static int __init psci_0_1_init(struct device_node *np)
psci_ops.migrate = psci_migrate;
}
-out_put_node:
- of_node_put(np);
- return err;
+ return 0;
+}
+
+static int __init psci_1_0_init(struct device_node *np)
+{
+ int err;
+
+ err = psci_0_2_init(np);
+ if (err)
+ return err;
+
+ if (psci_has_osi_support())
+ pr_info("OSI mode supported.\n");
+
+ return 0;
}
static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
- { .compatible = "arm,psci-1.0", .data = psci_0_2_init},
+ { .compatible = "arm,psci-1.0", .data = psci_1_0_init},
{},
};
@@ -674,6 +712,7 @@ int __init psci_dt_init(void)
struct device_node *np;
const struct of_device_id *matched_np;
psci_initcall_t init_fn;
+ int ret;
np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
@@ -681,7 +720,10 @@ int __init psci_dt_init(void)
return -ENODEV;
init_fn = (psci_initcall_t)matched_np->data;
- return init_fn(np);
+ ret = init_fn(np);
+
+ of_node_put(np);
+ return ret;
}
#ifdef CONFIG_ACPI
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 346943657962..346943657962 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 3fbbb61012c4..ef93406ace1b 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -65,18 +65,36 @@ struct ti_sci_xfers_info {
};
/**
+ * struct ti_sci_rm_type_map - Structure representing TISCI Resource
+ * management representation of dev_ids.
+ * @dev_id: TISCI device ID
+ * @type: Corresponding id as identified by TISCI RM.
+ *
+ * Note: This is used only as a workaround for using the RM range APIs
+ * on the AM654 SoC. For future SoCs, dev_id will be used as the type
+ * for the RM range APIs. In order to maintain ABI backward
+ * compatibility, the type is not being changed for the AM654 SoC.
+ */
+struct ti_sci_rm_type_map {
+ u32 dev_id;
+ u16 type;
+};
+
+/**
* struct ti_sci_desc - Description of SoC integration
* @default_host_id: Host identifier representing the compute entity
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msgs: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
+ * @rm_type_map: RM resource type mapping structure.
*/
struct ti_sci_desc {
u8 default_host_id;
int max_rx_timeout_ms;
int max_msgs;
int max_msg_size;
+ struct ti_sci_rm_type_map *rm_type_map;
};
/**
@@ -1600,6 +1618,392 @@ fail:
return ret;
}
+static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
+ u16 *type)
+{
+ struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
+ bool found = false;
+ int i;
+
+ /* If map is not provided then assume dev_id is used as type */
+ if (!rm_type_map) {
+ *type = dev_id;
+ return 0;
+ }
+
+ for (i = 0; rm_type_map[i].dev_id; i++) {
+ if (rm_type_map[i].dev_id == dev_id) {
+ *type = rm_type_map[i].type;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ * to a host. Resource is uniquely identified by
+ * type and subtype.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ struct ti_sci_msg_resp_get_resource_range *resp;
+ struct ti_sci_msg_req_get_resource_range *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ u16 type;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_get_resource_type(info, dev_id, &type);
+ if (ret) {
+ dev_err(dev, "rm type lookup failed for %u\n", dev_id);
+ goto fail;
+ }
+
+ req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+ req->secondary_host = s_host;
+ req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+ req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else if (!resp->range_start && !resp->range_num) {
+ ret = -ENODEV;
+ } else {
+ *range_start = resp->range_start;
+ *range_num = resp->range_num;
+ }
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to the
+ * host that is the same as the TI SCI interface host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype,
+ TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+ range_start, range_num);
+}
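Once ti_sci_setup_ops() (further down in this hunk) wires this wrapper into rm_core_ops, a client holding a TISCI handle can query its assigned range directly, as devm_ti_sci_get_of_resource() does later in the patch. A rough sketch, with the device ID and subtype values chosen purely for illustration:

#include <linux/printk.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int example_query_range(const struct ti_sci_handle *handle)
{
	u16 start, num;
	int ret;

	/* dev_id 188 and subtype 0 are placeholder values */
	ret = handle->ops.rm_core_ops.get_range(handle, 188, 0, &start, &num);
	if (ret)
		return ret;

	pr_info("resources %u..%u assigned to this host\n", start, start + num - 1);
	return 0;
}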
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ * assigned to a specified host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
+ range_start, range_num);
+}
+
+/**
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
+ * the requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID for which the irq/event is being
+ * requested.
+ * @type: Request type: irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 src_id, u16 src_index,
+ u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host,
+ u16 type)
+{
+ struct ti_sci_msg_req_manage_irq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->src_id = src_id;
+ req->src_index = src_index;
+ req->dst_id = dst_id;
+ req->dst_host_irq = dst_host_irq;
+ req->ia_id = ia_id;
+ req->vint = vint;
+ req->global_event = global_event;
+ req->vint_status_bit = vint_status_bit;
+ req->secondary_host = s_host;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID for which the irq/event is being
+ * requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper api to free the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host: Secondary host ID for which the irq/event is being
+ * requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ * source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ * requested source and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+ MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
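For reference, a hypothetical consumer of the rm_irq_ops callbacks registered below could map and later unmap an event roughly like this (all IDs are invented placeholders):

#include <linux/soc/ti/ti_sci_protocol.h>

static int example_map_event(const struct ti_sci_handle *handle)
{
	const struct ti_sci_rm_irq_ops *iops = &handle->ops.rm_irq_ops;
	int ret;

	/* Route global event 16 to status bit 0 of vint 4 on IA device 179 */
	ret = iops->set_event_map(handle, 188, 0, 179, 4, 16, 0);
	if (ret)
		return ret;

	/* ... later, tear the same mapping down again ... */
	iops->free_event_map(handle, 188, 0, 179, 4, 16, 0);
	return 0;
}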
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the requested
+ * source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ * and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID |
+ MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
/*
* ti_sci_setup_ops() - Setup the operations structures
* @info: pointer to TISCI pointer
@@ -1610,6 +2014,8 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+ struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
core_ops->reboot_device = ti_sci_cmd_core_reboot;
@@ -1640,6 +2046,15 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+ rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+ rm_core_ops->get_range_from_shost =
+ ti_sci_cmd_get_resource_range_from_shost;
+
+ iops->set_irq = ti_sci_cmd_set_irq;
+ iops->set_event_map = ti_sci_cmd_set_event_map;
+ iops->free_irq = ti_sci_cmd_free_irq;
+ iops->free_event_map = ti_sci_cmd_free_event_map;
}
/**
@@ -1764,6 +2179,219 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np: device node
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with a successful ti_sci_get_by_phandle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct ti_sci_handle *handle = NULL;
+ struct device_node *ti_sci_np;
+ struct ti_sci_info *info;
+ struct list_head *p;
+
+ if (!np) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ti_sci_np = of_parse_phandle(np, property, 0);
+ if (!ti_sci_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
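A non-managed lookup pairs with ti_sci_put_handle(), as the comment above notes. A minimal sketch, assuming the phandle lives in a property called "ti,sci" (the name is illustrative only):

	const struct ti_sci_handle *handle;

	handle = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* may be -EPROBE_DEFER until the TISCI node probes */

	/* ... use handle->ops ... */

	ti_sci_put_handle(handle);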
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev: Device pointer requesting TISCI handle
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by the caller of the TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct ti_sci_handle *handle;
+ const struct ti_sci_handle **ptr;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
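The managed variant fits naturally into a client probe; when the getter returns an error pointer, propagating it lets the driver core retry the probe later. A sketch (property name again assumed):

#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int example_client_probe(struct platform_device *pdev)
{
	const struct ti_sci_handle *handle;

	handle = devm_ti_sci_get_by_phandle(&pdev->dev, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);	/* -EPROBE_DEFER simply defers the probe */

	/* the handle is released automatically when the device is unbound */
	return 0;
}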
+
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+ unsigned long flags;
+ u16 set, free_bit;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ free_bit = find_first_zero_bit(res->desc[set].res_map,
+ res->desc[set].num);
+ if (free_bit != res->desc[set].num) {
+ set_bit(free_bit, res->desc[set].res_map);
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+ return res->desc[set].start + free_bit;
+ }
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+
+ return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ * @id: Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+ unsigned long flags;
+ u16 set;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ if (res->desc[set].start <= id &&
+ (res->desc[set].num + res->desc[set].start) > id)
+ clear_bit(id - res->desc[set].start,
+ res->desc[set].res_map);
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
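The two helpers above form an allocate/release pair over the per-set bitmaps. A caller that already holds a populated struct ti_sci_resource might use them roughly as follows:

	u16 idx = ti_sci_get_free_resource(res);

	if (idx == TI_SCI_RESOURCE_NULL)
		return -ENOSPC;	/* every index in every range is already in use */

	/* ... program the hardware with idx ... */

	ti_sci_release_resource(res, idx);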
+
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+ u32 set, count = 0;
+
+ for (set = 0; set < res->sets; set++)
+ count += res->desc[set].num;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop: property name by which the resources are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 resource_subtype;
+ int i, ret;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ res->sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (res->sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(res->sets);
+ }
+
+ res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+ GFP_KERNEL);
+ if (!res->desc)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < res->sets; i++) {
+ ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
+ &resource_subtype);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+ resource_subtype,
+ &res->desc[i].start,
+ &res->desc[i].num);
+ if (ret) {
+ dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
+ dev_id, resource_subtype);
+ return ERR_PTR(ret);
+ }
+
+ dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_id, resource_subtype, res->desc[i].start,
+ res->desc[i].num);
+
+ res->desc[i].res_map =
+ devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+ sizeof(*res->desc[i].res_map), GFP_KERNEL);
+ if (!res->desc[i].res_map)
+ return ERR_PTR(-ENOMEM);
+ }
+ raw_spin_lock_init(&res->lock);
+
+ return res;
+}
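Putting the pieces together, a client probe could build its resource pool from DT in one call; the device ID and property name below are placeholders for illustration:

	struct ti_sci_resource *res;

	res = devm_ti_sci_get_of_resource(handle, &pdev->dev, 188,
					  "ti,sci-rm-range-example");
	if (IS_ERR(res))
		return PTR_ERR(res);

	dev_info(&pdev->dev, "%u resources available\n", ti_sci_get_num_resources(res));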
+
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
@@ -1784,10 +2412,33 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
.max_msgs = 20,
.max_msg_size = 64,
+ .rm_type_map = NULL,
+};
+
+static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
+ {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
+ {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
+ {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
+ {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
+ {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
+ {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
+ {.dev_id = 0, .type = 0x000}, /* end of table */
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+ .default_host_id = 12,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 10000,
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 60,
+ .rm_type_map = ti_sci_am654_rm_type_map,
};
static const struct of_device_id ti_sci_of_match[] = {
{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 12bf316b68df..4983827151bf 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -35,6 +35,13 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ 0x1000
+#define TI_SCI_MSG_FREE_IRQ 0x1001
+
/**
* struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
* @type: Type of messages: One of TI_SCI_MSG* values
@@ -461,4 +468,99 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ * range of resources.
+ * @hdr: Generic Header
+ * @type: Unique resource assignment type
+ * @subtype: Resource assignment subtype within the resource type.
+ * @secondary_host: Host processing entity to which the resources are
+ * allocated. This is required only when the destination
+ * host ID is different from the TI SCI interface host ID,
+ * else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. The response carries the
+ * requested resource range in a struct ti_sci_msg_resp_get_resource_range.
+ */
+struct ti_sci_msg_req_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK GENMASK(5, 0)
+ u16 type;
+ u8 subtype;
+ u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr: Generic Header
+ * @range_start: Start index of the resource range.
+ * @range_num: Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+ u16 range_start;
+ u16 range_num;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ * between the dev and the host.
+ * @hdr: Generic Header
+ * @valid_params: Bit fields defining the validity of interrupt source
+ * parameters. If a bit is not set, then corresponding
+ * field is not valid and will not be used for route set.
+ * Bit field definitions:
+ * 0 - Valid bit for @dst_id
+ * 1 - Valid bit for @dst_host_irq
+ * 2 - Valid bit for @ia_id
+ * 3 - Valid bit for @vint
+ * 4 - Valid bit for @global_event
+ * 5 - Valid bit for @vint_status_bit_index
+ * 31 - Valid bit for @secondary_host
+ * @src_id: IRQ source peripheral ID.
+ * @src_index: IRQ source index within the peripheral
+ * @dst_id: IRQ Destination ID. Based on the architecture it can be
+ * IRQ controller or host processor ID.
+ * @dst_host_irq: IRQ number of the destination host IRQ controller
+ * @ia_id: Device ID of the interrupt aggregator in which the
+ * vint resides.
+ * @vint: Virtual interrupt number if the interrupt route
+ * is through an interrupt aggregator.
+ * @global_event: Global event that is to be mapped to interrupt
+ * aggregator virtual interrupt status bit.
+ * @vint_status_bit: Virtual interrupt status bit if the interrupt route
+ * utilizes an interrupt aggregator status bit.
+ * @secondary_host: Host ID of the IRQ destination computing entity. This is
+ * required only when destination host id is different
+ * from ti sci interface host id.
+ *
+ * Request type is TI_SCI_MSG_SET/RELEASE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID TI_SCI_MSG_FLAG(31)
+ u32 valid_params;
+ u16 src_id;
+ u16 src_index;
+ u16 dst_id;
+ u16 dst_host_irq;
+ u16 ia_id;
+ u16 vint;
+ u16 global_event;
+ u8 vint_status_bit;
+ u8 secondary_host;
+} __packed;
+
#endif /* __TI_SCI_H */
diff --git a/arch/arm/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
index 689e6565abfc..fd4999388ff1 100644
--- a/arch/arm/firmware/trusted_foundations.c
+++ b/drivers/firmware/trusted_foundations.c
@@ -17,8 +17,17 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
#include <asm/firmware.h>
-#include <asm/trusted_foundations.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
@@ -60,16 +69,75 @@ static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
return 0;
}
-static int tf_prepare_idle(void)
+static int tf_prepare_idle(unsigned long mode)
{
- tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2, cpu_boot_addr);
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
return 0;
}
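Platform suspend code is expected to reach this through the generic firmware_ops interface rather than calling tf_prepare_idle() directly; assuming the generic prepare_idle op now carries the mode argument (as the new signature above suggests), the call would look roughly like:

#include <linux/firmware/trusted_foundations.h>
#include <asm/firmware.h>

static int example_enter_lp2(void)
{
	/* Dispatches to tf_prepare_idle() when TF firmware is registered */
	return call_firmware_op(prepare_idle, TF_PM_MODE_LP2);
}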
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
static const struct firmware_ops trusted_foundations_ops = {
.set_cpu_boot_addr = tf_set_cpu_boot_addr,
.prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
};
void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
@@ -101,3 +169,8 @@ void of_register_trusted_foundations(void)
panic("Trusted Foundation: missing version-minor property\n");
register_trusted_foundations(&pdata);
}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 2771df6df379..c6d0724da4db 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -90,9 +90,6 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
int ret;
struct zynqmp_pm_query_data qdata = {0};
- if (!eemi_ops)
- return -ENXIO;
-
switch (pm_id) {
case PM_GET_API_VERSION:
ret = eemi_ops->get_api_version(&pm_api_version);
@@ -163,21 +160,14 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
strcpy(debugfs_buf, "");
- if (*off != 0 || len == 0)
+ if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
return -EINVAL;
- kern_buff = kzalloc(len, GFP_KERNEL);
- if (!kern_buff)
- return -ENOMEM;
-
+ kern_buff = memdup_user_nul(ptr, len);
+ if (IS_ERR(kern_buff))
+ return PTR_ERR(kern_buff);
tmp_buff = kern_buff;
- ret = strncpy_from_user(kern_buff, ptr, len);
- if (ret < 0) {
- ret = -EFAULT;
- goto err;
- }
-
/* Read the API name from a user request */
pm_api_req = strsep(&kern_buff, " ");
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 98f936125643..fd3d83745208 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,6 +24,8 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -538,6 +540,49 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
}
/**
+ * zynqmp_pm_fpga_load - Perform the fpga load
+ * @address: Address to write to
+ * @size: pl bitstream size
+ * @flags: Bitstream type
+ * -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to pmufw to transfer
+ * the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value: Pointer used to return the PCAP status register value
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
@@ -640,6 +685,8 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.request_node = zynqmp_pm_request_node,
.release_node = zynqmp_pm_release_node,
.set_requirement = zynqmp_pm_set_requirement,
+ .fpga_load = zynqmp_pm_fpga_load,
+ .fpga_get_status = zynqmp_pm_fpga_get_status,
};
/**
@@ -649,7 +696,11 @@ static const struct zynqmp_eemi_ops eemi_ops = {
*/
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
- return &eemi_ops;
+ if (eemi_ops_tbl)
+ return eemi_ops_tbl;
+ else
+ return ERR_PTR(-EPROBE_DEFER);
+
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
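With this change the getter can hand back ERR_PTR(-EPROBE_DEFER) until the firmware driver has probed, so callers should test with IS_ERR() rather than for NULL. A sketch of the expected client pattern:

	const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();

	if (IS_ERR(eemi_ops))
		return PTR_ERR(eemi_ops);	/* typically -EPROBE_DEFER */

	/* eemi_ops->fpga_load(), ->fpga_get_status(), etc. are now safe to call */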
@@ -694,6 +745,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+ /* Assign eemi_ops_table */
+ eemi_ops_tbl = &eemi_ops;
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index c20445b867ae..d892f3efcd76 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -204,4 +204,13 @@ config FPGA_DFL_PCI
To compile this as a module, choose M here.
+config FPGA_MGR_ZYNQMP_FPGA
+ tristate "Xilinx ZynqMP FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ FPGA manager driver support for Xilinx ZynqMP FPGAs.
+ This driver uses the processor configuration port (PCAP)
+ to configure the programmable logic (PL) through the PS
+ on the ZynqMP SoC.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index c0dd4c82fbdb..312b9371742f 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index e18a786fc943..c438722bf4e1 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -102,7 +102,7 @@ static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
goto unlock_vm;
}
- pinned = get_user_pages_fast(region->user_addr, npages, 1,
+ pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
region->pages);
if (pinned < 0) {
ret = pinned;
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
new file mode 100644
index 000000000000..f7cbaadf49ab
--- /dev/null
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define IXR_FPGA_DONE_MASK BIT(3)
+
+/**
+ * struct zynqmp_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: flags used to identify the bitstream type
+ */
+struct zynqmp_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct zynqmp_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ u32 eemi_flags = 0;
+ char *kbuf;
+ int ret;
+
+ if (IS_ERR(eemi_ops) || !eemi_ops->fpga_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiating the FW call */
+
+ if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+
+ ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ u32 status;
+
+ if (IS_ERR(eemi_ops) || !eemi_ops->fpga_get_status)
+ return FPGA_MGR_STATE_UNKNOWN;
+
+ eemi_ops->fpga_get_status(&status);
+ if (status & IXR_FPGA_DONE_MASK)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops zynqmp_fpga_ops = {
+ .state = zynqmp_fpga_ops_state,
+ .write_init = zynqmp_fpga_ops_write_init,
+ .write = zynqmp_fpga_ops_write,
+ .write_complete = zynqmp_fpga_ops_write_complete,
+};
+
+static int zynqmp_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret) {
+ dev_err(dev, "unable to register FPGA manager");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_fpga_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pcap-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
+
+static struct platform_driver zynqmp_fpga_driver = {
+ .probe = zynqmp_fpga_probe,
+ .remove = zynqmp_fpga_remove,
+ .driver = {
+ .name = "zynqmp_fpga_manager",
+ .of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ },
+};
+
+module_platform_driver(zynqmp_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx ZynqMp FPGA Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c
index 320cfca80d5f..e6f94501cb28 100644
--- a/drivers/gnss/core.c
+++ b/drivers/gnss/core.c
@@ -42,7 +42,7 @@ static int gnss_open(struct inode *inode, struct file *file)
get_device(&gdev->dev);
- nonseekable_open(inode, file);
+ stream_open(inode, file);
file->private_data = gdev;
down_write(&gdev->rwsem);
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 12568aebb7f6..7b05bc40532e 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -130,6 +130,7 @@ static void ubx_remove(struct serdev_device *serdev)
#ifdef CONFIG_OF
static const struct of_device_id ubx_of_match[] = {
+ { .compatible = "u-blox,neo-6m" },
{ .compatible = "u-blox,neo-8" },
{ .compatible = "u-blox,neo-m8" },
{},
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 3f50526a771f..8023d03ec362 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -12,7 +12,6 @@ config ARCH_HAVE_CUSTOM_GPIO_H
menuconfig GPIOLIB
bool "GPIO Support"
- select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
@@ -27,12 +26,12 @@ config GPIOLIB_FASTPATH_LIMIT
range 32 512
default 512
help
- This adjusts the point at which certain APIs will switch from
- using a stack allocated buffer to a dynamically allocated buffer.
+ This adjusts the point at which certain APIs will switch from
+ using a stack allocated buffer to a dynamically allocated buffer.
- You shouldn't need to change this unless you really need to
- optimize either stack space or performance. Change this carefully
- since setting an incorrect value could cause stack corruption.
+ You shouldn't need to change this unless you really need to
+ optimize either stack space or performance. Change this carefully
+ since setting an incorrect value could cause stack corruption.
config OF_GPIO
def_bool y
@@ -287,6 +286,19 @@ config GPIO_IOP
If unsure, say N.
+config GPIO_IXP4XX
+ bool "Intel IXP4xx GPIO"
+ depends on ARM # For <asm/mach-types.h>
+ depends on ARCH_IXP4XX
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support the GPIO functionality of a number of chips
+ in the Intel IXP4xx series.
+
+ If unsure, say N.
+
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
depends on CPU_LOONGSON2 || CPU_LOONGSON3
@@ -320,7 +332,7 @@ config GPIO_MENZ127
depends on MCB
select GPIO_GENERIC
help
- Say yes here to support the MEN 16Z127 GPIO Controller
+ Say yes here to support the MEN 16Z127 GPIO Controller
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
@@ -330,20 +342,6 @@ config GPIO_MM_LANTIQ
(EBU) found on Lantiq SoCs. The gpios are output only as they are
created by attaching a 16bit latch to the bus.
-config GPIO_MOCKUP
- tristate "GPIO Testing Driver"
- depends on GPIOLIB && SYSFS
- select GPIO_SYSFS
- select GPIOLIB_IRQCHIP
- select IRQ_SIM
- help
- This enables GPIO Testing driver, which provides a way to test GPIO
- subsystem through sysfs(or char device) and debugfs. GPIO_SYSFS
- must be selected for this test.
- User could use it through the script in
- tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
- it.
-
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -862,11 +860,11 @@ config GPIO_MAX732X
Input and Output (designed by 'P'). The combinations are listed
below:
- 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
- max7322 (4I4O), max7323 (4P4O)
+ 8 bits: max7319 (8I), max7320 (8O), max7321 (8P),
+ max7322 (4I4O), max7323 (4P4O)
- 16 bits: max7324 (8I8O), max7325 (8P8O),
- max7326 (4I12O), max7327 (4P12O)
+ 16 bits: max7324 (8I8O), max7325 (8P8O),
+ max7326 (4I12O), max7327 (4P12O)
Board setup code must specify the model to use, and the start
number for these GPIOs.
@@ -893,17 +891,17 @@ config GPIO_PCA953X
SMBus I/O expanders, made mostly by NXP or TI. Compatible
models include:
- 4 bits: pca9536, pca9537
+ 4 bits: pca9536, pca9537
- 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ 8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
- 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ 16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
+ tca6416
- 24 bits: tca6424
+ 24 bits: tca6424
- 40 bits: pca9505, pca9698
+ 40 bits: pca9505, pca9698
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -925,7 +923,7 @@ config GPIO_PCF857X
8 bits: pcf8574, pcf8574a, pca8574, pca8574a,
pca9670, pca9672, pca9674, pca9674a,
- max7328, max7329
+ max7328, max7329
16 bits: pcf8575, pcf8575c, pca8575,
pca9671, pca9673, pca9675
@@ -1047,9 +1045,9 @@ config HTC_EGPIO
bool "HTC EGPIO support"
depends on GPIOLIB && ARM
help
- This driver supports the CPLD egpio chip present on
- several HTC phones. It provides basic support for input
- pins, output pins, and irqs.
+ This driver supports the CPLD egpio chip present on
+ several HTC phones. It provides basic support for input
+ pins, output pins, and irqs.
config GPIO_JANZ_TTL
tristate "Janz VMOD-TTL Digital IO Module"
@@ -1085,7 +1083,7 @@ config GPIO_LP873X
on LP873X PMICs.
This driver can also be built as a module. If so, the module will be
- called gpio-lp873x.
+ called gpio-lp873x.
config GPIO_LP87565
tristate "TI LP87565 GPIO"
@@ -1112,6 +1110,13 @@ config GPIO_MAX77620
driver also provides interrupt support for each of the gpios.
Say yes here to enable the max77620 to be used as gpio controller.
+config GPIO_MAX77650
+ tristate "Maxim MAX77650/77651 GPIO support"
+ depends on MFD_MAX77650
+ help
+ GPIO driver for MAX77650/77651 PMIC from Maxim Semiconductor.
+ These chips have a single pin that can be configured as GPIO.
+
config GPIO_MSIC
bool "Intel MSIC mixed signal gpio support"
depends on (X86 || COMPILE_TEST) && MFD_INTEL_MSIC
@@ -1316,6 +1321,13 @@ config GPIO_MERRIFIELD
help
Say Y here to support Intel Merrifield GPIO.
+config GPIO_MLXBF
+ tristate "Mellanox BlueField SoC GPIO"
+ depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
+ select GPIO_GENERIC
+ help
+ Say Y here if you want GPIO support on Mellanox BlueField SoC.
+
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
depends on X86 || COMPILE_TEST
@@ -1436,10 +1448,22 @@ config GPIO_VIPERBOARD
Say yes here to access the GPIO signals of Nano River
Technologies Viperboard. There are two GPIO chips on the
board: gpioa and gpiob.
- See viperboard API specification and Nano
- River Tech's viperboard.h for detailed meaning
- of the module parameters.
+ See viperboard API specification and Nano
+ River Tech's viperboard.h for detailed meaning
+ of the module parameters.
endmenu
+config GPIO_MOCKUP
+ tristate "GPIO Testing Driver"
+ depends on GPIOLIB
+ select IRQ_SIM
+ help
+ This enables the GPIO Testing driver, which provides a way to test the
+ GPIO subsystem through sysfs (or a char device) and debugfs. GPIO_SYSFS
+ must be selected for this test.
+ Users can exercise it through the script in
+ tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
+ it.
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 54d55274b93a..6700eee860b7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
@@ -80,11 +81,13 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
+obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MB86S7X) += gpio-mb86s7x.o
obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
+obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index fb7b620763a2..e81307f9754e 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -1,21 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
*
* Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio/driver.h>
#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
#define GEN_74X164_NUMBER_GPIOS 8
@@ -116,10 +113,9 @@ static int gen_74x164_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- if (of_property_read_u32(spi->dev.of_node, "registers-number",
- &nregs)) {
- dev_err(&spi->dev,
- "Missing registers-number property in the DT.\n");
+ ret = device_property_read_u32(&spi->dev, "registers-number", &nregs);
+ if (ret) {
+ dev_err(&spi->dev, "Missing 'registers-number' property.\n");
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 49616ec815ee..04247075091d 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -106,7 +106,6 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
struct mmio_74xx_gpio_priv *priv;
- struct resource *res;
void __iomem *dat;
int err;
@@ -116,8 +115,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
priv->flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 9b78dc837603..1ffd7c2d1285 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -78,7 +78,6 @@ static int pt_gpio_probe(struct platform_device *pdev)
struct acpi_device *acpi_dev;
acpi_handle handle = ACPI_HANDLE(dev);
struct pt_gpio_chip *pt_gpio;
- struct resource *res_mem;
int ret = 0;
if (acpi_bus_get_device(handle, &acpi_dev)) {
@@ -90,12 +89,7 @@ static int pt_gpio_probe(struct platform_device *pdev)
if (!pt_gpio)
return -ENOMEM;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&pdev->dev, "Failed to get MMIO resource for PT GPIO.\n");
- return -EINVAL;
- }
- pt_gpio->reg_base = devm_ioremap_resource(dev, res_mem);
+ pt_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pt_gpio->reg_base)) {
dev_err(&pdev->dev, "Failed to map MMIO resource for PT GPIO.\n");
return PTR_ERR(pt_gpio->reg_base);
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 217507002dbc..0f1b55c7c361 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1156,15 +1156,13 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
{
const struct of_device_id *gpio_id;
struct aspeed_gpio *gpio;
- struct resource *res;
int rc, i, banks;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
if (!gpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base))
return PTR_ERR(gpio->base);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index c5536a509b59..9fa6d3a967d2 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -568,7 +568,6 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *match;
- struct resource *res;
struct bcm_kona_gpio_bank *bank;
struct bcm_kona_gpio *kona_gpio;
struct gpio_chip *chip;
@@ -618,8 +617,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
return -ENXIO;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kona_gpio->reg_base = devm_ioremap_resource(dev, res);
+ kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
ret = -ENXIO;
goto err_irq_domain;
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index aec8d5df9f30..712ae212b0b4 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -148,7 +148,6 @@ static struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
struct cdns_gpio_chip *cgpio;
- struct resource *res;
int ret, irq;
u32 dir_prev;
u32 num_gpios = 32;
@@ -157,8 +156,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (!cgpio)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cgpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ cgpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cgpio->regs))
return PTR_ERR(cgpio->regs);
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index 52fd63f02134..0fbbb0edc0ba 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -19,7 +19,6 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
void __iomem *dat, *dir;
struct gpio_chip *gc;
- struct resource *res;
int err, id;
if (!np)
@@ -33,13 +32,11 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dat = devm_ioremap_resource(&pdev->dev, res);
+ dat = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dat))
return PTR_ERR(dat);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dir = devm_ioremap_resource(&pdev->dev, res);
+ dir = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dir))
return PTR_ERR(dir);
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 84ae04402f70..d3eda65fd6d3 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -655,7 +655,6 @@ MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
static int dwapb_gpio_probe(struct platform_device *pdev)
{
unsigned int i;
- struct resource *res;
struct dwapb_gpio *gpio;
int err;
struct device *dev = &pdev->dev;
@@ -688,8 +687,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
if (!gpio->ports)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+ gpio->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->regs))
return PTR_ERR(gpio->regs);
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 45fe125823a8..8ff8ce2970d9 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -225,7 +225,6 @@ static int ftgpio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct ftgpio_gpio *g;
int irq;
int ret;
@@ -236,8 +235,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
g->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- g->base = devm_ioremap_resource(dev, res);
+ g->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g->base))
return PTR_ERR(g->base);
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index a7b17897356e..e5fa00f8145f 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -208,7 +208,6 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
static int hlwd_gpio_probe(struct platform_device *pdev)
{
struct hlwd_gpio *hlwd;
- struct resource *regs_resource;
u32 ngpios;
int res;
@@ -216,8 +215,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
if (!hlwd)
return -ENOMEM;
- regs_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hlwd->regs = devm_ioremap_resource(&pdev->dev, regs_resource);
+ hlwd->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hlwd->regs))
return PTR_ERR(hlwd->regs);
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 8d62db447ec1..11b77d868c89 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -21,7 +21,6 @@
static int iop3xx_gpio_probe(struct platform_device *pdev)
{
- struct resource *res;
struct gpio_chip *gc;
void __iomem *base;
int err;
@@ -30,8 +29,7 @@ static int iop3xx_gpio_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
new file mode 100644
index 000000000000..4b1cf7ea858d
--- /dev/null
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// IXP4 GPIO driver
+// Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+//
+// based on previous work and know-how from:
+// Deepak Saxena <dsaxena@plexity.net>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+/* Includes that will go away with the DT transition */
+#include <linux/irqchip/irq-ixp4xx.h>
+
+#include <asm/mach-types.h>
+
+#define IXP4XX_REG_GPOUT 0x00
+#define IXP4XX_REG_GPOE 0x04
+#define IXP4XX_REG_GPIN 0x08
+#define IXP4XX_REG_GPIS 0x0C
+#define IXP4XX_REG_GPIT1 0x10
+#define IXP4XX_REG_GPIT2 0x14
+#define IXP4XX_REG_GPCLK 0x18
+#define IXP4XX_REG_GPDBSEL 0x1C
+
+/*
+ * The hardware uses 3 bits to indicate interrupt "style".
+ * We clear and set these three bits accordingly. The lower 24
+ * bits in two registers (GPIT1 and GPIT2) are used to set up
+ * the style for 8 lines each for a total of 16 GPIO lines.
+ */
+#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
+#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
+#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
+#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
+#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
+#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
+#define IXP4XX_GPIO_STYLE_SIZE 3
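As the comment above describes, each GPIO line owns a 3-bit style field, with eight fields per register. A small sketch of the line-to-field mapping that ixp4xx_gpio_irq_set_type() applies further down (helper name invented for illustration):

static void example_style_field(unsigned int line, u32 *reg, u32 *shift)
{
	/* lines 0-7 live in GPIT1, lines 8-15 in GPIT2 */
	*reg = (line >= 8) ? IXP4XX_REG_GPIT2 : IXP4XX_REG_GPIT1;
	*shift = (line % 8) * IXP4XX_GPIO_STYLE_SIZE;	/* 3 bits per line */
}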
+
+/**
+ * struct ixp4xx_gpio - IXP4 GPIO state container
+ * @dev: containing device for this instance
+ * @fwnode: the fwnode for this GPIO chip
+ * @gc: gpiochip for this instance
+ * @domain: irqdomain for this chip instance
+ * @base: remapped I/O-memory base
+ * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
+ * 0: level triggered
+ */
+struct ixp4xx_gpio {
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip gc;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long long irq_edge;
+};
+
+/**
+ * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
+ * @gpio_offset: offset of the IXP4 GPIO line
+ * @parent_hwirq: hwirq on the parent IRQ controller
+ */
+struct ixp4xx_gpio_map {
+ int gpio_offset;
+ int parent_hwirq;
+};
+
+/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
+const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
+ { .gpio_offset = 0, .parent_hwirq = 6 },
+ { .gpio_offset = 1, .parent_hwirq = 7 },
+ { .gpio_offset = 2, .parent_hwirq = 19 },
+ { .gpio_offset = 3, .parent_hwirq = 20 },
+ { .gpio_offset = 4, .parent_hwirq = 21 },
+ { .gpio_offset = 5, .parent_hwirq = 22 },
+ { .gpio_offset = 6, .parent_hwirq = 23 },
+ { .gpio_offset = 7, .parent_hwirq = 24 },
+ { .gpio_offset = 8, .parent_hwirq = 25 },
+ { .gpio_offset = 9, .parent_hwirq = 26 },
+ { .gpio_offset = 10, .parent_hwirq = 27 },
+ { .gpio_offset = 11, .parent_hwirq = 28 },
+ { .gpio_offset = 12, .parent_hwirq = 29 },
+};
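/*
 * A small sketch of the lookup that the irqdomain .alloc callback below
 * performs against the table above: map a GPIO offset to its parent
 * hwirq, with lines 13..15 having no interrupt. Illustrative only.
 */
static int ixp4xx_gpio_parent_hwirq(int gpio_offset)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++)
		if (ixp4xx_gpiomap[i].gpio_offset == gpio_offset)
			return ixp4xx_gpiomap[i].parent_hwirq;

	return -EINVAL;	/* lines 13..15 cannot generate IRQs */
}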
+
+static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+}
+
+static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ /* ACK when unmasking if not edge-triggered */
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
+ irq_chip_unmask_parent(d);
+}
+
+static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ int line = d->hwirq;
+ unsigned long flags;
+ u32 int_style;
+ u32 int_reg;
+ u32 val;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (line >= 8) {
+ /* pins 8-15 */
+ line -= 8;
+ int_reg = IXP4XX_REG_GPIT2;
+ } else {
+ /* pins 0-7 */
+ int_reg = IXP4XX_REG_GPIT1;
+ }
+
+ spin_lock_irqsave(&g->gc.bgpio_lock, flags);
+
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+
+ spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+
+ /* This parent only accepts level high (asserted) */
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
+};
+
+static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
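/*
 * A hedged consumer-side sketch: with .to_irq wired up as above, a driver
 * can request one of GPIO lines 0..12 as an interrupt in the usual way.
 * The connection ID, handler and includes are illustrative assumptions.
 */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

static int example_request_gpio_irq(struct device *dev, irq_handler_t handler)
{
	struct gpio_desc *gpiod;
	int irq;

	gpiod = devm_gpiod_get(dev, "wakeup", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	irq = gpiod_to_irq(gpiod);
	if (irq < 0)
		return irq;

	return devm_request_irq(dev, irq, handler, IRQF_TRIGGER_FALLING,
				"example", NULL);
}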
+
+static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ /* This goes away when we transition to DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_gpio *g = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
+ irq, irq + nr_irqs - 1,
+ hwirq, hwirq + nr_irqs - 1);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_fwspec parent_fwspec;
+ const struct ixp4xx_gpio_map *map;
+ int j;
+
+ /* Not all lines support IRQs */
+ for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
+ map = &ixp4xx_gpiomap[j];
+ if (map->gpio_offset == hwirq)
+ break;
+ }
+ if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
+ dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
+ return -EINVAL;
+ }
+ dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixp4xx_gpio_irqchip,
+ g,
+ handle_bad_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+
+ /*
+ * Create an IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = map->parent_hwirq;
+ /* This parent only handles asserted level IRQs */
+ parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq + i, map->parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
+ &parent_fwspec);
+ if (ret)
+ dev_err(g->dev,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ map->parent_hwirq, hwirq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
+ .translate = ixp4xx_gpio_irq_domain_translate,
+ .alloc = ixp4xx_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int ixp4xx_gpio_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent;
+ struct resource *res;
+ struct ixp4xx_gpio *g;
+ int ret;
+ int i;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base)) {
+ dev_err(dev, "ioremap error\n");
+ return PTR_ERR(g->base);
+ }
+
+ /*
+ * Make sure GPIO 14 and 15 are NOT used as clocks but as GPIOs on
+ * specific machines.
+ */
+ if (machine_is_dsmg600() || machine_is_nas100d())
+ __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+
+ /*
+ * This is a very special big-endian ARM issue: when the IXP4xx is
+ * run in big endian mode, all registers in the machine are switched
+ * around to the CPU-native endianness. That is why the driver mostly
+ * uses __raw_readl()/__raw_writel() to access the registers in the
+ * appropriate order. With the GPIO library we need to specify the
+ * byte order explicitly, so this flag needs to be set when compiling
+ * for big endian.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+ flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+#else
+ flags = 0;
+#endif
+
+ /* Populate and register gpio chip */
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + IXP4XX_REG_GPIN,
+ g->base + IXP4XX_REG_GPOUT,
+ NULL,
+ NULL,
+ g->base + IXP4XX_REG_GPOE,
+ flags);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.to_irq = ixp4xx_gpio_to_irq;
+ g->gc.ngpio = 16;
+ g->gc.label = "IXP4XX_GPIO_CHIP";
+ /*
+ * TODO: when we have migrated to device tree and all GPIOs
+ * are fetched using phandles, set this to -1 to get rid of
+ * the fixed gpiochip base.
+ */
+ g->gc.base = 0;
+ g->gc.parent = &pdev->dev;
+ g->gc.owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret) {
+ dev_err(dev, "failed to add SoC gpiochip\n");
+ return ret;
+ }
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(g->base);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
+ }
+ g->domain = irq_domain_create_hierarchy(parent,
+ IRQ_DOMAIN_FLAG_HIERARCHY,
+ ARRAY_SIZE(ixp4xx_gpiomap),
+ g->fwnode,
+ &ixp4xx_gpio_irqdomain_ops,
+ g);
+ if (!g->domain) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev, "no hierarchical irq domain\n");
+ return ret;
+ }
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ if (!np) {
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
+ const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = map->gpio_offset;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(g->domain,
+ -1, /* just pick something */
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ map->gpio_offset, map->parent_hwirq,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, g);
+ dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+
+ return 0;
+}
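/*
 * A hedged alternative sketch for the endianness flag selection in the
 * probe above: IS_ENABLED() keeps both branches visible to the compiler
 * and drops the preprocessor conditional. Shown for illustration only;
 * the merged driver uses the #if block.
 */
static unsigned long ixp4xx_bgpio_flags(void)
{
	return IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) ?
	       BGPIOF_BIG_ENDIAN_BYTE_ORDER : 0;
}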
+
+static const struct of_device_id ixp4xx_gpio_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-gpio",
+ },
+ {},
+};
+
+
+static struct platform_driver ixp4xx_gpio_driver = {
+ .driver = {
+ .name = "ixp4xx-gpio",
+ .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ },
+ .probe = ixp4xx_gpio_probe,
+};
+builtin_platform_driver(ixp4xx_gpio_driver);
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 6b9bf8b7bf16..b97a91166497 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -147,7 +147,6 @@ static int ttl_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ttl_module *mod;
struct gpio_chip *gpio;
- struct resource *res;
int ret;
pdata = dev_get_platdata(&pdev->dev);
@@ -164,8 +163,7 @@ static int ttl_probe(struct platform_device *pdev)
spin_lock_init(&mod->lock);
/* get access to the MODULbus registers for this module */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mod->regs = devm_ioremap_resource(dev, res);
+ mod->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mod->regs))
return PTR_ERR(mod->regs);
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index fca84ccac35c..1b1ee94eeab4 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -47,15 +47,13 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
- struct resource *res;
int ret;
gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
if (!gc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio_reg_base = devm_ioremap_resource(dev, res);
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index d441dbaed7a3..d711ae06747e 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -340,10 +340,7 @@ static int lpc18xx_gpio_probe(struct platform_device *pdev)
index = of_property_match_string(dev->of_node, "reg-names", "gpio");
if (index < 0) {
/* To support backward compatibility take the first resource */
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gc->base = devm_ioremap_resource(dev, res);
+ gc->base = devm_platform_ioremap_resource(pdev, 0);
} else {
struct resource res;
diff --git a/drivers/gpio/gpio-max77650.c b/drivers/gpio/gpio-max77650.c
new file mode 100644
index 000000000000..3f03f4e8956c
--- /dev/null
+++ b/drivers/gpio/gpio-max77650.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 BayLibre SAS
+// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+//
+// GPIO driver for MAXIM 77650/77651 charger/power-supply.
+
+#include <linux/gpio/driver.h>
+#include <linux/i2c.h>
+#include <linux/mfd/max77650.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX77650_GPIO_DIR_MASK BIT(0)
+#define MAX77650_GPIO_INVAL_MASK BIT(1)
+#define MAX77650_GPIO_DRV_MASK BIT(2)
+#define MAX77650_GPIO_OUTVAL_MASK BIT(3)
+#define MAX77650_GPIO_DEBOUNCE_MASK BIT(4)
+
+#define MAX77650_GPIO_DIR_OUT 0x00
+#define MAX77650_GPIO_DIR_IN BIT(0)
+#define MAX77650_GPIO_OUT_LOW 0x00
+#define MAX77650_GPIO_OUT_HIGH BIT(3)
+#define MAX77650_GPIO_DRV_OPEN_DRAIN 0x00
+#define MAX77650_GPIO_DRV_PUSH_PULL BIT(2)
+#define MAX77650_GPIO_DEBOUNCE BIT(4)
+
+#define MAX77650_GPIO_DIR_BITS(_reg) \
+ ((_reg) & MAX77650_GPIO_DIR_MASK)
+#define MAX77650_GPIO_INVAL_BITS(_reg) \
+ (((_reg) & MAX77650_GPIO_INVAL_MASK) >> 1)
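/*
 * A short worked example of the helpers above, assuming an illustrative
 * CNFG_GPIO readout of 0x03 (bit 0: direction, bit 1: input value):
 *
 *   MAX77650_GPIO_DIR_BITS(0x03)   == 0x1   -> line is an input
 *   MAX77650_GPIO_INVAL_BITS(0x03) == 0x1   -> line reads high
 */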
+
+struct max77650_gpio_chip {
+ struct regmap *map;
+ struct gpio_chip gc;
+ int irq;
+};
+
+static int max77650_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DIR_MASK,
+ MAX77650_GPIO_DIR_IN);
+}
+
+static int max77650_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int mask, regval;
+
+ mask = MAX77650_GPIO_DIR_MASK | MAX77650_GPIO_OUTVAL_MASK;
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+ regval |= MAX77650_GPIO_DIR_OUT;
+
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO, mask, regval);
+}
+
+static void max77650_gpio_set_value(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ int rv, regval;
+
+ regval = value ? MAX77650_GPIO_OUT_HIGH : MAX77650_GPIO_OUT_LOW;
+
+ rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_OUTVAL_MASK, regval);
+ if (rv)
+ dev_err(gc->parent, "cannot set GPIO value: %d\n", rv);
+}
+
+static int max77650_gpio_get_value(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_INVAL_BITS(val);
+}
+
+static int max77650_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+ unsigned int val;
+ int rv;
+
+ rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val);
+ if (rv)
+ return rv;
+
+ return MAX77650_GPIO_DIR_BITS(val);
+}
+
+static int max77650_gpio_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long cfg)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(cfg)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_OPEN_DRAIN);
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DRV_MASK,
+ MAX77650_GPIO_DRV_PUSH_PULL);
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return regmap_update_bits(chip->map,
+ MAX77650_REG_CNFG_GPIO,
+ MAX77650_GPIO_DEBOUNCE_MASK,
+ MAX77650_GPIO_DEBOUNCE);
+ default:
+ return -ENOTSUPP;
+ }
+}
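/*
 * A hedged consumer-side sketch of what reaches .set_config above:
 * requesting the line as open drain and asking for debounce both arrive
 * as pinconf parameters. The connection ID and debounce value are
 * illustrative assumptions.
 */
#include <linux/gpio/consumer.h>

static int example_configure_line(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "pwr-en", GPIOD_OUT_LOW_OPEN_DRAIN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* Ends up as PIN_CONFIG_INPUT_DEBOUNCE in .set_config */
	return gpiod_set_debounce(gpiod, 1000);
}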
+
+static int max77650_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct max77650_gpio_chip *chip = gpiochip_get_data(gc);
+
+ return chip->irq;
+}
+
+static int max77650_gpio_probe(struct platform_device *pdev)
+{
+ struct max77650_gpio_chip *chip;
+ struct device *dev, *parent;
+ struct i2c_client *i2c;
+
+ dev = &pdev->dev;
+ parent = dev->parent;
+ i2c = to_i2c_client(parent);
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->map = dev_get_regmap(parent, NULL);
+ if (!chip->map)
+ return -ENODEV;
+
+ chip->irq = platform_get_irq_byname(pdev, "GPI");
+ if (chip->irq < 0)
+ return chip->irq;
+
+ chip->gc.base = -1;
+ chip->gc.ngpio = 1;
+ chip->gc.label = i2c->name;
+ chip->gc.parent = dev;
+ chip->gc.owner = THIS_MODULE;
+ chip->gc.can_sleep = true;
+
+ chip->gc.direction_input = max77650_gpio_direction_input;
+ chip->gc.direction_output = max77650_gpio_direction_output;
+ chip->gc.set = max77650_gpio_set_value;
+ chip->gc.get = max77650_gpio_get_value;
+ chip->gc.get_direction = max77650_gpio_get_direction;
+ chip->gc.set_config = max77650_gpio_set_config;
+ chip->gc.to_irq = max77650_gpio_to_irq;
+
+ return devm_gpiochip_add_data(dev, &chip->gc, chip);
+}
+
+static struct platform_driver max77650_gpio_driver = {
+ .driver = {
+ .name = "max77650-gpio",
+ },
+ .probe = max77650_gpio_probe,
+};
+module_platform_driver(max77650_gpio_driver);
+
+MODULE_DESCRIPTION("MAXIM 77650/77651 GPIO driver");
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 3134c0d2bfe4..9308081e0a4a 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -146,7 +146,6 @@ static void mb86s70_gpio_set(struct gpio_chip *gc, unsigned gpio, int value)
static int mb86s70_gpio_probe(struct platform_device *pdev)
{
struct mb86s70_gpio_chip *gchip;
- struct resource *res;
int ret;
gchip = devm_kzalloc(&pdev->dev, sizeof(*gchip), GFP_KERNEL);
@@ -155,8 +154,7 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gchip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gchip->base = devm_ioremap_resource(&pdev->dev, res);
+ gchip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gchip->base))
return PTR_ERR(gchip->base);
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 7c659fdaa6d5..3302125e5265 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -377,10 +377,20 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
-static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
{
- const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
- return dev_name ? dev_name : "pinctrl-merrifield";
+ struct acpi_device *adev;
+ const char *name;
+
+ adev = acpi_dev_get_first_match_dev("INTC1002", NULL, -1);
+ if (adev) {
+ name = devm_kstrdup(priv->dev, acpi_dev_name(adev), GFP_KERNEL);
+ acpi_dev_put(adev);
+ } else {
+ name = "pinctrl-merrifield";
+ }
+
+ return name;
}
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -441,7 +451,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
- pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name(priv);
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
new file mode 100644
index 000000000000..894aaf55fc96
--- /dev/null
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+/* Number of pins on BlueField */
+#define MLXBF_GPIO_NR 54
+
+/* Pad Electrical Controls. */
+#define MLXBF_GPIO_PAD_CONTROL_FIRST_WORD 0x0700
+#define MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD 0x0708
+#define MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD 0x0710
+#define MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD 0x0718
+
+#define MLXBF_GPIO_PIN_DIR_I 0x1040
+#define MLXBF_GPIO_PIN_DIR_O 0x1048
+#define MLXBF_GPIO_PIN_STATE 0x1000
+#define MLXBF_GPIO_SCRATCHPAD 0x20
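/*
 * A tiny illustrative sketch of the register usage implied above: the pin
 * state and direction words are 64-bit, one bit per line, and accessed
 * with readq()/writeq(). Not part of the driver.
 */
static bool example_mlxbf_line_is_high(void __iomem *base, unsigned int line)
{
	return readq(base + MLXBF_GPIO_PIN_STATE) & BIT_ULL(line);
}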
+
+#ifdef CONFIG_PM
+struct mlxbf_gpio_context_save_regs {
+ u64 scratchpad;
+ u64 pad_control[MLXBF_GPIO_NR];
+ u64 pin_dir_i;
+ u64 pin_dir_o;
+};
+#endif
+
+/* Device state structure. */
+struct mlxbf_gpio_state {
+ struct gpio_chip gc;
+
+ /* Memory Address */
+ void __iomem *base;
+
+#ifdef CONFIG_PM
+ struct mlxbf_gpio_context_save_regs csave_regs;
+#endif
+};
+
+static int mlxbf_gpio_probe(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs;
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ int ret;
+
+ gs = devm_kzalloc(&pdev->dev, sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return -ENOMEM;
+
+ gs->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gs->base))
+ return PTR_ERR(gs->base);
+
+ gc = &gs->gc;
+ ret = bgpio_init(gc, dev, 8,
+ gs->base + MLXBF_GPIO_PIN_STATE,
+ NULL,
+ NULL,
+ gs->base + MLXBF_GPIO_PIN_DIR_O,
+ gs->base + MLXBF_GPIO_PIN_DIR_I,
+ 0);
+ if (ret)
+ return -ENODEV;
+
+ gc->owner = THIS_MODULE;
+ gc->ngpio = MLXBF_GPIO_NR;
+
+ ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, gs);
+ dev_info(&pdev->dev, "registered Mellanox BlueField GPIO");
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mlxbf_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ gs->csave_regs.scratchpad = readq(gs->base + MLXBF_GPIO_SCRATCHPAD);
+ gs->csave_regs.pad_control[0] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ gs->csave_regs.pad_control[1] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ gs->csave_regs.pad_control[2] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ gs->csave_regs.pad_control[3] =
+ readq(gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ gs->csave_regs.pin_dir_i = readq(gs->base + MLXBF_GPIO_PIN_DIR_I);
+ gs->csave_regs.pin_dir_o = readq(gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+
+static int mlxbf_gpio_resume(struct platform_device *pdev)
+{
+ struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev);
+
+ writeq(gs->csave_regs.scratchpad, gs->base + MLXBF_GPIO_SCRATCHPAD);
+ writeq(gs->csave_regs.pad_control[0],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[1],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[2],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD);
+ writeq(gs->csave_regs.pad_control[3],
+ gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD);
+ writeq(gs->csave_regs.pin_dir_i, gs->base + MLXBF_GPIO_PIN_DIR_I);
+ writeq(gs->csave_regs.pin_dir_o, gs->base + MLXBF_GPIO_PIN_DIR_O);
+
+ return 0;
+}
+#endif
+
+static const struct acpi_device_id mlxbf_gpio_acpi_match[] = {
+ { "MLNXBF02", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxbf_gpio_acpi_match);
+
+static struct platform_driver mlxbf_gpio_driver = {
+ .driver = {
+ .name = "mlxbf_gpio",
+ .acpi_match_table = ACPI_PTR(mlxbf_gpio_acpi_match),
+ },
+ .probe = mlxbf_gpio_probe,
+#ifdef CONFIG_PM
+ .suspend = mlxbf_gpio_suspend,
+ .resume = mlxbf_gpio_resume,
+#endif
+};
+
+module_platform_driver(mlxbf_gpio_driver);
+
+MODULE_DESCRIPTION("Mellanox BlueField GPIO Driver");
+MODULE_AUTHOR("Mellanox Technologies");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 50bdc29591c0..6f904c874678 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -134,17 +134,6 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
unsigned long pinmask = bgpio_line2mask(gc, gpio);
bool dir = !!(gc->bgpio_dir & pinmask);
- /*
- * If the direction is OUT we read the value from the SET
- * register, and if the direction is IN we read the value
- * from the DAT register.
- *
- * If the direction bits are inverted, naturally this gets
- * inverted too.
- */
- if (gc->bgpio_dir_inverted)
- dir = !dir;
-
if (dir)
return !!(gc->read_reg(gc->reg_set) & pinmask);
else
@@ -164,14 +153,8 @@ static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- /* Exploit the fact that we know which directions are set */
- if (gc->bgpio_dir_inverted) {
- set_mask = *mask & ~gc->bgpio_dir;
- get_mask = *mask & gc->bgpio_dir;
- } else {
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
- }
+ set_mask = *mask & gc->bgpio_dir;
+ get_mask = *mask & ~gc->bgpio_dir;
if (set_mask)
*bits |= gc->read_reg(gc->reg_set) & set_mask;
@@ -372,11 +355,12 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -385,11 +369,16 @@ static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- /* Return 0 if output, 1 of input */
- if (gc->bgpio_dir_inverted)
- return !!(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
- else
- return !(gc->read_reg(gc->reg_dir) & bgpio_line2mask(gc, gpio));
+ /* Return 0 if output, 1 if input */
+ if (gc->bgpio_dir_unreadable)
+ return !(gc->bgpio_dir & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_out)
+ return !(gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio));
+ if (gc->reg_dir_in)
+ return !!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio));
+
+ /* This should not happen */
+ return 1;
}
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
@@ -400,11 +389,12 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
spin_lock_irqsave(&gc->bgpio_lock, flags);
- if (gc->bgpio_dir_inverted)
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
- else
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
- gc->write_reg(gc->reg_dir, gc->bgpio_dir);
+ gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+
+ if (gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (gc->reg_dir_out)
+ gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
@@ -537,19 +527,12 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
void __iomem *dirin,
unsigned long flags)
{
- if (dirout && dirin) {
- return -EINVAL;
- } else if (dirout) {
- gc->reg_dir = dirout;
- gc->direction_output = bgpio_dir_out;
- gc->direction_input = bgpio_dir_in;
- gc->get_direction = bgpio_get_dir;
- } else if (dirin) {
- gc->reg_dir = dirin;
+ if (dirout || dirin) {
+ gc->reg_dir_out = dirout;
+ gc->reg_dir_in = dirin;
gc->direction_output = bgpio_dir_out;
gc->direction_input = bgpio_dir_in;
gc->get_direction = bgpio_get_dir;
- gc->bgpio_dir_inverted = true;
} else {
if (flags & BGPIOF_NO_OUTPUT)
gc->direction_output = bgpio_dir_out_err;
@@ -588,11 +571,11 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
* @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* output line. Conversely, setting the line to 0 will turn that line into
- * an input. Either this or @dirin can be defined, but never both.
+ * an input.
* @dirin: MMIO address for the register to set this line as INPUT. It is assumed
* that setting a line to 1 in this register will turn that line into an
* input line. Conversely, setting the line to 0 will turn that line into
- * an output. Either this or @dirout can be defined, but never both.
+ * an output.
* @flags: Different flags that will affect the behaviour of the device, such as
* endianness etc.
*/
@@ -634,8 +617,28 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
if (gc->set == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
- if (gc->reg_dir && !(flags & BGPIOF_UNREADABLE_REG_DIR))
- gc->bgpio_dir = gc->read_reg(gc->reg_dir);
+
+ if (flags & BGPIOF_UNREADABLE_REG_DIR)
+ gc->bgpio_dir_unreadable = true;
+
+ /*
+ * Inspect hardware to find initial direction setting.
+ */
+ if ((gc->reg_dir_out || gc->reg_dir_in) &&
+ !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
+ if (gc->reg_dir_out)
+ gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
+ else if (gc->reg_dir_in)
+ gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ /*
+ * If we have two direction registers, synchronise the input
+ * setting to the output setting: the library cannot handle a
+ * line being input and output at the same time.
+ */
+ if (gc->reg_dir_out && gc->reg_dir_in)
+ gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ }
return ret;
}
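/*
 * A minimal caller sketch for the reworked direction handling above: with
 * reg_dir_out/reg_dir_in split apart, bgpio_init() now accepts both a
 * "set to output" and a "set to input" register and keeps them in sync
 * (as the BlueField driver above does). Register offsets here are
 * illustrative assumptions.
 */
static int example_mmio_gpio_setup(struct gpio_chip *gc, struct device *dev,
				   void __iomem *base)
{
	return bgpio_init(gc, dev, 4,
			  base + 0x00,	/* dat */
			  base + 0x04,	/* set */
			  base + 0x08,	/* clr */
			  base + 0x0c,	/* dirout: 1 = output */
			  base + 0x10,	/* dirin: 1 = input */
			  0);
}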
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 74401e0adb29..79654fb2e50f 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -293,7 +293,6 @@ mediatek_gpio_bank_probe(struct device *dev,
static int
mediatek_gpio_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct mtk *mtk;
@@ -304,7 +303,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
if (!mtk)
return -ENOMEM;
- mtk->base = devm_ioremap_resource(dev, res);
+ mtk->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mtk->base))
return PTR_ERR(mtk->base);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index f97ed32b8beb..059094ac44cb 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1038,11 +1038,9 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
static int mvebu_gpio_probe_raw(struct platform_device *pdev,
struct mvebu_gpio_chip *mvchip)
{
- struct resource *res;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1062,8 +1060,7 @@ static int mvebu_gpio_probe_raw(struct platform_device *pdev,
* per-CPU registers
*/
if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index e86e61dda4b7..b2813580c582 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -411,7 +411,6 @@ static int mxc_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
- struct resource *iores;
int irq_base;
int err;
@@ -423,8 +422,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(&pdev->dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 1b19c88ea7bb..afb0e8a791e5 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -82,7 +82,6 @@ static int octeon_gpio_probe(struct platform_device *pdev)
{
struct octeon_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res_mem;
void __iomem *reg_base;
int err = 0;
@@ -91,8 +90,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
chip = &gpio->chip;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 7f33024b6d83..16289bafa001 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -31,8 +31,6 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
-#define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER BIT(2)
-
struct gpio_regs {
u32 irqenable1;
u32 irqenable2;
@@ -48,13 +46,6 @@ struct gpio_regs {
u32 debounce_en;
};
-struct gpio_bank;
-
-struct gpio_omap_funcs {
- void (*idle_enable_level_quirk)(struct gpio_bank *bank);
- void (*idle_disable_level_quirk)(struct gpio_bank *bank);
-};
-
struct gpio_bank {
struct list_head node;
void __iomem *base;
@@ -62,7 +53,6 @@ struct gpio_bank {
u32 non_wakeup_gpios;
u32 enabled_non_wakeup_gpios;
struct gpio_regs context;
- struct gpio_omap_funcs funcs;
u32 saved_datain;
u32 level_mask;
u32 toggle_mask;
@@ -83,8 +73,6 @@ struct gpio_bank {
int stride;
u32 width;
int context_loss_count;
- bool workaround_enabled;
- u32 quirks;
void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
void (*set_dataout_multiple)(struct gpio_bank *bank,
@@ -353,6 +341,22 @@ static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
}
}
+/*
+ * Off-mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
+ * See the GPIO "Wake-Up Generation" section of the TRM for the list of GPIOs
+ * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
+ * none are capable of waking up the system from off mode.
+ */
+static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
+{
+ u32 no_wake = bank->non_wakeup_gpios;
+
+ if (no_wake)
+ return !!(~no_wake & gpio_mask);
+
+ return false;
+}
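/*
 * A short worked example of the check above, with illustrative values: if
 * bank->non_wakeup_gpios is 0x0000ffff (lines 0..15 cannot wake the
 * system), then for gpio_mask = BIT(20) the expression ~no_wake & gpio_mask
 * is non-zero and the line is treated as off-mode wake-up capable, while
 * gpio_mask = BIT(3) yields zero and is not. An unset non_wakeup_gpios
 * means the bank is assumed not to be wake-up capable at all.
 */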
+
static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
@@ -363,10 +367,16 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
trigger & IRQ_TYPE_LEVEL_LOW);
omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
trigger & IRQ_TYPE_LEVEL_HIGH);
+
+ /*
+ * We need edge detection enabled to allow the GPIO block to be
+ * woken from the idle state. Set the appropriate edge detection
+ * in addition to the level detection.
+ */
omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
+ trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));
bank->context.leveldetect0 =
readl_relaxed(bank->base + bank->regs->leveldetect0);
@@ -384,13 +394,7 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
- if (!bank->regs->irqctrl) {
- /* On omap24xx proceed only when valid GPIO bit is set */
- if (bank->non_wakeup_gpios) {
- if (!(bank->non_wakeup_gpios & gpio_bit))
- goto exit;
- }
-
+ if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
/*
* Log the edge gpio and manually trigger the IRQ
* after resume if the input level changes
@@ -403,7 +407,6 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
bank->enabled_non_wakeup_gpios &= ~gpio_bit;
}
-exit:
bank->level_mask =
readl_relaxed(bank->base + bank->regs->leveldetect0) |
readl_relaxed(bank->base + bank->regs->leveldetect1);
@@ -896,44 +899,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&bank->lock, flags);
}
-/*
- * Only edges can generate a wakeup event to the PRCM.
- *
- * Therefore, ensure any wake-up capable GPIOs have
- * edge-detection enabled before going idle to ensure a wakeup
- * to the PRCM is generated on a GPIO transition. (c.f. 34xx
- * NDA TRM 25.5.3.1)
- *
- * The normal values will be restored upon ->runtime_resume()
- * by writing back the values saved in bank->context.
- */
-static void __maybe_unused
-omap2_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
- u32 wake_low, wake_hi;
-
- /* Enable additional edge detection for level gpios for idle */
- wake_low = bank->context.leveldetect0 & bank->context.wake_en;
- if (wake_low)
- writel_relaxed(wake_low | bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
-
- wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
- if (wake_hi)
- writel_relaxed(wake_hi | bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
-static void __maybe_unused
-omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
- /* Disable edge detection for level gpios after idle */
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
-}
-
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1251,203 +1216,70 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
-static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context);
-static void omap_gpio_unidle(struct gpio_bank *bank);
-
-static int gpio_omap_cpu_notifier(struct notifier_block *nb,
- unsigned long cmd, void *v)
+static void omap_gpio_init_context(struct gpio_bank *p)
{
- struct gpio_bank *bank;
- unsigned long flags;
+ struct omap_gpio_reg_offs *regs = p->regs;
+ void __iomem *base = p->base;
- bank = container_of(nb, struct gpio_bank, nb);
+ p->context.ctrl = readl_relaxed(base + regs->ctrl);
+ p->context.oe = readl_relaxed(base + regs->direction);
+ p->context.wake_en = readl_relaxed(base + regs->wkup_en);
+ p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
+ p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
+ p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
+ p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
+ p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
+ p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
- raw_spin_lock_irqsave(&bank->lock, flags);
- switch (cmd) {
- case CPU_CLUSTER_PM_ENTER:
- if (bank->is_suspended)
- break;
- omap_gpio_idle(bank, true);
- break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
- case CPU_CLUSTER_PM_EXIT:
- if (bank->is_suspended)
- break;
- omap_gpio_unidle(bank);
- break;
- }
- raw_spin_unlock_irqrestore(&bank->lock, flags);
+ if (regs->set_dataout && p->regs->clr_dataout)
+ p->context.dataout = readl_relaxed(base + regs->set_dataout);
+ else
+ p->context.dataout = readl_relaxed(base + regs->dataout);
- return NOTIFY_OK;
+ p->context_valid = true;
}
-static const struct of_device_id omap_gpio_match[];
-
-static int omap_gpio_probe(struct platform_device *pdev)
+static void omap_gpio_restore_context(struct gpio_bank *bank)
{
- struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- const struct of_device_id *match;
- const struct omap_gpio_platform_data *pdata;
- struct resource *res;
- struct gpio_bank *bank;
- struct irq_chip *irqc;
- int ret;
-
- match = of_match_device(of_match_ptr(omap_gpio_match), dev);
-
- pdata = match ? match->data : dev_get_platdata(dev);
- if (!pdata)
- return -EINVAL;
-
- bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
- if (!bank)
- return -ENOMEM;
-
- irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
- if (!irqc)
- return -ENOMEM;
-
- irqc->irq_startup = omap_gpio_irq_startup,
- irqc->irq_shutdown = omap_gpio_irq_shutdown,
- irqc->irq_ack = omap_gpio_ack_irq,
- irqc->irq_mask = omap_gpio_mask_irq,
- irqc->irq_unmask = omap_gpio_unmask_irq,
- irqc->irq_set_type = omap_gpio_irq_type,
- irqc->irq_set_wake = omap_gpio_wake_enable,
- irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
- irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
- irqc->name = dev_name(&pdev->dev);
- irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
- irqc->parent_device = dev;
-
- bank->irq = platform_get_irq(pdev, 0);
- if (bank->irq <= 0) {
- if (!bank->irq)
- bank->irq = -ENXIO;
- if (bank->irq != -EPROBE_DEFER)
- dev_err(dev,
- "can't get irq resource ret=%d\n", bank->irq);
- return bank->irq;
- }
-
- bank->chip.parent = dev;
- bank->chip.owner = THIS_MODULE;
- bank->dbck_flag = pdata->dbck_flag;
- bank->quirks = pdata->quirks;
- bank->stride = pdata->bank_stride;
- bank->width = pdata->bank_width;
- bank->is_mpuio = pdata->is_mpuio;
- bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
- bank->regs = pdata->regs;
-#ifdef CONFIG_OF_GPIO
- bank->chip.of_node = of_node_get(node);
-#endif
-
- if (node) {
- if (!of_property_read_bool(node, "ti,gpio-always-on"))
- bank->loses_context = true;
- } else {
- bank->loses_context = pdata->loses_context;
-
- if (bank->loses_context)
- bank->get_context_loss_count =
- pdata->get_context_loss_count;
- }
-
- if (bank->regs->set_dataout && bank->regs->clr_dataout) {
- bank->set_dataout = omap_set_gpio_dataout_reg;
- bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
- } else {
- bank->set_dataout = omap_set_gpio_dataout_mask;
- bank->set_dataout_multiple =
- omap_set_gpio_dataout_mask_multiple;
- }
-
- if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
- bank->funcs.idle_enable_level_quirk =
- omap2_gpio_enable_level_quirk;
- bank->funcs.idle_disable_level_quirk =
- omap2_gpio_disable_level_quirk;
- }
-
- raw_spin_lock_init(&bank->lock);
- raw_spin_lock_init(&bank->wa_lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bank->base)) {
- return PTR_ERR(bank->base);
- }
-
- if (bank->dbck_flag) {
- bank->dbck = devm_clk_get(dev, "dbclk");
- if (IS_ERR(bank->dbck)) {
- dev_err(dev,
- "Could not get gpio dbck. Disable debounce\n");
- bank->dbck_flag = false;
- } else {
- clk_prepare(bank->dbck);
- }
- }
-
- platform_set_drvdata(pdev, bank);
-
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
-
- if (bank->is_mpuio)
- omap_mpuio_init(bank);
-
- omap_gpio_mod_init(bank);
-
- ret = omap_gpio_chip_init(bank, irqc);
- if (ret) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
- return ret;
- }
-
- omap_gpio_show_rev(bank);
+ writel_relaxed(bank->context.wake_en,
+ bank->base + bank->regs->wkup_en);
+ writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
+ writel_relaxed(bank->context.leveldetect0,
+ bank->base + bank->regs->leveldetect0);
+ writel_relaxed(bank->context.leveldetect1,
+ bank->base + bank->regs->leveldetect1);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->set_dataout);
+ else
+ writel_relaxed(bank->context.dataout,
+ bank->base + bank->regs->dataout);
+ writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
- if (bank->funcs.idle_enable_level_quirk &&
- bank->funcs.idle_disable_level_quirk) {
- bank->nb.notifier_call = gpio_omap_cpu_notifier;
- cpu_pm_register_notifier(&bank->nb);
+ if (bank->dbck_enable_mask) {
+ writel_relaxed(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ writel_relaxed(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
}
- pm_runtime_put(dev);
-
- return 0;
-}
-
-static int omap_gpio_remove(struct platform_device *pdev)
-{
- struct gpio_bank *bank = platform_get_drvdata(pdev);
-
- if (bank->nb.notifier_call)
- cpu_pm_unregister_notifier(&bank->nb);
- list_del(&bank->node);
- gpiochip_remove(&bank->chip);
- pm_runtime_disable(&pdev->dev);
- if (bank->dbck_flag)
- clk_unprepare(bank->dbck);
-
- return 0;
+ writel_relaxed(bank->context.irqenable1,
+ bank->base + bank->regs->irqenable);
+ writel_relaxed(bank->context.irqenable2,
+ bank->base + bank->regs->irqenable2);
}
-static void omap_gpio_restore_context(struct gpio_bank *bank);
-
static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
struct device *dev = bank->chip.parent;
- u32 l1 = 0, l2 = 0;
+ void __iomem *base = bank->base;
+ u32 nowake;
- if (bank->funcs.idle_enable_level_quirk)
- bank->funcs.idle_enable_level_quirk(bank);
+ bank->saved_datain = readl_relaxed(base + bank->regs->datain);
if (!bank->enabled_non_wakeup_gpios)
goto update_gpio_context_count;
@@ -1456,22 +1288,15 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
goto update_gpio_context_count;
/*
- * If going to OFF, remove triggering for all
+ * If going to OFF, remove triggering for all wkup domain
* non-wakeup GPIOs. Otherwise spurious IRQs will be
* generated. See OMAP2420 Errata item 1.101.
*/
- bank->saved_datain = readl_relaxed(bank->base +
- bank->regs->datain);
- l1 = bank->context.fallingdetect;
- l2 = bank->context.risingdetect;
-
- l1 &= ~bank->enabled_non_wakeup_gpios;
- l2 &= ~bank->enabled_non_wakeup_gpios;
-
- writel_relaxed(l1, bank->base + bank->regs->fallingdetect);
- writel_relaxed(l2, bank->base + bank->regs->risingdetect);
-
- bank->workaround_enabled = true;
+ if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
+ nowake = bank->enabled_non_wakeup_gpios;
+ omap_gpio_rmw(base, bank->regs->fallingdetect, nowake, ~nowake);
+ omap_gpio_rmw(base, bank->regs->risingdetect, nowake, ~nowake);
+ }
update_gpio_context_count:
if (bank->get_context_loss_count)
@@ -1481,8 +1306,6 @@ update_gpio_context_count:
omap_gpio_dbck_disable(bank);
}
-static void omap_gpio_init_context(struct gpio_bank *p);
-
static void omap_gpio_unidle(struct gpio_bank *bank)
{
struct device *dev = bank->chip.parent;
@@ -1504,9 +1327,6 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
omap_gpio_dbck_enable(bank);
- if (bank->funcs.idle_disable_level_quirk)
- bank->funcs.idle_disable_level_quirk(bank);
-
if (bank->loses_context) {
if (!bank->get_context_loss_count) {
omap_gpio_restore_context(bank);
@@ -1518,11 +1338,14 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
return;
}
}
+ } else {
+ /* Restore changes done for OMAP2420 errata 1.101 */
+ writel_relaxed(bank->context.fallingdetect,
+ bank->base + bank->regs->fallingdetect);
+ writel_relaxed(bank->context.risingdetect,
+ bank->base + bank->regs->risingdetect);
}
- if (!bank->workaround_enabled)
- return;
-
l = readl_relaxed(bank->base + bank->regs->datain);
/*
@@ -1572,117 +1395,35 @@ static void omap_gpio_unidle(struct gpio_bank *bank)
writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
}
-
- bank->workaround_enabled = false;
-}
-
-static void omap_gpio_init_context(struct gpio_bank *p)
-{
- struct omap_gpio_reg_offs *regs = p->regs;
- void __iomem *base = p->base;
-
- p->context.ctrl = readl_relaxed(base + regs->ctrl);
- p->context.oe = readl_relaxed(base + regs->direction);
- p->context.wake_en = readl_relaxed(base + regs->wkup_en);
- p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
- p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
- p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
- p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
- p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
- p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
-
- if (regs->set_dataout && p->regs->clr_dataout)
- p->context.dataout = readl_relaxed(base + regs->set_dataout);
- else
- p->context.dataout = readl_relaxed(base + regs->dataout);
-
- p->context_valid = true;
-}
-
-static void omap_gpio_restore_context(struct gpio_bank *bank)
-{
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
- writel_relaxed(bank->context.ctrl, bank->base + bank->regs->ctrl);
- writel_relaxed(bank->context.leveldetect0,
- bank->base + bank->regs->leveldetect0);
- writel_relaxed(bank->context.leveldetect1,
- bank->base + bank->regs->leveldetect1);
- writel_relaxed(bank->context.risingdetect,
- bank->base + bank->regs->risingdetect);
- writel_relaxed(bank->context.fallingdetect,
- bank->base + bank->regs->fallingdetect);
- if (bank->regs->set_dataout && bank->regs->clr_dataout)
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->set_dataout);
- else
- writel_relaxed(bank->context.dataout,
- bank->base + bank->regs->dataout);
- writel_relaxed(bank->context.oe, bank->base + bank->regs->direction);
-
- if (bank->dbck_enable_mask) {
- writel_relaxed(bank->context.debounce, bank->base +
- bank->regs->debounce);
- writel_relaxed(bank->context.debounce_en,
- bank->base + bank->regs->debounce_en);
- }
-
- writel_relaxed(bank->context.irqenable1,
- bank->base + bank->regs->irqenable);
- writel_relaxed(bank->context.irqenable2,
- bank->base + bank->regs->irqenable2);
}
-static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+static int gpio_omap_cpu_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
{
- struct gpio_bank *bank = dev_get_drvdata(dev);
+ struct gpio_bank *bank;
unsigned long flags;
- int error = 0;
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be idled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
- }
- omap_gpio_idle(bank, true);
- bank->is_suspended = true;
-unlock:
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return error;
-}
-static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
-{
- struct gpio_bank *bank = dev_get_drvdata(dev);
- unsigned long flags;
- int error = 0;
+ bank = container_of(nb, struct gpio_bank, nb);
raw_spin_lock_irqsave(&bank->lock, flags);
- /* Must be unidled only by CPU_CLUSTER_PM_ENTER? */
- if (bank->irq_usage) {
- error = -EBUSY;
- goto unlock;
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_idle(bank, true);
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ if (bank->is_suspended)
+ break;
+ omap_gpio_unidle(bank);
+ break;
}
- omap_gpio_unidle(bank);
- bank->is_suspended = false;
-unlock:
raw_spin_unlock_irqrestore(&bank->lock, flags);
- return error;
+ return NOTIFY_OK;
}
-#ifdef CONFIG_ARCH_OMAP2PLUS
-static const struct dev_pm_ops gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
- NULL)
-};
-#else
-static const struct dev_pm_ops gpio_pm_ops;
-#endif /* CONFIG_ARCH_OMAP2PLUS */
-
-#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
.revision = OMAP24XX_GPIO_REVISION,
.direction = OMAP24XX_GPIO_OE,
@@ -1729,11 +1470,6 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};
-/*
- * Note that omap2 does not currently support idle modes with context loss so
- * no need to add OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER quirk flag to save
- * and restore context.
- */
static const struct omap_gpio_platform_data omap2_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
@@ -1744,14 +1480,12 @@ static const struct omap_gpio_platform_data omap3_pdata = {
.regs = &omap2_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct of_device_id omap_gpio_match[] = {
@@ -1770,15 +1504,187 @@ static const struct of_device_id omap_gpio_match[] = {
{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
+
+static int omap_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *match;
+ const struct omap_gpio_platform_data *pdata;
+ struct gpio_bank *bank;
+ struct irq_chip *irqc;
+ int ret;
+
+ match = of_match_device(of_match_ptr(omap_gpio_match), dev);
+
+ pdata = match ? match->data : dev_get_platdata(dev);
+ if (!pdata)
+ return -EINVAL;
+
+ bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
+ if (!bank)
+ return -ENOMEM;
+
+ irqc = devm_kzalloc(dev, sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->irq_startup = omap_gpio_irq_startup,
+ irqc->irq_shutdown = omap_gpio_irq_shutdown,
+ irqc->irq_ack = omap_gpio_ack_irq,
+ irqc->irq_mask = omap_gpio_mask_irq,
+ irqc->irq_unmask = omap_gpio_unmask_irq,
+ irqc->irq_set_type = omap_gpio_irq_type,
+ irqc->irq_set_wake = omap_gpio_wake_enable,
+ irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
+ irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
+ irqc->name = dev_name(&pdev->dev);
+ irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+ irqc->parent_device = dev;
+
+ bank->irq = platform_get_irq(pdev, 0);
+ if (bank->irq <= 0) {
+ if (!bank->irq)
+ bank->irq = -ENXIO;
+ if (bank->irq != -EPROBE_DEFER)
+ dev_err(dev,
+ "can't get irq resource ret=%d\n", bank->irq);
+ return bank->irq;
+ }
+
+ bank->chip.parent = dev;
+ bank->chip.owner = THIS_MODULE;
+ bank->dbck_flag = pdata->dbck_flag;
+ bank->stride = pdata->bank_stride;
+ bank->width = pdata->bank_width;
+ bank->is_mpuio = pdata->is_mpuio;
+ bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
+ bank->regs = pdata->regs;
+#ifdef CONFIG_OF_GPIO
+ bank->chip.of_node = of_node_get(node);
#endif
+ if (node) {
+ if (!of_property_read_bool(node, "ti,gpio-always-on"))
+ bank->loses_context = true;
+ } else {
+ bank->loses_context = pdata->loses_context;
+
+ if (bank->loses_context)
+ bank->get_context_loss_count =
+ pdata->get_context_loss_count;
+ }
+
+ if (bank->regs->set_dataout && bank->regs->clr_dataout) {
+ bank->set_dataout = omap_set_gpio_dataout_reg;
+ bank->set_dataout_multiple = omap_set_gpio_dataout_reg_multiple;
+ } else {
+ bank->set_dataout = omap_set_gpio_dataout_mask;
+ bank->set_dataout_multiple =
+ omap_set_gpio_dataout_mask_multiple;
+ }
+
+ raw_spin_lock_init(&bank->lock);
+ raw_spin_lock_init(&bank->wa_lock);
+
+ /* Static mapping, never released */
+ bank->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bank->base)) {
+ return PTR_ERR(bank->base);
+ }
+
+ if (bank->dbck_flag) {
+ bank->dbck = devm_clk_get(dev, "dbclk");
+ if (IS_ERR(bank->dbck)) {
+ dev_err(dev,
+ "Could not get gpio dbck. Disable debounce\n");
+ bank->dbck_flag = false;
+ } else {
+ clk_prepare(bank->dbck);
+ }
+ }
+
+ platform_set_drvdata(pdev, bank);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ if (bank->is_mpuio)
+ omap_mpuio_init(bank);
+
+ omap_gpio_mod_init(bank);
+
+ ret = omap_gpio_chip_init(bank, irqc);
+ if (ret) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+ return ret;
+ }
+
+ omap_gpio_show_rev(bank);
+
+ bank->nb.notifier_call = gpio_omap_cpu_notifier;
+ cpu_pm_register_notifier(&bank->nb);
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static int omap_gpio_remove(struct platform_device *pdev)
+{
+ struct gpio_bank *bank = platform_get_drvdata(pdev);
+
+ cpu_pm_unregister_notifier(&bank->nb);
+ list_del(&bank->node);
+ gpiochip_remove(&bank->chip);
+ pm_runtime_disable(&pdev->dev);
+ if (bank->dbck_flag)
+ clk_unprepare(bank->dbck);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_idle(bank, true);
+ bank->is_suspended = true;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+{
+ struct gpio_bank *bank = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&bank->lock, flags);
+ omap_gpio_unidle(bank);
+ bank->is_suspended = false;
+ raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static const struct dev_pm_ops gpio_pm_ops = {
+ SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
+ NULL)
+};
+
static struct platform_driver omap_gpio_driver = {
.probe = omap_gpio_probe,
.remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
.pm = &gpio_pm_ops,
- .of_match_table = of_match_ptr(omap_gpio_match),
+ .of_match_table = omap_gpio_match,
},
};
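
The probe above maps its registers with devm_platform_ioremap_resource(), the managed helper that most of the per-driver conversions further down this diff also switch to. As a rough sketch (assumption: this mirrors, but is not copied from, the in-tree helper in drivers/base/platform.c), it folds the old two-step lookup-and-map pattern into a single call:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/*
 * Sketch of the behaviour the conversions below rely on: fetch the MEM
 * resource by index, then hand it to devm_ioremap_resource(), which
 * validates it, requests the region, maps it, and returns an ERR_PTR on
 * failure -- hence the IS_ERR() checks at every call site.
 */
static void __iomem *sketch_platform_ioremap_resource(struct platform_device *pdev,
                                                      unsigned int index)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
}
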
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 7e76830b3368..b7ef33f63392 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -73,6 +73,7 @@
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
static const struct i2c_device_id pca953x_id[] = {
+ { "pca6416", 16 | PCA953X_TYPE | PCA_INT, },
{ "pca9505", 40 | PCA953X_TYPE | PCA_INT, },
{ "pca9534", 8 | PCA953X_TYPE | PCA_INT, },
{ "pca9535", 16 | PCA953X_TYPE | PCA_INT, },
@@ -153,6 +154,7 @@ struct pca953x_chip {
u8 irq_trig_fall[MAX_BANK];
struct irq_chip irq_chip;
#endif
+ atomic_t wakeup_path;
struct i2c_client *client;
struct gpio_chip gpio_chip;
@@ -581,6 +583,11 @@ static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pca953x_chip *chip = gpiochip_get_data(gc);
+ if (on)
+ atomic_inc(&chip->wakeup_path);
+ else
+ atomic_dec(&chip->wakeup_path);
+
return irq_set_irq_wake(chip->client->irq, on);
}
@@ -1100,7 +1107,10 @@ static int pca953x_suspend(struct device *dev)
regcache_cache_only(chip->regmap, true);
- regulator_disable(chip->regulator);
+ if (atomic_read(&chip->wakeup_path))
+ device_set_wakeup_path(dev);
+ else
+ regulator_disable(chip->regulator);
return 0;
}
@@ -1110,10 +1120,12 @@ static int pca953x_resume(struct device *dev)
struct pca953x_chip *chip = dev_get_drvdata(dev);
int ret;
- ret = regulator_enable(chip->regulator);
- if (ret != 0) {
- dev_err(dev, "Failed to enable regulator: %d\n", ret);
- return 0;
+ if (!atomic_read(&chip->wakeup_path)) {
+ ret = regulator_enable(chip->regulator);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ return 0;
+ }
}
regcache_cache_only(chip->regmap, false);
@@ -1137,6 +1149,7 @@ static int pca953x_resume(struct device *dev)
#define OF_957X(__nrgpio, __int) (void *)(__nrgpio | PCA957X_TYPE | __int)
static const struct of_device_id pca953x_dt_ids[] = {
+ { .compatible = "nxp,pca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "nxp,pca9505", .data = OF_953X(40, PCA_INT), },
{ .compatible = "nxp,pca9534", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "nxp,pca9535", .data = OF_953X(16, PCA_INT), },
@@ -1152,6 +1165,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "nxp,pca9575", .data = OF_957X(16, PCA_INT), },
{ .compatible = "nxp,pca9698", .data = OF_953X(40, 0), },
+ { .compatible = "nxp,pcal6416", .data = OF_953X(16, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal6524", .data = OF_953X(24, PCA_LATCH_INT), },
{ .compatible = "nxp,pcal9555a", .data = OF_953X(16, PCA_LATCH_INT), },
@@ -1167,6 +1181,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
+ { .compatible = "onnn,cat9554", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "onnn,pca9654", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "exar,xra1202", .data = OF_953X( 8, 0), },
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index bcc6be4a5cb2..26f77fdb217e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -577,7 +577,7 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-const struct irq_domain_ops pxa_irq_domain_ops = {
+static const struct irq_domain_ops pxa_irq_domain_ops = {
.map = pxa_irq_domain_map,
.xlate = irq_domain_xlate_twocell,
};
@@ -622,7 +622,6 @@ static int pxa_gpio_probe(struct platform_device *pdev)
{
struct pxa_gpio_chip *pchip;
struct pxa_gpio_bank *c;
- struct resource *res;
struct clk *clk;
struct pxa_gpio_platform_data *info;
void __iomem *gpio_reg_base;
@@ -665,11 +664,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
pchip->irq0 = irq0;
pchip->irq1 = irq1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
- gpio_reg_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
+
+ gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (!gpio_reg_base)
return -EINVAL;
@@ -816,7 +812,7 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-struct syscore_ops pxa_gpio_syscore_ops = {
+static struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 500a3596aaf4..70e95fc4779f 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -430,7 +430,7 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
static int gpio_rcar_probe(struct platform_device *pdev)
{
struct gpio_rcar_priv *p;
- struct resource *io, *irq;
+ struct resource *irq;
struct gpio_chip *gpio_chip;
struct irq_chip *irq_chip;
struct device *dev = &pdev->dev;
@@ -461,8 +461,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
goto err0;
}
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(dev, io);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
ret = PTR_ERR(p->base);
goto err0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index c333046d02b8..fb143f28c386 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -23,7 +23,6 @@ struct sch_gpio {
struct gpio_chip chip;
spinlock_t lock;
unsigned short iobase;
- unsigned short core_base;
unsigned short resume_base;
};
@@ -166,7 +165,6 @@ static int sch_gpio_probe(struct platform_device *pdev)
switch (pdev->id) {
case PCI_DEVICE_ID_INTEL_SCH_LPC:
- sch->core_base = 0;
sch->resume_base = 10;
sch->chip.ngpio = 14;
@@ -185,19 +183,16 @@ static int sch_gpio_probe(struct platform_device *pdev)
break;
case PCI_DEVICE_ID_INTEL_ITC_LPC:
- sch->core_base = 0;
sch->resume_base = 5;
sch->chip.ngpio = 14;
break;
case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
- sch->core_base = 0;
sch->resume_base = 21;
sch->chip.ngpio = 30;
break;
case PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB:
- sch->core_base = 0;
sch->resume_base = 2;
sch->chip.ngpio = 8;
break;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index ee3039f091f4..6eca531b7d96 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -122,15 +122,13 @@ static int spics_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_spics *spics;
- struct resource *res;
int ret;
spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL);
if (!spics)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- spics->base = devm_ioremap_resource(&pdev->dev, res);
+ spics->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spics->base))
return PTR_ERR(spics->base);
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index 55072d2b367f..f5c8b3a351d5 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -219,7 +219,6 @@ static int sprd_gpio_probe(struct platform_device *pdev)
{
struct gpio_irq_chip *irq;
struct sprd_gpio *sprd_gpio;
- struct resource *res;
int ret;
sprd_gpio = devm_kzalloc(&pdev->dev, sizeof(*sprd_gpio), GFP_KERNEL);
@@ -232,8 +231,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
return sprd_gpio->irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sprd_gpio->base = devm_ioremap_resource(&pdev->dev, res);
+ sprd_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sprd_gpio->base))
return PTR_ERR(sprd_gpio->base);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 2283c869ad5d..a51c310708b8 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -360,7 +360,6 @@ static int gsta_probe(struct platform_device *dev)
struct pci_dev *pdev;
struct sta2x11_gpio_pdata *gpio_pdata;
struct gsta_gpio *chip;
- struct resource *res;
pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
gpio_pdata = dev_get_platdata(&pdev->dev);
@@ -369,13 +368,11 @@ static int gsta_probe(struct platform_device *dev)
dev_err(&dev->dev, "no gpio config\n");
pr_debug("gpio config: %p\n", gpio_pdata);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
chip->dev = &dev->dev;
- chip->reg_base = devm_ioremap_resource(&dev->dev, res);
+ chip->reg_base = devm_platform_ioremap_resource(dev, 0);
if (IS_ERR(chip->reg_base))
return PTR_ERR(chip->reg_base);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 19972084c45b..8a319d56c5de 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -210,7 +210,6 @@ static int xway_stp_hw_init(struct xway_stp *chip)
static int xway_stp_probe(struct platform_device *pdev)
{
- struct resource *res;
u32 shadow, groups, dsl, phy;
struct xway_stp *chip;
struct clk *clk;
@@ -220,8 +219,7 @@ static int xway_stp_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->virt = devm_ioremap_resource(&pdev->dev, res);
+ chip->virt = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->virt))
return PTR_ERR(chip->virt);
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index d5e5d19f4c0a..6bbac6c83f29 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -120,7 +120,6 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio;
- struct resource *mem;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
int ret = -EBUSY;
@@ -136,8 +135,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (tb10x_gpio == NULL)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tb10x_gpio->base = devm_ioremap_resource(dev, mem);
+ tb10x_gpio->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 1ececf2c3282..6d9b6906b9d0 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -569,7 +569,6 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
- struct resource *res;
struct tegra_gpio_bank *bank;
unsigned int gpio, i, j;
int ret;
@@ -645,8 +644,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank->tgi = tgi;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgi->regs = devm_ioremap_resource(&pdev->dev, res);
+ tgi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgi->regs))
return PTR_ERR(tgi->regs);
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 1306722faa5a..715371b5102a 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -363,22 +363,16 @@ static int thunderx_gpio_irq_request_resources(struct irq_data *data)
{
struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
struct thunderx_gpio *txgpio = txline->txgpio;
- struct irq_data *parent_data = data->parent_data;
int r;
r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
if (r)
return r;
- if (parent_data && parent_data->chip->irq_request_resources) {
- r = parent_data->chip->irq_request_resources(parent_data);
- if (r)
- goto error;
- }
+ r = irq_chip_request_resources_parent(data);
+ if (r)
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
- return 0;
-error:
- gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
return r;
}
@@ -386,10 +380,8 @@ static void thunderx_gpio_irq_release_resources(struct irq_data *data)
{
struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
struct thunderx_gpio *txgpio = txline->txgpio;
- struct irq_data *parent_data = data->parent_data;
- if (parent_data && parent_data->chip->irq_release_resources)
- parent_data->chip->irq_release_resources(parent_data);
+ irq_chip_release_resources_parent(data);
gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
}
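
The thunderx change relies on the irq_chip_request_resources_parent() and irq_chip_release_resources_parent() helpers that this kernel adds to the irqchip core. Below is a sketch of their assumed shape (assumption: this is not the exact kernel/irq/chip.c code, and what the real helper returns when the parent provides no callback is its own documented choice; the sketch simply treats that case as a no-op):

#include <linux/irq.h>

/* Sketch: forward resource handling to the parent irq_data, if any. */
static int sketch_request_resources_parent(struct irq_data *data)
{
        data = data->parent_data;

        if (data && data->chip->irq_request_resources)
                return data->chip->irq_request_resources(data);

        return 0;       /* assumption: nothing to request */
}

static void sketch_release_resources_parent(struct irq_data *data)
{
        data = data->parent_data;

        if (data && data->chip->irq_release_resources)
                data->chip->irq_release_resources(data);
}
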
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 314e300d6ba3..1c70e831069c 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -229,7 +229,6 @@ static int timbgpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
struct timbgpio *tgpio;
- struct resource *iomem;
struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
int irq = platform_get_irq(pdev, 0);
@@ -246,8 +245,7 @@ static int timbgpio_probe(struct platform_device *pdev)
spin_lock_init(&tgpio->lock);
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tgpio->membase = devm_ioremap_resource(dev, iomem);
+ tgpio->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tgpio->membase))
return PTR_ERR(tgpio->membase);
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index c2a80b4cbf32..8c0d82d926dd 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -23,7 +23,6 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
{
struct device_node *node;
struct gpio_chip *chip;
- struct resource *res;
void __iomem *base_addr;
int retval;
u32 ngpios;
@@ -32,8 +31,7 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_addr = devm_ioremap_resource(&pdev->dev, res);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 0f662b297a95..93cdcc41e9fb 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -346,7 +346,6 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
struct uniphier_gpio_priv *priv;
struct gpio_chip *chip;
struct irq_chip *irq_chip;
- struct resource *regs;
unsigned int nregs;
u32 ngpios;
int ret;
@@ -370,8 +369,7 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, regs);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 541fa6ac399d..30aef41e3b7e 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -29,6 +29,7 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
+ struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -60,8 +61,6 @@ struct vf610_gpio_port {
#define PORT_INT_EITHER_EDGE 0xb
#define PORT_INT_LOGIC_ONE 0xc
-static struct irq_chip vf610_gpio_irq_chip;
-
static const struct fsl_gpio_soc_data imx_data = {
.have_paddr = true,
};
@@ -86,28 +85,24 @@ static int vf610_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
- void __iomem *addr;
+ unsigned long offset = GPIO_PDIR;
if (port->sdata && port->sdata->have_paddr) {
mask &= vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
- addr = mask ? port->gpio_base + GPIO_PDOR :
- port->gpio_base + GPIO_PDIR;
- return !!(vf610_gpio_readl(addr) & BIT(gpio));
- } else {
- return !!(vf610_gpio_readl(port->gpio_base + GPIO_PDIR)
- & BIT(gpio));
+ if (mask)
+ offset = GPIO_PDOR;
}
+
+ return !!(vf610_gpio_readl(port->gpio_base + offset) & BIT(gpio));
}
static void vf610_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct vf610_gpio_port *port = gpiochip_get_data(gc);
unsigned long mask = BIT(gpio);
+ unsigned long offset = val ? GPIO_PSOR : GPIO_PCOR;
- if (val)
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PSOR);
- else
- vf610_gpio_writel(mask, port->gpio_base + GPIO_PCOR);
+ vf610_gpio_writel(mask, port->gpio_base + offset);
}
static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
@@ -237,37 +232,31 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
-static struct irq_chip vf610_gpio_irq_chip = {
- .name = "gpio-vf610",
- .irq_ack = vf610_gpio_irq_ack,
- .irq_mask = vf610_gpio_irq_mask,
- .irq_unmask = vf610_gpio_irq_unmask,
- .irq_set_type = vf610_gpio_irq_set_type,
- .irq_set_wake = vf610_gpio_irq_set_wake,
-};
+static void vf610_gpio_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
static int vf610_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct vf610_gpio_port *port;
- struct resource *iores;
struct gpio_chip *gc;
+ struct irq_chip *ic;
int i;
int ret;
- port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->sdata = of_device_get_match_data(dev);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->base = devm_ioremap_resource(dev, iores);
+ port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- port->gpio_base = devm_ioremap_resource(dev, iores);
+ port->gpio_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(port->gpio_base))
return PTR_ERR(port->gpio_base);
@@ -275,11 +264,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (port->irq < 0)
return port->irq;
- port->clk_port = devm_clk_get(&pdev->dev, "port");
+ port->clk_port = devm_clk_get(dev, "port");
if (!IS_ERR(port->clk_port)) {
ret = clk_prepare_enable(port->clk_port);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_port);
+ if (ret)
+ return ret;
} else if (port->clk_port == ERR_PTR(-EPROBE_DEFER)) {
/*
* Percolate deferrals, for anything else,
@@ -288,20 +281,19 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return PTR_ERR(port->clk_port);
}
- port->clk_gpio = devm_clk_get(&pdev->dev, "gpio");
+ port->clk_gpio = devm_clk_get(dev, "gpio");
if (!IS_ERR(port->clk_gpio)) {
ret = clk_prepare_enable(port->clk_gpio);
- if (ret) {
- clk_disable_unprepare(port->clk_port);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, vf610_gpio_disable_clk,
+ port->clk_gpio);
+ if (ret)
return ret;
- }
} else if (port->clk_gpio == ERR_PTR(-EPROBE_DEFER)) {
- clk_disable_unprepare(port->clk_port);
return PTR_ERR(port->clk_gpio);
}
- platform_set_drvdata(pdev, port);
-
gc = &port->gc;
gc->of_node = np;
gc->parent = dev;
@@ -316,7 +308,15 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ret = gpiochip_add_data(gc, port);
+ ic = &port->ic;
+ ic->name = "gpio-vf610";
+ ic->irq_ack = vf610_gpio_irq_ack;
+ ic->irq_mask = vf610_gpio_irq_mask;
+ ic->irq_unmask = vf610_gpio_irq_unmask;
+ ic->irq_set_type = vf610_gpio_irq_set_type;
+ ic->irq_set_wake = vf610_gpio_irq_set_wake;
+
+ ret = devm_gpiochip_add_data(dev, gc, port);
if (ret < 0)
return ret;
@@ -327,39 +327,23 @@ static int vf610_gpio_probe(struct platform_device *pdev)
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
- ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
- handle_edge_irq, IRQ_TYPE_NONE);
+ ret = gpiochip_irqchip_add(gc, ic, 0, handle_edge_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add irqchip\n");
- gpiochip_remove(gc);
return ret;
}
- gpiochip_set_chained_irqchip(gc, &vf610_gpio_irq_chip, port->irq,
+ gpiochip_set_chained_irqchip(gc, ic, port->irq,
vf610_gpio_irq_handler);
return 0;
}
-static int vf610_gpio_remove(struct platform_device *pdev)
-{
- struct vf610_gpio_port *port = platform_get_drvdata(pdev);
-
- gpiochip_remove(&port->gc);
- if (!IS_ERR(port->clk_port))
- clk_disable_unprepare(port->clk_port);
- if (!IS_ERR(port->clk_gpio))
- clk_disable_unprepare(port->clk_gpio);
-
- return 0;
-}
-
static struct platform_driver vf610_gpio_driver = {
.driver = {
.name = "gpio-vf610",
.of_match_table = vf610_gpio_dt_ids,
},
.probe = vf610_gpio_probe,
- .remove = vf610_gpio_remove,
};
builtin_platform_driver(vf610_gpio_driver);
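
The vf610 rework above drops its remove() callback by switching to managed helpers: devm_gpiochip_add_data() unregisters the chip automatically, and devm_add_action_or_reset() queues a clock-disable action that runs on unbind and, if registration fails, immediately. A minimal sketch of the clock half with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static void sketch_disable_clk(void *data)
{
        clk_disable_unprepare(data);
}

/* Hypothetical helper: get, enable and auto-release one named clock. */
static int sketch_get_enabled_clk(struct device *dev, const char *name,
                                  struct clk **out)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(dev, name);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /*
         * On failure the action runs right away, so the clock is never
         * left enabled; on success it runs automatically when the device
         * is unbound or a later probe step fails.
         */
        ret = devm_add_action_or_reset(dev, sketch_disable_clk, clk);
        if (ret)
                return ret;

        *out = clk;
        return 0;
}
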
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 2eb76f35aa7e..641a05181017 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -229,7 +229,6 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv;
int ret;
- struct resource *res;
void __iomem *regs;
struct irq_domain *parent_domain = NULL;
u32 val32;
@@ -238,8 +237,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 0a3607fd21af..54d3359444f3 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -290,22 +290,17 @@ MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
static int xlp_gpio_probe(struct platform_device *pdev)
{
struct gpio_chip *gc;
- struct resource *iores;
struct xlp_gpio_priv *priv;
void __iomem *gpio_base;
int irq_base, irq, err;
int ngpio;
u32 soc_type;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENODEV;
-
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- gpio_base = devm_ioremap_resource(&pdev->dev, iores);
+ gpio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio_base))
return PTR_ERR(gpio_base);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 5eacad9b2692..fb927559aefa 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -218,15 +218,13 @@ static int zx_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zx_gpio *chip;
- struct resource *res;
int irq, id, ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 00ff7b1fa8a1..9392edaeec3f 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -834,7 +834,6 @@ static int zynq_gpio_probe(struct platform_device *pdev)
int ret, bank_num;
struct zynq_gpio *gpio;
struct gpio_chip *chip;
- struct resource *res;
const struct of_device_id *match;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -849,8 +848,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
gpio->p_data = match->data;
platform_set_drvdata(pdev, gpio);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpio->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ gpio->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gpio->base_addr))
return PTR_ERR(gpio->base_addr);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 30d0baf7ddae..c9fc9e232aaf 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -24,13 +24,13 @@
*
* @node: list-entry of the events list of the struct acpi_gpio_chip
* @handle: handle of ACPI method to execute when the IRQ triggers
- * @handler: irq_handler to pass to request_irq when requesting the IRQ
- * @pin: GPIO pin number on the gpio_chip
- * @irq: Linux IRQ number for the event, for request_ / free_irq
- * @irqflags: flags to pass to request_irq when requesting the IRQ
+ * @handler: handler function to pass to request_irq() when requesting the IRQ
+ * @pin: GPIO pin number on the struct gpio_chip
+ * @irq: Linux IRQ number for the event, for request_irq() / free_irq()
+ * @irqflags: flags to pass to request_irq() when requesting the IRQ
* @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
- * @irq_requested:True if request_irq has been done
- * @desc: gpio_desc for the GPIO pin for this event
+ * @irq_requested:True if request_irq() has been done
+ * @desc: struct gpio_desc for the GPIO pin for this event
*/
struct acpi_gpio_event {
struct list_head node;
@@ -65,10 +65,10 @@ struct acpi_gpio_chip {
};
/*
- * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * For GPIO chips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
- * late_initcall_sync handler, so that other builtin drivers can register their
- * OpRegions before the event handlers can run. This list contains gpiochips
+ * late_initcall_sync() handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains GPIO chips
* for which the acpi_gpiochip_request_irqs() call has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
@@ -90,7 +90,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
*
* Return: GPIO descriptor to use with Linux generic GPIO API, or ERR_PTR
* error value. Specifically returns %-EPROBE_DEFER if the referenced GPIO
- * controller does not have gpiochip registered at the moment. This is to
+ * controller does not have GPIO chip registered at the moment. This is to
* support probe deferral.
*/
static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
@@ -287,9 +287,9 @@ fail_free_desc:
*
* ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
* handled by ACPI event methods which need to be called from the GPIO
- * chip's interrupt handler. acpi_gpiochip_request_interrupts finds out which
- * gpio pins have acpi event methods and assigns interrupt handlers that calls
- * the acpi event methods for those pins.
+ * chip's interrupt handler. acpi_gpiochip_request_interrupts() finds out which
+ * GPIO pins have ACPI event methods and assigns interrupt handlers that calls
+ * the ACPI event methods for those pins.
*/
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
@@ -444,8 +444,6 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
static enum gpiod_flags
acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
{
- bool pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
-
switch (agpio->io_restriction) {
case ACPI_IO_RESTRICT_INPUT:
return GPIOD_IN;
@@ -454,16 +452,26 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
* ACPI GPIO resources don't contain an initial value for the
* GPIO. Therefore we deduce that value from the pull field
* instead. If the pin is pulled up we assume default to be
- * high, otherwise low.
+ * high, if it is pulled down we assume default to be low,
+ * otherwise we leave pin untouched.
*/
- return pull_up ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return GPIOD_OUT_LOW;
+ default:
+ break;
+ }
default:
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
+ break;
}
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
}
static int
@@ -517,6 +525,26 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
return ret;
}
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ switch (info->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ *lookupflags |= GPIO_PULL_UP;
+ break;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ *lookupflags |= GPIO_PULL_DOWN;
+ break;
+ default:
+ break;
+ }
+
+ if (info->polarity == GPIO_ACTIVE_LOW)
+ *lookupflags |= GPIO_ACTIVE_LOW;
+
+ return 0;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
@@ -550,6 +578,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
+ lookup->info.pin_config = agpio->pin_config;
lookup->info.gpioint = gpioint;
/*
@@ -653,7 +682,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
* that case @index is used to select the GPIO entry in the property value
* (in case of multiple).
*
- * If the GPIO cannot be translated or there is an error an ERR_PTR is
+ * If the GPIO cannot be translated or there is an error, an ERR_PTR is
* returned.
*
* Note: if the GPIO resource has multiple entries in the pin list, this
@@ -696,7 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
@@ -737,10 +766,8 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
return ERR_PTR(-ENOENT);
}
- if (info.polarity == GPIO_ACTIVE_LOW)
- *lookupflags |= GPIO_ACTIVE_LOW;
-
acpi_gpio_update_gpiod_flags(dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
return desc;
}
@@ -751,10 +778,13 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
* @index: index of GpioIo/GpioInt resource (starting from %0)
* @info: info pointer to fill in (optional)
*
- * If @fwnode is an ACPI device object, call %acpi_get_gpiod_by_index() for it.
- * Otherwise (ie. it is a data-only non-device object), use the property-based
+ * If @fwnode is an ACPI device object, call acpi_get_gpiod_by_index() for it.
+ * Otherwise (i.e. it is a data-only non-device object), use the property-based
* GPIO lookup to get to the GPIO resource with the relevant information and use
* that to obtain the GPIO descriptor to return.
+ *
+ * If the GPIO cannot be translated or there is an error an ERR_PTR is
+ * returned.
*/
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
@@ -816,6 +846,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return PTR_ERR(desc);
if (info.gpioint && idx++ == index) {
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
char label[32];
int irq;
@@ -827,7 +858,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return irq;
snprintf(label, sizeof(label), "GpioInt() %d", index);
- ret = gpiod_configure_flags(desc, label, 0, info.flags);
+ ret = gpiod_configure_flags(desc, label, lflags, info.flags);
if (ret < 0)
return ret;
@@ -992,16 +1023,19 @@ static void acpi_gpiochip_free_regions(struct acpi_gpio_chip *achip)
}
}
-static struct gpio_desc *acpi_gpiochip_parse_own_gpio(
- struct acpi_gpio_chip *achip, struct fwnode_handle *fwnode,
- const char **name, unsigned int *lflags, unsigned int *dflags)
+static struct gpio_desc *
+acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
+ struct fwnode_handle *fwnode,
+ const char **name,
+ unsigned long *lflags,
+ enum gpiod_flags *dflags)
{
struct gpio_chip *chip = achip->chip;
struct gpio_desc *desc;
u32 gpios[2];
int ret;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
*name = NULL;
@@ -1037,7 +1071,8 @@ static void acpi_gpiochip_scan_gpios(struct acpi_gpio_chip *achip)
struct fwnode_handle *fwnode;
device_for_each_child_node(chip->parent, fwnode) {
- unsigned int lflags, dflags;
+ unsigned long lflags;
+ enum gpiod_flags dflags;
struct gpio_desc *desc;
const char *name;
int ret;
@@ -1158,11 +1193,13 @@ static int acpi_find_gpio_count(struct acpi_resource *ares, void *data)
}
/**
- * acpi_gpio_count - return the number of GPIOs associated with a
- * device / function or -ENOENT if no GPIO has been
- * assigned to the requested function.
- * @dev: GPIO consumer, can be NULL for system-global GPIOs
+ * acpi_gpio_count - count the GPIOs associated with a device / function
+ * @dev: GPIO consumer, can be %NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ *
+ * Return:
+ * The number of GPIOs associated with a device / function or %-ENOENT,
+ * if no GPIO has been assigned to the requested function.
*/
int acpi_gpio_count(struct device *dev, const char *con_id)
{
diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c
index dd517098ab95..53781b253986 100644
--- a/drivers/gpio/gpiolib-devprop.c
+++ b/drivers/gpio/gpiolib-devprop.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/export.h>
#include "gpiolib.h"
@@ -56,3 +57,4 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip,
kfree(names);
}
+EXPORT_SYMBOL_GPL(devprop_gpiochip_set_names);
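
Exporting devprop_gpiochip_set_names() lets modular GPIO drivers apply the firmware-provided "gpio-line-names" property themselves. A hedged usage sketch follows (assumption: the declaration is reachable from driver code after this change; the hunk above only shows the export):

#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Hypothetical call site: name the lines of an already-registered chip. */
static void sketch_name_lines(struct platform_device *pdev, struct gpio_chip *gc)
{
        devprop_gpiochip_set_names(gc, dev_fwnode(&pdev->dev));
}
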
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 6a3ec575a404..aec7bd86ae7e 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -86,9 +86,9 @@ static void of_gpio_flags_quirks(struct device_node *np,
if (IS_ENABLED(CONFIG_REGULATOR) &&
(of_device_is_compatible(np, "regulator-fixed") ||
of_device_is_compatible(np, "reg-fixed-voltage") ||
- (of_device_is_compatible(np, "regulator-gpio") &&
- !(strcmp(propname, "enable-gpio") &&
- strcmp(propname, "enable-gpios"))))) {
+ (!(strcmp(propname, "enable-gpio") &&
+ strcmp(propname, "enable-gpios")) &&
+ of_device_is_compatible(np, "regulator-gpio")))) {
/*
* The regulator GPIO handles are specified such that the
* presence or absence of "enable-active-high" solely controls
@@ -119,9 +119,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* property named "cs-gpios" we need to inspect the child node
* to determine if the flags should have inverted semantics.
*/
- if (IS_ENABLED(CONFIG_SPI_MASTER) &&
- of_property_read_bool(np, "cs-gpios") &&
- !strcmp(propname, "cs-gpios")) {
+ if (IS_ENABLED(CONFIG_SPI_MASTER) && !strcmp(propname, "cs-gpios") &&
+ of_property_read_bool(np, "cs-gpios")) {
struct device_node *child;
u32 cs;
int ret;
@@ -288,8 +287,7 @@ static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *
}
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
char prop_name[32]; /* 32 is max size of property name */
enum of_gpio_flags of_flags;
@@ -362,8 +360,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
* @chip: GPIO chip whose hog is parsed
* @idx: Index of the GPIO to parse
* @name: GPIO line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_parse_own_gpio()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_parse_own_gpio()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Returns GPIO descriptor to use with Linux GPIO API, or one of the errno
@@ -372,7 +370,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
struct gpio_chip *chip,
unsigned int idx, const char **name,
- enum gpio_lookup_flags *lflags,
+ unsigned long *lflags,
enum gpiod_flags *dflags)
{
struct device_node *chip_np;
@@ -388,7 +386,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
return ERR_PTR(-EINVAL);
xlate_flags = 0;
- *lflags = 0;
+ *lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
*dflags = 0;
ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
@@ -445,7 +443,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
struct gpio_desc *desc = NULL;
struct device_node *np;
const char *name;
- enum gpio_lookup_flags lflags;
+ unsigned long lflags;
enum gpiod_flags dflags;
unsigned int i;
int ret;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bca3e7740ef6..e013d417a936 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2519,6 +2519,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
const char *label,
enum gpiod_flags flags)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum);
int err;
@@ -2531,7 +2532,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
if (err < 0)
return ERR_PTR(err);
- err = gpiod_configure_flags(desc, label, 0, flags);
+ err = gpiod_configure_flags(desc, label, lflags, flags);
if (err) {
chip_err(chip, "setup of own GPIO %s failed\n", label);
gpiod_free_commit(desc);
@@ -2569,8 +2570,20 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
static int gpio_set_config(struct gpio_chip *gc, unsigned offset,
enum pin_config_param mode)
{
- unsigned long config = { PIN_CONF_PACKED(mode, 0) };
+ unsigned long config;
+ unsigned arg;
+ switch (mode) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ }
+
+ config = PIN_CONF_PACKED(mode, arg);
return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP;
}
@@ -3915,8 +3928,7 @@ found:
}
static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
- unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned int idx, unsigned long *flags)
{
struct gpio_desc *desc = ERR_PTR(-ENOENT);
struct gpiod_lookup_table *table;
@@ -4072,8 +4084,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -4155,9 +4167,9 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
unsigned int idx,
enum gpiod_flags flags)
{
+ unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = NULL;
int status;
- enum gpio_lookup_flags lookupflags = 0;
/* Maybe we have a device name, maybe not */
const char *devname = dev ? dev_name(dev) : "?";
@@ -4242,8 +4254,8 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc;
- unsigned long lflags = 0;
enum of_gpio_flags flags;
bool active_low = false;
bool single_ended = false;
@@ -4321,8 +4333,8 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
enum gpiod_flags dflags,
const char *label)
{
+ unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc = ERR_PTR(-ENODEV);
- unsigned long lflags = 0;
int ret;
if (!fwnode)
@@ -4342,9 +4354,7 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
return desc;
acpi_gpio_update_gpiod_flags(&dflags, &info);
-
- if (info.polarity == GPIO_ACTIVE_LOW)
- lflags |= GPIO_ACTIVE_LOW;
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
}
/* Currently only ACPI takes this path */
@@ -4395,8 +4405,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
* gpiod_hog - Hog the specified GPIO desc given the provided flags
* @desc: gpio whose value will be assigned
* @name: gpio line name
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values - returned from
+ * of_find_gpio() or of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*/
int gpiod_hog(struct gpio_desc *desc, const char *name,
@@ -4449,8 +4459,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
/**
* gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog
* @chip: gpio chip to act on
- *
- * This is only used by of_gpiochip_remove to free hogged gpios
*/
static void gpiochip_free_hogs(struct gpio_chip *chip)
{
@@ -4620,7 +4628,8 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_optional);
*/
void gpiod_put(struct gpio_desc *desc)
{
- gpiod_free(desc);
+ if (desc)
+ gpiod_free(desc);
}
EXPORT_SYMBOL_GPL(gpiod_put);
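
Several hunks above replace the enum-typed lookup flags with a plain unsigned long bitmask seeded from GPIO_LOOKUP_FLAGS_DEFAULT, so that polarity and the new pull-bias bits can be combined from board tables and ACPI alike. A board-file sketch of what such a combined lookup can now express (consumer and chip names are hypothetical):

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table sketch_gpios = {
        .dev_id = "foo-codec",                  /* hypothetical consumer */
        .table = {
                GPIO_LOOKUP_IDX("gpio-bank0", 3, "reset", 0,
                                GPIO_ACTIVE_LOW | GPIO_PULL_UP),
                { }
        },
};

static void sketch_register_lookup(void)
{
        gpiod_add_lookup_table(&sketch_gpios);
}
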
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 078ab17b96bf..7a65dad43932 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -17,7 +17,6 @@
#include <linux/cdev.h>
enum of_gpio_flags;
-enum gpio_lookup_flags;
struct acpi_device;
/**
@@ -75,6 +74,7 @@ struct gpio_device {
* @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
+ * @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
@@ -83,6 +83,7 @@ struct acpi_gpio_info {
struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
+ int pin_config;
int polarity;
int triggering;
unsigned int quirks;
@@ -95,7 +96,7 @@ static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags);
+ unsigned long *lookupflags);
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
int of_gpiochip_add(struct gpio_chip *gc);
@@ -104,7 +105,7 @@ void of_gpiochip_remove(struct gpio_chip *gc);
static inline struct gpio_desc *of_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpio_lookup_flags *flags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
@@ -126,12 +127,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
struct acpi_gpio_info *info);
+int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags);
+ unsigned long *lookupflags);
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
struct acpi_gpio_info *info);
@@ -154,11 +157,17 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *inf
{
return 0;
}
+static inline int
+acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
+ struct acpi_gpio_info *info)
+{
+ return 0;
+}
static inline struct gpio_desc *
acpi_find_gpio(struct device *dev, const char *con_id,
unsigned int idx, enum gpiod_flags *dflags,
- enum gpio_lookup_flags *lookupflags)
+ unsigned long *lookupflags)
{
return ERR_PTR(-ENOENT);
}
@@ -243,9 +252,6 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
return desc - &desc->gdev->descs[0];
}
-void devprop_gpiochip_set_names(struct gpio_chip *chip,
- const struct fwnode_handle *fwnode);
-
/* With descriptor prefix */
#define gpiod_emerg(desc, fmt, ...) \
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bd943a71756c..e360a4a131e1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_GEM_SHMEM_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM shmem helper functions
+
config DRM_VM
bool
depends on DRM && MMU
@@ -194,7 +200,6 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
@@ -215,7 +220,6 @@ config DRM_AMDGPU
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
select CHASH
help
@@ -225,8 +229,6 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/lib/Kconfig"
-
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@@ -251,6 +253,9 @@ config DRM_VKMS
If M is selected the module will be called vkms.
+config DRM_ATI_PCIGART
+ bool
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
@@ -329,12 +334,21 @@ source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
+source "drivers/gpu/drm/vboxvideo/Kconfig"
+
+source "drivers/gpu/drm/lima/Kconfig"
+
+source "drivers/gpu/drm/panfrost/Kconfig"
+
+source "drivers/gpu/drm/aspeed/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
+ select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1ac55c65eac0..72f5036d9bfa 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,11 +3,9 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o \
+drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_drv.o \
- drm_scatter.o drm_pci.o \
+ drm_memory.o drm_drv.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -21,11 +19,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_atomic_uapi.o
+drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
+drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
@@ -37,7 +37,8 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o drm_damage_helper.o
+ drm_atomic_state_helper.o drm_damage_helper.o \
+ drm_format_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -56,7 +57,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
-obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -109,3 +109,7 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
+obj-$(CONFIG_DRM_LIMA) += lima/
+obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 466da5954a68..fdd0ca4b0f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=$(src)/..
+FULL_AMD_PATH=$(srctree)/$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
@@ -53,7 +53,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8d0d7f3dd5fb..14398f55f602 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -83,6 +83,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_smu.h"
#define MAX_GPU_INSTANCE 16
@@ -156,6 +157,8 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
+extern int amdgpu_ras_enable;
+extern uint amdgpu_ras_mask;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -433,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata;
};
+struct amdgpu_cs_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
@@ -462,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
- unsigned num_post_dep_syncobjs;
- struct drm_syncobj **post_dep_syncobjs;
+ unsigned num_post_deps;
+ struct amdgpu_cs_post_dep *post_deps;
};
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
@@ -702,7 +711,6 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
- uint32_t pp_feature;
};
#define AMDGPU_RESET_MAGIC_NUM 64
@@ -825,6 +833,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
@@ -842,6 +851,9 @@ struct amdgpu_device {
struct amd_powerplay powerplay;
bool pp_force_state_enabled;
+ /* smu */
+ struct smu_context smu;
+
/* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
@@ -922,6 +934,8 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
+
+ bool in_baco_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
+ if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fe1d7368c1e6..aeead072fa79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ return adev->gfx.pfp_fw_version;
+
+ case KGD_ENGINE_ME:
+ return adev->gfx.me_fw_version;
+
+ case KGD_ENGINE_CE:
+ return adev->gfx.ce_fw_version;
+
+ case KGD_ENGINE_MEC1:
+ return adev->gfx.mec_fw_version;
+
+ case KGD_ENGINE_MEC2:
+ return adev->gfx.mec2_fw_version;
+
+ case KGD_ENGINE_RLC:
+ return adev->gfx.rlc_fw_version;
+
+ case KGD_ENGINE_SDMA1:
+ return adev->sdma.instance[0].fw_version;
+
+ case KGD_ENGINE_SDMA2:
+ return adev->sdma.instance[1].fw_version;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info)
{
@@ -640,4 +677,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}
+
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0b31a1859023..4e37fa7e85b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -81,6 +81,18 @@ struct amdgpu_kfd_dev {
uint64_t vram_used;
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
+ KGD_ENGINE_MAX
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
@@ -230,5 +244,6 @@ int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
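
With get_fw_version dropped from the kfd2kgd function table (see the gfx_v7/v8 hunks below), callers go through the new amdgpu_amdkfd_get_fw_version(), which returns the firmware versions amdgpu already cached at load time instead of re-parsing the firmware headers. A short usage sketch with a hypothetical helper:

#include <linux/printk.h>

#include "amdgpu_amdkfd.h"

/* Hypothetical: log a couple of microcode versions for a KGD handle. */
static void sketch_log_fw_versions(struct kgd_dev *kgd)
{
        uint32_t mec = amdgpu_amdkfd_get_fw_version(kgd, KGD_ENGINE_MEC1);
        uint32_t sdma = amdgpu_amdkfd_get_fw_version(kgd, KGD_ENGINE_SDMA1);

        pr_info("kfd: MEC1 fw %u, SDMA1 fw %u\n", mec, sdma);
}
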
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ff7fac7df34b..fa09e11a600c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -22,14 +22,12 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
-#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
@@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 56ea929f524b..fec3a6aa1de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -23,12 +23,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
@@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5c51d4910650..ef3d93b995b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -25,12 +25,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
@@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
*/
}
-/* FIXME: Does this need to be ASIC-specific code? */
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1921dec3df7a..a6e5184d436c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -410,15 +410,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
if (p_bo_va_entry)
*p_bo_va_entry = bo_va_entry;
- /* Allocate new page tables if needed and validate
- * them.
- */
- ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
- if (ret) {
- pr_err("Failed to allocate pts, err=%d\n", ret);
- goto err_alloc_pts;
- }
-
+ /* Allocate and validate page tables if needed */
ret = vm_validate_pt_pd_bos(vm);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
@@ -741,13 +733,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
struct amdgpu_sync *sync)
{
int ret;
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo *bo;
-
- bo_va = entry->bo_va;
- vm = bo_va->base.vm;
- bo = bo_va->base.bo;
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
/* Update the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -906,7 +892,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index b61e1dc61b4c..f96d75c6e099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -28,8 +28,6 @@
#include "atom.h"
#include "atombios.h"
-#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
-
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -238,10 +236,71 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
return 0;
}
+/*
+ * Return true if vbios enabled ecc by default, if umc info table is available
+ * or false if ecc is not enabled or umc info table is not available
+ */
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ bool ecc_default_enabled = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support umc_info 3.1+ */
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ umc_info = (union umc_info *)
+ (mode_info->atom_context->bios + data_offset);
+ ecc_default_enabled =
+ (le32_to_cpu(umc_info->v31.umc_config) &
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ }
+ }
+
+ return ecc_default_enabled;
+}
+
union firmware_info {
struct atom_firmware_info_v3_1 v31;
};
+/*
+ * Return true if vbios supports sram ecc or false if not
+ */
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+ bool sram_ecc_supported = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support firmware_info 3.1 + */
+ if ((frev == 3 && crev >=1) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ sram_ecc_supported =
+ (le32_to_cpu(firmware_info->v31.firmware_capability) &
+ ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+ }
+ }
+
+ return sram_ecc_supported;
+}
+
union smu_info {
struct atom_smu_info_v3_1 v31;
};
@@ -346,11 +405,11 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
- adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
- adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
- adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
- adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
- adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
+ adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
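Editorial note: the two new capability queries added in this file follow the same pattern: locate the table index via get_index_into_master_table(), parse the data header, gate on the table revision, then test one capability bit. A minimal standalone sketch of that revision-gated bit test is below; the struct and flag names are stand-ins for this illustration, not the atomfirmware types.

#include <stdbool.h>
#include <stdint.h>

#define CAP_SRAM_ECC (1u << 0)   /* stand-in for a capability bit such as ATOM_FIRMWARE_CAP_SRAM_ECC */

struct table_header { uint8_t frev, crev; };
struct fw_info_v31  { struct table_header hdr; uint32_t capability; };

/* Returns true only if the table is at least revision 3.1 and the bit is set. */
static bool cap_bit_supported(const struct fw_info_v31 *t, uint32_t bit)
{
	if (!t)
		return false;
	if (!((t->hdr.frev == 3 && t->hdr.crev >= 1) || t->hdr.frev > 3))
		return false;
	return (t->capability & bit) != 0;
}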
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 20f158fd3b76..5ec6f92f353c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_ATOMFIRMWARE_H__
#define __AMDGPU_ATOMFIRMWARE_H__
+#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
@@ -31,5 +33,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b..2f6239b6be6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
break;
default:
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- for (i = 0; i < parser->num_post_dep_syncobjs; i++)
- drm_syncobj_put(parser->post_dep_syncobjs[i]);
- kfree(parser->post_dep_syncobjs);
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
dma_fence_put(parser->fence);
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle)
+ uint32_t handle, u64 point,
+ u64 flags)
{
- int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
- if (r)
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
return r;
+ }
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+ 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i, r;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+ syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
if (r)
return r;
}
+
return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i;
- struct drm_amdgpu_cs_chunk_sem *deps;
+
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- p->post_dep_syncobjs = kmalloc_array(num_deps,
- sizeof(struct drm_syncobj *),
- GFP_KERNEL);
- p->num_post_dep_syncobjs = 0;
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
- if (!p->post_dep_syncobjs)
+ if (!p->post_deps)
return -ENOMEM;
+
for (i = 0; i < num_deps; ++i) {
- p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_dep_syncobjs[i])
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
return -EINVAL;
- p->num_post_dep_syncobjs++;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
}
+
+ return 0;
+}
+
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk
+ *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ kfree(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
return 0;
}
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
- chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+ if (r)
+ return r;
+ break;
}
}
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
int i;
- for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+ for (i = 0; i < p->num_post_deps; ++i) {
+ if (p->post_deps[i].chain && p->post_deps[i].point) {
+ drm_syncobj_add_point(p->post_deps[i].syncobj,
+ p->post_deps[i].chain,
+ p->fence, p->post_deps[i].point);
+ p->post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+ p->fence);
+ }
+ }
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
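Editorial note: the timeline-syncobj handling added above preallocates a chain node whenever a non-zero point is requested, and at signal time either hands that node to drm_syncobj_add_point() (clearing the pointer so the cleanup path will not free it) or falls back to replace_fence for point 0. The standalone sketch below shows only that ownership hand-off; add_point() and the struct names are illustrative stand-ins, not DRM calls.

#include <stdlib.h>

struct chain_node { int dummy; };

struct post_dep {
	struct chain_node *chain;      /* preallocated iff point != 0 */
	unsigned long long point;
};

/* Prepare one dependency: allocate the chain node up front so the
 * signal path cannot fail on memory allocation. */
static int prepare_dep(struct post_dep *dep, unsigned long long point)
{
	dep->point = point;
	dep->chain = NULL;
	if (point) {
		dep->chain = malloc(sizeof(*dep->chain));
		if (!dep->chain)
			return -1;
	}
	return 0;
}

/* Stand-in for drm_syncobj_add_point(): takes ownership of the node. */
static void add_point(struct chain_node *node, unsigned long long point)
{
	(void)point;
	free(node);
}

static void signal_dep(struct post_dep *dep)
{
	if (dep->chain && dep->point) {
		add_point(dep->chain, dep->point);
		dep->chain = NULL;     /* ownership transferred */
	}
	/* point == 0: nothing to hand over; later cleanup frees nothing */
}

static void cleanup_dep(struct post_dep *dep)
{
	free(dep->chain);              /* safe: NULL after a successful hand-off */
	dep->chain = NULL;
}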
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 7e22be7ca68a..54dd02a898b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -92,15 +92,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- size);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
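Editorial note: this is the same simplification as in the GPUVM path above and the GEM path further down: the explicit up-front amdgpu_vm_alloc_pts() call goes away, and page tables are expected to be allocated on demand when the mapping is actually updated. A generic allocate-on-first-use sketch is below; it is not the driver's VM code, which is outside this diff.

#include <stdlib.h>

struct page_table { int dummy; };
struct vm_slot    { struct page_table *pt; };

/* Lazily create the page table the first time a mapping touches the slot. */
static struct page_table *get_or_alloc_pt(struct vm_slot *slot)
{
	if (!slot->pt)
		slot->pt = calloc(1, sizeof(*slot->pt));
	return slot->pt;   /* NULL only if the allocation failed */
}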
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 7b526593eb77..a28a3d722ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
+#include "amdgpu_ras.h"
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -344,6 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
+ uint32_t ras_counter;
if (!fpriv)
return -EINVAL;
@@ -368,6 +370,21 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ /*query ue count*/
+ ras_counter = amdgpu_ras_query_error_count(adev, false);
+ /*ras counter is monotonic increasing*/
+ if (ras_counter != ctx->ras_counter_ue) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+ ctx->ras_counter_ue = ras_counter;
+ }
+
+ /*query ce count*/
+ ras_counter = amdgpu_ras_query_error_count(adev, true);
+ if (ras_counter != ctx->ras_counter_ce) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+ ctx->ras_counter_ce = ras_counter;
+ }
+
mutex_unlock(&mgr->lock);
return 0;
}
@@ -541,32 +558,26 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
- long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev) {
- mutex_unlock(&mgr->lock);
- return;
- }
-
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity;
entity = &ctx->entities[0][i].entity;
- max_wait = drm_sched_entity_flush(entity, max_wait);
+ timeout = drm_sched_entity_flush(entity, timeout);
}
}
mutex_unlock(&mgr->lock);
+ return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
@@ -579,10 +590,6 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev)
- return;
-
if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
continue;
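Editorial note: amdgpu_ctx_mgr_entity_flush() now takes a timeout and returns what is left of it, so a caller can thread one wait budget through several flush/wait stages (see the amdgpu_flush() change further down in amdgpu_drv.c). A minimal sketch of that shrinking-budget pattern, with wait_stage() as an illustrative stand-in for drm_sched_entity_flush() and friends:

/* Illustrative only: thread one timeout budget through several waits. */
static long wait_stage(long budget)
{
	/* a real wait would block and return the time left, or < 0 on error */
	return budget > 10 ? budget - 10 : 0;
}

static int flush_all(long timeout)
{
	timeout = wait_stage(timeout);     /* flush scheduler entities */
	if (timeout < 0)
		return (int)timeout;       /* propagate -ERESTARTSYS and friends */
	timeout = wait_stage(timeout);     /* wait for the VM to go idle */
	return timeout >= 0 ? 0 : (int)timeout;
}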
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index b3b012c0a7da..5f1b54c9bcdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,6 +49,8 @@ struct amdgpu_ctx {
enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
+ uint32_t ras_counter_ce;
+ uint32_t ras_counter_ue;
};
struct amdgpu_ctx_mgr {
@@ -82,7 +84,7 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 4ae3ff9a1d4c..8930d66f2204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -568,10 +568,9 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ if (r)
+ return r;
if (size > valuesize)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 79fb302fb954..cc8ad3831982 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -60,6 +60,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -1506,7 +1507,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EAGAIN;
}
- adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+ adev->pm.pp_feature = amdgpu_pp_feature_mask;
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1638,6 +1641,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
+ r = amdgpu_ras_init(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1681,6 +1688,13 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
+ r = amdgpu_ib_pool_init(adev);
+ if (r) {
+ dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ goto init_failed;
+ }
+
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
if (r)
goto init_failed;
@@ -1869,6 +1883,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_ras_pre_fini(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
@@ -1917,6 +1933,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_ib_pool_fini(adev);
}
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -1937,6 +1954,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
+ amdgpu_ras_fini(adev);
+
if (amdgpu_sriov_vf(adev))
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
@@ -1999,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+ /*set to low pstate by default */
+ amdgpu_xgmi_set_pstate(adev, 0);
+
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2369,7 +2392,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res = amdgpu_asic_reset(adev);
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
}
@@ -2448,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);
@@ -2642,13 +2666,6 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- r = amdgpu_ib_pool_init(adev);
- if (r) {
- dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
- goto failed;
- }
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2694,6 +2711,9 @@ fence_driver_init:
goto failed;
}
+ /* must succeed. */
+ amdgpu_ras_post_init(adev);
+
return 0;
failed:
@@ -2726,7 +2746,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev->ddev);
}
- amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
@@ -3225,6 +3244,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3244,6 +3265,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
error:
amdgpu_virt_init_data_exchange(adev);
@@ -3376,7 +3398,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
r = amdgpu_asic_reset(tmp_adev);
if (r) {
- DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
r, tmp_adev->ddev->unique);
break;
}
@@ -3393,6 +3415,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
+
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ amdgpu_ras_reserve_bad_pages(tmp_adev);
+ }
}
}
@@ -3411,7 +3438,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
+ DRM_INFO("VRAM is lost due to GPU reset!\n");
atomic_inc(&tmp_adev->vram_lost_counter);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 344967df3137..523b8ab6b04e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -904,3 +904,19 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
return NULL;
}
+
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_sclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
+}
+
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_mclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index e871e022c129..dca35407879d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -260,9 +260,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
-
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
@@ -281,18 +278,18 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
-
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
+#define amdgpu_smu_get_current_power_state(adev) \
+ ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
+
+#define amdgpu_smu_set_power_state(adev) \
+ ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
+
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
@@ -448,6 +445,9 @@ struct amdgpu_pm {
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
bool ac_power;
+ /* powerplay feature */
+ uint32_t pp_feature;
+
};
#define R600_SSTU_DFLT 0
@@ -486,6 +486,8 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -504,4 +506,8 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
+extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
+
+extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+
#endif
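Editorial note: the get_sclk/get_mclk macros are replaced by real functions (added in amdgpu_dpm.c above) so the call can branch between the new SW SMU path and the legacy powerplay function table. A standalone sketch of that dual-backend dispatch, with the backend struct and pointers as stand-ins:

#include <stdbool.h>

struct clk_backend {
	bool use_new_path;                 /* stand-in for is_support_sw_smu() */
	int (*new_get_clk)(bool low);      /* stand-in for the SW SMU entry point */
	int (*legacy_get_clk)(bool low);   /* stand-in for pp_funcs->get_sclk/get_mclk */
};

static int get_clk(const struct clk_backend *b, bool low)
{
	return b->use_new_path ? b->new_get_clk(low) : b->legacy_get_clk(low);
}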
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7419ea8a388b..1e2cc9d68a05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -74,9 +74,11 @@
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
+ * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
+ * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 30
+#define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -117,8 +119,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xfffd3fff;
+/* OverDrive(bit 14) disabled by default*/
+uint amdgpu_pp_feature_mask = 0xffffbfff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -136,6 +138,8 @@ uint amdgpu_dc_feature_mask = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
+int amdgpu_ras_enable = -1;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -495,6 +499,21 @@ MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
/**
+ * DOC: ras_enable (int)
+ * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
+ */
+MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
+module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
+
+/**
+ * DOC: ras_mask (uint)
+ * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
+ * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+ */
+MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
+module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
+
+/**
* DOC: si_support (int)
* Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
* set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
@@ -974,6 +993,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
drm_dev_unplug(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1158,13 +1178,14 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
{
struct drm_file *file_priv = f->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
- amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+ timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
+ timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
- return 0;
+ return timeout >= 0 ? 0 : timeout;
}
-
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
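Editorial note: the default feature-mask change above can be read bit by bit: the old default 0xfffd3fff cleared bits 14 (OverDrive), 15 (gfxoff) and 17 (stutter mode), while the new 0xffffbfff clears only bit 14. A tiny check of that arithmetic, runnable as-is:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t old_mask = 0xfffd3fff, new_mask = 0xffffbfff;

	/* old default: OverDrive(14), gfxoff(15) and stutter(17) masked off */
	assert(!(old_mask & (1u << 14)) && !(old_mask & (1u << 15)) && !(old_mask & (1u << 17)));
	/* new default: only OverDrive(14) stays masked off */
	assert(!(new_mask & (1u << 14)) && (new_mask & (1u << 15)) && (new_mask & (1u << 17)));
	return 0;
}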
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 5cbde74b97dd..e47609218839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -49,12 +49,11 @@
static int
amdgpufb_open(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
- int ret = pm_runtime_get_sync(adev->ddev->dev);
+ struct drm_fb_helper *fb_helper = info->par;
+ int ret = pm_runtime_get_sync(fb_helper->dev->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return ret;
}
return 0;
@@ -63,11 +62,10 @@ amdgpufb_open(struct fb_info *info, int user)
static int
amdgpufb_release(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
+ struct drm_fb_helper *fb_helper = info->par;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return 0;
}
@@ -233,9 +231,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
- info->skip_vt_switch = true;
-
ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {
@@ -248,10 +243,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;
- strcpy(info->fix.id, "amdgpudrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
@@ -260,7 +251,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce..4dee2326b29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct dma_fence *old, **ptr;
+ struct dma_fence __rcu **ptr;
uint32_t seq;
+ int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+ struct dma_fence *old;
+
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(ptr);
+ rcu_read_unlock();
+
+ if (old) {
+ r = dma_fence_wait(old, false);
+ dma_fence_put(old);
+ if (r)
+ return r;
+ }
+ }
+
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- old = rcu_dereference_protected(*ptr, 1);
- if (old && !dma_fence_is_signaled(old)) {
- DRM_INFO("rcu slot is busy\n");
- dma_fence_wait(old, false);
- }
-
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
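Editorial note: the fence-emit change replaces the old unconditional dereference-and-wait with a sequence that first peeks at the slot, then takes a safe reference under the RCU read lock, drops the lock before blocking, and propagates the wait error instead of swallowing it. The sketch below mirrors that sequence with stand-in lock/reference/wait helpers; it is illustrative and not a real RCU implementation.

struct fence { int signaled; };

/* Stand-ins for the kernel primitives used in the new code path. */
static void read_lock(void)   {}
static void read_unlock(void) {}
static struct fence *get_ref_safe(struct fence **slot) { return *slot; }
static void put_ref(struct fence *f) { (void)f; }
static int  wait_fence(struct fence *f) { f->signaled = 1; return 0; }

static int reuse_slot(struct fence **slot)
{
	if (*slot) {                        /* slot still occupied? */
		struct fence *old;

		read_lock();
		old = get_ref_safe(slot);   /* safe grab: slot may change under us */
		read_unlock();

		if (old) {
			int r = wait_fence(old);   /* block outside the read lock */

			put_ref(old);
			if (r)
				return r;          /* propagate, don't swallow */
		}
	}
	return 0;
}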
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d21dd2f369da..d4fcf5475464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -31,6 +31,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
+#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
@@ -627,11 +628,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -647,11 +643,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -678,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
+ struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
int r;
@@ -716,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
+ for (base = robj->vm_bo; base; base = base->next)
+ if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+ amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ goto out;
+ }
+
+
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
@@ -745,17 +746,25 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct amdgpu_device *adev = dev->dev_private;
struct drm_gem_object *gobj;
uint32_t handle;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
u32 domain;
int r;
+ /*
+ * The buffer returned from this function should be cleared, but
+ * it can only be done if the ring is enabled or we'll fail to
+ * create the buffer.
+ */
+ if (adev->mman.buffer_funcs_enabled)
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
args->pitch = amdgpu_align_pitch(adev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_pin_domain(adev,
amdgpu_display_supported_domains(adev));
- r = amdgpu_gem_object_create(adev, args->size, 0, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 97a60da62004..997932ebbb83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -390,7 +390,7 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
- if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f790e15bcd08..09fc53af3d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -258,6 +258,9 @@ struct amdgpu_gfx {
/* pipe reservation */
struct mutex pipe_reserve_mutex;
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /*ras */
+ struct ras_common_if *ras_if;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d73367cab4f3..250d9212cc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -80,6 +80,33 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_gmc_set_pte_pde - update the page tables using CPU
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using CPU.
+ */
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ /*
+ * The following is for PTE only. GART does not have PDEs.
+ */
+ value = addr & 0x0000FFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+ return 0;
+}
+
+/**
* amdgpu_gmc_agp_addr - return the address in the AGP address space
*
* @tbo: TTM BO which needs the address, must be in GTT domain
@@ -213,3 +240,58 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
+
+/**
+ * amdgpu_gmc_filter_faults - filter VM faults
+ *
+ * @adev: amdgpu device structure
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ * @timestamp: timestamp of the fault
+ *
+ * Returns:
+ * True if the fault was filtered and should not be processed further.
+ * False if the fault is a new one and needs to be handled.
+ */
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ uint64_t stamp, key = addr << 4 | pasid;
+ struct amdgpu_gmc_fault *fault;
+ uint32_t hash;
+
+ /* If we don't have space left in the ring buffer return immediately */
+ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
+ AMDGPU_GMC_FAULT_TIMEOUT;
+ if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
+ return true;
+
+ /* Try to find the fault in the hash */
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ while (fault->timestamp >= stamp) {
+ uint64_t tmp;
+
+ if (fault->key == key)
+ return true;
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+
+ /* Check if the entry was reused */
+ if (fault->timestamp >= tmp)
+ break;
+ }
+
+ /* Add the fault to the ring */
+ fault = &gmc->fault_ring[gmc->last_fault];
+ fault->key = key;
+ fault->timestamp = timestamp;
+
+ /* And update the hash */
+ fault->next = gmc->fault_hash[hash].idx;
+ gmc->fault_hash[hash].idx = gmc->last_fault++;
+ return false;
+}
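Editorial note: amdgpu_gmc_filter_faults() above deduplicates retry faults with a fixed ring of recent entries plus a small hash of list heads. A fault is dropped if the same key is found with a fresh enough timestamp; otherwise it overwrites the oldest ring slot and is linked in front of its hash bucket. Below is a standalone, simplified restatement of that logic (plain arrays, a trivial hash, no bitfields) for readability; it is a sketch, not the driver code.

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE     256
#define HASH_SIZE     256
#define FAULT_TIMEOUT 5000ULL

struct fault { uint64_t timestamp; uint64_t key; unsigned next; };

static struct fault ring[RING_SIZE];
static unsigned hash_head[HASH_SIZE];
static unsigned last_fault;

static unsigned hash_key(uint64_t key)
{
	return (unsigned)(key * 0x9E3779B97F4A7C15ULL >> 56) % HASH_SIZE;
}

static bool filter_fault(uint64_t addr, uint16_t pasid, uint64_t timestamp)
{
	uint64_t key = addr << 4 | pasid;
	uint64_t stamp = (timestamp > FAULT_TIMEOUT ? timestamp : FAULT_TIMEOUT + 1) - FAULT_TIMEOUT;
	struct fault *f;

	/* Ring full of still-recent entries: drop the fault rather than overwrite. */
	if (ring[last_fault].timestamp >= stamp)
		return true;

	/* Walk the hash chain while entries are recent enough to be trusted. */
	f = &ring[hash_head[hash_key(key)]];
	while (f->timestamp >= stamp) {
		uint64_t tmp;

		if (f->key == key)
			return true;               /* same fault seen recently */

		tmp = f->timestamp;
		f = &ring[f->next];
		if (f->timestamp >= tmp)           /* chain reached a reused slot */
			break;
	}

	/* New fault: claim the oldest slot and link it in front of its bucket. */
	f = &ring[last_fault];
	f->key = key;
	f->timestamp = timestamp;
	f->next = hash_head[hash_key(key)];
	hash_head[hash_key(key)] = last_fault;
	last_fault = (last_fault + 1) % RING_SIZE;
	return false;
}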
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 81e6070d255b..071145ac67b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -43,9 +43,35 @@
*/
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+/*
+ * Ring size as power of two for the log of recent faults.
+ */
+#define AMDGPU_GMC_FAULT_RING_ORDER 8
+#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)
+
+/*
+ * Hash size as power of two for the log of recent faults
+ */
+#define AMDGPU_GMC_FAULT_HASH_ORDER 8
+#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)
+
+/*
+ * Number of IH timestamp ticks until a fault is considered handled
+ */
+#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+
struct firmware;
/*
+ * GMC page fault information
+ */
+struct amdgpu_gmc_fault {
+ uint64_t timestamp;
+ uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
+ uint64_t key:52;
+};
+
+/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
@@ -71,12 +97,6 @@ struct amdgpu_gmc_funcs {
/* Change the VMID -> PASID mapping */
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid);
- /* write pte/pde updates using the cpu */
- int (*set_pte_pde)(struct amdgpu_device *adev,
- void *cpu_pt_addr, /* cpu addr of page table */
- uint32_t gpu_page_idx, /* pte/pde to update */
- uint64_t addr, /* addr to write into pte/pde */
- uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
/* set pte flags based per asic */
@@ -147,15 +167,22 @@ struct amdgpu_gmc {
struct kfd_vm_fault_info *vm_fault_info;
atomic_t vm_fault_info_updated;
+ struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
+ struct {
+ uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
+ } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
+ uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
+ struct amdgpu_irq_src ecc_irq;
+ struct ras_common_if *ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
@@ -189,6 +216,9 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
@@ -197,5 +227,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index da7b1b92d9cf..62591d081856 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -37,6 +37,47 @@ struct amdgpu_gtt_node {
};
/**
+ * DOC: mem_info_gtt_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total size of
+ * the GTT.
+ * The file mem_info_gtt_total is used for this, and returns the total size of
+ * the GTT block, in bytes
+ */
+static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
+}
+
+/**
+ * DOC: mem_info_gtt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total amount of
+ * used GTT.
+ * The file mem_info_gtt_used is used for this, and returns the current used
+ * size of the GTT block, in bytes
+ */
+static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
+}
+
+static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
+ amdgpu_mem_info_gtt_total_show, NULL);
+static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
+ amdgpu_mem_info_gtt_used_show, NULL);
+
+/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
* @man: TTM memory type manager
@@ -50,6 +91,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr;
uint64_t start, size;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -61,6 +103,18 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size);
man->priv = mgr;
+
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
+ return ret;
+ }
+
return 0;
}
@@ -74,12 +128,17 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 1c50be3ab8a9..934dfdcb4e73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -142,6 +142,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
+ unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@@ -159,7 +160,7 @@ restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (ih->rptr != wptr) {
+ while (ih->rptr != wptr && --count) {
amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 113a1ba13d4a..4e0bb645176d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,9 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+/* Maximum number of IVs processed at once */
+#define AMDGPU_IH_MAX_NUM_IVS 32
+
struct amdgpu_device;
struct amdgpu_iv_entry;
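Editorial note: the interrupt-handler change above caps how many IVs a single amdgpu_ih_process() call drains (AMDGPU_IH_MAX_NUM_IVS), so a continuously refilling ring can no longer keep the handler looping indefinitely. The bounded-drain shape, as a standalone sketch with stand-in ring accessors:

#define MAX_BATCH 32

struct ring { unsigned rptr, wptr, mask; };

static void dispatch_one(struct ring *r)
{
	r->rptr = (r->rptr + 1) & r->mask;   /* consume one entry */
}

/* Drain at most a bounded number of entries; the caller re-runs if work remains. */
static void process(struct ring *r)
{
	unsigned count = MAX_BATCH;

	while (r->rptr != r->wptr && --count)
		dispatch_one(r);
}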
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e860412043bb..b17d0545728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -39,6 +39,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
+#include "amdgpu_ras.h"
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -296,6 +297,17 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->pm.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_TA:
+ if (query_fw->index > 1)
+ return -EINVAL;
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+ } else {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_ras_ucode_version;
+ }
+ break;
case AMDGPU_INFO_FW_SDMA:
if (query_fw->index >= adev->sdma.num_instances)
return -EINVAL;
@@ -684,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk) {
+ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -909,6 +925,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t ras_mask;
+
+ if (!ras)
+ return -EINVAL;
+ ras_mask = (uint64_t)ras->supported << 32 | ras->features;
+
+ return copy_to_user(out, &ras_mask,
+ min_t(u64, size, sizeof(ras_mask))) ?
+ -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1328,6 +1356,16 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ query_fw.fw_type = AMDGPU_INFO_FW_TA;
+ for (i = 0; i < 2; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ continue;
+ seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
+ i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+ }
+
/* SMC */
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
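Editorial note: the new AMDGPU_INFO_RAS_ENABLED_FEATURES query above returns both masks in a single 64-bit value, the supported mask in the upper 32 bits and the currently enabled features in the lower 32. A minimal pack/unpack sketch:

#include <stdint.h>

static uint64_t pack_ras_mask(uint32_t supported, uint32_t features)
{
	return (uint64_t)supported << 32 | features;
}

static uint32_t ras_supported(uint64_t mask) { return (uint32_t)(mask >> 32); }
static uint32_t ras_features(uint64_t mask)  { return (uint32_t)mask; }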
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3e6823fdd939..58ed401c5996 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -256,14 +256,14 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
@@ -299,7 +299,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, range->blockable))
+ if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, range->start, end);
@@ -307,7 +307,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 889e443eeee7..2e9e3db778c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -58,7 +58,7 @@ struct amdgpu_hpd;
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
@@ -406,7 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
- u64 last_flip_vblank;
+ u32 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ec9e45004bff..93b2c5a48a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
if (bo->gem_base.import_attach)
drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
- amdgpu_bo_unref(&bo->parent);
+ /* in case amdgpu_device_recover_vram got NULL of bo->parent */
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
+ amdgpu_bo_unref(&bo->parent);
+
kfree(bo->metadata);
kfree(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 220a6a7b1bc1..c430e8259038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -72,6 +72,8 @@ struct amdgpu_bo_va {
/* If the mappings are cleared or filled */
bool cleared;
+
+ bool is_xgmi;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a7adb7b6bd98..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
+#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
@@ -80,6 +81,27 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_read_sensor(&adev->smu, sensor, data, size);
+ else {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+ sensor, data, size);
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* DOC: power_dpm_state
*
@@ -122,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
else
pm = adev->pm.dpm.user_state;
@@ -240,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
@@ -273,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ current_level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
@@ -299,10 +327,45 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgim_is_hwperf(adev) &&
+ adev->virt.ops->force_dpm_level) {
+ mutex_lock(&adev->pm.mutex);
+ adev->virt.ops->force_dpm_level(adev, level);
+ mutex_unlock(&adev->pm.mutex);
+ return count;
+ } else {
+ return -EINVAL;
+ }
+ }
+
if (current_level == level)
return count;
- if (adev->powerplay.pp_funcs->force_performance_level) {
+ /* profile_exit setting is valid only when current mode is in profile mode */
+ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+ pr_err("Currently not in any profile mode!\n");
+ return -EINVAL;
+ }
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm.thermal_active) {
+ count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
+ goto fail;
+ }
+ ret = smu_force_performance_level(&adev->smu, level);
+ if (ret)
+ count = -EINVAL;
+ else
+ adev->pm.dpm.forced_level = level;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -328,9 +391,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
- int i, buf_len;
+ int i, buf_len, ret;
- if (adev->powerplay.pp_funcs->get_pp_num_states)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_get_power_num_states(&adev->smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -351,23 +418,29 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
+ struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
- int i = 0;
+ int i = 0, ret = 0;
- if (adev->powerplay.pp_funcs->get_current_power_state
+ if (is_support_sw_smu(adev)) {
+ pm = smu_get_current_power_state(smu);
+ ret = smu_get_power_num_states(smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state
&& adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
+ }
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
+ for (i = 0; i < data.nums; i++) {
+ if (pm == data.states[i])
+ break;
}
+ if (i == data.nums)
+ i = -EINVAL;
+
return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
@@ -397,6 +470,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
+ else if (is_support_sw_smu(adev))
+ adev->pp_force_state_enabled = false;
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
@@ -442,7 +517,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->powerplay.pp_funcs->get_pp_table)
+ if (is_support_sw_smu(adev)) {
+ size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ if (size < 0)
+ return size;
+ }
+ else if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -462,8 +542,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->set_pp_table)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -586,19 +671,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
+ if (is_support_sw_smu(adev)) {
+ ret = smu_od_edit_dpm_table(&adev->smu, type,
+ parameter, parameter_size);
- if (ret)
- return -EINVAL;
+ if (ret)
+ return -EINVAL;
+ } else {
+ if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size);
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- return count;
- } else {
+ if (ret)
return -EINVAL;
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL);
+ return count;
+ } else {
+ return -EINVAL;
+ }
}
}
@@ -613,7 +708,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t size = 0;
- if (adev->powerplay.pp_funcs->print_clock_levels) {
+ if (is_support_sw_smu(adev)) {
+ size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
+ size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
+ return size;
+ } else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
@@ -711,7 +812,13 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk)
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -767,7 +874,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
if (ret)
@@ -783,7 +892,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -803,7 +914,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
if (ret)
@@ -819,7 +932,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -839,7 +954,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
if (ret)
@@ -855,7 +972,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -875,7 +994,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
if (ret)
@@ -891,7 +1012,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -911,7 +1034,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
if (ret)
@@ -927,7 +1052,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -947,7 +1074,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
if (ret)
@@ -964,7 +1093,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_sclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
+ else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -986,14 +1117,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_sclk_od)
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1008,7 +1144,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_mclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
+ else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -1030,14 +1168,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_mclk_od)
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1071,7 +1214,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->get_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ return smu_get_power_profile_mode(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_power_profile_mode)
return amdgpu_dpm_get_power_profile_mode(adev, buf);
return snprintf(buf, PAGE_SIZE, "\n");
@@ -1121,9 +1266,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
}
parameter[parameter_size] = profile_mode;
- if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
if (!ret)
return count;
fail:
@@ -1146,14 +1292,10 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+
if (r)
return r;
@@ -1247,11 +1389,6 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size);
@@ -1283,11 +1420,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1306,14 +1446,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
+ if (is_support_sw_smu(adev)) {
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
+ smu_set_fan_control_mode(&adev->smu, value);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
+
+ amdgpu_dpm_set_fan_control_mode(adev, value);
+ }
return count;
}
@@ -1345,8 +1493,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
return -EINVAL;
@@ -1358,7 +1508,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_percent(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (err)
return err;
@@ -1380,7 +1534,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_fan_speed_percent(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
return err;
@@ -1404,7 +1562,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
@@ -1422,9 +1584,6 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
if (r)
@@ -1442,9 +1601,6 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
if (r)
@@ -1466,7 +1622,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &rpm);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
if (err)
return err;
@@ -1484,7 +1644,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL)
return -ENODATA;
@@ -1497,7 +1661,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (err)
return err;
- if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_rpm(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
if (err)
return err;
@@ -1513,11 +1681,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1536,8 +1707,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
err = kstrtoint(buf, 10, &value);
if (err)
@@ -1550,7 +1719,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ if (is_support_sw_smu(adev)) {
+ smu_set_fan_control_mode(&adev->smu, pwm_mode);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
+ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ }
return count;
}
@@ -1569,11 +1744,6 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
@@ -1608,11 +1778,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
@@ -1644,11 +1809,6 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
@@ -1675,7 +1835,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, true);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1690,7 +1853,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, false);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1713,7 +1879,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ adev->smu.funcs->set_power_limit(&adev->smu, value);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
if (err)
return err;
@@ -1967,18 +2135,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
+ if (!is_support_sw_smu(adev)) {
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
+ }
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
@@ -1987,20 +2157,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
+ if (!is_support_sw_smu(adev)) {
+ /* hide max/min values if we can't both query and manage the fan */
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
+ }
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU) &&
@@ -2039,9 +2211,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor &&
- !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
@@ -2267,7 +2437,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
@@ -2288,7 +2464,13 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
@@ -2413,7 +2595,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- if (hwmgr->od_enabled) {
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
@@ -2489,7 +2672,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- if (hwmgr->od_enabled)
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
@@ -2516,28 +2700,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
+ struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ mutex_lock(&(smu->mutex));
+ smu_handle_task(&adev->smu,
+ smu_dpm->dpm_level,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ mutex_unlock(&(smu->mutex));
+ } else {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+ } else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
+ amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex);
}
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
}
}
@@ -2553,11 +2747,6 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
uint32_t query = 0;
int size;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
@@ -2649,7 +2838,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+ } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3091488cd8cc..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
psp_set_funcs(adev);
- return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct psp_context *psp = &adev->psp;
- int ret;
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+ int ret;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
@@ -120,6 +121,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
+ int timeout = 2000;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -133,8 +135,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
- while (*((unsigned int *)psp->fence_buf) != index)
+ while (*((unsigned int *)psp->fence_buf) != index) {
+ if (--timeout == 0)
+ break;
msleep(1);
+ }
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
@@ -143,12 +148,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status) {
+ if (psp->cmd_buf_mem->resp.status || !timeout) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command failed and response status is (%d)\n",
psp->cmd_buf_mem->resp.status);
+ if (!timeout)
+ return -EINVAL;
}
/* get xGMI session id from response buffer */
@@ -181,13 +188,13 @@ static int psp_tmr_init(struct psp_context *psp)
int ret;
/*
- * Allocate 3M memory aligned to 1M from Frame Buffer (local
- * physical).
+ * According to the HW engineers, the TMR address should be "naturally
+ * aligned", i.e. the start address is an integer multiple of the TMR size.
*
* Note: this memory need be reserved till the driver
* uninitializes.
*/
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -466,6 +473,206 @@ static int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
+// ras begin
+static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ras_ta_mc, uint64_t ras_mc_shared,
+ uint32_t ras_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_ras_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for ras ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return ret;
+}
+
+static int psp_ras_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+
+ psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->ras.ras_shared_mc_addr,
+ psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->ras.ras_initialized = 1;
+ psp->ras.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
+}
+
+static int psp_ras_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (enable)
+ ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
+ else
+ ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
+
+ ras_cmd->ras_in_message = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_ras_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return 0;
+
+ ret = psp_ras_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->ras.ras_initialized = 0;
+
+ /* free ras shared memory */
+ amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return 0;
+}
+
+static int psp_ras_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized) {
+ ret = psp_ras_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ras_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+// ras end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -473,25 +680,35 @@ static int psp_hw_start(struct psp_context *psp)
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sysdrv failed!\n");
return ret;
+ }
ret = psp_bootloader_load_sos(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sos failed!\n");
return ret;
+ }
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP create ring failed!\n");
return ret;
+ }
ret = psp_tmr_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
return ret;
+ }
ret = psp_asd_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
return ret;
+ }
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp);
@@ -502,6 +719,15 @@ static int psp_hw_start(struct psp_context *psp)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+ }
+
return 0;
}
@@ -665,53 +891,52 @@ static int psp_load_fw(struct amdgpu_device *adev)
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
- goto failed_mem2;
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
if (ret)
- goto failed_mem1;
+ goto failed;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP ring init failed!\n");
+ goto failed;
+ }
ret = psp_tmr_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP tmr init failed!\n");
+ goto failed;
+ }
ret = psp_asd_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP asd init failed!\n");
+ goto failed;
+ }
skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
- goto failed_mem;
+ goto failed;
ret = psp_np_fw_load(psp);
if (ret)
- goto failed_mem;
+ goto failed;
return 0;
-failed_mem:
- amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
- &psp->cmd_buf_mc_addr,
- (void **)&psp->cmd_buf_mem);
-failed_mem1:
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem2:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
- kfree(psp->cmd);
- psp->cmd = NULL;
+ /*
+ * all cleanup jobs (xgmi terminate, ras terminate,
+ * ring destroy, cmd/fence/fw buffers destroy,
+ * psp->cmd destroy) are delayed to psp_hw_fini
+ */
return ret;
}
@@ -753,6 +978,9 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
+ if (psp->adev->psp.ta_fw)
+ psp_ras_terminate(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -786,6 +1014,14 @@ static int psp_suspend(void *handle)
}
}
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate ras ta\n");
+ return ret;
+ }
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 2ef98cc755d6..cde113f07c96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -28,11 +28,13 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
#include "ta_xgmi_if.h"
+#include "ta_ras_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
+#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
@@ -88,6 +90,9 @@ struct psp_funcs
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
bool (*support_vmr_ring)(struct psp_context *psp);
+ int (*ras_trigger_error)(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+ int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
};
struct psp_xgmi_context {
@@ -98,6 +103,16 @@ struct psp_xgmi_context {
void *xgmi_shared_buf;
};
+struct psp_ras_context {
+ /*ras fw*/
+ bool ras_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *ras_shared_bo;
+ uint64_t ras_shared_mc_addr;
+ void *ras_shared_buf;
+ struct amdgpu_ras *ras;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -150,10 +165,15 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
+ uint32_t ta_fw_version;
uint32_t ta_xgmi_ucode_version;
uint32_t ta_xgmi_ucode_size;
uint8_t *ta_xgmi_start_addr;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_ucode_size;
+ uint8_t *ta_ras_start_addr;
struct psp_xgmi_context xgmi_context;
+ struct psp_ras_context ras;
};
struct amdgpu_psp_funcs {
@@ -207,6 +227,13 @@ struct psp_xgmi_topology_info {
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define psp_ras_trigger_error(psp, info) \
+ ((psp)->funcs->ras_trigger_error ? \
+ (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
+#define psp_ras_cure_posion(psp, addr) \
+ ((psp)->funcs->ras_cure_posion ? \
+ (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -217,6 +244,11 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable);
+
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
new file mode 100644
index 000000000000..22bd21efe6b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -0,0 +1,1482 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_atomfirmware.h"
+
+struct ras_ih_data {
+ /* interrupt bottom half */
+ struct work_struct ih_work;
+ int inuse;
+ /* IP callback */
+ ras_ih_cb cb;
+ /* full of entries */
+ unsigned char *ring;
+ unsigned int ring_size;
+ unsigned int element_size;
+ unsigned int aligned_element_size;
+ unsigned int rptr;
+ unsigned int wptr;
+};
+
+struct ras_fs_data {
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_err_data {
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_err_handler_data {
+ /* point to bad pages array */
+ struct {
+ unsigned long bp;
+ struct amdgpu_bo *bo;
+ } *bps;
+ /* the count of entries */
+ int count;
+ /* the space can place new entries */
+ int space_left;
+ /* last reserved entry's index + 1 */
+ int last_reserved;
+};
+
+struct ras_manager {
+ struct ras_common_if head;
+ /* reference count */
+ int use;
+ /* ras block link */
+ struct list_head node;
+ /* the device */
+ struct amdgpu_device *adev;
+ /* debugfs */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute sysfs_attr;
+ int attr_inuse;
+
+ /* fs node name */
+ struct ras_fs_data fs_data;
+
+ /* IH data */
+ struct ras_ih_data ih_data;
+
+ struct ras_err_data err_data;
+};
+
+const char *ras_error_string[] = {
+ "none",
+ "parity",
+ "single_correctable",
+ "multi_uncorrectable",
+ "poison",
+};
+
+const char *ras_block_string[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+};
+
+#define ras_err_str(i) (ras_error_string[ffs(i)])
+#define ras_block_str(i) (ras_block_string[i])
+
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
+#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
+
+static void amdgpu_ras_self_test(struct amdgpu_device *adev)
+{
+ /* TODO */
+}
+
+static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+ ssize_t s;
+ char val[128];
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+ if (*pos >= s)
+ return 0;
+
+ s -= *pos;
+ s = min_t(u64, s, size);
+
+ if (copy_to_user(buf, &val[*pos], s))
+ return -EINVAL;
+
+ *pos += s;
+
+ return s;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_read,
+ .write = NULL,
+ .llseek = default_llseek
+};
+
+static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
+ *block_id = i;
+ if (strcmp(name, ras_block_str(i)) == 0)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos, struct ras_debug_if *data)
+{
+ ssize_t s = min_t(u64, 64, size);
+ char str[65];
+ char block_name[33];
+ char err[9] = "ue";
+ int op = -1;
+ int block_id;
+ u64 address, value;
+
+ if (*pos)
+ return -EINVAL;
+ *pos = size;
+
+ memset(str, 0, sizeof(str));
+ memset(data, 0, sizeof(*data));
+
+ if (copy_from_user(str, buf, s))
+ return -EINVAL;
+
+ if (sscanf(str, "disable %32s", block_name) == 1)
+ op = 0;
+ else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
+ op = 1;
+ else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
+ op = 2;
+ else if (str[0] && str[1] && str[2] && str[3])
+ /* ascii string, but the command did not match. */
+ return -EINVAL;
+
+ if (op != -1) {
+ if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
+ return -EINVAL;
+
+ data->head.block = block_id;
+ data->head.type = memcmp("ue", err, 2) == 0 ?
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ data->op = op;
+
+ if (op == 2) {
+ if (sscanf(str, "%*s %*s %*s %llu %llu",
+ &address, &value) != 2)
+ if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
+ &address, &value) != 2)
+ return -EINVAL;
+ data->inject.address = address;
+ data->inject.value = value;
+ }
+ } else {
+ if (size < sizeof(*data))
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, sizeof(*data)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+/*
+ * DOC: ras debugfs control interface
+ *
+ * It accepts struct ras_debug_if which has two members.
+ *
+ * First member: ras_debug_if::head or ras_debug_if::inject.
+ *
+ * head is used to indicate which IP block will be under control.
+ *
+ * head has four members: block, type, sub_block_index, and name.
+ * block: which IP will be under control.
+ * type: what kind of error will be enabled/disabled/injected.
+ * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
+ * name: the name of the IP.
+ *
+ * inject has two more members than head: address and value.
+ * As their names indicate, the inject operation will write the
+ * value to the address.
+ *
+ * Second member: struct ras_debug_if::op.
+ * It has three kinds of operations.
+ * 0: disable RAS on the block. Take ::head as its data.
+ * 1: enable RAS on the block. Take ::head as its data.
+ * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * How to use the interface?
+ * programs:
+ * copy the struct ras_debug_if in your codes and initialize it.
+ * write the struct to the control node.
+ *
+ * bash:
+ * echo op block [error [address value]] > .../ras/ras_ctrl
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: block, error, address and value are needed
+ * block: umc, sdma, gfx, ...
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ *
+ * here are some examples for bash commands,
+ * echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *
+ * How to check the result?
+ *
+ * For disable/enable, please check ras features at
+ * /sys/class/drm/card[0/1/2...]/device/ras/features
+ *
+ * For inject, please check corresponding err count at
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * NOTE: operation is only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ */
+static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ras_debug_if data;
+ int ret = 0;
+
+ ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
+ if (ret)
+ return -EINVAL;
+
+ if (!amdgpu_ras_is_supported(adev, data.head.block))
+ return -EINVAL;
+
+ switch (data.op) {
+ case 0:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
+ break;
+ case 1:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
+ break;
+ case 2:
+ ret = amdgpu_ras_error_inject(adev, &data.inject);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return -EINVAL;
+
+ return size;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_ctrl_write,
+ .llseek = default_llseek
+};
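As an aside, a minimal userspace sketch of the text protocol documented in the DOC block above. It assumes the debugfs path used in the bash examples (the card index may differ per system) and is only an illustration of how a single command line is written to the control node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* same command as the bash example: inject a ue on umc at 0x0/0x0 */
	const char cmd[] = "inject umc ue 0x0 0x0";
	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

	if (fd < 0) {
		perror("open ras_ctrl");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write ras_ctrl");
	close(fd);
	return 0;
}

After a successful write, the result can be checked at the per-block err_count node under /sys/class/drm/card[N]/device/ras/, as the DOC block notes.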
+
+static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
+/* obj begin */
+
+#define get_obj(obj) do { (obj)->use++; } while (0)
+#define alive_obj(obj) ((obj)->use)
+
+static inline void put_obj(struct ras_manager *obj)
+{
+ if (obj && --obj->use == 0)
+ list_del(&obj->node);
+ if (obj && obj->use < 0) {
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+ }
+}
+
+/* make one obj and return it. */
+static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return NULL;
+
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+ /* already exists. return obj? */
+ if (alive_obj(obj))
+ return NULL;
+
+ obj->head = *head;
+ obj->adev = adev;
+ list_add(&obj->node, &con->head);
+ get_obj(obj);
+
+ return obj;
+}
+
+/* return an obj equal to head, or the first when head is NULL */
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ int i;
+
+ if (!con)
+ return NULL;
+
+ if (head) {
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+
+ if (alive_obj(obj)) {
+ WARN_ON(head->block != obj->head.block);
+ return obj;
+ }
+ } else {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ obj = &con->objs[i];
+ if (alive_obj(obj)) {
+ WARN_ON(i != obj->head.block);
+ return obj;
+ }
+ }
+ }
+
+ return NULL;
+}
+/* obj end */
+
+/* feature ctl begin */
+static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->hw_supported & BIT(head->block);
+}
+
+static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->features & BIT(head->block);
+}
+
+/*
+ * if obj is not created, then create one.
+ * set feature enable flag.
+ */
+static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, int enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ /* If hardware does not support ras, then do not create obj.
+ * But if hardware supports ras, we can create the obj.
+ * The RAS framework checks con->hw_supported to see if it needs to do
+ * the corresponding initialization.
+ * IPs check con->supported to see if they need to disable ras.
+ */
+ if (!amdgpu_ras_is_feature_allowed(adev, head))
+ return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ if (enable) {
+ if (!obj) {
+ obj = amdgpu_ras_create_obj(adev, head);
+ if (!obj)
+ return -EINVAL;
+ } else {
+ /* In case we create obj somewhere else */
+ get_obj(obj);
+ }
+ con->features |= BIT(head->block);
+ } else {
+ if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
+ con->features &= ~BIT(head->block);
+ put_obj(obj);
+ }
+ }
+
+ return 0;
+}
+
+/* wrapper of psp_ras_enable_features */
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ union ta_ras_cmd_input info;
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (!enable) {
+ info.disable_features = (struct ta_ras_disable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ } else {
+ info.enable_features = (struct ta_ras_enable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ }
+
+ /* Do not enable if it is not allowed. */
+ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ /* Are we already in the state we are going to set? */
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ return -EINVAL;
+ }
+
+ /* setup the obj */
+ __amdgpu_ras_feature_enable(adev, head, enable);
+
+ return 0;
+}
+
+/* Only used in device probe stage and called only once. */
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* If ras is enabled by vbios, we set up the ras object first in
+ * both cases. For enable, that is all we need to do. For
+ * disable, we need to perform a ras TA disable cmd after that.
+ */
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (ret)
+ return ret;
+
+ if (!enable)
+ ret = amdgpu_ras_feature_enable(adev, head, 0);
+ } else
+ ret = amdgpu_ras_feature_enable(adev, head, enable);
+
+ return ret;
+}
+
+static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ /* bypass psp.
+ * aka just release the obj and corresponding flags
+ */
+ if (bypass) {
+ if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ }
+ }
+
+ return con->features;
+}
+
+static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ const enum amdgpu_ras_error_type default_ras_type =
+ AMDGPU_RAS_ERROR__NONE;
+
+ for (i = 0; i < ras_block_count; i++) {
+ struct ras_common_if head = {
+ .block = i,
+ .type = default_ras_type,
+ .sub_block_index = 0,
+ };
+ strcpy(head.name, ras_block_str(i));
+ if (bypass) {
+ /*
+ * bypass psp. vbios enables ras for us,
+ * so just create the obj.
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ return con->features;
+}
+/* feature ctl end */
+
+/* query/inject/cure begin */
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+
+ if (!obj)
+ return -EINVAL;
+ /* TODO might read the register to get the count */
+
+ info->ue_count = obj->err_data.ue_count;
+ info->ce_count = obj->err_data.ce_count;
+
+ return 0;
+}
+
+/* wrapper of psp_ras_trigger_error */
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ta_ras_trigger_error_input block_info = {
+ .block_id = amdgpu_ras_block_to_ta(info->head.block),
+ .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
+ .sub_block_index = info->head.sub_block_index,
+ .address = info->address,
+ .value = info->value,
+ };
+ int ret = 0;
+
+ if (!obj)
+ return -EINVAL;
+
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ if (ret)
+ DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
+ ras_block_str(info->head.block),
+ ret);
+
+ return ret;
+}
+
+int amdgpu_ras_error_cure(struct amdgpu_device *adev,
+ struct ras_cure_if *info)
+{
+ /* psp fw has no cure interface for now. */
+ return 0;
+}
+
+/* get the total error counts on all IPs */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_err_data data = {0, 0};
+
+ if (!con)
+ return -EINVAL;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(adev, &info))
+ return -EINVAL;
+
+ data.ce_count += info.ce_count;
+ data.ue_count += info.ue_count;
+ }
+
+ return is_ce ? data.ce_count : data.ue_count;
+}
+/* query/inject/cure end */
+
+
+/* sysfs begin */
+
+static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, features_attr);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct ras_common_if head;
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ ssize_t s;
+ struct ras_manager *obj;
+
+ s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
+
+ for (i = 0; i < ras_block_count; i++) {
+ head.block = i;
+
+ if (amdgpu_ras_is_feature_enabled(adev, &head)) {
+ obj = amdgpu_ras_find_obj(adev, &head);
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: %s\n",
+ ras_block_str(i),
+ ras_err_str(obj->head.type));
+ } else
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: disabled\n",
+ ras_block_str(i));
+ }
+
+ return s;
+}
+
+static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ con->features_attr = (struct device_attribute) {
+ .attr = {
+ .name = "features",
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_features_read,
+ };
+ sysfs_attr_init(attrs[0]);
+
+ return sysfs_create_group(&adev->dev->kobj, &group);
+}
+
+static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+
+ if (!obj || obj->attr_inuse)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.sysfs_name,
+ head->sysfs_name,
+ sizeof(obj->fs_data.sysfs_name));
+
+ obj->sysfs_attr = (struct device_attribute){
+ .attr = {
+ .name = obj->fs_data.sysfs_name,
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_read,
+ };
+ sysfs_attr_init(&obj->sysfs_attr.attr);
+
+ if (sysfs_add_file_to_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras")) {
+ put_obj(obj);
+ return -EINVAL;
+ }
+
+ obj->attr_inuse = 1;
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras");
+ obj->attr_inuse = 0;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_sysfs_remove(adev, &obj->head);
+ }
+
+ amdgpu_ras_sysfs_remove_feature_node(adev);
+
+ return 0;
+}
+/* sysfs end */
+
+/* debugfs begin */
+static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *root = minor->debugfs_root, *dir;
+ struct dentry *ent;
+
+ dir = debugfs_create_dir("ras", root);
+ if (IS_ERR(dir))
+ return -EINVAL;
+
+ con->dir = dir;
+
+ ent = debugfs_create_file("ras_ctrl",
+ S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ if (IS_ERR(ent)) {
+ debugfs_remove(con->dir);
+ return -EINVAL;
+ }
+
+ con->ent = ent;
+ return 0;
+}
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+ struct dentry *ent;
+
+ if (!obj || obj->ent)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.debugfs_name,
+ head->debugfs_name,
+ sizeof(obj->fs_data.debugfs_name));
+
+ ent = debugfs_create_file(obj->fs_data.debugfs_name,
+ S_IWUGO | S_IRUGO, con->dir,
+ obj, &amdgpu_ras_debugfs_ops);
+
+ if (IS_ERR(ent))
+ return -EINVAL;
+
+ obj->ent = ent;
+
+ return 0;
+}
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->ent)
+ return 0;
+
+ debugfs_remove(obj->ent);
+ obj->ent = NULL;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_debugfs_remove(adev, &obj->head);
+ }
+
+ debugfs_remove(con->ent);
+ debugfs_remove(con->dir);
+ con->dir = NULL;
+ con->ent = NULL;
+
+ return 0;
+}
+/* debugfs end */
+
+/* ras fs */
+
+static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_sysfs_create_feature_node(adev);
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
+{
+ amdgpu_ras_debugfs_remove_all(adev);
+ amdgpu_ras_sysfs_remove_all(adev);
+ return 0;
+}
+/* ras fs end */
+
+/* ih begin */
+static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
+{
+ struct ras_ih_data *data = &obj->ih_data;
+ struct amdgpu_iv_entry entry;
+ int ret;
+
+ while (data->rptr != data->wptr) {
+ rmb();
+ memcpy(&entry, &data->ring[data->rptr],
+ data->element_size);
+
+ wmb();
+ data->rptr = (data->aligned_element_size +
+ data->rptr) % data->ring_size;
+
+ /* Let the IP handle its data; maybe we need to get the output
+ * from the callback to update the error type/count, etc.
+ */
+ if (data->cb) {
+ ret = data->cb(obj->adev, &entry);
+ /* A ue will trigger an interrupt, and in that case
+ * we need to do a reset to recover the whole system.
+ * But we leave that recovery to the IP; here we just dispatch
+ * the error.
+ */
+ if (ret == AMDGPU_RAS_UE) {
+ obj->err_data.ue_count++;
+ }
+ /* Might need to get the ce count from a register, but not all
+ * IPs save a ce count; some IPs just use one or two bits
+ * to indicate that a ce happened.
+ */
+ }
+ }
+}
+
+static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+{
+ struct ras_ih_data *data =
+ container_of(work, struct ras_ih_data, ih_work);
+ struct ras_manager *obj =
+ container_of(data, struct ras_manager, ih_data);
+
+ amdgpu_ras_interrupt_handler(obj);
+}
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+ if (data->inuse == 0)
+ return 0;
+
+ /* Might overflow... */
+ memcpy(&data->ring[data->wptr], info->entry,
+ data->element_size);
+
+ wmb();
+ data->wptr = (data->aligned_element_size +
+ data->wptr) % data->ring_size;
+
+ schedule_work(&data->ih_work);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+ if (data->inuse == 0)
+ return 0;
+
+ cancel_work_sync(&data->ih_work);
+
+ kfree(data->ring);
+ memset(data, 0, sizeof(*data));
+ put_obj(obj);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj) {
+ /* in case we register the IH before enabling the ras feature */
+ obj = amdgpu_ras_create_obj(adev, &info->head);
+ if (!obj)
+ return -EINVAL;
+ } else
+ get_obj(obj);
+
+ data = &obj->ih_data;
+ /* add the callback, etc. */
+ *data = (struct ras_ih_data) {
+ .inuse = 0,
+ .cb = info->cb,
+ .element_size = sizeof(struct amdgpu_iv_entry),
+ .rptr = 0,
+ .wptr = 0,
+ };
+
+ INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
+
+ data->aligned_element_size = ALIGN(data->element_size, 8);
+ /* the ring can store 64 iv entries. */
+ data->ring_size = 64 * data->aligned_element_size;
+ data->ring = kmalloc(data->ring_size, GFP_KERNEL);
+ if (!data->ring) {
+ put_obj(obj);
+ return -ENOMEM;
+ }
+
+ /* IH is ready */
+ data->inuse = 1;
+
+ return 0;
+}
+
+static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ struct ras_ih_if info = {
+ .head = obj->head,
+ };
+ amdgpu_ras_interrupt_remove_handler(adev, &info);
+ }
+
+ return 0;
+}
+/* ih end */
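As an aside, the interrupt path above is a single-producer/single-consumer byte ring: the dispatch function copies an IV entry in at wptr from interrupt context, and the work handler drains entries from rptr. A standalone sketch of just the index arithmetic, with a made-up element size standing in for sizeof(struct amdgpu_iv_entry), looks roughly like this:

#include <stdio.h>
#include <string.h>

#define ELEM_SIZE     24                         /* stand-in for the real entry size */
#define ALIGNED_SIZE  ((ELEM_SIZE + 7) & ~7)     /* ALIGN(ELEM_SIZE, 8), as in the patch */
#define RING_SIZE     (64 * ALIGNED_SIZE)        /* the ring holds 64 entries */

static unsigned char ring[RING_SIZE];
static unsigned int rptr, wptr;

/* producer side: copy one entry in and advance wptr with wrap-around */
static void produce(const void *entry)
{
	memcpy(&ring[wptr], entry, ELEM_SIZE);
	wptr = (ALIGNED_SIZE + wptr) % RING_SIZE;
}

/* consumer side: copy one entry out and advance rptr with wrap-around */
static int consume(void *entry)
{
	if (rptr == wptr)
		return 0;                        /* ring is empty */
	memcpy(entry, &ring[rptr], ELEM_SIZE);
	rptr = (ALIGNED_SIZE + rptr) % RING_SIZE;
	return 1;
}

int main(void)
{
	char in[ELEM_SIZE] = "iv entry", out[ELEM_SIZE];

	produce(in);
	while (consume(out))
		printf("drained: %s\n", out);
	return 0;
}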
+
+/* recovery begin */
+static void amdgpu_ras_do_recovery(struct work_struct *work)
+{
+ struct amdgpu_ras *ras =
+ container_of(work, struct amdgpu_ras, recovery_work);
+
+ amdgpu_device_gpu_recover(ras->adev, 0);
+ atomic_set(&ras->in_recovery, 0);
+}
+
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+ struct amdgpu_bo **bo_ptr)
+{
+ /* no need to free it actually. */
+ amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
+ return 0;
+}
+
+/* reserve vram with size@offset */
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_param bp;
+ int r = 0;
+ int i;
+ struct amdgpu_bo *bo;
+
+ if (bo_ptr)
+ *bo_ptr = NULL;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+
+ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r)
+ return -EINVAL;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ goto error_reserve;
+
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ offset,
+ offset + size);
+ if (r)
+ goto error_pin;
+
+ if (bo_ptr)
+ *bo_ptr = bo;
+
+ amdgpu_bo_unreserve(bo);
+ return r;
+
+error_pin:
+ amdgpu_bo_unreserve(bo);
+error_reserve:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+/* alloc/realloc bps array */
+static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
+ struct ras_err_handler_data *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 1024);
+ void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(tmp, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = tmp;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+/* it deals with vram only. */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i = pages;
+ int ret = 0;
+
+ if (!con || !con->eh_data || !bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ if (data->space_left <= pages)
+ if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ while (i--)
+ data->bps[data->count++].bp = bps[i];
+
+ data->space_left -= pages;
+out:
+ mutex_unlock(&con->recovery_lock);
+
+ return ret;
+}
+
+/* called in gpu recovery/init */
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ uint64_t bp;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+ /* reserve vram at driver post stage. */
+ for (i = data->last_reserved; i < data->count; i++) {
+ bp = data->bps[i].bp;
+
+ if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
+ PAGE_SIZE, &bo))
+ DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i + 1;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+/* called when the driver unloads */
+static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ for (i = data->last_reserved - 1; i >= 0; i--) {
+ bo = data->bps[i].bo;
+
+ amdgpu_ras_release_vram(adev, &bo);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * write the array to eeprom when the SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * read the array from eeprom when the SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data **data = &con->eh_data;
+
+ *data = kmalloc(sizeof(**data),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!*data)
+ return -ENOMEM;
+
+ mutex_init(&con->recovery_lock);
+ INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
+ atomic_set(&con->in_recovery, 0);
+ con->adev = adev;
+
+ amdgpu_ras_load_bad_pages(adev);
+ amdgpu_ras_reserve_bad_pages(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ cancel_work_sync(&con->recovery_work);
+ amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_release_bad_pages(adev);
+
+ mutex_lock(&con->recovery_lock);
+ con->eh_data = NULL;
+ kfree(data->bps);
+ kfree(data);
+ mutex_unlock(&con->recovery_lock);
+
+ return 0;
+}
+/* recovery end */
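As an aside, the recovery helpers above retire VRAM pages by recording page frame numbers in the bps array and then pinning a page-sized buffer object over each one so the allocator can never hand that page out again. A rough sketch of how a caller might feed a faulting VRAM address into that path (illustrative only, the helper name is invented and error handling is omitted):

/* Quarantine the VRAM page containing a faulting address. */
static void example_retire_vram_page(struct amdgpu_device *adev, uint64_t addr)
{
	unsigned long pfn = addr >> PAGE_SHIFT;	/* page frame number of the bad page */

	/* record it in the eh_data->bps array ... */
	amdgpu_ras_add_bad_pages(adev, &pfn, 1);
	/* ... and pin a BO over it so the page is never reused */
	amdgpu_ras_reserve_bad_pages(adev);
}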
+
+/*
+ * check hardware's ras ability which will be saved in hw_supported.
+ * if hardware does not support ras, we can skip some ras initialization and
+ * forbid some ras operations from IPs.
+ * if software itself, say a boot parameter, limits the ras ability, we still
+ * need to allow IPs to do some limited operations, like disable. In such a
+ * case, we have to initialize ras as normal, but need to check whether the
+ * operation is allowed or not in each function.
+ */
+static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
+ uint32_t *hw_supported, uint32_t *supported)
+{
+ *hw_supported = 0;
+ *supported = 0;
+
+ if (amdgpu_sriov_vf(adev) ||
+ adev->asic_type != CHIP_VEGA20)
+ return;
+
+ if (adev->is_atom_fw &&
+ (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
+ amdgpu_atomfirmware_sram_ecc_supported(adev)))
+ *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+
+ *supported = amdgpu_ras_enable == 0 ?
+ 0 : *hw_supported & amdgpu_ras_mask;
+}
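As an aside, a worked example of the mask arithmetic above, assuming a Vega20 whose VBIOS reports ECC, amdgpu_ras_enable left non-zero, and the ras_mask module parameter set to 0x1 (these values are illustrative):

/*
 * hw_supported = AMDGPU_RAS_BLOCK_MASK                 = 0x3fff  (all 14 blocks)
 * amdgpu_ras_mask (module parameter ras_mask)          = 0x0001
 * supported    = hw_supported & amdgpu_ras_mask        = 0x0001  (UMC only)
 *
 * amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) then evaluates to 0,
 * so the debugfs control node rejects "inject gfx ..." with -EINVAL.
 */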
+
+int amdgpu_ras_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (con)
+ return 0;
+
+ con = kmalloc(sizeof(struct amdgpu_ras) +
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+ GFP_KERNEL|__GFP_ZERO);
+ if (!con)
+ return -ENOMEM;
+
+ con->objs = (struct ras_manager *)(con + 1);
+
+ amdgpu_ras_set_context(adev, con);
+
+ amdgpu_ras_check_supported(adev, &con->hw_supported,
+ &con->supported);
+ con->features = 0;
+ INIT_LIST_HEAD(&con->head);
+ /* Might need to get this flag from vbios. */
+ con->flags = RAS_DEFAULT_FLAGS;
+
+ if (amdgpu_ras_recovery_init(adev))
+ goto recovery_out;
+
+ amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
+
+ if (amdgpu_ras_fs_init(adev))
+ goto fs_out;
+
+ amdgpu_ras_self_test(adev);
+
+ DRM_INFO("RAS INFO: ras initialized successfully, "
+ "hardware ability[%x] ras_mask[%x]\n",
+ con->hw_supported, con->supported);
+ return 0;
+fs_out:
+ amdgpu_ras_recovery_fini(adev);
+recovery_out:
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return -EINVAL;
+}
+
+/* do some init work after IP late init, since it depends on it */
+void amdgpu_ras_post_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ if (!con)
+ return;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* Set up all other IPs which are not implemented. There is a
+ * subtlety here: the IP's actual ras error type should be
+ * MULTI_UNCORRECTABLE, but as the driver does not handle it,
+ * ERROR_NONE makes sense anyway.
+ */
+ amdgpu_ras_enable_all_features(adev, 1);
+
+ /* We enable ras on all hw_supported blocks, but the boot
+ * parameter might disable some of them and one or more IPs may
+ * not be implemented yet. So we disable them on their behalf.
+ */
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
+ amdgpu_ras_feature_enable(adev, &obj->head, 0);
+ /* there should not be any reference left. */
+ WARN_ON(alive_obj(obj));
+ }
+ }
+ }
+}
+
+/* do some fini work before IP fini, since it depends on it */
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+ /* Need disable ras on all IPs here before ip [hw/sw]fini */
+ amdgpu_ras_disable_all_features(adev, 0);
+ amdgpu_ras_recovery_fini(adev);
+ return 0;
+}
+
+int amdgpu_ras_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+ amdgpu_ras_fs_fini(adev);
+ amdgpu_ras_interrupt_remove_all(adev);
+
+ WARN(con->features, "Feature mask is not cleared");
+
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 1);
+
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
new file mode 100644
index 000000000000..eaef5edefc34
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAS_H
+#define _AMDGPU_RAS_H
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "ta_ras_if.h"
+
+enum amdgpu_ras_block {
+ AMDGPU_RAS_BLOCK__UMC = 0,
+ AMDGPU_RAS_BLOCK__SDMA,
+ AMDGPU_RAS_BLOCK__GFX,
+ AMDGPU_RAS_BLOCK__MMHUB,
+ AMDGPU_RAS_BLOCK__ATHUB,
+ AMDGPU_RAS_BLOCK__PCIE_BIF,
+ AMDGPU_RAS_BLOCK__HDP,
+ AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ AMDGPU_RAS_BLOCK__DF,
+ AMDGPU_RAS_BLOCK__SMN,
+ AMDGPU_RAS_BLOCK__SEM,
+ AMDGPU_RAS_BLOCK__MP0,
+ AMDGPU_RAS_BLOCK__MP1,
+ AMDGPU_RAS_BLOCK__FUSE,
+
+ AMDGPU_RAS_BLOCK__LAST
+};
+
+#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
+
+enum amdgpu_ras_error_type {
+ AMDGPU_RAS_ERROR__NONE = 0,
+ AMDGPU_RAS_ERROR__PARITY = 1,
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ AMDGPU_RAS_ERROR__POISON = 8,
+};
+
+enum amdgpu_ras_ret {
+ AMDGPU_RAS_SUCCESS = 0,
+ AMDGPU_RAS_FAIL,
+ AMDGPU_RAS_UE,
+ AMDGPU_RAS_CE,
+ AMDGPU_RAS_PT,
+};
+
+struct ras_common_if {
+ enum amdgpu_ras_block block;
+ enum amdgpu_ras_error_type type;
+ uint32_t sub_block_index;
+ /* block name */
+ char name[32];
+};
+
+typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+struct amdgpu_ras {
+ /* ras infrastructure */
+ /* for ras itself. */
+ uint32_t hw_supported;
+ /* for IP to check its ras ability. */
+ uint32_t supported;
+ uint32_t features;
+ struct list_head head;
+ /* debugfs */
+ struct dentry *dir;
+ /* debugfs ctrl */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute features_attr;
+ /* block array */
+ struct ras_manager *objs;
+
+ /* gpu recovery */
+ struct work_struct recovery_work;
+ atomic_t in_recovery;
+ struct amdgpu_device *adev;
+ /* error handler data */
+ struct ras_err_handler_data *eh_data;
+ struct mutex recovery_lock;
+
+ uint32_t flags;
+};
+
+/* interfaces for IP */
+
+struct ras_fs_if {
+ struct ras_common_if head;
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_query_if {
+ struct ras_common_if head;
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_inject_if {
+ struct ras_common_if head;
+ uint64_t address;
+ uint64_t value;
+};
+
+struct ras_cure_if {
+ struct ras_common_if head;
+ uint64_t address;
+};
+
+struct ras_ih_if {
+ struct ras_common_if head;
+ ras_ih_cb cb;
+};
+
+struct ras_dispatch_if {
+ struct ras_common_if head;
+ struct amdgpu_iv_entry *entry;
+};
+
+struct ras_debug_if {
+ union {
+ struct ras_common_if head;
+ struct ras_inject_if inject;
+ };
+ int op;
+};
+/* work flow
+ * vbios
+ * 1: ras feature enable (enabled by default)
+ * psp
+ * 2: ras framework init (in ip_init)
+ * IP
+ * 3: IH add
+ * 4: debugfs/sysfs create
+ * 5: query/inject
+ * 6: debugfs/sysfs remove
+ * 7: IH remove
+ * 8: feature disable
+ */
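As an aside, a rough sketch of how an IP block might follow steps 3 through 6 of the work flow above using the interfaces declared in this header. The function names and the "umc_err_count"/"umc_err_inject" node names are invented for illustration, and error handling is omitted.

/* A hypothetical per-IP RAS callback; returning AMDGPU_RAS_UE bumps ue_count. */
static int example_umc_ras_cb(struct amdgpu_device *adev,
			      struct amdgpu_iv_entry *entry)
{
	/* decode the IV entry here */
	return AMDGPU_RAS_SUCCESS;
}

static int example_umc_ras_late_init(struct amdgpu_device *adev)
{
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__UMC,
		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
		.sub_block_index = 0,
		.name = "umc",
	};
	struct ras_fs_if fs_info = {
		.head = head,
		.sysfs_name = "umc_err_count",		/* invented node names */
		.debugfs_name = "umc_err_inject",
	};
	struct ras_ih_if ih_info = {
		.head = head,
		.cb = example_umc_ras_cb,
	};

	/* steps 1/2 equivalent: make sure the feature is enabled (or torn down) */
	amdgpu_ras_feature_enable_on_boot(adev, &head, true);
	/* step 3: hook the interrupt bottom half */
	amdgpu_ras_interrupt_add_handler(adev, &ih_info);
	/* step 4: expose the err count and the inject node */
	amdgpu_ras_sysfs_create(adev, &fs_info);
	amdgpu_ras_debugfs_create(adev, &fs_info);
	/* steps 5-8 (query/inject, removal, disable) mirror these calls on teardown */
	return 0;
}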
+
+#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
+#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
+
+/* check if ras is supported on block, say, sdma, gfx */
+static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ return ras && (ras->supported & (1 << block));
+}
+
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce);
+
+/* error handling functions */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages);
+
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
+
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
+ bool is_baco)
+{
+ /* remove me when gpu reset works on vega20 A1. */
+#if 0
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ schedule_work(&ras->recovery_work);
+#endif
+ return 0;
+}
+
+static inline enum ta_ras_block
+amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return TA_RAS_BLOCK__UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return TA_RAS_BLOCK__SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return TA_RAS_BLOCK__GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return TA_RAS_BLOCK__MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return TA_RAS_BLOCK__ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return TA_RAS_BLOCK__PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return TA_RAS_BLOCK__HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return TA_RAS_BLOCK__XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return TA_RAS_BLOCK__DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return TA_RAS_BLOCK__SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return TA_RAS_BLOCK__SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return TA_RAS_BLOCK__MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return TA_RAS_BLOCK__MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return TA_RAS_BLOCK__FUSE;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
+ return TA_RAS_BLOCK__UMC;
+ }
+}
+
+static inline enum ta_ras_error_type
+amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
+ switch (error) {
+ case AMDGPU_RAS_ERROR__NONE:
+ return TA_RAS_ERROR__NONE;
+ case AMDGPU_RAS_ERROR__PARITY:
+ return TA_RAS_ERROR__PARITY;
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ return TA_RAS_ERROR__SINGLE_CORRECTABLE;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ return TA_RAS_ERROR__MULTI_UNCORRECTABLE;
+ case AMDGPU_RAS_ERROR__POISON:
+ return TA_RAS_ERROR__POISON;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected error type %d\n", error);
+ return TA_RAS_ERROR__NONE;
+ }
+}
+
+/* called in ip_init and ip_fini */
+int amdgpu_ras_init(struct amdgpu_device *adev);
+void amdgpu_ras_post_init(struct amdgpu_device *adev);
+int amdgpu_ras_fini(struct amdgpu_device *adev);
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info);
+
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info);
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 335a0edf114b..8f5026c123ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -248,6 +248,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
+ else if (ring == &adev->sdma.instance[0].page)
+ sched_hw_submission = 256;
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 0767a93e4d91..639297250c21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -53,26 +53,25 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum drm_sched_priority priority)
{
- struct file *filp = fget(fd);
+ struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
int r;
- if (!filp)
+ if (!f.file)
return -EINVAL;
- r = amdgpu_file_to_fpriv(filp, &fpriv);
+ r = amdgpu_file_to_fpriv(f.file, &fpriv);
if (r) {
- fput(filp);
+ fdput(f);
return r;
}
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
- fput(filp);
-
+ fdput(f);
return 0;
}
@@ -81,30 +80,30 @@ static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
unsigned ctx_id,
enum drm_sched_priority priority)
{
- struct file *filp = fget(fd);
+ struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
int r;
- if (!filp)
+ if (!f.file)
return -EINVAL;
- r = amdgpu_file_to_fpriv(filp, &fpriv);
+ r = amdgpu_file_to_fpriv(f.file, &fpriv);
if (r) {
- fput(filp);
+ fdput(f);
return r;
}
ctx = amdgpu_ctx_get(fpriv, ctx_id);
if (!ctx) {
- fput(filp);
+ fdput(f);
return -EINVAL;
}
amdgpu_ctx_priority_override(ctx, priority);
amdgpu_ctx_put(ctx);
- fput(filp);
+ fdput(f);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 16b1a6ae5ba6..1ba9ba3b54f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -28,9 +28,8 @@
#define AMDGPU_MAX_SDMA_INSTANCES 2
enum amdgpu_sdma_irq {
- AMDGPU_SDMA_IRQ_TRAP0 = 0,
- AMDGPU_SDMA_IRQ_TRAP1,
-
+ AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
+ AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_SDMA_IRQ_LAST
};
@@ -49,9 +48,11 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src ecc_irq;
int num_instances;
uint32_t srbm_soft_reset;
bool has_page_queue;
+ struct ras_common_if *ras_if;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 73e71e61dc99..0c52d1f9fe0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -50,8 +50,6 @@
#include "amdgpu_sdma.h"
#include "bif/bif_4_1_d.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
@@ -1424,6 +1422,13 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
struct dma_fence *f;
int i;
+ /* Don't evict VM page tables while they are busy, otherwise we can't
+ * cleanly handle page faults.
+ */
+ if (bo->type == ttm_bo_type_kernel &&
+ !reservation_object_test_signaled_rcu(bo->resv, true))
+ return false;
+
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
@@ -1671,7 +1676,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
adev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1877,14 +1881,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct amdgpu_device *adev;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- adev = file_priv->minor->dev->dev_private;
if (adev == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e0f5e6..7d484fad3909 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
/* enable virtual display */
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
+ adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
@@ -375,4 +376,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
+static uint32_t parse_clk(char *buf, bool min)
+{
+ char *ptr = buf;
+ uint32_t clk = 0;
+
+ do {
+ ptr = strchr(ptr, ':');
+ if (!ptr)
+ break;
+ ptr+=2;
+ clk = simple_strtoul(ptr, NULL, 10);
+ } while (!min);
+
+ return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deefc0a7e..584947b7ccf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* HW PERF SIM in GIM */
+ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
+ /* protect DPM events to GIM */
+ struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
+#define amdgim_is_hwperf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 16fcb56c232b..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,6 +34,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
/**
* DOC: GPUVM
@@ -66,50 +67,6 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef LAST
/**
- * struct amdgpu_pte_update_params - Local structure
- *
- * Encapsulate some VM table update parameters to reduce
- * the number of function parameters
- *
- */
-struct amdgpu_pte_update_params {
-
- /**
- * @adev: amdgpu device we do this update for
- */
- struct amdgpu_device *adev;
-
- /**
- * @vm: optional amdgpu_vm we do this update for
- */
- struct amdgpu_vm *vm;
-
- /**
- * @src: address where to copy page table entries from
- */
- uint64_t src;
-
- /**
- * @ib: indirect buffer to fill with commands
- */
- struct amdgpu_ib *ib;
-
- /**
- * @func: Function which actually does the update
- */
- void (*func)(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo, uint64_t pe,
- uint64_t addr, unsigned count, uint32_t incr,
- uint64_t flags);
- /**
- * @pages_addr:
- *
- * DMA addresses to use for mapping, used during VM update by CPU
- */
- dma_addr_t *pages_addr;
-};
-
-/**
* struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
*/
struct amdgpu_prt_cb {
@@ -183,6 +140,22 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * The number of entries in the root page directory which needs the ATS setting.
+ */
+static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
+{
+ unsigned shift;
+
+ shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
+ return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
+}
+
+/**
* amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
*
* @adev: amdgpu_device pointer
@@ -333,7 +306,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
vm->bulk_moveable = false;
- if (bo->tbo.type == ttm_bo_type_kernel)
+ if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
amdgpu_vm_bo_relocated(base);
else
amdgpu_vm_bo_idle(base);
@@ -505,61 +478,39 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
+ * amdgpu_vm_pt_first_dfs - start a deep first search
*
- * @adev: amdgpu_device pointer
+ * @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
- * @start: start addr of the walk
* @cursor: state to initialize
*
- * Start a walk and go directly to the leaf node.
- */
-static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, uint64_t start,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- amdgpu_vm_pt_start(adev, vm, start, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
-}
-
-/**
- * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk the PD/PT tree to the next leaf node.
+ * Starts a deep first traversal of the PD/PT tree.
*/
-static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start,
struct amdgpu_vm_pt_cursor *cursor)
{
- amdgpu_vm_pt_next(adev, cursor);
- if (cursor->pfn != ~0ll)
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ if (start)
+ *cursor = *start;
+ else
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+ while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
- * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
- */
-#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
- for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
- (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
-
-/**
- * amdgpu_vm_pt_first_dfs - start a deep first search
+ * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
*
- * @adev: amdgpu_device structure
- * @vm: amdgpu_vm structure
- * @cursor: state to initialize
+ * @start: starting point for the search
+ * @entry: current entry
*
- * Starts a deep first traversal of the PD/PT tree.
+ * Returns:
+ * True when the search should continue, false otherwise.
*/
-static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_pt *entry)
{
- amdgpu_vm_pt_start(adev, vm, 0, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ return entry && (!start || entry != start->entry);
}
/**
@@ -587,11 +538,11 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
/**
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
-#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
- for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
(entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
- (entry); (entry) = (cursor).entry, \
- amdgpu_vm_pt_next_dfs((adev), &(cursor)))
+ amdgpu_vm_pt_continue_dfs((start), (entry)); \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
/**
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
@@ -712,18 +663,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
} else {
- if (vm->use_cpu_for_update)
- r = amdgpu_bo_kmap(bo, NULL);
+ vm->update_funcs->map_table(bo);
+ if (bo->parent)
+ amdgpu_vm_bo_relocated(bo_base);
else
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (r)
- break;
- if (bo->shadow) {
- r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
- if (r)
- break;
- }
- amdgpu_vm_bo_relocated(bo_base);
+ amdgpu_vm_bo_idle(bo_base);
}
}
@@ -751,8 +695,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @level: level this BO is at
- * @pte_support_ats: indicate ATS support from PTE
*
* Root PD needs to be reserved when calling this.
*
@@ -760,99 +702,112 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_bo *bo,
- unsigned level, bool pte_support_ats)
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { true, false };
- struct dma_fence *fence = NULL;
+ unsigned level = adev->vm_manager.root_level;
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_bo *ancestor = bo;
unsigned entries, ats_entries;
- struct amdgpu_ring *ring;
- struct amdgpu_job *job;
uint64_t addr;
int r;
+ /* Figure out our place in the hierarchy */
+ if (ancestor->parent) {
+ ++level;
+ while (ancestor->parent->parent) {
+ ++level;
+ ancestor = ancestor->parent;
+ }
+ }
+
entries = amdgpu_bo_size(bo) / 8;
+ if (!vm->pte_support_ats) {
+ ats_entries = 0;
- if (pte_support_ats) {
- if (level == adev->vm_manager.root_level) {
- ats_entries = amdgpu_vm_level_shift(adev, level);
- ats_entries += AMDGPU_GPU_PAGE_SHIFT;
- ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
- ats_entries = min(ats_entries, entries);
- entries -= ats_entries;
+ } else if (!bo->parent) {
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+
+ } else {
+ struct amdgpu_vm_pt *pt;
+
+ pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ if ((pt - vm->root.entries) >= ats_entries) {
+ ats_entries = 0;
} else {
ats_entries = entries;
entries = 0;
}
- } else {
- ats_entries = 0;
}
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
- goto error;
+ return r;
+
+ if (bo->shadow) {
+ r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
+ &ctx);
+ if (r)
+ return r;
+ }
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ r = vm->update_funcs->map_table(bo);
if (r)
return r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
- goto error;
+ return r;
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = 0;
if (ats_entries) {
- uint64_t ats_value;
+ uint64_t value = 0, flags;
+
+ flags = AMDGPU_PTE_DEFAULT_ATC;
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+ }
- ats_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != AMDGPU_VM_PTB)
- ats_value |= AMDGPU_PDE_PTE;
+ r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+ value, flags);
+ if (r)
+ return r;
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- ats_entries, 0, ats_value);
addr += ats_entries * 8;
}
if (entries) {
- uint64_t value = 0;
-
- /* Workaround for fault priority problem on GMC9 */
- if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
- value = AMDGPU_PTE_EXECUTABLE;
+ uint64_t value = 0, flags = 0;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
+ }
+ }
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- entries, 0, value);
+ r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+ value, flags);
+ if (r)
+ return r;
}
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
- WARN_ON(job->ibs[0].length_dw > 64);
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
- &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
- if (bo->shadow)
- return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
- level, pte_support_ats);
-
- return 0;
-
-error_free:
- amdgpu_job_free(job);
-
-error:
- return r;
+ return vm->update_funcs->commit(&params, NULL);
}
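One way to read the level computation at the top of the new amdgpu_vm_clear_bo(): the level of a PD/PT is simply the root level plus the number of parent links above it, which is also how amdgpu_vm_update_pde() counts it further down. A minimal sketch of that idea (example_vm_bo_level() is a hypothetical helper, not part of the patch, and assumes the usual amdgpu headers):

/* Illustrative sketch only -- not part of the patch. */
static unsigned int example_vm_bo_level(struct amdgpu_device *adev,
					struct amdgpu_bo *bo)
{
	unsigned int level = adev->vm_manager.root_level;

	/* every parent link moves the BO one level deeper in the hierarchy */
	for (; bo->parent; bo = bo->parent)
		++level;

	return level;
}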
/**
@@ -883,89 +838,56 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_alloc_pts - Allocate page tables.
+ * amdgpu_vm_alloc_pts - Allocate a specific page table
*
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
- * @saddr: Start address which needs to be allocated
- * @size: Size from start address we need.
+ * @cursor: Which page table to allocate
*
- * Make sure the page directories and page tables are allocated
+ * Make sure a specific page table or directory is allocated.
*
* Returns:
- * 0 on success, errno otherwise.
+ * 1 if page table needed to be allocated, 0 if page table was already
+ * allocated, negative errno if an error occurred.
*/
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size)
+static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor)
{
- struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_pt *entry = cursor->entry;
+ struct amdgpu_bo_param bp;
struct amdgpu_bo *pt;
- bool ats = false;
- uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
-
- eaddr = saddr + size - 1;
-
- if (vm->pte_support_ats)
- ats = saddr < AMDGPU_GMC_HOLE_START;
+ if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
+ unsigned num_entries;
- saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
-
- if (eaddr >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- eaddr, adev->vm_manager.max_pfn);
- return -EINVAL;
+ num_entries = amdgpu_vm_num_entries(adev, cursor->level);
+ entry->entries = kvmalloc_array(num_entries,
+ sizeof(*entry->entries),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!entry->entries)
+ return -ENOMEM;
}
- for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
- struct amdgpu_vm_pt *entry = cursor.entry;
- struct amdgpu_bo_param bp;
-
- if (cursor.level < AMDGPU_VM_PTB) {
- unsigned num_entries;
-
- num_entries = amdgpu_vm_num_entries(adev, cursor.level);
- entry->entries = kvmalloc_array(num_entries,
- sizeof(*entry->entries),
- GFP_KERNEL |
- __GFP_ZERO);
- if (!entry->entries)
- return -ENOMEM;
- }
-
-
- if (entry->base.bo)
- continue;
-
- amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
-
- r = amdgpu_bo_create(adev, &bp, &pt);
- if (r)
- return r;
+ if (entry->base.bo)
+ return 0;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(pt, NULL);
- if (r)
- goto error_free_pt;
- }
+ amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
+ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
- amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ /* Keep a reference to the root directory to avoid
+ * freeing them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
- }
+ r = amdgpu_vm_clear_bo(adev, vm, pt);
+ if (r)
+ goto error_free_pt;
return 0;
@@ -976,31 +898,45 @@ error_free_pt:
}
/**
+ * amdgpu_vm_free_table - free one PD/PT
+ *
+ * @entry: PDE to free
+ */
+static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
+{
+ if (entry->base.bo) {
+ entry->base.bo->vm_bo = NULL;
+ list_del(&entry->base.vm_status);
+ amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&entry->base.bo);
+ }
+ kvfree(entry->entries);
+ entry->entries = NULL;
+}
+
+/**
* amdgpu_vm_free_pts - free PD/PT levels
*
* @adev: amdgpu device structure
* @vm: amdgpu vm structure
+ * @start: optional cursor at which to start freeing PDs/PTs
*
* Free the page directory or page table level and all sub levels.
*/
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
+ vm->bulk_moveable = false;
- if (entry->base.bo) {
- entry->base.bo->vm_bo = NULL;
- list_del(&entry->base.vm_status);
- amdgpu_bo_unref(&entry->base.bo->shadow);
- amdgpu_bo_unref(&entry->base.bo);
- }
- kvfree(entry->entries);
- }
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ amdgpu_vm_free_table(entry);
- BUG_ON(vm->root.base.bo);
+ if (start)
+ amdgpu_vm_free_table(start->entry);
}
/**
@@ -1212,66 +1148,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_do_set_ptes - helper to call the right asic function
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the right asic functions
- * to setup the page table using the DMA.
- */
-static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- if (count < 3) {
- amdgpu_vm_write_pte(params->adev, params->ib, pe,
- addr | flags, count, incr);
-
- } else {
- amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
- count, incr, flags);
- }
-}
-
-/**
- * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the DMA function to copy the PTEs.
- */
-static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- uint64_t src = (params->src + (addr >> 12) * 8);
-
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
-
- amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
-}
-
-/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
* @pages_addr: optional DMA address to use for lookup
@@ -1283,7 +1159,7 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* Returns:
* The pointer for the page table entry.
*/
-static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
@@ -1298,88 +1174,31 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/**
- * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: kmap addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Write count number of PT/PD entries directly.
- */
-static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- unsigned int i;
- uint64_t value;
-
- pe += (unsigned long)amdgpu_bo_kptr(bo);
-
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- for (i = 0; i < count; i++) {
- value = params->pages_addr ?
- amdgpu_vm_map_gart(params->pages_addr, addr) :
- addr;
- amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
- i, value, flags);
- addr += incr;
- }
-}
-
-/**
- * amdgpu_vm_update_func - helper to call update function
- *
- * Calls the update function for both the given BO as well as its shadow.
- */
-static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- if (bo->shadow)
- params->func(params, bo->shadow, pe, addr, count, incr, flags);
- params->func(params, bo, pe, addr, count, incr, flags);
-}
-
/*
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
* @param: parameters for the update
* @vm: requested vm
- * @parent: parent directory
* @entry: entry to update
*
* Makes sure the requested entry in parent is up to date.
*/
-static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- struct amdgpu_vm_pt *entry)
+static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *entry)
{
+ struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->base.bo, *pbo;
uint64_t pde, pt, flags;
unsigned level;
- /* Don't update huge pages here */
- if (entry->huge)
- return;
-
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
- amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
+ return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
/*
@@ -1396,7 +1215,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
if (entry->base.bo && !entry->base.moved)
amdgpu_vm_bo_relocated(&entry->base);
}
@@ -1415,89 +1234,39 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_pte_update_params params;
- struct amdgpu_job *job;
- unsigned ndw = 0;
- int r = 0;
+ struct amdgpu_vm_update_params params;
+ int r;
if (list_empty(&vm->relocated))
return 0;
-restart:
memset(&params, 0, sizeof(params));
params.adev = adev;
+ params.vm = vm;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_sync_wait(vm->root.base.bo,
- AMDGPU_FENCE_OWNER_VM, true);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
- } else {
- ndw = 512 * 8;
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
- }
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ if (r)
+ return r;
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_pt *pt, *entry;
+ struct amdgpu_vm_pt *entry;
entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
base.vm_status);
amdgpu_vm_bo_idle(&entry->base);
- pt = amdgpu_vm_pt_parent(entry);
- if (!pt)
- continue;
-
- amdgpu_vm_update_pde(&params, vm, pt, entry);
-
- if (!vm->use_cpu_for_update &&
- (ndw - params.ib->length_dw) < 32)
- break;
- }
-
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- } else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- struct amdgpu_bo *root = vm->root.base.bo;
- struct amdgpu_ring *ring;
- struct dma_fence *fence;
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
- sched);
-
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
- &fence);
+ r = amdgpu_vm_update_pde(&params, vm, entry);
if (r)
goto error;
-
- amdgpu_bo_fence(root, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
}
- if (!list_empty(&vm->relocated))
- goto restart;
-
+ r = vm->update_funcs->commit(&params, &vm->last_update);
+ if (r)
+ goto error;
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
- amdgpu_job_free(job);
return r;
}
@@ -1506,7 +1275,7 @@ error:
*
* Make sure to set the right flags for the PTEs at the desired level.
*/
-static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
struct amdgpu_bo *bo, unsigned level,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
@@ -1525,13 +1294,14 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
- amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
+ params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+ flags);
}
/**
* amdgpu_vm_fragment - get fragment for PTEs
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: first PTE to handle
* @end: last PTE to handle
* @flags: hw mapping flags
@@ -1540,7 +1310,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
*
* Returns the first possible fragment for the start and end address.
*/
-static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end, uint64_t flags,
unsigned int *frag, uint64_t *frag_end)
{
@@ -1573,7 +1343,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
max_frag = 31;
/* system pages are non continuously */
- if (params->src) {
+ if (params->pages_addr) {
*frag = 0;
*frag_end = end;
return;
@@ -1592,7 +1362,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
@@ -1603,7 +1373,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
@@ -1611,6 +1381,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm_pt_cursor cursor;
uint64_t frag_start = start, frag_end;
unsigned int frag;
+ int r;
/* figure out the initial fragment */
amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
@@ -1618,12 +1389,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the PTs */
amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
while (cursor.pfn < end) {
- struct amdgpu_bo *pt = cursor.entry->base.bo;
unsigned shift, parent_shift, mask;
uint64_t incr, entry_end, pe_start;
+ struct amdgpu_bo *pt;
- if (!pt)
- return -ENOENT;
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ if (r)
+ return r;
+
+ pt = cursor.entry->base.bo;
/* The root level can't be a huge page */
if (cursor.level == adev->vm_manager.root_level) {
@@ -1632,16 +1406,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
- /* If it isn't already handled it can't be a huge page */
- if (cursor.entry->huge) {
- /* Add the entry to the relocated list to update it. */
- cursor.entry->huge = false;
- amdgpu_vm_bo_relocated(&cursor.entry->base);
- }
-
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10) {
+ if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1697,9 +1465,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Mark all child entries as huge */
+ /* Free all child entries */
while (cursor.pfn < frag_start) {
- cursor.entry->huge = true;
+ amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
}
@@ -1738,137 +1506,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
uint64_t flags, uint64_t addr,
struct dma_fence **fence)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_vm_update_params params;
void *owner = AMDGPU_FENCE_OWNER_VM;
- unsigned nptes, ncmds, ndw;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
- if (vm->use_cpu_for_update) {
- /* params.src is used as flag to indicate system Memory */
- if (pages_addr)
- params.src = ~0;
-
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- params.func = amdgpu_vm_cpu_set_ptes;
- params.pages_addr = pages_addr;
- return amdgpu_vm_update_ptes(&params, start, last + 1,
- addr, flags);
- }
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
- nptes = last - start + 1;
-
- /*
- * reserve space for two commands every (1 << BLOCK_SIZE)
- * entries or 2k dwords (whatever is smaller)
- */
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
-
- /* The second command is for the shadow pagetables. */
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- /* padding, etc. */
- ndw = 64;
-
- if (pages_addr) {
- /* copy commands needed */
- ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
-
- /* and also PTEs */
- ndw += nptes * 2;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else {
- /* set page commands needed */
- ndw += ncmds * 10;
-
- /* extra commands for begin/end fragments */
- ncmds = 2 * adev->vm_manager.fragment_size;
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- ndw += 10 * ncmds;
-
- params.func = amdgpu_vm_do_set_ptes;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ r = vm->update_funcs->prepare(&params, owner, exclusive);
if (r)
return r;
- params.ib = &job->ibs[0];
-
- if (pages_addr) {
- uint64_t *pte;
- unsigned i;
-
- /* Put the PTEs at the end of the IB. */
- i = ndw - nptes * 2;
- pte= (uint64_t *)&(job->ibs->ptr[i]);
- params.src = job->ibs->gpu_addr + i * 4;
-
- for (i = 0; i < nptes; ++i) {
- pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
- AMDGPU_GPU_PAGE_SIZE);
- pte[i] |= flags;
- }
- addr = 0;
- }
-
- r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
- if (r)
- goto error_free;
-
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
- owner, false);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
- goto error_free;
-
- amdgpu_ring_pad_ib(ring, params.ib);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(vm->root.base.bo, f, true);
- dma_fence_put(*fence);
- *fence = f;
- return 0;
+ return r;
-error_free:
- amdgpu_job_free(job);
- return r;
+ return vm->update_funcs->commit(&params, fence);
}
/**
@@ -1880,6 +1539,7 @@ error_free:
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer of the device the BO was actually allocated on
* @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
@@ -1895,6 +1555,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t flags,
+ struct amdgpu_device *bo_adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
@@ -1949,7 +1610,6 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (pages_addr) {
uint64_t count;
- max_entries = min(max_entries, 16ull * 1024ull);
for (count = 1;
count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++count) {
@@ -1969,7 +1629,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
} else if (flags & AMDGPU_PTE_VALID) {
- addr += adev->vm_manager.vram_base_offset;
+ addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
@@ -2016,6 +1676,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct drm_mm_node *nodes;
struct dma_fence *exclusive, **last_update;
uint64_t flags;
+ struct amdgpu_device *bo_adev = adev;
int r;
if (clear || !bo) {
@@ -2034,10 +1695,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo)
+ if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
- else
+ bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ } else {
flags = 0x0;
+ }
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
last_update = &vm->last_update;
@@ -2054,7 +1717,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
- mapping, flags, nodes,
+ mapping, flags, bo_adev, nodes,
last_update);
if (r)
return r;
@@ -2374,6 +2037,16 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
+ if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+ bo_va->is_xgmi = true;
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ /* Power up XGMI if it can be potentially used */
+ if (++adev->vm_manager.xgmi_map_counter == 1)
+ amdgpu_xgmi_set_pstate(adev, 1);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
return bo_va;
}
@@ -2792,6 +2465,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
dma_fence_put(bo_va->last_pt_update);
+
+ if (bo && bo_va->is_xgmi) {
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ if (--adev->vm_manager.xgmi_map_counter == 0)
+ amdgpu_xgmi_set_pstate(adev, 0);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
kfree(bo_va);
}
@@ -2949,20 +2630,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
adev->vm_manager.fragment_size);
}
-static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
+/**
+ * amdgpu_vm_wait_idle - wait for the VM to become idle
+ *
+ * @vm: VM object to wait for
+ * @timeout: timeout to wait for VM to become idle
+ */
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- struct amdgpu_retryfault_hashtable *fault_hash;
-
- fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
- if (!fault_hash)
- return fault_hash;
-
- INIT_CHASH_TABLE(fault_hash->hash,
- AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spin_lock_init(&fault_hash->lock);
- fault_hash->count = 0;
-
- return fault_hash;
+ return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+ true, true, timeout);
}
/**
@@ -3018,6 +2695,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->use_cpu_for_update)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
@@ -3037,9 +2719,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root,
- adev->vm_manager.root_level,
- vm->pte_support_ats);
+ r = amdgpu_vm_clear_bo(adev, vm, root);
if (r)
goto error_unreserve;
@@ -3058,12 +2738,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->pasid = pasid;
}
- vm->fault_hash = init_fault_hash();
- if (!vm->fault_hash) {
- r = -ENOMEM;
- goto error_free_root;
- }
-
INIT_KFIFO(vm->faults);
return 0;
@@ -3083,6 +2757,37 @@ error_free_sched_entity:
}
/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables are already being created and filled, so the VM is not a
+ * clean one.
+ *
+ * Returns:
+ * 0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ enum amdgpu_vm_level root = adev->vm_manager.root_level;
+ unsigned int entries = amdgpu_vm_num_entries(adev, root);
+ unsigned int i = 0;
+
+ if (!(vm->root.entries))
+ return 0;
+
+ for (i = 0; i < entries; i++) {
+ if (vm->root.entries[i].base.bo)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
* @adev: amdgpu_device pointer
@@ -3112,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
return r;
/* Sanity checks */
- if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
- r = -EINVAL;
+ r = amdgpu_vm_check_clean_reserved(adev, vm);
+ if (r)
goto unreserve_bo;
- }
if (pasid) {
unsigned long flags;
@@ -3134,9 +2838,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
- adev->vm_manager.root_level,
- pte_support_ats);
+ vm->pte_support_ats = pte_support_ats;
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
if (r)
goto free_idr;
}
@@ -3144,7 +2847,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
@@ -3219,15 +2921,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- u64 fault;
int i, r;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- /* Clear pending page faults from IH when the VM is destroyed */
- while (kfifo_get(&vm->faults, &fault))
- amdgpu_vm_clear_fault(vm->fault_hash, fault);
-
if (vm->pasid) {
unsigned long flags;
@@ -3236,9 +2933,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- kfree(vm->fault_hash);
- vm->fault_hash = NULL;
-
drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3267,10 +2961,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_pts(adev, vm);
+ amdgpu_vm_free_pts(adev, vm, NULL);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3315,6 +3010,9 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
+
+ adev->vm_manager.xgmi_map_counter = 0;
+ mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3405,78 +3103,3 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
}
}
}
-
-/**
- * amdgpu_vm_add_fault - Add a page fault record to fault hash table
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a retry page fault interrupt is
- * received. If this is a new page fault, it will be added to a hash
- * table. The return value indicates whether this is a new fault, or
- * a fault that was already known and is already being handled.
- *
- * If there are too many pending page faults, this will fail. Retry
- * interrupts should be ignored in this case until there is enough
- * free space.
- *
- * Returns 0 if the fault was added, 1 if the fault was already known,
- * -ENOSPC if there are too many pending faults.
- */
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r = -ENOSPC;
-
- if (WARN_ON_ONCE(!fault_hash))
- /* Should be allocated in amdgpu_vm_init
- */
- return r;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- /* Only let the hash table fill up to 50% for best performance */
- if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
- goto unlock_out;
-
- r = chash_table_copy_in(&fault_hash->hash, key, NULL);
- if (!r)
- fault_hash->count++;
-
- /* chash_table_copy_in should never fail unless we're losing count */
- WARN_ON_ONCE(r < 0);
-
-unlock_out:
- spin_unlock_irqrestore(&fault_hash->lock, flags);
- return r;
-}
-
-/**
- * amdgpu_vm_clear_fault - Remove a page fault record
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a page fault has been handled. Any
- * future interrupt with this key will be processed as a new
- * page fault.
- */
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r;
-
- if (!fault_hash)
- return;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- r = chash_table_remove(&fault_hash->hash, key, NULL);
- if (!WARN_ON_ONCE(r < 0)) {
- fault_hash->count--;
- WARN_ON_ONCE(fault_hash->count < 0);
- }
-
- spin_unlock_irqrestore(&fault_hash->lock, flags);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 81ff8177f092..91baf95212a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,7 +30,6 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <linux/chash.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -140,7 +139,6 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
@@ -167,11 +165,6 @@ struct amdgpu_vm_pte_funcs {
uint32_t incr, uint64_t flags);
};
-#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
-#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
-#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
-
-
struct amdgpu_task_info {
char process_name[TASK_COMM_LEN];
char task_name[TASK_COMM_LEN];
@@ -179,11 +172,52 @@ struct amdgpu_task_info {
pid_t tgid;
};
-#define AMDGPU_PAGEFAULT_HASH_BITS 8
-struct amdgpu_retryfault_hashtable {
- DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spinlock_t lock;
- int count;
+/**
+ * struct amdgpu_vm_update_params
+ *
+ * Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ *
+ */
+struct amdgpu_vm_update_params {
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
+ struct amdgpu_device *adev;
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping
+ */
+ dma_addr_t *pages_addr;
+
+ /**
+ * @job: job to use for the HW submission
+ */
+ struct amdgpu_job *job;
+
+ /**
+ * @num_dw_left: number of dw left for the IB
+ */
+ unsigned int num_dw_left;
+};
+
+struct amdgpu_vm_update_funcs {
+ int (*map_table)(struct amdgpu_bo *bo);
+ int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive);
+ int (*update)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr, uint64_t flags);
+ int (*commit)(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence);
};
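For orientation, here is a minimal sketch of the calling convention that users of this new abstraction follow, mirroring amdgpu_vm_update_directories() and amdgpu_vm_bo_update_mapping() in the amdgpu_vm.c hunks above. The helper name example_vm_update() is hypothetical and not part of the patch; it assumes the usual amdgpu headers are in scope.

/* Illustrative sketch only -- not part of the patch. */
static int example_vm_update(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
			     unsigned count, uint32_t incr, uint64_t flags,
			     struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	int r;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;

	/* SDMA backend: allocate the job/IB and sync; CPU backend: wait for idle */
	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	/* write the PDEs/PTEs, either directly (CPU) or into the IB (SDMA) */
	r = vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
	if (r)
		return r;

	/* submit the IB (SDMA) or flush HDP (CPU) and hand back the fence */
	return vm->update_funcs->commit(&params, fence);
}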
struct amdgpu_vm {
@@ -221,7 +255,10 @@ struct amdgpu_vm {
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
- bool use_cpu_for_update;
+ bool use_cpu_for_update;
+
+ /* Functions to use for VM table updates */
+ const struct amdgpu_vm_update_funcs *update_funcs;
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
@@ -245,7 +282,6 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
- struct amdgpu_retryfault_hashtable *fault_hash;
};
struct amdgpu_vm_manager {
@@ -267,6 +303,7 @@ struct amdgpu_vm_manager {
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rqs;
+ struct amdgpu_ring *page_fault;
/* partial resident texture handling */
spinlock_t prt_lock;
@@ -283,14 +320,23 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
+
+ /* counter of mapped memory through xgmi */
+ uint32_t xgmi_map_counter;
+ struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
+
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
@@ -303,9 +349,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -319,6 +362,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -358,11 +402,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
new file mode 100644
index 000000000000..5222d165abfc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+{
+ return amdgpu_bo_kmap(table, NULL);
+}
+
+/**
+ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive)
+{
+ int r;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ if (unlikely(r))
+ return r;
+
+ /* Wait for any BO move to be completed */
+ if (exclusive) {
+ r = dma_fence_wait(exclusive, true);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_update - helper to update page tables via CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: kmap addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write count number of PT/PD entries directly.
+ */
+static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i;
+ uint64_t value;
+
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
+ for (i = 0; i < count; i++) {
+ value = p->pages_addr ?
+ amdgpu_vm_map_gart(p->pages_addr, addr) :
+ addr;
+ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
+ addr += incr;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_commit - commit page table update to the HW
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: unused
+ *
+ * Make sure that the hardware sees the page table updates.
+ */
+static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ /* Flush HDP */
+ mb();
+ amdgpu_asic_flush_hdp(p->adev, NULL);
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+ .map_table = amdgpu_vm_cpu_map_table,
+ .prepare = amdgpu_vm_cpu_prepare,
+ .update = amdgpu_vm_cpu_update,
+ .commit = amdgpu_vm_cpu_commit
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
new file mode 100644
index 000000000000..ddd181f5ed37
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
+#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
+
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+{
+ int r;
+
+ r = amdgpu_ttm_alloc_gart(&table->tbo);
+ if (r)
+ return r;
+
+ if (table->shadow)
+ r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
+
+ return r;
+}
+
+/**
+ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ void *owner, struct dma_fence *exclusive)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ int r;
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ owner, false);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_commit - commit SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: resulting fence
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ struct amdgpu_ib *ib = p->job->ibs;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f;
+ int r;
+
+ ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+
+ WARN_ON(ib->length_dw == 0);
+ amdgpu_ring_pad_ib(ring, ib);
+ WARN_ON(ib->length_dw > p->num_dw_left);
+ r = amdgpu_job_submit(p->job, &p->vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, f, true);
+ if (fence)
+ swap(*fence, f);
+ dma_fence_put(f);
+ return 0;
+
+error:
+ amdgpu_job_free(p->job);
+ return r;
+}
+
+
+/**
+ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @count: number of page entries to copy
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ unsigned count)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+ uint64_t src = ib->gpu_addr;
+
+ src += p->num_dw_left * 4;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+ amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
+}
+
+/**
+ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ if (count < 3) {
+ amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
+ count, incr);
+ } else {
+ amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_sdma_update - execute VM update
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Reserve space in the IB, set up the mapping buffer on demand and write the
+ * commands to the IB.
+ */
+static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i, ndw, nptes;
+ uint64_t *pte;
+ int r;
+
+ do {
+ ndw = p->num_dw_left;
+ ndw -= p->job->ibs->length_dw;
+
+ if (ndw < 32) {
+ r = amdgpu_vm_sdma_commit(p, NULL);
+ if (r)
+ return r;
+
+ /* estimate how many dw we need */
+ ndw = 32;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ }
+
+ if (!p->pages_addr) {
+ /* set page commands needed */
+ if (bo->shadow)
+ amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+ count, incr, flags);
+ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
+ incr, flags);
+ return 0;
+ }
+
+ /* copy commands needed */
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
+ (bo->shadow ? 2 : 1);
+
+ /* for padding */
+ ndw -= 7;
+
+ nptes = min(count, ndw / 2);
+
+ /* Put the PTEs at the end of the IB. */
+ p->num_dw_left -= nptes * 2;
+ pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
+ for (i = 0; i < nptes; ++i, addr += incr) {
+ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
+ pte[i] |= flags;
+ }
+
+ if (bo->shadow)
+ amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
+
+ pe += nptes * 8;
+ count -= nptes;
+ } while (count);
+
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+ .map_table = amdgpu_vm_sdma_map_table,
+ .prepare = amdgpu_vm_sdma_prepare,
+ .update = amdgpu_vm_sdma_update,
+ .commit = amdgpu_vm_sdma_commit
+};
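As a worked example of the IB bookkeeping in amdgpu_vm_sdma_update() above: when fewer than 32 dw remain, the current job is committed and a new one is sized from the remaining entry count. A sketch of that estimate (example_estimate_ndw() is a hypothetical helper, not part of the patch):

/* Illustrative sketch only -- not part of the patch. */
static unsigned int example_estimate_ndw(unsigned int count, bool pages_addr)
{
	unsigned int ndw = 32;		/* room for commands and padding */

	if (pages_addr)
		ndw += count * 2;	/* each PTE placed in the IB is 2 dw */

	/* clamp to the window the SDMA backend works with */
	ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);	/* 256 dw */
	ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);	/* 16k dw */

	return ndw;
}

The new job is then allocated with amdgpu_job_alloc_with_ib(adev, ndw * 4, ...), i.e. four bytes per dw.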
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3f9d5d00c9b3..ec9ea3fdbb4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -33,6 +33,85 @@ struct amdgpu_vram_mgr {
};
/**
+ * DOC: mem_info_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * VRAM available on the device.
+ * The file mem_info_vram_total is used for this and returns the total
+ * amount of VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+}
+
+/**
+ * DOC: mem_info_vis_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * visible VRAM available on the device.
+ * The file mem_info_vis_vram_total is used for this and returns the total
+ * amount of visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+}
+
+/**
+ * DOC: mem_info_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
+ * currently used on the device.
+ * The file mem_info_vram_used is used for this and returns the total
+ * amount of currently used VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+/**
+ * DOC: mem_info_vis_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of
+ * visible VRAM currently used on the device.
+ * The file mem_info_vis_vram_used is used for this and returns the total
+ * amount of currently used visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
+ amdgpu_mem_info_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
+ amdgpu_mem_info_vis_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
+ amdgpu_mem_info_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
+ amdgpu_mem_info_vis_vram_used_show, NULL);
+
+/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
* @man: TTM memory type manager
@@ -43,7 +122,9 @@ struct amdgpu_vram_mgr {
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -52,6 +133,29 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
drm_mm_init(&mgr->mm, 0, p_size);
spin_lock_init(&mgr->lock);
man->priv = mgr;
+
+ /* Add the two VRAM-related sysfs files */
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_used\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
+ return ret;
+ }
+
return 0;
}
@@ -65,6 +169,7 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
@@ -72,6 +177,10 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 407dd16cc35c..a48c84c51775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_smu.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -34,12 +35,132 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
}
+static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(attr, struct amdgpu_hive_info, dev_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
+}
+
+static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+
+ if (WARN_ON(hive->kobj))
+ return -EINVAL;
+
+ hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
+ if (!hive->kobj) {
+ dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
+ return -EINVAL;
+ }
+
+ hive->dev_attr = (struct device_attribute) {
+ .attr = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO,
+
+ },
+ .show = amdgpu_xgmi_show_hive_id,
+ };
+
+ ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+ }
+
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+}
+
+static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+
+}
+
+
+static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+
+
+static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+ char node[10] = { 0 };
+
+ /* Create xgmi device id file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
+ return ret;
+ }
+
+ /* Create sysfs link to hive info folder on the first device */
+ if (adev != hive->adev) {
+ ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+ "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link to hive info");
+ goto remove_file;
+ }
+ }
+
+ sprintf(node, "node%d", hive->number_devices);
+ /* Create sysfs link from the hive folder back to this device */
+ ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link from hive info");
+ goto remove_link;
+ }
+
+ goto success;
+
+
+remove_link:
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+
+remove_file:
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+
+success:
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+ sysfs_remove_link(hive->kobj, adev->ddev->unique);
+}
+
+
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
int i;
@@ -66,18 +187,50 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
/* initialize new hive if not exist */
tmp = &xgmi_hives[hive_count++];
+
+ if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
+ mutex_unlock(&xgmi_mutex);
+ return NULL;
+ }
+
+ tmp->adev = adev;
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+
if (lock)
mutex_lock(&tmp->hive_lock);
-
+ tmp->pstate = -1;
mutex_unlock(&xgmi_mutex);
return tmp;
}
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+ int ret = 0;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ if (!hive)
+ return 0;
+
+ if (hive->pstate == pstate)
+ return 0;
+
+ dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+
+ return ret;
+}
+
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
int ret = -EINVAL;
@@ -156,8 +309,17 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
break;
}
- dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ if (!ret)
+ ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+
+ if (!ret)
+ dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ else
+ dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
+ ret);
+
mutex_unlock(&hive->hive_lock);
exit:
@@ -176,9 +338,11 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return;
if (!(hive->number_devices--)) {
+ amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
}
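
The hive sysfs wiring above is the usual kobject/attribute pattern: a per-hive kobject exposes xgmi_hive_id through a show() callback that recovers the owning structure with container_of(), and each device additionally gets an xgmi_device_id file plus cross links. A minimal sketch of the same pattern, with hypothetical names (my_obj, my_val) and no claim to match the driver beyond what the hunk above shows:

    #include <linux/device.h>
    #include <linux/kobject.h>
    #include <linux/stat.h>
    #include <linux/sysfs.h>

    struct my_obj {
        struct kobject *kobj;
        struct device_attribute dev_attr;
        u64 my_val;
    };

    static ssize_t my_val_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
    {
        /* Recover the container from the embedded attribute, as the hive code does. */
        struct my_obj *obj = container_of(attr, struct my_obj, dev_attr);

        return snprintf(buf, PAGE_SIZE, "%llu\n", obj->my_val);
    }

    static int my_obj_sysfs_create(struct device *parent, struct my_obj *obj)
    {
        int ret;

        obj->kobj = kobject_create_and_add("my_obj_info", &parent->kobj);
        if (!obj->kobj)
            return -ENOMEM;

        obj->dev_attr = (struct device_attribute) {
            .attr = { .name = "my_val", .mode = S_IRUGO },
            .show = my_val_show,
        };

        ret = sysfs_create_file(obj->kobj, &obj->dev_attr.attr);
        if (ret) {
            kobject_put(obj->kobj); /* drops the reference and removes it from sysfs */
            obj->kobj = NULL;
        }
        return ret;
    }

Embedding the device_attribute inside the per-object structure is what lets the show() callback find its owner with container_of() and avoids any global lookup table.
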
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 14bc60664159..3e9c91e9a4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -29,13 +29,25 @@ struct amdgpu_hive_info {
struct list_head device_list;
struct psp_xgmi_topology_info topology_info;
int number_devices;
- struct mutex hive_lock,
- reset_lock;
+ struct mutex hive_lock, reset_lock;
+ struct kobject *kobj;
+ struct device_attribute dev_attr;
+ struct amdgpu_device *adev;
+ int pstate; /* 0 -- low, 1 -- high, -1 -- unknown */
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 189599b694e8..d42808b05971 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 305276c7e4bf..c0cb244f58cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -782,6 +782,25 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16);
+ tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
+ tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8e50a34bdb3..02955e6e9dd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3236,6 +3236,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
+ /* fall through */
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a11db2b1a63f..ba67d1023264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -40,6 +40,8 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+#include "amdgpu_ras.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -576,6 +578,27 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ break;
+ if ((adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ break;
+ default:
+ break;
+ }
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -828,6 +851,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
out:
+ gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
@@ -1639,6 +1663,18 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* ECC error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* FUE error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v9_0_scratch_init(adev);
@@ -1731,6 +1767,20 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
@@ -3303,6 +3353,7 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -3492,6 +3543,80 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int gfx_v9_0_ecc_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gfx_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__GFX,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "gfx",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
static int gfx_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3505,6 +3630,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = gfx_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return 0;
}
@@ -4541,6 +4670,45 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
return 0;
}
+#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 1)
+
+#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 0)
+
+static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 1);
+ DISABLE_ECC_ON_ME_PIPE(1, 2);
+ DISABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 0);
+ ENABLE_ECC_ON_ME_PIPE(1, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 2);
+ ENABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -4657,6 +4825,34 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE (uncorrectable error) will trigger an interrupt. */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -4818,6 +5014,12 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.process = gfx_v9_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v9_0_set_cp_ecc_error_state,
+ .process = gfx_v9_0_cp_ecc_error_irq,
+};
+
+
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -4828,6 +5030,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
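
gfx_v9_0_ecc_late_init() above (and gmc_v9_0_ecc_late_init() in the gmc_v9_0.c hunks further down) relies on the kernel's goto-based unwind ladder: each setup step that fails jumps to a label that tears down only what was already completed, in reverse order. The shape of that idiom, stripped of the RAS specifics and using hypothetical step_*/undo_* helpers:

    /* Hypothetical steps; each undo_*() reverses the matching step_*(). */
    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return 0; }
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int setup_with_unwind(void)
    {
        int r;

        r = step_a();
        if (r)
            goto out;           /* nothing to undo yet */

        r = step_b();
        if (r)
            goto undo_step_a;

        r = step_c();
        if (r)
            goto undo_step_b;

        return 0;

    undo_step_b:
        undo_b();
    undo_step_a:
        undo_a();
    out:
        return r;
    }

The labels are ordered so that a failure at step N falls through the cleanups for steps N-1 down to 1, which is exactly how the irq/sysfs/debugfs/interrupt/feature labels are chained in the ECC init paths above.
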
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index f5edddf3b29d..7bb5359d0bbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
@@ -236,7 +236,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fc3296592fe..b06d876da2d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -225,7 +225,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -383,20 +383,6 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -886,7 +872,7 @@ static int gmc_v6_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v6_0_init_microcode(adev);
if (r) {
@@ -1169,7 +1155,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
- .set_pte_pde = gmc_v6_0_set_pte_pde,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 761dcfb2fec0..75aa3332aee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -242,7 +242,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -460,31 +460,6 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v7_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
- *
- * Update the page tables using the CPU.
- */
-static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -1030,7 +1005,7 @@ static int gmc_v7_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v7_0_init_microcode(adev);
if (r) {
@@ -1376,7 +1351,6 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v7_0_set_pte_pde,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
.get_vm_pde = gmc_v7_0_get_vm_pde
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 34440672f938..8a3b5e6fc6c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -433,7 +433,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -662,50 +662,26 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v8_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VI:
+ * 63:40 reserved
+ * 39:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 reserved
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VI:
+ * 63:59 block fragment size
+ * 58:40 reserved
+ * 39:1 physical base address of PTE
+ * bits 5:1 must be 0.
+ * 0 valid
*/
-static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VI:
- * 63:40 reserved
- * 39:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 reserved
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VI:
- * 63:59 block fragment size
- * 58:40 reserved
- * 39:1 physical base address of PTE
- * bits 5:1 must be 0.
- * 0 valid
- */
- value = addr & 0x000000FFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -1155,7 +1131,7 @@ static int gmc_v8_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v8_0_init_microcode(adev);
if (r) {
@@ -1743,7 +1719,6 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v8_0_set_pte_pde,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
.get_vm_pde = gmc_v8_0_get_vm_pde
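
The set_pte_pde() helpers removed from gmc v6/v7/v8 here (and from gmc v9 below) all did what the retained layout comment describes: mask the physical address down to the page-address field and OR in the low flag bits before writing the 64-bit entry. A small stand-alone sketch of that encoding for the VI layout, reusing the address mask from the deleted gmc_v8_0 code and hypothetical flag names:

    #include <stdint.h>

    /* Low flag bits per the VI PTE layout documented above:
     * bit 0 valid, 1 system, 2 snooped, 4 exe, 5 read, 6 write. */
    #define PTE_VALID      (1ULL << 0)
    #define PTE_SYSTEM     (1ULL << 1)
    #define PTE_SNOOPED    (1ULL << 2)
    #define PTE_EXECUTE    (1ULL << 4)
    #define PTE_READABLE   (1ULL << 5)
    #define PTE_WRITEABLE  (1ULL << 6)

    /* Same composition the deleted gmc_v8_0_set_pte_pde() performed before
     * its writeq(): keep address bits 39:12, then add the flags. */
    static inline uint64_t vi_make_pte(uint64_t phys_addr, uint64_t flags)
    {
        return (phys_addr & 0x000000FFFFFFF000ULL) | flags;
    }

For example, vi_make_pte(page_addr, PTE_VALID | PTE_READABLE | PTE_WRITEABLE) builds an ordinary read/write mapping for a 4K-aligned page address.
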
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe8397241ea..3fd79e07944d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -47,6 +47,8 @@
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+#include "amdgpu_ras.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -84,121 +86,182 @@ static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
-/* Ecc related register addresses, (BASE + reg offset) */
-/* Universal Memory Controller caps (may be fused). */
-/* UMCCH:UmcLocalCap */
-#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
-
-/* Universal Memory Controller Channel config. */
-/* UMCCH:UMC_CONFIG */
-#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
-
-/* Universal Memory Controller Channel Ecc config. */
-/* UMCCH:EccCtrl */
-#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
-
-static const uint32_t ecc_umclocalcap_addrs[] = {
- UMCLOCALCAPS_ADDR0,
- UMCLOCALCAPS_ADDR1,
- UMCLOCALCAPS_ADDR2,
- UMCLOCALCAPS_ADDR3,
- UMCLOCALCAPS_ADDR4,
- UMCLOCALCAPS_ADDR5,
- UMCLOCALCAPS_ADDR6,
- UMCLOCALCAPS_ADDR7,
- UMCLOCALCAPS_ADDR8,
- UMCLOCALCAPS_ADDR9,
- UMCLOCALCAPS_ADDR10,
- UMCLOCALCAPS_ADDR11,
- UMCLOCALCAPS_ADDR12,
- UMCLOCALCAPS_ADDR13,
- UMCLOCALCAPS_ADDR14,
- UMCLOCALCAPS_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
+ (0x000143c0 + 0x00000000),
+ (0x000143c0 + 0x00000800),
+ (0x000143c0 + 0x00001000),
+ (0x000143c0 + 0x00001800),
+ (0x000543c0 + 0x00000000),
+ (0x000543c0 + 0x00000800),
+ (0x000543c0 + 0x00001000),
+ (0x000543c0 + 0x00001800),
+ (0x000943c0 + 0x00000000),
+ (0x000943c0 + 0x00000800),
+ (0x000943c0 + 0x00001000),
+ (0x000943c0 + 0x00001800),
+ (0x000d43c0 + 0x00000000),
+ (0x000d43c0 + 0x00000800),
+ (0x000d43c0 + 0x00001000),
+ (0x000d43c0 + 0x00001800),
+ (0x001143c0 + 0x00000000),
+ (0x001143c0 + 0x00000800),
+ (0x001143c0 + 0x00001000),
+ (0x001143c0 + 0x00001800),
+ (0x001543c0 + 0x00000000),
+ (0x001543c0 + 0x00000800),
+ (0x001543c0 + 0x00001000),
+ (0x001543c0 + 0x00001800),
+ (0x001943c0 + 0x00000000),
+ (0x001943c0 + 0x00000800),
+ (0x001943c0 + 0x00001000),
+ (0x001943c0 + 0x00001800),
+ (0x001d43c0 + 0x00000000),
+ (0x001d43c0 + 0x00000800),
+ (0x001d43c0 + 0x00001000),
+ (0x001d43c0 + 0x00001800),
};
-static const uint32_t ecc_umcch_umc_config_addrs[] = {
- UMCCH_UMC_CONFIG_ADDR0,
- UMCCH_UMC_CONFIG_ADDR1,
- UMCCH_UMC_CONFIG_ADDR2,
- UMCCH_UMC_CONFIG_ADDR3,
- UMCCH_UMC_CONFIG_ADDR4,
- UMCCH_UMC_CONFIG_ADDR5,
- UMCCH_UMC_CONFIG_ADDR6,
- UMCCH_UMC_CONFIG_ADDR7,
- UMCCH_UMC_CONFIG_ADDR8,
- UMCCH_UMC_CONFIG_ADDR9,
- UMCCH_UMC_CONFIG_ADDR10,
- UMCCH_UMC_CONFIG_ADDR11,
- UMCCH_UMC_CONFIG_ADDR12,
- UMCCH_UMC_CONFIG_ADDR13,
- UMCCH_UMC_CONFIG_ADDR14,
- UMCCH_UMC_CONFIG_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
+ (0x000143e0 + 0x00000000),
+ (0x000143e0 + 0x00000800),
+ (0x000143e0 + 0x00001000),
+ (0x000143e0 + 0x00001800),
+ (0x000543e0 + 0x00000000),
+ (0x000543e0 + 0x00000800),
+ (0x000543e0 + 0x00001000),
+ (0x000543e0 + 0x00001800),
+ (0x000943e0 + 0x00000000),
+ (0x000943e0 + 0x00000800),
+ (0x000943e0 + 0x00001000),
+ (0x000943e0 + 0x00001800),
+ (0x000d43e0 + 0x00000000),
+ (0x000d43e0 + 0x00000800),
+ (0x000d43e0 + 0x00001000),
+ (0x000d43e0 + 0x00001800),
+ (0x001143e0 + 0x00000000),
+ (0x001143e0 + 0x00000800),
+ (0x001143e0 + 0x00001000),
+ (0x001143e0 + 0x00001800),
+ (0x001543e0 + 0x00000000),
+ (0x001543e0 + 0x00000800),
+ (0x001543e0 + 0x00001000),
+ (0x001543e0 + 0x00001800),
+ (0x001943e0 + 0x00000000),
+ (0x001943e0 + 0x00000800),
+ (0x001943e0 + 0x00001000),
+ (0x001943e0 + 0x00001800),
+ (0x001d43e0 + 0x00000000),
+ (0x001d43e0 + 0x00000800),
+ (0x001d43e0 + 0x00001000),
+ (0x001d43e0 + 0x00001800),
};
-static const uint32_t ecc_umcch_eccctrl_addrs[] = {
- UMCCH_ECCCTRL_ADDR0,
- UMCCH_ECCCTRL_ADDR1,
- UMCCH_ECCCTRL_ADDR2,
- UMCCH_ECCCTRL_ADDR3,
- UMCCH_ECCCTRL_ADDR4,
- UMCCH_ECCCTRL_ADDR5,
- UMCCH_ECCCTRL_ADDR6,
- UMCCH_ECCCTRL_ADDR7,
- UMCCH_ECCCTRL_ADDR8,
- UMCCH_ECCCTRL_ADDR9,
- UMCCH_ECCCTRL_ADDR10,
- UMCCH_ECCCTRL_ADDR11,
- UMCCH_ECCCTRL_ADDR12,
- UMCCH_ECCCTRL_ADDR13,
- UMCCH_ECCCTRL_ADDR14,
- UMCCH_ECCCTRL_ADDR15,
+static const uint32_t ecc_umc_mcumc_status_addrs[] = {
+ (0x000143c2 + 0x00000000),
+ (0x000143c2 + 0x00000800),
+ (0x000143c2 + 0x00001000),
+ (0x000143c2 + 0x00001800),
+ (0x000543c2 + 0x00000000),
+ (0x000543c2 + 0x00000800),
+ (0x000543c2 + 0x00001000),
+ (0x000543c2 + 0x00001800),
+ (0x000943c2 + 0x00000000),
+ (0x000943c2 + 0x00000800),
+ (0x000943c2 + 0x00001000),
+ (0x000943c2 + 0x00001800),
+ (0x000d43c2 + 0x00000000),
+ (0x000d43c2 + 0x00000800),
+ (0x000d43c2 + 0x00001000),
+ (0x000d43c2 + 0x00001800),
+ (0x001143c2 + 0x00000000),
+ (0x001143c2 + 0x00000800),
+ (0x001143c2 + 0x00001000),
+ (0x001143c2 + 0x00001800),
+ (0x001543c2 + 0x00000000),
+ (0x001543c2 + 0x00000800),
+ (0x001543c2 + 0x00001000),
+ (0x001543c2 + 0x00001800),
+ (0x001943c2 + 0x00000000),
+ (0x001943c2 + 0x00000800),
+ (0x001943c2 + 0x00001000),
+ (0x001943c2 + 0x00001800),
+ (0x001d43c2 + 0x00000000),
+ (0x001d43c2 + 0x00000800),
+ (0x001d43c2 + 0x00001000),
+ (0x001d43c2 + 0x00001800),
};
+static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 bits, i, tmp, reg;
+
+ bits = 0x7f;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -244,62 +307,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry,
- uint64_t addr)
-{
- struct amdgpu_vm *vm;
- u64 key;
- int r;
-
- /* No PASID, can't identify faulting process */
- if (!entry->pasid)
- return true;
-
- /* Not a retry fault */
- if (!(entry->src_data[1] & 0x80))
- return true;
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- key = AMDGPU_VM_FAULT(entry->pasid, addr);
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-}
-
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -312,9 +319,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
- if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ entry->timestamp))
return 1; /* This also prevents sending it to KFD */
+ /* If it's the first fault for this address, process it normally */
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -350,10 +359,19 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
.process = gmc_v9_0_process_interrupt,
};
+
+static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
+ .set = gmc_v9_0_ecc_interrupt_state,
+ .process = gmc_v9_0_process_ecc_irq,
+};
+
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -466,64 +484,37 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, reg, pasid);
}
-/**
- * gmc_v9_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VEGA 10:
+ * 63:59 reserved
+ * 58:57 mtype
+ * 56 F
+ * 55 L
+ * 54 P
+ * 53 SW
+ * 52 T
+ * 50:48 reserved
+ * 47:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 Z
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VEGA 10:
+ * 63:59 block fragment size
+ * 58:55 reserved
+ * 54 P
+ * 53:48 reserved
+ * 47:6 physical base address of PD or PTE
+ * 5:3 reserved
+ * 2 C
+ * 1 system
+ * 0 valid
*/
-static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VEGA 10:
- * 63:59 reserved
- * 58:57 mtype
- * 56 F
- * 55 L
- * 54 P
- * 53 SW
- * 52 T
- * 50:48 reserved
- * 47:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 Z
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VEGA 10:
- * 63:59 block fragment size
- * 58:55 reserved
- * 54 P
- * 53:48 reserved
- * 47:6 physical base address of PD or PTE
- * 5:3 reserved
- * 2 C
- * 1 system
- * 0 valid
- */
-
- /*
- * The following is for PTE only. GART does not have PDEs.
- */
- value = addr & 0x0000FFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
- return 0;
-}
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -593,7 +584,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v9_0_set_pte_pde,
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.get_vm_pde = gmc_v9_0_get_vm_pde
};
@@ -620,85 +610,6 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
-{
- uint32_t reg_val;
- uint32_t reg_addr;
- uint32_t field_val;
- size_t i;
- uint32_t fv2;
- size_t lost_sheep;
-
- DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
-
- lost_sheep = 0;
- for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
- reg_addr = ecc_umclocalcap_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
- EccDis);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "EccDis: 0x%08x, ",
- reg_val, field_val);
- if (field_val) {
- DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
- reg_addr = ecc_umcch_umc_config_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
- DramReady);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "DramReady: 0x%08x\n",
- reg_val, field_val);
-
- if (!field_val) {
- DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
- reg_addr = ecc_umcch_eccctrl_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- WrEccEn);
- fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- RdEccEn);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "WrEccEn: 0x%08x, "
- "RdEccEn: 0x%08x\n",
- reg_val, field_val, fv2);
-
- if (!field_val) {
- DRM_DEBUG("ecc: WrEccEn is not set\n");
- ++lost_sheep;
- }
- if (!fv2) {
- DRM_DEBUG("ecc: RdEccEn is not set\n");
- ++lost_sheep;
- }
- }
-
- DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
- return lost_sheep == 0;
-}
-
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
@@ -751,31 +662,119 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gmc_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "umc",
+ };
int r;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool r;
+
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
r = gmc_v9_0_allocate_vm_inv_eng(adev);
if (r)
return r;
+ /* Check if ecc is available */
+ if (!amdgpu_sriov_vf(adev)) {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
+ adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else {
+ DRM_INFO("ECC is active.\n");
+ }
- if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
- r = gmc_v9_0_ecc_available(adev);
- if (r == 1) {
- DRM_INFO("ECC is active.\n");
- } else if (r == 0) {
- DRM_INFO("ECC is not present.\n");
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
- return r;
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("SRAM ECC is not present.\n");
+ } else {
+ DRM_INFO("SRAM ECC is active.\n");
+ }
+ break;
+ default:
+ break;
}
}
+ r = gmc_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -787,7 +786,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
base = mmhub_v1_0_get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
if (!amdgpu_sriov_vf(adev))
amdgpu_gmc_agp_location(adev, mc);
@@ -987,6 +986,12 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
@@ -1011,7 +1016,7 @@ static int gmc_v9_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1052,6 +1057,22 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->gmc.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /*remove fs first*/
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /*remove the IH*/
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1198,6 +1219,7 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v9_0_gart_disable(adev);
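
gmc_v9_0_ecc_interrupt_state() above toggles the UMC ECC interrupt by applying the same 7-bit mask, read-modify-write, to every entry of the two register-offset tables. Reduced to its core loop, with a toy register file standing in for the driver's RREG32()/WREG32() MMIO accessors:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Toy register file for the sketch; the real code uses MMIO accessors. */
    static uint32_t fake_regs[8];
    static uint32_t reg_read(uint32_t reg)              { return fake_regs[reg]; }
    static void     reg_write(uint32_t reg, uint32_t v) { fake_regs[reg] = v; }

    /* Set or clear the same bit mask across a whole table of registers,
     * the way the ECC state handler walks ecc_umc_mcumc_ctrl_addrs[] and
     * ecc_umc_mcumc_ctrl_mask_addrs[]. */
    static void rmw_bits(const uint32_t *regs, size_t count,
                         uint32_t bits, bool enable)
    {
        size_t i;

        for (i = 0; i < count; i++) {
            uint32_t tmp = reg_read(regs[i]);

            if (enable)
                tmp |= bits;
            else
                tmp &= ~bits;
            reg_write(regs[i], tmp);
        }
    }

With bits set to 0x7f and enable chosen from the AMDGPU_IRQ_STATE_* value, this is essentially what the enable and disable branches above do.
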
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 0c9a2c03504e..f2e6b148ccad 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2824,7 +2824,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 1696644ec022..41a9a5779623 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -163,7 +163,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
@@ -256,7 +256,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851ebb3833..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+ int r = 0;
+ u32 req, val, size;
+
+ if (!amdgim_is_hwperf(adev) || buf == NULL)
+ return -EBADRQC;
+
+ switch(type) {
+ case PP_SCLK:
+ req = IDH_IRQ_GET_PP_SCLK;
+ break;
+ case PP_MCLK:
+ req = IDH_IRQ_GET_PP_MCLK;
+ break;
+ default:
+ return -EBADRQC;
+ }
+
+ mutex_lock(&adev->virt.dpm_mutex);
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r && adev->fw_vram_usage.va != NULL) {
+ val = RREG32_NO_KIQ(
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+ val), PAGE_SIZE);
+
+ if (size < PAGE_SIZE)
+ strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+ else
+ size = 0;
+
+ r = size;
+ goto out;
+ }
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if(r)
+ pr_info("%s DPM request failed",
+ (type == PP_SCLK)? "SCLK" : "MCLK");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+ int r = 0;
+ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+ if (!amdgim_is_hwperf(adev))
+ return -EBADRQC;
+
+ mutex_lock(&adev->virt.dpm_mutex);
+ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r)
+ goto out;
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("DPM request failed");
+ else
+ pr_info("Mailbox is broken");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -296,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work);
break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it byfar since that polling thread will handle it,
* other msg like flr complete is not handled here.
@@ -375,4 +454,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9ceea334b..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_IRQ_FORCE_DPM_LEVEL = 10,
+ IDH_IRQ_GET_PP_SCLK,
+ IDH_IRQ_GET_PP_MCLK,
+
IDH_LOG_VF_ERROR = 200,
};
@@ -43,6 +47,9 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
+ IDH_QUERY_ALIVE,
IDH_EVENT_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 6a0fcd67662a..aef9d059ae52 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
/* wait until RCV_MSG become 3 */
if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
+ pr_err("failed to receive FLR_CMPL\n");
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index cc967dbfd631..6590143c3f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -118,7 +118,8 @@ static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ BIF_IH_DOORBELL_RANGE, SIZE, 6);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f3a7d207af07..2f79765b4bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 860b70d80d3c..b91df7bd1d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -33,6 +33,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
@@ -113,6 +116,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
+ adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
+ adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
+ le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
}
return 0;
@@ -217,6 +227,37 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
+static void psp_v11_0_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -224,6 +265,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v11_0_reroute_ih(psp);
+
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -631,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
@@ -679,6 +722,54 @@ static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id
return 0;
}
+static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
+{
+#if 0
+ // not supported yet.
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
+ ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+#else
+ return -EINVAL;
+#endif
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
@@ -695,6 +786,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
.support_vmr_ring = psp_v11_0_support_vmr_ring,
+ .ras_trigger_error = psp_v11_0_ras_trigger_error,
+ .ras_cure_posion = psp_v11_0_ras_cure_posion,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 0487e3a4e9e7..143f0fae69d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -37,6 +37,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_6_1_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
@@ -252,6 +255,37 @@ static int psp_v3_1_ring_init(struct psp_context *psp,
return 0;
}
+static void psp_v3_1_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -260,6 +294,8 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v3_1_reroute_ih(psp);
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
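psp_v3_1_reroute_ih() above issues the reroute command twice with the same three-step mailbox pattern: write the argument registers, write the command register, then poll the command register for the ready bit. Below is a minimal userspace model of that handshake; the register array, the command code and the fake completion inside the poll loop are stand-ins for WREG32_SOC15/psp_wait_for and GFX_CTRL_CMD_ID_GBR_IH_SET, which are kernel-internal.

/* Model of a write-args / write-cmd / poll-status mailbox handshake. */
#include <stdint.h>
#include <stdio.h>

#define REG_ARG0   69
#define REG_ARG1   70
#define REG_CMD    64
#define CMD_READY  0x80000000u

static uint32_t regs[128];               /* pretend MMIO space */

static void wreg(unsigned int reg, uint32_t val) { regs[reg] = val; }
static uint32_t rreg(unsigned int reg)           { return regs[reg]; }

/* Stand-in for psp_wait_for(): poll until (value & mask) == expect. */
static int wait_for(unsigned int reg, uint32_t expect, uint32_t mask)
{
    for (int i = 0; i < 1000; i++) {
        if ((rreg(reg) & mask) == (expect & mask))
            return 0;
        regs[REG_CMD] |= CMD_READY;      /* fake the firmware acking, so the model terminates */
    }
    return -1;
}

static int send_ih_reroute(uint32_t ring_sel, uint32_t client_cfg, uint32_t cmd)
{
    wreg(REG_ARG0, ring_sel);            /* which IH client to reroute */
    wreg(REG_ARG1, client_cfg);          /* packed IH_CLIENT_CFG_DATA value */
    wreg(REG_CMD, cmd);                  /* kick the firmware */
    return wait_for(REG_CMD, CMD_READY, CMD_READY);
}

int main(void)
{
    /* Two back-to-back reroutes, mirroring the VMC and UMC cases above.
     * 0x20 is an arbitrary command code for the sketch only. */
    if (send_ih_reroute(3, 0x1244b, 0x20) || send_ih_reroute(4, 0x1216b, 0x20))
        return 1;
    puts("both reroutes acknowledged");
    return 0;
}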
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index cca3552b36ed..36196372e8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 0ce8331baeb2..6d39544e7829 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c816e55d43a9..9c88ce513d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -41,6 +41,8 @@
#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+#include "amdgpu_ras.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -154,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -184,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -849,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -940,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_PAGE_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -1493,6 +1493,87 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int sdma_v4_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__SDMA,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "sdma",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ if (r)
+ goto irq;
+
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ goto irq;
+ }
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
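sdma_v4_0_late_init() above registers the SDMA RAS block in a fixed order (feature enable, IH handler, debugfs, sysfs, then the two ECC interrupts) and unwinds in reverse through the goto ladder when any step fails. The sketch below models that register-in-order / roll-back-in-reverse pattern with stand-in step functions, since the amdgpu_ras_* helpers themselves are kernel-internal.

/* Model of the register-in-order / unwind-in-reverse pattern. */
#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("%s: %s\n", name, fail ? "failed" : "ok");
    return fail ? -1 : 0;
}

static void undo(const char *name) { printf("undo %s\n", name); }

static int ras_late_init(int fail_at)
{
    if (step("feature_enable", fail_at == 1))
        goto feature;
    if (step("interrupt_add", fail_at == 2))
        goto interrupt;
    if (step("debugfs_create", fail_at == 3))
        goto debugfs;
    if (step("sysfs_create", fail_at == 4))
        goto sysfs;
    if (step("irq_get", fail_at == 5))
        goto irq;
    return 0;

irq:       undo("sysfs_create");
sysfs:     undo("debugfs_create");
debugfs:   undo("interrupt_add");
interrupt: undo("feature_enable");
feature:   return -1;
}

int main(void)
{
    /* Fail at the sysfs step: everything registered before it is undone,
     * the failed step itself needs no cleanup. */
    return ras_late_init(4) == -1 ? 0 : 1;
}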
@@ -1511,6 +1592,18 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1526,8 +1619,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
@@ -1546,8 +1639,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1561,6 +1654,22 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /*remove fs first*/
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /*remove the IH*/
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
if (adev->sdma.has_page_queue)
@@ -1598,6 +1707,9 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
@@ -1666,13 +1778,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
+ sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
+ WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
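With the enum renamed from ..._TRAP0/1 to ..._INSTANCE0/1, the irq type value is now used directly as the SDMA instance index in sdma_v4_0_set_trap_irq_state() instead of being translated first. A tiny sketch of that convention, using a made-up enum and a per-instance shadow array rather than the real register macros:

/* The irq "type" doubles as the engine instance index. */
#include <stdio.h>

enum sdma_irq_instance {                 /* mirrors AMDGPU_SDMA_IRQ_INSTANCE0/1 */
    SDMA_IRQ_INSTANCE0 = 0,
    SDMA_IRQ_INSTANCE1 = 1,
    SDMA_IRQ_LAST
};

static unsigned int cntl_reg[SDMA_IRQ_LAST];   /* per-instance CNTL shadow */

static void set_trap_irq_state(enum sdma_irq_instance type, int enable)
{
    /* No TRAPx -> instance translation needed any more. */
    cntl_reg[type] = enable ? 1 : 0;
}

int main(void)
{
    set_trap_irq_state(SDMA_IRQ_INSTANCE1, 1);
    printf("instance1 trap enable = %u\n", cntl_reg[SDMA_IRQ_INSTANCE1]);
    return 0;
}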
@@ -1714,6 +1825,58 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance, err_source;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
+ err_source = 0;
+ break;
+ case SDMA0_4_0__SRCID__SDMA_ECC:
+ err_source = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_UE;
+}
+
+static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1741,6 +1904,25 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
+
+ sdma_edc_config = RREG32(reg_offset);
+ sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_edc_config);
+
+ return 0;
+}
+
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
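sdma_v4_0_set_ecc_irq_state() above is a textbook read-modify-write of a single bitfield: read SDMA0_EDC_CONFIG, update ECC_INT_ENABLE via REG_SET_FIELD, write it back. The standalone sketch below reimplements that pattern with a hand-rolled field helper; the real REG_SET_FIELD is generated from the register headers and takes register/field names rather than raw masks, and the bit position used here is made up.

/* Hand-rolled model of a REG_SET_FIELD style read-modify-write. */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT  3                   /* made-up bit position for the sketch */
#define FIELD_MASK   (1u << FIELD_SHIFT)

static uint32_t edc_config = 0xA5;       /* pretend current register value */

static uint32_t rreg(void)        { return edc_config; }
static void     wreg(uint32_t v)  { edc_config = v; }

static uint32_t set_field(uint32_t reg, uint32_t mask, unsigned int shift,
                          uint32_t val)
{
    return (reg & ~mask) | ((val << shift) & mask);
}

static void set_ecc_irq_state(int enable)
{
    uint32_t v = rreg();                                 /* read   */
    v = set_field(v, FIELD_MASK, FIELD_SHIFT,            /* modify */
                  enable ? 1 : 0);
    wreg(v);                                             /* write  */
}

int main(void)
{
    set_ecc_irq_state(1);
    printf("EDC_CONFIG = 0x%08x\n", edc_config);
    set_ecc_irq_state(0);
    printf("EDC_CONFIG = 0x%08x\n", edc_config);
    return 0;
}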
@@ -1906,7 +2088,7 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
- .late_init = NULL,
+ .late_init = sdma_v4_0_late_init,
.sw_init = sdma_v4_0_sw_init,
.sw_fini = sdma_v4_0_sw_fini,
.hw_init = sdma_v4_0_hw_init,
@@ -2008,11 +2190,20 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
.process = sdma_v4_0_process_illegal_inst_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
+ .set = sdma_v4_0_set_ecc_irq_state,
+ .process = sdma_v4_0_process_ecc_irq,
+};
+
+
+
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
}
/**
@@ -2077,8 +2268,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- if (adev->sdma.has_page_queue)
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
else
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
@@ -2097,15 +2288,22 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.has_page_queue)
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
+ for (i = 1; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].page.sched;
- else
+ adev->vm_manager.vm_pte_rqs[i - 1] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
+ adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
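The buffer-funcs and VM-PTE hunks above share one rule: when a page queue exists and there is more than one SDMA instance, buffer moves go to instance 1's page ring, page-table updates use the page rings of instances 1..N-1, and instance 0's page ring is reserved for page-fault handling; otherwise everything falls back to the gfx rings. A decision-only sketch of that selection, with ring objects reduced to printed strings and the counts passed in as parameters:

/* Decision sketch: which SDMA rings serve which purpose. */
#include <stdio.h>

static void pick_rings(int has_page_queue, int num_instances)
{
    if (has_page_queue && num_instances > 1) {
        printf("buffer moves      -> instance 1 page ring\n");
        printf("page-fault ring   -> instance 0 page ring\n");
        for (int i = 1; i < num_instances; i++)
            printf("vm pte updates    -> instance %d page ring\n", i);
    } else {
        printf("buffer moves      -> instance 0 gfx ring\n");
        for (int i = 0; i < num_instances; i++)
            printf("vm pte updates    -> instance %d gfx ring\n", i);
    }
}

int main(void)
{
    puts("-- page queue present, 2 instances --");
    pick_rings(1, 2);
    puts("-- single-instance fallback --");
    pick_rings(0, 1);
    return 0;
}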
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f15f196684ba..3eeefd40dae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
@@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 41e01a7f57a4..d57e75e5c71f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -4098,14 +4098,13 @@ static int si_notify_smc_display_change(struct amdgpu_device *adev,
static void si_program_response_times(struct amdgpu_device *adev)
{
- u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ed89a101f73f..4900e4958dec 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -63,6 +63,7 @@
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
+#include "amdgpu_smu.h"
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
@@ -392,6 +393,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
u32 i;
+ int ret = 0;
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
@@ -402,7 +404,9 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
pci_save_state(adev->pdev);
- psp_gpu_reset(adev);
+ ret = psp_gpu_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
pci_restore_state(adev->pdev);
@@ -417,7 +421,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
- return 0;
+ return ret;
}
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
@@ -451,6 +455,8 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU BACO reset\n");
+ adev->in_baco_reset = 1;
+
return 0;
}
@@ -461,8 +467,15 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
soc15_asic_get_baco_capability(adev, &baco_reset);
break;
+ case CHIP_VEGA20:
+ if (adev->psp.sos_fw_version >= 0x80067)
+ soc15_asic_get_baco_capability(adev, &baco_reset);
+ else
+ baco_reset = false;
+ break;
default:
baco_reset = false;
break;
@@ -602,8 +615,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (is_support_sw_smu(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ }
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -884,7 +901,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
} else if (adev->pdev->device == 0x15d8) {
- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
@@ -927,7 +945,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
new file mode 100644
index 000000000000..0b4e7b55595a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -0,0 +1,108 @@
+/****************************************************************************\
+*
+* File Name ta_ras_if.h
+* Project AMD PSP SW IP Module
+*
+* Description Interface to the RAS Trusted Application
+*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+* and associated documentation files (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in all copies or substantial
+* portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+#ifndef _TA_RAS_IF_H
+#define _TA_RAS_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+#define TA_NUM_BLOCK_MAX 14
+
+enum ras_command {
+ TA_RAS_COMMAND__ENABLE_FEATURES = 0,
+ TA_RAS_COMMAND__DISABLE_FEATURES,
+ TA_RAS_COMMAND__TRIGGER_ERROR,
+};
+
+enum ta_ras_status {
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0x01,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05
+};
+
+enum ta_ras_block {
+ TA_RAS_BLOCK__UMC = 0,
+ TA_RAS_BLOCK__SDMA,
+ TA_RAS_BLOCK__GFX,
+ TA_RAS_BLOCK__MMHUB,
+ TA_RAS_BLOCK__ATHUB,
+ TA_RAS_BLOCK__PCIE_BIF,
+ TA_RAS_BLOCK__HDP,
+ TA_RAS_BLOCK__XGMI_WAFL,
+ TA_RAS_BLOCK__DF,
+ TA_RAS_BLOCK__SMN,
+ TA_RAS_BLOCK__SEM,
+ TA_RAS_BLOCK__MP0,
+ TA_RAS_BLOCK__MP1,
+ TA_RAS_BLOCK__FUSE = (TA_NUM_BLOCK_MAX - 1),
+};
+
+enum ta_ras_error_type {
+ TA_RAS_ERROR__NONE = 0,
+ TA_RAS_ERROR__PARITY = 1,
+ TA_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ TA_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ TA_RAS_ERROR__POISON = 8
+};
+
+struct ta_ras_enable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_disable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_trigger_error_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type inject_error_type;
+ uint32_t sub_block_index;
+ uint64_t address;
+ uint64_t value;
+};
+
+union ta_ras_cmd_input {
+ struct ta_ras_enable_features_input enable_features;
+ struct ta_ras_disable_features_input disable_features;
+ struct ta_ras_trigger_error_input trigger_error;
+};
+
+struct ta_ras_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_ras_status ras_status;
+ uint32_t reserved;
+ union ta_ras_cmd_input ras_in_message;
+};
+
+#endif /* _TA_RAS_IF_H */
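Given the new ta_ras_if.h above, a caller fills the ta_ras_cmd_input member that matches cmd_id, and the TA answers with a resp_id carrying RSP_ID_MASK. The fragment below only exercises those definitions; it assumes ta_ras_if.h is available on the include path and stubs out the actual submission to the trusted application.

/* Exercise the ta_ras_if.h types; submission to the TA is stubbed out. */
#include <stdint.h>
#include <stdio.h>
#include "ta_ras_if.h"                   /* assumed to be on the include path */

static void fake_submit(struct ta_ras_shared_memory *buf)
{
    /* A real TA response echoes the command id with bit 31 set. */
    buf->resp_id = RSP_ID(buf->cmd_id);
    buf->ras_status = TA_RAS_STATUS__SUCCESS;
}

int main(void)
{
    struct ta_ras_shared_memory buf = { 0 };

    buf.cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
    buf.ras_in_message.trigger_error = (struct ta_ras_trigger_error_input) {
        .block_id          = TA_RAS_BLOCK__SDMA,
        .inject_error_type = TA_RAS_ERROR__MULTI_UNCORRECTABLE,
        .sub_block_index   = 0,
        .address           = 0,
        .value             = 0,
    };

    fake_submit(&buf);

    printf("resp_id matches: %s, status: %d\n",
           buf.resp_id == RSP_ID(TA_RAS_COMMAND__TRIGGER_ERROR) ? "yes" : "no",
           buf.ras_status);
    return buf.ras_status == TA_RAS_STATUS__SUCCESS ? 0 : 1;
}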
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
0xFFFFFFFF, 0x00000004);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
}
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bed78a778e3f..40363ca6c5f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
}
if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
+ DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index aadc3e66ebd7..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+ offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+ uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+ uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
- mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+ (tmr_mc_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+ offset & ~0x0f000000);
+
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
(adev->vce.gpu_addr >> 40) & 0xff);
- offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
- offset & ~0x0f000000);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
@@ -382,6 +387,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
+ /* Disable VCPU */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
/* hold on ECPU */
@@ -389,8 +395,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- /* clear BUSY flag */
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ /* clear VCE_STATUS */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
/* Set Clock-Gating off */
/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -922,6 +928,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
return 0;
}
+#endif
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
@@ -935,16 +942,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
- return 0;
-
if (state == AMD_PG_STATE_GATE)
- /* XXX do we need a vce_v4_0_stop()? */
- return 0;
+ return vce_v4_0_stop(adev);
else
return vce_v4_0_start(adev);
}
-#endif
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
@@ -1059,7 +1061,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.soft_reset = NULL /* vce_v4_0_soft_reset */,
.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
- .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
+ .set_powergating_state = vce_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 6d1f804277f8..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
#include "soc15_common.h"
#include "vega10_ih.h"
-
+#define MAX_REARM_RETRY 10
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -136,6 +136,25 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
+static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
@@ -150,8 +169,8 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih;
+ u32 ih_rb_cntl;
int ret = 0;
- u32 ih_rb_cntl, ih_doorbell_rtpr;
u32 tmp;
/* disable irqs */
@@ -177,23 +196,11 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
upper_32_bits(ih->wptr_addr) & 0xFFFF);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (adev->irq.ih.use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- adev->irq.ih.doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ vega10_ih_doorbell_rptr(ih));
ih = &adev->irq.ih1;
if (ih->ring_size) {
@@ -203,11 +210,18 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ vega10_ih_doorbell_rptr(ih));
}
ih = &adev->irq.ih2;
@@ -216,13 +230,16 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
(ih->gpu_addr >> 40) & 0xff);
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -365,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* vega10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
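vega10_ih_irq_rearm() above re-writes the rptr doorbell a bounded number of times while the hardware read pointer still disagrees with the software one, which is how a lost doorbell write under SR-IOV is recovered. Below is a bounded-retry model of that loop; the doorbell helper pretends the first write is lost so the retry actually does something.

/* Bounded retry of a doorbell write until the HW catches up (model). */
#include <stdint.h>
#include <stdio.h>

#define MAX_REARM_RETRY 10

static uint32_t hw_rptr;                 /* what the hardware reports */

static void write_doorbell(uint32_t rptr)
{
    /* Pretend the first write is lost and the second one lands. */
    static int writes;
    if (++writes >= 2)
        hw_rptr = rptr;
}

static void irq_rearm(uint32_t sw_rptr, uint32_t ring_size)
{
    for (int i = 0; i < MAX_REARM_RETRY; i++) {
        uint32_t v = hw_rptr;
        /* Keep re-writing only while the HW pointer looks stale. */
        if (v < ring_size && v != sw_rptr)
            write_doorbell(sw_rptr);
        else
            break;
    }
}

int main(void)
{
    hw_rptr = 0;
    irq_rearm(64, 4096);
    printf("hw rptr after rearm: %u\n", hw_rptr);
    return hw_rptr == 64 ? 0 : 1;
}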
@@ -378,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega10_ih_irq_rearm(adev, ih);
} else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
} else if (ih == &adev->irq.ih1) {
@@ -449,20 +501,23 @@ static int vega10_ih_sw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_VEGA10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
- }
-
- /* TODO add doorbell for IH1 & IH2 as well */
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index cf9a49f49d3a..c1e4d44d6137 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -467,6 +467,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
memset(&kfd->doorbell_available_index, 0,
sizeof(kfd->doorbell_available_index));
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return kfd;
}
@@ -492,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
{
unsigned int size;
- kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
- kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -662,6 +664,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return ret;
count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count != 0, "KFD reset ref. error");
+
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return 0;
}
@@ -1025,6 +1030,12 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
return 0;
}
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+ if (kfd)
+ atomic_inc(&kfd->sram_ecc_flag);
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
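The kfd_device.c changes above introduce sram_ecc_flag as a simple atomic latch: the SDMA RAS callback raises it via kgd2kfd_set_sram_ecc_flag() when an uncorrectable SRAM ECC error fires, kfd_signal_reset_event() reads it to classify the reset, and probe/post-reset clear it. A userspace model of that latch using C11 atomics is sketched below; the kernel uses atomic_t with atomic_inc/atomic_read/atomic_set, which behave equivalently for this purpose.

/* Model of the sram_ecc_flag latch with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int sram_ecc_flag;

/* Raised from the ECC interrupt path. */
static void set_sram_ecc_flag(void)   { atomic_fetch_add(&sram_ecc_flag, 1); }

/* Consumed when classifying a GPU reset. */
static const char *reset_cause(void)
{
    return atomic_load(&sram_ecc_flag) ? "ECC" : "GPU_HANG";
}

/* Cleared on probe and after a completed reset. */
static void clear_sram_ecc_flag(void) { atomic_store(&sram_ecc_flag, 0); }

int main(void)
{
    clear_sram_ecc_flag();
    printf("cause before error: %s\n", reset_cause());
    set_sram_ecc_flag();
    printf("cause after error:  %s\n", reset_cause());
    clear_sram_ecc_flag();
    printf("cause after reset:  %s\n", reset_cause());
    return 0;
}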
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index e9f0e0a1b41c..6e1d41c5bf86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1011,25 +1011,41 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
void kfd_signal_reset_event(struct kfd_dev *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_process *p;
struct kfd_event *ev;
unsigned int temp;
uint32_t id, idx;
+ int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
+ KFD_HW_EXCEPTION_ECC :
+ KFD_HW_EXCEPTION_GPU_HANG;
/* Whole gpu reset caused by GPU hang and memory is lost */
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.gpu_id = dev->id;
hw_exception_data.memory_lost = 1;
+ hw_exception_data.reset_cause = reset_cause;
+
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = true;
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
mutex_lock(&p->event_mutex);
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- idr_for_each_entry_continue(&p->event_idr, ev, id)
+ idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
ev->hw_exception_data = hw_exception_data;
set_event(ev);
}
+ if (ev->type == KFD_EVENT_TYPE_MEMORY &&
+ reset_cause == KFD_HW_EXCEPTION_ECC) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+ }
mutex_unlock(&p->event_mutex);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0eeee3c6d6dc..9e0230965675 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -276,6 +276,9 @@ struct kfd_dev {
uint64_t hive_id;
bool pci_atomic_requested;
+
+ /* SRAM ECC flag */
+ atomic_t sram_ecc_flag;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 09da91644f9f..769dbc7be8cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -37,6 +37,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
@@ -1197,6 +1198,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
+ struct amdgpu_ras *ctx;
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1270,8 +1272,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
- gpu->pdev->devfn);
+ dev->node_props.location_id = pci_dev_id(gpu->pdev);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
@@ -1328,6 +1329,20 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
}
+ ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
+ if (ctx) {
+ /* kfd only concerns sram ecc on GFX/SDMA and HBM ecc on UMC */
+ dev->node_props.capability |=
+ (((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
+ ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
+ HSA_CAP_SRAM_EDCSUPPORTED : 0;
+ dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ HSA_CAP_MEM_EDCSUPPORTED : 0;
+
+ dev->node_props.capability |= (ctx->features != 0) ?
+ HSA_CAP_RASEVENTNOTIFY : 0;
+ }
+
kfd_debug_print_topology();
if (!res)
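kfd_topology_add_device() above folds the RAS context's per-block feature mask into three HSA capability bits: SRAM EDC if GFX or SDMA RAS is enabled, memory EDC if UMC RAS is enabled, and the generic RAS-event bit if any block is enabled. A small self-contained sketch of that mapping follows; the capability values are taken from the kfd_topology.h hunk below, while the block bit positions are purely illustrative.

/* Map a RAS feature mask to HSA capability bits (illustrative indices). */
#include <stdint.h>
#include <stdio.h>

#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED  0x00100000
#define HSA_CAP_RASEVENTNOTIFY    0x00200000

enum ras_block { RAS_BLOCK_UMC = 0, RAS_BLOCK_SDMA = 1, RAS_BLOCK_GFX = 2 };

static uint32_t ras_caps(uint32_t features)
{
    uint32_t cap = 0;

    if (features & ((1u << RAS_BLOCK_SDMA) | (1u << RAS_BLOCK_GFX)))
        cap |= HSA_CAP_SRAM_EDCSUPPORTED;    /* on-chip SRAM is protected */
    if (features & (1u << RAS_BLOCK_UMC))
        cap |= HSA_CAP_MEM_EDCSUPPORTED;     /* device memory is protected */
    if (features)
        cap |= HSA_CAP_RASEVENTNOTIFY;       /* some RAS block is active */

    return cap;
}

int main(void)
{
    printf("caps = 0x%08x\n", ras_caps((1u << RAS_BLOCK_UMC) |
                                       (1u << RAS_BLOCK_GFX)));
    return 0;
}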
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 92a19be07344..84710cfd23c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -48,6 +48,10 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3082b55b1e77..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -111,7 +111,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
- unsigned long possible_crtcs);
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
@@ -137,30 +138,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-
-
-static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
-};
-
-static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
-};
-
-static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
-};
-
/*
* dm_vblank_get_counter
*
@@ -275,12 +252,22 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
}
+static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+{
+ return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
+ dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+}
+
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -303,26 +290,116 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
- /* Update to correct count(s) if racing with vblank irq */
- amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
- /* wake up userspace */
- if (amdgpu_crtc->event) {
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+ if (!e)
+ WARN_ON(1);
- /* page flip completed. clean up */
- amdgpu_crtc->event = NULL;
+ acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
+ vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
- } else
- WARN_ON(1);
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
+ e = NULL;
+ }
+
+ /* Keep track of vblank of this flip for flip throttling. We use the
+ * cooked hw counter, as that one incremented at start of this vblank
+ * of pageflip completion, so last_flip_vblank is the forbidden count
+ * for queueing new pageflips if vsync + VRR is enabled.
+ */
+ amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
+ amdgpu_crtc->crtc_id);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
- __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
+ vrr_active, (int) !e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
- drm_crtc_vblank_put(&amdgpu_crtc->base);
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping will give valid results
+ * while now done after front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (amdgpu_dm_vrr_active(acrtc_state)) {
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc_state->stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ }
+ }
+ }
}
static void dm_crtc_high_irq(void *interrupt_params)
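The reworked dm_pflip_high_irq() above makes one decision: if VRR is inactive, the scanout position cannot be read, or scanout is already outside the front porch, the pageflip event is completed immediately with an accurate vblank count; otherwise it is queued and sent later from the vupdate handler, where the vblank timestamp is valid. A decision-only sketch of that policy, with the scanout query and event plumbing reduced to plain values:

/* Decision sketch for pageflip completion under VRR. */
#include <stdbool.h>
#include <stdio.h>

enum flip_action { COMPLETE_NOW, QUEUE_FOR_VUPDATE };

static enum flip_action pflip_policy(bool vrr_active, bool scanout_ok,
                                     int vpos, int v_blank_start)
{
    /* Fixed refresh, unreadable scanout position, or scanout outside the
     * front porch: vblank count/timestamp is valid, finish the flip now. */
    if (!vrr_active || !scanout_ok || vpos < v_blank_start)
        return COMPLETE_NOW;

    /* VRR active and inside the front porch: defer to the vupdate
     * interrupt, which runs after the porch and can stamp the event. */
    return QUEUE_FOR_VUPDATE;
}

int main(void)
{
    printf("fixed refresh:       %d\n", pflip_policy(false, true, 1100, 1080));
    printf("VRR, inside porch:   %d\n", pflip_policy(true,  true, 1100, 1080));
    printf("VRR, outside porch:  %d\n", pflip_policy(true,  true, 1000, 1080));
    return 0;
}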
@@ -331,18 +408,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
- drm_crtc_handle_vblank(&acrtc->base);
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- if (acrtc_state->stream &&
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
+
+ /* Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
@@ -352,6 +444,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
@@ -462,6 +555,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
+ init_data.flags.power_down_display_on_boot = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -912,9 +1007,16 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
int i;
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_release_state(dm_state->context);
+ dm_state->context = dc_create_state(dm->dc);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+ dc_resource_state_construct(dm->dc, dm_state->context);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -1457,6 +1559,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
@@ -1542,6 +1665,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
@@ -1593,15 +1744,10 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
- int ret;
if (*dm_state)
return 0;
- ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
- if (ret)
- return ret;
-
priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
@@ -1658,17 +1804,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
- new_state->context = dc_create_state();
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_copy_state(old_state->context);
+
if (!new_state->context) {
kfree(new_state);
return NULL;
}
- old_state = to_dm_atomic_state(obj->state);
- if (old_state && old_state->context)
- dc_resource_state_copy_construct(old_state->context,
- new_state->context);
-
return &new_state->base;
}
@@ -1708,13 +1853,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- state->context = dc_create_state();
+ state->context = dc_create_state(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
@@ -1841,39 +1984,42 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
#endif
static int initialize_plane(struct amdgpu_display_manager *dm,
- struct amdgpu_mode_info *mode_info,
- int plane_id)
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
{
struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
- mode_info->planes[plane_id] = plane;
-
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->type = mode_info->plane_type[plane_id];
+ plane->type = plane_type;
/*
- * HACK: IGT tests expect that each plane can only have
- * one possible CRTC. For now, set one CRTC for each
- * plane that is not an underlay, but still allow multiple
- * CRTCs for underlay planes.
+ * HACK: IGT tests expect that the primary plane for a CRTC
+ * can only have one possible CRTC. Only expose support for
+ * any CRTC if they're not going to be used as a primary plane
+ * for a CRTC - like overlay or underlay planes.
*/
possible_crtcs = 1 << plane_id;
if (plane_id >= dm->dc->caps.max_streams)
possible_crtcs = 0xff;
- ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
return ret;
}
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
return ret;
}
@@ -1916,8 +2062,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
- int32_t total_overlay_planes, total_primary_planes;
+ int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1925,24 +2072,53 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -EINVAL;
}
- /* Identify the number of planes to be initialized */
- total_overlay_planes = dm->dc->caps.max_slave_planes;
- total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
- /* First initialize overlay planes, index starting after primary planes */
- for (i = (total_overlay_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
- /* Initialize primary planes */
- for (i = (total_primary_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, i)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->blends_with_above || !plane->blends_with_below)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
+
+ /* Only create one overlay plane. */
+ break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -2042,8 +2218,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
fail:
kfree(aencoder);
kfree(aconnector);
- for (i = 0; i < dm->dc->caps.max_planes; i++)
- kfree(mode_info->planes[i]);
+
return -EINVAL;
}
@@ -2124,53 +2299,45 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KAVERI:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_FIJI:
case CHIP_TONGA:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_CARRIZO:
adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_carizzo;
break;
case CHIP_STONEY:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_stoney;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -2178,14 +2345,12 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#endif
default:
@@ -2243,56 +2408,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
};
-static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
- struct dc_plane_state *plane_state)
+
+static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ struct dc_scaling_info *scaling_info)
{
- plane_state->src_rect.x = state->src_x >> 16;
- plane_state->src_rect.y = state->src_y >> 16;
- /* we ignore the mantissa for now and do not deal with floating pixels :( */
- plane_state->src_rect.width = state->src_w >> 16;
+ int scale_w, scale_h;
- if (plane_state->src_rect.width == 0)
- return false;
+ memset(scaling_info, 0, sizeof(*scaling_info));
- plane_state->src_rect.height = state->src_h >> 16;
- if (plane_state->src_rect.height == 0)
- return false;
+ /* Source is fixed 16.16 but we ignore mantissa for now... */
+ scaling_info->src_rect.x = state->src_x >> 16;
+ scaling_info->src_rect.y = state->src_y >> 16;
+
+ scaling_info->src_rect.width = state->src_w >> 16;
+ if (scaling_info->src_rect.width == 0)
+ return -EINVAL;
+
+ scaling_info->src_rect.height = state->src_h >> 16;
+ if (scaling_info->src_rect.height == 0)
+ return -EINVAL;
- plane_state->dst_rect.x = state->crtc_x;
- plane_state->dst_rect.y = state->crtc_y;
+ scaling_info->dst_rect.x = state->crtc_x;
+ scaling_info->dst_rect.y = state->crtc_y;
if (state->crtc_w == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.width = state->crtc_w;
+ scaling_info->dst_rect.width = state->crtc_w;
if (state->crtc_h == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.height = state->crtc_h;
+ scaling_info->dst_rect.height = state->crtc_h;
- plane_state->clip_rect = plane_state->dst_rect;
+ /* DRM doesn't specify clipping on destination output. */
+ scaling_info->clip_rect = scaling_info->dst_rect;
- switch (state->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_0:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- case DRM_MODE_ROTATE_90:
- plane_state->rotation = ROTATION_ANGLE_90;
- break;
- case DRM_MODE_ROTATE_180:
- plane_state->rotation = ROTATION_ANGLE_180;
- break;
- case DRM_MODE_ROTATE_270:
- plane_state->rotation = ROTATION_ANGLE_270;
- break;
- default:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- }
+ /* TODO: Validate scaling per-format with DC plane caps */
+ scale_w = scaling_info->dst_rect.width * 1000 /
+ scaling_info->src_rect.width;
- return true;
+ if (scale_w < 250 || scale_w > 16000)
+ return -EINVAL;
+
+ scale_h = scaling_info->dst_rect.height * 1000 /
+ scaling_info->src_rect.height;
+
+ if (scale_h < 250 || scale_h > 16000)
+ return -EINVAL;
+
+ /*
+ * The "scaling_quality" can be ignored for now, quality = 0 has DC
+ * assume reasonable defaults based on the format.
+ */
+
+ return 0;
}
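A note on the scale checks above: DRM hands over plane source coordinates in 16.16 fixed point, so the shift by 16 keeps only the integer part, and the 250..16000 bounds express the supported 0.25x-16x range in units of 0.001. A minimal worked sketch (hypothetical values, not part of the patch):

 /* 1920-wide source scanned out at 960 pixels:
  *   scale_w = 960 * 1000 / 1920 = 500 -> accepted (0.5x)
  * the same source squeezed into 400 pixels:
  *   scale_w = 400 * 1000 / 1920 = 208 -> rejected, below 250 (-EINVAL)
  */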
+
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
@@ -2321,10 +2493,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
return offset ? (address + offset * 256) : 0;
}
-static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
- const struct amdgpu_framebuffer *afb,
- struct dc_plane_state *plane_state,
- uint64_t info)
+static int
+fill_plane_dcc_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const union plane_size *plane_size,
+ const union dc_tiling_info *tiling_info,
+ const uint64_t info,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@@ -2337,133 +2515,103 @@ static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&output, 0, sizeof(output));
if (!offset)
- return false;
+ return 0;
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
if (!dc->cap_funcs.get_dcc_compression_cap)
- return false;
+ return -EINVAL;
- input.format = plane_state->format;
- input.surface_size.width =
- plane_state->plane_size.grph.surface_size.width;
- input.surface_size.height =
- plane_state->plane_size.grph.surface_size.height;
- input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
+ input.format = format;
+ input.surface_size.width = plane_size->grph.surface_size.width;
+ input.surface_size.height = plane_size->grph.surface_size.height;
+ input.swizzle_mode = tiling_info->gfx9.swizzle;
- if (plane_state->rotation == ROTATION_ANGLE_0 ||
- plane_state->rotation == ROTATION_ANGLE_180)
+ if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
input.scan = SCAN_DIRECTION_HORIZONTAL;
- else if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270)
+ else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
input.scan = SCAN_DIRECTION_VERTICAL;
if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
- return false;
+ return -EINVAL;
if (!output.capable)
- return false;
+ return -EINVAL;
if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
- return false;
+ return -EINVAL;
- plane_state->dcc.enable = 1;
- plane_state->dcc.grph.meta_pitch =
+ dcc->enable = 1;
+ dcc->grph.meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- plane_state->dcc.grph.independent_64b_blks = i64b;
+ dcc->grph.independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
+ address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
+ address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
- return true;
+ return 0;
}
-static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
- struct dc_plane_state *plane_state,
- const struct amdgpu_framebuffer *amdgpu_fb)
-{
- uint64_t tiling_flags;
- unsigned int awidth;
- const struct drm_framebuffer *fb = &amdgpu_fb->base;
- int ret = 0;
- struct drm_format_name_buf format_name;
-
- ret = get_fb_info(
- amdgpu_fb,
- &tiling_flags);
-
- if (ret)
- return ret;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
- break;
- case DRM_FORMAT_RGB565:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
- break;
- case DRM_FORMAT_NV21:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
- break;
- case DRM_FORMAT_NV12:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
- break;
- default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
- return -EINVAL;
- }
-
- memset(&plane_state->address, 0, sizeof(plane_state->address));
- memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
- memset(&plane_state->dcc, 0, sizeof(plane_state->dcc));
-
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
- plane_state->plane_size.grph.surface_size.x = 0;
- plane_state->plane_size.grph.surface_size.y = 0;
- plane_state->plane_size.grph.surface_size.width = fb->width;
- plane_state->plane_size.grph.surface_size.height = fb->height;
- plane_state->plane_size.grph.surface_pitch =
- fb->pitches[0] / fb->format->cpp[0];
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_SRGB;
+static int
+fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const uint64_t tiling_flags,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = &afb->base;
+ int ret;
+ memset(tiling_info, 0, sizeof(*tiling_info));
+ memset(plane_size, 0, sizeof(*plane_size));
+ memset(dcc, 0, sizeof(*dcc));
+ memset(address, 0, sizeof(*address));
+
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_size->grph.surface_size.x = 0;
+ plane_size->grph.surface_size.y = 0;
+ plane_size->grph.surface_size.width = fb->width;
+ plane_size->grph.surface_size.height = fb->height;
+ plane_size->grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ address->type = PLN_ADDR_TYPE_GRAPHICS;
+ address->grph.addr.low_part = lower_32_bits(afb->address);
+ address->grph.addr.high_part = upper_32_bits(afb->address);
} else {
- awidth = ALIGN(fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->plane_size.video.luma_size.x = 0;
- plane_state->plane_size.video.luma_size.y = 0;
- plane_state->plane_size.video.luma_size.width = awidth;
- plane_state->plane_size.video.luma_size.height = fb->height;
- /* TODO: unhardcode */
- plane_state->plane_size.video.luma_pitch = awidth;
-
- plane_state->plane_size.video.chroma_size.x = 0;
- plane_state->plane_size.video.chroma_size.y = 0;
- plane_state->plane_size.video.chroma_size.width = awidth;
- plane_state->plane_size.video.chroma_size.height = fb->height;
- plane_state->plane_size.video.chroma_pitch = awidth / 2;
-
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_YCBCR709;
+ uint64_t chroma_addr = afb->address + fb->offsets[1];
+
+ plane_size->video.luma_size.x = 0;
+ plane_size->video.luma_size.y = 0;
+ plane_size->video.luma_size.width = fb->width;
+ plane_size->video.luma_size.height = fb->height;
+ plane_size->video.luma_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ plane_size->video.chroma_size.x = 0;
+ plane_size->video.chroma_size.y = 0;
+ /* TODO: set these based on surface format */
+ plane_size->video.chroma_size.width = fb->width / 2;
+ plane_size->video.chroma_size.height = fb->height / 2;
+
+ plane_size->video.chroma_pitch =
+ fb->pitches[1] / fb->format->cpp[1];
+
+ address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ address->video_progressive.luma_addr.low_part =
+ lower_32_bits(afb->address);
+ address->video_progressive.luma_addr.high_part =
+ upper_32_bits(afb->address);
+ address->video_progressive.chroma_addr.low_part =
+ lower_32_bits(chroma_addr);
+ address->video_progressive.chroma_addr.high_part =
+ upper_32_bits(chroma_addr);
}
/* Fill GFX8 params */
@@ -2477,21 +2625,21 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
/* XXX fix me for VI */
- plane_state->tiling_info.gfx8.num_banks = num_banks;
- plane_state->tiling_info.gfx8.array_mode =
+ tiling_info->gfx8.num_banks = num_banks;
+ tiling_info->gfx8.array_mode =
DC_ARRAY_2D_TILED_THIN1;
- plane_state->tiling_info.gfx8.tile_split = tile_split;
- plane_state->tiling_info.gfx8.bank_width = bankw;
- plane_state->tiling_info.gfx8.bank_height = bankh;
- plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
- plane_state->tiling_info.gfx8.tile_mode =
+ tiling_info->gfx8.tile_split = tile_split;
+ tiling_info->gfx8.bank_width = bankw;
+ tiling_info->gfx8.bank_height = bankh;
+ tiling_info->gfx8.tile_aspect = mtaspect;
+ tiling_info->gfx8.tile_mode =
DC_ADDR_SURF_MICRO_TILING_DISPLAY;
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
== DC_ARRAY_1D_TILED_THIN1) {
- plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
}
- plane_state->tiling_info.gfx8.pipe_config =
+ tiling_info->gfx8.pipe_config =
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
if (adev->asic_type == CHIP_VEGA10 ||
@@ -2499,60 +2647,249 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
- plane_state->tiling_info.gfx9.num_pipes =
+ tiling_info->gfx9.num_pipes =
adev->gfx.config.gb_addr_config_fields.num_pipes;
- plane_state->tiling_info.gfx9.num_banks =
+ tiling_info->gfx9.num_banks =
adev->gfx.config.gb_addr_config_fields.num_banks;
- plane_state->tiling_info.gfx9.pipe_interleave =
+ tiling_info->gfx9.pipe_interleave =
adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
- plane_state->tiling_info.gfx9.num_shader_engines =
+ tiling_info->gfx9.num_shader_engines =
adev->gfx.config.gb_addr_config_fields.num_se;
- plane_state->tiling_info.gfx9.max_compressed_frags =
+ tiling_info->gfx9.max_compressed_frags =
adev->gfx.config.gb_addr_config_fields.max_compress_frags;
- plane_state->tiling_info.gfx9.num_rb_per_se =
+ tiling_info->gfx9.num_rb_per_se =
adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
- plane_state->tiling_info.gfx9.swizzle =
+ tiling_info->gfx9.swizzle =
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
- plane_state->tiling_info.gfx9.shaderEnable = 1;
+ tiling_info->gfx9.shaderEnable = 1;
- fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state,
- tiling_flags);
+ ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
+ plane_size, tiling_info,
+ tiling_flags, dcc, address);
+ if (ret)
+ return ret;
}
- plane_state->visible = true;
- plane_state->scaling_quality.h_taps_c = 0;
- plane_state->scaling_quality.v_taps_c = 0;
+ return 0;
+}
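For the video branch above, both planes come straight from the framebuffer description: luma at afb->address with fb->pitches[0], chroma at afb->address + fb->offsets[1] with fb->pitches[1], and the chroma extent halved in each dimension for 4:2:0. Roughly, for a common NV12 layout (hypothetical numbers, not from the patch):

 /* 1920x1080 NV12, pitches[0] = pitches[1] = 1920, offsets[1] = 1920 * 1080:
  *   luma_size = 1920x1080, luma_pitch = 1920 / cpp[0] = 1920
  *   chroma_size = 960x540, chroma_pitch = 1920 / cpp[1] = 960
  *   luma_addr = afb->address, chroma_addr = afb->address + 2073600
  */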
- /* is this needed? is plane_state zeroed at allocation? */
- plane_state->scaling_quality.h_taps = 0;
- plane_state->scaling_quality.v_taps = 0;
- plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+static void
+fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+ bool *per_pixel_alpha, bool *global_alpha,
+ int *global_alpha_value)
+{
+ *per_pixel_alpha = false;
+ *global_alpha = false;
+ *global_alpha_value = 0xff;
- return ret;
+ if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return;
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ static const uint32_t alpha_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ };
+ uint32_t format = plane_state->fb->format->format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
+ if (format == alpha_formats[i]) {
+ *per_pixel_alpha = true;
+ break;
+ }
+ }
+ }
+
+ if (plane_state->alpha < 0xffff) {
+ *global_alpha = true;
+ *global_alpha_value = plane_state->alpha >> 8;
+ }
}
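On the blending attributes just above: the DRM per-plane alpha property is 16 bits wide (0xffff = fully opaque) while DC takes an 8-bit global alpha, hence the shift by 8, and fully opaque planes skip global alpha entirely. A small illustrative sketch (values are hypothetical):

 /* plane alpha = 0xffff -> global_alpha = false (nothing extra to blend)
  * plane alpha = 0x8000 -> global_alpha = true, global_alpha_value = 0x80 (~50%)
  * ARGB8888 overlay with DRM_MODE_BLEND_PREMULTI -> per_pixel_alpha = true
  */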
-static int fill_plane_attributes(struct amdgpu_device *adev,
- struct dc_plane_state *dc_plane_state,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
{
- const struct amdgpu_framebuffer *amdgpu_fb =
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
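Tying this to the plane properties created later in this patch: NV12-capable primary planes get the DRM color properties with BT.709 / limited range as defaults, so an NV12 framebuffer with untouched properties lands in the limited-range path here. A hedged summary of the mapping (names as used above):

 /* DRM_COLOR_YCBCR_BT709 + DRM_COLOR_YCBCR_LIMITED_RANGE (the defaults) -> COLOR_SPACE_YCBCR709_LIMITED
  * DRM_COLOR_YCBCR_BT601 + DRM_COLOR_YCBCR_FULL_RANGE -> COLOR_SPACE_YCBCR601
  * DRM_COLOR_YCBCR_BT2020 + limited range -> -EINVAL (not supported here)
  */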
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const uint64_t tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- const struct drm_crtc *crtc = plane_state->crtc;
- int ret = 0;
+ struct drm_format_name_buf format_name;
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
- if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
- ret = fill_plane_attributes_from_fb(
- crtc->dev->dev_private,
- dc_plane_state,
- amdgpu_fb);
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
if (ret)
return ret;
+ ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address);
+ if (ret)
+ return ret;
+
+ fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ uint64_t tiling_flags;
+ int ret;
+
+ ret = fill_dc_scaling_info(plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ if (ret)
+ return ret;
+
+ ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+ &plane_info,
+ &dc_plane_state->address);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@@ -3124,6 +3461,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
dc_stream_retain(state->stream);
}
+ state->active_planes = cur->active_planes;
+ state->interrupts_enabled = cur->interrupts_enabled;
state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
@@ -3136,12 +3475,41 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
+static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int rc;
+
+ irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
+
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ return rc;
+}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ int rc = 0;
+
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_vrr_active(acrtc_state))
+ rc = dm_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = dm_set_vupdate_irq(crtc, false);
+ }
+
+ if (rc)
+ return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -3519,6 +3887,76 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
+static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = new_crtc_state->crtc->dev;
+ struct drm_plane *plane;
+
+ drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return true;
+ }
+
+ return false;
+}
+
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_atomic_state *state = new_crtc_state->state;
+ struct drm_plane *plane;
+ int num_active = 0;
+
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+ struct drm_plane_state *new_plane_state;
+
+ /* Cursor planes are "fake". */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state) {
+ /*
+ * The plane is enabled on the CRTC and hasn't changed
+ * state. This means that it previously passed
+ * validation and is therefore enabled.
+ */
+ num_active += 1;
+ continue;
+ }
+
+ /* We need a framebuffer to be considered enabled. */
+ num_active += (new_plane_state->fb != NULL);
+ }
+
+ return num_active;
+}
+
+/*
+ * Sets whether interrupts should be enabled on a specific CRTC.
+ * We require that the stream be enabled and that there exist active
+ * DC planes on the stream.
+ */
+static void
+dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->active_planes = 0;
+ dm_new_crtc_state->interrupts_enabled = false;
+
+ if (!dm_new_crtc_state->stream)
+ return;
+
+ dm_new_crtc_state->active_planes =
+ count_crtc_active_planes(new_crtc_state);
+
+ dm_new_crtc_state->interrupts_enabled =
+ dm_new_crtc_state->active_planes > 0;
+}
+
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -3527,6 +3965,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
int ret = -EINVAL;
+ /*
+ * Update interrupt state for the CRTC. This needs to happen whenever
+ * the CRTC has changed or whenever any of its planes have changed.
+ * Atomic check satisfies both of these requirements since the CRTC
+ * is added to the state by DRM during drm_atomic_helper_check_planes.
+ */
+ dm_update_crtc_interrupt_state(crtc, state);
+
if (unlikely(!dm_crtc_state->stream &&
modeset_required(state, NULL, dm_crtc_state->stream))) {
WARN_ON(1);
@@ -3537,6 +3983,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
if (!dm_crtc_state->stream)
return 0;
+ /*
+ * We want at least one hardware plane enabled to use
+ * the stream with a cursor enabled.
+ */
+ if (state->enable && state->active &&
+ does_crtc_have_active_cursor(state) &&
+ dm_crtc_state->active_planes == 0)
+ return -EINVAL;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
@@ -3583,11 +4038,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
WARN_ON(amdgpu_state == NULL);
- if (amdgpu_state) {
- plane->state = &amdgpu_state->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
- }
+ if (amdgpu_state)
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
@@ -3637,10 +4089,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
- uint64_t chroma_addr = 0;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
- uint64_t tiling_flags, dcc_address;
- unsigned int awidth;
+ uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -3693,29 +4143,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
- plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
-
- dcc_address =
- get_dcc_address(afb->address, tiling_flags);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
- } else {
- awidth = ALIGN(new_state->fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->address.video_progressive.luma_addr.low_part
- = lower_32_bits(afb->address);
- plane_state->address.video_progressive.luma_addr.high_part
- = upper_32_bits(afb->address);
- chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
- plane_state->address.video_progressive.chroma_addr.low_part
- = lower_32_bits(chroma_addr);
- plane_state->address.video_progressive.chroma_addr.high_part
- = upper_32_bits(chroma_addr);
- }
+ fill_plane_buffer_attributes(
+ adev, afb, plane_state->format, plane_state->rotation,
+ tiling_flags, &plane_state->tiling_info,
+ &plane_state->plane_size, &plane_state->dcc,
+ &plane_state->address);
}
return 0;
@@ -3747,13 +4179,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
{
struct amdgpu_device *adev = plane->dev->dev_private;
struct dc *dc = adev->dm.dc;
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct dm_plane_state *dm_plane_state;
+ struct dc_scaling_info scaling_info;
+ int ret;
+
+ dm_plane_state = to_dm_plane_state(state);
if (!dm_plane_state->dc_state)
return 0;
- if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
- return -EINVAL;
+ ret = fill_dc_scaling_info(state, &scaling_info);
+ if (ret)
+ return ret;
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;
@@ -3826,64 +4263,115 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565,
};
-static const uint32_t yuv_formats[] = {
- DRM_FORMAT_NV12,
- DRM_FORMAT_NV21,
+static const uint32_t overlay_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565
};
static const u32 cursor_formats[] = {
DRM_FORMAT_ARGB8888
};
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct drm_plane *plane,
- unsigned long possible_crtcs)
+static int get_plane_formats(const struct drm_plane *plane,
+ const struct dc_plane_cap *plane_cap,
+ uint32_t *formats, int max_formats)
{
- int res = -EPERM;
+ int i, num_formats = 0;
+
+ /*
+ * TODO: Query support for each group of formats directly from
+ * DC plane caps. This will require adding more formats to the
+ * caps list.
+ */
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- rgb_formats,
- ARRAY_SIZE(rgb_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = rgb_formats[i];
+ }
+
+ if (plane_cap && plane_cap->pixel_format_support.nv12)
+ formats[num_formats++] = DRM_FORMAT_NV12;
break;
+
case DRM_PLANE_TYPE_OVERLAY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- yuv_formats,
- ARRAY_SIZE(yuv_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = overlay_formats[i];
+ }
break;
+
case DRM_PLANE_TYPE_CURSOR:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- cursor_formats,
- ARRAY_SIZE(cursor_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = cursor_formats[i];
+ }
break;
}
+ return num_formats;
+}
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap)
+{
+ uint32_t formats[32];
+ int num_formats;
+ int res = -EPERM;
+
+ num_formats = get_plane_formats(plane, plane_cap, formats,
+ ARRAY_SIZE(formats));
+
+ res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+ &dm_plane_funcs, formats, num_formats,
+ NULL, plane->type, NULL);
+ if (res)
+ return res;
+
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ plane_cap && plane_cap->per_pixel_alpha) {
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI);
+
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane, blend_caps);
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ plane_cap && plane_cap->pixel_format_support.nv12) {
+ /* This only affects YUV formats. */
+ drm_plane_create_color_properties(
+ plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
+ }
+
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
-
- return res;
+ return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
@@ -3900,7 +4388,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
goto fail;
cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
- res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
if (!acrtc)
@@ -4334,6 +4822,8 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
DRM_ERROR("Failed to create debugfs for connector");
goto out_free;
}
+ aconnector->debugfs_dpcd_address = 0;
+ aconnector->debugfs_dpcd_size = 0;
#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
@@ -4473,9 +4963,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
x = plane->state->crtc_x;
y = plane->state->crtc_y;
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
+
+ if (crtc->primary->state) {
+ /* avivo cursors are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ }
+
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -4582,9 +5076,10 @@ static void update_freesync_state_on_stream(
struct dc_plane_state *surface,
u32 flip_timestamp_in_us)
{
- struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket = {0};
- struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
if (!new_stream)
return;
@@ -4597,19 +5092,8 @@ static void update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
- if (new_crtc_state->vrr_supported &&
- config.min_refresh_in_uhz &&
- config.max_refresh_in_uhz) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- } else {
- config.state = VRR_STATE_UNSUPPORTED;
- }
-
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr_params);
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
if (surface) {
mod_freesync_handle_preflip(
@@ -4618,6 +5102,12 @@ static void update_freesync_state_on_stream(
new_stream,
flip_timestamp_in_us,
&vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+ }
}
mod_freesync_build_vrr_infopacket(
@@ -4649,6 +5139,100 @@ static void update_freesync_state_on_stream(
new_crtc_state->base.crtc->base.id,
(int)new_crtc_state->base.vrr_enabled,
(int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
+static void pre_update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_timing_changed |=
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
+
+ new_crtc_state->vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+ /* Transition VRR inactive -> active:
+ * While VRR is active, we must not disable vblank irq, as a
+ * re-enable after a disable would compute bogus vblank/pflip
+ * timestamps if the re-enable happened inside the display front-porch.
+ *
+ * We also need vupdate irq for the actual core vblank handling
+ * at end of vblank.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, true);
+ drm_crtc_vblank_get(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, false);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -4656,9 +5240,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
- bool *wait_for_vblank)
+ bool wait_for_vblank)
{
- uint32_t i, r;
+ uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -4668,42 +5252,43 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- int flip_count = 0, planes_count = 0, vpos, hpos;
+ int planes_count = 0, vpos, hpos;
+ long r;
unsigned long flags;
struct amdgpu_bo *abo;
- uint64_t tiling_flags, dcc_address;
- uint32_t target, target_vblank;
- uint64_t last_flip_vblank;
- bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
-
- struct {
- struct dc_surface_update surface_updates[MAX_SURFACES];
- struct dc_flip_addrs flip_addrs[MAX_SURFACES];
- struct dc_stream_update stream_update;
- } *flip;
-
+ uint64_t tiling_flags;
+ uint32_t target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *full;
+ } *bundle;
- flip = kzalloc(sizeof(*flip), GFP_KERNEL);
- full = kzalloc(sizeof(*full), GFP_KERNEL);
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
- if (!flip || !full) {
- dm_error("Failed to allocate update bundles\n");
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
goto cleanup;
}
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- bool pflip_needed;
+ bool plane_needs_flip;
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4718,122 +5303,96 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (!new_crtc_state->active)
continue;
- pflip_needed = old_plane_state->fb &&
- old_plane_state->fb != new_plane_state->fb;
-
dc_plane = dm_new_plane_state->dc_state;
- if (pflip_needed) {
- /*
- * Assume even ONE crtc with immediate flip means
- * entire can't wait for VBLANK
- * TODO Check if it's correct
- */
- if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
- *wait_for_vblank = false;
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ }
- /*
- * TODO This might fail and hence better not used, wait
- * explicitly on fences instead
- * and in general should be called for
- * blocking commit to as per framework helpers
- */
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- DRM_ERROR("failed to reserve buffer before flip\n");
+ fill_dc_scaling_info(new_plane_state,
+ &bundle->scaling_infos[planes_count]);
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
- true, false,
- msecs_to_jiffies(5000));
- if (unlikely(r == 0))
- DRM_ERROR("Waiting for fences timed out.");
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+ pflip_present = pflip_present || plane_needs_flip;
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
- amdgpu_bo_unreserve(abo);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
- flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address);
- flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address);
+ /*
+ * Wait for all fences on this FB. Do limited wait to avoid
+ * deadlock during GPU reset when this fence will not signal
+ * but we hold reservation lock for the BO.
+ */
+ r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+ false,
+ msecs_to_jiffies(5000));
+ if (unlikely(r <= 0))
+ DRM_ERROR("Waiting for fences timed out or interrupted!");
- dcc_address = get_dcc_address(afb->address, tiling_flags);
- flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address);
- flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address);
+ /*
+ * TODO: This might fail and hence is better not used; wait
+ * explicitly on the fences instead, and in general this should
+ * be done for blocking commits, as per the framework helpers.
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0))
+ DRM_ERROR("failed to reserve buffer before flip\n");
- flip->flip_addrs[flip_count].flip_immediate =
- (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- timestamp_ns = ktime_get_ns();
- flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
- flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count];
- flip->surface_updates[flip_count].surface = dc_plane;
+ amdgpu_bo_unreserve(abo);
- if (!flip->surface_updates[flip_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
- acrtc_attach->crtc_id);
- continue;
- }
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address);
- if (plane == pcrtc->primary)
- update_freesync_state_on_stream(
- dm,
- acrtc_state,
- acrtc_state->stream,
- dc_plane,
- flip->flip_addrs[flip_count].flip_timestamp_in_us);
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
- __func__,
- flip->flip_addrs[flip_count].address.grph.addr.high_part,
- flip->flip_addrs[flip_count].address.grph.addr.low_part);
+ bundle->flip_addrs[planes_count].flip_immediate =
+ (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
- flip_count += 1;
- }
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
- full->surface_updates[planes_count].surface = dc_plane;
- if (new_pcrtc_state->color_mgmt_changed) {
- full->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
}
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
- full->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
- full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
- full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
- full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count];
-
-
- full->plane_infos[planes_count].color_space = dc_plane->color_space;
- full->plane_infos[planes_count].format = dc_plane->format;
- full->plane_infos[planes_count].plane_size = dc_plane->plane_size;
- full->plane_infos[planes_count].rotation = dc_plane->rotation;
- full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
- full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
- full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
- full->plane_infos[planes_count].visible = dc_plane->visible;
- full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
- full->plane_infos[planes_count].dcc = dc_plane->dcc;
- full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count];
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
planes_count += 1;
}
- /*
- * TODO: For proper atomic behaviour, we should be calling into DC once with
- * all the changes. However, DC refuses to do pageflips and non-pageflip
- * changes in the same call. Change DC to respect atomic behaviour,
- * hopefully eliminating dc_*_update structs in their entirety.
- */
- if (flip_count) {
+ if (pflip_present) {
if (!vrr_active) {
/* Use old throttling in non-vrr fixed refresh rate mode
* to keep flip scheduling based on target vblank counts
@@ -4841,7 +5400,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
}
else {
/* For variable refresh rate mode only:
@@ -4857,11 +5416,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- target = (uint32_t)last_flip_vblank + *wait_for_vblank;
-
- /* Prepare wait for target vblank early - before the fence-waits */
- target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
- amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
+ target_vblank = last_flip_vblank + wait_for_vblank;
/*
* Wait until we're out of the vertical blank period before the one
@@ -4892,54 +5447,102 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->stream) {
if (acrtc_state->freesync_timing_changed)
- flip->stream_update.adjust =
+ bundle->stream_update.adjust =
&acrtc_state->stream->adjust;
if (acrtc_state->freesync_vrr_info_changed)
- flip->stream_update.vrr_infopacket =
+ bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
-
- mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- flip->surface_updates,
- flip_count,
- acrtc_state->stream,
- &flip->stream_update,
- dc_state);
- mutex_unlock(&dm->dc_lock);
}
- if (planes_count) {
+ /* Update the planes if changed or disable if we don't have any. */
+ if (planes_count || acrtc_state->active_planes == 0) {
if (new_pcrtc_state->mode_changed) {
- full->stream_update.src = acrtc_state->stream->src;
- full->stream_update.dst = acrtc_state->stream->dst;
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
}
if (new_pcrtc_state->color_mgmt_changed)
- full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
acrtc_state->stream->abm_level = acrtc_state->abm_level;
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
- full->stream_update.abm_level = &acrtc_state->abm_level;
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- full->surface_updates,
+ bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &full->stream_update,
+ &bundle->stream_update,
dc_state);
mutex_unlock(&dm->dc_lock);
}
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
cleanup:
- kfree(flip);
- kfree(full);
+ kfree(bundle);
+}
+
+/*
+ * Enable interrupts on CRTCs that are newly active, have undergone
+ * a modeset, or have active planes again.
+ *
+ * Done in two passes, based on the for_modeset flag:
+ * Pass 1: For CRTCs going through modeset
+ * Pass 2: For CRTCs going from 0 to n active planes
+ *
+ * Interrupts can only be enabled after the planes are programmed,
+ * so this requires a two-pass approach since we don't want to
+ * just defer the interrupts until after commit planes every time.
+ */
+static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool for_modeset)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(old_crtc_state);
+ bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
+ bool run_pass;
+
+ run_pass = (for_modeset && modeset) ||
+ (!for_modeset && !modeset &&
+ !dm_old_crtc_state->interrupts_enabled);
+
+ if (!run_pass)
+ continue;
+
+ if (!dm_new_crtc_state->interrupts_enabled)
+ continue;
+
+ manage_dm_interrupts(adev, acrtc, true);
+
+#ifdef CONFIG_DEBUG_FS
+ /* The stream has changed so CRC capture needs to be re-enabled. */
+ if (dm_new_crtc_state->crc_enabled) {
+ dm_new_crtc_state->crc_enabled = false;
+ amdgpu_dm_crtc_set_crc_source(crtc, "auto");
+ }
+#endif
+ }
}
/*
@@ -4953,8 +5556,7 @@ cleanup:
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
struct dc_stream_state *stream_state)
{
- stream_state->mode_changed =
- crtc_state->mode_changed || crtc_state->active_changed;
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
@@ -4967,30 +5569,41 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
int i;
/*
- * We evade vblanks and pflips on crtc that
- * should be changed. We do it here to flush & disable
- * interrupts before drm_swap_state is called in drm_atomic_helper_commit
- * it will update crtc->dm_crtc_state->stream pointer which is used in
- * the ISRs.
+ * We evade vblank and pflip interrupts on CRTCs that are undergoing
+ * a modeset, are being disabled, or have no active planes.
+ *
+ * It's done in atomic commit rather than commit tail for now since
+ * some of these interrupt handlers access the current CRTC state and
+ * potentially the stream pointer itself.
+ *
+ * Since the atomic state is swapped within atomic commit and not within
+ * commit tail, this would lead to the new state (that hasn't been
+ * committed yet) being accessed from within the handlers.
+ *
+ * TODO: Fix this so we can do this in commit tail and not have to block
+ * in atomic check.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- if (drm_atomic_crtc_needs_modeset(new_crtc_state)
- && dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->interrupts_enabled &&
+ (!dm_new_crtc_state->interrupts_enabled ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/*
- * If the stream is removed and CRC capture was
- * enabled on the CRTC the extra vblank reference
- * needs to be dropped since CRC capture will be
- * disabled.
+ * Drop the extra vblank reference added by CRC
+ * capture if applicable.
*/
- if (!dm_new_crtc_state->stream
- && dm_new_crtc_state->crc_enabled) {
+ if (dm_new_crtc_state->crc_enabled)
drm_crtc_vblank_put(crtc);
+
+ /*
+ * Only keep CRC capture enabled if there's
+ * still a stream for the CRTC.
+ */
+ if (!dm_new_crtc_state->stream)
dm_new_crtc_state->crc_enabled = false;
- }
manage_dm_interrupts(adev, acrtc, false);
}
@@ -5037,7 +5650,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_state = dm_state->context;
} else {
/* No state changes, retain current state. */
- dc_state_temp = dc_create_state();
+ dc_state_temp = dc_create_state(dm->dc);
ASSERT(dc_state_temp);
dc_state = dc_state_temp;
dc_resource_state_copy_construct_current(dm->dc, dc_state);
@@ -5206,45 +5819,41 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- /*
- * loop to enable interrupts on newly arrived crtc
- */
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool modeset_needed;
-
+ new_crtc_state, i) {
if (old_crtc_state->active && !new_crtc_state->active)
crtc_disable_count++;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- modeset_needed = modeset_required(
- new_crtc_state,
- dm_new_crtc_state->stream,
- dm_old_crtc_state->stream);
- if (dm_new_crtc_state->stream == NULL || !modeset_needed)
- continue;
-
- manage_dm_interrupts(adev, acrtc, true);
+ /* Update freesync active state. */
+ pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
-#ifdef CONFIG_DEBUG_FS
- /* The stream has changed so CRC capture needs to re-enabled. */
- if (dm_new_crtc_state->crc_enabled)
- amdgpu_dm_crtc_set_crc_source(crtc, "auto");
-#endif
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
+ dm_new_crtc_state);
}
+ /* Enable interrupts for CRTCs going through a modeset. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, true);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ wait_for_vblank = false;
+
/* update planes when needed per crtc*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, &wait_for_vblank);
+ dm, crtc, wait_for_vblank);
}
+ /* Enable interrupts for CRTCs going from 0 to n active planes. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, false);
/*
* send vblank event on all events not handled in flip and
@@ -5484,21 +6093,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
- struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
-
- new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
-
- if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
- ret = -EINVAL;
- goto fail;
- }
-
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
/* TODO This hack should go away */
@@ -5661,6 +6261,9 @@ skip_modeset:
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
/*
* Color management settings. We also update color properties
* when a modeset is needed, to ensure it gets reprogrammed.
@@ -5685,6 +6288,69 @@ fail:
return ret;
}
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ int i;
+
+ /*
+ * TODO: Remove this hack once the checks below are sufficient
+ * to determine when we need to reset all the planes on
+ * the stream.
+ */
+ if (state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* TODO: Remove this once we can handle fast format changes. */
+ if (old_other_state->fb && new_other_state->fb &&
+ old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+ }
+
+ return false;
+}
+
static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -5699,8 +6365,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- /* TODO return page_flip_needed() function */
- bool pflip_needed = !state->allow_modeset;
+ bool needs_reset;
int ret = 0;
@@ -5713,10 +6378,12 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
/* Remove any changed/removed planes */
if (!enable) {
- if (pflip_needed &&
- plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
if (!old_plane_crtc)
@@ -5767,7 +6434,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
return 0;
- if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
WARN_ON(dm_new_plane_state->dc_state);
@@ -5779,7 +6446,7 @@ static int dm_update_plane_state(struct dc *dc,
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
- ret = fill_plane_attributes(
+ ret = fill_dc_plane_attributes(
new_plane_crtc->dev->dev_private,
dc_new_plane_state,
new_plane_state,
@@ -5827,10 +6494,11 @@ static int dm_update_plane_state(struct dc *dc,
}
static int
-dm_determine_update_type_for_commit(struct dc *dc,
+dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct drm_atomic_state *state,
enum surface_update_type *out_type)
{
+ struct dc *dc = dm->dc;
struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5844,21 +6512,22 @@ dm_determine_update_type_for_commit(struct dc *dc,
struct dc_stream_status *status = NULL;
struct dc_surface_update *updates;
- struct dc_plane_state *surface;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
- surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
- if (!updates || !surface) {
- DRM_ERROR("Plane or surface update failed to allocate");
+ if (!updates) {
+ DRM_ERROR("Failed to allocate plane updates\n");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dc_stream_update stream_update = { 0 };
+ struct dc_scaling_info scaling_info;
+ struct dc_stream_update stream_update;
+
+ memset(&stream_update, 0, sizeof(stream_update));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -5886,23 +6555,12 @@ dm_determine_update_type_for_commit(struct dc *dc,
goto cleanup;
}
- if (!state->allow_modeset)
- continue;
-
if (crtc != new_plane_crtc)
continue;
- updates[num_plane].surface = &surface[num_plane];
+ updates[num_plane].surface = new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) {
- updates[num_plane].surface->src_rect =
- new_dm_plane_state->dc_state->src_rect;
- updates[num_plane].surface->dst_rect =
- new_dm_plane_state->dc_state->dst_rect;
- updates[num_plane].surface->rotation =
- new_dm_plane_state->dc_state->rotation;
- updates[num_plane].surface->in_transfer_func =
- new_dm_plane_state->dc_state->in_transfer_func;
stream_update.dst = new_dm_crtc_state->stream->dst;
stream_update.src = new_dm_crtc_state->stream->src;
}
@@ -5918,6 +6576,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
new_dm_crtc_state->stream->out_transfer_func;
}
+ ret = fill_dc_scaling_info(new_plane_state,
+ &scaling_info);
+ if (ret)
+ goto cleanup;
+
+ updates[num_plane].scaling_info = &scaling_info;
+
num_plane++;
}
@@ -5937,8 +6602,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
+ /*
+ * TODO: DC modifies the surface during this call so we need
+ * to lock here - find a way to do this without locking.
+ */
+ mutex_lock(&dm->dc_lock);
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
+ mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
@@ -5948,7 +6619,6 @@ dm_determine_update_type_for_commit(struct dc *dc,
cleanup:
kfree(updates);
- kfree(surface);
*out_type = update_type;
return ret;
@@ -6132,7 +6802,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
if (ret)
goto fail;
@@ -6147,9 +6817,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
- else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
- WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
-
if (overall_update_type > UPDATE_TYPE_FAST) {
ret = dm_atomic_get_state(state, &dm_state);
@@ -6160,7 +6827,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index fbd161ddc3f4..978ff14a7d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -132,8 +132,6 @@ struct amdgpu_display_manager {
*/
struct drm_private_obj atomic_obj;
- struct drm_modeset_lock atomic_obj_lock;
-
/**
* @dc_lock:
*
@@ -184,6 +182,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+ /**
+ * @vupdate_params:
+ *
+ * Vertical update IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
+ struct common_irq_params
+ vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
+
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
@@ -240,6 +247,10 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+#ifdef CONFIG_DEBUG_FS
+ uint32_t debugfs_dpcd_address;
+ uint32_t debugfs_dpcd_size;
+#endif
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -260,6 +271,9 @@ struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
+ int active_planes;
+ bool interrupts_enabled;
+
int crc_skip_count;
bool crc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 216e48cec716..7258c992a2bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -126,46 +126,51 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
crtc->base.state->dev->dev_private;
struct drm_color_lut *lut;
uint32_t lut_size;
- struct dc_gamma *gamma;
+ struct dc_gamma *gamma = NULL;
enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
bool ret;
- if (!blob) {
+ if (!blob && adev->asic_type <= CHIP_RAVEN) {
/* By default, use the SRGB predefined curve.*/
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
return 0;
}
- lut = (struct drm_color_lut *)blob->data;
- lut_size = blob->length / sizeof(struct drm_color_lut);
-
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (blob) {
+ lut = (struct drm_color_lut *)blob->data;
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
+ gamma->type = GAMMA_RGB_256;
+ else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CS_TFM_1D;
+ else {
+ /* Invalid lut size */
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
- gamma->num_entries = lut_size;
- if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
- gamma->type = GAMMA_RGB_256;
- else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
- gamma->type = GAMMA_CS_TFM_1D;
- else {
- /* Invalid lut size */
- dc_gamma_release(&gamma);
- return -EINVAL;
+ /* Convert drm_lut into dc_gamma */
+ __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
}
- /* Convert drm_lut into dc_gamma */
- __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
-
- /* Call color module to translate into something DC understands. Namely
- * a transfer function.
+ /* A predefined gamma ROM only exists for RAVEN and pre-RAVEN ASICs,
+ * so set canRomBeUsed accordingly
*/
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
- gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
- dc_gamma_release(&gamma);
+ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
if (!ret) {
stream->out_transfer_func->type = old_type;
DRM_ERROR("Out of memory when calculating regamma params\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 4a55cde027cf..1d5fc5ad3bee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
+#include "dm_helpers.h"
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -688,8 +689,131 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+
+/* function description
+ *
+ * generic SDP message access for testing
+ *
+ * debugfs sdp_message is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * SDP header
+ * Hb0 : Secondary-Data Packet ID
+ * Hb1 : Secondary-Data Packet type
+ * Hb2 : Secondary-Data-packet-specific header, Byte 0
+ * Hb3 : Secondary-Data-packet-specific header, Byte 1
+ *
+ * To send a custom SDP message, write 4 bytes of SDP header followed by 32 bytes of raw data
+ */
+static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ uint8_t data[36];
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t write_size = 36;
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ if (size == 0)
+ return 0;
+
+ acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
+
+ r = copy_from_user(data, buf, write_size);
+
+ write_size -= r;
+
+ dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
+
+ return write_size;
+}
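As a usage illustration (not part of the patch), here is a userspace sketch that sends one custom SDP packet through this node; the connector name "DP-1" and the debugfs mount point are assumptions, and the header byte values are placeholders:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int send_test_sdp(void)
{
	uint8_t msg[36] = { 0 };
	ssize_t n;
	int fd;

	msg[0] = 0x00;	/* Hb0: Secondary-Data Packet ID */
	msg[1] = 0x07;	/* Hb1: Secondary-Data Packet type (placeholder) */
	msg[2] = 0x00;	/* Hb2: packet-specific header, byte 0 */
	msg[3] = 0x1f;	/* Hb3: packet-specific header, byte 1 */
	/* msg[4..35]: 32 bytes of raw payload, left zeroed here */

	fd = open("/sys/kernel/debug/dri/0/DP-1/sdp_message", O_WRONLY);
	if (fd < 0)
		return -1;

	n = write(fd, msg, sizeof(msg));
	close(fd);
	return n == (ssize_t)sizeof(msg) ? 0 : -1;
}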
+
DEFINE_SHOW_ATTRIBUTE(vrr_range);
+static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_address))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_address,
+ buf, sizeof(connector->debugfs_dpcd_address));
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_size))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_size,
+ buf, sizeof(connector->debugfs_dpcd_size));
+
+ if (connector->debugfs_dpcd_size > 256)
+ connector->debugfs_dpcd_size = 0;
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t write_size = connector->debugfs_dpcd_size;
+
+ if (size < write_size)
+ return 0;
+
+ data = kzalloc(write_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ r = copy_from_user(data, buf, write_size);
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, write_size - r);
+ kfree(data);
+ return write_size - r;
+}
+
+static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t read_size = connector->debugfs_dpcd_size;
+
+ if (size < read_size)
+ return 0;
+
+ data = kzalloc(read_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ dm_helpers_dp_read_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, read_size);
+
+ r = copy_to_user(buf, data, read_size);
+
+ kfree(data);
+ return read_size - r;
+}
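A userspace sketch (not part of the patch) tying the three nodes together for a DPCD read; the connector path is an assumption, and the address/size files are written as raw 32-bit values to match the copy_from_user() into the uint32_t fields above:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int write_u32(const char *path, uint32_t val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, &val, sizeof(val));
	close(fd);
	return n == (ssize_t)sizeof(val) ? 0 : -1;
}

/* Read "size" DPCD bytes starting at "address" into buf. */
int read_dpcd(uint8_t *buf, uint32_t address, uint32_t size)
{
	const char *base = "/sys/kernel/debug/dri/0/DP-1";	/* assumed connector */
	char path[128];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/aux_dpcd_address", base);
	if (write_u32(path, address))
		return -1;

	snprintf(path, sizeof(path), "%s/aux_dpcd_size", base);
	if (write_u32(path, size))
		return -1;

	snprintf(path, sizeof(path), "%s/aux_dpcd_data", base);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, size);
	close(fd);
	return n == (ssize_t)size ? 0 : -1;
}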
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -710,6 +834,31 @@ static const struct file_operations dp_phy_test_pattern_fops = {
.llseek = default_llseek
};
+static const struct file_operations sdp_message_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_sdp_message_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_address_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_address_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_size_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_size_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_data_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dpcd_data_read,
+ .write = dp_dpcd_data_write,
+ .llseek = default_llseek
+};
+
static const struct {
char *name;
const struct file_operations *fops;
@@ -717,7 +866,11 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"vrr_range", &vrr_range_fops}
+ {"vrr_range", &vrr_range_fops},
+ {"sdp_message", &sdp_message_fops},
+ {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
+ {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
+ {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
int connector_debugfs_init(struct amdgpu_dm_connector *connector)
@@ -842,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_target_backlight_pwm", &target_backlight_read},
};
+/*
+ * Sets the DC visual confirm debug option from the given value.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
+
+ return 0;
+}
+
+/*
+ * Reads the DC visual confirm debug option into the given value.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.visual_confirm;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
+ visual_confirm_set, "%llu\n");
+
int dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {
@@ -867,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
adev,
&dtn_log_fops);
- return PTR_ERR_OR_ZERO(ent);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
+ adev, &visual_confirm_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b39766bd2840..e6cd67342df8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -264,7 +264,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index cd10f77cdeb0..fd22b4474dbf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VUPDATE,
+ __func__);
+}
+
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
+ .set = amdgpu_dm_set_vupdate_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+ adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
+ adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
+
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index c4ea3a91f17a..6e205ee36ac3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -84,6 +84,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
{
ssize_t result = 0;
struct aux_payload payload;
+ enum aux_channel_operation_result operation_result;
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -97,13 +98,27 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
payload.defer_delay = 0;
- result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload);
+ result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+ &operation_result);
if (payload.write)
result = msg->size;
- if (result < 0) /* DC doesn't know about kernel error codes */
- result = -EIO;
+ if (result < 0)
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ result = -EIO;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ result = -EBUSY;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ result = -ETIMEDOUT;
+ break;
+ }
return result;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a114954d6a5b..350e7a620d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -33,6 +33,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
+#include "amdgpu_smu.h"
bool dm_pp_apply_display_requirements(
@@ -40,6 +41,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
int i;
if (adev->pm.dpm_enabled) {
@@ -105,6 +107,9 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
+ else
+ smu_display_configuration_change(smu,
+ &adev->pm.pm_display_cfg);
amdgpu_pm_compute_clocks(adev);
}
@@ -308,6 +313,12 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
+ return true;
+ }
+ } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ if (smu_get_clock_by_type(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
}
@@ -324,6 +335,13 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
+ } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
}
DRM_INFO("DM_PPLIB: Validation clocks:\n");
@@ -374,14 +392,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_latency pp_clks = { 0 };
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
+ ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
+ if (smu_get_clock_by_type_with_latency(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+ }
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
@@ -397,14 +422,20 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_voltage pp_clk_info = {0};
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clk_info))
- return false;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
+ ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
+ if (smu_get_clock_by_type_with_voltage(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+ }
pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
@@ -445,6 +476,10 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->display_clock_voltage_request)
+ ret = smu_display_clock_voltage_request(&adev->smu,
+ &pp_clock_request);
if (ret)
return false;
return true;
@@ -462,6 +497,8 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
+ else if (adev->smu.funcs)
+ ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -472,27 +509,6 @@ bool dm_pp_get_static_clocks(
return true;
}
-void pp_rv_set_display_requirement(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req)
-{
- const struct dc_context *ctx = pp->dm;
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- struct pp_display_clock_request clock = {0};
-
- if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
- return;
-
- clock.clock_type = amd_pp_dcf_clock;
- clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-
- clock.clock_type = amd_pp_f_clock;
- clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-}
-
void pp_rv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
@@ -508,9 +524,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
- if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
- return;
-
for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
if (ranges->reader_wm_sets[i].wm_inst > 3)
wm_dce_clocks[i].wm_set_id = WM_SET_A;
@@ -543,7 +556,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+ if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
+ &wm_with_clock_ranges);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->set_watermarks_for_clock_ranges)
+ smu_set_watermarks_for_clock_ranges(&adev->smu,
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -553,10 +572,10 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
- return;
-
- pp_funcs->notify_smu_enable_pwe(pp_handle);
+ if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+ else if (adev->smu.funcs)
+ smu_notify_smu_enable_pwe(&adev->smu);
}
void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
@@ -611,17 +630,16 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-void dm_pp_get_funcs_rv(
+void dm_pp_get_funcs(
struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
+ struct pp_smu_funcs *funcs)
{
- funcs->pp_smu.dm = ctx;
- funcs->set_display_requirement = pp_rv_set_display_requirement;
- funcs->set_wm_ranges = pp_rv_set_wm_ranges;
- funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
- funcs->set_display_count = pp_rv_set_active_display_count;
- funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
- funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
- funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
+ funcs->rv_funcs.pp_smu.dm = ctx;
+ funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+ funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
+ funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
+ funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
+ funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index f28989860fd8..1e9a2d352068 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
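A quick worked example of the new conversion, assuming fixed31_32 carries 32 fractional bits: converting to unsigned 4.19 effectively drops 13 fractional bits (with clamping), so 1.5 (raw value 0x180000000) becomes 1.5 * 2^19 = 786432 (0xC0000).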
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 3, 19);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index eb62d10bb65c..1b4b51657f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -247,6 +247,53 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
}
}
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ return dm_4k_tile;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ return dm_256k_tile;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ return dm_256k_tile;
+
+ case DC_SW_4KB_R:
+ case DC_SW_4KB_R_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_R:
+ case DC_SW_64KB_R_X:
+ return dm_64k_tile;
+ case DC_SW_VAR_R:
+ case DC_SW_VAR_R_X:
+ return dm_256k_tile;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ return 0;
+ }
+}
+
static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)
@@ -287,46 +334,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */
input->src.cur0_bpp = 32;
- switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
- /* for 4/8/16 high tiles */
- case DC_SW_LINEAR:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_4KB_S:
- case DC_SW_4KB_S_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_S:
- case DC_SW_64KB_S_X:
- case DC_SW_64KB_S_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_S:
- case DC_SW_VAR_S_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* For 64bpp 2 high tiles */
- case DC_SW_4KB_D:
- case DC_SW_4KB_D_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_D:
- case DC_SW_64KB_D_X:
- case DC_SW_64KB_D_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_D:
- case DC_SW_VAR_D_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* Unsupported swizzle modes for dcn */
- case DC_SW_256B_S:
- default:
- ASSERT(0); /* Not supported */
- break;
- }
+ input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);
switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
@@ -466,7 +474,7 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
@@ -536,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
@@ -571,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
@@ -591,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
@@ -693,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
+ /*
+ * we want a breakdown of the various stages of validation, which the
+ * perf_trace macro doesn't support
+ */
+ BW_VAL_TRACE_SETUP();
+
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;
@@ -703,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit;
PERFORMANCE_TRACE_START();
+
+ BW_VAL_TRACE_COUNT();
+
if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);
@@ -1000,13 +1018,16 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
- dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
- if (v->voltage_level != 5) {
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
@@ -1027,58 +1048,60 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.clk.dispclk_khz <
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.clk.dispclk_khz =
+ context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
- context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
+ BW_VAL_TRACE_END_WATERMARKS();
+
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1141,7 +1164,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(
&context->res_ctx, pool,
@@ -1169,13 +1192,17 @@ bool dcn_validate_bandwidth(
input_idx++;
}
+ } else if (v->voltage_level == number_of_states_plus_one) {
+ BW_VAL_TRACE_SKIP(fail);
+ } else if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
}
if (v->voltage_level == 0) {
- dc->dml.soc.sr_enter_plus_exit_time_us =
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*
@@ -1188,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end();
PERFORMANCE_TRACE_END();
+ BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level != 5)
return true;
@@ -1395,12 +1423,14 @@ void dcn_bw_update_from_pplib(struct dc *dc)
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
- struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- if (!pp->set_wm_ranges)
+ if (dc->res_pool->pp_smu)
+ pp = &dc->res_pool->pp_smu->rv_funcs;
+ if (!pp || !pp->set_wm_ranges)
return;
kernel_fpu_begin();
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a6cda201c964..18c775a950cc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
+ link->preferred_link_setting = store_settings;
+
+ /* Retraining with preferred link settings is only relevant for
+ * the DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
- link->preferred_link_setting = store_settings;
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
if (link_stream)
decide_link_settings(link_stream, &store_settings);
@@ -573,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
+
+ link_bw_kbps *= 8; /* 8 bits per byte*/
+ link_bw_kbps *= link_setting->lane_count;
+
+ return link_bw_kbps;
+
+}
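A worked example of the arithmetic, assuming LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz DP reference (27000 kHz) and that link_rate is encoded in 0.27 GHz units (HBR2 == 0x14, i.e. 20):

/* Hypothetical sanity check, not part of the patch. */
static uint32_t example_hbr2_x4_kbps(void)
{
	uint32_t kbps = 20 * 27000;	/* HBR2: kilobytes per second, per lane */

	kbps *= 8;			/* bits per byte */
	kbps *= 4;			/* four lanes */
	return kbps;			/* 17280000 kbps, ~17.3 Gbps of payload
					 * left on a 21.6 Gbps raw HBR2 x4 link
					 * after 8b/10b coding */
}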
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -621,6 +654,10 @@ static bool construct(struct dc *dc,
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc->config = init_params->flags;
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
@@ -668,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;
- dc->current_state = dc_create_state();
-
- if (!dc->current_state) {
- dm_error("%s: failed to create validate ctx\n", __func__);
- goto fail;
- }
-
/* Create logger */
dc_ctx->dce_environment = init_params->dce_environment;
@@ -722,14 +752,22 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(
- dc,
- init_params->num_virtual_links,
- dc_version,
- init_params->asic_id);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
if (!dc->res_pool)
goto fail;
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+ * creating the state copies the contents of dc->dml
+ */
+
+ dc->current_state = dc_create_state(dc);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
dc_resource_state_construct(dc, dc->current_state);
if (!create_links(dc, init_params->num_virtual_links))
@@ -746,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
if (dangling_context == NULL)
@@ -811,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
- dc->config = init_params->flags;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -969,7 +1005,7 @@ static bool context_changed(
return false;
}
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing)
{
@@ -1060,7 +1096,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.prepare_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization)
+ dc->optimize_seamless_boot = true;
+ }
+
+ if (!dc->optimize_seamless_boot)
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1135,12 +1177,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
+ if (!dc->optimize_seamless_boot)
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ memset(&context->commit_hints, 0, sizeof(context->commit_hints));
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1177,7 +1222,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;
- if (dc->optimized_required == false)
+ if (!dc->optimized_required || dc->optimize_seamless_boot)
return true;
post_surface_trace(dc);
@@ -1195,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(void)
+struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!context)
return NULL;
+ /* Each context must have its own instance of VBA, and in order to
+ * initialize and obtain the IP and SOC parameters, the base DML instance
+ * from DC is initially copied into every context.
+ */
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+#endif
kref_init(&context->refcount);
+
return context;
}
+struct dc_state *dc_copy_state(struct dc_state *src_ctx)
+{
+ int i, j;
+ struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!new_ctx)
+ return NULL;
+
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ dc_stream_retain(new_ctx->streams[i]);
+ for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ new_ctx->stream_status[i].plane_states[j]);
+ }
+
+ kref_init(&new_ctx->refcount);
+
+ return new_ctx;
+}
+
void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);
@@ -1666,6 +1753,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dpms_off) {
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
dc->hwss.optimize_bandwidth(dc, dc->current_state);
@@ -1673,6 +1761,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -1700,7 +1789,16 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (update_type == UPDATE_TYPE_FULL) {
+ if (dc->optimize_seamless_boot && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth.
+ */
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
+
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1800,7 +1898,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state();
+ context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
@@ -2099,13 +2197,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
- info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 73d049506618..5903e7822f98 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ea18e9c2d8ce..b37ecc3ede61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -58,7 +58,6 @@
******************************************************************************/
enum {
- LINK_RATE_REF_FREQ_IN_MHZ = 27,
PEAK_FACTOR_X1000 = 1006,
/*
* Some receivers fail to train on first try and are good
@@ -515,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
+static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+{
+ union lane_count_set lane_count_set = { {0} };
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+ link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set == 0) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ link->cur_link_settings.link_rate = link_bw_set;
+ link->cur_link_settings.use_link_rate_set = false;
+ }
+}
+
static bool detect_dp(
struct dc_link *link,
struct display_sink_capability *sink_caps,
@@ -640,7 +673,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
DC_LOGGER_INIT(link->ctx->logger);
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+
+ if (dc_is_virtual_signal(link->connector_signal))
return false;
if (false == dc_link_detect_sink(link, &new_connection_type)) {
@@ -648,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
return false;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP &&
- link->local_sink)
- return true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* On detect, we want to make sure current link settings are
+ * up to date, especially if link was powered on by GOP.
+ */
+ read_edp_current_link_settings_on_detect(link);
+ if (link->local_sink)
+ return true;
+ }
if (link->connector_signal == SIGNAL_TYPE_LVDS &&
link->local_sink)
@@ -720,9 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
same_dpcd = false;
}
/* Active dongle plug in without display or downstream unplug*/
- if (link->type == dc_connection_active_dongle
- && link->dpcd_caps.sink_count.
- bits.SINK_COUNT == 0) {
+ if (link->type == dc_connection_active_dongle &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
if (prev_sink != NULL) {
/* Downstream unplug */
dc_sink_release(prev_sink);
@@ -1172,8 +1210,6 @@ static bool construct(
goto create_fail;
}
-
-
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
"signal %d\n",
@@ -1207,7 +1243,7 @@ static bool construct(
link->link_enc = link->dc->res_pool->funcs->link_enc_create(
&enc_init_data);
- if( link->link_enc == NULL) {
+ if (link->link_enc == NULL) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1399,9 +1435,24 @@ static enum dc_status enable_link_dp(
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
+ /* If the link settings differ from the current settings and the link is
+ * already enabled, it must be disabled before programming the new rate.
+ */
+ if (link->link_status.link_active &&
+ (link->cur_link_settings.lane_count != link_settings.lane_count ||
+ link->cur_link_settings.link_rate != link_settings.link_rate)) {
+ dp_disable_link_phy(link, pipe_ctx->stream->signal);
+ }
+
+ /*in case it is not on*/
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- state->dccg->funcs->update_clocks(state->dccg, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
link,
@@ -1442,15 +1493,9 @@ static enum dc_status enable_link_edp(
struct pipe_ctx *pipe_ctx)
{
enum dc_status status;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- /*in case it is not on*/
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
status = enable_link_dp(state, pipe_ctx);
-
return status;
}
@@ -1466,14 +1511,14 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
/* to make sure the pending down rep can be processed
- * before clear payload table
+ * before enabling the link
*/
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);
@@ -1982,7 +2027,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->signal,
stream->phy_pix_clk);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
dal_ddc_service_read_scdc_data(link->ddc);
}
@@ -2074,11 +2119,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
}
}
+static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
+ return pxl_clk;
+}
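/* Standalone worked example (editorial, not part of this patch) of the
 * effective pixel-clock adjustments made by get_timing_pixel_clock_100hz()
 * above: 4:2:0 halves the rate, 4:2:2 takes 2/3 of it, and 10/12 bpc scale it
 * by 10/8 and 12/8. The enum names and the 4K timing are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

enum pix_enc { ENC_RGB, ENC_YCBCR422, ENC_YCBCR420 };
enum depth   { BPC_8, BPC_10, BPC_12 };

static uint32_t effective_pix_clk_100hz(uint32_t pix_clk_100hz,
					enum pix_enc enc, enum depth bpc)
{
	uint32_t pxl_clk = pix_clk_100hz;

	if (enc == ENC_YCBCR420)
		pxl_clk /= 2;
	else if (enc == ENC_YCBCR422)
		pxl_clk = pxl_clk * 2 / 3;

	if (bpc == BPC_10)
		pxl_clk = pxl_clk * 10 / 8;
	else if (bpc == BPC_12)
		pxl_clk = pxl_clk * 12 / 8;

	return pxl_clk;
}

int main(void)
{
	/* 4K@60: 594.00 MHz nominal, YCbCr 4:2:0, 10 bpc */
	uint32_t clk = effective_pix_clk_100hz(5940000, ENC_YCBCR420, BPC_10);

	/* 594 / 2 * 10 / 8 = 371.25 MHz */
	printf("effective pixel clock: %u.%02u MHz\n",
	       clk / 10000, (clk % 10000) / 100);
	return 0;
}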
+
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dpcd_caps *dpcd_caps)
{
- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
switch (dpcd_caps->dongle_type) {
@@ -2115,13 +2177,6 @@ static bool dp_active_dongle_validate_timing(
return false;
}
-
- /* Check Color Depth and Pixel Clock */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- required_pix_clk_100hz /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
-
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
@@ -2130,14 +2185,11 @@ static bool dp_active_dongle_validate_timing(
case COLOR_DEPTH_101010:
if (dongle_caps->dp_hdmi_max_bpc < 10)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
break;
case COLOR_DEPTH_121212:
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
break;
-
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -2145,7 +2197,7 @@ static bool dp_active_dongle_validate_timing(
return false;
}
- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
return true;
@@ -2166,7 +2218,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
/* Passive Dongle */
- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
+ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
return DC_EXCEED_DONGLE_CAP;
/* Active Dongle*/
@@ -2284,14 +2336,13 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
- struct dc_link_settings *link_settings =
- &stream->link->cur_link_settings;
- uint32_t link_rate_in_mbps =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
- struct fixed31_32 mbps = dc_fixpt_from_int(
- link_rate_in_mbps * link_settings->lane_count);
-
- return dc_fixpt_div_int(mbps, 54);
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
}
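/* Standalone worked example (editorial, not part of this patch) of the
 * PBN-per-slot arithmetic in get_pbn_per_slot() above, using the same
 * conversion as the removed bandwidth_in_kbps_from_link_settings() helper
 * further down in this diff. Assumes LINK_RATE_REF_FREQ_IN_KHZ == 27000 and
 * uses the DP HBR2 rate multiplier 0x14; integer math stands in for the
 * fixed-point division in the real code.
 */
#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000

static uint32_t link_bandwidth_kbps(uint32_t link_rate_mult, uint32_t lane_count)
{
	return link_rate_mult * LINK_RATE_REF_FREQ_IN_KHZ * lane_count * 8;
}

int main(void)
{
	uint32_t kbps = link_bandwidth_kbps(0x14 /* HBR2, 5.4 Gbps/lane */, 4);
	uint32_t mbytes_per_sec = kbps / 8000;	/* kbits/s -> MBytes/s */

	/* 20 * 27000 * 4 * 8 = 17,280,000 kbps -> 2160 MB/s -> 40 PBN per slot */
	printf("PBN per MTP slot: %u\n", mbytes_per_sec / 54);
	return 0;
}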
static int get_color_depth(enum dc_color_depth color_depth)
@@ -2316,7 +2367,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
uint32_t denominator;
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@@ -2551,12 +2602,12 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
pipe_ctx->stream->signal);
@@ -2570,9 +2621,10 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
- stream->output_color_space);
+ stream->output_color_space,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
@@ -2736,3 +2788,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
}
}
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-only reduces bandwidth by 1/3 compared to RGB */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
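/* Standalone worked example (editorial, not part of this patch) of the
 * bandwidth formula implemented by dc_bandwidth_in_kbps_from_timing() above.
 * The 1080p@60 timing used here is only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_100hz = 1485000;	/* 148.50 MHz */
	uint32_t bits_per_channel = 8;
	uint32_t kbps;

	kbps = pix_clk_100hz / 10;		/* -> kHz */
	kbps *= bits_per_channel;
	kbps *= 3;				/* RGB / YCbCr 4:4:4: three components */

	/* 148500 * 8 * 3 = 3,564,000 kbps, roughly 3.56 Gbps */
	printf("required bandwidth: %u kbps\n", kbps);
	return 0;
}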
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b7ee63cd8dc7..f02092a0dc76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -573,12 +573,28 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+/* dc_link_aux_transfer_raw() - Attempt to transfer
+ * the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned
+ * in the payload->reply and the result through
+ * *operation_result. Returns the number of bytes transferred,
+ * or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
- return dce_aux_transfer(ddc, payload);
+ return dce_aux_transfer_raw(ddc, payload, operation_result);
}
+/* dc_link_aux_transfer_with_retries() - Attempt to submit an
+ * aux payload, retrying on timeouts, defers, and busy states
+ * as outlined in the DP spec. Returns true if the request
+ * was successful.
+ *
+ * Unless you want to implement your own retry semantics, this
+ * is probably the one you want.
+ */
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 09d301216076..1ee544a32ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -93,12 +93,10 @@ static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
- uint8_t rate = (uint8_t)
- (lt_settings->link_settings.link_rate);
+ uint8_t rate;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -111,29 +109,42 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- link_set_buffer[0] = rate;
- link_set_buffer[1] = lane_count_set.raw;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET,
- link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dpcd_caps.link_rate_set >= 1 &&
- link->dpcd_caps.link_rate_set <= 8)) {
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &link->dpcd_caps.link_rate_set, 1);
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
}
@@ -952,6 +963,8 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.link_settings.link_rate = link_setting->link_rate;
lt_settings.link_settings.lane_count = link_setting->lane_count;
+ lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -1075,7 +1088,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
@@ -1520,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
-static uint32_t bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- break;
- }
-
- ASSERT(bits_per_channel != 0);
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
-
-static uint32_t bandwidth_in_kbps_from_link_settings(
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_rate_in_kbps = link_setting->link_rate *
- LINK_RATE_REF_FREQ_IN_KHZ;
-
- uint32_t lane_count = link_setting->lane_count;
- uint32_t kbps = link_rate_in_kbps;
-
- kbps *= lane_count;
- kbps *= 8; /* 8 bits per byte*/
-
- return kbps;
-
-}
-
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -1598,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
- /* We always use verified link settings */
- link_setting = &link->verified_link_cap;
+ link_setting = dc_link_get_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -1607,8 +1556,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = bandwidth_in_kbps_from_timing(timing);
- max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@@ -1629,58 +1578,78 @@ bool dp_validate_mode_timing(
return false;
}
-void decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
-
struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
struct dc_link_settings current_link_setting =
initial_link_setting;
- struct dc_link *link;
- uint32_t req_bw;
uint32_t link_bw;
- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
-
- link = stream->link;
-
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
*/
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return;
- }
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- *link_setting = link->verified_link_cap;
- return;
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
}
- /* EDP use the link cap setting */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ return false;
+}
+
+static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ link->dc->config.optimize_edp_link_rate == false) {
*link_setting = link->verified_link_cap;
- return;
+ return true;
}
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = bandwidth_in_kbps_from_link_settings(
+ link_bw = dc_link_bandwidth_kbps(
+ link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
- return;
+ return true;
}
if (current_link_setting.lane_count <
@@ -1689,13 +1658,53 @@ void decide_link_settings(struct dc_stream_state *stream,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
}
}
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link;
+ uint32_t req_bw;
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->link;
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (decide_edp_link_settings(link, link_setting, req_bw))
+ return;
+ } else if (decide_dp_link_settings(link, link_setting, req_bw))
+ return;
BREAK_TO_DEBUGGER();
ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
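/* Standalone sketch (editorial, not part of this patch) of the minimum
 * link-setting search used by decide_dp_link_settings() above: for each link
 * rate, try increasing lane counts; the first combination whose bandwidth
 * covers the requested timing wins. The rate multipliers and the 27 MHz
 * reference mirror the removed bandwidth_in_kbps_from_link_settings() helper
 * and are assumptions of this sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RATES 3
#define NUM_LANES 3

static const uint32_t rates[NUM_RATES] = { 0x06, 0x0a, 0x14 };	/* RBR, HBR, HBR2 */
static const uint32_t lanes[NUM_LANES] = { 1, 2, 4 };

static uint32_t bw_kbps(uint32_t rate_mult, uint32_t lane_count)
{
	return rate_mult * 27000 * lane_count * 8;
}

static bool decide(uint32_t req_bw, uint32_t max_rate, uint32_t max_lanes,
		   uint32_t *out_rate, uint32_t *out_lanes)
{
	for (int r = 0; r < NUM_RATES && rates[r] <= max_rate; r++)
		for (int l = 0; l < NUM_LANES && lanes[l] <= max_lanes; l++)
			if (bw_kbps(rates[r], lanes[l]) >= req_bw) {
				*out_rate = rates[r];
				*out_lanes = lanes[l];
				return true;
			}
	return false;
}

int main(void)
{
	uint32_t rate, lane_count;

	/* 1080p@60 at 8 bpc needs ~3,564,000 kbps; the lowest rate that can
	 * carry it with the available lanes is RBR x4.
	 */
	if (decide(3564000, 0x14, 4, &rate, &lane_count))
		printf("chose rate multiplier 0x%02x, %u lane(s)\n", rate, lane_count);
	return 0;
}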
@@ -2155,11 +2164,7 @@ bool is_mst_supported(struct dc_link *link)
bool is_dp_active_dongle(const struct dc_link *link)
{
- enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
-
- return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+ return link->dpcd_caps.is_branch_dev;
}
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
@@ -2180,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+}
+
+
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -2193,6 +2222,9 @@ static void get_active_converter_info(
return;
}
+ /* DPCD 0x5 bit 0 = 1 indicates this is a branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2234,8 +2266,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
- det_caps[1] * 25000;
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@@ -2252,7 +2284,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -2263,27 +2295,6 @@ static void get_active_converter_info(
ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_device_vendor_id dp_id;
-
- /* read IEEE branch device id */
- core_link_read_dpcd(
- link,
- DP_BRANCH_OUI,
- (uint8_t *)&dp_id,
- sizeof(dp_id));
-
- link->dpcd_caps.branch_dev_id =
- (dp_id.ieee_oui[0] << 16) +
- (dp_id.ieee_oui[1] << 8) +
- dp_id.ieee_oui[2];
-
- memmove(
- link->dpcd_caps.branch_dev_name,
- dp_id.ieee_device_id,
- sizeof(dp_id.ieee_device_id));
- }
-
- {
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
core_link_read_dpcd(
@@ -2347,6 +2358,10 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
@@ -2383,7 +2398,10 @@ static bool retrieve_link_cap(struct dc_link *link)
aux_rd_interval.raw =
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
- if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1 ? true:false;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
memset(ext_cap_data, '\0', sizeof(ext_cap_data));
@@ -2404,11 +2422,44 @@ static bool retrieve_link_cap(struct dc_link *link)
}
link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ }
+
+ else {
+ link->dpcd_caps.dprx_feature.raw = 0;
+ }
+
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
DP_DPCD_REV];
+ read_dp_device_vendor_id(link);
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2536,31 +2587,31 @@ enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
void detect_edp_sink_caps(struct dc_link *link)
{
- uint8_t supported_link_rates[16] = {0};
+ uint8_t supported_link_rates[16];
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dc->config.optimize_edp_link_rate) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
- link->dpcd_caps.link_rate_set = 0;
for (entry = 0; entry < 16; entry += 2) {
// DPCD register reports per-lane link rate = 16-bit link rate capability
- // value X 200 kHz. Need multipler to find link rate in kHz.
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- if (link->reported_link_cap.link_rate < link_rate) {
- link->reported_link_cap.link_rate = link_rate;
- link->dpcd_caps.link_rate_set = entry;
- }
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
}
}
}
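/* Standalone worked example (editorial, not part of this patch) of the DPCD
 * SUPPORTED_LINK_RATES decoding in detect_edp_sink_caps() above: each table
 * entry is a little-endian 16-bit value in units of 200 kHz. The sample
 * bytes below are hypothetical and correspond to 2.16 Gbps, a common
 * intermediate eDP 1.4 rate.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* one table entry as read from DPCD 00010h onward, LSB first */
	uint8_t entry[2] = { 0x30, 0x2a };	/* 0x2a30 = 10800 */
	uint32_t link_rate_in_khz = (entry[1] * 0x100 + entry[0]) * 200;

	/* 10800 * 200 kHz = 2,160,000 kHz -> 2.16 Gbps per lane */
	printf("%u kHz (= %u.%02u Gbps per lane)\n",
	       link_rate_in_khz,
	       link_rate_in_khz / 1000000,
	       (link_rate_in_khz / 10000) % 100);
	return 0;
}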
@@ -2601,6 +2652,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
memset(&params, 0, sizeof(params));
@@ -2640,8 +2692,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
@@ -2650,11 +2701,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &params);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f7f7515f65f4..b0dea759cd86 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -58,6 +58,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc = link->link_enc;
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
@@ -84,6 +86,9 @@ void dp_enable_link_phy(
}
}
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -95,6 +100,10 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
@@ -150,15 +159,25 @@ bool edp_receiver_ready_T7(struct dc_link *link)
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
- } else
+ } else {
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
link->link_enc->funcs->disable_output(link->link_enc, signal);
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ }
+
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 349ab8017776..eac7186e4f08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -31,6 +31,8 @@
#include "opp.h"
#include "timing_generator.h"
#include "transform.h"
+#include "dccg.h"
+#include "dchubbub.h"
#include "dpp.h"
#include "core_types.h"
#include "set_mode_types.h"
@@ -104,44 +106,43 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
return dc_version;
}
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id)
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version)
{
struct resource_pool *res_pool = NULL;
switch (dc_version) {
case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_1:
res_pool = dce81_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_3:
res_pool = dce83_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_10_0:
res_pool = dce100_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_11_0:
res_pool = dce110_create_resource_pool(
- num_virtual_links, dc, asic_id);
+ init_data->num_virtual_links, dc,
+ init_data->asic_id);
break;
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
res_pool = dce112_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -149,8 +150,7 @@ struct resource_pool *dc_create_resource_pool(
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
#endif
- res_pool = dcn10_create_resource_pool(
- num_virtual_links, dc);
+ res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
@@ -163,7 +163,28 @@ struct resource_pool *dc_create_resource_pool(
if (dc->ctx->dc_bios->funcs->get_firmware_info(
dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ // On FPGA these dividers are currently not configured by GDB
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ } else if (res_pool->dccg && res_pool->hubbub) {
+ // If DCCG reference frequency cannot be determined (usually means not set to xtalin) then this is a critical error
+ // as this value must be known for DCHUB programming
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ // Similarly, if DCHUB reference frequency cannot be determined, then it is also a critical error
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
} else
ASSERT_CRITICAL(false);
}
@@ -260,6 +281,7 @@ bool resource_construct(
pool->stream_enc_count++;
}
}
+
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -1014,24 +1036,60 @@ enum dc_status resource_build_scaling_params_for_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
/*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+ * We add a preferred pipe mapping to avoid the chance that
+ * MPCCs already in use will need to be reassigned to other trees.
+ * For example, if we went with the strict, assign backwards logic:
+ *
+ * (State 1)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, no surface, top pipe = 1
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 5
+ *
+ * (State 3)
+ * Display A on, surface enable, top pipe = 0, bottom pipe = 5
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * The state 2->3 transition requires remapping MPCC 5 from display B
+ * to display A.
+ *
+ * However, with the preferred pipe logic, state 2 would look like:
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * This would then cause 2->3 to not require remapping any MPCCs.
*/
-
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == NULL) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
+ if (primary_pipe) {
+ int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
return secondary_pipe;
}
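/* Standalone sketch (editorial, not part of this patch) of the preferred-pipe
 * mapping used by find_idle_secondary_pipe() above: primary pipe i is paired
 * with pipe (pipe_count - 1) - i, so with six pipes the pairs are 0<->5,
 * 1<->4 and 2<->3, which keeps MPCC assignments stable across state changes.
 */
#include <stdio.h>

int main(void)
{
	int pipe_count = 6;

	for (int primary = 0; primary < pipe_count; primary++)
		printf("primary pipe %d -> preferred secondary pipe %d\n",
		       primary, (pipe_count - 1) - primary);
	return 0;
}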
@@ -1214,6 +1272,9 @@ bool dc_add_plane_to_context(
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
+ } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
+ ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
+ free_pipe->bottom_pipe->plane_state = plane_state;
}
/* assign new surfaces*/
@@ -1224,6 +1285,35 @@ bool dc_add_plane_to_context(
return true;
}
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* ODM should only be updated once per otg */
+ if (pipe_ctx->top_pipe)
+ return NULL;
+
+ while (bottom_pipe) {
+ if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
+ break;
+ bottom_pipe = bottom_pipe->bottom_pipe;
+ }
+
+ return bottom_pipe;
+}
+
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
+
+ if (!top_pipe)
+ return false;
+ if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
+ return false;
+
+ return true;
+}
+
bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -1247,10 +1337,14 @@ bool dc_remove_plane_from_context(
/* release pipe for plane*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->plane_state == plane_state) {
+ if (dc_res_is_odm_head_pipe(pipe_ctx)) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ continue;
+ }
if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
@@ -1268,8 +1362,9 @@ bool dc_remove_plane_from_context(
*/
if (!pipe_ctx->top_pipe) {
pipe_ctx->plane_state = NULL;
- pipe_ctx->bottom_pipe = NULL;
- } else {
+ if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
}
}
@@ -1674,6 +1769,9 @@ enum dc_status dc_remove_stream_from_ctx(
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ struct pipe_ctx *odm_pipe =
+ dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);
+
del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
ASSERT(del_pipe->stream_res.stream_enc);
@@ -1698,6 +1796,8 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
memset(del_pipe, 0, sizeof(*del_pipe));
+ if (odm_pipe)
+ memset(odm_pipe, 0, sizeof(*odm_pipe));
break;
}
@@ -1855,6 +1955,7 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
/* TODO Check if this is needed */
/*if (!resource_is_stream_unchanged(old_context, stream)) {
@@ -1869,6 +1970,13 @@ enum dc_status resource_map_pool_resources(
calculate_phy_pix_clks(stream);
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+
if (stream->apply_seamless_boot_optimization)
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
@@ -1951,7 +2059,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dccg = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
}
/**
@@ -1959,12 +2067,14 @@ void dc_resource_state_construct(
* Checks HW resource availability and bandwidth requirement.
* @dc: dc struct for this driver
* @new_ctx: state to be validated
+ * @fast_validate: set to true if only yes/no to support matters
*
* Return: DC_OK if the result can be programmed. Otherwise, an error code.
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx)
+ struct dc_state *new_ctx,
+ bool fast_validate)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -2019,7 +2129,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
return result;
@@ -2315,6 +2425,21 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
+static void set_dp_sdp_info_packet(
+ struct dc_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for custom sdp message */
+
+ /* Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+ if (!stream->dpsdp_infopacket.valid)
+ return;
+
+ *info_packet = stream->dpsdp_infopacket;
+}
+
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2411,6 +2536,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
+ info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2430,6 +2556,8 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+
+ set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2657,10 +2785,11 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
if (!tg->funcs->validate_timing(tg, &stream->timing))
res = DC_FAIL_CONTROLLER_VALIDATE;
- if (res == DC_OK)
+ if (res == DC_OK) {
if (!link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
+ }
/* TODO: validate audio ASIC caps, encoder */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 996298c35f42..96e97d25d639 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -29,6 +29,9 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_hw_sequencer.h"
+#endif
#define DC_LOGGER dc->ctx->logger
@@ -160,6 +163,27 @@ struct dc_stream_state *dc_create_stream_for_sink(
return stream;
}
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
+{
+ struct dc_stream_state *new_stream;
+
+ new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (!new_stream)
+ return NULL;
+
+ memcpy(new_stream, stream, sizeof(struct dc_stream_state));
+
+ if (new_stream->sink)
+ dc_sink_retain(new_stream->sink);
+
+ if (new_stream->out_transfer_func)
+ dc_transfer_func_retain(new_stream->out_transfer_func);
+
+ kref_init(&new_stream->refcount);
+
+ return new_stream;
+}
+
/**
* dc_stream_get_status_from_state - Get stream status from given dc state
* @state: DC state to find the stream status in
@@ -196,6 +220,35 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ unsigned int vupdate_line;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int us_per_line;
+
+ if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
+ ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+
+ vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
+ return;
+
+ if (vpos >= vupdate_line)
+ return;
+
+ us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
+ lines_to_vupdate = vupdate_line - vpos;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time*/
+ if (us_to_vupdate < 70)
+ udelay(us_to_vupdate);
+ }
+#endif
+}
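/* Standalone worked example (editorial, not part of this patch) of the delay
 * arithmetic in delay_cursor_until_vupdate() above. The 1080p@60 timing, the
 * VUPDATE line and the scanline position are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t h_total = 2200;
	uint32_t pix_clk_100hz = 1485000;	/* 148.50 MHz */
	uint32_t vupdate_line = 1125;		/* hypothetical VUPDATE position */
	uint32_t vpos = 1121;			/* current scanline */

	uint32_t us_per_line = h_total * 10000 / pix_clk_100hz;
	uint32_t us_to_vupdate = (vupdate_line - vpos) * us_per_line;

	/* 2200 * 10000 / 1485000 = 14 us/line; 4 lines = 56 us, which is under
	 * the 70 us threshold, so the cursor update would briefly udelay()
	 * instead of racing VUPDATE.
	 */
	printf("%u us per line, %u us until VUPDATE\n", us_per_line, us_to_vupdate);
	return 0;
}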
+
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -234,6 +287,8 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -278,11 +333,13 @@ bool dc_stream_set_cursor_position(
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
- !pipe_ctx->plane_res.ipp)
+ (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
continue;
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -314,6 +371,68 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
return 0;
}
+static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ uint8_t i;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* set valid info */
+ info->dpsdp.valid = true;
+
+ /* set sdp message header */
+ info->dpsdp.hb0 = custom_sdp_message[0]; /* package id */
+ info->dpsdp.hb1 = custom_sdp_message[1]; /* package type */
+ info->dpsdp.hb2 = custom_sdp_message[2]; /* package specific byte 0 any data */
+ info->dpsdp.hb3 = custom_sdp_message[3]; /* package specific byte 1 any data */
+
+ /* set sdp message data */
+ for (i = 0; i < 32; i++)
+ info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
+
+}
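/* Standalone sketch (editorial, not part of this patch) of the custom SDP
 * message layout consumed by build_dp_sdp_info_frame() above: bytes 0..3 are
 * the four header bytes (HB0..HB3) and bytes 4..35 are the 32 payload bytes
 * copied into sb[]. The header values in main() are placeholders, not taken
 * from the DP spec.
 */
#include <stdint.h>
#include <string.h>

static void pack_custom_sdp(uint8_t msg[36],
			    uint8_t hb0, uint8_t hb1, uint8_t hb2, uint8_t hb3,
			    const uint8_t payload[32])
{
	msg[0] = hb0;	/* packet ID */
	msg[1] = hb1;	/* packet type */
	msg[2] = hb2;	/* packet-specific header byte */
	msg[3] = hb3;	/* packet-specific header byte */
	memcpy(&msg[4], payload, 32);
}

int main(void)
{
	uint8_t msg[36];
	uint8_t payload[32] = { 0 };

	pack_custom_sdp(msg, 0x00, 0x0f, 0x00, 0x1f, payload);	/* placeholder header */
	return 0;
}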
+
+static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* in-valid info */
+ info->dpsdp.valid = false;
+}
+
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
+
+ core_dc->hwss.update_info_frame(pipe_ctx);
+
+ invalid_dp_sdp_info_frame(pipe_ctx);
+ }
+
+ return true;
+}
+
bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ee6bd50f60b8..a5e86f9b148f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -119,6 +119,19 @@ const struct dc_plane_status *dc_plane_get_status(
if (core_dc->current_state == NULL)
return NULL;
+ /* Find the current plane state and set its pending bit to false */
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ pipe_ctx->plane_state->status.is_flip_pending = false;
+
+ break;
+ }
+
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&core_dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0515095574e7..44e4b0465587 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,9 +39,10 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.17"
+#define DC_VER "3.2.27"
#define MAX_SURFACES 3
+#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
@@ -53,6 +54,41 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dc_plane_type {
+ DC_PLANE_TYPE_INVALID,
+ DC_PLANE_TYPE_DCE_RGB,
+ DC_PLANE_TYPE_DCE_UNDERLAY,
+ DC_PLANE_TYPE_DCN_UNIVERSAL,
+};
+
+struct dc_plane_cap {
+ enum dc_plane_type type;
+ uint32_t blends_with_above : 1;
+ uint32_t blends_with_below : 1;
+ uint32_t per_pixel_alpha : 1;
+ struct {
+ uint32_t argb8888 : 1;
+ uint32_t nv12 : 1;
+ uint32_t fp16 : 1;
+ } pixel_format_support;
+ // max upscaling factor x1000
+ // upscaling factors are always >= 1
+ // for example, 1080p -> 8K is 4.0, or 4000 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_upscale_factor;
+ // max downscale factor x1000
+ // downscale factors are always <= 1
+ // for example, 8K -> 1080p is 0.25, or 250 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_downscale_factor;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -73,6 +109,7 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ struct dc_plane_cap planes[MAX_PLANES];
};
struct dc_dcc_surface_param {
@@ -164,6 +201,10 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
+ bool optimize_edp_link_rate;
+ bool disable_fractional_pwm;
+ bool allow_seamless_boot_optimization;
+ bool power_down_display_on_boot;
};
enum visual_confirm {
@@ -203,7 +244,59 @@ struct dc_clocks {
int fclk_khz;
int phyclk_khz;
int dramclk_khz;
-};
+ bool p_state_change_support;
+};
+
+struct dc_bw_validation_profile {
+ bool enable;
+
+ unsigned long long total_ticks;
+ unsigned long long voltage_level_ticks;
+ unsigned long long watermark_ticks;
+ unsigned long long rq_dlg_ticks;
+
+ unsigned long long total_count;
+ unsigned long long skip_fast_count;
+ unsigned long long skip_pass_count;
+ unsigned long long skip_fail_count;
+};
+
+#define BW_VAL_TRACE_SETUP() \
+ unsigned long long end_tick = 0; \
+ unsigned long long voltage_level_tick = 0; \
+ unsigned long long watermark_tick = 0; \
+ unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
+ dm_get_timestamp(dc->ctx) : 0
+
+#define BW_VAL_TRACE_COUNT() \
+ if (dc->debug.bw_val_profile.enable) \
+ dc->debug.bw_val_profile.total_count++
+
+#define BW_VAL_TRACE_SKIP(status) \
+ if (dc->debug.bw_val_profile.enable) { \
+ if (!voltage_level_tick) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.skip_ ## status ## _count++; \
+ }
+
+#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
+ if (dc->debug.bw_val_profile.enable) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_END_WATERMARKS() \
+ if (dc->debug.bw_val_profile.enable) \
+ watermark_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_FINISH() \
+ if (dc->debug.bw_val_profile.enable) { \
+ end_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
+ dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
+ if (watermark_tick) { \
+ dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
+ dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
+ } \
+ }
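+
+The tracing macros above are intended to wrap a bandwidth-validation routine; a sketch of that usage pattern follows, where the voltage-level and watermark helpers are placeholders rather than functions from this patch:
+
+/* Illustrative sketch of the BW_VAL_TRACE_* usage pattern. */
+static bool example_validate_bandwidth(struct dc *dc, struct dc_state *context,
+		bool fast_validate)
+{
+	bool supported = true;
+
+	BW_VAL_TRACE_SETUP();
+	BW_VAL_TRACE_COUNT();
+
+	if (!example_voltage_level_ok(dc, context)) {	/* placeholder check */
+		BW_VAL_TRACE_SKIP(fail);
+		supported = false;
+		goto done;
+	}
+	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+	if (fast_validate) {
+		BW_VAL_TRACE_SKIP(fast);
+		goto done;
+	}
+
+	example_calculate_watermarks(dc, context);	/* placeholder */
+	BW_VAL_TRACE_END_WATERMARKS();
+
+done:
+	BW_VAL_TRACE_FINISH();
+	return supported;
+}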
struct dc_debug_options {
enum visual_confirm visual_confirm;
@@ -257,6 +350,8 @@ struct dc_debug_options {
bool skip_detection_link_training;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
+ bool disable_tri_buf;
+ struct dc_bw_validation_profile bw_val_profile;
};
struct dc_debug_data {
@@ -265,6 +360,14 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
+struct dc_bounding_box_overrides {
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+};
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -274,6 +377,7 @@ struct dc {
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_debug_options debug;
+ struct dc_bounding_box_overrides bb_overrides;
struct dc_context *ctx;
uint8_t link_count;
@@ -298,8 +402,12 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
+ /* Required to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ /* Required to maintain clocks and bandwidth for UEFI-enabled HW */
+ bool optimize_seamless_boot;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -327,6 +435,7 @@ struct dc_init_data {
struct hw_asic_id asic_id;
void *driver; /* ctx */
struct cgs_device *cgs_device;
+ struct dc_bounding_box_overrides bb_overrides;
int num_virtual_links;
/*
@@ -597,7 +706,7 @@ struct dc_validation_set {
uint8_t plane_count;
};
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing);
@@ -605,9 +714,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+/*
+ * fast_validate: we return after determining if we can support the new state,
+ * but before we populate the programming info
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx);
+ struct dc_state *new_ctx,
+ bool fast_validate);
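+
+As a usage sketch of the new fast_validate flag (the wrapper below is illustrative, not from this patch), a caller can run the cheap supportability check first and only repeat the full validation when the state will actually be committed:
+
+static enum dc_status example_check_then_commit(struct dc *dc,
+		struct dc_state *new_ctx, bool will_commit)
+{
+	enum dc_status status;
+
+	/* fast path: stop once we know whether the config is supportable */
+	status = dc_validate_global_state(dc, new_ctx, true);
+	if (status != DC_OK || !will_commit)
+		return status;
+
+	/* full path: also populate the programming info */
+	return dc_validate_global_state(dc, new_ctx, false);
+}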
void dc_resource_state_construct(
@@ -636,7 +750,8 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-struct dc_state *dc_create_state(void);
+struct dc_state *dc_create_state(struct dc *dc);
+struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
@@ -648,9 +763,16 @@ struct dpcd_caps {
union dpcd_rev dpcd_rev;
union max_lane_count max_ln_count;
union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
+ /* branch device or sink device */
+ bool is_branch_dev;
/* Dongle's downstream count. */
union sink_count sink_count;
/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
@@ -666,11 +788,11 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
- uint8_t link_rate_set;
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
};
#include "dc_link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 05c8c31d8b31..4ef97f65e55d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -68,6 +68,8 @@ enum aux_transaction_reply {
AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK = 0x04,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER = 0x08,
AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d4eab33c453b..11c68a399267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -94,6 +94,8 @@ struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
enum dc_link_spread link_spread;
+ bool use_link_rate_set;
+ uint8_t link_rate_set;
};
struct dc_lane_settings {
@@ -420,10 +422,24 @@ union edp_configuration_cap {
uint8_t raw;
};
+union dprx_feature {
+ struct {
+ uint8_t GTC_CAP:1; // bit 0: DP 1.3+
+ uint8_t SST_SPLIT_SDP_CAP:1; // bit 1: DP 1.4
+ uint8_t AV_SYNC_CAP:1; // bit 2: DP 1.3+
+ uint8_t VSC_SDP_COLORIMETRY_SUPPORTED:1; // bit 3: DP 1.3+
+ uint8_t VSC_EXT_VESA_SDP_SUPPORTED:1; // bit 4: DP 1.4
+ uint8_t VSC_EXT_VESA_SDP_CHAINING_SUPPORTED:1; // bit 5: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_SUPPORTED:1; // bit 6: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_CHAINING_SUPPORTED:1; // bit 7: DP 1.4
+ } bits;
+ uint8_t raw;
+};
+
union training_aux_rd_interval {
struct {
uint8_t TRAINIG_AUX_RD_INTERVAL:7;
- uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ uint8_t EXT_RECEIVER_CAP_FIELD_PRESENT:1;
} bits;
uint8_t raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 597d38393379..5e6c5eff49cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -51,20 +51,16 @@ static inline void set_reg_field_value_masks(
field_value_mask->mask = field_value_mask->mask | mask;
}
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
- uint32_t addr, uint32_t reg_val, int n,
+static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
+ uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
- ...)
+ va_list ap)
{
- struct dc_reg_value_masks field_value_mask = {0};
uint32_t shift, mask, field_value;
int i = 1;
- va_list ap;
- va_start(ap, field_value1);
-
/* gather all bits value/mask getting updated in this register */
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value1, mask1, shift1);
while (i < n) {
@@ -72,10 +68,48 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
mask = va_arg(ap, uint32_t);
field_value = va_arg(ap, uint32_t);
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value, mask, shift);
i++;
}
+}
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
+ va_end(ap);
+
+ /* mmio write directly */
+ reg_val = dm_read_reg(ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ dm_write_reg(ctx, addr, reg_val);
+ return reg_val;
+}
+
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
va_end(ap);
@@ -85,6 +119,24 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
uint8_t shift, uint32_t mask, uint32_t *field_value)
{
@@ -235,7 +287,7 @@ uint32_t generic_reg_get(const struct dc_context *ctx,
}
*/
-uint32_t generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line)
@@ -265,7 +317,7 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
- return reg_val;
+ return;
}
}
@@ -275,8 +327,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
BREAK_TO_DEBUGGER();
-
- return reg_val;
}
void generic_write_indirect_reg(const struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8fc223defed4..7b9429e30d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -120,6 +120,7 @@ struct dc_link {
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -246,10 +247,18 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
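A minimal sketch of how the two new bandwidth helpers declared above could be combined into a link-capacity check; the wrapper function is an assumption for illustration:

/* Illustrative only: does this timing fit on the link at the given settings? */
static bool example_timing_fits_on_link(const struct dc_link *link,
		const struct dc_link_settings *setting,
		const struct dc_crtc_timing *timing)
{
	uint32_t link_kbps = dc_link_bandwidth_kbps(link, setting);
	uint32_t required_kbps = dc_bandwidth_in_kbps_from_timing(timing);

	return required_kbps <= link_kbps;
}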
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5657cb3a2ad3..189bdab929a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -80,6 +80,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
+ struct dc_info_packet dpsdp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -221,6 +222,13 @@ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
*/
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+/*
+ * Send a DP SDP (Secondary Data Packet) message on the given stream.
+ */
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
/* TODO: Return parsed values rather than direct register read
* This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
* being refactored properly to be dce-specific
@@ -299,6 +307,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
*/
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream);
+
void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index da2009a108cf..6c2a3d9a4c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -103,7 +103,7 @@ struct dc_context {
};
-#define DC_MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 1024
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -395,7 +395,7 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr422_converter;
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
};
/* Scaling format */
enum scaling_transformation {
@@ -550,9 +550,9 @@ struct psr_config {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
-
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ bool allow_smu_optimizations;
};
union dmcu_psr_level {
@@ -654,6 +654,7 @@ struct psr_context {
* continuing power down
*/
unsigned int frame_delay;
+ bool allow_smu_optimizations;
};
struct colorspace_transform {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 4fe3664fb495..bd33c47183fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -171,24 +171,24 @@ static void submit_channel_request(
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
if (REG(AUXN_IMPCAL)) {
/* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
+ AUXN_CALOUT_ERROR_AK, 1,
+ AUXN_CALOUT_ERROR_AK, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_CALOUT_ERROR_AK, 1,
+ AUXP_CALOUT_ERROR_AK, 0);
/* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
AUXN_IMPCAL_ENABLE, 1,
AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
/* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 1,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 0);
}
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
@@ -270,7 +270,7 @@ static int read_channel_reply(struct dce_aux *engine, uint32_t size,
if (!bytes_replied)
return -1;
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ REG_UPDATE_SEQ_3(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA_RW, 1);
@@ -320,9 +320,10 @@ static enum aux_channel_operation_result get_channel_status(
*returned_bytes = 0;
/* poll to make sure that SW_DONE is asserted */
- value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
@@ -377,7 +378,6 @@ static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
{
-
enum gpio_result result;
if (!is_engine_available(engine))
@@ -442,12 +442,12 @@ static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payl
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
- enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
@@ -458,7 +458,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- acquire(aux_engine, ddc_pin);
+ if (!acquire(aux_engine, ddc_pin))
+ return -1;
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -473,28 +474,26 @@ int dce_aux_transfer(struct ddc_service *ddc,
aux_req.data = payload->data;
submit_channel_request(aux_engine, &aux_req);
- operation_result = get_channel_status(aux_engine, &returned_bytes);
-
- switch (operation_result) {
- case AUX_CHANNEL_OPERATION_SUCCEEDED:
- res = read_channel_reply(aux_engine, payload->length,
- payload->data, payload->reply,
- &status);
- break;
- case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
- res = 0;
- break;
- case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
- case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
- case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ *operation_result = get_channel_status(aux_engine, &returned_bytes);
+
+ if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
+ read_channel_reply(aux_engine, payload->length,
+ payload->data, payload->reply,
+ &status);
+ res = returned_bytes;
+ } else {
res = -1;
- break;
}
+
release_engine(aux_engine);
return res;
}
-#define AUX_RETRY_MAX 7
+#define AUX_MAX_RETRIES 7
+#define AUX_MAX_DEFER_RETRIES 7
+#define AUX_MAX_I2C_DEFER_RETRIES 7
+#define AUX_MAX_INVALID_REPLY_RETRIES 2
+#define AUX_MAX_TIMEOUT_RETRIES 3
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
@@ -502,24 +501,85 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
int i, ret = 0;
uint8_t reply;
bool payload_reply = true;
+ enum aux_channel_operation_result operation_result;
+ int aux_ack_retries = 0,
+ aux_defer_retries = 0,
+ aux_i2c_defer_retries = 0,
+ aux_timeout_retries = 0,
+ aux_invalid_reply_retries = 0;
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
}
- for (i = 0; i < AUX_RETRY_MAX; i++) {
- ret = dce_aux_transfer(ddc, payload);
-
- if (ret >= 0) {
- if (*payload->reply == 0) {
- if (!payload_reply)
- payload->reply = NULL;
- return true;
+ for (i = 0; i < AUX_MAX_RETRIES; i++) {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ aux_timeout_retries = 0;
+ aux_invalid_reply_retries = 0;
+
+ switch (*payload->reply) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ if (!payload->write && payload->length != ret) {
+ if (++aux_ack_retries >= AUX_MAX_RETRIES)
+ goto fail;
+ else
+ udelay(300);
+ } else
+ return true;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ aux_defer_retries = 0;
+ if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ default:
+ goto fail;
}
- }
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+ goto fail;
+ else
+ udelay(400);
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+ * with a 400us wait in between each. Hardware already waits
+ * for 550us, therefore no wait is required here.
+ */
+ }
+ break;
- udelay(1000);
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ default:
+ goto fail;
+ }
}
+
+fail:
+ if (!payload_reply)
+ payload->reply = NULL;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index e28ed6a00ff4..ce6a26d189b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -123,8 +123,9 @@ bool dce110_aux_engine_acquire(
struct dce_aux *aux_engine,
struct ddc *ddc);
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *cmd);
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *cmd,
+ enum aux_channel_operation_result *operation_result);
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 6e142c2db986..963686380738 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (context->bw.dce.dispclk_khz >
+ if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
@@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
- < context->bw.dce.dispclk_khz)
+ < context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
@@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements(
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
+ context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
+ context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
+ context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
- context->bw.dce.sclk_khz);
+ context->bw_ctx.bw.dce.sclk_khz);
/*
* As a workaround for >4x4K lightup, set dcfclock to the min_engine_clock value.
@@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
+ = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
@@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
@@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 71d5777de961..f70437aae8e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -978,7 +978,7 @@ static bool dce110_clock_source_power_down(
}
static bool get_pixel_clk_frequency_100hz(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index c2926cf19dee..818536eea00a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -51,6 +51,9 @@
#define PSR_SET_WAITLOOP 0x31
#define MCP_INIT_DMCU 0x88
#define MCP_INIT_IRAM 0x89
+#define MCP_SYNC_PHY_LOCK 0x90
+#define MCP_SYNC_PHY_UNLOCK 0x91
+#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
static bool dce_dmcu_init(struct dmcu *dmcu)
@@ -213,9 +216,6 @@ static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
- REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
-
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -342,9 +342,32 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu)
IRAM_RD_ADDR_AUTO_INC, 0);
}
+static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu,
+ uint32_t fractional_pwm)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* Wait until microcontroller is ready to process interrupt */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set PWM fractional enable/disable */
+ REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm);
+
+ /* Set command to enable or disable fractional PWM microcontroller */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_BL_SET_PWM_FRAC);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+}
+
static bool dcn10_dmcu_init(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
/* Definition of DC_DMCU_SCRATCH
@@ -382,9 +405,14 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
if (dmcu->dmcu_state == DMCU_RUNNING) {
/* Retrieve and cache the DMCU firmware version. */
dcn10_get_dmcu_version(dmcu);
+
+ /* Initialize DMCU to use fractional PWM or not */
+ dcn10_dmcu_enable_fractional_pwm(dmcu,
+ (config->disable_fractional_pwm == false) ? 1 : 0);
status = true;
- } else
+ } else {
status = false;
+ }
break;
case DMCU_RUNNING:
@@ -594,7 +622,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ if (psr_context->allow_smu_optimizations)
REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
/* waitDMCUReadyForCmd */
@@ -615,6 +643,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
psr_context->psrFrameCaptureIndicationReq;
masterCmdData1.bits.aux_chan = psr_context->channel;
masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ masterCmdData1.bits.allow_smu_optimizations = psr_context->allow_smu_optimizations;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
masterCmdData1.u32All);
@@ -635,6 +664,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
masterCmdData3.u32All);
+
/* setDMCUParam_Cmd */
REG_UPDATE(MASTER_COMM_CMD_REG,
MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
@@ -691,7 +721,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
return true;
}
-#endif
+#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
static const struct dmcu_funcs dce_funcs = {
.dmcu_init = dce_dmcu_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index c24c0e5ea44e..60ce56f60ae3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -199,16 +199,16 @@ struct dce_dmcu {
******************************************************************/
union dce_dmcu_psr_config_data_reg1 {
struct {
- unsigned int timehyst_frames:8; /*[7:0]*/
- unsigned int hyst_lines:7; /*[14:8]*/
- unsigned int rfb_update_auto_en:1; /*[15:15]*/
- unsigned int dp_port_num:3; /*[18:16]*/
- unsigned int dcp_sel:3; /*[21:19]*/
- unsigned int phy_type:1; /*[22:22]*/
- unsigned int frame_cap_ind:1; /*[23:23]*/
- unsigned int aux_chan:3; /*[26:24]*/
- unsigned int aux_repeat:4; /*[30:27]*/
- unsigned int reserved:1; /*[31:31]*/
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int allow_smu_optimizations:1; /*[31:31]*/
} bits;
unsigned int u32All;
};
@@ -236,7 +236,7 @@ union dce_dmcu_psr_config_data_reg3 {
struct {
unsigned int psr_level:16; /*[15:0]*/
unsigned int link_rate:4; /*[19:16]*/
- unsigned int reserved:12; /*[31:20]*/
+ unsigned int reserved:12; /*[31:20]*/
} bits;
unsigned int u32All;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 40f2d6e0b122..cd26161bcc4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -346,6 +346,16 @@ static void release_engine(
}
+static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
+{
+ unsigned int arbitrate;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
+ return false;
+ return true;
+}
+
struct dce_i2c_hw *acquire_i2c_hw_engine(
struct resource_pool *pool,
struct ddc *ddc)
@@ -368,7 +378,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (!dce_i2c_hw)
return NULL;
- if (pool->i2c_hw_buffer_in_use)
+ if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw))
return NULL;
do {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 7f19bb439665..575500755b2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -29,7 +29,8 @@
enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
- DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW,
+ DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY = 2,
};
enum dc_i2c_arbitration {
@@ -129,7 +130,8 @@ enum {
I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
- I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, mask_sh)
#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
@@ -170,6 +172,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
+ uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_mask {
@@ -207,6 +210,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
+ uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1fa2d4fd7a35..14309fe6f2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -272,7 +272,8 @@ static void dce110_update_hdmi_info_packet(
static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
uint32_t h_active_start;
@@ -977,7 +978,7 @@ static void dce110_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 23044e6723e8..e938bf9986d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -378,6 +378,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -756,7 +778,8 @@ static enum dc_status build_mapped_resource(
bool dce100_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -768,11 +791,11 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO: implement when needed; for now hardcode the max value */
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1023,6 +1046,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5e4db3712eef..7ac50ab1b762 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -616,7 +616,7 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
- bool is_hdmi;
+ bool is_hdmi_tmds;
bool is_dp;
ASSERT(pipe_ctx->stream);
@@ -624,13 +624,13 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi && !is_dp)
+ if (!is_hdmi_tmds && !is_dp)
return;
- if (is_hdmi)
+ if (is_hdmi_tmds)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -935,13 +935,31 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
+// Static helper function which calls the correct function
+// based on pp_smu version
+static void set_pme_wa_enable_by_version(struct dc *dc)
+{
+ struct pp_smu_funcs *pp_smu = NULL;
+
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
+
+ if (pp_smu) {
+ if (pp_smu->ctx.ver == PP_SMU_VER_RV && pp_smu->rv_funcs.set_pme_wa_enable)
+ pp_smu->rv_funcs.set_pme_wa_enable(&(pp_smu->ctx));
+ }
+}
+
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
/* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
+ if (core_dc->res_pool->pp_smu)
+ pp_smu = core_dc->res_pool->pp_smu;
+
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -951,30 +969,31 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(core_dc);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
+ pipe_ctx->stream_res.stream_enc, false);
}
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
- !dc->debug.az_endpoint_mute_only) {
+ !dc->debug.az_endpoint_mute_only)
/*only disable az_endpoint if power down or free*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -989,9 +1008,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
- if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(dc);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
@@ -1007,7 +1026,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1032,7 +1051,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
/* only 3 items below are used by unblank */
- params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1147,8 +1166,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dccg->funcs->get_dp_ref_clk_frequency(
- state->dccg);
+ state->clk_mgr->funcs->get_dp_ref_clk_frequency(
+ state->clk_mgr);
}
audio_output->pll_info.feed_back_divider =
@@ -1349,7 +1368,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1358,7 +1377,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
stream->timing.display_color_depth,
- pipe_ctx->stream->signal);
+ stream->signal);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
@@ -1532,6 +1551,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
+ if (dc->hwss.init_pipes)
+ dc->hwss.init_pipes(dc, context);
+
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
@@ -1608,18 +1630,18 @@ static void dce110_set_displaymarks(
dc->bw_vbios->blackout_duration, pipe_ctx->stream);
pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.stutter_entry_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
num_pipes++;
pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
}
num_pipes++;
@@ -2612,7 +2634,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.xtalin_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7549adaa1542..dcd04e9ea76b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -392,6 +392,55 @@ static const struct resource_caps stoney_resource_cap = {
.num_ddc = 3,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .blends_with_below = true,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
+static const struct dc_plane_cap underlay_plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_UNDERLAY,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = false,
+ .nv12 = true,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -854,7 +903,8 @@ static enum dc_status build_mapped_resource(
static bool dce110_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -868,7 +918,7 @@ static bool dce110_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -878,8 +928,8 @@ static bool dce110_validate_bandwidth(
context->streams[0]->timing.v_addressable,
context->streams[0]->timing.pix_clk_100hz / 10);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -893,34 +943,34 @@ static bool dce110_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -1371,6 +1421,11 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < pool->base.underlay_pipe_index; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index ea3065d63372..a480b15f6885 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -397,6 +397,28 @@ static const struct resource_caps polaris_11_resource_cap = {
.num_ddc = 5,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -804,7 +826,8 @@ static enum dc_status build_mapped_resource(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -818,7 +841,7 @@ bool dce112_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -826,8 +849,8 @@ bool dce112_validate_bandwidth(
"%s: Bandwidth validation failed!",
__func__);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -841,34 +864,34 @@ bool dce112_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -887,7 +910,7 @@ enum dc_status resource_map_phy_clock_resources(
return DC_ERROR_UNEXPECTED;
if (dc_is_dp_signal(pipe_ctx->stream->signal)
- || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ || dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
else
@@ -1310,6 +1333,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
/* Create hardware sequencer */
dce112_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index 95a403396219..1f57ebc6f9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 312a0aebf91f..6d49c7143c67 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -454,6 +454,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -1171,6 +1193,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index c109ace96be9..27d0cc394963 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -387,6 +387,28 @@ static const struct resource_caps res_cap_83 = {
.num_ddc = 2,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};
@@ -790,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
bool dce80_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -802,11 +825,11 @@ bool dce80_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO: implement when needed, but for now hardcode the max value */
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1032,6 +1055,10 @@ static bool dce80_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1237,6 +1264,10 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1438,6 +1469,10 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index afe8c42211cd..2b2de1d913c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -43,23 +43,6 @@
#define DC_LOGGER \
clk_mgr->ctx->logger
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-}
-
static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -167,11 +150,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -179,7 +159,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
bool enter_display_off = false;
display_count = get_active_display_cnt(dc, context);
-
+ if (dc->res_pool->pp_smu)
+ pp_smu = &dc->res_pool->pp_smu->rv_funcs;
if (display_count == 0)
enter_display_off = true;
@@ -189,10 +170,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
- if (pp_smu->set_display_count)
+ if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-
- smu_req.display_count = display_count;
}
if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
@@ -203,7 +182,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
-
send_request_to_lower = true;
}
@@ -213,24 +191,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
- smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
-
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
-
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000;
-
send_request_to_lower = true;
}
@@ -239,17 +211,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
@@ -259,27 +227,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
-
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
-
- *smu_req_cur = smu_req;
}
static const struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
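The refactored dcn1_update_clocks() above now hands the clock floors straight to pp_smu in MHz. A minimal standalone sketch of the two kHz-to-MHz conversions it relies on; khz_to_mhz_floor()/khz_to_mhz_ceil() are illustrative helper names, not driver functions.

static inline int khz_to_mhz_floor(int khz)
{
	return khz / 1000;		/* truncating, as used for the set_hard_min_*_by_freq calls */
}

static inline int khz_to_mhz_ceil(int khz)
{
	return (khz + 999) / 1000;	/* rounds up, as used for set_min_deep_sleep_dcfclk */
}

/* e.g. a 300500 kHz deep-sleep DCFCLK request becomes 301 MHz rather than 300 MHz,
 * so the value sent to the SMU still covers the computed requirement.
 */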
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index a995eda443a3..97007cf33665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -34,10 +34,6 @@ struct clk_bypass {
uint32_t dprefclk_bypass;
};
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context);
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index cd1ebe57ed59..6f4b24756323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -91,13 +91,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
@@ -392,6 +385,10 @@ void dpp1_cnv_setup (
default:
break;
}
+
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@@ -403,7 +400,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
- tbl_entry.color_space = input_color_space;
+ tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 41f0f4c912e7..882bcc5a40f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,13 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
{COLOR_SPACE_SRGB,
{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index c7642e748297..ce21a290bf3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
int *num_part_y,
int *num_part_c)
{
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
+ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
+
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+
+ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 816;
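The "+71 to ceil" and "+5 to ceil" adjustments above are the standard integer ceiling-division idiom. A minimal sketch under that assumption; div_round_up() is an illustrative helper, not the driver's macro.

#include <assert.h>

static int div_round_up(int num, int den)
{
	return (num + den - 1) / den;	/* adding den - 1 rounds any remainder up */
}

int main(void)
{
	/* mirrors "(line_size * lb_bpc + 71) / 72" and "(line_size + 5) / 6" above */
	assert(div_round_up(1920 * 30, 72) == (1920 * 30 + 71) / 72);
	assert(div_round_up(1921, 6) == (1921 + 5) / 6);
	return 0;
}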
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index e161ad836812..0db2a6e96fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -258,8 +258,9 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
- REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+ REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
void hubbub1_program_watermarks(
@@ -282,7 +283,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
@@ -309,7 +311,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -322,7 +325,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -336,7 +340,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -347,7 +352,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
@@ -374,7 +380,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -387,7 +394,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -401,7 +409,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -412,7 +421,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
@@ -439,7 +449,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -452,7 +463,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -466,7 +478,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -477,7 +490,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
@@ -504,7 +518,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -517,7 +532,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -531,7 +547,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -866,6 +883,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
.wm_read_state = hubbub1_wm_read_state,
+ .program_watermarks = hubbub1_program_watermarks,
};
void hubbub1_construct(struct hubbub *hubbub,
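Every watermark write above follows the same pattern: convert a nanosecond watermark into refclk cycles and clamp it to the 21-bit field maximum 0x1fffff before the REG_SET. A sketch of that conversion, inferred from the call sites rather than copied from the driver.

#include <stdint.h>

static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
					uint32_t clamp_value)
{
	uint32_t cycles = wm_ns * refclk_mhz / 1000;	/* ns * MHz / 1000 = clock cycles */

	return cycles > clamp_value ? clamp_value : cycles;
}

/* e.g. a 4000 ns urgent watermark at a 400 MHz refclk programs 1600 cycles,
 * well under the 0x1fffff clamp used above.
 */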
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 9cd4a5194154..85811b24a497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -32,18 +32,14 @@
#define TO_DCN10_HUBBUB(hubbub)\
container_of(hubbub, struct dcn10_hubbub, base)
-#define HUBHUB_REG_LIST_DCN()\
+#define HUBBUB_REG_LIST_DCN_COMMON()\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
@@ -54,6 +50,12 @@
SR(DCHUBBUB_TEST_DEBUG_DATA),\
SR(DCHUBBUB_SOFT_RESET)
+#define HUBBUB_VM_REG_LIST() \
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)
+
#define HUBBUB_SR_WATERMARK_REG_LIST()\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
@@ -65,7 +67,8 @@
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
#define HUBBUB_REG_LIST_DCN10(id)\
- HUBHUB_REG_LIST_DCN(), \
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_VM_REG_LIST(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
SR(DCHUBBUB_SDPIF_FB_TOP),\
SR(DCHUBBUB_SDPIF_FB_BASE),\
@@ -122,8 +125,7 @@ struct dcn_hubbub_registers {
#define HUBBUB_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
-
-#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
@@ -133,10 +135,29 @@ struct dcn_hubbub_registers {
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
- HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh)
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh)
+
+#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\
- HUBBUB_MASK_SH_LIST_DCN(mask_sh), \
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
@@ -167,15 +188,35 @@ struct dcn_hubbub_registers {
type FB_OFFSET;\
type AGP_BOT;\
type AGP_TOP;\
- type AGP_BASE
+ type AGP_BASE;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+
+#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
};
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 0ba68d41b9c3..54b219a710d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1178,6 +1178,10 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
+void hubp1_init(struct hubp *hubp)
+{
+ //do nothing
+}
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1201,7 +1205,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
-
+ .hubp_init = hubp1_init,
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a6d6dfe00617..99d2b7e2a578 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -34,6 +34,7 @@
#define HUBP_REG_LIST_DCN(id)\
SRI(DCHUBP_CNTL, HUBP, id),\
SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(HUBPREQ_DEBUG, HUBP, id),\
SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
SRI(DCSURF_TILING_CONFIG, HUBP, id),\
SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
@@ -138,6 +139,7 @@
#define HUBP_COMMON_REG_VARIABLE_LIST \
uint32_t DCHUBP_CNTL; \
uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t HUBPREQ_DEBUG; \
uint32_t DCSURF_ADDR_CONFIG; \
uint32_t DCSURF_TILING_CONFIG; \
uint32_t DCSURF_SURFACE_PITCH; \
@@ -247,7 +249,7 @@
.field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
-#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
@@ -331,7 +333,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
@@ -339,7 +340,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
@@ -373,6 +373,11 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
/* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
@@ -595,6 +600,9 @@
type AGP_BASE;\
type AGP_BOT;\
type AGP_TOP;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -743,4 +751,6 @@ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
void hubp1_vready_workaround(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+void hubp1_init(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index d1a8f1c302a9..33d311cea28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -65,7 +65,7 @@ void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
@@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc,
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
@@ -714,7 +714,7 @@ static enum dc_status dcn10_enable_stream_timing(
return DC_OK;
}
-static void reset_back_end_for_pipe(
+static void dcn10_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -889,22 +889,23 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn10_verify_allow_pstate_change_high(dc);
}
-static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
dpp_pg_control(hws, dpp->inst, false);
- hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
+ hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
+ "Power gated front end %d\n", hubp->inst);
}
}
@@ -931,7 +932,9 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_power_down(dc, pipe_ctx);
+ plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -976,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
-
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
+ tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
@@ -1001,16 +1002,19 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- // W/A for issue with dc_post_update_surfaces_to_stream
- hubp->power_gated = true;
-
/* There is an assumption that pipe_ctx is not mapping irregularly
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (can_apply_seamless_boot &&
+ pipe_ctx->stream != NULL &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
+ pipe_ctx->stream_res.tg))
continue;
+ /* Disable on the current state so the new one isn't cleared. */
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1108,6 +1112,25 @@ static void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
+ /* If taking control over from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ hubp->funcs->hubp_init(hubp);
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ plane_atomic_power_down(dc, dpp, hubp);
+ }
+
+ apply_DEGVIDCN10_253_wa(dc);
+ }
+
for (i = 0; i < dc->res_pool->audio_count; i++) {
struct audio *audio = dc->res_pool->audios[i];
@@ -1137,12 +1160,9 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
-
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, dc->current_state);
}
-static void reset_hw_ctx_wrap(
+static void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
@@ -1164,10 +1184,9 @@ static void reset_hw_ctx_wrap(
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating) {
+ dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating)
dc->hwss.enable_stream_gating(dc, pipe_ctx);
- }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1836,7 +1855,7 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
}
@@ -1931,7 +1950,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
- COLOR_SPACE_YCBCR601_LIMITED);
+ plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
@@ -2051,7 +2070,7 @@ void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
@@ -2120,6 +2139,9 @@ void update_dchubp_dpp(
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
@@ -2310,6 +2332,7 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
+ bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2319,7 +2342,13 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg;
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+ interdependent_update = top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+ if (interdependent_update)
+ lock_all_pipes(dc, context, true);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
/* OTG blank before remove all front end */
@@ -2339,15 +2368,9 @@ static void dcn10_apply_ctx_for_surface(
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+ old_pipe_ctx->plane_res.hubp &&
+ old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx);
- /*
- * power down fe will unlock when calling reset, need
- * to lock it back here. Messy, need rework.
- */
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- }
}
if ((!pipe_ctx->plane_state ||
@@ -2366,29 +2389,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
- if (top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state
- || !tg->funcs->is_tg_enabled(tg))
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+ !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
- tg->funcs->lock(tg);
-
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
-
- tg->funcs->unlock(tg);
}
+ if (interdependent_update)
+ lock_all_pipes(dc, context, false);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2420,12 +2439,14 @@ static void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2433,9 +2454,9 @@ static void dcn10_prepare_bandwidth(
false);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2450,12 +2471,14 @@ static void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2463,9 +2486,9 @@ static void dcn10_optimize_bandwidth(
true);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2654,7 +2677,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.is_flip_pending = flip_pending;
+ plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
if (!flip_pending)
plane_state->status.current_address = plane_state->status.requested_address;
@@ -2685,16 +2708,22 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
+ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
+ uint32_t x_offset = min(x_plane, pos_cpy.x);
+ uint32_t y_offset = min(y_plane, pos_cpy.y);
- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x -= x_offset;
+ pos_cpy.y -= y_offset;
+ pos_cpy.x_hotspot += (x_plane - x_offset);
+ pos_cpy.y_hotspot += (y_plane - y_offset);
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
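The new x_offset/y_offset arithmetic above splits the plane's dst_rect origin between the cursor position and its hotspot, so the programmed position never underflows. A standalone sketch for one axis; adjust_cursor_axis() is an illustrative helper, not a driver function.

#include <stdint.h>
#include <stdio.h>

static void adjust_cursor_axis(uint32_t plane_start, uint32_t *pos, uint32_t *hotspot)
{
	uint32_t offset = plane_start < *pos ? plane_start : *pos;	/* min(x_plane, pos_cpy.x) */

	*pos -= offset;				/* position relative to the plane, never negative */
	*hotspot += plane_start - offset;	/* leftover plane offset is absorbed by the hotspot */
}

int main(void)
{
	uint32_t pos = 60, hotspot = 4;

	adjust_cursor_axis(100, &pos, &hotspot);	/* plane starts at x = 100, cursor requested at x = 60 */
	printf("pos=%u hotspot=%u\n", pos, hotspot);	/* prints pos=0 hotspot=44 */
	return 0;
}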
@@ -2789,6 +2818,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
+
static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
@@ -2882,6 +2938,29 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
+static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ /* only 3 items below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ params.timing.pix_clk_100hz /= 2;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2903,7 +2982,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
- .unblank_stream = dce110_unblank_stream,
+ .unblank_stream = dcn10_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
@@ -2913,7 +2992,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pipe_control_lock = dcn10_pipe_control_lock,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
.get_position = get_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084df55f..4b3b27a5d23b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 98f41d250978..991622da9ed5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -77,7 +77,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
@@ -115,7 +115,7 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
if (invarOnly)
@@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index a9db372688ff..0126a44ba012 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1304,7 +1304,6 @@ void dcn10_link_encoder_connect_dig_be_to_fe(
#define HPD_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
HPD_REG(reg_name), \
- HPD_REG_READ(reg_name), \
n, __VA_ARGS__)
#define HPD_REG_UPDATE(reg_name, field, val) \
@@ -1337,7 +1336,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
#define AUX_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
AUX_REG(reg_name), \
- AUX_REG_READ(reg_name), \
n, __VA_ARGS__)
#define AUX_REG_UPDATE(reg_name, field, val) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 09d74070a49b..7eccb54c421d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -516,6 +516,31 @@ static const struct resource_caps rv2_res_cap = {
};
#endif
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
+
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
@@ -848,14 +873,14 @@ void dcn10_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
- dm_pp_get_funcs_rv(ctx, pp_smu);
+ dm_pp_get_funcs(ctx, pp_smu);
return pp_smu;
}
@@ -865,10 +890,7 @@ static void destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
- /* TODO: free dcn version of stream encoder once implemented
- * rather than using virtual stream encoder
- */
- kfree(pool->base.stream_enc[i]);
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
@@ -921,9 +943,6 @@ static void destruct(struct dcn10_resource_pool *pool)
}
}
- for (i = 0; i < pool->base.stream_enc_count; i++)
- kfree(pool->base.stream_enc[i]);
-
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
@@ -1078,7 +1097,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
{
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1143,7 +1162,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
continue;
if (context->stream_status[i].plane_count > 2)
- return false;
+ return DC_FAIL_UNSUPPORTED_1;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
@@ -1351,7 +1370,7 @@ static bool construct(
goto fail;
}
- dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
@@ -1510,6 +1529,9 @@ static bool construct(
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->cap_funcs = cap_funcs;
return true;
@@ -1522,7 +1544,7 @@ fail:
}
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn10_resource_pool *pool =
@@ -1531,7 +1553,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
index 8f71225bc61b..999c684a0b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -39,7 +39,7 @@ struct dcn10_resource_pool {
struct resource_pool base;
};
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b08254121251..8ee9f6dc1d62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -245,7 +245,8 @@ static void enc1_update_hdmi_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
uint32_t v_active_start;
@@ -298,7 +299,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case PIXEL_ENCODING_YCBCR420:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
- REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
@@ -726,13 +726,19 @@ void enc1_stream_encoder_update_dp_info_packets(
3, /* packetIndex */
&info_frame->hdrsmd);
+ if (info_frame->dpsdp.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 4,/* packetIndex */
+ &info_frame->dpsdp);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
-
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -797,10 +803,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 150;
+ max_retries = DP_BLANK_MAX_RETRY * 250;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
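/* Illustrative arithmetic for the new retry budget above (editor's sketch,
 * not part of the patch; assumes DP_BLANK_MAX_RETRY == 20, which both the old
 * "150 -> 10us*3000" and the new "250 -> 10us*5000" comments imply):
 *
 *   20 * 250 polls * 10 us/poll = 50 ms
 *   one frame period at 24 Hz   = 1000 ms / 24 ≈ 41.7 ms
 *
 * so the 50 ms budget covers the slowest supported refresh rate with margin,
 * whereas the old 30 ms budget only covered the 16.6 ms of a 60 Hz frame.
 */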
@@ -833,14 +839,19 @@ void enc1_stream_encoder_dp_unblank(
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
+ uint32_t n_multiply = 0;
+ uint64_t m_vid_l = n_vid;
+ /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
+ if (param->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ /* param->timing.pix_clk_100hz is already half of the 4:4:4 rate for 4:2:0 */
+ n_multiply = 1;
+ }
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
- uint64_t m_vid_l = n_vid;
-
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
@@ -859,7 +870,9 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
- REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ REG_UPDATE_2(DP_VID_TIMING,
+ DP_VID_M_N_GEN_EN, 1,
+ DP_VID_N_MUL, n_multiply);
}
/* set DIG_START to 0x1 to resync FIFO */
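/* Minimal sketch of the M/N computation above (not part of the patch; the
 * helper name below is hypothetical, while div_u64() and
 * LINK_RATE_REF_FREQ_IN_KHZ are the same symbols used in the hunk).
 * DP_VID_M / DP_VID_N encode Fstream / Flink; for YCbCr 4:2:0 the timing
 * already carries the halved pixel clock, so the hardware doubles it back
 * via DP_VID_N_MUL (n_multiply = 1).
 */
static uint32_t example_compute_m_vid(uint32_t pix_clk_100hz,
				      uint32_t link_rate,
				      uint32_t n_vid /* 0x8000 above */)
{
	uint64_t m_vid_l = n_vid;

	m_vid_l *= pix_clk_100hz / 10;	/* stream clock in kHz */
	m_vid_l = div_u64(m_vid_l,
			  link_rate * LINK_RATE_REF_FREQ_IN_KHZ);
	return (uint32_t)m_vid_l;
}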
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index b7c800e10a32..e654c2f55971 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -462,7 +462,8 @@ void enc1_update_generic_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e81b24374bcb..ccbfe9680d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -58,7 +58,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable);
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 14bed5b1fa97..4fc4208d1472 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -30,6 +30,8 @@
* interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
*/
+typedef bool BOOLEAN;
+
enum pp_smu_ver {
/*
* PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -72,29 +74,6 @@ struct pp_smu_wm_range_sets {
struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS];
};
-struct pp_smu_display_requirement_rv {
- /* PPSMC_MSG_SetDisplayCount: count
- * 0 triggers S0i2 optimization
- */
- unsigned int display_count;
-
- /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
- * FCLK will vary with DPM, but never below requested hard min
- */
- unsigned int hard_min_fclk_mhz;
-
- /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
- * fixed clock at requested freq, either from FCH bypass or DFS
- */
- unsigned int hard_min_dcefclk_mhz;
-
- /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
- * when DF is in cstate, dcf clock is further divided down
- * to just above given frequency
- */
- unsigned int min_deep_sleep_dcefclk_mhz;
-};
-
struct pp_smu_funcs_rv {
struct pp_smu pp_smu;
@@ -137,12 +116,6 @@ struct pp_smu_funcs_rv {
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
- /*
- * Legacy functions. Used for backwards comp. with existing
- * PPlib code.
- */
- void (*set_display_requirement)(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req);
};
struct pp_smu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1961cc6d9143..b426ba02b793 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -52,30 +52,17 @@ irq_handler_idx dm_register_interrupt(
* GPU registers access
*
*/
-
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name);
/* enable for debugging new code, this adds 50k to the driver size. */
/* #define DM_CHECK_ADDR_0 */
#define dm_read_reg(ctx, address) \
dm_read_reg_func(ctx, address, __func__)
-static inline uint32_t dm_read_reg_func(
- const struct dc_context *ctx,
- uint32_t address,
- const char *func_name)
-{
- uint32_t value;
-#ifdef DM_CHECK_ADDR_0
- if (address == 0) {
- DC_ERR("invalid register read; address = 0\n");
- return 0;
- }
-#endif
- value = cgs_read_register(ctx->cgs_device, address);
- trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
- return value;
-}
#define dm_write_reg(ctx, address, value) \
dm_write_reg_func(ctx, address, value, __func__)
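/* Sketch of where the deleted inline presumably lands as an out-of-line
 * definition (not part of this hunk; the body is reconstructed from the
 * inline removed above, so the real .c definition may differ):
 */
uint32_t dm_read_reg_func(const struct dc_context *ctx,
			  uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif
	value = cgs_read_register(ctx->cgs_device, address);
	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
	return value;
}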
@@ -144,10 +131,14 @@ static inline uint32_t set_reg_field_value_ex(
reg_name ## __ ## reg_field ## _MASK,\
reg_name ## __ ## reg_field ## __SHIFT)
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
uint32_t addr, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
@@ -155,7 +146,7 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
* return number of polls before condition is met
* return 0 if condition is not met after specified timeout tries
*/
-unsigned int generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
@@ -172,11 +163,10 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
- dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
n, __VA_ARGS__)
#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
- generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
n, __VA_ARGS__)
#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
@@ -223,8 +213,8 @@ bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
-void dm_pp_get_funcs_rv(struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs);
+void dm_pp_get_funcs(struct dc_context *ctx,
+ struct pp_smu_funcs *funcs);
/* DAL calls this function to notify PP about completion of Mode Set.
* For PP it means that current DCE clocks are those which were returned
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 77200711abbe..a3d1be20dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"
-struct pp_smu_funcs_rv;
+struct pp_smu_funcs;
struct dm_pp_clock_range {
int min_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index d303b789adfe..80ffd7d958b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,40 +26,14 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
-extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
-
-static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
-{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *soc = dcn1_0_soc;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project)
{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *ip = dcn1_0_ip;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
-{
- if (lib->project != project) {
- set_soc_bounding_box(&lib->soc, project);
- set_ip_params(&lib->ip, project);
- lib->project = project;
- }
+ lib->soc = *soc_bb;
+ lib->ip = *ip_params;
+ lib->project = project;
}
const char *dml_get_status_message(enum dm_validation_status status)
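/* Usage sketch for the new dml_init_instance() signature (not part of the
 * patch): callers now pass the SoC bounding-box and IP tables explicitly
 * instead of relying on a per-project switch, e.g. the Raven path in the
 * dcn10_resource.c hunk above does:
 *
 *   dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
 *
 * A new ASIC would supply its own (hypothetical) tables the same way rather
 * than extending the removed set_soc_bounding_box()/set_ip_params() helpers.
 */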
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index a730e0209c05..1b546dba34bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,7 +41,10 @@ struct display_mode_lib {
struct dal_logger *logger;
};
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project);
const char *dml_get_status_message(enum dm_validation_status status);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 391183e3428f..c5b791d158a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,6 +25,8 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
+#define MAX_CLOCK_LIMIT_STATES 8
+
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -103,7 +105,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[8];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
};
struct _vcs_dpi_ip_params_st {
@@ -416,6 +418,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_per_vm_group_flip;
unsigned int refcyc_per_vm_req_vblank;
unsigned int refcyc_per_vm_req_flip;
+ unsigned int refcyc_per_vm_dmdata;
};
struct _vcs_dpi_display_ttu_regs_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 48400d642610..e2d82aacd3bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -321,6 +321,9 @@ void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st d
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
+ dlg_regs.refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
index fe6301cb8681..1b01a9a58d14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -167,7 +167,7 @@ struct clock_source_funcs {
struct pixel_clk_params *,
struct pll_settings *);
bool (*get_pixel_clk_frequency_100hz)(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 2e61a22ef4b2..8dca3b7700e5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,7 +43,7 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-
+ DC_FAIL_UNSUPPORTED_1 = 18,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 986ed1728644..6f5ab05d6467 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,10 +95,10 @@ struct resource_funcs {
void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
-
bool (*validate_bandwidth)(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status (*validate_global)(
struct dc *dc,
@@ -144,8 +144,7 @@ struct resource_pool {
struct stream_encoder *stream_enc[MAX_PIPES * 2];
struct hubbub *hubbub;
struct mpc *mpc;
- struct pp_smu_funcs_rv *pp_smu;
- struct pp_smu_display_requirement_rv pp_smu_req;
+ struct pp_smu_funcs *pp_smu;
struct dce_aux *engines[MAX_PIPES];
struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
@@ -154,7 +153,12 @@ struct resource_pool {
unsigned int pipe_count;
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
- unsigned int ref_clock_inKhz;
+
+ struct {
+ unsigned int xtalin_clock_inKhz;
+ unsigned int dccg_ref_clock_inKhz;
+ unsigned int dchub_ref_clock_inKhz;
+ } ref_clocks;
unsigned int timing_generator_count;
/*
@@ -262,18 +266,22 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
};
-union bw_context {
+union bw_output {
struct dcn_bw_output dcn;
struct dce_bw_output dce;
};
+struct bw_context {
+ union bw_output bw;
+ struct display_mode_lib dml;
+};
/**
* struct dc_state - The full description of a state requested by a user
*
* @streams: Stream properties
* @stream_status: The planes on a given stream
* @res_ctx: Persistent state of resources
- * @bw: The output from bandwidth and watermark calculations
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
* @pp_display_cfg: PowerPlay clocks and settings
* @dcn_bw_vars: non-stack memory to support bandwidth calculations
*
@@ -285,7 +293,7 @@ struct dc_state {
struct resource_context res_ctx;
- union bw_context bw;
+ struct bw_context bw_ctx;
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
@@ -293,7 +301,11 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct clk_mgr *dccg;
+ struct clk_mgr *clk_mgr;
+
+ struct {
+ bool full_update_needed : 1;
+ } commit_hints;
struct kref refcount;
};
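/* Illustrative access-path change implied by the structs above (not part of
 * the patch): code that previously read the DCN watermarks as
 *
 *   context->bw.dcn.watermarks
 *
 * now goes through the wrapping bw_context, i.e.
 *
 *   context->bw_ctx.bw.dcn.watermarks
 *
 * and each dc_state additionally carries its own copy of the display mode
 * library in context->bw_ctx.dml.
 */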
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 16fd4dc6c4dd..b1fab251c09b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,8 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload);
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result);
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index ece954a40a8e..263c09630c06 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
@@ -631,5 +632,7 @@ void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
+
#endif /* __DCN_CALCS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 23a4b18e5fee..31bd6d5183ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -42,6 +42,8 @@ struct clk_mgr_funcs {
bool safe_to_lower);
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+
+ void (*init_clocks)(struct clk_mgr *clk_mgr);
};
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 95a56d012626..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -39,6 +39,10 @@ struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
int req_dppclk);
+ void (*get_dccg_ref_freq)(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+ void (*dccg_init)(struct dccg *dccg);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 9d2d8e51306c..93667e8b23b3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -73,6 +73,16 @@ struct hubbub_funcs {
void (*wm_read_state)(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
+
+ void (*get_dchub_ref_freq)(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+
+ void (*program_watermarks)(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index cbaa43853611..c68f0ce346c7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -70,6 +70,8 @@ struct dmcu_funcs {
void (*get_psr_wait_loop)(struct dmcu *dmcu,
unsigned int *psr_wait_loop_number);
bool (*is_dmcu_initialized)(struct dmcu *dmcu);
+ bool (*lock_phy)(struct dmcu *dmcu);
+ bool (*unlock_phy)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 1cd07e94ee63..455df4999797 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -130,6 +130,7 @@ struct hubp_funcs {
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
+ void (*hubp_init)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index da85537a4488..4c8e2c6fb6db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -146,6 +146,12 @@ struct out_csc_color_matrix {
uint16_t regval[12];
};
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 4051493557bc..49854eb73d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -63,11 +63,13 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* custom sdp message */
+ struct dc_info_packet dpsdp;
};
struct encoder_unblank_param {
struct dc_link_settings link_settings;
- unsigned int pixel_clk_khz;
+ struct dc_crtc_timing timing;
};
struct encoder_set_dp_phy_pattern_param {
@@ -88,7 +90,8 @@ struct stream_encoder_funcs {
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index c25f7df7b5e3..067d53caf28a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -187,8 +187,10 @@ struct timing_generator_funcs {
bool (*did_triggered_reset_occur)(struct timing_generator *tg);
void (*setup_global_swap_lock)(struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params);
+ void (*setup_global_lock)(struct timing_generator *tg);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
+ void (*lock_global)(struct timing_generator *tg);
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 7676f25216b1..33905468e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -176,6 +176,10 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+ void (*pipe_control_lock_global)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void (*blank_pixel_data)(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index cf5a84b9e27c..8503d9cc4763 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -52,7 +52,7 @@
/* macro to set register fields. */
#define REG_SET_N(reg_name, n, initial_val, ...) \
- generic_reg_update_ex(CTX, \
+ generic_reg_set_ex(CTX, \
REG(reg_name), \
initial_val, \
n, __VA_ARGS__)
@@ -225,7 +225,6 @@
#define REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
REG(reg_name), \
- REG_READ(reg_name), \
n, __VA_ARGS__)
#define REG_UPDATE(reg_name, field, val) \
@@ -380,16 +379,11 @@
/* macro to update a register field to specified values in given sequences.
* useful when toggling bits
*/
-#define REG_UPDATE_SEQ(reg, field, value1, value2) \
-{ uint32_t val = REG_UPDATE(reg, field, value1); \
- REG_SET(reg, val, field, value2); }
-
-/* macro to update fields in register 1 field at a time in given order */
-#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+#define REG_UPDATE_SEQ_2(reg, f1, v1, f2, v2) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
REG_SET(reg, val, f2, v2); }
-#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+#define REG_UPDATE_SEQ_3(reg, f1, v1, f2, v2, f3, v3) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
val = REG_SET(reg, val, f2, v2); \
REG_SET(reg, val, f3, v3); }
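/* Usage sketch for the renamed sequenced-update macros (not part of the
 * patch; the register/field names below are hypothetical). Toggling a
 * one-shot bit writes the register twice, in order:
 *
 *   REG_UPDATE_SEQ_2(EXAMPLE_CNTL, EXAMPLE_TRIGGER, 1, EXAMPLE_TRIGGER, 0);
 *
 * which expands to a read-modify-write via REG_UPDATE() followed by a
 * REG_SET() that reuses the cached register value, so both field writes hit
 * the hardware as separate transactions in the given sequence.
 */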
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 0086a2f1d21a..3ce0a4fc5822 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -70,11 +70,9 @@ bool resource_construct(
struct resource_pool *pool,
const struct resource_create_funcs *create_funcs);
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id);
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version);
void dc_destroy_resource_pool(struct dc *dc);
@@ -131,7 +129,8 @@ bool resource_attach_surfaces_to_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool);
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream);
@@ -172,4 +171,7 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx);
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index afe0876fe6f8..86987f5e8bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
@@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 1ea7256ec89b..750ba0ab4106 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 8a2066c313fe..de218fe84a43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
@@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index e04ae49243f6..10ac6deff5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 3dc1733eea20..fdcf9e66d852 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -29,7 +29,8 @@
static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space) {}
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 52a73332befb..89ef9f6860e5 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -503,6 +503,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg)
* fractional
*/
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg);
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg);
unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index f56d2891475f..beed70179bb5 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -45,6 +45,11 @@ enum signal_type {
};
/* help functions for signal types manipulation */
+static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
static inline bool dc_is_hdmi_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 0fbc8fbc3541..a1055413bade 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1854,6 +1854,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bfd27f10879e..19b1eaebe484 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
+/* Threshold to change BTR multiplier (to avoid frequent changes) */
+#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -48,6 +50,93 @@ struct core_freesync {
struct dc *dc;
};
+void setFieldWithMask(unsigned char *dest, unsigned int mask, unsigned int value)
+{
+ unsigned int shift = 0;
+
+ if (!mask || !dest)
+ return;
+
+ while (!((mask >> shift) & 1))
+ shift++;
+
+ //reset
+ *dest = *dest & ~mask;
+ //set
+ //don't let value span past mask
+ value = value & (mask >> shift);
+ //insert value
+ *dest = *dest | (value << shift);
+}
+
+// VTEM Byte Offset
+#define VRR_VTEM_PB0 0
+#define VRR_VTEM_PB1 1
+#define VRR_VTEM_PB2 2
+#define VRR_VTEM_PB3 3
+#define VRR_VTEM_PB4 4
+#define VRR_VTEM_PB5 5
+#define VRR_VTEM_PB6 6
+
+#define VRR_VTEM_MD0 7
+#define VRR_VTEM_MD1 8
+#define VRR_VTEM_MD2 9
+#define VRR_VTEM_MD3 10
+
+
+// VTEM Byte Masks
+//PB0
+#define MASK__VRR_VTEM_PB0__RESERVED0 0x01
+#define MASK__VRR_VTEM_PB0__SYNC 0x02
+#define MASK__VRR_VTEM_PB0__VFR 0x04
+#define MASK__VRR_VTEM_PB0__AFR 0x08
+#define MASK__VRR_VTEM_PB0__DS_TYPE 0x30
+ //0: Periodic pseudo-static EM Data Set
+ //1: Periodic dynamic EM Data Set
+ //2: Unique EM Data Set
+ //3: Reserved
+#define MASK__VRR_VTEM_PB0__END 0x40
+#define MASK__VRR_VTEM_PB0__NEW 0x80
+
+//PB1
+#define MASK__VRR_VTEM_PB1__RESERVED1 0xFF
+
+//PB2
+#define MASK__VRR_VTEM_PB2__ORGANIZATION_ID 0xFF
+ //0: This is a Vendor Specific EM Data Set
+ //1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
+ //2: This EM Data Set is defined by CTA-861-G
+ //3: This EM Data Set is defined by VESA
+//PB3
+#define MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
+//PB4
+#define MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
+//PB5
+#define MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
+//PB6
+#define MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
+
+
+
+//PB7-27 (20 bytes):
+//PB7 = MD0
+#define MASK__VRR_VTEM_MD0__VRR_EN 0x01
+#define MASK__VRR_VTEM_MD0__M_CONST 0x02
+#define MASK__VRR_VTEM_MD0__RESERVED2 0x0C
+#define MASK__VRR_VTEM_MD0__FVA_FACTOR_M1 0xF0
+
+//MD1
+#define MASK__VRR_VTEM_MD1__BASE_VFRONT 0xFF
+
+//MD2
+#define MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
+#define MASK__VRR_VTEM_MD2__RB 0x04
+#define MASK__VRR_VTEM_MD2__RESERVED3 0xF8
+
+//MD3
+#define MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
+
+
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
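/* Worked example for setFieldWithMask() introduced above (not part of the
 * patch; numbers illustrative). Writing FVA_FACTOR_M1 = 3 into MD0, whose
 * mask is 0xF0:
 *
 *   shift  = 4                        (first set bit of 0xF0)
 *   value  = 3 & (0xF0 >> 4) = 3      (clamped to the field width)
 *   *dest  = (*dest & ~0xF0) | (3 << 4)
 *
 * Single-bit flags follow the same pattern, e.g. the VRR enable used below:
 *
 *   setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 1);
 */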
@@ -248,6 +337,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int frames_to_insert = 0;
unsigned int min_frame_duration_in_ns = 0;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
+ unsigned int delta_from_mid_point_delta_in_us;
min_frame_duration_in_ns = ((unsigned int) (div64_u64(
(1000000000ULL * 1000000),
@@ -318,10 +408,27 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
frames_to_insert = mid_point_frames_ceil;
- else
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
frames_to_insert = mid_point_frames_floor;
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
+
+ /* Prefer current frame multiplier when BTR is enabled unless it drifts
+ * too far from the midpoint
+ */
+ if (in_out_vrr->btr.frames_to_insert != 0 &&
+ delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
+ if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
+ in_out_vrr->max_duration_in_us) &&
+ ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
+ in_out_vrr->min_duration_in_us))
+ frames_to_insert = in_out_vrr->btr.frames_to_insert;
+ }
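	/* Illustrative reading of the rule above (not part of the patch): when
	 * the ceil and floor candidates sit almost equally far from the mid
	 * point (their deltas differ by less than BTR_DRIFT_MARGIN = 2000 us),
	 * keep the multiplier from the previous frame, e.g. frames_to_insert = 2
	 * for last_render_time_in_us = 25000 stays in place as long as
	 * 25000 / 2 = 12500 us still lies between min_duration_in_us and
	 * max_duration_in_us, which avoids frequent multiplier flips.
	 */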
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
@@ -330,10 +437,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
inserted_frame_duration_in_us = last_render_time_in_us /
frames_to_insert;
- if (inserted_frame_duration_in_us <
- (1000000 / in_out_vrr->max_refresh_in_uhz))
- inserted_frame_duration_in_us =
- (1000000 / in_out_vrr->max_refresh_in_uhz);
+ if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us)
+ inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us;
/* Cache the calculated variables */
in_out_vrr->btr.inserted_duration_in_us =
@@ -469,16 +574,14 @@ static void build_vrr_infopacket_header_vtem(enum signal_type signal,
// HB0, HB1, HB2 indicates PacketType VTEMPacket
infopacket->hb0 = 0x7F;
infopacket->hb1 = 0xC0;
- infopacket->hb2 = 0x00;
- /* HB3 Bit Fields
- * Reserved :1 = 0
- * Sync :1 = 0
- * VFR :1 = 1
- * Ds_Type :2 = 0
- * End :1 = 0
- * New :1 = 0
- */
- infopacket->hb3 = 0x20;
+ infopacket->hb2 = 0x00; //sequence_index
+
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB0], MASK__VRR_VTEM_PB0__VFR, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB2], MASK__VRR_VTEM_PB2__ORGANIZATION_ID, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB3], MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB4], MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB5], MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB6], MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB, 4);
}
static void build_vrr_infopacket_header_v1(enum signal_type signal,
@@ -583,45 +686,36 @@ static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
- /* dc_info_packet to VtemPacket Translation of Bit-fields,
- * SB[6]
- * unsigned char VRR_EN :1
- * unsigned char M_CONST :1
- * unsigned char Reserved2 :2
- * unsigned char FVA_Factor_M1 :4
- * SB[7]
- * unsigned char Base_Vfront :8
- * SB[8]
- * unsigned char Base_Refresh_Rate_98 :2
- * unsigned char RB :1
- * unsigned char Reserved3 :5
- * SB[9]
- * unsigned char Base_RefreshRate_07 :8
- */
unsigned int fieldRateInHz;
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED){
- infopacket->sb[6] |= 0x80; //VRR_EN Bit = 1
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 1);
} else {
- infopacket->sb[6] &= 0x7F; //VRR_EN Bit = 0
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 0);
}
if (!stream->timing.vic) {
- infopacket->sb[7] = stream->timing.v_front_porch;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD1], MASK__VRR_VTEM_MD1__BASE_VFRONT,
+ stream->timing.v_front_porch);
+
/* TODO: In dal2, we check mode flags for a reduced blanking timing.
* Need a way to relay that information to this function.
* if("ReducedBlanking")
* {
- * infopacket->sb[8] |= 0x20; //Set 3rd bit to 1
+ * setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__RB, 1);
* }
*/
+
+ //TODO: DAL2 does FixPoint and rounding. Here we might need to account for that
fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/
- (stream->timing.h_total * stream->timing.v_total);
+ (stream->timing.h_total * stream->timing.v_total);
- infopacket->sb[8] |= ((fieldRateInHz & 0x300) >> 2);
- infopacket->sb[9] |= fieldRateInHz & 0xFF;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98,
+ fieldRateInHz >> 8);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD3], MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07,
+ fieldRateInHz);
}
infopacket->valid = true;
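/* Worked example of the MD2/MD3 split above (not part of the patch), using a
 * standard 1080p60 timing: pix_clk_100hz = 1485000 (148.5 MHz),
 * h_total = 2200, v_total = 1125:
 *
 *   fieldRateInHz = (1485000 * 100) / (2200 * 1125) = 60
 *   BASE_REFRESH_RATE_98 (MD2, bits 1:0) = 60 >> 8   = 0
 *   BASE_REFRESH_RATE_07 (MD3)           = 60 & 0xFF = 0x3C
 */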
@@ -745,6 +839,8 @@ static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream,
{
//VTEM info packet for HdmiVrr
+ memset(infopacket, 0, sizeof(struct dc_info_packet));
+
//VTEM Packet is structured differently
build_vrr_infopacket_header_vtem(stream->signal, infopacket);
build_vrr_vtem_infopacket_data(stream, vrr, infopacket);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 038b88221c5f..b3810b864676 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -41,9 +41,12 @@ static const unsigned char min_reduction_table[13] = {
static const unsigned char max_reduction_table[13] = {
0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
-/* ABM 2.2 Min Reduction effectively disabled (100% for all configs)*/
+/* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 %
+ */
static const unsigned char min_reduction_table_v_2_2[13] = {
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0};
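/* The percentages quoted in the comment above follow from value / 255
 * (arithmetic only, not part of the patch):
 *   0xeb = 235  ->  235 / 255 ≈ 92.2 %
 *   0xd4 = 212  ->  212 / 255 ≈ 83.1 %
 *   0xc0 = 192  ->  192 / 255 ≈ 75.3 %
 */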
/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -408,9 +411,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
ram_table->deviation_gain[0] = 0xb3;
- ram_table->deviation_gain[1] = 0xb3;
- ram_table->deviation_gain[2] = 0xb3;
- ram_table->deviation_gain[3] = 0xb3;
+ ram_table->deviation_gain[1] = 0xa8;
+ ram_table->deviation_gain[2] = 0x98;
+ ram_table->deviation_gain[3] = 0x68;
ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
@@ -505,7 +508,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->contrastFactor[0] = 0x99;
ram_table->contrastFactor[1] = 0x99;
- ram_table->contrastFactor[2] = 0x99;
+ ram_table->contrastFactor[2] = 0x90;
ram_table->contrastFactor[3] = 0x80;
ram_table->iir_curve[0] = 0x65;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 470d7b89071a..574bf6e70763 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -137,6 +137,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
};
+enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
@@ -186,6 +187,8 @@ struct amd_ip_funcs {
enum amd_powergating_state state);
/** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
+ /** @enable_umd_pstate: enable UMD powerstate */
+ int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 721c61171045..5a44e614ab7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -2347,6 +2347,8 @@
#define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569
#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x056a
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f
@@ -2631,6 +2633,8 @@
#define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d
#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x062e
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633
@@ -2915,6 +2919,8 @@
#define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1
#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x06f2
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7
@@ -3199,6 +3205,8 @@
#define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5
#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x07b6
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 442ca7c471a5..6109f5ad25ad 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -141,6 +141,8 @@
#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_DATA1 0x03c5
#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define mmUVD_ENGINE_CNTL 0x03c6
+#define mmUVD_ENGINE_CNTL_BASE_IDX 1
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG 0x03d2
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_UDEC_ADDR_CONFIG 0x03d3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
index 63457f9df4c5..f84bed6eecb9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
@@ -312,6 +312,11 @@
//UVD_GPCOM_VCPU_DATA1
#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_ENGINE_CNTL
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x1
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x2
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1
//UVD_UDEC_DBW_UV_ADDR_CONFIG
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 8eb0bb241210..d3075adb3297 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -494,6 +494,9 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
};
enum atom_cooling_solution_id{
@@ -528,6 +531,35 @@ struct atom_firmware_info_v3_2 {
uint32_t reserved2[3];
};
+struct atom_firmware_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_sclk_in10khz;
+ uint32_t bootup_mclk_in10khz;
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint16_t bootup_vddc_mv;
+ uint16_t bootup_vddci_mv;
+ uint16_t bootup_mvddc_mv;
+ uint16_t bootup_vddgfx_mv;
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t reserved1[2];
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id found in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t reserved3;
+ uint16_t bootup_mvddq_mv;
+ uint16_t bootup_mvpp_mv;
+ uint32_t zfbstartaddrin16mb;
+ uint32_t pplib_pptable_id; // if pplib_pptable_id != 0, pplib gets the powerplay table from inside the driver instead of from VBIOS
+ uint32_t reserved2[2];
+};
+
/*
***************************************************************************
Data Table lcd_info structure
@@ -686,6 +718,7 @@ enum atom_encoder_caps_def
ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not.
ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board.
+ ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type.
};
struct atom_encoder_caps_record
@@ -1226,16 +1259,17 @@ struct atom_gfx_info_v2_3 {
uint32_t rm21_sram_vmin_value;
};
-struct atom_gfx_info_v2_4 {
+struct atom_gfx_info_v2_4
+{
struct atom_common_table_header table_header;
uint8_t gfxip_min_ver;
uint8_t gfxip_max_ver;
- uint8_t gc_num_se;
- uint8_t max_tile_pipes;
- uint8_t gc_num_cu_per_sh;
- uint8_t gc_num_sh_per_se;
- uint8_t gc_num_rb_per_se;
- uint8_t gc_num_tccs;
+ uint8_t max_shader_engines;
+ uint8_t reserved;
+ uint8_t max_cu_per_sh;
+ uint8_t max_sh_per_se;
+ uint8_t max_backends_per_se;
+ uint8_t max_texture_channel_caches;
uint32_t regaddr_cp_dma_src_addr;
uint32_t regaddr_cp_dma_src_addr_hi;
uint32_t regaddr_cp_dma_dst_addr;
@@ -1780,6 +1814,56 @@ struct atom_umc_info_v3_1
uint32_t mem_refclk_10khz;
};
+// umc_info.umc_config
+enum atom_umc_config_def {
+ UMC_CONFIG__ENABLE_1KB_INTERLEAVE_MODE = 0x00000001,
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE = 0x00000002,
+ UMC_CONFIG__ENABLE_HBM_LANE_REPAIR = 0x00000004,
+ UMC_CONFIG__ENABLE_BANK_HARVESTING = 0x00000008,
+ UMC_CONFIG__ENABLE_PHY_REINIT = 0x00000010,
+ UMC_CONFIG__DISABLE_UCODE_CHKSTATUS = 0x00000020,
+};
+
+struct atom_umc_info_v3_2
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_version;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+};
+
+struct atom_umc_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+ uint32_t reserved[4];
+};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5f3c10ebff08..b897aca9b4c9 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -85,18 +85,6 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
-enum kgd_engine_type {
- KGD_ENGINE_PFP = 1,
- KGD_ENGINE_ME,
- KGD_ENGINE_CE,
- KGD_ENGINE_MEC1,
- KGD_ENGINE_MEC2,
- KGD_ENGINE_RLC,
- KGD_ENGINE_SDMA1,
- KGD_ENGINE_SDMA2,
- KGD_ENGINE_MAX
-};
-
/**
* enum kfd_sched_policy
*
@@ -230,8 +218,6 @@ struct tile_config {
* @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
* SDMA hqd slot.
*
- * @get_fw_version: Returns FW versions from the header
- *
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
@@ -311,8 +297,6 @@ struct kfd2kgd_calls {
struct kgd_dev *kgd,
uint8_t vmid);
- uint16_t (*get_fw_version)(struct kgd_dev *kgd,
- enum kgd_engine_type type);
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
deleted file mode 100644
index 6dc159924ed1..000000000000
--- a/drivers/gpu/drm/amd/include/linux/chash.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _LINUX_CHASH_H
-#define _LINUX_CHASH_H
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <asm/bitsperlong.h>
-
-#if BITS_PER_LONG == 32
-# define _CHASH_LONG_SHIFT 5
-#elif BITS_PER_LONG == 64
-# define _CHASH_LONG_SHIFT 6
-#else
-# error "Unexpected BITS_PER_LONG"
-#endif
-
-struct __chash_table {
- u8 bits;
- u8 key_size;
- unsigned int value_size;
- u32 size_mask;
- unsigned long *occup_bitmap, *valid_bitmap;
- union {
- u32 *keys32;
- u64 *keys64;
- };
- u8 *values;
-
-#ifdef CONFIG_CHASH_STATS
- u64 hits, hits_steps, hits_time_ns;
- u64 miss, miss_steps, miss_time_ns;
- u64 relocs, reloc_dist;
-#endif
-};
-
-#define __CHASH_BITMAP_SIZE(bits) \
- (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#define __CHASH_ARRAY_SIZE(bits, size) \
- ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
-
-#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
- (__CHASH_BITMAP_SIZE(bits) * 2 + \
- __CHASH_ARRAY_SIZE(bits, key_size) + \
- __CHASH_ARRAY_SIZE(bits, value_size))
-
-#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
- struct { \
- struct __chash_table table; \
- unsigned long data \
- [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
- }
-
-/**
- * struct chash_table - Dynamically allocated closed hash table
- *
- * Use this struct for dynamically allocated hash tables (using
- * chash_table_alloc and chash_table_free), where the size is
- * determined at runtime.
- */
-struct chash_table {
- struct __chash_table table;
- unsigned long *data;
-};
-
-/**
- * DECLARE_CHASH_TABLE - macro to declare a closed hash table
- * @table: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * This declares the hash table variable with a static size.
- *
- * The closed hash table stores key-value pairs with low memory and
- * lookup overhead. In operation it performs no dynamic memory
- * management. The data being stored does not require any
- * list_heads. The hash table performs best with small @val_sz and as
- * long as some space (about 50%) is left free in the table. But the
- * table can still work reasonably efficiently even when filled up to
- * about 90%. If bigger data items need to be stored and looked up,
- * store the pointer to it as value in the hash table.
- *
- * @val_sz may be 0. This can be useful when all the stored
- * information is contained in the key itself and the fact that it is
- * in the hash table (or not).
- */
-#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
- STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
-
-#ifdef CONFIG_CHASH_STATS
-#define __CHASH_STATS_INIT(prefix), \
- prefix.hits = 0, \
- prefix.hits_steps = 0, \
- prefix.hits_time_ns = 0, \
- prefix.miss = 0, \
- prefix.miss_steps = 0, \
- prefix.miss_time_ns = 0, \
- prefix.relocs = 0, \
- prefix.reloc_dist = 0
-#else
-#define __CHASH_STATS_INIT(prefix)
-#endif
-
-#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
- prefix.bits = (bts), \
- prefix.key_size = (key_sz), \
- prefix.value_size = (val_sz), \
- prefix.size_mask = ((1 << bts) - 1), \
- prefix.occup_bitmap = &data[0], \
- prefix.valid_bitmap = &data \
- [__CHASH_BITMAP_SIZE(bts)], \
- prefix.keys64 = (u64 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2], \
- prefix.values = (u8 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2 + \
- __CHASH_ARRAY_SIZE(bts, key_sz)] \
- __CHASH_STATS_INIT(prefix)
-
-/**
- * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * Note: the macro can be used for global and local hash table variables.
- */
-#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
- .table = { \
- __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
- }, \
- .data = {0} \
- }
-
-/**
- * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- */
-#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
-
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask);
-void chash_table_free(struct chash_table *table);
-
-/**
- * chash_table_dump_stats - Dump statistics of a closed hash table
- * @tbl: Pointer to the table structure
- *
- * Dumps some performance statistics of the table gathered in operation
- * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
- * user must turn on messages for chash.c (file chash.c +p).
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
-
-void __chash_table_dump_stats(struct __chash_table *table);
-#else
-#define chash_table_dump_stats(tbl)
-#endif
-
-/**
- * chash_table_reset_stats - Reset statistics of a closed hash table
- * @tbl: Pointer to the table structure
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
-
-static inline void __chash_table_reset_stats(struct __chash_table *table)
-{
- (void)table __CHASH_STATS_INIT((*table));
-}
-#else
-#define chash_table_reset_stats(tbl)
-#endif
-
-/**
- * chash_table_copy_in - Copy a new value into the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to add or update
- * @value: Pointer to value to copy, may be NULL
- *
- * If @key already has an entry, its value is replaced. Otherwise a
- * new entry is added. If @value is NULL, the value is left unchanged
- * or uninitialized. Returns 1 if an entry already existed, 0 if a new
- * entry was added or %-ENOMEM if there was no free space in the
- * table.
- */
-#define chash_table_copy_in(tbl, key, value) \
- __chash_table_copy_in(&(*tbl).table, key, value)
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value);
-
-/**
- * chash_table_copy_out - Copy a value out of the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. Returns the slot index of the
- * entry or %-EINVAL if @key was not found.
- */
-#define chash_table_copy_out(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, false)
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove);
-
-/**
- * chash_table_remove - Remove an entry from the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. The entry is removed from the
- * table. Returns the slot index of the removed entry or %-EINVAL if
- * @key was not found.
- */
-#define chash_table_remove(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, true)
-
-/*
- * Low level iterator API used internally by the above functions.
- */
-struct chash_iter {
- struct __chash_table *table;
- unsigned long mask;
- int slot;
-};
-
-/**
- * CHASH_ITER_INIT - Initialize a hash table iterator
- * @tbl: Pointer to hash table to iterate over
- * @s: Initial slot number
- */
-#define CHASH_ITER_INIT(table, s) { \
- table, \
- 1UL << ((s) & (BITS_PER_LONG - 1)), \
- s \
- }
-/**
- * CHASH_ITER_SET - Set hash table iterator to new slot
- * @iter: Iterator
- * @s: Slot number
- */
-#define CHASH_ITER_SET(iter, s) \
- (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
- (iter).slot = (s)
-/**
- * CHASH_ITER_INC - Increment hash table iterator
- * @table: Hash table to iterate over
- *
- * Wraps around at the end.
- */
-#define CHASH_ITER_INC(iter) do { \
- (iter).mask = (iter).mask << 1 | \
- (iter).mask >> (BITS_PER_LONG - 1); \
- (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
- } while (0)
-
-static inline bool chash_iter_is_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-static inline bool chash_iter_is_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-
-static inline void chash_iter_set_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
-}
-static inline void chash_iter_set_invalid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-static inline void chash_iter_set_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-
-static inline u32 chash_iter_key32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys32[iter.slot];
-}
-static inline u64 chash_iter_key64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys64[iter.slot];
-}
-static inline u64 chash_iter_key(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return (iter.table->key_size == 4) ?
- iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
-}
-
-static inline u32 chash_iter_hash32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- return hash_32(chash_iter_key32(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- return hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash(const struct chash_iter iter)
-{
- return (iter.table->key_size == 4) ?
- hash_32(chash_iter_key32(iter), iter.table->bits) :
- hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline void *chash_iter_value(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->values +
- ((unsigned long)iter.slot * iter.table->value_size);
-}
-
-#endif /* _LINUX_CHASH_H */
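The header above documents the closed-hash API that this patch removes (DECLARE/DEFINE_CHASH_TABLE, chash_table_copy_in/out, chash_table_remove). A minimal usage sketch of that removed API, for orientation only; it is not part of the patch, and the table name and key value are illustrative:

	#include <linux/chash.h>

	/* 2^4 = 16 slots, 8-byte keys, u64 values */
	static DEFINE_CHASH_TABLE(example_map, 4, 8, sizeof(u64));

	static int chash_example(void)
	{
		u64 val = 0xdeadbeef;
		int ret;

		/* insert or update; returns 1 if the key existed, 0 if new */
		ret = chash_table_copy_in(&example_map, 0x1234ULL, &val);
		if (ret < 0)
			return ret;	/* -ENOMEM: table full */

		/* lookup; returns the slot index or -EINVAL if not found */
		ret = chash_table_copy_out(&example_map, 0x1234ULL, &val);
		if (ret < 0)
			return ret;

		/* remove, discarding the stored value */
		return chash_table_remove(&example_map, 0x1234ULL, NULL);
	}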
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
deleted file mode 100644
index 776ef3434c10..000000000000
--- a/drivers/gpu/drm/amd/lib/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-menu "AMD Library routines"
-
-#
-# Closed hash table
-#
-config CHASH
- tristate
- default DRM_AMDGPU
- help
- Statically sized closed hash table implementation with low
- memory and CPU overhead.
-
-config CHASH_STATS
- bool "Closed hash table performance statistics"
- depends on CHASH
- default n
- help
- Enable collection of performance statistics for closed hash tables.
-
-config CHASH_SELFTEST
- bool "Closed hash table self test"
- depends on CHASH
- default n
- help
- Runs a selftest during module load. Several module parameters
- are available to modify the behaviour of the test.
-
-endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
deleted file mode 100644
index 690243001e1a..000000000000
--- a/drivers/gpu/drm/amd/lib/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for AMD library routines, which are used by AMD driver
-# components.
-#
-# This is for common library routines that can be shared between AMD
-# driver components or later moved to kernel/lib for sharing with
-# other drivers.
-
-ccflags-y := -I$(src)/../include
-
-obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
deleted file mode 100644
index b8e45f356a1c..000000000000
--- a/drivers/gpu/drm/amd/lib/chash.c
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sched/clock.h>
-#include <asm/div64.h>
-#include <linux/chash.h>
-
-/**
- * chash_table_alloc - Allocate closed hash table
- * @table: Pointer to the table structure
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @value_size: Size of data values in bytes, can be 0
- */
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask)
-{
- if (bits > 31)
- return -EINVAL;
-
- if (key_size != 4 && key_size != 8)
- return -EINVAL;
-
- table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
- sizeof(long), gfp_mask);
- if (!table->data)
- return -ENOMEM;
-
- __CHASH_TABLE_INIT(table->table, table->data,
- bits, key_size, value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(chash_table_alloc);
-
-/**
- * chash_table_free - Free closed hash table
- * @table: Pointer to the table structure
- */
-void chash_table_free(struct chash_table *table)
-{
- kfree(table->data);
-}
-EXPORT_SYMBOL(chash_table_free);
-
-#ifdef CONFIG_CHASH_STATS
-
-#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
- u64 __nom = (nom); \
- u64 __denom = (denom); \
- u64 __quot, __frac; \
- u32 __rem; \
- \
- while (__denom >> 32) { \
- __nom >>= 1; \
- __denom >>= 1; \
- } \
- __quot = __nom; \
- __rem = do_div(__quot, __denom); \
- __frac = __rem * (frac_digits) + (__denom >> 1); \
- do_div(__frac, __denom); \
- (quot) = __quot; \
- (frac) = __frac; \
- } while (0)
-
-void __chash_table_dump_stats(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- u32 filled = 0, empty = 0, tombstones = 0;
- u64 quot1, quot2;
- u32 frac1, frac2;
-
- do {
- if (chash_iter_is_valid(iter))
- filled++;
- else if (chash_iter_is_empty(iter))
- empty++;
- else
- tombstones++;
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- pr_debug("chash: key size %u, value size %u\n",
- table->key_size, table->value_size);
- pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
- 1 << table->bits, filled, empty, tombstones);
- if (table->hits > 0) {
- DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
- DIV_FRAC(table->hits * 1000, table->hits_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits, quot1, frac1, quot2, frac2);
- if (table->miss > 0) {
- DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
- DIV_FRAC(table->miss * 1000, table->miss_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->miss, quot1, frac1, quot2, frac2);
- if (table->hits + table->miss > 0) {
- DIV_FRAC(table->hits_steps + table->miss_steps,
- table->hits + table->miss, quot1, frac1, 1000);
- DIV_FRAC((table->hits + table->miss) * 1000,
- (table->hits_time_ns + table->miss_time_ns),
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits + table->miss, quot1, frac1, quot2, frac2);
- if (table->relocs > 0) {
- DIV_FRAC(table->hits + table->miss, table->relocs,
- quot1, frac1, 1000);
- DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
- pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
- table->relocs, quot1, frac1, quot2, frac2);
- } else {
- pr_debug(" No relocations\n");
- }
-}
-EXPORT_SYMBOL(__chash_table_dump_stats);
-
-#undef DIV_FRAC
-#endif
-
-#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
-#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
-#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
-#define CHASH_IN_RANGE(table, slot, first, last) \
- (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
-
-/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
-#ifdef CHASH_DEBUG
-static void chash_table_dump(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
-
- do {
- if ((iter.slot & 3) == 0)
- pr_debug("%04x: ", iter.slot);
-
- if (chash_iter_is_valid(iter))
- pr_debug("[%016llx] ", chash_iter_key(iter));
- else if (chash_iter_is_empty(iter))
- pr_debug("[ <empty> ] ");
- else
- pr_debug("[ <tombstone> ] ");
-
- if ((iter.slot & 3) == 3)
- pr_debug("\n");
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- if ((iter.slot & 3) != 0)
- pr_debug("\n");
-}
-
-static int chash_table_check(struct __chash_table *table)
-{
- u32 hash;
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- struct chash_iter cur = CHASH_ITER_INIT(table, 0);
-
- do {
- if (!chash_iter_is_valid(iter)) {
- CHASH_ITER_INC(iter);
- continue;
- }
-
- hash = chash_iter_hash(iter);
- CHASH_ITER_SET(cur, hash);
- while (cur.slot != iter.slot) {
- if (chash_iter_is_empty(cur)) {
- pr_err("Path to element at %x with hash %x broken at slot %x\n",
- iter.slot, hash, cur.slot);
- chash_table_dump(table);
- return -EINVAL;
- }
- CHASH_ITER_INC(cur);
- }
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- return 0;
-}
-#endif
-
-static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
-{
- BUG_ON(src.table == dst.table && src.slot == dst.slot);
- BUG_ON(src.table->key_size != dst.table->key_size);
- BUG_ON(src.table->value_size != dst.table->value_size);
-
- if (dst.table->key_size == 4)
- dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
- else
- dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
-
- if (dst.table->value_size)
- memcpy(chash_iter_value(dst), chash_iter_value(src),
- dst.table->value_size);
-
- chash_iter_set_valid(dst);
- chash_iter_set_invalid(src);
-
-#ifdef CONFIG_CHASH_STATS
- if (src.table == dst.table) {
- dst.table->relocs++;
- dst.table->reloc_dist +=
- CHASH_SUB(dst.table, src.slot, dst.slot);
- }
-#endif
-}
-
-/**
- * __chash_table_find - Helper for looking up a hash table entry
- * @iter: Pointer to hash table iterator
- * @key: Key of the entry to find
- * @for_removal: set to true if the element will be removed soon
- *
- * Searches for an entry in the hash table with a given key. iter must
- * be initialized by the caller to point to the home position of the
- * hypothetical entry, i.e. it must be initialized with the hash table
- * and the key's hash as the initial slot for the search.
- *
- * This function also does some local clean-up to speed up future
- * look-ups by relocating entries to better slots and removing
- * tombstones that are no longer needed.
- *
- * If @for_removal is true, the function avoids relocating the entry
- * that is being returned.
- *
- * Returns 0 if the search is successful. In this case iter is updated
- * to point to the found entry. Otherwise %-EINVAL is returned and the
- * iter is updated to point to the first available slot for the given
- * key. If the table is full, the slot is set to -1.
- */
-static int chash_table_find(struct chash_iter *iter, u64 key,
- bool for_removal)
-{
-#ifdef CONFIG_CHASH_STATS
- u64 ts1 = local_clock();
-#endif
- u32 hash = iter->slot;
- struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
- int first_avail = (for_removal ? -2 : -1);
-
- while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
- if (chash_iter_is_empty(*iter)) {
- /* Found an empty slot, which ends the
- * search. Clean up any preceding tombstones
- * that are no longer needed because they lead
- * to no-where
- */
- if ((int)first_redundant.slot < 0)
- goto not_found;
- while (first_redundant.slot != iter->slot) {
- if (!chash_iter_is_valid(first_redundant))
- chash_iter_set_empty(first_redundant);
- CHASH_ITER_INC(first_redundant);
- }
-#ifdef CHASH_DEBUG
- chash_table_check(iter->table);
-#endif
- goto not_found;
- } else if (!chash_iter_is_valid(*iter)) {
- /* Found a tombstone. Remember it as candidate
- * for relocating the entry we're looking for
- * or for adding a new entry with the given key
- */
- if (first_avail == -1)
- first_avail = iter->slot;
- /* Or mark it as the start of a series of
- * potentially redundant tombstones
- */
- else if (first_redundant.slot == -1)
- CHASH_ITER_SET(first_redundant, iter->slot);
- } else if (first_redundant.slot >= 0) {
- /* Found a valid, occupied slot with a
- * preceding series of tombstones. Relocate it
- * to a better position that no longer depends
- * on those tombstones
- */
- u32 cur_hash = chash_iter_hash(*iter);
-
- if (!CHASH_IN_RANGE(iter->table, cur_hash,
- first_redundant.slot + 1,
- iter->slot)) {
- /* This entry has a hash at or before
- * the first tombstone we found. We
- * can relocate it to that tombstone
- * and advance to the next tombstone
- */
- chash_iter_relocate(first_redundant, *iter);
- do {
- CHASH_ITER_INC(first_redundant);
- } while (chash_iter_is_valid(first_redundant));
- } else if (cur_hash != iter->slot) {
- /* Relocate entry to its home position
- * or as close as possible so it no
- * longer depends on any preceding
- * tombstones
- */
- struct chash_iter new_iter =
- CHASH_ITER_INIT(iter->table, cur_hash);
-
- while (new_iter.slot != iter->slot &&
- chash_iter_is_valid(new_iter))
- CHASH_ITER_INC(new_iter);
-
- if (new_iter.slot != iter->slot)
- chash_iter_relocate(new_iter, *iter);
- }
- }
-
- CHASH_ITER_INC(*iter);
- if (iter->slot == hash) {
- iter->slot = -1;
- goto not_found;
- }
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits++;
- iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0) {
- CHASH_ITER_SET(first_redundant, first_avail);
- chash_iter_relocate(first_redundant, *iter);
- iter->slot = first_redundant.slot;
- iter->mask = first_redundant.mask;
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits_time_ns += local_clock() - ts1;
-#endif
-
- return 0;
-
-not_found:
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss++;
- iter->table->miss_steps += (iter->slot < 0) ?
- (1 << iter->table->bits) :
- CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0)
- CHASH_ITER_SET(*iter, first_avail);
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss_time_ns += local_clock() - ts1;
-#endif
-
- return -EINVAL;
-}
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, false);
-
- /* Found an existing entry */
- if (!r) {
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value,
- table->value_size);
- return 1;
- }
-
- /* Is there a place to add a new entry? */
- if (iter.slot < 0) {
- pr_err("Hash table overflow\n");
- return -ENOMEM;
- }
-
- chash_iter_set_valid(iter);
-
- if (table->key_size == 4)
- table->keys32[iter.slot] = key;
- else
- table->keys64[iter.slot] = key;
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value, table->value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(__chash_table_copy_in);
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, remove);
-
- if (r < 0)
- return r;
-
- if (value && table->value_size)
- memcpy(value, chash_iter_value(iter), table->value_size);
-
- if (remove)
- chash_iter_set_invalid(iter);
-
- return iter.slot;
-}
-EXPORT_SYMBOL(__chash_table_copy_out);
-
-#ifdef CONFIG_CHASH_SELFTEST
-/**
- * chash_self_test - Run a self-test of the hash table implementation
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @min_fill: Minimum fill level during the test
- * @max_fill: Maximum fill level during the test
- * @iterations: Number of test iterations
- *
- * The test adds and removes entries from a hash table, cycling the
- * fill level between min_fill and max_fill entries. Also tests lookup
- * and value retrieval.
- */
-static int __init chash_self_test(u8 bits, u8 key_size,
- int min_fill, int max_fill,
- u64 iterations)
-{
- struct chash_table table;
- int ret;
- u64 add_count, rmv_count;
- u64 value;
-
- if (key_size == 4 && iterations > 0xffffffff)
- return -EINVAL;
- if (min_fill >= max_fill)
- return -EINVAL;
-
- ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
- GFP_KERNEL);
- if (ret) {
- pr_err("chash_table_alloc failed: %d\n", ret);
- return ret;
- }
-
- for (add_count = 0, rmv_count = 0; add_count < iterations;
- add_count++) {
- /* When we hit the max_fill level, remove entries down
- * to min_fill
- */
- if (add_count - rmv_count == max_fill) {
- u64 find_count = rmv_count;
-
- /* First try to find all entries that we're
- * about to remove, confirm their value, test
- * writing them back a second time.
- */
- for (; add_count - find_count > min_fill;
- find_count++) {
- ret = chash_table_copy_out(&table, find_count,
- &value);
- if (ret < 0) {
- pr_err("chash_table_copy_out failed: %d\n",
- ret);
- goto out;
- }
- if (value != ~find_count) {
- pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
- find_count, ~find_count, value);
-#ifdef CHASH_DEBUG
- chash_table_dump(&table.table);
-#endif
- ret = -EFAULT;
- goto out;
- }
- ret = chash_table_copy_in(&table, find_count,
- &value);
- if (ret != 1) {
- pr_err("copy_in second time returned %d, expected 1\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
- /* Remove them until we hit min_fill level */
- for (; add_count - rmv_count > min_fill; rmv_count++) {
- ret = chash_table_remove(&table, rmv_count,
- NULL);
- if (ret < 0) {
- pr_err("chash_table_remove failed: %d\n",
- ret);
- goto out;
- }
- }
- }
-
- /* Add a new value */
- value = ~add_count;
- ret = chash_table_copy_in(&table, add_count, &value);
- if (ret != 0) {
- pr_err("copy_in first time returned %d, expected 0\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
-
- chash_table_dump_stats(&table);
- chash_table_reset_stats(&table);
-
-out:
- chash_table_free(&table);
- return ret;
-}
-
-static unsigned int chash_test_bits = 10;
-MODULE_PARM_DESC(test_bits,
- "Selftest number of hash bits ([4..20], default=10)");
-module_param_named(test_bits, chash_test_bits, uint, 0444);
-
-static unsigned int chash_test_keysize = 8;
-MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
-module_param_named(test_keysize, chash_test_keysize, uint, 0444);
-
-static unsigned int chash_test_minfill;
-MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
-module_param_named(test_minfill, chash_test_minfill, uint, 0444);
-
-static unsigned int chash_test_maxfill;
-MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
-module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
-
-static unsigned long chash_test_iters;
-MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
-module_param_named(test_iters, chash_test_iters, ulong, 0444);
-
-static int __init chash_init(void)
-{
- int ret;
- u64 ts1_ns;
-
- /* Skip self test on user errors */
- if (chash_test_bits < 4 || chash_test_bits > 20) {
- pr_err("chash: test_bits out of range [4..20].\n");
- return 0;
- }
- if (chash_test_keysize != 4 && chash_test_keysize != 8) {
- pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
- return 0;
- }
-
- if (!chash_test_minfill)
- chash_test_minfill = (1 << chash_test_bits) / 2;
- if (!chash_test_maxfill)
- chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
- if (!chash_test_iters)
- chash_test_iters = (1 << chash_test_bits) * 1000;
-
- if (chash_test_minfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_minfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_maxfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_maxfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_minfill >= chash_test_maxfill) {
- pr_err("chash: test_minfill must be < test_maxfill.\n");
- return 0;
- }
- if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
- pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
- return 0;
- }
-
- ts1_ns = local_clock();
- ret = chash_self_test(chash_test_bits, chash_test_keysize,
- chash_test_minfill, chash_test_maxfill,
- chash_test_iters);
- if (!ret) {
- u64 ts_delta_us = local_clock() - ts1_ns;
- u64 iters_per_second = (u64)chash_test_iters * 1000000;
-
- do_div(ts_delta_us, 1000);
- do_div(iters_per_second, ts_delta_us);
- pr_info("chash: self test took %llu us, %llu iterations/s\n",
- ts_delta_us, iters_per_second);
- } else {
- pr_err("chash: self test failed: %d\n", ret);
- }
-
- return ret;
-}
-
-module_init(chash_init);
-
-#endif /* CONFIG_CHASH_SELFTEST */
-
-MODULE_DESCRIPTION("Closed hash table");
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 231785a9e24c..ec87b3430d12 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 3f73f7cd18b9..bea1587d352d 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -53,7 +53,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
- hwmgr->feature_mask = adev->powerplay.pp_feature;
+ hwmgr->feature_mask = adev->pm.pp_feature;
hwmgr->display_config = &adev->pm.pm_display_cfg;
adev->powerplay.pp_handle = hwmgr;
adev->powerplay.pp_funcs = &pp_dpm_funcs;
@@ -1304,7 +1304,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1341,7 +1341,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1360,7 +1360,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1379,7 +1379,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
new file mode 100644
index 000000000000..c058c784180e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -0,0 +1,1253 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "soc15_common.h"
+#include "smu_v11_0.h"
+#include "atom.h"
+#include "amd_pcie.h"
+
+int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+ bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ret = smu_dpm_set_uvd_enable(smu, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ ret = smu_dpm_set_vce_enable(smu, gate);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ /* power states are not supported */
+ return POWER_STATE_TYPE_DEFAULT;
+}
+
+int smu_get_power_num_states(struct smu_context *smu,
+ struct pp_states_info *state_info)
+{
+ if (!state_info)
+ return -EINVAL;
+
+ /* power states are not supported */
+ memset(state_info, 0, sizeof(struct pp_states_info));
+ state_info->nums = 0;
+
+ return 0;
+}
+
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+ *((uint32_t *)data) = smu->pstate_sclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+ *((uint32_t *)data) = smu->pstate_mclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+ *size = 8;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
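A hedged caller sketch for the sensor interface above (not part of the patch; it assumes a valid smu pointer is in scope):

	uint32_t sclk;
	uint32_t size = sizeof(sclk);
	int ret;

	ret = smu_common_read_sensor(smu, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
				     &sclk, &size);
	if (!ret)
		pr_debug("stable pstate sclk: %u (size reported: %u)\n",
			 sclk, size);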
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+ int ret = 0;
+ uint32_t table_index;
+
+ if (!table_data || table_id >= smu_table->table_count)
+ return -EINVAL;
+
+ table_index = (exarg << 16) | table_id;
+
+ table = &smu_table->tables[table_id];
+
+ if (drv2smu)
+ memcpy(table->cpu_addr, table_data, table->size);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, drv2smu ?
+ SMU_MSG_TransferTableDram2Smu :
+ SMU_MSG_TransferTableSmu2Dram,
+ table_index);
+ if (ret)
+ return ret;
+
+ if (!drv2smu)
+ memcpy(table_data, table->cpu_addr, table->size);
+
+ return ret;
+}
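The helper above moves one table between driver memory and the SMU: for drv2smu it copies the data into the table's GPU buffer, tells the SMU the DRAM address via SetDriverDramAddrHigh/Low, then issues TransferTableDram2Smu (or TransferTableSmu2Dram for the reverse direction) with the table id in the low 16 bits and the extra argument in the high 16 bits. A hedged caller sketch, not part of the patch; TABLE_WATERMARKS and wm_table are placeholders for a table id and a driver-side buffer:

	/* push a driver-side watermarks buffer to the SMU */
	ret = smu_update_table_with_arg(smu, TABLE_WATERMARKS, 0,
					wm_table, true);
	if (ret)
		return ret;

	/* pull it back for inspection */
	ret = smu_update_table_with_arg(smu, TABLE_WATERMARKS, 0,
					wm_table, false);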
+
+bool is_support_sw_smu(struct amdgpu_device *adev)
+{
+ if (amdgpu_dpm != 1)
+ return false;
+
+ if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+ return true;
+
+ return false;
+}
+
+int smu_sys_get_pp_table(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
+ return -EINVAL;
+
+ if (smu_table->hardcode_pptable)
+ *table = smu_table->hardcode_pptable;
+ else
+ *table = smu_table->power_play_table;
+
+ return smu_table->power_play_table_size;
+}
+
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
+ int ret = 0;
+
+ if (header->usStructureSize != size) {
+ pr_err("pp table size not matched !\n");
+ return -EIO;
+ }
+
+ mutex_lock(&smu->mutex);
+ if (!smu_table->hardcode_pptable)
+ smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ if (!smu_table->hardcode_pptable) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(smu_table->hardcode_pptable, buf, size);
+ smu_table->power_play_table = smu_table->hardcode_pptable;
+ smu_table->power_play_table_size = size;
+ mutex_unlock(&smu->mutex);
+
+ ret = smu_reset(smu);
+ if (ret)
+ pr_info("smu reset failed, ret = %d\n", ret);
+
+ return ret;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_feature_init_dpm(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+
+ mutex_lock(&feature->mutex);
+ bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+ SMU_FEATURE_MAX/32);
+ if (ret)
+ return ret;
+
+ mutex_lock(&feature->mutex);
+ bitmap_andnot(feature->allowed, feature->allowed,
+ (unsigned long *)unallowed_feature_mask,
+ feature->feature_num);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->enabled);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = smu_feature_update_enable_state(smu, feature_id, enable);
+ if (ret)
+ goto failed;
+
+ if (enable)
+ test_and_set_bit(feature_id, feature->enabled);
+ else
+ test_and_clear_bit(feature_id, feature->enabled);
+
+failed:
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+ bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ if (enable)
+ test_and_set_bit(feature_id, feature->supported);
+ else
+ test_and_clear_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
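The four helpers above serialize access to the supported/enabled/allowed feature bitmaps with feature->mutex. A hedged usage sketch, not part of the patch; FEATURE_DPM_UVD_BIT stands in for any feature index below feature->feature_num:

	if (smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT) &&
	    !smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
		ret = smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, true);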
+
+static int smu_set_funcs(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+ smu_v11_0_set_smu_funcs(smu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ smu->adev = adev;
+ mutex_init(&smu->mutex);
+
+ return smu_set_funcs(adev);
+}
+
+static int smu_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ mutex_lock(&smu->mutex);
+ smu_handle_task(&adev->smu,
+ smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_COMPLETE_INIT);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t data_start;
+
+ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+ size, frev, crev, &data_start))
+ return -EINVAL;
+
+ *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+ return 0;
+}
+
+static int smu_initialize_pptable(struct smu_context *smu)
+{
+ /* TODO */
+ return 0;
+}
+
+static int smu_smc_table_sw_init(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_initialize_pptable(smu);
+ if (ret) {
+ pr_err("Failed to init smu_initialize_pptable!\n");
+ return ret;
+ }
+
+ /**
+ * Create smu_table structure, and init smc tables such as
+ * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
+ */
+ ret = smu_init_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to init smc tables!\n");
+ return ret;
+ }
+
+ /**
+ * Create the smu_power_context structure, and allocate smu_dpm_context with
+ * the required context size to fill in the smu_power_context data.
+ */
+ ret = smu_init_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_init_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_smc_table_sw_fini(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_fini_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to smu_fini_smc_tables!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ smu->pool_size = adev->pm.smu_prv_buffer_size;
+ smu->smu_feature.feature_num = SMU_FEATURE_MAX;
+ mutex_init(&smu->smu_feature.mutex);
+ bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+ smu->watermarks_bitmap = 0;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+
+ smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
+ smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+ smu->display_config = &adev->pm.pm_display_cfg;
+
+ smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ pr_err("Failed to load smu firmware!\n");
+ return ret;
+ }
+
+ ret = smu_smc_table_sw_init(smu);
+ if (ret) {
+ pr_err("Failed to sw init smc table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_smc_table_sw_fini(smu);
+ if (ret) {
+ pr_err("Failed to sw fini smc table!\n");
+ return ret;
+ }
+
+ ret = smu_fini_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_fini_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_init_fb_allocations(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+ int32_t ret = 0;
+
+ if (table_count <= 0)
+ return -EINVAL;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ ret = amdgpu_bo_create_kernel(adev,
+ tables[i].size,
+ tables[i].align,
+ tables[i].domain,
+ &tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+failed:
+ for (; i > 0; i--) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+
+ }
+ return ret;
+}
+
+static int smu_fini_fb_allocations(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+
+ if (table_count == 0 || tables == NULL)
+ return 0;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ }
+
+ return 0;
+}
+
+static int smu_override_pcie_parameters(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+ int ret;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+ return ret;
+}
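Worked example of the smu_pcie_arg encoding described in the comment above (illustrative only): for a GEN4-capable link (pcie_gen = 3) at x16 width (pcie_width = 6) and LCLK DPM level 1, the argument becomes

	smu_pcie_arg = (1 << 16) | (3 << 8) | 6;	/* == 0x10306 */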
+
+static int smu_smc_table_hw_init(struct smu_context *smu,
+ bool initialize)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (smu_is_dpm_running(smu) && adev->in_suspend) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
+
+ ret = smu_init_display(smu);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_read_pptable_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /* get boot_values from vbios to set revision, gfxclk, etc. */
+ ret = smu_get_vbios_bootup_values(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_clk_info_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Check that the format_revision in vbios matches the pptable header
+ * version, and that the structure size is not 0.
+ */
+ ret = smu_check_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * allocate vram bos to store smc table contents.
+ */
+ ret = smu_init_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Parse pptable format and fill PPTable_t smc_pptable to
+ * smu_table_context structure. And read the smc_dpm_table from vbios,
+ * then fill it into smc_pptable.
+ */
+ ret = smu_parse_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send msg GetDriverIfVersion to check if the return value is equal
+ * to the DRIVER_IF_VERSION of the smc header.
+ */
+ ret = smu_check_fw_version(smu);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Copy pptable bo in the vram to smc with SMU MSGs such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
+
+ /* issue RunAfllBtc msg */
+ ret = smu_run_afll_btc(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
+
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_notify_display_change(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set min deep sleep dcefclk with the bootup value from vbios via
+ * SetMinDeepSleepDcefclk MSG.
+ */
+ ret = smu_set_min_dcef_deep_sleep(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set initialized values (from vbios) in the dpm tables context, such as
+ * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
+ * type of clock.
+ */
+ if (initialize) {
+ ret = smu_populate_smc_pptable(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_init_max_sustainable_clocks(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_set_od8_default_settings(smu, initialize);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_populate_umd_state_clk(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
+ */
+ ret = smu_set_tool_table_location(smu);
+
+ return ret;
+}
+
+/**
+ * smu_alloc_memory_pool - allocate memory pool in the system memory
+ *
+ * @smu: pointer to the SMU context
+ *
+ * This memory pool is reserved for SMC use; the SetSystemVirtualDramAddr and
+ * DramLogSetDramAddr messages notify the SMC when its location changes.
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int smu_alloc_memory_pool(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ uint64_t pool_size = smu->pool_size;
+ int ret = 0;
+
+ if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ memory_pool->size = pool_size;
+ memory_pool->align = PAGE_SIZE;
+ memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ switch (pool_size) {
+ case SMU_MEMORY_POOL_SIZE_256_MB:
+ case SMU_MEMORY_POOL_SIZE_512_MB:
+ case SMU_MEMORY_POOL_SIZE_1_GB:
+ case SMU_MEMORY_POOL_SIZE_2_GB:
+ ret = amdgpu_bo_create_kernel(adev,
+ memory_pool->size,
+ memory_pool->align,
+ memory_pool->domain,
+ &memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_free_memory_pool(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+
+ if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ amdgpu_bo_free_kernel(&memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+
+ memset(memory_pool, 0, sizeof(struct smu_table));
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ ret = smu_load_microcode(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
+ return ret;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_feature_init_dpm(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_smc_table_hw_init(smu, true);
+ if (ret)
+ goto failed;
+
+ ret = smu_alloc_memory_pool(smu);
+ if (ret)
+ goto failed;
+
+ /*
+ * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to notify
+ * the SMC of the pool location.
+ */
+ ret = smu_notify_memory_pool_location(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ adev->pm.dpm_enabled = true;
+
+ pr_info("SMU is initialized successfully!\n");
+
+ return 0;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ kfree(table_context->driver_pptable);
+ table_context->driver_pptable = NULL;
+
+ kfree(table_context->max_sustainable_clocks);
+ table_context->max_sustainable_clocks = NULL;
+
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+
+ kfree(table_context->od_settings_min);
+ table_context->od_settings_min = NULL;
+
+ kfree(table_context->overdrive_table);
+ table_context->overdrive_table = NULL;
+
+ kfree(table_context->od8_settings);
+ table_context->od8_settings = NULL;
+
+ ret = smu_fini_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_free_memory_pool(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int smu_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_hw_fini(adev);
+ if (ret)
+ return ret;
+
+ ret = smu_hw_init(adev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_suspend(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_system_features_control(smu, false);
+ if (ret)
+ return ret;
+
+ smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
+
+ return 0;
+}
+
+static int smu_resume(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ pr_info("SMU is resuming...\n");
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_smc_table_hw_init(smu, false);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ pr_info("SMU is resumed successfully!\n");
+
+ return 0;
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_display_configuration_change(struct smu_context *smu,
+ const struct amd_pp_display_configuration *display_config)
+{
+ int index = 0;
+ int num_of_active_display = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ if (!display_config)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
+
+ for (index = 0; index < display_config->num_path_including_non_display; index++) {
+ if (display_config->displays[index].controller_id != 0)
+ num_of_active_display++;
+ }
+
+ smu_set_active_display_count(smu, num_of_active_display);
+
+ smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
+ display_config->cpu_cc6_disable,
+ display_config->cpu_pstate_disable,
+ display_config->nb_pstate_switch_disable);
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+static int smu_get_clock_info(struct smu_context *smu,
+ struct smu_clock_info *clk_info,
+ enum smu_perf_level_designation designation)
+{
+ int ret;
+ struct smu_performance_level level = {0};
+
+ if (!clk_info)
+ return -EINVAL;
+
+ ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ ret = smu_get_perf_level(smu, designation, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ return 0;
+}
+
+int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks)
+{
+ struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct smu_clock_info hw_clocks;
+ int ret = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_get_dal_power_level(smu, &simple_clocks);
+
+ if (smu->support_power_containment)
+ ret = smu_get_clock_info(smu, &hw_clocks,
+ PERF_LEVEL_POWER_CONTAINMENT);
+ else
+ ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
+
+ if (ret) {
+ pr_err("Error in smu_get_clock_info\n");
+ goto failed;
+ }
+
+ clocks->min_engine_clock = hw_clocks.min_eng_clk;
+ clocks->max_engine_clock = hw_clocks.max_eng_clk;
+ clocks->min_memory_clock = hw_clocks.min_mem_clk;
+ clocks->max_memory_clock = hw_clocks.max_mem_clk;
+ clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+ clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
+
+ if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int smu_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int smu_enable_umd_pstate(void *handle,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ struct smu_context *smu = (struct smu_context*)(handle);
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
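+ /* Profiling (UMD pstate) levels require gfx clock/power gating to be disabled; gating is restored on exit. */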
+ if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg */
+ if (*level & profile_mode_mask) {
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = true;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg */
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = smu_dpm_ctx->saved_dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = false;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+
+ return 0;
+}
+
+int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ bool skip_display_settings)
+{
+ int ret = 0;
+ int index = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+ long workload;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!skip_display_settings) {
+ ret = smu_display_config_changed(smu);
+ if (ret) {
+ pr_err("Failed to change display config!");
+ return ret;
+ }
+ }
+
+ ret = smu_apply_clocks_adjust_rules(smu);
+ if (ret) {
+ pr_err("Failed to apply clocks adjust rules!");
+ return ret;
+ }
+
+ if (!skip_display_settings) {
+ ret = smu_notify_smc_dispaly_config(smu);
+ if (ret) {
+ pr_err("Failed to notify smc display config!");
+ return ret;
+ }
+ }
+
+ if (smu_dpm_ctx->dpm_level != level) {
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+
+ if (!ret)
+ smu_dpm_ctx->dpm_level = level;
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
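+ /* fls() returns the 1-based position of the highest set bit, so the highest-priority requested workload wins. */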
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+
+ if (smu->power_profile_mode != workload)
+ smu_set_power_profile_mode(smu, &workload, 0);
+ }
+
+ return ret;
+}
+
+int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id)
+{
+ int ret = 0;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+ ret = smu_set_cpu_power_state(smu);
+ if (ret)
+ return ret;
+ ret = smu_adjust_power_state_dynamic(smu, level, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+const struct amd_ip_funcs smu_ip_funcs = {
+ .name = "smu",
+ .early_init = smu_early_init,
+ .late_init = smu_late_init,
+ .sw_init = smu_sw_init,
+ .sw_fini = smu_sw_fini,
+ .hw_init = smu_hw_init,
+ .hw_fini = smu_hw_fini,
+ .suspend = smu_suspend,
+ .resume = smu_resume,
+ .is_idle = NULL,
+ .check_soft_reset = NULL,
+ .wait_for_idle = NULL,
+ .soft_reset = NULL,
+ .set_clockgating_state = smu_set_clockgating_state,
+ .set_powergating_state = smu_set_powergating_state,
+ .enable_umd_pstate = smu_enable_umd_pstate,
+};
+
+const struct amdgpu_ip_block_version smu_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 0b3c6d1d52e4..cc63705920dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -35,7 +35,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega12_thermal.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
- vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o
+ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
+ vega12_baco.o smu9_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index c1c51c115e57..70f7f47a2fcf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -76,7 +76,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = NULL;
- int ret = -EINVAL;;
+ int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 0ad8fe4a6277..9a595f7525e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -35,6 +35,7 @@
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
+#include "smu10.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -114,11 +115,6 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
smu10_data->num_active_display = 0;
smu10_data->deep_sleep_dcefclk = 0;
- if (hwmgr->feature_mask & PP_GFXOFF_MASK)
- smu10_data->gfx_off_controled_by_driver = true;
- else
- smu10_data->gfx_off_controled_by_driver = false;
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -209,18 +205,13 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
return 0;
}
-static inline uint32_t convert_10k_to_mhz(uint32_t clock)
-{
- return (clock + 99) / 100;
-}
-
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
- smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
+ smu10_data->deep_sleep_dcefclk != clock) {
+ smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
smu10_data->deep_sleep_dcefclk);
@@ -233,8 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->dcf_actual_hard_min_freq &&
- smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->dcf_actual_hard_min_freq != clock) {
+ smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
smu10_data->dcf_actual_hard_min_freq);
@@ -247,8 +238,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->f_actual_hard_min_freq &&
- smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->f_actual_hard_min_freq != clock) {
+ smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
smu10_data->f_actual_hard_min_freq);
@@ -330,9 +321,9 @@ static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver) {
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
/* confirm gfx is back to "on" state */
@@ -350,9 +341,9 @@ static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
return 0;
@@ -577,7 +568,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
struct smu10_hwmgr *data = hwmgr->backend;
- struct amdgpu_device *adev = hwmgr->adev;
uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
@@ -586,11 +576,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
- /* Disable UMDPSTATE support on rv2 temporarily */
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id >= 8))
- return 0;
-
if (min_sclk < data->gfx_min_freq_limit)
min_sclk = data->gfx_min_freq_limit;
@@ -1205,6 +1190,94 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
}
}
+static int conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+{
+ uint32_t i, size = 0;
+ static const uint8_t
+ profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
+ {90, 60, 0, 0,},
+ {70, 60, 0, 0,},
+ {70, 90, 0, 0,},
+ {30, 60, 0, 6,},
+ };
+ static const char *profile_name[6] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE"};
+ static const char *title[6] = {"NUM",
+ "MODE_NAME",
+ "BUSY_SET_POINT",
+ "FPS",
+ "USE_RLC_BUSY",
+ "MIN_ACTIVE_LEVEL"};
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ title[1], title[2], title[3], title[4], title[5]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
+ size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ profile_mode_setting[i][0], profile_mode_setting[i][1],
+ profile_mode_setting[i][2], profile_mode_setting[i][3]);
+
+ return size;
+}
+
+static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+{
+ int workload_type = 0;
+
+ if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
+ pr_err("Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+ hwmgr->power_profile_mode = input[size];
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+ 1 << workload_type);
+
+ return 0;
+}
+
+
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
@@ -1246,6 +1319,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.powergate_sdma = smu10_powergate_sdma,
.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+ .get_power_profile_mode = smu10_get_power_profile_mode,
+ .set_power_profile_mode = smu10_set_power_profile_mode,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 83d3d935f3ac..048757e8f494 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,7 +77,7 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[7] =
+static struct profile_mode_setting smu7_profiling[7] =
{{0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
@@ -4984,17 +4984,27 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
- if (size < 8)
+ if (size < 8 && size != 0)
return -EINVAL;
-
- tmp.bupdate_sclk = input[0];
- tmp.sclk_up_hyst = input[1];
- tmp.sclk_down_hyst = input[2];
- tmp.sclk_activity = input[3];
- tmp.bupdate_mclk = input[4];
- tmp.mclk_up_hyst = input[5];
- tmp.mclk_down_hyst = input[6];
- tmp.mclk_activity = input[7];
+ /* If only CUSTOM is passed in, use the saved values. Check
+ * that we actually have a CUSTOM profile by ensuring that
+ * the "use sclk" or the "use mclk" bits are set
+ */
+ tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
+ if (size == 0) {
+ if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
+ return -EINVAL;
+ } else {
+ tmp.bupdate_sclk = input[0];
+ tmp.sclk_up_hyst = input[1];
+ tmp.sclk_down_hyst = input[2];
+ tmp.sclk_activity = input[3];
+ tmp.bupdate_mclk = input[4];
+ tmp.mclk_up_hyst = input[5];
+ tmp.mclk_down_hyst = input[6];
+ tmp.mclk_activity = input[7];
+ smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
+ }
if (!smum_update_dpm_settings(hwmgr, &tmp)) {
memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
new file mode 100644
index 000000000000..de0a37f7c632
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega10_inc.h"
+#include "smu9_baco.h"
+
+int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg, data;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ WREG32(0x12074, 0xFFF0003B);
+ data = RREG32(0x12075);
+
+ if (data == 0x1) {
+ reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
+
+ if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
+ *cap = true;
+ }
+
+ return 0;
+}
+
+int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
new file mode 100644
index 000000000000..84e90f801ac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU9_BACO_H__
+#define __SMU9_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index 7337be5602e4..d168af4a4d78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -85,48 +85,11 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg, data;
-
- *cap = false;
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
-
- WREG32(0x12074, 0xFFF0003B);
- data = RREG32(0x12075);
-
- if (data == 0x1) {
- reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
-
- if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
- }
-
- return 0;
-}
-
-int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
-
- if (reg & BACO_CNTL__BACO_MODE_MASK)
- /* gfx has already entered BACO state */
- *state = BACO_STATE_IN;
- else
- *state = BACO_STATE_OUT;
- return 0;
-}
-
int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
enum BACO_STATE cur_state;
- vega10_baco_get_state(hwmgr, &cur_state);
+ smu9_baco_get_state(hwmgr, &cur_state);
if (cur_state == state)
/* asic already in the target state */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index f7a3ffa744b3..96d793f026a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -22,11 +22,8 @@
*/
#ifndef __VEGA10_BACO_H__
#define __VEGA10_BACO_H__
-#include "hwmgr.h"
-#include "common_baco.h"
+#include "smu9_baco.h"
-extern int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
-extern int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5c4f701939ea..384c37875cd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1427,6 +1427,15 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
vega10_setup_default_pcie_table(hwmgr);
+ /* Zero out the saved copy of the CUSTOM profile
+ * This will be checked when trying to set the profile
+ * and will require that new values be passed in
+ */
+ data->custom_profile_mode[0] = 0;
+ data->custom_profile_mode[1] = 0;
+ data->custom_profile_mode[2] = 0;
+ data->custom_profile_mode[3] = 0;
+
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
@@ -4904,16 +4913,23 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
uint8_t FPS;
uint8_t use_rlc_busy;
uint8_t min_active_level;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1<<hwmgr->power_profile_mode);
-
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size == 0 || size > 4)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (size != 0 && size != 4)
return -EINVAL;
+ /* If size = 0 and the CUSTOM profile has been set already
+ * then just apply the profile. The copy stored in the hwmgr
+ * is zeroed out on init
+ */
+ if (size == 0) {
+ if (data->custom_profile_mode[0] != 0)
+ goto out;
+ else
+ return -EINVAL;
+ }
+
data->custom_profile_mode[0] = busy_set_point = input[0];
data->custom_profile_mode[1] = FPS = input[1];
data->custom_profile_mode[2] = use_rlc_busy = input[2];
@@ -4924,6 +4940,11 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
use_rlc_busy << 16 | min_active_level<<24);
}
+out:
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
@@ -5170,8 +5191,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
- .get_asic_baco_capability = vega10_baco_get_capability,
- .get_asic_baco_state = vega10_baco_get_state,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
new file mode 100644
index 000000000000..9d8ca94a8f0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega12_inc.h"
+#include "vega12_ppsmc.h"
+#include "vega12_baco.h"
+
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmBIF_DOORBELL_CNTL_BASE_IDX, mmBIF_DOORBELL_CNTL, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIF_FB_EN_BASE_IDX, mmBIF_FB_EN, 0, 0, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1 }
+};
+
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1 },
+ { CMD_DELAY_MS, 0, 0, 0, 5, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100 }
+};
+
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0 },
+ { CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, THM_BACO_CNTL__BACO_EXIT__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0 }
+};
+
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_6_BASE_IDX, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_7_BASE_IDX, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu9_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
+ ARRAY_SIZE(pre_baco_tbl))) {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+
+ if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+ }
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (soc15_baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (soc15_baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
new file mode 100644
index 000000000000..57b72e5a95ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA12_BACO_H__
+#define __VEGA12_BACO_H__
+#include "smu9_baco.h"
+
+extern int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index bdb48e94eff6..707cd4b0357f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -45,6 +45,7 @@
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
+#include "vega12_baco.h"
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
@@ -2626,8 +2627,12 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
+ .set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
+
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
index 30b278c50222..e6d9e84059e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
@@ -35,5 +35,7 @@
#include "asic_reg/gc/gc_9_2_1_sh_mask.h"
#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_sh_mask.h"
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 5e8602a79b1c..df6ff9252401 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -27,6 +27,7 @@
#include "vega20_inc.h"
#include "vega20_ppsmc.h"
#include "vega20_baco.h"
+#include "vega20_smumgr.h"
@@ -101,3 +102,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
}
+
+int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ ret = vega20_set_pptable_driver_address(hwmgr);
+ if (ret)
+ return ret;
+
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index 51c7f8392925..f06471e712dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -28,5 +28,6 @@
extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 23b5b94a4939..9b9f87b84910 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -434,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
+ data->is_custom_profile_set = false;
return 0;
}
@@ -450,6 +451,7 @@ static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
ret = vega20_init_sclk_threshold(hwmgr);
@@ -457,7 +459,15 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- return 0;
+ if (adev->in_baco_reset) {
+ adev->in_baco_reset = 0;
+
+ ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
+ if (ret)
+ pr_err("Failed to apply vega20 baco workaround!\n");
+ }
+
+ return ret;
}
/*
@@ -3450,7 +3460,18 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return ;
data->vce_power_gated = bgate;
- vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ if (bgate) {
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ } else {
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+
}
static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
@@ -3826,16 +3847,19 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, result = 0;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode);
+ if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", power_profile_mode);
return -EINVAL;
}
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 10)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ if (size == 0 && !data->is_custom_profile_set)
+ return -EINVAL;
+ if (size < 10 && size != 0)
return -EINVAL;
result = vega20_get_activity_monitor_coeff(hwmgr,
@@ -3845,6 +3869,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
"[SetPowerProfile] Failed to get activity monitor!",
return result);
+ /* If size==0, then we want to apply the already-configured
+ * CUSTOM profile again. Just apply it, since we checked its
+ * validity above
+ */
+ if (size == 0)
+ goto out;
+
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
@@ -3895,17 +3926,21 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
result = vega20_set_activity_monitor_coeff(hwmgr,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
+ data->is_custom_profile_set = true;
PP_ASSERT_WITH_CODE(!result,
"[SetPowerProfile] Failed to set activity monitor!",
return result);
}
+out:
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
- conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << workload_type);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index ac2a3118a0ae..2c3125f82b24 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -531,6 +531,8 @@ struct vega20_hwmgr {
bool pcie_parameters_override;
uint32_t pcie_gen_level1;
uint32_t pcie_width_level1;
+
+ bool is_custom_profile_set;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
new file mode 100644
index 000000000000..c8b168b3413b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_SMU_H__
+#define __AMDGPU_SMU_H__
+
+#include "amdgpu.h"
+#include "kgd_pp_interface.h"
+#include "dm_pp_interface.h"
+
+struct smu_hw_power_state {
+ unsigned int magic;
+};
+
+struct smu_power_state;
+
+enum smu_state_ui_label {
+ SMU_STATE_UI_LABEL_NONE,
+ SMU_STATE_UI_LABEL_BATTERY,
+ SMU_STATE_UI_TABEL_MIDDLE_LOW,
+ SMU_STATE_UI_LABEL_BALLANCED,
+ SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
+ SMU_STATE_UI_LABEL_PERFORMANCE,
+ SMU_STATE_UI_LABEL_BACO,
+};
+
+enum smu_state_classification_flag {
+ SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
+ SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
+ SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
+ SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
+ SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
+ SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
+ SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
+ SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
+ SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
+ SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
+};
+
+struct smu_state_classification_block {
+ enum smu_state_ui_label ui_label;
+ enum smu_state_classification_flag flags;
+ int bios_index;
+ bool temporary_state;
+ bool to_be_deleted;
+};
+
+struct smu_state_pcie_block {
+ unsigned int lanes;
+};
+
+enum smu_refreshrate_source {
+ SMU_REFRESHRATE_SOURCE_EDID,
+ SMU_REFRESHRATE_SOURCE_EXPLICIT
+};
+
+struct smu_state_display_block {
+ bool disable_frame_modulation;
+ bool limit_refreshrate;
+ enum smu_refreshrate_source refreshrate_source;
+ int explicit_refreshrate;
+ int edid_refreshrate_index;
+ bool enable_vari_bright;
+};
+
+struct smu_state_memroy_block {
+ bool dll_off;
+ uint8_t m3arb;
+ uint8_t unused[3];
+};
+
+struct smu_state_software_algorithm_block {
+ bool disable_load_balancing;
+ bool enable_sleep_for_timestamps;
+};
+
+struct smu_temperature_range {
+ int min;
+ int max;
+};
+
+struct smu_state_validation_block {
+ bool single_display_only;
+ bool disallow_on_dc;
+ uint8_t supported_power_levels;
+};
+
+struct smu_uvd_clocks {
+ uint32_t vclk;
+ uint32_t dclk;
+};
+
+/**
+* Structure to hold a SMU Power State.
+*/
+struct smu_power_state {
+ uint32_t id;
+ struct list_head ordered_list;
+ struct list_head all_states_list;
+
+ struct smu_state_classification_block classification;
+ struct smu_state_validation_block validation;
+ struct smu_state_pcie_block pcie;
+ struct smu_state_display_block display;
+ struct smu_state_memroy_block memory;
+ struct smu_temperature_range temperatures;
+ struct smu_state_software_algorithm_block software;
+ struct smu_uvd_clocks uvd_clocks;
+ struct smu_hw_power_state hardware;
+};
+
+enum smu_message_type
+{
+ SMU_MSG_TestMessage = 0,
+ SMU_MSG_GetSmuVersion,
+ SMU_MSG_GetDriverIfVersion,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ SMU_MSG_SetAllowedFeaturesMaskHigh,
+ SMU_MSG_EnableAllSmuFeatures,
+ SMU_MSG_DisableAllSmuFeatures,
+ SMU_MSG_EnableSmuFeaturesLow,
+ SMU_MSG_EnableSmuFeaturesHigh,
+ SMU_MSG_DisableSmuFeaturesLow,
+ SMU_MSG_DisableSmuFeaturesHigh,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ SMU_MSG_SetWorkloadMask,
+ SMU_MSG_SetPptLimit,
+ SMU_MSG_SetDriverDramAddrHigh,
+ SMU_MSG_SetDriverDramAddrLow,
+ SMU_MSG_SetToolsDramAddrHigh,
+ SMU_MSG_SetToolsDramAddrLow,
+ SMU_MSG_TransferTableSmu2Dram,
+ SMU_MSG_TransferTableDram2Smu,
+ SMU_MSG_UseDefaultPPTable,
+ SMU_MSG_UseBackupPPTable,
+ SMU_MSG_RunBtc,
+ SMU_MSG_RequestI2CBus,
+ SMU_MSG_ReleaseI2CBus,
+ SMU_MSG_SetFloorSocVoltage,
+ SMU_MSG_SoftReset,
+ SMU_MSG_StartBacoMonitor,
+ SMU_MSG_CancelBacoMonitor,
+ SMU_MSG_EnterBaco,
+ SMU_MSG_SetSoftMinByFreq,
+ SMU_MSG_SetSoftMaxByFreq,
+ SMU_MSG_SetHardMinByFreq,
+ SMU_MSG_SetHardMaxByFreq,
+ SMU_MSG_GetMinDpmFreq,
+ SMU_MSG_GetMaxDpmFreq,
+ SMU_MSG_GetDpmFreqByIndex,
+ SMU_MSG_GetDpmClockFreq,
+ SMU_MSG_GetSsVoltageByDpm,
+ SMU_MSG_SetMemoryChannelConfig,
+ SMU_MSG_SetGeminiMode,
+ SMU_MSG_SetGeminiApertureHigh,
+ SMU_MSG_SetGeminiApertureLow,
+ SMU_MSG_SetMinLinkDpmByIndex,
+ SMU_MSG_OverridePcieParameters,
+ SMU_MSG_OverDriveSetPercentage,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ SMU_MSG_ReenableAcDcInterrupt,
+ SMU_MSG_NotifyPowerSource,
+ SMU_MSG_SetUclkFastSwitch,
+ SMU_MSG_SetUclkDownHyst,
+ SMU_MSG_GfxDeviceDriverReset,
+ SMU_MSG_GetCurrentRpm,
+ SMU_MSG_SetVideoFps,
+ SMU_MSG_SetTjMax,
+ SMU_MSG_SetFanTemperatureTarget,
+ SMU_MSG_PrepareMp1ForUnload,
+ SMU_MSG_DramLogSetDramAddrHigh,
+ SMU_MSG_DramLogSetDramAddrLow,
+ SMU_MSG_DramLogSetDramSize,
+ SMU_MSG_SetFanMaxRpm,
+ SMU_MSG_SetFanMinPwm,
+ SMU_MSG_ConfigureGfxDidt,
+ SMU_MSG_NumOfDisplays,
+ SMU_MSG_RemoveMargins,
+ SMU_MSG_ReadSerialNumTop32,
+ SMU_MSG_ReadSerialNumBottom32,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ SMU_MSG_WaflTest,
+ SMU_MSG_SetFclkGfxClkRatio,
+ SMU_MSG_AllowGfxOff,
+ SMU_MSG_DisallowGfxOff,
+ SMU_MSG_GetPptLimit,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ SMU_MSG_GetDebugData,
+ SMU_MSG_SetXgmiMode,
+ SMU_MSG_RunAfllBtc,
+ SMU_MSG_ExitBaco,
+ SMU_MSG_PrepareMp1ForReset,
+ SMU_MSG_PrepareMp1ForShutdown,
+ SMU_MSG_SetMGpuFanBoostLimitRpm,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ SMU_MSG_MAX_COUNT,
+};
+
+enum smu_memory_pool_size
+{
+ SMU_MEMORY_POOL_SIZE_ZERO = 0,
+ SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
+ SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
+ SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
+ SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
+};
+
+#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
+ do { \
+ tables[table_id].size = s; \
+ tables[table_id].align = a; \
+ tables[table_id].domain = d; \
+ } while (0)
+
+struct smu_table {
+ uint64_t size;
+ uint32_t align;
+ uint8_t domain;
+ uint64_t mc_address;
+ void *cpu_addr;
+ struct amdgpu_bo *bo;
+};
+
+enum smu_perf_level_designation {
+ PERF_LEVEL_ACTIVITY,
+ PERF_LEVEL_POWER_CONTAINMENT,
+};
+
+struct smu_performance_level {
+ uint32_t core_clock;
+ uint32_t memory_clock;
+ uint32_t vddc;
+ uint32_t vddci;
+ uint32_t non_local_mem_freq;
+ uint32_t non_local_mem_width;
+};
+
+struct smu_clock_info {
+ uint32_t min_mem_clk;
+ uint32_t max_mem_clk;
+ uint32_t min_eng_clk;
+ uint32_t max_eng_clk;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+};
+
+struct smu_bios_boot_up_values
+{
+ uint32_t revision;
+ uint32_t gfxclk;
+ uint32_t uclk;
+ uint32_t socclk;
+ uint32_t dcefclk;
+ uint32_t eclk;
+ uint32_t vclk;
+ uint32_t dclk;
+ uint16_t vddc;
+ uint16_t vddci;
+ uint16_t mvddc;
+ uint16_t vdd_gfx;
+ uint8_t cooling_id;
+ uint32_t pp_table_id;
+};
+
+struct smu_table_context
+{
+ void *power_play_table;
+ uint32_t power_play_table_size;
+ void *hardcode_pptable;
+
+ void *max_sustainable_clocks;
+ struct smu_bios_boot_up_values boot_values;
+ void *driver_pptable;
+ struct smu_table *tables;
+ uint32_t table_count;
+ struct smu_table memory_pool;
+ uint16_t software_shutdown_temp;
+ uint8_t thermal_controller_type;
+ uint16_t TDPODLimit;
+
+ uint8_t *od_feature_capabilities;
+ uint32_t *od_settings_max;
+ uint32_t *od_settings_min;
+ void *overdrive_table;
+ void *od8_settings;
+ bool od_gfxclk_update;
+ bool od_memclk_update;
+};
+
+struct smu_dpm_context {
+ uint32_t dpm_context_size;
+ void *dpm_context;
+ void *golden_dpm_context;
+ bool enable_umd_pstate;
+ enum amd_dpm_forced_level dpm_level;
+ enum amd_dpm_forced_level saved_dpm_level;
+ enum amd_dpm_forced_level requested_dpm_level;
+ struct smu_power_state *dpm_request_power_state;
+ struct smu_power_state *dpm_current_power_state;
+ struct mclock_latency_table *mclk_latency_table;
+};
+
+struct smu_power_context {
+ void *power_context;
+ uint32_t power_context_size;
+};
+
+
+#define SMU_FEATURE_MAX (64)
+struct smu_feature
+{
+ uint32_t feature_num;
+ DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
+ struct mutex mutex;
+};
+
+struct smu_clocks {
+ uint32_t engine_clock;
+ uint32_t memory_clock;
+ uint32_t bus_bandwidth;
+ uint32_t engine_clock_in_sr;
+ uint32_t dcef_clock;
+ uint32_t dcef_clock_in_sr;
+};
+
+#define MAX_REGULAR_DPM_NUM 16
+struct mclk_latency_entries {
+ uint32_t frequency;
+ uint32_t latency;
+};
+struct mclock_latency_table {
+ uint32_t count;
+ struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
+};
+
+#define WORKLOAD_POLICY_MAX 7
+struct smu_context
+{
+ struct amdgpu_device *adev;
+
+ const struct smu_funcs *funcs;
+ const struct pptable_funcs *ppt_funcs;
+ struct mutex mutex;
+ uint64_t pool_size;
+
+ struct smu_table_context smu_table;
+ struct smu_dpm_context smu_dpm;
+ struct smu_power_context smu_power;
+ struct smu_feature smu_feature;
+ struct amd_pp_display_configuration *display_config;
+
+ uint32_t pstate_sclk;
+ uint32_t pstate_mclk;
+
+ bool od_enabled;
+ uint32_t power_limit;
+ uint32_t default_power_limit;
+
+ bool support_power_containment;
+ bool disable_watermark;
+
+#define WATERMARKS_EXIST (1 << 0)
+#define WATERMARKS_LOADED (1 << 1)
+ uint32_t watermarks_bitmap;
+
+ uint32_t workload_mask;
+ uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+ uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+ uint32_t power_profile_mode;
+ uint32_t default_power_profile_mode;
+
+ uint32_t smc_if_version;
+};
+
+struct pptable_funcs {
+ int (*alloc_dpm_context)(struct smu_context *smu);
+ int (*store_powerplay_table)(struct smu_context *smu);
+ int (*check_powerplay_table)(struct smu_context *smu);
+ int (*append_powerplay_table)(struct smu_context *smu);
+ int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
+ int (*run_afll_btc)(struct smu_context *smu);
+ int (*get_unallowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+ int (*set_default_dpm_table)(struct smu_context *smu);
+ int (*set_power_state)(struct smu_context *smu);
+ int (*populate_umd_state_clk)(struct smu_context *smu);
+ int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf);
+ int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+ int (*set_default_od8_settings)(struct smu_context *smu);
+ int (*update_specified_od8_value)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*get_od_percentage)(struct smu_context *smu, enum pp_clock_type type);
+ int (*set_od_percentage)(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value);
+ int (*od_edit_dpm_table)(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+ int (*get_clock_by_type_with_latency)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_latency
+ *clocks);
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu);
+ int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+ int (*pre_display_config_changed)(struct smu_context *smu);
+ int (*display_config_changed)(struct smu_context *smu);
+ int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+ int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
+ int (*unforce_dpm_levels)(struct smu_context *smu);
+ int (*upload_dpm_level)(struct smu_context *smu, bool max,
+ uint32_t feature_mask);
+ int (*get_profiling_clk_mask)(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask);
+ int (*set_cpu_power_state)(struct smu_context *smu);
+};
+
+struct smu_funcs
+{
+ int (*init_microcode)(struct smu_context *smu);
+ int (*init_smc_tables)(struct smu_context *smu);
+ int (*fini_smc_tables)(struct smu_context *smu);
+ int (*init_power)(struct smu_context *smu);
+ int (*fini_power)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
+ int (*check_fw_status)(struct smu_context *smu);
+ int (*read_pptable_from_vbios)(struct smu_context *smu);
+ int (*get_vbios_bootup_values)(struct smu_context *smu);
+ int (*get_clk_info_from_vbios)(struct smu_context *smu);
+ int (*check_pptable)(struct smu_context *smu);
+ int (*parse_pptable)(struct smu_context *smu);
+ int (*populate_smc_pptable)(struct smu_context *smu);
+ int (*check_fw_version)(struct smu_context *smu);
+ int (*write_pptable)(struct smu_context *smu);
+ int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_tool_table_location)(struct smu_context *smu);
+ int (*notify_memory_pool_location)(struct smu_context *smu);
+ int (*write_watermarks_table)(struct smu_context *smu);
+ int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
+ int (*system_features_control)(struct smu_context *smu, bool en);
+ int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
+ int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+ int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ int (*init_display)(struct smu_context *smu);
+ int (*set_allowed_mask)(struct smu_context *smu);
+ int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ bool (*is_dpm_running)(struct smu_context *smu);
+ int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled);
+ int (*notify_display_change)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def);
+ int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+ int (*get_current_clk_freq)(struct smu_context *smu, uint32_t clk_id, uint32_t *value);
+ int (*init_max_sustainable_clocks)(struct smu_context *smu);
+ int (*start_thermal_control)(struct smu_context *smu);
+ int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+ int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
+ int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
+ int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
+ bool cc6_disable, bool pstate_disable,
+ bool pstate_switch_disable);
+ int (*get_clock_by_type)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_max_high_clocks)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*display_clock_voltage_request)(struct smu_context *smu,
+ struct pp_display_clock_request *clock_req);
+ int (*get_dal_power_level)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*get_perf_level)(struct smu_context *smu,
+ enum smu_perf_level_designation designation,
+ struct smu_performance_level *level);
+ int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
+ struct smu_clock_info *clocks);
+ int (*notify_smu_enable_pwe)(struct smu_context *smu);
+ int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+ int (*set_od8_default_settings)(struct smu_context *smu,
+ bool initialize);
+ int (*conv_power_profile_to_pplib_workload)(int power_profile);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ int (*update_od8_settings)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+ uint32_t (*get_sclk)(struct smu_context *smu, bool low);
+ uint32_t (*get_mclk)(struct smu_context *smu, bool low);
+ int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
+ uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+ int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+ int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+ int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+};
+
+#define smu_init_microcode(smu) \
+ ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
+#define smu_load_microcode(smu) \
+ ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
+#define smu_check_fw_status(smu) \
+ ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
+#define smu_read_pptable_from_vbios(smu) \
+ ((smu)->funcs->read_pptable_from_vbios ? (smu)->funcs->read_pptable_from_vbios((smu)) : 0)
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_pptable(smu) \
+ ((smu)->funcs->populate_smc_pptable ? (smu)->funcs->populate_smc_pptable((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_write_watermarks_table(smu) \
+ ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_od8_default_settings(smu, initialize) \
+ ((smu)->funcs->set_od8_default_settings ? (smu)->funcs->set_od8_default_settings((smu), (initialize)) : 0)
+#define smu_update_od8_settings(smu, index, value) \
+ ((smu)->funcs->update_od8_settings ? (smu)->funcs->update_od8_settings((smu), (index), (value)) : 0)
+#define smu_get_current_rpm(smu, speed) \
+ ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
+#define smu_set_fan_speed_rpm(smu, speed) \
+ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display(smu) \
+ ((smu)->funcs->init_display ? (smu)->funcs->init_display((smu)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->funcs->is_dpm_running ? (smu)->funcs->is_dpm_running((smu)) : 0)
+#define smu_feature_update_enable_state(smu, feature_id, enabled) \
+ ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+#define smu_update_specified_od8_value(smu, index, value) \
+ ((smu)->ppt_funcs->update_specified_od8_value ? (smu)->ppt_funcs->update_specified_od8_value((smu), (index), (value)) : 0)
+#define smu_get_power_limit(smu, limit, def) \
+ ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0)
+#define smu_set_power_limit(smu, limit) \
+ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+#define smu_print_clk_levels(smu, type, buf) \
+ ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (type), (buf)) : 0)
+#define smu_force_clk_levels(smu, type, level) \
+ ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0)
+#define smu_get_od_percentage(smu, type) \
+ ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
+#define smu_set_od_percentage(smu, type, value) \
+ ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
+#define smu_od_edit_dpm_table(smu, type, input, size) \
+ ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
+#define smu_read_sensor(smu, sensor, data, size) \
+ ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
+#define smu_get_power_profile_mode(smu, buf) \
+ ((smu)->funcs->get_power_profile_mode ? (smu)->funcs->get_power_profile_mode((smu), buf) : 0)
+#define smu_set_power_profile_mode(smu, param, param_size) \
+ ((smu)->funcs->set_power_profile_mode ? (smu)->funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
+#define smu_get_performance_level(smu) \
+ ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0)
+#define smu_force_performance_level(smu, level) \
+ ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0)
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_upload_dpm_level(smu, max, feature_mask) \
+ ((smu)->ppt_funcs->upload_dpm_level ? (smu)->ppt_funcs->upload_dpm_level((smu), (max), (feature_mask)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+#define smu_get_fan_control_mode(smu) \
+ ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
+#define smu_set_fan_control_mode(smu, value) \
+ ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
+#define smu_get_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->get_fan_speed_percent ? (smu)->funcs->get_fan_speed_percent((smu), (speed)) : 0)
+#define smu_set_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_run_afll_btc(smu) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
+#define smu_get_unallowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_unallowed_feature_mask? (smu)->ppt_funcs->get_unallowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+#define smu_set_deep_sleep_dcefclk(smu, clk) \
+ ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
+#define smu_set_active_display_count(smu, count) \
+ ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+#define smu_get_clock_by_type(smu, type, clocks) \
+ ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
+#define smu_get_max_high_clocks(smu, clocks) \
+ ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
+#define smu_get_clock_by_type_with_latency(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (type), (clocks)) : 0)
+#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
+#define smu_display_clock_voltage_request(smu, clock_req) \
+ ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+#define smu_notify_smu_enable_pwe(smu) \
+ ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
+#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
+ ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_uvd_enable ? (smu)->funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_vce_enable ? (smu)->funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_get_sclk(smu, low) \
+ ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
+#define smu_get_mclk(smu, low) \
+ ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
+#define smu_set_xgmi_pstate(smu, pstate) \
+ ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
+
+
+extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr);
+
+extern const struct amd_ip_funcs smu_ip_funcs;
+
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern int smu_feature_init_dpm(struct smu_context *smu);
+
+extern int smu_feature_is_enabled(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable);
+extern int smu_feature_is_supported(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable);
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu);
+#define smu_update_table(smu, table_id, table_data, drv2smu) \
+ smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu))
+
+bool is_support_sw_smu(struct amdgpu_device *adev);
+int smu_reset(struct smu_context *smu);
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_sys_get_pp_table(struct smu_context *smu, void **table);
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
+int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+
+/* smu to display interface */
+extern int smu_display_configuration_change(struct smu_context *smu,
+ const struct amd_pp_display_configuration *display_config);
+extern int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks);
+extern int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate);
+extern int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id);
+#endif
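The header above splits the SMU callbacks into two tables: smu_funcs carries the operations common to an SMU IP version (smu_v11_0.c below populates it), while pptable_funcs carries the ASIC-specific powerplay-table handling and clock policy. The smu_* wrapper macros dispatch through whichever table owns the callback and fall back to a harmless default when a backend leaves a hook NULL — 0 for optional hooks, -EINVAL for required ones such as get_smu_msg_index. A minimal, self-contained sketch of that pattern, with invented names that are not part of the driver:

#include <stdio.h>
#include <errno.h>

struct ops {
	int (*optional_hook)(int arg);   /* may be left NULL by a backend */
	int (*required_hook)(int arg);   /* callers need a real answer */
};

#define call_optional(o, arg) \
	((o)->optional_hook ? (o)->optional_hook((arg)) : 0)
#define call_required(o, arg) \
	((o)->required_hook ? (o)->required_hook((arg)) : -EINVAL)

static int double_it(int arg) { return arg * 2; }

int main(void)
{
	struct ops backend = { .required_hook = double_it };

	printf("%d\n", call_optional(&backend, 5));  /* hook unset: prints 0 */
	printf("%d\n", call_required(&backend, 5));  /* hook set: prints 10 */
	return 0;
}

The same convention lets the common smu11 code call into an ASIC backend such as vega20_ppt.c without checking at every call site whether a table entry exists.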
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index a2991fa2e6f8..90879e4092a3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -85,7 +85,6 @@
#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36
#define PPSMC_Message_Count 0x37
-
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
index 9e837a5014c5..b96520528240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
@@ -136,12 +136,14 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
-#define WORKLOAD_PPLIB_VIDEO_BIT 2
-#define WORKLOAD_PPLIB_VR_BIT 3
-#define WORKLOAD_PPLIB_COMPUTE_BIT 4
-#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_COUNT 7
typedef struct {
/* MP1_EXT_SCRATCH0 */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
new file mode 100644
index 000000000000..aa8d81f4111e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V11_0_H__
+#define __SMU_V11_0_H__
+
+#include "amdgpu_smu.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP0_FW_INTF 0x30101c0
+#define smnMP1_PUB_CTRL 0x3010b14
+
+struct smu_11_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_11_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+};
+
+struct smu_11_0_dpm_tables {
+ struct smu_11_0_dpm_table soc_table;
+ struct smu_11_0_dpm_table gfx_table;
+ struct smu_11_0_dpm_table uclk_table;
+ struct smu_11_0_dpm_table eclk_table;
+ struct smu_11_0_dpm_table vclk_table;
+ struct smu_11_0_dpm_table dclk_table;
+ struct smu_11_0_dpm_table dcef_table;
+ struct smu_11_0_dpm_table pixel_table;
+ struct smu_11_0_dpm_table display_table;
+ struct smu_11_0_dpm_table phy_table;
+ struct smu_11_0_dpm_table fclk_table;
+};
+
+struct smu_11_0_dpm_context {
+ struct smu_11_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_11_0_power_state {
+ SMU_11_0_POWER_STATE__D0 = 0,
+ SMU_11_0_POWER_STATE__D1,
+ SMU_11_0_POWER_STATE__D3, /* Sleep*/
+ SMU_11_0_POWER_STATE__D4, /* Hibernate*/
+ SMU_11_0_POWER_STATE__D5, /* Power off*/
+};
+
+struct smu_11_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_11_0_power_state power_state;
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+
+#endif
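smu_11_0_dpm_context pairs a min/max range in MHz with each clock domain (SOC, GFX, UCLK and so on). A small illustrative helper, not taken from the driver, showing how such a range would typically be applied to a requested frequency:

#include <stdint.h>

struct dpm_range {          /* mirrors smu_11_0_dpm_table: values in MHz */
	uint32_t min;
	uint32_t max;
};

static uint32_t clamp_to_dpm_range(const struct dpm_range *t, uint32_t freq_mhz)
{
	if (freq_mhz < t->min)
		return t->min;
	if (freq_mhz > t->max)
		return t->max;
	return freq_mhz;
}

The ASIC code consults ranges of this shape when it programs soft limits through the SetSoftMinByFreq/SetSoftMaxByFreq messages defined in the PPSMC header below.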
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
new file mode 100644
index 000000000000..f466f624ad32
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_V11_0_PPSMC_H
+#define SMU_V11_0_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+#define PPSMC_MSG_UseBackupPPTable 0x15
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x18
+#define PPSMC_MSG_ExitBaco 0x19
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x1A
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1B
+#define PPSMC_MSG_SetHardMinByFreq 0x1C
+#define PPSMC_MSG_SetHardMaxByFreq 0x1D
+#define PPSMC_MSG_GetMinDpmFreq 0x1E
+#define PPSMC_MSG_GetMaxDpmFreq 0x1F
+#define PPSMC_MSG_GetDpmFreqByIndex 0x20
+#define PPSMC_MSG_OverridePcieParameters 0x21
+#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x22
+#define PPSMC_MSG_SetWorkloadMask 0x23
+#define PPSMC_MSG_SetUclkFastSwitch 0x24
+#define PPSMC_MSG_GetAvfsVoltageByDpm 0x25
+#define PPSMC_MSG_SetVideoFps 0x26
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
+
+//Power Gating
+#define PPSMC_MSG_AllowGfxOff 0x28
+#define PPSMC_MSG_DisallowGfxOff 0x29
+#define PPSMC_MSG_PowerUpVcn 0x2A
+#define PPSMC_MSG_PowerDownVcn 0x2B
+#define PPSMC_MSG_PowerUpJpeg 0x2C
+#define PPSMC_MSG_PowerDownJpeg 0x2D
+//reserve 0x2A to 0x2F for PG harvesting TBD
+
+//I2C Interface
+#define PPSMC_RequestI2cTransaction 0x30
+
+//Resets
+#define PPSMC_MSG_SoftReset 0x31 //FIXME Need confirmation from driver
+#define PPSMC_MSG_PrepareMp1ForUnload 0x32
+#define PPSMC_MSG_PrepareMp1ForReset 0x33
+#define PPSMC_MSG_PrepareMp1ForShutdown 0x34
+
+//ACDC Power Source
+#define PPSMC_MSG_SetPptLimit 0x35
+#define PPSMC_MSG_GetPptLimit 0x36
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x37
+#define PPSMC_MSG_NotifyPowerSource 0x38
+//#define PPSMC_MSG_GfxDeviceDriverReset 0x39 //FIXME mode1 and 2 resets will go directly go PSP
+
+//BTC
+#define PPSMC_MSG_RunBtc 0x3A
+
+//Debug
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x3B
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x3C
+#define PPSMC_MSG_DramLogSetDramSize 0x3D
+#define PPSMC_MSG_GetDebugData 0x3E
+
+//Others
+#define PPSMC_MSG_ConfigureGfxDidt 0x3F
+#define PPSMC_MSG_NumOfDisplays 0x40
+
+#define PPSMC_MSG_SetMemoryChannelConfig 0x41
+#define PPSMC_MSG_SetGeminiMode 0x42
+#define PPSMC_MSG_SetGeminiApertureHigh 0x43
+#define PPSMC_MSG_SetGeminiApertureLow 0x44
+
+#define PPSMC_Message_Count 0x45
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_Msg;
+
+#endif
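These PPSMC_MSG_* values are the firmware-side message indices. The driver works with its own generic message identifiers and translates them through the ASIC's get_smu_msg_index callback (wrapped by smu_msg_get_index above), so a message the ASIC does not support can be rejected with -EINVAL before anything is written to the mailbox. A toy translation table in the same spirit; the identifiers below are examples, not the driver's tables:

#include <errno.h>
#include <stdint.h>

enum driver_msg { MSG_TEST = 0, MSG_GET_SMU_VERSION, MSG_UNSUPPORTED, MSG_COUNT };

static const int16_t msg_map[MSG_COUNT] = {
	[MSG_TEST]            = 0x1,   /* PPSMC_MSG_TestMessage */
	[MSG_GET_SMU_VERSION] = 0x2,   /* PPSMC_MSG_GetSmuVersion */
	[MSG_UNSUPPORTED]     = -1,    /* not valid on this ASIC */
};

static int get_msg_index(uint32_t msg)
{
	if (msg >= MSG_COUNT || msg_map[msg] < 0)
		return -EINVAL;
	return msg_map[msg];
}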
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
new file mode 100644
index 000000000000..92c65b80bde2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef SMU_11_0_PPTABLE_H
+#define SMU_11_0_PPTABLE_H
+
+
+#define SMU_11_0_TABLE_FORMAT_REVISION 12
+
+//// POWERPLAYTABLE::ulPlatformCaps
+#define SMU_11_0_PP_PLATFORM_CAP_POWERPLAY 0x1
+#define SMU_11_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2
+#define SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC 0x4
+#define SMU_11_0_PP_PLATFORM_CAP_BACO 0x8
+#define SMU_11_0_PP_PLATFORM_CAP_MACO 0x10
+#define SMU_11_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20
+
+// SMU_11_0_PP_THERMALCONTROLLER - Thermal Controller Type
+#define SMU_11_0_PP_THERMALCONTROLLER_NONE 0
+
+#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
+#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+
+enum SMU_11_0_ODFEATURE_ID {
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //Fan Curve feature
+ SMU_11_0_ODFEATURE_COUNT = 14,
+};
+#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
+
+enum SMU_11_0_ODSETTING_ID {
+ SMU_11_0_ODSETTING_GFXCLKFMAX = 0,
+ SMU_11_0_ODSETTING_GFXCLKFMIN,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ SMU_11_0_ODSETTING_UCLKFMAX,
+ SMU_11_0_ODSETTING_POWERPERCENTAGE,
+ SMU_11_0_ODSETTING_FANRPMMIN,
+ SMU_11_0_ODSETTING_FANRPMACOUSTICLIMIT,
+ SMU_11_0_ODSETTING_FANTARGETTEMPERATURE,
+ SMU_11_0_ODSETTING_OPERATINGTEMPMAX,
+ SMU_11_0_ODSETTING_ACTIMING,
+ SMU_11_0_ODSETTING_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODSETTING_AUTOUVENGINE,
+ SMU_11_0_ODSETTING_AUTOOCENGINE,
+ SMU_11_0_ODSETTING_AUTOOCMEMORY,
+ SMU_11_0_ODSETTING_COUNT,
+};
+#define SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings
+
+struct smu_11_0_overdrive_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t feature_count; //Total number of supported features
+ uint32_t setting_count; //Total number of supported settings
+ uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags
+ uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings
+ uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings
+} __attribute__((packed));
+
+enum SMU_11_0_PPCLOCK_ID {
+ SMU_11_0_PPCLOCK_GFXCLK = 0,
+ SMU_11_0_PPCLOCK_VCLK,
+ SMU_11_0_PPCLOCK_DCLK,
+ SMU_11_0_PPCLOCK_ECLK,
+ SMU_11_0_PPCLOCK_SOCCLK,
+ SMU_11_0_PPCLOCK_UCLK,
+ SMU_11_0_PPCLOCK_DCEFCLK,
+ SMU_11_0_PPCLOCK_DISPCLK,
+ SMU_11_0_PPCLOCK_PIXCLK,
+ SMU_11_0_PPCLOCK_PHYCLK,
+ SMU_11_0_PPCLOCK_COUNT,
+};
+#define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
+
+struct smu_11_0_power_saving_clock_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
+ uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
+ uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
+} __attribute__((packed));
+
+struct smu_11_0_powerplay_table
+{
+ struct atom_common_table_header header;
+ uint8_t table_revision;
+ uint32_t table_size; //Driver portion table size. The offset to smc_pptable including header size
+ uint32_t golden_pp_id;
+ uint32_t golden_revision;
+ uint16_t format_id;
+ uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit;
+ uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning.
+ uint16_t software_shutdown_temp;
+
+ uint16_t reserve[6]; //Zero filled field reserved for future use
+
+ struct smu_11_0_power_saving_clock_table power_saving_clock;
+ struct smu_11_0_overdrive_table overdrive_table;
+
+ PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+} __attribute__((packed));
+
+#endif
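The tables in this header are declared __attribute__((packed)) because they are overlaid directly on the powerplay data pulled out of the VBIOS, so the compiler must not insert padding between fields. A hedged sketch of the kind of sanity check done before trusting such a blob; the struct and function here are invented for illustration and are not the driver's:

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

struct blob_header {
	uint16_t structuresize;
	uint8_t  format_revision;
	uint8_t  content_revision;
} __attribute__((packed));

static int check_blob(const void *blob, size_t len, uint8_t expected_rev)
{
	const struct blob_header *hdr = blob;

	if (len < sizeof(*hdr) || len < hdr->structuresize)
		return -EINVAL;   /* truncated table */
	if (hdr->format_revision != expected_rev)
		return -EINVAL;   /* layout we do not understand */
	return 0;
}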
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 4f63a736ea0e..a0883038f3c3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -119,7 +119,8 @@
#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
-#define PPSMC_Message_Count 0x60
+#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
+#define PPSMC_Message_Count 0x61
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
new file mode 100644
index 000000000000..92903a4cc4d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -0,0 +1,1977 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "vega20_ppt.h"
+#include "pp_thermal.h"
+
+#include "asic_reg/thm/thm_11_0_2_offset.h"
+#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
+#include "asic_reg/mp/mp_9_0_offset.h"
+#include "asic_reg/mp/mp_9_0_sh_mask.h"
+#include "asic_reg/nbio/nbio_7_4_offset.h"
+#include "asic_reg/smuio/smuio_9_0_offset.h"
+#include "asic_reg/smuio/smuio_9_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+
+#define SMU11_TOOL_SIZE 0x19000
+#define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
+#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
+#define SMU11_VOLTAGE_SCALE 4
+
+#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
+ FEATURE_DPM_GFXCLK_MASK | \
+ FEATURE_DPM_UCLK_MASK | \
+ FEATURE_DPM_SOCCLK_MASK | \
+ FEATURE_DPM_UVD_MASK | \
+ FEATURE_DPM_VCE_MASK | \
+ FEATURE_DPM_MP0CLK_MASK | \
+ FEATURE_DPM_LINK_MASK | \
+ FEATURE_DPM_DCEFCLK_MASK)
+
+static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ return 0;
+}
+
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ return 0;
+}
+
+static int smu_v11_0_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* timing out here means the SMU never posted a response to the previous message */
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+
+static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ smu_v11_0_wait_for_response(smu);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x\n", index,
+ ret);
+
+ return ret;
+
+}
+
+static int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param)
+{
+
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
+ index, ret, param);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+
+ return ret;
+}
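The two send paths above implement a simple mailbox handshake over the MP1 C2PMSG registers: wait for register 90 to hold the response to the previous message, clear it, write the optional argument to register 82, write the translated message index to register 66 (which starts the SMU), then poll register 90 again and read any result back from 82. A self-contained sketch of that flow with the register accessors faked out; it simplifies the driver's error handling and is not a drop-in replacement:

#include <stdint.h>
#include <errno.h>

static uint32_t regs[128];                          /* fake register file */
static uint32_t reg_read(uint32_t r)                { return regs[r]; }
static void     reg_write(uint32_t r, uint32_t v)   { regs[r] = v; }

enum { REG_RESP = 90, REG_ARG = 82, REG_MSG = 66 };

static int wait_for_response(unsigned int timeout)
{
	while (timeout--) {
		uint32_t v = reg_read(REG_RESP);
		if (v)
			return v == 0x1 ? 0 : -EIO;  /* 0x1 == PPSMC_Result_OK */
		/* the real code does udelay(1) here */
	}
	return -ETIME;
}

static int send_msg_with_param(uint16_t index, uint32_t param, unsigned int timeout)
{
	int ret = wait_for_response(timeout);  /* drain the previous response */
	if (ret)
		return ret;

	reg_write(REG_RESP, 0);      /* clear the response register */
	reg_write(REG_ARG, param);   /* argument goes in before the message */
	reg_write(REG_MSG, index);   /* writing the index kicks the SMU */

	return wait_for_response(timeout);
}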
+
+static int smu_v11_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ chip_name = "vega20";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int smu_v11_0_load_microcode(struct smu_context *smu)
+{
+ return 0;
+}
+
+static int smu_v11_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v11_0_check_fw_version(struct smu_context *smu)
+{
+ uint32_t smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ if (ret)
+ goto err;
+
+ ret = smu_read_smc_arg(smu, &smu_version);
+ if (ret)
+ goto err;
+
+ if (smu_version != smu->smc_if_version)
+ ret = -EINVAL;
+err:
+ return ret;
+}
+
+static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ void *table;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&table);
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+static int smu_v11_0_init_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
+ return -EINVAL;
+
+ return smu_alloc_dpm_context(smu);
+}
+
+static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = NULL;
+ int ret = 0;
+
+ if (smu_table->tables || smu_table->table_count != 0)
+ return -EINVAL;
+
+ tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
+ if (!tables)
+ return -ENOMEM;
+
+ smu_table->tables = tables;
+ smu_table->table_count = TABLE_COUNT;
+
+ SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ ret = smu_v11_0_init_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
+ if (!smu_table->tables || smu_table->table_count == 0)
+ return -EINVAL;
+
+ kfree(smu_table->tables);
+ smu_table->tables = NULL;
+ smu_table->table_count = 0;
+
+ ret = smu_v11_0_fini_dpm_context(smu);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int smu_v11_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v11_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ pr_err("unknown atom_firmware_info version! for smu11\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ struct amdgpu_device *adev = smu->adev;
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+
+ input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_ECLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_VCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
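smu_v11_0_get_clk_info_from_vbios() issues the same getsmuclockinfo atom command five times, once per SYSPLL0 clock ID, and each result comes back in Hz before being scaled down by 10000 to the 10 kHz units the boot_values fields use elsewhere in this file (they are later divided by 100 to get MHz). A sketch of how the repetition could be folded into a loop; run_getsmuclockinfo() is a stand-in for the atom command-table call and simply fakes a 1 GHz reading so the example is self-contained:

#include <stdint.h>
#include <stddef.h>

enum { ID_SOCCLK, ID_DCEFCLK, ID_ECLK, ID_VCLK, ID_DCLK, ID_COUNT };

static int run_getsmuclockinfo(int clk_id, uint32_t *freq_hz)
{
	(void)clk_id;
	*freq_hz = 1000000000u;  /* pretend every clock boots at 1 GHz */
	return 0;
}

static int get_boot_clocks(uint32_t out_10khz[ID_COUNT])
{
	uint32_t freq_hz;
	size_t i;

	for (i = 0; i < ID_COUNT; i++) {
		int ret = run_getsmuclockinfo((int)i, &freq_hz);
		if (ret)
			return ret;
		out_10khz[i] = freq_hz / 10000;  /* Hz -> 10 kHz units, as above */
	}
	return 0;
}

Whether the extra indirection is worth it is a style call; the open-coded version above at least keeps each clock's query easy to grep.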
+
+static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = (uintptr_t)memory_pool->cpu_addr;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v11_0_check_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_check_powerplay_table(smu);
+ return ret;
+}
+
+static int smu_v11_0_parse_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (table_context->driver_pptable)
+ return -EINVAL;
+
+ table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
+
+ if (!table_context->driver_pptable)
+ return -ENOMEM;
+
+ ret = smu_store_powerplay_table(smu);
+ if (ret)
+ return -EINVAL;
+
+ ret = smu_append_powerplay_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_set_default_dpm_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_write_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
+
+ return ret;
+}
+
+static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
+{
+ return smu_update_table(smu, TABLE_WATERMARKS,
+ smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
+}
+
+static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ if (ret)
+ pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
+
+ return ret;
+}
+
+static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (!table_context)
+ return -EINVAL;
+
+ return smu_set_deep_sleep_dcefclk(smu,
+ table_context->boot_values.dcefclk / 100);
+}
+
+static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address));
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_init_display(struct smu_context *smu)
+{
+ int ret = 0;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ return ret;
+}
+
+static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
+{
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (feature_id >= 0 && feature_id < 32)
+ feature_low = (1 << feature_id);
+ else if (feature_id >= 32 && feature_id < 64)
+ feature_high = (1 << (feature_id - 32));
+ else
+ return -EINVAL;
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ mutex_lock(&feature->mutex);
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
+ goto failed;
+
+ bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1]);
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0]);
+ if (ret)
+ goto failed;
+
+failed:
+ mutex_unlock(&feature->mutex);
+ return ret;
+}
+
+static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+
+ return ret;
+}
+
+static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
+{
+	int ret = 0;
+	uint32_t feature_mask[2];
+	uint64_t feature_enabled;
+
+	ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
+	if (ret)
+		return false;
+
+	feature_enabled = (uint64_t)feature_mask[0] |
+			  ((uint64_t)feature_mask[1] << 32);
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
+ if (ret)
+ return ret;
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return ret;
+}
+
+static int smu_v11_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+
+ return ret;
+}
+
+static int
+smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
+ PPCLK_e clock_select)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+ if (ret)
+ return ret;
+
+ if (*clock != 0)
+ return 0;
+
+ /* if DC limit is zero, return AC limit */
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+
+ return ret;
+}
+
+static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+{
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
+ int ret = 0;
+
+	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+					 GFP_KERNEL);
+	if (!max_sustainable_clocks)
+		return -ENOMEM;
+
+	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
+
+ max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
+ max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
+ max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
+ max_sustainable_clocks->display_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->uclock),
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max UCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->soc_clock),
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max SOCCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->dcef_clock),
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DCEFCLK from SMC!",
+ __func__);
+ return ret;
+ }
+
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->display_clock),
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DISPCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->phy_clock),
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PHYCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->pixel_clock),
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PIXCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
+ max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
+
+ return 0;
+}
+
+static int smu_v11_0_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool get_default)
+{
+ int ret = 0;
+
+ if (get_default) {
+ mutex_lock(&smu->mutex);
+ *limit = smu->default_power_limit;
+ if (smu->od_enabled) {
+ *limit *= (100 + smu->smu_table.TDPODLimit);
+ *limit /= 100;
+ }
+ mutex_unlock(&smu->mutex);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ POWER_SOURCE_AC << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, limit);
+ smu->power_limit = *limit;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+{
+ uint32_t max_power_limit;
+ int ret = 0;
+
+ if (n == 0)
+ n = smu->default_power_limit;
+
+ max_power_limit = smu->default_power_limit;
+
+ if (smu->od_enabled) {
+ max_power_limit *= (100 + smu->smu_table.TDPODLimit);
+ max_power_limit /= 100;
+ }
+
+	if (n > max_power_limit)
+		return -EINVAL;
+
+	if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT)) {
+		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+		if (ret) {
+			pr_err("[%s] Set power limit Failed!", __func__);
+			return ret;
+		}
+	}
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
+{
+ int ret = 0;
+ uint32_t freq;
+
+ if (clk_id >= PPCLK_COUNT || !value)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmClockFreq, (clk_id << 16));
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 100;
+ *value = freq;
+
+ return ret;
+}
+
+static int smu_v11_0_get_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ range->max = smu->smu_table.software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
+static int smu_v11_0_set_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ uint32_t val;
+
+ if (low < range->min)
+ low = range->min;
+ if (high > range->max)
+ high = range->max;
+
+ if (low > high)
+ return -EINVAL;
+
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+ return 0;
+}
+
+static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val = 0;
+
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
+
+ return 0;
+}
+
+static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
+{
+ int ret;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature);
+
+ return ret;
+}
+
+static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+{
+ int ret = 0;
+ struct PP_TemperatureRange range;
+ struct amdgpu_device *adev = smu->adev;
+
+ smu_v11_0_get_thermal_range(smu, &range);
+
+ if (smu->smu_table.thermal_controller_type) {
+ ret = smu_v11_0_set_thermal_range(smu, &range);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_enable_thermal_alert(smu);
+ if (ret)
+ return ret;
+ ret = smu_v11_0_set_thermal_fan_table(smu);
+ if (ret)
+ return ret;
+ }
+
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
+ uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.AverageGfxActivity;
+
+ return 0;
+}
+
+static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t temp = 0;
+
+ if (!value)
+ return -EINVAL;
+
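+	/* CTF_TEMP is a 9-bit value in degrees Celsius; scale it to the
+	 * millidegree units reported through the sensor interface.
+	 */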
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
+ temp = temp & 0x1ff;
+ temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ *value = temp;
+
+ return 0;
+}
+
+static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
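+	/* CurrSocketPower is reported in watts; shift it into the 8.8
+	 * fixed-point format consumed by the power sensor path.
+	 */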
+ *value = metrics.CurrSocketPower << 8;
+
+ return 0;
+}
+
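+/* SVI2 telemetry encodes voltage as a VID: VDDC(mV) = 6200 - 25 * VID. */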
+static uint16_t convert_to_vddc(uint8_t vid)
+{
+ return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
+}
+
+static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t vdd = 0, val_vid = 0;
+
+ if (!value)
+ return -EINVAL;
+ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
+
+ vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
+
+ *value = vdd;
+
+ return 0;
+
+}
+
+static int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int ret = 0;
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v11_0_get_current_activity_percent(smu,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *(uint32_t *)data = 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint32_t *)data = pptable->FanMaximumRpm;
+ *size = 4;
+ break;
+ default:
+ ret = smu_common_read_sensor(smu, sensor, data, size);
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
+static int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req)
+{
+ enum amd_pp_clock_type clk_type = clock_req->clock_type;
+ int ret = 0;
+ PPCLK_e clk_select = 0;
+ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+ clk_select = PPCLK_DISPCLK;
+ break;
+ case amd_pp_pixel_clock:
+ clk_select = PPCLK_PIXCLK;
+ break;
+ case amd_pp_phy_clock:
+ clk_select = PPCLK_PHYCLK;
+ break;
+ default:
+ pr_info("[%s] Invalid Clock Type!", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ (clk_select << 16) | clk_freq);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
+ Watermarks_t *table, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int i;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
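+	/* WatermarkRow[1] carries the DMIF (DCEFCLK) ranges and WatermarkRow[0]
+	 * the MCIF (SOCCLK) ranges; clocks are converted from kHz to MHz.
+	 */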
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ return 0;
+}
+
+static int
+smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
+ Watermarks_t *table = watermarks->cpu_addr;
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
+ uint32_t *clock,
+ PPCLK_e clock_select,
+ bool max)
+{
+ int ret;
+ *clock = 0;
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ }
+
+ return 0;
+}
+
+static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
+{
+ uint32_t gfx_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
+ return ret;
+ }
+ } else {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
+ return ret;
+ }
+ }
+
+ return (gfx_clk * 100);
+}
+
+static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
+{
+ uint32_t mem_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ pr_err("[GetMclks]: memclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
+ return ret;
+ }
+ } else {
+		ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, true);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
+ return ret;
+ }
+ }
+
+ return (mem_clk * 100);
+}
+
+static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
+ bool initialize)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
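+	/* On first use, read the firmware's overdrive table, apply the driver's
+	 * OD8 defaults, then write the result back to the SMU.
+	 */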
+ if (initialize) {
+ if (table_context->overdrive_table)
+ return -EINVAL;
+
+ table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
+
+ if (!table_context->overdrive_table)
+ return -ENOMEM;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_set_default_od8_settings(smu);
+ }
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ uint32_t i, size = 0;
+ uint16_t workload_type = 0;
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ int result = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
+ result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type, &activity_monitor, false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+
+ size += sprintf(buf + size, "%2d %14s%s:\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "SOCCLK",
+ activity_monitor.Soc_FPS,
+ activity_monitor.Soc_UseRlcBusy,
+ activity_monitor.Soc_MinActiveFreqType,
+ activity_monitor.Soc_MinActiveFreq,
+ activity_monitor.Soc_BoosterFreqType,
+ activity_monitor.Soc_BoosterFreq,
+ activity_monitor.Soc_PD_Data_limit_c,
+ activity_monitor.Soc_PD_Data_error_coeff,
+ activity_monitor.Soc_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 2,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 3,
+ "FCLK",
+ activity_monitor.Fclk_FPS,
+ activity_monitor.Fclk_UseRlcBusy,
+ activity_monitor.Fclk_MinActiveFreqType,
+ activity_monitor.Fclk_MinActiveFreq,
+ activity_monitor.Fclk_BoosterFreqType,
+ activity_monitor.Fclk_BoosterFreq,
+ activity_monitor.Fclk_PD_Data_limit_c,
+ activity_monitor.Fclk_PD_Data_error_coeff,
+ activity_monitor.Fclk_PD_Data_error_rate_coeff);
+ }
+
+ return size;
+}
+
+static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ int workload_type = 0, ret = 0;
+
+	if (input[size] > PP_SMC_POWER_PROFILE_CUSTOM) {
+		pr_err("Invalid power profile mode %ld\n", input[size]);
+		return -EINVAL;
+	}
+
+	smu->power_profile_mode = input[size];
+
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Socclk */
+ activity_monitor.Soc_FPS = input[1];
+ activity_monitor.Soc_UseRlcBusy = input[2];
+ activity_monitor.Soc_MinActiveFreqType = input[3];
+ activity_monitor.Soc_MinActiveFreq = input[4];
+ activity_monitor.Soc_BoosterFreqType = input[5];
+ activity_monitor.Soc_BoosterFreq = input[6];
+ activity_monitor.Soc_PD_Data_limit_c = input[7];
+ activity_monitor.Soc_PD_Data_error_coeff = input[8];
+ activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 2: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 3: /* Fclk */
+ activity_monitor.Fclk_FPS = input[1];
+ activity_monitor.Fclk_UseRlcBusy = input[2];
+ activity_monitor.Fclk_MinActiveFreqType = input[3];
+ activity_monitor.Fclk_MinActiveFreq = input[4];
+ activity_monitor.Fclk_BoosterFreqType = input[5];
+ activity_monitor.Fclk_BoosterFreq = input[6];
+ activity_monitor.Fclk_PD_Data_limit_c = input[7];
+ activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+						WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+
+ return ret;
+}
+
+static int smu_v11_0_update_od8_settings(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_update_specified_od8_value(smu, index, value);
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+	if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
+		return 0;
+
+	if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
+		return 0;
+
+	return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
+}
+
+static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
+{
+	if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
+		return 0;
+
+	if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
+		return 0;
+
+	return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
+}
+
+static int smu_v11_0_get_current_rpm(struct smu_context *smu,
+ uint32_t *current_rpm)
+{
+ int ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+ if (ret) {
+ pr_err("Attempt to get current RPM from SMC Failed!\n");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, current_rpm);
+
+ return 0;
+}
+
+static uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu)
+{
+ if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
+ return AMD_FAN_CTRL_MANUAL;
+ else
+ return AMD_FAN_CTRL_AUTO;
+}
+
+static int
+smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret = 0;
+ uint32_t percent = 0;
+ uint32_t current_rpm;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+	ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
+	if (ret)
+		return ret;
+
+	percent = current_rpm * 100 / pptable->FanMaximumRpm;
+ *speed = percent > 100 ? 100 : percent;
+
+ return ret;
+}
+
+static int
+smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+
+	if (!smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
+		return 0;
+
+ ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
+ if (ret)
+ pr_err("[%s]%s smc FAN CONTROL feature failed!",
+ __func__, (start ? "Start" : "Stop"));
+
+ return ret;
+}
+
+static int
+smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, TMIN, 0));
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+ return 0;
+}
+
+static int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+	bool stop = false;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_smc_fan_control(smu, stop))
+ return -EINVAL;
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+static int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode)
+{
+ int ret = 0;
+	bool start = true;
+	bool stop = false;
+
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ ret = smu_v11_0_smc_fan_control(smu, start);
+ break;
+ default:
+ break;
+ }
+
+ if (ret) {
+ pr_err("[%s]Set fan control mode failed!", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+ uint32_t tach_period, crystal_clock_freq;
+	bool stop = false;
+
+ if (!speed)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ if (ret)
+ goto set_fan_speed_rpm_failed;
+
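+	/* Convert the requested RPM into a tach target period based on the
+	 * reference (xclk) clock before switching the fan to static-RPM mode.
+	 */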
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+
+set_fan_speed_rpm_failed:
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+ mutex_lock(&(smu->mutex));
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetXgmiMode,
+ pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static const struct smu_funcs smu_v11_0_funcs = {
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
+ .write_pptable = smu_v11_0_write_pptable,
+ .write_watermarks_table = smu_v11_0_write_watermarks_table,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .init_display = smu_v11_0_init_display,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .is_dpm_running = smu_v11_0_is_dpm_running,
+ .system_features_control = smu_v11_0_system_features_control,
+ .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .get_power_limit = smu_v11_0_get_power_limit,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .read_sensor = smu_v11_0_read_sensor,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
+ .get_sclk = smu_v11_0_dpm_get_sclk,
+ .get_mclk = smu_v11_0_dpm_get_mclk,
+ .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
+ .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
+ .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
+ .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
+ .update_od8_settings = smu_v11_0_update_od8_settings,
+ .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
+ .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
+ .get_current_rpm = smu_v11_0_get_current_rpm,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->funcs = &smu_v11_0_funcs;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ vega20_set_ppt_funcs(smu);
+ break;
+ default:
+ pr_warn("Unknown asic for smu11\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d111dd4e03d7..6d11076a79ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -212,6 +212,10 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
adev->pm.fw_version = hwmgr->smu_version >> 8;
+ if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ adev->pm.fw_version < 0x1e45)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
if (smu10_verify_smc_interface(hwmgr))
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index ba00744c3413..f301a73f6df1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -367,6 +367,26 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
return ret;
}
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_smumgr *priv =
+ (struct vega20_smumgr *)(hwmgr->smu_backend);
+ int ret = 0;
+
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
+ return ret);
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
+ return ret);
+
+ return ret;
+}
+
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index 77349c3f0162..ec953ab13e87 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -55,6 +55,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
new file mode 100644
index 000000000000..8fafcbdb1dfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -0,0 +1,2413 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "vega20_ppt.h"
+#include "vega20_pptable.h"
+#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+#define MSG_MAP(msg) \
+ [SMU_MSG_##msg] = PPSMC_MSG_##msg
+
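+/* Map the ASIC-agnostic SMU_MSG_* indices onto Vega20's PPSMC message IDs. */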
+static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage),
+ MSG_MAP(GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion),
+ MSG_MAP(SetAllowedFeaturesMaskLow),
+ MSG_MAP(SetAllowedFeaturesMaskHigh),
+ MSG_MAP(EnableAllSmuFeatures),
+ MSG_MAP(DisableAllSmuFeatures),
+ MSG_MAP(EnableSmuFeaturesLow),
+ MSG_MAP(EnableSmuFeaturesHigh),
+ MSG_MAP(DisableSmuFeaturesLow),
+ MSG_MAP(DisableSmuFeaturesHigh),
+ MSG_MAP(GetEnabledSmuFeaturesLow),
+ MSG_MAP(GetEnabledSmuFeaturesHigh),
+ MSG_MAP(SetWorkloadMask),
+ MSG_MAP(SetPptLimit),
+ MSG_MAP(SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow),
+ MSG_MAP(SetToolsDramAddrHigh),
+ MSG_MAP(SetToolsDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu),
+ MSG_MAP(UseDefaultPPTable),
+ MSG_MAP(UseBackupPPTable),
+ MSG_MAP(RunBtc),
+ MSG_MAP(RequestI2CBus),
+ MSG_MAP(ReleaseI2CBus),
+ MSG_MAP(SetFloorSocVoltage),
+ MSG_MAP(SoftReset),
+ MSG_MAP(StartBacoMonitor),
+ MSG_MAP(CancelBacoMonitor),
+ MSG_MAP(EnterBaco),
+ MSG_MAP(SetSoftMinByFreq),
+ MSG_MAP(SetSoftMaxByFreq),
+ MSG_MAP(SetHardMinByFreq),
+ MSG_MAP(SetHardMaxByFreq),
+ MSG_MAP(GetMinDpmFreq),
+ MSG_MAP(GetMaxDpmFreq),
+ MSG_MAP(GetDpmFreqByIndex),
+ MSG_MAP(GetDpmClockFreq),
+ MSG_MAP(GetSsVoltageByDpm),
+ MSG_MAP(SetMemoryChannelConfig),
+ MSG_MAP(SetGeminiMode),
+ MSG_MAP(SetGeminiApertureHigh),
+ MSG_MAP(SetGeminiApertureLow),
+ MSG_MAP(SetMinLinkDpmByIndex),
+ MSG_MAP(OverridePcieParameters),
+ MSG_MAP(OverDriveSetPercentage),
+ MSG_MAP(SetMinDeepSleepDcefclk),
+ MSG_MAP(ReenableAcDcInterrupt),
+ MSG_MAP(NotifyPowerSource),
+ MSG_MAP(SetUclkFastSwitch),
+ MSG_MAP(SetUclkDownHyst),
+ MSG_MAP(GetCurrentRpm),
+ MSG_MAP(SetVideoFps),
+ MSG_MAP(SetTjMax),
+ MSG_MAP(SetFanTemperatureTarget),
+ MSG_MAP(PrepareMp1ForUnload),
+ MSG_MAP(DramLogSetDramAddrHigh),
+ MSG_MAP(DramLogSetDramAddrLow),
+ MSG_MAP(DramLogSetDramSize),
+ MSG_MAP(SetFanMaxRpm),
+ MSG_MAP(SetFanMinPwm),
+ MSG_MAP(ConfigureGfxDidt),
+ MSG_MAP(NumOfDisplays),
+ MSG_MAP(RemoveMargins),
+ MSG_MAP(ReadSerialNumTop32),
+ MSG_MAP(ReadSerialNumBottom32),
+ MSG_MAP(SetSystemVirtualDramAddrHigh),
+ MSG_MAP(SetSystemVirtualDramAddrLow),
+ MSG_MAP(WaflTest),
+ MSG_MAP(SetFclkGfxClkRatio),
+ MSG_MAP(AllowGfxOff),
+ MSG_MAP(DisallowGfxOff),
+ MSG_MAP(GetPptLimit),
+ MSG_MAP(GetDcModeMaxDpmFreq),
+ MSG_MAP(GetDebugData),
+ MSG_MAP(SetXgmiMode),
+ MSG_MAP(RunAfllBtc),
+ MSG_MAP(ExitBaco),
+ MSG_MAP(PrepareMp1ForReset),
+ MSG_MAP(PrepareMp1ForShutdown),
+ MSG_MAP(SetMGpuFanBoostLimitRpm),
+ MSG_MAP(GetAVFSVoltageByDpm),
+};
+
+static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ int val;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ val = vega20_message_map[index];
+ if (val > PPSMC_Message_Count)
+ return -EINVAL;
+
+ return val;
+}
+
+static int vega20_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context)
+ return -EINVAL;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ if (smu_dpm->golden_dpm_context)
+ return -EINVAL;
+
+ smu_dpm->golden_dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->golden_dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct vega20_dpm_table);
+
+ smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_current_power_state)
+ return -ENOMEM;
+
+ smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_request_power_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int vega20_setup_od8_information(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ uint32_t od_feature_count, od_feature_array_size,
+ od_setting_count, od_setting_array_size;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
+ /* Setup correct ODFeatureCount, and store ODFeatureArray from
+ * powerplay table to od_feature_capabilities */
+ od_feature_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
+ ATOM_VEGA20_ODFEATURE_COUNT) ?
+ ATOM_VEGA20_ODFEATURE_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
+
+ od_feature_array_size = sizeof(uint8_t) * od_feature_count;
+
+ if (table_context->od_feature_capabilities)
+ return -EINVAL;
+
+ table_context->od_feature_capabilities = kmemdup(&powerplay_table->OverDrive8Table.ODFeatureCapabilities,
+ od_feature_array_size,
+ GFP_KERNEL);
+ if (!table_context->od_feature_capabilities)
+ return -ENOMEM;
+
+ /* Setup correct ODSettingCount, and store ODSettingArray from
+ * powerplay table to od_settings_max and od_setting_min */
+ od_setting_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
+ ATOM_VEGA20_ODSETTING_COUNT) ?
+ ATOM_VEGA20_ODSETTING_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);
+
+ od_setting_array_size = sizeof(uint32_t) * od_setting_count;
+
+ if (table_context->od_settings_max)
+ return -EINVAL;
+
+ table_context->od_settings_max = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMax,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_max) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ return -ENOMEM;
+ }
+
+ if (table_context->od_settings_min)
+ return -EINVAL;
+
+ table_context->od_settings_min = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMin,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_min) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int vega20_store_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
+ sizeof(PPTable_t));
+
+ table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+ table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
+ table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
+
+ ret = vega20_setup_od8_information(smu);
+
+ return ret;
+}
+
+static int vega20_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_v4_4 *smc_dpm_table;
+ int index, i, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ smc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
+ smc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
+
+ smc_pptable->VddGfxVrMapping = smc_dpm_table->vddgfxvrmapping;
+ smc_pptable->VddSocVrMapping = smc_dpm_table->vddsocvrmapping;
+ smc_pptable->VddMem0VrMapping = smc_dpm_table->vddmem0vrmapping;
+ smc_pptable->VddMem1VrMapping = smc_dpm_table->vddmem1vrmapping;
+
+ smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->gfxulvphasesheddingmask;
+ smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->soculvphasesheddingmask;
+ smc_pptable->ExternalSensorPresent = smc_dpm_table->externalsensorpresent;
+
+ smc_pptable->GfxMaxCurrent = smc_dpm_table->gfxmaxcurrent;
+ smc_pptable->GfxOffset = smc_dpm_table->gfxoffset;
+ smc_pptable->Padding_TelemetryGfx = smc_dpm_table->padding_telemetrygfx;
+
+ smc_pptable->SocMaxCurrent = smc_dpm_table->socmaxcurrent;
+ smc_pptable->SocOffset = smc_dpm_table->socoffset;
+ smc_pptable->Padding_TelemetrySoc = smc_dpm_table->padding_telemetrysoc;
+
+ smc_pptable->Mem0MaxCurrent = smc_dpm_table->mem0maxcurrent;
+ smc_pptable->Mem0Offset = smc_dpm_table->mem0offset;
+ smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->padding_telemetrymem0;
+
+ smc_pptable->Mem1MaxCurrent = smc_dpm_table->mem1maxcurrent;
+ smc_pptable->Mem1Offset = smc_dpm_table->mem1offset;
+ smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->padding_telemetrymem1;
+
+ smc_pptable->AcDcGpio = smc_dpm_table->acdcgpio;
+ smc_pptable->AcDcPolarity = smc_dpm_table->acdcpolarity;
+ smc_pptable->VR0HotGpio = smc_dpm_table->vr0hotgpio;
+ smc_pptable->VR0HotPolarity = smc_dpm_table->vr0hotpolarity;
+
+ smc_pptable->VR1HotGpio = smc_dpm_table->vr1hotgpio;
+ smc_pptable->VR1HotPolarity = smc_dpm_table->vr1hotpolarity;
+ smc_pptable->Padding1 = smc_dpm_table->padding1;
+ smc_pptable->Padding2 = smc_dpm_table->padding2;
+
+ smc_pptable->LedPin0 = smc_dpm_table->ledpin0;
+ smc_pptable->LedPin1 = smc_dpm_table->ledpin1;
+ smc_pptable->LedPin2 = smc_dpm_table->ledpin2;
+
+ smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->pllgfxclkspreadenabled;
+ smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->pllgfxclkspreadpercent;
+ smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->pllgfxclkspreadfreq;
+
+ smc_pptable->UclkSpreadEnabled = 0;
+ smc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent;
+ smc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq;
+
+ smc_pptable->FclkSpreadEnabled = smc_dpm_table->fclkspreadenabled;
+ smc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent;
+ smc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq;
+
+ smc_pptable->FllGfxclkSpreadEnabled = smc_dpm_table->fllgfxclkspreadenabled;
+ smc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
+ smc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ smc_pptable->I2cControllers[i].Enabled =
+ smc_dpm_table->i2ccontrollers[i].enabled;
+ smc_pptable->I2cControllers[i].SlaveAddress =
+ smc_dpm_table->i2ccontrollers[i].slaveaddress;
+ smc_pptable->I2cControllers[i].ControllerPort =
+ smc_dpm_table->i2ccontrollers[i].controllerport;
+ smc_pptable->I2cControllers[i].ThermalThrottler =
+ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+ smc_pptable->I2cControllers[i].I2cProtocol =
+ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+ smc_pptable->I2cControllers[i].I2cSpeed =
+ smc_dpm_table->i2ccontrollers[i].i2cspeed;
+ }
+
+ return 0;
+}
+
+static int vega20_check_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->sHeader.format_revision < ATOM_VEGA20_TABLE_REVISION_VEGA20) {
+ pr_err("Unsupported PPTable format!");
+ return -EINVAL;
+ }
+
+ if (!powerplay_table->sHeader.structuresize) {
+ pr_err("Invalid PowerPlay Table!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vega20_run_btc_afll(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+}
+
+static int
+vega20_get_unallowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
+ feature_mask[0] = 0xE0041C00;
+ feature_mask[1] = 0xFFFFFFFE; /* bit32~bit63 is Unsupported */
+
+ return 0;
+}
+
+static enum
+amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+ mutex_unlock(&(smu->mutex));
+
+ return pm_type;
+}
+
+static int
+vega20_set_single_dpm_table(struct smu_context *smu,
+ struct vega20_single_dpm_table *single_dpm_table,
+ PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels = 0, clk;
+
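+	/* Passing index 0xFF returns the number of DPM levels for this clock. */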
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ if (ret) {
+ pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &num_of_levels);
+ if (!num_of_levels) {
+ pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
+ return -EINVAL;
+ }
+
+ single_dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | i));
+ if (ret) {
+ pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
+ return ret;
+ }
+ smu_read_smc_arg(smu, &clk);
+ if (!clk) {
+ pr_err("[GetDpmFreqByIndex] clk value is invalid!");
+ return -EINVAL;
+ }
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+ }
+ return 0;
+}
+
+static void vega20_init_single_dpm_state(struct vega20_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
+}
+
+static int vega20_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ /* socclk */
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get socclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* gfxclk */
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* memclk */
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* eclk */
+ single_dpm_table = &(dpm_table->eclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_ECLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get eclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.eclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* vclk */
+ single_dpm_table = &(dpm_table->vclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_VCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get vclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dclk */
+ single_dpm_table = &(dpm_table->dclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dcefclk */
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dcefclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* pixclk */
+ single_dpm_table = &(dpm_table->pixel_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get pixclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dispclk */
+ single_dpm_table = &(dpm_table->display_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dispclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* phyclk */
+ single_dpm_table = &(dpm_table->phy_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get phyclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* fclk */
+ single_dpm_table = &(dpm_table->fclk_table);
+
+	if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_FCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ memcpy(smu_dpm->golden_dpm_context, dpm_table,
+ sizeof(struct vega20_dpm_table));
+
+ return 0;
+}
+
+static int vega20_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *gfx_table = NULL;
+ struct vega20_single_dpm_table *mem_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+ gfx_table = &(dpm_table->gfx_table);
+ mem_table = &(dpm_table->mem_table);
+
+ smu->pstate_sclk = gfx_table->dpm_levels[0].value;
+ smu->pstate_mclk = mem_table->dpm_levels[0].value;
+
+ if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
+ smu->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ smu->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
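+ /* dpm level values are in MHz; scale to the 10KHz units used for the exported pstate clocks */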
+ smu->pstate_sclk = smu->pstate_sclk * 100;
+ smu->pstate_mclk = smu->pstate_mclk * 100;
+
+ return 0;
+}
+
+static int vega20_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
+ clocks->num_levels = count;
+
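+ /* dpm level values are stored in MHz; report them in kHz as expected by pp_clock_levels_with_latency */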
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int vega20_print_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ uint32_t gen_speed, lane_width;
+ struct amdgpu_device *adev = smu->adev;
+ struct pp_clock_levels_with_latency clocks;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
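+ /* the current clock is reported in 10KHz units; scale it for comparison with the kHz table entries */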
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_FCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->fclk_table);
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (single_dpm_table->dpm_levels[i].value == now / 100)
+ ? "*" : "");
+ break;
+
+ case PP_DCEFCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current dcefclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get dcefclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+
+ case PP_PCIE:
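+ /* read the current link speed and lane width from the PCIE link control registers */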
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
+ break;
+
+ case OD_SCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ od_table->GfxclkFmin);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->GfxclkFmax);
+ }
+
+ break;
+
+ case OD_MCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->UclkFmax);
+ }
+
+ break;
+
+ case OD_VDDC_CURVE:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+
+ break;
+
+ case OD_RANGE:
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ return size;
+}
+
+static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t freq;
+ int ret = 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
+ single_dpm_table = &(dpm_table->gfx_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
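+ /* message parameter packs the clock id in the upper 16 bits and the target frequency in the lower 16 bits */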
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s gfxclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
+ single_dpm_table = &(dpm_table->mem_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s memclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
+ single_dpm_table = &(dpm_table->soc_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s socclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_FCLK_MASK)) {
+ single_dpm_table = &(dpm_table->fclk_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_FCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s fclk !\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
+ single_dpm_table = &(dpm_table->dcef_table);
+ freq = single_dpm_table->dpm_state.hard_min_level;
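+ /* dcefclk takes only a hard minimum; it is programmed on the "min" pass and skipped on the "max" pass */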
+ if (!max) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set hard min dcefclk !\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int vega20_force_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level, hard_min_level;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ int ret = 0;
+
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_info("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(smu->mutex));
+
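+ /* the lowest and highest set bits of the mask select the requested soft min/max dpm levels */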
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_SOCCLK:
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_FCLK:
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_DCEFCLK:
+ hard_min_level = soft_min_level;
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (hard_min_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ hard_min_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.hard_min_level =
+ single_dpm_table->dpm_levels[hard_min_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_DCEFCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload boot level to lowest!\n");
+
+ break;
+
+ case PP_PCIE:
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ if (ret)
+ pr_err("Failed to set min link dpm level!\n");
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ mutex_lock(&smu->mutex);
+
+ switch (type) {
+ case amd_pp_sys_clock:
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_mem_clock:
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_dcef_clock:
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_soc_clock:
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+ uint32_t *voltage,
+ uint32_t freq)
+{
+ int ret;
+
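+ /* parameter packs the AVFS curve type, the hot-curve temperature and the requested frequency */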
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ if (ret) {
+ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, voltage);
+ *voltage = *voltage / VOLTAGE_SCALE;
+
+ return 0;
+}
+
+static int vega20_set_default_od8_settings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings = NULL;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ int i, ret;
+
+ if (table_context->od8_settings)
+ return -EINVAL;
+
+ table_context->od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL);
+
+ if (!table_context->od8_settings)
+ return -ENOMEM;
+
+ od8_settings = (struct vega20_od8_settings *)table_context->od8_settings;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
+ od_table->GfxclkFmin;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
+ od_table->GfxclkFmax;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
+ smc_pptable->MinVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
+ smc_pptable->MaxVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] <=
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
+ OD8_GFXCLK_CURVE;
+
+ od_table->GfxclkFreq1 = od_table->GfxclkFmin;
+ od_table->GfxclkFreq2 = (od_table->GfxclkFmin + od_table->GfxclkFmax) / 2;
+ od_table->GfxclkFreq3 = od_table->GfxclkFmax;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
+ od_table->GfxclkFreq1;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
+ od_table->GfxclkFreq2;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
+ od_table->GfxclkFreq3;
+
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value,
+ od_table->GfxclkFreq1);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0;
+ od_table->GfxclkVolt1 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value,
+ od_table->GfxclkFreq2);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0;
+ od_table->GfxclkVolt2 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value,
+ od_table->GfxclkFreq3);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0;
+ od_table->GfxclkVolt3 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
+ * VOLTAGE_SCALE;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id =
+ OD8_UCLK_MAX;
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
+ od_table->UclkFmax;
+ }
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100) {
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id =
+ OD8_POWER_LIMIT;
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
+ od_table->OverDrivePct;
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
+ OD8_ACOUSTIC_LIMIT_SCLK;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
+ od_table->FanMaximumRpm;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
+ OD8_FAN_SPEED_MIN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
+ od_table->FanMinimumPwm * smc_pptable->FanMaximumRpm / 100;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_THERMAL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
+ OD8_TEMPERATURE_FAN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
+ od_table->FanTargetTemperature;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
+ OD8_TEMPERATURE_SYSTEM;
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
+ od_table->MaxOpTemp;
+ }
+ }
+
+ for (i = 0; i < OD8_SETTING_COUNT; i++) {
+ if (od8_settings->od8_settings_array[i].feature_id) {
+ od8_settings->od8_settings_array[i].min_value =
+ table_context->od_settings_min[i];
+ od8_settings->od8_settings_array[i].max_value =
+ table_context->od_settings_max[i];
+ od8_settings->od8_settings_array[i].current_value =
+ od8_settings->od8_settings_array[i].default_value;
+ } else {
+ od8_settings->od8_settings_array[i].min_value = 0;
+ od8_settings->od8_settings_array[i].max_value = 0;
+ od8_settings->od8_settings_array[i].current_value = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int vega20_get_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ int value, golden_value;
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ value = single_dpm_table->dpm_levels[single_dpm_table->count - 1].value;
+ golden_value = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
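+ /* overdrive percentage = (current max level - golden max level) * 100 / golden max level, rounded up */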
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
+ return value;
+}
+
+static int
+vega20_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ struct vega20_dpm_table *dpm_table = (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *gfx_dpm_table;
+ struct vega20_single_dpm_table *mem_dpm_table;
+ struct vega20_single_dpm_table *soc_dpm_table;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ gfx_dpm_table = &dpm_table->gfx_table;
+ mem_dpm_table = &dpm_table->mem_table;
+ soc_dpm_table = &dpm_table->soc_table;
+
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
+ *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
+ }
+
+ return 0;
+}
+
+static int
+vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (dpm_table->count <= 0) {
+ pr_err("[%s] Dpm table has no entry!", __func__);
+ return -EINVAL;
+ }
+
+ if (dpm_table->count > NUM_UCLK_DPM_LEVELS) {
+ pr_err("[%s] Dpm table has too many entries!", __func__);
+ return -EINVAL;
+ }
+
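+ /* pin the UCLK hard minimum to the highest dpm level */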
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int vega20_pre_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = vega20_set_uclk_to_highest_dpm_level(smu,
+ &dpm_table->mem_table);
+ if (ret)
+ pr_err("Failed to set uclk to highest dpm level");
+ return ret;
+}
+
+static int vega20_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->funcs)
+ return -EINVAL;
+
+ if (!smu->smu_dpm.dpm_context ||
+ !smu->smu_table.tables ||
+ !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr)
+ return -EINVAL;
+
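+ /* write the watermarks table to SMU only when it exists and has not been loaded yet */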
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu->funcs->write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NumOfDisplays,
+ smu->display_config->num_display);
+ }
+
+ return ret;
+}
+
+static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct vega20_dpm_table *dpm_ctx = (struct vega20_dpm_table *)(smu_dpm_ctx->dpm_context);
+ struct vega20_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
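+ /* mclk switching is disallowed with multiple displays that are not in sync, or when the vblank period is too short */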
+ disable_mclk_switching = ((1 < smu->display_config->num_display) &&
+ !smu->display_config->multi_monitor_in_sync) || vblank_too_short;
+ latency = smu->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(dpm_ctx->gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* memclk */
+ dpm_table = &(dpm_ctx->mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (smu->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = smu->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < smu_dpm_ctx->mclk_latency_table->count - 1; i++) {
+ if (smu_dpm_ctx->mclk_latency_table->entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (smu->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (smu->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(dpm_ctx->vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* dclk */
+ dpm_table = &(dpm_ctx->dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* socclk */
+ dpm_table = &(dpm_ctx->soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* eclk */
+ dpm_table = &(dpm_ctx->eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ return 0;
+}
+
+static int
+vega20_notify_smc_dispaly_config(struct smu_context *smu)
+{
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
+ struct smu_clocks min_clocks = {0};
+ struct pp_display_clock_request clock_req;
+ int ret = 0;
+
+ min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
+ min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
+ min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
+
+ if (smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+ clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
+ if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (smu_feature_is_supported(smu, FEATURE_DS_DCEFCLK_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ min_clocks.dcef_clock_in_sr/100);
+ if (ret) {
+ pr_err("Attempt to set divider for DCEFCLK Failed!");
+ return ret;
+ }
+ }
+ } else {
+ pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static uint32_t vega20_find_lowest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static uint32_t vega20_find_highest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ int i = 0;
+
+ if (!table) {
+ pr_err("[%s] DPM Table does not exist!", __func__);
+ return 0;
+ }
+ if (table->count <= 0) {
+ pr_err("[%s] DPM Table has no entry!", __func__);
+ return 0;
+ }
+ if (table->count > MAX_REGULAR_DPM_NUMBER) {
+ pr_err("[%s] DPM Table has too many entries!", __func__);
+ return MAX_REGULAR_DPM_NUMBER - 1;
+ }
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static int vega20_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ uint32_t soft_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload boot level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload dpm max level to %s!\n!",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vega20_unforce_dpm_levels(struct smu_context *smu)
+{
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_levels[soft_min_level].value;
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_levels[soft_min_level].value;
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_levels[soft_min_level].value;
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_max_level].value;
+
+ ret = smu_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Bootup Levels!");
+ return ret;
+ }
+
+ ret = smu_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Max Levels!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+ mutex_lock(&(smu->mutex));
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ mutex_unlock(&(smu->mutex));
+ }
+ return smu_dpm_ctx->dpm_level;
+}
+
+static int
+vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+ if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+ break;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+static int vega20_update_specified_od8_value(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+
+ switch (index) {
+ case OD8_SETTING_GFXCLK_FMIN:
+ od_table->GfxclkFmin = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->GfxclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ1:
+ od_table->GfxclkFreq1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE1:
+ od_table->GfxclkVolt1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ2:
+ od_table->GfxclkFreq2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE2:
+ od_table->GfxclkVolt2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ3:
+ od_table->GfxclkFreq3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE3:
+ od_table->GfxclkVolt3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_UCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->UclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_POWER_PERCENTAGE:
+ od_table->OverDrivePct = (int16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
+ od_table->FanMaximumRpm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_MIN_SPEED:
+ od_table->FanMinimumPwm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_TARGET_TEMP:
+ od_table->FanTargetTemperature = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_OPERATING_TEMP_MAX:
+ od_table->MaxOpTemp = (uint16_t)value;
+ break;
+ }
+
+ return 0;
+}
+
+static int vega20_set_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ uint32_t od_clk, index;
+ int ret = 0;
+ int feature_enabled;
+ PPCLK_e clk_id;
+
+ mutex_lock(&(smu->mutex));
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT);
+ clk_id = PPCLK_GFXCLK;
+ index = OD8_SETTING_GFXCLK_FMAX;
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT);
+ clk_id = PPCLK_UCLK;
+ index = OD8_SETTING_UCLK_FMAX;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto set_od_failed;
+
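+ /* new max clock = golden max clock increased by the requested overdrive percentage */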
+ od_clk = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value * value;
+ od_clk /= 100;
+ od_clk += golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
+ ret = smu_update_od8_settings(smu, index, od_clk);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to set od clk!\n");
+ goto set_od_failed;
+ }
+
+ if (feature_enabled) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ clk_id);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ goto set_od_failed;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+set_od_failed:
+ mutex_unlock(&(smu->mutex));
+
+ return ret;
+}
+
+static int vega20_odn_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ struct pp_clock_levels_with_latency clocks;
+ int32_t input_index, input_clk, input_vol, i;
+ int od8_id;
+ int ret = 0;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ if (!input) {
+ pr_warn("NULL user input for clock and voltage\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
+ pr_info("Sclk min/max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 0 && input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+ pr_info("Support min/max sclk frequency settingonly which index by 0/1\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 0 && od_table->GfxclkFmin != input_clk) {
+ od_table->GfxclkFmin = input_clk;
+ table_context->od_gfxclk_update = true;
+ } else if (input_index == 1 && od_table->GfxclkFmax != input_clk) {
+ od_table->GfxclkFmax = input_clk;
+ table_context->od_gfxclk_update = true;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ pr_info("Mclk max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+ pr_info("Support max Mclk frequency setting only which index by 1\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 1 && od_table->UclkFmax != input_clk) {
+ table_context->od_gfxclk_update = true;
+ od_table->UclkFmax = input_clk;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
+ pr_info("Voltage curve calibrate not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 3) {
+ if (i + 3 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+ input_vol = input[i + 2];
+
+ if (input_index > 2) {
+ pr_info("Setting for point %d is not supported\n",
+ input_index + 1);
+ pr_info("Three supported points index by 0, 1, 2\n");
+ return -EINVAL;
+ }
+
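+ /* each curve point occupies a consecutive (freq, voltage) pair of entries in the od8 settings array */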
+ od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
+ if (input_clk < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_clk > od8_settings->od8_settings_array[od8_id].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
+ if (input_vol < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_vol > od8_settings->od8_settings_array[od8_id].max_value) {
+ pr_info("clock voltage %d is not within allowed range [%d- %d]\n",
+ input_vol,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ switch (input_index) {
+ case 0:
+ od_table->GfxclkFreq1 = input_clk;
+ od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 1:
+ od_table->GfxclkFreq2 = input_clk;
+ od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 2:
+ od_table->GfxclkFreq3 = input_clk;
+ od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ break;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ /* retrieve updated gfxclk table */
+ if (table_context->od_gfxclk_update) {
+ table_context->od_gfxclk_update = false;
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ mutex_lock(&(smu->mutex));
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+ mutex_unlock(&(smu->mutex));
+ }
+
+ return ret;
+}
+
+static const struct pptable_funcs vega20_ppt_funcs = {
+ .alloc_dpm_context = vega20_allocate_dpm_context,
+ .store_powerplay_table = vega20_store_powerplay_table,
+ .check_powerplay_table = vega20_check_powerplay_table,
+ .append_powerplay_table = vega20_append_powerplay_table,
+ .get_smu_msg_index = vega20_get_smu_msg_index,
+ .run_afll_btc = vega20_run_btc_afll,
+ .get_unallowed_feature_mask = vega20_get_unallowed_feature_mask,
+ .get_current_power_state = vega20_get_current_power_state,
+ .set_default_dpm_table = vega20_set_default_dpm_table,
+ .set_power_state = NULL,
+ .populate_umd_state_clk = vega20_populate_umd_state_clk,
+ .print_clk_levels = vega20_print_clk_levels,
+ .force_clk_levels = vega20_force_clk_levels,
+ .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+ .set_default_od8_settings = vega20_set_default_od8_setttings,
+ .get_od_percentage = vega20_get_od_percentage,
+ .get_performance_level = vega20_get_performance_level,
+ .force_performance_level = vega20_force_performance_level,
+ .update_specified_od8_value = vega20_update_specified_od8_value,
+ .set_od_percentage = vega20_set_od_percentage,
+ .od_edit_dpm_table = vega20_odn_edit_dpm_table,
+ .pre_display_config_changed = vega20_pre_display_config_changed,
+ .display_config_changed = vega20_display_config_changed,
+ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+ .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .force_dpm_limit_value = vega20_force_dpm_limit_value,
+ .unforce_dpm_levels = vega20_unforce_dpm_levels,
+ .upload_dpm_level = vega20_upload_dpm_level,
+ .get_profiling_clk_mask = vega20_get_profiling_clk_mask,
+};
+
+void vega20_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &vega20_ppt_funcs;
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
+}
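A note on the hunk above: the voltage-curve branch consumes its input buffer as (point index, clock, voltage) triplets and only stages the values; nothing reaches the SMU until a separate PP_OD_COMMIT_DPM_TABLE call imports the staged overdrive table. A minimal caller sketch, assuming the curve-edit command id is PP_OD_EDIT_VDDC_CURVE (its case label sits above this hunk) and that the handler's signature matches the (smu, type, input, size) parameters implied above; the values are illustrative only, not part of the patch:

	long curve[] = { 0, 1000, 900 };	/* point 0 -> 1000 MHz at 900 mV, range-checked by the handler */
	int ret;

	/* stage the new curve point through the hook registered above */
	ret = smu->ppt_funcs->od_edit_dpm_table(smu, PP_OD_EDIT_VDDC_CURVE,
						curve, ARRAY_SIZE(curve));
	if (!ret)
		/* then ask the SMU to import the staged overdrive table */
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE,
							NULL, 0);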
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
new file mode 100644
index 000000000000..5a0d2af63173
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA20_PPT_H__
+#define __VEGA20_PPT_H__
+
+#define VEGA20_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA20_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_VCEMCLK_LEVEL 0x3
+
+#define MAX_REGULAR_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+
+#define VOLTAGE_SCALE 4
+#define AVFS_CURVE 0
+#define OD8_HOTCURVE_TEMPERATURE 85
+
+struct vega20_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+struct vega20_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct vega20_single_dpm_table {
+ uint32_t count;
+ struct vega20_dpm_state dpm_state;
+ struct vega20_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega20_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct vega20_dpm_table {
+ struct vega20_single_dpm_table soc_table;
+ struct vega20_single_dpm_table gfx_table;
+ struct vega20_single_dpm_table mem_table;
+ struct vega20_single_dpm_table eclk_table;
+ struct vega20_single_dpm_table vclk_table;
+ struct vega20_single_dpm_table dclk_table;
+ struct vega20_single_dpm_table dcef_table;
+ struct vega20_single_dpm_table pixel_table;
+ struct vega20_single_dpm_table display_table;
+ struct vega20_single_dpm_table phy_table;
+ struct vega20_single_dpm_table fclk_table;
+ struct vega20_pcie_table pcie_table;
+};
+
+enum OD8_FEATURE_ID
+{
+ OD8_GFXCLK_LIMITS = 1 << 0,
+ OD8_GFXCLK_CURVE = 1 << 1,
+ OD8_UCLK_MAX = 1 << 2,
+ OD8_POWER_LIMIT = 1 << 3,
+ OD8_ACOUSTIC_LIMIT_SCLK = 1 << 4, //FanMaximumRpm
+ OD8_FAN_SPEED_MIN = 1 << 5, //FanMinimumPwm
+ OD8_TEMPERATURE_FAN = 1 << 6, //FanTargetTemperature
+ OD8_TEMPERATURE_SYSTEM = 1 << 7, //MaxOpTemp
+ OD8_MEMORY_TIMING_TUNE = 1 << 8,
+ OD8_FAN_ZERO_RPM_CONTROL = 1 << 9
+};
+
+enum OD8_SETTING_ID
+{
+ OD8_SETTING_GFXCLK_FMIN = 0,
+ OD8_SETTING_GFXCLK_FMAX,
+ OD8_SETTING_GFXCLK_FREQ1,
+ OD8_SETTING_GFXCLK_VOLTAGE1,
+ OD8_SETTING_GFXCLK_FREQ2,
+ OD8_SETTING_GFXCLK_VOLTAGE2,
+ OD8_SETTING_GFXCLK_FREQ3,
+ OD8_SETTING_GFXCLK_VOLTAGE3,
+ OD8_SETTING_UCLK_FMAX,
+ OD8_SETTING_POWER_PERCENTAGE,
+ OD8_SETTING_FAN_ACOUSTIC_LIMIT,
+ OD8_SETTING_FAN_MIN_SPEED,
+ OD8_SETTING_FAN_TARGET_TEMP,
+ OD8_SETTING_OPERATING_TEMP_MAX,
+ OD8_SETTING_AC_TIMING,
+ OD8_SETTING_FAN_ZERO_RPM_CONTROL,
+ OD8_SETTING_COUNT
+};
+
+struct vega20_od8_single_setting {
+ uint32_t feature_id;
+ int32_t min_value;
+ int32_t max_value;
+ int32_t current_value;
+ int32_t default_value;
+};
+
+struct vega20_od8_settings {
+ struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT];
+};
+
+extern void vega20_set_ppt_funcs(struct smu_context *smu);
+
+#endif
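A note on the enum layout: OD8_SETTING_GFXCLK_FREQ1..VOLTAGE3 deliberately interleave frequency and voltage, so a curve point can be addressed as FREQ1 + 2 * index (and VOLTAGE1 + 2 * index), which is exactly the arithmetic the edit path in vega20_ppt.c uses. An illustrative helper written under that assumption, not part of the patch:

	/* map a curve point (0, 1 or 2) to its slots in od8_settings_array[] */
	static inline void od8_curve_point_ids(uint32_t point,
					       int *freq_id, int *volt_id)
	{
		*freq_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * point;
		*volt_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * point;
	}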
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index b35fc5db866b..1053b11352eb 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -20,4 +20,16 @@
/* Mali-display product IDs */
#define MALIDP_D71_PRODUCT_ID 0x0071
+union komeda_config_id {
+ struct {
+ __u32 max_line_sz:16,
+ n_pipelines:2,
+ n_scalers:2, /* number of scalers per pipeline */
+ n_layers:3, /* number of layers per pipeline */
+ n_richs:3, /* number of rich layers per pipeline */
+ reserved_bits:6;
+ };
+ __u32 value;
+};
+
#endif /* _MALIDP_PRODUCT_H_ */
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
index 63cc47cefcf8..8cfd91196e15 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -7,10 +7,41 @@
#ifndef _MALIDP_UTILS_
#define _MALIDP_UTILS_
+#include <linux/delay.h>
+
#define has_bit(nr, mask) (BIT(nr) & (mask))
#define has_bits(bits, mask) (((bits) & (mask)) == (bits))
#define dp_for_each_set_bit(bit, mask) \
for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+#define dp_wait_cond(__cond, __tries, __min_range, __max_range) \
+({ \
+ int num_tries = __tries; \
+ while (!__cond && (num_tries > 0)) { \
+ usleep_range(__min_range, __max_range); \
+ if (__cond) \
+ break; \
+ num_tries--; \
+ } \
+ num_tries; \
+})
+
+/* the range is inclusive on both ends: [start, end] */
+struct malidp_range {
+ u32 start;
+ u32 end;
+};
+
+static inline void set_range(struct malidp_range *rg, u32 start, u32 end)
+{
+ rg->start = start;
+ rg->end = end;
+}
+
+static inline bool in_range(struct malidp_range *rg, u32 v)
+{
+ return (v >= rg->start) && (v <= rg->end);
+}
+
#endif /* _MALIDP_UTILS_ */
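dp_wait_cond() evaluates to the number of tries left, so a positive result means the condition was met and zero means the poll timed out; set_range()/in_range() describe inclusive [start, end] limits for later validation. A usage sketch mirroring how d71_reset() further down in this patch polls a self-clearing reset bit (names as used there):

	malidp_write32_mask(gcu, BLK_CONTROL, GCU_CONTROL_SRST, GCU_CONTROL_SRST);

	/* up to 100 tries, sleeping 1-10 ms between reads */
	ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
			   100, 1000, 10000);

	return ret > 0 ? 0 : -ETIMEDOUT;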
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 1b875e5dc0f6..412eeba8c39f 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := \
- -I$(src)/../include \
- -I$(src)
+ -I $(srctree)/$(src)/../include \
+ -I $(srctree)/$(src)
komeda-y := \
komeda_drv.o \
komeda_dev.o \
komeda_format_caps.o \
komeda_pipeline.o \
+ komeda_pipeline_state.o \
komeda_framebuffer.o \
komeda_kms.o \
komeda_crtc.o \
@@ -16,6 +17,7 @@ komeda-y := \
komeda_private_obj.o
komeda-y += \
- d71/d71_dev.o
+ d71/d71_dev.o \
+ d71/d71_component.o
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
new file mode 100644
index 000000000000..031e5f305a3c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
+#include "komeda_kms.h"
+#include "malidp_io.h"
+#include "komeda_framebuffer.h"
+
+static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id)
+{
+ u32 id = BLOCK_INFO_BLK_ID(hw_id);
+ u32 pipe = id;
+
+ switch (BLOCK_INFO_BLK_TYPE(hw_id)) {
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ id = KOMEDA_COMPONENT_WB_LAYER;
+ break;
+ case D71_BLK_TYPE_CU_SPLITTER:
+ id = KOMEDA_COMPONENT_SPLITTER;
+ break;
+ case D71_BLK_TYPE_CU_SCALER:
+ pipe = id / D71_PIPELINE_MAX_SCALERS;
+ id %= D71_PIPELINE_MAX_SCALERS;
+ id += KOMEDA_COMPONENT_SCALER0;
+ break;
+ case D71_BLK_TYPE_CU:
+ id += KOMEDA_COMPONENT_COMPIZ0;
+ break;
+ case D71_BLK_TYPE_LPU_LAYER:
+ pipe = id / D71_PIPELINE_MAX_LAYERS;
+ id %= D71_PIPELINE_MAX_LAYERS;
+ id += KOMEDA_COMPONENT_LAYER0;
+ break;
+ case D71_BLK_TYPE_DOU_IPS:
+ id += KOMEDA_COMPONENT_IPS0;
+ break;
+ case D71_BLK_TYPE_CU_MERGER:
+ id = KOMEDA_COMPONENT_MERGER;
+ break;
+ case D71_BLK_TYPE_DOU:
+ id = KOMEDA_COMPONENT_TIMING_CTRLR;
+ break;
+ default:
+ id = 0xFFFFFFFF;
+ }
+
+ if (comp_id)
+ *comp_id = id;
+
+ if (pipe_id)
+ *pipe_id = pipe;
+}
+
+static u32 get_valid_inputs(struct block_header *blk)
+{
+ u32 valid_inputs = 0, comp_id;
+ int i;
+
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) {
+ get_resources_id(blk->input_ids[i], NULL, &comp_id);
+ if (comp_id == 0xFFFFFFFF)
+ continue;
+ valid_inputs |= BIT(comp_id);
+ }
+
+ return valid_inputs;
+}
+
+static void get_values_from_reg(void __iomem *reg, u32 offset,
+ u32 count, u32 *val)
+{
+ u32 i, addr;
+
+ for (i = 0; i < count; i++) {
+ addr = offset + (i << 2);
+ /* 0xA4 is WO register */
+ if (addr != 0xA4)
+ val[i] = malidp_read32(reg, addr);
+ else
+ val[i] = 0xDEADDEAD;
+ }
+}
+
+static void dump_block_header(struct seq_file *sf, void __iomem *reg)
+{
+ struct block_header hdr;
+ u32 i, n_input, n_output;
+
+ d71_read_block_header(reg, &hdr);
+ seq_printf(sf, "BLOCK_INFO:\t\t0x%X\n", hdr.block_info);
+ seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info);
+
+ n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info);
+ n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info);
+
+ for (i = 0; i < n_input; i++)
+ seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n",
+ i, hdr.input_ids[i]);
+
+ for (i = 0; i < n_output; i++)
+ seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n",
+ i, hdr.output_ids[i]);
+}
+
+static u32 to_rot_ctrl(u32 rot)
+{
+ u32 lr_ctrl = 0;
+
+ switch (rot & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ lr_ctrl |= L_ROT(L_ROT_R0);
+ break;
+ case DRM_MODE_ROTATE_90:
+ lr_ctrl |= L_ROT(L_ROT_R90);
+ break;
+ case DRM_MODE_ROTATE_180:
+ lr_ctrl |= L_ROT(L_ROT_R180);
+ break;
+ case DRM_MODE_ROTATE_270:
+ lr_ctrl |= L_ROT(L_ROT_R270);
+ break;
+ }
+
+ if (rot & DRM_MODE_REFLECT_X)
+ lr_ctrl |= L_HFLIP;
+ if (rot & DRM_MODE_REFLECT_Y)
+ lr_ctrl |= L_VFLIP;
+
+ return lr_ctrl;
+}
+
+static inline u32 to_d71_input_id(struct komeda_component_output *output)
+{
+ struct komeda_component *comp = output->component;
+
+ return comp ? (comp->hw_id + output->output_port) : 0;
+}
+
+static void d71_layer_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
+}
+
+static void d71_layer_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct drm_plane_state *plane_st = state->plane->state;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ u32 __iomem *reg = c->reg;
+ u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
+ u32 ctrl = L_EN | to_rot_ctrl(st->rot);
+ int i;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ malidp_write32(reg,
+ BLK_P0_PTR_LOW + i * LAYER_PER_PLANE_REGS * 4,
+ lower_32_bits(st->addr[i]));
+ malidp_write32(reg,
+ BLK_P0_PTR_HIGH + i * LAYER_PER_PLANE_REGS * 4,
+ upper_32_bits(st->addr[i]));
+ if (i >= 2)
+ break;
+
+ malidp_write32(reg,
+ BLK_P0_STRIDE + i * LAYER_PER_PLANE_REGS * 4,
+ fb->pitches[i] & 0xFFFF);
+ }
+
+ malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
+ malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
+
+ malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
+}
+
+static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[15], i;
+ bool rich, rgb2rgb;
+ char *prefix;
+
+ get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]);
+ if (v[14] & 0x1) {
+ rich = true;
+ prefix = "LR_";
+ } else {
+ rich = false;
+ prefix = "LS_";
+ }
+
+ rgb2rgb = !!(v[14] & L_INFO_CM);
+
+ dump_block_header(sf, c->reg);
+
+ seq_printf(sf, "%sLAYER_INFO:\t\t0x%X\n", prefix, v[14]);
+
+ get_values_from_reg(c->reg, 0xD0, 1, v);
+ seq_printf(sf, "%sCONTROL:\t\t0x%X\n", prefix, v[0]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0xD4, 1, v);
+ seq_printf(sf, "LR_RICH_CONTROL:\t0x%X\n", v[0]);
+ }
+ get_values_from_reg(c->reg, 0xD8, 4, v);
+ seq_printf(sf, "%sFORMAT:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sIT_COEFFTAB:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sIN_SIZE:\t\t0x%X\n", prefix, v[2]);
+ seq_printf(sf, "%sPALPHA:\t\t0x%X\n", prefix, v[3]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "%sP0_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP0_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sP0_STRIDE:\t\t0x%X\n", prefix, v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 2, v);
+ seq_printf(sf, "%sP1_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP1_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0x118, 1, v);
+ seq_printf(sf, "LR_P1_STRIDE:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0x120, 2, v);
+ seq_printf(sf, "LR_P2_PTR_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "LR_P2_PTR_HIGH:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LR_YUV_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ if (rgb2rgb) {
+ get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ get_values_from_reg(c->reg, 0x160, 3, v);
+ seq_printf(sf, "%sAD_CONTROL:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sAD_H_CROP:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
+}
+
+static struct komeda_component_funcs d71_layer_funcs = {
+ .update = d71_layer_update,
+ .disable = d71_layer_disable,
+ .dump_register = d71_layer_dump,
+};
+
+static int d71_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_layer *layer;
+ u32 pipe_id, layer_id, layer_info;
+
+ get_resources_id(blk->block_info, &pipe_id, &layer_id);
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer),
+ layer_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_layer_funcs, 0,
+ get_valid_inputs(blk),
+ 1, reg, "LPU%d_LAYER%d", pipe_id, layer_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add layer component\n");
+ return PTR_ERR(c);
+ }
+
+ layer = to_layer(c);
+ layer_info = malidp_read32(reg, LAYER_INFO);
+
+ if (layer_info & L_INFO_RF)
+ layer->layer_type = KOMEDA_FMT_RICH_LAYER;
+ else
+ layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
+
+ set_range(&layer->hsize_in, 4, d71->max_line_size);
+ set_range(&layer->vsize_in, 4, d71->max_vsize);
+
+ malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
+
+ layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK;
+
+ return 0;
+}
+
+static int d71_wb_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ DRM_DEBUG("Detect D71_Wb_Layer.\n");
+
+ return 0;
+}
+
+static void d71_component_disable(struct komeda_component *c)
+{
+ u32 __iomem *reg = c->reg;
+ u32 i;
+
+ malidp_write32(reg, BLK_CONTROL, 0);
+
+ for (i = 0; i < c->max_active_inputs; i++)
+ malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
+}
+
+static void compiz_enable_input(u32 __iomem *id_reg,
+ u32 __iomem *cfg_reg,
+ u32 input_hw_id,
+ struct komeda_compiz_input_cfg *cin)
+{
+ u32 ctrl = CU_INPUT_CTRL_EN;
+ u8 blend = cin->pixel_blend_mode;
+
+ if (blend == DRM_MODE_BLEND_PIXEL_NONE)
+ ctrl |= CU_INPUT_CTRL_PAD;
+ else if (blend == DRM_MODE_BLEND_PREMULTI)
+ ctrl |= CU_INPUT_CTRL_PMUL;
+
+ ctrl |= CU_INPUT_CTRL_ALPHA(cin->layer_alpha);
+
+ malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id);
+
+ malidp_write32(cfg_reg, CU_INPUT0_SIZE,
+ HV_SIZE(cin->hsize, cin->vsize));
+ malidp_write32(cfg_reg, CU_INPUT0_OFFSET,
+ HV_OFFSET(cin->hoffset, cin->voffset));
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl);
+}
+
+static void d71_compiz_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_compiz_state *st = to_compiz_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 __iomem *id_reg, *cfg_reg;
+ u32 index, input_hw_id;
+
+ for_each_changed_input(state, index) {
+ id_reg = reg + index;
+ cfg_reg = reg + index * CU_PER_INPUT_REGS;
+ input_hw_id = to_d71_input_id(&state->inputs[index]);
+ if (state->active_inputs & BIT(index)) {
+ compiz_enable_input(id_reg, cfg_reg,
+ input_hw_id, &st->cins[index]);
+ } else {
+ malidp_write32(id_reg, BLK_INPUT_ID0, 0);
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
+ }
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 5, v);
+ for (i = 0; i < 5; i++)
+ seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0xA0, 5, v);
+ seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(c->reg, 0xD0, 2, v);
+ seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xDC, 1, v);
+ seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]);
+
+ for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) {
+ get_values_from_reg(c->reg, v[4], 3, v);
+ seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]);
+ seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]);
+ seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]);
+ }
+
+ get_values_from_reg(c->reg, 0x130, 2, v);
+ seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
+}
+
+static struct komeda_component_funcs d71_compiz_funcs = {
+ .update = d71_compiz_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_compiz_dump,
+};
+
+static int d71_compiz_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_compiz *compiz;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_compiz_funcs,
+ CU_NUM_INPUT_IDS, get_valid_inputs(blk),
+ CU_NUM_OUTPUT_IDS, reg,
+ "CU%d", pipe_id);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ compiz = to_compiz(c);
+
+ set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
+ set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+
+ return 0;
+}
+
+static void d71_improc_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_improc_state *st = to_improc_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 index, input_hw_id;
+
+ for_each_changed_input(state, index) {
+ input_hw_id = state->active_inputs & BIT(index) ?
+ to_d71_input_id(&state->inputs[index]) : 0;
+ malidp_write32(reg, BLK_INPUT_ID0 + index * 4, input_hw_id);
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[12], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 2, v);
+ seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 3, v);
+ seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x170, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
+}
+
+static struct komeda_component_funcs d71_improc_funcs = {
+ .update = d71_improc_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_improc_dump,
+};
+
+static int d71_improc_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_improc *improc;
+ u32 pipe_id, comp_id, value;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_improc_funcs, IPS_NUM_INPUT_IDS,
+ get_valid_inputs(blk),
+ IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add improc component\n");
+ return PTR_ERR(c);
+ }
+
+ improc = to_improc(c);
+ improc->supported_color_depths = BIT(8) | BIT(10);
+ improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
+ DRM_COLOR_FORMAT_YCRCB444 |
+ DRM_COLOR_FORMAT_YCRCB422;
+ value = malidp_read32(reg, BLK_INFO);
+ if (value & IPS_INFO_CHD420)
+ improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+ improc->supports_csc = true;
+ improc->supports_gamma = true;
+
+ return 0;
+}
+
+static void d71_timing_ctrlr_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0);
+}
+
+static void d71_timing_ctrlr_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct drm_crtc_state *crtc_st = state->crtc->state;
+ u32 __iomem *reg = c->reg;
+ struct videomode vm;
+ u32 value;
+
+ drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm);
+
+ malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive));
+ malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch,
+ vm.hback_porch));
+ malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch,
+ vm.vback_porch));
+
+ value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len);
+ value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0;
+ value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? BS_SYNC_HSP : 0;
+ malidp_write32(reg, BS_SYNC, value);
+
+ malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
+ malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE);
+
+ /* configure bs control register */
+ value = BS_CTRL_EN | BS_CTRL_VM;
+
+ malidp_write32(reg, BLK_CONTROL, value);
+}
+
+static void d71_timing_ctrlr_dump(struct komeda_component *c,
+ struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 8, v);
+ seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]);
+ seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 3, v);
+ for (i = 0; i < 3; i++)
+ seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x120, 5, v);
+ for (i = 0; i < 2; i++) {
+ seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]);
+ seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]);
+ }
+ seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
+}
+
+static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+ .update = d71_timing_ctrlr_update,
+ .disable = d71_timing_ctrlr_disable,
+ .dump_register = d71_timing_ctrlr_dump,
+};
+
+static int d71_timing_ctrlr_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_timing_ctrlr *ctrlr;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
+ KOMEDA_COMPONENT_TIMING_CTRLR,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_timing_ctrlr_funcs,
+ 1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id),
+ BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add display_ctrl component\n");
+ return PTR_ERR(c);
+ }
+
+ ctrlr = to_ctrlr(c);
+
+ ctrlr->supports_dual_link = true;
+
+ return 0;
+}
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct d71_pipeline *pipe;
+ int blk_id = BLOCK_INFO_BLK_ID(blk->block_info);
+
+ int err = 0;
+
+ switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) {
+ case D71_BLK_TYPE_GCU:
+ break;
+
+ case D71_BLK_TYPE_LPU:
+ pipe = d71->pipes[blk_id];
+ pipe->lpu_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_LPU_LAYER:
+ err = d71_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ err = d71_wb_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU:
+ pipe = d71->pipes[blk_id];
+ pipe->cu_addr = reg;
+ err = d71_compiz_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU_SPLITTER:
+ case D71_BLK_TYPE_CU_SCALER:
+ case D71_BLK_TYPE_CU_MERGER:
+ break;
+
+ case D71_BLK_TYPE_DOU:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_IPS:
+ err = d71_improc_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_DOU_FT_COEFF:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_ft_coeff_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_BS:
+ err = d71_timing_ctrlr_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_GLB_LT_COEFF:
+ break;
+
+ case D71_BLK_TYPE_GLB_SCL_COEFF:
+ d71->glb_scl_coeff_addr[blk_id] = reg;
+ break;
+
+ default:
+ DRM_ERROR("Unknown block (block_info: 0x%x) is found\n",
+ blk->block_info);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
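For reference, to_rot_ctrl() above folds the DRM plane rotation property into the layer control bits; a quick illustrative pairing, not from the patch:

	/* a plane rotated 90 degrees and mirrored in X ends up as R90 + HFLIP */
	u32 ctrl = to_rot_ctrl(DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X);
	/* ctrl == (L_ROT(L_ROT_R90) | L_HFLIP) */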
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index edbf9daa1545..34506ef7ad40 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -4,13 +4,425 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
#include "malidp_io.h"
-#include "komeda_dev.h"
+
+static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->lpu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & LPU_IRQ_IBSY)
+ evts |= KOMEDA_EVENT_IBSY;
+ if (raw_status & LPU_IRQ_EOW)
+ evts |= KOMEDA_EVENT_EOW;
+
+ if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+ u32 restore = 0, tbu_status;
+ /* Check error of LPU status */
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & LPU_STATUS_AXIE) {
+ restore |= LPU_STATUS_AXIE;
+ evts |= KOMEDA_ERR_AXIE;
+ }
+ if (status & LPU_STATUS_ACE0) {
+ restore |= LPU_STATUS_ACE0;
+ evts |= KOMEDA_ERR_ACE0;
+ }
+ if (status & LPU_STATUS_ACE1) {
+ restore |= LPU_STATUS_ACE1;
+ evts |= KOMEDA_ERR_ACE1;
+ }
+ if (status & LPU_STATUS_ACE2) {
+ restore |= LPU_STATUS_ACE2;
+ evts |= KOMEDA_ERR_ACE2;
+ }
+ if (status & LPU_STATUS_ACE3) {
+ restore |= LPU_STATUS_ACE3;
+ evts |= KOMEDA_ERR_ACE3;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+
+ restore = 0;
+ /* Check errors of TBU status */
+ tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
+ if (tbu_status & LPU_TBU_STATUS_TCF) {
+ restore |= LPU_TBU_STATUS_TCF;
+ evts |= KOMEDA_ERR_TCF;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTNG) {
+ restore |= LPU_TBU_STATUS_TTNG;
+ evts |= KOMEDA_ERR_TTNG;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TITR) {
+ restore |= LPU_TBU_STATUS_TITR;
+ evts |= KOMEDA_ERR_TITR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TEMR) {
+ restore |= LPU_TBU_STATUS_TEMR;
+ evts |= KOMEDA_ERR_TEMR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTF) {
+ restore |= LPU_TBU_STATUS_TTF;
+ evts |= KOMEDA_ERR_TTF;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->cu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & CU_IRQ_OVR)
+ evts |= KOMEDA_EVENT_OVR;
+
+ if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
+ status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
+ if (status & CU_STATUS_CPE)
+ evts |= KOMEDA_ERR_CPE;
+ if (status & CU_STATUS_ZME)
+ evts |= KOMEDA_ERR_ZME;
+ if (status & CU_STATUS_CFGE)
+ evts |= KOMEDA_ERR_CFGE;
+ if (status)
+ malidp_write32_mask(reg, BLK_STATUS, status, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+
+ return evts;
+}
+
+static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->dou_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & DOU_IRQ_PL0)
+ evts |= KOMEDA_EVENT_VSYNC;
+ if (raw_status & DOU_IRQ_UND)
+ evts |= KOMEDA_EVENT_URUN;
+
+ if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
+ u32 restore = 0;
+
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & DOU_STATUS_DRIFTTO) {
+ restore |= DOU_STATUS_DRIFTTO;
+ evts |= KOMEDA_ERR_DRIFTTO;
+ }
+ if (status & DOU_STATUS_FRAMETO) {
+ restore |= DOU_STATUS_FRAMETO;
+ evts |= KOMEDA_ERR_FRAMETO;
+ }
+ if (status & DOU_STATUS_TETO) {
+ restore |= DOU_STATUS_TETO;
+ evts |= KOMEDA_ERR_TETO;
+ }
+ if (status & DOU_STATUS_CSCE) {
+ restore |= DOU_STATUS_CSCE;
+ evts |= KOMEDA_ERR_CSCE;
+ }
+
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
+{
+ u64 evts = 0ULL;
+
+ if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
+ evts |= get_lpu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
+ evts |= get_cu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
+ evts |= get_dou_event(d71_pipeline);
+
+ return evts;
+}
+
+static irqreturn_t
+d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 status, gcu_status, raw_status;
+
+ gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
+
+ if (gcu_status & GLB_IRQ_STATUS_GCU) {
+ raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
+ if (raw_status & GCU_IRQ_CVAL0)
+ evts->pipes[0] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_CVAL1)
+ evts->pipes[1] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_ERR) {
+ status = malidp_read32(d71->gcu_addr, BLK_STATUS);
+ if (status & GCU_STATUS_MERR) {
+ evts->global |= KOMEDA_ERR_MERR;
+ malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
+ GCU_STATUS_MERR, 0);
+ }
+ }
+
+ malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
+ }
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE0)
+ evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE1)
+ evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
+
+ return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
+ GCU_IRQ_MODE | GCU_IRQ_ERR)
+#define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
+#define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR)
+#define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR)
+
+static int d71_enable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
+ ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, ENABLED_CU_IRQS);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
+ }
+ return 0;
+}
+
+static int d71_disable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, 0);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, 0);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, 0);
+ }
+ return 0;
+}
+
+static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe = d71->pipes[master_pipe];
+
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
+}
+
+static int to_d71_opmode(int core_mode)
+{
+ switch (core_mode) {
+ case KOMEDA_MODE_DISP0:
+ return DO0_ACTIVE_MODE;
+ case KOMEDA_MODE_DISP1:
+ return DO1_ACTIVE_MODE;
+ case KOMEDA_MODE_DUAL_DISP:
+ return DO01_ACTIVE_MODE;
+ case KOMEDA_MODE_INACTIVE:
+ return INACTIVE_MODE;
+ default:
+ WARN(1, "Unknown operation mode");
+ return INACTIVE_MODE;
+ }
+}
+
+static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 opmode = to_d71_opmode(new_mode);
+ int ret;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
+
+ ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+static void d71_flush(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 reg_offset = (master_pipe == 0) ?
+ GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
+
+ malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
+}
+
+static int d71_reset(struct d71_dev *d71)
+{
+ u32 __iomem *gcu = d71->gcu_addr;
+ int ret;
+
+ malidp_write32_mask(gcu, BLK_CONTROL,
+ GCU_CONTROL_SRST, GCU_CONTROL_SRST);
+
+ ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
+{
+ int i;
+
+ blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
+ return;
+
+ blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
+
+ /* get valid input and output ids */
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
+ blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
+ for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
+ blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
+}
+
+static void d71_cleanup(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ if (!d71)
+ return;
+
+ devm_kfree(mdev->dev, d71);
+ mdev->chip_data = NULL;
+}
static int d71_enum_resources(struct komeda_dev *mdev)
{
- /* TODO add enum resources */
- return -1;
+ struct d71_dev *d71;
+ struct komeda_pipeline *pipe;
+ struct block_header blk;
+ u32 __iomem *blk_base;
+ u32 i, value, offset;
+ int err;
+
+ d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
+ if (!d71)
+ return -ENOMEM;
+
+ mdev->chip_data = d71;
+ d71->mdev = mdev;
+ d71->gcu_addr = mdev->reg_base;
+ d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);
+
+ err = d71_reset(d71);
+ if (err) {
+ DRM_ERROR("Fail to reset d71 device.\n");
+ goto err_cleanup;
+ }
+
+ /* probe GCU */
+ value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
+ d71->num_blocks = value & 0xFF;
+ d71->num_pipelines = (value >> 8) & 0x7;
+
+ if (d71->num_pipelines > D71_MAX_PIPELINE) {
+ DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
+ D71_MAX_PIPELINE, d71->num_pipelines);
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /* probe PERIPH */
+ value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
+ DRM_ERROR("access blk periph but got blk: %d.\n",
+ BLOCK_INFO_BLK_TYPE(value));
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+ d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+ d71->max_vsize = 4096;
+ d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+ d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false;
+ d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false;
+
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
+ NULL);
+ if (IS_ERR(pipe)) {
+ err = PTR_ERR(pipe);
+ goto err_cleanup;
+ }
+ d71->pipes[i] = to_d71_pipeline(pipe);
+ }
+
+ /* loop the register blks and probe */
+ i = 2; /* exclude GCU and PERIPH */
+ offset = D71_BLOCK_SIZE; /* skip GCU */
+ while (i < d71->num_blocks) {
+ blk_base = mdev->reg_base + (offset >> 2);
+
+ d71_read_block_header(blk_base, &blk);
+ if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
+ err = d71_probe_block(d71, &blk, blk_base);
+ if (err)
+ goto err_cleanup;
+ i++;
+ }
+
+ offset += D71_BLOCK_SIZE;
+ }
+
+ DRM_DEBUG("total %d (out of %d) blocks are found.\n",
+ i, d71->num_blocks);
+
+ return 0;
+
+err_cleanup:
+ d71_cleanup(mdev);
+ return err;
}
#define __HW_ID(__group, __format) \
@@ -93,19 +505,22 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
static struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
- .cleanup = NULL,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
};
-#define GLB_ARCH_ID 0x000
-#define GLB_CORE_ID 0x004
-#define GLB_CORE_INFO 0x008
-
struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
+ chip->bus_width = D71_BUS_WIDTH_16_BYTES;
return &d71_chip_funcs;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
new file mode 100644
index 000000000000..7465c57d9774
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_DEV_H_
+#define _D71_DEV_H_
+
+#include "komeda_dev.h"
+#include "komeda_pipeline.h"
+#include "d71_regs.h"
+
+struct d71_pipeline {
+ struct komeda_pipeline base;
+
+ /* d71 private pipeline blocks */
+ u32 __iomem *lpu_addr;
+ u32 __iomem *cu_addr;
+ u32 __iomem *dou_addr;
+ u32 __iomem *dou_ft_coeff_addr; /* forward transform coeffs table */
+};
+
+struct d71_dev {
+ struct komeda_dev *mdev;
+
+ int num_blocks;
+ int num_pipelines;
+ int num_rich_layers;
+ u32 max_line_size;
+ u32 max_vsize;
+ u32 supports_dual_link : 1;
+ u32 integrates_tbu : 1;
+
+ /* global register blocks */
+ u32 __iomem *gcu_addr;
+ /* scaling coeffs table */
+ u32 __iomem *glb_scl_coeff_addr[D71_MAX_GLB_SCL_COEFF];
+ u32 __iomem *periph_addr;
+
+ struct d71_pipeline *pipes[D71_MAX_PIPELINE];
+};
+
+#define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base)
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg);
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+
+#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
new file mode 100644
index 000000000000..2d5e6d00b42c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_REG_H_
+#define _D71_REG_H_
+
+/* Common block registers offset */
+#define BLK_BLOCK_INFO 0x000
+#define BLK_PIPELINE_INFO 0x004
+#define BLK_VALID_INPUT_ID0 0x020
+#define BLK_OUTPUT_ID0 0x060
+#define BLK_INPUT_ID0 0x080
+#define BLK_IRQ_RAW_STATUS 0x0A0
+#define BLK_IRQ_CLEAR 0x0A4
+#define BLK_IRQ_MASK 0x0A8
+#define BLK_IRQ_STATUS 0x0AC
+#define BLK_STATUS 0x0B0
+#define BLK_INFO 0x0C0
+#define BLK_CONTROL 0x0D0
+#define BLK_SIZE 0x0D4
+#define BLK_IN_SIZE 0x0E0
+
+#define BLK_P0_PTR_LOW 0x100
+#define BLK_P0_PTR_HIGH 0x104
+#define BLK_P0_STRIDE 0x108
+#define BLK_P1_PTR_LOW 0x110
+#define BLK_P1_PTR_HIGH 0x114
+#define BLK_P1_STRIDE 0x118
+#define BLK_P2_PTR_LOW 0x120
+#define BLK_P2_PTR_HIGH 0x124
+
+#define BLOCK_INFO_N_SUBBLKS(x) ((x) & 0x000F)
+#define BLOCK_INFO_BLK_ID(x) (((x) & 0x00F0) >> 4)
+#define BLOCK_INFO_BLK_TYPE(x) (((x) & 0xFF00) >> 8)
+#define BLOCK_INFO_INPUT_ID(x) ((x) & 0xFFF0)
+#define BLOCK_INFO_TYPE_ID(x) (((x) & 0x0FF0) >> 4)
+
+#define PIPELINE_INFO_N_OUTPUTS(x) ((x) & 0x000F)
+#define PIPELINE_INFO_N_VALID_INPUTS(x) (((x) & 0x0F00) >> 8)
+
+/* Common block control register bits */
+#define BLK_CTRL_EN BIT(0)
+/* Common size macro */
+#define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16))
+#define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+#define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+
+/* AD_CONTROL register */
+#define AD_CONTROL 0x160
+
+/* AD_CONTROL register bits */
+#define AD_AEN BIT(0)
+#define AD_YT BIT(1)
+#define AD_BS BIT(2)
+#define AD_WB BIT(3)
+#define AD_TH BIT(4)
+
+/* Global Control Unit */
+#define GLB_ARCH_ID 0x000
+#define GLB_CORE_ID 0x004
+#define GLB_CORE_INFO 0x008
+#define GLB_IRQ_STATUS 0x010
+
+#define GCU_CONFIG_VALID0 0x0D4
+#define GCU_CONFIG_VALID1 0x0D8
+
+/* GCU_CONTROL_BITS */
+#define GCU_CONTROL_MODE(x) ((x) & 0x7)
+#define GCU_CONTROL_SRST BIT(16)
+
+/* GCU opmode */
+#define INACTIVE_MODE 0
+#define TBU_CONNECT_MODE 1
+#define TBU_DISCONNECT_MODE 2
+#define DO0_ACTIVE_MODE 3
+#define DO1_ACTIVE_MODE 4
+#define DO01_ACTIVE_MODE 5
+
+/* GLB_IRQ_STATUS bits */
+#define GLB_IRQ_STATUS_GCU BIT(0)
+#define GLB_IRQ_STATUS_LPU0 BIT(8)
+#define GLB_IRQ_STATUS_LPU1 BIT(9)
+#define GLB_IRQ_STATUS_ATU0 BIT(10)
+#define GLB_IRQ_STATUS_ATU1 BIT(11)
+#define GLB_IRQ_STATUS_ATU2 BIT(12)
+#define GLB_IRQ_STATUS_ATU3 BIT(13)
+#define GLB_IRQ_STATUS_CU0 BIT(16)
+#define GLB_IRQ_STATUS_CU1 BIT(17)
+#define GLB_IRQ_STATUS_DOU0 BIT(24)
+#define GLB_IRQ_STATUS_DOU1 BIT(25)
+
+#define GLB_IRQ_STATUS_PIPE0 (GLB_IRQ_STATUS_LPU0 |\
+ GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_CU0 |\
+ GLB_IRQ_STATUS_DOU0)
+
+#define GLB_IRQ_STATUS_PIPE1 (GLB_IRQ_STATUS_LPU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3 |\
+ GLB_IRQ_STATUS_CU1 |\
+ GLB_IRQ_STATUS_DOU1)
+
+#define GLB_IRQ_STATUS_ATU (GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3)
+
+/* GCU_IRQ_BITS */
+#define GCU_IRQ_CVAL0 BIT(0)
+#define GCU_IRQ_CVAL1 BIT(1)
+#define GCU_IRQ_MODE BIT(4)
+#define GCU_IRQ_ERR BIT(11)
+
+/* GCU_STATUS_BITS */
+#define GCU_STATUS_MODE(x) ((x) & 0x7)
+#define GCU_STATUS_MERR BIT(4)
+#define GCU_STATUS_TCS0 BIT(8)
+#define GCU_STATUS_TCS1 BIT(9)
+#define GCU_STATUS_ACTIVE BIT(31)
+
+/* GCU_CONFIG_VALIDx BITS */
+#define GCU_CONFIG_CVAL BIT(0)
+
+/* PERIPHERAL registers */
+#define PERIPH_MAX_LINE_SIZE BIT(0)
+#define PERIPH_NUM_RICH_LAYERS BIT(4)
+#define PERIPH_SPLIT_EN BIT(8)
+#define PERIPH_TBU_EN BIT(12)
+#define PERIPH_AFBC_DMA_EN BIT(16)
+#define PERIPH_CONFIGURATION_ID 0x1D4
+
+/* LPU register */
+#define LPU_TBU_STATUS 0x0B4
+#define LPU_RAXI_CONTROL 0x0D0
+#define LPU_WAXI_CONTROL 0x0D4
+#define LPU_TBU_CONTROL 0x0D8
+
+/* LPU_xAXI_CONTROL_BITS */
+#define TO_RAXI_AOUTSTDCAPB(x) (x)
+#define TO_RAXI_BOUTSTDCAPB(x) ((x) << 8)
+#define TO_RAXI_BEN(x) ((x) << 15)
+#define TO_xAXI_BURSTLEN(x) ((x) << 16)
+#define TO_xAXI_AxQOS(x) ((x) << 24)
+#define TO_xAXI_ORD(x) ((x) << 31)
+#define TO_WAXI_OUTSTDCAPB(x) (x)
+
+#define RAXI_AOUTSTDCAPB_MASK 0x7F
+#define RAXI_BOUTSTDCAPB_MASK 0x7F00
+#define RAXI_BEN_MASK BIT(15)
+#define xAXI_BURSTLEN_MASK 0x3F0000
+#define xAXI_AxQOS_MASK 0xF000000
+#define xAXI_ORD_MASK BIT(31)
+#define WAXI_OUTSTDCAPB_MASK 0x3F
+
+/* LPU_TBU_CONTROL BITS */
+#define TO_TBU_DOUTSTDCAPB(x) (x)
+#define TBU_DOUTSTDCAPB_MASK 0x3F
+
+/* LPU_IRQ_BITS */
+#define LPU_IRQ_IBSY BIT(10)
+#define LPU_IRQ_ERR BIT(11)
+#define LPU_IRQ_EOW BIT(12)
+#define LPU_IRQ_PL0 BIT(13)
+
+/* LPU_STATUS_BITS */
+#define LPU_STATUS_AXIED(x) ((x) & 0xF)
+#define LPU_STATUS_AXIE BIT(4)
+#define LPU_STATUS_AXIRP BIT(5)
+#define LPU_STATUS_AXIWP BIT(6)
+#define LPU_STATUS_ACE0 BIT(16)
+#define LPU_STATUS_ACE1 BIT(17)
+#define LPU_STATUS_ACE2 BIT(18)
+#define LPU_STATUS_ACE3 BIT(19)
+#define LPU_STATUS_ACTIVE BIT(31)
+
+#define AXIEID_MASK 0xF
+#define AXIE_MASK LPU_STATUS_AXIE
+#define AXIRP_MASK LPU_STATUS_AXIRP
+#define AXIWP_MASK LPU_STATUS_AXIWP
+
+#define FROM_AXIEID(reg) ((reg) & AXIEID_MASK)
+#define TO_AXIE(x) ((x) << 4)
+#define FROM_AXIRP(reg) (((reg) & AXIRP_MASK) >> 5)
+#define FROM_AXIWP(reg) (((reg) & AXIWP_MASK) >> 6)
+
+/* LPU_TBU_STATUS_BITS */
+#define LPU_TBU_STATUS_TCF BIT(1)
+#define LPU_TBU_STATUS_TTNG BIT(2)
+#define LPU_TBU_STATUS_TITR BIT(8)
+#define LPU_TBU_STATUS_TEMR BIT(16)
+#define LPU_TBU_STATUS_TTF BIT(31)
+
+/* LPU_TBU_CONTROL BITS */
+#define LPU_TBU_CTRL_TLBPEN BIT(16)
+
+/* CROSSBAR CONTROL BITS */
+#define CBU_INPUT_CTRL_EN BIT(0)
+#define CBU_NUM_INPUT_IDS 5
+#define CBU_NUM_OUTPUT_IDS 5
+
+/* CU register */
+#define CU_BG_COLOR 0x0DC
+#define CU_INPUT0_SIZE 0x0E0
+#define CU_INPUT0_OFFSET 0x0E4
+#define CU_INPUT0_CONTROL 0x0E8
+#define CU_INPUT1_SIZE 0x0F0
+#define CU_INPUT1_OFFSET 0x0F4
+#define CU_INPUT1_CONTROL 0x0F8
+#define CU_INPUT2_SIZE 0x100
+#define CU_INPUT2_OFFSET 0x104
+#define CU_INPUT2_CONTROL 0x108
+#define CU_INPUT3_SIZE 0x110
+#define CU_INPUT3_OFFSET 0x114
+#define CU_INPUT3_CONTROL 0x118
+#define CU_INPUT4_SIZE 0x120
+#define CU_INPUT4_OFFSET 0x124
+#define CU_INPUT4_CONTROL 0x128
+
+#define CU_PER_INPUT_REGS 4
+
+#define CU_NUM_INPUT_IDS 5
+#define CU_NUM_OUTPUT_IDS 1
+
+/* CU control register bits */
+#define CU_CTRL_COPROC BIT(0)
+
+/* CU_IRQ_BITS */
+#define CU_IRQ_OVR BIT(9)
+#define CU_IRQ_ERR BIT(11)
+
+/* CU_STATUS_BITS */
+#define CU_STATUS_CPE BIT(0)
+#define CU_STATUS_ZME BIT(1)
+#define CU_STATUS_CFGE BIT(2)
+#define CU_STATUS_ACTIVE BIT(31)
+
+/* CU input control register bits */
+#define CU_INPUT_CTRL_EN BIT(0)
+#define CU_INPUT_CTRL_PAD BIT(1)
+#define CU_INPUT_CTRL_PMUL BIT(2)
+#define CU_INPUT_CTRL_ALPHA(x) (((x) & 0xFF) << 8)
+
+/* DOU register */
+
+/* DOU_IRQ_BITS */
+#define DOU_IRQ_UND BIT(8)
+#define DOU_IRQ_ERR BIT(11)
+#define DOU_IRQ_PL0 BIT(13)
+#define DOU_IRQ_PL1 BIT(14)
+
+/* DOU_STATUS_BITS */
+#define DOU_STATUS_DRIFTTO BIT(0)
+#define DOU_STATUS_FRAMETO BIT(1)
+#define DOU_STATUS_TETO BIT(2)
+#define DOU_STATUS_CSCE BIT(8)
+#define DOU_STATUS_ACTIVE BIT(31)
+
+/* Layer registers */
+#define LAYER_INFO 0x0C0
+#define LAYER_R_CONTROL 0x0D4
+#define LAYER_FMT 0x0D8
+#define LAYER_LT_COEFFTAB 0x0DC
+#define LAYER_PALPHA 0x0E4
+
+#define LAYER_YUV_RGB_COEFF0 0x130
+
+#define LAYER_AD_H_CROP 0x164
+#define LAYER_AD_V_CROP 0x168
+
+#define LAYER_RGB_RGB_COEFF0 0x170
+
+/* L_CONTROL_BITS */
+#define L_EN BIT(0)
+#define L_IT BIT(4)
+#define L_R2R BIT(5)
+#define L_FT BIT(6)
+#define L_ROT(x) (((x) & 3) << 8)
+#define L_HFLIP BIT(10)
+#define L_VFLIP BIT(11)
+#define L_TBU_EN BIT(16)
+#define L_A_RCACHE(x) (((x) & 0xF) << 28)
+#define L_ROT_R0 0
+#define L_ROT_R90 1
+#define L_ROT_R180 2
+#define L_ROT_R270 3
+
+/* LAYER_R_CONTROL BITS */
+#define LR_CHI422_BILINEAR 0
+#define LR_CHI422_REPLICATION 1
+#define LR_CHI420_JPEG (0 << 2)
+#define LR_CHI420_MPEG (1 << 2)
+
+#define L_ITSEL(x) ((x) & 0xFFF)
+#define L_FTSEL(x) (((x) & 0xFFF) << 16)
+
+#define LAYER_PER_PLANE_REGS 4
+
+/* Layer_WR registers */
+#define LAYER_WR_PROG_LINE 0x0D4
+#define LAYER_WR_FORMAT 0x0D8
+
+/* Layer_WR control bits */
+#define LW_OFM BIT(4)
+#define LW_LALPHA(x) (((x) & 0xFF) << 8)
+#define LW_A_WCACHE(x) (((x) & 0xF) << 28)
+#define LW_TBU_EN BIT(16)
+
+#define AxCACHE_MASK 0xF0000000
+
+/* Layer AXI R/W cache setting */
+#define AxCACHE_B BIT(0) /* Bufferable */
+#define AxCACHE_M BIT(1) /* Modifiable */
+#define AxCACHE_RA BIT(2) /* Read-Allocate */
+#define AxCACHE_WA BIT(3) /* Write-Allocate */
+
+/* Layer info bits */
+#define L_INFO_RF BIT(0)
+#define L_INFO_CM BIT(1)
+#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+
+/* Scaler registers */
+#define SC_COEFFTAB 0x0DC
+#define SC_OUT_SIZE 0x0E4
+#define SC_H_CROP 0x0E8
+#define SC_V_CROP 0x0EC
+#define SC_H_INIT_PH 0x0F0
+#define SC_H_DELTA_PH 0x0F4
+#define SC_V_INIT_PH 0x0F8
+#define SC_V_DELTA_PH 0x0FC
+#define SC_ENH_LIMITS 0x130
+#define SC_ENH_COEFF0 0x134
+
+#define SC_MAX_ENH_COEFF 9
+
+/* SC_CTRL_BITS */
+#define SC_CTRL_SCL BIT(0)
+#define SC_CTRL_LS BIT(1)
+#define SC_CTRL_AP BIT(4)
+#define SC_CTRL_IENH BIT(8)
+#define SC_CTRL_RGBSM BIT(16)
+#define SC_CTRL_ASM BIT(17)
+
+#define SC_VTSEL(vtal) ((vtal) << 16)
+
+#define SC_NUM_INPUTS_IDS 1
+#define SC_NUM_OUTPUTS_IDS 1
+
+#define MG_NUM_INPUTS_IDS 2
+#define MG_NUM_OUTPUTS_IDS 1
+
+/* Merger registers */
+#define MG_INPUT_ID0 BLK_INPUT_ID0
+#define MG_INPUT_ID1 (MG_INPUT_ID0 + 4)
+#define MG_SIZE BLK_SIZE
+
+/* Splitter registers */
+#define SP_OVERLAP_SIZE 0xD8
+
+/* Backend registers */
+#define BS_INFO 0x0C0
+#define BS_PROG_LINE 0x0D4
+#define BS_PREFETCH_LINE 0x0D8
+#define BS_BG_COLOR 0x0DC
+#define BS_ACTIVESIZE 0x0E0
+#define BS_HINTERVALS 0x0E4
+#define BS_VINTERVALS 0x0E8
+#define BS_SYNC 0x0EC
+#define BS_DRIFT_TO 0x100
+#define BS_FRAME_TO 0x104
+#define BS_TE_TO 0x108
+#define BS_T0_INTERVAL 0x110
+#define BS_T1_INTERVAL 0x114
+#define BS_T2_INTERVAL 0x118
+#define BS_CRC0_LOW 0x120
+#define BS_CRC0_HIGH 0x124
+#define BS_CRC1_LOW 0x128
+#define BS_CRC1_HIGH 0x12C
+#define BS_USER 0x130
+
+/* BS control register bits */
+#define BS_CTRL_EN BIT(0)
+#define BS_CTRL_VM BIT(1)
+#define BS_CTRL_BM BIT(2)
+#define BS_CTRL_HMASK BIT(4)
+#define BS_CTRL_VD BIT(5)
+#define BS_CTRL_TE BIT(8)
+#define BS_CTRL_TS BIT(9)
+#define BS_CTRL_TM BIT(12)
+#define BS_CTRL_DL BIT(16)
+#define BS_CTRL_SBS BIT(17)
+#define BS_CTRL_CRC BIT(18)
+#define BS_CTRL_PM BIT(20)
+
+/* BS active size/intervals */
+#define BS_H_INTVALS(hfp, hbp) (((hfp) & 0xFFF) + (((hbp) & 0x3FF) << 16))
+#define BS_V_INTVALS(vfp, vbp) (((vfp) & 0x3FFF) + (((vbp) & 0xFF) << 16))
+
+/* BS_SYNC bits */
+#define BS_SYNC_HSW(x) ((x) & 0x3FF)
+#define BS_SYNC_HSP BIT(12)
+#define BS_SYNC_VSW(x) (((x) & 0xFF) << 16)
+#define BS_SYNC_VSP BIT(28)
+
+#define BS_NUM_INPUT_IDS 0
+#define BS_NUM_OUTPUT_IDS 0
+
+/* Image process registers */
+#define IPS_DEPTH 0x0D8
+#define IPS_RGB_RGB_COEFF0 0x130
+#define IPS_RGB_YUV_COEFF0 0x170
+
+#define IPS_DEPTH_MARK 0xF
+
+/* IPS control register bits */
+#define IPS_CTRL_RGB BIT(0)
+#define IPS_CTRL_FT BIT(4)
+#define IPS_CTRL_YUV BIT(8)
+#define IPS_CTRL_CHD422 BIT(9)
+#define IPS_CTRL_CHD420 BIT(10)
+#define IPS_CTRL_LPF BIT(11)
+#define IPS_CTRL_DITH BIT(12)
+#define IPS_CTRL_CLAMP BIT(16)
+#define IPS_CTRL_SBS BIT(17)
+
+/* IPS info register bits */
+#define IPS_INFO_CHD420 BIT(10)
+
+#define IPS_NUM_INPUT_IDS 2
+#define IPS_NUM_OUTPUT_IDS 1
+
+/* FT_COEFF block registers */
+#define FT_COEFF0 0x80
+#define GLB_IT_COEFF 0x80
+
+/* GLB_SC_COEFF registers */
+#define GLB_SC_COEFF_ADDR 0x0080
+#define GLB_SC_COEFF_DATA 0x0084
+#define GLB_LT_COEFF_DATA 0x0080
+
+#define GLB_SC_COEFF_MAX_NUM 1024
+#define GLB_LT_COEFF_NUM 65
+/* GLB_SC_ADDR */
+#define SC_COEFF_R_ADDR BIT(18)
+#define SC_COEFF_G_ADDR BIT(17)
+#define SC_COEFF_B_ADDR BIT(16)
+
+#define SC_COEFF_DATA(x, y) (((y) & 0xFFFF) | (((x) & 0xFFFF) << 16))
+
+enum d71_blk_type {
+ D71_BLK_TYPE_GCU = 0x00,
+ D71_BLK_TYPE_LPU = 0x01,
+ D71_BLK_TYPE_CU = 0x02,
+ D71_BLK_TYPE_DOU = 0x03,
+ D71_BLK_TYPE_AEU = 0x04,
+ D71_BLK_TYPE_GLB_LT_COEFF = 0x05,
+ D71_BLK_TYPE_GLB_SCL_COEFF = 0x06, /* SH/SV scaler coeff */
+ D71_BLK_TYPE_GLB_SC_COEFF = 0x07,
+ D71_BLK_TYPE_PERIPH = 0x08,
+ D71_BLK_TYPE_LPU_TRUSTED = 0x09,
+ D71_BLK_TYPE_AEU_TRUSTED = 0x0A,
+ D71_BLK_TYPE_LPU_LAYER = 0x10,
+ D71_BLK_TYPE_LPU_WB_LAYER = 0x11,
+ D71_BLK_TYPE_CU_SPLITTER = 0x20,
+ D71_BLK_TYPE_CU_SCALER = 0x21,
+ D71_BLK_TYPE_CU_MERGER = 0x22,
+ D71_BLK_TYPE_DOU_IPS = 0x30,
+ D71_BLK_TYPE_DOU_BS = 0x31,
+ D71_BLK_TYPE_DOU_FT_COEFF = 0x32,
+ D71_BLK_TYPE_AEU_DS = 0x40,
+ D71_BLK_TYPE_AEU_AES = 0x41,
+ D71_BLK_TYPE_RESERVED = 0xFF
+};
+
+/* Constant of components */
+#define D71_MAX_PIPELINE 2
+#define D71_PIPELINE_MAX_SCALERS 2
+#define D71_PIPELINE_MAX_LAYERS 4
+
+#define D71_MAX_GLB_IT_COEFF 3
+#define D71_MAX_GLB_SCL_COEFF 4
+
+#define D71_MAX_LAYERS_PER_LPU 4
+#define D71_BLOCK_MAX_INPUT 9
+#define D71_BLOCK_MAX_OUTPUT 5
+#define D71_MAX_SC_PER_CU 2
+
+#define D71_BLOCK_OFFSET_PERIPH 0xFE00
+#define D71_BLOCK_SIZE 0x0200
+
+#define D71_DEFAULT_PREPRETCH_LINE 5
+#define D71_BUS_WIDTH_16_BYTES 16
+
+#define D71_MIN_LINE_SIZE 64
+#define D71_MIN_VERTICAL_SIZE 64
+#define D71_SC_MIN_LIN_SIZE 4
+#define D71_SC_MIN_VERTICAL_SIZE 4
+#define D71_SC_MAX_LIN_SIZE 2048
+#define D71_SC_MAX_VERTICAL_SIZE 4096
+
+#define D71_SC_MAX_UPSCALING 64
+#define D71_SC_MAX_DOWNSCALING 6
+#define D71_SC_SPLIT_OVERLAP 8
+#define D71_SC_ENH_SPLIT_OVERLAP 1
+
+#define D71_MG_MIN_MERGED_SIZE 4
+#define D71_MG_MAX_MERGED_HSIZE 4032
+#define D71_MG_MAX_MERGED_VSIZE 4096
+
+#define D71_PALPHA_DEF_MAP 0xFFAA5500
+#define D71_LAYER_CONTROL_DEFAULT 0x30000000
+#define D71_WB_LAYER_CONTROL_DEFAULT 0x3000FF00
+#define D71_BS_CONTROL_DEFAULT 0x00000002
+
+struct block_header {
+ u32 block_info;
+ u32 pipeline_info;
+ u32 input_ids[D71_BLOCK_MAX_INPUT];
+ u32 output_ids[D71_BLOCK_MAX_OUTPUT];
+};
+
+static inline u32 get_block_type(struct block_header *blk)
+{
+ return BLOCK_INFO_BLK_TYPE(blk->block_info);
+}
+
+#endif /* !_D71_REG_H_ */
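The block-header macros above are what d71_probe_block() uses to dispatch: bits [15:8] of block_info select the block type and bits [7:4] the per-type block id. An illustrative decode, assuming blk_base is the ioremapped base of one register block as in d71_enum_resources():

	struct block_header blk;

	d71_read_block_header(blk_base, &blk);
	if (get_block_type(&blk) == D71_BLK_TYPE_LPU_LAYER)
		DRM_DEBUG("layer block, id %u\n",
			  BLOCK_INFO_BLK_ID(blk.block_info));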
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 3ca5718aa0c2..62fad59f5a6a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -18,10 +18,415 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
-struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+/**
+ * komeda_crtc_atomic_check - build display output data flow
+ * @crtc: DRM crtc
+ * @state: the crtc state object
+ *
+ * crtc_atomic_check is the final check stage, so besides building a display
+ * data pipeline according to the crtc_state, it also needs to release or
+ * disable any unclaimed pipeline resources.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
+ int err;
+
+ if (state->active) {
+ err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
+ if (err)
+ return err;
+ }
+
+ /* release unclaimed pipeline resources */
+ err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u32 komeda_calc_mclk(struct komeda_crtc_state *kcrtc_st)
+{
+ unsigned long mclk = kcrtc_st->base.adjusted_mode.clock * 1000;
+
+ return mclk;
+}
+
+/* Activating a crtc mainly needs two parts of preparation:
+ * 1. adjust the display operation mode.
+ * 2. enable the needed clocks.
+ */
+static int
+komeda_crtc_prepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
+ unsigned long pxlclk_rate = kcrtc_st->base.adjusted_mode.clock * 1000;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode | BIT(master->id);
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+ DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+ /* mclk only needs to be configured and enabled when entering single
+ * display mode. Dual display mode is always entered from single
+ * display mode, so mclk is already enabled and does not need to be
+ * enabled again.
+ */
+ if (new_mode != KOMEDA_MODE_DUAL_DISP) {
+ err = clk_set_rate(mdev->mclk, komeda_calc_mclk(kcrtc_st));
+ if (err)
+ DRM_ERROR("failed to set mclk.\n");
+ err = clk_prepare_enable(mdev->mclk);
+ if (err)
+ DRM_ERROR("failed to enable mclk.\n");
+ }
+
+ err = clk_prepare_enable(master->aclk);
+ if (err)
+ DRM_ERROR("failed to enable axi clk for pipe%d.\n", master->id);
+ err = clk_set_rate(master->pxlclk, pxlclk_rate);
+ if (err)
+ DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ err = clk_prepare_enable(master->pxlclk);
+ if (err)
+ DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+static int
+komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode & (~BIT(master->id));
+
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+ DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+
+ clk_disable_unprepare(master->pxlclk);
+ clk_disable_unprepare(master->aclk);
+ if (new_mode == KOMEDA_MODE_INACTIVE)
+ clk_disable_unprepare(mdev->mclk);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts)
+{
+ struct drm_crtc *crtc = &kcrtc->base;
+ u32 events = evts->pipes[kcrtc->master->id];
+
+ if (events & KOMEDA_EVENT_VSYNC)
+ drm_crtc_handle_vblank(crtc);
+
+ /* will be handled together with the writeback support */
+ if (events & KOMEDA_EVENT_EOW)
+ DRM_DEBUG("EOW.\n");
+
+ if (events & KOMEDA_EVENT_FLIP) {
+ unsigned long flags;
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (kcrtc->disable_done) {
+ complete_all(kcrtc->disable_done);
+ kcrtc->disable_done = NULL;
+ } else if (crtc->state->event) {
+ event = crtc->state->event;
+ /*
+ * Consume event before notifying drm core that flip
+ * happened.
+ */
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ } else {
+ DRM_WARN("CRTC[%d]: FLIP happen but no pending commit.\n",
+ drm_crtc_index(&kcrtc->base));
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void
+komeda_crtc_do_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state);
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ kcrtc_st->active_pipes, kcrtc_st->affected_pipes);
+
+ /* step 1: update the pipeline/component state to HW */
+ if (has_bit(master->id, kcrtc_st->affected_pipes))
+ komeda_pipeline_update(master, old->state);
+
+ /* step 2: notify the HW to kickoff the update */
+ mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
+}
+
+static void
+komeda_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ komeda_crtc_prepare(to_kcrtc(crtc));
+ drm_crtc_vblank_on(crtc);
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static void
+komeda_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *old_st = to_kcrtc_st(old);
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct completion *disable_done = &crtc->state->commit->flip_done;
+ struct completion temp;
+ int timeout;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ old_st->active_pipes, old_st->affected_pipes);
+
+ if (has_bit(master->id, old_st->active_pipes))
+ komeda_pipeline_disable(master, old->state);
+
+ /* crtc_disable has two scenarios according to the state->active switch.
+ * 1. active -> inactive
+ * This commit is a disable commit and will be finished/done after
+ * the disable operation, so in this case we can directly use
+ * crtc->state->event to track the HW disable operation.
+ * 2. active -> active
+ * The crtc->commit is not for disable, but a modeset operation while
+ * the crtc is active; such a commit is actually completed by three
+ * DRM operations:
+ * crtc_disable, update_planes(crtc_flush), crtc_enable
+ * so in this case the crtc->commit covers the whole process and
+ * cannot be used to track the disable; we need a temporary
+ * flip_done to track the disable, and crtc->state->event for
+ * the crtc_enable operation.
+ * That is also the reason why modeset commits are skipped in
+ * komeda_crtc_atomic_flush().
+ */
+ if (crtc->state->active) {
+ struct komeda_pipeline_state *pipe_st;
+ /* clear the old active_comps to zero */
+ pipe_st = komeda_pipeline_get_old_state(master, old->state);
+ pipe_st->active_comps = 0;
+
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ disable_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, master->id, 0);
+
+ /* wait for the disable to take effect. */
+ timeout = wait_for_completion_timeout(disable_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
+ if (crtc->state->active) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+ drm_crtc_vblank_off(crtc);
+ komeda_crtc_unprepare(kcrtc);
+}
+
+static void
+komeda_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ /* commit with modeset will be handled in enable/disable */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static enum drm_mode_status
+komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk, pxlclk;
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+ /* main clock/AXI clk must be faster than pxlclk */
+ mode_clk = m->clock * 1000;
+ pxlclk = clk_round_rate(master->pxlclk, mode_clk);
+ if (pxlclk != mode_clk) {
+ DRM_DEBUG_ATOMIC("pxlclk doesn't support %ld Hz\n", mode_clk);
+
+ return MODE_NOCLOCK;
+ }
+
+ if (clk_round_rate(mdev->mclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("mclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ if (clk_round_rate(master->aclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("aclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
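+
+/*
+ * Worked example (illustrative, not from the patch): for a 1920x1080@60 mode,
+ * m->clock is 148500 (kHz), so mode_clk = 148500000 Hz. The mode is only
+ * accepted if pxlclk rounds to exactly that rate and both mclk and aclk can
+ * round to at least that rate.
+ */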
+
+static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *m,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk = m->clock * 1000;
+
+ adjusted_mode->clock = clk_round_rate(master->pxlclk, mode_clk) / 1000;
+
+ return true;
+}
+
+static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+ .atomic_check = komeda_crtc_atomic_check,
+ .atomic_flush = komeda_crtc_atomic_flush,
+ .atomic_enable = komeda_crtc_atomic_enable,
+ .atomic_disable = komeda_crtc_atomic_disable,
+ .mode_valid = komeda_crtc_mode_valid,
+ .mode_fixup = komeda_crtc_mode_fixup,
};
+static void komeda_crtc_reset(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *state;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(to_kcrtc_st(crtc->state));
+ crtc->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static struct drm_crtc_state *
+komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *old = to_kcrtc_st(crtc->state);
+ struct komeda_crtc_state *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
+
+ new->affected_pipes = old->active_pipes;
+
+ return &new->base;
+}
+
+static void komeda_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_kcrtc_st(state));
+}
+
+static int komeda_crtc_vblank_enable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true);
+ return 0;
+}
+
+static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
+}
+
static const struct drm_crtc_funcs komeda_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = komeda_crtc_reset,
+ .atomic_duplicate_state = komeda_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_crtc_atomic_destroy_state,
+ .enable_vblank = komeda_crtc_vblank_enable,
+ .disable_vblank = komeda_crtc_vblank_disable,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 70e9bb7fa30c..ca3599e4a4d3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,11 +8,99 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
#include <drm/drm_print.h>
#include "komeda_dev.h"
+static int komeda_register_show(struct seq_file *sf, void *x)
+{
+ struct komeda_dev *mdev = sf->private;
+ int i;
+
+ if (mdev->funcs->dump_register)
+ mdev->funcs->dump_register(mdev, sf);
+
+ for (i = 0; i < mdev->n_pipelines; i++)
+ komeda_pipeline_dump_register(mdev->pipelines[i], sf);
+
+ return 0;
+}
+
+static int komeda_register_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, komeda_register_show, inode->i_private);
+}
+
+static const struct file_operations komeda_register_fops = {
+ .owner = THIS_MODULE,
+ .open = komeda_register_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static void komeda_debugfs_init(struct komeda_dev *mdev)
+{
+ if (!debugfs_initialized())
+ return;
+
+ mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
+ if (IS_ERR_OR_NULL(mdev->debugfs_root))
+ return;
+
+ debugfs_create_file("register", 0444, mdev->debugfs_root,
+ mdev, &komeda_register_fops);
+}
+#endif
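+
+/*
+ * Example usage (illustrative): with debugfs mounted at /sys/kernel/debug,
+ * the register dump created above can be read with:
+ *
+ *   # cat /sys/kernel/debug/komeda/register
+ */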
+
+static ssize_t
+core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
+}
+static DEVICE_ATTR_RO(core_id);
+
+static ssize_t
+config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+ struct komeda_pipeline *pipe = mdev->pipelines[0];
+ union komeda_config_id config_id;
+ int i;
+
+ memset(&config_id, 0, sizeof(config_id));
+
+ config_id.max_line_sz = pipe->layers[0]->hsize_in.end;
+ config_id.n_pipelines = mdev->n_pipelines;
+ config_id.n_scalers = pipe->n_scalers;
+ config_id.n_layers = pipe->n_layers;
+ config_id.n_richs = 0;
+ for (i = 0; i < pipe->n_layers; i++) {
+ if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER)
+ config_id.n_richs++;
+ }
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", config_id.value);
+}
+static DEVICE_ATTR_RO(config_id);
+
+static struct attribute *komeda_sysfs_entries[] = {
+ &dev_attr_core_id.attr,
+ &dev_attr_config_id.attr,
+ NULL,
+};
+
+static struct attribute_group komeda_sysfs_attr_group = {
+ .attrs = komeda_sysfs_entries,
+};
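+
+/*
+ * Example usage (illustrative; the exact parent path depends on the platform
+ * device name in DT): the two read-only attributes are exposed under the
+ * komeda device's sysfs directory, e.g.:
+ *
+ *   $ cat /sys/devices/platform/<komeda-device>/core_id
+ *   $ cat /sys/devices/platform/<komeda-device>/config_id
+ */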
+
static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
{
struct komeda_pipeline *pipe;
@@ -53,6 +141,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
struct clk *clk;
int ret;
@@ -62,6 +151,11 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return PTR_ERR(clk);
mdev->mclk = clk;
+ mdev->irq = platform_get_irq(pdev, 0);
+ if (mdev->irq < 0) {
+ DRM_ERROR("could not get IRQ number.\n");
+ return mdev->irq;
+ }
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
@@ -99,6 +193,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (!mdev)
return ERR_PTR(-ENOMEM);
+ mutex_init(&mdev->lock);
+
mdev->dev = dev;
mdev->reg_base = devm_ioremap_resource(dev, io_res);
if (IS_ERR(mdev->reg_base)) {
@@ -147,6 +243,22 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ err = komeda_assemble_pipelines(mdev);
+ if (err) {
+ DRM_ERROR("assemble display pipelines failed.\n");
+ goto err_cleanup;
+ }
+
+ err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
+ if (err) {
+ DRM_ERROR("create sysfs group failed.\n");
+ goto err_cleanup;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ komeda_debugfs_init(mdev);
+#endif
+
return mdev;
err_cleanup:
@@ -160,6 +272,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
+ sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mdev->debugfs_root);
+#endif
+
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 0f77dead6a23..29e03c4e1ffc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -13,6 +13,33 @@
#include "malidp_product.h"
#include "komeda_format_caps.h"
+#define KOMEDA_EVENT_VSYNC BIT_ULL(0)
+#define KOMEDA_EVENT_FLIP BIT_ULL(1)
+#define KOMEDA_EVENT_URUN BIT_ULL(2)
+#define KOMEDA_EVENT_IBSY BIT_ULL(3)
+#define KOMEDA_EVENT_OVR BIT_ULL(4)
+#define KOMEDA_EVENT_EOW BIT_ULL(5)
+#define KOMEDA_EVENT_MODE BIT_ULL(6)
+
+#define KOMEDA_ERR_TETO BIT_ULL(14)
+#define KOMEDA_ERR_TEMR BIT_ULL(15)
+#define KOMEDA_ERR_TITR BIT_ULL(16)
+#define KOMEDA_ERR_CPE BIT_ULL(17)
+#define KOMEDA_ERR_CFGE BIT_ULL(18)
+#define KOMEDA_ERR_AXIE BIT_ULL(19)
+#define KOMEDA_ERR_ACE0 BIT_ULL(20)
+#define KOMEDA_ERR_ACE1 BIT_ULL(21)
+#define KOMEDA_ERR_ACE2 BIT_ULL(22)
+#define KOMEDA_ERR_ACE3 BIT_ULL(23)
+#define KOMEDA_ERR_DRIFTTO BIT_ULL(24)
+#define KOMEDA_ERR_FRAMETO BIT_ULL(25)
+#define KOMEDA_ERR_CSCE BIT_ULL(26)
+#define KOMEDA_ERR_ZME BIT_ULL(27)
+#define KOMEDA_ERR_MERR BIT_ULL(28)
+#define KOMEDA_ERR_TCF BIT_ULL(29)
+#define KOMEDA_ERR_TTNG BIT_ULL(30)
+#define KOMEDA_ERR_TTF BIT_ULL(31)
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -39,6 +66,11 @@ struct komeda_product_data {
struct komeda_dev;
+struct komeda_events {
+ u64 global;
+ u64 pipes[KOMEDA_MAX_PIPELINES];
+};
+
/**
* struct komeda_dev_funcs
*
@@ -60,6 +92,49 @@ struct komeda_dev_funcs {
int (*enum_resources)(struct komeda_dev *mdev);
/** @cleanup: call to chip to cleanup komeda_dev->chip data */
void (*cleanup)(struct komeda_dev *mdev);
+ /**
+ * @irq_handler:
+ *
+ * for CORE to get the HW event from the CHIP when interrupt happened.
+ */
+ irqreturn_t (*irq_handler)(struct komeda_dev *mdev,
+ struct komeda_events *events);
+ /** @enable_irq: enable irq */
+ int (*enable_irq)(struct komeda_dev *mdev);
+ /** @disable_irq: disable irq */
+ int (*disable_irq)(struct komeda_dev *mdev);
+ /** @on_off_vblank: notify HW to on/off vblank */
+ void (*on_off_vblank)(struct komeda_dev *mdev,
+ int master_pipe, bool on);
+
+ /** @dump_register: Optional, dump registers to seq_file */
+ void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq);
+ /**
+ * @change_opmode:
+ *
+ * Notify HW to switch to a new display operation mode.
+ */
+ int (*change_opmode)(struct komeda_dev *mdev, int new_mode);
+ /** @flush: Notify the HW to flush or kickoff the update */
+ void (*flush)(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes);
+};
+
+/*
+ * DISPLAY_MODE describes how many displays are enabled, and is passed to the
+ * CHIP by &komeda_dev_funcs->change_opmode(), so the CHIP can assign the
+ * pipeline resources according to this usage hint.
+ * - KOMEDA_MODE_DISP0: Only one display enabled, pipeline-0 works as master.
+ * - KOMEDA_MODE_DISP1: Only one display enabled, pipeline-1 works as master.
+ * - KOMEDA_MODE_DUAL_DISP: Dual display mode, both displays have been enabled.
+ * And D71 supports assigning two pipelines to one single display in mode
+ * KOMEDA_MODE_DISP0/DISP1.
+ */
+enum {
+ KOMEDA_MODE_INACTIVE = 0,
+ KOMEDA_MODE_DISP0 = BIT(0),
+ KOMEDA_MODE_DISP1 = BIT(1),
+ KOMEDA_MODE_DUAL_DISP = KOMEDA_MODE_DISP0 | KOMEDA_MODE_DISP1,
};
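+
+/*
+ * Illustrative example (not part of the original patch): with two pipelines,
+ * mdev->dpmode walks through these values as displays get enabled/disabled:
+ *
+ *   INACTIVE (0x0) -> DISP0 (0x1) -> DUAL_DISP (0x3) -> DISP0 (0x1) -> ...
+ *
+ * komeda_crtc_prepare()/komeda_crtc_unprepare() compute the new mode by
+ * setting/clearing BIT(master->id) in mdev->dpmode and hand it to
+ * &komeda_dev_funcs->change_opmode().
+ */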
/**
@@ -70,18 +145,31 @@ struct komeda_dev_funcs {
* control-abilites of device.
*/
struct komeda_dev {
+ /** @dev: the base device structure */
struct device *dev;
+ /** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
+ /** @chip: the basic chip information */
struct komeda_chip_info chip;
/** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
struct komeda_format_caps_table fmt_tbl;
/** @pclk: APB clock for register access */
struct clk *pclk;
- /** @mck: HW main engine clk */
+ /** @mclk: HW main engine clk */
struct clk *mclk;
+ /** @irq: irq number */
+ int irq;
+
+ /** @lock: used to protect dpmode */
+ struct mutex lock;
+ /** @dpmode: current display mode */
+ u32 dpmode;
+
+ /** @n_pipelines: the number of pipelines in @pipelines */
int n_pipelines;
+ /** @pipelines: the komeda pipelines */
struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
/** @funcs: chip funcs to access to HW */
@@ -93,6 +181,9 @@ struct komeda_dev {
* destroyed by &komeda_dev_funcs.cleanup()
*/
void *chip_data;
+
+ /** @debugfs_root: root directory of komeda debugfs */
+ struct dentry *debugfs_root;
};
static inline bool
@@ -107,4 +198,6 @@ d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
struct komeda_dev *komeda_dev_create(struct device *dev);
void komeda_dev_destroy(struct komeda_dev *mdev);
+struct komeda_dev *dev_to_mdev(struct device *dev);
+
#endif /*_KOMEDA_DEV_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 2bdd189b041d..cfa5068d9d1e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -17,6 +17,13 @@ struct komeda_drv {
struct komeda_kms_dev *kms;
};
+struct komeda_dev *dev_to_mdev(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? mdrv->mdev : NULL;
+}
+
static void komeda_unbind(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
@@ -120,7 +127,7 @@ static const struct komeda_product_data komeda_products[] = {
},
};
-const struct of_device_id komeda_of_match[] = {
+static const struct of_device_id komeda_of_match[] = {
{ .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
{},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index 0de2e4a2afd2..ea2fe190c1e3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -10,11 +10,16 @@
#include <drm/drm_framebuffer.h>
#include "komeda_format_caps.h"
-/** struct komeda_fb - entend drm_framebuffer with komeda attribute */
+/**
+ * struct komeda_fb - Extending drm_framebuffer with komeda attributes
+ */
struct komeda_fb {
/** @base: &drm_framebuffer */
struct drm_framebuffer base;
- /* @format_caps: &komeda_format_caps */
+ /**
+ * @format_caps:
+ * extends drm_format_info for komeda specific information
+ */
const struct komeda_format_caps *format_caps;
/** @aligned_w: aligned frame buffer width */
u32 aligned_w;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 47a58ab20434..86f6542afb40 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
@@ -25,18 +26,39 @@ static int komeda_gem_cma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 alignment = 16; /* TODO get alignment from dev */
+ struct komeda_dev *mdev = dev->dev_private;
+ u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8),
- alignment);
+ args->pitch = ALIGN(pitch, mdev->chip.bus_width);
return drm_gem_cma_dumb_create_internal(file, dev, args);
}
+static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct komeda_dev *mdev = drm->dev_private;
+ struct komeda_kms_dev *kms = to_kdev(drm);
+ struct komeda_events evts;
+ irqreturn_t status;
+ u32 i;
+
+ /* Call into the CHIP to recognize events */
+ memset(&evts, 0, sizeof(evts));
+ status = mdev->funcs->irq_handler(mdev, &evts);
+
+ /* Notify the crtc to handle the events */
+ for (i = 0; i < kms->n_crtcs; i++)
+ komeda_crtc_handle_event(&kms->crtcs[i], &evts);
+
+ return status;
+}
+
static struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_HAVE_IRQ,
.lastclose = drm_fb_helper_lastclose,
+ .irq_handler = komeda_kms_irq_handler,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
@@ -78,9 +100,37 @@ static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
.atomic_commit_tail = komeda_kms_commit_tail,
};
+static int komeda_kms_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ int i, err;
+
+ err = drm_atomic_helper_check_modeset(dev, state);
+ if (err)
+ return err;
+
+ /* komeda needs to re-calculate the resource assignment in every
+ * commit, so all affected planes (even unchanged ones) need to be
+ * added to the drm_atomic_state.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ err = drm_atomic_add_affected_planes(state, crtc);
+ if (err)
+ return err;
+ }
+
+ err = drm_atomic_helper_check_planes(dev, state);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
.fb_create = komeda_fb_create,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = komeda_kms_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -144,14 +194,25 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
drm_mode_config_reset(drm);
- err = drm_dev_register(drm, 0);
+ err = drm_irq_install(drm, mdev->irq);
if (err)
goto cleanup_mode_config;
+ err = mdev->funcs->enable_irq(mdev);
+ if (err)
+ goto uninstall_irq;
+
+ err = drm_dev_register(drm, 0);
+ if (err)
+ goto uninstall_irq;
+
return kms;
+uninstall_irq:
+ drm_irq_uninstall(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
free_kms:
kfree(kms);
return ERR_PTR(err);
@@ -162,9 +223,11 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
+ mdev->funcs->disable_irq(mdev);
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(mdev);
+ komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
drm->dev_private = NULL;
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 874e9c9f0749..ac3d9209b4d9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -12,8 +12,12 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
-/** struct komeda_plane - komeda instance of drm_plane */
+/**
+ * struct komeda_plane - komeda instance of drm_plane
+ */
struct komeda_plane {
/** @base: &drm_plane */
struct drm_plane base;
@@ -68,9 +72,14 @@ struct komeda_crtc {
* merge into the master.
*/
struct komeda_pipeline *slave;
+
+ /** @disable_done: this flip_done is for tracking the disable */
+ struct completion *disable_done;
};
-/** struct komeda_crtc_state */
+/**
+ * struct komeda_crtc_state
+ */
struct komeda_crtc_state {
/** @base: &drm_crtc_state */
struct drm_crtc_state base;
@@ -78,7 +87,15 @@ struct komeda_crtc_state {
/* private properties */
/* computed state which are used by validate/check */
+ /**
+ * @affected_pipes:
+ * the affected pipelines in one display instance
+ */
u32 affected_pipes;
+ /**
+ * @active_pipes:
+ * the active pipelines in one display instance
+ */
u32 active_pipes;
};
@@ -106,7 +123,10 @@ int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev);
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev);
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts);
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index f1908e9ef128..c379439c6194 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -19,17 +19,17 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
DRM_ERROR("Exceed max support %d pipelines.\n",
KOMEDA_MAX_PIPELINES);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
if (size < sizeof(*pipe)) {
DRM_ERROR("Request pipeline size too small.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!pipe)
- return NULL;
+ return ERR_PTR(-ENOMEM);
pipe->mdev = mdev;
pipe->id = mdev->n_pipelines;
@@ -62,7 +62,7 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
devm_kfree(mdev->dev, pipe);
}
-struct komeda_component **
+static struct komeda_component **
komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
{
struct komeda_dev *mdev = pipe->mdev;
@@ -142,32 +142,32 @@ komeda_component_add(struct komeda_pipeline *pipe,
if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) {
WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n",
max_active_inputs);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
pos = komeda_pipeline_get_component_pos(pipe, id);
if (!pos || (*pos))
- return NULL;
+ return ERR_PTR(-EINVAL);
if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) {
idx = id - KOMEDA_COMPONENT_LAYER0;
num = &pipe->n_layers;
if (idx != pipe->n_layers) {
DRM_ERROR("please add Layer by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
} else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) {
idx = id - KOMEDA_COMPONENT_SCALER0;
num = &pipe->n_scalers;
if (idx != pipe->n_scalers) {
DRM_ERROR("please add Scaler by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
}
c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
if (!c)
- return NULL;
+ return ERR_PTR(-ENOMEM);
c->id = id;
c->hw_id = hw_id;
@@ -200,3 +200,98 @@ void komeda_component_destroy(struct komeda_dev *mdev,
{
devm_kfree(mdev->dev, c);
}
+
+static void komeda_component_dump(struct komeda_component *c)
+{
+ if (!c)
+ return;
+
+ DRM_DEBUG(" %s: ID %d-0x%08lx.\n",
+ c->name, c->id, BIT(c->id));
+ DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n",
+ c->max_active_inputs, c->supported_inputs);
+ DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n",
+ c->max_active_outputs, c->supported_outputs);
+}
+
+static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n",
+ pipe->id, pipe->n_layers, pipe->n_scalers,
+ pipe->of_output_dev ? pipe->of_output_dev->full_name : "none");
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_dump(c);
+ }
+}
+
+static void komeda_component_verify_inputs(struct komeda_component *c)
+{
+ struct komeda_pipeline *pipe = c->pipeline;
+ struct komeda_component *input;
+ int id;
+
+ dp_for_each_set_bit(id, c->supported_inputs) {
+ input = komeda_pipeline_get_component(pipe, id);
+ if (!input) {
+ c->supported_inputs &= ~(BIT(id));
+ DRM_WARN("Can not find input(ID-%d) for component: %s.\n",
+ id, c->name);
+ continue;
+ }
+
+ input->supported_outputs |= BIT(c->id);
+ }
+}
+
+static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_verify_inputs(c);
+ }
+}
+
+int komeda_assemble_pipelines(struct komeda_dev *mdev)
+{
+ struct komeda_pipeline *pipe;
+ int i;
+
+ for (i = 0; i < mdev->n_pipelines; i++) {
+ pipe = mdev->pipelines[i];
+
+ komeda_pipeline_assemble(pipe);
+ komeda_pipeline_dump(pipe);
+ }
+
+ return 0;
+}
+
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf)
+{
+ struct komeda_component *c;
+ u32 id;
+
+ seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
+
+ if (pipe->funcs && pipe->funcs->dump_register)
+ pipe->funcs->dump_register(pipe, sf);
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ seq_printf(sf, "\n------%s------\n", c->name);
+ if (c->funcs->dump_register)
+ c->funcs->dump_register(c, sf);
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index 8c950bc8ae96..b1f813a349a4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -90,32 +90,35 @@ struct komeda_component {
u32 __iomem *reg;
/** @id: component id */
u32 id;
- /** @hw_ic: component hw id,
- * which is initialized by chip and used by chip only
+ /**
+ * @hw_id: component hw id,
+ * which is initialized by chip and used by chip only
*/
u32 hw_id;
/**
* @max_active_inputs:
- * @max_active_outpus:
+ * @max_active_outputs:
*
- * maximum number of inputs/outputs that can be active in the same time
+ * maximum number of inputs/outputs that can be active at the same time
* Note:
* the number isn't the bit number of @supported_inputs or
* @supported_outputs, but may be less than it, since component may not
* support enabling all @supported_inputs/outputs at the same time.
*/
u8 max_active_inputs;
+ /** @max_active_outputs: maximum number of outputs */
u8 max_active_outputs;
/**
* @supported_inputs:
* @supported_outputs:
*
- * bitmask of BIT(component->id) for the supported inputs/outputs
+ * bitmask of BIT(component->id) for the supported inputs/outputs,
* describes the possibilities of how a component is linked into a
* pipeline.
*/
u32 supported_inputs;
+ /** @supported_outputs: bitmask of supported output component ids */
u32 supported_outputs;
/**
@@ -134,7 +137,8 @@ struct komeda_component {
struct komeda_component_output {
/** @component: indicate which component the data comes from */
struct komeda_component *component;
- /** @output_port:
+ /**
+ * @output_port:
* the output port of the &komeda_component_output.component
*/
u8 output_port;
@@ -150,11 +154,12 @@ struct komeda_component_output {
struct komeda_component_state {
/** @obj: tracking component_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @component: backpointer to the component */
struct komeda_component *component;
/**
* @binding_user:
- * currently bound user, the user can be crtc/plane/wb_conn, which is
- * valid decided by @component and @inputs
+ * currently bound user, the user can be @crtc, @plane or @wb_conn,
+ * which one is valid is decided by @component and @inputs
*
* - Layer: its user always is plane.
* - compiz/improc/timing_ctrlr: the user is crtc.
@@ -162,20 +167,24 @@ struct komeda_component_state {
* - scaler: plane when input is layer, wb_conn if input is compiz.
*/
union {
+ /** @crtc: backpointer for user crtc */
struct drm_crtc *crtc;
+ /** @plane: backpointer for user plane */
struct drm_plane *plane;
+ /** @wb_conn: backpointer for user wb_connector */
struct drm_connector *wb_conn;
void *binding_user;
};
+
/**
* @active_inputs:
*
* active_inputs is bitmask of @inputs index
*
- * - active_inputs = changed_active_inputs + unchanged_active_inputs
- * - affected_inputs = old->active_inputs + new->active_inputs;
+ * - active_inputs = changed_active_inputs | unchanged_active_inputs
+ * - affected_inputs = old->active_inputs | new->active_inputs;
* - disabling_inputs = affected_inputs ^ active_inputs;
- * - changed_inputs = disabling_inputs + changed_active_inputs;
+ * - changed_inputs = disabling_inputs | changed_active_inputs;
*
* NOTE:
* changed_inputs doesn't include all active_input but only
@@ -183,7 +192,9 @@ struct komeda_component_state {
* level for dirty update.
*/
u16 active_inputs;
+ /** @changed_active_inputs: bitmask of the changed @active_inputs */
u16 changed_active_inputs;
+ /** @affected_inputs: bitmask for affected @inputs */
u16 affected_inputs;
/**
* @inputs:
@@ -204,57 +215,96 @@ static inline u16 component_changed_inputs(struct komeda_component_state *st)
return component_disabling_inputs(st) | st->changed_active_inputs;
}
+#define for_each_changed_input(st, i) \
+ for ((i) = 0; (i) < (st)->component->max_active_inputs; (i)++) \
+ if (has_bit((i), component_changed_inputs(st)))
+
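+/*
+ * Worked example (illustrative only): with old->active_inputs = 0b0011 and
+ * new->active_inputs = 0b0110, where input-2 is newly enabled:
+ *
+ *   affected_inputs  = 0b0011 | 0b0110 = 0b0111
+ *   disabling_inputs = 0b0111 ^ 0b0110 = 0b0001
+ *   changed_inputs   = 0b0001 | BIT(2) (changed_active_inputs) = 0b0101
+ */
+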
#define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base))
#define to_cpos(__c) ((struct komeda_component **)&(__c))
-/* these structures are going to be filled in in uture patches */
struct komeda_layer {
struct komeda_component base;
- /* layer specific features and caps */
- int layer_type; /* RICH, SIMPLE or WB */
+ /* accepted h/v input range before rotation */
+ struct malidp_range hsize_in, vsize_in;
+ u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 supported_rots;
};
struct komeda_layer_state {
struct komeda_component_state base;
/* layer specific configuration state */
+ u16 hsize, vsize;
+ u32 rot;
+ dma_addr_t addr[3];
};
-struct komeda_compiz {
+struct komeda_scaler {
struct komeda_component base;
- /* compiz specific features and caps */
+ /* scaler features and caps */
};
-struct komeda_compiz_state {
+struct komeda_scaler_state {
struct komeda_component_state base;
- /* compiz specific configuration state */
};
-struct komeda_scaler {
+struct komeda_compiz {
struct komeda_component base;
- /* scaler features and caps */
+ struct malidp_range hsize, vsize;
};
-struct komeda_scaler_state {
+struct komeda_compiz_input_cfg {
+ u16 hsize, vsize;
+ u16 hoffset, voffset;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
+struct komeda_compiz_state {
struct komeda_component_state base;
+ /* composition size */
+ u16 hsize, vsize;
+ struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS];
};
struct komeda_improc {
struct komeda_component base;
+ u32 supported_color_formats; /* DRM_RGB/YUV444/YUV420 */
+ u32 supported_color_depths; /* BIT(8) | BIT(10) */
+ u8 supports_degamma : 1;
+ u8 supports_csc : 1;
+ u8 supports_gamma : 1;
};
struct komeda_improc_state {
struct komeda_component_state base;
+ u16 hsize, vsize;
};
/* display timing controller */
struct komeda_timing_ctrlr {
struct komeda_component base;
+ u8 supports_dual_link : 1;
};
struct komeda_timing_ctrlr_state {
struct komeda_component_state base;
};
+/* Why define a separate structure instead of using plane_state directly?
+ * 1. Komeda supports layer_split, which means a plane_state can be split and
+ * handled by two layers, with each layer handling only half of the plane
+ * image.
+ * 2. The user properties are fixed up according to the HW's capabilities,
+ * e.g. the user sets rotation to R180, but the HW only supports
+ * REFLECT_X+Y; the rot here is the value after drm_rotation_simplify().
+ */
+struct komeda_data_flow_cfg {
+ struct komeda_component_output input;
+ u16 in_x, in_y, in_w, in_h;
+ u32 out_x, out_y, out_w, out_h;
+ u32 rot;
+ int blending_zorder;
+ u8 pixel_blend_mode, layer_alpha;
+};
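+
+/*
+ * Illustrative sketch (an assumption about how layer_split is meant to be
+ * used, not code from this patch): a plane wider than one layer can handle
+ * could be described by two komeda_data_flow_cfg entries, each covering half
+ * of the source/display rectangle, e.g. in_w = src_w / 2 with the second
+ * half offset by in_x += src_w / 2, and both fed into the same compositor.
+ */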
+
/** struct komeda_pipeline_funcs */
struct komeda_pipeline_funcs {
/* dump_register: Optional, dump registers to seq_file */
@@ -280,14 +330,23 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /** @n_layers: the number of layer on @layers */
int n_layers;
+ /** @layers: the pipeline layers */
struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS];
+ /** @n_scalers: the number of scaler on @scalers */
int n_scalers;
+ /** @scalers: the pipeline scalers */
struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
+ /** @compiz: compositor */
struct komeda_compiz *compiz;
+ /** @wb_layer: writeback layer */
struct komeda_layer *wb_layer;
+ /** @improc: post image processor */
struct komeda_improc *improc;
+ /** @ctrlr: timing controller */
struct komeda_timing_ctrlr *ctrlr;
+ /** @funcs: chip pipeline functions */
struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
/** @of_node: pipeline dt node */
@@ -308,6 +367,7 @@ struct komeda_pipeline {
struct komeda_pipeline_state {
/** @obj: tracking pipeline_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @pipe: backpointer to the pipeline */
struct komeda_pipeline *pipe;
/** @crtc: currently bound crtc */
struct drm_crtc *crtc;
@@ -340,10 +400,13 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
-
+int komeda_assemble_pipelines(struct komeda_dev *mdev);
struct komeda_component *
komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id);
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf);
+
/* component APIs */
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
@@ -356,4 +419,26 @@ komeda_component_add(struct komeda_pipeline *pipe,
void komeda_component_destroy(struct komeda_dev *mdev,
struct komeda_component *c);
+struct komeda_plane_state;
+struct komeda_crtc_state;
+struct komeda_crtc;
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow);
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st);
+
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st);
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state);
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+
#endif /* _KOMEDA_PIPELINE_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
new file mode 100644
index 000000000000..36570d7dad61
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <linux/clk.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+#include "komeda_pipeline.h"
+#include "komeda_framebuffer.h"
+
+static inline bool is_switching_user(void *old, void *new)
+{
+ if (!old || !new)
+ return false;
+
+ return old != new;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_pipe_st(priv_st);
+}
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+/* Assign pipeline for crtc */
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *st;
+
+ st = komeda_pipeline_get_state(pipe, state);
+ if (IS_ERR(st))
+ return st;
+
+ if (is_switching_user(crtc, st->crtc)) {
+ DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
+ drm_crtc_index(crtc), pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* a pipeline can only be disabled when it is free or unused */
+ if (!crtc && st->active_comps) {
+ DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->crtc = crtc;
+
+ if (crtc) {
+ struct komeda_crtc_state *kcrtc_st;
+
+ kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
+ crtc));
+
+ kcrtc_st->active_pipes |= BIT(pipe->id);
+ kcrtc_st->affected_pipes |= BIT(pipe->id);
+ }
+ return st;
+}
+
+static struct komeda_component_state *
+komeda_component_get_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
+
+ priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_comp_st(priv_st);
+}
+
+static struct komeda_component_state *
+komeda_component_get_old_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
+ if (priv_st)
+ return priv_to_comp_st(priv_st);
+ return NULL;
+}
+
+/**
+ * komeda_component_get_state_and_set_user()
+ *
+ * @c: component to get state and set user
+ * @state: global atomic state
+ * @user: direct user, the binding user
+ * @crtc: the CRTC user, the big boss :)
+ *
+ * This function accepts two users:
+ * - The direct user: can be plane/crtc/wb_connector, depending on the component
+ * - The big boss (CRTC)
+ * CRTC is the big boss (the final user), because all component resources
+ * eventually will be assigned to CRTC, like the layer will be binding to
+ * kms_plane, but kms plane will be binding to a CRTC eventually.
+ *
+ * The big boss (CRTC) is there for pipeline assignment, since a
+ * &komeda_component isn't independent and cannot be assigned to a CRTC freely:
+ * it belongs to a specific pipeline, only a pipeline can be shared between
+ * crtcs, and a pipeline is assigned as a whole (including all its internal
+ * components) to a specific CRTC.
+ *
+ * So when setting a user to a komeda_component, first check the status of
+ * component->pipeline to see if the pipeline is available on this specific
+ * CRTC. If the pipeline is busy (assigned to another CRTC), then even if the
+ * required component is free, it still cannot be assigned to the direct user.
+ */
+static struct komeda_component_state *
+komeda_component_get_state_and_set_user(struct komeda_component *c,
+ struct drm_atomic_state *state,
+ void *user,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *pipe_st;
+ struct komeda_component_state *st;
+
+ /* First check if the pipeline is available */
+ pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
+ state, crtc);
+ if (IS_ERR(pipe_st))
+ return ERR_CAST(pipe_st);
+
+ st = komeda_component_get_state(c, state);
+ if (IS_ERR(st))
+ return st;
+
+ /* check if the component has been occupied */
+ if (is_switching_user(user, st->binding_user)) {
+ DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->binding_user = user;
+ /* mark the component as active if user is valid */
+ if (st->binding_user)
+ pipe_st->active_comps |= BIT(c->id);
+
+ return st;
+}
+
+static void
+komeda_component_add_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ WARN_ON((idx < 0 || idx >= c->max_active_inputs));
+
+ /* inputs[i] is only valid when it is active. So if input[i] is a
+ * newly enabled input which switches from disabled to enabled, the
+ * old inputs[i] is undefined (NOT zeroed); we cannot rely on memcmp
+ * and directly mark it as changed instead.
+ */
+ if (!has_bit(idx, state->affected_inputs) ||
+ memcmp(&state->inputs[idx], input, sizeof(*input))) {
+ memcpy(&state->inputs[idx], input, sizeof(*input));
+ state->changed_active_inputs |= BIT(idx);
+ }
+ state->active_inputs |= BIT(idx);
+ state->affected_inputs |= BIT(idx);
+}
+
+static int
+komeda_component_check_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ if ((idx < 0) || (idx >= c->max_active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s invalid input id: %d.\n", c->name, idx);
+ return -EINVAL;
+ }
+
+ if (has_bit(idx, state->active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s required input_id: %d has been occupied already.\n",
+ c->name, idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+komeda_component_set_output(struct komeda_component_output *output,
+ struct komeda_component *comp,
+ u8 output_port)
+{
+ output->component = comp;
+ output->output_port = output_port;
+}
+
+static int
+komeda_component_validate_private(struct komeda_component *c,
+ struct komeda_component_state *st)
+{
+ int err;
+
+ if (!c->funcs->validate)
+ return 0;
+
+ err = c->funcs->validate(c, st);
+ if (err)
+ DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
+
+ return err;
+}
+
+static int
+komeda_layer_check_cfg(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ if (!in_range(&layer->hsize_in, dflow->in_w)) {
+ DRM_DEBUG_ATOMIC("src_w: %d is out of range.\n", dflow->in_w);
+ return -EINVAL;
+ }
+
+ if (!in_range(&layer->vsize_in, dflow->in_h)) {
+ DRM_DEBUG_ATOMIC("src_h: %d is out of range.\n", dflow->in_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+komeda_layer_validate(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane_state *plane_st = &kplane_st->base;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ struct komeda_component_state *c_st;
+ struct komeda_layer_state *st;
+ int i, err;
+
+ err = komeda_layer_check_cfg(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ c_st = komeda_component_get_state_and_set_user(&layer->base,
+ plane_st->state, plane_st->plane, plane_st->crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_layer_st(c_st);
+
+ st->rot = dflow->rot;
+ st->hsize = kfb->aligned_w;
+ st->vsize = kfb->aligned_h;
+
+ for (i = 0; i < fb->format->num_planes; i++)
+ st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
+ dflow->in_y, i);
+
+ err = komeda_component_validate_private(&layer->base, c_st);
+ if (err)
+ return err;
+
+ /* update the data flow for the next stage */
+ komeda_component_set_output(&dflow->input, &layer->base, 0);
+
+ return 0;
+}
+
+static void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
+ u16 *hsize, u16 *vsize)
+{
+ struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
+
+ if (hsize)
+ *hsize = m->hdisplay;
+ if (vsize)
+ *vsize = m->vdisplay;
+}
+
+static int
+komeda_compiz_set_input(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_component_state *c_st, *old_st;
+ struct komeda_compiz_input_cfg *cin;
+ u16 compiz_w, compiz_h;
+ int idx = dflow->blending_zorder;
+
+ pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
+ /* check display rect */
+ if ((dflow->out_x + dflow->out_w > compiz_w) ||
+ (dflow->out_y + dflow->out_h > compiz_h) ||
+ dflow->out_w == 0 || dflow->out_h == 0) {
+ DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
+ dflow->out_x, dflow->out_y,
+ dflow->out_w, dflow->out_h);
+ return -EINVAL;
+ }
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
+ kcrtc_st->base.crtc, kcrtc_st->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ if (komeda_component_check_input(c_st, &dflow->input, idx))
+ return -EINVAL;
+
+ cin = &(to_compiz_st(c_st)->cins[idx]);
+
+ cin->hsize = dflow->out_w;
+ cin->vsize = dflow->out_h;
+ cin->hoffset = dflow->out_x;
+ cin->voffset = dflow->out_y;
+ cin->pixel_blend_mode = dflow->pixel_blend_mode;
+ cin->layer_alpha = dflow->layer_alpha;
+
+ old_st = komeda_component_get_old_state(&compiz->base, drm_st);
+ WARN_ON(!old_st);
+
+ /* compare with old to check if this input has been changed */
+ if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+ c_st->changed_active_inputs |= BIT(idx);
+
+ komeda_component_add_input(c_st, &dflow->input, idx);
+
+ return 0;
+}
+
+static int
+komeda_compiz_validate(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *state,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct komeda_component_state *c_st;
+ struct komeda_compiz_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base,
+ state->base.state, state->base.crtc, state->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_compiz_st(c_st);
+
+ pipeline_composition_size(state, &st->hsize, &st->vsize);
+
+ komeda_component_set_output(&dflow->input, &compiz->base, 0);
+
+ /* compiz output dflow will be fed to the next pipeline stage, prepare
+ * the data flow configuration for the next stage
+ */
+ if (dflow) {
+ dflow->in_w = st->hsize;
+ dflow->in_h = st->vsize;
+ dflow->out_w = dflow->in_w;
+ dflow->out_h = dflow->in_h;
+ /* the output data of compiz doesn't have alpha, so it can only be
+ * used as the bottom layer when blending it with master layers
+ */
+ dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = 0xFF;
+ dflow->blending_zorder = 0;
+ }
+
+ return 0;
+}
+
+static int
+komeda_improc_validate(struct komeda_improc *improc,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_component_state *c_st;
+ struct komeda_improc_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&improc->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_improc_st(c_st);
+
+ st->hsize = dflow->in_w;
+ st->vsize = dflow->in_h;
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &improc->base, 0);
+
+ return 0;
+}
+
+static int
+komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_timing_ctrlr_state *st;
+ struct komeda_component_state *c_st;
+
+ c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_ctrlr_st(c_st);
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
+
+ return 0;
+}
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane *plane = kplane_st->base.plane;
+ struct komeda_pipeline *pipe = layer->base.pipeline;
+ int err;
+
+ DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
+ layer->base.name, plane->base.id, plane->name,
+ dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
+ dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
+
+ err = komeda_layer_validate(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
+
+ return err;
+}
+
+/* build display output data flow, the data path is:
+ * compiz -> improc -> timing_ctrlr
+ */
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_data_flow_cfg m_dflow; /* master data flow */
+ int err;
+
+ memset(&m_dflow, 0, sizeof(m_dflow));
+
+ err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+{
+ struct drm_atomic_state *drm_st = new->obj.state;
+ struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_component_state *c_st;
+ struct komeda_component *c;
+ u32 disabling_comps, id;
+
+ WARN_ON(!old);
+
+ disabling_comps = (~new->active_comps) & old->active_comps;
+
+ /* unbind all disabling components */
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
+ WARN_ON(IS_ERR(c_st));
+ }
+}
+
+/* release unclaimed pipeline resources */
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_pipeline_state *st;
+
+ /* ignore the pipeline which is not affected */
+ if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
+ return 0;
+
+ if (has_bit(pipe->id, kcrtc_st->active_pipes))
+ st = komeda_pipeline_get_new_state(pipe, drm_st);
+ else
+ st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
+
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+ komeda_pipeline_unbound_components(pipe, st);
+
+ return 0;
+}
+
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ struct komeda_component_state *c_st;
+ u32 id, disabling_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ disabling_comps = old->active_comps;
+ DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
+ pipe->id, disabling_comps);
+
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = priv_to_comp_st(c->obj.state);
+
+ /*
+ * If we disabled a component then all active_inputs should be
+ * put in the list of changed_active_inputs, so they get
+ * re-enabled.
+ * This usually happens during a modeset when the pipeline is
+ * first disabled and then the actual state gets committed
+ * again.
+ */
+ c_st->changed_active_inputs |= c_st->active_inputs;
+
+ c->funcs->disable(c);
+ }
+}
+
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ u32 id, changed_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ changed_comps = new->active_comps | old->active_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
+ pipe->id, new->active_comps, changed_comps);
+
+ dp_for_each_set_bit(id, changed_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ if (new->active_comps & BIT(c->id))
+ c->funcs->update(c, priv_to_comp_st(c->obj.state));
+ else
+ c->funcs->disable(c);
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 0a4953a9a909..07ed0cc1bc44 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -7,10 +7,96 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
+static int
+komeda_plane_init_data_flow(struct drm_plane_state *st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_framebuffer *fb = st->fb;
+
+ memset(dflow, 0, sizeof(*dflow));
+
+ dflow->blending_zorder = st->zpos;
+
+ /* if the format has no alpha, force the blend mode to PIXEL_NONE */
+ dflow->pixel_blend_mode = fb->format->has_alpha ?
+ st->pixel_blend_mode : DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = st->alpha >> 8;
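+ /* DRM plane alpha is 16-bit; keep only the most significant 8 bits */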
+
+ dflow->out_x = st->crtc_x;
+ dflow->out_y = st->crtc_y;
+ dflow->out_w = st->crtc_w;
+ dflow->out_h = st->crtc_h;
+
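+ /* plane src_* coordinates are 16.16 fixed point; convert to pixels */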
+ dflow->in_x = st->src_x >> 16;
+ dflow->in_y = st->src_y >> 16;
+ dflow->in_w = st->src_w >> 16;
+ dflow->in_h = st->src_h >> 16;
+
+ return 0;
+}
+
+/**
+ * komeda_plane_atomic_check - build input data flow
+ * @plane: DRM plane
+ * @state: the plane state object
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct komeda_plane *kplane = to_kplane(plane);
+ struct komeda_plane_state *kplane_st = to_kplane_st(state);
+ struct komeda_layer *layer = kplane->layer;
+ struct drm_crtc_state *crtc_st;
+ struct komeda_crtc *kcrtc;
+ struct komeda_crtc_state *kcrtc_st;
+ struct komeda_data_flow_cfg dflow;
+ int err;
+
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (!crtc_st->enable) {
+ DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* crtc is inactive, skip the resource assignment */
+ if (!crtc_st->active)
+ return 0;
+
+ kcrtc = to_kcrtc(state->crtc);
+ kcrtc_st = to_kcrtc_st(crtc_st);
+
+ err = komeda_plane_init_data_flow(state, &dflow);
+ if (err)
+ return err;
+
+ err = komeda_build_layer_data_flow(layer, kplane_st, kcrtc_st, &dflow);
+
+ return err;
+}
+
+/* The plane doesn't map to a dedicated piece of HW, so there is no HW
+ * update at the plane level; komeda does all the HW updates in
+ * crtc->atomic_flush.
+ */
+static void
+komeda_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
+ .atomic_check = komeda_plane_atomic_check,
+ .atomic_update = komeda_plane_atomic_update,
};
static void komeda_plane_destroy(struct drm_plane *plane)
@@ -20,7 +106,60 @@ static void komeda_plane_destroy(struct drm_plane *plane)
kfree(to_kplane(plane));
}
+static void komeda_plane_reset(struct drm_plane *plane)
+{
+ struct komeda_plane_state *state;
+ struct komeda_plane *kplane = to_kplane(plane);
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
+ state->base.zpos = kplane->layer->base.id;
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ }
+}
+
+static struct drm_plane_state *
+komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct komeda_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
+static void
+komeda_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_kplane_st(state));
+}
+
static const struct drm_plane_funcs komeda_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = komeda_plane_destroy,
+ .reset = komeda_plane_reset,
+ .atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_plane_atomic_destroy_state,
};
/* for komeda, whose pipelines can be shared between crtcs */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
index f1c9e3fefa86..a54878cbd6e4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
@@ -7,6 +7,188 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+static void
+komeda_component_state_reset(struct komeda_component_state *st)
+{
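+ /*
+ * Clear the per-commit fields: inputs that were active in the old
+ * state are recorded as affected so they get re-checked for the
+ * new state.
+ */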
+ st->binding_user = NULL;
+ st->affected_inputs = st->active_inputs;
+ st->active_inputs = 0;
+ st->changed_active_inputs = 0;
+}
+
+static struct drm_private_state *
+komeda_layer_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_layer_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_layer_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(priv_to_comp_st(state));
+
+ kfree(st);
+}
+
+static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
+ .atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_layer_atomic_destroy_state,
+};
+
+static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_layer *layer)
+{
+ struct komeda_layer_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &layer->base;
+ drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj,
+ &komeda_layer_obj_funcs);
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_compiz_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_compiz_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
+ .atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_compiz_atomic_destroy_state,
+};
+
+static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_compiz *compiz)
+{
+ struct komeda_compiz_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &compiz->base;
+ drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj,
+ &komeda_compiz_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_improc_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_improc_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_improc_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
+ .atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_improc_atomic_destroy_state,
+};
+
+static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_improc *improc)
+{
+ struct komeda_improc_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &improc->base;
+ drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj,
+ &komeda_improc_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_timing_ctrlr_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_ctrlr_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
+ .atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
+};
+
+static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_timing_ctrlr *ctrlr)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &ctrlr->base;
+ drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
+ &komeda_timing_ctrlr_obj_funcs);
+
+ return 0;
+}
+
static struct drm_private_state *
komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj)
{
@@ -55,7 +237,7 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
- int i, err;
+ int i, j, err;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
@@ -64,25 +246,33 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
if (err)
return err;
- /* Add component */
+ for (j = 0; j < pipe->n_layers; j++) {
+ err = komeda_layer_obj_add(kms, pipe->layers[j]);
+ if (err)
+ return err;
+ }
+
+ err = komeda_compiz_obj_add(kms, pipe->compiz);
+ if (err)
+ return err;
+
+ err = komeda_improc_obj_add(kms, pipe->improc);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr);
+ if (err)
+ return err;
}
return 0;
}
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev)
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms)
{
- struct komeda_pipeline *pipe;
- struct komeda_component *c;
- int i, id;
+ struct drm_mode_config *config = &kms->base.mode_config;
+ struct drm_private_obj *obj, *next;
- for (i = 0; i < mdev->n_pipelines; i++) {
- pipe = mdev->pipelines[i];
- dp_for_each_set_bit(id, pipe->avail_comps) {
- c = komeda_pipeline_get_component(pipe, id);
-
- drm_atomic_private_obj_fini(&c->obj);
- }
- drm_atomic_private_obj_fini(&pipe->obj);
- }
+ list_for_each_entry_safe(obj, next, &config->privobj_list, head)
+ drm_atomic_private_obj_fini(obj);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index ab50ad06e271..21725c9b9f5e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -264,37 +264,17 @@ static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
- DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
- return false;
- }
-
- if (mode_cmd->modifier[0] &
- ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
- DRM_DEBUG_KMS("Unsupported modifiers\n");
- return false;
- }
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- DRM_DEBUG_KMS("Unable to get the format information\n");
+ if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]) == false)
return false;
- }
-
- if (info->num_planes != 1) {
- DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
- return false;
- }
if (mode_cmd->offsets[0] != 0) {
DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
return false;
}
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
return false;
@@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
+ int bpp = 0;
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
afbc_superblock_height = 16;
afbc_superblock_width = 16;
break;
@@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
- afbc_superblock_size = info->cpp[0] * afbc_superblock_width *
- afbc_superblock_height;
+ bpp = malidp_format_get_bpp(info->format);
+
+ afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
+ / BITS_PER_BYTE;
afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
- if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
- DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n",
- mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]);
+ if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
+ DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
+ "should be same as width (=%u) * bpp (=%u)\n",
+ (mode_cmd->pitches[0] * BITS_PER_BYTE),
+ mode_cmd->width, bpp);
return false;
}
@@ -406,6 +391,7 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.max_height = hwdev->max_line_size;
drm->mode_config.funcs = &malidp_mode_config_funcs;
drm->mode_config.helper_private = &malidp_mode_config_helpers;
+ drm->mode_config.allow_fb_modifiers = true;
ret = malidp_crtc_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index b76c86f18a56..019a682b2716 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -90,6 +90,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+bool malidp_hw_format_is_linear_only(u32 format);
+bool malidp_hw_format_is_afbc_only(u32 format);
+
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier);
+
#ifdef CONFIG_DEBUG_FS
void malidp_error(struct malidp_drm *malidp,
struct malidp_error_stats *error_stats, u32 status,
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index b9bed1138fa3..8df12e9a33bb 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
{ DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 },
+ /* These are supported with AFBC only */
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 },
+ { DRM_FORMAT_VUY888, DE_VIDEO1, 16 },
+ { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 },
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 }
};
#define MALIDP_ID(__group, __format) \
((((__group) & 0x7) << 3) | ((__format) & 0x7))
+#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1)
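+/* i.e. ((5 & 0x7) << 3) | (1 & 0x7) == 0x29 */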
+
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
@@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+ /* This is only supported with linear modifier */ \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
- { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \
+ { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
@@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP500_DE_LV_AD_CTRL },
{ DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG1_AD_CTRL },
{ DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG2_AD_CTRL },
};
static const struct malidp_layer malidp550_layers[] = {
@@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
- MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE },
+ MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 },
};
static const struct malidp_layer malidp650_layers[] = {
@@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = {
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
- ROTATE_COMPRESSED },
+ ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
- ROTATE_NONE },
+ ROTATE_NONE, 0 },
+};
+
+const u64 malidp_format_modifiers[] = {
+ /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR),
+
+ /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* All 8 or 10 bit YUV 444 formats. */
+ /* On DP550, the 10 bit YUV 420 format is also supported */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16),
+
+ /* YUV 420, 422 P1 8, 10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR),
+
+ /* All formats */
+ DRM_FORMAT_MOD_LINEAR,
+
+ DRM_FORMAT_MOD_INVALID
};
#define SE_N_SCALING_COEFFS 96
@@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+int malidp_format_get_bpp(u32 fmt)
+{
+ int bpp = drm_format_plane_cpp(fmt, 0) * 8;
+
+ if (bpp == 0) {
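+ /*
+ * These AFBC-only YUV formats have no per-plane cpp in the format
+ * info, hence the explicit values below: 8-bit 4:2:0 averages
+ * 8 + 8/4 + 8/4 = 12 bits per pixel, the 10-bit variant 15, and
+ * packed 10-bit 4:4:4 is 30.
+ */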
+ switch (fmt) {
+ case DRM_FORMAT_VUY101010:
+ bpp = 30;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ default:
+ bpp = 0;
+ }
+ }
+
+ return bpp;
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
{
/*
* Each layer needs enough rotation memory to fit 8 lines
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- return w * drm_format_plane_cpp(fmt, 0) * 8;
+ int bpp = malidp_format_get_bpp(fmt);
+
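+ /* e.g. a 1920 pixel wide ARGB8888 layer needs 1920 * 32 = 61440 bytes */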
+ return w * bpp;
}
static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
@@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+static int malidpx50_get_bytes_per_column(u32 fmt)
{
- u32 bytes_per_col;
+ u32 bytes_per_column;
switch (fmt) {
/* 8 lines at 4 bytes per pixel */
@@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_X0L0:
- case DRM_FORMAT_X0L2:
- bytes_per_col = 32;
+ bytes_per_column = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420:
- bytes_per_col = 24;
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_VUY888:
+ /* 16 lines at 12 bits per pixel */
+ case DRM_FORMAT_YUV420_8BIT:
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_P010:
+ bytes_per_column = 24;
+ break;
+ /* 8 lines at 30 bits per pixel */
+ case DRM_FORMAT_VUY101010:
+ /* 16 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 30;
break;
default:
return -EINVAL;
}
- return w * bytes_per_col;
+ return bytes_per_column;
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 8 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 15;
+ break;
+ /* Uncompressed YUV 420 10 bit single plane cannot be rotated */
+ case DRM_FORMAT_X0L2:
+ if (has_modifier)
+ bytes_per_column = 8;
+ else
+ return -EINVAL;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
+}
+
+static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 16 lines at 2 bytes per pixel */
+ case DRM_FORMAT_X0L2:
+ bytes_per_column = 32;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
}
static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
@@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp550_layers),
.layers = malidp550_layers,
.de_irq_map = {
@@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp650_layers),
.layers = malidp650_layers,
.de_irq_map = {
@@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.in_config_mode = malidp550_in_config_mode,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
- .rotmem_required = malidp550_rotmem_required,
+ .rotmem_required = malidp650_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
.enable_memwrite = malidp550_enable_memwrite,
@@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
};
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format)
+ u8 layer_id, u32 format, bool has_modifier)
{
unsigned int i;
for (i = 0; i < map->n_pixel_formats; i++) {
if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
- (map->pixel_formats[i].format == format))
- return map->pixel_formats[i].id;
+ (map->pixel_formats[i].format == format)) {
+ /*
+ * On some DP550 and DP650 variants, DRM_FORMAT_YUYV with an AFBC
+ * modifier is handled by a different h/w format id than plain
+ * (linear) DRM_FORMAT_YUYV.
+ */
+ if (format == DRM_FORMAT_YUYV &&
+ (has_modifier) &&
+ (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
+ return AFBC_YUV_422_FORMAT_ID;
+ else
+ return map->pixel_formats[i].id;
+ }
}
return MALIDP_INVALID_FORMAT_ID;
}
+bool malidp_hw_format_is_linear_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_X0L2:
+ case DRM_FORMAT_X0L0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool malidp_hw_format_is_afbc_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_VUY888:
+ case DRM_FORMAT_VUY101010:
+ case DRM_FORMAT_YUV420_8BIT:
+ case DRM_FORMAT_YUV420_10BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
{
u32 base = malidp_get_block_base(hwdev, block);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 40155e2ea9d9..207c3ce52f1a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -70,6 +70,8 @@ struct malidp_layer {
s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
u16 mmu_ctrl_offset; /* offset to the MMU control register */
enum rotation_features rot; /* type of rotation supported */
+ /* address offset for the AFBC decoder registers */
+ u16 afbc_decoder_offset;
};
enum malidp_scaling_coeff_set {
@@ -93,7 +95,10 @@ struct malidp_se_config {
};
/* regmap features */
-#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
+#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0)
+#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1)
+#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2)
+#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3)
struct malidp_hw_regmap {
/* address offset of the DE register bank */
@@ -179,7 +184,8 @@ struct malidp_hw {
* Calculate the required rotation memory given the active area
* and the buffer format.
*/
- int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h,
+ u32 fmt, bool has_modifier);
int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
@@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq);
void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format);
+ u8 layer_id, u32 format, bool has_modifier);
+
+int malidp_format_get_bpp(u32 fmt);
static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
{
@@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
#define MALIDP_GAMMA_LUT_SIZE 4096
-#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \
- AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \
- AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \
- AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC)
+#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK
+#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16
+#define AFBC_YTR AFBC_FORMAT_MOD_YTR
+#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE
+#define AFBC_CBR AFBC_FORMAT_MOD_CBR
+#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT
+#define AFBC_TILED AFBC_FORMAT_MOD_TILED
+#define AFBC_SC AFBC_FORMAT_MOD_SC
+
+#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \
+ AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC)
+
+extern const u64 malidp_format_modifiers[];
#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 041a64dc7167..5f102bdaf841 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
+ if (fb->modifier) {
+ DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n");
+ return -EINVAL;
+ }
+
mw_state->format =
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
- fb->format->format);
+ fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
struct drm_format_name_buf format_name;
@@ -252,8 +257,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
&mw_state->addrs[0],
mw_state->format);
- drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
- conn_state->writeback_job = NULL;
+ drm_writeback_queue_job(mw_conn, conn_state);
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
fb->width, fb->height, mw_state->format,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index c9a6d3e0cada..d42e0ea9a303 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -52,6 +52,8 @@
#define MALIDP550_LS_ENABLE 0x01c
#define MALIDP550_LS_R1_IN_SIZE 0x020
+#define MODIFIERS_COUNT_MAX 15
+
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
* for formats with 1- or 2-bit alpha channels.
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier)
+{
+ const struct drm_format_info *info;
+ const u64 *modifiers;
+ struct malidp_drm *malidp = drm->dev_private;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ /* Some pixel formats are supported without any modifier */
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /*
+ * However, AFBC-only pixel formats must not be used with
+ * the linear modifier.
+ */
+ return !malidp_hw_format_is_afbc_only(format);
+ }
+
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ DRM_ERROR("Unknown modifier (not Arm)\n");
+ return false;
+ }
+
+ if (modifier &
+ ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
+ DRM_DEBUG_KMS("Unsupported modifiers\n");
+ return false;
+ }
+
+ modifiers = malidp_format_modifiers;
+
+ /* SPLIT buffers must use SPARSE layout */
+ if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
+ return false;
+
+ /* CBR only applies to YUV formats, where YTR should always be 0 */
+ if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
+ return false;
+
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (*modifiers == modifier)
+ break;
+
+ modifiers++;
+ }
+
+ /* return false if the modifier was not found */
+ if (*modifiers == DRM_FORMAT_MOD_INVALID) {
+ DRM_DEBUG_KMS("Unsupported modifier\n");
+ return false;
+ }
+
+ info = drm_format_info(format);
+
+ if (info->num_planes != 1) {
+ DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
+ return false;
+ }
+
+ if (malidp_hw_format_is_linear_only(format)) {
+ DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
+ format);
+ return false;
+ }
+
+ /*
+ * RGB formats need to provide YTR modifier and YUV formats should not
+ * provide YTR modifier.
+ */
+ if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
+ DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
+ info->is_yuv ? "disallowed" : "mandatory",
+ info->is_yuv ? "YUV" : "RGB");
+ return false;
+ }
+
+ if (modifier & AFBC_SPLIT) {
+ if (!info->is_yuv) {
+ if (drm_format_plane_cpp(format, 0) <= 2) {
+ DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
+ return false;
+ }
+ }
+
+ if ((drm_format_horz_chroma_subsampling(format) != 1) ||
+ (drm_format_vert_chroma_subsampling(format) != 1)) {
+ if (!(format == DRM_FORMAT_YUV420_10BIT &&
+ (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
+ DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
+ return false;
+ }
+ }
+ }
+
+ if (modifier & AFBC_CBR) {
+ if ((drm_format_horz_chroma_subsampling(format) == 1) ||
+ (drm_format_vert_chroma_subsampling(format) == 1)) {
+ DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ return malidp_format_mod_supported(plane->dev, format, modifier);
+}
+
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = {
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
.atomic_print_state = malidp_plane_atomic_print_state,
+ .format_mod_supported = malidp_format_mod_supported_per_plane,
};
static int malidp_se_check_scaling(struct malidp_plane *mp,
@@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
- mp->layer->id,
- fb->format->format);
+ mp->layer->id, fb->format->format,
+ !!fb->modifier);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
@@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
- if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
- & (alignment - 1)) {
+ if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+ & (alignment - 1)) && !(fb->modifier)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
@@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return -EINVAL;
}
+ /* SMART layer does not support AFBC */
+ if (mp->layer->id == DE_SMART && fb->modifier) {
+ DRM_ERROR("AFBC framebuffer not supported in SMART layer");
+ return -EINVAL;
+ }
+
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
state->crtc_h,
- fb->format->format);
+ fb->format->format,
+ !!(fb->modifier));
if (val < 0)
return val;
@@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp,
mp->layer->base + mp->layer->mmu_ctrl_offset);
}
+static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
+ struct malidp_plane *mp,
+ int plane_index)
+{
+ dma_addr_t paddr;
+ u16 ptr;
+ struct drm_plane *plane = &mp->base;
+ bool afbc = !!fb->modifier;
+
+ ptr = mp->layer->ptr + (plane_index << 4);
+
+ /*
+ * drm_fb_cma_get_gem_addr() alters the physical base address of the
+ * framebuffer as per the plane's src_x, src_y co-ordinates (ie to
+ * take care of source cropping).
+ * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
+ * and _AD_CROP_V registers.
+ */
+ if (!afbc) {
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
+ plane_index);
+ } else {
+ struct drm_gem_cma_object *obj;
+
+ obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+
+ if (WARN_ON(!obj))
+ return;
+ paddr = obj->paddr;
+ }
+
+ malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+}
+
+static void malidp_de_set_plane_afbc(struct drm_plane *plane)
+{
+ struct malidp_plane *mp;
+ u32 src_w, src_h, val = 0, src_x, src_y;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ mp = to_malidp_plane(plane);
+
+ /* no afbc_decoder_offset means AFBC is not supported on this plane */
+ if (!mp->layer->afbc_decoder_offset)
+ return;
+
+ if (!fb->modifier) {
+ malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
+ return;
+ }
+
+ /* convert src values from Q16 fixed point to integer */
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+
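+ /*
+ * The crop registers take the left/top crop in the low half-word and
+ * the right/bottom crop (pixels beyond src + size) in the high one.
+ */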
+ val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
+ src_x;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);
+
+ val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
+ src_y;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);
+
+ val = MALIDP_AD_EN;
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ val |= MALIDP_AD_BS;
+ if (fb->modifier & AFBC_FORMAT_MOD_YTR)
+ val |= MALIDP_AD_YTR;
+
+ malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
+}
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane,
u8 plane_alpha = state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
+ struct drm_framebuffer *fb = plane->state->fb;
mp = to_malidp_plane(plane);
- /* convert src values from Q16 fixed point to integer */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ /*
+ * For AFBC framebuffer, use the framebuffer width and height for
+ * configuring layer input size register.
+ */
+ if (fb->modifier) {
+ src_w = fb->width;
+ src_h = fb->height;
+ } else {
+ /* convert src values from Q16 fixed point to integer */
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ }
+
dest_w = state->crtc_w;
dest_h = state->crtc_h;
@@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
malidp_hw_write(mp->hwdev, val, mp->layer->base);
- for (i = 0; i < ms->n_planes; i++) {
- /* calculate the offset for the layer's plane registers */
- u16 ptr = mp->layer->ptr + (i << 4);
- dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
- state, i);
-
- malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
- }
+ for (i = 0; i < ms->n_planes; i++)
+ malidp_set_plane_base_addr(fb, mp, i);
malidp_de_set_mmu_control(mp, ms);
@@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
}
+ malidp_de_set_plane_afbc(plane);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm)
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
u32 *formats;
- int ret, i, j, n;
+ int ret, i = 0, j = 0, n;
+ u64 supported_modifiers[MODIFIERS_COUNT_MAX];
+ const u64 *modifiers;
+
+ modifiers = malidp_format_modifiers;
+
+ if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
+ /*
+ * Since our hardware does not support SPLIT, so build the list
+ * of supported modifiers excluding SPLIT ones.
+ */
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (!(*modifiers & AFBC_SPLIT))
+ supported_modifiers[j++] = *modifiers;
+
+ modifiers++;
+ }
+ supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
+ modifiers = supported_modifiers;
+ }
formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
if (!formats) {
@@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm)
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
+
+ /*
+ * All layers except the SMART layer support AFBC modifiers.
+ */
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
- &malidp_de_plane_funcs, formats,
- n, NULL, plane_type, NULL);
+ &malidp_de_plane_funcs, formats, n,
+ (id == DE_SMART) ? NULL : modifiers, plane_type,
+ NULL);
+
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 7ce3e141464d..a0dd6e1676a8 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -198,10 +198,13 @@
#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
+#define MALIDP500_DE_LV_AD_CTRL 0x00400
#define MALIDP500_DE_LG1_BASE 0x00200
#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
+#define MALIDP500_DE_LG1_AD_CTRL 0x0040c
#define MALIDP500_DE_LG2_BASE 0x00300
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
+#define MALIDP500_DE_LG2_AD_CTRL 0x00418
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
@@ -228,10 +231,13 @@
#define MALIDP550_LV_YUV2RGB 0x00084
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
+#define MALIDP550_DE_LV1_AD_CTRL 0x001B8
#define MALIDP550_DE_LV2_BASE 0x00200
#define MALIDP550_DE_LV2_PTR_BASE 0x00224
+#define MALIDP550_DE_LV2_AD_CTRL 0x002B8
#define MALIDP550_DE_LG_BASE 0x00300
#define MALIDP550_DE_LG_PTR_BASE 0x0031c
+#define MALIDP550_DE_LG_AD_CTRL 0x00330
#define MALIDP550_DE_LS_BASE 0x00400
#define MALIDP550_DE_LS_PTR_BASE 0x0042c
#define MALIDP550_DE_PERF_BASE 0x00500
@@ -258,6 +264,20 @@
#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x)))
#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12)
+/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */
+/* The following register offsets are common for DP500, DP550 and DP650 */
+#define MALIDP_AD_CROP_H 0x4
+#define MALIDP_AD_CROP_V 0x8
+#define MALIDP_AD_END_PTR_LOW 0xc
+#define MALIDP_AD_END_PTR_HIGH 0x10
+
+/* AFBC decoder Registers */
+#define MALIDP_AD_EN BIT(0)
+#define MALIDP_AD_YTR BIT(4)
+#define MALIDP_AD_BS BIT(8)
+#define MALIDP_AD_CROP_RIGHT_OFFSET 16
+#define MALIDP_AD_CROP_BOTTOM_OFFSET 16
+
/*
* Starting with DP550 the register map blocks has been standardised to the
* following layout:
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 8d23700848df..1e7140f005a5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,8 +78,6 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
goto err_fballoc;
}
- strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
- info->par = fbh;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -87,9 +85,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- drm_fb_helper_fill_fix(info, dfb->fb.pitches[0],
- dfb->fb.format->depth);
- drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, fbh, sizes);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
new file mode 100644
index 000000000000..cccab520e02f
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -0,0 +1,14 @@
+config DRM_ASPEED_GFX
+ tristate "ASPEED BMC Display Controller"
+ depends on DRM && OF
+ depends on (COMPILE_TEST || ARCH_ASPEED)
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select MFD_SYSCON
+ help
+ Choose this option if you have an ASPEED AST2500 SoC Display
+ Controller (aka GFX).
+
+ If M is selected this module will be called aspeed_gfx.
diff --git a/drivers/gpu/drm/aspeed/Makefile b/drivers/gpu/drm/aspeed/Makefile
new file mode 100644
index 000000000000..6e194cd790d8
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Makefile
@@ -0,0 +1,3 @@
+aspeed_gfx-y := aspeed_gfx_drv.o aspeed_gfx_crtc.o aspeed_gfx_out.o
+
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed_gfx.o
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
new file mode 100644
index 000000000000..a10358bb61ec
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2018 IBM Corporation */
+
+#include <drm/drm_device.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct aspeed_gfx {
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct regmap *scu;
+
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+ struct drm_fbdev_cma *fbdev;
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm);
+int aspeed_gfx_create_output(struct drm_device *drm);
+
+#define CRT_CTRL1 0x60 /* CRT Control I */
+#define CRT_CTRL2 0x64 /* CRT Control II */
+#define CRT_STATUS 0x68 /* CRT Status */
+#define CRT_MISC 0x6c /* CRT Misc Setting */
+#define CRT_HORIZ0 0x70 /* CRT Horizontal Total & Display Enable End */
+#define CRT_HORIZ1 0x74 /* CRT Horizontal Retrace Start & End */
+#define CRT_VERT0 0x78 /* CRT Vertical Total & Display Enable End */
+#define CRT_VERT1 0x7C /* CRT Vertical Retrace Start & End */
+#define CRT_ADDR 0x80 /* CRT Display Starting Address */
+#define CRT_OFFSET 0x84 /* CRT Display Offset & Terminal Count */
+#define CRT_THROD 0x88 /* CRT Threshold */
+#define CRT_XSCALE 0x8C /* CRT Scaling-Up Factor */
+#define CRT_CURSOR0 0x90 /* CRT Hardware Cursor X & Y Offset */
+#define CRT_CURSOR1 0x94 /* CRT Hardware Cursor X & Y Position */
+#define CRT_CURSOR2 0x98 /* CRT Hardware Cursor Pattern Address */
+#define CRT_9C 0x9C
+#define CRT_OSD_H 0xA0 /* CRT OSD Horizontal Start/End */
+#define CRT_OSD_V 0xA4 /* CRT OSD Vertical Start/End */
+#define CRT_OSD_ADDR 0xA8 /* CRT OSD Pattern Address */
+#define CRT_OSD_DISP 0xAC /* CRT OSD Offset */
+#define CRT_OSD_THRESH 0xB0 /* CRT OSD Threshold & Alpha */
+#define CRT_B4 0xB4
+#define CRT_STS_V 0xB8 /* CRT Status V */
+#define CRT_SCRATCH 0xBC /* Scratchpad */
+#define CRT_BB0_ADDR 0xD0 /* CRT Display BB0 Starting Address */
+#define CRT_BB1_ADDR 0xD4 /* CRT Display BB1 Starting Address */
+#define CRT_BB_COUNT 0xD8 /* CRT Display BB Terminal Count */
+#define OSD_COLOR1 0xE0 /* OSD Color Palette Index 1 & 0 */
+#define OSD_COLOR2 0xE4 /* OSD Color Palette Index 3 & 2 */
+#define OSD_COLOR3 0xE8 /* OSD Color Palette Index 5 & 4 */
+#define OSD_COLOR4 0xEC /* OSD Color Palette Index 7 & 6 */
+#define OSD_COLOR5 0xF0 /* OSD Color Palette Index 9 & 8 */
+#define OSD_COLOR6 0xF4 /* OSD Color Palette Index 11 & 10 */
+#define OSD_COLOR7 0xF8 /* OSD Color Palette Index 13 & 12 */
+#define OSD_COLOR8 0xFC /* OSD Color Palette Index 15 & 14 */
+
+/* CTRL1 */
+#define CRT_CTRL_EN BIT(0)
+#define CRT_CTRL_HW_CURSOR_EN BIT(1)
+#define CRT_CTRL_OSD_EN BIT(2)
+#define CRT_CTRL_INTERLACED BIT(3)
+#define CRT_CTRL_COLOR_RGB565 (0 << 7)
+#define CRT_CTRL_COLOR_YUV444 (1 << 7)
+#define CRT_CTRL_COLOR_XRGB8888 (2 << 7)
+#define CRT_CTRL_COLOR_RGB888 (3 << 7)
+#define CRT_CTRL_COLOR_YUV444_2RGB (5 << 7)
+#define CRT_CTRL_COLOR_YUV422 (7 << 7)
+#define CRT_CTRL_COLOR_MASK GENMASK(9, 7)
+#define CRT_CTRL_HSYNC_NEGATIVE BIT(16)
+#define CRT_CTRL_VSYNC_NEGATIVE BIT(17)
+#define CRT_CTRL_VERTICAL_INTR_EN BIT(30)
+#define CRT_CTRL_VERTICAL_INTR_STS BIT(31)
+
+/* CTRL2 */
+#define CRT_CTRL_DAC_EN BIT(0)
+#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
+#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
+
+/* CRT_HORIZ0 */
+#define CRT_H_TOTAL(x) (x)
+#define CRT_H_DE(x) ((x) << 16)
+
+/* CRT_HORIZ1 */
+#define CRT_H_RS_START(x) (x)
+#define CRT_H_RS_END(x) ((x) << 16)
+
+/* CRT_VIRT0 */
+#define CRT_V_TOTAL(x) (x)
+#define CRT_V_DE(x) ((x) << 16)
+
+/* CRT_VIRT1 */
+#define CRT_V_RS_START(x) (x)
+#define CRT_V_RS_END(x) ((x) << 16)
+
+/* CRT_OFFSET */
+#define CRT_DISP_OFFSET(x) (x)
+#define CRT_TERM_COUNT(x) ((x) << 16)
+
+/* CRT_THROD */
+#define CRT_THROD_LOW(x) (x)
+#define CRT_THROD_HIGH(x) ((x) << 8)
+
+/* Default Threshold Setting */
+#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
new file mode 100644
index 000000000000..15db9e426ec4
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "aspeed_gfx.h"
+
+static struct aspeed_gfx *
+drm_pipe_to_aspeed_gfx(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct aspeed_gfx, pipe);
+}
+
+static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp)
+{
+ struct drm_crtc *crtc = &priv->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ const u32 format = crtc->primary->state->fb->format->format;
+ u32 ctrl1;
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~CRT_CTRL_COLOR_MASK;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_RGB565;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_XRGB8888;
+ *bpp = 32;
+ break;
+ default:
+ dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
+ return -EINVAL;
+ }
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16));
+
+ writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+}
+
+static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), 0);
+}
+
+static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
+{
+ struct drm_display_mode *m = &priv->pipe.crtc.state->adjusted_mode;
+ u32 ctrl1, d_offset, t_count, bpp;
+ int err;
+
+ err = aspeed_gfx_set_pixel_fmt(priv, &bpp);
+ if (err)
+ return;
+
+#if 0
+ /* TODO: we have only been able to test with the 40MHz USB clock. The
+ * clock is fixed, so we cannot adjust it here. */
+ clk_set_rate(priv->pixel_clk, m->crtc_clock * 1000);
+#endif
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~(CRT_CTRL_INTERLACED |
+ CRT_CTRL_HSYNC_NEGATIVE |
+ CRT_CTRL_VSYNC_NEGATIVE);
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ ctrl1 |= CRT_CTRL_INTERLACED;
+
+ if (!(m->flags & DRM_MODE_FLAG_PHSYNC))
+ ctrl1 |= CRT_CTRL_HSYNC_NEGATIVE;
+
+ if (!(m->flags & DRM_MODE_FLAG_PVSYNC))
+ ctrl1 |= CRT_CTRL_VSYNC_NEGATIVE;
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ /* Horizontal timing */
+ writel(CRT_H_TOTAL(m->htotal - 1) | CRT_H_DE(m->hdisplay - 1),
+ priv->base + CRT_HORIZ0);
+ writel(CRT_H_RS_START(m->hsync_start - 1) | CRT_H_RS_END(m->hsync_end),
+ priv->base + CRT_HORIZ1);
+
+
+ /* Vertical timing */
+ writel(CRT_V_TOTAL(m->vtotal - 1) | CRT_V_DE(m->vdisplay - 1),
+ priv->base + CRT_VERT0);
+ writel(CRT_V_RS_START(m->vsync_start) | CRT_V_RS_END(m->vsync_end),
+ priv->base + CRT_VERT1);
+
+ /*
+ * Display Offset: address difference between consecutive scan lines
+ * Terminal Count: memory size of one scan line
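+ * e.g. for 1024x768 XRGB8888: offset = 1024 * 32 / 8 = 4096 bytes and
+ * terminal count = 1024 * 32 / 128 = 256 (in 128-bit units)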
+ */
+ d_offset = m->hdisplay * bpp / 8;
+ t_count = (m->hdisplay * bpp + 127) / 128;
+ writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
+ priv->base + CRT_OFFSET);
+
+ /*
+ * Threshold: FIFO thresholds of refill and stop (16 byte chunks
+ * per line, rounded up)
+ */
+ writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD);
+}
+
+static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ aspeed_gfx_crtc_mode_set_nofb(priv);
+ aspeed_gfx_enable_controller(priv);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void aspeed_gfx_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ drm_crtc_vblank_off(crtc);
+ aspeed_gfx_disable_controller(priv);
+}
+
+static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_pending_vblank_event *event;
+ struct drm_gem_cma_object *gem;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (!fb)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!gem)
+ return;
+ writel(gem->paddr, priv->base + CRT_ADDR);
+}
+
+static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+
+ reg |= CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ reg &= ~CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+}
+
+static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
+ .enable = aspeed_gfx_pipe_enable,
+ .disable = aspeed_gfx_pipe_disable,
+ .update = aspeed_gfx_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .enable_vblank = aspeed_gfx_enable_vblank,
+ .disable_vblank = aspeed_gfx_disable_vblank,
+};
+
+static const uint32_t aspeed_gfx_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+
+ return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
+ aspeed_gfx_formats,
+ ARRAY_SIZE(aspeed_gfx_formats),
+ NULL,
+ &priv->connector);
+}
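As a concrete illustration of the offset arithmetic in aspeed_gfx_crtc_mode_set_nofb() above, here is the 800x600 XRGB8888 case this driver targets, worked through as a small sketch (the numbers follow from the expressions in the patch rather than appearing in it):

	u32 bpp = 32, hdisplay = 800;

	/* Display Offset: bytes between the start of consecutive scan lines */
	u32 d_offset = hdisplay * bpp / 8;		/* 800 * 32 / 8 = 3200 */

	/* Terminal Count: length of one scan line in 128-bit units, rounded up */
	u32 t_count = (hdisplay * bpp + 127) / 128;	/* 25600 / 128 = 200 */

Both values are then packed into the single CRT_OFFSET register write shown above.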
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
new file mode 100644
index 000000000000..eeb22eccd1fc
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_drv.h>
+
+#include "aspeed_gfx.h"
+
+/**
+ * DOC: ASPEED GFX Driver
+ *
+ * This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called
+ * the 'SOC Display Controller' in the datasheet. It runs on the ARM-based BMC
+ * itself, unlike the ast driver, which runs on the host CPU and targets the
+ * PCIe graphics device.
+ *
+ * The AST2500 supports a total of 3 output paths:
+ *
+ * 1. VGA output; the output can be routed to the DAC and/or the DVO
+ * interface.
+ *
+ * 2. Graphics CRT output; the output can likewise be routed to the DAC
+ * and/or the DVO interface.
+ *
+ * 3. Video input from DVO; the input can be captured by the video engine
+ * or displayed through the DAC.
+ *
+ * Output options are selected in SCU2C.
+ *
+ * The "VGA mode" device is the PCI-attached controller. The "Graphics CRT"
+ * is the ARM's internal display controller.
+ *
+ * The driver only supports a simple configuration: a 40MHz pixel clock, which
+ * is fixed by hardware limitations, and the VGA output path.
+ *
+ * The driver was written against v17 of the 'AST2500 Software Programming
+ * Guide', which is available under NDA from ASPEED.
+ */
+
+static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
+{
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 800;
+ drm->mode_config.max_height = 600;
+ drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
+}
+
+static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct aspeed_gfx *priv = drm->dev_private;
+ u32 reg;
+
+ reg = readl(priv->base + CRT_CTRL1);
+
+ if (reg & CRT_CTRL_VERTICAL_INTR_STS) {
+ drm_crtc_handle_vblank(&priv->pipe.crtc);
+ writel(reg, priv->base + CRT_CTRL1);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int aspeed_gfx_load(struct drm_device *drm)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct aspeed_gfx *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ drm->dev_private = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ if (IS_ERR(priv->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ return PTR_ERR(priv->scu);
+ }
+
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize reserved mem: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ priv->clk = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev,
+ "missing or invalid clk device tree entry");
+ return PTR_ERR(priv->clk);
+ }
+ clk_prepare_enable(priv->clk);
+
+ /* Sanitize control registers */
+ writel(0, priv->base + CRT_CTRL1);
+ writel(0, priv->base + CRT_CTRL2);
+
+ aspeed_gfx_setup_mode_config(drm);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_output(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to create outputs\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_pipe(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0),
+ aspeed_gfx_irq_handler, 0, "aspeed gfx", drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+}
+
+static void aspeed_gfx_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+
+ drm->dev_private = NULL;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+static struct drm_driver aspeed_gfx_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME | DRIVER_ATOMIC,
+ .gem_create_object = drm_cma_gem_create_object_default_funcs,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+ .fops = &fops,
+ .name = "aspeed-gfx-drm",
+ .desc = "ASPEED GFX DRM",
+ .date = "20180319",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id aspeed_gfx_match[] = {
+ { .compatible = "aspeed,ast2500-gfx" },
+ { }
+};
+
+static int aspeed_gfx_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ ret = aspeed_gfx_load(drm);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ return 0;
+
+err_unload:
+ aspeed_gfx_unload(drm);
+err_free:
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static int aspeed_gfx_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ aspeed_gfx_unload(drm);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_gfx_platform_driver = {
+ .probe = aspeed_gfx_probe,
+ .remove = aspeed_gfx_remove,
+ .driver = {
+ .name = "aspeed_gfx",
+ .of_match_table = aspeed_gfx_match,
+ },
+};
+
+module_platform_driver(aspeed_gfx_platform_driver);
+
+MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
new file mode 100644
index 000000000000..67ee5fa10055
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "aspeed_gfx.h"
+
+static int aspeed_gfx_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static const struct
+drm_connector_helper_funcs aspeed_gfx_connector_helper_funcs = {
+ .get_modes = aspeed_gfx_get_modes,
+};
+
+static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+int aspeed_gfx_create_output(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+ int ret;
+
+ priv->connector.dpms = DRM_MODE_DPMS_OFF;
+ priv->connector.polled = 0;
+ drm_connector_helper_add(&priv->connector,
+ &aspeed_gfx_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &aspeed_gfx_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ return ret;
+}
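With no EDID source available, drm_add_modes_noedid() above simply populates the connector with the standard fallback modes no larger than 800x600. If one of them should also be advertised as preferred, the usual companion call is drm_set_preferred_mode(), as the reworked cirrus driver later in this diff does; a minimal sketch of that variant (aspeed_gfx itself does not do this):

	static int aspeed_gfx_get_modes(struct drm_connector *connector)
	{
		int count = drm_add_modes_noedid(connector, 800, 600);

		/* hypothetical: flag the largest fallback mode as preferred */
		drm_set_preferred_mode(connector, 800, 600);

		return count;
	}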
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index bfc65040dfcb..1cf0c75e411d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -259,7 +259,7 @@ struct ast_framebuffer {
};
struct ast_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct ast_framebuffer afb;
void *sysram;
int size;
@@ -353,8 +353,6 @@ extern int ast_dumb_mmap_offset(struct drm_file *file,
uint32_t handle,
uint64_t *offset);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 2c9f8dd9733a..e718d0f60d6b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -217,8 +217,6 @@ static int astfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out;
}
- info->par = afbdev;
-
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret)
goto out;
@@ -229,15 +227,12 @@ static int astfb_create(struct drm_fb_helper *helper,
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
- strcpy(info->fix.id, "astdrmfb");
-
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c168d62fe8f9..75d477b37854 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -178,7 +178,6 @@ int ast_mm_init(struct ast_private *ast)
ret = ttm_bo_device_init(&ast->ttm.bdev,
&ast_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -344,13 +343,8 @@ int ast_bo_push_sysram(struct ast_bo *bo)
int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct ast_private *ast;
+ struct drm_file *file_priv = filp->private_data;
+ struct ast_private *ast = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- ast = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 03711394f1ed..341cc9d1bab4 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -7,6 +7,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem.h>
@@ -69,11 +70,9 @@ struct bochs_device {
struct edid *edid;
/* drm */
- struct drm_device *dev;
- struct drm_crtc crtc;
- struct drm_encoder encoder;
+ struct drm_device *dev;
+ struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- bool mode_config_initialized;
/* ttm */
struct {
@@ -101,8 +100,6 @@ static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
return container_of(gem, struct bochs_bo, gem);
}
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
{
return drm_vma_node_offset_addr(&bo->bo.vma_node);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9cd82e3631fb..5e905f50449d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -22,76 +22,55 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
-static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static const uint32_t bochs_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGRX8888,
+};
+
+static void bochs_plane_update(struct bochs_device *bochs,
+ struct drm_plane_state *state)
{
- struct bochs_device *bochs =
- container_of(crtc, struct bochs_device, crtc);
+ struct bochs_bo *bo;
- bochs_hw_setmode(bochs, &crtc->mode);
+ if (!state->fb || !bochs->stride)
+ return;
+
+ bo = gem_to_bochs_bo(state->fb->obj[0]);
+ bochs_hw_setbase(bochs,
+ state->crtc_x,
+ state->crtc_y,
+ bo->bo.offset);
+ bochs_hw_setformat(bochs, state->fb->format);
}
-static void bochs_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+
+ bochs_hw_setmode(bochs, &crtc_state->mode);
+ bochs_plane_update(bochs, plane_state);
}
-static void bochs_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct drm_crtc *crtc = &pipe->crtc;
- if (crtc->state && crtc->state->event) {
- unsigned long irqflags;
+ bochs_plane_update(bochs, pipe->plane.state);
- spin_lock_irqsave(&dev->event_lock, irqflags);
- event = crtc->state->event;
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs bochs_crtc_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
- .mode_set_nofb = bochs_crtc_mode_set_nofb,
- .atomic_enable = bochs_crtc_atomic_enable,
- .atomic_flush = bochs_crtc_atomic_flush,
-};
-
-static const uint32_t bochs_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_BGRX8888,
-};
-
-static void bochs_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct bochs_device *bochs = plane->dev->dev_private;
- struct bochs_bo *bo;
-
- if (!plane->state->fb)
- return;
- bo = gem_to_bochs_bo(plane->state->fb->obj[0]);
- bochs_hw_setbase(bochs,
- plane->state->crtc_x,
- plane->state->crtc_y,
- bo->bo.offset);
- bochs_hw_setformat(bochs, plane->state->fb->format);
-}
-
-static int bochs_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
{
struct bochs_bo *bo;
@@ -101,8 +80,8 @@ static int bochs_plane_prepare_fb(struct drm_plane *plane,
return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
}
-static void bochs_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
struct bochs_bo *bo;
@@ -112,73 +91,13 @@ static void bochs_plane_cleanup_fb(struct drm_plane *plane,
bochs_bo_unpin(bo);
}
-static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = {
- .atomic_update = bochs_plane_atomic_update,
- .prepare_fb = bochs_plane_prepare_fb,
- .cleanup_fb = bochs_plane_cleanup_fb,
-};
-
-static const struct drm_plane_funcs bochs_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &bochs_plane_funcs,
- bochs_formats,
- ARRAY_SIZE(bochs_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- return NULL;
- }
-
- drm_plane_helper_add(primary, &bochs_plane_helper_funcs);
- return primary;
-}
-
-static void bochs_crtc_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_crtc *crtc = &bochs->crtc;
- struct drm_plane *primary = bochs_primary_plane(dev);
-
- drm_crtc_init_with_planes(dev, crtc, primary, NULL,
- &bochs_crtc_funcs, NULL);
- drm_crtc_helper_add(crtc, &bochs_helper_funcs);
-}
-
-static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
+static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
+ .enable = bochs_pipe_enable,
+ .update = bochs_pipe_update,
+ .prepare_fb = bochs_pipe_prepare_fb,
+ .cleanup_fb = bochs_pipe_cleanup_fb,
};
-static void bochs_encoder_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_encoder *encoder = &bochs->encoder;
-
- encoder->possible_crtcs = 0x1;
- drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
-}
-
-
static int bochs_connector_get_modes(struct drm_connector *connector)
{
struct bochs_device *bochs =
@@ -214,20 +133,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-bochs_connector_best_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
- .best_encoder = bochs_connector_best_encoder,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
@@ -278,7 +186,6 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
drm_mode_config_init(bochs->dev);
- bochs->mode_config_initialized = true;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -290,11 +197,14 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
- bochs_crtc_init(bochs->dev);
- bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_connector_attach_encoder(&bochs->connector,
- &bochs->encoder);
+ drm_simple_display_pipe_init(bochs->dev,
+ &bochs->pipe,
+ &bochs_pipe_funcs,
+ bochs_formats,
+ ARRAY_SIZE(bochs_formats),
+ NULL,
+ &bochs->connector);
drm_mode_config_reset(bochs->dev);
@@ -303,8 +213,6 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
- if (bochs->mode_config_initialized) {
- drm_mode_config_cleanup(bochs->dev);
- bochs->mode_config_initialized = false;
- }
+ drm_atomic_helper_shutdown(bochs->dev);
+ drm_mode_config_cleanup(bochs->dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 49463348a07a..4a40308169c4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -156,7 +156,6 @@ int bochs_mm_init(struct bochs_device *bochs)
ret = ttm_bo_device_init(&bochs->ttm.bdev,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -264,14 +263,9 @@ int bochs_bo_unpin(struct bochs_bo *bo)
int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct bochs_device *bochs;
+ struct drm_file *file_priv = filp->private_data;
+ struct bochs_device *bochs = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- bochs = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8840f396a7b6..3dff9997f5e3 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -76,7 +76,6 @@ config DRM_PARADE_PS8622
depends on OF
select DRM_PANEL
select DRM_KMS_HELPER
- select BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
Parade eDP-LVDS bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (mode->vrefresh <= 24000)
+ if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25000)
+ else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30000)
+ else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
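The old code compared mode->vrefresh, which holds the refresh rate in Hz, against 24000/25000/30000, so the first branch effectively matched every mode; drm_mode_vrefresh() also returns Hz, computed from the timings when the field is not set. A simplified sketch of that computation (ignoring the interlace/doublescan adjustments the helper applies):

	/* e.g. a 1080p60 mode: clock 148500 kHz, htotal 2200, vtotal 1125 */
	int clock_khz = 148500, htotal = 2200, vtotal = 1125;
	int refresh = DIV_ROUND_CLOSEST(clock_khz * 1000, htotal * vtotal);
	/* 148500000 / 2475000 = 60 Hz, so the <= 24/25/30 comparisons are meaningful */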
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 0805801f4e94..e64736c39a9f 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -234,7 +234,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
*/
static const struct drm_bridge_timings default_dac_timings = {
/* Timing specifications, datasheet page 7 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
.setup_time_ps = 500,
.hold_time_ps = 1500,
};
@@ -245,7 +245,7 @@ static const struct drm_bridge_timings default_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8134_dac_timings = {
/* From timing diagram, datasheet page 9 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 12 */
.setup_time_ps = 3000,
/* I guess this means latched input */
@@ -258,7 +258,7 @@ static const struct drm_bridge_timings ti_ths8134_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8135_dac_timings = {
/* From timing diagram, datasheet page 14 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 16 */
.setup_time_ps = 2000,
.hold_time_ps = 500,
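The renamed flags make explicit which side of the link is being described: drm_bridge_timings.input_bus_flags states when the bridge samples its parallel input, while a source's drm_display_info.bus_flags states when it drives the bus (the tc358767 hunk below uses the DRIVE_* spelling for exactly that reason). A hedged sketch of the two viewpoints, with the surrounding structures assumed rather than shown:

	/* receiver's view: this DAC latches pixel data on the rising clock edge */
	timings.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE;

	/* source's view: the encoder drives pixel data on the falling edge */
	info.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;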
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 888980d4bc74..e570c9dee180 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1222,8 +1222,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
&bus_format, 1);
tc->connector.display_info.bus_flags =
DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE |
- DRM_BUS_FLAG_SYNC_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 7bfb4f338813..8b0e71bd3ca7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -27,10 +27,16 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
+ unsigned int connector_type;
+ u32 bus_format;
struct i2c_adapter *ddc;
struct gpio_desc *hpd;
+ int hpd_irq;
struct delayed_work hpd_work;
+ struct gpio_desc *powerdown;
+
+ struct drm_bridge_timings timings;
struct device *dev;
};
@@ -120,26 +126,47 @@ static int tfp410_attach(struct drm_bridge *bridge)
return -ENODEV;
}
- if (dvi->hpd)
+ if (dvi->hpd_irq >= 0)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init(bridge->dev, &dvi->connector,
- &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ &tfp410_con_funcs, dvi->connector_type);
if (ret) {
dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
return ret;
}
+ drm_display_info_set_bus_formats(&dvi->connector.display_info,
+ &dvi->bus_format, 1);
+
drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
}
+static void tfp410_enable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 0);
+}
+
+static void tfp410_disable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 1);
+}
+
static const struct drm_bridge_funcs tfp410_bridge_funcs = {
.attach = tfp410_attach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
};
static void tfp410_hpd_work_func(struct work_struct *work)
@@ -162,6 +189,83 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
+static const struct drm_bridge_timings tfp410_default_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+ .setup_time_ps = 1200,
+ .hold_time_ps = 1300,
+};
+
+static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
+{
+ struct drm_bridge_timings *timings = &dvi->timings;
+ struct device_node *ep;
+ u32 pclk_sample = 0;
+ u32 bus_width = 24;
+ s32 deskew = 0;
+
+ /* Start with defaults. */
+ *timings = tfp410_default_timings;
+
+ if (i2c)
+ /*
+ * In I2C mode timings are configured through the I2C interface.
+ * As the driver doesn't support I2C configuration yet, we just
+ * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1).
+ */
+ return 0;
+
+ /*
+ * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN
+ * and EDGE pins. They are specified in DT through endpoint properties
+ * and vendor-specific properties.
+ */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ /* Get the sampling edge from the endpoint. */
+ of_property_read_u32(ep, "pclk-sample", &pclk_sample);
+ of_property_read_u32(ep, "bus-width", &bus_width);
+ of_node_put(ep);
+
+ timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH;
+
+ switch (pclk_sample) {
+ case 0:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
+ break;
+ case 1:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bus_width) {
+ case 12:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_2X12_LE;
+ break;
+ case 24:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Get the setup and hold time from vendor-specific properties. */
+ of_property_read_u32(dvi->dev->of_node, "ti,deskew", (u32 *)&deskew);
+ if (deskew < -4 || deskew > 3)
+ return -EINVAL;
+
+ timings->setup_time_ps = min(0, 1200 - 350 * deskew);
+ timings->hold_time_ps = min(0, 1300 + 350 * deskew);
+
+ return 0;
+}
+
static int tfp410_get_connector_properties(struct tfp410 *dvi)
{
struct device_node *connector_node, *ddc_phandle;
@@ -172,6 +276,11 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
if (!connector_node)
return -ENODEV;
+ if (of_device_is_compatible(connector_node, "hdmi-connector"))
+ dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ else
+ dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
+
dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
"hpd-gpios", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
@@ -200,7 +309,7 @@ fail:
return ret;
}
-static int tfp410_init(struct device *dev)
+static int tfp410_init(struct device *dev, bool i2c)
{
struct tfp410 *dvi;
int ret;
@@ -217,16 +326,33 @@ static int tfp410_init(struct device *dev)
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
+ dvi->bridge.timings = &dvi->timings;
dvi->dev = dev;
+ ret = tfp410_parse_timings(dvi, i2c);
+ if (ret)
+ goto fail;
+
ret = tfp410_get_connector_properties(dvi);
if (ret)
goto fail;
- if (dvi->hpd) {
+ dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(dvi->powerdown)) {
+ dev_err(dev, "failed to parse powerdown gpio\n");
+ return PTR_ERR(dvi->powerdown);
+ }
+
+ if (dvi->hpd)
+ dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
+ else
+ dvi->hpd_irq = -ENXIO;
+
+ if (dvi->hpd_irq >= 0) {
INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
- ret = devm_request_threaded_irq(dev, gpiod_to_irq(dvi->hpd),
+ ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi-hpd", dvi);
@@ -264,7 +390,7 @@ static int tfp410_fini(struct device *dev)
static int tfp410_probe(struct platform_device *pdev)
{
- return tfp410_init(&pdev->dev);
+ return tfp410_init(&pdev->dev, false);
}
static int tfp410_remove(struct platform_device *pdev)
@@ -301,7 +427,7 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return -ENXIO;
}
- return tfp410_init(&client->dev);
+ return tfp410_init(&client->dev, true);
}
static int tfp410_i2c_remove(struct i2c_client *client)
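To put numbers on the ti,deskew handling in tfp410_parse_timings() above (a worked sketch, not taken from the patch): each deskew step nominally shifts the latching point by 350 ps away from the 1200 ps setup / 1300 ps hold defaults. Over the permitted range the expressions evaluate as follows, which suggests the min(0, ...) clamps, which can only ever produce zero or a negative time, were meant to be max(0, ...):

	int deskew;				/* ti,deskew property, -4 .. 3 */

	for (deskew = -4; deskew <= 3; deskew++) {
		int setup_ps = 1200 - 350 * deskew;	/* ranges 2600 .. 150  */
		int hold_ps  = 1300 + 350 * deskew;	/* ranges -100 .. 2350 */
		/* min(0, setup_ps) is always 0; min(0, hold_ps) is 0 or -100 */
	}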
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index fc78c90ee931..dd4f52a0bc1c 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -2,7 +2,7 @@ config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
index 919c0a336c97..acf8971d37a1 100644
--- a/drivers/gpu/drm/cirrus/Makefile
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -1,4 +1 @@
-cirrus-y := cirrus_main.o cirrus_mode.o \
- cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
-
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
new file mode 100644
index 000000000000..be4ea370ba31
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -0,0 +1,657 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2012-2019 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ * Gerd Hoffmann
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <video/cirrus.h>
+#include <video/vga.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu cirrus vga"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 0
+
+#define CIRRUS_MAX_PITCH (0x1FF << 3) /* (4096 - 1) & ~111b bytes */
+#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
+
+struct cirrus_device {
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector conn;
+ unsigned int cpp;
+ unsigned int pitch;
+ void __iomem *vram;
+ void __iomem *mmio;
+};
+
+/* ------------------------------------------------------------------ */
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ return ioread8(cirrus->mmio + SEQ_DATA);
+}
+
+static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ iowrite8(val, cirrus->mmio + SEQ_DATA);
+}
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ return ioread8(cirrus->mmio + CRT_DATA);
+}
+
+static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ iowrite8(val, cirrus->mmio + CRT_DATA);
+}
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + GFX_INDEX);
+ iowrite8(val, cirrus->mmio + GFX_DATA);
+}
+
+#define VGA_DAC_MASK 0x06
+
+static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
+{
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
+}
+
+static int cirrus_convert_to(struct drm_framebuffer *fb)
+{
+ if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
+ if (fb->width * 3 <= CIRRUS_MAX_PITCH)
+ /* convert from XR24 to RG24 */
+ return 3;
+ else
+ /* convert from XR24 to RG16 */
+ return 2;
+ }
+ return 0;
+}
+
+static int cirrus_cpp(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp;
+ return fb->format->cpp[0];
+}
+
+static int cirrus_pitch(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp * fb->width;
+ return fb->pitches[0];
+}
+
+static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
+{
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
+ wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
+
+ tmp = rreg_crt(cirrus, 0x1b);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ tmp = rreg_crt(cirrus, 0x1d);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ wreg_crt(cirrus, 0x1d, tmp);
+}
+
+static int cirrus_mode_set(struct cirrus_device *cirrus,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb)
+{
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
+ wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
+ wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
+ wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 0x10;
+ if (vtotal & 0x100)
+ tmp |= 0x01;
+ if (vdispend & 0x100)
+ tmp |= 0x02;
+ if ((vdispend + 1) & 0x100)
+ tmp |= 0x08;
+ if (vtotal & 0x200)
+ tmp |= 0x20;
+ if (vdispend & 0x200)
+ tmp |= 0x40;
+ wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 0x40)
+ tmp |= 0x10;
+ if ((htotal + 5) & 0x80)
+ tmp |= 0x20;
+ if (vtotal & 0x100)
+ tmp |= 0x40;
+ if (vtotal & 0x200)
+ tmp |= 0x80;
+
+ wreg_crt(cirrus, CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
+
+ sr07 = rreg_seq(cirrus, 0x07);
+ sr07 &= 0xe0;
+ hdr = 0;
+
+ cirrus->cpp = cirrus_cpp(fb);
+ switch (cirrus->cpp * 8) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0x17;
+ hdr = 0xc1;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ wreg_seq(cirrus, 0x7, sr07);
+
+ /* Program the pitch */
+ cirrus->pitch = cirrus_pitch(fb);
+ tmp = cirrus->pitch / 8;
+ wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (cirrus->pitch >> 7) & 0x10;
+ tmp |= (cirrus->pitch >> 6) & 0x40;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ /* Enable high-colour modes */
+ wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
+
+ wreg_hdr(cirrus, hdr);
+
+ cirrus_set_start_address(cirrus, 0);
+
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(0x20, 0x3c0);
+ return 0;
+}
+
+static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+ struct drm_rect *rect)
+{
+ struct cirrus_device *cirrus = fb->dev->dev_private;
+ void *vmap;
+
+ vmap = drm_gem_shmem_vmap(fb->obj[0]);
+ if (!vmap)
+ return -ENOMEM;
+
+ if (cirrus->cpp == fb->format->cpp[0])
+ drm_fb_memcpy_dstclip(cirrus->vram,
+ vmap, fb, rect);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
+ drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect, false);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
+ drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect);
+
+ else
+ WARN_ON_ONCE("cpp mismatch");
+
+ drm_gem_shmem_vunmap(fb->obj[0], vmap);
+ return 0;
+}
+
+static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return cirrus_fb_blit_rect(fb, &fullscreen);
+}
+
+static int cirrus_check_size(int width, int height,
+ struct drm_framebuffer *fb)
+{
+ int pitch = width * 2;
+
+ if (fb)
+ pitch = cirrus_pitch(fb);
+
+ if (pitch > CIRRUS_MAX_PITCH)
+ return -EINVAL;
+ if (pitch * height > CIRRUS_VRAM_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus connector */
+
+static int cirrus_conn_get_modes(struct drm_connector *conn)
+{
+ int count;
+
+ count = drm_add_modes_noedid(conn,
+ conn->dev->mode_config.max_width,
+ conn->dev->mode_config.max_height);
+ drm_set_preferred_mode(conn, 1024, 768);
+ return count;
+}
+
+static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
+ .get_modes = cirrus_conn_get_modes,
+};
+
+static const struct drm_connector_funcs cirrus_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cirrus_conn_init(struct cirrus_device *cirrus)
+{
+ drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
+ return drm_connector_init(&cirrus->dev, &cirrus->conn,
+ &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus (simple) display pipe */
+
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
+ return MODE_BAD;
+ return MODE_OK;
+}
+
+static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (!fb)
+ return 0;
+ return cirrus_check_size(fb->width, fb->height, fb);
+}
+
+static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+
+ cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
+ cirrus_fb_blit_fullscreen(plane_state->fb);
+}
+
+static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_rect rect;
+
+ if (pipe->plane.state->fb &&
+ cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
+ cirrus_mode_set(cirrus, &crtc->mode,
+ pipe->plane.state->fb);
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
+ .mode_valid = cirrus_pipe_mode_valid,
+ .check = cirrus_pipe_check,
+ .enable = cirrus_pipe_enable,
+ .update = cirrus_pipe_update,
+};
+
+static const uint32_t cirrus_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t cirrus_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int cirrus_pipe_init(struct cirrus_device *cirrus)
+{
+ return drm_simple_display_pipe_init(&cirrus->dev,
+ &cirrus->pipe,
+ &cirrus_pipe_funcs,
+ cirrus_formats,
+ ARRAY_SIZE(cirrus_formats),
+ cirrus_modifiers,
+ &cirrus->conn);
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus framebuffers & mode config */
+
+static struct drm_framebuffer*
+cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
+ mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
+ mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-EINVAL);
+ if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
+ return ERR_PTR(-EINVAL);
+ return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
+ .fb_create = cirrus_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = &cirrus->dev;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
+ dev->mode_config.max_height = 1024;
+ dev->mode_config.preferred_depth = 16;
+ dev->mode_config.prefer_shadow = 0;
+ dev->mode_config.funcs = &cirrus_mode_config_funcs;
+}
+
+/* ------------------------------------------------------------------ */
+
+DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+
+static struct drm_driver cirrus_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_PRIME,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &cirrus_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ struct cirrus_device *cirrus;
+ int ret;
+
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ if (ret)
+ return ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
+ if (cirrus == NULL)
+ goto err_pci_release;
+
+ dev = &cirrus->dev;
+ ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
+ if (ret)
+ goto err_free_cirrus;
+ dev->dev_private = cirrus;
+
+ ret = -ENOMEM;
+ cirrus->vram = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (cirrus->vram == NULL)
+ goto err_dev_put;
+
+ cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (cirrus->mmio == NULL)
+ goto err_unmap_vram;
+
+ cirrus_mode_config_init(cirrus);
+
+ ret = cirrus_conn_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cirrus_pipe_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ drm_mode_config_reset(dev);
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_cleanup;
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ return 0;
+
+err_cleanup:
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+err_unmap_vram:
+ iounmap(cirrus->vram);
+err_dev_put:
+ drm_dev_put(dev);
+err_free_cirrus:
+ kfree(cirrus);
+err_pci_release:
+ pci_release_regions(pdev);
+ return ret;
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_dev_unregister(dev);
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+ iounmap(cirrus->vram);
+ drm_dev_put(dev);
+ kfree(cirrus);
+ pci_release_regions(pdev);
+}
+
+static const struct pci_device_id pciidlist[] = {
+ {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ /* only bind to the cirrus chip in qemu */
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
+ }, {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ .subvendor = PCI_VENDOR_ID_XEN,
+ .subdevice = 0x0001,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+ if (vgacon_text_force())
+ return -EINVAL;
+ return pci_register_driver(&cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ pci_unregister_driver(&cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_LICENSE("GPL");
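The pitch-driven format conversion in cirrus_convert_to()/cirrus_cpp() is easiest to see with concrete sizes; the following values are worked out from the expressions in the patch (CIRRUS_MAX_PITCH = 0x1ff << 3 = 4088 bytes):

	/*  800x600  XRGB8888: pitch  800 * 4 = 3200 <= 4088 -> plain memcpy blit */
	/* 1024x768  XRGB8888: pitch 1024 * 4 = 4096 >  4088, but 1024 * 3 = 3072
	 *                     fits -> blitted as RGB888 (cpp 3)                  */
	/* 1600x900  XRGB8888: 1600 * 3 = 4800 is still too wide
	 *                     -> blitted as RGB565 (cpp 2, pitch 3200)           */

So even the preferred 1024x768 mode cannot be scanned out at 32bpp, which is presumably why mode_config.preferred_depth is left at 16 above.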
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
deleted file mode 100644
index 8ec880f3a322..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2012 Red Hat <mjg@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <linux/console.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "cirrus_drv.h"
-
-int cirrus_modeset = -1;
-int cirrus_bpp = 16;
-
-MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
-module_param_named(modeset, cirrus_modeset, int, 0400);
-MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:16)");
-module_param_named(bpp, cirrus_bpp, int, 0400);
-
-/*
- * This is the generic driver code. This binds the driver to the drm core,
- * which then performs further device association and calls our graphics init
- * functions
- */
-
-static struct drm_driver driver;
-
-/* only bind to the cirrus chip in qemu */
-static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
- PCI_SUBVENDOR_ID_REDHAT_QUMRANET, PCI_SUBDEVICE_ID_QEMU,
- 0, 0, 0 },
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
- 0x0001, 0, 0, 0 },
- {0,}
-};
-
-
-static int cirrus_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret;
-
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
- if (ret)
- return ret;
-
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void cirrus_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int cirrus_pm_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_kms_helper_poll_disable(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
- console_unlock();
- }
-
- return 0;
-}
-
-static int cirrus_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_helper_resume_force_mode(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
- console_unlock();
- }
-
- drm_kms_helper_poll_enable(drm_dev);
- return 0;
-}
-#endif
-
-static const struct file_operations cirrus_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = cirrus_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
-};
-static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = cirrus_driver_load,
- .unload = cirrus_driver_unload,
- .fops = &cirrus_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object_unlocked = cirrus_gem_free_object,
- .dumb_create = cirrus_dumb_create,
- .dumb_map_offset = cirrus_dumb_mmap_offset,
-};
-
-static const struct dev_pm_ops cirrus_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
- cirrus_pm_resume)
-};
-
-static struct pci_driver cirrus_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = cirrus_pci_probe,
- .remove = cirrus_pci_remove,
- .driver.pm = &cirrus_pm_ops,
-};
-
-static int __init cirrus_init(void)
-{
- if (vgacon_text_force() && cirrus_modeset == -1)
- return -EINVAL;
-
- if (cirrus_modeset == 0)
- return -EINVAL;
- return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
- pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index f2b2e0d169fa..1bd816be3aae 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -101,7 +101,6 @@ struct cirrus_crtc {
struct cirrus_fbdev;
struct cirrus_mode_info {
- bool mode_config_initialized;
struct cirrus_crtc *crtc;
/* pointer to fbdev info structure */
struct cirrus_fbdev *gfbdev;
@@ -143,7 +142,7 @@ struct cirrus_device {
struct cirrus_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer *gfb;
void *sysram;
int size;
@@ -169,7 +168,6 @@ cirrus_bo(struct ttm_buffer_object *bo)
#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
/* cirrus_main.c */
int cirrus_device_init(struct cirrus_device *cdev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
deleted file mode 100644
index 39df62acac69..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "cirrus_drv.h"
-
-static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
- int src_offset, dst_offset;
- int bpp = afbdev->gfb->format->cpp[0];
- int ret = -EBUSY;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- obj = afbdev->gfb->obj[0];
- bo = gem_to_cirrus_bo(obj);
-
- /*
- * try and reserve the BO, if we fail with busy
- * then the BO is being moved and we should
- * store up the damage until later.
- */
- if (drm_can_sleep())
- ret = cirrus_bo_reserve(bo, true);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&afbdev->dirty_lock, flags);
-
- if (afbdev->y1 < y)
- y = afbdev->y1;
- if (afbdev->y2 > y2)
- y2 = afbdev->y2;
- if (afbdev->x1 < x)
- x = afbdev->x1;
- if (afbdev->x2 > x2)
- x2 = afbdev->x2;
-
- if (store_for_later) {
- afbdev->x1 = x;
- afbdev->x2 = x2;
- afbdev->y1 = y;
- afbdev->y2 = y2;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- return;
- }
-
- afbdev->x1 = afbdev->y1 = INT_MAX;
- afbdev->x2 = afbdev->y2 = 0;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
-
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- DRM_ERROR("failed to kmap fb updates\n");
- cirrus_bo_unreserve(bo);
- return;
- }
- unmap = true;
- }
- for (i = y; i < y + height; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
-
- }
- if (unmap)
- ttm_bo_kunmap(&bo->kmap);
-
- cirrus_bo_unreserve(bo);
-}
-
-static void cirrus_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void cirrus_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void cirrus_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-
-static struct fb_ops cirrusfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cirrus_fillrect,
- .fb_copyarea = cirrus_copyarea,
- .fb_imageblit = cirrus_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- struct cirrus_device *cdev = dev->dev_private;
- u32 bpp;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return -EINVAL;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = cirrus_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int cirrusfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct cirrus_fbdev *gfbdev =
- container_of(helper, struct cirrus_fbdev, helper);
- struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- void *sysram;
- struct drm_gem_object *gobj = NULL;
- int size, ret;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram)
- return -ENOMEM;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
- }
-
- info->par = gfbdev;
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- ret = -ENOMEM;
- goto err_drm_gem_object_put_unlocked;
- }
-
- ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
- if (ret)
- goto err_kfree;
-
- gfbdev->sysram = sysram;
- gfbdev->size = size;
- gfbdev->gfb = fb;
-
- /* setup helper */
- gfbdev->helper.fb = fb;
-
- strcpy(info->fix.id, "cirrusdrmfb");
-
- info->fbops = &cirrusfb_ops;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
- sizes->fb_height);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
- info->apertures->ranges[0].size = cdev->mc.vram_size;
-
- info->fix.smem_start = cdev->dev->mode_config.fb_base;
- info->fix.smem_len = cdev->mc.vram_size;
-
- info->screen_base = sysram;
- info->screen_size = size;
-
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
- DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- return 0;
-
-err_kfree:
- kfree(fb);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(gobj);
-err_vfree:
- vfree(sysram);
- return ret;
-}
-
-static int cirrus_fbdev_destroy(struct drm_device *dev,
- struct cirrus_fbdev *gfbdev)
-{
- struct drm_framebuffer *gfb = gfbdev->gfb;
-
- drm_helper_force_disable_all(dev);
-
- drm_fb_helper_unregister_fbi(&gfbdev->helper);
-
- vfree(gfbdev->sysram);
- drm_fb_helper_fini(&gfbdev->helper);
- if (gfb)
- drm_framebuffer_put(gfb);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
- .fb_probe = cirrusfb_create,
-};
-
-int cirrus_fbdev_init(struct cirrus_device *cdev)
-{
- struct cirrus_fbdev *gfbdev;
- int ret;
-
- /*bpp_sel = 8;*/
- gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
- if (!gfbdev)
- return -ENOMEM;
-
- cdev->mode_info.gfbdev = gfbdev;
- spin_lock_init(&gfbdev->dirty_lock);
-
- drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
- &cirrus_fb_helper_funcs);
-
- ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
- CIRRUSFB_CONN_LIMIT);
- if (ret)
- return ret;
-
- ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
- if (ret)
- return ret;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(cdev->dev);
-
- return drm_fb_helper_initial_config(&gfbdev->helper, cirrus_bpp);
-}
-
-void cirrus_fbdev_fini(struct cirrus_device *cdev)
-{
- if (!cdev->mode_info.gfbdev)
- return;
-
- cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
- kfree(cdev->mode_info.gfbdev);
- cdev->mode_info.gfbdev = NULL;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
deleted file mode 100644
index 57f8fe6d020b..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "cirrus_drv.h"
-
-static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = drm_gem_fb_create_handle,
- .destroy = drm_gem_fb_destroy,
-};
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
- gfb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-cirrus_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
- u32 bpp;
- int ret;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return ERR_PTR(-EINVAL);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
-}
-
-static const struct drm_mode_config_funcs cirrus_mode_funcs = {
- .fb_create = cirrus_user_framebuffer_create,
-};
-
-/* Unmap the framebuffer from the core and release the memory */
-static void cirrus_vram_fini(struct cirrus_device *cdev)
-{
- iounmap(cdev->rmmio);
- cdev->rmmio = NULL;
- if (cdev->mc.vram_base)
- release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
-}
-
-/* Map the framebuffer from the card and configure the core */
-static int cirrus_vram_init(struct cirrus_device *cdev)
-{
- /* BAR 0 is VRAM */
- cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
- cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
-
- if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
- "cirrusdrmfb_vram")) {
- DRM_ERROR("can't reserve VRAM\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-/*
- * Our emulated hardware has two sets of memory. One is video RAM and can
- * simply be used as a linear framebuffer - the other provides mmio access
- * to the display registers. The latter can also be accessed via IO port
- * access, but we map the range and use mmio to program them instead
- */
-
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev, uint32_t flags)
-{
- int ret;
-
- cdev->dev = ddev;
- cdev->flags = flags;
-
- /* Hardcode the number of CRTCs to 1 */
- cdev->num_crtc = 1;
-
- /* BAR 0 is the framebuffer, BAR 1 contains registers */
- cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
- cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
-
- if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
- "cirrusdrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
- return -ENOMEM;
- }
-
- cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
-
- if (cdev->rmmio == NULL)
- return -ENOMEM;
-
- ret = cirrus_vram_init(cdev);
- if (ret) {
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_device_fini(struct cirrus_device *cdev)
-{
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- cirrus_vram_fini(cdev);
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct cirrus_device *cdev;
- int r;
-
- cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
- if (cdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)cdev;
-
- r = cirrus_device_init(cdev, dev, dev->pdev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- goto out;
- }
-
- r = cirrus_mm_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "fatal err on mm init\n");
- goto out;
- }
-
- /*
- * cirrus_modeset_init() is initializing/registering the emulated fbdev
- * and DRM internals can access/test some of the fields in
- * mode_config->funcs as part of the fbdev registration process.
- * Make sure dev->mode_config.funcs is properly set to avoid
- * dereferencing a NULL pointer.
- * FIXME: mode_config.funcs assignment should probably be done in
- * cirrus_modeset_init() (that's a common pattern seen in other DRM
- * drivers).
- */
- dev->mode_config.funcs = &cirrus_mode_funcs;
- r = cirrus_modeset_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto out;
- }
-
- return 0;
-out:
- cirrus_driver_unload(dev);
- return r;
-}
-
-void cirrus_driver_unload(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
-
- if (cdev == NULL)
- return;
- cirrus_modeset_fini(cdev);
- cirrus_mm_fini(cdev);
- cirrus_device_fini(cdev);
- kfree(cdev);
- dev->dev_private = NULL;
-}
-
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct cirrus_bo *cirrusbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &cirrusbo->gem;
- return 0;
-}
-
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- int ret;
- struct drm_gem_object *gobj;
- u32 handle;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = cirrus_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void cirrus_bo_unref(struct cirrus_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-void cirrus_gem_free_object(struct drm_gem_object *obj)
-{
- struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
-
- cirrus_bo_unref(&cirrus_bo);
-}
-
-
-static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int
-cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_cirrus_bo(obj);
- *offset = cirrus_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-}
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch)
-{
- const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
- const int max_size = cdev->mc.vram_size;
-
- if (bpp > cirrus_bpp)
- return false;
- if (bpp > 32)
- return false;
-
- if (pitch > max_pitch)
- return false;
-
- if (pitch * height > max_size)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
deleted file mode 100644
index 7f9bc32af685..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ /dev/null
@@ -1,621 +0,0 @@
-
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- *
- * Portions of this code derived from cirrusfb.c:
- * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
- *
- * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include <video/cirrus.h>
-
-#include "cirrus_drv.h"
-
-#define CIRRUS_LUT_SIZE 256
-
-#define PALETTE_INDEX 0x8
-#define PALETTE_DATA 0x9
-
-/*
- * This file contains setup code for the CRTC.
- */
-
-/*
- * The DRM core requires DPMS functions, but they make little sense in our
- * case and so are just stubs
- */
-
-static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u8 sr01, gr0e;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- sr01 = 0x00;
- gr0e = 0x00;
- break;
- case DRM_MODE_DPMS_STANDBY:
- sr01 = 0x20;
- gr0e = 0x02;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- sr01 = 0x20;
- gr0e = 0x04;
- break;
- case DRM_MODE_DPMS_OFF:
- sr01 = 0x20;
- gr0e = 0x06;
- break;
- default:
- return;
- }
-
- WREG8(SEQ_INDEX, 0x1);
- sr01 |= RREG8(SEQ_DATA) & ~0x20;
- WREG_SEQ(0x1, sr01);
-
- WREG8(GFX_INDEX, 0xe);
- gr0e |= RREG8(GFX_DATA) & ~0x06;
- WREG_GFX(0xe, gr0e);
-}
-
-static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- u32 addr;
- u8 tmp;
-
- addr = offset >> 2;
- WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
- WREG_CRT(0x0d, (u8)(addr & 0xff));
-
- WREG8(CRT_INDEX, 0x1b);
- tmp = RREG8(CRT_DATA);
- tmp &= 0xf2;
- tmp |= (addr >> 16) & 0x01;
- tmp |= (addr >> 15) & 0x0c;
- WREG_CRT(0x1b, tmp);
- WREG8(CRT_INDEX, 0x1d);
- tmp = RREG8(CRT_DATA);
- tmp &= 0x7f;
- tmp |= (addr >> 12) & 0x80;
- WREG_CRT(0x1d, tmp);
-}
-
-/* cirrus is different - we will force move buffers out of VRAM */
-static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- struct cirrus_bo *bo;
- int ret;
- u64 gpu_addr;
-
- /* push the previous fb to system ram */
- if (!atomic && fb) {
- bo = gem_to_cirrus_bo(fb->obj[0]);
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
- cirrus_bo_push_sysram(bo);
- cirrus_bo_unreserve(bo);
- }
-
- bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
-
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- cirrus_bo_unreserve(bo);
- return ret;
- }
-
- if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
- /* if pushing console in kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
- DRM_ERROR("failed to kmap fbcon\n");
- }
- cirrus_bo_unreserve(bo);
-
- cirrus_set_start_address(crtc, (u32)gpu_addr);
- return 0;
-}
-
-static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-/*
- * The meat of this driver. The core passes us a mode and we have to program
- * it. The modesetting here is the bare minimum required to satisfy the qemu
- * emulation of this hardware, and running this against a real device is
- * likely to result in an inadequately programmed mode. We've already had
- * the opportunity to modify the mode, so whatever we receive here should
- * be something that can be correctly programmed and displayed
- */
-static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- int hsyncstart, hsyncend, htotal, hdispend;
- int vtotal, vdispend;
- int tmp;
- int sr07 = 0, hdr = 0;
-
- htotal = mode->htotal / 8;
- hsyncend = mode->hsync_end / 8;
- hsyncstart = mode->hsync_start / 8;
- hdispend = mode->hdisplay / 8;
-
- vtotal = mode->vtotal;
- vdispend = mode->vdisplay;
-
- vdispend -= 1;
- vtotal -= 2;
-
- htotal -= 5;
- hdispend -= 1;
- hsyncstart += 1;
- hsyncend += 1;
-
- WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
- WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
- WREG_CRT(VGA_CRTC_H_DISP, hdispend);
- WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
- WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
- WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
- WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
-
- tmp = 0x40;
- if ((vdispend + 1) & 512)
- tmp |= 0x20;
- WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
-
- /*
- * Overflow bits for values that don't fit in the standard registers
- */
- tmp = 16;
- if (vtotal & 256)
- tmp |= 1;
- if (vdispend & 256)
- tmp |= 2;
- if ((vdispend + 1) & 256)
- tmp |= 8;
- if (vtotal & 512)
- tmp |= 32;
- if (vdispend & 512)
- tmp |= 64;
- WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
-
- tmp = 0;
-
- /* More overflow bits */
-
- if ((htotal + 5) & 64)
- tmp |= 16;
- if ((htotal + 5) & 128)
- tmp |= 32;
- if (vtotal & 256)
- tmp |= 64;
- if (vtotal & 512)
- tmp |= 128;
-
- WREG_CRT(CL_CRT1A, tmp);
-
- /* Disable Hercules/CGA compatibility */
- WREG_CRT(VGA_CRTC_MODE, 0x03);
-
- WREG8(SEQ_INDEX, 0x7);
- sr07 = RREG8(SEQ_DATA);
- sr07 &= 0xe0;
- hdr = 0;
- switch (fb->format->cpp[0] * 8) {
- case 8:
- sr07 |= 0x11;
- break;
- case 16:
- sr07 |= 0x17;
- hdr = 0xc1;
- break;
- case 24:
- sr07 |= 0x15;
- hdr = 0xc5;
- break;
- case 32:
- sr07 |= 0x19;
- hdr = 0xc5;
- break;
- default:
- return -1;
- }
-
- WREG_SEQ(0x7, sr07);
-
- /* Program the pitch */
- tmp = fb->pitches[0] / 8;
- WREG_CRT(VGA_CRTC_OFFSET, tmp);
-
- /* Enable extended blanking and pitch bits, and enable full memory */
- tmp = 0x22;
- tmp |= (fb->pitches[0] >> 7) & 0x10;
- tmp |= (fb->pitches[0] >> 6) & 0x40;
- WREG_CRT(0x1b, tmp);
-
- /* Enable high-colour modes */
- WREG_GFX(VGA_GFX_MODE, 0x40);
-
- /* And set graphics mode */
- WREG_GFX(VGA_GFX_MISC, 0x01);
-
- WREG_HDR(hdr);
- cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-
- /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
- outb(0x20, 0x3c0);
- return 0;
-}
-
-/*
- * This is called before a mode is programmed. A typical use might be to
- * enable DPMS during the programming to avoid seeing intermediate stages,
- * but that's not relevant to us
- */
-static void cirrus_crtc_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u16 *r, *g, *b;
- int i;
-
- if (!crtc->enabled)
- return;
-
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(PALETTE_INDEX, i);
- WREG8(PALETTE_DATA, *r++ >> 8);
- WREG8(PALETTE_DATA, *g++ >> 8);
- WREG8(PALETTE_DATA, *b++ >> 8);
- }
-}
-
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
- cirrus_crtc_load_lut(crtc);
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- cirrus_crtc_load_lut(crtc);
-
- return 0;
-}
-
-/* Simple cleanup function */
-static void cirrus_crtc_destroy(struct drm_crtc *crtc)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(cirrus_crtc);
-}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs cirrus_crtc_funcs = {
- .gamma_set = cirrus_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = cirrus_crtc_destroy,
-};
-
-static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
- .dpms = cirrus_crtc_dpms,
- .mode_set = cirrus_crtc_mode_set,
- .mode_set_base = cirrus_crtc_mode_set_base,
- .prepare = cirrus_crtc_prepare,
- .commit = cirrus_crtc_commit,
-};
-
-/* CRTC setup */
-static const uint32_t cirrus_formats_16[] = {
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_24[] = {
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_32[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static struct drm_plane *cirrus_primary_plane(struct drm_device *dev)
-{
- const uint32_t *formats;
- uint32_t nformats;
- struct drm_plane *primary;
- int ret;
-
- switch (cirrus_bpp) {
- case 16:
- formats = cirrus_formats_16;
- nformats = ARRAY_SIZE(cirrus_formats_16);
- break;
- case 24:
- formats = cirrus_formats_24;
- nformats = ARRAY_SIZE(cirrus_formats_24);
- break;
- case 32:
- formats = cirrus_formats_32;
- nformats = ARRAY_SIZE(cirrus_formats_32);
- break;
- default:
- return NULL;
- }
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- formats, nformats,
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
-
-static void cirrus_crtc_init(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct cirrus_crtc *cirrus_crtc;
- struct drm_plane *primary;
-
- cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
- (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
-
- if (cirrus_crtc == NULL)
- return;
-
- primary = cirrus_primary_plane(dev);
- if (primary == NULL) {
- kfree(cirrus_crtc);
- return;
- }
-
- drm_crtc_init_with_planes(dev, &cirrus_crtc->base,
- primary, NULL,
- &cirrus_crtc_funcs, NULL);
-
- drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
- cdev->mode_info.crtc = cirrus_crtc;
-
- drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
-}
-
-static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void cirrus_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_destroy(struct drm_encoder *encoder)
-{
- struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(cirrus_encoder);
-}
-
-static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
- .dpms = cirrus_encoder_dpms,
- .mode_set = cirrus_encoder_mode_set,
- .prepare = cirrus_encoder_prepare,
- .commit = cirrus_encoder_commit,
-};
-
-static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
- .destroy = cirrus_encoder_destroy,
-};
-
-static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct cirrus_encoder *cirrus_encoder;
-
- cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
- if (!cirrus_encoder)
- return NULL;
-
- encoder = &cirrus_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
-
- return encoder;
-}
-
-
-static int cirrus_vga_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Just add a static list of modes */
- if (cirrus_bpp <= 24) {
- count = drm_add_modes_noedid(connector, 1280, 1024);
- drm_set_preferred_mode(connector, 1024, 768);
- } else {
- count = drm_add_modes_noedid(connector, 800, 600);
- drm_set_preferred_mode(connector, 800, 600);
- }
- return count;
-}
-
-static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-static void cirrus_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
-static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
- .get_modes = cirrus_vga_get_modes,
- .best_encoder = cirrus_connector_best_encoder,
-};
-
-static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = cirrus_connector_destroy,
-};
-
-static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct cirrus_connector *cirrus_connector;
-
- cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
- if (!cirrus_connector)
- return NULL;
-
- connector = &cirrus_connector->base;
-
- drm_connector_init(dev, connector,
- &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
- drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
-
- drm_connector_register(connector);
- return connector;
-}
-
-
-int cirrus_modeset_init(struct cirrus_device *cdev)
-{
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- drm_mode_config_init(cdev->dev);
- cdev->mode_info.mode_config_initialized = true;
-
- cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
- cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
-
- cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
- cdev->dev->mode_config.preferred_depth = cirrus_bpp;
- /* don't prefer a shadow on virt GPU */
- cdev->dev->mode_config.prefer_shadow = 0;
-
- cirrus_crtc_init(cdev->dev);
-
- encoder = cirrus_encoder_init(cdev->dev);
- if (!encoder) {
- DRM_ERROR("cirrus_encoder_init failed\n");
- return -1;
- }
-
- connector = cirrus_vga_init(cdev->dev);
- if (!connector) {
- DRM_ERROR("cirrus_vga_init failed\n");
- return -1;
- }
-
- drm_connector_attach_encoder(connector, encoder);
-
- ret = cirrus_fbdev_init(cdev);
- if (ret) {
- DRM_ERROR("cirrus_fbdev_init failed\n");
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_modeset_fini(struct cirrus_device *cdev)
-{
- cirrus_fbdev_fini(cdev);
-
- if (cdev->mode_info.mode_config_initialized) {
- drm_mode_config_cleanup(cdev->dev);
- cdev->mode_info.mode_config_initialized = false;
- }
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index e075810b4bd4..e6b98467a428 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -178,7 +178,6 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -331,13 +330,8 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct cirrus_device *cirrus;
+ struct drm_file *file_priv = filp->private_data;
+ struct cirrus_device *cirrus = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- cirrus = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 5eb40130fafb..f4924cb7f495 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -798,6 +798,50 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
/**
+ * drm_atomic_get_old_private_obj_state - return the old private object state, if any
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the old private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].old_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
+
+/**
+ * drm_atomic_get_new_private_obj_state - return the new private object state, if any
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the new private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].new_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -1236,4 +1280,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
}
#endif
-
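The two accessors added above only find state that is already part of the commit: they walk state->private_objs[], which is populated when a driver calls drm_atomic_get_private_obj_state(). A minimal sketch of how a driver's atomic_check might use them follows; the my_* names are hypothetical and only illustrate the lookup pattern.

static int my_atomic_check(struct drm_atomic_state *state,
			   struct drm_private_obj *my_obj)
{
	struct drm_private_state *old_s, *new_s;

	/* NULL means my_obj was not duplicated into this atomic state */
	old_s = drm_atomic_get_old_private_obj_state(state, my_obj);
	new_s = drm_atomic_get_new_private_obj_state(state, my_obj);
	if (!old_s || !new_s)
		return 0;

	/* driver-specific comparison of old_s vs. new_s would go here */
	return 0;
}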
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index fbb76332cc9f..2e0cb4246cbd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -495,7 +495,7 @@ mode_fixup(struct drm_atomic_state *state)
static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_encoder *encoder,
struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
@@ -534,7 +534,7 @@ mode_valid(struct drm_atomic_state *state)
struct drm_crtc *crtc = conn_state->crtc;
struct drm_crtc_state *crtc_state;
enum drm_mode_status mode_status;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
if (!crtc || !encoder)
continue;
@@ -1751,7 +1751,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
*
* NOTE: Commit work has multiple phases, first hardware commit, then
* cleanup. We want them to overlap, hence need system_unbound_wq to
- * make sure work items don't artifically stall on each another.
+ * make sure work items don't artificially stall on one another.
*/
drm_atomic_state_get(state);
@@ -1785,7 +1785,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
- * achive this is by running them on the &system_unbound_wq work queue. Note
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
* that drivers are not required to split up atomic commits and run an
* individual commit in parallel - userspace is supposed to do that if it cares.
* But it might be beneficial to do that for modesets, since those necessarily
@@ -2260,10 +2260,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int ret, i, j;
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ if (!new_conn_state->writeback_job)
+ continue;
+
+ ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
+ if (ret < 0)
+ return ret;
+ }
+
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
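The note above about using &system_unbound_wq is why most atomic drivers do not allocate an ordered workqueue of their own for nonblocking commits. A hedged sketch of that pattern, with hypothetical my_* names and the hardware programming reduced to a comment, might look like this:

struct my_commit {
	struct work_struct work;
	struct drm_atomic_state *state;
};

static void my_commit_tail_work(struct work_struct *work)
{
	struct my_commit *commit = container_of(work, struct my_commit, work);

	/* hardware commit and cleanup for this state would run here */
	drm_atomic_state_put(commit->state);
	kfree(commit);
}

static void my_queue_nonblocking_commit(struct my_commit *commit)
{
	INIT_WORK(&commit->work, my_commit_tail_work);
	/* unbound workers let commits on different CRTCs proceed in parallel */
	queue_work(system_unbound_wq, &commit->work);
}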
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 4985384e51f6..59ffb6b9c745 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
+#include <drm/drm_writeback.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ if (state->writeback_job)
+ drm_writeback_cleanup_job(state->writeback_job);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0aabd401d3ca..428d82662dc4 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -512,8 +512,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
}
static int drm_atomic_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_plane_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -521,7 +521,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
int ret;
if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ struct drm_framebuffer *fb;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
@@ -537,7 +538,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -647,28 +650,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
-static struct drm_writeback_job *
-drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
-{
- WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
-
- if (!conn_state->writeback_job)
- conn_state->writeback_job =
- kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
-
- return conn_state->writeback_job;
-}
-
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
- struct drm_writeback_job *job =
- drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
+ int ret;
- drm_framebuffer_assign(&job->fb, fb);
+ ret = drm_writeback_set_fb(conn_state, fb);
+ if (ret < 0)
+ return ret;
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
@@ -681,14 +671,16 @@ static int drm_atomic_set_writeback_fb_for_connector(
}
static int drm_atomic_connector_set_property(struct drm_connector *connector,
- struct drm_connector_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_connector_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
@@ -746,9 +738,13 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == connector->colorspace_property) {
+ state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
- int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ struct drm_framebuffer *fb;
+ int ret;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
+ ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
if (fb)
drm_framebuffer_put(fb);
return ret;
@@ -814,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->picture_aspect_ratio;
} else if (property == config->content_type_property) {
*val = state->content_type;
+ } else if (property == connector->colorspace_property) {
+ *val = state->colorspace;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
@@ -943,6 +941,7 @@ out:
}
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value)
@@ -965,7 +964,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_connector_set_property(connector,
- connector_state, prop, prop_value);
+ connector_state, file_priv,
+ prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
@@ -993,7 +993,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_plane_set_property(plane,
- plane_state, prop, prop_value);
+ plane_state, file_priv,
+ prop, prop_value);
break;
}
default:
@@ -1158,19 +1159,17 @@ static int prepare_signaling(struct drm_device *dev,
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_connector *wb_conn;
- struct drm_writeback_job *job;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
+ if (!conn_state->writeback_job)
+ continue;
+
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
- job = drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
-
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
@@ -1192,7 +1191,7 @@ static int prepare_signaling(struct drm_device *dev,
return ret;
}
- job->out_fence = fence;
+ conn_state->writeback_job->out_fence = fence;
}
/*
@@ -1365,8 +1364,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, obj, prop,
- prop_value);
+ ret = drm_atomic_set_property(state, file_priv,
+ obj, prop, prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
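Judging from the open-coded job handling it replaces above, drm_writeback_set_fb() is expected to allocate the writeback job on demand and assign the framebuffer to it. The sketch below reconstructs that behaviour for illustration only; it is not the actual drivers/gpu/drm/drm_writeback.c implementation, which may track additional job state.

/* illustrative reconstruction, not the real helper */
static int sketch_writeback_set_fb(struct drm_connector_state *conn_state,
				   struct drm_framebuffer *fb)
{
	WARN_ON(conn_state->connector->connector_type !=
		DRM_MODE_CONNECTOR_WRITEBACK);

	if (!conn_state->writeback_job) {
		conn_state->writeback_job =
			kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
		if (!conn_state->writeback_job)
			return -ENOMEM;
	}

	drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
	return 0;
}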
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 1669c42c40ed..22c7a104b802 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -103,14 +103,11 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
+ drm_master_legacy_init(master);
idr_init(&master->magic_map);
master->dev = dev;
/* initialize the tree of output resource lessees */
- master->lessor = NULL;
- master->lessee_id = 0;
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
@@ -274,21 +271,7 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master(file_priv))
goto out;
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
- }
+ drm_legacy_lock_master_cleanup(dev, master);
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index e407adb033e7..bfc419ed9d6c 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -584,6 +584,14 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
mutex_unlock(&dev->struct_mutex);
}
+void drm_legacy_rmmaps(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_legacy_rmmap(dev, r_list->map);
+}
+
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 9b2bd28dde0a..f20d1dda3961 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -69,7 +69,8 @@ EXPORT_SYMBOL(drm_client_close);
* @name: Client name
* @funcs: DRM client functions (optional)
*
- * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
+ * This initialises the client and opens a &drm_file.
+ * Use drm_client_register() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
@@ -108,16 +109,16 @@ err_put_module:
EXPORT_SYMBOL(drm_client_init);
/**
- * drm_client_add - Add client to the device list
+ * drm_client_register - Register client
* @client: DRM client
*
* Add the client to the &drm_device client list to activate its callbacks.
* @client must be initialized by a call to drm_client_init(). After
- * drm_client_add() it is no longer permissible to call drm_client_release()
+ * drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
*/
-void drm_client_add(struct drm_client_dev *client)
+void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
@@ -125,7 +126,7 @@ void drm_client_add(struct drm_client_dev *client)
list_add(&client->list, &dev->clientlist);
mutex_unlock(&dev->clientlist_mutex);
}
-EXPORT_SYMBOL(drm_client_add);
+EXPORT_SYMBOL(drm_client_register);
/**
* drm_client_release - Release DRM client resources
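The drm_client_init()/drm_client_register() pairing documented above typically looks like the following in a client; the my_* names and the minimal funcs table are hypothetical, and a real client would also provide an .unregister callback that calls drm_client_release() and frees the structure.

static const struct drm_client_funcs my_client_funcs = {
	.owner = THIS_MODULE,
};

static int my_client_setup(struct drm_device *dev)
{
	struct drm_client_dev *client;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return -ENOMEM;

	ret = drm_client_init(dev, client, "my-client", &my_client_funcs);
	if (ret) {
		kfree(client);
		return ret;
	}

	/* activates the callbacks; further cleanup happens on driver unload */
	drm_client_register(client);

	return 0;
}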
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index dd40eff0911c..b34c3d38bf15 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
+ connector->tile_blob_ptr = NULL;
connector->status = connector_status_unknown;
connector->display_info.panel_orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
config->non_desktop_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->tile_property,
+ 0);
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
@@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+static const struct drm_prop_enum_list hdmi_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ /* Standard Definition Colorimetry based on CEA 861 */
+ { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+ /* Added as part of Additional Colorimetry Extension in 861.G */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1385,12 +1416,6 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
*
* The driver may place further restrictions within these minimum
* and maximum bounds.
- *
- * The semantics for the vertical blank timestamp differ when
- * variable refresh rate is active. The vertical blank timestamp
- * is defined to be an estimate using the current mode's fixed
- * refresh rate timings. The semantics for the page-flip event
- * timestamp remain the same.
*/
/**
@@ -1548,6 +1573,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * DOC: standard connector properties
+ *
+ * Colorspace:
+ * drm_mode_create_colorspace_property - create colorspace property
+ * This property helps select a suitable colorspace based on the sink
+ * capability. Modern sink devices support wider gamuts such as BT.2020.
+ * If the user plays a BT.2020-encoded video stream, this property allows
+ * switching to BT.2020 mode (and likewise for any other colorspace),
+ * giving users a better visual experience.
+ *
+ * The expectation from userspace is that it should parse the EDID
+ * and get supported colorspaces. Use this property and switch to the
+ * one supported. Sink supported colorspaces should be retrieved by
+ * userspace from EDID and driver will not explicitly expose them.
+ *
+ * Basically the expectation from userspace is:
+ * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
+ * colorspace
+ * - Set this new property to let the sink know what it
+ * converted the CRTC output to.
+ * - This property is just to inform the sink what colorspace the
+ * source is trying to drive.
+ *
+ * Called by a driver the first time it's needed; the property must be
+ * attached to the desired connectors.
+ */
+int drm_mode_create_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+ if (!prop)
+ return -ENOMEM;
+ } else {
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return 0;
+ }
+
+ connector->colorspace_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+
+/**
* drm_mode_create_content_type_property - create content type property
* @dev: DRM device
*
@@ -1634,6 +1710,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property);
* This looks up the tile information for a connector, and creates a
* property for userspace to parse if it exists. The property is of
* the form of 8 integers using ':' as a separator.
+ * This is used for dual port tiled displays with DisplayPort SST
+ * or DisplayPort MST connectors.
*
* Returns:
* Zero on success, errno on failure.
@@ -1677,6 +1755,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property);
*
* This function creates a new blob modeset object and assigns its id to the
* connector's edid property.
+ * Since we also parse tile information from the EDID's DisplayID block, the
+ * connector's tile property is set here as well. See drm_connector_set_tile_property()
+ * for more details.
*
* Returns:
* Zero on success, negative errno on failure.
@@ -1718,7 +1799,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
edid,
&connector->base,
dev->mode_config.edid_property);
- return ret;
+ if (ret)
+ return ret;
+ return drm_connector_set_tile_property(connector);
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
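A driver that wants to expose the new Colorspace property would call the helper added above from its connector init path and attach the resulting property; the function name below is hypothetical, and the initial value simply picks the default colorimetry from the enum list.

static int my_hdmi_connector_init(struct drm_connector *connector)
{
	int ret;

	ret = drm_mode_create_colorspace_property(connector);
	if (ret)
		return ret;

	/* only HDMI connectors get a property; others return 0 with none */
	if (connector->colorspace_property)
		drm_object_attach_property(&connector->base,
					   connector->colorspace_property,
					   DRM_MODE_COLORIMETRY_DEFAULT);

	return 0;
}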
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7dabbaf033a1..790ba5941954 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -559,6 +559,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
+ /* allow disabling with the primary plane leased */
+ if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
mutex_lock(&crtc->dev->mode_config.mutex);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 216f2a9ee3d4..0719a235d6cc 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -214,6 +214,7 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
struct drm_connector *connector,
int mode);
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index dc7ac0c60547..c630ed157994 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_mst_topology_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05bbc2b622fc..862621494a93 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor)
* Note that the lifetime rules for &drm_device instance has still a lot of
* historical baggage. Hence use the reference counting provided by
* drm_dev_get() and drm_dev_put() only carefully.
+ *
+ * Display driver example
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The following example shows a typical structure of a DRM display driver.
+ * The example focuses on the probe() function and the other functions that
+ * are almost always present, and serves as a demonstration of
+ * devm_drm_dev_init() usage with its accompanying drm_driver->release callback.
+ *
+ * .. code-block:: c
+ *
+ * struct driver_device {
+ * struct drm_device drm;
+ * void *userspace_facing;
+ * struct clk *pclk;
+ * };
+ *
+ * static void driver_drm_release(struct drm_device *drm)
+ * {
+ * struct driver_device *priv = container_of(...);
+ *
+ * drm_mode_config_cleanup(drm);
+ * drm_dev_fini(drm);
+ * kfree(priv->userspace_facing);
+ * kfree(priv);
+ * }
+ *
+ * static struct drm_driver driver_drm_driver = {
+ * [...]
+ * .release = driver_drm_release,
+ * };
+ *
+ * static int driver_probe(struct platform_device *pdev)
+ * {
+ * struct driver_device *priv;
+ * struct drm_device *drm;
+ * int ret;
+ *
+ * [
+ * devm_kzalloc() can't be used here because the drm_device
+ * lifetime can exceed the device lifetime if driver unbind
+ * happens when userspace still has open file descriptors.
+ * ]
+ * priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ * if (!priv)
+ * return -ENOMEM;
+ *
+ * drm = &priv->drm;
+ *
+ * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
+ * if (ret) {
+ * kfree(drm);
+ * return ret;
+ * }
+ *
+ * drm_mode_config_init(drm);
+ *
+ * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * if (!priv->userspace_facing)
+ * return -ENOMEM;
+ *
+ * priv->pclk = devm_clk_get(dev, "PCLK");
+ * if (IS_ERR(priv->pclk))
+ * return PTR_ERR(priv->pclk);
+ *
+ * [ Further setup, display pipeline etc ]
+ *
+ * platform_set_drvdata(pdev, drm);
+ *
+ * drm_mode_config_reset(drm);
+ *
+ * ret = drm_dev_register(drm);
+ * if (ret)
+ * return ret;
+ *
+ * drm_fbdev_generic_setup(drm, 32);
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called before the devm_ resources are released ]
+ * static int driver_remove(struct platform_device *pdev)
+ * {
+ * struct drm_device *drm = platform_get_drvdata(pdev);
+ *
+ * drm_dev_unregister(drm);
+ * drm_atomic_helper_shutdown(drm)
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called on kernel restart and shutdown ]
+ * static void driver_shutdown(struct platform_device *pdev)
+ * {
+ * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_suspend(struct device *dev)
+ * {
+ * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_resume(struct device *dev)
+ * {
+ * drm_mode_config_helper_resume(dev_get_drvdata(dev));
+ *
+ * return 0;
+ * }
+ *
+ * static const struct dev_pm_ops driver_pm_ops = {
+ * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
+ * };
+ *
+ * static struct platform_driver driver_driver = {
+ * .driver = {
+ * [...]
+ * .pm = &driver_pm_ops,
+ * },
+ * .probe = driver_probe,
+ * .remove = driver_remove,
+ * .shutdown = driver_shutdown,
+ * };
+ * module_platform_driver(driver_driver);
+ *
+ * Drivers that want to support device unplugging (USB, DT overlay unload) should
+ * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
+ * regions that access device resources to prevent use after they're released.
+ * This is done using drm_dev_enter() and drm_dev_exit(). There is one
+ * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before
+ * drm_atomic_helper_shutdown() is called. This means that if the disable code
+ * paths are protected, they will not run on regular driver module unload,
+ * possibly leaving the hardware enabled.
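+ *
+ * A minimal sketch of such a protected region, reusing the driver_device
+ * structure from the example above (driver_hw_update() is a made-up name and
+ * the bracketed line stands in for the actual hardware access):
+ *
+ * .. code-block:: c
+ *
+ *     static void driver_hw_update(struct driver_device *priv)
+ *     {
+ *             int idx;
+ *
+ *             if (!drm_dev_enter(&priv->drm, &idx))
+ *                     return;
+ *
+ *             [ access device registers, ring buffers, etc ]
+ *
+ *             drm_dev_exit(idx);
+ *     }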
*/
/**
@@ -376,7 +508,6 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
- drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -453,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
+ * DOC: component helper usage recommendations
+ *
+ * DRM drivers that drive hardware where a logical device consists of a pile of
+ * independent hardware blocks are recommended to use the :ref:`component helper
+ * library<component>`. For consistency and better options for code reuse the
+ * following guidelines apply:
+ *
+ * - The entire device initialization procedure should be run from the
+ * &component_master_ops.master_bind callback, starting with drm_dev_init(),
+ * then binding all components with component_bind_all() and finishing with
+ * drm_dev_register(), as sketched in the example below.
+ *
+ * - The opaque pointer passed to all components through component_bind_all()
+ * should point at &struct drm_device of the device instance, not some driver
+ * specific private structure.
+ *
+ * - The component helper fills the niche where further standardization of
+ * interfaces is not practical. Where a standardized interface like
+ * &drm_bridge or &drm_panel already exists (or will exist), with its own
+ * functions to find such components at driver load time, like
+ * drm_of_find_panel_or_bridge(), the component helper should not be used.
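+ *
+ * A minimal sketch of this flow is shown below. The foo_* names are
+ * hypothetical: struct foo_device is assumed to embed a &struct drm_device as
+ * its drm member, and &foo_drm_driver.release is assumed to take care of
+ * drm_dev_fini() and freeing the structure.
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_master_bind(struct device *dev)
+ *     {
+ *             struct foo_device *priv;
+ *             int ret;
+ *
+ *             priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ *             if (!priv)
+ *                     return -ENOMEM;
+ *
+ *             ret = drm_dev_init(&priv->drm, &foo_drm_driver, dev);
+ *             if (ret) {
+ *                     kfree(priv);
+ *                     return ret;
+ *             }
+ *
+ *             [ every component receives &priv->drm as the opaque pointer ]
+ *             ret = component_bind_all(dev, &priv->drm);
+ *             if (ret)
+ *                     goto err_put;
+ *
+ *             ret = drm_dev_register(&priv->drm, 0);
+ *             if (ret)
+ *                     goto err_unbind;
+ *
+ *             return 0;
+ *
+ *     err_unbind:
+ *             component_unbind_all(dev, &priv->drm);
+ *     err_put:
+ *             drm_dev_put(&priv->drm);
+ *             return ret;
+ *     }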
+ */
+
+/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
* @driver: DRM driver
@@ -497,26 +653,22 @@ int drm_dev_init(struct drm_device *dev,
BUG_ON(!parent);
kref_init(&dev->ref);
- dev->dev = parent;
+ dev->dev = get_device(parent);
dev->driver = driver;
/* no per-device feature limits by default */
dev->driver_features = ~0u;
+ drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
- spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
- mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
dev->anon_inode = drm_fs_inode_new();
@@ -536,7 +688,7 @@ int drm_dev_init(struct drm_device *dev,
if (ret)
goto err_minors;
- ret = drm_ht_create(&dev->map_hash, 12);
+ ret = drm_legacy_create_map_hash(dev);
if (ret)
goto err_minors;
@@ -561,21 +713,61 @@ err_setunique:
drm_gem_destroy(dev);
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
err_minors:
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_fs_inode_free(dev->anon_inode);
err_free:
+ put_device(dev->dev);
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
+static void devm_drm_dev_init_release(void *data)
+{
+ drm_dev_put(data);
+}
+
+/**
+ * devm_drm_dev_init - Resource managed drm_dev_init()
+ * @parent: Parent device object
+ * @dev: DRM device
+ * @driver: DRM driver
+ *
+ * Managed drm_dev_init(). The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put(). You must supply a
+ * &drm_driver.release callback to control the finalization explicitly.
+ *
+ * RETURNS:
+ * 0 on success, or error code on failure.
+ */
+int devm_drm_dev_init(struct device *parent,
+ struct drm_device *dev,
+ struct drm_driver *driver)
+{
+ int ret;
+
+ if (WARN_ON(!parent || !driver->release))
+ return -EINVAL;
+
+ ret = drm_dev_init(dev, driver, parent);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
+ if (ret)
+ devm_drm_dev_init_release(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_drm_dev_init);
+
/**
* drm_dev_fini - Finalize a dead DRM device
* @dev: DRM device
@@ -596,17 +788,19 @@ void drm_dev_fini(struct drm_device *dev)
drm_gem_destroy(dev);
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
+ put_device(dev->dev);
+
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);
@@ -840,8 +1034,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- struct drm_map_list *r_list, *list_temp;
-
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_lastclose(dev);
@@ -858,8 +1050,7 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->agp)
drm_pci_agp_destroy(dev);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
+ drm_legacy_rmmaps(dev);
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index bce99f95c1a3..77f4e5ae4197 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
+#include <drm/drm_print.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dsc.h>
@@ -31,75 +32,74 @@
/**
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
- * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
- * as defined in &struct drm_dsc_pps_infoframe
+ * @pps_header: Secondary data packet header for DSC Picture
+ * Parameter Set as defined in &struct dp_sdp_header
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
- * This function populates the pps header defined in
- * &struct drm_dsc_pps_infoframe as per the header bytes defined
- * in &struct dp_sdp_header.
+ * This function populates the SDP header defined in
+ * &struct dp_sdp_header.
*/
-void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
{
- memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+ memset(pps_header, 0, sizeof(*pps_header));
- pps_sdp->pps_header.HB1 = DP_SDP_PPS;
- pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+ pps_header->HB1 = DP_SDP_PPS;
+ pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
}
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
- * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
- * @pps_sdp:
- * Secondary data packet for DSC Picture Parameter Set. This is defined
- * by &struct drm_dsc_pps_infoframe
+ * @pps_payload:
+ * Bitwise struct for DSC Picture Parameter Set. This is defined
+ * by &struct drm_dsc_picture_parameter_set
* @dsc_cfg:
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
- * DSC source device sends a secondary data packet filled with all the
- * picture parameter set (PPS) information required by the sink to decode
- * the compressed frame. Driver populates the dsC PPS infoframe using the DSC
- * configuration parameters in the order expected by the DSC Display Sink
- * device. For the DSC, the sink device expects the PPS payload in the big
- * endian format for the fields that span more than 1 byte.
+ * DSC source device sends a picture parameter set (PPS) containing the
+ * information required by the sink to decode the compressed frame. Driver
+ * populates the DSC PPS struct using the DSC configuration parameters in
+ * the order expected by the DSC Display Sink device. For the DSC, the sink
+ * device expects the PPS payload in big endian format for fields
+ * that span more than 1 byte.
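+ *
+ * A minimal usage sketch; dsc_cfg is an already filled &struct drm_dsc_config,
+ * and how the header and payload then reach the sink is driver specific:
+ *
+ * .. code-block:: c
+ *
+ *     struct drm_dsc_picture_parameter_set pps;
+ *     struct dp_sdp_header pps_header;
+ *
+ *     drm_dsc_dp_pps_header_init(&pps_header);
+ *     drm_dsc_pps_payload_pack(&pps, &dsc_cfg);
+ *     [ write pps_header and pps into the PPS SDP sent to the sink ]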
*/
-void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
const struct drm_dsc_config *dsc_cfg)
{
int i;
/* Protect against someone accidently changing struct size */
- BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ BUILD_BUG_ON(sizeof(*pps_payload) !=
DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
- memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+ memset(pps_payload, 0, sizeof(*pps_payload));
/* PPS 0 */
- pps_sdp->pps_payload.dsc_version =
+ pps_payload->dsc_version =
dsc_cfg->dsc_version_minor |
dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
/* PPS 1, 2 is 0 */
/* PPS 3 */
- pps_sdp->pps_payload.pps_3 =
+ pps_payload->pps_3 =
dsc_cfg->line_buf_depth |
dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
/* PPS 4 */
- pps_sdp->pps_payload.pps_4 =
+ pps_payload->pps_4 =
((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT) |
dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
- dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
/* PPS 5 */
- pps_sdp->pps_payload.bits_per_pixel_low =
+ pps_payload->bits_per_pixel_low =
(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
/*
@@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
*/
/* PPS 6, 7 */
- pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+ pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
/* PPS 8, 9 */
- pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+ pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
/* PPS 10, 11 */
- pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+ pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
/* PPS 12, 13 */
- pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+ pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
/* PPS 14, 15 */
- pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+ pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
/* PPS 16 */
- pps_sdp->pps_payload.initial_xmit_delay_high =
+ pps_payload->initial_xmit_delay_high =
((dsc_cfg->initial_xmit_delay &
DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 17 */
- pps_sdp->pps_payload.initial_xmit_delay_low =
+ pps_payload->initial_xmit_delay_low =
(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
/* PPS 18, 19 */
- pps_sdp->pps_payload.initial_dec_delay =
+ pps_payload->initial_dec_delay =
cpu_to_be16(dsc_cfg->initial_dec_delay);
/* PPS 20 is 0 */
/* PPS 21 */
- pps_sdp->pps_payload.initial_scale_value =
+ pps_payload->initial_scale_value =
dsc_cfg->initial_scale_value;
/* PPS 22, 23 */
- pps_sdp->pps_payload.scale_increment_interval =
+ pps_payload->scale_increment_interval =
cpu_to_be16(dsc_cfg->scale_increment_interval);
/* PPS 24 */
- pps_sdp->pps_payload.scale_decrement_interval_high =
+ pps_payload->scale_decrement_interval_high =
((dsc_cfg->scale_decrement_interval &
DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 25 */
- pps_sdp->pps_payload.scale_decrement_interval_low =
+ pps_payload->scale_decrement_interval_low =
(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
/* PPS 26[7:0], PPS 27[7:5] RESERVED */
/* PPS 27 */
- pps_sdp->pps_payload.first_line_bpg_offset =
+ pps_payload->first_line_bpg_offset =
dsc_cfg->first_line_bpg_offset;
/* PPS 28, 29 */
- pps_sdp->pps_payload.nfl_bpg_offset =
+ pps_payload->nfl_bpg_offset =
cpu_to_be16(dsc_cfg->nfl_bpg_offset);
/* PPS 30, 31 */
- pps_sdp->pps_payload.slice_bpg_offset =
+ pps_payload->slice_bpg_offset =
cpu_to_be16(dsc_cfg->slice_bpg_offset);
/* PPS 32, 33 */
- pps_sdp->pps_payload.initial_offset =
+ pps_payload->initial_offset =
cpu_to_be16(dsc_cfg->initial_offset);
/* PPS 34, 35 */
- pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+ pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
/* PPS 36 */
- pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+ pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
/* PPS 37 */
- pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+ pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_sdp->pps_payload.rc_model_size =
+ pps_payload->rc_model_size =
cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
/* PPS 40 */
- pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+ pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
/* PPS 41 */
- pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ pps_payload->rc_quant_incr_limit0 =
dsc_cfg->rc_quant_incr_limit0;
/* PPS 42 */
- pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ pps_payload->rc_quant_incr_limit1 =
dsc_cfg->rc_quant_incr_limit1;
/* PPS 43 */
- pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
/* PPS 44 - 57 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
- pps_sdp->pps_payload.rc_buf_thresh[i] =
+ pps_payload->rc_buf_thresh[i] =
dsc_cfg->rc_buf_thresh[i];
/* PPS 58 - 87 */
@@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
* are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- pps_sdp->pps_payload.rc_range_parameters[i] =
+ pps_payload->rc_range_parameters[i] =
((dsc_cfg->rc_range_params[i].range_min_qp <<
DSC_PPS_RC_RANGE_MINQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_max_qp <<
DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_sdp->pps_payload.rc_range_parameters[i] =
- cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ pps_payload->rc_range_parameters[i] =
+ cpu_to_be16(pps_payload->rc_range_parameters[i]);
}
/* PPS 88 */
- pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ pps_payload->native_422_420 = dsc_cfg->native_422 |
dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
/* PPS 89 */
- pps_sdp->pps_payload.second_line_bpg_offset =
+ pps_payload->second_line_bpg_offset =
dsc_cfg->second_line_bpg_offset;
/* PPS 90, 91 */
- pps_sdp->pps_payload.nsl_bpg_offset =
+ pps_payload->nsl_bpg_offset =
cpu_to_be16(dsc_cfg->nsl_bpg_offset);
/* PPS 92, 93 */
- pps_sdp->pps_payload.second_line_offset_adj =
+ pps_payload->second_line_offset_adj =
cpu_to_be16(dsc_cfg->second_line_offset_adj);
/* PPS 94 - 127 are O */
}
-EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
+EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
+
+/**
+ * drm_dsc_compute_rc_parameters() - Write rate control
+ * parameters to the dsc configuration defined in
+ * &struct drm_dsc_config in accordance with the DSC 1.2
+ * specification. Some configuration fields must be present
+ * beforehand.
+ *
+ * @vdsc_cfg:
+ * DSC Configuration data partially filled by driver
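+ *
+ * A rough usage sketch; mode and vdsc_cfg are the driver's own variables, and
+ * the bracketed line stands in for the remaining mandatory fields (slice
+ * dimensions, bits_per_pixel, bits_per_component, initial_xmit_delay, ...):
+ *
+ * .. code-block:: c
+ *
+ *     vdsc_cfg->pic_width = mode->hdisplay;
+ *     vdsc_cfg->pic_height = mode->vdisplay;
+ *     [ fill the remaining configuration fields ]
+ *
+ *     ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
+ *     if (ret)
+ *             return ret;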
+ */
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ } else {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ }
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else if (vdsc_cfg->native_422)
+ num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 3 * (4 * vdsc_cfg->bits_per_component) - 2;
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is 16 bit value with 11 fractional bits
+ * hence we multiply by 2^11 for preserving the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
+ * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
+ * we need divide by 2^11 from pstDscCfg values
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 990b1909f9d7..649cfd8b4200 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,8 +68,6 @@
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the first detailed is preferred bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
@@ -107,8 +105,6 @@ static const struct edid_quirk {
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
@@ -145,12 +141,6 @@ static const struct edid_quirk {
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
@@ -172,6 +162,25 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ /* Valve Index Headset */
+ { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+
/* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
@@ -193,6 +202,12 @@ static const struct edid_quirk {
/* Sony PlayStation VR Headset */
{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sensics VR Headsets */
+ { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+
+ /* OSVR HDK and HDK2 VR Headsets */
+ { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4924,6 +4939,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+/* HDMI Colorspace Spec Definitions */
+#define FULL_COLORIMETRY_MASK 0x1FF
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
+
+#define C(x) ((x) << 0)
+#define EC(x) ((x) << 2)
+#define ACE(x) ((x) << 5)
+
+#define HDMI_COLORIMETRY_NO_DATA 0x0
+#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
+#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
+#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
+#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
+
+static const u32 hdmi_colorimetry_val[] = {
+ [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
+ [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
+ [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
+ [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
+ [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
+ [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
+ [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
+ [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
+ [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
+ [DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
+ [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
+};
+
+#undef C
+#undef EC
+#undef ACE
+
+/**
+ * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
+ * colorspace information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
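+ *
+ * A typical usage sketch alongside the other AVI infoframe helpers; connector,
+ * mode and conn_state are the caller's own variables:
+ *
+ * .. code-block:: c
+ *
+ *     struct hdmi_avi_infoframe frame;
+ *
+ *     drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
+ *     drm_hdmi_avi_infoframe_colorspace(&frame, conn_state);
+ *     [ pack and send the infoframe to the sink ]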
+ */
+void
+drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ u32 colorimetry_val;
+ u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
+
+ if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
+ colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
+ else
+ colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
+
+ frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
+ /*
+ * ToDo: Extend it for ACE formats as well. Modify the infoframe
+ * structure and extend it in drivers/video/hdmi
+ */
+ frame->extended_colorimetry = (colorimetry_val >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
+
/**
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index af2ab640cadb..498f95c3e81d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -639,20 +639,19 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_mode_set *modeset;
int i, j;
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ modeset = &fb_helper->crtc_info[i].mode_set;
- if (!crtc->enabled)
+ if (!modeset->crtc->enabled)
continue;
- /* Walk the connectors & encoders on this fb turning them on/off */
- drm_fb_helper_for_each_connector(fb_helper, j) {
- connector = fb_helper->connector_info[j]->connector;
+ for (j = 0; j < modeset->num_connectors; j++) {
+ connector = modeset->connectors[j];
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
@@ -934,6 +933,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
}
fb_helper->fbdev = info;
+ info->skip_vt_switch = true;
return info;
@@ -1873,7 +1873,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int crtc_count = 0;
int i;
struct drm_fb_helper_surface_size sizes;
- int gamma_size = 0;
int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
@@ -1889,7 +1888,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- /* first up get a count of crtcs now in use and new min/maxes width/heights */
drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
@@ -1969,6 +1967,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_depth = best_depth;
}
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
@@ -1991,9 +1990,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-
sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
@@ -2036,21 +2032,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
return 0;
}
-/**
- * drm_fb_helper_fill_fix - initializes fixed fbdev information
- * @info: fbdev registered by the helper
- * @pitch: desired pitch
- * @depth: desired depth
- *
- * Helper to fill in the fixed fbdev information useful for a non-accelerated
- * fbdev emulations. Drivers which support acceleration methods which impose
- * additional constraints need to set up their own limits.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback.
- */
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
@@ -2065,24 +2048,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.line_length = pitch;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-/**
- * drm_fb_helper_fill_var - initalizes variable fbdev information
- * @info: fbdev instance to set up
- * @fb_helper: fb helper instance to use as template
- * @fb_width: desired fb width
- * @fb_height: desired fb height
- *
- * Sets up the variable fbdev metainformation from the given fb helper instance
- * and the drm framebuffer allocated in &drm_fb_helper.fb.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
- * backing storage framebuffer.
- */
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height)
+static void drm_fb_helper_fill_var(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
@@ -2102,7 +2071,36 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres = fb_width;
info->var.yres = fb_height;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+/**
+ * drm_fb_helper_fill_info - initializes fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * Sets up the variable and fixed fbdev metainformation from the given fb helper
+ * instance and the drm framebuffer allocated in &drm_fb_helper.fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
+ * backing storage framebuffer.
+ */
+void drm_fb_helper_fill_info(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->par = fb_helper;
+ snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
+ fb_helper->dev->driver->name);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_info);
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,
@@ -2561,6 +2559,195 @@ static void drm_setup_crtc_rotation(struct drm_fb_helper *fb_helper,
fb_helper->sw_rotations |= DRM_MODE_ROTATE_0;
}
+static struct drm_fb_helper_crtc *
+drm_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+ return &fb_helper->crtc_info[i];
+
+ return NULL;
+}
+
+/* Try to read the BIOS display configuration and use it for the initial config */
+static bool drm_fb_helper_firmware_config(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ struct drm_device *dev = fb_helper->dev;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq, mask;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ return false;
+
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+retry:
+ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *new_crtc;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (conn_configured & BIT(i))
+ continue;
+
+ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ if (connector->force == DRM_FORCE_OFF) {
+ DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ continue;
+ }
+
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (connector->force > DRM_FORCE_OFF)
+ goto bail;
+
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = drm_fb_helper_crtc(fb_helper, connector->state->crtc);
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ goto bail;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_conn);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
+ modes[i] = drm_has_preferred_mode(fb_conn, width,
+ height);
+ }
+
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ connector->name);
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly.
+ */
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
+ modes[i] = &connector->state->crtc->mode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+
+ fallback = false;
+ conn_configured |= BIT(i);
+ }
+
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the fbdev
+ * helper library's behaviour of lighting up as many outputs as possible.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < dev->mode_config.num_crtc) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+ if (fallback) {
+bail:
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, count);
+ ret = false;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ kfree(save_enabled);
+ return ret;
+}
+
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
u32 width, u32 height)
{
@@ -2593,10 +2780,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_enable_connectors(fb_helper, enabled);
- if (!(fb_helper->funcs->initial_config &&
- fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
- offsets,
- enabled, width, height))) {
+ if (!drm_fb_helper_firmware_config(fb_helper, crtcs, modes, offsets,
+ enabled, width, height)) {
memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
@@ -2780,9 +2965,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
- * drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
- * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
- * values for the fbdev info structure.
+ * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
+ * as a helper to setup simple default values for the fbdev info structure.
*
* HANG DEBUGGING:
*
@@ -3024,7 +3208,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
return -ENODEV;
return 0;
@@ -3034,7 +3219,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- module_put(fb_helper->dev->driver->fops->owner);
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
return 0;
}
@@ -3149,7 +3335,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
if (IS_ERR(fbi))
return PTR_ERR(fbi);
- fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
@@ -3160,10 +3345,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fix.smem_start =
page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
- strcpy(fbi->fix.id, "DRM emulated");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
if (fb->funcs->dirty) {
struct fb_ops *fbops;
@@ -3317,8 +3499,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
return ret;
}
- drm_client_add(&fb_helper->client);
-
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
@@ -3329,6 +3509,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
if (ret)
DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+ drm_client_register(&fb_helper->client);
+
return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 7caa3c7ed978..233f114d2186 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -128,7 +128,6 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
/* for compatibility root is always authenticated */
file->authenticated = capable(CAP_SYS_ADMIN);
- file->lock_count = 0;
INIT_LIST_HEAD(&file->lhead);
INIT_LIST_HEAD(&file->fbs);
@@ -425,30 +424,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
}
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
void drm_lastclose(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -577,6 +552,7 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
+ wake_up_interruptible(&file_priv->event_wait);
break;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
new file mode 100644
index 000000000000..a18da35145b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+
+static unsigned int clip_offset(struct drm_rect *clip,
+ unsigned int pitch, unsigned int cpp)
+{
+ return clip->y1 * pitch + clip->x1 * cpp;
+}
+
+/**
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Destination buffer
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
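+ *
+ * A minimal sketch of sizing and filling such a buffer; clip is the damage
+ * rectangle and src_vaddr stands in for wherever the driver keeps the source
+ * pixels:
+ *
+ * .. code-block:: c
+ *
+ *     unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ *     size_t size = drm_rect_width(&clip) * drm_rect_height(&clip) * cpp;
+ *     void *dst = kmalloc(size, GFP_KERNEL);
+ *
+ *     if (dst)
+ *             drm_fb_memcpy(dst, src_vaddr, fb, &clip);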
+ */
+void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ memcpy(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += len;
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy);
+
+/**
+ * drm_fb_memcpy_dstclip - Copy clip buffer
+ * @dst: Destination buffer (iomem)
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int offset = clip_offset(clip, fb->pitches[0], cpp);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += offset;
+ dst += offset;
+ for (y = 0; y < lines; y++) {
+ memcpy_toio(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
+
+/**
+ * drm_fb_swab16 - Swap bytes into clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: RGB565 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ */
+void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * sizeof(u16);
+ unsigned int x, y;
+ u16 *src, *buf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++)
+ *dst++ = swab16(*src++);
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_swab16);
+
+static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
+ unsigned int pixels,
+ bool swab)
+{
+ unsigned int x;
+ u16 val16;
+
+ for (x = 0; x < pixels; x++) {
+ val16 = ((sbuf[x] & 0x00F80000) >> 8) |
+ ((sbuf[x] & 0x0000FC00) >> 5) |
+ ((sbuf[x] & 0x000000F8) >> 3);
+ if (swab)
+ dbuf[x] = swab16(val16);
+ else
+ dbuf[x] = val16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t src_len = linepixels * sizeof(u32);
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *sbuf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < lines; y++) {
+ memcpy(sbuf, vaddr, src_len);
+ drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+
+/**
+ * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, sizeof(u16));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_pitch;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip);
+
+static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
+ unsigned int pixels)
+{
+ unsigned int x;
+
+ for (x = 0; x < pixels; x++) {
+ *dbuf++ = (sbuf[x] & 0x000000FF) >> 0;
+ *dbuf++ = (sbuf[x] & 0x0000FF00) >> 8;
+ *dbuf++ = (sbuf[x] & 0x00FF0000) >> 16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * 3;
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, 3);
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_pitch;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip);
+
+/**
+ * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
+ * @dst: 8-bit grayscale destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome or grayscale support. Drivers for such
+ * displays can announce the commonly supported XR24 format to userspace
+ * and use this function to convert to the native format.
+ *
+ * Monochrome drivers will use the most significant bit,
+ * where 1 means foreground color and 0 background color.
+ *
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ */
+void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
+ void *buf;
+ u32 *src;
+
+ if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++) {
+ u8 r = (*src & 0x00ff0000) >> 16;
+ u8 g = (*src & 0x0000ff00) >> 8;
+ u8 b = *src & 0x000000ff;
+
+ /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ *dst++ = (3 * r + 6 * g + b) / 10;
+ src++;
+ }
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index ba7e19d4336c..6ea55fb4526d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -198,6 +198,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -225,7 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
@@ -247,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_P210, .depth = 0,
+ .num_planes = 2, .char_per_block = { 2, 4, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+ .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY101010, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
};
unsigned int i;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f..50de138c89e0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -72,23 +72,6 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
-
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_devic structure to initialize
@@ -171,6 +154,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ reservation_object_init(&obj->_resv);
+ if (!obj->resv)
+ obj->resv = &obj->_resv;
+
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -659,6 +646,85 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
}
EXPORT_SYMBOL(drm_gem_put_pages);
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+ struct drm_gem_object **objs)
+{
+ int i, ret = 0;
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ for (i = 0; i < count; i++) {
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle[i]);
+ if (!obj) {
+ ret = -ENOENT;
+ break;
+ }
+ drm_gem_object_get(obj);
+ objs[i] = obj;
+ }
+ spin_unlock(&filp->table_lock);
+
+ return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs_out filled in with GEM object pointers. The returned GEM objects need
+ * to be released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ int count, struct drm_gem_object ***objs_out)
+{
+ int ret;
+ u32 *handles;
+ struct drm_gem_object **objs;
+
+ if (!count)
+ return 0;
+
+ objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!objs)
+ return -ENOMEM;
+
+ handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in GEM handles\n");
+ goto out;
+ }
+
+ ret = objects_lookup(filp, handles, count, objs);
+ *objs_out = objs;
+
+out:
+ kvfree(handles);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
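
As a usage sketch, a driver ioctl that receives an array of BO handles could become a thin wrapper around the new helper. struct foo_submit_args, its bo_handles/bo_handle_count fields and the foo_ prefix are illustrative assumptions, not part of this patch:

static int foo_lookup_bos(struct drm_file *file,
			  struct foo_submit_args *args,
			  struct drm_gem_object ***bos)
{
	/*
	 * Copies the handle array in from userspace and takes a reference
	 * on every object; the caller later drops each reference and
	 * kvfree()s the returned array.
	 */
	return drm_gem_objects_lookup(file,
				      u64_to_user_ptr(args->bo_handles),
				      args->bo_handle_count, bos);
}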
+
/**
* drm_gem_object_lookup - look up a GEM object from its handle
* @filp: DRM file private data
@@ -668,24 +734,56 @@ EXPORT_SYMBOL(drm_gem_put_pages);
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
+ struct drm_gem_object *obj = NULL;
+
+ objects_lookup(filp, &handle, 1, &obj);
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive fences
+ * of a GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait only on the exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * -ERESTARTSYS if interrupted, -ETIME if the wait timed out, -EINVAL if the
+ * handle could not be looked up, or 0 on success.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+ bool wait_all, unsigned long timeout)
+{
+ long ret;
struct drm_gem_object *obj;
- spin_lock(&filp->table_lock);
+ obj = drm_gem_object_lookup(filep, handle);
+ if (!obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ return -EINVAL;
+ }
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj)
- drm_gem_object_get(obj);
+ ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+ true, timeout);
+ if (ret == 0)
+ ret = -ETIME;
+ else if (ret > 0)
+ ret = 0;
- spin_unlock(&filp->table_lock);
+ drm_gem_object_put_unlocked(obj);
- return obj;
+ return ret;
}
-EXPORT_SYMBOL(drm_gem_object_lookup);
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
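
A minimal sketch of the per-driver wait ioctl this helper is meant to replace; struct foo_wait_args with handle and timeout_ns fields is an assumption for illustration:

static int foo_ioctl_wait_bo(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct foo_wait_args *args = data;

	/*
	 * Wait on all fences (shared and exclusive) of the BO's
	 * reservation object, with the timeout converted to jiffies.
	 */
	return drm_gem_reservation_object_wait(file, args->handle, true,
					       nsecs_to_jiffies(args->timeout_ns));
}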
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -851,6 +949,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
+ reservation_object_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1289,174 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int contended = -1;
+ int i, ret;
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+ if (contended != -1) {
+ struct drm_gem_object *obj = objs[contended];
+
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ if (i == contended)
+ continue;
+
+ ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&objs[j]->resv->lock);
+
+ if (contended != -1 && contended >= i)
+ ww_mutex_unlock(&objs[contended]->resv->lock);
+
+ if (ret == -EDEADLK) {
+ contended = i;
+ goto retry;
+ }
+
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ww_mutex_unlock(&objs[i]->resv->lock);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
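
Together the two helpers are meant to bracket fence publication during job submission. A rough sketch, assuming the driver has already looked up its objs array and created a done_fence for the job; reservation_object_add_excl_fence() is the pre-existing reservation API, everything foo_-prefixed is hypothetical:

static int foo_publish_job_fence(struct drm_gem_object **objs, int count,
				 struct dma_fence *done_fence)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/*
	 * All reservations are held, so the fence can be attached to
	 * every BO without racing against other submitters.
	 */
	for (i = 0; i < count; i++)
		reservation_object_add_excl_fence(objs[i]->resv, done_fence);

	drm_gem_unlock_reservations(objs, count, &ctx);

	return 0;
}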
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(fence_array, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(fence_array, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence =
+ reservation_object_get_excl_rcu(obj->resv);
+
+ return drm_gem_fence_array_add(fence_array, fence);
+ }
+
+ ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+ &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_gem_fence_array_add(fence_array, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
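
A sketch of how a driver might gather the implicit dependencies of a job into an xarray before handing it to a scheduler. The deps xarray is assumed to have been set up with xa_init_flags(deps, XA_FLAGS_ALLOC), since drm_gem_fence_array_add() allocates ids with xa_alloc(); the foo_ function and the writes array are placeholders:

static int foo_collect_implicit_deps(struct xarray *deps,
				     struct drm_gem_object **objs,
				     bool *writes, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		/*
		 * For writes this pulls in all shared fences, otherwise
		 * only the exclusive fence of each BO's reservation.
		 */
		ret = drm_gem_fence_array_add_implicit(deps, objs[i],
						       writes[i]);
		if (ret)
			return ret;
	}

	return 0;
}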
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cc26625b4b33..e01ceed09e67 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -186,13 +186,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj);
- if (cma_obj->vaddr) {
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
- } else if (gem_obj->import_attach) {
+ if (gem_obj->import_attach) {
if (cma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ } else if (cma_obj->vaddr) {
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
}
drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..1ee208c2c85e
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for GEM objects backed by shmem buffers
+ * allocated using anonymous pageable memory.
+ */
+
+static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
+ .free = drm_gem_shmem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object)
+ obj = dev->driver->gem_create_object(dev, size);
+ else
+ obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ if (!obj->funcs)
+ obj->funcs = &drm_gem_shmem_funcs;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto err_free;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto err_release;
+
+ shmem = to_drm_gem_shmem_obj(obj);
+ mutex_init(&shmem->pages_lock);
+ mutex_init(&shmem->vmap_lock);
+
+ /*
+ * Our buffers are kept pinned, so allocating them
+ * from the MOVABLE zone is a really bad idea, and
+ * conflicts with CMA. See comments above new_inode()
+ * conflicts with CMA. See the comments above new_inode()
+ * for why this is required _and_ expected if you're
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+ return shmem;
+
+err_release:
+ drm_gem_object_release(obj);
+err_free:
+ kfree(obj);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+
+/**
+ * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
+ * @obj: GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ WARN_ON(shmem->vmap_use_count);
+
+ if (obj->import_attach) {
+ shmem->pages_use_count--;
+ drm_prime_gem_destroy(obj, shmem->sgt);
+ kvfree(shmem->pages);
+ } else {
+ if (shmem->sgt) {
+ dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
+ shmem->sgt->nents, DMA_BIDIRECTIONAL);
+
+ drm_gem_shmem_put_pages(shmem);
+ sg_free_table(shmem->sgt);
+ kfree(shmem->sgt);
+ }
+ }
+
+ WARN_ON(shmem->pages_use_count);
+
+ drm_gem_object_release(obj);
+ mutex_destroy(&shmem->pages_lock);
+ mutex_destroy(&shmem->vmap_lock);
+ kfree(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ struct page **pages;
+
+ if (shmem->pages_use_count++ > 0)
+ return 0;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages)) {
+ DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ shmem->pages_use_count = 0;
+ return PTR_ERR(pages);
+ }
+
+ shmem->pages = pages;
+
+ return 0;
+}
+
+/*
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->pages_lock);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+
+static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->pages_use_count))
+ return;
+
+ if (--shmem->pages_use_count > 0)
+ return;
+
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+}
+
+/*
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when the use count drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+ mutex_lock(&shmem->pages_lock);
+ drm_gem_shmem_put_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+
+/**
+ * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure the backing pages are pinned in memory while the
+ * buffer is exported.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_pin);
+
+/**
+ * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory.
+ */
+void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_unpin);
+
+static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ if (shmem->vmap_use_count++ > 0)
+ return shmem->vaddr;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
+ }
+
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * The kernel virtual address on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ void *vaddr;
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ vaddr = drm_gem_shmem_vmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
+ if (obj->import_attach)
+ dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+ else
+ vunmap(shmem->vaddr);
+
+ shmem->vaddr = NULL;
+ drm_gem_shmem_put_pages(shmem);
+}
+
+/*
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function removes the virtual address when the use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->vmap_lock);
+ drm_gem_shmem_vunmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+
+struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ uint32_t *handle)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return shmem;
+
+ /*
+ * Allocate an id in the idr table where the object is registered;
+ * the handle holds the id that userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put_unlocked(&shmem->base);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return shmem;
+}
+EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+
+/**
+ * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+ * @file: DRM file structure to create the dumb buffer for
+ * @dev: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace before calling into this function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct drm_gem_shmem_object *shmem;
+
+ if (!args->pitch || !args->size) {
+ args->pitch = min_pitch;
+ args->size = args->pitch * args->height;
+ } else {
+ /* ensure sane minimum values */
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+ }
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+
+ return PTR_ERR_OR_ZERO(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
+ return VM_FAULT_SIGBUS;
+
+ page = shmem->pages[vmf->pgoff];
+
+ return vmf_insert_page(vma, vmf->address, page);
+}
+
+static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ int ret;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ WARN_ON_ONCE(ret != 0);
+
+ drm_gem_vm_open(vma);
+}
+
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+ drm_gem_vm_close(vma);
+}
+
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+ .fault = drm_gem_shmem_fault,
+ .open = drm_gem_shmem_vm_open,
+ .close = drm_gem_shmem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+
+/**
+ * drm_gem_shmem_mmap - Memory-map a shmem GEM object
+ * @filp: File object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for shmem objects. Drivers that employ the shmem helpers should
+ * use this function as the &file_operations.mmap handler of their DRM device
+ * file.
+ *
+ * Instead of directly referencing this function, drivers should use the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ return ret;
+ }
+
+ /* VM_PFNMAP was set by drm_gem_mmap() */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
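
For a driver adopting the new helper library wholesale, the wiring could look roughly like the sketch below. foo_fops and foo_drm_driver are placeholders, the feature flags depend on the driver, and DEFINE_DRM_GEM_SHMEM_FOPS() is assumed to expand to a file_operations whose .mmap is drm_gem_shmem_mmap(), as the comment above describes:

DEFINE_DRM_GEM_SHMEM_FOPS(foo_fops);

static struct drm_driver foo_drm_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.fops			   = &foo_fops,
	.dumb_create		   = drm_gem_shmem_dumb_create,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	/* name, desc and the remaining callbacks omitted */
};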
+
+/**
+ * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ */
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
+}
+EXPORT_SYMBOL(drm_gem_shmem_print_info);
+
+/**
+ * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
+ * pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ * scatter/gather table for a shmem GEM object.
+ * @obj: GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist yet, the pages are pinned, dma-mapped, and an sg
+ * table is created.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
+ * negative error code on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct sg_table *sgt;
+
+ if (shmem->sgt)
+ return shmem->sgt;
+
+ WARN_ON(obj->import_attach);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ return ERR_PTR(ret);
+
+ sgt = drm_gem_shmem_get_sg_table(&shmem->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_put_pages;
+ }
+ /* Map the pages for use by the h/w. */
+ dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+
+ shmem->sgt = sgt;
+
+ return sgt;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
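
A sketch of how a driver might use this helper just before programming the hardware; foo_hw_map() is a hypothetical device-specific mapping routine:

static int foo_map_bo_for_dma(struct drm_gem_object *obj)
{
	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
	struct scatterlist *sg;
	int i;

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/*
	 * The pages are now pinned and dma-mapped for obj->dev->dev;
	 * walk the table to program the device's page tables.
	 */
	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		foo_hw_map(sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}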
+
+/**
+ * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
+ * another driver's scatter/gather table of pinned pages
+ * @dev: Device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Drivers that use the shmem helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
+ size_t npages = size >> PAGE_SHIFT;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!shmem->pages) {
+ ret = -ENOMEM;
+ goto err_free_gem;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
+ if (ret < 0)
+ goto err_free_array;
+
+ shmem->sgt = sgt;
+ shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
+
+ DRM_DEBUG_PRIME("size = %zu\n", size);
+
+ return &shmem->base;
+
+err_free_array:
+ kvfree(shmem->pages);
+err_free_gem:
+ drm_gem_object_put_unlocked(&shmem->base);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 251d67e04c2d..e19ac7ca602d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -71,8 +71,10 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
/* drm_irq.c */
/* IOCTLS */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -180,12 +182,20 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 0e3043e08c69..374b372da58a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -156,6 +156,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_map32 {
u32 offset; /* Requested physical address (0 for SAREA) */
u32 size; /* Requested physical size (bytes) */
@@ -239,6 +240,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
map.handle = compat_ptr(handle);
return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
+#endif
typedef struct drm_client32 {
int idx; /* Which client desired? */
@@ -301,6 +303,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_buf_desc32 {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
@@ -604,6 +607,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
return 0;
}
+#endif
#if IS_ENABLED(CONFIG_AGP)
typedef struct drm_agp_mode32 {
@@ -748,6 +752,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
}
#endif /* CONFIG_AGP */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
@@ -788,7 +793,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
-
+#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -903,10 +908,13 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
@@ -918,6 +926,7 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
+#endif
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
@@ -926,8 +935,10 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
+#endif
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 687943df58e1..2263e3ddd822 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -245,6 +245,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_SYNCOBJ:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
return 0;
+ case DRM_CAP_SYNCOBJ_TIMELINE:
+ req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE);
+ return 0;
}
/* Other caps only work with KMS drivers */
@@ -508,13 +511,6 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-static inline bool
-drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
-{
- return drm_core_check_feature(dev, DRIVER_RENDER) &&
- (flags & DRM_RENDER_ALLOW);
-}
-
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
@@ -529,19 +525,14 @@ drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
- const struct drm_device *dev = file_priv->minor->dev;
-
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
- /* AUTH is only for master ... */
- if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
- /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
- if (!file_priv->authenticated &&
- !drm_render_driver_and_ioctl(dev, flags))
- return -EACCES;
- }
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
@@ -565,6 +556,12 @@ EXPORT_SYMBOL(drm_ioctl_permit);
.name = #ioctl \
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
+#else
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
+#endif
+
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
@@ -572,7 +569,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -584,39 +583,38 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -629,8 +627,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
@@ -686,12 +684,20 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9bd8908d5fd8..02f38cc9f468 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -213,6 +213,7 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -253,3 +254,4 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
return -EINVAL;
}
}
+#endif
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 93e2b30fe1a5..9c5ae825c507 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights");
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n");
+ DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 603b0bd9c5ce..694ff363a90b 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -111,7 +111,7 @@ static bool _drm_has_leased(struct drm_master *master, int id)
*/
bool _drm_lease_held(struct drm_file *file_priv, int id)
{
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master)
return true;
return _drm_lease_held_master(file_priv->master, id);
@@ -133,7 +133,7 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
struct drm_master *master;
bool ret;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return true;
master = file_priv->master;
@@ -159,7 +159,7 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
int count_in, count_out;
uint32_t crtcs_out = 0;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return crtcs_in;
master = file_priv->master;
@@ -220,8 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = 0;
if (!idr_find(&dev->mode_config.object_idr, object))
error = -ENOENT;
- else if (!_drm_lease_held_master(lessor, object))
- error = -EACCES;
else if (_drm_has_leased(lessor, object))
error = -EBUSY;
@@ -403,11 +401,6 @@ static int fill_object_idr(struct drm_device *dev,
/* step one - get references to all the mode objects
and check for validity. */
for (o = 0; o < object_count; o++) {
- if ((int) object_ids[o] < 0) {
- ret = -EINVAL;
- goto out_free_objects;
- }
-
objects[o] = drm_mode_object_find(dev, lessor_priv,
object_ids[o],
DRM_MODE_OBJECT_ANY);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 280fbeb846ff..51f1fabfa145 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,11 +42,19 @@ struct drm_file;
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+#else
+static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
+#endif
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
@@ -56,6 +64,7 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+#endif
/*
* Generic Buffer Management
@@ -63,16 +72,39 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
#define DRM_MAP_HASH_OFFSET 0x10000000
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return drm_ht_create(&dev->map_hash, 12);
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
+{
+ drm_ht_remove(&dev->map_hash);
+}
+#else
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
+#endif
+
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+#endif
int __drm_legacy_infobufs(struct drm_device *, void *, int *,
int (*)(void *, int, struct drm_buf_entry *));
@@ -81,7 +113,17 @@ int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
int (*)(void *, int, unsigned long, struct drm_buf *),
struct drm_file *);
-#ifdef CONFIG_DRM_VM
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master);
+void drm_legacy_rmmaps(struct drm_device *dev);
+#else
+static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master) {}
+static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
@@ -103,23 +145,64 @@ struct drm_agp_mem {
};
/* drm_lock.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
+#else
+static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
+#endif
/* DMA support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_dma_setup(struct drm_device *dev);
void drm_legacy_dma_takedown(struct drm_device *dev);
+#else
+static inline int drm_legacy_dma_setup(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
+
void drm_legacy_free_buffer(struct drm_device *dev,
struct drm_buf * buf);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
+#else
+static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *filp) {}
+#endif
/* Scatter Gather Support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_sg_cleanup(struct drm_device *dev);
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_init_members(struct drm_device *dev);
+void drm_legacy_destroy_members(struct drm_device *dev);
+void drm_legacy_dev_reinit(struct drm_device *dev);
+#else
+static inline void drm_legacy_init_members(struct drm_device *dev) {}
+static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
+static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
+#else
+static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_master_legacy_init(struct drm_master *master);
+#else
+static inline void drm_master_legacy_init(struct drm_master *master) {}
+#endif
#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
new file mode 100644
index 000000000000..2fe786839ca8
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -0,0 +1,82 @@
+/**
+ * \file drm_legacy_misc.c
+ * Misc legacy support functions.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "drm_internal.h"
+#include "drm_legacy.h"
+
+void drm_legacy_init_members(struct drm_device *dev)
+{
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ spin_lock_init(&dev->buf_lock);
+ mutex_init(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_destroy_members(struct drm_device *dev)
+{
+ mutex_destroy(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_legacy_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+ drm_legacy_vma_flush(dev);
+ drm_legacy_dma_takedown(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+ DRM_DEBUG("lastclose completed\n");
+}
+
+void drm_master_legacy_init(struct drm_master *master)
+{
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 67a1a2ca7174..b70058e77a28 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -347,3 +347,22 @@ void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
+
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
+{
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+ return;
+
+ /*
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+ mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 40c4349cb939..132fef8ff1b6 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -35,6 +35,7 @@
#include <linux/highmem.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
@@ -150,15 +151,34 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-u64 drm_get_max_iomem(void)
+bool drm_need_swiotlb(int dma_bits)
{
struct resource *tmp;
resource_size_t max_iomem = 0;
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+ * ttm_populate_and_map_pages(), which bounce buffers so much in
+ * Xen it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
max_iomem = max(max_iomem, tmp->end);
}
- return max_iomem;
+ return max_iomem > ((u64)1 << dma_bits);
}
-EXPORT_SYMBOL(drm_get_max_iomem);
+EXPORT_SYMBOL(drm_need_swiotlb);
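
Callers that previously compared drm_get_max_iomem() against their DMA mask can now ask the question directly; a sketch with an example 40-bit device address width:

static bool foo_device_needs_swiotlb(void)
{
	/*
	 * True on Xen PV domains, when memory encryption is active, or
	 * when some I/O memory lies above the 40-bit boundary.
	 */
	return drm_need_swiotlb(40);
}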
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2b4f373736c7..8b4cd31ce7bd 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -106,25 +106,19 @@
static noinline void save_stack(struct drm_mm_node *node)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH,
- .skip = 1
- };
+ unsigned int n;
- save_stack_trace(&trace);
- if (trace.nr_entries != 0 &&
- trace.entries[trace.nr_entries-1] == ULONG_MAX)
- trace.nr_entries--;
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
/* May be called under spinlock, so avoid sleeping */
- node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+ node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}
static void show_leaks(struct drm_mm *mm)
{
struct drm_mm_node *node;
- unsigned long entries[STACKDEPTH];
+ unsigned long *entries;
+ unsigned int nr_entries;
char *buf;
buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
return;
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = STACKDEPTH
- };
-
if (!node->stack) {
DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
node->start, node->size);
continue;
}
- depot_fetch_stack(node->stack, &trace);
- snprint_stack_trace(buf, BUFSZ, &trace, 0);
+ nr_entries = stack_depot_fetch(node->stack, &entries);
+ stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
node->start, node->size, buf);
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 4a1c2023ccf0..1a346ae1599d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,8 +297,9 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
- prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
- 0);
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "FB_DAMAGE_CLIPS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_damage_clips = prop;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index a9005c1c2384..f32507e65b79 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -451,6 +451,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
}
static int set_property_atomic(struct drm_mode_object *obj,
+ struct drm_file *file_priv,
struct drm_property *prop,
uint64_t prop_value)
{
@@ -477,7 +478,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
if (ret)
goto out;
ret = drm_atomic_commit(state);
@@ -520,7 +521,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
goto out_unref;
if (drm_drv_uses_atomic_modeset(property->dev))
- ret = set_property_atomic(arg_obj, property, arg->value);
+ ret = set_property_atomic(arg_obj, file_priv, property, arg->value);
else
ret = set_property_legacy(arg_obj, property, arg->value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 869ac6f4671e..56f92a0bba62 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -655,22 +655,22 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* @bus_flags: information about pixelclk, sync and DE polarity will be stored
* here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
- * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE
+ * and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
* found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
*bus_flags = 0;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
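A small sketch of how a panel or bridge driver might consume the renamed flags, assuming the videomode has already been parsed (for example from device tree); the demo helper is illustrative only.

#include <video/videomode.h>
#include <drm/drm_modes.h>

/* Translate a parsed struct videomode into connector bus flags; the new
 * *_DRIVE_* names spell out that the flags describe the driving edge. */
static u32 demo_bus_flags(const struct videomode *vm)
{
        u32 bus_flags;

        drm_bus_flags_from_videomode(vm, &bus_flags);
        return bus_flags; /* e.g. DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE */
}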
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52e445bb1aa5..521aff99b08a 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 4cfb56893b7f..d6ad60ab0d38 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -960,6 +960,11 @@ retry:
if (ret)
goto out;
+ if (!drm_lease_held(file_priv, crtc->cursor->base.id)) {
+ ret = -EACCES;
+ goto out;
+ }
+
ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
goto out;
}
@@ -1062,6 +1067,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
plane = crtc->primary;
+ if (!drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
if (crtc->funcs->page_flip_target) {
u32 current_vblank;
int r;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..dc079efb3b0f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
.size = obj->size,
.flags = flags,
.priv = obj,
+ .resv = obj->resv,
};
if (dev->driver->gem_prime_res_obj)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 0e7fc3e7dfb4..f5cb0aabfe35 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -253,3 +253,31 @@ void drm_err(const char *format, ...)
va_end(args);
}
EXPORT_SYMBOL(drm_err);
+
+/**
+ * drm_print_regset32 - print the contents of registers to a
+ * &drm_printer stream.
+ *
+ * @p: the &drm_printer
+ * @regset: the list of registers to print.
+ *
+ * Often in driver debug, it's useful to be able to capture the contents
+ * of registers either in the steady state using debugfs or at specific
+ * points during operation. This lets the driver have a
+ * single list of registers for both.
+ */
+void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
+{
+ int namelen = 0;
+ int i;
+
+ for (i = 0; i < regset->nregs; i++)
+ namelen = max(namelen, (int)strlen(regset->regs[i].name));
+
+ for (i = 0; i < regset->nregs; i++) {
+ drm_printf(p, "%*s = 0x%08x\n",
+ namelen, regset->regs[i].name,
+ readl(regset->base + regset->regs[i].offset));
+ }
+}
+EXPORT_SYMBOL(drm_print_regset32);
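A hedged usage sketch for the new helper: the register table, device pointer and MMIO base below are illustrative assumptions. The same debugfs_regset32 could equally back a debugfs file, which is the point of the shared list.

#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <drm/drm_print.h>

/* Illustrative register list; names and offsets are made up. */
static const struct debugfs_reg32 demo_regs[] = {
        { "CTRL",   0x00 },
        { "STATUS", 0x04 },
};

static void demo_dump_regs(struct device *dev, void __iomem *mmio)
{
        struct debugfs_regset32 regset = {
                .regs  = demo_regs,
                .nregs = ARRAY_SIZE(demo_regs),
                .base  = mmio,
        };
        struct drm_printer p = drm_info_printer(dev);

        /* One-off dump to the kernel log via the drm_printer. */
        drm_print_regset32(&p, &regset);
}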
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index e19525af0cce..3d400905100b 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -61,6 +61,7 @@ struct syncobj_wait_entry {
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
+ u64 point;
};
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@ -95,6 +96,8 @@ EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
if (wait->fence)
return;
@@ -103,11 +106,15 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
* have the lock, try one more time just to be sure we don't add a
* callback when a fence has already been set.
*/
- if (syncobj->fence)
- wait->fence = dma_fence_get(
- rcu_dereference_protected(syncobj->fence, 1));
- else
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
list_add_tail(&wait->node, &syncobj->cb_list);
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
spin_unlock(&syncobj->lock);
}
@@ -123,6 +130,44 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
}
/**
+ * drm_syncobj_add_point - add new timeline point to the syncobj
+ * @syncobj: sync object to add the timeline point to
+ * @chain: chain node to use to add the point
+ * @fence: fence to encapsulate in the chain node
+ * @point: sequence number to use for the point
+ *
+ * Add the chain node as new timeline point to the syncobj.
+ */
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain,
+ struct dma_fence *fence,
+ uint64_t point)
+{
+ struct syncobj_wait_entry *cur, *tmp;
+ struct dma_fence *prev;
+
+ dma_fence_get(fence);
+
+ spin_lock(&syncobj->lock);
+
+ prev = drm_syncobj_fence_get(syncobj);
+ /* Adding an unordered point to the timeline could cause the payload returned from query_ioctl to be 0! */
+ if (prev && prev->seqno >= point)
+ DRM_ERROR("You are adding an unordered point to the timeline!\n");
+ dma_fence_chain_init(chain, prev, fence, point);
+ rcu_assign_pointer(syncobj->fence, &chain->base);
+
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, cur);
+ spin_unlock(&syncobj->lock);
+
+ /* Walk the chain once to trigger garbage collection */
+ dma_fence_chain_for_each(fence, prev);
+ dma_fence_put(prev);
+}
+EXPORT_SYMBOL(drm_syncobj_add_point);
+
+/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
* @fence: fence to install in sync file.
@@ -145,10 +190,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
- list_del_init(&cur->node);
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
syncobj_wait_syncobj_func(syncobj, cur);
- }
}
spin_unlock(&syncobj->lock);
@@ -171,6 +214,8 @@ static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
dma_fence_put(fence);
}
+/* 5s default for wait submission */
+#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
* @file_private: drm file private pointer
@@ -191,16 +236,58 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
- int ret = 0;
+ struct syncobj_wait_entry wait;
+ u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
+ int ret;
if (!syncobj)
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- if (!*fence) {
+ drm_syncobj_put(syncobj);
+
+ if (*fence) {
+ ret = dma_fence_chain_find_seqno(fence, point);
+ if (!ret)
+ return 0;
+ dma_fence_put(*fence);
+ } else {
ret = -EINVAL;
}
- drm_syncobj_put(syncobj);
+
+ if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return ret;
+
+ memset(&wait, 0, sizeof(wait));
+ wait.task = current;
+ wait.point = point;
+ drm_syncobj_fence_add_wait(syncobj, &wait);
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (wait.fence) {
+ ret = 0;
+ break;
+ }
+ if (timeout == 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = schedule_timeout(timeout);
+ } while (1);
+
+ __set_current_state(TASK_RUNNING);
+ *fence = wait.fence;
+
+ if (wait.node.next)
+ drm_syncobj_remove_wait(syncobj, &wait);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -388,20 +475,19 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
int fd, u32 *handle)
{
struct drm_syncobj *syncobj;
- struct file *file;
+ struct fd f = fdget(fd);
int ret;
- file = fget(fd);
- if (!file)
+ if (!f.file)
return -EINVAL;
- if (file->f_op != &drm_syncobj_file_fops) {
- fput(file);
+ if (f.file->f_op != &drm_syncobj_file_fops) {
+ fdput(f);
return -EINVAL;
}
/* take a reference to put in the idr */
- syncobj = file->private_data;
+ syncobj = f.file->private_data;
drm_syncobj_get(syncobj);
idr_preload(GFP_KERNEL);
@@ -416,7 +502,7 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
} else
drm_syncobj_put(syncobj);
- fput(file);
+ fdput(f);
return ret;
}
@@ -593,6 +679,80 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
+static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence *fence;
+ struct dma_fence_chain *chain;
+ int ret;
+
+ timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!timeline_syncobj) {
+ return -ENOENT;
+ }
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags,
+ &fence);
+ if (ret)
+ goto err;
+ chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chain) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+err1:
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(timeline_syncobj);
+
+ return ret;
+}
+
+static int
+drm_syncobj_transfer_to_binary(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *binary_syncobj = NULL;
+ struct dma_fence *fence;
+ int ret;
+
+ binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!binary_syncobj)
+ return -ENOENT;
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags, &fence);
+ if (ret)
+ goto err;
+ drm_syncobj_replace_fence(binary_syncobj, fence);
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(binary_syncobj);
+
+ return ret;
+}
+int
+drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_transfer *args = data;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->dst_point)
+ ret = drm_syncobj_transfer_to_timeline(file_private, args);
+ else
+ ret = drm_syncobj_transfer_to_binary(file_private, args);
+
+ return ret;
+}
+
static void syncobj_wait_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -605,13 +765,27 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
/* This happens inside the syncobj lock */
- wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
- lockdep_is_held(&syncobj->lock)));
+ fence = rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock));
+ dma_fence_get(fence);
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
+ return;
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
+
wake_up_process(wait->task);
+ list_del_init(&wait->node);
}
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ void __user *user_points,
uint32_t count,
uint32_t flags,
signed long timeout,
@@ -619,12 +793,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
+ uint64_t *points;
uint32_t signaled_count, i;
- entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
- if (!entries)
+ points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+ if (points == NULL)
return -ENOMEM;
+ if (!user_points) {
+ memset(points, 0, count * sizeof(uint64_t));
+
+ } else if (copy_from_user(points, user_points,
+ sizeof(uint64_t) * count)) {
+ timeout = -EFAULT;
+ goto err_free_points;
+ }
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ timeout = -ENOMEM;
+ goto err_free_points;
+ }
/* Walk the list of sync objects and initialize entries. We do
* this up-front so that we can properly return -EINVAL if there is
* a syncobj with a missing fence and then never have the chance of
@@ -632,9 +821,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
*/
signaled_count = 0;
for (i = 0; i < count; ++i) {
+ struct dma_fence *fence;
+
entries[i].task = current;
- entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
- if (!entries[i].fence) {
+ entries[i].point = points[i];
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
continue;
} else {
@@ -643,7 +836,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
}
}
- if (dma_fence_is_signaled(entries[i].fence)) {
+ if (fence)
+ entries[i].fence = fence;
+ else
+ entries[i].fence = dma_fence_get_stub();
+
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(entries[i].fence)) {
if (signaled_count == 0 && idx)
*idx = i;
signaled_count++;
@@ -676,7 +875,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (!fence)
continue;
- if (dma_fence_is_signaled(fence) ||
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(fence) ||
(!entries[i].fence_cb.func &&
dma_fence_add_callback(fence,
&entries[i].fence_cb,
@@ -721,6 +921,9 @@ cleanup_entries:
}
kfree(entries);
+err_free_points:
+ kfree(points);
+
return timeout;
}
@@ -731,7 +934,7 @@ cleanup_entries:
*
* Calculate the timeout in jiffies from an absolute time in sec/nsec.
*/
-static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
ktime_t abs_timeout, now;
u64 timeout_ns, timeout_jiffies64;
@@ -755,23 +958,38 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
return timeout_jiffies64 + 1;
}
+EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
- struct drm_syncobj **syncobjs)
+ struct drm_syncobj_timeline_wait *timeline_wait,
+ struct drm_syncobj **syncobjs, bool timeline)
{
- signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long timeout = 0;
uint32_t first = ~0;
- timeout = drm_syncobj_array_wait_timeout(syncobjs,
- wait->count_handles,
- wait->flags,
- timeout, &first);
- if (timeout < 0)
- return timeout;
-
- wait->first_signaled = first;
+ if (!timeline) {
+ timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ NULL,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ wait->first_signaled = first;
+ } else {
+ timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ u64_to_user_ptr(timeline_wait->points),
+ timeline_wait->count_handles,
+ timeline_wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ timeline_wait->first_signaled = first;
+ }
return 0;
}
@@ -857,13 +1075,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
return ret;
ret = drm_syncobj_array_wait(dev, file_private,
- args, syncobjs);
+ args, NULL, syncobjs, false);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ NULL, args, syncobjs, true);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
+
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
@@ -929,3 +1182,138 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int
+drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ struct dma_fence_chain **chains;
+ uint64_t *points;
+ uint32_t i, j;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ points = kmalloc_array(args->count_handles, sizeof(*points),
+ GFP_KERNEL);
+ if (!points) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!u64_to_user_ptr(args->points)) {
+ memset(points, 0, args->count_handles * sizeof(uint64_t));
+ } else if (copy_from_user(points, u64_to_user_ptr(args->points),
+ sizeof(uint64_t) * args->count_handles)) {
+ ret = -EFAULT;
+ goto err_points;
+ }
+
+ chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
+ if (!chains) {
+ ret = -ENOMEM;
+ goto err_points;
+ }
+ for (i = 0; i < args->count_handles; i++) {
+ chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chains[i]) {
+ for (j = 0; j < i; j++)
+ kfree(chains[j]);
+ ret = -ENOMEM;
+ goto err_chains;
+ }
+ }
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence *fence = dma_fence_get_stub();
+
+ drm_syncobj_add_point(syncobjs[i], chains[i],
+ fence, points[i]);
+ dma_fence_put(fence);
+ }
+err_chains:
+ kfree(chains);
+err_points:
+ kfree(points);
+out:
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint64_t __user *points = u64_to_user_ptr(args->points);
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence_chain *chain;
+ struct dma_fence *fence;
+ uint64_t point;
+
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ chain = to_dma_fence_chain(fence);
+ if (chain) {
+ struct dma_fence *iter, *last_signaled = NULL;
+
+ dma_fence_chain_for_each(iter, fence) {
+ if (!iter)
+ break;
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
+ if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
+ dma_fence_put(last_signaled);
+ } else {
+ point = 0;
+ }
+ ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+ ret = ret ? -EFAULT : 0;
+ if (ret)
+ break;
+ }
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
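A rough sketch of the driver-side signal path the new drm_syncobj_add_point() enables; the demo helper and its arguments are assumptions, not code from this series.

#include <linux/slab.h>
#include <linux/dma-fence-chain.h>
#include <drm/drm_syncobj.h>

/* Attach a hardware completion fence as point @seq on a timeline syncobj.
 * @syncobj and @hw_fence are assumed to be referenced by the caller. */
static int demo_signal_timeline(struct drm_syncobj *syncobj,
                                struct dma_fence *hw_fence, u64 seq)
{
        struct dma_fence_chain *chain;

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return -ENOMEM;

        /* Grabs its own reference on @hw_fence, consumes @chain, and wakes
         * any WAIT_FOR_SUBMIT waiters parked on this point. */
        drm_syncobj_add_point(syncobj, chain, hw_fence, seq);
        return 0;
}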
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3301046dfaa..10cf83d569e1 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
- /* fall through to _DRM_FRAME_BUFFER... */
#endif
+ /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
+ /* fall through - to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
@@ -646,6 +646,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_legacy_mmap);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
@@ -656,3 +657,4 @@ void drm_legacy_vma_flush(struct drm_device *dev)
kfree(vma);
}
}
+#endif
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index c20e6fe00cb3..79ac014701c8 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -239,14 +239,52 @@ fail:
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+int drm_writeback_set_fb(struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job) {
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+ if (!conn_state->writeback_job)
+ return -ENOMEM;
+
+ conn_state->writeback_job->connector =
+ drm_connector_to_writeback(conn_state->connector);
+ }
+
+ drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
+ return 0;
+}
+
+int drm_writeback_prepare_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+ int ret;
+
+ if (funcs->prepare_writeback_job) {
+ ret = funcs->prepare_writeback_job(connector, job);
+ if (ret < 0)
+ return ret;
+ }
+
+ job->prepared = true;
+ return 0;
+}
+EXPORT_SYMBOL(drm_writeback_prepare_job);
+
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
- * @job: The job to queue
+ * @conn_state: The connector state containing the job to queue
*
- * This function adds a job to the job_queue for a writeback connector. It
- * should be considered to take ownership of the writeback job, and so any other
- * references to the job must be cleared after calling this function.
+ * This function adds the job contained in @conn_state to the job_queue for a
+ * writeback connector. It takes ownership of the writeback job and sets the
+ * @conn_state->writeback_job to NULL, and so no access to the job may be
+ * performed by the caller after this function returns.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
@@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
- struct drm_writeback_job *job)
+ struct drm_connector_state *conn_state)
{
+ struct drm_writeback_job *job;
unsigned long flags;
+ job = conn_state->writeback_job;
+ conn_state->writeback_job = NULL;
+
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
+void drm_writeback_cleanup_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+
+ if (job->prepared && funcs->cleanup_writeback_job)
+ funcs->cleanup_writeback_job(connector, job);
+
+ if (job->fb)
+ drm_framebuffer_put(job->fb);
+
+ kfree(job);
+}
+EXPORT_SYMBOL(drm_writeback_cleanup_job);
+
/*
* @cleanup_work: deferred cleanup of a writeback job
*
@@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work)
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
- drm_framebuffer_put(job->fb);
- kfree(job);
-}
+ drm_writeback_cleanup_job(job);
+}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
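A hedged sketch of the reworked job flow from a driver's point of view: the job now sits in the connector state until it is handed over with drm_writeback_queue_job(). The demo function and the placement of the completion call are illustrative only.

#include <drm/drm_writeback.h>

/* Hypothetical atomic-commit tail for a CRTC with a writeback connector. */
static void demo_commit_writeback(struct drm_writeback_connector *wb_conn,
                                  struct drm_connector_state *conn_state)
{
        if (!conn_state->writeback_job)
                return;

        /* Hands ownership of conn_state->writeback_job to the connector
         * and clears the pointer in the state. */
        drm_writeback_queue_job(wb_conn, conn_state);

        /* ... kick the hardware; then, from the write-out completion IRQ: */
        drm_writeback_signal_completion(wb_conn, 0);
}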
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 18c27f795cf6..7eb7cf9c3fa8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
@@ -515,6 +514,9 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
+ dev->dma_parms = &priv->dma_parms;
+ dma_set_max_seg_size(dev, SZ_2G);
+
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -552,6 +554,8 @@ static void etnaviv_unbind(struct device *dev)
component_unbind_all(dev, drm);
+ dev->dma_parms = NULL;
+
drm->dev_private = NULL;
kfree(priv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a6a7ded37ef1..8798423705e1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -42,6 +42,7 @@ struct etnaviv_file_private {
struct etnaviv_drm_private {
int num_gpus;
+ struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
/* list of GEM objects: */
@@ -60,7 +61,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c48915f492d..e8778ebb72e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+ if (!reservation_object_test_signaled_rcu(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ ret = reservation_object_wait_timeout_rcu(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct reservation_object *robj = etnaviv_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
drm_gem_free_mmap_offset(obj);
etnaviv_obj->ops->release(etnaviv_obj);
- if (etnaviv_obj->resv == &etnaviv_obj->_resv)
- reservation_object_fini(&etnaviv_obj->_resv);
drm_gem_object_release(obj);
kfree(etnaviv_obj);
@@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
- if (robj) {
- etnaviv_obj->resv = robj;
- } else {
- etnaviv_obj->resv = &etnaviv_obj->_resv;
- reservation_object_init(&etnaviv_obj->_resv);
- }
+ if (robj)
+ etnaviv_obj->base.resv = robj;
mutex_init(&etnaviv_obj->lock);
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
@@ -628,24 +622,18 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
ret = drm_gem_object_init(dev, obj, size);
- if (ret == 0) {
- struct address_space *mapping;
-
- /*
- * Our buffers are kept pinned, so allocating them
- * from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See comments above new_inode()
- * why this is required _and_ expected if you're
- * going to pin these pages.
- */
- mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- }
-
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the MOVABLE
+ * zone is a really bad idea, and conflicts with CMA. See comments
+ * above new_inode() why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index f0abb744ef95..753c458497d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -47,10 +47,6 @@ struct etnaviv_gem_object {
struct sg_table *sgt;
void *vaddr;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
struct list_head vram_list;
/* cache maintenance */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f21529e635e3..00e8b6a817e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -139,10 +139,3 @@ fail:
return ERR_PTR(ret);
}
-
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-
- return etnaviv_obj->resv;
-}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b2fe3446bfbc..e054f09ac828 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -108,9 +108,9 @@ out_unlock:
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
if (submit->bos[i].flags & BO_LOCKED) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
- ww_mutex_unlock(&etnaviv_obj->resv->lock);
+ ww_mutex_unlock(&obj->resv->lock);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
@@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
retry:
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (slow_locked == i)
slow_locked = -1;
@@ -130,7 +130,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&obj->resv->lock,
ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
@@ -153,12 +153,12 @@ fail:
submit_unlock_object(submit, slow_locked);
if (ret == -EDEADLK) {
- struct etnaviv_gem_object *etnaviv_obj;
+ struct drm_gem_object *obj;
- etnaviv_obj = submit->bos[contended].obj;
+ obj = &submit->bos[contended].obj->base;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- struct reservation_object *robj = bo->obj->resv;
+ struct reservation_object *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
ret = reservation_object_reserve_shared(robj, 1);
@@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(etnaviv_obj->resv,
+ reservation_object_add_excl_fence(obj->resv,
submit->out_fence);
else
- reservation_object_add_shared_fence(etnaviv_obj->resv,
+ reservation_object_add_shared_fence(obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
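For context, a minimal sketch of the fencing pattern these hunks converge on, using the reservation object embedded in the GEM object (obj->resv) rather than a driver-private pointer; the demo helper assumes the caller already holds obj->resv->lock.

#include <linux/reservation.h>
#include <linux/dma-fence.h>
#include <drm/drm_gem.h>

/* Attach @fence to @obj as either the exclusive (write) or a shared (read)
 * fence.  Caller must hold the reservation lock (ww_mutex) of obj->resv. */
static int demo_attach_fence(struct drm_gem_object *obj,
                             struct dma_fence *fence, bool write)
{
        int ret;

        if (write) {
                reservation_object_add_excl_fence(obj->resv, fence);
                return 0;
        }

        ret = reservation_object_reserve_shared(obj->resv, 1);
        if (ret)
                return ret;
        reservation_object_add_shared_fence(obj->resv, fence);
        return 0;
}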
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 6904535475de..72d01e873160 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -365,6 +365,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
gpu->identity.model, gpu->identity.revision);
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
/*
* If there is a match in the HWDB, we aren't interested in the
* remaining register values, as they might be wrong.
@@ -412,7 +413,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
}
/* GC600 idle register reports zero bits where modules aren't present */
- if (gpu->identity.model == chipModel_GC600) {
+ if (gpu->identity.model == chipModel_GC600)
gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
VIVS_HI_IDLE_STATE_RA |
VIVS_HI_IDLE_STATE_SE |
@@ -421,9 +422,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
VIVS_HI_IDLE_STATE_PE |
VIVS_HI_IDLE_STATE_DE |
VIVS_HI_IDLE_STATE_FE;
- } else {
- gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
- }
etnaviv_hw_specs(gpu);
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5b4e0e8b23bc..73b318a7ef49 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -188,7 +188,7 @@ static void decon_setup_trigger(struct decon_context *ctx)
if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
- DRM_ERROR("Cannot update sysreg.\n");
+ DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg.\n");
}
static void decon_commit(struct exynos_drm_crtc *crtc)
@@ -356,7 +356,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -561,8 +561,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int win, i, ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
ret = clk_prepare_enable(ctx->clks[i]);
if (ret < 0)
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 381aa3d60e37..0217ee9a118d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -99,7 +99,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -107,8 +107,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
@@ -315,7 +313,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -422,9 +420,9 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
- DRM_DEBUG_KMS("start addr = 0x%lx\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
(unsigned long)val);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
state->crtc.w, state->crtc.h);
val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
@@ -442,7 +440,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD alpha */
@@ -622,7 +620,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ret = decon_ctx_initialize(ctx, drm_dev);
if (ret) {
- DRM_ERROR("decon_ctx_initialize failed.\n");
+ DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
return ret;
}
@@ -802,25 +800,29 @@ static int exynos7_decon_resume(struct device *dev)
ret = clk_prepare_enable(ctx->pclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 471242a5e580..b0288cf85701 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -83,7 +83,8 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode.\n");
+ DRM_DEV_ERROR(dp->dev,
+ "failed to create a new display mode.\n");
return num_modes;
}
@@ -111,7 +112,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (dp->ptn_bridge) {
ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to attach bridge to drm\n");
bridge->next = NULL;
return ret;
}
@@ -147,7 +149,8 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
if (ret) {
- DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ DRM_DEV_ERROR(dp->dev,
+ "failed: of_get_videomode() : %d\n", ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 3432c5ee9f0c..bef8bc3c8e00 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -62,7 +62,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
int ret;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
+ DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
dev_name(subdrv_dev));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index ae425c9a3f7b..6ea92173db9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -77,7 +77,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to create a new display mode\n");
return 0;
}
drm_display_mode_from_videomode(ctx->vm, mode);
@@ -108,7 +109,8 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
&exynos_dpi_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to initialize connector with drm\n");
return ret;
}
@@ -213,7 +215,8 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
ret = exynos_dpi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(encoder_to_dpi(encoder)->dev,
+ "failed to create connector ret = %d\n", ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a4253dd55f86..63a4b5074a99 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1483,7 +1483,8 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -1527,7 +1528,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
int ret = exynos_dsi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 1f11ab0f8e9d..832d22f57b4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -45,7 +45,8 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
* supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
+ DRM_DEV_ERROR(drm_dev->dev,
+ "Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
@@ -83,7 +84,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
- DRM_ERROR("failed to initialize framebuffer\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize framebuffer\n");
goto err;
}
@@ -113,7 +115,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_gem[i] = exynos_drm_gem_get(file_priv,
mode_cmd->handles[i]);
if (!exynos_gem[i]) {
- DRM_ERROR("failed to lookup gem object\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c30dd88cdb25..724cb52a374a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -55,7 +55,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n");
return ret;
}
@@ -83,22 +83,22 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- DRM_ERROR("failed to allocate fb info.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
- fbi->par = helper;
fbi->fbops = &exynos_drm_fb_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
nr_pages = exynos_gem->size >> PAGE_SHIFT;
exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!exynos_gem->kvaddr) {
- DRM_ERROR("failed to map pages to kernel space.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to map pages to kernel space.\n");
return -EIO;
}
@@ -122,9 +122,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
unsigned long size;
int ret;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "surface width(%d), height(%d) and bpp(%d\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -154,7 +155,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
- DRM_ERROR("failed to create drm framebuffer.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
@@ -203,20 +204,23 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
if (ret < 0) {
- DRM_ERROR("failed to initialize drm fb helper.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize drm fb helper.\n");
goto err_init;
}
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret < 0) {
- DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to register drm_fb_helper_connector.\n");
goto err_setup;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
- DRM_ERROR("failed to set up hw configuration.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to set up hw configuration.\n");
goto err_setup;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 90dfea0aec4d..c50b0f9270a4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -186,7 +186,7 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
@@ -201,7 +201,7 @@ static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
@@ -225,15 +225,16 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
- DRM_DEBUG_KMS("flag[0x%x]\n", flag);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "flag[0x%x]\n", flag);
if (status & flag) {
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
- dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
- ctx->id, status);
+ DRM_DEV_ERROR(ctx->dev,
+ "occurred overflow at %d, status 0x%x.\n",
+ ctx->id, status);
return true;
}
@@ -246,7 +247,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
- DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]\n", cfg);
if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
return false;
@@ -268,17 +269,17 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
if (frame_cnt == 0)
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
- DRM_DEBUG_KMS("present[%d]before[%d]\n",
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "present[%d]before[%d]\n",
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
if (frame_cnt == 0) {
- DRM_ERROR("failed to get frame count.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get frame count.\n");
return -EIO;
}
buf_id = frame_cnt - 1;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
return buf_id;
}
@@ -287,7 +288,7 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
@@ -302,7 +303,7 @@ static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -367,7 +368,7 @@ static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
@@ -420,7 +421,7 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg1, cfg2;
- DRM_DEBUG_KMS("rotation[%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[%x]\n", rotation);
cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -478,10 +479,11 @@ static void fimc_set_window(struct fimc_context *ctx,
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
- buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- real_width, buf->buf.height);
- DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
+ real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1,
+ v2);
/*
* set window offset 1, 2 size
@@ -506,7 +508,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
@@ -514,8 +517,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y, buf->rect.w, buf->rect.h);
/* set input DMA image size */
cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
@@ -560,7 +563,7 @@ static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -631,7 +634,7 @@ static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
@@ -691,7 +694,7 @@ static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg;
- DRM_DEBUG_KMS("rotation[0x%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[0x%x]\n", rotation);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
@@ -775,19 +778,20 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
pre_dst_width = src_w >> hfactor;
pre_dst_height = src_h >> vfactor;
- DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
- pre_dst_width, pre_dst_height);
- DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_dst_width[%d]pre_dst_height[%d]\n",
+ pre_dst_width, pre_dst_height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hfactor[%d]vfactor[%d]\n", hfactor,
+ vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
sc->up_h = (dst_w >= src_w) ? true : false;
sc->up_v = (dst_h >= src_h) ? true : false;
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
- sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ sc->hratio, sc->vratio, sc->up_h, sc->up_v);
shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
- DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
@@ -805,10 +809,10 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
- sc->range, sc->bypass, sc->up_h, sc->up_v);
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
- sc->hratio, sc->vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]\n",
+ sc->hratio, sc->vratio);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
@@ -846,7 +850,8 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
@@ -854,8 +859,9 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y,
+ buf->rect.w, buf->rect.h);
/* CSC ITU */
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
@@ -905,7 +911,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 buf_num;
u32 cfg;
- DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
@@ -945,7 +951,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
struct fimc_context *ctx = dev_id;
int buf_id;
- DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fimc id[%d]\n", ctx->id);
fimc_clear_irq(ctx);
if (fimc_check_ovf(ctx))
@@ -958,7 +964,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
if (buf_id < 0)
return IRQ_HANDLED;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
@@ -1128,9 +1134,10 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "fimc");
@@ -1147,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1380,7 +1387,7 @@ static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
return 0;
}
@@ -1389,7 +1396,7 @@ static int fimc_runtime_resume(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 786a8ee6f10f..8039e1a3671d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -315,7 +315,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
@@ -350,8 +350,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Hardware is in unknown state, so ensure it gets enabled properly */
pm_runtime_get_sync(ctx->dev);
@@ -400,7 +398,7 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
u32 clkdiv;
if (mode->clock == 0) {
- DRM_INFO("Mode has zero clock value.\n");
+ DRM_DEV_ERROR(ctx->dev, "Mode has zero clock value.\n");
return -EINVAL;
}
@@ -416,15 +414,17 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
lcd_rate = clk_get_rate(ctx->lcd_clk);
if (2 * lcd_rate < ideal_clk) {
- DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
- lcd_rate, ideal_clk);
+ DRM_DEV_ERROR(ctx->dev,
+ "sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
+ lcd_rate, ideal_clk);
return -EINVAL;
}
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
if (clkdiv >= 0x200) {
- DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk);
+ DRM_DEV_ERROR(ctx->dev, "requested pixel clock(%lu) too low\n",
+ ideal_clk);
return -EINVAL;
}
@@ -481,7 +481,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
- DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for I80 i/f.\n");
return;
}
} else {
@@ -525,7 +526,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_bypass_shift,
0x1 << driver_data->lcdblk_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass setting.\n");
return;
}
@@ -537,7 +539,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_mic_bypass_shift,
0x1 << driver_data->lcdblk_mic_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass mic.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass mic.\n");
return;
}
@@ -814,10 +817,11 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
- DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)dma_addr, val, size);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- state->crtc.w, state->crtc.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+ (unsigned long)dma_addr, val, size);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
+ state->crtc.w, state->crtc.h);
/* buffer size */
buf_offsize = pitch - (state->crtc.w * cpp);
@@ -847,8 +851,9 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- state->crtc.x, state->crtc.y, last_x, last_y);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
@@ -858,7 +863,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = state->crtc.w * state->crtc.h;
writel(val, ctx->regs + offset);
- DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd size = 0x%x\n",
+ (unsigned int)val);
}
fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
@@ -1252,13 +1258,17 @@ static int exynos_fimd_resume(struct device *dev)
ret = clk_prepare_enable(ctx->bus_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the bus clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->lcd_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the lcd clk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 24c536d6d9cf..c20b3a759370 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -429,7 +429,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
int ret;
if (!size) {
- DRM_ERROR("invalid userptr size.\n");
+ DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
@@ -482,7 +482,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
g2d_userptr->vec);
if (ret != npages) {
- DRM_ERROR("failed to get user pages from userptr.\n");
+ DRM_DEV_ERROR(g2d->dev,
+ "failed to get user pages from userptr.\n");
if (ret < 0)
goto err_destroy_framevec;
ret = -EFAULT;
@@ -503,7 +504,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
frame_vector_pages(g2d_userptr->vec),
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
- DRM_ERROR("failed to get sgt from pages.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
goto err_free_sgt;
}
@@ -511,7 +512,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
- DRM_ERROR("failed to map sgt with dma region.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
ret = -ENOMEM;
goto err_sg_free_table;
}
@@ -560,7 +561,7 @@ static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
g2d->current_pool = 0;
}
-static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
enum g2d_reg_type reg_type;
@@ -593,7 +594,8 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
break;
default:
reg_type = REG_TYPE_NONE;
- DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+ DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
+ reg_offset);
break;
}
@@ -627,9 +629,10 @@ static unsigned long g2d_get_buf_bpp(unsigned int format)
return bpp;
}
-static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
- enum g2d_reg_type reg_type,
- unsigned long size)
+static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
+ struct g2d_buf_desc *buf_desc,
+ enum g2d_reg_type reg_type,
+ unsigned long size)
{
int width, height;
unsigned long bpp, last_pos;
@@ -644,14 +647,15 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
/* This check also makes sure that right_x > left_x. */
width = (int)buf_desc->right_x - (int)buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
- DRM_ERROR("width[%d] is out of range!\n", width);
+ DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
return false;
}
/* This check also makes sure that bottom_y > top_y. */
height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
- DRM_ERROR("height[%d] is out of range!\n", height);
+ DRM_DEV_ERROR(g2d->dev,
+ "height[%d] is out of range!\n", height);
return false;
}
@@ -670,8 +674,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
*/
if (last_pos >= size) {
- DRM_ERROR("last engine access position [%lu] "
- "is out of range [%lu]!\n", last_pos, size);
+ DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
+ "is out of range [%lu]!\n", last_pos, size);
return false;
}
@@ -701,7 +705,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
offset = cmdlist->data[reg_pos];
handle = cmdlist->data[reg_pos + 1];
- reg_type = g2d_get_reg_type(offset);
+ reg_type = g2d_get_reg_type(g2d, offset);
if (reg_type == REG_TYPE_NONE) {
ret = -EFAULT;
goto err;
@@ -718,7 +722,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc,
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
reg_type, exynos_gem->size)) {
exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
@@ -736,8 +740,9 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- g2d_userptr.size)) {
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
+ reg_type,
+ g2d_userptr.size)) {
ret = -EFAULT;
goto err;
}
@@ -845,7 +850,7 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
*
* Has to be called under runqueue lock.
*/
-static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file)
+static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
struct g2d_runqueue_node *node, *n;
@@ -1044,7 +1049,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (!for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -1058,7 +1063,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
buf_desc->stride = cmdlist->data[index + 1];
@@ -1068,7 +1073,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1080,7 +1085,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1093,7 +1098,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index df66c383a877..a55f5ac41bf3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -29,7 +29,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
int ret = -ENOMEM;
if (exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("already allocated.\n");
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
@@ -61,7 +61,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!exynos_gem->pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
return -ENOMEM;
}
@@ -69,7 +69,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
goto err_free;
}
@@ -77,20 +77,20 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to get sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
goto err_dma_free;
}
if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
nr_pages)) {
- DRM_ERROR("invalid sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
ret = -EINVAL;
goto err_sgt_free;
}
sg_free_table(&sgt);
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
return 0;
@@ -111,11 +111,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
struct drm_device *dev = exynos_gem->base.dev;
if (!exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
@@ -139,7 +139,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
if (ret)
return ret;
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put_unlocked(obj);
@@ -151,7 +151,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
struct drm_gem_object *obj = &exynos_gem->base;
- DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
+ obj->handle_count);
/*
* do not release memory region from exporter.
@@ -186,7 +187,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initialize gem object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
kfree(exynos_gem);
return ERR_PTR(ret);
}
@@ -198,7 +199,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
+ DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
return exynos_gem;
}
@@ -211,12 +212,13 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
+ DRM_DEV_ERROR(dev->dev,
+ "invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
- DRM_ERROR("invalid GEM buffer size: %lu\n", size);
+ DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
@@ -325,7 +327,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
return -EINVAL;
}
@@ -408,7 +410,8 @@ static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
- DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
+ exynos_gem->flags);
/* non-cachable as default. */
if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f048d97fe9e2..0bfb5e9f6e91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -395,7 +395,7 @@ static int gsc_sw_reset(struct gsc_context *ctx)
}
if (cfg) {
- DRM_ERROR("failed to reset gsc h/w.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to reset gsc h/w.\n");
return -EBUSY;
}
@@ -422,8 +422,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
- enable, overflow, done);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]overflow[%d]level[%d]\n",
+ enable, overflow, done);
cfg = gsc_read(GSC_IRQ);
cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
@@ -451,7 +451,7 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_IN_CON);
cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
@@ -638,7 +638,7 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_OUT_CON);
cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
@@ -706,12 +706,13 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
gsc_write(cfg, GSC_OUT_CON);
}
-static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+static int gsc_get_ratio_shift(struct gsc_context *ctx, u32 src, u32 dst,
+ u32 *ratio)
{
- DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "src[%d]dst[%d]\n", src, dst);
if (src >= dst * 8) {
- DRM_ERROR("failed to make ratio and shift.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to make ratio and shift.\n");
return -EINVAL;
} else if (src >= dst * 4)
*ratio = 4;
@@ -759,31 +760,31 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
dst_h = dst->h;
}
- ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ ret = gsc_get_ratio_shift(ctx, src_w, dst_w, &sc->pre_hratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio horizontal.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio horizontal.\n");
return ret;
}
- ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ ret = gsc_get_ratio_shift(ctx, src_h, dst_h, &sc->pre_vratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio vertical.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio vertical.\n");
return ret;
}
- DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
- sc->pre_hratio, sc->pre_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_hratio[%d]pre_vratio[%d]\n",
+ sc->pre_hratio, sc->pre_vratio);
sc->main_hratio = (src_w << 16) / dst_w;
sc->main_vratio = (src_h << 16) / dst_h;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
&sc->pre_shfactor);
- DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_shfactor[%d]\n", sc->pre_shfactor);
cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
GSC_PRESC_H_RATIO(sc->pre_hratio) |
@@ -849,8 +850,8 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
{
u32 cfg;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_set_h_coef(ctx, sc->main_hratio);
cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
@@ -916,7 +917,7 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
if (cfg & (mask << i))
buf_num--;
- DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_num[%d]\n", buf_num);
return buf_num;
}
@@ -963,7 +964,7 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_SRC;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
curr_index = GSC_IN_CURR_GET_INDEX(cfg);
@@ -975,11 +976,11 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
}
}
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
if (buf_id == GSC_MAX_SRC) {
- DRM_ERROR("failed to get in buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get in buffer index.\n");
return -EINVAL;
}
@@ -993,7 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_DST;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
@@ -1006,14 +1007,14 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
}
if (buf_id == GSC_MAX_DST) {
- DRM_ERROR("failed to get out buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get out buffer index.\n");
return -EINVAL;
}
gsc_dst_set_buf_seq(ctx, buf_id, false);
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
return buf_id;
}
@@ -1024,7 +1025,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
u32 status;
int err = 0;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
@@ -1042,8 +1043,8 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
src_buf_id = gsc_get_src_buf_index(ctx);
dst_buf_id = gsc_get_dst_buf_index(ctx);
- DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id,
- dst_buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id_src[%d]buf_id_dst[%d]\n",
+ src_buf_id, dst_buf_id);
if (src_buf_id < 0 || dst_buf_id < 0)
err = -EINVAL;
@@ -1169,9 +1170,10 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "gsc");
@@ -1188,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1324,7 +1326,7 @@ static int __maybe_unused gsc_runtime_suspend(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = ctx->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(ctx->clocks[i]);
@@ -1337,7 +1339,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i, ret;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 23226a0212e8..c862099723a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -43,7 +43,7 @@ static LIST_HEAD(ipp_list);
* Returns:
* Zero on success, error code on failure.
*/
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name)
@@ -67,7 +67,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
list_add_tail(&ipp->head, &ipp_list);
ipp->id = num_ipp++;
- DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
return 0;
}
@@ -77,7 +77,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
* @dev: DRM device
* @ipp: ipp module
*/
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp)
{
WARN_ON(ipp->task);
@@ -268,7 +268,7 @@ static inline struct exynos_drm_ipp_task *
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
- DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
return task;
}
@@ -335,7 +335,9 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
size -= map[i].size;
}
- DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Got task %pK configuration from userspace\n",
+ task);
return 0;
}
@@ -389,12 +391,12 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
- DRM_DEBUG_DRIVER("Freeing task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
if (task->event)
- drm_event_cancel_free(ipp->dev, &task->event->base);
+ drm_event_cancel_free(ipp->drm_dev, &task->event->base);
kfree(task);
}
@@ -553,8 +555,9 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
- DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
- buf == src ? "src" : "dst");
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: %s format not supported\n",
+ task, buf == src ? "src" : "dst");
return -EINVAL;
}
@@ -603,7 +606,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
- DRM_DEBUG_DRIVER("Checking task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
@@ -618,8 +621,9 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
src->rect.y + src->rect.h > (src->buf.height) ||
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
- DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
- task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: defined area is outside provided buffers\n",
+ task);
return -EINVAL;
}
@@ -635,7 +639,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
- DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
+ task);
return -EINVAL;
}
@@ -647,7 +652,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
+ task);
return ret;
}
@@ -658,20 +664,26 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
- DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
+ task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: src buffer setup failed\n",
+ task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: dst buffer setup failed\n",
+ task);
return ret;
}
- DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
+ task);
return ret;
}
@@ -691,7 +703,7 @@ static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
+ ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto free;
@@ -712,7 +724,7 @@ static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
- drm_send_event(task->dev, &task->event->base);
+ drm_send_event(task->ipp->drm_dev, &task->event->base);
}
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
@@ -749,7 +761,8 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
+ ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task == task)
@@ -773,7 +786,8 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
unsigned long flags;
int ret;
- DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
+ ipp->id);
spin_lock_irqsave(&ipp->lock, flags);
@@ -789,7 +803,9 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
spin_unlock_irqrestore(&ipp->lock, flags);
- DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, selected task %pK to run\n", ipp->id,
+ task);
ret = ipp->funcs->commit(ipp, task);
if (ret)
@@ -897,15 +913,16 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
* then freed after exynos_drm_ipp_task_done()
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
- DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
- ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, nonblocking processing task %pK\n",
+ ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
- DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
- task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
+ ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
task->flags & DRM_EXYNOS_IPP_TASK_DONE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 0b27d4a9bf94..5524c457a947 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -54,7 +54,8 @@ struct exynos_drm_ipp_funcs {
* struct exynos_drm_ipp - central picture processor module structure
*/
struct exynos_drm_ipp {
- struct drm_device *dev;
+ struct drm_device *drm_dev;
+ struct device *dev;
struct list_head head;
unsigned int id;
@@ -85,7 +86,7 @@ struct exynos_drm_ipp_buffer {
* has to be performed by the picture processor hardware module
*/
struct exynos_drm_ipp_task {
- struct drm_device *dev;
+ struct device *dev;
struct exynos_drm_ipp *ipp;
struct list_head head;
@@ -129,11 +130,11 @@ struct exynos_drm_ipp_formats {
#define IPP_SCALE_LIMIT(val...) \
.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name);
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp);
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index dd02e8a323ef..d1c8411ae7d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -113,7 +113,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_read(mic->sysreg, DSD_CFG_MUX, &val);
if (ret) {
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
return;
}
@@ -129,7 +130,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
}
static int mic_sw_reset(struct exynos_mic *mic)
@@ -190,7 +192,7 @@ static void mic_set_output_timing(struct exynos_mic *mic)
struct videomode vm = mic->vm;
u32 reg, bs_size_2d;
- DRM_DEBUG("w: %u, h: %u\n", vm.hactive, vm.vactive);
+ DRM_DEV_DEBUG(mic->dev, "w: %u, h: %u\n", vm.hactive, vm.vactive);
bs_size_2d = ((vm.hactive >> 2) << 1) + (vm.vactive % 4);
reg = MIC_BS_SIZE_2D(bs_size_2d);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_2);
@@ -274,7 +276,7 @@ static void mic_pre_enable(struct drm_bridge *bridge)
ret = mic_sw_reset(mic);
if (ret) {
- DRM_ERROR("Failed to reset\n");
+ DRM_DEV_ERROR(mic->dev, "Failed to reset\n");
goto turn_off;
}
@@ -354,8 +356,8 @@ static int exynos_mic_resume(struct device *dev)
for (i = 0; i < NUM_CLKS; i++) {
ret = clk_prepare_enable(mic->clks[i]);
if (ret < 0) {
- DRM_ERROR("Failed to enable clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "Failed to enable clock (%s)\n",
+ clk_names[i]);
while (--i > -1)
clk_disable_unprepare(mic->clks[i]);
return ret;
@@ -380,7 +382,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
if (!mic) {
- DRM_ERROR("mic: Failed to allocate memory for MIC object\n");
+ DRM_DEV_ERROR(dev,
+ "mic: Failed to allocate memory for MIC object\n");
ret = -ENOMEM;
goto err;
}
@@ -389,12 +392,12 @@ static int exynos_mic_probe(struct platform_device *pdev)
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
- DRM_ERROR("mic: Failed to get mem region for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get mem region for MIC\n");
goto err;
}
mic->reg = devm_ioremap(dev, res.start, resource_size(&res));
if (!mic->reg) {
- DRM_ERROR("mic: Failed to remap for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to remap for MIC\n");
ret = -ENOMEM;
goto err;
}
@@ -402,7 +405,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
- DRM_ERROR("mic: Failed to get system register.\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get system register.\n");
ret = PTR_ERR(mic->sysreg);
goto err;
}
@@ -410,8 +413,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CLKS; i++) {
mic->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(mic->clks[i])) {
- DRM_ERROR("mic: Failed to get clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "mic: Failed to get clock (%s)\n",
+ clk_names[i]);
ret = PTR_ERR(mic->clks[i]);
goto err;
}
@@ -430,7 +433,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
if (ret)
goto err_pm;
- DRM_DEBUG_KMS("MIC has been probed\n");
+ DRM_DEV_DEBUG_KMS(dev, "MIC has been probed\n");
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index df0508e0e49e..e18babb25170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -119,9 +119,10 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
exynos_state->crtc.w = actual_w;
exynos_state->crtc.h = actual_h;
- DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
- exynos_state->crtc.x, exynos_state->crtc.y,
- exynos_state->crtc.w, exynos_state->crtc.h);
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev,
+ "plane : offset_x/y(%d,%d), width/height(%d,%d)",
+ exynos_state->crtc.x, exynos_state->crtc.y,
+ exynos_state->crtc.w, exynos_state->crtc.h);
}
static void exynos_drm_plane_reset(struct drm_plane *plane)
@@ -181,6 +182,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = fb->dev;
switch (fb->modifier) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -192,7 +194,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
break;
default:
- DRM_ERROR("unsupported pixel format modifier");
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format modifier");
return -ENOTSUPP;
}
@@ -203,6 +205,7 @@ static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
+ struct drm_crtc *crtc = state->base.crtc;
bool width_ok = false, height_ok = false;
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
@@ -225,7 +228,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
if (width_ok && height_ok)
return 0;
- DRM_DEBUG_KMS("scaling mode is not supported");
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev, "scaling mode is not supported");
return -ENOTSUPP;
}
@@ -310,7 +313,7 @@ int exynos_plane_init(struct drm_device *dev,
config->num_pixel_formats,
NULL, config->type, NULL);
if (err) {
- DRM_ERROR("failed to initialize plane\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize plane\n");
return err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 05abfed6f7f8..b6586fa95ad1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -243,9 +243,10 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
rot->formats, rot->num_formats, "rotator");
@@ -258,10 +259,9 @@ static void rotator_unbind(struct device *dev, struct device *master,
void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index ed1dd1aec902..f1cbdd1e6e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -451,9 +451,10 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
scaler->scaler_data->formats,
@@ -468,10 +469,9 @@ static void scaler_unbind(struct device *dev, struct device *master,
void *data)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &scaler->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 29f4c1932aed..44bcb2d60bb2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -40,8 +40,8 @@
struct vidi_context {
struct drm_encoder encoder;
- struct platform_device *pdev;
struct drm_device *drm_dev;
+ struct device *dev;
struct exynos_drm_crtc *crtc;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
@@ -123,7 +123,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
return;
addr = exynos_drm_fb_dma_addr(state->fb, 0);
- DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
static void vidi_enable(struct exynos_drm_crtc *crtc)
@@ -205,11 +205,11 @@ static ssize_t vidi_store_connection(struct device *dev,
/* if raw_edid isn't same as fake data then it can't be tested. */
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
- DRM_DEBUG_KMS("edid data is not fake data.\n");
+ DRM_DEV_DEBUG_KMS(dev, "edid data is not fake data.\n");
return -EINVAL;
}
- DRM_DEBUG_KMS("requested connection.\n");
+ DRM_DEV_DEBUG_KMS(dev, "requested connection.\n");
drm_helper_hpd_irq_event(ctx->drm_dev);
@@ -226,17 +226,20 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
- DRM_DEBUG_KMS("user data for vidi is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "user data for vidi is null.\n");
return -EINVAL;
}
if (vidi->connection > 1) {
- DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "connection should be 0 or 1.\n");
return -EINVAL;
}
if (ctx->connected == vidi->connection) {
- DRM_DEBUG_KMS("same connection request.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "same connection request.\n");
return -EINVAL;
}
@@ -245,12 +248,14 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
raw_edid = (struct edid *)(unsigned long)vidi->edid;
if (!drm_edid_is_valid(raw_edid)) {
- DRM_DEBUG_KMS("edid data is invalid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "edid data is invalid.\n");
return -EINVAL;
}
ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "failed to allocate raw_edid.\n");
return -ENOMEM;
}
} else {
@@ -308,14 +313,14 @@ static int vidi_get_modes(struct drm_connector *connector)
* to ctx->raw_edid through specific ioctl.
*/
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("raw_edid is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
return -EFAULT;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
- DRM_DEBUG_KMS("failed to allocate edid\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
return -ENOMEM;
}
@@ -339,7 +344,8 @@ static int vidi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(ctx->drm_dev, connector,
&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -402,7 +408,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
- DRM_ERROR("failed to create crtc.\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc.\n");
return PTR_ERR(ctx->crtc);
}
@@ -417,7 +423,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ret = vidi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -441,13 +448,14 @@ static const struct component_ops vidi_component_ops = {
static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
+ struct device *dev = &pdev->dev;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->pdev = pdev;
+ ctx->dev = dev;
timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
@@ -455,20 +463,21 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0) {
- DRM_ERROR("failed to create connection sysfs.\n");
+ DRM_DEV_ERROR(dev,
+ "failed to create connection sysfs.\n");
return ret;
}
- ret = component_add(&pdev->dev, &vidi_component_ops);
+ ret = component_add(dev, &vidi_component_ops);
if (ret)
goto err_remove_file;
return ret;
err_remove_file:
- device_remove_file(&pdev->dev, &dev_attr_connection);
+ device_remove_file(dev, &dev_attr_connection);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 8e2c02fc66e8..19c252f659dd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -885,9 +885,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
return -ENODEV;
hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- edid->width_cm, edid->height_cm);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ edid->width_cm, edid->height_cm);
drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
@@ -908,7 +908,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
if (confs->data[i].pixel_clock == pixel_clock)
return i;
- DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "Could not find phy config for %d\n",
+ pixel_clock);
return -EINVAL;
}
@@ -918,10 +919,11 @@ static int hdmi_mode_valid(struct drm_connector *connector,
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
- false, mode->clock * 1000);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
+ false, mode->clock * 1000);
ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
if (ret < 0)
@@ -947,7 +949,8 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(hdata->drm_dev, connector,
&hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -957,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret)
- DRM_ERROR("Failed to attach bridge\n");
+ DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
return ret;
@@ -1002,8 +1005,10 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
- DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay,
+ m->vrefresh);
drm_mode_copy(adjusted_mode, m);
break;
@@ -1169,13 +1174,15 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY) {
- DRM_DEBUG_KMS("PLL stabilized after %d tries\n", tries);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "PLL stabilized after %d tries\n",
+ tries);
return;
}
usleep_range(10, 20);
}
- DRM_ERROR("PLL could not reach steady state\n");
+ DRM_DEV_ERROR(hdata->dev, "PLL could not reach steady state\n");
}
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
@@ -1411,7 +1418,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
- DRM_ERROR("failed to find hdmiphy conf\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to find hdmiphy conf\n");
return;
}
phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
@@ -1423,7 +1430,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
hdmiphy_enable_mode_set(hdata, true);
ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
if (ret) {
- DRM_ERROR("failed to configure hdmiphy\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to configure hdmiphy\n");
return;
}
hdmiphy_enable_mode_set(hdata, false);
@@ -1460,7 +1467,8 @@ static void hdmiphy_enable(struct hdmi_context *hdata)
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
- DRM_DEBUG_KMS("failed to enable regulator bulk\n");
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "failed to enable regulator bulk\n");
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
@@ -1734,7 +1742,7 @@ static int hdmi_bridge_init(struct hdmi_context *hdata)
np = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (!np) {
- DRM_ERROR("failed to get remote port parent");
+ DRM_DEV_ERROR(dev, "failed to get remote port parent");
return -EINVAL;
}
@@ -1752,17 +1760,17 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
int i, ret;
- DRM_DEBUG_KMS("HDMI resource init\n");
+ DRM_DEV_DEBUG_KMS(dev, "HDMI resource init\n");
hdata->hpd_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(hdata->hpd_gpio)) {
- DRM_ERROR("cannot get hpd gpio property\n");
+ DRM_DEV_ERROR(dev, "cannot get hpd gpio property\n");
return PTR_ERR(hdata->hpd_gpio);
}
hdata->irq = gpiod_to_irq(hdata->hpd_gpio);
if (hdata->irq < 0) {
- DRM_ERROR("failed to get GPIO irq\n");
+ DRM_DEV_ERROR(dev, "failed to get GPIO irq\n");
return hdata->irq;
}
@@ -1780,7 +1788,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("failed to get regulators\n");
+ DRM_DEV_ERROR(dev, "failed to get regulators\n");
return ret;
}
@@ -1792,7 +1800,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
- DRM_ERROR("failed to enable hdmi-en regulator\n");
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
return ret;
}
}
@@ -1845,7 +1854,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
ret = hdmi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -1875,7 +1885,8 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
if (!np) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find ddc node in device tree\n");
return -ENODEV;
}
@@ -1902,7 +1913,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
- DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find hdmiphy node in device tree\n");
return -ENODEV;
}
}
@@ -1910,7 +1922,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(np, 0);
if (!hdata->regs_hdmiphy) {
- DRM_ERROR("failed to ioremap hdmi phy\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "failed to ioremap hdmi phy\n");
ret = -ENOMEM;
goto out;
}
@@ -1951,7 +1964,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("hdmi_resources_init failed\n");
+ DRM_DEV_ERROR(dev, "hdmi_resources_init failed\n");
return ret;
}
@@ -1977,14 +1990,14 @@ static int hdmi_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi", hdata);
if (ret) {
- DRM_ERROR("failed to register hdmi interrupt\n");
+ DRM_DEV_ERROR(dev, "failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(hdata->pmureg)) {
- DRM_ERROR("syscon regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "syscon regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
@@ -1993,7 +2006,7 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,sysreg-phandle");
if (IS_ERR(hdata->sysreg)) {
- DRM_ERROR("sysreg regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "sysreg regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index f35e4ab55b27..b8415e53964d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -228,8 +228,8 @@ static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32)readl(ctx->mixer_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
@@ -260,8 +260,8 @@ static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32) readl(ctx->vp_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
@@ -885,7 +885,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "mixer_resources_init failed ret=%d\n", ret);
return ret;
}
@@ -893,7 +894,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "vp_resources_init failed ret=%d\n", ret);
return ret;
}
}
@@ -952,7 +954,7 @@ static void mixer_update_plane(struct exynos_drm_crtc *crtc,
{
struct mixer_context *mixer_ctx = crtc->ctx;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -969,7 +971,7 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct mixer_context *mixer_ctx = crtc->ctx;
unsigned long flags;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -1046,8 +1048,9 @@ static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
u32 w = mode->hdisplay, h = mode->vdisplay;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h,
- mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ w, h, mode->vrefresh,
+ !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
return MODE_OK;
@@ -1227,7 +1230,7 @@ static int mixer_probe(struct platform_device *pdev)
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
+ DRM_DEV_ERROR(dev, "failed to alloc mixer context.\n");
return -ENOMEM;
}
@@ -1282,27 +1285,33 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to prepare_enable the mixer clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the hdmi clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
- ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the vp clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the " \
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
- ret);
+ ret);
return ret;
}
}
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index 14a72c4c496d..dc825883400d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -2,7 +2,6 @@ config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index bf256971063d..83c841b50272 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -94,7 +94,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
drm_display_mode_to_videomode(mode, &vm);
/* INV_PXCK as default (most display sample data on rising edge) */
- if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
+ if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE))
pol |= DCU_SYN_POL_INV_PXCK;
if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index c934b3df1f81..a9d3a4a30ab8 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -389,7 +389,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
ret = PTR_ERR(info);
goto out;
}
- info->par = fbdev;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
@@ -402,9 +401,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fb = fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- strcpy(info->fix.id, "psbdrmfb");
-
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
else if (gtt_roll) { /* GTT rolling seems best */
@@ -427,8 +423,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
- sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 23dc3c5f8f0d..e8e6357f033b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -34,7 +34,7 @@ struct psb_framebuffer {
};
struct psb_fbdev {
- struct drm_fb_helper psb_fb_helper;
+ struct drm_fb_helper psb_fb_helper; /* must be first */
struct psb_framebuffer pfb;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 3c168ae77b0c..0a381c22de26 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -31,7 +31,7 @@ struct hibmc_framebuffer {
};
struct hibmc_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct hibmc_framebuffer *fb;
int size;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index de9d7cc97e44..8026859aa07d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -116,8 +116,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
goto out_release_fbi;
}
- info->par = hi_fbdev;
-
hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
if (IS_ERR(hi_fbdev->fb)) {
ret = PTR_ERR(hi_fbdev->fb);
@@ -129,14 +127,9 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
priv->fbdev->size = size;
hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
- strcpy(info->fix.id, "hibmcdrmfb");
-
info->fbops = &hibmc_drm_fb_ops;
- drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
- hi_fbdev->fb->fb.format->depth);
- drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2cf7317930a..8c2f9b9cafb3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -33,17 +33,10 @@ static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-hibmc_connector_best_encoder(struct drm_connector *connector)
-{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
.mode_valid = hibmc_connector_mode_valid,
- .best_encoder = hibmc_connector_best_encoder,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index dd383267884c..6093c421daff 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -21,8 +21,6 @@
#include "hibmc_drm_drv.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline struct hibmc_drm_private *
hibmc_bdev(struct ttm_bo_device *bd)
{
@@ -191,7 +189,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_device_init(&hibmc->bdev,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("error initializing bo driver: %d\n", ret);
@@ -322,14 +319,9 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct hibmc_drm_private *hibmc;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct hibmc_drm_private *hibmc = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- hibmc = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &hibmc->bdev);
}
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..cff45d81f42f
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+header_test_*.c
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 148be8e1a090..3d5f1cb6a76c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -15,7 +15,6 @@ config DRM_I915
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
- select BACKLIGHT_LCD_SUPPORT if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1787e1299b1b..fbcb0904f4a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,10 +32,13 @@ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
+# Extra header tests
+include $(src)/Makefile.header-test
+
# Please keep these build lists sorted!
# core driver code
-i915-y := i915_drv.o \
+i915-y += i915_drv.o \
i915_irq.o \
i915_memcpy.o \
i915_mm.o \
@@ -46,6 +49,7 @@ i915-y := i915_drv.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
+ i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
@@ -77,6 +81,7 @@ i915-y += \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
@@ -84,6 +89,7 @@ i915-y += \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
+ intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
new file mode 100644
index 000000000000..c1c391816fa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := \
+ i915_active_types.h \
+ i915_gem_context_types.h \
+ i915_priolist_types.h \
+ i915_scheduler_types.h \
+ i915_timeline_types.h \
+ intel_atomic_plane.h \
+ intel_audio.h \
+ intel_cdclk.h \
+ intel_color.h \
+ intel_connector.h \
+ intel_context_types.h \
+ intel_crt.h \
+ intel_csr.h \
+ intel_ddi.h \
+ intel_dp.h \
+ intel_dvo.h \
+ intel_engine_types.h \
+ intel_fbc.h \
+ intel_fbdev.h \
+ intel_frontbuffer.h \
+ intel_hdcp.h \
+ intel_hdmi.h \
+ intel_lspcon.h \
+ intel_lvds.h \
+ intel_panel.h \
+ intel_pipe_crc.h \
+ intel_pm.h \
+ intel_psr.h \
+ intel_sdvo.h \
+ intel_sprite.h \
+ intel_tv.h \
+ intel_workarounds_types.h
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+i915-$(CONFIG_DRM_I915_WERROR) += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 271fb46d4dd0..ea8324abc784 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3592d04c33b2..ab002cfd3cab 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (s->ring_id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1081,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1104,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
@@ -1321,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
@@ -1567,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1577,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
- char name[10] = "";
+ char name[16] = "";
- sprintf(name, "vgpu%d", vgpu->id);
+ snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e3f9caa7839f..e1c313da6c00 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 69a9a1b2ea4a..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
+ u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ page_num = obj->base.size >> PAGE_SHIFT;
+ ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
- for_each_sg(st->sgl, sg, fb_info->size, i) {
+ for_each_sg(st->sgl, sg, page_num, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
@@ -153,12 +155,12 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- info->size << PAGE_SHIFT);
+ roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu_fb_info *info,
int plane_id)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
int ret, tile_height = 1;
+ memset(info, 0, sizeof(*info));
+
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
return -EINVAL;
}
- info->size = (info->stride * roundup(info->height, tile_height)
- + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ info->size = info->stride * roundup(info->height, tile_height);
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
- if (((info->start >> PAGE_SHIFT) + info->size) >
- ggtt_total_entries(&dev_priv->ggtt)) {
- gvt_vgpu_err("Invalid GTT offset or size\n");
- return -EFAULT;
- }
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2c..f21b8fb5b37e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
- [RCS] = RCS_AS_CONTEXT_SWITCH,
- [BCS] = BCS_AS_CONTEXT_SWITCH,
- [VCS] = VCS_AS_CONTEXT_SWITCH,
- [VCS2] = VCS2_AS_CONTEXT_SWITCH,
- [VECS] = VECS_AS_CONTEXT_SWITCH,
+ [RCS0] = RCS_AS_CONTEXT_SWITCH,
+ [BCS0] = BCS_AS_CONTEXT_SWITCH,
+ [VCS0] = VCS_AS_CONTEXT_SWITCH,
+ [VCS1] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
{
- if (WARN_ON(ring_id < RCS ||
- ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+ if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {
@@ -527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+ intel_engine_mask_t engine_mask)
{
- unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
@@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 714d709829a2..5ccc2c695848 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9814773882ec..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -811,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +861,7 @@ err_free_spt:
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
unsigned long gfn, bool guest_pde_ips)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -936,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- intel_gvt_gtt_type_t cur_pt_type;
+ enum intel_gvt_gtt_type cur_pt_type;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
@@ -1076,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
} else {
int type = get_next_pt_type(we->type);
+ if (!gtt_type_is_pt(type))
+ goto err;
+
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
@@ -1855,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
* Zero on success, negative error code in pointer if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_mm *mm;
@@ -2309,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t type)
+ enum intel_gvt_gtt_type type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2504,6 +2507,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
+ free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
@@ -2524,6 +2528,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
+ oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!oos_page->mem) {
+ ret = -ENOMEM;
+ kfree(oos_page);
+ goto fail;
+ }
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
@@ -2587,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
* Zero on success, negative error code if failed.
*/
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index edb610dc5d86..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
unsigned long scratch_mfn;
};
-typedef enum {
- GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+ GTT_TYPE_INVALID = 0,
GTT_TYPE_GGTT_PTE,
@@ -124,7 +124,7 @@ typedef enum {
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
union {
struct {
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
/*
* The 4 PDPs in ring context. For 48bit addressing,
* only PDP0 is valid and point to PML4. For 32it
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
};
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[I915_GTT_PAGE_SIZE];
+ void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
struct intel_vgpu *vgpu;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
} shadow_page;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
u64 pdps[]);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8bce09de4b82..f5a328b5290a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
- void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
-#define INTEL_GVT_MAX_PIPE 4
-
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
- (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int engine_mask = 0;
+ intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- engine_mask |= (1 << RCS);
+ engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- engine_mask |= (1 << VCS);
+ engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- engine_mask |= (1 << BCS);
+ engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- engine_mask |= (1 << VECS);
+ engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- engine_mask |= (1 << VCS2);
+ engine_mask |= BIT(VCS1);
}
+ engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- unsigned int index = DSPSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = DSPSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = PRIMARY_A_FLIP_DONE,
- [PIPE_B] = PRIMARY_B_FLIP_DONE,
- [PIPE_C] = PRIMARY_C_FLIP_DONE,
- };
+ u32 pipe = DSPSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+ if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int index = SPRSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = SPRSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = SPRITE_A_FLIP_DONE,
- [PIPE_B] = SPRITE_B_FLIP_DONE,
- [PIPE_C] = SPRITE_C_FLIP_DONE,
- };
+ u32 pipe = SPRSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+ return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum pipe pipe = REG_50080_TO_PIPE(offset);
+ enum plane_id plane = REG_50080_TO_PLANE(offset);
+ int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ if (plane == PLANE_PRIMARY) {
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ } else {
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ }
+
+ if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -1181,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
@@ -1704,7 +1729,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- ENGINE_MASK(ring_id),
+ BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1724,19 +1749,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
- id = RCS;
+ id = RCS0;
break;
case 0x4264:
- id = VCS;
+ id = VCS0;
break;
case 0x4268:
- id = VCS2;
+ id = VCS1;
break;
case 0x426c:
- id = BCS;
+ id = BCS0;
break;
case 0x4270:
- id = VECS;
+ id = VECS0;
break;
default:
return -EINVAL;
@@ -1793,7 +1818,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_BSD2(dev_priv)) \
+ if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -1848,7 +1873,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
- MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+ MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
- MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+ MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+ MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+ MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+ MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+ MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+ MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+ MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+ MMIO_D(SKL_DFSM, D_SKL_PLUS);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
- u32 old_vreg = 0, old_sreg = 0;
+ u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
- old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
- vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
- | (vgpu_sreg(vgpu, offset) & mask);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6e..951681813230 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index ed4df2f6d60b..a55178884d67 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
- memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+ vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
intel_vgpu_reset_mmio(vgpu, true);
return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+ vgpu->mmio.vreg = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7902fb162d09..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,103 +41,103 @@
/* Raw offset is appened to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
- {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
- {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
- {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
- {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
- {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
- {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
- {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
- {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
- {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
- {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
- {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
- {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
- {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
- {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
- {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
- {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
- {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
-
- {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
- {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
- {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS0, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS0, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
+
+ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -150,11 +150,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int ring_id, i;
@@ -302,7 +302,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
- if (req->engine->id != RCS)
+ if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -328,15 +328,16 @@ out:
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -352,21 +353,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise device can go to RC6 state and interrupt invalidation
* process
*/
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+ if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
- intel_uncore_forcewake_get(dev_priv, fw);
+ intel_uncore_forcewake_get(uncore, fw);
- I915_WRITE_FW(reg, 0x1);
+ intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
- intel_uncore_forcewake_put(dev_priv, fw);
+ intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@@ -379,11 +380,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int i;
@@ -391,8 +392,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+ if (ring_id == RCS0 &&
+ (IS_KABYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)))
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -415,7 +418,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -493,7 +496,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
@@ -550,9 +554,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
	 * performance for batch mmio read/write, so we need to
	 * handle forcewake manually.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
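
A stand-alone sketch of the lookup pattern used by load_render_mocs()/switch_mocs()/handle_tlb_pending_event() above: a register table indexed by engine id via designated initializers, guarded by an ARRAY_SIZE bounds check. The engine-id values below are placeholders (the real ones come from i915's enum intel_engine_id); only the MOCS base offsets 0xc800/0xc900/0xca00/0xcc00/0xcb00 are taken from the hunk.

/*
 * Illustration only: mirrors the designated-initializer lookup in the hunk
 * above. Engine ids are placeholders; offsets are from the patch.
 * Build: cc -std=c99 -o mocs_demo mocs_demo.c
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum demo_engine_id { RCS0, BCS0, VCS0, VCS1, VECS0, DEMO_NUM_ENGINES };

static const unsigned int mocs_base[] = {
	[RCS0]  = 0xc800,
	[VCS0]  = 0xc900,
	[VCS1]  = 0xca00,
	[BCS0]  = 0xcc00,
	[VECS0] = 0xcb00,
};

static int mocs_offset(int ring_id, unsigned int *offset)
{
	/* Same bounds check the driver does before indexing regs[]. */
	if (ring_id < 0 || ring_id >= (int)ARRAY_SIZE(mocs_base))
		return -1;
	*offset = mocs_base[ring_id];
	return 0;
}

int main(void)
{
	for (int id = 0; id < DEMO_NUM_ENGINES; id++) {
		unsigned int offset;

		if (mocs_offset(id, &offset) == 0)
			printf("engine %d -> MOCS base 0x%04x\n", id, offset);
	}
	return 0;
}

The TLB-invalidation table (0x4260..0x4270) in the same file follows the identical shape; only the offsets differ.
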
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 428d252344f1..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP (1 << 9)
+#define REG50080_FLIP_TYPE_MASK 0x3
+#define REG50080_FLIP_TYPE_ASYNC 0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+ typeof(_pipe) (p) = (_pipe); \
+ typeof(_plane) (q) = (_plane); \
+ (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+ (_MMIO(0x50090))) : \
+ (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+ (_MMIO(0x50098))) : \
+ (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+ (_MMIO(0x5009C))) : \
+ (_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+ (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+ (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+ (INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+ (PLANE_PRIMARY) : \
+ (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+ (PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
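
The REG_50080* macros above encode a small (pipe, plane) -> flip-register-offset table plus its inverse. As a plain-C restatement (useful when eyeballing the nested ternaries), the sketch below reproduces the mapping and checks the round trip; the pipe/plane enum values are local placeholders, the -1 returns stand in for INVALID_PIPE / I915_MAX_PLANES, and only the 0x500xx offsets are taken from the macros.

/*
 * Plain-C restatement of the (pipe, plane) <-> 0x500xx mapping encoded by
 * REG_50080/REG_50080_TO_PIPE/REG_50080_TO_PLANE above. Pipe/plane values
 * are local placeholders; -1 stands in for INVALID_PIPE / I915_MAX_PLANES.
 * Build: cc -std=c99 -o reg50080_demo reg50080_demo.c
 */
#include <stdio.h>

enum demo_pipe { PIPE_A, PIPE_B, PIPE_C };
enum demo_plane { PLANE_PRIMARY, PLANE_SPRITE0 };

static unsigned int reg_50080(enum demo_pipe pipe, enum demo_plane plane)
{
	static const unsigned int table[3][2] = {
		[PIPE_A] = { [PLANE_PRIMARY] = 0x50080, [PLANE_SPRITE0] = 0x50090 },
		[PIPE_B] = { [PLANE_PRIMARY] = 0x50088, [PLANE_SPRITE0] = 0x50098 },
		[PIPE_C] = { [PLANE_PRIMARY] = 0x5008C, [PLANE_SPRITE0] = 0x5009C },
	};
	return table[pipe][plane];
}

static int reg_50080_to_pipe(unsigned int reg)
{
	if (reg == 0x50080 || reg == 0x50090)
		return PIPE_A;
	if (reg == 0x50088 || reg == 0x50098)
		return PIPE_B;
	if (reg == 0x5008C || reg == 0x5009C)
		return PIPE_C;
	return -1;
}

static int reg_50080_to_plane(unsigned int reg)
{
	if (reg == 0x50080 || reg == 0x50088 || reg == 0x5008C)
		return PLANE_PRIMARY;
	if (reg == 0x50090 || reg == 0x50098 || reg == 0x5009C)
		return PLANE_SPRITE0;
	return -1;
}

int main(void)
{
	for (int p = PIPE_A; p <= PIPE_C; p++)
		for (int q = PLANE_PRIMARY; q <= PLANE_SPRITE0; q++) {
			unsigned int reg = reg_50080(p, q);

			printf("pipe %d plane %d -> 0x%05X (round trip: %d, %d)\n",
			       p, q, reg,
			       reg_50080_to_pipe(reg), reg_50080_to_plane(reg));
		}
	return 0;
}
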
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 05b953793316..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS)
+ if (workload->ring_id != RCS0)
return;
if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@@ -434,8 +434,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -803,7 +802,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -851,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
	/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -903,8 +902,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -927,7 +926,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
		/* If workload->status is not successful, it means the HW GPU
		 * has hung or something is wrong with i915/GVT,
		 * and GVT won't inject a context switch interrupt to the guest.
@@ -941,7 +940,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@@ -1001,7 +1000,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
+ intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1023,7 +1022,7 @@ complete:
complete_current_workload(gvt, ring_id);
if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
+ intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
@@ -1114,9 +1113,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
- else {
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1150,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1167,7 +1166,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1240,7 +1239,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1344,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
@@ -1450,7 +1449,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..90c6756f5453 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif
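
A minimal sketch of the engine-mask usage implied by the prototype changes above: callers now pass BIT(ring_id) (or ALL_ENGINES) as an intel_engine_mask_t and the reset/cleanup paths walk the set bits. The mask width below is an assumption (a plain unsigned int stand-in); the real typedef lives elsewhere in the i915 headers.

/*
 * Minimal sketch of per-engine bitmask handling mirroring the BIT(ring_id)
 * and ALL_ENGINES usage above. The mask width is a stand-in; the real
 * intel_engine_mask_t typedef is defined in the i915 headers.
 * Build: cc -std=c99 -o engmask_demo engmask_demo.c
 */
#include <stdio.h>

#define BIT(n) (1u << (n))
#define DEMO_NUM_ENGINES 5

typedef unsigned int demo_engine_mask_t;	/* stand-in */
#define ALL_ENGINES ((demo_engine_mask_t)~0u)

static void clean_workloads(demo_engine_mask_t engine_mask)
{
	for (int id = 0; id < DEMO_NUM_ENGINES; id++)
		if (engine_mask & BIT(id))
			printf("cleaning workloads on engine %d\n", id);
}

int main(void)
{
	clean_workloads(BIT(0));		/* one engine, like BIT(ring_id) */
	clean_workloads(BIT(1) | BIT(3));	/* an arbitrary subset */
	clean_workloads(ALL_ENGINES);		/* device-level (FLR-style) reset */
	return 0;
}
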
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..44ce3c2b9ac1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+ intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index db7bb5bd5add..863ae12707ba 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
@@ -17,6 +18,7 @@
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
+ struct i915_global base;
struct kmem_cache *slab_cache;
} global;
@@ -285,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
#include "selftests/i915_active.c"
#endif
+static void i915_global_active_shrink(void)
+{
+ kmem_cache_shrink(global.slab_cache);
+}
+
+static void i915_global_active_exit(void)
+{
+ kmem_cache_destroy(global.slab_cache);
+}
+
+static struct i915_global_active global = { {
+ .shrink = i915_global_active_shrink,
+ .exit = i915_global_active_exit,
+} };
+
int __init i915_global_active_init(void)
{
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
if (!global.slab_cache)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
-
-void __exit i915_global_active_exit(void)
-{
- kmem_cache_destroy(global.slab_cache);
-}
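
The i915_active.c hunk above replaces the exported i915_global_active_exit() with a struct i915_global (carrying shrink/exit callbacks) registered via i915_global_register(). The toy sketch below imitates only that registration shape; the registry, its locking, and the teardown order are invented for illustration, since i915_globals.c itself is not part of this hunk.

/*
 * Toy imitation of the registration shape above: a subsystem embeds a base
 * struct carrying shrink/exit callbacks and registers it once at init. The
 * registry below is invented for illustration; i915_globals.c is not shown
 * in this hunk. Build: cc -std=c99 -o globals_demo globals_demo.c
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_global {
	void (*shrink)(void);
	void (*exit)(void);
	struct demo_global *next;
};

static struct demo_global *registry;

static void demo_global_register(struct demo_global *g)
{
	g->next = registry;
	registry = g;
}

/* Analogous to i915_global_active: base first, then the subsystem's state. */
struct demo_global_active {
	struct demo_global base;
	void *slab_cache;	/* stands in for the kmem_cache pointer */
};

static void demo_active_shrink(void);
static void demo_active_exit(void);

static struct demo_global_active global = { {
	.shrink = demo_active_shrink,
	.exit = demo_active_exit,
} };

static void demo_active_shrink(void)
{
	printf("shrink active slab\n");
}

static void demo_active_exit(void)
{
	free(global.slab_cache);
	printf("destroy active slab\n");
}

static int demo_global_active_init(void)
{
	global.slab_cache = malloc(64);
	if (!global.slab_cache)
		return -1;
	demo_global_register(&global.base);
	return 0;
}

int main(void)
{
	if (demo_global_active_init())
		return 1;
	/* Driver-wide teardown walks the registry once (not shown in the hunk). */
	for (struct demo_global *g = registry; g; g = g->next)
		g->exit();
	return 0;
}
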
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 12b5c1d287d1..7d758719ce39 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
active->retire = fn ?: i915_active_retire_noop;
}
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
- /*
- * Inside the error capture (running with the driver in an unknown
- * state), we want to bend the rules slightly (a lot).
- *
- * Work is in progress to make it safer, in the meantime this keeps
- * the known issue from spamming the logs.
- */
- return rcu_dereference_protected(active->request, 1);
-}
-
/**
* i915_active_request_raw - return the active request
* @active - the active tracker
@@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
-int i915_global_active_init(void);
-void i915_global_active_exit(void);
-
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 33e8eed64423..503d548a55f7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
if (!IS_GEN(engine->i915, 7))
return;
- switch (engine->id) {
- case RCS:
+ switch (engine->class) {
+ case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
@@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
- case VCS:
+ case VIDEO_DECODE_CLASS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
- case BCS:
+ case COPY_ENGINE_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
@@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
- case VECS:
+ case VIDEO_ENHANCEMENT_CLASS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
- MISSING_CASE(engine->id);
+ MISSING_CASE(engine->class);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f6f6e5b78e97..5823ffb17821 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,14 +26,21 @@
*
*/
-#include <linux/sort.h>
#include <linux/sched/mm.h>
+#include <linux/sort.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "i915_reset.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_guc_submission.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -388,12 +395,9 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
@@ -412,9 +416,8 @@ static void print_context_stats(struct seq_file *m,
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s/%d",
- task ? task->comm : "<unknown>",
- ctx->user_handle);
+ snprintf(name, sizeof(name), "%s",
+ task ? task->comm : "<unknown>");
rcu_read_unlock();
print_file_stats(m, name, stats);
@@ -830,11 +833,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(IER));
+ I915_READ(GEN2_IER));
seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(IIR));
+ I915_READ(GEN2_IIR));
seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(IMR));
+ I915_READ(GEN2_IMR));
for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
@@ -884,7 +887,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- engine->name, I915_READ_IMR(engine));
+ engine->name, ENGINE_READ(engine, RING_IMR));
}
}
@@ -1097,7 +1100,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
@@ -1125,7 +1128,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv,
intel_get_cagf(dev_priv, rpstat));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@@ -1281,14 +1284,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
enum intel_engine_id id;
+ seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- seq_puts(m, "Wedged\n");
+ seq_puts(m, "\tWedged\n");
if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
- seq_puts(m, "Reset in progress: struct_mutex backoff\n");
- if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
- seq_puts(m, "Waiter holding struct mutex\n");
- if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
- seq_puts(m, "struct_mutex blocked for reset\n");
+ seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
@@ -1298,10 +1298,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
with_intel_runtime_pm(dev_priv, wakeref) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_seqno(engine);
+ seqno[id] = intel_engine_get_hangcheck_seqno(engine);
}
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
@@ -1318,8 +1318,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
- engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine),
+ engine->hangcheck.last_seqno,
+ seqno[id],
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@@ -1327,7 +1328,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- if (engine->id == RCS) {
+ if (engine->id == RCS0) {
seq_puts(m, "\tinstdone read =\n");
i915_instdone_info(dev_priv, m, &instdone);
@@ -1419,13 +1420,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_uncore *uncore = &i915->uncore;
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- i915->uncore.user_forcewake.count);
+ uncore->user_forcewake.count);
- for_each_fw_domain(fw_domain, i915, tmp)
+ for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
READ_ONCE(fw_domain->wake_count));
@@ -1882,9 +1884,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1892,6 +1892,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ struct intel_context *ce;
+
seq_puts(m, "HW context ");
if (!list_empty(&ctx->hw_id_link))
seq_printf(m, "%x [pin %u]", ctx->hw_id,
@@ -1914,11 +1916,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- seq_printf(m, "%s: ", engine->name);
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@@ -2023,11 +2022,9 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- struct drm_file *file;
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -2061,22 +2058,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, rps->efficient_freq),
intel_gpu_freq(dev_priv, rps->boost_freq));
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts\n",
- task ? task->comm : "<unknown>",
- task ? task->pid : -1,
- atomic_read(&file_priv->rps_client.boosts));
- rcu_read_unlock();
- }
- seq_printf(m, "Kernel (anonymous) boosts: %d\n",
- atomic_read(&rps->boosts));
- mutex_unlock(&dev->filelist_mutex);
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
if (INTEL_GEN(dev_priv) >= 6 &&
rps->enabled &&
@@ -2084,12 +2066,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 rpup, rpupei;
u32 rpdown, rpdownei;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(rps->power.mode));
@@ -2112,8 +2094,8 @@ static int i915_llc(struct seq_file *m, void *data)
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
- intel_uncore_edram_size(dev_priv)/1024/1024);
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ dev_priv->edram_size_mb);
return 0;
}
@@ -2270,7 +2252,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
struct intel_guc_client *client = guc->execbuf_client;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2607,7 +2589,6 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_modeset_acquire_ctx ctx;
intel_wakeref_t wakeref;
int ret;
@@ -2618,18 +2599,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
wakeref = intel_runtime_pm_get(dev_priv);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-retry:
- ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ ret = intel_psr_debug_set(dev_priv, val);
intel_runtime_pm_put(dev_priv, wakeref);
@@ -2686,8 +2656,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.wakeref));
- seq_printf(m, "GPU idle: %s (epoch %u)\n",
- yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2904,7 +2873,6 @@ static void intel_connector_info(struct seq_file *m,
if (connector->status == connector_status_disconnected)
return;
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
connector->display_info.width_mm,
connector->display_info.height_mm);
@@ -3123,8 +3091,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s (epoch %u)\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
@@ -3211,7 +3178,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
struct i915_wa *wa;
unsigned int i;
@@ -3865,11 +3832,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
-
- *val = i915_terminally_wedged(&dev_priv->gpu_error);
+ int ret = i915_terminally_wedged(data);
- return 0;
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
}
static int
@@ -3877,16 +3851,9 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- /*
- * There is no safeguard against this debugfs entry colliding
- * with the hangcheck calling same i915_handle_error() in
- * parallel, causing an explosion. For now we assume that the
- * test harness is responsible enough not to inject gpu hangs
- * while it is writing to 'i915_wedged'
- */
-
- if (i915_reset_backoff(&i915->gpu_error))
- return -EAGAIN;
+ /* Flush any previous reset before applying for a new one */
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
i915_handle_error(i915, val, I915_ERROR_CAPTURE,
"Manually set wedged engine mask = %llx", val);
@@ -3927,12 +3894,9 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- intel_wakeref_t wakeref;
- int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE &&
wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
@@ -3941,9 +3905,11 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ int ret;
+
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- goto out;
+ return ret;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -3957,7 +3923,7 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&i915->drm.struct_mutex);
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
+ if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
i915_handle_error(i915, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
@@ -3982,10 +3948,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
-out:
- intel_runtime_pm_put(i915, wakeref);
-
- return ret;
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
@@ -4293,7 +4256,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
- intel_uncore_forcewake_user_get(i915);
+ intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
}
@@ -4305,7 +4268,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- intel_uncore_forcewake_user_put(i915);
+ intel_uncore_forcewake_user_put(&i915->uncore);
intel_runtime_pm_put(i915,
(intel_wakeref_t)(uintptr_t)file->private_data);
@@ -4858,6 +4821,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
yesno(crtc_state->dsc_params.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9df65d386d11..1ad88e6d7c04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,12 +48,19 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "i915_pmu.h"
-#include "i915_reset.h"
#include "i915_query.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_audio.h"
+#include "intel_cdclk.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "intel_uc.h"
#include "intel_workarounds.h"
@@ -188,6 +195,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* CometPoint is CNP Compatible */
+ return PCH_CNP;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
@@ -219,20 +231,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -330,16 +342,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS];
+ value = !!dev_priv->engine[VCS0];
break;
case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS];
+ value = !!dev_priv->engine[BCS0];
break;
case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS];
+ value = !!dev_priv->engine[VECS0];
break;
case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS2];
+ value = !!dev_priv->engine[VCS1];
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
@@ -348,10 +360,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
+ value = INTEL_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = 0;
+ value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -714,8 +726,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
@@ -864,15 +875,19 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
+ intel_device_info_subplatform_init(dev_priv);
+
+ intel_uncore_init_early(&dev_priv->uncore);
+
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(dev_priv);
@@ -930,46 +945,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_engines_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int mmio_bar;
- int mmio_size;
-
- mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
- /*
- * Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (INTEL_GEN(dev_priv) < 5)
- mmio_size = 512 * 1024;
- else
- mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
- if (dev_priv->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
- return -EIO;
- }
-
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
-
- return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- intel_teardown_mchbar(dev_priv);
- pci_iounmap(pdev, dev_priv->regs);
-}
-
/**
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
@@ -989,15 +964,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev_priv);
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
- intel_uncore_init(dev_priv);
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev_priv);
intel_device_info_init_mmio(dev_priv);
- intel_uncore_prune(dev_priv);
+ intel_uncore_prune_mmio_domains(&dev_priv->uncore);
intel_uc_init_mmio(dev_priv);
@@ -1010,8 +986,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1024,8 +1000,8 @@ err_bridge:
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1034,110 +1010,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
{
- if (size == 0)
- return I915_DRAM_RANK_INVALID;
- if (rank == SKL_DRAM_RANK_SINGLE)
- return I915_DRAM_RANK_SINGLE;
- else if (rank == SKL_DRAM_RANK_DUAL)
- return I915_DRAM_RANK_DUAL;
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
- return I915_DRAM_RANK_INVALID;
+ return str[type];
}
-static bool
-skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
- if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
- return true;
- else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
- return true;
- else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
- return true;
- else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
- return true;
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
- return false;
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
}
-static int
-skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+static int skl_get_dimm_width(u16 val)
{
- u32 tmp_l, tmp_s;
- u32 s_val = val >> SKL_DRAM_S_SHIFT;
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
- if (!val)
- return -EINVAL;
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
- tmp_l = val & SKL_DRAM_SIZE_MASK;
- tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(dev_priv) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
+ channel, 'S', val >> 16);
- if (tmp_l == 0 && tmp_s == 0)
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ DRM_DEBUG_KMS("CH%u not populated\n", channel);
return -EINVAL;
+ }
- ch->l_info.size = tmp_l;
- ch->s_info.size = tmp_s;
-
- tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- ch->l_info.width = (1 << tmp_l) * 8;
- ch->s_info.width = (1 << tmp_s) * 8;
-
- tmp_l = val & SKL_DRAM_RANK_MASK;
- tmp_s = s_val & SKL_DRAM_RANK_MASK;
- ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
- ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
-
- if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
- ch->s_info.rank == I915_DRAM_RANK_DUAL)
- ch->rank = I915_DRAM_RANK_DUAL;
- else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
- ch->s_info.rank == I915_DRAM_RANK_SINGLE)
- ch->rank = I915_DRAM_RANK_DUAL;
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
else
- ch->rank = I915_DRAM_RANK_SINGLE;
+ ch->ranks = 1;
- ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
- ch->l_info.width) ||
- skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
- ch->s_info.width);
+ ch->is_16gb_dimm =
+ skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
- DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
- ch->l_info.size, ch->l_info.width,
- ch->l_info.rank ? "dual" : "single",
- ch->s_info.size, ch->s_info.width,
- ch->s_info.rank ? "dual" : "single");
+ DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
return 0;
}
static bool
-intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
- struct dram_channel_info *ch0)
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
{
- return (val_ch0 == val_ch1 &&
- (ch0->s_info.size == 0 ||
- (ch0->l_info.size == ch0->s_info.size &&
- ch0->l_info.width == ch0->s_info.width &&
- ch0->l_info.rank == ch0->s_info.rank)));
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0, ch1;
- u32 val_ch0, val_ch1;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
int ret;
- val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch0, val_ch0);
+ val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
- val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch1, val_ch1);
+ val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
@@ -1151,28 +1197,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
* will be same as if single rank memory, so consider single rank
* memory.
*/
- if (ch0.rank == I915_DRAM_RANK_SINGLE ||
- ch1.rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
else
- dram_info->rank = max(ch0.rank, ch1.rank);
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+ if (dram_info->ranks == 0) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
- dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
- val_ch1,
- &ch0);
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
- DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
- dev_priv->dram_info.symmetric_memory ? "" : "not ");
+ DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
return 0;
}
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
@@ -1180,6 +1245,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
u32 mem_freq_khz, val;
int ret;
+ dram_info->type = skl_get_dram_type(dev_priv);
+ DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
+
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return ret;
@@ -1200,6 +1268,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
return 0;
}
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
+ u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
@@ -1228,57 +1375,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- u8 size, width;
- enum dram_rank rank;
- u32 tmp;
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
- tmp = val & BXT_DRAM_RANK_MASK;
-
- if (tmp == BXT_DRAM_RANK_SINGLE)
- rank = I915_DRAM_RANK_SINGLE;
- else if (tmp == BXT_DRAM_RANK_DUAL)
- rank = I915_DRAM_RANK_DUAL;
- else
- rank = I915_DRAM_RANK_INVALID;
-
- tmp = val & BXT_DRAM_SIZE_MASK;
- if (tmp == BXT_DRAM_SIZE_4GB)
- size = 4;
- else if (tmp == BXT_DRAM_SIZE_6GB)
- size = 6;
- else if (tmp == BXT_DRAM_SIZE_8GB)
- size = 8;
- else if (tmp == BXT_DRAM_SIZE_12GB)
- size = 12;
- else if (tmp == BXT_DRAM_SIZE_16GB)
- size = 16;
- else
- size = 0;
-
- tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
- width = (1 << tmp) * 8;
- DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
- width, rank == I915_DRAM_RANK_SINGLE ? "single" :
- rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ WARN_ON(type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
/*
* If any of the channel is single rank channel,
* worst case output will be same as if single rank
* memory, so consider single rank memory.
*/
- if (dram_info->rank == I915_DRAM_RANK_INVALID)
- dram_info->rank = rank;
- else if (rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
}
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
- DRM_INFO("couldn't get memory rank information\n");
+ if (dram_info->type == INTEL_DRAM_UNKNOWN ||
+ dram_info->ranks == 0) {
+ DRM_INFO("couldn't get memory information\n");
return -EINVAL;
}
@@ -1290,14 +1424,8 @@ static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- char bandwidth_str[32];
int ret;
- dram_info->valid = false;
- dram_info->rank = I915_DRAM_RANK_INVALID;
- dram_info->bandwidth_kbps = 0;
- dram_info->num_channels = 0;
-
/*
* Assume 16Gb DIMMs are present until proven otherwise.
* This is only used for the level 0 watermark latency
@@ -1305,28 +1433,61 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 9)
return;
- /* Need to calculate bandwidth only for Gen9 */
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (IS_GEN(dev_priv, 9))
- ret = skl_get_dram_info(dev_priv);
else
- ret = skl_dram_get_channels_info(dev_priv);
+ ret = skl_get_dram_info(dev_priv);
if (ret)
return;
- if (dram_info->bandwidth_kbps)
- sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
+ DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps,
+ dram_info->num_channels);
+
+ DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
+{
+ const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ const unsigned int sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+static void edram_detect(struct drm_i915_private *dev_priv)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The needed capability bits for size calculation are not there with
+ * pre gen9 so return 128MB always.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ dev_priv->edram_size_mb = 128;
else
- sprintf(bandwidth_str, "unknown");
- DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
- bandwidth_str, dram_info->num_channels);
- DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
- (dram_info->rank == I915_DRAM_RANK_DUAL) ?
- "dual" : "single", yesno(dram_info->is_16gb_dimm));
+ dev_priv->edram_size_mb =
+ gen9_edram_size_mb(dev_priv, edram_cap);
+
+ DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
/**
@@ -1348,7 +1509,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
- !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ !intel_vgpu_has_full_ppgtt(dev_priv)) {
i915_report_error(dev_priv,
"incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
@@ -1371,6 +1532,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
+ /* needs to be done before ggtt probe */
+ edram_detect(dev_priv);
+
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
@@ -1606,10 +1770,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
- drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ intel_subplatform(RUNTIME_INFO(dev_priv),
+ INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
@@ -1652,8 +1818,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
- BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
@@ -1750,11 +1914,17 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /*
+ * After unregistering the device to prevent any new users, cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ i915_gem_set_wedged(dev_priv);
+
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
drm_atomic_helper_shutdown(dev);
@@ -1862,7 +2032,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- int err;
/*
* NB intel_display_suspend() may issue new requests after we've
@@ -1870,12 +2039,9 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
- err = i915_gem_suspend(i915);
- if (err)
- dev_err(&i915->drm.pdev->dev,
- "GEM idle failed, suspend/resume might fail\n");
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static int i915_drm_suspend(struct drm_device *dev)
@@ -1945,7 +2111,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -2141,7 +2307,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_resume_early(dev_priv);
+ intel_uncore_resume_early(&dev_priv->uncore);
+
+ i915_check_and_clear_faults(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
@@ -2545,7 +2713,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
- err = intel_wait_for_register(dev_priv,
+ err = intel_wait_for_register(&dev_priv->uncore,
VLV_GTLC_SURVIVABILITY_REG,
VLV_GFX_CLK_STATUS_BIT,
VLV_GFX_CLK_STATUS_BIT,
@@ -2711,7 +2879,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
ret = 0;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2728,7 +2896,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2745,7 +2913,7 @@ static int intel_runtime_suspend(struct device *kdev)
enable_rpm_wakeref_asserts(dev_priv);
intel_runtime_pm_cleanup(dev_priv);
- if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+ if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->runtime_pm.suspended = true;
@@ -2773,7 +2941,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
@@ -2799,7 +2967,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->runtime_pm.suspended = false;
- if (intel_uncore_unclaimed_mmio(dev_priv))
+ if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2825,7 +2993,7 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2969,7 +3137,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a67a63b5aa84..066fd2a12851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,6 +55,7 @@
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -65,13 +66,14 @@
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
+#include "intel_frontbuffer.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
-#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -91,8 +93,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190207"
-#define DRIVER_TIMESTAMP 1549572331
+#define DRIVER_DATE "20190417"
+#define DRIVER_TIMESTAMP 1555492067
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -215,11 +217,12 @@ struct drm_i915_file_private {
*/
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
+
struct idr context_idr;
+ struct mutex context_idr_lock; /* guards context_idr */
- struct intel_rps_client {
- atomic_t boosts;
- } rps_client;
+ struct idr vm_idr;
+ struct mutex vm_idr_lock; /* guards vm_idr */
unsigned int bsd_engine;
@@ -280,7 +283,8 @@ struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
@@ -323,6 +327,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
+ int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
* vblank evasion. The registers should then latch during the
@@ -371,14 +376,6 @@ enum i915_cache_level {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-enum fb_op_origin {
- ORIGIN_GTT,
- ORIGIN_CPU,
- ORIGIN_CS,
- ORIGIN_FLIP,
- ORIGIN_DIRTYFB,
-};
-
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
@@ -508,7 +505,7 @@ struct i915_psr {
u32 debug;
bool sink_support;
- bool prepared, enabled;
+ bool enabled;
struct intel_dp *dp;
enum pipe pipe;
bool active;
@@ -526,16 +523,22 @@ struct i915_psr {
u16 su_x_granularity;
};
+/*
+ * Sorted by south display engine compatibility.
+ * If a new PCH comes with a south display engine that is not
+ * inherited from the latest entry, do not add it at the end.
+ * Instead, add it right after its "parent" PCH.
+ */
enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kaby Lake PCH */
- PCH_CNP, /* Cannon Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -949,6 +952,7 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
+ u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@@ -1009,6 +1013,7 @@ struct intel_vbt_data {
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time_us;
int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1130,6 +1135,7 @@ struct skl_wm_level {
u16 plane_res_b;
u8 plane_res_l;
bool plane_en;
+ bool ignore_lines;
};
/* Stores plane specific WM parameters */
@@ -1200,7 +1206,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
@@ -1468,13 +1478,6 @@ struct intel_cdclk_state {
struct drm_i915_private {
struct drm_device drm;
- struct kmem_cache *objects;
- struct kmem_cache *vmas;
- struct kmem_cache *luts;
- struct kmem_cache *requests;
- struct kmem_cache *dependencies;
- struct kmem_cache *priorities;
-
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -1503,8 +1506,6 @@ struct drm_i915_private {
*/
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
- void __iomem *regs;
-
struct intel_uncore uncore;
struct i915_virtual_gpu vgpu;
@@ -1622,6 +1623,8 @@ struct drm_i915_private {
struct intel_cdclk_state actual;
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+
+ int force_min_cdclk;
} cdclk;
/**
@@ -1700,8 +1703,11 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
- /* Cannot be determined by PCIID. You must always read a register. */
- u32 edram_cap;
+ /*
+ * edram size in MB.
+ * Cannot be determined by PCIID. You must always read a register.
+ */
+ u32 edram_size_mb;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1741,6 +1747,7 @@ struct drm_i915_private {
*
*/
struct mutex av_mutex;
+ int audio_power_refcount;
struct {
struct mutex mutex;
@@ -1831,13 +1838,16 @@ struct drm_i915_private {
bool valid;
bool is_16gb_dimm;
u8 num_channels;
- enum dram_rank {
- I915_DRAM_RANK_INVALID = 0,
- I915_DRAM_RANK_SINGLE,
- I915_DRAM_RANK_DUAL
- } rank;
+ u8 ranks;
u32 bandwidth_kbps;
bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4
+ } type;
} dram_info;
struct i915_runtime_pm runtime_pm;
@@ -1985,7 +1995,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct i915_gt_timelines {
@@ -1997,6 +2006,7 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
+ intel_engine_mask_t active_engines;
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
@@ -2011,12 +2021,6 @@ struct drm_i915_private {
intel_wakeref_t awake;
/**
- * The number of times we have woken up.
- */
- unsigned int epoch;
-#define I915_EPOCH_INVALID 0
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -2039,6 +2043,14 @@ struct drm_i915_private {
struct i915_vma *scratch;
} gt;
+ /* For i945gm vblank irq vs. C3 workaround */
+ struct {
+ struct work_struct work;
+ struct pm_qos_request pm_qos;
+ u8 c3_disable_latency;
+ u8 enabled;
+ } i945gm_vblank;
+
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2055,18 +2067,25 @@ struct drm_i915_private {
struct i915_pmu pmu;
+ struct i915_hdcp_comp_master *hdcp_master;
+ bool hdcp_comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex hdcp_comp_mutex;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
*/
};
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
struct dram_channel_info {
- struct info {
- u8 size, width;
- enum dram_rank rank;
- } l_info, s_info;
- enum dram_rank rank;
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
bool is_16gb_dimm;
};
@@ -2095,6 +2114,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
return container_of(huc, struct drm_i915_private, huc);
}
+static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
+{
+ return container_of(uncore, struct drm_i915_private, uncore);
+}
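uncore_to_i915() relies on struct intel_uncore being embedded in struct drm_i915_private, so container_of() can step back from the member to the enclosing device structure. A standalone sketch of that pattern, using a simplified container_of() and toy types in place of the i915 ones:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(): step back from the
 * embedded member to the start of the enclosing structure.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_uncore { unsigned int dummy; };

struct toy_i915 {
	int id;
	struct toy_uncore uncore;	/* embedded, as uncore is in drm_i915_private */
};

static struct toy_i915 *uncore_to_toy_i915(struct toy_uncore *uncore)
{
	return container_of(uncore, struct toy_i915, uncore);
}

int main(void)
{
	struct toy_i915 dev = { .id = 42 };

	printf("%d\n", uncore_to_toy_i915(&dev.uncore)->id);	/* prints 42 */
	return 0;
}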
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -2104,7 +2128,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+ for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
(tmp__) ? \
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
@@ -2274,7 +2298,69 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
+static __always_inline unsigned int
+__platform_mask_index(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ /* Expand the platform_mask array if this fails. */
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ pbits * ARRAY_SIZE(info->platform_mask));
+
+ return p / pbits;
+}
+
+static __always_inline unsigned int
+__platform_mask_bit(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ return p % pbits + INTEL_SUBPLATFORM_BITS;
+}
+
+static inline u32
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
+{
+ const unsigned int pi = __platform_mask_index(info, p);
+
+ return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+}
+
+static __always_inline bool
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+
+ return info->platform_mask[pi] & BIT(pb);
+}
+
+static __always_inline bool
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ enum intel_platform p, unsigned int s)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+ const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
+ const u32 mask = info->platform_mask[pi];
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+ BUILD_BUG_ON(!__builtin_constant_p(s));
+ BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
+
+ /* Shift and test on the MSB position so sign flag can be used. */
+ return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
+}
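IS_PLATFORM() and IS_SUBPLATFORM() pack both kinds of flag into the same u32 words: the low INTEL_SUBPLATFORM_BITS of each word carry sub-platform flags while the remaining bits carry platform flags, and the two __platform_mask_* helpers locate a platform's word and bit. A standalone sketch of that packing, assuming a 3-bit sub-platform field purely for illustration (the kernel version additionally folds the two bit tests into a single shift-and-test on the MSB, as its comment notes):

#include <stdint.h>
#include <stdio.h>

#define SUBPLATFORM_BITS	3	/* assumed width, for illustration only */
#define PLATFORM_BITS_PER_WORD	(32 - SUBPLATFORM_BITS)

/* Each u32 keeps its low SUBPLATFORM_BITS for sub-platform flags; the
 * remaining bits hold platform flags, so platform p lands in word
 * p / PLATFORM_BITS_PER_WORD at bit (p % PLATFORM_BITS_PER_WORD) + SUBPLATFORM_BITS.
 */
static int is_platform(const uint32_t *mask, unsigned int p)
{
	unsigned int pi = p / PLATFORM_BITS_PER_WORD;
	unsigned int pb = p % PLATFORM_BITS_PER_WORD + SUBPLATFORM_BITS;

	return (mask[pi] >> pb) & 1;
}

static int is_subplatform(const uint32_t *mask, unsigned int p, unsigned int s)
{
	unsigned int pi = p / PLATFORM_BITS_PER_WORD;

	/* Same test as IS_SUBPLATFORM(): platform bit and sub-platform bit both set. */
	return is_platform(mask, p) && ((mask[pi] >> s) & 1);
}

int main(void)
{
	uint32_t mask[2] = { 0, 0 };
	unsigned int p = 40, s = 1;	/* hypothetical platform and sub-platform ids */

	mask[p / PLATFORM_BITS_PER_WORD] |=
		1u << (p % PLATFORM_BITS_PER_WORD + SUBPLATFORM_BITS);
	mask[p / PLATFORM_BITS_PER_WORD] |= 1u << s;

	printf("platform %d, subplatform %d\n",
	       is_platform(mask, p), is_subplatform(mask, p, s));
	return 0;
}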
+
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2289,11 +2375,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
-#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(dev_priv) \
+ (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
@@ -2308,46 +2394,35 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
- ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
-/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
- INTEL_DEVID(dev_priv) == 0x0A1E)
-#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
- INTEL_DEVID(dev_priv) == 0x1913 || \
- INTEL_DEVID(dev_priv) == 0x1916 || \
- INTEL_DEVID(dev_priv) == 0x1921 || \
- INTEL_DEVID(dev_priv) == 0x1926)
-#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
- INTEL_DEVID(dev_priv) == 0x1915 || \
- INTEL_DEVID(dev_priv) == 0x191E)
-#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
- INTEL_DEVID(dev_priv) == 0x5913 || \
- INTEL_DEVID(dev_priv) == 0x5916 || \
- INTEL_DEVID(dev_priv) == 0x5921 || \
- INTEL_DEVID(dev_priv) == 0x5926)
-#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
- INTEL_DEVID(dev_priv) == 0x5915 || \
- INTEL_DEVID(dev_priv) == 0x591E)
-#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0 || \
- INTEL_DEVID(dev_priv) == 0x87CA)
+#define IS_HSW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_AML_ULX(dev_priv) \
+ (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2358,16 +2433,16 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
-#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
- INTEL_DEVID(dev_priv) != 0x8A51)
+#define IS_CNL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2426,28 +2501,22 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
-#define ENGINE_MASK(id) BIT(id)
-#define RENDER_RING ENGINE_MASK(RCS)
-#define BSD_RING ENGINE_MASK(VCS)
-#define BLT_RING ENGINE_MASK(BCS)
-#define VEBOX_RING ENGINE_MASK(VECS)
-#define BSD2_RING ENGINE_MASK(VCS2)
-#define BSD3_RING ENGINE_MASK(VCS3)
-#define BSD4_RING ENGINE_MASK(VCS4)
-#define VEBOX2_RING ENGINE_MASK(VECS2)
-#define ALL_ENGINES (~0)
-
-#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
-
-#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
-#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
-#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
-#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+
+#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ (INTEL_INFO(dev_priv)->engine_mask & \
+ GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+#define VDBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
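ENGINE_INSTANCES_MASK() slices a contiguous run of per-instance engine bits out of engine_mask and shifts it down to bit 0, which is how VDBOX_MASK()/VEBOX_MASK() report which video and video-enhance instances exist. A standalone sketch with a hypothetical bit layout (the real VCS0/I915_MAX_VCS positions are not assumed here):

#include <stdint.h>
#include <stdio.h>

/* 32-bit stand-in for the kernel's GENMASK(): bits l..h set. */
#define GENMASK32(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

static uint32_t engine_instances_mask(uint32_t engine_mask,
				      unsigned int first, unsigned int count)
{
	/* Pull out `count` bits starting at `first`, shifted down to bit 0. */
	return (engine_mask & GENMASK32(first + count - 1, first)) >> first;
}

int main(void)
{
	/* Hypothetical layout: bits 2..5 hold four video-engine instances,
	 * of which only instances 0 and 2 are present.
	 */
	uint32_t engine_mask = (1u << 2) | (1u << 4);

	printf("0x%x\n", engine_instances_mask(engine_mask, 2, 4));	/* 0x5 */
	return 0;
}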
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2462,13 +2531,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
-#define HAS_FULL_48BIT_PPGTT(dev_priv) \
- (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
@@ -2512,6 +2579,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -2558,6 +2626,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
@@ -2567,8 +2636,6 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_CNP_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2800,8 +2867,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
@@ -2844,6 +2909,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
int pass = 2;
do {
rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
drain_workqueue(i915->wq);
} while (--pass);
}
@@ -2974,6 +3040,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
/**
* i915_gem_object_unpin_map - releases an earlier mapping
* @obj: the object to unmap
@@ -3002,7 +3076,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+static inline int __must_check
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ return mutex_lock_interruptible(&dev->struct_mutex);
+}
+
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3016,22 +3095,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
-
-static inline bool i915_reset_backoff(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
-}
-
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool __i915_wedged(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
-static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_failed(struct drm_i915_private *i915)
{
- return i915_reset_backoff(error) | i915_terminally_wedged(error);
+ return __i915_wedged(&i915->gpu_error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3056,14 +3127,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
-int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps);
+ long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
@@ -3106,7 +3176,6 @@ struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3142,7 +3211,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
u32 *reg_state);
/* i915_gem_evict.c */
@@ -3457,18 +3526,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}
-#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+#define __I915_REG_OP(op__, dev_priv__, ...) \
+ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
+
+#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
+#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
-#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
+#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
+#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
+#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
-#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
+#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
+#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
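Each of these wrappers is one token paste away from an intel_uncore_* accessor: __I915_REG_OP() builds the function name from the operation and threads &dev_priv->uncore through as the first argument, so e.g. I915_READ(reg) expands to intel_uncore_read(&dev_priv->uncore, (reg)). A standalone toy of the same macro shape (the toy_* names are invented; only the pattern mirrors the definitions above):

#include <stdint.h>
#include <stdio.h>

struct toy_uncore { uint32_t regs[16]; };
struct toy_device { struct toy_uncore uncore; };

static uint32_t toy_uncore_read(struct toy_uncore *uncore, unsigned int reg)
{
	return uncore->regs[reg];
}

static void toy_uncore_write(struct toy_uncore *uncore, unsigned int reg,
			     uint32_t val)
{
	uncore->regs[reg] = val;
}

/* Same shape as __I915_REG_OP(): paste the operation into the accessor name
 * and pass the embedded uncore as the first argument.
 */
#define __TOY_REG_OP(op__, dev__, ...) \
	toy_uncore_##op__(&(dev__)->uncore, __VA_ARGS__)

#define TOY_READ(reg__)			__TOY_REG_OP(read, dev, (reg__))
#define TOY_WRITE(reg__, val__)		__TOY_REG_OP(write, dev, (reg__), (val__))

int main(void)
{
	struct toy_device device = { .uncore = { .regs = { 0 } } };
	struct toy_device *dev = &device;

	TOY_WRITE(3, 0xdeadbeef);
	printf("0x%x\n", TOY_READ(3));	/* prints 0xdeadbeef */
	return 0;
}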
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
@@ -3484,46 +3556,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
*
* You have been warned.
*/
-#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
-#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper, lower, old_upper, loop = 0; \
- upper = I915_READ(upper_reg); \
- do { \
- old_upper = upper; \
- lower = I915_READ(lower_reg); \
- upper = I915_READ(upper_reg); \
- } while (upper != old_upper && loop++ < 2); \
- (u64)upper << 32 | lower; })
-
-#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
-#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-
-#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg) \
-{ \
- return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-
-#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg, uint##x##_t val) \
-{ \
- write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-__raw_read(8, b)
-__raw_read(16, w)
-__raw_read(32, l)
-__raw_read(64, q)
-
-__raw_write(8, b)
-__raw_write(16, w)
-__raw_write(32, l)
-__raw_write(64, q)
+#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
+#define I915_READ64_2x32(lower_reg__, upper_reg__) \
+ __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
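I915_READ64_2x32() now routes through __I915_REG_OP(), but the underlying technique is the one the removed inline above spells out: read the upper half, then the lower, then the upper again, and retry while the upper half changed, so a carry between the two 32-bit reads cannot tear the value. A standalone sketch of that retry loop on plain variables rather than MMIO:

#include <stdint.h>
#include <stdio.h>

/* A split 64-bit "register", exposed as two 32-bit halves. */
static volatile uint32_t reg_lo, reg_hi;

static uint64_t read64_2x32(void)
{
	uint32_t lower, upper, old_upper;
	int loop = 0;

	/* Re-read the upper half until it is stable so a carry between the
	 * two 32-bit reads cannot produce a torn 64-bit value.
	 */
	upper = reg_hi;
	do {
		old_upper = upper;
		lower = reg_lo;
		upper = reg_hi;
	} while (upper != old_upper && loop++ < 2);

	return (uint64_t)upper << 32 | lower;
}

int main(void)
{
	reg_hi = 0x1;
	reg_lo = 0xffffffff;
	printf("0x%llx\n", (unsigned long long)read64_2x32());	/* 0x1ffffffff */
	return 0;
}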
-#undef __raw_read
-#undef __raw_write
+#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
+#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3551,10 +3589,10 @@ __raw_write(64, q)
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
-#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
+#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8558e81fdc2a..ad01c92aaf74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
+#include "i915_globals.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
@@ -49,6 +50,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "intel_pm.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -100,48 +102,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
-static int
-i915_gem_wait_for_error(struct i915_gpu_error *error)
-{
- int ret;
-
- might_sleep();
-
- /*
- * Only wait 10 seconds for the gpu reset to complete to avoid hanging
- * userspace. If it takes that long something really bad is going on and
- * we should simply try to bail out and fail as gracefully as possible.
- */
- ret = wait_event_interruptible_timeout(error->reset_queue,
- !i915_reset_backoff(error),
- I915_RESET_TIMEOUT);
- if (ret == 0) {
- DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
- return -EIO;
- } else if (ret < 0) {
- return ret;
- } else {
- return 0;
- }
-}
-
-int i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -152,9 +113,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
if (!i915->gt.awake)
- return I915_EPOCH_INVALID;
-
- GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+ return;
/*
* Be paranoid and flush a concurrent interrupt to make sure
@@ -183,7 +142,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- return i915->gt.epoch;
+ i915_globals_park();
}
void i915_gem_park(struct drm_i915_private *i915)
@@ -225,8 +184,7 @@ void i915_gem_unpark(struct drm_i915_private *i915)
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
- if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
- i915->gt.epoch = 1;
+ i915_globals_unpark();
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
@@ -351,7 +309,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
-static void
+void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
@@ -459,8 +417,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
struct i915_request *rq;
@@ -478,27 +435,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
if (i915_request_completed(rq))
goto out;
- /*
- * This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (rps_client && !i915_request_started(rq)) {
- if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps_client);
- }
-
timeout = i915_request_wait(rq, flags, timeout);
out:
@@ -511,8 +447,7 @@ out:
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
@@ -530,8 +465,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout,
- rps_client);
+ flags, timeout);
if (timeout < 0)
break;
@@ -557,8 +491,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
}
if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout);
dma_fence_put(excl);
@@ -652,30 +585,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
- * @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
might_sleep();
GEM_BUG_ON(timeout < 0);
- timeout = i915_gem_object_wait_reservation(obj->resv,
- flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
return timeout < 0 ? timeout : 0;
}
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
- struct drm_i915_file_private *fpriv = file->driver_priv;
-
- return &fpriv->rps_client;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -698,28 +620,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
-{
- return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- kmem_cache_free(dev_priv->objects, obj);
-}
-
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
- u64 size,
+ u64 *size_p,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
- int ret;
u32 handle;
+ u64 size;
+ int ret;
- size = roundup(size, PAGE_SIZE);
+ size = round_up(*size_p, PAGE_SIZE);
if (size == 0)
return -EINVAL;
@@ -735,6 +647,7 @@ i915_gem_create(struct drm_file *file,
return ret;
*handle_p = handle;
+ *size_p = size;
return 0;
}
@@ -747,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
- args->size, &args->handle);
+ &args->size, &args->handle);
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -772,7 +685,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
i915_gem_flush_free_objects(dev_priv);
return i915_gem_create(file, dev_priv,
- args->size, &args->handle);
+ &args->size, &args->handle);
}
static inline enum fb_op_origin
@@ -881,8 +794,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -934,8 +846,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1197,8 +1108,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -1497,8 +1407,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err;
@@ -1578,17 +1487,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
- /* Having something in the write domain implies it's in the read
+ /*
+ * Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
- if (write_domain != 0 && read_domains != write_domain)
+ if (write_domain && read_domains != write_domain)
return -EINVAL;
+ if (!read_domains)
+ return 0;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- /* Try to flush the object off the GPU without holding the lock.
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains) {
+ err = 0;
+ goto out;
+ }
+
+ /*
+ * Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
@@ -1596,8 +1525,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto out;
@@ -1808,6 +1736,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ * pagefault; swapin remains transparent.
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1835,7 +1766,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 2;
+ return 3;
}
static inline struct i915_ggtt_view
@@ -1891,6 +1822,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ int srcu;
int ret;
/* Sanity check that we allow writing into this object */
@@ -1902,27 +1834,21 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
- /* Try to flush the object off the GPU first without holding the lock.
- * Upon acquiring the lock, we will perform our sanity checks and then
- * repeat the flush holding the lock in the normal manner to catch cases
- * where we are gazumped.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- if (ret)
- goto err;
-
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
wakeref = intel_runtime_pm_get(dev_priv);
+ srcu = i915_reset_trylock(dev_priv);
+ if (srcu < 0) {
+ ret = srcu;
+ goto err_rpm;
+ }
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto err_rpm;
+ goto err_reset;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -1930,7 +1856,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -1964,10 +1889,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto err_unpin;
-
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
@@ -1995,6 +1916,8 @@ err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
+err_reset:
+ i915_reset_unlock(dev_priv, srcu);
err_rpm:
intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -2007,7 +1930,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ if (!i915_terminally_wedged(dev_priv))
return VM_FAULT_SIGBUS;
/* else: fall through */
case -EAGAIN:
@@ -2280,7 +2203,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
-
i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2488,7 +2410,7 @@ rebuild_st:
do {
cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (likely(!IS_ERR(page)))
+ if (!IS_ERR(page))
break;
if (!*s) {
@@ -2622,6 +2544,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ /* Make the pages coherent with the GPU (flushing any swapin). */
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(pages);
+ obj->cache_dirty = false;
+ }
+
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
@@ -2823,6 +2753,33 @@ err_unlock:
goto out_unlock;
}
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size)
+{
+ enum i915_map_type has_type;
+ void *ptr;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+ offset, size, obj->base.size));
+
+ obj->mm.dirty = true;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+ return;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (has_type == I915_MAP_WC)
+ return;
+
+ drm_clflush_virt_range(ptr + offset, size);
+ if (size == obj->base.size) {
+ obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+ obj->cache_dirty = false;
+ }
+}
+
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
@@ -2832,7 +2789,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
u64 remain, offset;
unsigned int pg;
- /* Before we instantiate/pin the backing store for our use, we
+ /* Caller already validated user args */
+ GEM_BUG_ON(!access_ok(user_data, arg->size));
+
+ /*
+ * Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
* the pagecache. We avoid the penalty of instantiating all the
* pages, important if the user is just writing to a few and never
@@ -2846,7 +2807,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
- /* Before the pages are instantiated the object is treated as being
+ /*
+ * Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
* races pwrite with any other operation; corruption will ensue -
@@ -2862,20 +2824,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
struct page *page;
void *data, *vaddr;
int err;
+ char c;
len = PAGE_SIZE - pg;
if (len > remain)
len = remain;
+ /* Prefault the user page to reduce potential recursion */
+ err = __get_user(c, user_data);
+ if (err)
+ return err;
+
+ err = __get_user(c, user_data + len - 1);
+ if (err)
+ return err;
+
err = pagecache_write_begin(obj->base.filp, mapping,
offset, len, 0,
&page, &data);
if (err < 0)
return err;
- vaddr = kmap(page);
- unwritten = copy_from_user(vaddr + pg, user_data, len);
- kunmap(page);
+ vaddr = kmap_atomic(page);
+ unwritten = __copy_from_user_inatomic(vaddr + pg,
+ user_data,
+ len);
+ kunmap_atomic(vaddr);
err = pagecache_write_end(obj->base.filp, mapping,
offset, len, len - unwritten,
@@ -2883,8 +2857,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
+ /* We don't handle -EFAULT; leave it to the caller to check */
if (unwritten)
- return -EFAULT;
+ return -ENODEV;
remain -= len;
user_data += len;
@@ -2895,51 +2870,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static bool match_ring(struct i915_request *rq)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
-
- return ring == i915_ggtt_offset(rq->ring->vma);
-}
-
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
-{
- struct i915_request *request, *active = NULL;
- unsigned long flags;
-
- /*
- * We are called by the error capture, reset and to dump engine
- * state at random points in time. In particular, note that neither is
- * crucially ordered with an interrupt. After a hang, the GPU is dead
- * and we assume that no more writes can happen (we waited long enough
- * for all writes that were in transaction to be flushed) - adding an
- * extra delay for a recent interrupt is pointless. Hence, we do
- * not need an engine->irq_seqno_barrier() before the seqno reads.
- * At all other times, we must assume the GPU is still running, but
- * we only care about the snapshot of this moment.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- list_for_each_entry(request, &engine->timeline.requests, link) {
- if (i915_request_completed(request))
- continue;
-
- if (!i915_request_started(request))
- break;
-
- /* More than one preemptible request may match! */
- if (!match_ring(request))
- break;
-
- active = request;
- break;
- }
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- return active;
-}
-
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -2964,180 +2894,105 @@ i915_gem_retire_work_handler(struct work_struct *work)
round_jiffies_up_relative(HZ));
}
-static void shrink_caches(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+ unsigned long mask)
{
- /*
- * kmem_cache_shrink() discards empty slabs and reorders partially
- * filled slabs to prioritise allocating from the mostly full slabs,
- * with the aim of reducing fragmentation.
- */
- kmem_cache_shrink(i915->priorities);
- kmem_cache_shrink(i915->dependencies);
- kmem_cache_shrink(i915->requests);
- kmem_cache_shrink(i915->luts);
- kmem_cache_shrink(i915->vmas);
- kmem_cache_shrink(i915->objects);
-}
-
-struct sleep_rcu_work {
- union {
- struct rcu_head rcu;
- struct work_struct work;
- };
- struct drm_i915_private *i915;
- unsigned int epoch;
-};
+ bool result = true;
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
/*
- * There is a small chance that the epoch wrapped since we started
- * sleeping. If we assume that epoch is at least a u32, then it will
- * take at least 2^32 * 100ms for it to wrap, or about 326 years.
+ * Even if we fail to switch, give whatever is running a small chance
+ * to save itself before we report the failure. Yes, this may be a
+ * false positive due to e.g. ENOMEM, caveat emptor!
*/
- return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
- struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
- struct drm_i915_private *i915 = s->i915;
- unsigned int epoch = s->epoch;
-
- kfree(s);
- if (same_epoch(i915, epoch))
- shrink_caches(i915);
-}
+ if (i915_gem_switch_to_kernel_context(i915, mask))
+ result = false;
-static void __sleep_rcu(struct rcu_head *rcu)
-{
- struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
- struct drm_i915_private *i915 = s->i915;
-
- destroy_rcu_head(&s->rcu);
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT))
+ result = false;
+
+ if (!result) {
+ if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
- if (same_epoch(i915, s->epoch)) {
- INIT_WORK(&s->work, __sleep_work);
- queue_work(i915->wq, &s->work);
- } else {
- kfree(s);
+ /* Forcibly cancel outstanding work and leave the gpu quiet. */
+ i915_gem_set_wedged(i915);
}
-}
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
- return (READ_ONCE(i915->gt.active_requests) ||
- work_pending(&i915->gt.idle_work.work));
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+ return result;
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+static bool load_power_context(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ /* Force loading the kernel context on all engines */
+ if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
+ return false;
- if (i915_terminally_wedged(&i915->gpu_error))
- return;
+ /*
+ * Immediately park the GPU so that we enable powersaving and
+ * treat it as idle. The next time we issue a request, we will
+ * unpark and start using the engine->pinned_default_state, otherwise
+ * it is in limbo and an early reset may fail.
+ */
+ __i915_gem_park(i915);
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context !=
- to_intel_context(i915->kernel_context, engine));
- }
+ return true;
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), gt.idle_work.work);
- unsigned int epoch = I915_EPOCH_INVALID;
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
bool rearm_hangcheck;
- if (!READ_ONCE(dev_priv->gt.awake))
+ if (!READ_ONCE(i915->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_requests))
+ if (READ_ONCE(i915->gt.active_requests))
return;
- /*
- * Flush out the last user context, leaving only the pinned
- * kernel context resident. When we are idling on the kernel_context,
- * no more new requests (with a context switch) are emitted and we
- * can finally rest. A consequence is that the idle work handler is
- * always called at least twice before idling (and if the system is
- * idle that implies a round trip through the retire worker).
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_switch_to_kernel_context(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
- READ_ONCE(dev_priv->gt.active_requests));
-
- /*
- * Wait for last execlists context complete, but bail out in case a
- * new request is submitted. As we don't trust the hardware, we
- * continue on if the wait times out. This is necessary to allow
- * the machine to suspend even if the hardware dies, and we will
- * try to recover in resume (after depriving the hardware of power,
- * it may be in a better mood).
- */
- __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
- intel_engines_are_idle(dev_priv),
- I915_IDLE_ENGINES_TIMEOUT * 1000,
- 10, 500);
-
rearm_hangcheck =
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
- if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ if (!mutex_trylock(&i915->drm.struct_mutex)) {
/* Currently busy, come back later */
- mod_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
+ mod_delayed_work(i915->wq,
+ &i915->gt.idle_work,
msecs_to_jiffies(50));
goto out_rearm;
}
/*
- * New request retired after this work handler started, extend active
- * period until next instance of the work.
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. Should anything unfortunate happen
+ * while we are idle (such as the GPU being power cycled), no users
+ * will be harmed.
*/
- if (new_requests_since_last_retire(dev_priv))
- goto out_unlock;
+ if (!work_pending(&i915->gt.idle_work.work) &&
+ !i915->gt.active_requests) {
+ ++i915->gt.active_requests; /* don't requeue idle */
- epoch = __i915_gem_park(dev_priv);
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
- assert_kernel_context_is_current(dev_priv);
+ if (!--i915->gt.active_requests) {
+ __i915_gem_park(i915);
+ rearm_hangcheck = false;
+ }
+ }
- rearm_hangcheck = false;
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
- GEM_BUG_ON(!dev_priv->gt.awake);
- i915_queue_hangcheck(dev_priv);
- }
-
- /*
- * When we are idle, it is an opportune time to reap our caches.
- * However, we have many objects that utilise RCU and the ordered
- * i915->wq that this work is executing on. To try and flush any
- * pending frees now we are idle, we first wait for an RCU grace
- * period, and then queue a task (that will run last on the wq) to
- * shrink and re-optimize the caches.
- */
- if (same_epoch(dev_priv, epoch)) {
- struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- init_rcu_head(&s->rcu);
- s->i915 = dev_priv;
- s->epoch = epoch;
- call_rcu(&s->rcu, __sleep_rcu);
- }
+ GEM_BUG_ON(!i915->gt.awake);
+ i915_queue_hangcheck(i915);
}
}
@@ -3171,7 +3026,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
- kmem_cache_free(i915->luts, lut);
+ i915_lut_handle_free(lut);
__i915_gem_object_release_unless_active(obj);
}
@@ -3234,8 +3089,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
I915_WAIT_ALL,
- to_wait_timeout(args->timeout_ns),
- to_rps_client(file));
+ to_wait_timeout(args->timeout_ns));
if (args->timeout_ns > 0) {
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
@@ -3304,7 +3158,7 @@ wait_for_timelines(struct drm_i915_private *i915,
* stalls, so allow the gpu to boost to maximum clocks.
*/
if (flags & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
timeout = i915_request_wait(rq, flags, timeout);
i915_request_put(rq);
@@ -3340,19 +3194,11 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
- if (GEM_SHOW_DEBUG() && !timeout) {
- /* Presume that timeout was non-zero to begin with! */
- dev_warn(&i915->drm.pdev->dev,
- "Missed idle-completion interrupt!\n");
- GEM_TRACE_DUMP();
- }
-
err = wait_for_engines(i915);
if (err)
return err;
i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
}
return 0;
@@ -3399,8 +3245,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3462,8 +3307,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3578,8 +3422,7 @@ restart:
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3717,8 +3560,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -3844,8 +3686,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3891,8 +3732,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
long ret;
/* ABI: return -EIO if already wedged */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- return -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@ -3968,7 +3810,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
}
vma = i915_vma_instance(obj, vm, view);
- if (unlikely(IS_ERR(vma)))
+ if (IS_ERR(vma))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -4000,22 +3842,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
-static __always_inline unsigned int __busy_read_flag(unsigned int id)
+static __always_inline u32 __busy_read_flag(u8 id)
{
- /* Note that we could alias engines in the execbuf API, but
- * that would be very unwise as it prevents userspace from
- * fine control over engine selection. Ahem.
- *
- * This should be something like EXEC_MAX_ENGINE instead of
- * I915_NUM_ENGINES.
- */
- BUILD_BUG_ON(I915_NUM_ENGINES > 16);
- return 0x10000 << id;
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
}
-static __always_inline unsigned int __busy_write_id(unsigned int id)
+static __always_inline u32 __busy_write_id(u8 id)
{
- /* The uABI guarantees an active writer is also amongst the read
+ /*
+ * The uABI guarantees an active writer is also amongst the read
* engines. This would be true if we accessed the activity tracking
* under the lock, but as we perform the lookup of the object and
* its activity locklessly we can not guarantee that the last_write
@@ -4023,16 +3862,19 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- return id | __busy_read_flag(id);
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
}
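
The new helpers above encode the busy-ioctl result by uABI engine class rather than by internal engine id: each reader sets bit (16 + class), the single writer is reported as class + 1 in the low 16 bits, and the invalid class expands to all-ones. A minimal userspace-style sketch of decoding such a value, relying only on the bit layout shown above (decode_busy() and main() are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/*
 * Decode a value built like __busy_read_flag()/__busy_write_id() above:
 * readers live in the top 16 bits (one bit per uABI engine class), the
 * writer's class + 1 sits in the bottom 16 bits. An unknown/external
 * fence reports all-ones in the respective half.
 */
static void decode_busy(uint32_t busy)
{
	uint16_t readers = busy >> 16;
	uint16_t writer = busy & 0xffff;
	unsigned int cls;

	if (writer)
		printf("writing on engine class %u\n", writer - 1u);
	for (cls = 0; cls < 16; cls++)
		if (readers & (1u << cls))
			printf("reading on engine class %u\n", cls);
}

int main(void)
{
	decode_busy((0x10000u << 0) | (0 + 1)); /* busy for write on class 0 */
	return 0;
}
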
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence,
- unsigned int (*flag)(unsigned int id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
{
- struct i915_request *rq;
+ const struct i915_request *rq;
- /* We have to check the current hw status of the fence as the uABI
+ /*
+ * We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
* to eventually flush us, but to minimise latency just ask the
* hardware.
@@ -4043,11 +3885,13 @@ __busy_set_if_active(const struct dma_fence *fence,
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct i915_request, fence);
+ rq = container_of(fence, const struct i915_request, fence);
if (i915_request_completed(rq))
return 0;
- return flag(rq->engine->uabi_id);
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
}
static __always_inline unsigned int
@@ -4081,7 +3925,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (!obj)
goto out;
- /* A discrepancy here is that we do not report the status of
+ /*
+ * A discrepancy here is that we do not report the status of
* non-i915 fences, i.e. even though we may report the object as idle,
* a call to set-domain may still stall waiting for foreign rendering.
* This also means that wait-ioctl may report an object as busy,
@@ -4281,7 +4126,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4354,7 +4199,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
* acquiring such a reference whilst we are in the middle of
* freeing the object.
*/
- return atomic_long_read(&obj->base.filp->f_count) == 1;
+ return file_count(obj->base.filp) == 1;
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
@@ -4414,7 +4259,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -4537,7 +4382,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
* As we have just resumed the machine and woken the device up from
@@ -4545,7 +4390,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
i915_gem_unset_wedged(i915);
/*
@@ -4558,7 +4403,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
*/
intel_engines_sanitize(i915, false);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
mutex_lock(&i915->drm.struct_mutex);
@@ -4566,15 +4411,13 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *i915)
+void i915_gem_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- int ret;
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_suspend_gt_powersave(i915);
flush_workqueue(i915->wq);
@@ -4589,22 +4432,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(i915);
- if (ret)
- goto err_unlock;
-
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- MAX_SCHEDULE_TIMEOUT);
- if (ret && ret != -EIO)
- goto err_unlock;
-
- assert_kernel_context_is_current(i915);
- }
- i915_retire_requests(i915); /* ensure we flush after wedging */
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
mutex_unlock(&i915->drm.struct_mutex);
i915_reset_flush(i915);
@@ -4617,23 +4445,15 @@ int i915_gem_suspend(struct drm_i915_private *i915)
*/
drain_delayed_work(&i915->gt.idle_work);
- intel_uc_suspend(i915);
-
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(i915->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(i915)))
- i915_gem_set_wedged(i915); /* no hope, discard everything */
+ GEM_BUG_ON(i915->gt.awake);
- intel_runtime_pm_put(i915, wakeref);
- return 0;
+ intel_uc_suspend(i915);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
- return ret;
}
void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -4683,7 +4503,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
@@ -4693,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- i915->gt.resume(i915);
+ intel_gt_resume(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
@@ -4701,17 +4521,18 @@ void i915_gem_resume(struct drm_i915_private *i915)
intel_uc_resume(i915);
/* Always reload a context for powersaving. */
- if (i915_gem_switch_to_kernel_context(i915))
+ if (!load_power_context(i915))
goto err_wedged;
out_unlock:
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ if (!i915_reset_failed(i915)) {
+ dev_err(i915->drm.dev,
+ "Failed to re-initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(i915);
}
goto out_unlock;
@@ -4781,6 +4602,8 @@ static int __i915_gem_restart_engines(void *data)
}
}
+ intel_engines_set_scheduler_caps(i915);
+
return 0;
}
@@ -4791,7 +4614,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
dev_priv->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4816,10 +4639,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
goto out;
- }
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
@@ -4847,14 +4669,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto cleanup_uc;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -4864,7 +4686,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
+ int err = 0;
/*
* As we reset the gpu during very early sanitisation, the current
@@ -4897,36 +4719,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
}
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- goto err_active;
-
- if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
- i915_gem_set_wedged(i915);
- err = -EIO; /* Caller will declare us wedged */
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (!load_power_context(i915)) {
+ err = -EIO;
goto err_active;
}
- assert_kernel_context_is_current(i915);
-
- /*
- * Immediately park the GPU so that we enable powersaving and
- * treat it as idle. The next time we issue a request, we will
- * unpark and start using the engine->pinned_default_state, otherwise
- * it is in limbo and an early reset may fail.
- */
- __i915_gem_park(i915);
-
for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
struct i915_vma *state;
void *vaddr;
- GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+ ce = intel_context_lookup(ctx, engine);
+ if (!ce)
+ continue;
- state = to_intel_context(ctx, engine)->state;
+ state = ce->state;
if (!state)
continue;
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
@@ -4944,6 +4757,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_set_cache_coherency(engine->default_state,
+ I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
@@ -4982,19 +4797,10 @@ out_ctx:
err_active:
/*
* If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. First try to flush any remaining
- * request, ensure we are pointing at the kernel context and
- * then remove it.
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
*/
- if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
- goto out_ctx;
-
- if (WARN_ON(i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT)))
- goto out_ctx;
-
- i915_gem_contexts_lost(i915);
+ i915_gem_set_wedged(i915);
goto out_ctx;
}
@@ -5047,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->gt.resume = intel_lr_context_resume;
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- } else {
- dev_priv->gt.resume = intel_legacy_submission_resume;
+ else
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- }
i915_timelines_init(dev_priv);
@@ -5076,7 +4879,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* just magically go away.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
if (ret) {
@@ -5138,7 +4941,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_init_hw;
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -5152,7 +4955,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
- WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
@@ -5173,7 +4976,7 @@ err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_uc_misc:
@@ -5192,7 +4995,7 @@ err_uc_misc:
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ if (!i915_reset_failed(dev_priv)) {
i915_load_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
@@ -5305,36 +5108,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err = -ENOMEM;
-
- dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->objects)
- goto err_out;
-
- dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->vmas)
- goto err_objects;
-
- dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
- if (!dev_priv->luts)
- goto err_vmas;
-
- dev_priv->requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!dev_priv->requests)
- goto err_luts;
-
- dev_priv->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!dev_priv->dependencies)
- goto err_requests;
-
- dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->priorities)
- goto err_dependencies;
+ int err;
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5348,6 +5122,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
+ init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
@@ -5358,19 +5133,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
-
-err_dependencies:
- kmem_cache_destroy(dev_priv->dependencies);
-err_requests:
- kmem_cache_destroy(dev_priv->requests);
-err_luts:
- kmem_cache_destroy(dev_priv->luts);
-err_vmas:
- kmem_cache_destroy(dev_priv->vmas);
-err_objects:
- kmem_cache_destroy(dev_priv->objects);
-err_out:
- return err;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -5380,15 +5142,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
- kmem_cache_destroy(dev_priv->priorities);
- kmem_cache_destroy(dev_priv->dependencies);
- kmem_cache_destroy(dev_priv->requests);
- kmem_cache_destroy(dev_priv->luts);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
-
- /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
- rcu_barrier();
+ cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
i915_gemfs_fini(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index b0e4b976880c..9074eb1e843f 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -73,14 +73,14 @@ struct drm_i915_private;
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_NUM_ENGINES 8
+#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
- if (atomic_inc_return(&t->count) == 1)
+ if (!atomic_fetch_inc(&t->count))
tasklet_unlock_wait(t);
}
@@ -89,4 +89,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
return !atomic_read(&t->count);
}
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+ return atomic_dec_and_test(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
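
The tasklet helpers above keep a disable count: only the 0 -> 1 transition has to wait for a running tasklet to finish, and the new __tasklet_enable() reports the 1 -> 0 transition so the caller knows when to kick the tasklet again. A sketch of the same counting discipline with C11 atomics outside the kernel (fake_tasklet and the function names are made up for illustration; the real helpers additionally call tasklet_unlock_wait()):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct fake_tasklet {
	atomic_int count;	/* 0 == enabled, > 0 == disabled */
};

/* First disabler sees the old count of 0 and is the one that must wait. */
static bool disable_once(struct fake_tasklet *t)
{
	return atomic_fetch_add(&t->count, 1) == 0;
}

/* Last enabler drops the count back to 0 and should reschedule. */
static bool enable(struct fake_tasklet *t)
{
	return atomic_fetch_sub(&t->count, 1) == 1;
}

int main(void)
{
	struct fake_tasklet t = { .count = 0 };

	assert(disable_once(&t));	/* only the first disable waits */
	assert(!disable_once(&t));	/* nested disable is free */
	assert(!enable(&t));
	assert(enable(&t));		/* last enable re-arms the tasklet */
	return 0;
}
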
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82..dd728b26b5aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,12 +88,32 @@
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
+#define I915_CONTEXT_PARAM_VM 0x9
+
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+static struct i915_global_gem_context {
+ struct i915_global base;
+ struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+ return kmem_cache_free(global.slab_luts, lut);
+}
+
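
The helpers above move the lut allocations from the per-device dev_priv->luts cache to a single driver-global slab. A hedged sketch of the backing setup and teardown, mirroring the KMEM_CACHE() call removed from i915_gem_init_early() above; in the real series this registration goes through the new i915_globals machinery, and the function names here are illustrative only:

/* Illustrative only: create/destroy the slab used by the helpers above. */
static int example_lut_slab_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	return global.slab_luts ? 0 : -ENOMEM;
}

static void example_lut_slab_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}
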
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
@@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx)
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
- kmem_cache_free(ctx->i915->luts, lut);
+ i915_lut_handle_free(lut);
}
+ INIT_LIST_HEAD(&ctx->handles_list);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+
+ vma->open_count--;
__i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();
@@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- unsigned int n;
+ struct intel_context *it, *n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
+ rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+ intel_context_put(it);
- if (ce->ops)
- ce->ops->destroy(ce);
- }
+ if (ctx->timeline)
+ i915_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
+ mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
@@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx)
* the ppgtt).
*/
lut_close(ctx);
- if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -307,7 +329,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+ if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -322,134 +344,115 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- ce->gem_context = ctx;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
static struct i915_gem_context *
-__create_hw_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+__create_context(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- unsigned int n;
- int ret;
+ int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (ctx == NULL)
+ if (!ctx)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- /* Default context will never have a file_priv */
- ret = DEFAULT_CONTEXT_HANDLE;
- if (file_priv) {
- ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
- if (ret < 0)
- goto err_lut;
- }
- ctx->user_handle = ret;
-
- ctx->file_priv = file_priv;
- if (file_priv) {
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
- current->comm,
- pid_nr(ctx->pid),
- ctx->user_handle);
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
- }
-
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
i915_gem_context_set_bannable(ctx);
+ i915_gem_context_set_recoverable(ctx);
+
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+ ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
return ctx;
+}
-err_pid:
- put_pid(ctx->pid);
- idr_remove(&file_priv->context_idr, ctx->user_handle);
-err_lut:
- context_close(ctx);
- return ERR_PTR(ret);
+static struct i915_hw_ppgtt *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+{
+ struct i915_hw_ppgtt *old = ctx->ppgtt;
+
+ ctx->ppgtt = i915_ppgtt_get(ppgtt);
+ ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+
+ return old;
}
-static void __destroy_hw_context(struct i915_gem_context *ctx,
- struct drm_i915_file_private *file_priv)
+static void __assign_ppgtt(struct i915_gem_context *ctx,
+ struct i915_hw_ppgtt *ppgtt)
{
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- context_close(ctx);
+ if (ppgtt == ctx->ppgtt)
+ return;
+
+ ppgtt = __set_ppgtt(ctx, ppgtt);
+ if (ppgtt)
+ i915_ppgtt_put(ppgtt);
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
struct i915_gem_context *ctx;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
+ ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
+ !HAS_EXECLISTS(dev_priv))
+ return ERR_PTR(-EINVAL);
+
/* Reap the most stale context */
contexts_free_first(dev_priv);
- ctx = __create_hw_context(dev_priv, file_priv);
+ ctx = __create_context(dev_priv);
if (IS_ERR(ctx))
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- __destroy_hw_context(ctx, file_priv);
+ context_close(ctx);
return ERR_CAST(ppgtt);
}
- ctx->ppgtt = ppgtt;
- ctx->desc_template = default_desc_template(dev_priv, ppgtt);
+ __assign_ppgtt(ctx, ppgtt);
+ i915_ppgtt_put(ppgtt);
+ }
+
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+ struct i915_timeline *timeline;
+
+ timeline = i915_timeline_create(dev_priv, NULL);
+ if (IS_ERR(timeline)) {
+ context_close(ctx);
+ return ERR_CAST(timeline);
+ }
+
+ ctx->timeline = timeline;
}
trace_i915_context_create(ctx);
@@ -480,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = i915_gem_create_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), 0);
if (IS_ERR(ctx))
goto out;
+ ret = i915_gem_context_pin_hw_id(ctx);
+ if (ret) {
+ context_close(ctx);
+ ctx = ERR_PTR(ret);
+ goto out;
+ }
+
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
@@ -516,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
struct i915_gem_context *ctx;
int err;
- ctx = i915_gem_create_context(i915, NULL);
+ ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
@@ -552,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
static bool needs_preempt_context(struct drm_i915_private *i915)
{
- return HAS_LOGICAL_RING_PREEMPTION(i915);
+ return HAS_EXECLISTS(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
@@ -563,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -624,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct i915_gem_context *ctx = p;
+ context_close(p);
+ return 0;
+}
- context_close(ctx);
+static int vm_idr_cleanup(int id, void *p, void *data)
+{
+ i915_ppgtt_put(p);
return 0;
}
+static int gem_context_register(struct i915_gem_context *ctx,
+ struct drm_i915_file_private *fpriv)
+{
+ int ret;
+
+ ctx->file_priv = fpriv;
+ if (ctx->ppgtt)
+ ctx->ppgtt->vm.file = fpriv;
+
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+
+ /* And finally expose ourselves to userspace via the idr */
+ mutex_lock(&fpriv->context_idr_lock);
+ ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->context_idr_lock);
+ if (ret >= 0)
+ goto out;
+
+ kfree(fetch_and_zero(&ctx->name));
+err_pid:
+ put_pid(fetch_and_zero(&ctx->pid));
+out:
+ return ret;
+}
+
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
+ int err;
+
+ mutex_init(&file_priv->context_idr_lock);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init(&file_priv->context_idr);
+ idr_init_base(&file_priv->vm_idr, 1);
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file_priv);
+ ctx = i915_gem_create_context(i915, 0);
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
- idr_destroy(&file_priv->context_idr);
- return PTR_ERR(ctx);
+ err = PTR_ERR(ctx);
+ goto err;
}
+ err = gem_context_register(ctx, file_priv);
+ if (err < 0)
+ goto err_ctx;
+
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+ GEM_BUG_ON(err > 0);
return 0;
+
+err_ctx:
+ mutex_lock(&i915->drm.struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
+err:
+ idr_destroy(&file_priv->vm_idr);
+ idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+ mutex_destroy(&file_priv->context_idr_lock);
+ return err;
}
void i915_gem_context_close(struct drm_file *file)
@@ -659,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->context_idr_lock);
+
+ idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
+ idr_destroy(&file_priv->vm_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+}
+
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_vm_control *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ if (!HAS_FULL_PPGTT(i915))
+ return -ENODEV;
+
+ if (args->flags)
+ return -EINVAL;
+
+ ppgtt = i915_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ ppgtt->vm.file = file_priv;
+
+ if (args->extensions) {
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0,
+ ppgtt);
+ if (err)
+ goto err_put;
+ }
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ goto err_put;
+
+ err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ if (err < 0)
+ goto err_unlock;
+
+ GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
+ ppgtt->user_handle = err;
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+
+ args->vm_id = err;
+ return 0;
+
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return err;
+}
+
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_vm_control *args = data;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+ u32 id;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->extensions)
+ return -EINVAL;
+
+ id = args->vm_id;
+ if (!id)
+ return -ENOENT;
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
+
+ ppgtt = idr_remove(&file_priv->vm_idr, id);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != id);
+ ppgtt->user_handle = 0;
+ }
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
+
+ i915_ppgtt_put(ppgtt);
+ return 0;
}
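
The two ioctls above give userspace explicit VM (full ppGTT) handles, with id 0 reserved as "no handle". A hedged userspace sketch of the round trip, assuming the matching uapi definitions (struct drm_i915_gem_vm_control plus the DRM_IOCTL_I915_GEM_VM_CREATE/_DESTROY numbers) land in include/uapi/drm/i915_drm.h as part of this series, and that fd is an open i915 render node:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns the new vm_id on success, 0 on failure (0 is never handed out). */
static __u32 example_vm_create(int fd)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));	/* flags and extensions must be zero */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return 0;

	return ctl.vm_id;
}

static int example_vm_destroy(int fd, __u32 vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.vm_id = vm_id;
	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
}
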
static struct i915_request *
@@ -671,10 +831,9 @@ last_request_on_engine(struct i915_timeline *timeline,
rq = i915_active_request_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine) {
- GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
- timeline->name, engine->name,
- rq->fence.context, rq->fence.seqno);
+ if (rq && rq->engine->mask & engine->mask) {
+ GEM_TRACE("last request on engine %s: %llx:%llu\n",
+ engine->name, rq->fence.context, rq->fence.seqno);
GEM_BUG_ON(rq->timeline != timeline);
return rq;
}
@@ -682,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline,
return NULL;
}
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
+struct context_barrier_task {
+ struct i915_active base;
+ void (*task)(void *data);
+ void *data;
+};
+
+static void cb_retire(struct i915_active *base)
+{
+ struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
+
+ if (cb->task)
+ cb->task(cb->data);
+
+ i915_active_fini(&cb->base);
+ kfree(cb);
+}
+
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
+static int context_barrier_task(struct i915_gem_context *ctx,
+ intel_engine_mask_t engines,
+ int (*emit)(struct i915_request *rq, void *data),
+ void (*task)(void *data),
+ void *data)
{
- struct drm_i915_private *i915 = engine->i915;
- const struct intel_context * const ce =
- to_intel_context(i915->kernel_context, engine);
- struct i915_timeline *barrier = ce->ring->timeline;
- struct intel_ring *ring;
- bool any_active = false;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct context_barrier_task *cb;
+ struct intel_context *ce, *next;
+ intel_wakeref_t wakeref;
+ int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
- list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
- struct i915_request *rq;
+ GEM_BUG_ON(!task);
- rq = last_request_on_engine(ring->timeline, engine);
- if (!rq)
- continue;
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb)
+ return -ENOMEM;
- any_active = true;
+ i915_active_init(i915, &cb->base, cb_retire);
+ i915_active_acquire(&cb->base);
- if (rq->hw_context == ce)
+ wakeref = intel_runtime_pm_get(i915);
+ rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq;
+
+ if (!(engine->mask & engines))
continue;
- /*
- * Was this request submitted after the previous
- * switch-to-kernel-context?
- */
- if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
- GEM_TRACE("%s needs barrier for %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
- return false;
+ if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
+ engine->mask)) {
+ err = -ENXIO;
+ break;
+ }
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
}
- GEM_TRACE("%s has barrier after %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
+ err = 0;
+ if (emit)
+ err = emit(rq, data);
+ if (err == 0)
+ err = i915_active_ref(&cb->base, rq->fence.context, rq);
+
+ i915_request_add(rq);
+ if (err)
+ break;
}
+ intel_runtime_pm_put(i915, wakeref);
- /*
- * If any other timeline was still active and behind the last barrier,
- * then our last switch-to-kernel-context must still be queued and
- * will run last (leaving the engine in the kernel context when it
- * eventually idles).
- */
- if (any_active)
- return true;
+ cb->task = err ? NULL : task; /* caller needs to unwind instead */
+ cb->data = data;
- /* The engine is idle; check that it is idling in the kernel context. */
- return engine->last_retired_context == ce;
+ i915_active_release(&cb->base);
+
+ return err;
}
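
context_barrier_task() above is the new building block for retiring per-context state: it queues an (optionally empty) request on every currently-bound engine of the context and invokes task(data) once all of them have signalled. set_ppgtt() further down is the real user; a hedged sketch of a simpler caller, with illustrative names only:

/*
 * Illustrative only: free 'stale' once every engine of ctx has flushed the
 * requests that may still be referencing it.
 */
static void example_free_stale(void *data)
{
	kfree(data);
}

static int example_retire_stale(struct i915_gem_context *ctx, void *stale)
{
	/* No emit callback needed; an empty request per engine suffices. */
	return context_barrier_task(ctx, ALL_ENGINES,
				    NULL, example_free_stale, stale);
}
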
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->kernel_context);
- i915_retire_requests(i915);
+ /* Inoperable, so presume the GPU is safely pointing into the void! */
+ if (i915_terminally_wedged(i915))
+ return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine_masked(engine, i915, mask, mask) {
struct intel_ring *ring;
struct i915_request *rq;
- GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
- if (engine_has_kernel_context_barrier(engine))
- continue;
-
- GEM_TRACE("emit barrier on %s\n", engine->name);
-
rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -779,7 +961,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
- i915_timeline_sync_set(rq->timeline, &prev->fence);
}
i915_request_add(rq);
@@ -788,183 +969,173 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
return 0;
}
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
+static int get_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_context_create *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
int ret;
- if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
+ return -EINVAL; /* nothing to see here; please move along */
+
+ if (!ctx->ppgtt)
return -ENODEV;
- if (args->pad != 0)
- return -EINVAL;
+ /* XXX rcu acquire? */
+ ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (client_is_banned(file_priv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ ppgtt = i915_ppgtt_get(ctx->ppgtt);
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
- return -EIO;
+ ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (ret)
+ goto err_put;
+
+ if (!ppgtt->user_handle) {
+ ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ GEM_BUG_ON(!ret);
+ if (ret < 0)
+ goto err_unlock;
+
+ ppgtt->user_handle = ret;
+ i915_ppgtt_get(ppgtt);
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ args->size = 0;
+ args->value = ppgtt->user_handle;
- ctx = i915_gem_create_context(dev_priv, file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ret = 0;
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return ret;
+}
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+static void set_ppgtt_barrier(void *data)
+{
+ struct i915_hw_ppgtt *old = data;
- args->ctx_id = ctx->user_handle;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ if (INTEL_GEN(old->vm.i915) < 8)
+ gen6_ppgtt_unpin_all(old);
- return 0;
+ i915_ppgtt_put(old);
}
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct drm_i915_gem_context_destroy *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
- int ret;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
+ u32 base = engine->mmio_base;
+ u32 *cs;
+ int i;
- if (args->pad != 0)
- return -EINVAL;
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
- return -ENOENT;
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+ *cs++ = lower_32_bits(pd_daddr);
- __destroy_hw_context(ctx, file_priv);
- mutex_unlock(&dev->struct_mutex);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else {
+ /* ppGTT is not part of the legacy context image */
+ gen6_ppgtt_pin(ppgtt);
+ }
-out:
- i915_gem_context_put(ctx);
return 0;
}
-static int get_sseu(struct i915_gem_context *ctx,
- struct drm_i915_gem_context_param *args)
+static int set_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
- struct intel_context *ce;
- int ret;
-
- if (args->size == 0)
- goto out;
- else if (args->size < sizeof(user_sseu))
- return -EINVAL;
-
- if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
- sizeof(user_sseu)))
- return -EFAULT;
+ struct i915_hw_ppgtt *ppgtt, *old;
+ int err;
- if (user_sseu.flags || user_sseu.rsvd)
- return -EINVAL;
+ return -EINVAL; /* nothing to see here; please move along */
- engine = intel_engine_lookup_user(ctx->i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
- if (!engine)
+ if (args->size)
return -EINVAL;
- /* Only use for mutex here is to serialize get_param and set_param. */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ce = to_intel_context(ctx, engine);
-
- user_sseu.slice_mask = ce->sseu.slice_mask;
- user_sseu.subslice_mask = ce->sseu.subslice_mask;
- user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
- user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+ if (!ctx->ppgtt)
+ return -ENODEV;
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ if (upper_32_bits(args->value))
+ return -ENOENT;
- if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
- sizeof(user_sseu)))
- return -EFAULT;
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
-out:
- args->size = sizeof(user_sseu);
+ ppgtt = idr_find(&file_priv->vm_idr, args->value);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != args->value);
+ i915_ppgtt_get(ppgtt);
+ }
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
- return 0;
-}
+ err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (err)
+ goto out;
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
- int ret = 0;
+ if (ppgtt == ctx->ppgtt)
+ goto unlock;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+ lut_close(ctx);
- switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
- case I915_CONTEXT_PARAM_NO_ZEROMAP:
- args->size = 0;
- args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
- break;
- case I915_CONTEXT_PARAM_GTT_SIZE:
- args->size = 0;
+ old = __set_ppgtt(ctx, ppgtt);
- if (ctx->ppgtt)
- args->value = ctx->ppgtt->vm.total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
- else
- args->value = to_i915(dev)->ggtt.vm.total;
- break;
- case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- args->size = 0;
- args->value = i915_gem_context_no_error_capture(ctx);
- break;
- case I915_CONTEXT_PARAM_BANNABLE:
- args->size = 0;
- args->value = i915_gem_context_is_bannable(ctx);
- break;
- case I915_CONTEXT_PARAM_PRIORITY:
- args->size = 0;
- args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
- break;
- case I915_CONTEXT_PARAM_SSEU:
- ret = get_sseu(ctx, args);
- break;
- default:
- ret = -EINVAL;
- break;
+ /*
+ * We need to flush any requests using the current ppgtt before
+ * we release it as the requests do not hold a reference themselves,
+ * only indirectly through the context.
+ */
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ emit_ppgtt_update,
+ set_ppgtt_barrier,
+ old);
+ if (err) {
+ ctx->ppgtt = old;
+ ctx->desc_template = default_desc_template(ctx->i915, old);
+ i915_ppgtt_put(ppgtt);
}
- i915_gem_context_put(ctx);
- return ret;
+unlock:
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+out:
+ i915_ppgtt_put(ppgtt);
+ return err;
}
static int gen8_emit_rpcs_config(struct i915_request *rq,
@@ -993,39 +1164,35 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
}
static int
-gen8_modify_rpcs_gpu(struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq, *prev;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ struct i915_request *rq;
intel_wakeref_t wakeref;
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ lockdep_assert_held(&ce->pin_mutex);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_is_pinned(ce))
+ return 0;
/* Submitting requests etc needs the hw awake. */
wakeref = intel_runtime_pm_get(i915);
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_alloc(ce->engine, i915->kernel_context);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_put;
}
/* Queue this switch after all other activity by this context. */
- prev = i915_active_request_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- ret = i915_request_await_dma_fence(rq, &prev->fence);
- if (ret < 0)
- goto out_add;
- }
-
- /* Order all following requests to be after. */
- ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+ ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
@@ -1057,27 +1224,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_sseu sseu)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
int ret = 0;
GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
+
+ ce = intel_context_pin_lock(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- return 0;
-
- /*
- * If context is not idle we have to submit an ordered request to modify
- * its context image via the kernel context. Pristine and idle contexts
- * will be configured on pinning.
- */
- if (ce->pin_count)
- ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+ goto unlock;
+ ret = gen8_modify_rpcs(ce, sseu);
if (!ret)
ce->sseu = sseu;
+unlock:
+ intel_context_pin_unlock(ce);
return ret;
}
@@ -1220,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
return -EINVAL;
engine = intel_engine_lookup_user(i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
if (!engine)
return -EINVAL;
@@ -1242,22 +1408,13 @@ static int set_sseu(struct i915_gem_context *ctx,
return 0;
}
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int ctx_setparam(struct drm_i915_file_private *fpriv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
int ret = 0;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
-
switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size)
ret = -EINVAL;
@@ -1266,6 +1423,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
+
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size)
ret = -EINVAL;
@@ -1274,6 +1432,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
i915_gem_context_clear_no_error_capture(ctx);
break;
+
case I915_CONTEXT_PARAM_BANNABLE:
if (args->size)
ret = -EINVAL;
@@ -1285,13 +1444,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
i915_gem_context_clear_bannable(ctx);
break;
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value)
+ i915_gem_context_set_recoverable(ctx);
+ else
+ i915_gem_context_clear_recoverable(ctx);
+ break;
+
case I915_CONTEXT_PARAM_PRIORITY:
{
s64 priority = args->value;
if (args->size)
ret = -EINVAL;
- else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
ret = -ENODEV;
else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY)
@@ -1304,14 +1472,266 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
I915_USER_PRIORITY(priority);
}
break;
+
case I915_CONTEXT_PARAM_SSEU:
ret = set_sseu(ctx, args);
break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = set_ppgtt(fpriv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
break;
}
+ return ret;
+}
+
+struct create_ext {
+ struct i915_gem_context *ctx;
+ struct drm_i915_file_private *fpriv;
+};
+
+static int create_setparam(struct i915_user_extension __user *ext, void *data)
+{
+ struct drm_i915_gem_context_create_ext_setparam local;
+ const struct create_ext *arg = data;
+
+ if (copy_from_user(&local, ext, sizeof(local)))
+ return -EFAULT;
+
+ if (local.param.ctx_id)
+ return -EINVAL;
+
+ return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_context_create_ext *args = data;
+ struct create_ext ext_data;
+ int ret;
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return -ENODEV;
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
+ return -EINVAL;
+
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ret;
+
+ ext_data.fpriv = file->driver_priv;
+ if (client_is_banned(ext_data.fpriv)) {
+ DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+ current->comm,
+ pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ return -EIO;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ext_data.ctx = i915_gem_create_context(i915, args->flags);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ext_data.ctx))
+ return PTR_ERR(ext_data.ctx);
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ if (ret)
+ goto err_ctx;
+ }
+
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ if (ret < 0)
+ goto err_ctx;
+
+ args->ctx_id = ret;
+ DRM_DEBUG("HW context %d created\n", args->ctx_id);
+
+ return 0;
+
+err_ctx:
+ mutex_lock(&dev->struct_mutex);
+ context_close(ext_data.ctx);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_gem_context *ctx;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (!args->ctx_id)
+ return -ENOENT;
+
+ if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+ return -EINTR;
+
+ ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+ mutex_unlock(&file_priv->context_idr_lock);
+ if (!ctx)
+ return -ENOENT;
+
+ mutex_lock(&dev->struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int get_sseu(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ if (args->size == 0)
+ goto out;
+ else if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.flags || user_sseu.rsvd)
+ return -EINVAL;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
+ if (!engine)
+ return -EINVAL;
+
+ ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ user_sseu.slice_mask = ce->sseu.slice_mask;
+ user_sseu.subslice_mask = ce->sseu.subslice_mask;
+ user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+ user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+ intel_context_pin_unlock(ce);
+
+ if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+out:
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret = 0;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->size = 0;
+ args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+ break;
+
+ case I915_CONTEXT_PARAM_GTT_SIZE:
+ args->size = 0;
+ if (ctx->ppgtt)
+ args->value = ctx->ppgtt->vm.total;
+ else if (to_i915(dev)->mm.aliasing_ppgtt)
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ else
+ args->value = to_i915(dev)->ggtt.vm.total;
+ break;
+
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->size = 0;
+ args->value = i915_gem_context_no_error_capture(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_BANNABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_bannable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_recoverable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ args->size = 0;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+ break;
+
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = get_sseu(ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = get_ppgtt(file_priv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ i915_gem_context_put(ctx);
+ return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ ret = ctx_setparam(file_priv, ctx, args);
+
i915_gem_context_put(ctx);
return ret;
}
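
As a usage note for the setparam path just above: I915_CONTEXT_PARAM_RECOVERABLE is a plain boolean parameter, so it travels through the existing CONTEXT_SETPARAM ioctl with args->size left at zero, exactly like the other boolean flags handled by ctx_setparam(). A minimal userspace sketch, assuming the uapi definitions from include/uapi/drm/i915_drm.h and with error handling elided:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Opt a context out of automatic recovery after it causes a GPU hang. */
static int context_clear_recoverable(int drm_fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_param p;

        memset(&p, 0, sizeof(p));
        p.ctx_id = ctx_id;
        p.param = I915_CONTEXT_PARAM_RECOVERABLE;
        p.value = 0;    /* p.size stays 0, mirroring the check in ctx_setparam() */

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}

The same shape works for the other boolean parameters handled above, such as I915_CONTEXT_PARAM_BANNABLE.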
@@ -1385,3 +1805,28 @@ out_unlock:
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
+
+static void i915_global_gem_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_luts);
+}
+
+static void i915_global_gem_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_luts);
+}
+
+static struct i915_global_gem_context global = { {
+ .shrink = i915_global_gem_context_shrink,
+ .exit = i915_global_gem_context_exit,
+} };
+
+int __init i915_global_gem_context_init(void)
+{
+ global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!global.slab_luts)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24..23dcb01bfd82 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -25,210 +25,16 @@
#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
+#include "i915_gem_context_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "intel_context.h"
#include "intel_device_info.h"
-struct pid;
-
struct drm_device;
struct drm_file;
-struct drm_i915_private;
-struct drm_i915_file_private;
-struct i915_hw_ppgtt;
-struct i915_request;
-struct i915_vma;
-struct intel_ring;
-
-#define DEFAULT_CONTEXT_HANDLE 0
-
-struct intel_context;
-
-struct intel_context_ops {
- void (*unpin)(struct intel_context *ce);
- void (*destroy)(struct intel_context *ce);
-};
-
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
- u8 slice_mask;
- u8 subslice_mask;
- u8 min_eus_per_subslice;
- u8 max_eus_per_subslice;
-};
-
-/**
- * struct i915_gem_context - client state
- *
- * The struct i915_gem_context represents the combined view of the driver and
- * logical hardware state for a particular client.
- */
-struct i915_gem_context {
- /** i915: i915 device backpointer */
- struct drm_i915_private *i915;
-
- /** file_priv: owning file descriptor */
- struct drm_i915_file_private *file_priv;
-
- /**
- * @ppgtt: unique address space (GTT)
- *
- * In full-ppgtt mode, each context has its own address space ensuring
- * complete seperation of one client from all others.
- *
- * In other modes, this is a NULL pointer with the expectation that
- * the caller uses the shared global GTT.
- */
- struct i915_hw_ppgtt *ppgtt;
-
- /**
- * @pid: process id of creator
- *
- * Note that who created the context may not be the principle user,
- * as the context may be shared across a local socket. However,
- * that should only affect the default context, all contexts created
- * explicitly by the client are expected to be isolated.
- */
- struct pid *pid;
-
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
- /** link: place with &drm_i915_private.context_list */
- struct list_head link;
- struct llist_node free_link;
-
- /**
- * @ref: reference count
- *
- * A reference to a context is held by both the client who created it
- * and on each request submitted to the hardware using the request
- * (to ensure the hardware has access to the state until it has
- * finished all pending writes). See i915_gem_context_get() and
- * i915_gem_context_put() for access.
- */
- struct kref ref;
-
- /**
- * @rcu: rcu_head for deferred freeing.
- */
- struct rcu_head rcu;
-
- /**
- * @user_flags: small set of booleans controlled by the user
- */
- unsigned long user_flags;
-#define UCONTEXT_NO_ZEROMAP 0
-#define UCONTEXT_NO_ERROR_CAPTURE 1
-#define UCONTEXT_BANNABLE 2
-
- /**
- * @flags: small set of booleans
- */
- unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
-
- /**
- * @user_handle: userspace identifier
- *
- * A unique per-file identifier is generated from
- * &drm_i915_file_private.contexts.
- */
- u32 user_handle;
-
- struct i915_sched_attr sched;
-
- /** engine: per-engine logical HW state */
- struct intel_context {
- struct i915_gem_context *gem_context;
- struct intel_engine_cs *active;
- struct list_head signal_link;
- struct list_head signals;
- struct i915_vma *state;
- struct intel_ring *ring;
- u32 *lrc_reg_state;
- u64 lrc_desc;
- int pin_count;
-
- /**
- * active_tracker: Active tracker for the external rq activity
- * on this intel_context object.
- */
- struct i915_active_request active_tracker;
-
- const struct intel_context_ops *ops;
-
- /** sseu: Control eu/slice partitioning */
- struct intel_sseu sseu;
- } __engine[I915_NUM_ENGINES];
-
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
- /** guilty_count: How many times this context has caused a GPU hang. */
- atomic_t guilty_count;
- /**
- * @active_count: How many times this context was active during a GPU
- * hang, but did not cause it.
- */
- atomic_t active_count;
-
-#define CONTEXT_SCORE_GUILTY 10
-#define CONTEXT_SCORE_BAN_THRESHOLD 40
- /** ban_score: Accumulated score of all hangs caused by this context. */
- atomic_t ban_score;
-
- /** remap_slice: Bitmask of cache lines that need remapping */
- u8 remap_slice;
-
- /** handles_vma: rbtree to look up our context specific obj/vma for
- * the user handle. (user handles are per fd, but the binding is
- * per vm, which may be one per context or shared with the global GTT)
- */
- struct radix_tree_root handles_vma;
-
- /** handles_list: reverse list of all the rbtree entries in use for
- * this context, which allows us to free all the allocations on
- * context close.
- */
- struct list_head handles_list;
-};
-
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_CLOSED, &ctx->flags);
@@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -305,45 +126,11 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
atomic_dec(&ctx->hw_id_pin_count);
}
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
- return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
}
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
-{
- return &ctx->__engine[engine->id];
-}
-
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
- return engine->context_pin(engine, ctx);
-}
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- ce->pin_count++;
-}
-
-static inline void intel_context_unpin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- if (--ce->pin_count)
- return;
-
- GEM_BUG_ON(!ce->ops);
- ce->ops->unpin(ce);
-}
-
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
@@ -354,12 +141,18 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
-void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+struct i915_lut_handle *i915_lut_handle_alloc(void);
+void i915_lut_handle_free(struct i915_lut_handle *lut);
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
new file mode 100644
index 000000000000..e2ec58b10fb2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CONTEXT_TYPES_H__
+#define __I915_GEM_CONTEXT_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_scheduler.h"
+#include "intel_context_types.h"
+
+struct pid;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_timeline;
+struct intel_ring;
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+ /** i915: i915 device backpointer */
+ struct drm_i915_private *i915;
+
+ /** file_priv: owning file descriptor */
+ struct drm_i915_file_private *file_priv;
+
+ struct i915_timeline *timeline;
+
+ /**
+ * @ppgtt: unique address space (GTT)
+ *
+ * In full-ppgtt mode, each context has its own address space ensuring
+ * complete separation of one client from all others.
+ *
+ * In other modes, this is a NULL pointer with the expectation that
+ * the caller uses the shared global GTT.
+ */
+ struct i915_hw_ppgtt *ppgtt;
+
+ /**
+ * @pid: process id of creator
+ *
+ * Note that who created the context may not be the principal user,
+ * as the context may be shared across a local socket. However,
+ * that should only affect the default context, all contexts created
+ * explicitly by the client are expected to be isolated.
+ */
+ struct pid *pid;
+
+ /**
+ * @name: arbitrary name
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ const char *name;
+
+ /** link: place with &drm_i915_private.context_list */
+ struct list_head link;
+ struct llist_node free_link;
+
+ /**
+ * @ref: reference count
+ *
+ * A reference to a context is held by both the client who created it
+ * and on each request submitted to the hardware using the request
+ * (to ensure the hardware has access to the state until it has
+ * finished all pending writes). See i915_gem_context_get() and
+ * i915_gem_context_put() for access.
+ */
+ struct kref ref;
+
+ /**
+ * @rcu: rcu_head for deferred freeing.
+ */
+ struct rcu_head rcu;
+
+ /**
+ * @user_flags: small set of booleans controlled by the user
+ */
+ unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP 0
+#define UCONTEXT_NO_ERROR_CAPTURE 1
+#define UCONTEXT_BANNABLE 2
+#define UCONTEXT_RECOVERABLE 3
+
+ /**
+ * @flags: small set of booleans
+ */
+ unsigned long flags;
+#define CONTEXT_BANNED 0
+#define CONTEXT_CLOSED 1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
+
+ /**
+ * @hw_id: - unique identifier for the context
+ *
+ * The hardware needs to uniquely identify the context for a few
+ * functions like fault reporting, PASID, scheduling. The
+ * &drm_i915_private.context_hw_ida is used to assign a unique
+ * id for the lifetime of the context.
+ *
+ * @hw_id_pin_count: - number of times this context had been pinned
+ * for use (should be, at most, once per engine).
+ *
+ * @hw_id_link: - all contexts with an assigned id are tracked
+ * for possible repossession.
+ */
+ unsigned int hw_id;
+ atomic_t hw_id_pin_count;
+ struct list_head hw_id_link;
+
+ struct list_head active_engines;
+ struct mutex mutex;
+
+ struct i915_sched_attr sched;
+
+ /** hw_contexts: per-engine logical HW state */
+ struct rb_root hw_contexts;
+ spinlock_t hw_contexts_lock;
+
+ /** ring_size: size for allocating the per-engine ring buffer */
+ u32 ring_size;
+ /** desc_template: invariant fields for the HW context descriptor */
+ u32 desc_template;
+
+ /** guilty_count: How many times this context has caused a GPU hang. */
+ atomic_t guilty_count;
+ /**
+ * @active_count: How many times this context was active during a GPU
+ * hang, but did not cause it.
+ */
+ atomic_t active_count;
+
+ /**
+ * @hang_timestamp: The last time(s) this context caused a GPU hang
+ */
+ unsigned long hang_timestamp[2];
+#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
+
+ /** remap_slice: Bitmask of cache lines that need remapping */
+ u8 remap_slice;
+
+ /** handles_vma: rbtree to look up our context specific obj/vma for
+ * the user handle. (user handles are per fd, but the binding is
+ * per vm, which may be one per context or shared with the global GTT)
+ */
+ struct radix_tree_root handles_vma;
+
+ /** handles_list: reverse list of all the rbtree entries in use for
+ * this context, which allows us to free all the allocations on
+ * context close.
+ */
+ struct list_head handles_list;
+};
+
+#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
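
Worth noting for readers of the new type: the fixed __engine[I915_NUM_ENGINES] array has become an engine-keyed rbtree (hw_contexts, guarded by hw_contexts_lock), and the execbuffer hunk further down switches to intel_context_lookup() accordingly. The real lookup helper lives in intel_context.c, which is not part of this excerpt; the sketch below only illustrates the shape of such a lookup, and the ce->engine / ce->node field names are assumptions.

#include <linux/rbtree.h>
#include <linux/spinlock.h>
/* assumes the i915 context/engine types above are in scope */

static struct intel_context *
hw_context_lookup(struct i915_gem_context *ctx,
                  const struct intel_engine_cs *engine)
{
        struct intel_context *ce = NULL;
        struct rb_node *p;

        spin_lock(&ctx->hw_contexts_lock);
        p = ctx->hw_contexts.rb_node;
        while (p) {
                struct intel_context *this =
                        rb_entry(p, struct intel_context, node);

                if (this->engine == engine) {   /* found the per-engine state */
                        ce = this;
                        break;
                }

                if (this->engine < engine)
                        p = p->rb_right;
                else
                        p = p->rb_left;
        }
        spin_unlock(&ctx->hw_contexts_lock);

        return ce;
}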
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 02f7298bfe57..5a101a9462d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
@@ -300,7 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(to_i915(dev));
+ obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 68d74c50ac39..060f5903544a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (i915->gt.active_requests)
- return false;
-
- for_each_engine(engine, i915, id) {
- if (!intel_engine_has_kernel_context(engine))
- return false;
- }
-
- return true;
+ return !i915->gt.active_requests;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
int err;
- /* Not everything in the GGTT is tracked via vma (otherwise we
+ /*
+ * Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- err = i915_gem_switch_to_kernel_context(i915);
+ err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 02adcaf6ebea..c83d2a195d15 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
- ce = to_intel_context(eb->ctx, eb->engine);
- if (!ce->ring) /* first use, assume empty! */
+ ce = intel_context_lookup(eb->ctx, eb->engine);
+ if (!ce || !ce->ring) /* first use, assume empty! */
return 0;
rq = __eb_wait_for_ring(ce->ring);
@@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
vma = i915_vma_instance(obj, eb->vm, NULL);
- if (unlikely(IS_ERR(vma))) {
+ if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
- lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+ lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
@@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kmem_cache_free(eb->i915->luts, lut);
+ i915_lut_handle_free(lut);
goto err_obj;
}
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
{
GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
+
i915_gem_chipset_flush(cache->rq->i915);
i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (IS_ERR(cmd))
return PTR_ERR(cmd);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- if (err)
- goto err_unmap;
-
batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -1667,6 +1666,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
len)) {
end_user:
user_access_end();
+end:
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1686,7 +1686,7 @@ end_user:
* relocations were valid.
*/
if (!user_access_begin(urelocs, size))
- goto end_user;
+ goto end;
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
@@ -1957,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
+ if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2082,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
#define I915_USER_RINGS (4)
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
- [I915_EXEC_DEFAULT] = RCS,
- [I915_EXEC_RENDER] = RCS,
- [I915_EXEC_BLT] = BCS,
- [I915_EXEC_BSD] = VCS,
- [I915_EXEC_VEBOX] = VECS
+ [I915_EXEC_DEFAULT] = RCS0,
+ [I915_EXEC_RENDER] = RCS0,
+ [I915_EXEC_BLT] = BCS0,
+ [I915_EXEC_BSD] = VCS0,
+ [I915_EXEC_VEBOX] = VECS0
};
static struct intel_engine_cs *
@@ -2109,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2312,10 +2312,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine)
- return -EINVAL;
-
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
@@ -2340,6 +2336,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
+ eb.engine = eb_select_engine(eb.i915, file, args);
+ if (!eb.engine) {
+ err = -EINVAL;
+ goto err_engine;
+ }
+
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2505,6 +2507,7 @@ err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915, wakeref);
+err_engine:
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
@@ -2695,7 +2698,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* when we did the "copy_from_user()" above.
*/
if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
- goto end_user;
+ goto end;
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
@@ -2709,6 +2712,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
}
end_user:
user_access_end();
+end:;
}
args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index e037e94792f3..3084f52e3372 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
+ struct i915_vma *old;
int ret;
if (vma) {
@@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
return ret;
}
- if (fence->vma) {
- struct i915_vma *old = fence->vma;
-
+ old = xchg(&fence->vma, NULL);
+ if (old) {
ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
- if (ret)
+ if (ret) {
+ fence->vma = old;
return ret;
+ }
i915_vma_flush_writes(old);
- }
- if (fence->vma && fence->vma != vma) {
- /* Ensure that all userspace CPU access is completed before
+ /*
+ * Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- GEM_BUG_ON(fence->vma->fence != fence);
- i915_vma_revoke_mmap(fence->vma);
-
- fence->vma->fence = NULL;
- fence->vma = NULL;
+ if (old != vma) {
+ GEM_BUG_ON(old->fence != fence);
+ i915_vma_revoke_mmap(old);
+ old->fence = NULL;
+ }
list_move(&fence->link, &fence->i915->mm.fence_list);
}
- /* We only need to update the register itself if the device is awake.
+ /*
+ * We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
+ *
+ * This only works for removing the fence register, on acquisition
+ * the caller must hold the rpm wakeref. The fence register must
+ * be cleared before we can use any other fences to ensure that
+ * the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
- if (wakeref) {
- fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915, wakeref);
+ if (!wakeref) {
+ GEM_BUG_ON(vma);
+ return 0;
}
- if (vma) {
- if (fence->vma != vma) {
- vma->fence = fence;
- fence->vma = vma;
- }
+ WRITE_ONCE(fence->vma, vma);
+ fence_write(fence, vma);
+ if (vma) {
+ vma->fence = fence;
list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
+ intel_runtime_pm_put(fence->i915, wakeref);
return 0;
}
@@ -436,32 +443,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
}
/**
- * i915_gem_revoke_fences - revoke fence state
- * @dev_priv: i915 device private
- *
- * Removes all GTT mmappings via the fence registers. This forces any user
- * of the fence to reacquire that fence before continuing with their access.
- * One use is during GPU reset where the fence register is lost and we need to
- * revoke concurrent userspace access via GTT mmaps until the hardware has been
- * reset and the fence registers have been restored.
- */
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
-
- if (fence->vma)
- i915_vma_revoke_mmap(fence->vma);
- }
-}
-
-/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
*
@@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
int i;
+ rcu_read_lock(); /* keep obj alive as we dereference */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct i915_vma *vma = reg->vma;
+ struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (vma && !i915_gem_object_is_tiled(vma->obj)) {
- GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(i915_vma_has_userfault(vma));
-
- list_move(&reg->link, &dev_priv->mm.fence_list);
- vma->fence = NULL;
+ if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
- }
fence_write(reg, vma);
- reg->vma = vma;
}
+ rcu_read_unlock();
}
/**
@@ -609,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev_priv) ||
- IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
+ } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
+ */
+ if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else {
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -660,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- *
- * Reports indicate that the swizzling actually
- * varies depending upon page placement inside the
- * channels, i.e. we see swizzled pages where the
- * banks of memory are paired and unswizzled on the
- * uneven portion, so leave that as unknown.
- */
- if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
}
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
@@ -790,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
int i;
if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d646d37eec2f..8f460cc4cc1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_48bit(vm) &&
+ if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
size = I915_GTT_PAGE_SIZE_64K;
gfp |= __GFP_NOWARN;
@@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
- vm->scratch_page.order = order;
+ vm->scratch_order = order;
return 0;
unmap_page:
@@ -632,10 +632,11 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
struct i915_page_dma *p = &vm->scratch_page;
+ int order = vm->scratch_order;
- dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, p->order);
+ __free_pages(p->page, order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
-static inline bool use_4lvl(const struct i915_address_space *vm)
-{
- return i915_vm_is_48bit(vm);
-}
-
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
{
__pdp_fini(pdp);
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
cleanup_px(vm, pdp);
@@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+/*
+ * PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
+ ppgtt->pd_dirty_engines = ALL_ENGINES;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
- unsigned int pte = gen8_pte_index(start);
- unsigned int pte_end = pte + num_entries;
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
return true;
vaddr = kmap_atomic_px(pt);
- while (pte < pte_end)
- vaddr[pte++] = vm->scratch_pte;
+ memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
return false;
@@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
gen8_ppgtt_pdpe_t *vaddr;
pdp->page_directory[pdpe] = pd;
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
vaddr = kmap_atomic_px(pdp);
@@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
unsigned int pml4e;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
- vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_order = clone->scratch_order;
vm->scratch_pte = clone->scratch_pte;
vm->scratch_pt = clone->scratch_pt;
vm->scratch_pd = clone->scratch_pd;
@@ -1234,7 +1228,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
- PTE_READ_ONLY);
+ vm->has_read_only);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
goto free_pt;
}
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
@@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
@@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg;
int i;
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
if (!vm->scratch_page.daddr)
return;
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
@@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1519,6 +1513,23 @@ unwind:
return -ENOMEM;
}
+static void ppgtt_init(struct drm_i915_private *i915,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1535,19 +1546,15 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->ref);
+ ppgtt_init(i915, ppgtt);
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
- 1ULL << 48 :
- 1ULL << 32;
-
- /* From bdw, there is support for read-only pages in the PPGTT. */
- ppgtt->vm.has_read_only = true;
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ */
+ ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1559,7 +1566,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- if (use_4lvl(&ppgtt->vm)) {
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
err = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (err)
goto err_scratch;
@@ -1592,11 +1599,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-
return ppgtt;
err_scratch:
@@ -1672,8 +1674,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
- const unsigned int end = min(pte + num_entries, GEN6_PTES);
- const unsigned int count = end - pte;
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
GEM_BUG_ON(pt == vm->scratch_pt);
@@ -1693,9 +1694,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
*/
vaddr = kmap_atomic_px(pt);
- do {
- vaddr[pte++] = scratch_pte;
- } while (pte < end);
+ memset32(vaddr + pte, scratch_pte, count);
kunmap_atomic(vaddr);
pte = 0;
@@ -1913,7 +1912,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
- vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
@@ -1943,6 +1942,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
+ GEM_BUG_ON(ppgtt->base.vm.closed);
+
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
@@ -1981,6 +1982,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!ppgtt->pin_count)
+ return;
+
+ ppgtt->pin_count = 0;
+ i915_vma_unpin(ppgtt->vma);
+}
+
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
@@ -1991,25 +2003,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->base.ref);
-
- ppgtt->base.vm.i915 = i915;
- ppgtt->base.vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
-
- i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
+ ppgtt_init(i915, &ppgtt->base);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
-
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2087,8 +2087,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
}
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915,
- struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_hw_ppgtt *ppgtt;
@@ -2096,19 +2095,11 @@ i915_ppgtt_create(struct drm_i915_private *i915,
if (IS_ERR(ppgtt))
return ppgtt;
- ppgtt->vm.file = fpriv;
-
trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
-void i915_ppgtt_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
-}
-
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
@@ -2675,7 +2666,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -3701,7 +3692,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
}
ret = 0;
- if (unlikely(IS_ERR(vma->pages))) {
+ if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
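
One readability note for this file: the PTE scrubbing loops in gen8_ppgtt_clear_pt() and gen6_ppgtt_clear_range() are now single memset64()/memset32() calls, which write a repeated 64-bit/32-bit value into a run of entries. A freestanding sketch of the equivalence (illustration only, not from the patch):

#include <linux/string.h>
#include <linux/types.h>

static void fill_scratch_ptes(u64 *vaddr, unsigned int first,
                              unsigned int count, u64 scratch_pte)
{
        /* was: while (count--) vaddr[first++] = scratch_pte; */
        memset64(vaddr + first, scratch_pte, count);
}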
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 03ade71b8d9a..f597f35b109b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -213,7 +213,6 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
- int order;
union {
dma_addr_t daddr;
@@ -293,6 +292,7 @@ struct i915_address_space {
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
+ int scratch_order;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -348,7 +348,7 @@ struct i915_address_space {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
-i915_vm_is_48bit(const struct i915_address_space *vm)
+i915_vm_is_4lvl(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
@@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
- return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
/* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -390,12 +390,14 @@ struct i915_hw_ppgtt {
struct i915_address_space vm;
struct kref ref;
- unsigned long pd_dirty_rings;
+ intel_engine_mask_t pd_dirty_engines;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
+
+ u32 user_handle;
};
struct gen6_hw_ppgtt {
@@ -488,7 +490,7 @@ static inline u32 gen6_pde_index(u32 addr)
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
- if (i915_vm_is_48bit(vm))
+ if (i915_vm_is_4lvl(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;
@@ -603,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
-void i915_ppgtt_close(struct i915_address_space *vm);
-static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+
+static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
- if (ppgtt)
- kref_get(&ppgtt->ref);
+ kref_get(&ppgtt->ref);
+ return ppgtt;
}
+
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -620,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
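
The i915_vm_is_48bit() to i915_vm_is_4lvl() rename above keeps the underlying test: any address space larger than 4GiB needs the four-level page-table layout. A quick worked check of the expression, purely for illustration:

#include <linux/bits.h>
#include <linux/build_bug.h>

static void vm_level_examples(void)
{
        /* (total - 1) >> 32 is non-zero exactly when total > 1ULL << 32 */
        BUILD_BUG_ON((BIT_ULL(48) - 1) >> 32 == 0);     /* 48b ppgtt: 4-level */
        BUILD_BUG_ON((BIT_ULL(32) - 1) >> 32 != 0);     /* 32b ppgtt: 3-level */
}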
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index fddde1033e74..ab627ed1269c 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index aab8cdd80e6d..ac6a5ab84586 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -24,6 +24,22 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+ struct i915_global base;
+ struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ return kmem_cache_free(global.slab_objects, obj);
+}
/**
* Mark up the object's coherency levels for a given cache_level
@@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index fab040331cdb..ca93a40c0c87 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private date
@@ -499,5 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-#endif
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush);
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90baf9086d0a..9440024c763f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -42,7 +42,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
- ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ ret = 0;
out:
i915_gem_obj_finish_shmem_access(so->obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 74a9661479ca..0a8082cfc761 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
unsigned int cache_level;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 16cc9ddbce34..a9b5329dae3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (!obj->bit_17) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+ GFP_KERNEL);
}
} else {
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
obj->bit_17 = NULL;
}
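
A small aside on the allocation change above (and the matching one in i915_gem_fence_reg.c earlier): bitmap_zalloc()/bitmap_free() take a bit count directly, replacing the open-coded kcalloc(BITS_TO_LONGS(...), sizeof(long), ...) arithmetic. A minimal sketch of the pattern, not taken from the patch:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int record_swizzle_page(unsigned long **bits, unsigned int page_count,
                               unsigned int page_nr)
{
        if (!*bits) {
                *bits = bitmap_zalloc(page_count, GFP_KERNEL);
                if (!*bits)
                        return -ENOMEM;
        }

        __set_bit(page_nr, *bits);      /* per-page bookkeeping as before */
        return 0;
}

static void release_swizzle_record(unsigned long *bits)
{
        bitmap_free(bits);              /* NULL-safe, like kfree() */
}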
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1d3f9a31ad61..8079ea3af103 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -122,7 +122,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
while (it) {
struct drm_i915_gem_object *obj;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
break;
}
@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
if (!pages)
return;
- if (obj->mm.madv != I915_MADV_WILLNEED)
- obj->mm.dirty = false;
-
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, pages) {
@@ -795,7 +793,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
new file mode 100644
index 000000000000..81e5c2ce336b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_vma.h"
+
+static LIST_HEAD(globals);
+
+static atomic_t active;
+static atomic_t epoch;
+static struct park_work {
+ struct rcu_work work;
+ int epoch;
+} park;
+
+static void i915_globals_shrink(void)
+{
+ struct i915_global *global;
+
+ /*
+ * kmem_cache_shrink() discards empty slabs and reorders partially
+ * filled slabs to prioritise allocating from the mostly full slabs,
+ * with the aim of reducing fragmentation.
+ */
+ list_for_each_entry(global, &globals, link)
+ global->shrink();
+}
+
+static void __i915_globals_park(struct work_struct *work)
+{
+ /* Confirm nothing woke up in the last grace period */
+ if (park.epoch == atomic_read(&epoch))
+ i915_globals_shrink();
+}
+
+void __init i915_global_register(struct i915_global *global)
+{
+ GEM_BUG_ON(!global->shrink);
+ GEM_BUG_ON(!global->exit);
+
+ list_add_tail(&global->link, &globals);
+}
+
+static void __i915_globals_cleanup(void)
+{
+ struct i915_global *global, *next;
+
+ list_for_each_entry_safe_reverse(global, next, &globals, link)
+ global->exit();
+}
+
+static __initconst int (* const initfn[])(void) = {
+ i915_global_active_init,
+ i915_global_context_init,
+ i915_global_gem_context_init,
+ i915_global_objects_init,
+ i915_global_request_init,
+ i915_global_scheduler_init,
+ i915_global_vma_init,
+};
+
+int __init i915_globals_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+ int err;
+
+ err = initfn[i]();
+ if (err) {
+ __i915_globals_cleanup();
+ return err;
+ }
+ }
+
+ INIT_RCU_WORK(&park.work, __i915_globals_park);
+ return 0;
+}
+
+void i915_globals_park(void)
+{
+ /*
+ * Defer shrinking the global slab caches (and other work) until
+ * after an RCU grace period has completed with no activity. This
+ * is to try and reduce the latency impact on the consumers caused
+ * by us shrinking the caches at the same time as they are trying to
+ * allocate, with the assumption being that if we idle long enough
+ * for an RCU grace period to elapse since the last use, it is likely
+ * to be longer until we need the caches again.
+ */
+ if (!atomic_dec_and_test(&active))
+ return;
+
+ park.epoch = atomic_inc_return(&epoch);
+ queue_rcu_work(system_wq, &park.work);
+}
+
+void i915_globals_unpark(void)
+{
+ atomic_inc(&epoch);
+ atomic_inc(&active);
+}
+
+void __exit i915_globals_exit(void)
+{
+ /* Flush any residual park_work */
+ atomic_inc(&epoch);
+ flush_rcu_work(&park.work);
+
+ __i915_globals_cleanup();
+
+ /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+ rcu_barrier();
+}
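
For orientation, i915_globals_park()/i915_globals_unpark() are called from the GPU idle/busy transitions elsewhere in the series (not shown in this excerpt). The calls must be balanced, and only the final park, when the active count drops to zero, queues the deferred shrink. A sketch of the intended calling pattern and of why the epoch is bumped on unpark (illustration only, not the actual call sites):

#include "i915_globals.h"

static void example_gt_becomes_busy(void)
{
        /*
         * Bumping the epoch means any already-queued __i915_globals_park()
         * sees park.epoch != epoch and skips the cache shrink.
         */
        i915_globals_unpark();
}

static void example_gt_goes_idle(void)
{
        /*
         * Snapshot the epoch and queue the shrink via queue_rcu_work(),
         * so it only runs after an RCU grace period with no new activity.
         */
        i915_globals_park();
}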
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
new file mode 100644
index 000000000000..04c1ce107fc0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_GLOBALS_H_
+#define _I915_GLOBALS_H_
+
+typedef void (*i915_global_func_t)(void);
+
+struct i915_global {
+ struct list_head link;
+
+ i915_global_func_t shrink;
+ i915_global_func_t exit;
+};
+
+void i915_global_register(struct i915_global *global);
+
+int i915_globals_init(void);
+void i915_globals_park(void);
+void i915_globals_unpark(void);
+void i915_globals_exit(void);
+
+/* constructors */
+int i915_global_active_init(void);
+int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
+int i915_global_objects_init(void);
+int i915_global_request_init(void);
+int i915_global_scheduler_init(void);
+int i915_global_vma_init(void);
+
+#endif /* _I915_GLOBALS_H_ */
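
Each constructor declared above is expected to create its slab cache(s) and register a struct i915_global whose shrink/exit callbacks operate on them. A minimal sketch of such a constructor, assuming a hypothetical object type (struct my_object and the my_* names are illustrative, not taken from this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "i915_globals.h"

struct my_object {
        struct list_head link;
        /* ... payload ... */
};

static struct kmem_cache *slab_my_objects;

static void i915_global_my_objects_shrink(void)
{
        /* Called while parked: give empty slabs back to the page allocator. */
        kmem_cache_shrink(slab_my_objects);
}

static void i915_global_my_objects_exit(void)
{
        kmem_cache_destroy(slab_my_objects);
}

static struct i915_global global = {
        .shrink = i915_global_my_objects_shrink,
        .exit = i915_global_my_objects_exit,
};

int __init i915_global_my_objects_init(void)
{
        slab_my_objects = KMEM_CACHE(my_object, SLAB_HWCACHE_ALIGN);
        if (!slab_my_objects)
                return -ENOMEM;

        i915_global_register(&global);
        return 0;
}

The real constructors wired into initfn[] (i915_global_active_init() and friends) follow this same shape, each owning the kmem_cache for its object type.
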
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aa6791255252..f51ff683dd2e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x %02x",
+ err_printf(m, " %08x_%08x %8u %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain,
- err->wseqno);
+ err->write_domain);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, err->engine != -1 ? " " : "");
- err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.row[slice][subslice]);
}
-static const char *bannable(const struct drm_i915_error_context *ctx)
-{
- return ctx->bannable ? "" : " (unbannable)";
-}
-
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
const struct drm_i915_error_request *erq,
@@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
- prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno,
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
- ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->hw_id,
+ ctx->sched_attr.priority, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x\n",
- ee->semaphore_mboxes[0]);
- err_printf(m, " SYNC_1: 0x%08x\n",
- ee->semaphore_mboxes[1]);
- if (HAS_VEBOX(m->i915))
- err_printf(m, " SYNC_2: 0x%08x\n",
- ee->semaphore_mboxes[2]);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " seqno: 0x%08x\n", ee->seqno);
- err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
@@ -688,16 +669,17 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (!error->engine[i].context.pid)
continue;
- err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
engine_name(m->i915, i),
error->engine[i].context.comm,
- error->engine[i].context.pid,
- error->engine[i].context.ban_score,
- bannable(&error->engine[i].context));
+ error->engine[i].context.pid);
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
+ err_printf(m, "Subplatform: 0x%x\n",
+ intel_subplatform(&error->runtime_info,
+ error->device_info.platform));
err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
@@ -779,13 +761,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
+ err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
- ee->context.pid,
- ee->context.handle,
- ee->context.hw_id,
- ee->context.ban_score,
- bannable(&ee->context));
+ ee->context.pid);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -1061,27 +1039,6 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
-/* The error capture is special as tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_active_request lookup.
- */
-static inline u32
-__active_get_seqno(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->global_seqno : 0;
-}
-
-static inline int
-__active_get_engine_id(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->engine->id : -1;
-}
-
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
@@ -1090,9 +1047,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
- err->engine = __active_get_engine_id(&obj->frontbuffer_write);
-
err->gtt_offset = vma->node.start;
err->read_domains = obj->read_domains;
err->write_domain = obj->write_domain;
@@ -1142,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static u32 i915_error_generate_code(struct i915_gpu_state *error,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1178,18 +1132,6 @@ static void gem_record_fences(struct i915_gpu_state *error)
error->nfence = i;
}
-static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
- ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- if (HAS_VEBOX(dev_priv))
- ee->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(engine->mmio_base));
-}
-
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
@@ -1197,44 +1139,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) >= 6) {
- ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
+ if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
- } else {
- gen6_record_semaphore_state(engine, ee);
+ else
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
- }
}
if (INTEL_GEN(dev_priv) >= 4) {
- ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
- ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
- ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
- ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
+ ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
+ ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
+ ee->instps = ENGINE_READ(engine, RING_INSTPS);
+ ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
if (INTEL_GEN(dev_priv) >= 8) {
- ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
- ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+ ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
+ ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
- ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+ ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
} else {
- ee->faddr = I915_READ(DMA_FADD_I8XX);
- ee->ipeir = I915_READ(IPEIR);
- ee->ipehr = I915_READ(IPEHR);
+ ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
+ ee->ipeir = ENGINE_READ(engine, IPEIR);
+ ee->ipehr = ENGINE_READ(engine, IPEHR);
}
intel_engine_get_instdone(engine, &ee->instdone);
- ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ee->instpm = ENGINE_READ(engine, RING_INSTPM);
ee->acthd = intel_engine_get_active_head(engine);
- ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = intel_engine_last_submit(engine);
- ee->start = I915_READ_START(engine);
- ee->head = I915_READ_HEAD(engine);
- ee->tail = I915_READ_TAIL(engine);
- ee->ctl = I915_READ_CTL(engine);
+ ee->start = ENGINE_READ(engine, RING_START);
+ ee->head = ENGINE_READ(engine, RING_HEAD);
+ ee->tail = ENGINE_READ(engine, RING_TAIL);
+ ee->ctl = ENGINE_READ(engine, RING_CTL);
if (INTEL_GEN(dev_priv) > 2)
- ee->mode = I915_READ_MODE(engine);
+ ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
@@ -1242,16 +1180,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
- case RCS:
+ MISSING_CASE(engine->id);
+ case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
mmio = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
mmio = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -1276,20 +1215,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN(dev_priv, 7))
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
+ } else if (IS_GEN(dev_priv, 7)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_GEN(dev_priv) >= 8)
+ ENGINE_READ(engine, RING_PP_DIR_BASE);
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ u32 base = engine->mmio_base;
+
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(engine, i));
+ I915_READ(GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(engine, i));
+ I915_READ(GEN8_RING_PDP_LDW(base, i));
}
+ }
}
}
@@ -1299,10 +1241,9 @@ static void record_request(struct i915_request *request,
struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
- erq->context = ctx->hw_id;
+ erq->context = request->fence.context;
+ erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&ctx->ban_score);
- erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
@@ -1393,11 +1334,8 @@ static void record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
- e->ban_score = atomic_read(&ctx->ban_score);
- e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}
@@ -1476,7 +1414,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
- request = i915_gem_find_active_request(engine);
+ request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
@@ -1669,7 +1607,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
if (INTEL_GEN(dev_priv) >= 5)
- error->ccid = I915_READ(CCID);
+ error->ccid = I915_READ(CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
if (IS_GEN_RANGE(dev_priv, 6, 7)) {
@@ -1697,16 +1635,17 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN(dev_priv, 2)) {
- error->ier = I915_READ16(IER);
+ error->ier = I915_READ16(GEN2_IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
- error->ier = I915_READ(IER);
+ error->ier = I915_READ(GEN2_IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
}
static const char *
-error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
+error_msg(struct i915_gpu_state *error,
+ intel_engine_mask_t engines, const char *msg)
{
int len;
int i;
@@ -1716,7 +1655,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%lx:0x%08x",
+ "GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
i915_error_generate_code(error, engines));
if (engines) {
@@ -1855,7 +1794,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* to pick up.
*/
void i915_capture_error_state(struct drm_i915_private *i915,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *msg)
{
static bool warned;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 53b1f22dd365..5dc761e85d9d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -94,8 +94,6 @@ struct i915_gpu_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
- u32 last_seqno;
-
/* Register state */
u32 start;
u32 tail;
@@ -108,24 +106,19 @@ struct i915_gpu_state {
u32 bbstate;
u32 instpm;
u32 instps;
- u32 seqno;
u64 bbaddr;
u64 acthd;
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 handle;
u32 hw_id;
- int ban_score;
int active;
int guilty;
- bool bannable;
struct i915_sched_attr sched_attr;
} context;
@@ -149,7 +142,6 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
- int ban_score;
u32 seqno;
u32 start;
u32 head;
@@ -170,7 +162,6 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -179,7 +170,6 @@ struct i915_gpu_state {
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
- s32 engine:4;
u32 cache_level:3;
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
@@ -205,38 +195,12 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
/**
- * State variable controlling the reset flow and count
- *
- * This is a counter which gets incremented when reset is triggered,
- *
- * Before the reset commences, the I915_RESET_BACKOFF bit is set
- * meaning that any waiters holding onto the struct_mutex should
- * relinquish the lock immediately in order for the reset to start.
- *
- * If reset is not completed successfully, the I915_WEDGE bit is
- * set meaning that hardware is terminally sour and there is no
- * recovery. All waiters on the reset_queue will be woken when
- * that happens.
- *
- * This counter is used by the wait_seqno code to notice that reset
- * event happened and it needs to restart the entire ioctl (since most
- * likely the seqno it waited for won't ever signal anytime soon).
- *
- * This is important for lock-free wait paths, where no contended lock
- * naturally enforces the correct ordering between the bail-out of the
- * waiter and the gpu reset work code.
- */
- unsigned long reset_count;
-
- /**
* flags: Control various stages of the GPU reset
*
- * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
- * other users acquiring the struct_mutex. To do this we set the
- * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
- * and then check for that bit before acquiring the struct_mutex (in
- * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
- * secondary role in preventing two concurrent global reset attempts.
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
* acquire the struct_mutex to reset an engine, we need an explicit
@@ -255,6 +219,9 @@ struct i915_gpu_error {
#define I915_RESET_ENGINE 2
#define I915_WEDGED (BITS_PER_LONG - 1)
+ /** Number of times the device has been reset (global) */
+ u32 reset_count;
+
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
@@ -272,6 +239,8 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
+ struct srcu_struct reset_backoff_srcu;
+
struct i915_gpu_restart *restart;
};
@@ -294,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *error_msg);
static inline struct i915_gpu_state *
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 441d2674b272..b92cfd69134b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -28,15 +28,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/sysrq.h>
-#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drm_irq.h>
+#include <linux/cpuidle.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_psr.h"
/**
* DOC: interrupt handling
@@ -132,92 +136,120 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
-/* IIR can theoretically queue up two events. Be paranoid. */
-#define GEN8_IRQ_RESET_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
-} while (0)
-
-#define GEN3_IRQ_RESET(type) do { \
- I915_WRITE(type##IMR, 0xffffffff); \
- POSTING_READ(type##IMR); \
- I915_WRITE(type##IER, 0); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
-} while (0)
-
-#define GEN2_IRQ_RESET(type) do { \
- I915_WRITE16(type##IMR, 0xffff); \
- POSTING_READ16(type##IMR); \
- I915_WRITE16(type##IER, 0); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
-} while (0)
+static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
+{
+ intel_uncore_write(uncore, imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, imr);
+
+ intel_uncore_write(uncore, ier, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+}
+
+static void gen2_irq_reset(struct intel_uncore *uncore)
+{
+ intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+
+ intel_uncore_write16(uncore, GEN2_IER, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_uncore_read(uncore, reg);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
}
-static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
- u16 val = I915_READ16(reg);
+ u16 val = intel_uncore_read16(uncore, GEN2_IIR);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
-}
-
-#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
-} while (0)
-
-#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE(type##IER, (ier_val)); \
- I915_WRITE(type##IMR, (imr_val)); \
- POSTING_READ(type##IMR); \
-} while (0)
-
-#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
- gen2_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE16(type##IER, (ier_val)); \
- I915_WRITE16(type##IMR, (imr_val)); \
- POSTING_READ16(type##IMR); \
-} while (0)
+ i915_mmio_reg_offset(GEN2_IIR), val);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+static void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
+{
+ gen3_assert_iir_is_zero(uncore, iir);
+
+ intel_uncore_write(uncore, ier, ier_val);
+ intel_uncore_write(uncore, imr, imr_val);
+ intel_uncore_posting_read(uncore, imr);
+}
+
+static void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
+{
+ gen2_assert_iir_is_zero(uncore);
+
+ intel_uncore_write16(uncore, GEN2_IER, ier_val);
+ intel_uncore_write16(uncore, GEN2_IMR, imr_val);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+}
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -268,7 +300,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
@@ -365,24 +397,41 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IMR(2);
- else
- return GEN6_PMIMR;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_imr;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ I915_WRITE(reg, mask);
+ POSTING_READ(reg);
}
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IER(2);
- else
- return GEN6_PMIER;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_ier;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ I915_WRITE(reg, mask);
}
/**
@@ -407,8 +456,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->pm_imr) {
dev_priv->pm_imr = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
- POSTING_READ(gen6_pm_imr(dev_priv));
+ write_pm_imr(dev_priv);
}
}
@@ -449,7 +497,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
gen6_unmask_pm_irq(dev_priv, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
@@ -460,7 +508,7 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
+ /* though a barrier is missing here, we don't really need one */
}
@@ -748,13 +796,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->opregion.asle)
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
+ if (!i915_has_asle(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1288,6 +1344,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = adj;
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
@@ -1415,20 +1483,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1449,7 +1517,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_breadcrumbs_irq(engine);
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
if (tasklet)
@@ -1459,12 +1527,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VCS2_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
@@ -1475,7 +1543,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@@ -1498,21 +1566,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS],
+ gen8_cs_irq_handler(i915->engine[RCS0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ gen8_cs_irq_handler(i915->engine[VCS1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS2],
- gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS],
+ gen8_cs_irq_handler(i915->engine[VECS0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
@@ -1693,7 +1761,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- u32 crcs[5];
+ u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
+
+ trace_intel_pipe_crc(crtc, crcs);
spin_lock(&pipe_crc->lock);
/*
@@ -1712,11 +1782,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
@@ -1775,6 +1840,25 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
+static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+ const u32 events = i915->pm_rps_events & pm_iir;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_mask_pm_irq(i915, events);
+
+ if (!rps->interrupts_enabled)
+ return;
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -1792,13 +1876,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_GEN(dev_priv) >= 8)
return;
- if (HAS_VEBOX(dev_priv)) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@@ -2667,6 +2749,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_AUX_CHANNEL_A;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ mask |= CNL_AUX_CHANNEL_F;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= ICL_AUX_CHANNEL_E |
+ CNL_AUX_CHANNEL_F;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2722,20 +2823,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- tmp_mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
- tmp_mask |= GEN9_AUX_CHANNEL_B |
- GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- if (INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) ||
- INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= CNL_AUX_CHANNEL_F;
-
- if (iir & tmp_mask) {
+ if (iir & gen8_de_port_aux_mask(dev_priv)) {
dp_aux_irq_handler(dev_priv);
found = true;
}
@@ -2816,11 +2904,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2857,7 +2943,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -2891,7 +2977,7 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
@@ -2926,7 +3012,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
if (instance == OTHER_GTPM_INSTANCE)
- return gen6_rps_irq_handler(i915, iir);
+ return gen11_rps_irq_handler(i915, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -2975,7 +3061,7 @@ static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
@@ -2983,14 +3069,8 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (unlikely(!intr_dw)) {
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
- return;
- }
-
for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915,
- bank, bit);
+ const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
gen11_gt_identity_handler(i915, ident);
}
@@ -3018,7 +3098,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3059,7 +3139,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3112,6 +3192,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
+static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (dev_priv->i945gm_vblank.enabled++ == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+
+ return i8xx_enable_vblank(dev, pipe);
+}
+
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3176,6 +3266,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ i8xx_disable_vblank(dev, pipe);
+
+ if (--dev_priv->i945gm_vblank.enabled == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+}
+
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,12 +3309,68 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_vblank_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, i945gm_vblank.work);
+
+ /*
+ * Vblank interrupts fail to wake up the device from C3,
+ * hence we want to prevent C3 usage while vblank interrupts
+ * are enabled.
+ */
+ pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
+ READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
+ dev_priv->i945gm_vblank.c3_disable_latency :
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static int cstate_disable_latency(const char *name)
+{
+ const struct cpuidle_driver *drv;
+ int i;
+
+ drv = cpuidle_get_driver();
+ if (!drv)
+ return 0;
+
+ for (i = 0; i < drv->state_count; i++) {
+ const struct cpuidle_state *state = &drv->states[i];
+
+ if (!strcmp(state->name, name))
+ return state->exit_latency ?
+ state->exit_latency - 1 : 0;
+ }
+
+ return 0;
+}
+
+static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->i945gm_vblank.work,
+ i945gm_vblank_work_func);
+
+ dev_priv->i945gm_vblank.c3_disable_latency =
+ cstate_disable_latency("C3");
+ pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+{
+ cancel_work_sync(&dev_priv->i945gm_vblank.work);
+ pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+}
+
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (HAS_PCH_NOP(dev_priv))
return;
- GEN3_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -3242,13 +3398,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN3_IRQ_RESET(GT);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -3259,12 +3419,14 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(uncore, VLV_);
dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -3289,7 +3451,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -3297,8 +3459,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
- GEN3_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -3329,18 +3492,21 @@ static void valleyview_irq_reset(struct drm_device *dev)
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN8_IRQ_RESET_NDX(GT, 0);
- GEN8_IRQ_RESET_NDX(GT, 1);
- GEN8_IRQ_RESET_NDX(GT, 2);
- GEN8_IRQ_RESET_NDX(GT, 3);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen8_master_intr_disable(dev_priv->regs);
+ gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
@@ -3350,11 +3516,11 @@ static void gen8_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3380,9 +3546,10 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen11_master_intr_disable(dev_priv->regs);
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
@@ -3394,21 +3561,23 @@ static void gen11_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN11_DE_HPD_);
- GEN3_IRQ_RESET(GEN11_GU_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (HAS_PCH_ICP(dev_priv))
- GEN3_IRQ_RESET(SDE);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ GEN3_IRQ_RESET(uncore, SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
@@ -3420,7 +3589,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
@@ -3430,6 +3599,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -3440,7 +3610,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -3451,13 +3621,14 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_reset(dev_priv);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3583,7 +3754,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3729,7 +3900,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT;
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3742,6 +3913,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3760,26 +3932,27 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
if (INTEL_GEN(dev_priv) >= 7) {
@@ -3798,7 +3971,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
}
if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3807,7 +3980,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_pre_postinstall(dev);
- GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
+ display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3877,35 +4051,42 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
- };
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
@@ -3941,7 +4122,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
for_each_pipe(dev_priv, pipe) {
@@ -3949,20 +4130,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
}
- GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (INTEL_GEN(dev_priv) >= 11) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
+ de_hpd_enables);
gen11_hpd_detection_setup(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
@@ -3984,7 +4166,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->regs);
+ gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
@@ -4025,7 +4207,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
icp_hpd_detection_setup(dev_priv);
@@ -4034,19 +4216,20 @@ static void icp_irq_postinstall(struct drm_device *dev)
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->regs);
+ gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@@ -4072,15 +4255,17 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
- GEN2_IRQ_RESET();
+ GEN2_IRQ_RESET(uncore);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4098,7 +4283,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4202,7 +4387,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(IIR);
+ iir = I915_READ16(GEN2_IIR);
if (iir == 0)
break;
@@ -4215,10 +4400,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(IIR, iir);
+ I915_WRITE16(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4234,6 +4419,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4242,12 +4428,13 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4274,7 +4461,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4306,7 +4493,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4323,10 +4510,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4345,18 +4532,20 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4394,7 +4583,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4452,7 +4641,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4468,13 +4657,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4503,6 +4692,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
+ if (IS_I945GM(dev_priv))
+ i945gm_vblank_work_init(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&rps->work, gen6_pm_rps_work);
@@ -4523,6 +4715,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
+ /* We share the register with other engine */
+ if (INTEL_GEN(dev_priv) > 9)
+ GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
+
rps->pm_intrmsk_mbz = 0;
/*
@@ -4542,13 +4738,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- /*
- * Opt out of the vblank disable timer on everything except gen2.
- * Gen2 doesn't have a hardware frame counter and so depends on
- * vblank interrupts to produce sane vblank sequence numbers.
- */
- if (!IS_GEN(dev_priv, 2))
- dev->vblank_disable_immediate = true;
+ dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -4605,8 +4795,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4626,6 +4815,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
+ } else if (IS_I945GM(dev_priv)) {
+ dev->driver->irq_preinstall = i915_irq_reset;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i945gm_enable_vblank;
+ dev->driver->disable_vblank = i945gm_disable_vblank;
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
@@ -4656,6 +4852,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
+ if (IS_I945GM(i915))
+ i945gm_vblank_work_fini(i915);
+
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
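
The i915_irq.c hunks above thread an explicit struct intel_uncore * through the GEN2_/GEN3_ IRQ_RESET/INIT macros and switch the gen2/gen3 paths to the prefixed GEN2_IIR/GEN2_IMR register names. As a rough sketch of the reset pattern these macros wrap (not the literal macro expansion from this patch), assuming intel_uncore_write()/intel_uncore_posting_read() as the uncore-based counterparts of I915_WRITE/POSTING_READ:

/* Illustrative sketch only, not the macro body from the patch. */
static void example_gen3_irq_reset(struct intel_uncore *uncore,
				   i915_reg_t imr, i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);	/* mask all sources */
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);		/* disable delivery */

	/* IIR can queue up two events, so clear and flush it twice. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

Passing the uncore pointer explicitly keeps these helpers usable without reaching back into dev_priv for the mmio accessors.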
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 66f82f3f050f..f893c2cbce15 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,14 +28,45 @@
#include <drm/drm_drv.h>
-#include "i915_active.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_selftest.h"
+#include "intel_fbdev.h"
-#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
-#define GEN_DEFAULT_PIPEOFFSETS \
+#define I845_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ }
+
+#define I9XX_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ }
+
+#define IVB_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ }
+
+#define HSW_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -49,7 +80,7 @@
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
}
-#define GEN_CHV_PIPEOFFSETS \
+#define CHV_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -61,21 +92,48 @@
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
-#define CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+#define I845_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ }
+
+#define I9XX_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ }
+
+#define CHV_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ [PIPE_C] = CHV_CURSOR_C_OFFSET, \
+ }
#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ }
-#define BDW_COLORS \
- .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define I9XX_COLORS \
+ .color = { .gamma_lut_size = 256 }
+#define I965_COLORS \
+ .color = { .gamma_lut_size = 129, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
+#define ILK_COLORS \
+ .color = { .gamma_lut_size = 1024 }
+#define IVB_COLORS \
+ .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
@@ -85,7 +143,26 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
-#define GEN2_FEATURES \
+#define I830_FEATURES \
+ GEN(2), \
+ .is_mobile = 1, \
+ .num_pipes = 2, \
+ .display.has_overlay = 1, \
+ .display.cursor_needs_physical = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
+ .hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
+ .engine_mask = BIT(RCS0), \
+ .has_snoop = true, \
+ .has_coherent_ggtt = false, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
+
+#define I845_FEATURES \
GEN(2), \
.num_pipes = 1, \
.display.has_overlay = 1, \
@@ -94,37 +171,32 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I845_PIPE_OFFSETS, \
+ I845_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i830_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1,
- .display.cursor_needs_physical = 1,
- .num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I845G),
};
static const struct intel_device_info intel_i85x_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I85X),
- .is_mobile = 1,
- .num_pipes = 2, /* legal, last one wins */
- .display.cursor_needs_physical = 1,
.display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@@ -133,12 +205,13 @@ static const struct intel_device_info intel_i865g_info = {
.num_pipes = 2, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -196,7 +269,14 @@ static const struct intel_device_info intel_g33_info = {
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_g_info = {
+ GEN3_FEATURES,
+ PLATFORM(INTEL_PINEVIEW),
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_pineview_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -210,12 +290,13 @@ static const struct intel_device_info intel_pineview_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I965_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -239,7 +320,7 @@ static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -249,7 +330,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -257,14 +338,15 @@ static const struct intel_device_info intel_gm45_info = {
GEN(5), \
.num_pipes = 2, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -283,15 +365,17 @@ static const struct intel_device_info intel_ironlake_m_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_ALIASING, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .ppgtt_size = 31, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -328,15 +412,17 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.num_pipes = 3, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_FULL, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- IVB_CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 31, \
+ IVB_PIPE_OFFSETS, \
+ IVB_CURSOR_OFFSETS, \
+ IVB_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -386,24 +472,27 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6 = 1,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS
};
#define G75_FEATURES \
GEN7_FEATURES, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
+ HSW_PIPE_OFFSETS, \
.has_runtime_pm = 1
#define HSW_PLATFORM \
@@ -429,11 +518,11 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -462,7 +551,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info intel_cherryview_info = {
@@ -471,21 +561,22 @@ static const struct intel_device_info intel_cherryview_info = {
.num_pipes = 3,
.display.has_hotplug = 1,
.is_lp = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
.display_mmio_offset = VLV_DISPLAY_BASE,
- GEN_DEFAULT_PAGE_SIZES,
- GEN_CHV_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ CHV_PIPE_OFFSETS,
+ CHV_CURSOR_OFFSETS,
CHV_COLORS,
+ GEN_DEFAULT_PAGE_SIZES,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -521,7 +612,8 @@ static const struct intel_device_info intel_skylake_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+ .engine_mask = \
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
static const struct intel_device_info intel_skylake_gt3_info = {
@@ -538,7 +630,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN(9), \
.is_lp = 1, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
@@ -552,15 +644,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.display.has_ipc = 1, \
- GEN9_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_PIPEOFFSETS, \
+ HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
- BDW_COLORS
+ IVB_COLORS, \
+ GEN9_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -592,7 +685,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define CFL_PLATFORM \
@@ -612,7 +706,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = {
static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define GEN10_FEATURES \
@@ -648,13 +743,22 @@ static const struct intel_device_info intel_cannonlake_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
- .has_logical_ring_elsq = 1
+ .has_logical_ring_elsq = 1, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+};
+
+static const struct intel_device_info intel_elkhartlake_info = {
+ GEN11_FEATURES,
+ PLATFORM(INTEL_ELKHARTLAKE),
.is_alpha_support = 1,
- .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .ppgtt_size = 36,
};
#undef GEN
@@ -680,7 +784,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
+ INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
@@ -722,8 +827,11 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
+ INTEL_EHL_IDS(&intel_elkhartlake_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -801,7 +909,9 @@ static int __init i915_init(void)
bool use_kms = true;
int err;
- i915_global_active_init();
+ err = i915_globals_init();
+ if (err)
+ return err;
err = i915_mock_selftests();
if (err)
@@ -834,7 +944,7 @@ static void __exit i915_exit(void)
return;
pci_unregister_driver(&i915_pci_driver);
- i915_global_active_exit();
+ i915_globals_exit();
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9ebf99f3d8d3..39a4804091d7 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
int ret;
@@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto unlock;
}
- ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
@@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+ u32 *reg_state,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1677,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
- CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(dev_priv,
- &to_intel_context(ctx,
- dev_priv->engine[RCS])->sseu));
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ gen8_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1711,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1740,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
- if (!ce->state)
+ if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@@ -1754,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
@@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN7_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN8_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* references will effectively disable RC6.
*/
stream->wakeref = intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
if (ret)
@@ -2127,7 +2125,7 @@ err_lock:
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
@@ -2138,17 +2136,17 @@ err_config:
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- u32 *reg_state)
+ struct intel_context *ce,
+ u32 *regs)
{
struct i915_perf_stream *stream;
- if (engine->id != RCS)
+ if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.oa.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
}
/**
@@ -2881,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
- if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_perf_load_test_config_icl(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_KABYLAKE(dev_priv)) {
+ if (IS_KBL_GT2(dev_priv))
+ i915_perf_load_test_config_kblgt2(dev_priv);
+ else if (IS_KBL_GT3(dev_priv))
+ i915_perf_load_test_config_kblgt3(dev_priv);
+ } else if (IS_BROXTON(dev_priv)) {
+ i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKL_GT2(dev_priv))
i915_perf_load_test_config_sklgt2(dev_priv);
@@ -2894,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_sklgt3(dev_priv);
else if (IS_SKL_GT4(dev_priv))
i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_ICELAKE(dev_priv)) {
- i915_perf_load_test_config_icl(dev_priv);
- }
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ i915_perf_load_test_config_chv(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ i915_perf_load_test_config_bdw(dev_priv);
+ } else if (IS_HASWELL(dev_priv)) {
+ i915_perf_load_test_config_hsw(dev_priv);
+}
if (dev_priv->perf.oa.test_config.id == 0)
goto sysfs_error;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b745c49a5af6..46a52da3db29 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS]))
+ else if (intel_engine_supports_stats(i915->engine[RCS0]))
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -149,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
spin_unlock_irq(&i915->pmu.lock);
}
-static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
-{
- if (!fw)
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
-
- return true;
-}
-
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
@@ -169,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
- bool fw = false;
+ unsigned long flags;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- if (!dev_priv->gt.awake)
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = 0;
+ if (READ_ONCE(dev_priv->gt.awake))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
for_each_engine(engine, dev_priv, id) {
- u32 current_seqno = intel_engine_get_seqno(engine);
- u32 last_seqno = intel_engine_last_submit(engine);
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
u32 val;
- val = !i915_seqno_passed(current_seqno, last_seqno);
-
- if (val)
- add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- period_ns);
-
- if (val && (engine->pmu.enable &
- (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
- fw = grab_forcewake(dev_priv, fw);
-
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- } else {
- val = 0;
- }
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ if (val == 0) /* powerwell off => engine idle */
+ continue;
if (val & RING_WAIT)
- add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- period_ns);
-
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
- add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- period_ns);
- }
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
- if (fw)
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+ }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_runtime_pm_put(dev_priv, wakeref);
}
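
The engines_sample() rewrite above drops the seqno comparison and conditional forcewake and instead samples RING_CTL and RING_MI_MODE directly under uncore.lock. Condensed into one classification step (illustration only; the helper name and signature are made up, the register reads stay as in the patch):

/* Condensed restatement of the sampling logic above (illustration only). */
static void classify_sample(u32 ring_ctl, u32 mi_mode,
			    bool *waiting, bool *sema, bool *busy)
{
	*waiting = *sema = *busy = false;

	if (ring_ctl == 0)		/* powerwell off => engine idle */
		return;

	*waiting = ring_ctl & RING_WAIT;
	*sema = ring_ctl & RING_WAIT_SEMAPHORE;

	/* A waiting ring reports MODE_IDLE, but it is still accounted busy. */
	*busy = *waiting || *sema || !(mi_mode & MODE_IDLE);
}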
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
new file mode 100644
index 000000000000..cc44ebd3b553
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_PRIOLIST_TYPES_H_
+#define _I915_PRIOLIST_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
+};
+
+#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
+
+struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
+ struct rb_node node;
+ unsigned long used;
+ int priority;
+};
+
+#endif /* _I915_PRIOLIST_TYPES_H_ */
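
The new header reserves the low I915_USER_PRIORITY_SHIFT bits of a request's priority for internal bumps (WAIT, NEWCLIENT, NOSEMAPHORE) and shifts the user-visible priority above them, which is why each i915_priolist carries I915_PRIORITY_COUNT request lists plus a used bitmap. A minimal sketch of that packing, assuming a straightforward low-bits-to-bucket mapping (the real lookup lives in i915_scheduler.c and is not part of this diff):

/* Hypothetical helper names; only the macros come from the header above. */
static int make_effective_prio(int user_prio, u8 internal_bumps)
{
	/* e.g. user_prio = 0 (default), internal_bumps = I915_PRIORITY_WAIT */
	return I915_USER_PRIORITY(user_prio) | internal_bumps;
}

static struct list_head *bucket_for(struct i915_priolist *p, int prio)
{
	unsigned int idx = prio & I915_PRIORITY_MASK;	/* one of 8 sub-lists */

	p->used |= BIT(idx);	/* remember which sub-lists are populated */
	return &p->requests[idx];
}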
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index eeaa3d506d95..969e514916ab 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -52,7 +52,7 @@ enum vgt_g2v_type {
/*
* VGT capabilities type
*/
-#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+#define VGT_CAPS_FULL_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
#define VGT_CAPS_HUGE_GTT BIT(4)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index cbcb957b7141..782183b78f49 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -10,12 +10,34 @@
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
+static int copy_query_item(void *query_hdr, size_t query_sz,
+ u32 total_length,
+ struct drm_i915_query_item *query_item)
+{
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length < total_length)
+ return -EINVAL;
+
+ if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
+ query_sz))
+ return -EFAULT;
+
+ if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
+ total_length))
+ return -EFAULT;
+
+ return 0;
+}
+
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ int ret;
if (query_item->flags != 0)
return -EINVAL;
@@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
- if (query_item->length == 0)
- return total_length;
-
- if (query_item->length < total_length)
- return -EINVAL;
-
- if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
- sizeof(topo)))
- return -EFAULT;
+ ret = copy_query_item(&topo, sizeof(topo), total_length,
+ query_item);
+ if (ret != 0)
+ return ret;
if (topo.flags != 0)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;
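
copy_query_item() factors out the length negotiation of the i915 query uAPI: a zero length reports the required size back to userspace, a shorter non-zero length is rejected with -EINVAL, and otherwise the header is copied in and the output buffer checked with access_ok(). From userspace this is the usual two-pass pattern; a sketch follows, assuming libdrm's drmIoctl() wrapper and with error handling kept minimal:

#include <stdint.h>
#include <stdlib.h>
#include <i915_drm.h>		/* i915 uAPI; include path depends on the libdrm setup */
#include <xf86drm.h>		/* drmIoctl() */

/* Returns a malloc'ed topology blob, or NULL on error (sketch only). */
static void *query_topology(int fd, uint32_t *len_out)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		.length = 0,			/* pass 1: ask for the size */
	};
	struct drm_i915_query q = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;		/* kernel wrote the required size into item.length */

	buf = calloc(1, item.length);	/* pass 2: provide the buffer */
	item.data_ptr = (uintptr_t)buf;
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
		free(buf);
		return NULL;
	}

	*len_out = item.length;
	return buf;
}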
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 047855dd8c6b..b74824f0b5b1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,6 +25,9 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/**
* DOC: The i915 register macro definition style guide
*
@@ -59,15 +62,13 @@
* significant to least significant bit. Indent the register content macros
* using two extra spaces between ``#define`` and the macro name.
*
- * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
- * contents so that they are already shifted in place, and can be directly
- * OR'd. For convenience, function-like macros may be used to define bit fields,
- * but do note that the macros may be needed to read as well as write the
- * register contents.
+ * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents
+ * using ``REG_FIELD_PREP(mask, value)``. This will define the values already
+ * shifted in place, so they can be directly OR'd together. For convenience,
+ * function-like macros may be used to define bit fields, but do note that the
+ * macros may be needed to read as well as write the register contents.
*
- * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
- * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
- * to the name.
+ * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.
*
* Group the register and its contents together without blank lines, separate
* from other registers and their contents with one blank line.
@@ -105,17 +106,78 @@
* #define _FOO_A 0xf000
* #define _FOO_B 0xf001
* #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
- * #define FOO_ENABLE (1 << 31)
- * #define FOO_MODE_MASK (0xf << 16)
- * #define FOO_MODE_SHIFT 16
- * #define FOO_MODE_BAR (0 << 16)
- * #define FOO_MODE_BAZ (1 << 16)
- * #define FOO_MODE_QUX_SNB (2 << 16)
+ * #define FOO_ENABLE REG_BIT(31)
+ * #define FOO_MODE_MASK REG_GENMASK(19, 16)
+ * #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0)
+ * #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1)
+ * #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2)
*
* #define BAR _MMIO(0xb000)
* #define GEN8_BAR _MMIO(0xb888)
*/
+/**
+ * REG_BIT() - Prepare a u32 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u32, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT(__n) \
+ ((u32)(BIT(__n) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
+ ((__n) < 0 || (__n) > 31))))
+
+/**
+ * REG_GENMASK() - Prepare a continuous u32 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u32, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK(__high, __low) \
+ ((u32)(GENMASK(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
+ __builtin_constant_p(__low) && \
+ ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
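
The REG_BIT()/REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET() family added above replaces hand-rolled _MASK/_SHIFT pairs with values that are already shifted into place. A small round-trip illustration, reusing the hypothetical FOO register from the style-guide example (only the REG_* macros are real):

/* Hypothetical register, mirroring the FOO example in the doc comment. */
#define FOO_MODE_MASK	REG_GENMASK(19, 16)
#define FOO_MODE_BAZ	REG_FIELD_PREP(FOO_MODE_MASK, 1)
#define FOO_ENABLE	REG_BIT(31)

static u32 foo_pack(void)
{
	return FOO_ENABLE | FOO_MODE_BAZ;		/* 0x80010000 */
}

static u32 foo_mode(u32 val)
{
	return REG_FIELD_GET(FOO_MODE_MASK, val);	/* 1 for foo_pack() */
}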
typedef struct {
u32 reg;
} i915_reg_t;
@@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Engine ID */
-#define RCS_HW 0
-#define VCS_HW 1
-#define BCS_HW 2
-#define VECS_HW 3
-#define VCS2_HW 4
-#define VCS3_HW 6
-#define VCS4_HW 7
-#define VECS2_HW 12
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
/* Engine class */
@@ -372,13 +434,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
+#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
+#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
+#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
-#define PUNIT_REG_DSPFREQ 0x36
+/* PUNIT_REG_*SSPM0 */
+#define _SSPM0_SSC(val) ((val) << 0)
+#define SSPM0_SSC_MASK _SSPM0_SSC(0x3)
+#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0)
+#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1)
+#define SSPM0_SSC_RESET _SSPM0_SSC(0x2)
+#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3)
+#define _SSPM0_SSS(val) ((val) << 24)
+#define SSPM0_SSS_MASK _SSPM0_SSS(0x3)
+#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0)
+#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1)
+#define SSPM0_SSS_RESET _SSPM0_SSS(0x2)
+#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3)
+
+/* PUNIT_REG_*SSPM1 */
+#define SSPM1_FREQSTAT_SHIFT 24
+#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT)
+#define SSPM1_FREQGUAR_SHIFT 8
+#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT)
+#define SSPM1_FREQ_SHIFT 0
+#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT)
+
+#define PUNIT_REG_VEDSSPM0 0x32
+#define PUNIT_REG_VEDSSPM1 0x33
+
+#define PUNIT_REG_DSPSSPM 0x36
#define DSPFREQSTAT_SHIFT_CHV 24
#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
#define DSPFREQGUAR_SHIFT_CHV 8
@@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
+#define PUNIT_REG_ISPSSPM0 0x39
+#define PUNIT_REG_ISPSSPM1 0x3a
+
/*
* i915_power_well_id:
*
@@ -1860,13 +1950,13 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
+#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1893,11 +1983,11 @@ enum i915_power_well_id {
#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
-#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
@@ -1908,8 +1998,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+#define MG_TX1_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
MG_TX_LINK_PARAMS_TX1LN1_PORT1)
@@ -1921,8 +2011,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+#define MG_TX2_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
@@ -1935,8 +2025,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+#define MG_TX1_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
MG_TX_PISO_READLOAD_TX1LN1_PORT1)
@@ -1948,8 +2038,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+#define MG_TX2_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
@@ -1962,8 +2052,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+#define MG_TX1_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
MG_TX_SWINGCTRL_TX1LN0_PORT2, \
MG_TX_SWINGCTRL_TX1LN1_PORT1)
@@ -1975,8 +2065,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+#define MG_TX2_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
MG_TX_SWINGCTRL_TX2LN0_PORT2, \
MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
@@ -1990,8 +2080,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+#define MG_TX1_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
MG_TX_DRVCTRL_TX1LN1_TXPORT1)
@@ -2003,8 +2093,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+#define MG_TX2_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
MG_TX_DRVCTRL_TX2LN0_PORT2, \
MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
@@ -2023,8 +2113,8 @@ enum i915_power_well_id {
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+#define MG_CLKHUB(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
MG_CLKHUB_LN0_PORT2, \
MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
@@ -2037,8 +2127,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+#define MG_TX1_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
MG_TX_DCC_TX1LN0_PORT2, \
MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
@@ -2049,8 +2139,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+#define MG_TX2_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
MG_TX_DCC_TX2LN0_PORT2, \
MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
@@ -2065,8 +2155,8 @@ enum i915_power_well_id {
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+#define MG_DP_MODE(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
MG_DP_MODE_LN0_ACU_PORT2, \
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
@@ -2356,8 +2446,10 @@ enum i915_power_well_id {
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
-#define RESET_CTL_REQUEST_RESET (1 << 0)
-#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RESET_CTL_CAT_ERROR REG_BIT(2)
+#define RESET_CTL_READY_TO_RESET REG_BIT(1)
+#define RESET_CTL_REQUEST_RESET REG_BIT(0)
+
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
@@ -2478,12 +2570,12 @@ enum i915_power_well_id {
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
#define PWRCTX_EN (1 << 0)
-#define IPEIR _MMIO(0x2088)
-#define IPEHR _MMIO(0x208c)
+#define IPEIR(base) _MMIO((base) + 0x88)
+#define IPEHR(base) _MMIO((base) + 0x8c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
-#define DMA_FADD_I8XX _MMIO(0x20d0)
+#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
@@ -2623,10 +2715,10 @@ enum i915_power_well_id {
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
-#define IER _MMIO(0x20a0)
-#define IIR _MMIO(0x20a4)
-#define IMR _MMIO(0x20a8)
-#define ISR _MMIO(0x20ac)
+#define GEN2_IER _MMIO(0x20a0)
+#define GEN2_IIR _MMIO(0x20a4)
+#define GEN2_IMR _MMIO(0x20a8)
+#define GEN2_ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
@@ -2657,7 +2749,7 @@ enum i915_power_well_id {
#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1 << 9)
#define INSTPM_SYNC_FLUSH (1 << 5)
-#define ACTHD _MMIO(0x20c8)
+#define ACTHD(base) _MMIO((base) + 0xc8)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@@ -3857,7 +3949,7 @@ enum i915_power_well_id {
/*
* Logical Context regs
*/
-#define CCID _MMIO(0x2180)
+#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
@@ -3989,6 +4081,15 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
+/* skl+ source selection */
+#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
+#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
+#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
+#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
+#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
+#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
+#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
+#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
@@ -4110,42 +4211,6 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* VLV eDP PSR registers */
-#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
-#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1 << 0)
-#define VLV_EDP_PSR_RESET (1 << 1)
-#define VLV_EDP_PSR_MODE_MASK (7 << 2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
-#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
-#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
-#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
-
-#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
-#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
-#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
-
-#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
-#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
-#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0 << 0)
-#define VLV_EDP_PSR_INACTIVE (1 << 0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
-#define VLV_EDP_PSR_EXIT (5 << 0)
-#define VLV_EDP_PSR_IN_TRANS (1 << 7)
-#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
-
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
@@ -4168,6 +4233,7 @@ enum {
#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
#define EDP_PSR_TP1_TIME_500us (0 << 4)
#define EDP_PSR_TP1_TIME_100us (1 << 4)
#define EDP_PSR_TP1_TIME_2500us (2 << 4)
@@ -4612,13 +4678,14 @@ enum {
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
@@ -4653,18 +4720,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
-#define PP_ON (1 << 31)
+#define PP_ON REG_BIT(31)
#define _PP_CONTROL_1 0xc7204
#define _PP_CONTROL_2 0xc7304
#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
_PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
-#define POWER_CYCLE_DELAY_SHIFT 4
-#define VDD_OVERRIDE_FORCE (1 << 3)
-#define BACKLIGHT_ENABLE (1 << 2)
-#define PWR_DOWN_ON_RESET (1 << 1)
-#define PWR_STATE_TARGET (1 << 0)
+#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define VDD_OVERRIDE_FORCE REG_BIT(3)
+#define BACKLIGHT_ENABLE REG_BIT(2)
+#define PWR_DOWN_ON_RESET REG_BIT(1)
+#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4672,62 +4738,53 @@ enum {
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
-#define PP_READY (1 << 30)
-#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_POWER_UP (1 << 28)
-#define PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define PP_SEQUENCE_MASK (3 << 28)
-#define PP_SEQUENCE_SHIFT 28
-#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_MASK 0x0000000f
-#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
-#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
-#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
-#define PP_SEQUENCE_STATE_RESET (0xf << 0)
+#define PP_READY REG_BIT(30)
+#define PP_SEQUENCE_MASK REG_GENMASK(29, 28)
+#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
+#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
+#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
+#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27)
+#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0)
+#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
+#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
+#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
+#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
+#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
+#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
+#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
+#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
-#define PANEL_UNLOCK_REGS (0xabcd << 16)
-#define PANEL_UNLOCK_MASK (0xffff << 16)
-#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0
-#define BXT_POWER_CYCLE_DELAY_SHIFT 4
-#define EDP_FORCE_VDD (1 << 3)
-#define EDP_BLC_ENABLE (1 << 2)
-#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_ON (1 << 0)
+#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
+#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
+#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define EDP_FORCE_VDD REG_BIT(3)
+#define EDP_BLC_ENABLE REG_BIT(2)
+#define PANEL_POWER_RESET REG_BIT(1)
+#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
-#define PANEL_PORT_SELECT_SHIFT 30
-#define PANEL_PORT_SELECT_MASK (3 << 30)
-#define PANEL_PORT_SELECT_LVDS (0 << 30)
-#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define PANEL_PORT_SELECT_DPC (2 << 30)
-#define PANEL_PORT_SELECT_DPD (3 << 30)
-#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
-#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_UP_DELAY_SHIFT 16
-#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
+#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
+#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
+#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
+#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
+#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
+#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
-#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_DOWN_DELAY_SHIFT 16
-#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
-#define PP_REFERENCE_DIVIDER_MASK 0xffffff00
-#define PP_REFERENCE_DIVIDER_SHIFT 8
-#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f
-#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
+#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
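The panel power sequencer block above converts open-coded shift/mask pairs to the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers. A minimal sketch of how such masks are typically packed and unpacked follows; it is illustrative only and assumes REG_FIELD_GET() is the matching decode helper (a thin wrapper around FIELD_GET()) defined alongside the others in this header.

/* Illustrative only: pack/unpack PP_ON_DELAYS fields with the new helpers. */
static u32 pp_on_delays_pack(u32 power_up, u32 light_on)
{
        /* Each field is encoded against its REG_GENMASK() definition. */
        return REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, power_up) |
               REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, light_on);
}

static u32 pp_on_delays_power_up(u32 val)
{
        /* Assumed decode helper; shift and mask both come from the mask macro. */
        return REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
}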
@@ -5590,9 +5647,15 @@ enum {
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
-#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1 << 24)
#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */
+#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */
+#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */
+#define PIPECONF_GAMMA_MODE_SHIFT 24
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5698,6 +5761,10 @@ enum {
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define _PIPEAGCMAX 0x70010
+#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
@@ -5998,6 +6065,7 @@ enum {
#define _CUR_WM_TRANS_A_0 0x70168
#define _CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_IGNORE_LINES (1 << 30)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */
@@ -6124,7 +6192,7 @@ enum {
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define MCURSOR_ROTATE_180 (1 << 15)
#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
@@ -6179,7 +6247,7 @@ enum {
#define DISPPLANE_RGBA888 (0xf << 26)
#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
@@ -6557,13 +6625,22 @@ enum {
#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
#define PLANE_CTL_FORMAT_NV12 (1 << 24)
#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_P010 (3 << 24)
#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_P016 (7 << 24)
#define PLANE_CTL_FORMAT_AYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
+#define PLANE_CTL_FORMAT_Y210 (1 << 23)
+#define PLANE_CTL_FORMAT_Y212 (3 << 23)
+#define PLANE_CTL_FORMAT_Y216 (5 << 23)
+#define PLANE_CTL_FORMAT_Y410 (7 << 23)
+#define PLANE_CTL_FORMAT_Y412 (9 << 23)
+#define PLANE_CTL_FORMAT_Y416 (0xb << 23)
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
@@ -7102,14 +7179,25 @@ enum {
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
+/* ilk/snb precision palette */
+#define _PREC_PALETTE_A 0x4b000
+#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
+
+#define _PREC_PIPEAGCMAX 0x4d000
+#define _PREC_PIPEBGCMAX 0x4d010
+#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PREC_PIPEAGCMAX, _PREC_PIPEBGCMAX) + (i) * 4)
+
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
-#define GAMMA_MODE_MODE_MASK (3 << 0)
-#define GAMMA_MODE_MODE_8BIT (0 << 0)
-#define GAMMA_MODE_MODE_10BIT (1 << 0)
-#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+#define PRE_CSC_GAMMA_ENABLE (1 << 31)
+#define POST_CSC_GAMMA_ENABLE (1 << 30)
+#define GAMMA_MODE_MODE_MASK (3 << 0)
+#define GAMMA_MODE_MODE_8BIT (0 << 0)
+#define GAMMA_MODE_MODE_10BIT (1 << 0)
+#define GAMMA_MODE_MODE_12BIT (2 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@@ -7204,8 +7292,8 @@ enum {
#define GEN8_GT_VECS_IRQ (1 << 6)
#define GEN8_GT_GUC_IRQ (1 << 5)
#define GEN8_GT_PM_IRQ (1 << 4)
-#define GEN8_GT_VCS2_IRQ (1 << 3)
-#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
@@ -7226,8 +7314,8 @@ enum {
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
-#define GEN8_VCS1_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
+#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bspec! */
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
@@ -7613,13 +7701,13 @@ enum {
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
/*GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
-#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0)
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
/* PCH */
@@ -8089,10 +8177,11 @@ enum {
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -8600,8 +8689,9 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1 << 0)
-#define GEN9_MEDIA_PG_ENABLE (1 << 1)
+#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
+#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
+#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8616,6 +8706,11 @@ enum {
#define GEN6_PMIER _MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT (1 << 25)
#define GEN6_PM_THERMAL_EVENT (1 << 24)
+
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
@@ -9741,7 +9836,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_4 (4 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
@@ -9810,16 +9905,29 @@ enum skl_power_gate {
#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
#define BXT_DRAM_SIZE_MASK (0x7 << 6)
#define BXT_DRAM_SIZE_SHIFT 6
-#define BXT_DRAM_SIZE_4GB (0x0 << 6)
-#define BXT_DRAM_SIZE_6GB (0x1 << 6)
-#define BXT_DRAM_SIZE_8GB (0x2 << 6)
-#define BXT_DRAM_SIZE_12GB (0x3 << 6)
-#define BXT_DRAM_SIZE_16GB (0x4 << 6)
+#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
+#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
+#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
+#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
+#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
+#define BXT_DRAM_TYPE_MASK (0x7 << 22)
+#define BXT_DRAM_TYPE_SHIFT 22
+#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
+#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
+#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
+#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
+
#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
#define SKL_DRAM_S_SHIFT 16
@@ -9831,8 +9939,21 @@ enum skl_power_gate {
#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT 10
-#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
-#define SKL_DRAM_RANK_DUAL (0x1 << 10)
+#define SKL_DRAM_RANK_1 (0x0 << 10)
+#define SKL_DRAM_RANK_2 (0x1 << 10)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define CNL_DRAM_SIZE_MASK 0x7F
+#define CNL_DRAM_WIDTH_MASK (0x3 << 7)
+#define CNL_DRAM_WIDTH_SHIFT 7
+#define CNL_DRAM_WIDTH_X8 (0x0 << 7)
+#define CNL_DRAM_WIDTH_X16 (0x1 << 7)
+#define CNL_DRAM_WIDTH_X32 (0x2 << 7)
+#define CNL_DRAM_RANK_MASK (0x3 << 9)
+#define CNL_DRAM_RANK_SHIFT 9
+#define CNL_DRAM_RANK_1 (0x0 << 9)
+#define CNL_DRAM_RANK_2 (0x1 << 9)
+#define CNL_DRAM_RANK_3 (0x2 << 9)
+#define CNL_DRAM_RANK_4 (0x3 << 9)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
@@ -9877,10 +9998,14 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
+
#define _PIPE_A_CSC_MODE 0x49028
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31)
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
@@ -9916,6 +10041,70 @@ enum skl_power_gate {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* Pipe Output CSC */
+#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050
+#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054
+#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058
+#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c
+#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060
+#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064
+#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068
+#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c
+#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c
+
+#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150
+#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154
+#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158
+#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c
+#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160
+#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164
+#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168
+#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c
+#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c
+
+#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\
+ _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\
+ _PIPE_B_OUTPUT_CSC_COEFF_RY_GY)
+#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BY, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BY)
+#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RU_GU)
+#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BU)
+#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RV_GV)
+#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BV)
+#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_HI)
+#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_ME)
+#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_LO)
+#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_HI)
+#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_ME)
+#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_LO)
+
/* pipe degamma/gamma LUTs on IVB+ */
#define _PAL_PREC_INDEX_A 0x4A400
#define _PAL_PREC_INDEX_B 0x4AC00
@@ -9924,6 +10113,7 @@ enum skl_power_gate {
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
+#define PAL_PREC_INDEX_VALUE(x) ((x) << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -9941,6 +10131,7 @@ enum skl_power_gate {
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
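The PAL_PREC index/data pair above is an indexed window into the per-pipe precision gamma LUT. A hedged sketch of the usual access pattern follows: point the index register at entry 0 with auto-increment, then stream the data words. PREC_PAL_INDEX() is assumed to be the index-register macro defined next to PREC_PAL_DATA(), and I915_WRITE() the driver's usual MMIO write wrapper; the loop bound is illustrative.

/* Illustrative only: stream a LUT through the index/data window. */
static void load_prec_lut(struct drm_i915_private *dev_priv,
                          enum pipe pipe, const u32 *lut, int n)
{
        int i;

        /* Start at entry 0 and let the hardware advance the index for us. */
        I915_WRITE(PREC_PAL_INDEX(pipe),
                   PAL_PREC_AUTO_INCREMENT | PAL_PREC_INDEX_VALUE(0));

        for (i = 0; i < n; i++)
                I915_WRITE(PREC_PAL_DATA(pipe), lut[i]);
}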
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c2a5c48c7541..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -22,15 +22,31 @@
*
*/
-#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
+#include <linux/irq_work.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
-#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_reset.h"
+#include "intel_pm.h"
+
+struct execute_cb {
+ struct list_head link;
+ struct irq_work work;
+ struct i915_sw_fence *fence;
+};
+
+static struct i915_global_request {
+ struct i915_global base;
+ struct kmem_cache *slab_requests;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_execute_cbs;
+} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -51,7 +67,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->timeline->name;
+ return to_request(fence)->gem_context->name ?: "[i915]";
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -68,7 +84,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -83,8 +101,9 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&rq->submit);
+ i915_sw_fence_fini(&rq->semaphore);
- kmem_cache_free(rq->i915->requests, rq);
+ kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -150,7 +169,6 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
- GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -177,12 +195,10 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -241,12 +257,10 @@ static void i915_request_retire(struct i915_request *request)
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(request->engine));
+ hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -288,15 +302,13 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
- /* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->gem_context->ban_score);
intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
unreserve_gt(request->i915);
- i915_sched_node_fini(request->i915, &request->sched);
+ i915_sched_node_fini(&request->sched);
i915_request_put(request);
}
@@ -305,12 +317,10 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
@@ -326,9 +336,67 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (tmp != rq);
}
-static u32 timeline_get_seqno(struct i915_timeline *tl)
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
{
- return tl->seqno += 1 + tl->has_initial_breadcrumb;
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static int
+i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ gfp_t gfp)
+{
+ struct execute_cb *cb;
+
+ if (i915_request_is_active(signal))
+ return 0;
+
+ cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fence = &rq->submit;
+ i915_sw_fence_await(cb->fence);
+ init_irq_work(&cb->work, irq_execute_cb);
+
+ spin_lock_irq(&signal->lock);
+ if (i915_request_is_active(signal)) {
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+ } else {
+ list_add_tail(&cb->link, &signal->execute_cb);
+ }
+ spin_unlock_irq(&signal->lock);
+
+ return 0;
}
static void move_to_timeline(struct i915_request *request,
@@ -342,42 +410,54 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
-static u32 next_global_seqno(struct i915_timeline *tl)
-{
- if (!++tl->seqno)
- ++tl->seqno;
- return tl->seqno;
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- u32 seqno;
- GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- engine->timeline.seqno + 1,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(request->global_seqno);
+ if (i915_gem_context_is_banned(request->gem_context))
+ i915_request_skip(request, -EIO);
- seqno = next_global_seqno(&engine->timeline);
- GEM_BUG_ON(!seqno);
- GEM_BUG_ON(intel_engine_signaled(engine, seqno));
+ /*
+ * Are we using semaphores when the gpu is already saturated?
+ *
+ * Using semaphores incurs a cost in having the GPU poll a
+ * memory location, busywaiting for it to change. The continual
+ * memory reads can have a noticeable impact on the rest of the
+ * system with the extra bus traffic, stalling the cpu as it too
+ * tries to access memory across the bus (perf stat -e bus-cycles).
+ *
+ * If we installed a semaphore on this request and we only submit
+ * the request after the signaler completed, that indicates the
+ * system is overloaded and using semaphores at this time only
+ * increases the amount of work we are doing. If so, we disable
+ * further use of semaphores until we are idle again, whence we
+ * optimistically try again.
+ */
+ if (request->sched.semaphores &&
+ i915_sw_fence_signaled(&request->semaphore))
+ request->hw_context->saturated |= request->sched.semaphores;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
- request->global_seqno = seqno;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
+
+ __notify_execute_cb(request);
+
spin_unlock(&request->lock);
engine->emit_fini_breadcrumb(request,
@@ -406,12 +486,10 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
@@ -420,18 +498,25 @@ void __i915_request_unsubmit(struct i915_request *request)
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
- GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
- engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = 0;
+
+ /*
+ * As we do not allow WAIT to preempt inflight requests,
+ * once we have executed a request, along with triggering
+ * any execution callbacks, we must preserve its ordering
+ * within the non-preemptible FIFO.
+ */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
+ request->sched.attr.priority |= __NO_PREEMPTION;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
+
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@@ -489,6 +574,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static int __i915_sw_fence_call
+semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_request *request =
+ container_of(fence, typeof(*request), semaphore);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ /*
+ * We only check a small portion of our dependencies
+ * and so cannot guarantee that there remains no
+ * semaphore chain across all. Instead of opting
+ * for the full NOSEMAPHORE boost, we go for the
+ * smaller (but still preempting) boost of
+ * NEWCLIENT. This will be enough to boost over
+ * a busywaiting request (as that cannot be
+ * NEWCLIENT) without accidentally boosting
+ * a busywait over real work elsewhere.
+ */
+ i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
+ break;
+
+ case FENCE_FREE:
+ i915_request_put(request);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
@@ -518,12 +633,7 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
- return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
-}
-
-static int add_timeline_barrier(struct i915_request *rq)
-{
- return i915_request_await_active_request(rq, &rq->timeline->barrier);
+ return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}
/**
@@ -539,8 +649,10 @@ struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq;
struct intel_context *ce;
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+ u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -556,8 +668,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
- return ERR_PTR(-EIO);
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Pinning the contexts may generate requests in order to acquire
@@ -569,6 +682,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
return ERR_CAST(ce);
reserve_gt(i915);
+ mutex_lock(&ce->ring->timeline->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -605,7 +719,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*
* Do not use kmem_cache_zalloc() here!
*/
- rq = kmem_cache_alloc(i915->requests,
+ rq = kmem_cache_alloc(global.slab_requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
rq = i915_request_alloc_slow(ce);
@@ -615,32 +729,36 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
- rq->rcustate = get_state_synchronize_rcu();
-
INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
+
+ tl = ce->ring->timeline;
+ ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
rq->i915 = i915;
rq->engine = engine;
rq->gem_context = ctx;
rq->hw_context = ce;
rq->ring = ce->ring;
- rq->timeline = ce->ring->timeline;
+ rq->timeline = tl;
GEM_BUG_ON(rq->timeline == &engine->timeline);
- rq->hwsp_seqno = rq->timeline->hwsp_seqno;
+ rq->hwsp_seqno = tl->hwsp_seqno;
+ rq->hwsp_cacheline = tl->hwsp_cacheline;
+ rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
- dma_fence_init(&rq->fence,
- &i915_fence_ops,
- &rq->lock,
- rq->timeline->fence_context,
- timeline_get_seqno(rq->timeline));
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
- rq->global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -668,10 +786,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- ret = add_timeline_barrier(rq);
- if (ret)
- goto err_unwind;
-
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -682,7 +796,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
+ lockdep_assert_held(&rq->timeline->mutex);
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
+
return rq;
err_unwind:
@@ -693,14 +810,113 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
- kmem_cache_free(i915->requests, rq);
+err_free:
+ kmem_cache_free(global.slab_requests, rq);
err_unreserve:
+ mutex_unlock(&ce->ring->timeline->mutex);
unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+ if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ return 0;
+
+ signal = list_prev_entry(signal, ring_link);
+ if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ return 0;
+
+ return i915_sw_fence_await_dma_fence(&rq->submit,
+ &signal->fence, 0,
+ I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+ /*
+ * Polling a semaphore causes bus traffic, delaying other users of
+ * both the GPU and CPU. We want to limit the impact on others,
+ * while taking advantage of early submission to reduce GPU
+ * latency. Therefore we restrict ourselves to not using more
+ * than one semaphore from each source, and not using a semaphore
+ * if we have detected the engine is saturated (i.e. would not be
+ * submitted early and cause bus traffic reading an already passed
+ * semaphore).
+ *
+ * See the are-we-too-late? check in __i915_request_submit().
+ */
+ return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ u32 hwsp_offset;
+ u32 *cs;
+ int err;
+
+ GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (already_busywaiting(to) & from->engine->mask)
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
+ err = i915_sw_fence_await_dma_fence(&to->semaphore,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (err < 0)
+ return err;
+
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
+
+ /* Only submit our spinner after the signaler is running! */
+ err = i915_request_await_execution(to, from, gfp);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(to, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Using greater-than-or-equal here means we have to worry
+ * about seqno wraparound. To side step that issue, we swap
+ * the timeline HWSP upon wrapping, so that a listener waiting on
+ * an old (pre-wrap) value does not see a much smaller (post-wrap)
+ * value than it was expecting (and so end up waiting
+ * forever).
+ */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = from->fence.seqno;
+ *cs++ = hwsp_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(to, cs);
+ to->sched.semaphores |= from->engine->mask;
+ to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ return 0;
+}
+
+static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
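The wraparound note in emit_semaphore_wait() above can be made concrete with a toy comparison. The values below are made up purely to show why a greater-or-equal poll never completes once a 32-bit seqno wraps, which is why the timeline HWSP is swapped on wrap.

/* Toy illustration only: why a GTE poll never completes across a u32 wrap. */
static bool semaphore_poll_satisfied(void)
{
        u32 wait_for = 0xfffffffe;      /* seqno baked into MI_SEMAPHORE_WAIT */
        u32 hwsp_now = 0x00000002;      /* value written after the seqno wrapped */

        return hwsp_now >= wait_for;    /* false forever -> endless busywait */
}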
@@ -712,9 +928,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(to->i915,
- &to->sched,
- &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (ret < 0)
return ret;
}
@@ -723,6 +937,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
+ } else if (intel_engine_has_semaphores(to->engine) &&
+ to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
} else {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
@@ -873,6 +1090,60 @@ void i915_request_skip(struct i915_request *rq, int error)
memset(vaddr + head, 0, rq->postfix - head);
}
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct i915_timeline *timeline = rq->timeline;
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * If we consider the case of virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing) and this
+ * precludes optimising to use semaphore serialisation of a single
+ * timeline across engines.
+ */
+ prev = i915_active_request_raw(&timeline->last_request,
+ &rq->i915->drm.struct_mutex);
+ if (prev && !i915_request_completed(prev)) {
+ if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+ else
+ __i915_sw_fence_await_dma_fence(&rq->submit,
+ &prev->fence,
+ &rq->dmaq);
+ if (rq->engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&rq->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
+ __i915_active_request_set(&timeline->last_request, rq);
+
+ return prev;
+}
+
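+The is_power_of_2(prev->engine->mask | rq->engine->mask) test above reads as "both requests are on the same engine": engine masks carry a single bit, so the OR of two masks stays a power of two only when the bits coincide. A toy restatement, with made-up mask values in the comment:
+
+/* Toy illustration only: the same-engine test used above. */
+static bool same_engine(unsigned long mask_a, unsigned long mask_b)
+{
+	/*
+	 * e.g. BIT(0) | BIT(0) == 0b001 (power of two, same engine: cheap
+	 * submit-fence wait), while BIT(0) | BIT(2) == 0b101 (not a power
+	 * of two, cross-engine: full dma-fence await is required).
+	 */
+	return is_power_of_2(mask_a | mask_b);
+}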
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@@ -889,7 +1160,9 @@ void i915_request_add(struct i915_request *request)
GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
- lockdep_assert_held(&request->i915->drm.struct_mutex);
+ lockdep_assert_held(&request->timeline->mutex);
+ lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
+
trace_i915_request_add(request);
/*
@@ -917,37 +1190,12 @@ void i915_request_add(struct i915_request *request)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /*
- * Seal the request and mark it as pending execution. Note that
- * we may inspect this state, without holding any locks, during
- * hangcheck. Hence we apply the barrier to ensure that we do not
- * see a more recent value in the hws than we are tracking.
- */
-
- prev = i915_active_request_raw(&timeline->last_request,
- &request->i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
- &request->submitq);
- if (engine->schedule)
- __i915_sched_node_add_dependency(&request->sched,
- &prev->sched,
- &request->dep,
- 0);
- }
-
- spin_lock_irq(&timeline->lock);
- list_add_tail(&request->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
-
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
- __i915_active_request_set(&timeline->last_request, request);
+ prev = __i915_request_add_to_timeline(request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list)) {
- GEM_TRACE("marking %s as active\n", ring->timeline->name);
+ if (list_is_first(&request->ring_link, &ring->request_list))
list_add(&ring->active_link, &request->i915->gt.active_rings);
- }
+ request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;
/*
@@ -962,11 +1210,27 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
+ i915_sw_fence_commit(&request->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore wait
+ * far in the future over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
* Boost priorities to new clients (new request flows).
*
* Allow interactive/synchronous clients to jump ahead of
@@ -1000,6 +1264,8 @@ void i915_request_add(struct i915_request *request)
*/
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
+
+ mutex_unlock(&request->timeline->mutex);
}
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1136,8 +1402,25 @@ long i915_request_wait(struct i915_request *rq,
if (__i915_spin_request(rq, state, 5))
goto out;
- if (flags & I915_WAIT_PRIORITY)
+ /*
+ * This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we sleep. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery).
+ */
+ if (flags & I915_WAIT_PRIORITY) {
+ if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq);
+ local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+ local_bh_enable(); /* kick tasklets en masse */
+ }
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
@@ -1179,11 +1462,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
if (!i915->gt.active_requests)
return;
- list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+ list_for_each_entry_safe(ring, tmp,
+ &i915->gt.active_rings, active_link) {
+ intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
+ intel_ring_put(ring);
+ }
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
+
+static void i915_global_request_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_execute_cbs);
+ kmem_cache_shrink(global.slab_requests);
+}
+
+static void i915_global_request_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_execute_cbs);
+ kmem_cache_destroy(global.slab_requests);
+}
+
+static struct i915_global_request global = { {
+ .shrink = i915_global_request_shrink,
+ .exit = i915_global_request_exit,
+} };
+
+int __init i915_global_request_init(void)
+{
+ global.slab_requests = KMEM_CACHE(i915_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_requests)
+ return -ENOMEM;
+
+ global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_execute_cbs)
+ goto err_requests;
+
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!global.slab_dependencies)
+ goto err_execute_cbs;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_execute_cbs:
+ kmem_cache_destroy(global.slab_execute_cbs);
+err_requests:
+ kmem_cache_destroy(global.slab_requests);
+ return -ENOMEM;
+}
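The request and execute_cb caches above are created with SLAB_TYPESAFE_BY_RCU, which keeps the memory type-stable across an RCU grace period but allows an object to be freed and reused while a lockless reader still holds a pointer to it. Such readers therefore have to revalidate the object after taking a reference. The sketch below is generic and hedged; the slot and the identity check are made up for illustration.

/* Illustrative only: lockless lookup against a SLAB_TYPESAFE_BY_RCU cache. */
static struct i915_request *
lookup_request_rcu(struct i915_request __rcu **slot, u64 ctx)
{
        struct i915_request *rq;

        rcu_read_lock();
        rq = rcu_dereference(*slot);
        /* The memory stays a valid i915_request, but may have been recycled, */
        if (rq && !kref_get_unless_zero(&rq->fence.refcount))
                rq = NULL;
        /* so revalidate its identity after the reference is taken. */
        if (rq && rq->fence.context != ctx) {
                i915_request_put(rq);
                rq = NULL;
        }
        rcu_read_unlock();

        return rq;
}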
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 40f3e8dcbdd5..a982664618c2 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,9 +26,11 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/lockdep.h>
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include <uapi/drm/i915_drm.h>
@@ -37,6 +39,7 @@ struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
+struct i915_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@@ -119,6 +122,15 @@ struct i915_request {
unsigned long rcustate;
/*
+ * We pin the timeline->mutex while constructing the request to
+ * ensure that no caller accidentally drops it during construction.
+ * The timeline->mutex must be held to ensure that only this caller
+ * can use the ring and manipulate the associated timeline during
+ * construction.
+ */
+ struct pin_cookie cookie;
+
+ /*
* Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
@@ -126,7 +138,12 @@ struct i915_request {
* It is used by the driver to then queue the request for execution.
*/
struct i915_sw_fence submit;
- wait_queue_entry_t submitq;
+ union {
+ wait_queue_entry_t submitq;
+ struct i915_sw_dma_fence_cb dmaq;
+ };
+ struct list_head execute_cb;
+ struct i915_sw_fence semaphore;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
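The pin_cookie field documented above follows the generic lockdep pinning pattern: the timeline mutex is pinned while the request is under construction so lockdep complains if anyone drops it early, and the saved cookie is consumed again in i915_request_add(). A minimal sketch of that pattern, assuming only the lockdep helpers from <linux/lockdep.h>:

/* Illustrative only: the lockdep pin/unpin pattern used for timeline->mutex. */
static void pinned_section(struct mutex *lock)
{
        struct pin_cookie cookie;

        mutex_lock(lock);
        cookie = lockdep_pin_lock(lock);        /* lockdep warns if the lock is dropped */

        /* ... build the request; the lock must stay held throughout ... */

        lockdep_unpin_lock(lock, cookie);       /* pairs with the pin above */
        mutex_unlock(lock);
}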
@@ -147,13 +164,15 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /**
- * GEM sequence number associated with this request on the
- * global execution timeline. It is zero when the request is not
- * on the HW queue (i.e. not on the engine timeline list).
- * Its value is guarded by the timeline spinlock.
+ /*
+ * If we need to access the timeline's seqno for this request in
+ * another request, we need to keep a read reference to this associated
+ * cacheline, so that we do not free and recycle it before the foreign
+ * observers have completed. Hence, we keep a pointer to the cacheline
+ * inside the timeline's HWSP vma, but it is only valid while this
+ * request has not completed and guarded by the timeline mutex.
*/
- u32 global_seqno;
+ struct i915_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -204,6 +223,11 @@ struct i915_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
+
+ I915_SELFTEST_DECLARE(struct {
+ struct list_head link;
+ unsigned long delay;
+ } mock;)
};
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -247,30 +271,6 @@ i915_request_put(struct i915_request *rq)
dma_fence_put(&rq->fence);
}
-/**
- * i915_request_global_seqno - report the current global seqno
- * @request - the request
- *
- * A request is assigned a global seqno only when it is on the hardware
- * execution queue. The global seqno can be used to maintain a list of
- * requests on the same engine in retirement order, for example for
- * constructing a priority queue for waiting. Prior to its execution, or
- * if it is subsequently removed in the event of preemption, its global
- * seqno is zero. As both insertion and removal from the execution queue
- * may operate in IRQ context, it is not guarded by the usual struct_mutex
- * BKL. Instead those relying on the global seqno must be prepared for its
- * value to change between reads. Only when the request is complete can
- * the global seqno be stable (due to the memory barriers on submitting
- * the commands to the hardware to write the breadcrumb, if the HWS shows
- * that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
- */
-static inline u32
-i915_request_global_seqno(const struct i915_request *request)
-{
- return READ_ONCE(request->global_seqno);
-}
-
int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
@@ -358,10 +358,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
* i915_request_started - check if the request has begun being executed
* @rq: the request
*
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
+ * If the timeline is not using initial breadcrumbs, a request is
+ * considered started if the previous request on its timeline (i.e.
+ * context) has been signaled.
+ *
+ * If the timeline is using semaphores, it will also be emitting an
+ * "initial breadcrumb" after the semaphores are complete and just before
+ * it began executing the user payload. A request can therefore be active
+ * on the HW and not yet started as it is still busywaiting on its
+ * dependencies (via HW semaphores).
+ *
+ * If the request has started, its dependencies will have been signaled
+ * (either by fences or by semaphores) and it will have begun processing
+ * the user payload.
+ *
+ * However, even if a request has started, it may have been preempted and
+ * so no longer active, or it may have already completed.
+ *
+ * See also i915_request_is_active().
+ *
+ * Returns true if the request has begun executing the user payload, or
+ * has completed.
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
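Given the updated semantics above, a caller can distinguish a queued request, one busywaiting on its semaphores, and one that is running or finished. The sketch below is hedged: it simply chains the predicates shown or referenced in this header (i915_request_completed(), i915_request_started(), i915_request_is_active()) in order of strength.

/* Illustrative only: coarse classification of a request's progress. */
static const char *request_progress(const struct i915_request *rq)
{
        if (i915_request_completed(rq))
                return "completed";
        if (i915_request_started(rq))
                return "executing user payload";
        if (i915_request_is_active(rq))
                return "on hw, waiting on semaphores/dependencies";
        return "queued";
}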
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0e0ddf2e6815..677d59304e78 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -18,28 +18,39 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw_fw(uncore, reg, 0, set);
+}
+
+static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw_fw(uncore, reg, clr, 0);
+}
+
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- struct i915_timeline *timeline = rq->timeline;
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(timeline == &engine->timeline);
-
- spin_lock(&timeline->lock);
-
- if (i915_request_is_active(rq)) {
- list_for_each_entry_continue(rq,
- &engine->timeline.requests, link)
- if (rq->gem_context == hung_ctx)
- i915_request_skip(rq, -EIO);
- }
- list_for_each_entry(rq, &timeline->requests, link)
- i915_request_skip(rq, -EIO);
+ if (!i915_request_is_active(rq))
+ return;
- spin_unlock(&timeline->lock);
+ list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ if (rq->gem_context == hung_ctx)
+ i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
@@ -68,23 +79,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
- unsigned int score;
- bool banned, bannable;
+ unsigned long prev_hang;
+ bool banned;
+ int i;
atomic_inc(&ctx->guilty_count);
- bannable = i915_gem_context_is_bannable(ctx);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
- /* Cool contexts don't accumulate client ban score */
- if (!bannable)
+ /* Cool contexts are too cool to be banned! (Used for reset testing.) */
+ if (!i915_gem_context_is_bannable(ctx))
return false;
+ /* Record the timestamp for the last N hangs */
+ prev_hang = ctx->hang_timestamp[0];
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
+ ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
+ ctx->hang_timestamp[i] = jiffies;
+
+ /* If we have hung N+1 times in rapid succession, we ban the context! */
+ banned = !i915_gem_context_is_recoverable(ctx);
+ if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
+ banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score);
+ DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
i915_gem_context_set_banned(ctx);
}
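The ban heuristic above keeps the timestamps of the last N hangs and bans the context when hang N+1 lands within CONTEXT_FAST_HANG_JIFFIES of the oldest one. A stripped-down sketch of the same sliding-window test follows; the window define is a stand-in, not the driver's constant.

/* Illustrative only: ban when hangs arrive faster than the window allows. */
#define EXAMPLE_HANG_WINDOW (120 * HZ)  /* stand-in for CONTEXT_FAST_HANG_JIFFIES */

static bool too_many_hangs(unsigned long *stamps, int n)
{
        unsigned long oldest = stamps[0];
        int i;

        /* Shift the history down and record this hang as the newest entry. */
        for (i = 0; i < n - 1; i++)
                stamps[i] = stamps[i + 1];
        stamps[i] = jiffies;

        /* n + 1 hangs inside the window -> ban the context. */
        return time_before(jiffies, oldest + EXAMPLE_HANG_WINDOW);
}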
@@ -101,6 +118,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
void i915_reset_request(struct i915_request *rq, bool guilty)
{
+ GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ yesno(guilty));
+
lockdep_assert_held(&rq->engine->timeline.lock);
GEM_BUG_ON(i915_request_completed(rq));
@@ -116,38 +139,43 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
+ GEM_TRACE("%s\n", engine->name);
+
if (intel_engine_stop_cs(engine))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
- POSTING_READ_FW(RING_TAIL(base));
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
/* The ring must be empty before it is disabled */
- I915_WRITE_FW(RING_CTL(base), 0);
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
/* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void i915_stop_engines(struct drm_i915_private *i915,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ intel_engine_mask_t tmp;
if (INTEL_GEN(i915) < 3)
return;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
gen3_stop_engine(engine);
}
@@ -160,7 +188,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
}
static int i915_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -189,7 +217,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
}
static int g33_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -198,17 +226,17 @@ static int g33_do_reset(struct drm_i915_private *i915,
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int g4x_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@@ -229,21 +257,22 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int ironlake_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -252,8 +281,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
goto out;
}
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -263,15 +293,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
}
out:
- I915_WRITE_FW(ILK_GDSR, 0);
- POSTING_READ_FW(ILK_GDSR);
+ intel_uncore_write_fw(uncore, ILK_GDSR, 0);
+ intel_uncore_posting_read_fw(uncore, ILK_GDSR);
return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+static int gen6_hw_domain_reset(struct drm_i915_private *i915,
u32 hw_domain_mask)
{
+ struct intel_uncore *uncore = &i915->uncore;
int err;
/*
@@ -279,10 +310,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* for fifo space for the write or forcewake the chip for
* the read
*/
- I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(dev_priv,
+ err = __intel_wait_for_register_fw(uncore,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
@@ -294,36 +325,38 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
}
static int gen6_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN6_GRDOM_RENDER,
- [BCS] = GEN6_GRDOM_BLT,
- [VCS] = GEN6_GRDOM_MEDIA,
- [VCS2] = GEN8_GRDOM_MEDIA2,
- [VECS] = GEN6_GRDOM_VECS,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
- unsigned int tmp;
+ intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
+ }
}
return gen6_hw_domain_reset(i915, hw_mask);
}
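
A small standalone sketch of the mask translation performed above: walk each set bit of the software engine mask and accumulate the matching hardware reset-domain bit. The TOY_* values are made up for illustration and are not the real register bits.

#include <stdio.h>

#define TOY_RCS0 (1u << 0)
#define TOY_BCS0 (1u << 1)
#define TOY_VCS0 (1u << 2)

#define TOY_GRDOM_RENDER (1u << 1)
#define TOY_GRDOM_BLT    (1u << 5)
#define TOY_GRDOM_MEDIA  (1u << 2)

/* Translate a software engine mask into hardware reset-domain bits. */
static unsigned int toy_hw_mask(unsigned int engine_mask)
{
        static const unsigned int hw_domain[] = {
                [0] = TOY_GRDOM_RENDER,   /* RCS0 */
                [1] = TOY_GRDOM_BLT,      /* BCS0 */
                [2] = TOY_GRDOM_MEDIA,    /* VCS0 */
        };
        unsigned int hw_mask = 0;

        while (engine_mask) {
                int id = __builtin_ctz(engine_mask); /* like __ffs() */

                hw_mask |= hw_domain[id];
                engine_mask &= engine_mask - 1;      /* clear lowest set bit */
        }
        return hw_mask;
}

int main(void)
{
        printf("hw_mask = 0x%x\n", toy_hw_mask(TOY_RCS0 | TOY_VCS0));
        return 0;
}
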
-static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
@@ -370,10 +403,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+ rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
sfc_forced_lock_ack,
sfc_forced_lock_ack_bit,
sfc_forced_lock_ack_bit,
@@ -382,16 +414,16 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
return 0;
}
- if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+ if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
return sfc_reset_bit;
return 0;
}
-static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
@@ -413,38 +445,36 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
return;
}
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+ rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN11_GRDOM_RENDER,
- [BCS] = GEN11_GRDOM_BLT,
- [VCS] = GEN11_GRDOM_MEDIA,
- [VCS2] = GEN11_GRDOM_MEDIA2,
- [VCS3] = GEN11_GRDOM_MEDIA3,
- [VCS4] = GEN11_GRDOM_MEDIA4,
- [VECS] = GEN11_GRDOM_VECS,
- [VECS2] = GEN11_GRDOM_VECS2,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
u32 hw_mask;
int ret;
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(i915, engine);
+ hw_mask |= gen11_lock_sfc(engine);
}
}
@@ -452,46 +482,62 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
if (engine_mask != ALL_ENGINES)
for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen11_unlock_sfc(i915, engine);
+ gen11_unlock_sfc(engine);
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
+ u32 request, mask, ack;
int ret;
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+ ack = intel_uncore_read_fw(uncore, reg);
+ if (ack & RESET_CTL_CAT_ERROR) {
+ /*
+ * For catastrophic errors, ready-for-reset sequence
+ * needs to be bypassed: HAS#396813
+ */
+ request = RESET_CTL_CAT_ERROR;
+ mask = RESET_CTL_CAT_ERROR;
+
+ /* Catastrophic errors need to be cleared by HW */
+ ack = 0;
+ } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
+ request = RESET_CTL_REQUEST_RESET;
+ mask = RESET_CTL_READY_TO_RESET;
+ ack = RESET_CTL_READY_TO_RESET;
+ } else {
+ return 0;
+ }
- ret = __intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700, 0,
- NULL);
+ intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
+ 700, 0, NULL);
if (ret)
- DRM_ERROR("%s: reset request timeout\n", engine->name);
+ DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ intel_uncore_write_fw(engine->uncore,
+ RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int ret;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -527,14 +573,11 @@ skip_reset:
}
typedef int (*reset_func)(struct drm_i915_private *,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
- if (!i915_modparams.reset)
- return NULL;
-
if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
@@ -551,7 +594,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -566,7 +610,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
/*
* We stop engines, otherwise we might get failed reset and a
@@ -582,14 +626,15 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- i915_stop_engines(i915, engine_mask);
+ if (retry)
+ i915_stop_engines(i915, engine_mask);
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
ret = reset(i915, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -599,6 +644,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915)
if (USES_GUC(i915))
return false;
+ if (!i915_modparams.reset)
+ return false;
+
return intel_get_gpu_reset(i915);
}
@@ -615,9 +663,9 @@ int intel_reset_guc(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -635,10 +683,36 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
+static void revoke_mmaps(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->num_fence_regs; i++) {
+ struct drm_vma_offset_node *node;
+ struct i915_vma *vma;
+ u64 vma_offset;
+
+ vma = READ_ONCE(i915->fence_regs[i].vma);
+ if (!vma)
+ continue;
+
+ if (!i915_vma_has_userfault(vma))
+ continue;
+
+ GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ node = &vma->obj->base.vma_node;
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+ }
+}
+
static void reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -647,10 +721,16 @@ static void reset_prepare(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
- intel_uc_sanitize(i915);
+ intel_uc_reset_prepare(i915);
+}
+
+static void gt_revoke(struct drm_i915_private *i915)
+{
+ revoke_mmaps(i915);
}
-static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int gt_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -665,7 +745,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
return err;
for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+ intel_engine_reset(engine, stalled_mask & engine->mask);
i915_gem_restore_fences(i915);
@@ -675,7 +755,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
@@ -722,8 +802,10 @@ static void reset_finish(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
+ intel_engine_signal_breadcrumbs(engine);
+ }
}
static void reset_restart(struct drm_i915_private *i915)
@@ -761,23 +843,19 @@ static void nop_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
- intel_engine_write_global_seqno(engine, request->global_seqno);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&error->wedge_mutex);
- if (test_bit(I915_WEDGED, &error->flags)) {
- mutex_unlock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags))
return;
- }
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
struct drm_printer p = drm_debug_printer(__func__);
@@ -793,11 +871,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
- reset_prepare_engine(engine);
+ reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
- if (INTEL_GEN(i915) >= 5)
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_gpu_reset(i915, ALL_ENGINES);
for_each_engine(engine, i915, id) {
@@ -811,31 +888,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- for_each_engine(engine, i915, id) {
- reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
- }
+ reset_finish(i915);
smp_mb__before_atomic();
set_bit(I915_WEDGED, &error->flags);
GEM_TRACE("end\n");
- mutex_unlock(&error->wedge_mutex);
+}
- wake_up_all(&error->reset_queue);
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&error->wedge_mutex);
+ with_intel_runtime_pm(i915, wakeref)
+ __i915_gem_set_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct i915_timeline *tl;
- bool ret = false;
if (!test_bit(I915_WEDGED, &error->flags))
return true;
@@ -843,8 +924,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
return false;
- mutex_lock(&error->wedge_mutex);
-
GEM_TRACE("start\n");
/*
@@ -860,30 +939,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
mutex_lock(&i915->gt.timelines.mutex);
list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
struct i915_request *rq;
- long timeout;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
/*
- * We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
+ * All internal dependencies (i915_requests) will have
+ * been flushed by the set-wedge, but we may be stuck waiting
+ * for external fences. These should all be capped to 10s
+ * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
+ * in the worst case.
*/
- timeout = dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT);
+ dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
- if (timeout < 0) {
- mutex_unlock(&i915->gt.timelines.mutex);
- goto unlock;
- }
}
mutex_unlock(&i915->gt.timelines.mutex);
@@ -904,57 +973,38 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
- ret = true;
-unlock:
- mutex_unlock(&i915->gpu_error.wedge_mutex);
- return ret;
+ return true;
}
-struct __i915_reset {
- struct drm_i915_private *i915;
- unsigned int stalled_mask;
-};
-
-static int __i915_reset__BKL(void *data)
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
- struct __i915_reset *arg = data;
- int err;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool result;
- err = intel_gpu_reset(arg->i915, ALL_ENGINES);
- if (err)
- return err;
+ mutex_lock(&error->wedge_mutex);
+ result = __i915_gem_unset_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
- return gt_reset(arg->i915, arg->stalled_mask);
+ return result;
}
-#if RESET_UNDER_STOP_MACHINE
-/*
- * XXX An alternative to using stop_machine would be to park only the
- * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
- * we should be able to prevent their memmory accesses via the lost fence
- * registers over the course of the reset without the potential recursive
- * of mutexes between the pagefault handler and reset.
- *
- * See igt/gem_mmap_gtt/hang
- */
-#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
-#else
-#define __do_reset(fn, arg) fn(arg)
-#endif
-
-static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
- struct __i915_reset arg = { i915, stalled_mask };
int err, i;
- err = __do_reset(__i915_reset__BKL, &arg);
+ gt_revoke(i915);
+
+ err = intel_gpu_reset(i915, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
- msleep(100);
- err = __do_reset(__i915_reset__BKL, &arg);
+ msleep(10 * (i + 1));
+ err = intel_gpu_reset(i915, ALL_ENGINES);
}
+ if (err)
+ return err;
- return err;
+ return gt_reset(i915, stalled_mask);
}
/**
@@ -966,8 +1016,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
- * Caller must hold the struct_mutex.
- *
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
@@ -977,7 +1025,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* - re-init display
*/
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
@@ -990,7 +1038,7 @@ void i915_reset(struct drm_i915_private *i915,
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(i915))
+ if (!__i915_gem_unset_wedged(i915))
return;
if (reason)
@@ -1007,11 +1055,17 @@ void i915_reset(struct drm_i915_private *i915,
goto error;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(i915);
+
if (do_reset(i915, stalled_mask)) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(i915);
+
intel_overlay_reset(i915);
/*
@@ -1033,7 +1087,7 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
- if (!i915_terminally_wedged(error))
+ if (!__i915_wedged(error))
reset_restart(i915);
return;
@@ -1052,14 +1106,14 @@ taint:
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
- i915_gem_set_wedged(i915);
+ __i915_gem_set_wedged(i915);
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, intel_engine_flag(engine));
+ return intel_gpu_reset(i915, engine->mask);
}
/**
@@ -1144,7 +1198,12 @@ static void i915_reset_device(struct drm_i915_private *i915,
i915_wedge_on_timeout(&w, i915, 5 * HZ) {
intel_prepare_reset(i915);
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&error->reset_backoff_srcu);
+
+ mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
+ mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
@@ -1153,44 +1212,50 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
u32 eir;
- if (!IS_GEN(dev_priv, 2))
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
- if (INTEL_GEN(dev_priv) < 4)
- I915_WRITE(IPEIR, I915_READ(IPEIR));
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
else
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+ clear_register(uncore, IPEIR_I965);
- I915_WRITE(EIR, I915_READ(EIR));
- eir = I915_READ(EIR);
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
}
- if (INTEL_GEN(dev_priv) >= 8) {
- I915_WRITE(GEN8_RING_FAULT_REG,
- I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
- POSTING_READ(GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- I915_WRITE(RING_FAULT_REG(engine),
- I915_READ(RING_FAULT_REG(engine)) &
- ~RING_FAULT_VALID);
+ for_each_engine(engine, i915, id) {
+ rmw_clear(uncore,
+ RING_FAULT_REG(engine), RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore,
+ RING_FAULT_REG(engine));
}
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
}
@@ -1208,13 +1273,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...)
{
+ struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
char error_msg[80];
char *msg = NULL;
@@ -1237,7 +1303,7 @@ void i915_handle_error(struct drm_i915_private *i915,
*/
wakeref = intel_runtime_pm_get(i915);
- engine_mask &= INTEL_INFO(i915)->ring_mask;
+ engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
@@ -1248,20 +1314,19 @@ void i915_handle_error(struct drm_i915_private *i915,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) &&
- !i915_terminally_wedged(&i915->gpu_error)) {
+ if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
for_each_engine_masked(engine, i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
+ &error->flags))
continue;
if (i915_reset_engine(engine, msg) == 0)
- engine_mask &= ~intel_engine_flag(engine);
+ engine_mask &= ~engine->mask;
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
- wake_up_bit(&i915->gpu_error.flags,
+ &error->flags);
+ wake_up_bit(&error->flags,
I915_RESET_ENGINE + engine->id);
}
}
@@ -1270,18 +1335,20 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
- goto out;
+ if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
+ wait_event(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &error->flags));
+ goto out; /* piggy-back on the other reset */
}
+ /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
+ synchronize_rcu_expedited();
+
/* Prevent any other reset-engine attempt. */
for_each_engine(engine, i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
+ &error->flags))
+ wait_on_bit(&error->flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
@@ -1290,16 +1357,74 @@ void i915_handle_error(struct drm_i915_private *i915,
for_each_engine(engine, i915, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ &error->flags);
}
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &error->flags);
+ wake_up_all(&error->reset_queue);
out:
intel_runtime_pm_put(i915, wakeref);
}
+int i915_reset_trylock(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ int srcu;
+
+ might_lock(&error->reset_backoff_srcu);
+ might_sleep();
+
+ rcu_read_lock();
+ while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ rcu_read_unlock();
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ rcu_read_lock();
+ }
+ srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ rcu_read_unlock();
+
+ return srcu;
+}
+
+void i915_reset_unlock(struct drm_i915_private *i915, int tag)
+__releases(&i915->gpu_error.reset_backoff_srcu)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ srcu_read_unlock(&error->reset_backoff_srcu, tag);
+}
+
+int i915_terminally_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ might_sleep();
+
+ if (!__i915_wedged(error))
+ return 0;
+
+ /* Reset still in progress? Maybe we will recover? */
+ if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ return -EIO;
+
+ /* XXX intel_reset_finish() still takes struct_mutex!!! */
+ if (mutex_is_locked(&i915->drm.struct_mutex))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ return __i915_wedged(error) ? -EIO : 0;
+}
+
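
The SRCU-based backoff added above (i915_reset_trylock()/i915_reset_unlock() and the I915_RESET_BACKOFF bit) follows a familiar shape: readers only proceed while no reset is pending, and the reset path waits for existing readers to drain before it touches the hardware. A rough user-space analogue using a plain mutex and condition variable (so not real SRCU, and without the interruptible wait) might look like this; build with -lpthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_reset_lock {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int readers;                /* users inside the "no reset" section */
        bool reset_in_progress;     /* like I915_RESET_BACKOFF */
};

static int toy_reset_trylock(struct toy_reset_lock *rl)
{
        pthread_mutex_lock(&rl->lock);
        while (rl->reset_in_progress)       /* back off while a reset runs */
                pthread_cond_wait(&rl->cond, &rl->lock);
        rl->readers++;
        pthread_mutex_unlock(&rl->lock);
        return 0;
}

static void toy_reset_unlock(struct toy_reset_lock *rl)
{
        pthread_mutex_lock(&rl->lock);
        if (--rl->readers == 0)
                pthread_cond_broadcast(&rl->cond);
        pthread_mutex_unlock(&rl->lock);
}

static void toy_reset_begin(struct toy_reset_lock *rl)
{
        pthread_mutex_lock(&rl->lock);
        rl->reset_in_progress = true;       /* stop new readers */
        while (rl->readers)                 /* like synchronize_srcu() */
                pthread_cond_wait(&rl->cond, &rl->lock);
        pthread_mutex_unlock(&rl->lock);
}

static void toy_reset_end(struct toy_reset_lock *rl)
{
        pthread_mutex_lock(&rl->lock);
        rl->reset_in_progress = false;
        pthread_cond_broadcast(&rl->cond);  /* wake anyone backed off */
        pthread_mutex_unlock(&rl->lock);
}

int main(void)
{
        static struct toy_reset_lock rl = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };

        toy_reset_trylock(&rl);
        toy_reset_unlock(&rl);
        toy_reset_begin(&rl);               /* "reset" runs with no readers */
        toy_reset_end(&rl);
        printf("done\n");
        return 0;
}
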
bool i915_reset_flush(struct drm_i915_private *i915)
{
int err;
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
index f2d347f319df..3c0450289b8f 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -9,14 +9,18 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/srcu.h>
+
+#include "intel_engine_types.h"
struct drm_i915_private;
+struct i915_request;
struct intel_engine_cs;
struct intel_guc;
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
@@ -24,7 +28,7 @@ void i915_handle_error(struct drm_i915_private *i915,
void i915_clear_error_registers(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
@@ -32,10 +36,16 @@ int i915_reset_engine(struct intel_engine_cs *engine,
void i915_reset_request(struct i915_request *rq, bool guilty);
bool i915_reset_flush(struct drm_i915_private *i915);
+int __must_check i915_reset_trylock(struct drm_i915_private *i915);
+void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+
+int i915_terminally_wedged(struct drm_i915_private *i915);
+
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);
-int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 8bc042551692..39bc4f54e272 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -7,9 +7,16 @@
#include <linux/mutex.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+static struct i915_global_scheduler {
+ struct i915_global base;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_priorities;
+} global;
+
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
@@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node)
return container_of(node, const struct i915_request, sched);
}
+static inline bool node_started(const struct i915_sched_node *node)
+{
+ return i915_request_started(node_to_request(node));
+}
+
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
@@ -29,19 +41,20 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
+ node->semaphores = 0;
+ node->flags = 0;
}
static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
{
- kmem_cache_free(i915->dependencies, dep);
+ kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -51,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
{
bool ret = false;
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
@@ -60,39 +73,42 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->signaler = signal;
dep->flags = flags;
+ /* Keep track of whether anyone on this chain has a semaphore */
+ if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+ !node_started(signal))
+ node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
ret = true;
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
return ret;
}
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
- dep = i915_dependency_alloc(i915);
+ dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
return 0;
}
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
@@ -106,7 +122,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
@@ -116,10 +132,10 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -193,7 +209,7 @@ find_priolist:
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -301,6 +317,10 @@ static void __i915_schedule(struct i915_request *rq,
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
+ /* If we are already flying, we know we have no signalers */
+ if (node_started(node))
+ continue;
+
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
@@ -313,7 +333,6 @@ static void __i915_schedule(struct i915_request *rq,
if (node_signaled(p->signaler))
continue;
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
@@ -337,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq,
memset(&cache, 0, sizeof(cache));
engine = rq->engine;
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -388,30 +407,73 @@ static void __i915_schedule(struct i915_request *rq,
tasklet_hi_schedule(&engine->execlists.tasklet);
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
__i915_schedule(rq, attr);
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
+ unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
- spin_lock_bh(&schedule_lock);
+ spin_lock_irqsave(&schedule_lock, flags);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
- spin_unlock_bh(&schedule_lock);
+ spin_unlock_irqrestore(&schedule_lock, flags);
+}
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+ kmem_cache_free(global.slab_priorities, p);
+}
+
+static void i915_global_scheduler_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_priorities);
+}
+
+static void i915_global_scheduler_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_priorities);
+}
+
+static struct i915_global_scheduler global = { {
+ .shrink = i915_global_scheduler_shrink,
+ .exit = i915_global_scheduler_exit,
+} };
+
+int __init i915_global_scheduler_init(void)
+{
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_dependencies)
+ return -ENOMEM;
+
+ global.slab_priorities = KMEM_CACHE(i915_priolist,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_priorities)
+ goto err_priorities;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_priorities:
+ kmem_cache_destroy(global.slab_dependencies);
+ return -ENOMEM;
}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index dbe9cb7ecd82..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,80 +8,22 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/list.h>
#include <linux/kernel.h>
-#include <uapi/drm/i915_drm.h>
+#include "i915_scheduler_types.h"
-struct drm_i915_private;
-struct i915_request;
-struct intel_engine_cs;
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-enum {
- I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
- I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
- I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- I915_PRIORITY_INVALID = INT_MIN
-};
-
-#define I915_USER_PRIORITY_SHIFT 2
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
-#define I915_PRIORITY_WAIT ((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
-
-struct i915_sched_attr {
- /**
- * @priority: execution and service priority
- *
- * All clients are equal, but some are more equal than others!
- *
- * Requests from a context with a greater (more positive) value of
- * @priority will be executed before those with a lower @priority
- * value, forming a simple QoS.
- *
- * The &drm_i915_private.kernel_context is assigned the lowest priority.
- */
- int priority;
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- *
- * There is no active component to the "scheduler". As we know the dependency
- * DAG of each request, we are able to insert it into a sorted queue when it
- * is ready, and are able to reorder its portion of the graph to accommodate
- * dynamic priority changes.
- */
-struct i915_sched_node {
- struct list_head signalers_list; /* those before us, we depend upon */
- struct list_head waiters_list; /* those after us, they depend upon us */
- struct list_head link;
- struct i915_sched_attr attr;
-};
-
-struct i915_dependency {
- struct i915_sched_node *signaler;
- struct list_head signal_link;
- struct list_head wait_link;
- struct list_head dfs_link;
- unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; \
+ (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
+ (plist)->used &= ~BIT(idx)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx], \
+ sched.link)
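
As a rough illustration of the consume-style iteration above, here is a standalone sketch in which each priority bucket is reduced to a counter and the used bitmask is drained lowest set bit first, mirroring the __ffs()-driven loop; the names and types are invented for the example.

#include <stdio.h>

#define NBUCKETS 4

struct toy_priolist {
        int requests[NBUCKETS];   /* stand-in for the per-bucket lists */
        unsigned int used;        /* bit i set => bucket i is non-empty */
};

/* Drain buckets lowest set bit first, clearing each bit once empty. */
static void toy_consume(struct toy_priolist *pl)
{
        while (pl->used) {
                int idx = __builtin_ctz(pl->used);   /* like __ffs() */

                while (pl->requests[idx] > 0) {
                        printf("submit request from bucket %d\n", idx);
                        pl->requests[idx]--;
                }
                pl->used &= ~(1u << idx);
        }
}

int main(void)
{
        struct toy_priolist pl = {
                .requests = { 1, 0, 2, 0 },
                .used = (1u << 0) | (1u << 2),
        };

        toy_consume(&pl);
        return 0;
}
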
void i915_sched_node_init(struct i915_sched_node *node);
@@ -90,12 +32,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_dependency *dep,
unsigned long flags);
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -105,4 +45,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+void __i915_priolist_free(struct i915_priolist *p);
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+ if (p->priority != I915_PRIORITY_NORMAL)
+ __i915_priolist_free(p);
+}
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
new file mode 100644
index 000000000000..f1af3916a808
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_TYPES_H_
+#define _I915_SCHEDULER_TYPES_H_
+
+#include <linux/list.h>
+
+#include "i915_priolist_types.h"
+#include "intel_engine_types.h"
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
+struct i915_sched_attr {
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct list_head link;
+ struct i915_sched_attr attr;
+ unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+ intel_engine_mask_t semaphores;
+};
+
+struct i915_dependency {
+ struct i915_sched_node *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_TYPES_H_ */
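
A toy model of the dependency web described in the comment above: each node counts its outstanding signalers and becomes ready once that count reaches zero. The real scheduler keeps intrusive lists in both directions so the graph can also be re-prioritised; this sketch only tracks readiness and uses invented names.

#include <stdbool.h>
#include <stdio.h>

#define MAX_WAITERS 4

struct toy_node {
        const char *name;
        int pending_signalers;                /* those before us */
        struct toy_node *waiters[MAX_WAITERS];/* those after us */
        int nr_waiters;
};

static void toy_add_dependency(struct toy_node *node, struct toy_node *signal)
{
        signal->waiters[signal->nr_waiters++] = node;
        node->pending_signalers++;
}

static void toy_signal(struct toy_node *node)
{
        int i;

        printf("%s completed\n", node->name);
        for (i = 0; i < node->nr_waiters; i++) {
                struct toy_node *w = node->waiters[i];

                if (--w->pending_signalers == 0)
                        printf("%s is now ready to run\n", w->name);
        }
}

int main(void)
{
        struct toy_node render = { .name = "render" };
        struct toy_node copy = { .name = "copy" };

        /* The copy must wait for the render to finish. */
        toy_add_dependency(&copy, &render);

        toy_signal(&render);   /* unblocks "copy" */
        return 0;
}
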
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d2f2a9c2fabd..95f3dab1b229 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -25,8 +25,10 @@
*/
#include <drm/i915_drm.h>
-#include "intel_drv.h"
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7c58b049ecb5..5387aafd3424 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
__i915_sw_fence_notify(fence, FENCE_FREE);
}
-static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
@@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-static void i915_sw_fence_await(struct i915_sw_fence *fence)
+void i915_sw_fence_await(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
@@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
-struct i915_sw_dma_fence_cb {
- struct dma_fence_cb base;
- struct i915_sw_fence *fence;
-};
-
struct i915_sw_dma_fence_cb_timer {
struct i915_sw_dma_fence_cb base;
struct dma_fence *dma;
@@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
return ret;
}
+static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+ i915_sw_fence_complete(cb->fence);
+}
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb)
+{
+ int ret;
+
+ debug_fence_assert(fence);
+
+ if (dma_fence_is_signaled(dma))
+ return 0;
+
+ cb->fence = fence;
+ i915_sw_fence_await(fence);
+
+ ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
+ if (ret == 0) {
+ ret = 1;
+ } else {
+ i915_sw_fence_complete(fence);
+ if (ret == -ENOENT) /* fence already signaled */
+ ret = 0;
+ }
+
+ return ret;
+}
+
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0e055ea0179f..9cb5c3b307a6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -9,14 +9,13 @@
#ifndef _I915_SW_FENCE_H_
#define _I915_SW_FENCE_H_
+#include <linux/dma-fence.h>
#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/notifier.h> /* for NOTIFY_DONE */
#include <linux/wait.h>
struct completion;
-struct dma_fence;
-struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
+ struct i915_sw_fence *fence;
+};
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
+
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
@@ -79,6 +88,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
+void i915_sw_fence_await(struct i915_sw_fence *fence);
+void i915_sw_fence_complete(struct i915_sw_fence *fence);
+
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) <= 0;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index b2202d2e58a2..5fbea0892f33 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -6,19 +6,32 @@
#include "i915_drv.h"
-#include "i915_timeline.h"
+#include "i915_active.h"
#include "i915_syncmap.h"
+#include "i915_timeline.h"
+
+#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
+#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
struct i915_timeline_hwsp {
- struct i915_vma *vma;
+ struct i915_gt_timelines *gt;
struct list_head free_link;
+ struct i915_vma *vma;
u64 free_bitmap;
};
-static inline struct i915_timeline_hwsp *
-i915_timeline_hwsp(const struct i915_timeline *tl)
+struct i915_timeline_cacheline {
+ struct i915_active active;
+ struct i915_timeline_hwsp *hwsp;
+ void *vaddr;
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+};
+
+static inline struct drm_i915_private *
+hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
- return tl->hwsp_ggtt->private;
+ return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
@@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
vma->private = hwsp;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
+ hwsp->gt = gt;
spin_lock(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
-static void hwsp_free(struct i915_timeline *timeline)
+static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
-
- hwsp = i915_timeline_hwsp(timeline);
- if (!hwsp) /* leave global HWSP alone! */
- return;
+ struct i915_gt_timelines *gt = hwsp->gt;
spin_lock(&gt->hwsp_lock);
@@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline)
if (!hwsp->free_bitmap)
list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
- hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+ GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
+ hwsp->free_bitmap |= BIT_ULL(cacheline);
/* And if no one is left using it, give the page back to the system */
if (hwsp->free_bitmap == ~0ull) {
@@ -115,9 +125,78 @@ static void hwsp_free(struct i915_timeline *timeline)
spin_unlock(&gt->hwsp_lock);
}
+static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ i915_vma_put(cl->hwsp->vma);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
+static void __cacheline_retire(struct i915_active *active)
+{
+ struct i915_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ i915_vma_unpin(cl->hwsp->vma);
+ if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
+ __idle_cacheline_free(cl);
+}
+
+static struct i915_timeline_cacheline *
+cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+ struct i915_timeline_cacheline *cl;
+ void *vaddr;
+
+ GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
+
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ kfree(cl);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_vma_get(hwsp->vma);
+ cl->hwsp = hwsp;
+ cl->vaddr = page_pack_bits(vaddr, cacheline);
+
+ i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+
+ return cl;
+}
+
+static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+{
+ if (cl && i915_active_acquire(&cl->active))
+ __i915_vma_pin(cl->hwsp->vma);
+}
+
+static void cacheline_release(struct i915_timeline_cacheline *cl)
+{
+ if (cl)
+ i915_active_release(&cl->active);
+}
+
+static void cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
+ cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+
+ if (i915_active_is_idle(&cl->active))
+ __idle_cacheline_free(cl);
+}
+
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *timeline,
- const char *name,
struct i915_vma *hwsp)
{
void *vaddr;
@@ -133,37 +212,47 @@ int i915_timeline_init(struct drm_i915_private *i915,
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
timeline->i915 = i915;
- timeline->name = name;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
+ timeline->hwsp_cacheline = NULL;
- timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
if (!hwsp) {
+ struct i915_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
+ cl = cacheline_alloc(hwsp->private, cacheline);
+ if (IS_ERR(cl)) {
+ __idle_hwsp_free(hwsp->private, cacheline);
+ return PTR_ERR(cl);
+ }
+
+ timeline->hwsp_cacheline = cl;
timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
- }
- timeline->hwsp_ggtt = i915_vma_get(hwsp);
- vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- hwsp_free(timeline);
- i915_vma_put(hwsp);
- return PTR_ERR(vaddr);
+ vaddr = page_mask_bits(cl->vaddr);
+ } else {
+ timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+
+ vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
}
timeline->hwsp_seqno =
memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
+
timeline->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
@@ -236,18 +325,19 @@ void i915_timeline_fini(struct i915_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
- GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
- hwsp_free(timeline);
- i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ if (timeline->hwsp_cacheline)
+ cacheline_free(timeline->hwsp_cacheline);
+ else
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+
i915_vma_put(timeline->hwsp_ggtt);
}
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp)
{
struct i915_timeline *timeline;
@@ -257,7 +347,7 @@ i915_timeline_create(struct drm_i915_private *i915,
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, name, global_hwsp);
+ err = i915_timeline_init(i915, timeline, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
@@ -284,6 +374,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ cacheline_acquire(tl->hwsp_cacheline);
timeline_add_to_active(tl);
return 0;
@@ -293,6 +384,157 @@ unpin:
return err;
}
+static u32 timeline_advance(struct i915_timeline *tl)
+{
+ GEM_BUG_ON(!tl->pin_count);
+ GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
+
+ return tl->seqno += 1 + tl->has_initial_breadcrumb;
+}
+
+static void timeline_rollback(struct i915_timeline *tl)
+{
+ tl->seqno -= 1 + tl->has_initial_breadcrumb;
+}
+
+static noinline int
+__i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ struct i915_timeline_cacheline *cl;
+ unsigned int cacheline;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err;
+
+ /*
+ * If there is an outstanding GPU reference to this cacheline,
+ * such as it being sampled by a HW semaphore on another timeline,
+ * we cannot wrap around our seqno value (the HW semaphore does

+ * a strict greater-than-or-equals compare, not i915_seqno_passed).
+ * So if the cacheline is still busy, we must detach ourselves
+ * from it and leave it inflight alongside its users.
+ *
+ * However, if nobody is watching and we can guarantee that nobody
+ * will, we could simply reuse the same cacheline.
+ *
+ * if (i915_active_request_is_signaled(&tl->last_request) &&
+ * i915_active_is_signaled(&tl->hwsp_cacheline->active))
+ * return 0;
+ *
+ * That seems unlikely for a busy timeline that needed to wrap in
+ * the first place, so just replace the cacheline.
+ */
+
+ vma = hwsp_alloc(tl, &cacheline);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_rollback;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err) {
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_rollback;
+ }
+
+ cl = cacheline_alloc(vma->private, cacheline);
+ if (IS_ERR(cl)) {
+ err = PTR_ERR(cl);
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_unpin;
+ }
+ GEM_BUG_ON(cl->hwsp->vma != vma);
+
+ /*
+ * Attach the old cacheline to the current request, so that we only
+ * free it after the current request is retired, which ensures that
+ * all writes into the cacheline from previous requests are complete.
+ */
+ err = i915_active_ref(&tl->hwsp_cacheline->active,
+ tl->fence_context, rq);
+ if (err)
+ goto err_cacheline;
+
+ cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
+ cacheline_free(tl->hwsp_cacheline);
+
+ i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
+ i915_vma_put(tl->hwsp_ggtt);
+
+ tl->hwsp_ggtt = i915_vma_get(vma);
+
+ vaddr = page_mask_bits(cl->vaddr);
+ tl->hwsp_offset = cacheline * CACHELINE_BYTES;
+ tl->hwsp_seqno =
+ memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+
+ tl->hwsp_offset += i915_ggtt_offset(vma);
+
+ cacheline_acquire(cl);
+ tl->hwsp_cacheline = cl;
+
+ *seqno = timeline_advance(tl);
+ GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
+ return 0;
+
+err_cacheline:
+ cacheline_free(cl);
+err_unpin:
+ i915_vma_unpin(vma);
+err_rollback:
+ timeline_rollback(tl);
+ return err;
+}
+
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ *seqno = timeline_advance(tl);
+
+ /* Replace the HWSP on wraparound for HW semaphores */
+ if (unlikely(!*seqno && tl->hwsp_cacheline))
+ return __i915_timeline_get_seqno(tl, rq, seqno);
+
+ return 0;
+}
+
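timeline_advance() bumps the software seqno by one (or by two when an initial breadcrumb is emitted), and i915_timeline_get_seqno() treats an advanced value of 0 as wraparound, at which point the HWSP cacheline is swapped out. A small standalone sketch of that wrap-detection arithmetic; struct toy_timeline and the toy_* helpers are illustrative, not the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_timeline {
	uint32_t seqno;
	bool has_initial_breadcrumb;
	unsigned int wraps; /* how many times the HWSP had to be replaced */
};

static uint32_t toy_advance(struct toy_timeline *tl)
{
	/* mirrors: tl->seqno += 1 + tl->has_initial_breadcrumb */
	tl->seqno += 1 + tl->has_initial_breadcrumb;
	return tl->seqno;
}

static uint32_t toy_get_seqno(struct toy_timeline *tl)
{
	uint32_t seqno = toy_advance(tl);

	if (seqno == 0) {	/* u32 overflow: the timeline has wrapped */
		tl->wraps++;	/* the driver replaces the HWSP cacheline here */
		seqno = toy_advance(tl);
	}
	return seqno;
}

int main(void)
{
	struct toy_timeline tl = { .seqno = UINT32_MAX - 3 };

	for (int i = 0; i < 8; i++)
		printf("seqno=%u wraps=%u\n", (unsigned)toy_get_seqno(&tl), tl.wraps);
	return 0;
}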
+static int cacheline_ref(struct i915_timeline_cacheline *cl,
+ struct i915_request *rq)
+{
+ return i915_active_ref(&cl->active, rq->fence.context, rq);
+}
+
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
+{
+ struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
+ struct i915_timeline *tl = from->timeline;
+ int err;
+
+ GEM_BUG_ON(to->timeline == tl);
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = i915_request_completed(from);
+ if (!err)
+ err = cacheline_ref(cl, to);
+ if (!err) {
+ if (likely(cl == tl->hwsp_cacheline)) {
+ *hwsp = tl->hwsp_offset;
+ } else { /* across a seqno wrap, recover the original offset */
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
+ CACHELINE_BYTES;
+ }
+ }
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
void i915_timeline_unpin(struct i915_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
@@ -300,6 +542,7 @@ void i915_timeline_unpin(struct i915_timeline *tl)
return;
timeline_remove_from_active(tl);
+ cacheline_release(tl->hwsp_cacheline);
/*
* Since this timeline is idle, all barriers upon which we were waiting
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 7bec7d2e45bf..27668a1a69a3 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -25,76 +25,14 @@
#ifndef I915_TIMELINE_H
#define I915_TIMELINE_H
-#include <linux/list.h>
-#include <linux/kref.h>
+#include <linux/lockdep.h>
#include "i915_active.h"
-#include "i915_request.h"
#include "i915_syncmap.h"
-#include "i915_utils.h"
-
-struct i915_vma;
-struct i915_timeline_hwsp;
-
-struct i915_timeline {
- u64 fence_context;
- u32 seqno;
-
- spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-
- unsigned int pin_count;
- const u32 *hwsp_seqno;
- struct i915_vma *hwsp_ggtt;
- u32 hwsp_offset;
-
- bool has_initial_breadcrumb;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head requests;
-
- /* Contains an RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_active_request last_request;
-
- /**
- * We track the most recent seqno that we wait on in every context so
- * that we only have to emit a new await and dependency on a more
- * recent sync point. As the contexts may be executed out-of-order, we
- * have to track each individually and can not rely on an absolute
- * global_seqno. When we know that all tracked fences are completed
- * (i.e. when the driver is idle), we know that the syncmap is
- * redundant and we can discard it without loss of generality.
- */
- struct i915_syncmap *sync;
-
- /**
- * Barrier provides the ability to serialize ordering between different
- * timelines.
- *
- * Users can call i915_timeline_set_barrier which will make all
- * subsequent submissions to this timeline be executed only after the
- * barrier has been completed.
- */
- struct i915_active_request barrier;
-
- struct list_head link;
- const char *name;
- struct drm_i915_private *i915;
-
- struct kref kref;
-};
+#include "i915_timeline_types.h"
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *tl,
- const char *name,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
@@ -119,7 +57,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp);
static inline struct i915_timeline *
@@ -160,25 +97,17 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
}
int i915_timeline_pin(struct i915_timeline *tl);
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
void i915_timeline_unpin(struct i915_timeline *tl);
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
- return i915_active_request_set(&tl->barrier, rq);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
new file mode 100644
index 000000000000..5256a0b5c5f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -0,0 +1,70 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_private;
+struct i915_vma;
+struct i915_timeline_cacheline;
+struct i915_syncmap;
+
+struct i915_timeline {
+ u64 fence_context;
+ u32 seqno;
+
+ spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
+ struct mutex mutex; /* protects the flow of requests */
+
+ unsigned int pin_count;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ struct i915_timeline_cacheline *hwsp_cacheline;
+
+ bool has_initial_breadcrumb;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_active_request_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_active_request last_request;
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and can not rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+
+ struct list_head link;
+ struct drm_i915_private *i915;
+
+ struct kref kref;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index eab313c3163c..12893304c8f8 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -18,6 +18,87 @@
/* watermark/fifo updates */
+TRACE_EVENT(intel_pipe_enable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+ TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+ TP_ARGS(crtc, crcs),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(u32, crcs, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+ __entry->crcs[3], __entry->crcs[4])
+);
+
TRACE_EVENT(intel_cpu_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
@@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
),
TP_fast_assign(
@@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global)
+ __entry->hw_id, __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, port)
__field(u32, prio)
),
@@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->prio, __entry->global_seqno, __entry->port)
+ __entry->prio, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, completed)
),
@@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global_seqno, __entry->completed)
+ __entry->completed)
);
#else
@@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
__field(unsigned int, flags)
),
@@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c
new file mode 100644
index 000000000000..c822d0aafd2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/nospec.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_user_extensions.h"
+#include "i915_utils.h"
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data)
+{
+ unsigned int stackdepth = 512;
+
+ while (ext) {
+ int i, err;
+ u32 name;
+ u64 next;
+
+ if (!stackdepth--) /* recursion vs useful flexibility */
+ return -E2BIG;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
+ err = check_user_mbz(&ext->rsvd[i]);
+ if (err)
+ return err;
+ }
+
+ if (get_user(name, &ext->name))
+ return -EFAULT;
+
+ err = -EINVAL;
+ if (name < count) {
+ name = array_index_nospec(name, count);
+ if (tbl[name])
+ err = tbl[name](ext, data);
+ }
+ if (err)
+ return err;
+
+ if (get_user(next, &ext->next_extension) ||
+ overflows_type(next, ext))
+ return -EFAULT;
+
+ ext = u64_to_user_ptr(next);
+ }
+
+ return 0;
+}
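i915_user_extensions() walks a user-supplied, singly linked chain of extension headers, bounding the walk depth and dispatching each header by name through a speculation-safe table lookup. A hedged userspace analogue of that chained-struct pattern; struct toy_ext, toy_walk and the handler table are invented for illustration and are not the uAPI:

#include <stdint.h>
#include <stdio.h>

/* Shape loosely mirrors struct i915_user_extension: next pointer + name. */
struct toy_ext {
	uint64_t next;	/* pointer to the next extension, 0 terminates */
	uint32_t name;	/* index into the handler table */
	uint32_t flags;	/* reserved, must be zero */
};

typedef int (*toy_fn)(struct toy_ext *ext, void *data);

static int handle_foo(struct toy_ext *ext, void *data) { (void)ext; (*(int *)data) += 1; return 0; }
static int handle_bar(struct toy_ext *ext, void *data) { (void)ext; (*(int *)data) += 10; return 0; }

static int toy_walk(struct toy_ext *ext, const toy_fn *tbl, unsigned int count, void *data)
{
	unsigned int depth = 512; /* guard against cycles, like the kernel code */

	while (ext) {
		int err;

		if (!depth--)
			return -1;
		if (ext->flags)			/* reserved fields must be zero */
			return -1;
		if (ext->name >= count || !tbl[ext->name])
			return -1;		/* unknown or unhandled extension */
		err = tbl[ext->name](ext, data);
		if (err)
			return err;
		ext = (struct toy_ext *)(uintptr_t)ext->next;
	}
	return 0;
}

int main(void)
{
	const toy_fn tbl[] = { handle_foo, handle_bar };
	struct toy_ext bar = { .next = 0, .name = 1 };
	struct toy_ext foo = { .next = (uintptr_t)&bar, .name = 0 };
	int sum = 0;

	printf("ret=%d sum=%d\n", toy_walk(&foo, tbl, 2, &sum), sum);
	return 0;
}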
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h
new file mode 100644
index 000000000000..a14bf6bba9a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef I915_USER_EXTENSIONS_H
+#define I915_USER_EXTENSIONS_H
+
+struct i915_user_extension;
+
+typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
+ void *data);
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data);
+
+#endif /* I915_USER_EXTENSIONS_H */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 540e20eb032c..2dbe8933b50a 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -105,6 +105,37 @@
__T; \
})
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+/*
+ * check_user_mbz: Check that a user value exists and is zero
+ *
+ * Frequently in our uABI we reserve space for future extensions, and
+ * two ensure that userspace is prepared we enforce that space must
+ * be zero. (Then any future extension can safely assume a default value
+ * of 0.)
+ *
+ * check_user_mbz() combines checking that the user pointer is accessible
+ * and that the contained value is zero.
+ *
+ * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
+ */
+#define check_user_mbz(U) ({ \
+ typeof(*(U)) mbz__; \
+ get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
+})
+
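check_user_mbz() folds the copy-from-user and the must-be-zero test into one expression: -EFAULT if the field cannot be read, -EINVAL if it is non-zero, 0 otherwise. A hedged userspace sketch of how a uAPI struct with reserved fields might be validated in that style; toy_get_user, toy_check_mbz and struct toy_uapi_arg are hypothetical stand-ins, not kernel or i915 interfaces:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace stand-in for get_user(): copy one field, pretend it can fault. */
static int toy_get_user(void *dst, const void *src, size_t len)
{
	if (!src)
		return -EFAULT;
	memcpy(dst, src, len);
	return 0;
}

/* Same shape as check_user_mbz(): -EFAULT, -EINVAL, or 0. */
#define toy_check_mbz(ptr) ({						\
	__typeof__(*(ptr)) mbz__;					\
	toy_get_user(&mbz__, (ptr), sizeof(mbz__)) ? -EFAULT :		\
		(mbz__ ? -EINVAL : 0);					\
})

/* Hypothetical ioctl argument with space reserved for future extensions. */
struct toy_uapi_arg {
	uint32_t handle;
	uint32_t flags;		/* currently must be zero */
	uint64_t rsvd[2];	/* must be zero so a later version can reuse it */
};

int main(void)
{
	struct toy_uapi_arg arg = { .handle = 1, .flags = 0, .rsvd = { 0, 7 } };
	int err;

	err = toy_check_mbz(&arg.flags);
	if (!err)
		err = toy_check_mbz(&arg.rsvd[1]);	/* non-zero -> -EINVAL */
	printf("err=%d\n", err);
	return 0;
}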
static inline u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t)ptr;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 869cf4a3b6de..94d3992b599d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,30 +60,31 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 magic;
u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
+ magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
- version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
- dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
- return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+ return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
struct _balloon_info_ {
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 551acc390046..ebe1b7bced98 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -28,7 +28,7 @@
void i915_check_vgpu(struct drm_i915_private *dev_priv);
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
static inline bool
intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b713bed20c38..961268f66c63 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -25,22 +25,35 @@
#include "i915_vma.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
+static struct i915_global_vma {
+ struct i915_global base;
+ struct kmem_cache *slab_vmas;
+} global;
+
+struct i915_vma *i915_vma_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+}
+
+void i915_vma_free(struct i915_vma *vma)
+{
+ return kmem_cache_free(global.slab_vmas, vma);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
- unsigned long entries[12];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
char buf[512];
if (!vma->node.stack) {
@@ -49,8 +62,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
return;
}
- depot_fetch_stack(vma->node.stack, &trace);
- snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+ nr_entries = stack_depot_fetch(vma->node.stack, &entries);
+ stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
vma->node.start, vma->node.size, reason, buf);
}
@@ -115,7 +128,7 @@ vma_create(struct drm_i915_gem_object *obj,
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
- vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -190,7 +203,7 @@ vma_create(struct drm_i915_gem_object *obj,
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return pos;
}
@@ -222,7 +235,7 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
err_vma:
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
@@ -803,8 +816,6 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@@ -825,7 +836,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
i915_active_fini(&vma->active);
- kmem_cache_free(i915->vmas, vma);
+ i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -1041,3 +1052,28 @@ unpin:
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
+
+static void i915_global_vma_shrink(void)
+{
+ kmem_cache_shrink(global.slab_vmas);
+}
+
+static void i915_global_vma_exit(void)
+{
+ kmem_cache_destroy(global.slab_vmas);
+}
+
+static struct i915_global_vma global = { {
+ .shrink = i915_global_vma_shrink,
+ .exit = i915_global_vma_exit,
+} };
+
+int __init i915_global_vma_init(void)
+{
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_vmas)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7c742027f866..6eab70953a57 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
#endif
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 641e0778fa9c..9d962ea1e635 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,9 +25,13 @@
* Jani Nikula <jani.nikula@intel.com>
*/
-#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "intel_connector.h"
+#include "intel_ddi.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -246,13 +250,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
}
@@ -399,11 +403,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
@@ -876,7 +880,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
@@ -1054,7 +1059,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
@@ -1146,13 +1152,11 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
- if (wakeref) {
- intel_display_power_put(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO,
- wakeref);
- }
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ wakeref);
}
/* set mode to DDI */
@@ -1194,11 +1198,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 pll_id;
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
- pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->port_clock =
+ cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
}
@@ -1367,7 +1370,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1417,15 +1420,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- /* fill mode info from VBT */
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1433,12 +1429,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
-
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7cf9290ea34a..8c8fae32ec50 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,6 +35,8 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_sprite.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@@ -126,6 +128,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
*/
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
+ new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
@@ -234,10 +237,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
- } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
* On gen11+'s HDR planes we only use the scaler for
* scaling. They have a dedicated chroma upsampler, so
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index db0965904439..d11681d71add 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -35,7 +35,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "intel_atomic_plane.h"
#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
struct intel_plane *intel_plane_alloc(void)
{
@@ -121,6 +124,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
+ new_crtc_state->c8_planes &= ~BIT(plane->id);
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -135,9 +139,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_NV12)
+ is_planar_yuv_format(new_plane_state->base.fb->format->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
+ if (new_plane_state->base.visible &&
+ new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ new_crtc_state->c8_planes |= BIT(plane->id);
+
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
@@ -214,6 +222,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, crtc_state, plane_state);
+}
+
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, crtc_state, plane_state);
+}
+
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc_state);
+}
+
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -238,8 +275,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
@@ -256,11 +292,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
new_plane_state =
intel_atomic_get_new_plane_state(state, master);
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, new_crtc_state, new_plane_state);
+ intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
}
}
}
@@ -280,13 +314,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
- }
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, new_crtc_state);
}
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h
new file mode 100644
index 000000000000..14678620440f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_PLANE_H__
+#define __INTEL_ATOMIC_PLANE_H__
+
+struct drm_plane;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *intel_state);
+
+#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 5104c6bbd66f..bca4cc025d3d 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -21,14 +21,16 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
#include <drm/i915_component.h>
#include <drm/intel_lpe_audio.h>
-#include "intel_drv.h"
-#include <drm/drm_edid.h>
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_drv.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
@@ -741,27 +743,91 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
-static void i915_audio_component_get_power(struct device *kdev)
+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+ bool enable)
{
- intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (WARN_ON(!state))
+ return;
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk =
+ enable ? 2 * 96000 : 0;
+
+ /*
+ * Protects dev_priv->cdclk.force_min_cdclk
+ * Need to lock this here in case we have no active pipes
+ * and thus wouldn't lock it during the commit otherwise.
+ */
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ &ctx);
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ WARN_ON(ret);
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
-static void i915_audio_component_put_power(struct device *kdev)
+static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- intel_display_power_put_unchecked(kdev_to_i915(kdev),
- POWER_DOMAIN_AUDIO);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ intel_wakeref_t ret;
+
+ /* Catch potential impedance mismatches before they occur! */
+ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+ ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+
+ /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
+ if (dev_priv->audio_power_refcount++ == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, true);
+
+ return ret;
+}
+
+static void i915_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+ /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+ if (--dev_priv->audio_power_refcount == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, false);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
}
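The audio component hooks now thread an opaque cookie (the display power wakeref, checked by BUILD_BUG_ON to fit in an unsigned long) from get_power() to the matching put_power(), with the first/last user also toggling the forced CDCLK. A hedged sketch of that acquire/release-with-cookie pattern in plain C; the toy_* names are illustrative, not the snd_hda/i915 component API:

#include <stdio.h>

static unsigned long next_cookie = 1;
static int refcount;

/* Acquire: take a reference and hand back a cookie identifying it. */
static unsigned long toy_get_power(void)
{
	if (refcount++ == 0)
		printf("first user: force CDCLK up\n");
	return next_cookie++;
}

/* Release: the caller must pass back exactly the cookie it was given. */
static void toy_put_power(unsigned long cookie)
{
	printf("releasing cookie %lu\n", cookie);
	if (--refcount == 0)
		printf("last user: relax CDCLK\n");
}

int main(void)
{
	unsigned long a = toy_get_power();
	unsigned long b = toy_get_power();

	toy_put_power(b);
	toy_put_power(a);
	return 0;
}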
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ unsigned long cookie;
u32 tmp;
if (!IS_GEN(dev_priv, 9))
return;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
@@ -779,7 +845,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
@@ -850,12 +916,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
+ unsigned long cookie;
int err = 0;
if (!HAS_DDI(dev_priv))
return 0;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
@@ -875,7 +942,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
unlock:
mutex_unlock(&dev_priv->av_mutex);
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -980,7 +1047,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1003,7 +1070,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/intel_audio.h
new file mode 100644
index 000000000000..a3657c7a7ba2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4364f42cac6b..1dc8d03ff127 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
+
+ if (bdb->version >= 226) {
+ u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+
+ wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ switch (wakeup_time) {
+ case 0:
+ wakeup_time = 500;
+ break;
+ case 1:
+ wakeup_time = 100;
+ break;
+ case 3:
+ wakeup_time = 50;
+ break;
+ default:
+ case 2:
+ wakeup_time = 2500;
+ break;
+ }
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ } else {
+ /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+ }
}
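The new VBT parsing above extracts a two-bit wakeup-time code per panel from a packed 32-bit field ((value >> (2 * panel_type)) & 0x3) and maps it to microseconds. A small standalone sketch of that decode, with the code-to-microseconds mapping taken from the switch above; the helper name and the example packed value are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Decode one panel's 2-bit PSR2 TP2/TP3 wakeup-time code into microseconds. */
static unsigned int decode_psr2_wakeup_us(uint32_t packed, int panel_type)
{
	switch ((packed >> (2 * panel_type)) & 0x3) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 50;
	case 2:
	default:
		return 2500;
	}
}

int main(void)
{
	/* Example packed field: panel 0 -> code 1, panel 1 -> code 3, panel 2 -> code 0 */
	uint32_t packed = (1u << 0) | (3u << 2) | (0u << 4);

	for (int panel = 0; panel < 3; panel++)
		printf("panel %d: %u us\n", panel, decode_psr2_wakeup_us(packed, panel));
	return 0;
}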
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -1222,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
if (!info->alternate_ddc_pin)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ if (p == port || !i->present ||
+ info->alternate_ddc_pin != i->alternate_ddc_pin)
continue;
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
@@ -1239,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc bin and
* system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dvi = false;
i->supports_hdmi = false;
@@ -1258,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_aux_channel != i->alternate_aux_channel)
+ if (p == port || !i->present ||
+ info->alternate_aux_channel != i->alternate_aux_channel)
continue;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
@@ -1275,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dp = false;
i->alternate_aux_channel = 0;
@@ -1324,48 +1351,57 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
-static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- u8 bdb_version)
+static enum port dvo_port_to_port(u8 dvo_port)
{
- struct child_device_config *it, *child = NULL;
- struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
- int i, j;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
- /* Each DDI port can have more than one value on the "DVO Port" field,
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
- int dvo_ports[][3] = {
- {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
- {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
- {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
- {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
- {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
- {DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ static const int dvo_ports[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+ [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};
+ enum port port;
+ int i;
- /*
- * Find the first child device to reference the port, report if more
- * than one found.
- */
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- it = dev_priv->vbt.child_dev + i;
-
- for (j = 0; j < 3; j++) {
- if (dvo_ports[port][j] == -1)
+ for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
+ for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
+ if (dvo_ports[port][i] == -1)
break;
- if (it->dvo_port == dvo_ports[port][j]) {
- if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- } else {
- child = it;
- }
- }
+ if (dvo_port == dvo_ports[port][i])
+ return port;
}
}
- if (!child)
+
+ return PORT_NONE;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv,
+ const struct child_device_config *child,
+ u8 bdb_version)
+{
+ struct ddi_vbt_port_info *info;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ enum port port;
+
+ port = dvo_port_to_port(child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ info = &dev_priv->vbt.ddi_port_info[port];
+
+ if (info->present) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
+ }
+
+ info->present = true;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1498,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- enum port port;
+ const struct child_device_config *child;
+ int i;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
- if (!dev_priv->vbt.child_dev_num)
- return;
-
if (bdb_version < 155)
return;
- for (port = PORT_A; port < I915_MAX_PORTS; port++)
- parse_ddi_port(dev_priv, port, bdb_version);
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ child = dev_priv->vbt.child_dev + i;
+
+ parse_ddi_port(dev_priv, child, bdb_version);
+ }
}
static void
@@ -2094,8 +2131,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
- (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
+ (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
+ (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 09ed90c0ba00..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,12 +23,11 @@
*/
#include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-
static void irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
@@ -82,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+ return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+ struct dma_fence_cb *cur, *tmp;
+
+ lockdep_assert_held(fence->lock);
+ lockdep_assert_irqs_disabled();
+
+ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+ INIT_LIST_HEAD(&fence->cb_list);
+}
+
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
LIST_HEAD(signal);
@@ -106,6 +135,10 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ continue;
/*
* Queue for execution after dropping the signaling
@@ -113,14 +146,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * so we need to acquire our reference to the request
- * before we cancel the breadcrumb.
- */
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
@@ -143,22 +168,21 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
- dma_fence_signal(&rq->fence);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+ spin_lock(&rq->lock);
+ __dma_fence_signal__notify(&rq->fence);
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
-
- return !list_empty(&signal);
}
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
- bool result;
-
local_irq_disable();
- result = intel_engine_breadcrumbs_irq(engine);
+ intel_engine_breadcrumbs_irq(engine);
local_irq_enable();
-
- return result;
}
static void signal_irq_work(struct irq_work *work)
@@ -251,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
- if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- return true;
-
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
- !__request_completed(rq)) {
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->hw_context;
struct list_head *pos;
+ spin_lock(&b->irq_lock);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
__intel_breadcrumbs_arm_irq(b);
/*
@@ -292,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_move_tail(&ce->signal_link, &b->signalers);
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ spin_unlock(&b->irq_lock);
}
- spin_unlock(&b->irq_lock);
return !__request_completed(rq);
}
@@ -302,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
- return;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
+ /*
+ * We must wait for b->irq_lock so that we know the interrupt handler
+ * has released its reference to the intel_context and has completed
+ * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+ * required).
+ */
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 15ba950dee00..ae40a8679314 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_cdclk.h"
#include "intel_drv.h"
/**
@@ -234,7 +235,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+ HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -468,7 +470,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->vco);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
mutex_unlock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv))
@@ -516,7 +518,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -543,11 +546,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -598,7 +601,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -624,11 +628,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -697,7 +701,8 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val;
@@ -964,7 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("DPLL0 not locked\n");
@@ -978,16 +983,17 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1123,16 +1129,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * skl_init_cdclk - Initialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for SKL and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1158,17 +1155,10 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * skl_uninit_cdclk - Uninitialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for SKL and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1176,7 +1166,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int bxt_calc_cdclk(int min_cdclk)
@@ -1323,7 +1313,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
@@ -1344,7 +1334,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK,
BXT_DE_PLL_LOCK,
@@ -1355,7 +1345,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1408,11 +1399,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
@@ -1421,6 +1411,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1490,16 +1483,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * bxt_init_cdclk - Initialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for BXT and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1525,17 +1509,10 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
}
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * bxt_uninit_cdclk - Uninitialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for BXT and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1543,7 +1520,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int cnl_calc_cdclk(int min_cdclk)
@@ -1663,7 +1640,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1704,13 +1682,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
cnl_cdclk_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
@@ -1847,7 +1827,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
}
static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
unsigned int cdclk = cdclk_state->cdclk;
unsigned int vco = cdclk_state->vco;
@@ -1872,6 +1853,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
+ /*
+ * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
+ * divider here synchronized to a pipe while CDCLK is on, nor will we
+ * need the corresponding vblank wait.
+ */
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
@@ -1959,16 +1945,7 @@ out:
icl_calc_voltage_level(cdclk_state->cdclk);
}
-/**
- * icl_init_cdclk - Initialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for ICL. This consists mainly of initializing
- * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
- * is generally done only during the display core initialization sequence, after
- * which the DMC will take care of turning CDCLK off/on as needed.
- */
-void icl_init_cdclk(struct drm_i915_private *dev_priv)
+static void icl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state sanitized_state;
u32 val;
@@ -2002,17 +1979,10 @@ sanitize:
sanitized_state.voltage_level =
icl_calc_voltage_level(sanitized_state.cdclk);
- icl_set_cdclk(dev_priv, &sanitized_state);
+ icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
-/**
- * icl_uninit_cdclk - Uninitialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for ICL. This is done only during the display core
- * uninitialization sequence.
- */
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2020,19 +1990,10 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
- icl_set_cdclk(dev_priv, &cdclk_state);
+ icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_init_cdclk - Initialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for CNL. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -2048,17 +2009,10 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_uninit_cdclk - Uninitialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for CNL. This is done only
- * during the display core uninitialization sequence.
- */
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2066,7 +2020,47 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+/**
+ * intel_cdclk_init - Initialize CDCLK
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_init_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_init_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_init_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_init_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_uninit - Uninitialize CDCLK
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_uninit_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_uninit_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_uninit_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
}
/**
@@ -2086,6 +2080,28 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
+ * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
+ * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require just a cd2x divider update, false if not.
+ */
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ /* Older hw doesn't have the capability */
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
+/**
* intel_cdclk_changed - Determine if two CDCLK states are different
* @a: first CDCLK state
* @b: second CDCLK state
@@ -2100,6 +2116,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
a->voltage_level != b->voltage_level;
}
+/**
+ * intel_cdclk_swap_state - make atomic CDCLK configuration effective
+ * @state: atomic state
+ *
+ * This is the CDCLK version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * Similarly to the atomic helpers this function does a complete swap,
+ * i.e. it also puts the old state into @state. This is used by the commit
+ * code to determine how CDCLK has changed (for instance did it increase or
+ * decrease).
+ */
+void intel_cdclk_swap_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ swap(state->cdclk.logical, dev_priv->cdclk.logical);
+ swap(state->cdclk.actual, dev_priv->cdclk.actual);
+}
+
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context)
{
@@ -2113,12 +2149,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
* intel_set_cdclk - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @cdclk_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
@@ -2128,7 +2166,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+ dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
"cdclk state doesn't match!\n")) {
@@ -2137,6 +2175,46 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
}
+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware before updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware after updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
@@ -2187,19 +2265,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/*
* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- *
- * FIXME: Check the actual, not default, BCLK being used.
- *
- * FIXME: This does not depend on ->has_audio because the higher CDCLK
- * is required for audio probe, also when there are no audio capable
- * displays connected at probe time. This leads to unnecessarily high
- * CDCLK when audio is not required.
- *
- * FIXME: This limit is only applied when there are displays connected
- * at probe time. If we probe without displays, we'll still end up using
- * the platform minimum CDCLK, failing audio probe.
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2239,7 +2306,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
intel_state->min_cdclk[i] = min_cdclk;
}
- min_cdclk = 0;
+ min_cdclk = intel_state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
@@ -2300,7 +2367,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
- cdclk = vlv_calc_cdclk(dev_priv, 0);
+ cdclk = vlv_calc_cdclk(dev_priv,
+ intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2333,7 +2401,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = bdw_calc_cdclk(0);
+ cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2405,7 +2473,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = skl_calc_cdclk(0, vco);
+ cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
@@ -2444,10 +2512,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(0);
+ cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(0);
+ cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
@@ -2483,7 +2551,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = cnl_calc_cdclk(0);
+ cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2519,7 +2587,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = icl_calc_cdclk(0, ref);
+ cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2560,7 +2628,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@@ -2668,7 +2736,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -2723,7 +2791,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@@ -2744,18 +2812,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.set_cdclk = bdw_set_cdclk;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = icl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- bdw_modeset_calc_cdclk;
+ cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
@@ -2764,23 +2827,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
skl_modeset_calc_cdclk;
- } else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
+ } else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- cnl_modeset_calc_cdclk;
- } else if (IS_ICELAKE(dev_priv)) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ bdw_modeset_calc_cdclk;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = chv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = vlv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
}
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.get_cdclk = icl_get_cdclk;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_BC(dev_priv))
- dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
+ else if (IS_GEN9_BC(dev_priv))
+ dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_cdclk = bdw_get_cdclk;
else if (IS_HASWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/intel_cdclk.h
new file mode 100644
index 000000000000..4d6f7f5f8930
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cdclk.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc_state;
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init(struct drm_i915_private *i915);
+void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+void intel_cdclk_swap_state(struct intel_atomic_state *state);
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
+
+#endif /* __INTEL_CDCLK_H__ */
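
The sketch below is an editor's illustration and not part of this patch: it shows how a commit path could sequence the pre/post plane-update CDCLK helpers and the cd2x check declared in intel_cdclk.h above. The wrapper function, its parameters and the pipe-selection policy are assumptions; only the intel_cdclk_*() calls and the increase-before/decrease-after ordering come from the changes in this series.

/*
 * Illustrative only -- the caller name, its parameters and the "commit
 * planes" placeholder are hypothetical; the intel_cdclk_*() helpers and the
 * old <= new / old > new split are taken from the patch above.
 */
static void example_commit_cdclk(struct drm_i915_private *dev_priv,
				 const struct intel_cdclk_state *old_cdclk,
				 const struct intel_cdclk_state *new_cdclk,
				 enum pipe active_pipe)
{
	enum pipe pipe = INVALID_PIPE;

	/*
	 * Synchronize the update to a pipe only when the two states differ
	 * by nothing more than the cd2x divider; otherwise the pipes are
	 * expected to be shut down and INVALID_PIPE is used.
	 */
	if (intel_cdclk_needs_cd2x_update(dev_priv, old_cdclk, new_cdclk))
		pipe = active_pipe;

	/* Raise CDCLK before the planes are reprogrammed... */
	intel_set_cdclk_pre_plane_update(dev_priv, old_cdclk, new_cdclk, pipe);

	/* ... plane commit would go here (placeholder) ... */

	/* ...and lower it only once the planes no longer need the old rate. */
	intel_set_cdclk_post_plane_update(dev_priv, old_cdclk, new_cdclk, pipe);
}
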
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 71a1f12c6b2a..9093daabc290 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "intel_color.h"
#include "intel_drv.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -40,23 +41,6 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
-
-/* Post offset values for RGB->YCBCR conversion */
-#define POSTOFF_RGB_TO_YUV_HI 0x800
-#define POSTOFF_RGB_TO_YUV_ME 0x100
-#define POSTOFF_RGB_TO_YUV_LO 0x800
-
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
-#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
-#define CSC_RGB_TO_YUV_BU 0x37e80000
-#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
-#define CSC_RGB_TO_YUV_BY 0xb5280000
-#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
-#define CSC_RGB_TO_YUV_BV 0x1e080000
-
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -69,10 +53,45 @@
#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define ILK_CSC_COEFF_LIMITED_RANGE \
- ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define ILK_CSC_COEFF_1_0 \
- ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+static const u16 ilk_csc_off_zero[3] = {};
+
+static const u16 ilk_csc_coeff_identity[9] = {
+ ILK_CSC_COEFF_1_0, 0, 0,
+ 0, ILK_CSC_COEFF_1_0, 0,
+ 0, 0, ILK_CSC_COEFF_1_0,
+};
+
+static const u16 ilk_csc_postoff_limited_range[3] = {
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+static const u16 ilk_csc_coeff_limited_range[9] = {
+ ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+ 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+ 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/*
+ * These values are direct register values specified in the Bspec
+ * for the RGB->YUV conversion matrix (colorspace BT709).
+ */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+ 0x1e08, 0x9cc0, 0xb528,
+ 0x2ba8, 0x09d8, 0x37e8,
+ 0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Post offset values for RGB->YCBCR conversion */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+ 0x0800, 0x0100, 0x0800,
+};
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
@@ -113,145 +132,188 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ if (INTEL_GEN(dev_priv) >= 7) {
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ }
}
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void icl_update_output_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool limited_color_range = false;
enum pipe pipe = crtc->pipe;
- u16 coeffs[9] = {};
- int i;
+
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
- if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
- limited_color_range = crtc_state->limited_color_range;
+ return crtc_state->limited_color_range &&
+ (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_GEN_RANGE(dev_priv, 9, 10));
+}
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
- ilk_load_ycbcr_conversion_matrix(crtc);
- return;
- } else if (crtc_state->base.ctm) {
- struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
- const u64 *input;
- u64 temp[9];
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+ u16 coeffs[9])
+{
+ const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const u64 *input;
+ u64 temp[9];
+ int i;
- if (limited_color_range)
- input = ctm_mult_by_limited(temp, ctm->matrix);
- else
- input = ctm->matrix;
+ if (ilk_csc_limited_range(crtc_state))
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0; i < 9; i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
- * Convert fixed point S31.32 input to format supported by the
+ * Clamp input value to min/max supported by
* hardware.
*/
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
-
- /*
- * Clamp input value to min/max supported by
- * hardware.
- */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
-
- /* sign bit */
- if (CTM_COEFF_NEGATIVE(input[i]))
- coeffs[i] |= 1 << 15;
-
- if (abs_coeff < CTM_COEFF_0_125)
- coeffs[i] |= (3 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 12);
- else if (abs_coeff < CTM_COEFF_0_25)
- coeffs[i] |= (2 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 11);
- else if (abs_coeff < CTM_COEFF_0_5)
- coeffs[i] |= (1 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 10);
- else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
- else if (abs_coeff < CTM_COEFF_2_0)
- coeffs[i] |= (7 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 8);
- else
- coeffs[i] |= (6 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 7);
- }
- } else {
- /*
- * Load an identity matrix if no coefficients are provided.
- *
- * TODO: Check what kind of values actually come out of the
- * pipe with these coeff/postoff values and adjust to get the
- * best accuracy. Perhaps we even need to take the bpc value
- * into consideration.
- */
- for (i = 0; i < 3; i++) {
- if (limited_color_range)
- coeffs[i * 3 + i] =
- ILK_CSC_COEFF_LIMITED_RANGE;
- else
- coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
- }
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
}
+}
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
+
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+ limited_color_range ?
+ ilk_csc_postoff_limited_range :
+ ilk_csc_off_zero);
+ } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (limited_color_range) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
+ } else if (crtc_state->csc_enable) {
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the degamma
+ * LUT is needed but the CSC is not, we need to load an
+ * identity matrix.
+ */
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
- if (INTEL_GEN(dev_priv) > 6) {
- u16 postoff = 0;
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_identity,
+ ilk_csc_off_zero);
+ }
- if (limited_color_range)
- postoff = (16 * (1 << 12) / 255) & 0x1fff;
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
- } else {
- u32 mode = CSC_MODE_YUV_TO_RGB;
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
- if (limited_color_range)
- mode |= CSC_BLACK_SCREEN_OFFSET;
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ coeff, ilk_csc_off_zero);
+ }
- I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (crtc_state->limited_color_range) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
}
+
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/*
@@ -262,7 +324,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 mode;
if (crtc_state->base.ctm) {
const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
@@ -296,12 +357,30 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
- mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy_gamma(crtc_state)) {
- mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
- }
- I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+ I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+}
+
+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0xff) << 16 |
+ (color->green & 0xff) << 8 |
+ (color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 8) << 16 |
+ (color->green >> 8) << 8 |
+ (color->blue >> 8);
+}
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
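
A quick illustration of the palette word layouts introduced above (not part of the patch): the i965 "10.6" format spreads each 16-bit channel across an even/odd register pair (low byte, then high byte), whereas ilk_lut_10() packs the top 10 bits of each channel into a single dword. The sample values below are arbitrary and the snippet is plain hosted C with no DRM headers.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* sample 16-bit channel values (red, green, blue) */
	const uint16_t r = 0xabcd, g = 0x1234, b = 0xff00;

	/* even dword: low 8 bits of each channel, as in i965_lut_10p6_ldw() */
	uint32_t ldw = (r & 0xff) << 16 | (g & 0xff) << 8 | (b & 0xff);
	/* odd dword: high 8 bits of each channel, as in i965_lut_10p6_udw() */
	uint32_t udw = (r >> 8) << 16 | (g >> 8) << 8 | (b >> 8);

	printf("PALETTE even dword = 0x%08x\n", ldw); /* 0x00cd3400 */
	printf("PALETTE odd  dword = 0x%08x\n", udw); /* 0x00ab12ff */
	return 0;
}
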
/* Loads the legacy palette/gamma unit for the CRTC. */
@@ -334,15 +413,6 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
}
- } else {
- for (i = 0; i < 256; i++) {
- u32 word = (i << 16) | (i << 8) | i;
-
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
}
}
@@ -351,6 +421,34 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
+static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+}
+
+static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -361,106 +459,219 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
-static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
enum pipe pipe = crtc->pipe;
+ u32 val = 0;
- I915_WRITE(PREC_PAL_INDEX(pipe),
- PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black, but apply pipe gamma and CSC appropriately
+ * so that its handling will match how we program our planes.
+ */
+ if (crtc_state->gamma_enable)
+ val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+ if (crtc_state->csc_enable)
+ val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+ I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
- if (degamma_lut) {
- const struct drm_color_lut *lut = degamma_lut->data;
+ I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- for (i = 0; i < lut_size; i++) {
- u32 word =
- drm_color_lut_extract(lut[i].red, 10) << 20 |
- drm_color_lut_extract(lut[i].green, 10) << 10 |
- drm_color_lut_extract(lut[i].blue, 10);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_load_csc_matrix(crtc_state);
+ else
+ ilk_load_csc_matrix(crtc_state);
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ for (i = 0; i < lut_size - 1; i++) {
+ I915_WRITE(PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ I915_WRITE(PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
+
+ I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
+ I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
+ I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
}
-static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ i965_load_lut_10p6(crtc, gamma_lut);
+}
+
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
+ for (i = 0; i < lut_size; i++)
+ I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+}
- I915_WRITE(PREC_PAL_INDEX(pipe),
- (offset ? PAL_PREC_SPLIT_MODE : 0) |
- PAL_PREC_AUTO_INCREMENT |
- offset);
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- if (gamma_lut) {
- const struct drm_color_lut *lut = gamma_lut->data;
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ ilk_load_lut_10(crtc, gamma_lut);
+}
- for (i = 0; i < lut_size; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 10) << 20) |
- (drm_color_lut_extract(lut[i].green, 10) << 10) |
- drm_color_lut_extract(lut[i].blue, 10);
+static int ivb_lut_10_size(u32 prec_index)
+{
+ if (prec_index & PAL_PREC_SPLIT_MODE)
+ return 512;
+ else
+ return 1024;
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- /* Program the max register to clamp values > 1.0. */
- i = lut_size - 1;
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
- drm_color_lut_extract(lut[i].red, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
- drm_color_lut_extract(lut[i].green, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
- drm_color_lut_extract(lut[i].blue, 16));
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ }
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette to be
+ * written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
}
+
+ /*
+	 * Reset the index, otherwise it prevents the legacy palette from
+	 * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
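
For the "discard half the user entries" comment in ivb_load_lut_10()/bdw_load_lut_10() above, here is a small standalone illustration (not part of the patch) of the index mapping in split gamma mode, where the hardware LUT holds 512 entries but the user blob may hold 1024:

#include <stdio.h>

int main(void)
{
	const int hw_lut_size = 512, lut_size = 1024;	/* split gamma mode */
	const int idx[] = { 0, 1, 2, 255, 510, 511 };
	int i;

	/* Same arithmetic as &lut[i * (lut_size - 1) / (hw_lut_size - 1)] above */
	for (i = 0; i < (int)(sizeof(idx) / sizeof(idx[0])); i++)
		printf("hw entry %3d <- user entry %4d\n", idx[i],
		       idx[i] * (lut_size - 1) / (hw_lut_size - 1));

	/*
	 * Prints 0<-0, 1<-2, 2<-4, 255<-510, 510<-1020, 511<-1023:
	 * roughly every other user entry is kept, and the mapping always
	 * lands exactly on the last user entry.
	 */
	return 0;
}
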
-/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
+static void ivb_load_lut_10_max(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ /* Program the max register to clamp values > 1.0. */
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+ /*
+ * Program the gc max 2 register to clamp values > 1.0.
+ * ToDo: Extend the ABI to be able to program values
+ * from 3.0 to 7.0
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ }
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
} else {
- bdw_load_degamma_lut(crtc_state);
- bdw_load_gamma_lut(crtc_state,
- INTEL_INFO(dev_priv)->color.degamma_lut_size);
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ ivb_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
+
+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ } else {
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+ bdw_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -469,7 +680,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = 33;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
u32 i;
/*
@@ -480,39 +692,95 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++) {
+ /*
+		 * The first 33 entries represent the range 0 to 1.0; the
+		 * 34th and 35th entries represent the extended range
+		 * inputs 3.0 and 7.0 respectively, currently clamped
+		 * at 1.0. Since the precision is 16 bits, the user
+		 * value can be written directly to the register.
+		 * The pipe degamma table on GLK+ doesn't support
+		 * different values per channel, so this just programs
+		 * the green value (assumed equal to red and blue)
+		 * into the LUT registers.
+		 * ToDo: Extend to max 7.0 by accepting a 32 bit input
+		 * value instead of just 16.
+ */
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < 35)
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ u32 i;
+
/*
- * FIXME: The pipe degamma table in geminilake doesn't support
- * different values per channel, so this just loads a linear table.
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
*/
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
for (i = 0; i < lut_size; i++) {
- u32 v = (i * (1 << 16)) / (lut_size - 1);
+ u32 v = (i << 16) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- glk_load_degamma_lut(crtc_state);
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+	 * by csc_enable. Hence, for the cases where the CSC is
+	 * needed but the degamma LUT is not, we need to load a
+	 * linear degamma LUT. In fact we'll just always load
+	 * the degamma LUT so that we don't have to reload
+ * it every time the pipe CSC is being enabled.
+ */
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+ else
+ glk_load_degamma_lut_linear(crtc_state);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
} else {
- bdw_load_gamma_lut(crtc_state, 0);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+
+ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else {
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -527,7 +795,7 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
cherryview_load_csc_matrix(crtc_state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts_internal(crtc_state, gamma_lut);
+ i9xx_load_luts(crtc_state);
return;
}
@@ -566,12 +834,6 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
}
}
-
- /*
- * Also program a linear LUT in the legacy block (behind the
- * CGM block).
- */
- i9xx_load_luts_internal(crtc_state, NULL);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -585,8 +847,64 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- if (dev_priv->display.color_commit)
- dev_priv->display.color_commit(crtc_state);
+ dev_priv->display.color_commit(crtc_state);
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ return dev_priv->display.color_check(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ /*
+ * On pre-SKL the pipe gamma enable and pipe csc enable for
+ * the pipe bottom color are configured via the primary plane.
+ * We have to reconfigure that even if the plane is inactive.
+ */
+ return crtc_state->active_planes & BIT(plane->id) ||
+ (INTEL_GEN(dev_priv) < 9 &&
+ plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ if (!new_crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ return 0;
+
+ if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+ new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+ return 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if (!need_plane_update(plane, new_crtc_state))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ return 0;
}
static int check_lut_size(const struct drm_property_blob *lut, int expected)
@@ -606,7 +924,7 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
-int intel_color_check(struct intel_crtc_state *crtc_state)
+static int check_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
@@ -614,17 +932,19 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
+ /* Always allow legacy gamma LUT with no further checking. */
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ /* C8 relies on its palette being stored in the legacy LUT */
+ if (crtc_state->c8_planes)
+ return -EINVAL;
+
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
- /* Always allow legacy gamma LUT with no further checking. */
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
- return 0;
- }
-
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
return -EINVAL;
@@ -633,12 +953,270 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
drm_color_lut_check(gamma_lut, gamma_tests))
return -EINVAL;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
- else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
else
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 cgm_mode = 0;
+
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ if (crtc_state->base.degamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+ if (crtc_state->base.ctm)
+ cgm_mode |= CGM_PIPE_MODE_CSC;
+ if (crtc_state->base.gamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+ return cgm_mode;
+}
+
+/*
+ * CHV color pipeline:
+ * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
+ * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
+ *
+ * We always bypass the WGC csc and use the CGM csc
+ * instead since it has degamma and better precision.
+ */
+static int chv_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pipe gamma will be used only for the legacy LUT.
+ * Otherwise we bypass it and use the CGM gamma instead.
+ */
+ crtc_state->gamma_enable =
+ crtc_state_is_legacy_gamma(crtc_state) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+ crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int ilk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /*
+ * We don't expose the ctm on ilk/snb currently,
+ * nor do we enable YCbCr output. Also RGB limited
+ * range output is handled by the hw automagically.
+ */
+ crtc_state->csc_enable = false;
+
+ crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else if (crtc_state->base.gamma_lut &&
+ crtc_state->base.degamma_lut)
+ return GAMMA_MODE_MODE_SPLIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+ /*
+ * CSC comes after the LUT in degamma, RGB->YCbCr,
+ * and RGB full->limited range mode.
+ */
+ if (crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ limited_color_range)
+ return 0;
+
+ return CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ivb_color_check(struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ (crtc_state->base.gamma_lut ||
+ crtc_state->base.degamma_lut) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || limited_color_range;
+
+ crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = ivb_csc_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int glk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /* On GLK+ the degamma LUT is controlled by csc_enable */
+ crtc_state->csc_enable =
+ crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || crtc_state->limited_color_range;
+
+ crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 gamma_mode = 0;
+
+ if (crtc_state->base.degamma_lut)
+ gamma_mode |= PRE_CSC_GAMMA_ENABLE;
+
+ if (crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes)
+ gamma_mode |= POST_CSC_GAMMA_ENABLE;
+
+ if (!crtc_state->base.gamma_lut ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ gamma_mode |= GAMMA_MODE_MODE_8BIT;
+ else
+ gamma_mode |= GAMMA_MODE_MODE_10BIT;
+
+ return gamma_mode;
+}
+
+static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 csc_mode = 0;
+
+ if (crtc_state->base.ctm)
+ csc_mode |= ICL_CSC_ENABLE;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->limited_color_range)
+ csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+ return csc_mode;
+}
+
+static int icl_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = icl_csc_mode(crtc_state);
return 0;
}
@@ -646,30 +1224,55 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_luts = i9xx_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
- IS_BROXTON(dev_priv)) {
- dev_priv->display.load_luts = broadwell_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_luts = glk_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.color_check = chv_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = cherryview_load_luts;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i965_load_luts;
+ } else {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i9xx_load_luts;
+ }
} else {
- dev_priv->display.load_luts = i9xx_load_luts;
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.color_check = icl_color_check;
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.color_check = glk_color_check;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.color_check = ivb_color_check;
+ else
+ dev_priv->display.color_check = ilk_color_check;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ dev_priv->display.color_commit = skl_color_commit;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ dev_priv->display.color_commit = hsw_color_commit;
+ else
+ dev_priv->display.color_commit = ilk_color_commit;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.load_luts = icl_load_luts;
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.load_luts = glk_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.load_luts = bdw_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.load_luts = ivb_load_luts;
+ else
+ dev_priv->display.load_luts = ilk_load_luts;
}
- /* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
- drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(dev_priv)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev_priv)->color.gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&crtc->base,
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ has_ctm,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/intel_color.h
new file mode 100644
index 000000000000..b8a3ce609587
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_H__
+#define __INTEL_COLOR_H__
+
+struct intel_crtc_state;
+struct intel_crtc;
+
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
index 3d0271cebf99..2bf4359d7e41 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -239,7 +239,8 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
for_each_combo_port_reverse(dev_priv, port) {
u32 val;
- if (!icl_combo_phy_verify_state(dev_priv, port))
+ if (port == PORT_A &&
+ !icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index ee16758747c5..073b6c3ab7cc 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -23,12 +23,17 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
@@ -88,6 +93,8 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(intel_connector->detect_edid);
+ intel_hdcp_cleanup(intel_connector);
+
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
@@ -265,3 +272,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
+
+void
+intel_attach_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/intel_connector.h
new file mode 100644
index 000000000000..93a7375c8196
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_connector.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONNECTOR_H__
+#define __INTEL_CONNECTOR_H__
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct edid;
+struct i2c_adapter;
+struct intel_connector;
+struct intel_encoder;
+
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+void intel_attach_colorspace_property(struct drm_connector *connector);
+
+#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
new file mode 100644
index 000000000000..924cc556223a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -0,0 +1,270 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = NULL;
+ struct rb_node *p;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ p = ctx->hw_contexts.rb_node;
+ while (p) {
+ struct intel_context *this =
+ rb_entry(p, struct intel_context, node);
+
+ if (this->engine == engine) {
+ GEM_BUG_ON(this->gem_context != ctx);
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct rb_node **p, *parent;
+ int err = 0;
+
+ spin_lock(&ctx->hw_contexts_lock);
+
+ parent = NULL;
+ p = &ctx->hw_contexts.rb_node;
+ while (*p) {
+ struct intel_context *this;
+
+ parent = *p;
+ this = rb_entry(parent, struct intel_context, node);
+
+ if (this->engine == engine) {
+ err = -EEXIST;
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ if (!err) {
+ rb_link_node(&ce->node, parent, p);
+ rb_insert_color(&ce->node, &ctx->hw_contexts);
+ }
+
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+void __intel_context_remove(struct intel_context *ce)
+{
+ struct i915_gem_context *ctx = ce->gem_context;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ rb_erase(&ce->node, &ctx->hw_contexts);
+ spin_unlock(&ctx->hw_contexts_lock);
+}
+
+static struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce, *pos;
+
+ ce = intel_context_lookup(ctx, engine);
+ if (likely(ce))
+ return ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+
+ pos = __intel_context_insert(ctx, engine, ce);
+ if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+ intel_context_free(ce);
+
+ GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+ return pos;
+}
+
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+ __acquires(ce->pin_mutex)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ return ce;
+}
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ err = ce->ops->pin(ce);
+ if (err)
+ goto err;
+
+ i915_gem_context_get(ctx);
+ GEM_BUG_ON(ce->gem_context != ctx);
+
+ mutex_lock(&ctx->mutex);
+ list_add(&ce->active_link, &ctx->active_engines);
+ mutex_unlock(&ctx->mutex);
+
+ intel_context_get(ce);
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ }
+
+ atomic_inc(&ce->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ mutex_unlock(&ce->pin_mutex);
+ return ce;
+
+err:
+ mutex_unlock(&ce->pin_mutex);
+ return ERR_PTR(err);
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ /* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
+ mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+ if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ ce->ops->unpin(ce);
+
+ mutex_lock(&ce->gem_context->mutex);
+ list_del(&ce->active_link);
+ mutex_unlock(&ce->gem_context->mutex);
+
+ i915_gem_context_put(ce->gem_context);
+ intel_context_put(ce);
+ }
+
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
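intel_context_pin() takes a lock-free fast path (atomic_inc_not_zero()) when the context is already pinned and only grabs pin_mutex for the 0 -> 1 transition; intel_context_unpin() mirrors that with atomic_add_unless(&ce->pin_count, -1, 1), so only the final 1 -> 0 transition runs the teardown under the mutex. A generic userspace sketch of the same pattern, using C11 atomics and pthreads rather than the kernel primitives (names and bodies are made up):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int pin_count;
    pthread_mutex_t pin_mutex;
};

static void hw_setup(struct obj *o)    { (void)o; printf("setup\n"); }    /* placeholder */
static void hw_teardown(struct obj *o) { (void)o; printf("teardown\n"); } /* placeholder */

static void obj_pin(struct obj *o)
{
    int cur = atomic_load(&o->pin_count);

    /* Fast path: already pinned, bump the count without the lock. */
    while (cur > 0)
        if (atomic_compare_exchange_weak(&o->pin_count, &cur, cur + 1))
            return;

    /* Slow path: serialize the 0 -> 1 transition. */
    pthread_mutex_lock(&o->pin_mutex);
    if (atomic_load(&o->pin_count) == 0)
        hw_setup(o);
    atomic_fetch_add(&o->pin_count, 1);
    pthread_mutex_unlock(&o->pin_mutex);
}

static void obj_unpin(struct obj *o)
{
    int cur = atomic_load(&o->pin_count);

    /* Fast path: not the last pin, drop the count without the lock. */
    while (cur > 1)
        if (atomic_compare_exchange_weak(&o->pin_count, &cur, cur - 1))
            return;

    /* Slow path: only the final 1 -> 0 transition tears down, under the lock. */
    pthread_mutex_lock(&o->pin_mutex);
    if (atomic_fetch_sub(&o->pin_count, 1) == 1)
        hw_teardown(o);
    pthread_mutex_unlock(&o->pin_mutex);
}

int main(void)
{
    struct obj o = { 0, PTHREAD_MUTEX_INITIALIZER };

    obj_pin(&o);    /* slow path: setup */
    obj_pin(&o);    /* fast path */
    obj_unpin(&o);  /* fast path */
    obj_unpin(&o);  /* slow path: teardown */
    return 0;
}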
+
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->saturated = 0;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ /* Use the whole device by default */
+ ce->sseu = intel_device_default_sseu(ctx->i915);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
new file mode 100644
index 000000000000..ebc861b1a49e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+static inline void intel_context_pin_unlock(struct intel_context *ce)
+__releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
+#endif /* __INTEL_CONTEXT_H__ */
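As a usage illustration of the pin-lock helpers declared above, a hypothetical caller could look like the sketch below (example_inspect_sseu and its body are made up; only the intel_context_* calls come from this header):

static int example_inspect_sseu(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine)
{
    struct intel_context *ce;

    ce = intel_context_pin_lock(ctx, engine); /* may return ERR_PTR(-EINTR) */
    if (IS_ERR(ce))
        return PTR_ERR(ce);

    if (intel_context_is_pinned(ce)) {
        /*
         * While the pin lock is held the context can neither be
         * bound to nor unbound from the GPU, so the pinned state
         * (and e.g. ce->sseu) can be inspected safely here.
         */
    }

    intel_context_pin_unlock(ce);
    return 0;
}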
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
new file mode 100644
index 000000000000..339c7437fe82
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -0,0 +1,77 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_TYPES__
+#define __INTEL_CONTEXT_TYPES__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+#include "intel_engine_types.h"
+
+struct i915_gem_context;
+struct i915_vma;
+struct intel_context;
+struct intel_ring;
+
+struct intel_context_ops {
+ int (*pin)(struct intel_context *ce);
+ void (*unpin)(struct intel_context *ce);
+
+ void (*reset)(struct intel_context *ce);
+ void (*destroy)(struct kref *kref);
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+struct intel_context {
+ struct kref ref;
+
+ struct i915_gem_context *gem_context;
+ struct intel_engine_cs *engine;
+ struct intel_engine_cs *active;
+
+ struct list_head active_link;
+ struct list_head signal_link;
+ struct list_head signals;
+
+ struct i915_vma *state;
+ struct intel_ring *ring;
+
+ u32 *lrc_reg_state;
+ u64 lrc_desc;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
+ /**
+ * active_tracker: Active tracker for the external rq activity
+ * on this intel_context object.
+ */
+ struct i915_active_request active_tracker;
+
+ const struct intel_context_ops *ops;
+ struct rb_node node;
+
+ /** sseu: Control eu/slice partitioning */
+ struct intel_sseu sseu;
+};
+
+#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3716b2ee362f..b665c370111b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,13 +27,18 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_drv.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -435,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000))
@@ -489,7 +494,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000)) {
@@ -542,7 +547,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/intel_crt.h
new file mode 100644
index 000000000000..1b3fba359efc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_H__
+#define __INTEL_CRT_H__
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_encoder;
+struct drm_i915_private;
+
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
+void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_reset(struct drm_encoder *encoder);
+
+#endif /* __INTEL_CRT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index e8ac04c33e29..f43c2a2563a5 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_csr.h"
/**
* DOC: csr support for dmc
@@ -486,7 +489,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow loading the fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (IS_ICELAKE(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 11)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/intel_csr.h
new file mode 100644
index 000000000000..17a32c1e8a35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CSR_H__
+#define __INTEL_CSR_H__
+
+struct drm_i915_private;
+
+void intel_csr_ucode_init(struct drm_i915_private *i915);
+void intel_csr_load_program(struct drm_i915_private *i915);
+void intel_csr_ucode_fini(struct drm_i915_private *i915);
+void intel_csr_ucode_suspend(struct drm_i915_private *i915);
+void intel_csr_ucode_resume(struct drm_i915_private *i915);
+
+#endif /* __INTEL_CSR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 98cea1f4b3bf..f181c26f62fd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -26,9 +26,19 @@
*/
#include <drm/drm_scdc_helper.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -851,7 +861,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1240,24 +1250,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
{
- i915_reg_t cfgcr1_reg, cfgcr2_reg;
- u32 cfgcr1_val, cfgcr2_val;
u32 p0, p1, p2, dco_freq;
- cfgcr1_reg = DPLL_CFGCR1(pll_id);
- cfgcr2_reg = DPLL_CFGCR2(pll_id);
-
- cfgcr1_val = I915_READ(cfgcr1_reg);
- cfgcr2_val = I915_READ(cfgcr2_reg);
-
- p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
- p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1292,10 +1293,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
break;
}
- dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
+ * 24 * 1000;
- dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
- 1000) / 0x8000;
+ dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
+ * 24 * 1000) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@@ -1304,24 +1306,15 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
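In both the SKL path above and the CNL/ICL path below, the DCO frequency works out to refclk * (DCO_integer + DCO_fraction / 2^15): the shift extracts the 15-bit fraction field and the / 0x8000 applies the 2^15 divisor while keeping everything in integer math. A standalone sketch of that arithmetic with made-up register contents (the bit layout below mirrors the SKL CFGCR1 masks, but treat it as an assumption):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t ref_khz = 24 * 1000;              /* 24 MHz reference */
    const uint32_t cfgcr1  = (0x3a5u << 9) | 0x151;  /* frac = 0x3a5, int = 0x151 (made up) */

    uint32_t dco_int  = cfgcr1 & 0x1ff;          /* integer part, bits 8:0 */
    uint32_t dco_frac = (cfgcr1 >> 9) & 0x7fff;  /* 15-bit fraction, bits 23:9 */

    /* dco = ref * (int + frac / 2^15), with the division done last */
    uint32_t dco_khz = dco_int * ref_khz + (dco_frac * ref_khz) / 0x8000;

    printf("DCO = %u kHz (~%.1f MHz)\n", dco_khz, dco_khz / 1000.0);
    return 0;
}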
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+ struct intel_dpll_hw_state *pll_state)
{
- u32 cfgcr0, cfgcr1;
u32 p0, p1, p2, dco_freq, ref_clock;
- if (INTEL_GEN(dev_priv) >= 11) {
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
- } else {
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
- }
-
- p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -1349,16 +1342,17 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
case DPLL_CFGCR1_KDIV_2:
p2 = 2;
break;
- case DPLL_CFGCR1_KDIV_4:
- p2 = 4;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
break;
}
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
+ * ref_clock;
- dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
@@ -1390,25 +1384,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- enum port port)
+ const struct intel_dpll_hw_state *pll_state)
{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 mg_pll_div0, mg_clktop_hsclkctl;
- u32 m1, m2_int, m2_frac, div1, div2, refclk;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- refclk = dev_priv->cdclk.hw.ref;
-
- mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ ref_clock = dev_priv->cdclk.hw.ref;
- m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+ (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+ MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
- switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
break;
@@ -1422,12 +1412,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
div1 = 7;
break;
default:
- MISSING_CASE(mg_clktop_hsclkctl);
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
/* a div2 value of 0 is the same as 1, i.e. no divider */
if (div2 == 0)
div2 = 1;
@@ -1436,8 +1428,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
* Adjust the original formula to delay the division by 2^22 in order to
* minimize possible rounding errors.
*/
- tmp = (u64)m1 * m2_int * refclk +
- (((u64)m1 * m2_frac * refclk) >> 22);
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
tmp = div_u64(tmp, 5 * div1 * div2);
return tmp;
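The delayed shift matters because the underlying formula is link_clock = m1 * (m2_int + m2_frac / 2^22) * refclk / (5 * div1 * div2); dividing the u.22 fraction first in integer math would discard the fractional contribution entirely. A standalone sketch with made-up divider values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t refclk  = 38400;     /* kHz, hypothetical reference */
    const uint64_t m1      = 4;
    const uint64_t m2_int  = 70;
    const uint64_t m2_frac = 0x200000;  /* 0.5 in u.22 fixed point */
    const uint64_t div1 = 2, div2 = 2;

    /* Naive order: m2_frac >> 22 truncates 0.5 down to 0. */
    uint64_t naive = m1 * (m2_int + (m2_frac >> 22)) * refclk / (5 * div1 * div2);

    /* Driver order: multiply first, shift last, divide once at the end. */
    uint64_t kept  = (m1 * m2_int * refclk + ((m1 * m2_frac * refclk) >> 22))
                     / (5 * div1 * div2);

    printf("naive = %llu kHz, delayed shift = %llu kHz\n",
           (unsigned long long)naive, (unsigned long long)kept);
    return 0;
}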
@@ -1471,25 +1463,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
- int link_clock = 0;
- u32 pll_id;
+ int link_clock;
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
- else
- link_clock = icl_calc_dp_combo_pll_link(dev_priv,
- pll_id);
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
+ enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
+ pipe_config->shared_dpll);
+
if (pll_id == DPLL_ID_ICL_TBTPLL)
link_clock = icl_calc_tbt_pll_link(dev_priv, port);
else
- link_clock = icl_calc_mg_pll_link(dev_priv, port);
+ link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
}
pipe_config->port_clock = link_clock;
+
ddi_dotclock_get(pipe_config);
}
@@ -1497,18 +1488,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 cfgcr0;
- enum intel_dpll_id pll_id;
-
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-
- if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
- link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+ link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
switch (link_clock) {
case DPLL_CFGCR0_LINK_RATE_810:
@@ -1548,22 +1534,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
}
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 dpll_ctl1;
- enum intel_dpll_id pll_id;
-
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- dpll_ctl1 = I915_READ(DPLL_CTRL1);
-
- if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
- link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
+ /*
+ * ctrl1 register is already shifted for each pll, just use 0 to get
+ * the internal shift for each field
+ */
+ if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
+ link_clock = skl_calc_wrpll_link(pll_state);
} else {
- link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
+ link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@@ -1643,24 +1627,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
-static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
+static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
{
- struct intel_dpll_hw_state *state;
struct dpll clock;
- /* For DDI ports we always use a shared PLL. */
- if (WARN_ON(!crtc_state->shared_dpll))
- return 0;
-
- state = &crtc_state->dpll_hw_state;
-
clock.m1 = 2;
- clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
return chv_calc_dpll_params(100000, &clock);
}
@@ -1668,7 +1645,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
+ pipe_config->port_clock =
+ bxt_calc_pll_link(&pipe_config->dpll_hw_state);
ddi_dotclock_get(pipe_config);
}
@@ -1678,7 +1656,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
@@ -1911,7 +1889,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1973,7 +1951,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (port == PORT_A) {
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@@ -2224,7 +2202,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum port port = encoder->port;
int n_entries;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2316,13 +2294,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
@@ -2378,14 +2356,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2447,13 +2425,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW7 */
@@ -2504,14 +2482,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2554,33 +2532,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2588,9 +2566,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
- val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2598,7 +2576,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2609,17 +2587,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(port, ln));
+ val = I915_READ(MG_CLKHUB(ln, port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(port, ln), val);
+ I915_WRITE(MG_CLKHUB(ln, port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(port, ln));
+ val = I915_READ(MG_TX1_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2627,9 +2605,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(port, ln), val);
+ I915_WRITE(MG_TX1_DCC(ln, port), val);
- val = I915_READ(MG_TX2_DCC(port, ln));
+ val = I915_READ(MG_TX2_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2637,18 +2615,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(port, ln), val);
+ I915_WRITE(MG_TX2_DCC(ln, port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
}
}
@@ -2697,7 +2675,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2866,7 +2844,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
@@ -2908,7 +2886,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2927,21 +2905,20 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val |= MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2960,21 +2937,20 @@ static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2998,8 +2974,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
return;
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
+ ln0 = I915_READ(MG_DP_MODE(0, port));
+ ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
@@ -3049,8 +3025,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
return;
}
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
+ I915_WRITE(MG_DP_MODE(0, port), ln0);
+ I915_WRITE(MG_DP_MODE(1, port), ln1);
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3077,7 +3053,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE,
DP_TP_STATUS_FEC_ENABLE_LIVE,
1))
@@ -3125,7 +3101,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -3174,7 +3150,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3555,7 +3531,9 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_psr_enable(intel_dp, crtc_state);
+ intel_ddi_set_pipe_settings(crtc_state);
+
+ intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
intel_panel_update_backlight(encoder, crtc_state, conn_state);
@@ -3710,7 +3688,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
@@ -3763,7 +3741,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (temp & TRANS_DDI_HDMI_SCRAMBLING)
@@ -3827,6 +3808,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static enum intel_output_type
@@ -3855,7 +3848,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
@@ -3959,15 +3952,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
}
- crtc_state->mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto out;
-
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto out;
+ crtc_state->connectors_changed = true;
ret = drm_atomic_commit(state);
out:
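
Several hunks in this series switch intel_wait_for_register() from taking dev_priv to taking &dev_priv->uncore, but the helper's job stays the same: a masked register poll with a timeout. The following standalone sketch only models that shape; the toy register array, names, and iteration-count timeout are illustrative, not the i915 implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* toy "register file" standing in for MMIO space */
static uint32_t regs[16];

static uint32_t read_reg(unsigned int off) { return regs[off]; }

/*
 * Poll @off until (value & mask) == expected, giving up after @tries
 * iterations.  The real helper takes an uncore pointer and a timeout in
 * milliseconds; this is only a shape-of-the-API sketch.
 */
static bool wait_for_register(unsigned int off, uint32_t mask,
                              uint32_t expected, unsigned int tries)
{
        while (tries--) {
                if ((read_reg(off) & mask) == expected)
                        return true;
                /* a real implementation would sleep or cpu_relax() here */
        }
        return false;
}

int main(void)
{
        regs[3] = 0x4;  /* pretend the hardware already set the status bit */
        printf("%s\n", wait_for_register(3, 0x4, 0x4, 10) ? "ready" : "timeout");
        return 0;
}
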
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/intel_ddi.h
new file mode 100644
index 000000000000..9cf69175942e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DDI_H__
+#define __INTEL_DDI_H__
+
+#include <drm/i915_drm.h>
+
+#include "intel_display.h"
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_dpll_hw_state;
+struct intel_encoder;
+
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void hsw_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
+u32 bxt_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
+u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *state);
+
+#endif /* __INTEL_DDI_H__ */
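
The new intel_ddi.h above forward-declares the structures it only handles through pointers (intel_crtc, intel_dp, and so on) instead of including their defining headers. A minimal standalone sketch of that forward-declaration pattern, with made-up widget names rather than anything from the patch:

/* ---- widget.h (sketch): declarations only, no heavy includes ---- */
struct widget;                          /* forward declaration is enough */
int widget_id(const struct widget *w);  /* only a pointer is used here */

/* ---- widget.c (sketch): the full definition lives with the code ---- */
#include <stdio.h>

struct widget {
        int id;
};

int widget_id(const struct widget *w)
{
        return w->id;
}

int main(void)
{
        struct widget w = { .id = 42 };
        printf("%d\n", widget_id(&w));  /* prints 42 */
        return 0;
}
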
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 855a5074ad77..6af480b95bc6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -57,6 +57,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
+ PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME
@@ -155,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_en;
int s;
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+ } else {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 8;
+ sseu->max_eus_per_subslice = 8;
+ }
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
@@ -707,6 +714,99 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}
+#undef INTEL_VGA_DEVICE
+#define INTEL_VGA_DEVICE(id, info) (id)
+
+static const u16 subplatform_ult_ids[] = {
+ INTEL_HSW_ULT_GT1_IDS(0),
+ INTEL_HSW_ULT_GT2_IDS(0),
+ INTEL_HSW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_GT1_IDS(0),
+ INTEL_BDW_ULT_GT2_IDS(0),
+ INTEL_BDW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_RSVD_IDS(0),
+ INTEL_SKL_ULT_GT1_IDS(0),
+ INTEL_SKL_ULT_GT2_IDS(0),
+ INTEL_SKL_ULT_GT3_IDS(0),
+ INTEL_KBL_ULT_GT1_IDS(0),
+ INTEL_KBL_ULT_GT2_IDS(0),
+ INTEL_KBL_ULT_GT3_IDS(0),
+ INTEL_CFL_U_GT2_IDS(0),
+ INTEL_CFL_U_GT3_IDS(0),
+ INTEL_WHL_U_GT1_IDS(0),
+ INTEL_WHL_U_GT2_IDS(0),
+ INTEL_WHL_U_GT3_IDS(0)
+};
+
+static const u16 subplatform_ulx_ids[] = {
+ INTEL_HSW_ULX_GT1_IDS(0),
+ INTEL_HSW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT1_IDS(0),
+ INTEL_BDW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT3_IDS(0),
+ INTEL_BDW_ULX_RSVD_IDS(0),
+ INTEL_SKL_ULX_GT1_IDS(0),
+ INTEL_SKL_ULX_GT2_IDS(0),
+ INTEL_KBL_ULX_GT1_IDS(0),
+ INTEL_KBL_ULX_GT2_IDS(0)
+};
+
+static const u16 subplatform_aml_ids[] = {
+ INTEL_AML_KBL_GT2_IDS(0),
+ INTEL_AML_CFL_GT2_IDS(0)
+};
+
+static const u16 subplatform_portf_ids[] = {
+ INTEL_CNL_PORT_F_IDS(0),
+ INTEL_ICL_PORT_F_IDS(0)
+};
+
+static bool find_devid(u16 id, const u16 *p, unsigned int num)
+{
+ for (; num; num--, p++) {
+ if (*p == id)
+ return true;
+ }
+
+ return false;
+}
+
+void intel_device_info_subplatform_init(struct drm_i915_private *i915)
+{
+ const struct intel_device_info *info = INTEL_INFO(i915);
+ const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(rinfo, info->platform);
+ const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
+ u16 devid = INTEL_DEVID(i915);
+ u32 mask = 0;
+
+ /* Make sure IS_<platform> checks are working. */
+ RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
+
+ /* Find and mark subplatform bits based on the PCI device id. */
+ if (find_devid(devid, subplatform_ult_ids,
+ ARRAY_SIZE(subplatform_ult_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULT);
+ } else if (find_devid(devid, subplatform_ulx_ids,
+ ARRAY_SIZE(subplatform_ulx_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULX);
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ /* ULX machines are also considered ULT. */
+ mask |= BIT(INTEL_SUBPLATFORM_ULT);
+ }
+ } else if (find_devid(devid, subplatform_aml_ids,
+ ARRAY_SIZE(subplatform_aml_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_AML);
+ } else if (find_devid(devid, subplatform_portf_ids,
+ ARRAY_SIZE(subplatform_portf_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_PORTF);
+ }
+
+ GEM_BUG_ON(mask & ~INTEL_SUBPLATFORM_BITS);
+
+ RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
+}
+
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -738,9 +838,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
+ BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@@ -844,7 +944,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
- info->ppgtt = INTEL_PPGTT_NONE;
+ info->ppgtt_type = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
@@ -871,23 +971,24 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ if (!(BIT(i) & vdbox_mask)) {
+ info->engine_mask &= ~BIT(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
}
@@ -899,15 +1000,20 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
+ DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
- DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ if (!(BIT(i) & vebox_mask)) {
+ info->engine_mask &= ~BIT(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}
+ DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
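
intel_device_info_subplatform_init() above resolves the PCI device ID against static ID tables with a plain linear search and records the result as subplatform bits in platform_mask. A self-contained model of that lookup, with made-up device IDs and bit names standing in for the real tables:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* hypothetical subplatform bits, mirroring the INTEL_SUBPLATFORM_* style */
enum { SUBPLATFORM_ULT = 0, SUBPLATFORM_ULX = 1 };

static const uint16_t ult_ids[] = { 0x1111, 0x2222 };  /* made-up device IDs */
static const uint16_t ulx_ids[] = { 0x3333 };

static bool find_devid(uint16_t id, const uint16_t *p, size_t num)
{
        for (; num; num--, p++)
                if (*p == id)
                        return true;
        return false;
}

static uint32_t subplatform_mask(uint16_t devid)
{
        uint32_t mask = 0;

        if (find_devid(devid, ult_ids, sizeof(ult_ids) / sizeof(ult_ids[0])))
                mask = BIT(SUBPLATFORM_ULT);
        else if (find_devid(devid, ulx_ids, sizeof(ulx_ids) / sizeof(ulx_ids[0])))
                mask = BIT(SUBPLATFORM_ULX);

        return mask;
}

int main(void)
{
        printf("0x%x\n", subplatform_mask(0x3333));  /* prints 0x2 */
        return 0;
}
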
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e8b8661df746..0e579f158016 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,6 +27,7 @@
#include <uapi/drm/i915_drm.h>
+#include "intel_engine_types.h"
#include "intel_display.h"
struct drm_printer;
@@ -73,14 +74,29 @@ enum intel_platform {
INTEL_CANNONLAKE,
/* gen11 */
INTEL_ICELAKE,
+ INTEL_ELKHARTLAKE,
INTEL_MAX_PLATFORMS
};
-enum intel_ppgtt {
+/*
+ * Subplatform bits share the same namespace per parent platform. In other words,

+ * it is fine for the same bit to be used on multiple parent platforms.
+ */
+
+#define INTEL_SUBPLATFORM_BITS (3)
+
+/* HSW/BDW/SKL/KBL/CFL */
+#define INTEL_SUBPLATFORM_ULT (0)
+#define INTEL_SUBPLATFORM_ULX (1)
+#define INTEL_SUBPLATFORM_AML (2)
+
+/* CNL/ICL */
+#define INTEL_SUBPLATFORM_PORTF (0)
+
+enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
- INTEL_PPGTT_FULL_4LVL,
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
@@ -150,19 +166,18 @@ struct sseu_dev_info {
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
-typedef u8 intel_ring_mask_t;
-
struct intel_device_info {
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- intel_ring_mask_t ring_mask; /* Rings supported by the HW */
+ intel_engine_mask_t engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
- u32 platform_mask;
- enum intel_ppgtt ppgtt;
+ enum intel_ppgtt_type ppgtt_type;
+ unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
+
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -195,22 +210,28 @@ struct intel_device_info {
};
struct intel_runtime_info {
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork
+ * for future per platform, or per SKU build optimizations.
+ *
+ * Array can be extended when necessary if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
u16 device_id;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 num_rings;
+ u8 num_engines;
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
u32 cs_timestamp_frequency_khz;
- /* Enabled (not fused off) media engine bitmasks. */
- u8 vdbox_enable;
- u8 vebox_enable;
-
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
};
@@ -269,6 +290,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
+void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
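
The ring_mask to engine_mask rename is backed by a compile-time width check (the BUILD_BUG_ON comparing BITS_PER_TYPE(intel_engine_mask_t) against I915_NUM_ENGINES in the runtime init above). A minimal standalone equivalent using C11 _Static_assert, with a placeholder type and engine count:

#include <limits.h>
#include <stdint.h>

typedef uint8_t engine_mask_t;          /* one bit per engine */
#define NUM_ENGINES 8                   /* placeholder engine count */

#define BITS_PER_TYPE(t) (CHAR_BIT * sizeof(t))

/* Fails to compile if the mask type can no longer hold every engine bit. */
_Static_assert(BITS_PER_TYPE(engine_mask_t) >= NUM_ENGINES,
               "engine_mask_t too narrow for NUM_ENGINES");

int main(void) { return 0; }
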
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 421aac80a838..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,19 +46,29 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_reset.h"
#include "i915_trace.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_frontbuffer.h"
-
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_reset.h"
-#include "i915_trace.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lvds.h"
+#include "intel_pipe_crc.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sdvo.h"
+#include "intel_sprite.h"
+#include "intel_tv.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -115,8 +125,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -467,10 +477,11 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) |
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -595,7 +606,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -603,7 +614,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -951,14 +962,15 @@ chv_find_best_dpll(const struct intel_limit *limit,
return found;
}
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
- target_clock, refclk, NULL, best_clock);
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
}
bool intel_crtc_active(struct intel_crtc *crtc)
@@ -1039,7 +1051,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, I965_PIPECONF_ACTIVE, 0,
100))
WARN(1, "pipe_off wait timed out\n");
@@ -1345,7 +1357,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe),
DPLL_LOCK_VLV,
DPLL_LOCK_VLV,
@@ -1398,7 +1410,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
@@ -1441,17 +1453,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc;
- int count = 0;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- count += crtc->base.state->active &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
- }
+ if (IS_I830(dev_priv))
+ return false;
- return count;
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_enable_pll(struct intel_crtc *crtc,
@@ -1465,29 +1472,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
+ if (i9xx_has_pps(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
- /* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
- /*
- * It appears to be important that we don't enable this
- * for the current pipe before otherwise configuring the
- * PLL. No idea how this should be handled if multiple
- * DVO outputs are enabled simultaneosly.
- */
- dpll |= DPLL_DVO_2X_MODE;
- I915_WRITE(DPLL(!crtc->pipe),
- I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
- }
-
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, 0);
-
+ I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
I915_WRITE(reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -1520,16 +1513,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev_priv)) {
- I915_WRITE(DPLL(PIPE_B),
- I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
- I915_WRITE(DPLL(PIPE_A),
- I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
- }
-
/* Don't disable pipe or pipe PLLs if needed */
if (IS_I830(dev_priv))
return;
@@ -1608,7 +1591,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@@ -1658,17 +1641,18 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
- else
+ } else {
val |= TRANS_PROGRESSIVE;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
@@ -1698,7 +1682,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF,
TRANS_STATE_ENABLE,
TRANS_STATE_ENABLE,
@@ -1724,7 +1708,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
@@ -1746,7 +1730,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("Failed to disable PCH transcoder\n");
@@ -1830,6 +1814,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
+ trace_intel_pipe_enable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
@@ -1869,6 +1855,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
+ trace_intel_pipe_disable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
@@ -2677,6 +2665,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
default:
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order) {
@@ -2695,6 +2701,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XBGR2101010;
else
return DRM_FORMAT_XRGB2101010;
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
}
}
@@ -2825,8 +2843,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
}
static void
@@ -3176,7 +3193,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (fb->format->format == DRM_FORMAT_NV12) {
+ if (is_planar_yuv_format(fb->format->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3230,9 +3247,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5)
@@ -3459,7 +3477,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
*
* On pre-g4x there is no way to gamma correct the
* pipe bottom color but we'll keep on doing this
- * anyway.
+ * anyway so that the crtc state readout works correctly.
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
@@ -3590,6 +3608,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -3600,6 +3624,24 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
case DRM_FORMAT_NV12:
return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
default:
MISSING_CASE(pixel_format);
}
@@ -3710,8 +3752,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
- plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
return plane_ctl;
}
@@ -3763,8 +3808,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return plane_color_ctl;
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
return plane_color_ctl;
}
@@ -3772,6 +3820,8 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
@@ -3779,7 +3829,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3921,9 +3971,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
-
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@@ -3963,13 +4010,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
-
/*
- * W/A for underruns with linear/X-tiled with
- * WM1+ disabled.
+ * Display WA # 1605353570: icl
+ * Set the pixel rounding bit to 1 for allowing
+ * passthrough of Frame buffer pixels unmodified
+ * across pipe
*/
- tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
-
+ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
@@ -4008,16 +4055,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
ironlake_pfit_disable(old_crtc_state);
}
- /*
- * We don't (yet) allow userspace to control the pipe background color,
- * so force it to black, but apply pipe gamma and CSC so that its
- * handling will match how we program our planes.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
-
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -5036,19 +5073,19 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && format->format == DRM_FORMAT_NV12 &&
+ if (format && is_planar_yuv_format(format->format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("NV12: src dimensions not met\n");
+ DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
}
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) >= 11 &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (!IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5105,14 +5142,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
{
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
bool force_detach = !fb || !plane_state->base.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(intel_plane) &&
- fb && fb->format->format == DRM_FORMAT_NV12)
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && is_planar_yuv_format(fb->format->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5144,11 +5182,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
break;
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -5259,7 +5310,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, IPS_ENABLE,
50))
DRM_ERROR("Timed out waiting for IPS enable\n");
@@ -5284,7 +5335,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, 0,
100))
DRM_ERROR("Timed out waiting for IPS disable\n");
@@ -5490,7 +5541,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
/* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, false);
+ skl_wa_827(dev_priv, crtc->pipe, false);
}
}
@@ -5529,7 +5580,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, true);
+ skl_wa_827(dev_priv, crtc->pipe, true);
}
/*
@@ -5603,7 +5654,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
@@ -5754,6 +5805,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
}
}
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ plane->disable_plane(plane, crtc_state);
+}
+
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5819,6 +5878,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5947,6 +6008,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (INTEL_GEN(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
@@ -6127,7 +6191,10 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
if (port == PORT_NONE)
return false;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ return port <= PORT_C;
+
+ if (INTEL_GEN(dev_priv) >= 11)
return port <= PORT_B;
return false;
@@ -6135,7 +6202,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return port >= PORT_C && port <= PORT_F;
return false;
@@ -6304,6 +6371,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
@@ -6361,6 +6430,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@@ -6743,7 +6814,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
- if (crtc_state->ips_force_disable)
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
return false;
/* IPS should be fine as long as at least one plane is enabled. */
@@ -6818,8 +6895,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
@@ -6869,7 +6945,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(dev)) {
+ intel_is_dual_link_lvds(dev_priv)) {
DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
@@ -7486,7 +7562,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev_priv) &&
+ /*
+ * Bspec:
+ * "[Almador Errata]: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to “1” in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+ * For simplicity we simply keep both bits always enabled in
+ * both DPLLs. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -7694,13 +7782,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
- } else
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ }
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
@@ -7744,8 +7835,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -7758,7 +7848,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -7894,14 +7984,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return INTEL_GEN(dev_priv) >= 4 ||
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if (INTEL_GEN(dev_priv) <= 3 &&
- (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
+ if (!i9xx_has_pfit(dev_priv))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8108,6 +8206,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
pipe_config->output_format = output;
}
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 tmp;
+
+ tmp = I915_READ(DSPCNTR(i9xx_plane));
+
+ if (tmp & DISPPLANE_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
+
+ if (!HAS_GMCH(dev_priv) &&
+ tmp & DISPPLANE_PIPE_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8153,6 +8269,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -8185,14 +8309,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- /*
- * DPLL_DVO_2X_MODE must be enabled for both DPLLs
- * on 830. Filter it out here so that we don't
- * report errors due to that.
- */
- if (IS_I830(dev_priv))
- pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
-
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
@@ -8692,6 +8808,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
@@ -8772,13 +8890,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+static void ironlake_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
int factor;
@@ -8787,10 +8903,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
factor = 25;
- } else if (crtc_state->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock) {
factor = 20;
+ }
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
@@ -8877,8 +8995,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 120000;
@@ -8896,7 +9013,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
refclk = dev_priv->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev)) {
+ if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
@@ -8920,7 +9037,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
+ if (!intel_get_shared_dpll(crtc_state, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9226,6 +9343,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
@@ -9371,7 +9495,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -9409,7 +9534,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -9426,7 +9551,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("LCPLL not locked yet\n");
@@ -9441,7 +9566,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -9510,11 +9635,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
to_intel_atomic_state(crtc_state->base.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ if (!intel_get_shared_dpll(crtc_state, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9552,9 +9677,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
- if (WARN_ON(!intel_dpll_is_combophy(id)))
- return;
} else if (intel_port_is_tc(dev_priv, port)) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
} else {
@@ -9643,20 +9765,25 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long panel_transcoder_mask = 0;
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
+ intel_wakeref_t wf;
u32 tmp;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ if (HAS_TRANSCODER_EDP(dev_priv))
+ panel_transcoder_mask |= BIT(TRANSCODER_EDP);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@@ -9713,10 +9840,13 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9726,13 +9856,15 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- enum port port;
enum transcoder cpu_transcoder;
+ intel_wakeref_t wf;
+ enum port port;
u32 tmp;
for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
@@ -9742,10 +9874,13 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
continue;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
/*
@@ -9786,7 +9921,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9824,6 +9959,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
@@ -9831,16 +9967,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
+
+ wakerefs[power_domain] = wf;
power_domain_mask = BIT_ULL(power_domain);
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
+ active = hsw_get_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs)) {
WARN_ON(active);
active = true;
}
@@ -9849,7 +9990,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
goto out;
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -9857,12 +9998,28 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
intel_get_crtc_ycbcr_config(crtc, pipe_config);
- pipe_config->gamma_mode =
- I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+ pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ pipe_config->gamma_enable = true;
+
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ pipe_config->csc_enable = true;
+ } else {
+ i9xx_get_pipe_color_config(pipe_config);
+ }
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wf) {
+ wakerefs[power_domain] = wf;
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
@@ -9894,7 +10051,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put_unchecked(dev_priv, power_domain);
+ intel_display_power_put(dev_priv,
+ power_domain, wakerefs[power_domain]);
return active;
}
@@ -10030,7 +10188,12 @@ i845_cursor_max_stride(struct intel_plane *plane,
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return CURSOR_GAMMA_ENABLE;
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
}
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
@@ -10184,9 +10347,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return cntl;
- cntl |= MCURSOR_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
- if (HAS_DDI(dev_priv))
+ if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -11134,7 +11298,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
hweight8(crtc_state->nv12_planes));
return -EINVAL;
@@ -11175,16 +11339,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
- if (mode_changed || crtc_state->color_mgmt_changed) {
+ if (mode_changed || pipe_config->update_pipe ||
+ crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
if (ret)
return ret;
-
- /*
- * Changing color management on Intel hardware is
- * handled as part of planes update.
- */
- crtc_state->planes_changed = true;
}
ret = 0;
@@ -11355,6 +11514,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+ const union hdmi_infoframe *frame)
+{
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -11458,6 +11627,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio, pipe_config->has_infoframe);
+ DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -11606,7 +11791,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
- saved_state->ips_force_disable = crtc_state->ips_force_disable;
+ saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
@@ -11825,6 +12010,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static void
+pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+ bool adjust, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ if (adjust) {
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+ drm_dbg(DRM_UT_KMS, "expected:");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg(DRM_UT_KMS, "found");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err("mismatch in %s infoframe", name);
+ drm_err("expected:");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err("found");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
@@ -11866,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config,
bool adjust)
{
+ struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12008,7 +12225,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
} \
} while (0)
-#define PIPE_CONF_QUIRK(quirk) \
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+ if (!intel_compare_infoframe(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
PIPE_CONF_CHECK_I(cpu_transcoder);
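PIPE_CONF_CHECK_INFOFRAME() above follows the existing check-macro shape: a do/while(0) body that leans on __stringify() for the field name and on the enclosing function's current_config/pipe_config/ret locals. A reduced, self-contained version of that shape with generic names (not the i915 macro itself):

    #include <stdio.h>

    #define STR(x) #x

    struct state { int avi; int spd; };

    /* Same shape as the patch's macro: compare one field of two states,
     * report it by name, and latch any failure into a shared 'ok' flag. */
    #define CHECK_FIELD(name) do {                                  \
            if (cur->name != new->name) {                           \
                    printf("mismatch in %s (expected %d, found %d)\n", \
                           STR(name), cur->name, new->name);        \
                    ok = 0;                                         \
            }                                                       \
    } while (0)

    static int states_match(const struct state *cur, const struct state *new)
    {
            int ok = 1;

            CHECK_FIELD(avi);
            CHECK_FIELD(spd);

            return ok;
    }

    int main(void)
    {
            struct state a = { .avi = 1, .spd = 0 };
            struct state b = { .avi = 1, .spd = 1 };

            return states_match(&a, &b) ? 0 : 1;
    }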
@@ -12077,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+ /*
+ * Changing the EDP transcoder input mux
+ * (A_ONOFF vs. A_ON) requires a full modeset.
+ */
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ current_config->cpu_transcoder == TRANSCODER_EDP)
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
@@ -12089,6 +12324,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+
+ PIPE_CONF_CHECK_X(gamma_mode);
+ if (IS_CHERRYVIEW(dev_priv))
+ PIPE_CONF_CHECK_X(cgm_mode);
+ else
+ PIPE_CONF_CHECK_X(csc_mode);
+ PIPE_CONF_CHECK_BOOL(gamma_enable);
+ PIPE_CONF_CHECK_BOOL(csc_enable);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12137,6 +12380,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(min_voltage_level);
+ PIPE_CONF_CHECK_X(infoframes.enable);
+ PIPE_CONF_CHECK_X(infoframes.gcp);
+ PIPE_CONF_CHECK_INFOFRAME(avi);
+ PIPE_CONF_CHECK_INFOFRAME(spd);
+ PIPE_CONF_CHECK_INFOFRAME(hdmi);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12171,12 +12420,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_pipe_wm hw_wm, *sw_wm;
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
+ struct skl_ddb_allocation ddb;
+ struct skl_pipe_wm wm;
+ } *hw;
+ struct skl_ddb_allocation *sw_ddb;
+ struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
- struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -12184,22 +12436,29 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
- if (INTEL_GEN(dev_priv) >= 11)
- if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw_ddb.enabled_slices);
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+ DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+ sw_ddb->enabled_slices,
+ hw->ddb.enabled_slices);
+
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
- hw_plane_wm = &hw_wm.planes[plane];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[plane];
sw_plane_wm = &sw_wm->planes[plane];
/* Watermarks */
@@ -12231,7 +12490,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[plane];
+ hw_ddb_entry = &hw->ddb_y[plane];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12249,7 +12508,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (1) {
- hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
/* Watermarks */
@@ -12281,7 +12542,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12291,6 +12552,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_ddb_entry->start, hw_ddb_entry->end);
}
}
+
+ kfree(hw);
}
static void
@@ -12447,7 +12710,8 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->base.visible);
+ assert_plane(plane, plane_state->slave ||
+ plane_state->base.visible);
}
static void
@@ -12769,10 +13033,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return -EINVAL;
}
+ /* keep the current setting */
+ if (!intel_state->cdclk.force_min_cdclk_changed)
+ intel_state->cdclk.force_min_cdclk =
+ dev_priv->cdclk.force_min_cdclk;
+
intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
intel_state->cdclk.logical = dev_priv->cdclk.logical;
intel_state->cdclk.actual = dev_priv->cdclk.actual;
+ intel_state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (new_crtc_state->active)
@@ -12792,6 +13062,8 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ enum pipe pipe;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
if (ret < 0)
return ret;
@@ -12808,12 +13080,36 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return ret;
}
+ if (is_power_of_2(intel_state->active_crtcs)) {
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ pipe = ilog2(intel_state->active_crtcs);
+ crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state && needs_modeset(crtc_state))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
/* All pipes must be switched off while we change the cdclk. */
- if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (pipe != INVALID_PIPE &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
+ ret = intel_lock_all_pipes(state);
+ if (ret < 0)
+ return ret;
+
+ intel_state->cdclk.pipe = pipe;
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
+
+ intel_state->cdclk.pipe = INVALID_PIPE;
}
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
@@ -12822,8 +13118,6 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
intel_state->cdclk.logical.voltage_level,
intel_state->cdclk.actual.voltage_level);
- } else {
- to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
intel_modeset_clear_plls(state);
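The cd2x fast path added above only applies when exactly one CRTC is active: is_power_of_2() on the active-CRTC bitmask detects the single-bit case, ilog2() recovers the pipe index, and the code still drops back to INVALID_PIPE when that CRTC itself needs a full modeset. A standalone illustration of the bit trick, using __builtin_ctz() in place of the kernel's ilog2():

    #include <stdio.h>

    static int is_power_of_2(unsigned int x)
    {
            return x != 0 && (x & (x - 1)) == 0;    /* exactly one bit set */
    }

    static int single_bit_index(unsigned int mask)
    {
            return __builtin_ctz(mask);             /* ilog2() for a one-bit mask */
    }

    int main(void)
    {
            unsigned int active_crtcs = 1u << 2;    /* only pipe C active */

            if (is_power_of_2(active_crtcs))
                    printf("single active pipe: %d\n", single_bit_index(active_crtcs));
            else
                    printf("zero or multiple pipes active\n");
            return 0;
    }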
@@ -12864,7 +13158,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *crtc_state;
int ret, i;
- bool any_ms = false;
+ bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@@ -12989,14 +13283,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- intel_begin_crtc_commit(crtc, old_crtc_state);
+ intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
else
i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
- intel_finish_crtc_commit(crtc, old_crtc_state);
+ intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -13224,7 +13518,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
+ intel_set_cdclk_pre_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13253,6 +13550,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);
+ if (intel_state->modeset)
+ intel_set_cdclk_post_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
+
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@@ -13313,7 +13616,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* so enable debugging for the next modeset - and hope we catch
* the culprit.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
@@ -13454,8 +13757,10 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_state->min_voltage_level,
sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.logical = intel_state->cdclk.logical;
- dev_priv->cdclk.actual = intel_state->cdclk.actual;
+ dev_priv->cdclk.force_min_cdclk =
+ intel_state->cdclk.force_min_cdclk;
+
+ intel_cdclk_swap_state(intel_state);
}
drm_atomic_state_get(state);
@@ -13506,7 +13811,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -13767,7 +14072,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
* or
* cdclk/crtc_clock
*/
- mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
+ mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
tmpclk1 = (1 << 16) * mult - 1;
tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
max_scale = min(tmpclk1, tmpclk2);
@@ -13775,39 +14080,35 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
return max_scale;
}
-static void intel_begin_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_begin_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_cstate =
- to_intel_crtc_state(old_crtc_state);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
- struct intel_crtc_state *intel_cstate =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- bool modeset = needs_modeset(&intel_cstate->base);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(&new_crtc_state->base);
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_cstate);
+ intel_pipe_update_start(new_crtc_state);
if (modeset)
goto out;
- if (intel_cstate->base.color_mgmt_changed ||
- intel_cstate->update_pipe)
- intel_color_commit(intel_cstate);
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
- if (intel_cstate->update_pipe)
- intel_update_pipe_config(old_intel_cstate, intel_cstate);
+ if (new_crtc_state->update_pipe)
+ intel_update_pipe_config(old_crtc_state, new_crtc_state);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_cstate);
+ skl_detach_scalers(new_crtc_state);
out:
if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(old_intel_state,
- intel_cstate);
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
}
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -13826,21 +14127,20 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
}
}
-static void intel_finish_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_finish_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ intel_atomic_get_new_crtc_state(state, crtc);
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
- old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
/**
@@ -14039,14 +14339,11 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible) {
- trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
- } else {
- trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, crtc_state);
- }
+ if (plane->state->visible)
+ intel_update_plane(intel_plane, crtc_state,
+ to_intel_plane_state(plane->state));
+ else
+ intel_disable_plane(intel_plane, crtc_state);
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14496,7 +14793,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ icl_dsi_init(dev_priv);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15397,6 +15699,8 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
+ intel_hdcp_component_init(dev_priv);
+
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@@ -15472,7 +15776,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
- dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
+ dpll = DPLL_DVO_2X_MODE |
DPLL_VGA_MODE_DIS |
((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
PLL_P2_DIVIDE_BY_4 |
@@ -16254,6 +16558,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
+ intel_hdcp_component_fini(dev_priv);
+
drm_mode_config_cleanup(dev);
intel_overlay_cleanup(dev_priv);
@@ -16300,8 +16606,6 @@ struct intel_display_error_state {
u32 power_well_driver;
- int num_transcoders;
-
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -16326,6 +16630,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
+ bool available;
bool power_domain_on;
enum transcoder cpu_transcoder;
@@ -16352,6 +16657,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
+
if (!HAS_DISPLAY(dev_priv))
return NULL;
@@ -16392,14 +16699,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
- /* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
- if (HAS_DDI(dev_priv))
- error->num_transcoders++; /* Account for eDP. */
-
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
+ if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ continue;
+
+ error->transcoder[i].available = true;
error->transcoder[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@@ -16463,7 +16769,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+ if (!error->transcoder[i].available)
+ continue;
+
err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 48da4a969a0a..560274d1c50b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -25,22 +25,34 @@
*
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/types.h>
+#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
#define DP_DPRX_ESI_LEN 14
@@ -949,8 +961,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv))
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1720,12 +1735,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
-struct link_config_limits {
- int min_clock, max_clock;
- int min_lane_count, max_lane_count;
- int min_bpp, max_bpp;
-};
-
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
@@ -1788,7 +1797,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
}
/* Adjust link config limits based on compliance test requests. */
-static void
+void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
@@ -1972,6 +1981,14 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ return 6 * 3;
+ else
+ return 8 * 3;
+}
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1995,7 +2012,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits.min_bpp = 6 * 3;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
if (intel_dp_is_edp(intel_dp)) {
@@ -2058,6 +2075,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ return crtc_state->pipe_bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb ==
+ INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
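intel_dp_limited_color_range() above centralizes the broadcast-RGB decision so the DP MST path can reuse it: on "auto", follow the mode's CEA-861 default quantization range unless the pipe runs at 18 bpp (6 bpc). A reduced decision sketch with the drm_default_rgb_quant_range() lookup replaced by a boolean input (illustrative names, not the real API):

    #include <stdio.h>
    #include <stdbool.h>

    enum broadcast_rgb { RGB_AUTO, RGB_FULL, RGB_LIMITED };

    static bool limited_color_range(enum broadcast_rgb prop, int pipe_bpp,
                                    bool mode_defaults_to_limited)
    {
            if (prop == RGB_AUTO)
                    /* CEA-861 default, except never for 18 bpp (6 bpc) pipes. */
                    return pipe_bpp != 18 && mode_defaults_to_limited;

            return prop == RGB_LIMITED;
    }

    int main(void)
    {
            printf("%d\n", limited_color_range(RGB_AUTO, 24, true));    /* 1 */
            printf("%d\n", limited_color_range(RGB_AUTO, 18, true));    /* 0 */
            printf("%d\n", limited_color_range(RGB_FULL, 24, true));    /* 0 */
            return 0;
    }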
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2074,7 +2114,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
- int ret;
+ int ret, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2123,40 +2163,25 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (ret < 0)
return ret;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /*
- * See:
- * CEA-861-E - 5.1 Default Encoding Parameters
- * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
- */
- pipe_config->limited_color_range =
- pipe_config->pipe_bpp != 18 &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
- if (!pipe_config->dsc_params.compression_enable)
- intel_link_compute_m_n(pipe_config->pipe_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (pipe_config->dsc_params.compression_enable)
+ output_bpp = pipe_config->dsc_params.compressed_bpp;
else
- intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ output_bpp = pipe_config->pipe_bpp;
+
+ intel_link_compute_m_n(output_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
- intel_link_compute_m_n(pipe_config->pipe_bpp,
+ intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
@@ -2296,7 +2321,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
pp_stat_reg, mask, value,
5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@@ -3885,7 +3910,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE,
DP_TP_STATUS_IDLE_DONE,
1))
@@ -4731,7 +4756,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_dp_handle_test_request(intel_dp);
if (val & DP_CP_IRQ)
- intel_hdcp_check_link(intel_dp->attached_connector);
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
@@ -5574,6 +5599,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
edp_panel_vdd_off_sync(intel_dp);
}
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+ DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+}
+
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
@@ -5798,6 +5835,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
return 0;
}
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+static struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+ u32 timeout2; /* Added for non_paired situation */
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+ {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+ {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0},
+ };
+
+static inline
+int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ u8 msg_id = hdcp2_msg_data->msg_id;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+ * There is no way to detect the CERT, LPRIME and STREAM_READY
+ * availability, so wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+ * Since we want to check msg availability at the timeout, ignore
+ * the timeout of the CP_IRQ wait itself.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
+static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id)
+ return &hdcp2_msg_data[i];
+
+ return NULL;
+}
+
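get_hdcp2_dp_msg_data() above is a plain linear scan of the static hdcp2_msg_data[] table keyed on msg_id. A minimal standalone version of the same table-driven lookup; the IDs, offsets and timeouts below are invented for illustration, not the HDCP 2.2 DPCD values:

    #include <stdio.h>
    #include <stddef.h>

    struct msg_data {
            unsigned char msg_id;
            unsigned int offset;       /* DPCD offset the message lives at */
            unsigned int timeout_ms;   /* 0 means "no wait needed" */
    };

    static const struct msg_data msg_table[] = {
            { 2, 0x1000, 0   },
            { 3, 0x100b, 100 },
            { 7, 0x12c0, 200 },
    };

    static const struct msg_data *lookup_msg(unsigned char msg_id)
    {
            size_t i;

            for (i = 0; i < sizeof(msg_table) / sizeof(msg_table[0]); i++)
                    if (msg_table[i].msg_id == msg_id)
                            return &msg_table[i];

            return NULL;   /* unknown message: caller returns -EINVAL */
    }

    int main(void)
    {
            const struct msg_data *m = lookup_msg(3);

            if (m)
                    printf("msg 3 -> offset 0x%x, timeout %u ms\n",
                           m->offset, m->timeout_ms);
            return 0;
    }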
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
+
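intel_dp_hdcp2_write_msg() above drops the one-byte msg_id (DP addresses each message by DPCD offset instead) and then loops in AUX-sized chunks, advancing buffer and offset by however many bytes each transfer accepted. A plain-C sketch of that chunking loop with a fake transfer callback; names, sizes and the offset are illustrative only:

    #include <stdio.h>

    #define MAX_PAYLOAD 16   /* stands in for DP_AUX_MAX_PAYLOAD_BYTES */

    /* Pretend AUX write: a real channel may accept fewer bytes than asked. */
    static int aux_write(unsigned int offset, const unsigned char *buf, int len)
    {
            (void)buf;       /* a real implementation pushes these bytes over AUX */
            printf("write %d bytes at 0x%x\n", len, offset);
            return len;
    }

    static int write_chunked(unsigned int offset, const unsigned char *buf, int size)
    {
            int remaining = size - 1;        /* first byte (msg_id) is not sent */
            const unsigned char *p = buf + 1;

            while (remaining > 0) {
                    int len = remaining > MAX_PAYLOAD ? MAX_PAYLOAD : remaining;
                    int ret = aux_write(offset, p, len);

                    if (ret < 0)
                            return ret;
                    remaining -= ret;
                    p += ret;
                    offset += ret;
            }
            return size;
    }

    int main(void)
    {
            unsigned char msg[40] = { 0x05 /* msg_id */ };

            return write_chunked(0x1000, msg, sizeof(msg)) < 0;
    }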
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u32 dev_cnt;
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_size(intel_dig_port);
+ if (ret < 0)
+ return ret;
+
+ size = ret;
+ }
+ bytes_to_recv = size - 1;
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 content_type)
+{
+ struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+ if (is_repeater)
+ return 0;
+
+ /*
+ * Errata for DP: since the stream type is used for encryption, the
+ * receiver must be told the stream type so it can decrypt the
+ * content.
+ * A repeater is told the stream type as part of its authentication
+ * later on.
+ */
+ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+ stream_type_msg.stream_type = content_type;
+
+ return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ sizeof(stream_type_msg));
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 rx_caps[3];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -5810,6 +6177,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
};
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
@@ -6023,43 +6396,34 @@ static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+ u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
- /* Workaround: Need to write PP_CONTROL with the unlock key as
- * the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv)) {
- I915_WRITE(regs.pp_ctrl, pp_ctl);
- pp_div = I915_READ(regs.pp_div);
- }
/* Pull timing values out of registers */
- seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
- seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
- seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
+ pp_div = I915_READ(regs.pp_div);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
- BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
- seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
}
}
@@ -6184,7 +6548,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div, port_sel = 0;
+ u32 pp_on, pp_off, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
@@ -6219,23 +6583,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_ctrl, pp);
}
- pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
- (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
- /* Compute the divisor for the pp clock, simply match the Bspec
- * formula. */
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- pp_div = I915_READ(regs.pp_ctrl);
- pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << BXT_POWER_CYCLE_DELAY_SHIFT);
- } else {
- pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
- }
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -6262,19 +6613,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_div);
- else
- I915_WRITE(regs.pp_div, pp_div);
+
+ /*
+ * Compute the divisor for the pp clock, simply match the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ I915_WRITE(regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+ }
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) ?
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
- I915_READ(regs.pp_div));
+ i915_mmio_reg_valid(regs.pp_div) ?
+ I915_READ(regs.pp_div) :
+ (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
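The PPS rework above replaces open-coded shift/mask pairs with REG_FIELD_GET()/REG_FIELD_PREP(), which derive the shift from the mask itself so the field layout lives in one place. A standalone equivalent of that pattern using __builtin_ctz(); the masks below are invented for illustration and are not the real PP_ON_DELAYS layout:

    #include <stdio.h>

    #define FIELD_GET_SKETCH(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))
    #define FIELD_PREP_SKETCH(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

    #define POWER_UP_DELAY_MASK  0x1fff0000u   /* illustrative field layout */
    #define LIGHT_ON_DELAY_MASK  0x00001fffu

    int main(void)
    {
            unsigned int pp_on;

            /* Pack two delay fields into one register value... */
            pp_on = FIELD_PREP_SKETCH(POWER_UP_DELAY_MASK, 2100) |
                    FIELD_PREP_SKETCH(LIGHT_ON_DELAY_MASK, 50);

            /* ...and pull them back out without hand-written shift constants. */
            printf("t1_t3=%u t8=%u\n",
                   FIELD_GET_SKETCH(POWER_UP_DELAY_MASK, pp_on),
                   FIELD_GET_SKETCH(LIGHT_ON_DELAY_MASK, pp_on));
            return 0;
    }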
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6645,9 +7006,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
return NULL;
}
- downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
- &connector->base);
-
+ downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
@@ -6669,7 +7028,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
- struct drm_display_mode *scan;
enum pipe pipe = INVALID_PIPE;
intel_wakeref_t wakeref;
struct edid *edid;
@@ -6685,7 +7043,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(&dev_priv->drm)) {
+ if (intel_get_lvds_encoder(dev_priv)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
@@ -6722,26 +7080,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- /* prefer fixed mode from EDID if available */
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- downclock_mode = intel_dp_drrs_init(
- intel_connector, fixed_mode);
- break;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
- fixed_mode = drm_mode_duplicate(dev,
- dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- }
- }
+ if (!fixed_mode)
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..5e9e8d13de6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_H__
+#define __INTEL_DP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_connector_state;
+struct drm_encoder;
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+
+struct link_config_limits {
+ int min_clock, max_clock;
+ int min_lane_count, max_lane_count;
+ int min_bpp, max_bpp;
+};
+
+void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
+bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+ enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count,
+ bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
+void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ u8 dp_train_pat);
+void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp);
+void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
+u8
+intel_dp_voltage_max(struct intel_dp *intel_dp);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ u8 *link_bw, u8 *rate_select);
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+ int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
+#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index b59c87daa4f7..54b069333e2f 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
static void
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fb67cd931117..8839eaea8371 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,78 +23,119 @@
*
*/
-#include "i915_drv.h"
-#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ void *port = connector->port;
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_CONSTANT_N);
+ int bpp, slots = -EINVAL;
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_clock;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ crtc_state->pipe_bpp = bpp;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+ crtc_state->pipe_bpp);
+
+ slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
+ port, crtc_state->pbn);
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+ break;
+ }
+
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->pipe_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ constant_n);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
+
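intel_dp_mst_compute_link_config() above walks pipe_bpp downward in steps of 2 * 3 (a two-bpc step across three components) until the computed PBN fits in the available VCPI slots. A reduced sketch of that descending search, with drm_dp_calc_pbn_mode() and drm_dp_atomic_find_vcpi_slots() replaced by simplified stand-ins (the real PBN formula also folds in MTP and FEC overhead):

    #include <stdio.h>

    /* Stand-in for the PBN calculation: pixel rate scaled by bytes per pixel. */
    static int calc_pbn(int clock_khz, int bpp)
    {
            return (clock_khz * bpp) / 8 / 1000;
    }

    static int find_vcpi_slots(int pbn, int pbn_per_slot, int slots_available)
    {
            int slots = (pbn + pbn_per_slot - 1) / pbn_per_slot;

            return slots <= slots_available ? slots : -1;   /* -1: does not fit */
    }

    int main(void)
    {
            int clock_khz = 533250, max_bpp = 30, min_bpp = 18;
            int bpp, slots = -1;

            for (bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3) {
                    slots = find_vcpi_slots(calc_pbn(clock_khz, bpp), 64, 30);
                    if (slots >= 0)
                            break;
            }

            if (slots < 0)
                    printf("no bpp fits the link\n");
            else
                    printf("using %d bpp in %d slots\n", bpp, slots);
            return 0;
    }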
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_connector *connector = conn_state->connector;
- void *port = to_intel_connector(connector)->port;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_crtc_state *old_crtc_state =
- drm_atomic_get_old_crtc_state(state, crtc);
- int bpp;
- int lane_count, slots =
- to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int mst_pbn;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
- DP_DPCD_QUIRK_CONSTANT_N);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ void *port = connector->port;
+ struct link_config_limits limits;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- bpp = 24;
- if (intel_dp->compliance.test_data.bpc) {
- bpp = intel_dp->compliance.test_data.bpc * 3;
- DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
- bpp);
- }
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio =
+ drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+ else
+ pipe_config->has_audio =
+ intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = intel_dp_max_lane_count(intel_dp);
-
- pipe_config->lane_count = lane_count;
-
- pipe_config->pipe_bpp = bpp;
+ limits.min_clock =
+ limits.max_clock = intel_dp_max_link_rate(intel_dp);
- pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+ limits.min_lane_count =
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
- pipe_config->has_audio = true;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
+ limits.max_bpp = pipe_config->pipe_bpp;
- mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
- pipe_config->pbn = mst_pbn;
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
- mst_pbn);
- if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
- slots);
- return slots;
- }
-
- intel_link_compute_m_n(bpp, lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret)
+ return ret;
- pipe_config->dp_m_n.tu = slots;
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -117,7 +158,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
- int ret = 0;
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+ if (ret)
+ return ret;
if (!old_conn_state->crtc)
return 0;
@@ -289,7 +334,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT,
DP_TP_STATUS_ACT_SENT,
@@ -354,11 +399,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -373,7 +420,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
if (drm_connector_is_unregistered(connector))
@@ -386,7 +432,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
- mode_rate = intel_dp_link_required(mode->clock, bpp);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@@ -487,6 +533,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (ret)
goto err;
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
return connector;
err:
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 95cb8b154f87..ab4ac7158b79 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -341,7 +342,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
@@ -383,7 +384,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_wait_for_register_fw(&dev_priv->uncore,
+ BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 0a42d11c4c33..e01c057ce50b 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -241,11 +241,11 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
@@ -420,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
}
static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ibx_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -436,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
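The recurring refactor in this file is already visible in the two hunks above: instead of threading struct intel_crtc * and an explicit clock through every PLL helper, each function now takes only the crtc_state and derives the rest via to_intel_crtc(crtc_state->base.crtc) and crtc_state->port_clock. A generic sketch of that shape, using simplified stand-in types rather than the i915 ones:

struct my_crtc {
	void *dev;
};

struct my_crtc_state {
	struct my_crtc *crtc;
	int port_clock;
};

static void compute_pll(struct my_crtc_state *state)
{
	struct my_crtc *crtc = state->crtc;	/* formerly a separate argument */
	int clock = state->port_clock;		/* formerly a separate argument */

	/* ... pick dividers from clock, log decisions against crtc ... */
	(void)crtc;
	(void)clock;
}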
@@ -764,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -780,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
if (!pll)
@@ -790,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
}
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
+hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
+ int clock = crtc_state->port_clock;
switch (clock / 2) {
case 81000:
@@ -820,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
}
static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+hsw_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(crtc_state);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(encoder, clock);
+ pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
@@ -840,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
} else {
return NULL;
@@ -961,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL_STATUS,
DPLL_LOCK(id),
DPLL_LOCK(id),
@@ -1308,9 +1307,7 @@ skip_remaining_dividers:
return true;
}
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1323,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ &wrpll_params))
return false;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
@@ -1346,8 +1344,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-skl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 ctrl1;
@@ -1356,7 +1353,7 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
* as the DPLL id in this function.
*/
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
@@ -1378,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->ctrl1 = ctrl1;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
return true;
}
static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+skl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
return NULL;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
else
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL1,
DPLL_ID_SKL_DPLL3);
if (!pll)
@@ -1692,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
};
static bool
-bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -1703,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(intel_crtc->pipe));
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1722,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
return true;
}
-static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+ struct bxt_clk_div *clk_div)
{
+ int clock = crtc_state->port_clock;
int i;
*clk_div = bxt_dp_clk_val[0];
@@ -1737,14 +1736,17 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}
-static bool bxt_ddi_set_dpll_hw_state(int clock,
- struct bxt_clk_div *clk_div,
- struct intel_dpll_hw_state *dpll_hw_state)
+static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct bxt_clk_div *clk_div)
{
+ struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
+ memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
+
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -1804,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
}
static bool
-bxt_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = {0};
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_dp_pll_dividers(clock, &clk_div);
+ bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static bool
-bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = { };
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+ bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bxt_get_dpll(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
{
- struct intel_dpll_hw_state dpll_hw_state = { };
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- int i, clock = crtc_state->port_clock;
+ enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
- &dpll_hw_state))
+ !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
return NULL;
if (intel_crtc_has_dp_encoder(crtc_state) &&
- !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+ !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
return NULL;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state = dpll_hw_state;
-
/* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(dev_priv, i);
+ id = (enum intel_dpll_id) encoder->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, id);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
@@ -1911,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
@@ -1986,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
PLL_POWER_STATE,
@@ -2027,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
PLL_LOCK,
@@ -2075,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
0,
@@ -2097,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
0,
@@ -2242,11 +2233,11 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
}
static bool
-cnl_ddi_calculate_wrpll(int clock,
- struct drm_i915_private *dev_priv,
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- u32 afe_clock = clock * 5;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
@@ -2282,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock,
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
- kdiv);
+ cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+ pdiv, qdiv, kdiv);
return true;
}
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
- if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
+ if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2319,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-cnl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
break;
@@ -2356,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->cfgcr0 = cfgcr0;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+
return true;
}
static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+cnl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
return NULL;
}
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
@@ -2431,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dump_hw_state = cnl_dump_hw_state,
};
+struct icl_combo_pll_params {
+ int clock;
+ struct skl_wrpll_params wrpll;
+};
+
/*
* These values are already adjusted: they're the bits we write to the
* registers, not the logical values.
*/
-static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
+
/* Also used for 38.4 MHz values. */
-static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
@@ -2484,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
-static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- const struct skl_wrpll_params *params;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ const struct icl_combo_pll_params *params =
+ dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ int clock = crtc_state->port_clock;
+ int i;
- switch (clock) {
- case 540000:
- *pll_params = params[0];
- break;
- case 270000:
- *pll_params = params[1];
- break;
- case 162000:
- *pll_params = params[2];
- break;
- case 324000:
- *pll_params = params[3];
- break;
- case 216000:
- *pll_params = params[4];
- break;
- case 432000:
- *pll_params = params[5];
- break;
- case 648000:
- *pll_params = params[6];
- break;
- case 810000:
- *pll_params = params[7];
- break;
- default:
- MISSING_CASE(clock);
- return false;
+ for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+ if (clock == params[i].clock) {
+ *pll_params = params[i].wrpll;
+ return true;
+ }
}
- return true;
+ MISSING_CASE(clock);
+ return false;
}
-static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
return true;
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
if (intel_port_is_tc(dev_priv, encoder->port))
- ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+ ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
else
- ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
if (!ret)
return false;
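The icl_calc_dp_combo_pll() hunk above replaces an index-aligned switch with a table keyed directly by link clock, so the 24 MHz and 19.2 MHz arrays no longer have to stay in the same order. A trimmed sketch of that lookup, reusing the first two 24 MHz entries from the table above; the struct names are placeholders, not the driver's:

struct wrpll_params_sketch {
	unsigned int dco_integer, dco_fraction, pdiv, kdiv;
};

struct combo_pll_entry {
	int clock;			/* link clock in kHz */
	struct wrpll_params_sketch wrpll;
};

static const struct combo_pll_entry combo_pll_24mhz[] = {
	{ 540000, { 0x151, 0x4000, 0x2, 1 } },
	{ 270000, { 0x151, 0x4000, 0x2, 2 } },
};

static int lookup_combo_pll(int clock, struct wrpll_params_sketch *out)
{
	unsigned int i;

	for (i = 0; i < sizeof(combo_pll_24mhz) / sizeof(combo_pll_24mhz[0]); i++) {
		if (combo_pll_24mhz[i].clock == clock) {
			*out = combo_pll_24mhz[i].wrpll;
			return 0;
		}
	}
	return -1;	/* unsupported link rate */
}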
@@ -2563,82 +2552,16 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
DPLL_CFGCR1_PDIV(pll_params.pdiv) |
DPLL_CFGCR1_CENTRAL_FREQ_8400;
- pll_state->cfgcr0 = cfgcr0;
- pll_state->cfgcr1 = cfgcr1;
- return true;
-}
-
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id)
-{
- u32 cfgcr0, cfgcr1;
- u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
- const struct skl_wrpll_params *params;
- int index, n_entries, link_clock;
-
- /* Read back values from DPLL CFGCR registers */
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
-
- dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
- dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT;
- pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
- kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
- qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
- DPLL_CFGCR1_QDIV_MODE_SHIFT;
- qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
- n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
-
- for (index = 0; index < n_entries; index++) {
- if (dco_integer == params[index].dco_integer &&
- dco_fraction == params[index].dco_fraction &&
- pdiv == params[index].pdiv &&
- kdiv == params[index].kdiv &&
- qdiv_mode == params[index].qdiv_mode &&
- qdiv_ratio == params[index].qdiv_ratio)
- break;
- }
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
- /* Map PLL Index to Link Clock */
- switch (index) {
- default:
- MISSING_CASE(index);
- /* fall through */
- case 0:
- link_clock = 540000;
- break;
- case 1:
- link_clock = 270000;
- break;
- case 2:
- link_clock = 162000;
- break;
- case 3:
- link_clock = 324000;
- break;
- case 4:
- link_clock = 216000;
- break;
- case 5:
- link_clock = 432000;
- break;
- case 6:
- link_clock = 648000;
- break;
- case 7:
- link_clock = 810000;
- break;
- }
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- return link_clock;
+ return true;
}
+
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1;
@@ -2649,11 +2572,6 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
return tc_port + DPLL_ID_ICL_MGPLL1;
}
-bool intel_dpll_is_combophy(enum intel_dpll_id id)
-{
- return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
-}
-
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2728,12 +2646,12 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation; that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
int refclk_khz = dev_priv->cdclk.hw.ref;
+ int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
u32 prop_coeff, int_coeff;
@@ -2743,6 +2661,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ memset(pll_state, 0, sizeof(*pll_state));
+
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
@@ -2892,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+icl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
- struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
enum intel_dpll_id min, max;
- int clock = crtc_state->port_clock;
bool ret;
if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else if (intel_port_is_tc(dev_priv, port)) {
if (encoder->type == INTEL_OUTPUT_DP_MST) {
struct intel_dp_mst_encoder *mst_encoder;
@@ -2922,16 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else {
enum tc_port tc_port;
tc_port = intel_port_to_tc(dev_priv, port);
min = icl_tc_port_to_pll_id(tc_port);
max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_mg_pll_state(crtc_state);
}
} else {
MISSING_CASE(port);
@@ -2943,9 +2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
- crtc_state->dpll_hw_state = pll_state;
- pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+ pll = intel_find_shared_dpll(crtc_state, min, max);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return NULL;
@@ -2956,19 +2870,72 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
-static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
{
- if (intel_dpll_is_combophy(id))
- return CNL_DPLL_ENABLE(id);
- else if (id == DPLL_ID_ICL_TBTPLL)
- return TBT_PLL_ENABLE;
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
+ return false;
- return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+
+ hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ return ret;
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *hw_state,
+ i915_reg_t enable_reg)
{
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -2980,54 +2947,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(icl_pll_id_to_enable_reg(id));
+ val = I915_READ(enable_reg);
if (!(val & PLL_ENABLE))
goto out;
- if (intel_dpll_is_combophy(id) ||
- id == DPLL_ID_ICL_TBTPLL) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- } else {
- enum tc_port tc_port = icl_pll_id_to_tc_port(id);
-
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
- hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-
- hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
- hw_state->mg_clktop2_coreclkctl1 &=
- MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-
- hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
- hw_state->mg_clktop2_hsclkctl &=
- MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
-
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
-
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
- hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-
- if (dev_priv->cdclk.hw.ref == 38400) {
- hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- hw_state->mg_pll_bias_mask = 0;
- } else {
- hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
- hw_state->mg_pll_bias_mask = -1U;
- }
-
- hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
- hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- }
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
ret = true;
out:
@@ -3035,6 +2960,21 @@ out:
return ret;
}
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+ CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
static void icl_dpll_write(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -3096,11 +3036,10 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
val = I915_READ(enable_reg);
@@ -3111,37 +3050,90 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
- PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+}
- if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
- icl_dpll_write(dev_priv, pll);
- else
- icl_mg_pll_write(dev_priv, pll);
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ val = I915_READ(enable_reg);
+ val |= PLL_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /* Timeout is actually 600us. */
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_LOCK, PLL_LOCK, 1))
+ DRM_ERROR("PLL %d not locked\n", pll->info->id);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_dpll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
* paths should already be setting the appropriate voltage, hence we do
- * nothign here.
+ * nothing here.
*/
- val = I915_READ(enable_reg);
- val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ icl_pll_enable(dev_priv, pll, enable_reg);
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
- 1)) /* 600us actually. */
- DRM_ERROR("PLL %d not locked\n", id);
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ icl_dpll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_mg_pll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
}
static void icl_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
/* The first steps are done by intel_ddi_post_disable(). */
@@ -3157,8 +3149,9 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
- DRM_ERROR("PLL %d locked\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_LOCK, 0, 1))
+ DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3170,9 +3163,30 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
- 1))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_POWER_STATE, 0, 1))
+ DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -3197,20 +3211,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias);
}
-static const struct intel_shared_dpll_funcs icl_pll_funcs = {
- .enable = icl_pll_enable,
- .disable = icl_pll_disable,
- .get_hw_state = icl_pll_get_hw_state,
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+ .enable = combo_pll_enable,
+ .disable = combo_pll_disable,
+ .get_hw_state = combo_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+ .enable = tbt_pll_enable,
+ .disable = tbt_pll_disable,
+ .get_hw_state = tbt_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = mg_pll_get_hw_state,
};
static const struct dpll_info icl_plls[] = {
- { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
{ },
};
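The icl_plls[] change above is the payoff of splitting icl_pll_funcs into combo/TBT/MG variants: each entry now binds its PLL to the vtable that knows which enable register and hw-state readout applies. The general pattern, reduced to generic C with illustrative (non-i915) names:

struct pll;			/* opaque here */

struct pll_funcs {
	void (*enable)(struct pll *pll);
	void (*disable)(struct pll *pll);
};

struct pll_info {
	const char *name;
	const struct pll_funcs *funcs;
	int id;
};

static void pll_enable(const struct pll_info *info, struct pll *pll)
{
	/* dispatch through the per-type table, as the dpll_info array now allows */
	info->funcs->enable(pll);
}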
@@ -3220,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info ehl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+ .dpll_info = ehl_plls,
+ .get_dpll = icl_get_dpll,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -3233,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ dpll_mgr = &ehl_pll_mgr;
+ else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
@@ -3271,31 +3311,29 @@ void intel_shared_dpll_init(struct drm_device *dev)
/**
* intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc: CRTC
- * @crtc_state: atomic state for @crtc
+ * @crtc_state: atomic state for the crtc
* @encoder: encoder
*
* Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc to the returned pll is registered in the atomic
- * state. That configuration is made effective by calling
+ * reference from the @crtc_state to the returned pll is registered in the
+ * atomic state. That configuration is made effective by calling
* intel_shared_dpll_swap_state(). The reference should be released by calling
* intel_release_shared_dpll().
*
* Returns:
- * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ * A shared DPLL to be used by @crtc_state and @encoder.
*/
struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
return NULL;
- return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+ return dpll_mgr->get_dpll(crtc_state, encoder);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 40e8391a92f2..bd8124cc81ed 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *state,
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
struct intel_encoder *encoder);
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
@@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d5660ac1b0d6..a38b9cff5cd0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -27,22 +27,24 @@
#include <linux/async.h>
#include <linux/i2c.h>
-#include <linux/hdmi.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_atomic.h>
+#include <drm/i915_drm.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
+#include "i915_drv.h"
+
struct drm_printer;
/**
@@ -325,6 +327,13 @@ struct intel_panel {
struct intel_digital_port;
+enum check_link_response {
+ HDCP_LINK_PROTECTED = 0,
+ HDCP_TOPOLOGY_CHANGE,
+ HDCP_LINK_INTEGRITY_FAILURE,
+ HDCP_REAUTH_REQUEST
+};
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -397,6 +406,32 @@ struct intel_hdcp_shim {
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
bool *hdcp_capable);
+
+ /* HDCP adaptation(DP/HDMI) required on the port */
+ enum hdcp_wired_protocol protocol;
+
+ /* Detects whether sink is HDCP2.2 capable */
+ int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+ bool *capable);
+
+ /* Write HDCP2.2 messages */
+ int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size);
+
+ /* Read HDCP2.2 messages */
+ int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size);
+
+ /*
+ * Implementation of DP HDCP2.2 Errata for the communication of stream
+ * type to Receivers. In DP HDCP2.2 Stream type is one of the inputs to
+ * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */
+ int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 type);
+
+ /* HDCP2.2 Link Integrity Check */
+ int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
};
struct intel_hdcp {
@@ -406,6 +441,50 @@ struct intel_hdcp {
u64 value;
struct delayed_work check_work;
struct work_struct prop_work;
+
+ /* HDCP1.4 Encryption status */
+ bool hdcp_encrypted;
+
+ /* HDCP2.2 related definitions */
+ /* Flag indicates whether this connector supports HDCP2.2 or not. */
+ bool hdcp2_supported;
+
+ /* HDCP2.2 Encryption status */
+ bool hdcp2_encrypted;
+
+ /*
+ * Content Stream Type defined by content owner. TYPE0(0x0) content can
+ * flow in the link protected by HDCP2.2 or HDCP1.4, whereas TYPE1(0x1)
+ * content can flow only through a link protected by HDCP2.2.
+ */
+ u8 content_type;
+ struct hdcp_port_data port_data;
+
+ bool is_paired;
+ bool is_repeater;
+
+ /*
+ * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT.
+ * Incremented after processing the RepeaterAuth_Send_ReceiverID_List.
+ * When it rolls over re-auth has to be triggered.
+ */
+ u32 seq_num_v;
+
+ /*
+ * Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over re-Auth has to be triggered.
+ */
+ u32 seq_num_m;
+
+ /*
+ * Work queue to signal the CP_IRQ. Used for the waiters to read the
+ * available information from HDCP DP sink.
+ */
+ wait_queue_head_t cp_irq_queue;
+ atomic_t cp_irq_count;
+ int cp_irq_count_cached;
};
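The comments added above specify that seq_num_v and seq_num_m start at 0, are incremented per message, and that a rollover must force re-authentication. A minimal sketch of that rollover rule, assuming the 24-bit counters HDCP 2.2 uses for seq_num_V/seq_num_M; the macro name is illustrative and not taken from the driver:

#include <stdbool.h>
#include <stdint.h>

#define HDCP2_SEQ_NUM_MAX	((1u << 24) - 1)	/* 24-bit field per HDCP 2.2 */

static bool hdcp2_seq_num_needs_reauth(uint32_t seq_num)
{
	/* trigger re-auth before the counter can wrap back to 0 */
	return seq_num >= HDCP2_SEQ_NUM_MAX;
}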
struct intel_connector {
@@ -480,6 +559,11 @@ struct intel_atomic_state {
* state only when all crtc's are DPMS off.
*/
struct intel_cdclk_state actual;
+
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
} cdclk;
bool dpll_set, modeset;
@@ -923,7 +1007,8 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
- bool ips_force_disable;
+
+ bool crc_enabled;
bool enable_fbc;
@@ -944,13 +1029,30 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
+ union {
+ /* CSC mode programmed on the pipe */
+ u32 csc_mode;
+
+ /* CHV CGM mode */
+ u32 cgm_mode;
+ };
+
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
+ u8 c8_planes;
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ struct {
+ u32 enable;
+ u32 gcp;
+ union hdmi_infoframe avi;
+ union hdmi_infoframe spd;
+ union hdmi_infoframe hdmi;
+ } infoframes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -963,6 +1065,12 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
+ /* enable pipe gamma? */
+ bool gamma_enable;
+
+ /* enable pipe csc? */
+ bool csc_enable;
+
/* Display Stream compression state */
struct {
bool compression_enable;
@@ -991,9 +1099,6 @@ struct intel_crtc {
struct intel_crtc_state *config;
- /* global reset count when the last flip was submitted */
- unsigned int reset_count;
-
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1262,11 +1367,15 @@ struct intel_digital_port {
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
+ void (*read_infoframe)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct intel_encoder *encoder,
+ u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1494,6 +1603,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
@@ -1521,85 +1631,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-/* intel_crt.c */
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
-void intel_crt_reset(struct drm_encoder *encoder);
-
-/* intel_ddi.c */
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
-bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
-
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
-u32 bxt_signal_levels(struct intel_dp *intel_dp);
-u32 ddi_signal_levels(struct intel_dp *intel_dp);
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
- u8 voltage_swing);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- bool enable);
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id);
-
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-
-/* intel_audio.c */
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void i915_audio_component_init(struct drm_i915_private *dev_priv);
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
-
-/* intel_cdclk.c */
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void skl_init_cdclk(struct drm_i915_private *dev_priv);
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void cnl_init_cdclk(struct drm_i915_private *dev_priv);
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_init_cdclk(struct drm_i915_private *dev_priv);
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void icl_init_cdclk(struct drm_i915_private *dev_priv);
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
-
/* intel_display.c */
+void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -1614,6 +1647,8 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -1740,7 +1775,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
@@ -1781,106 +1816,9 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
-/* intel_connector.c */
-int intel_connector_init(struct intel_connector *connector);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-void intel_connector_destroy(struct drm_connector *connector);
-int intel_connector_register(struct drm_connector *connector);
-void intel_connector_unregister(struct drm_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-enum pipe intel_connector_get_pipe(struct intel_connector *connector);
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-/* intel_csr.c */
-void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
-void intel_csr_ucode_fini(struct drm_i915_private *);
-void intel_csr_ucode_suspend(struct drm_i915_private *);
-void intel_csr_ucode_resume(struct drm_i915_private *);
-
-/* intel_dp.c */
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t dp_reg, enum port port,
- enum pipe *pipe);
-bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
- enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count,
- bool link_mst);
-int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count);
+/* intel_dp_link_training.c */
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-int intel_dp_retrain_link(struct intel_encoder *encoder,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
-void intel_dp_encoder_reset(struct drm_encoder *encoder);
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
-int intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
-void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
-int intel_dp_max_link_rate(struct intel_dp *intel_dp);
-int intel_dp_max_lane_count(struct intel_dp *intel_dp);
-int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
-void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-
-void
-intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp);
-void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp);
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
-void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
-bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1888,18 +1826,6 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
-static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
-bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-int intel_dp_link_required(int pixel_clock, int bpp);
-int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
-bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
-
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1915,100 +1841,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
-/* intel_dvo.c */
-void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
bool intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-/* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config_async(struct drm_device *dev);
-extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
-extern void intel_fbdev_restore_mode(struct drm_device *dev);
-#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-}
-
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-#endif
-
-/* intel_fbc.c */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
-
-/* intel_hdmi.c */
-void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
- enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-int intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
- bool high_tmds_clock_ratio,
- bool scrambling);
-void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
-/* intel_lvds.c */
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
-bool intel_is_dual_link_lvds(struct drm_device *dev);
-
/* intel_overlay.c */
void intel_overlay_setup(struct drm_i915_private *dev_priv);
void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
@@ -2019,86 +1856,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
-
-/* intel_panel.c */
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode,
- struct drm_display_mode *downclock_mode);
-void intel_panel_fini(struct intel_panel *panel);
-void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector,
- enum pipe pipe);
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-extern struct drm_display_mode *intel_find_panel_downclock(
- struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector);
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-int intel_backlight_device_register(struct intel_connector *connector);
-void intel_backlight_device_unregister(struct intel_connector *connector);
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static inline int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static inline void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-/* intel_hdcp.c */
-void intel_hdcp_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *old_state,
- struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
-int intel_hdcp_disable(struct intel_connector *connector);
-int intel_hdcp_check_link(struct intel_connector *connector);
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-
-/* intel_psr.c */
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
-void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state);
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
-void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value);
-bool intel_psr_enabled(struct intel_dp *intel_dp);
-
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2153,20 +1910,26 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *i915)
+assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
{
- WARN_ONCE(i915->runtime_pm.suspended,
+ WARN_ONCE(rpm->suspended,
"Device suspended during HW access\n");
}
static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
{
- assert_rpm_device_not_suspended(i915);
- WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
+ assert_rpm_device_not_suspended(rpm);
+ WARN_ONCE(!atomic_read(&rpm->wakeref_count),
"RPM wakelock ref not held during HW access");
}
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
+{
+ __assert_rpm_wakelock_held(&i915->runtime_pm);
+}
+
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @i915: i915 device instance
@@ -2242,101 +2005,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
-
-/* intel_pm.c */
-void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct intel_crtc *crtc);
-void intel_init_pm(struct drm_i915_private *dev_priv);
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
-int intel_enable_sagv(struct drm_i915_private *dev_priv);
-int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
-
-/* intel_sdvo.c */
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t sdvo_reg, enum pipe *pipe);
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
- i915_reg_t reg, enum port port);
-
-
-/* intel_sprite.c */
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs);
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane);
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-int intel_plane_check_stride(const struct intel_plane_state *plane_state);
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
-int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
- /* Don't need to do a gen check, these planes are only available on gen11 */
- if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
- return true;
-
- return false;
-}
-
-static inline bool icl_is_hdr_plane(struct intel_plane *plane)
-{
- if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
- return false;
-
- return plane->id < PLANE_SPRITE2;
-}
-
-/* intel_tv.c */
-void intel_tv_init(struct drm_i915_private *dev_priv);
-
/* intel_atomic.c */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2373,64 +2041,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-/* intel_atomic_plane.c */
-struct intel_plane *intel_plane_alloc(void);
-void intel_plane_free(struct intel_plane *plane);
-struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
-void intel_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *intel_state);
-
-/* intel_color.c */
-void intel_color_init(struct intel_crtc *crtc);
-int intel_color_check(struct intel_crtc_state *crtc_state);
-void intel_color_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
-
-/* intel_lspcon.c */
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
-void lspcon_resume(struct intel_lspcon *lspcon);
-void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
-void lspcon_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *buf, ssize_t len);
-void lspcon_set_infoframes(struct intel_encoder *encoder,
- bool enable,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
-void lspcon_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state);
-
-/* intel_pipe_crc.c */
-#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
-int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt);
-const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count);
-void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
-void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
-#else
-#define intel_crtc_set_crc_source NULL
-#define intel_crtc_verify_crc_source NULL
-#define intel_crtc_get_crc_sources NULL
-static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-
-static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-#endif
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index a9a19778dc7f..705a609050c0 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 06a11c35a784..3074448446bc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- if (!IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
-{
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (!mode)
- return 0;
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(&connector->base, mode);
-
- return 1;
-}
-
#define ICL_PREPARE_CNT_MAX 0x7
#define ICL_CLK_ZERO_CNT_MAX 0xf
#define ICL_TRAIL_CNT_MAX 0x7
@@ -890,7 +872,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_dphy_param_init(intel_dsi);
else
vlv_dphy_param_init(intel_dsi);
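
The intel_dsi_vbt.c hunks above replace IS_ICELAKE() platform checks with INTEL_GEN() >= 11 generation checks, so gen 12+ parts automatically take the new path. A standalone sketch of that idiom, using made-up device-info and helper names rather than the driver's real API:

	#include <stdio.h>

	/* Hypothetical stand-ins for the driver's device info and INTEL_GEN(). */
	struct fake_device_info { int gen; };
	#define FAKE_GEN(info) ((info)->gen)

	static void dphy_param_init(const struct fake_device_info *info)
	{
		/* Gate on the generation number: gen 11 and everything newer
		 * take the new path without needing a per-platform check. */
		if (FAKE_GEN(info) >= 11)
			puts("icl_dphy_param_init() path");
		else
			puts("vlv_dphy_param_init() path");
	}

	int main(void)
	{
		struct fake_device_info icl = { .gen = 11 }, vlv = { .gen = 7 };

		dphy_param_init(&icl);
		dphy_param_init(&vlv);
		return 0;
	}
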
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a6c82482a841..adef81c8cccb 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -24,14 +24,20 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
+
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
#include "dvo.h"
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_dvo.h"
+#include "intel_panel.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/intel_dvo.h
new file mode 100644
index 000000000000..3ed0fdf8efff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_H__
+#define __INTEL_DVO_H__
+
+struct drm_i915_private;
+
+void intel_dvo_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_DVO_H__ */
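
The new intel_dvo.h above follows a minimal per-component header pattern: SPDX tag, include guard, a forward declaration instead of pulling in i915_drv.h, and only the prototypes callers need. A hedged sketch of the same pattern applied to a hypothetical intel_foo component (the name and prototype are made up, not part of this patch):

	/* SPDX-License-Identifier: MIT */

	#ifndef __INTEL_FOO_H__
	#define __INTEL_FOO_H__

	/* Forward declaration keeps the header light; callers that need the
	 * full definition already include i915_drv.h themselves. */
	struct drm_i915_private;

	void intel_foo_init(struct drm_i915_private *dev_priv);

	#endif /* __INTEL_FOO_H__ */
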
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 49fa43ff02ba..eea9bec04f1b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = {
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
- unsigned int uabi_id;
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse gen order */
@@ -95,27 +94,24 @@ struct engine_info {
};
static const struct engine_info intel_engines[] = {
- [RCS] = {
- .hw_id = RCS_HW,
- .uabi_id = I915_EXEC_RENDER,
+ [RCS0] = {
+ .hw_id = RCS0_HW,
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 1, .base = RENDER_RING_BASE }
},
},
- [BCS] = {
- .hw_id = BCS_HW,
- .uabi_id = I915_EXEC_BLT,
+ [BCS0] = {
+ .hw_id = BCS0_HW,
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 6, .base = BLT_RING_BASE }
},
},
- [VCS] = {
- .hw_id = VCS_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS0] = {
+ .hw_id = VCS0_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
@@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 4, .base = BSD_RING_BASE }
},
},
- [VCS2] = {
- .hw_id = VCS2_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS1] = {
+ .hw_id = VCS1_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
@@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = {
{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
},
},
- [VCS3] = {
- .hw_id = VCS3_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS2] = {
+ .hw_id = VCS2_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
},
},
- [VCS4] = {
- .hw_id = VCS4_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS3] = {
+ .hw_id = VCS3_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
},
},
- [VECS] = {
- .hw_id = VECS_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS0] = {
+ .hw_id = VECS0_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
@@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 7, .base = VEBOX_RING_BASE }
},
},
- [VECS2] = {
- .hw_id = VECS2_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS1] = {
+ .hw_id = VECS1_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
@@ -264,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
- struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t hwstam;
-
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
- if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+ if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
- hwstam = RING_HWSTAM(engine->mmio_base);
- if (INTEL_GEN(dev_priv) >= 3)
- I915_WRITE(hwstam, mask);
+ if (INTEL_GEN(engine->i915) >= 3)
+ ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
- I915_WRITE16(hwstam, mask);
+ ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@@ -313,15 +300,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (!engine)
return -ENOMEM;
+ BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+
engine->id = id;
+ engine->mask = BIT(id);
engine->i915 = dev_priv;
+ engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
- engine->uabi_id = info->uabi_id;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
@@ -355,15 +345,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
+ const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
unsigned int i;
int err;
- WARN_ON(ring_mask == 0);
- WARN_ON(ring_mask &
+ WARN_ON(engine_mask == 0);
+ WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
@@ -377,7 +367,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
if (err)
goto cleanup;
- mask |= ENGINE_MASK(i);
+ mask |= BIT(i);
}
/*
@@ -385,16 +375,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != ring_mask))
- device_info->ring_mask = mask;
+ if (WARN_ON(mask != engine_mask))
+ device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
+ if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
err = -ENODEV;
goto cleanup;
}
- RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
+ RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@@ -455,12 +445,6 @@ cleanup:
return err;
}
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
-}
-
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -541,9 +525,7 @@ static int init_status_page(struct intel_engine_cs *engine)
return PTR_ERR(obj);
}
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -594,7 +576,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
err = i915_timeline_init(engine->i915,
&engine->timeline,
- engine->name,
engine->status_page.vma);
if (err)
goto err_hwsp;
@@ -614,10 +595,44 @@ err_hwsp:
return err;
}
-static void __intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
- intel_context_unpin(to_intel_context(ctx, engine));
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(PREEMPTION, PREEMPTION),
+ MAP(SEMAPHORES, SEMAPHORES),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_engine(engine, i915, id) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
}
struct measure_breadcrumb {
@@ -639,7 +654,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
return -ENOMEM;
if (i915_timeline_init(engine->i915,
- &frame->timeline, "measure",
+ &frame->timeline,
engine->status_page.vma))
goto out_frame;
@@ -670,6 +685,20 @@ out_frame:
return dw;
}
+static int pin_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context **out)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ *out = ce;
+ return 0;
+}
+
/**
* intel_engine_init_common - initialize engine state which might require hw access

* @engine: Engine to initialize.
@@ -684,11 +713,8 @@ out_frame:
int intel_engine_init_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
int ret;
- engine->set_default_submission(engine);
-
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -696,39 +722,61 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ce = intel_context_pin(i915->kernel_context, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ ret = pin_context(i915->kernel_context, engine,
+ &engine->kernel_context);
+ if (ret)
+ return ret;
/*
* Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time.
+ * we can interrupt the engine at any time. However, as preemption
+ * is optional, we allow it to fail.
*/
- if (i915->preempt_context) {
- ce = intel_context_pin(i915->preempt_context, engine);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto err_unpin_kernel;
- }
- }
+ if (i915->preempt_context)
+ pin_context(i915->preempt_context, engine,
+ &engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
- goto err_unpin_preempt;
+ goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
- return 0;
+ engine->set_default_submission(engine);
-err_unpin_preempt:
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
+ return 0;
-err_unpin_kernel:
- __intel_context_unpin(i915->kernel_context, engine);
+err_unpin:
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
return ret;
}
+void intel_gt_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ ce = engine->kernel_context;
+ if (ce)
+ ce->ops->reset(ce);
+
+ ce = engine->preempt_context;
+ if (ce)
+ ce->ops->reset(ce);
+ }
+}
+
/**
* intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
@@ -738,8 +786,6 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -749,9 +795,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
- __intel_context_unpin(i915->kernel_context, engine);
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
i915_timeline_fini(&engine->timeline);
@@ -762,50 +808,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+
u64 acthd;
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ if (INTEL_GEN(i915) >= 8)
+ acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+ else if (INTEL_GEN(i915) >= 4)
+ acthd = ENGINE_READ(engine, RING_ACTHD);
else
- acthd = I915_READ(ACTHD);
+ acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
- if (INTEL_GEN(dev_priv) >= 8)
- bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
- RING_BBADDR_UDW(engine->mmio_base));
+ if (INTEL_GEN(engine->i915) >= 8)
+ bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
- bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
const i915_reg_t mode = RING_MI_MODE(base);
int err;
- if (INTEL_GEN(dev_priv) < 3)
+ if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
1000, 0,
NULL)) {
@@ -814,19 +858,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
}
/* A final mmio read to let GPU writes be hopefully flushed to memory */
- POSTING_READ_FW(mode);
+ intel_uncore_posting_read_fw(uncore, mode);
return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
- _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@@ -863,6 +904,7 @@ static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@@ -884,33 +926,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = I915_READ_FW(reg);
+ ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
@@ -920,6 +962,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
@@ -928,12 +971,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
switch (INTEL_GEN(dev_priv)) {
default:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
@@ -944,28 +989,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
}
break;
case 7:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
- instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] =
+ intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] =
+ intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
-
- if (engine->id == RCS)
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+ if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
- instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN4_INSTDONE1);
break;
case 3:
case 2:
- instdone->instdone = I915_READ(GEN2_INSTDONE);
+ instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
break;
}
}
@@ -985,12 +1035,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* First check that no commands are left in the ring */
- if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
- (I915_READ_TAIL(engine) & TAIL_ADDR))
+ if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ if (INTEL_GEN(dev_priv) > 2 &&
+ !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv, wakeref);
@@ -1007,16 +1058,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(engine->i915))
return true;
- /* Any inflight/incomplete requests? */
- if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
- return false;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -1045,7 +1090,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+bool intel_engines_are_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1054,10 +1099,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(i915))
return true;
- for_each_engine(engine, dev_priv, id) {
+ /* Already parked (and passed an idleness test); must still be idle */
+ if (!READ_ONCE(i915->gt.awake))
+ return true;
+
+ for_each_engine(engine, i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1065,34 +1114,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
- const struct intel_context *kernel_context =
- to_intel_context(engine->i915->kernel_context, engine);
- struct i915_request *rq;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- /*
- * Check the last context seen by the engine. If active, it will be
- * the last request that remains in the timeline. When idle, it is
- * the last executed context as tracked by retirement.
- */
- rq = __i915_active_request_peek(&engine->timeline.last_request);
- if (rq)
- return rq->hw_context == kernel_context;
- else
- return engine->last_retired_context == kernel_context;
-}
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -1180,6 +1201,8 @@ void intel_engines_park(struct drm_i915_private *i915)
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
+
+ i915->gt.active_engines = 0;
}
/**
@@ -1283,15 +1306,14 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
+ drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
- rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
- rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@@ -1334,25 +1356,26 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
- if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
- drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
+ if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
- I915_READ(RING_START(engine->mmio_base)));
+ ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
- I915_READ(RING_MI_MODE(engine->mmio_base)),
- I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+ ENGINE_READ(engine, RING_MI_MODE),
+ ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ drm_printf(m, "\tRING_IMR: %08x\n",
+ ENGINE_READ(engine, RING_IMR));
}
addr = intel_engine_get_active_head(engine);
@@ -1362,57 +1385,53 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 8)
- addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
- RING_DMA_FADD_UDW(engine->mmio_base));
+ addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ addr = ENGINE_READ(engine, RING_DMA_FADD);
else
- addr = I915_READ(DMA_FADD_I8XX);
+ addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
- I915_READ(RING_IPEIR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
- I915_READ(RING_IPEHR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEHR));
} else {
- drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
- drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+ const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
- I915_READ(RING_EXECLIST_STATUS_LO(engine)),
- I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ num_entries);
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
read, write,
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
- if (read >= GEN8_CSB_ENTRIES)
+ if (read >= num_entries)
read = 0;
- if (write >= GEN8_CSB_ENTRIES)
+ if (write >= num_entries)
write = 0;
if (read > write)
- write += GEN8_CSB_ENTRIES;
+ write += num_entries;
while (read < write) {
- idx = ++read % GEN8_CSB_ENTRIES;
- drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
- idx,
- hws[idx * 2],
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
- hws[idx * 2 + 1],
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ idx = ++read % num_entries;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx, hws[idx * 2], hws[idx * 2 + 1]);
}
rcu_read_lock();
@@ -1425,10 +1444,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
+ "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset);
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1438,11 +1458,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}
@@ -1495,13 +1515,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_terminally_wedged(&engine->i915->gpu_error))
+ if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
+ drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
+ engine->hangcheck.last_seqno,
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@@ -1521,7 +1540,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
- rq = i915_gem_find_active_request(engine);
+ rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1688,6 +1707,50 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static bool match_ring(struct i915_request *rq)
+{
+ u32 ring = ENGINE_READ(rq->engine, RING_START);
+
+ return ring == i915_ggtt_offset(rq->ring->vma);
+}
+
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *request, *active = NULL;
+ unsigned long flags;
+
+ /*
+ * We are called by error capture, by reset and when dumping engine
+ * state at random points in time. In particular, note that none of these
+ * is crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ * At all other times, we must assume the GPU is still running, but
+ * we only care about the snapshot of this moment.
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ list_for_each_entry(request, &engine->timeline.requests, link) {
+ if (i915_request_completed(request))
+ continue;
+
+ if (!i915_request_started(request))
+ break;
+
+ /* More than one preemptible request may match! */
+ if (!match_ring(request))
+ break;
+
+ active = request;
+ break;
+ }
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
new file mode 100644
index 000000000000..1f970c76b6a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -0,0 +1,546 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_TYPES__
+#define __INTEL_ENGINE_TYPES__
+
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "i915_gem.h"
+#include "i915_priolist_types.h"
+#include "i915_selftest.h"
+#include "i915_timeline_types.h"
+#include "intel_workarounds_types.h"
+
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
+
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 8
+
+#define I915_CMD_HASH_ORDER 9
+
+struct dma_fence;
+struct drm_i915_reg_table;
+struct i915_gem_context;
+struct i915_request;
+struct i915_sched_attr;
+struct intel_uncore;
+
+typedef u8 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+
+struct intel_hw_status_page {
+ struct i915_vma *vma;
+ u32 *addr;
+};
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
+struct intel_engine_hangcheck {
+ u64 acthd;
+ u32 last_seqno;
+ u32 next_seqno;
+ unsigned long action_timestamp;
+ struct intel_instdone instdone;
+};
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ struct i915_timeline *timeline;
+ struct list_head request_list;
+ struct list_head active_link;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+/*
+ * we use a single page to load ctx workarounds so all of these
+ * values are referred in terms of dwords
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies batch starting position, also helpful in case
+ * if we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct i915_vma *vma;
+};
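A small hedged illustration (not from the patch): since offset and size above are kept in dwords, converting them to byte quantities within the single workaround page is just a multiply by sizeof(u32). The helper names are hypothetical.

static inline u32 i915_wa_bb_offset_in_bytes(const struct i915_wa_ctx_bb *bb)
{
	return bb->offset * sizeof(u32);	/* dwords -> bytes */
}

static inline u32 i915_wa_bb_size_in_bytes(const struct i915_wa_ctx_bb *bb)
{
	return bb->size * sizeof(u32);		/* dwords -> bytes */
}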
+
+#define I915_MAX_VCS 4
+#define I915_MAX_VECS 2
+
+/*
+ * Engine IDs definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+ RCS0 = 0,
+ BCS0,
+ VCS0,
+ VCS1,
+ VCS2,
+ VCS3,
+#define _VCS(n) (VCS0 + (n))
+ VECS0,
+ VECS1,
+#define _VECS(n) (VECS0 + (n))
+ I915_NUM_ENGINES
+};
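A brief hedged example of how the _VCS()/_VECS() helpers map an engine instance number onto the ids above (e.g. _VCS(0) == VCS0, _VCS(3) == VCS3, _VECS(1) == VECS1); the loop body is a placeholder.

static void example_walk_vcs_ids(void)
{
	int i;

	for (i = 0; i < I915_MAX_VCS; i++) {
		enum intel_engine_id id = _VCS(i);

		/* A real caller would look up dev_priv->engine[id] here. */
		(void)id;
	}
}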
+
+struct st_preempt_hang {
+ struct completion completion;
+ unsigned int count;
+ bool inject_hang;
+};
+
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @tasklet: softirq tasklet for bottom handler
+ */
+ struct tasklet_struct tasklet;
+
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
+ /**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+ /**
+ * @port: execlist port states
+ *
+ * For each hardware ELSP (ExecList Submission Port) we keep
+ * track of the last request and the number of times we submitted
+ * that port to hw. We then count the number of times the hw reports
+ * a context completion or preemption. As only one context can
+ * be active on hw, we limit resubmission of a context to port[0]. This
+ * is called a Lite Restore of the context.
+ */
+ struct execlist_port {
+ /**
+ * @request_count: combined request and submission count
+ */
+ struct i915_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
+ /**
+ * @context_id: context ID for port
+ */
+ GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @active: is the HW active? We consider the HW as active after
+ * submitting any context for execution and until we have seen the
+ * last context completion event. After that, we do not expect any
+ * more events until we submit, and so can park the HW.
+ *
+ * As we have a small number of different sources from which we feed
+ * the HW, we track the state of each inside a single bitfield.
+ */
+ unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @queue_priority_hint: Highest pending priority.
+ *
+ * When we add requests into the queue, or adjust the priority of
+ * executing requests, we compute the maximum priority of those
+ * pending requests. We can then use this value to determine if
+ * we need to preempt the executing requests to service the queue.
+ * However, since we may have recorded the priority of an in-flight
+ * request we wanted to preempt but which has since completed, at the
+ * time of dequeuing the priority hint may no longer match the highest
+ * available request priority.
+ */
+ int queue_priority_hint;
+
+ /**
+ * @queue: queue of requests, in priority lists
+ */
+ struct rb_root_cached queue;
+
+ /**
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
+ */
+ u32 *csb_write;
+
+ /**
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
+ */
+ u32 *csb_status;
+
+ /**
+ * @preempt_complete_status: expected CSB upon completing preemption
+ */
+ u32 preempt_complete_status;
+
+ /**
+ * @csb_size: context status buffer FIFO size
+ */
+ u8 csb_size;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
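Hedged sketch of how the port_*() macros and queue_priority_hint above are meant to be used, mirroring the intent of the comments rather than the exact driver code; the function names are illustrative.

static inline void example_port_resubmit(struct execlist_port *port)
{
	struct i915_request *rq = port_request(port);
	unsigned int count = port_count(port);

	/* Lite Restore: the same context submitted again, bump the count. */
	port_set(port, port_pack(rq, count + 1));
}

static inline bool example_need_preempt(const struct intel_engine_execlists *el,
					int new_prio)
{
	/* Preempt only if the incoming work outranks everything queued. */
	return new_prio > el->queue_priority_hint;
}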
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ char name[INTEL_ENGINE_CS_MAX_NAME];
+
+ enum intel_engine_id id;
+ unsigned int hw_id;
+ unsigned int guc_id;
+ intel_engine_mask_t mask;
+
+ u8 uabi_class;
+
+ u8 class;
+ u8 instance;
+ u32 context_size;
+ u32 mmio_base;
+
+ struct intel_ring *buffer;
+
+ struct i915_timeline timeline;
+
+ struct intel_context *kernel_context; /* pinned */
+ struct intel_context *preempt_context; /* pinned; optional */
+
+ struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
+
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ spinlock_t irq_lock;
+ struct list_head signalers;
+
+ struct irq_work irq_work; /* for use from inside irq_lock */
+
+ unsigned int irq_enabled;
+
+ bool irq_armed;
+ } breadcrumbs;
+
+ struct intel_engine_pmu {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
+ } pmu;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
+ struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
+
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *engine);
+ void (*irq_disable)(struct intel_engine_cs *engine);
+
+ int (*init_hw)(struct intel_engine_cs *engine);
+
+ struct {
+ void (*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine, bool stalled);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
+
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
+ void (*set_default_submission)(struct intel_engine_cs *engine);
+
+ const struct intel_context_ops *cops;
+
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
+
+ int (*emit_flush)(struct i915_request *request, u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+ int (*emit_init_breadcrumb)(struct i915_request *rq);
+ u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
+ u32 *cs);
+ unsigned int emit_fini_breadcrumb_dw;
+
+ /* Pass the request to the hardware queue (e.g. directly into
+ * the legacy ringbuffer or to the end of an execlist).
+ *
+ * This is called from an atomic context with irqs disabled; must
+ * be irq safe.
+ */
+ void (*submit_request)(struct i915_request *rq);
+
+ /*
+ * Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ */
+ void (*schedule)(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+ /*
+ * Cancel all requests on the hardware, or queued for execution.
+ * This should only cancel the ready requests that have been
+ * submitted to the engine (via the engine->submit_request callback).
+ * This is called when marking the device as wedged.
+ */
+ void (*cancel_requests)(struct intel_engine_cs *engine);
+
+ void (*cleanup)(struct intel_engine_cs *engine);
+
+ struct intel_engine_execlists execlists;
+
+ /* Contexts are pinned whilst they are active on the GPU. The last
+ * context executed remains active whilst the GPU is idle - the
+ * switch away and write to the context object only occurs on the
+ * next execution. Contexts are only unpinned on retirement of the
+ * following request ensuring that we can always write to the object
+ * on the context switch even after idling. Across suspend, we switch
+ * to the kernel context and trash it as the save may not happen
+ * before the hardware is powered down.
+ */
+ struct intel_context *last_retired_context;
+
+ /* status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
+ struct intel_engine_hangcheck hangcheck;
+
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
+#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+ unsigned int flags;
+
+ /*
+ * Table of commands the command parser needs to know about
+ * for this engine.
+ */
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the engine's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-engine length
+ * field encoding for the command (i.e. different opcode ranges use
+ * certain bits to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqlock_t lock;
+ /**
+ * @enabled: Reference count indicating number of listeners.
+ */
+ unsigned int enabled;
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+ /**
+ * @enabled_at: Timestamp when busy stats were enabled.
+ */
+ ktime_t enabled_at;
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where the engine is currently busy (active > 0).
+ */
+ ktime_t total;
+ } stats;
+};
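Hedged sketch of reading out the busyness accounting described in the stats comments above: total holds the completed busy periods, and the currently running block (when active > 0) is added on top. A real reader would also sample under stats.lock (the seqlock); the function name is illustrative.

static ktime_t example_engine_busy_time(const struct intel_engine_cs *engine,
					ktime_t now)
{
	ktime_t total = engine->stats.total;

	/* Add the in-progress busy block, if the engine is non-idle. */
	if (engine->stats.active)
		total = ktime_add(total, ktime_sub(now, engine->stats.start));

	return total;
}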
+
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool
+intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+}
+
+static inline bool
+intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+}
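A short hedged example of how the feature-flag helpers above are typically consulted before relying on an engine capability; the surrounding logic is a placeholder.

static void example_check_engine_caps(struct intel_engine_cs *engine)
{
	if (intel_engine_has_preemption(engine))
		pr_debug("%s: preemption available\n", engine->name);

	if (intel_engine_supports_stats(engine))
		pr_debug("%s: busy-stats sampling available\n", engine->name);
}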
+
+#define instdone_slice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
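Hedged sketch showing the iterator above walking the per-slice/subslice INSTDONE samples captured in struct intel_instdone; the function name and the pr_debug() reporting are placeholders.

static void example_dump_instdone(struct drm_i915_private *dev_priv,
				  const struct intel_instdone *instdone)
{
	int slice, subslice;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		pr_debug("sampler[%d][%d]: 0x%08x row[%d][%d]: 0x%08x\n",
			 slice, subslice, instdone->sampler[slice][subslice],
			 slice, subslice, instdone->row[slice][subslice]);
}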
+#endif /* __INTEL_ENGINE_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 656e684e7c9a..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -40,8 +40,10 @@
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
@@ -108,7 +110,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
FBC_STATUS, FBC_STAT_COMPRESSING, 0,
10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
@@ -1278,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+ if (IS_GEMINILAKE(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/intel_fbc.h
new file mode 100644
index 000000000000..50272eda8d43
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBC_H__
+#define __INTEL_FBC_H__
+
+#include <linux/types.h>
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_post_update(struct intel_crtc *crtc);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 376ffe842e26..89db71996148 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -25,26 +25,27 @@
*/
#include <linux/async.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/tty.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
@@ -235,12 +236,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- info->par = helper;
-
ifbdev->helper.fb = fb;
- strcpy(info->fix.id, "inteldrmfb");
-
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -259,11 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* This driver doesn't need a VT switch to restore the mode on resume */
- info->skip_vt_switch = true;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,223 +285,7 @@ out_unlock:
return ret;
}
-static struct drm_fb_helper_crtc *
-intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
-{
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
- return &fb_helper->crtc_info[i];
-
- return NULL;
-}
-
-/*
- * Try to read the BIOS display configuration and use it for the initial
- * fb configuration.
- *
- * The BIOS or boot loader will generally create an initial display
- * configuration for us that includes some set of active pipes and displays.
- * This routine tries to figure out which pipes and connectors are active
- * and stuffs them into the crtcs and modes array given to us by the
- * drm_fb_helper code.
- *
- * The overall sequence is:
- * intel_fbdev_init - from driver load
- * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
- * drm_fb_helper_init - build fb helper structs
- * drm_fb_helper_single_add_all_connectors - more fb helper structs
- * intel_fbdev_initial_config - apply the config
- * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
- * drm_setup_crtcs - build crtc config for fbdev
- * intel_fb_initial_config - find active connectors etc
- * drm_fb_helper_single_fb_probe - set up fbdev
- * intelfb_create - re-use or alloc fb, build out fbdev structs
- *
- * Note that we don't make special consideration whether we could actually
- * switch to the selected modes without a full modeset. E.g. when the display
- * is in VGA mode we need to recalculate watermarks and set a new high-res
- * framebuffer anyway.
- */
-static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
- unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
- int i, j;
- bool *save_enabled;
- bool fallback = true, ret = true;
- int num_connectors_enabled = 0;
- int num_connectors_detected = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
- if (!save_enabled)
- return false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0)
- drm_modeset_backoff(&ctx);
-
- memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
- conn_configured = 0;
-retry:
- conn_seq = conn_configured;
- for (i = 0; i < count; i++) {
- struct drm_fb_helper_connector *fb_conn;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *new_crtc;
-
- fb_conn = fb_helper->connector_info[i];
- connector = fb_conn->connector;
-
- if (conn_configured & BIT(i))
- continue;
-
- if (conn_seq == 0 && !connector->has_tile)
- continue;
-
- if (connector->status == connector_status_connected)
- num_connectors_detected++;
-
- if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
- conn_configured |= BIT(i);
- continue;
- }
-
- if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
- enabled[i] = false;
- continue;
- }
-
- encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
- if (connector->force > DRM_FORCE_OFF)
- goto bail;
-
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
- enabled[i] = false;
- conn_configured |= BIT(i);
- continue;
- }
-
- num_connectors_enabled++;
-
- new_crtc = intel_fb_helper_crtc(fb_helper,
- connector->state->crtc);
-
- /*
- * Make sure we're not trying to drive multiple connectors
- * with a single CRTC, since our cloning support may not
- * match the BIOS.
- */
- for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
- goto bail;
- }
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn);
-
- /* try for preferred next */
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
- modes[i] = drm_has_preferred_mode(fb_conn, width,
- height);
- }
-
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
- modes[i] = list_first_entry(&connector->modes,
- struct drm_display_mode,
- head);
- }
-
- /* last resort: use current mode */
- if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly. crtc_state->mode has
- * I915_MODE_FLAG_INHERITED, which we clear to force check
- * state.
- */
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
- modes[i] = &connector->state->crtc->mode;
- }
- crtcs[i] = new_crtc;
-
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
-
- fallback = false;
- conn_configured |= BIT(i);
- }
-
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
- goto retry;
-
- /*
- * If the BIOS didn't enable everything it could, fall back to have the
- * same user experiencing of lighting up as much as possible like the
- * fbdev helper library.
- */
- if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
- fallback = true;
- }
-
- if (fallback) {
-bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, count);
- ret = false;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- kfree(save_enabled);
- return ret;
-}
-
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .initial_config = intel_fb_initial_config,
.fb_probe = intelfb_create,
};
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/intel_fbdev.h
new file mode 100644
index 000000000000..de7c84250eb5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbdev.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_H__
+#define __INTEL_FBDEV_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_i915_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_init(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+void intel_fbdev_output_poll_changed(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index f33de4be4b89..74c8b0528294 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
/**
* DOC: fifo underrun handling
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 16f253deaf8d..aa34e33b6087 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -61,9 +61,12 @@
*/
+#include "i915_drv.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-#include "i915_drv.h"
+#include "intel_psr.h"
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 63cd9a753a72..d5894666f658 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -24,9 +24,19 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
+#include "i915_gem_object.h"
+
struct drm_i915_private;
struct drm_i915_gem_object;
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
+};
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
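Hedged sketch (not part of the patch) tying the new enum fb_op_origin to the FBC tracking calls it tags: a CPU write path would invalidate before touching the pixels and flush once done. The helper name and frontbuffer_bits variable are illustrative.

static void example_cpu_frontbuffer_write(struct drm_i915_private *dev_priv,
					  unsigned int frontbuffer_bits)
{
	/* Tell FBC the frontbuffer is about to be written by the CPU. */
	intel_fbc_invalidate(dev_priv, frontbuffer_bits, ORIGIN_CPU);

	/* ... CPU writes to the frontbuffer happen here ... */

	/* Writes are done; let FBC recompress/re-enable as appropriate. */
	intel_fbc_flush(dev_priv, frontbuffer_bits, ORIGIN_CPU);
}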
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
index b96a31bc1080..a34ece53a771 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -105,8 +105,13 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
-#define MI_SEMAPHORE_POLL (1<<15)
-#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
+#define MI_SEMAPHORE_POLL (1 << 15)
+#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
+#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
+#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
+#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
+#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
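Hedged illustration of the new comparison encodings: a Gen8+ polling semaphore wait emitted into a command stream, waiting until the dword at addr is >= value. The cs/value/addr parameters and the exact dword layout are assumptions for illustration, not taken from this patch.

static u32 *example_emit_semaphore_wait_gte(u32 *cs, u32 value, u64 addr)
{
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_GTE_SDD;
	*cs++ = value;			/* semaphore data dword to compare */
	*cs++ = lower_32_bits(addr);	/* semaphore address, low */
	*cs++ = upper_32_bits(addr);	/* semaphore address, high */

	return cs;
}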
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8660af3fd755..3aabfa2d9198 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
+ if (HAS_GUC_CT(dev_priv)) {
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+ }
+
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
return 0;
+err_ads:
+ intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
@@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
i915_ggtt_disable_guc(dev_priv);
+
+ if (HAS_GUC_CT(dev_priv))
+ intel_guc_ct_fini(&guc->ct);
+
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
@@ -357,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc)
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
I915_WRITE(SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -386,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 status;
int i;
int ret;
@@ -402,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
for (i = 0; i < len; i++)
- I915_WRITE(guc_send_reg(guc, i), action[i]);
+ intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
- POSTING_READ(guc_send_reg(guc, i - 1));
+ intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
intel_guc_notify(guc);
@@ -415,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
* No GuC command should ever take longer than 10ms.
* Fast commands should still complete in 10us.
*/
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
INTEL_GUC_MSG_TYPE_MASK,
INTEL_GUC_MSG_TYPE_RESPONSE <<
@@ -442,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = INTEL_GUC_MSG_TO_DATA(status);
out:
- intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
@@ -472,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
spin_unlock(&guc->irq_lock);
enable_rpm_wakeref_asserts(dev_priv);
- intel_guc_to_host_process_recv_msg(guc, msg);
+ intel_guc_to_host_process_recv_msg(guc, &msg, 1);
}
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len)
{
+ u32 msg;
+
+ if (unlikely(!len))
+ return -EPROTO;
+
/* Make sure to handle only enabled messages */
- msg &= guc->msg_enabled_mask;
+ msg = payload[0] & guc->msg_enabled_mask;
if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
intel_guc_log_handle_flush_event(&guc->log);
+
+ return 0;
}
int intel_guc_sample_forcewake(struct intel_guc *guc)
@@ -544,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc,
if (ret)
return ret;
- ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 744220296653..2c59ff8d9f39 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -32,6 +32,7 @@
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
+#include "i915_utils.h"
#include "i915_vma.h"
struct guc_preempt_work {
@@ -164,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index f0db62887f50..bec62f34b15a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
- kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
- dev_priv->engine[RCS])->state;
+ kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
blob->ads.golden_context_lrca =
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a52883e9146f..dde1dc0d6e69 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
return err;
}
-static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
-{
- return ctch->vma != NULL;
-}
-
static int ctch_init(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
@@ -214,25 +209,21 @@ err_out:
static void ctch_fini(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
+ GEM_BUG_ON(ctch->enabled);
+
i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_open(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static int ctch_enable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
u32 base;
int err;
int i;
- CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
- ctch->owner, yesno(ctch_is_open(ctch)));
+ GEM_BUG_ON(!ctch->vma);
- if (!ctch->vma) {
- err = ctch_init(guc, ctch);
- if (unlikely(err))
- goto err_out;
- GEM_BUG_ON(!ctch->vma);
- }
+ GEM_BUG_ON(ctch->enabled);
/* vma should be already allocated and map'ed */
base = intel_guc_ggtt_offset(guc, ctch->vma);
@@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc,
base + PAGE_SIZE/4 * CTB_RECV,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
- goto err_fini;
+ goto err_out;
err = guc_action_register_ct_buffer(guc,
base + PAGE_SIZE/4 * CTB_SEND,
@@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
+ ctch->enabled = true;
+
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
-err_fini:
- ctch_fini(guc, ctch);
err_out:
DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
return err;
}
-static void ctch_close(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static void ctch_disable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
+
+ ctch->enabled = false;
guc_action_deregister_ct_buffer(guc,
ctch->owner,
@@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc,
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
- ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
@@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct,
u32 fence;
int err;
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
@@ -709,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct,
u32 action, u32 len, const u32 *payload)
{
struct intel_guc *guc = ct_to_guc(ct);
+ int ret;
CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
- if (unlikely(len < 1))
+ ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
+ if (unlikely(ret))
goto fail_unexpected;
- intel_guc_to_host_process_recv_msg(guc, *payload);
break;
default:
@@ -817,7 +810,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
do {
@@ -849,6 +842,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
}
/**
+ * intel_guc_ct_init - Init CT communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ int err;
+
+ err = ctch_init(guc, ctch);
+ if (unlikely(err)) {
+ DRM_ERROR("CT: can't open channel %d; err=%d\n",
+ ctch->owner, err);
+ return err;
+ }
+
+ GEM_BUG_ON(!ctch->vma);
+ return 0;
+}
+
+/**
+ * intel_guc_ct_fini - Fini CT communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+
+ ctch_fini(guc, ctch);
+}
+
+/**
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
@@ -865,7 +903,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- err = ctch_open(guc, ctch);
+ if (ctch->enabled)
+ return 0;
+
+ err = ctch_enable(guc, ctch);
if (unlikely(err))
return err;
@@ -890,10 +931,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
- ctch_close(guc, ctch);
+ ctch_disable(guc, ctch);
/* Disable send */
guc->send = intel_guc_send_nop;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index d774895ab143..f5e7f0663304 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -66,6 +66,7 @@ struct intel_guc_ct_channel {
struct intel_guc_ct_buffer ctbs[2];
u32 owner;
u32 next_fence;
+ bool enabled;
};
/** Holds all command transport channels.
@@ -90,6 +91,8 @@ struct intel_guc_ct {
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 13ff7003c6be..792a551450c7 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
guc_prepare_xfer(guc);
@@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
ret = guc_xfer_ucode(guc, vma);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 806fdfd7c78a..7146524264dd 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
guc_log_disable_flush_events(log);
+ synchronize_irq(i915->drm.irq);
+
flush_work(&log->relay.flush_work);
mutex_lock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8bc8aa54aa35..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
*/
#include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
@@ -382,7 +381,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -393,7 +392,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- if (!ce->state)
+ if (!ce || !ce->state)
break; /* XXX: continue? */
/*
@@ -535,7 +534,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring_tail, rq->global_seqno);
+ ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
@@ -567,7 +566,7 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = to_intel_context(client->owner, engine);
+ struct intel_context *ce = engine->preempt_context;
u32 data[7];
if (!ce->ring->emit) { /* recreate upon load/resume */
@@ -575,7 +574,7 @@ static void inject_preempt_context(struct work_struct *work)
u32 *cs;
cs = ce->ring->vaddr;
- if (engine->id == RCS) {
+ if (engine->class == RENDER_CLASS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr,
@@ -583,7 +582,8 @@ static void inject_preempt_context(struct work_struct *work)
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
- addr);
+ addr,
+ 0);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
@@ -649,9 +649,10 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
struct guc_ctx_report *report =
&data->preempt_ctx_report[engine->guc_id];
- WARN_ON(wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+ if (wait_for_atomic(report->report_return_status ==
+ INTEL_GUC_REPORT_STATUS_COMPLETE,
+ GUC_PREEMPT_POSTPROCESS_DELAY_MS))
+ DRM_ERROR("Timed out waiting for GuC preemption report\n");
/*
* GuC is expecting that we're also going to clear the affected context
* counter, let's also reset the return status to not depend on GuC
@@ -720,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq)
static inline int port_prio(const struct execlist_port *port)
{
- return rq_prio(port_request(port));
+ return rq_prio(port_request(port)) | __NO_PREEMPTION;
}
static bool __guc_dequeue(struct intel_engine_cs *engine)
@@ -781,8 +782,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
execlists->queue_priority_hint =
@@ -871,6 +871,104 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
flush_workqueue(engine->i915->guc.preempt_wq);
}
+static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ execlists_cancel_port_requests(execlists);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rq = execlists_unwind_incomplete_requests(execlists);
+ if (!rq)
+ goto out_unlock;
+
+ if (!i915_request_started(rq))
+ stalled = false;
+
+ i915_reset_request(rq, stalled);
+ intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+
+out_unlock:
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ execlists_cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
+
+ GEM_TRACE("%s: depth->%d\n", engine->name,
+ atomic_read(&execlists->tasklet.count));
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -1031,7 +1129,7 @@ static int guc_clients_create(struct intel_guc *guc)
GEM_BUG_ON(guc->preempt_client);
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_NORMAL,
dev_priv->kernel_context);
if (IS_ERR(client)) {
@@ -1042,7 +1140,7 @@ static int guc_clients_create(struct intel_guc *guc)
if (dev_priv->preempt_context) {
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_HIGH,
dev_priv->preempt_context);
if (IS_ERR(client)) {
@@ -1262,10 +1360,12 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
intel_engine_unpin_breadcrumbs_irq(engine);
+ engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
static void guc_submission_unpark(struct intel_engine_cs *engine)
{
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
intel_engine_pin_breadcrumbs_irq(engine);
}
@@ -1290,6 +1390,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = guc_submission_unpark;
engine->reset.prepare = guc_reset_prepare;
+ engine->reset.reset = guc_reset;
+ engine->reset.finish = guc_reset_finish;
+
+ engine->cancel_requests = guc_cancel_requests;
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index 169c54568340..aa5e6749c925 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -29,6 +29,7 @@
#include "i915_gem.h"
#include "i915_selftest.h"
+#include "intel_engine_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index a219c796e56d..3d51ed1428d4 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return true;
intel_engine_get_instdone(engine, &instdone);
@@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- tmp = I915_READ_CTL(engine);
+ tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, BIT(engine->id), 0,
+ i915_handle_error(dev_priv, engine->mask, 0,
"stuck wait on %s", engine->name);
- I915_WRITE_CTL(engine, tmp);
+ ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
- hc->seqno = intel_engine_get_seqno(engine);
+ hc->seqno = intel_engine_get_hangcheck_seqno(engine);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.seqno = hc->seqno;
+ engine->hangcheck.last_seqno = hc->seqno;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
- if (engine->hangcheck.seqno != hc->seqno)
+ if (engine->hangcheck.last_seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
if (intel_engine_is_idle(engine))
@@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
unsigned int stuck)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp;
char msg[80];
- unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
@@ -263,14 +263,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
for_each_engine(engine, dev_priv, id) {
struct hangcheck hc;
@@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
hangcheck_store_sample(engine, &hc);
if (hc.stalled) {
- hung |= intel_engine_flag(engine);
+ hung |= engine->mask;
if (hc.action != ENGINE_DEAD)
- stuck |= intel_engine_flag(engine);
+ stuck |= engine->mask;
}
if (hc.wedged)
- wedged |= intel_engine_flag(engine);
+ wedged |= engine->mask;
}
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index ce7ba3a9c000..99b007169c49 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,15 +6,20 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drm_hdcp.h>
+#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>
-#include "intel_drv.h"
+#include <drm/drm_hdcp.h>
+#include <drm/i915_component.h>
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+#define HDCP2_LC_RETRY_CNT 3
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -72,6 +77,52 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
}
+/* Are both the platform and the sink HDCP2.2 capable? */
+static bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool capable = false;
+
+ /* I915 support for HDCP2.2 */
+ if (!hdcp->hdcp2_supported)
+ return false;
+
+ /* MEI interface is solid */
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return false;
+ }
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ /* Sink's capability for HDCP2.2 */
+ hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+
+ return capable;
+}
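Hedged sketch of how a capability check like the one above would typically be used when enabling content protection: prefer HDCP2.2 when both platform and sink support it, otherwise fall back to HDCP1.4. _intel_hdcp2_enable() is a hypothetical helper here; only _intel_hdcp_enable() appears in this patch.

static int example_enable_content_protection(struct intel_connector *connector)
{
	/*
	 * Hypothetical selection logic: _intel_hdcp2_enable() is assumed
	 * for illustration and is not defined in this patch.
	 */
	if (intel_hdcp2_capable(connector))
		return _intel_hdcp2_enable(connector);

	return _intel_hdcp_enable(connector);
}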
+
+static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(PORT_HDCP_STATUS(port));
+ return reg & HDCP_STATUS_ENC;
+}
+
+static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(HDCP2_STATUS_DDI(port));
+ return reg & LINK_ENCRYPTION_STATUS;
+}
+
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -176,7 +227,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+ ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
10, 1, &val);
if (ret)
@@ -194,7 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
@@ -425,7 +476,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
@@ -555,7 +606,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY,
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
@@ -636,7 +687,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC, HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -666,8 +717,10 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
+ hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PORT_HDCP_STATUS(port), ~0, 0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -711,8 +764,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* Incase of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
- if (!ret)
+ if (!ret) {
+ hdcp->hdcp_encrypted = true;
return 0;
+ }
DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
@@ -730,16 +785,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
return container_of(hdcp, struct intel_connector, hdcp);
}
-static void intel_hdcp_check_work(struct work_struct *work)
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
{
- struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
- struct intel_hdcp,
- check_work);
- struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret = 0;
- if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
+ mutex_lock(&hdcp->mutex);
+
+ /* The link check is valid only while HDCP1.4 encryption is enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ I915_READ(PORT_HDCP_STATUS(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
}
static void intel_hdcp_prop_work(struct work_struct *work)
@@ -773,14 +876,929 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ if (ret)
+ DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
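/*
 * Editor's note: a minimal userspace sketch (not kernel code) of the locking
 * pattern shared by every hdcp2_* wrapper above -- take the component mutex,
 * bail out if the MEI component has not bound yet (ops still NULL), make the
 * call, then drop the mutex.  The names hdcp_comp and hdcp_ops are
 * hypothetical stand-ins for i915_hdcp_comp_master and its ops table.
 */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct hdcp_ops {
	int (*initiate_session)(void *dev, void *data);
};

struct hdcp_comp {
	pthread_mutex_t lock;
	const struct hdcp_ops *ops;	/* NULL until the component binds */
	void *dev;
};

static int hdcp_comp_call(struct hdcp_comp *comp, void *data)
{
	int ret;

	pthread_mutex_lock(&comp->lock);
	if (!comp->ops) {		/* component not (yet) bound */
		pthread_mutex_unlock(&comp->lock);
		return -EINVAL;
	}
	ret = comp->ops->initiate_session(comp->dev, data);
	pthread_mutex_unlock(&comp->lock);

	return ret;
}

int main(void)
{
	struct hdcp_comp comp = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL };

	/* component not bound yet: the call must fail with -EINVAL */
	return hdcp_comp_call(&comp, NULL) == -EINVAL ? 0 : 1;
}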
+
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *paired,
+ struct hdcp2_ake_no_stored_km *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ rx_cert, paired,
+ ek_pub_km, msg_sz);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+ struct hdcp2_lc_init *lc_init)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack *rep_send_ack)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+ rep_topology,
+ rep_send_ack);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ &connector->hdcp.port_data);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+ return hdcp2_close_mei_session(connector);
+}
+
+/* Authentication flow starts from here */
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_ake_init ake_init;
+ struct hdcp2_ake_send_cert send_cert;
+ struct hdcp2_ake_no_stored_km no_stored_km;
+ struct hdcp2_ake_send_hprime send_hprime;
+ struct hdcp2_ake_send_pairing_info pairing_info;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ size_t size;
+ int ret;
+
+ /* Init for seq_num */
+ hdcp->seq_num_v = 0;
+ hdcp->seq_num_m = 0;
+
+ ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+ sizeof(msgs.ake_init));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+ &msgs.send_cert, sizeof(msgs.send_cert));
+ if (ret < 0)
+ return ret;
+
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ return -EINVAL;
+
+ hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+ /*
+ * Here msgs.no_stored_km will also hold the message used when a
+ * stored km is available.
+ */
+ ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+ &hdcp->is_paired,
+ &msgs.no_stored_km, &size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ &msgs.send_hprime, sizeof(msgs.send_hprime));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+ if (ret < 0)
+ return ret;
+
+ if (!hdcp->is_paired) {
+ /* Pairing is required */
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ &msgs.pairing_info,
+ sizeof(msgs.pairing_info));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+ if (ret < 0)
+ return ret;
+ hdcp->is_paired = true;
+ }
+
+ return 0;
+}
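/*
 * Editor's note: illustrative sketch of why the AKE code above keeps all of
 * the exchange messages in one union -- the messages are strictly sequential,
 * so a single buffer sized for the largest of them can be reused at every
 * step.  The struct names and sizes here are hypothetical, not the drm_hdcp
 * definitions.
 */
#include <stdio.h>

struct ake_init         { unsigned char r_tx[8]; };
struct ake_send_cert    { unsigned char cert[522]; };
struct ake_no_stored_km { unsigned char e_kpub_km[128]; };

union ake_msgs {
	struct ake_init init;
	struct ake_send_cert send_cert;
	struct ake_no_stored_km no_stored_km;
};

int main(void)
{
	union ake_msgs msgs;	/* one allocation for the whole exchange */

	printf("union size %zu = size of the largest member (%zu)\n",
	       sizeof(msgs), sizeof(msgs.send_cert));
	return 0;
}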
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_lc_init lc_init;
+ struct hdcp2_lc_send_lprime send_lprime;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+ if (ret < 0)
+ continue;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+ sizeof(msgs.lc_init));
+ if (ret < 0)
+ continue;
+
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_LC_SEND_LPRIME,
+ &msgs.send_lprime,
+ sizeof(msgs.send_lprime));
+ if (ret < 0)
+ continue;
+
+ ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
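/*
 * Editor's note: a small standalone model of the locality-check retry loop
 * above -- up to HDCP2_LC_RETRY_CNT attempts, any failing step restarts the
 * attempt, and the result of the last attempt is returned.
 * try_locality_check() is a hypothetical stand-in for the real
 * prepare/write/read/verify sequence.
 */
#include <stdio.h>

#define LC_RETRY_CNT 3

static int try_locality_check(int attempt)
{
	return attempt < 2 ? -1 : 0;	/* pretend the 3rd attempt passes */
}

int main(void)
{
	int ret = -1, i;

	for (i = 0; i < LC_RETRY_CNT; i++) {
		ret = try_locality_check(i);
		if (!ret)
			break;		/* locality check passed */
	}
	printf("locality check %s after %d attempt(s)\n",
	       ret ? "failed" : "passed", i + (ret ? 0 : 1));
	return 0;
}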
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp2_ske_send_eks send_eks;
+ int ret;
+
+ ret = hdcp2_prepare_skey(connector, &send_eks);
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+ sizeof(send_eks));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static
+int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_stream_manage stream_manage;
+ struct hdcp2_rep_stream_ready stream_ready;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ /* Prepare RepeaterAuth_Stream_Manage msg */
+ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+ drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+ /* The number of streams (k) is fixed at 1. Stored as big-endian. */
+ msgs.stream_manage.k = cpu_to_be16(1);
+
+ /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
+ msgs.stream_manage.streams[0].stream_id = 0;
+ msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+
+ /* Send it to Repeater */
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+ sizeof(msgs.stream_manage));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+ &msgs.stream_ready, sizeof(msgs.stream_ready));
+ if (ret < 0)
+ return ret;
+
+ hdcp->port_data.seq_num_m = hdcp->seq_num_m;
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+
+ ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_m++;
+
+ if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+ DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ return -1;
+ }
+
+ return 0;
+}
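/*
 * Editor's note: standalone sketch of the seq_num handling above.  The
 * HDCP 2.2 seq_num_m / seq_num_v counters are 24-bit values carried as three
 * big-endian bytes on the wire; the driver keeps them as u32 and treats a
 * wrap past 0xffffff as a failure that forces reauthentication.  These
 * helpers mirror what the drm_hdcp2_* conversion helpers are used for, but
 * are not the kernel implementations.
 */
#include <stdint.h>
#include <stdio.h>

#define SEQ_NUM_MAX 0xffffff

static void u32_to_seq_num(uint8_t seq[3], uint32_t val)
{
	seq[0] = (val >> 16) & 0xff;	/* big-endian, MSB first */
	seq[1] = (val >> 8) & 0xff;
	seq[2] = val & 0xff;
}

static uint32_t seq_num_to_u32(const uint8_t seq[3])
{
	return ((uint32_t)seq[0] << 16) | ((uint32_t)seq[1] << 8) | seq[2];
}

int main(void)
{
	uint8_t wire[3];
	uint32_t seq_num_m = SEQ_NUM_MAX;

	u32_to_seq_num(wire, seq_num_m);
	printf("wire bytes %02x %02x %02x -> %06x\n",
	       wire[0], wire[1], wire[2], seq_num_to_u32(wire));

	if (++seq_num_m > SEQ_NUM_MAX)
		printf("seq_num_m rolled over: reauthentication required\n");
	return 0;
}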
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_send_receiverid_list recvid_list;
+ struct hdcp2_rep_send_ack rep_ack;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ u8 *rx_info;
+ u32 seq_num_v;
+ int ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ &msgs.recvid_list, sizeof(msgs.recvid_list));
+ if (ret < 0)
+ return ret;
+
+ rx_info = msgs.recvid_list.rx_info;
+
+ if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+ HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+ DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ return -EINVAL;
+ }
+
+ /* Convert seq_num_v to a u32 and store it in a local variable */
+ seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+
+ if (seq_num_v < hdcp->seq_num_v) {
+ /* Roll over of the seq_num_v from repeater. Reauthenticate. */
+ DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ return -EINVAL;
+ }
+
+ ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+ &msgs.recvid_list,
+ &msgs.rep_ack);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_v = seq_num_v;
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+ sizeof(msgs.rep_ack));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdcp2_authenticate_repeater(struct intel_connector *connector)
+{
+ int ret;
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (ret < 0)
+ return ret;
+
+ return hdcp2_propagate_stream_management_info(connector);
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ ret = hdcp2_authentication_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_locality_check(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_session_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ if (shim->config_stream_type) {
+ ret = shim->config_stream_type(intel_dig_port,
+ hdcp->is_repeater,
+ hdcp->content_type);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (hdcp->is_repeater) {
+ ret = hdcp2_authenticate_repeater(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ ret = hdcp2_authenticate_port(connector);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
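/*
 * Editor's note: a compact model of the staged flow in
 * hdcp2_authenticate_sink() above -- AKE, locality check and SKE run in
 * order, and the first failing stage aborts the whole attempt.  The stage
 * functions are hypothetical stubs, not the driver routines.
 */
#include <stdio.h>

static int do_ake(void)            { return 0; }
static int do_locality_check(void) { return 0; }
static int do_ske(void)            { return 0; }

int main(void)
{
	struct { const char *name; int (*run)(void); } stages[] = {
		{ "AKE", do_ake },
		{ "Locality Check", do_locality_check },
		{ "SKE", do_ske },
	};
	unsigned int i;
	int ret;

	for (i = 0; i < sizeof(stages) / sizeof(stages[0]); i++) {
		ret = stages[i].run();
		if (ret < 0) {
			printf("%s failed: %d\n", stages[i].name, ret);
			return 1;
		}
	}
	printf("sink authenticated\n");
	return 0;
}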
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+ if (ret) {
+ DRM_ERROR("Failed to enable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ /* Link is Authenticated. Now set for Encryption */
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) |
+ CTL_LINK_ENCRYPTION_REQ);
+ }
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS, 0x0,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ if (ret == -ETIMEDOUT)
+ DRM_DEBUG_KMS("Disable Encryption Timedout");
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+ int ret, i, tries = 3;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_authenticate_sink(connector);
+ if (!ret)
+ break;
+
+ /* Clearing the mei hdcp session */
+ DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+
+ if (i != tries) {
+ /*
+ * Ensure the required minimum 200 ms interval between Session Key
+ * Exchange and enabling encryption.
+ */
+ msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+ ret = hdcp2_enable_encryption(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+ }
+
+ return ret;
+}
+
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ ret = hdcp2_authenticate_and_encrypt(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ hdcp->hdcp2_encrypted = true;
+ return 0;
+}
+
+static int _intel_hdcp2_disable(struct intel_connector *connector)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = hdcp2_disable_encryption(connector);
+
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+
+ connector->hdcp.hdcp2_encrypted = false;
+
+ return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret = 0;
+
+ mutex_lock(&hdcp->mutex);
+
+ /* The 2.2 link check is valid only while HDCP2.2 encryption is enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp2_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
+ I915_READ(HDCP2_STATUS_DDI(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = hdcp->shim->check_2_2_link(intel_dig_port);
+ if (ret == HDCP_LINK_PROTECTED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ if (ret == HDCP_TOPOLOGY_CHANGE) {
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+ DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ } else {
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
+ }
+
+ ret = _intel_hdcp2_disable(connector);
+ if (ret) {
+ DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp2_enable(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
+ if (!intel_hdcp2_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
+ else if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
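/*
 * Editor's note: sketch of the re-check scheduling in
 * intel_hdcp_check_work() above.  The worker first tries the HDCP2.2 link
 * check and only falls back to the HDCP1.4 check if 2.2 is not active;
 * whichever succeeds decides the next poll interval.  The period values and
 * check stubs here are illustrative, not the DRM constants.
 */
#include <stdio.h>

#define HDCP2_CHECK_PERIOD_MS	500		/* assumed value */
#define HDCP1_CHECK_PERIOD_MS	(128 * 16)	/* assumed value */

static int hdcp2_check_link(void) { return -1; }	/* pretend 2.2 inactive */
static int hdcp1_check_link(void) { return 0; }		/* 1.4 link still good */

int main(void)
{
	int next_ms = 0;

	if (!hdcp2_check_link())
		next_ms = HDCP2_CHECK_PERIOD_MS;
	else if (!hdcp1_check_link())
		next_ms = HDCP1_CHECK_PERIOD_MS;

	if (next_ms)
		printf("re-arm link check in %d ms\n", next_ms);
	else
		printf("no active HDCP session, stop polling\n");
	return 0;
}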
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp bind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->hdcp_master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp unbind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = NULL;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+ .bind = i915_hdcp_component_bind,
+ .unbind = i915_hdcp_component_unbind,
+};
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp_port_data *data = &hdcp->port_data;
+
+ data->port = connector->encoder->port;
+ data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+ data->protocol = (u8)hdcp->shim->protocol;
+
+ data->k = 1;
+ if (!data->streams)
+ data->streams = kcalloc(data->k,
+ sizeof(struct hdcp2_streamid_type),
+ GFP_KERNEL);
+ if (!data->streams) {
+ DRM_ERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ data->streams[0].stream_id = 0;
+ data->streams[0].stream_type = hdcp->content_type;
+
+ return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+ return false;
+
+ return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!is_hdcp2_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ WARN_ON(dev_priv->hdcp_comp_added);
+
+ dev_priv->hdcp_comp_added = true;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+ I915_COMPONENT_HDCP);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ ret = initialize_hdcp_port_data(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ return;
+ }
+
+ hdcp->hdcp2_supported = true;
+}
+
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = drm_connector_attach_content_protection_property(
- &connector->base);
+ if (!shim)
+ return -EINVAL;
+
+ ret = drm_connector_attach_content_protection_property(&connector->base);
if (ret)
return ret;
@@ -788,28 +1806,47 @@ int intel_hdcp_init(struct intel_connector *connector,
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector);
+ init_waitqueue_head(&hdcp->cp_irq_queue);
+
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- int ret;
+ unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+ int ret = -EINVAL;
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&hdcp->mutex);
+ WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
- ret = _intel_hdcp_enable(connector);
- if (ret)
- goto out;
+ /*
+ * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable of
+ * HDCP2.2, HDCP2.2 is preferred.
+ */
+ if (intel_hdcp2_capable(connector)) {
+ ret = _intel_hdcp2_enable(connector);
+ if (!ret)
+ check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+ }
+
+ /* When HDCP2.2 fails, HDCP1.4 will be attempted */
+ if (ret && intel_hdcp_capable(connector)) {
+ ret = _intel_hdcp_enable(connector);
+ }
+
+ if (!ret) {
+ schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
-out:
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -826,7 +1863,10 @@ int intel_hdcp_disable(struct intel_connector *connector)
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- ret = _intel_hdcp_disable(connector);
+ if (hdcp->hdcp2_encrypted)
+ ret = _intel_hdcp2_disable(connector);
+ else if (hdcp->hdcp_encrypted)
+ ret = _intel_hdcp_disable(connector);
}
mutex_unlock(&hdcp->mutex);
@@ -834,6 +1874,30 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+ if (!connector->hdcp.shim)
+ return;
+
+ mutex_lock(&connector->hdcp.mutex);
+ kfree(connector->hdcp.port_data.streams);
+ mutex_unlock(&connector->hdcp.mutex);
+}
+
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
@@ -867,61 +1931,16 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
crtc_state->mode_changed = true;
}
-/* Implements Part 3 of the HDCP authorization procedure */
-int intel_hdcp_check_link(struct intel_connector *connector)
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- enum port port = intel_dig_port->base.port;
- int ret = 0;
if (!hdcp->shim)
- return -ENOENT;
-
- mutex_lock(&hdcp->mutex);
-
- if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- goto out;
-
- if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
- DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
- ret = -ENXIO;
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
-
- if (hdcp->shim->check_link(intel_dig_port)) {
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- }
- goto out;
- }
-
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
-
- ret = _intel_hdcp_disable(connector);
- if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ return;
- ret = _intel_hdcp_enable(connector);
- if (ret) {
- DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ atomic_inc(&connector->hdcp.cp_irq_count);
+ wake_up_all(&connector->hdcp.cp_irq_queue);
-out:
- mutex_unlock(&hdcp->mutex);
- return ret;
+ schedule_delayed_work(&hdcp->check_work, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
new file mode 100644
index 000000000000..a75f25f09d39
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_H__
+#define __INTEL_HDCP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_hdcp_shim;
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+
+#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f125a62eba8c..34be2cfd0ec8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -26,19 +26,30 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_sdvo.h"
+#include "intel_panel.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -82,6 +93,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_SELECT_GAMUT;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -97,6 +110,12 @@ static u32 g4x_infoframe_index(unsigned int type)
static u32 g4x_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GAMUT;
+ case DP_SDP_VSC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -112,6 +131,10 @@ static u32 g4x_infoframe_enable(unsigned int type)
static u32 hsw_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP_HSW;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
case DP_SDP_PPS:
@@ -135,6 +158,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
int i)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
@@ -182,7 +207,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(VIDEO_DIP_CTL, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
@@ -190,7 +214,6 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VIDEO_DIP_DATA, 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -200,17 +223,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
+static void g4x_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VIDEO_DIP_CTL);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VIDEO_DIP_DATA);
+}
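/*
 * Editor's note: sketch of the indexed DIP buffer access used by the
 * g4x/ibx/cpt/vlv read helpers above -- a control register selects which
 * infoframe buffer is visible, and repeated reads of a single data register
 * then walk through that buffer a dword at a time.  The register model below
 * is purely illustrative, not real hardware behaviour.
 */
#include <stdint.h>
#include <stdio.h>

#define DIP_BUFFERS	4
#define DIP_DWORDS	8

struct dip_hw {
	uint32_t ctl;				/* buffer select lives here */
	uint32_t data[DIP_BUFFERS][DIP_DWORDS];
	unsigned int cursor;			/* auto-incrementing read pointer */
};

static uint32_t dip_read_data(struct dip_hw *hw)
{
	unsigned int buf = hw->ctl & 0x3;

	return hw->data[buf][hw->cursor++ % DIP_DWORDS];
}

int main(void)
{
	struct dip_hw hw = { .ctl = 1 };	/* select buffer 1 (e.g. AVI) */
	uint32_t frame[DIP_DWORDS];
	unsigned int i;

	for (i = 0; i < DIP_DWORDS; i++)
		hw.data[1][i] = 0x1000 + i;

	for (i = 0; i < DIP_DWORDS; i++)	/* read back a dword at a time */
		frame[i] = dip_read_data(&hw);

	printf("first dword 0x%x last dword 0x%x\n", frame[0], frame[7]);
	return 0;
}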
+
+static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
@@ -237,7 +280,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -245,7 +287,6 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -255,7 +296,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
+static void ibx_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -264,10 +326,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -298,7 +360,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -306,7 +367,6 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -316,7 +376,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
+static void cpt_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -324,7 +405,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -352,7 +433,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
I915_WRITE(reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
@@ -360,7 +440,6 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
- mmiowb();
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
@@ -370,7 +449,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
+static void vlv_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -378,10 +478,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -406,7 +506,6 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
- mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), *data);
@@ -416,14 +515,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
for (; i < data_size; i += 4)
I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
type, i >> 2), 0);
- mmiowb();
val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
+static void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+ type, i >> 2));
+}
+
+static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -434,6 +549,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
+static const u8 infoframe_type_to_idx[] = {
+ HDMI_PACKET_TYPE_GENERAL_CONTROL,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ DP_SDP_VSC,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_INFOFRAME_TYPE_SPD,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+};
+
+u32 intel_hdmi_infoframe_enable(unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ if (infoframe_type_to_idx[i] == type)
+ return BIT(i);
+ }
+
+ return 0;
+}
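/*
 * Editor's note: standalone sketch of the type-to-bit mapping that
 * intel_hdmi_infoframe_enable() implements above -- each supported
 * packet/infoframe type gets a stable bit position so a single u32 in the
 * crtc state can record which infoframes are enabled.  The numeric type
 * values are taken from the HDMI/DP codes used in the kernel headers; the
 * table itself is just a model.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static const uint8_t type_to_idx[] = {
	0x03,	/* HDMI_PACKET_TYPE_GENERAL_CONTROL */
	0x0a,	/* HDMI_PACKET_TYPE_GAMUT_METADATA */
	0x07,	/* DP_SDP_VSC */
	0x82,	/* HDMI_INFOFRAME_TYPE_AVI */
	0x83,	/* HDMI_INFOFRAME_TYPE_SPD */
	0x81,	/* HDMI_INFOFRAME_TYPE_VENDOR */
};

static uint32_t infoframe_enable_bit(unsigned int type)
{
	unsigned int i;

	for (i = 0; i < sizeof(type_to_idx); i++)
		if (type_to_idx[i] == type)
			return BIT(i);
	return 0;	/* unknown type: no bit reserved */
}

int main(void)
{
	printf("AVI bit: 0x%x, SPD bit: 0x%x\n",
	       infoframe_enable_bit(0x82), infoframe_enable_bit(0x83));
	return 0;
}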
+
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ u32 val, ret = 0;
+ int i;
+
+ val = dig_port->infoframes_enabled(encoder, crtc_state);
+
+ /* map from hardware bits to dip idx */
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ unsigned int type = infoframe_type_to_idx[i];
+
+ if (HAS_DDI(dev_priv)) {
+ if (val & hsw_infoframe_enable(type))
+ ret |= BIT(i);
+ } else {
+ if (val & g4x_infoframe_enable(type))
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -453,15 +615,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
*/
static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- union hdmi_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ if (WARN_ON(frame->any.type != type))
+ return;
+
/* see comment above for the reason for this offset */
- len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
- if (len < 0)
+ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+ if (WARN_ON(len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -469,84 +639,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder,
- crtc_state,
- frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
}
-static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ intel_dig_port->read_infoframe(encoder, crtc_state,
+ type, buffer, sizeof(buffer));
+
+ /* Fill the 'hole' (see big comment above) at position 3 */
+ memmove(&buffer[1], &buffer[0], 3);
+
+ /* see comment above for the reason for this offset */
+ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ return;
+ }
+
+ if (frame->any.type != type)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
+}
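/*
 * Editor's note: userspace sketch of the DIP buffer quirk handled by
 * intel_write_infoframe()/intel_read_infoframe() above.  The hardware buffer
 * carries the 3-byte infoframe header, then an ECC/reserved byte at offset 3,
 * then the payload, while a packed infoframe is header immediately followed
 * by payload.  Writing therefore opens a hole at offset 3 and reading
 * collapses it again; the buffer contents here are dummy values.
 */
#include <stdio.h>
#include <string.h>

#define DIP_SIZE 36

int main(void)
{
	unsigned char packed[DIP_SIZE] = { 0x82, 0x02, 0x0d,	/* header */
					   0xaa, 0xbb, 0xcc };	/* payload */
	unsigned char hw[DIP_SIZE] = { 0 };
	unsigned char unpacked[DIP_SIZE];
	size_t len = 6;

	/* write path: open a hole for the ECC byte at offset 3 */
	memcpy(hw, packed, 3);
	hw[3] = 0;				/* ECC/reserved byte */
	memcpy(&hw[4], &packed[3], len - 3);

	/* read path: collapse the hole so header and payload are contiguous */
	memcpy(unpacked, hw, sizeof(hw));
	memmove(&unpacked[1], &unpacked[0], 3);	/* header now at 1..3 */

	printf("hw[3]=%02x (hole), unpacked[4]=%02x (first payload byte)\n",
	       hw[3], unpacked[4]);
	return 0;
}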
+
+static bool
+intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- union hdmi_infoframe frame;
+ struct drm_connector *connector = conn_state->connector;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- conn_state->connector,
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
- return;
- }
+ if (ret)
+ return false;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ frame->colorspace = HDMI_COLORSPACE_YUV420;
else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ frame->colorspace = HDMI_COLORSPACE_YUV444;
else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ frame->colorspace = HDMI_COLORSPACE_RGB;
+
+ drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- drm_hdmi_avi_infoframe_content_type(&frame.avi,
- conn_state);
+ drm_hdmi_avi_infoframe_content_type(frame, conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static bool
+intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- union hdmi_infoframe frame;
+ struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
- ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
- if (ret < 0) {
- DRM_ERROR("couldn't fill SPD infoframe\n");
- return;
- }
+ if (!crtc_state->has_infoframe)
+ return true;
- frame.spd.sdi = HDMI_SPD_SDI_PC;
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (WARN_ON(ret))
+ return false;
+
+ frame->sdi = HDMI_SPD_SDI_PC;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void
-intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- union hdmi_infoframe frame;
+static bool
+intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_vendor_infoframe *frame =
+ &crtc_state->infoframes.hdmi.vendor.hdmi;
+ const struct drm_display_info *info =
+ &conn_state->connector->display_info;
int ret;
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->base.adjusted_mode);
- if (ret < 0)
- return;
+ if (WARN_ON(ret))
+ return false;
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
static void g4x_set_infoframes(struct intel_encoder *encoder,
@@ -606,9 +835,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -674,7 +909,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
- u32 val = 0;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return false;
if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
@@ -685,18 +923,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
+ I915_WRITE(reg, crtc_state->infoframes.gcp);
+
+ return true;
+}
+
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ i915_reg_t reg;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return;
+
+ crtc_state->infoframes.gcp = I915_READ(reg);
+}
+
+static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ return;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+
/* Indicate color depth whenever the sink supports deep color */
if (hdmi_sink_is_deep_color(conn_state))
- val |= GCP_COLOR_INDICATION;
+ crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
&crtc_state->base.adjusted_mode))
- val |= GCP_DEFAULT_PHASE_ENABLE;
-
- I915_WRITE(reg, val);
-
- return val != 0;
+ crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
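
To show the shape of this precompute-then-write split in isolation: the conditions are folded into a value held in software state, together with an enable bitmask recording which packets were computed, and the register write happens later. Everything below (the bit positions, PACKET_GCP, struct hdmi_state) is invented for the sketch and is not the real GCP register layout.

#include <stdio.h>

#define GCP_COLOR_INDICATION     (1u << 2) /* illustrative bit positions */
#define GCP_DEFAULT_PHASE_ENABLE (1u << 0)
#define PACKET_GCP               (1u << 7) /* illustrative enable-mask bit */

struct hdmi_state {
        unsigned int infoframes_enable; /* which packets were computed */
        unsigned int gcp;               /* precomputed GCP payload     */
};

/* Fold the per-mode conditions into the software state only. */
static void compute_gcp(struct hdmi_state *s, int deep_color, int aligned_mode)
{
        s->infoframes_enable |= PACKET_GCP;

        if (deep_color)
                s->gcp |= GCP_COLOR_INDICATION;
        if (aligned_mode)
                s->gcp |= GCP_DEFAULT_PHASE_ENABLE;
}

int main(void)
{
        struct hdmi_state s = { 0, 0 };

        compute_gcp(&s, 1, 0);
        printf("enable=0x%x gcp=0x%x\n", s.infoframes_enable, s.gcp);
        return 0;
}
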
static void ibx_set_infoframes(struct intel_encoder *encoder,
@@ -747,9 +1021,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void cpt_set_infoframes(struct intel_encoder *encoder,
@@ -790,9 +1070,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void vlv_set_infoframes(struct intel_encoder *encoder,
@@ -842,9 +1128,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void hsw_set_infoframes(struct intel_encoder *encoder,
@@ -875,9 +1167,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1083,10 +1381,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
return ret;
}
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_crtc *crtc = connector->base.state->crtc;
+ struct intel_crtc *intel_crtc = container_of(crtc,
+ struct intel_crtc, base);
+ u32 scanline;
+ int ret;
+
+ for (;;) {
+ scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ if (scanline > 100 && scanline < 200)
+ break;
+ usleep_range(25, 50);
+ }
+
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+ if (ret) {
+ DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+ if (ret) {
+ DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
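
A minimal standalone sketch of the polling pattern used here, detached from any hardware: keep sampling a scanline source until it falls inside a target window, pausing between samples. The window bounds, the fake read_scanline() generator and its progression are invented for the example.

#include <stdio.h>

/* Stand-in for a PIPEDSL-style register read: here it just advances. */
static unsigned int read_scanline(void)
{
        static unsigned int line;

        line = (line + 37) % 1125; /* arbitrary progression for the sketch */
        return line;
}

/* Poll until the scanline falls strictly inside (low, high). */
static unsigned int wait_for_scanline_window(unsigned int low, unsigned int high)
{
        unsigned int scanline;

        for (;;) {
                scanline = read_scanline();
                if (scanline > low && scanline < high)
                        return scanline;
                /* the real code sleeps 25-50us between samples here */
        }
}

int main(void)
{
        printf("hit scanline %u inside (100, 200)\n",
               wait_for_scanline_window(100, 200));
        return 0;
}
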
+
static
int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
bool enable)
{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1098,6 +1430,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
enable ? "Enable" : "Disable", ret);
return ret;
}
+
+ /*
+ * WA: Fix the incorrect positioning of the window of
+ * opportunity and the enc_en signalling on KABYLAKE.

+ */
+ if (IS_KABYLAKE(dev_priv) && enable)
+ return kbl_repositioning_enc_en_signal(connector);
+
return 0;
}
@@ -1129,6 +1469,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
+static struct hdcp2_hdmi_msg_data {
+ u8 msg_id;
+ u32 timeout;
+ u32 timeout2;
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
+ 0},
+ {HDCP_2_2_LC_INIT, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
+ 0},
+ };
+
+static
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ return intel_hdmi_hdcp_read(intel_dig_port,
+ HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+ rx_status,
+ HDCP_2_2_HDMI_RXSTATUS_LEN);
+}
+
+static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id &&
+ (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
+ return hdcp2_msg_data[i].timeout;
+ else if (hdcp2_msg_data[i].msg_id == msg_id)
+ return hdcp2_msg_data[i].timeout2;
+
+ return -EINVAL;
+}
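
A small self-contained sketch of the table lookup above: each message ID maps to a wait budget, and the H' message selects the second (not-yet-paired) timeout when pairing has not happened. The IDs, millisecond values and the -1 error return below are illustrative placeholders rather than the drm/hdcp definitions.

#include <stdio.h>
#include <stddef.h>

struct msg_timeout {
        unsigned char msg_id;
        unsigned int timeout_ms;   /* paired / default budget       */
        unsigned int timeout2_ms;  /* un-paired variant, 0 if unused */
};

enum { MSG_AKE_SEND_CERT = 3, MSG_AKE_SEND_HPRIME = 7 }; /* illustrative IDs */

static const struct msg_timeout table[] = {
        { MSG_AKE_SEND_CERT,   100, 0 },
        { MSG_AKE_SEND_HPRIME, 200, 1000 },
};

/* Return the wait budget for msg_id, or -1 if the ID is unknown. */
static int lookup_timeout(unsigned char msg_id, int is_paired)
{
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].msg_id != msg_id)
                        continue;
                if (msg_id == MSG_AKE_SEND_HPRIME && !is_paired)
                        return (int)table[i].timeout2_ms;
                return (int)table[i].timeout_ms;
        }
        return -1;
}

int main(void)
{
        printf("paired H' wait: %d ms\n", lookup_timeout(MSG_AKE_SEND_HPRIME, 1));
        printf("unpaired H' wait: %d ms\n", lookup_timeout(MSG_AKE_SEND_HPRIME, 0));
        return 0;
}
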
+
+static inline
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ return ret;
+ }
+
+ *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+ rx_status[0]);
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+ *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+ *msg_sz);
+ else
+ *msg_ready = *msg_sz;
+
+ return 0;
+}
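
As a rough illustration of how the two RxStatus bytes are combined: the low byte carries the low eight bits of the message size and a few bits of the high byte supply bits 9:8, with a separate READY flag. The mask and flag values below are assumptions made for the sketch, not the real register layout.

#include <stdio.h>

#define RXSTATUS_MSG_SZ_HI_MASK 0x03 /* illustrative: bits 9:8 of msg size */
#define RXSTATUS_READY_BIT      0x04 /* illustrative: receiver-ready flag  */

/* Rebuild the 10-bit message size from the two RxStatus bytes. */
static unsigned int rxstatus_msg_size(unsigned char lo, unsigned char hi)
{
        return ((hi & RXSTATUS_MSG_SZ_HI_MASK) << 8) | lo;
}

int main(void)
{
        unsigned char rx_status[2] = { 0x34, 0x02 }; /* sample readback */
        unsigned int sz = rxstatus_msg_size(rx_status[0], rx_status[1]);
        int ready = !!(rx_status[1] & RXSTATUS_READY_BIT);

        printf("msg size = %u bytes, ready = %d\n", sz, ready); /* 564, 0 */
        return 0;
}
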
+
+static ssize_t
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool paired)
+{
+ bool msg_ready = false;
+ int timeout, ret;
+ ssize_t msg_sz = 0;
+
+ timeout = get_hdcp2_msg_timeout(msg_id, paired);
+ if (timeout < 0)
+ return timeout;
+
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready,
+ &msg_sz),
+ !ret && msg_ready && msg_sz, timeout * 1000,
+ 1000, 5 * 1000);
+ if (ret)
+ DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
+
+ return ret ? ret : msg_sz;
+}
+
+static
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+
+ offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+ return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+}
+
+static
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+ unsigned int offset;
+ ssize_t ret;
+
+ ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+ hdcp->is_paired);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The available msg size should be no larger than the
+ * available buffer.
+ */
+ if (ret > size) {
+ DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
+ return -1;
+ }
+
+ offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+ if (ret)
+ DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ if (ret)
+ return ret;
+
+ /*
+ * Re-auth requests and Link Integrity Failures are represented by
+ * the same bit, i.e. reauth_req.
+ */
+ if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 hdcp2_version;
+ int ret;
+
+ *capable = false;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ &hdcp2_version, sizeof(hdcp2_version));
+ if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+ *capable = true;
+
+ return ret;
+}
+
+static inline
+enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
+{
+ return HDCP_PROTOCOL_HDMI;
+}
+
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1140,6 +1664,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
.toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
.check_link = intel_hdmi_hdcp_check_link,
+ .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+ .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+ .check_2_2_link = intel_hdmi_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_HDMI,
};
static void intel_hdmi_prepare(struct intel_encoder *encoder,
@@ -1205,7 +1734,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1228,7 +1756,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1251,6 +1782,18 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
@@ -1664,7 +2207,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display Wa_1405510057:icl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && IS_ICELAKE(dev_priv) &&
+ bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -1822,6 +2365,23 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+
+ if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1941,7 +2501,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (IS_ICELAKE(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2143,10 +2703,21 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+
+ /*
+ * Attach the Colorspace property for non-LSPCON based devices.
+ * TODO: This needs to be extended to the LSPCON implementation
+ * as well; that will be done separately.
+ */
+ if (!intel_dig_port->lspcon.active)
+ intel_attach_colorspace_property(connector);
+
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
@@ -2331,14 +2902,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return info->alternate_ddc_pin;
}
- if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
- else if (IS_GEN9_LP(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ if (HAS_PCH_ICP(dev_priv))
+ ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_LP(dev_priv))
+ ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ else if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2355,33 +2926,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dig_port->write_infoframe = vlv_write_infoframe;
+ intel_dig_port->read_infoframe = vlv_read_infoframe;
intel_dig_port->set_infoframes = vlv_set_infoframes;
- intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
} else if (IS_G4X(dev_priv)) {
intel_dig_port->write_infoframe = g4x_write_infoframe;
+ intel_dig_port->read_infoframe = g4x_read_infoframe;
intel_dig_port->set_infoframes = g4x_set_infoframes;
- intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
if (intel_dig_port->lspcon.active) {
- intel_dig_port->write_infoframe =
- lspcon_write_infoframe;
+ intel_dig_port->write_infoframe = lspcon_write_infoframe;
+ intel_dig_port->read_infoframe = lspcon_read_infoframe;
intel_dig_port->set_infoframes = lspcon_set_infoframes;
- intel_dig_port->infoframe_enabled =
- lspcon_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
} else {
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled =
- hsw_infoframe_enabled;
intel_dig_port->write_infoframe = hsw_write_infoframe;
+ intel_dig_port->read_infoframe = hsw_read_infoframe;
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
+ intel_dig_port->read_infoframe = ibx_read_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
- intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
} else {
intel_dig_port->write_infoframe = cpt_write_infoframe;
+ intel_dig_port->read_infoframe = cpt_read_infoframe;
intel_dig_port->set_infoframes = cpt_set_infoframes;
- intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
}
}
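
The hook selection above is an instance of the usual function-pointer table pattern; a reduced standalone version, with invented platform names and trivial hooks, might look like the following.

#include <stdio.h>

struct infoframe_ops {
        void (*write)(const char *what);
        void (*read)(const char *what);
};

static void new_write(const char *what) { printf("new-style write %s\n", what); }
static void new_read(const char *what)  { printf("new-style read %s\n", what); }
static void old_write(const char *what) { printf("legacy write %s\n", what); }
static void old_read(const char *what)  { printf("legacy read %s\n", what); }

enum platform { PLATFORM_OLD, PLATFORM_NEW }; /* invented for the sketch */

/* Pick the hook set once, at init time, based on the platform. */
static void infoframe_init(struct infoframe_ops *ops, enum platform p)
{
        if (p == PLATFORM_NEW) {
                ops->write = new_write;
                ops->read = new_read;
        } else {
                ops->write = old_write;
                ops->read = old_read;
        }
}

int main(void)
{
        struct infoframe_ops ops;

        infoframe_init(&ops, PLATFORM_NEW);
        ops.write("AVI");
        ops.read("AVI");
        return 0;
}
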
@@ -2427,6 +3001,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_hdmi->attached_connector = intel_connector;
+
if (is_hdcp_supported(dev_priv, port)) {
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
@@ -2434,9 +3011,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
}
- intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_hdmi->attached_connector = intel_connector;
-
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/intel_hdmi.h
new file mode 100644
index 000000000000..106c2e0bc3c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdmi.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDMI_H__
+#define __INTEL_HDMI_H__
+
+#include <linux/hdmi.h>
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_connector;
+struct drm_encoder;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_hdmi;
+struct drm_connector_state;
+union hdmi_infoframe;
+
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+ enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+u32 intel_hdmi_infoframe_enable(unsigned int type);
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame);
+
+#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 9bd1c9002c2a..94c04f16a2ad 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(i915,
+ ret = __intel_wait_for_register(&i915->uncore,
HUC_STATUS2,
HUC_FW_VERIFIED,
HUC_FW_VERIFIED,
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 7d7bfc7f7ca7..68d47c105939 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
u32 size;
int ret;
GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
huc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+ intel_uncore_write(uncore, DMA_ADDR_0_LOW,
+ lower_32_bits(offset));
+ intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
+ upper_32_bits(offset) & 0xFFFF);
- /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ /*
+ * Hardware doesn't look at destination address for HuC. Set it to 0,
* but still program the correct address space.
*/
- I915_WRITE(DMA_ADDR_1_LOW, 0);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
+ intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
size = huc_fw->header_size + huc_fw->ucode_size;
- I915_WRITE(DMA_COPY_SIZE, size);
+ intel_uncore_write(uncore, DMA_COPY_SIZE, size);
/* Start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+ intel_uncore_write(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
/* Disable the bits once DMA is over */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+ intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 5a733e711355..422685d120e9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
I915_WRITE_FW(GMBUS4, irq_enable);
- ret = intel_wait_for_register_fw(dev_priv,
+ ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e98fd79bd9d..4e0a351bfbca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,20 +164,15 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce);
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * On unwinding the active request, we give it a priority bump
+ * equivalent to a freshly submitted request. This protects it from
+ * being gazumped again, but it would be preferable if we didn't
+ * let it be gazumped in the first place!
+ *
+ * See __unwind_incomplete_requests()
+ */
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
+ /*
+ * After preemption, we insert the active request at the
+ * end of the new priority level. This means that we will be
+ * _lower_ priority than the preemptee all things equal (and
+ * so the preemption is valid), so adjust our comparison
+ * accordingly.
+ */
+ prio |= ACTIVE_PRIORITY;
+ prio--;
+ }
+
+ /* Restrict mere WAIT boosts from triggering preemption */
+ return prio | __NO_PREEMPTION;
+}
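
The bit manipulation above can be modelled in a few lines: a request that has already started gets the "active" bits ORed in and then one subtracted, so it sorts just below a genuinely fresh submission at the same user level. The flag values and the base priority below are made up for the sketch and are not the i915 priority encoding.

#include <stdio.h>

#define PRIO_NEWCLIENT   (1 << 0)  /* illustrative internal-priority bits */
#define PRIO_NOSEMAPHORE (1 << 1)
#define ACTIVE_PRIORITY  (PRIO_NEWCLIENT | PRIO_NOSEMAPHORE)

/* Model of the boost: only bump a request that is already running. */
static int effective_prio(int prio, int has_started)
{
        if ((~prio & ACTIVE_PRIORITY) && has_started) {
                prio |= ACTIVE_PRIORITY; /* treat it like a fresh client... */
                prio--;                  /* ...but sort after real ones     */
        }
        return prio;
}

int main(void)
{
        int base = 8 << 2; /* user priority shifted above the internal bits */

        printf("queued, not started: %d\n", effective_prio(base, 0));
        printf("started, unwound:    %d\n", effective_prio(base, 1));
        printf("fresh new client:    %d\n", base | ACTIVE_PRIORITY);
        return 0;
}
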
+
static int queue_prio(const struct intel_engine_execlists *execlists)
{
struct i915_priolist *p;
@@ -208,9 +231,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
static inline bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
- const int last_prio = rq_prio(rq);
+ int last_prio;
- if (!intel_engine_has_preemption(engine))
+ if (!engine->preempt_context)
return false;
if (i915_request_completed(rq))
@@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
*/
+ last_prio = effective_prio(rq);
if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
last_prio))
return false;
@@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
}
__maybe_unused static inline bool
-assert_priority_queue(const struct intel_engine_execlists *execlists,
- const struct i915_request *prev,
+assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- if (!prev)
- return true;
+ const struct intel_engine_execlists *execlists =
+ &prev->engine->execlists;
/*
* Without preemption, the prev may refer to the still active element
@@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
- if (INTEL_GEN(ctx->i915) >= 11) {
+ if (INTEL_GEN(engine->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
/* bits 37-47 */
@@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
- ce->lrc_desc = desc;
+ return desc;
}
static void unwind_wa_tail(struct i915_request *rq)
@@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+ int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
lockdep_assert_held(&engine->timeline.lock);
@@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* The active request is now effectively the start of a new client
* stream, so give it the equivalent small priority bump to prevent
* it being gazumped a second time by another peer.
+ *
+ * Note we have to be careful not to apply a priority boost to a request
+ * still spinning on its semaphores. If the request hasn't started, that
+ * means it is still waiting for its dependencies to be signaled, and
+ * if we apply a priority boost to this request, we will boost it past
+ * its signalers and so break PI.
+ *
+ * One consequence of this preemption boost is that we may jump
+ * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
+ * making those priorities non-preemptible. They will be moved forward
+ * in the priority queue, but they will not gain immediate access to
+ * the GPU.
*/
- if (!(prio & I915_PRIORITY_NEWCLIENT)) {
- prio |= I915_PRIORITY_NEWCLIENT;
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
+ prio |= ACTIVE_PRIORITY;
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
@@ -395,13 +429,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- __unwind_incomplete_requests(engine);
+ return __unwind_incomplete_requests(engine);
}
static inline void
@@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
- intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
GEM_BUG_ON(!n);
@@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ if (!can_merge_ctx(prev->hw_context, next->hw_context))
+ return false;
+
+ return true;
+}
+
static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
@@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ struct intel_context *ce = engine->preempt_context;
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
-
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last &&
- !can_merge_ctx(rq->hw_context, last->hw_context)) {
+ if (last && !can_merge_rq(last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -740,6 +779,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
goto done;
/*
+ * We must not populate both ELSP[] with the
+ * same LRCA, i.e. we must submit 2 different
+ * contexts if we submit 2 ELSP.
+ */
+ if (last->hw_context == rq->hw_context)
+ goto done;
+
+ /*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
* also have to be careful that we don't queue
@@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
- GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
@@ -790,8 +835,7 @@ done:
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
- execlists->queue_priority_hint =
- port != execlists->port ? rq_prio(last) : INT_MIN;
+ execlists->queue_priority_hint = queue_prio(execlists);
if (submit) {
port_assign(port, last);
@@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
+ GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!execlists->active);
execlists_context_schedule_out(rq,
@@ -851,104 +893,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
-{
- const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
-
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- /* The driver is wedged; don't process any more events. */
-}
-
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- GEM_TRACE("%s current %d\n",
- engine->name, intel_engine_get_seqno(engine));
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
-
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
- execlists_user_end(execlists);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
- GEM_BUG_ON(!rq->global_seqno);
-
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
- }
-
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
@@ -960,6 +904,7 @@ static void process_csb(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
u8 head, tail;
lockdep_assert_held(&engine->timeline.lock);
@@ -995,7 +940,7 @@ static void process_csb(struct intel_engine_cs *engine)
unsigned int status;
unsigned int count;
- if (++head == GEN8_CSB_ENTRIES)
+ if (++head == num_entries)
head = 0;
/*
@@ -1052,14 +997,12 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
rq ? hwsp_seqno(rq) : 0,
- intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
/* Check the context/desc id for this event matches */
@@ -1119,7 +1062,7 @@ static void process_csb(struct intel_engine_cs *engine)
* the wash as hardware, working or not, will need to do the
* invalidation before.
*/
- invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1196,19 +1139,50 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
- intel_ring_free(ce->ring);
+ intel_ring_put(ce->ring);
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void execlists_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __execlists_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
+static int __context_pin(struct i915_vma *vma)
+{
+ unsigned int flags;
+ int err;
+
+ flags = PIN_GLOBAL | PIN_HIGH;
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return err;
+
+ vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
+
+ return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+ vma->obj->pin_global--;
+ __i915_vma_unpin(vma);
+}
+
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@@ -1237,41 +1211,19 @@ static void execlists_context_unpin(struct intel_context *ce)
intel_ring_unpin(ce->ring);
- ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
-
- i915_gem_context_put(ce->gem_context);
-}
-
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
-{
- unsigned int flags;
- int err;
-
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_wc_domain(vma->obj, true);
- if (err)
- return err;
- }
-
- flags = PIN_GLOBAL | PIN_HIGH;
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- return i915_vma_pin(vma, 0, 0, flags);
+ __context_unpin(ce->state);
}
static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1279,29 +1231,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
/* RPCS */
if (engine->class == RENDER_CLASS)
- regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
- &ce->sseu);
+ regs[CTX_R_PWR_CLK_STATE + 1] =
+ gen8_make_rpcs(engine->i915, &ce->sseu);
}
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
void *vaddr;
int ret;
- ret = execlists_context_deferred_alloc(ctx, engine, ce);
+ GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+ ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = __context_pin(ctx, ce->state);
+ ret = __context_pin(ce->state);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ctx->i915) |
+ i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
@@ -1312,55 +1265,60 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- ret = i915_gem_context_pin_hw_id(ctx);
+ ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
goto unpin_ring;
- intel_lr_context_descriptor_update(ctx, engine, ce);
-
- GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+ ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ __execlists_update_reg_state(ce, engine);
- __execlists_update_reg_state(engine, ce);
-
- ce->state->obj->pin_global++;
- i915_gem_context_get(ctx);
- return ce;
+ return 0;
unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
- __i915_vma_unpin(ce->state);
+ __context_unpin(ce->state);
err:
- ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ret;
}
-static const struct intel_context_ops execlists_context_ops = {
- .unpin = execlists_context_unpin,
- .destroy = execlists_context_destroy,
-};
-
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(!ctx->ppgtt);
+ return __execlists_context_pin(ce, ce->engine);
+}
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+static void execlists_context_reset(struct intel_context *ce)
+{
+ /*
+ * Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in the context image. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * The contexts that are still pinned on resume belong to the
+ * kernel, and are local to each engine. All other contexts will
+ * have their head/tail sanitized upon pinning before use, so they
+ * will never see garbage.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ intel_ring_reset(ce->ring, 0);
+ __execlists_update_reg_state(ce, ce->engine);
+}
- ce->ops = &execlists_context_ops;
+static const struct intel_context_ops execlists_context_ops = {
+ .pin = execlists_context_pin,
+ .unpin = execlists_context_unpin,
- return __execlists_context_pin(engine, ctx, ce);
-}
+ .reset = execlists_context_reset,
+ .destroy = execlists_context_destroy,
+};
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
@@ -1387,6 +1345,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = rq->fence.seqno - 1;
intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
return 0;
}
@@ -1424,10 +1386,11 @@ static int emit_pdps(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
*cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_NOOP;
@@ -1447,7 +1410,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1465,7 +1428,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+ if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1732,7 +1695,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_DEBUG_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1796,17 +1759,9 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- /*
- * Make sure we're not enabling the new 12-deep CSB
- * FIFO as that requires a slightly updated handling
- * in the ctx switch irq. Since we're currently only
- * using only 2 elements of the enhanced execlists the
- * deeper FIFO it's not needed and it's not worth adding
- * more statements to the irq handler to support it.
- */
if (INTEL_GEN(dev_priv) >= 11)
I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
else
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1872,20 +1827,72 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
+ intel_engine_stop_cs(engine);
+
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
- process_csb(engine); /* drain preemption events */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static bool lrc_regs_ok(const struct i915_request *rq)
+{
+ const struct intel_ring *ring = rq->ring;
+ const u32 *regs = rq->hw_context->lrc_reg_state;
+
+ /* Quick spot check for the common signs of context corruption */
+
+ if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID))
+ return false;
+
+ if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
+ return false;
+
+ return true;
+}
+
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+{
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+}
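
A self-contained model of the pointer trick above: after a reset the hardware writes entry 0 next, so parking the cached head (and the faked last-write value) on the final slot makes the first comparison see exactly one pending entry once slot 0 is written. The six-entry buffer below is an arbitrary size chosen for the sketch.

#include <stdio.h>

#define CSB_ENTRIES 6 /* arbitrary size for this sketch */

struct csb {
        unsigned int head;  /* last entry we consumed          */
        unsigned int write; /* last entry the "HW" has written */
};

static void reset_csb(struct csb *c)
{
        /* Park both pointers on the last slot so the next HW write
         * to slot 0 shows up as exactly one pending entry. */
        c->head = CSB_ENTRIES - 1;
        c->write = CSB_ENTRIES - 1;
}

static void consume(struct csb *c)
{
        while (c->head != c->write) {
                c->head = (c->head + 1) % CSB_ENTRIES;
                printf("processing CSB entry %u\n", c->head);
        }
}

int main(void)
{
        struct csb c;

        reset_csb(&c);
        consume(&c);        /* nothing pending right after reset      */
        c.write = 0;        /* HW reports a context switch in slot 0  */
        consume(&c);        /* processes exactly entry 0              */
        return 0;
}
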
+
+static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_context *ce;
struct i915_request *rq;
- unsigned long flags;
u32 *regs;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ if (!port_isset(execlists->port))
+ goto out_clear;
+
+ ce = port_request(execlists->port)->hw_context;
/*
* Catch up with any missed context-switch interrupts.
@@ -1900,17 +1907,28 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* Push back any incomplete requests for replay after the reset. */
rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ goto out_replay;
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ if (rq->hw_context != ce) { /* caught just before a CS event */
+ rq = NULL;
+ goto out_replay;
+ }
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
- if (!rq)
- goto out_unlock;
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, the context
+ * image becomes corrupt.
+ *
+ * Otherwise, if we have not started yet, the request should replay
+ * perfectly and we do not need to flag the result as being erroneous.
+ */
+ if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ goto out_replay;
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1924,8 +1942,8 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
i915_reset_request(rq, stalled);
- if (!stalled)
- goto out_unlock;
+ if (!stalled && lrc_regs_ok(rq))
+ goto out_replay;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -1935,21 +1953,103 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = rq->hw_context->lrc_reg_state;
+ regs = ce->lrc_reg_state;
if (engine->pinned_default_state) {
memcpy(regs, /* skip restoring the vanilla PPHWSP */
engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
engine->context_size - PAGE_SIZE);
}
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
- /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
- rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
- intel_ring_update_space(rq->ring);
+ /* Rerun the request; its payload has been neutered (if guilty). */
+out_replay:
+ ce->ring->head =
+ rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ intel_ring_update_space(ce->ring);
+ __execlists_update_reg_state(ce, engine);
+
+out_clear:
+ execlists_clear_all_active(execlists);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
- execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
- __execlists_update_reg_state(engine, rq->hw_context);
+ __execlists_reset(engine, stalled);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ /* The driver is wedged; don't process any more events. */
+}
+
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ __execlists_reset(engine, true);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
-out_unlock:
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1961,13 +2061,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
* to sleep before we restart and reload a context.
- *
*/
GEM_BUG_ON(!reset_in_progress(execlists));
if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
execlists->tasklet.func(execlists->tasklet.data);
- tasklet_enable(&execlists->tasklet);
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
GEM_TRACE("%s: depth->%d\n", engine->name,
atomic_read(&execlists->tasklet.count));
}
@@ -1978,7 +2079,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
{
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1989,19 +2090,37 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* particular all the gen that do not need the w/a at all!), if we
* took care to make sure that on every switch into this context
* (both ordinary and for preemption) that arbitration was enabled
- * we would be fine. However, there doesn't seem to be a downside to
- * being paranoid and making sure it is set before each batch and
- * every context-switch.
- *
- * Note that if we fail to enable arbitration before the request
- * is complete, then we do not see the context-switch interrupt and
- * the engine hangs (with RING_HEAD == RING_TAIL).
- *
- * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
*/
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen9_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- /* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
@@ -2017,16 +2136,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
}
static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct i915_request *request, u32 mode)
@@ -2148,16 +2265,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
-
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset);
+ request->timeline->hwsp_offset,
+ 0);
cs = gen8_emit_ggtt_write(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine));
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ MI_FLUSH_DW_STORE_INDEX);
+
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2180,9 +2297,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_CS_STALL);
cs = gen8_emit_ggtt_write_rcs(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine),
- PIPE_CONTROL_CS_STALL);
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ PIPE_CONTROL_STORE_DATA_INDEX);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2231,7 +2348,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
}
if (engine->cleanup)
@@ -2254,19 +2371,18 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
engine->park = NULL;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (engine->i915->preempt_context)
+ if (!intel_vgpu_active(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (engine->preempt_context &&
+ HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
- engine->i915->caps.scheduler =
- I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY;
- if (intel_engine_has_preemption(engine))
- engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@@ -2279,7 +2395,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
- engine->context_pin = execlists_context_pin;
+ engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
@@ -2299,7 +2415,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- engine->emit_bb_start = gen8_emit_bb_start;
+ if (IS_GEN(engine->i915, 8))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -2309,11 +2428,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 11) {
const u8 irq_shifts[] = {
- [RCS] = GEN8_RCS_IRQ_SHIFT,
- [BCS] = GEN8_BCS_IRQ_SHIFT,
- [VCS] = GEN8_VCS1_IRQ_SHIFT,
- [VCS2] = GEN8_VCS2_IRQ_SHIFT,
- [VECS] = GEN8_VECS_IRQ_SHIFT,
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
};
shift = irq_shifts[engine->id];
@@ -2348,6 +2467,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
+ u32 base = engine->mmio_base;
int ret;
ret = intel_engine_init_common(engine);
@@ -2357,23 +2477,19 @@ static int logical_ring_init(struct intel_engine_cs *engine)
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- execlists->ctrl_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_ELSP(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
}
execlists->preempt_complete_status = ~0u;
- if (i915->preempt_context) {
- struct intel_context *ce =
- to_intel_context(i915->preempt_context, engine);
-
+ if (engine->preempt_context)
execlists->preempt_complete_status =
- upper_32_bits(ce->lrc_desc);
- }
+ upper_32_bits(engine->preempt_context->lrc_desc);
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2381,6 +2497,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
+ if (INTEL_GEN(engine->i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
reset_csb_pointers(execlists);
return 0;
@@ -2592,13 +2713,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static void execlists_init_reg_state(u32 *regs,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 base = engine->mmio_base;
+ struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
bool rcs = engine->class == RENDER_CLASS;
+ u32 base = engine->mmio_base;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2610,10 +2731,10 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+ CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(engine->i915) < 11) {
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
@@ -2659,42 +2780,42 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
/* PDP values will be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
-
- if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+ CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
+ CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+ ASSIGN_CTX_PML4(ppgtt, regs);
} else {
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
- i915_oa_init_reg_state(engine, ctx, regs);
+ i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(engine->i915) >= 10)
regs[CTX_END] |= BIT(0);
}
static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ring *ring)
@@ -2703,19 +2824,12 @@ populate_lr_context(struct i915_gem_context *ctx,
u32 *regs;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
- return ret;
- }
-
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->mm.dirty = true;
if (engine->default_state) {
/*
@@ -2740,23 +2854,35 @@ populate_lr_context(struct i915_gem_context *ctx,
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ctx, engine, ring);
+ execlists_init_reg_state(regs, ce, engine, ring);
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+ if (ce->gem_context == engine->i915->preempt_context &&
+ INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+ ret = 0;
err_unpin_ctx:
+ __i915_gem_object_flush_map(ctx_obj,
+ LRC_HEADER_PAGES * PAGE_SIZE,
+ engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+ if (ctx->timeline)
+ return i915_timeline_get(ctx->timeline);
+ else
+ return i915_timeline_create(ctx->i915, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
@@ -2776,30 +2902,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
- ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+ ctx_obj = i915_gem_object_create(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+ timeline = get_timeline(ce->gem_context);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
}
- ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+ ring = intel_engine_create_ring(engine,
+ timeline,
+ ce->gem_context->ring_size);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+ ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ring_free;
@@ -2811,45 +2939,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return 0;
error_ring_free:
- intel_ring_free(ring);
+ intel_ring_put(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
return ret;
}
-void intel_lr_context_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
-
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- for_each_engine(engine, i915, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- if (!ce->state)
- continue;
-
- intel_ring_reset(ce->ring, 0);
-
- if (ce->pin_count) /* otherwise done in context_pin */
- __execlists_update_reg_state(engine, ce);
- }
- }
-}
-
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -2910,6 +3005,37 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub)
+{
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub) {
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+ }
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
+ }
+
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ ce->ring->head = head;
+ intel_ring_update_space(ce->ring);
+
+ __execlists_update_reg_state(ce, engine);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f1aec8a6986f..84aa230ea27b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -28,20 +28,18 @@
#include "i915_gem_context.h"
/* Execlists regs */
-#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
-#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+
#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
@@ -55,10 +53,11 @@
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-#define GEN8_CSB_WRITE_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
-#define GEN8_CSB_READ_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
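A sketch of how these masks are consumed, mirroring the GEN8_CSB_READ_PTR()/GEN8_CSB_WRITE_PTR() helpers removed just above (write pointer in the low bits, read pointer shifted up by 8); gen11 widens the pointer field from 3 to 4 bits and the buffer from 6 to 12 entries. The helper names below are illustrative, not the driver's.

#include <linux/types.h>

static inline unsigned int csb_write_ptr(u32 csb_status, u32 ptr_mask)
{
        return csb_status & ptr_mask;           /* bits [2:0] on gen8, [3:0] on gen11 */
}

static inline unsigned int csb_read_ptr(u32 csb_status, u32 ptr_mask)
{
        return (csb_status >> 8) & ptr_mask;    /* same field, 8 bits higher */
}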
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
@@ -102,9 +101,13 @@ struct drm_printer;
struct drm_i915_private;
struct i915_gem_context;
-void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub);
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 322bdddda164..7028d0cf3bb1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -22,10 +22,14 @@
*
*
*/
-#include <drm/drm_edid.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drm_edid.h>
+
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
@@ -452,6 +456,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ /* FIXME implement this */
+}
+
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -470,6 +482,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
return;
}
+ /* FIXME precompute infoframes */
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
conn_state->connector,
adjusted_mode);
@@ -504,9 +518,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ /* FIXME actually read this from the hw */
return enc_to_intel_lspcon(&encoder->base)->active;
}
diff --git a/drivers/gpu/drm/i915/intel_lspcon.h b/drivers/gpu/drm/i915/intel_lspcon.h
new file mode 100644
index 000000000000..37cfddf8a9c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LSPCON_H__
+#define __INTEL_LSPCON_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_lspcon;
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b4aa49768e90..51d1d59c1619 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -28,17 +28,22 @@
*/
#include <acpi/button.h>
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <linux/acpi.h>
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
@@ -152,24 +157,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
val = I915_READ(PP_ON_DELAYS(0));
- pps->port = (val & PANEL_PORT_SELECT_MASK) >>
- PANEL_PORT_SELECT_SHIFT;
- pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
- pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
+ pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = I915_READ(PP_OFF_DELAYS(0));
- pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
- pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
+ pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = I915_READ(PP_DIVISOR(0));
- pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
- PP_REFERENCE_DIVIDER_SHIFT;
- val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT;
+ pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
+ val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
* Remove the BSpec specified +1 (100ms) offset that accounts for a
* too short power-cycle delay due to the asynchronous programming of
@@ -209,16 +207,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
val |= PANEL_POWER_RESET;
I915_WRITE(PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
- (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
- (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
- I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
- (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+ I915_WRITE(PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+
+ I915_WRITE(PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
- val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
- PANEL_POWER_CYCLE_DELAY_SHIFT;
- I915_WRITE(PP_DIVISOR(0), val);
+ I915_WRITE(PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->t4, 1000) + 1));
}
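The REG_FIELD_GET()/REG_FIELD_PREP() conversions in the two PPS hunks above follow the same contract as the kernel's generic bitfield helpers: the shift is derived from the mask, so the open-coded "& MASK, >> SHIFT" and "<< SHIFT" pairs can go away. A standalone sketch with an invented field — DEMO_FIELD_MASK, demo_get() and demo_pack() are not i915 names.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_FIELD_MASK GENMASK(12, 8)          /* hypothetical 5-bit field */

static u32 demo_get(u32 reg)
{
        /* old style: (reg & DEMO_FIELD_MASK) >> 8 */
        return FIELD_GET(DEMO_FIELD_MASK, reg);
}

static u32 demo_pack(u32 val)
{
        /* old style: (val << 8), relying on val fitting the field */
        return FIELD_PREP(DEMO_FIELD_MASK, val);
}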
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -315,7 +316,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -329,7 +331,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -746,20 +749,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *intel_encoder;
+ struct intel_encoder *encoder;
- for_each_intel_encoder(dev, intel_encoder)
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- return intel_encoder;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ return encoder;
+ }
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}
@@ -813,7 +817,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
@@ -952,30 +955,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- DRM_DEBUG_KMS("using preferred mode from EDID: ");
- drm_mode_debug_printmodeline(scan);
-
- fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode)
- goto out;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/* Failed to get EDID, what about VBT? */
- if (dev_priv->vbt.lfp_lvds_vbt_mode) {
- DRM_DEBUG_KMS("using mode from VBT: ");
- drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
-
- fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- goto out;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/*
* If we didn't get EDID, try checking if the panel is already turned
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/intel_lvds.h
new file mode 100644
index 000000000000..bc9c8b84ba2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_H__
+#define __INTEL_LVDS_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_i915_private;
+
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_LVDS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 331e7a678fb7..274ba78500c0 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
{
switch (engine_id) {
- case RCS:
+ case RCS0:
return GEN9_GFX_MOCS(index);
- case VCS:
+ case VCS0:
return GEN9_MFX0_MOCS(index);
- case BCS:
+ case BCS0:
return GEN9_BLT_MOCS(index);
- case VECS:
+ case VECS0:
return GEN9_VEBOX_MOCS(index);
- case VCS2:
+ case VCS1:
return GEN9_MFX1_MOCS(index);
- case VCS3:
+ case VCS2:
return GEN11_MFX2_MOCS(index);
default:
MISSING_CASE(engine_id);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5e00ee9270b5..8fa1159d097f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,9 +32,10 @@
#include <drm/i915_drm.h>
-#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_opregion.h"
+#include "intel_panel.h"
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c0df1dbb0069..eb317759b5d3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
return i915_request_alloc(engine, dev_priv->kernel_context);
}
@@ -446,7 +446,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct i915_request *rq;
@@ -1430,7 +1430,7 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(ISR);
+ error->isr = I915_READ(GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index beca98d2b035..4ab4ce6569e7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,7 +33,10 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pwm.h>
+
+#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_panel.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
@@ -46,27 +49,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev_priv: i915 device instance
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
+static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
+ const struct drm_display_mode *fixed_mode)
{
- struct drm_display_mode *scan, *tmp_mode;
- int temp_downclock;
+ return drm_mode_match(downclock_mode, fixed_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ downclock_mode->clock < fixed_mode->clock;
+}
- temp_downclock = fixed_mode->clock;
- tmp_mode = NULL;
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan, *best_mode = NULL;
+ struct drm_display_mode *downclock_mode;
+ int best_clock = fixed_mode->clock;
- list_for_each_entry(scan, &connector->probed_modes, head) {
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
/*
* If one mode has the same resolution as the fixed_panel
* mode but a different refresh rate, it means
@@ -74,29 +76,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- tmp_mode = scan;
- }
+ if (is_downclock_mode(scan, fixed_mode) &&
+ scan->clock < best_clock) {
+ /*
+ * A downclock mode has already been found, but we
+ * keep looking for an even lower one.
+ */
+ best_clock = scan->clock;
+ best_mode = scan;
}
}
- if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
- else
+ if (!best_mode)
+ return NULL;
+
+ downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
+ if (!downclock_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(downclock_mode);
+
+ return downclock_mode;
+}
+
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan;
+ struct drm_display_mode *fixed_mode;
+
+ if (list_empty(&connector->base.probed_modes))
+ return NULL;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
+ continue;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+ }
+
+ scan = list_first_entry(&connector->base.probed_modes,
+ typeof(*scan), head);
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+}
+
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_info *info = &connector->base.display_info;
+ struct drm_display_mode *fixed_mode;
+
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ return NULL;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm,
+ dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!fixed_mode)
+ return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ info->width_mm = fixed_mode->width_mm;
+ info->height_mm = fixed_mode->height_mm;
+
+ return fixed_mode;
}
/* adjusted_mode has been preset to be the panel's fixed mode */
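The intel_lvds.c hunk earlier in this patch shows the intended call order for the two helpers added above: prefer the EDID fixed mode, then fall back to the VBT mode. A condensed usage sketch, where pick_fixed_mode() is an invented wrapper rather than driver code:

static struct drm_display_mode *
pick_fixed_mode(struct intel_connector *connector)
{
        struct drm_display_mode *fixed_mode;

        /* Prefer whatever the panel's EDID advertises... */
        fixed_mode = intel_panel_edid_fixed_mode(connector);
        if (fixed_mode)
                return fixed_mode;

        /* ...otherwise fall back to the VBT-described mode, if any. */
        return intel_panel_vbt_fixed_mode(connector);
}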
@@ -1894,15 +1965,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_panel.h b/drivers/gpu/drm/i915/intel_panel.h
new file mode 100644
index 000000000000..cedeea443336
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PANEL_H__
+#define __INTEL_PANEL_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+ enum pipe pipe);
+void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector);
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index a8554dc4f196..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -24,23 +24,29 @@
*
*/
-#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
#include "intel_drv.h"
+#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
- "none",
- "plane1",
- "plane2",
- "pf",
- "pipe",
- "TV",
- "DP-B",
- "DP-C",
- "DP-D",
- "auto",
+ [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+ [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+ [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+ [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+ [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+ [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+ [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+ [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+ [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+ [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+ [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+ [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+ [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+ [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -192,8 +198,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- bool need_stable_symbols = false;
-
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
@@ -209,56 +213,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
- need_stable_symbols = true;
- break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
default:
+ /*
+ * The DP CRC source doesn't work on g4x.
+ * It can be made to work to some degree by selecting
+ * the correct CRC source before the port is enabled,
+ * and not touching the CRC source bits again until
+ * the port is disabled. But even then the bits
+ * eventually get stuck and a reboot is needed to get
+ * working CRCs on the pipe again. Let's simply
+ * refuse to use DP CRCs on g4x.
+ */
return -EINVAL;
}
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- WARN_ON(!IS_G4X(dev_priv));
-
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
- if (pipe == PIPE_A)
- tmp |= PIPE_A_SCRAMBLE_RESET;
- else
- tmp |= PIPE_B_SCRAMBLE_RESET;
-
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
return 0;
}
@@ -283,24 +254,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- if (pipe == PIPE_A)
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
- }
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -329,19 +282,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
+ int ret;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -356,20 +308,20 @@ retry:
goto put_state;
}
- if (HAS_IPS(dev_priv)) {
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- pipe_config->ips_force_disable = enable;
- }
+ pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->crc_enabled = enable;
+
+ if (IS_HASWELL(dev_priv) &&
+ pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+ bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+ pipe_config->pch_pfit.force_thru;
+ bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+ enable;
- if (IS_HASWELL(dev_priv)) {
pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
+
+ if (old_need_power_well != new_need_power_well)
pipe_config->base.connectors_changed = true;
}
@@ -392,11 +344,10 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- u32 *val,
- bool set_wa)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PF;
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PLANE1:
@@ -405,11 +356,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PLANE2:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
- case INTEL_PIPE_CRC_SOURCE_PF:
- if (set_wa && (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, true);
-
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -422,10 +369,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
- enum intel_pipe_crc_source *source, u32 *val,
- bool set_wa)
+ enum intel_pipe_crc_source *source, u32 *val)
{
if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
@@ -435,8 +424,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
+ else if (INTEL_GEN(dev_priv) < 9)
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
+ return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
static int
@@ -486,9 +477,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
switch (source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_TV:
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- case INTEL_PIPE_CRC_SOURCE_DP_D:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -532,7 +520,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_PLANE1:
case INTEL_PIPE_CRC_SOURCE_PLANE2:
- case INTEL_PIPE_CRC_SOURCE_PF:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -552,8 +558,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
return vlv_crc_source_valid(dev_priv, source);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
- else
+ else if (INTEL_GEN(dev_priv) < 9)
return ivb_crc_source_valid(dev_priv, source);
+ else
+ return skl_crc_source_valid(dev_priv, source);
}
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -592,6 +600,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
+ bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
@@ -605,7 +614,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
return -EIO;
}
- ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
+ enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+ if (enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
if (ret != 0)
goto out;
@@ -614,18 +627,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
out:
+ if (!enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
@@ -641,7 +652,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
if (!crtc->crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+ if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
new file mode 100644
index 000000000000..81eaf1854788
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PIPE_CRC_H__
+#define __INTEL_PIPE_CRC_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
+#else
+#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+#endif
+
+#endif /* __INTEL_PIPE_CRC_H__ */
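The !CONFIG_DEBUG_FS stubs above can #define the three hooks to NULL because they are only ever used as drm_crtc_funcs function pointers, and — as far as I can tell, treat this as an assumption — the DRM core simply skips CRC debugfs support when .set_crc_source is NULL. Illustrative wiring only; demo_crtc_funcs is invented for the example.

#include <drm/drm_crtc.h>
#include "intel_pipe_crc.h"

static const struct drm_crtc_funcs demo_crtc_funcs = {
        /* Each of these resolves to NULL when CONFIG_DEBUG_FS is disabled. */
        .set_crc_source         = intel_crtc_set_crc_source,
        .verify_crc_source      = intel_crtc_verify_crc_source,
        .get_crc_sources        = intel_crtc_get_crc_sources,
};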
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54307f1df6cf..44be676fabd6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -35,6 +35,9 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "../../../platform/x86/intel_ips.h"
/**
@@ -338,12 +341,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
mutex_unlock(&dev_priv->pcu_lock);
}
@@ -850,7 +853,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
u32 reg;
unsigned int wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq);
@@ -3624,7 +3627,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 11)
return enabled_slices;
- if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+ /*
+ * FIXME: for now we'll only ever use 1 slice; pretend that we have
+ * only that 1 slice enabled until we have a proper way for on-demand
+ * toggling of the second slice.
+ */
+ if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
enabled_slices++;
return enabled_slices;
@@ -3919,12 +3927,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
-static unsigned int skl_cursor_allocation(int num_active)
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
{
- if (num_active == 1)
- return 32;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ WARN_ON(ret);
- return 8;
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
@@ -3970,7 +4009,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12)
+ if (is_planar_yuv_format(fourcc))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4180,7 +4219,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (intel_plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && format != DRM_FORMAT_NV12)
+ if (plane == 1 && !is_planar_yuv_format(format))
return 0;
/*
@@ -4192,7 +4231,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height = drm_rect_height(&intel_pstate->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && format == DRM_FORMAT_NV12) {
+ if (plane == 1 && is_planar_yuv_format(format)) {
width /= 2;
height /= 2;
}
@@ -4308,7 +4347,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- struct skl_plane_wm *wm;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4349,7 +4387,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
alloc_size -= total[PLANE_CURSOR];
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
@@ -4365,15 +4403,23 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ if (WARN_ON(wm->wm[level].min_ddb_alloc >
+ total[PLANE_CURSOR])) {
+ blocks = U32_MAX;
+ break;
+ }
continue;
+ }
- wm = &cstate->wm.skl.optimal.planes[plane_id];
blocks += wm->wm[level].min_ddb_alloc;
blocks += wm->uv_wm[level].min_ddb_alloc;
}
- if (blocks < alloc_size) {
+ if (blocks <= alloc_size) {
alloc_size -= blocks;
break;
}
@@ -4392,6 +4438,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4405,8 +4453,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (total_data_rate == 0)
break;
- wm = &cstate->wm.skl.optimal.planes[plane_id];
-
rate = plane_data_rate[plane_id];
extra = min_t(u16, alloc_size,
DIV64_U64_ROUND_UP(alloc_size * rate,
@@ -4431,14 +4477,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+ struct skl_ddb_entry *plane_alloc =
+ &cstate->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *uv_plane_alloc =
+ &cstate->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
- plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
- uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
-
/* Gen11+ uses a separate plane for UV watermarks */
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
@@ -4464,8 +4510,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
- memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+ if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
+ wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+
+ /*
+ * Wa_1408961008:icl, ehl
+ * Underruns with WM1+ disabled
+ */
+ if (IS_GEN(dev_priv, 11) &&
+ level == 1 && wm->wm[0].plane_en) {
+ wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
+ wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
}
}
@@ -4474,7 +4547,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* don't have enough DDB blocks for it.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
@@ -4568,57 +4643,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int color_plane)
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
{
- struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- const struct drm_framebuffer *fb = pstate->fb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
- /* only NV12 format has two planes */
- if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
- DRM_DEBUG_KMS("Non NV12 format have single plane\n");
+ /* only planar format has two planes */
+ if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
- wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = fb->format->format == DRM_FORMAT_NV12;
-
- if (plane->id == PLANE_CURSOR) {
- wp->width = intel_pstate->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
- }
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = is_planar_yuv_format(format->format);
+ wp->width = width;
if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[color_plane];
- wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
- intel_pstate);
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
if (INTEL_GEN(dev_priv) >= 11 &&
- fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
- if (drm_rotation_90_or_270(pstate->rotation)) {
-
+ if (drm_rotation_90_or_270(rotation)) {
switch (wp->cpp) {
case 1:
wp->y_min_scanlines = 16;
@@ -4663,12 +4726,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
+
wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(cstate));
+ intel_get_linetime_us(crtc_state));
return 0;
}
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int width;
+
+ if (plane->id == PLANE_CURSOR) {
+ width = plane_state->base.crtc_w;
+ } else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
+ }
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->base.rotation,
+ skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4679,14 +4770,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
}
static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4805,19 +4894,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ skl_compute_plane_wm(cstate, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4914,7 +5001,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@@ -4936,13 +5023,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
return 0;
}
-static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@@ -4968,8 +5054,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
@@ -5006,10 +5091,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5026,11 +5111,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
to_intel_plane_state(pstate);
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = icl_build_plane_wm(cstate, intel_pstate);
else
- ret = skl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = skl_build_plane_wm(cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5056,11 +5139,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
{
u32 val = 0;
- if (level->plane_en) {
+ if (level->plane_en)
val |= PLANE_WM_EN;
- val |= level->plane_res_b;
- val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- }
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
I915_WRITE_FW(reg, val);
}
@@ -5126,6 +5210,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
return l1->plane_en == l2->plane_en &&
+ l1->ignore_lines == l2->ignore_lines &&
l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b;
}
@@ -5169,7 +5254,7 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
}
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
+ const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
{
int i;
@@ -5183,23 +5268,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
- const struct skl_pipe_wm *old_pipe_wm,
- struct skl_pipe_wm *pipe_wm, /* out */
- bool *changed /* out */)
-{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
- int ret;
-
- ret = skl_build_pipe_wm(cstate, pipe_wm);
- if (ret)
- return ret;
-
- *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
-
- return 0;
-}
-
static u32
pipes_modified(struct intel_atomic_state *state)
{
@@ -5269,6 +5337,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
return 0;
}
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@@ -5279,8 +5352,16 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -5291,10 +5372,86 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
+ continue;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
@@ -5449,10 +5606,9 @@ static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5470,12 +5626,8 @@ skl_compute_wm(struct intel_atomic_state *state)
* pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- cstate, i) {
- const struct skl_pipe_wm *old_pipe_wm =
- &old_crtc_state->wm.skl.optimal;
-
- pipe_wm = &cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ new_crtc_state, i) {
+ ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
@@ -5483,7 +5635,9 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- if (changed)
+ if (!skl_pipe_wm_equals(crtc,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
@@ -5609,6 +5763,7 @@ static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
@@ -5986,7 +6141,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -6451,7 +6606,7 @@ static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
ei_down * threshold_down / 100));
I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
+ (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -6629,9 +6784,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* punit into committing the voltage change) as that takes a lot less
* power than the render powerwell.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
if (err)
DRM_ERROR("Failed to set RPS for idle\n");
@@ -6691,8 +6846,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct i915_request *rq,
- struct intel_rps_client *rps_client)
+void gen6_rps_boost(struct i915_request *rq)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
unsigned long flags;
@@ -6721,7 +6875,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
+ atomic_inc(&rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
@@ -6782,11 +6936,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
/* We're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@@ -6945,7 +7099,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
if (IS_GEN(dev_priv, 9))
@@ -6963,7 +7117,79 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+}
+
+static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /*
+ * 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
+
+ if (HAS_GUC(dev_priv))
+ I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from intel_pstate is that we
+ * do not want the enable hysteresis to less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
+ I915_WRITE(GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@@ -6977,7 +7203,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7054,7 +7280,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7067,7 +7293,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7088,14 +7314,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_RC6_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@@ -7128,7 +7354,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7148,7 +7374,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7196,7 +7422,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -7207,7 +7433,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
* Perhaps there might be some value in exposing these to
* userspace...
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
@@ -7215,7 +7441,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -7638,7 +7864,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7670,14 +7896,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@@ -7712,7 +7938,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7730,7 +7956,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7755,14 +7981,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -7796,7 +8022,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -8037,14 +8263,14 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
return val;
}
-static struct drm_i915_private *i915_mch_dev;
+static struct drm_i915_private __rcu *i915_mch_dev;
static struct drm_i915_private *mchdev_get(void)
{
struct drm_i915_private *i915;
rcu_read_lock();
- i915 = i915_mch_dev;
+ i915 = rcu_dereference(i915_mch_dev);
if (!kref_get_unless_zero(&i915->drm.ref))
i915 = NULL;
rcu_read_unlock();
@@ -8343,22 +8569,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
-/**
- * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev_priv: i915 device
- *
- * We don't want to disable RC6 or other features here, we just want
- * to make sure any work we've queued has finished and won't bother
- * us while we're suspended.
- */
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6)
- return;
-
- /* gen6_rps_idle() will be called later to disable interrupts */
-}
-
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
@@ -8458,6 +8668,8 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
cherryview_enable_rc6(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ gen11_enable_rc6(dev_priv);
else if (INTEL_GEN(dev_priv) >= 9)
gen9_enable_rc6(dev_priv);
else if (IS_BROADWELL(dev_priv))
@@ -9361,7 +9573,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv))
+ if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -9454,7 +9666,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.initial_watermarks = g4x_initial_watermarks;
dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
} else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -9552,7 +9764,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
@@ -9600,7 +9812,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
fast_timeout_us, slow_timeout_ms,
NULL)) {
@@ -9824,6 +10036,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 time_hw, prev_hw, overflow_hw;
unsigned int fw_domains;
unsigned long flags;
@@ -9845,10 +10058,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
return 0;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -9867,7 +10080,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
}
overflow_hw = BIT_ULL(32);
- time_hw = I915_READ_FW(reg);
+ time_hw = intel_uncore_read_fw(uncore, reg);
}
/*
@@ -9889,8 +10102,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div);
}
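
[Illustrative note, not part of the patch.] Among the intel_pm.c changes above, i915_mch_dev gains an __rcu annotation and mchdev_get() pairs rcu_dereference() with kref_get_unless_zero(): readers resolve the globally published pointer inside an RCU read-side section and only keep the object if they can still take a reference. A minimal sketch of that lookup idiom, using hypothetical my_dev / my_dev_global names:

/*
 * Sketch of the RCU + kref lookup idiom relied on by mchdev_get() above.
 * struct my_dev and my_dev_global are hypothetical, not i915 code.
 */
#include <linux/kref.h>
#include <linux/rcupdate.h>

struct my_dev {
	struct kref ref;
};

static struct my_dev __rcu *my_dev_global;

static struct my_dev *my_dev_get(void)
{
	struct my_dev *dev;

	rcu_read_lock();
	dev = rcu_dereference(my_dev_global);
	/* Keep the object only if a reference can still be taken. */
	if (dev && !kref_get_unless_zero(&dev->ref))
		dev = NULL;
	rcu_read_unlock();

	return dev;
}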
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
new file mode 100644
index 000000000000..674a3f0f16a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PM_H__
+#define __INTEL_PM_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_device;
+struct drm_i915_private;
+struct i915_request;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct skl_ddb_allocation;
+struct skl_ddb_entry;
+struct skl_pipe_wm;
+struct skl_wm_level;
+
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct i915_request *rq);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out);
+void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
+void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+bool ilk_disable_lp_wm(struct drm_device *dev);
+int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *cstate);
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 84a0fb981561..963663ba0edf 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -21,6 +21,14 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_atomic_helper.h>
+
+#include "i915_drv.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
/**
* DOC: Panel Self Refresh (PSR/SRD)
*
@@ -51,10 +59,6 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-
-#include "intel_drv.h"
-#include "i915_drv.h"
-
static bool psr_global_enabled(u32 debug)
{
switch (debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -78,9 +82,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
- case I915_PSR_DEBUG_DEFAULT:
- if (i915_modparams.enable_psr <= 0)
- return false;
default:
return crtc_state->has_psr2;
}
@@ -435,32 +436,16 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ u32 val = 0;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
-
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
- if (IS_HASWELL(dev_priv))
- val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
- if (dev_priv->psr.link_standby)
- val |= EDP_PSR_LINK_STANDBY;
+ if (INTEL_GEN(dev_priv) >= 11)
+ val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
@@ -469,7 +454,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
@@ -483,6 +468,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames, we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (dev_priv->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
+ val |= intel_psr1_get_tp_time(intel_dp);
+
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
@@ -509,16 +523,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ /*
+ * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
+ * recommending keep this bit unset while PSR2 is enabled.
+ */
+ I915_WRITE(EDP_PSR_CTL, 0);
+
I915_WRITE(EDP_PSR2_CTL, val);
}
@@ -530,11 +550,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -575,6 +590,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
@@ -610,9 +630,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- if (dev_priv->psr.enabled)
- return;
+ WARN_ON(dev_priv->psr.enabled);
+
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.prepared) {
- DRM_DEBUG_KMS("PSR already in use\n");
+
+ if (!psr_global_enabled(dev_priv->psr.debug)) {
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
goto unlock;
}
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.prepared = true;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
- if (psr_global_enabled(dev_priv->psr.debug))
- intel_psr_enable_locked(dev_priv, crtc_state);
- else
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -819,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
- 2000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ psr_status, psr_status_mask, 0, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.prepared) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
intel_psr_disable_locked(intel_dp);
- dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense write to the current active
+ * pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This functions will update PSR states, disabling, enabling or switching PSR
+ * version when executing fastsets. For full modeset, intel_psr_disable() and
+ * intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ bool enable, psr2_enable;
+
+ if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ return;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+ if (crtc_state->crc_enabled && psr->enabled)
+ psr_force_hw_tracking_exit(dev_priv);
+
+ goto unlock;
+ }
+
+ if (psr->enabled)
+ intel_psr_disable_locked(intel_dp);
+
+ if (enable)
+ intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* @new_crtc_state: new CRTC state
@@ -890,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -915,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
-static bool switching_psr(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
- u32 mode)
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
- /* Can't switch psr state anyway if PSR2 is not supported. */
- if (!crtc_state || !crtc_state->has_psr2)
- return false;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ int err;
- if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
- if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ state->acquire_ctx = &ctx;
+
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+ struct intel_crtc_state *intel_crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto error;
+ }
+
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
- return false;
+ if (crtc_state->active && intel_crtc_state->has_psr) {
+ /* Mark mode as changed to trigger a pipe->update() */
+ crtc_state->mode_changed = true;
+ break;
+ }
+ }
+
+ err = drm_atomic_commit(state);
+
+error:
+ if (err == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ err = drm_modeset_backoff(&ctx);
+ if (!err)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_atomic_state_put(state);
+
+ return err;
}
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 val)
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state = NULL;
- struct drm_crtc_commit *commit;
- struct drm_crtc *crtc;
- struct intel_dp *dp;
+ const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+ u32 old_mode;
int ret;
- bool enable;
- u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
- if (ret)
- return ret;
-
- /* dev_priv->psr.dp should be set once and then never touched again. */
- dp = READ_ONCE(dev_priv->psr.dp);
- conn_state = dp->attached_connector->base.state;
- crtc = conn_state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- commit = crtc_state->base.commit;
- } else {
- commit = conn_state->commit;
- }
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- return ret;
- }
-
ret = mutex_lock_interruptible(&dev_priv->psr.lock);
if (ret)
return ret;
- enable = psr_global_enabled(val);
-
- if (!enable || switching_psr(dev_priv, crtc_state, mode))
- intel_psr_disable_locked(dev_priv->psr.dp);
-
+ old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- if (crtc)
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
- if (dev_priv->psr.prepared && enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
-
mutex_unlock(&dev_priv->psr.lock);
+
+ if (old_mode != mode)
+ ret = intel_psr_fastset_force(dev_priv);
+
return ret;
}
@@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits) {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
- }
+ if (frontbuffer_bits)
+ psr_force_hw_tracking_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_work(&dev_priv->psr.work);
@@ -1176,7 +1229,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (val) {
DRM_DEBUG_KMS("PSR interruption error set\n");
dev_priv->psr.sink_not_reliable = true;
- return;
}
/* Set link_standby x link_off defaults */
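
[Illustrative note, not part of the patch.] The new intel_psr_fastset_force() above uses the standard DRM modeset-lock backoff loop: build an atomic state under an acquire context, and on -EDEADLK clear the state, back off, and retry. The same idiom distilled into a minimal sketch, with a hypothetical force_crtc_update() wrapper and error handling trimmed to the essentials:

/* Sketch of the drm_modeset acquire/backoff retry idiom used above. */
#include <linux/err.h>
#include <linux/errno.h>
#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

static int force_crtc_update(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int err;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		err = PTR_ERR(crtc_state);
	} else {
		/* Mark the mode as changed to trigger a full pipe update. */
		crtc_state->mode_changed = true;
		err = drm_atomic_commit(state);
	}

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);
	return err;
}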
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/intel_psr.h
new file mode 100644
index 000000000000..dc818826f36d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_H__
+#define __INTEL_PSR_H__
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_PSR_H__ */
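The new header collects the PSR entry points in one place. An illustrative caller (not taken from this patch) would guard on platform and sink support before enabling:

#include "intel_psr.h"

static void maybe_enable_psr(struct drm_i915_private *dev_priv,
			     struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state)
{
	/* CAN_PSR() checks both HAS_PSR() and sink support */
	if (CAN_PSR(dev_priv))
		intel_psr_enable(intel_dp, crtc_state);
}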
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7f841dba87b3..029fd8ec1857 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -43,12 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
unsigned int space;
@@ -317,9 +311,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
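With PIPE_CONTROL_STORE_DATA_INDEX the address dword is treated as an offset into the engine's hardware status page rather than a full GGTT address, which is why the intel_hws_seqno_address() helper removed above is no longer needed. The same index-relative store, condensed into a helper with a hypothetical payload:

static u32 *emit_hws_qw_store(u32 *cs, u32 hws_offset, u32 value)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
	*cs++ = hws_offset | PIPE_CONTROL_GLOBAL_GTT; /* e.g. I915_GEM_HWS_HANGCHECK_ADDR */
	*cs++ = value;

	return cs;
}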
@@ -424,10 +418,10 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = intel_hws_seqno_address(rq->engine);
- *cs++ = rq->global_seqno;
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_GLOBAL_GTT_IVB);
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -448,8 +442,8 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -473,8 +467,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
@@ -554,16 +548,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
*/
default:
GEM_BUG_ON(engine->id);
- case RCS:
+ /* fallthrough */
+ case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
hwsp = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
hwsp = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -580,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
if (!IS_GEN_RANGE(dev_priv, 6, 7))
return;
 /* ring should be idle before issuing a sync flush */
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
-
- I915_WRITE(instpm,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(dev_priv,
- instpm, INSTPM_SYNC_FLUSH, 0,
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (intel_wait_for_register(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
@@ -611,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) > 2) {
- I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(dev_priv,
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register(engine->uncore,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE,
MODE_IDLE,
1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
- /* Sometimes we observe that the idle flag is not
+
+ /*
+ * Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
- if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
+ if (ENGINE_READ(engine, RING_HEAD) !=
+ ENGINE_READ(engine, RING_TAIL))
return false;
}
}
- I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+ ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
- I915_WRITE_HEAD(engine, 0);
- I915_WRITE_TAIL(engine, 0);
+ ENGINE_WRITE(engine, RING_HEAD, 0);
+ ENGINE_WRITE(engine, RING_TAIL, 0);
/* The ring must be empty before it is disabled */
- I915_WRITE_CTL(engine, 0);
+ ENGINE_WRITE(engine, RING_CTL, 0);
- return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
+ return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
@@ -645,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->buffer;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -678,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- I915_READ_HEAD(engine);
+ ENGINE_READ(engine, RING_HEAD);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (I915_READ_HEAD(engine))
+ if (ENGINE_READ(engine, RING_HEAD))
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, I915_READ_HEAD(engine));
+ engine->name, ENGINE_READ(engine, RING_HEAD));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -697,42 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_ring_update_space(ring);
/* First wake the ring up to an empty/idle ring */
- I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->head);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
- I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+ if (intel_wait_for_register(engine->uncore,
+ RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_CTL(engine) & RING_VALID,
- I915_READ_HEAD(engine), ring->head,
- I915_READ_TAIL(engine), ring->tail,
- I915_READ_START(engine),
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
if (INTEL_GEN(dev_priv) > 2)
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- I915_WRITE_TAIL(engine, ring->tail);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_queue_breadcrumbs(engine);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -758,11 +759,6 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
}
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
/*
* The guilty request will get skipped on a hung engine.
*
@@ -878,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
return 0;
}
@@ -892,18 +888,12 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
- GEM_BUG_ON(!request->global_seqno);
-
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -911,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
-
i915_request_submit(request);
- I915_WRITE_TAIL(request->engine,
- intel_ring_set_tail(request->ring, request->tail));
+ ENGINE_WRITE(request->engine, RING_TAIL,
+ intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -931,8 +919,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
@@ -953,14 +941,14 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
@@ -988,20 +976,16 @@ gen5_irq_disable(struct intel_engine_cs *engine)
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}
static void
@@ -1010,7 +994,7 @@ i8xx_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
POSTING_READ16(RING_IMR(engine->mmio_base));
}
@@ -1020,7 +1004,7 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
}
static int
@@ -1041,47 +1025,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- engine->irq_keep_mask));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~0);
- gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static int
@@ -1211,15 +1186,6 @@ int intel_ring_pin(struct intel_ring *ring)
else
flags |= PIN_HIGH;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- else
- ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
- if (unlikely(ret))
- goto unpin_timeline;
- }
-
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
goto unpin_timeline;
@@ -1323,6 +1289,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
if (!ring)
return ERR_PTR(-ENOMEM);
+ kref_init(&ring->ref);
INIT_LIST_HEAD(&ring->request_list);
ring->timeline = i915_timeline_get(timeline);
@@ -1347,9 +1314,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
return ring;
}
-void
-intel_ring_free(struct intel_ring *ring)
+void intel_ring_free(struct kref *ref)
{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
@@ -1359,17 +1326,24 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static void intel_ring_context_destroy(struct intel_context *ce)
+static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void ring_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __ring_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
@@ -1400,17 +1374,6 @@ static int __context_pin(struct intel_context *ce)
if (!vma)
return 0;
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
- }
-
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
return err;
@@ -1420,6 +1383,7 @@ static int __context_pin(struct intel_context *ce)
* it cannot reclaim the object until we release it.
*/
vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
return 0;
}
@@ -1436,12 +1400,10 @@ static void __context_unpin(struct intel_context *ce)
i915_vma_unpin(vma);
}
-static void intel_ring_context_unpin(struct intel_context *ce)
+static void ring_context_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce->gem_context);
__context_unpin(ce);
-
- i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1456,6 +1418,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
if (engine->default_state) {
void *defaults, *vaddr;
@@ -1473,29 +1453,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
memcpy(vaddr, defaults, engine->context_size);
-
i915_gem_object_unpin_map(engine->default_state);
- i915_gem_object_unpin_map(obj);
- }
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- *
- * Snooping is required on non-llc platforms in execlist
- * mode, but since all GGTT accesses use PAT entry 0 we
- * get snooping anyway regardless of cache_level.
- *
- * This is only applicable for Ivy Bridge devices since
- * later platforms don't have L3 control bits in the PTE.
- */
- if (IS_IVYBRIDGE(i915)) {
- /* Ignore any error, regard it as a simple optimisation */
- i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
@@ -1513,69 +1474,52 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
int err;
+ /* One ringbuffer to rule them all */
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
ce->state = vma;
}
err = __context_pin(ce);
if (err)
- goto err;
+ return err;
err = __context_pin_ppgtt(ce->gem_context);
if (err)
goto err_unpin;
- i915_gem_context_get(ctx);
-
- /* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
-
- return ce;
+ return 0;
err_unpin:
__context_unpin(ce);
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
+ return err;
}
-static const struct intel_context_ops ring_context_ops = {
- .unpin = intel_ring_context_unpin,
- .destroy = intel_ring_context_destroy,
-};
-
-static struct intel_context *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void ring_context_reset(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ intel_ring_reset(ce->ring, 0);
+}
- ce->ops = &ring_context_ops;
+static const struct intel_context_ops ring_context_ops = {
+ .pin = ring_context_pin,
+ .unpin = ring_context_unpin,
- return __ring_context_pin(engine, ctx, ce);
-}
+ .reset = ring_context_reset,
+ .destroy = ring_context_destroy,
+};
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
@@ -1587,9 +1531,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
return err;
- timeline = i915_timeline_create(engine->i915,
- engine->name,
- engine->status_page.vma);
+ timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
@@ -1621,7 +1563,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
err_unpin:
intel_ring_unpin(ring);
err_ring:
- intel_ring_free(ring);
+ intel_ring_put(ring);
err:
intel_engine_cleanup_common(engine);
return err;
@@ -1632,10 +1574,10 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
- intel_ring_free(engine->buffer);
+ intel_ring_put(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
@@ -1646,16 +1588,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* Restart from the beginning of the rings for convenience */
- for_each_engine(engine, dev_priv, id)
- intel_ring_reset(engine->buffer, 0);
-}
-
static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt)
{
@@ -1667,11 +1599,11 @@ static int load_pd_dir(struct i915_request *rq,
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = ppgtt->pd.base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1690,7 +1622,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
@@ -1703,8 +1635,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
- const int num_rings =
- IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
+ const int num_engines =
+ IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1718,7 +1650,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN(i915, 7))
- len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
@@ -1733,10 +1665,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (IS_GEN(i915, 7)) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1763,8 +1695,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
* placeholder we use to flush other contexts.
*/
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
- engine)->state) |
+ *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
MI_MM_SPACE_GTT |
MI_RESTORE_INHIBIT;
}
@@ -1779,11 +1710,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
if (IS_GEN(i915, 7)) {
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1861,7 +1792,7 @@ static int switch_context(struct i915_request *rq)
* explanation.
*/
loops = 1;
- if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
+ if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
loops = 32;
do {
@@ -1870,15 +1801,15 @@ static int switch_context(struct i915_request *rq)
goto err;
} while (--loops);
- if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
- unwind_mm = intel_engine_flag(engine);
- ppgtt->pd_dirty_rings &= ~unwind_mm;
+ if (ppgtt->pd_dirty_engines & engine->mask) {
+ unwind_mm = engine->mask;
+ ppgtt->pd_dirty_engines &= ~unwind_mm;
hw_flags = MI_FORCE_RESTORE;
}
}
if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1938,7 +1869,7 @@ static int switch_context(struct i915_request *rq)
err_mm:
if (unwind_mm)
- ppgtt->pd_dirty_rings |= unwind_mm;
+ ppgtt->pd_dirty_engines |= unwind_mm;
err:
return ret;
}
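pd_dirty_rings becomes pd_dirty_engines, keyed by engine->mask; based on the intel_engine_flag() helper this patch removes, that mask is assumed to be BIT(engine->id). Marking every engine dirty would then look like this (illustrative only):

static void mark_all_engines_dirty(struct i915_hw_ppgtt *ppgtt,
				   struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		ppgtt->pd_dirty_engines |= engine->mask;
}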
@@ -1947,7 +1878,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
/*
@@ -2108,23 +2039,23 @@ int intel_ring_cacheline_align(struct i915_request *rq)
static void gen6_bsd_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
+ struct intel_uncore *uncore = request->engine->uncore;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
- I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_INDICATOR,
0,
@@ -2137,10 +2068,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
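The conversion above shows the general pattern for raw per-engine MMIO through struct intel_uncore: take forcewake once, use the _fw accessors (which skip the per-access forcewake handling) inside the bracket, then release. Condensed from the function above:

static void psmi_toggle(struct intel_uncore *uncore)
{
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* raw _fw writes are only safe while forcewake is explicitly held */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}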
@@ -2282,7 +2213,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
- engine->context_pin = intel_ring_context_pin;
+ engine->cops = &ring_context_ops;
engine->request_alloc = ring_request_alloc;
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 710ffb221775..72c7c337ace9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,22 +6,20 @@
#include <linux/hashtable.h>
#include <linux/irq_work.h>
+#include <linux/random.h>
#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
-
-#include "i915_reg.h"
#include "i915_pmu.h"
+#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
+#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
-struct i915_sched_attr;
-
-#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -31,28 +29,44 @@ struct i915_sched_attr;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
-struct intel_hw_status_page {
- struct i915_vma *vma;
- u32 *addr;
-};
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
+
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+ intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
-#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
-#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
-#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
-#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
+#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
-#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
-#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+ __ENGINE_REG_OP(read64_2x32, (engine__), \
+ lower_reg__((engine__)->mmio_base), \
+ upper_reg__((engine__)->mmio_base))
-#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
-#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+ __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
-#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
-#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
-#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
-#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
+#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
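Following the convention documented in the comment above, a register define takes the mmio base and the accessors supply engine->mmio_base and engine->uncore automatically. RING_FOO and its offset below are hypothetical; the macros are the ones added here:

#define RING_FOO(base)	_MMIO((base) + 0x1234)	/* hypothetical register */

static void frob_foo(struct intel_engine_cs *engine)
{
	u32 val;

	/* expands to intel_uncore_read(engine->uncore, RING_FOO(engine->mmio_base)) */
	val = ENGINE_READ(engine, RING_FOO);
	ENGINE_WRITE(engine, RING_FOO, val | BIT(0));
	ENGINE_POSTING_READ(engine, RING_FOO);	/* flush the write */
}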
@@ -90,506 +104,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
return "unknown";
}
-#define I915_MAX_SLICES 3
-#define I915_MAX_SUBSLICES 8
-
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
-
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
-
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
-
-struct intel_instdone {
- u32 instdone;
- /* The following exist only in the RCS engine */
- u32 slice_common;
- u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
- u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
-};
-
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 seqno;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct i915_vma *vma;
- void *vaddr;
-
- struct i915_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
-struct i915_gem_context;
-struct drm_i915_reg_table;
-
-/*
- * we use a single page to load ctx workarounds so all of these
- * values are referred in terms of dwords
- *
- * struct i915_wa_ctx_bb:
- * offset: specifies batch starting position, also helpful in case
- * if we want to have multiple batches at different offsets based on
- * some criteria. It is not a requirement at the moment but provides
- * an option for future use.
- * size: size of the batch in DWORDS
- */
-struct i915_ctx_workarounds {
- struct i915_wa_ctx_bb {
- u32 offset;
- u32 size;
- } indirect_ctx, per_ctx;
- struct i915_vma *vma;
-};
-
-struct i915_request;
-
-#define I915_MAX_VCS 4
-#define I915_MAX_VECS 2
-
-/*
- * Engine IDs definitions.
- * Keep instances of the same type engine together.
- */
-enum intel_engine_id {
- RCS = 0,
- BCS,
- VCS,
- VCS2,
- VCS3,
- VCS4,
-#define _VCS(n) (VCS + (n))
- VECS,
- VECS2
-#define _VECS(n) (VECS + (n))
-};
-
-struct i915_priolist {
- struct list_head requests[I915_PRIORITY_COUNT];
- struct rb_node node;
- unsigned long used;
- int priority;
-};
-
-#define priolist_for_each_request(it, plist, idx) \
- for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
- list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist, idx) \
- for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
- list_for_each_entry_safe(it, n, \
- &(plist)->requests[idx - 1], \
- sched.link)
-
-struct st_preempt_hang {
- struct completion completion;
- unsigned int count;
- bool inject_hang;
-};
-
-/**
- * struct intel_engine_execlists - execlist submission queue and port state
- *
- * The struct intel_engine_execlists represents the combined logical state of
- * driver and the hardware state for execlist mode of submission.
- */
-struct intel_engine_execlists {
- /**
- * @tasklet: softirq tasklet for bottom handler
- */
- struct tasklet_struct tasklet;
-
- /**
- * @default_priolist: priority list for I915_PRIORITY_NORMAL
- */
- struct i915_priolist default_priolist;
-
- /**
- * @no_priolist: priority lists disabled
- */
- bool no_priolist;
-
- /**
- * @submit_reg: gen-specific execlist submission register
- * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
- * the ExecList Submission Queue Contents register array for Gen11+
- */
- u32 __iomem *submit_reg;
-
- /**
- * @ctrl_reg: the enhanced execlists control register, used to load the
- * submit queue on the HW and to request preemptions to idle
- */
- u32 __iomem *ctrl_reg;
-
- /**
- * @port: execlist port states
- *
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
- */
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
- /**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
- *
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
- */
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
-
- /**
- * @port_mask: number of execlist ports - 1
- */
- unsigned int port_mask;
-
- /**
- * @queue_priority_hint: Highest pending priority.
- *
- * When we add requests into the queue, or adjust the priority of
- * executing requests, we compute the maximum priority of those
- * pending requests. We can then use this value to determine if
- * we need to preempt the executing requests to service the queue.
- * However, since we may have recorded the priority of an inflight
- * request we wanted to preempt but which has since completed, at the
- * time of dequeuing the priority hint may no longer match the highest
- * available request priority.
- */
- int queue_priority_hint;
-
- /**
- * @queue: queue of requests, in priority lists
- */
- struct rb_root_cached queue;
-
- /**
- * @csb_write: control register for Context Switch buffer
- *
- * Note this register may be either mmio or HWSP shadow.
- */
- u32 *csb_write;
-
- /**
- * @csb_status: status array for Context Switch buffer
- *
- * Note these register may be either mmio or HWSP shadow.
- */
- u32 *csb_status;
-
- /**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
- * @csb_head: context status buffer head
- */
- u8 csb_head;
-
- I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
-};
-
-#define INTEL_ENGINE_CS_MAX_NAME 8
-
-struct intel_engine_cs {
- struct drm_i915_private *i915;
- char name[INTEL_ENGINE_CS_MAX_NAME];
-
- enum intel_engine_id id;
- unsigned int hw_id;
- unsigned int guc_id;
-
- u8 uabi_id;
- u8 uabi_class;
-
- u8 class;
- u8 instance;
- u32 context_size;
- u32 mmio_base;
-
- struct intel_ring *buffer;
-
- struct i915_timeline timeline;
-
- struct drm_i915_gem_object *default_state;
- void *pinned_default_state;
-
- /* Rather than have every client wait upon all user interrupts,
- * with the herd waking after every interrupt and each doing the
- * heavyweight seqno dance, we delegate the task (of being the
- * bottom-half of the user interrupt) to the first client. After
- * every interrupt, we wake up one client, who does the heavyweight
- * coherent seqno read and either goes back to sleep (if incomplete),
- * or wakes up all the completed clients in parallel, before then
- * transferring the bottom-half status to the next client in the queue.
- *
- * Compared to walking the entire list of waiters in a single dedicated
- * bottom-half, we reduce the latency of the first waiter by avoiding
- * a context switch, but incur additional coherent seqno reads when
- * following the chain of request breadcrumbs. Since it is most likely
- * that we have a single client waiting on each seqno, then reducing
- * the overhead of waking that client is much preferred.
- */
- struct intel_breadcrumbs {
- spinlock_t irq_lock;
- struct list_head signalers;
-
- struct irq_work irq_work; /* for use from inside irq_lock */
-
- unsigned int irq_enabled;
-
- bool irq_armed;
- } breadcrumbs;
-
- struct {
- /**
- * @enable: Bitmask of enable sample events on this engine.
- *
- * Bits correspond to sample event types, for instance
- * I915_SAMPLE_QUEUED is bit 0 etc.
- */
- u32 enable;
- /**
- * @enable_count: Reference count for the enabled samplers.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
- /**
- * @sample: Counter values for sampling events.
- *
- * Our internal timer stores the current counters in this field.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
- } pmu;
-
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct i915_gem_batch_pool batch_pool;
-
- struct intel_hw_status_page status_page;
- struct i915_ctx_workarounds wa_ctx;
- struct i915_wa_list ctx_wa_list;
- struct i915_wa_list wa_list;
- struct i915_wa_list whitelist;
-
- u32 irq_keep_mask; /* always keep these interrupts */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- void (*irq_enable)(struct intel_engine_cs *engine);
- void (*irq_disable)(struct intel_engine_cs *engine);
-
- int (*init_hw)(struct intel_engine_cs *engine);
-
- struct {
- void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
- void (*finish)(struct intel_engine_cs *engine);
- } reset;
-
- void (*park)(struct intel_engine_cs *engine);
- void (*unpark)(struct intel_engine_cs *engine);
-
- void (*set_default_submission)(struct intel_engine_cs *engine);
-
- struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-
- int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
-
- int (*emit_flush)(struct i915_request *request, u32 mode);
-#define EMIT_INVALIDATE BIT(0)
-#define EMIT_FLUSH BIT(1)
-#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct i915_request *rq,
- u64 offset, u32 length,
- unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE BIT(0)
-#define I915_DISPATCH_PINNED BIT(1)
- int (*emit_init_breadcrumb)(struct i915_request *rq);
- u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
- u32 *cs);
- unsigned int emit_fini_breadcrumb_dw;
-
- /* Pass the request to the hardware queue (e.g. directly into
- * the legacy ringbuffer or to the end of an execlist).
- *
- * This is called from an atomic context with irqs disabled; must
- * be irq safe.
- */
- void (*submit_request)(struct i915_request *rq);
-
- /*
- * Call when the priority on a request has changed and it and its
- * dependencies may need rescheduling. Note the request itself may
- * not be ready to run!
- */
- void (*schedule)(struct i915_request *request,
- const struct i915_sched_attr *attr);
-
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*cleanup)(struct intel_engine_cs *engine);
-
- struct intel_engine_execlists execlists;
-
- /* Contexts are pinned whilst they are active on the GPU. The last
- * context executed remains active whilst the GPU is idle - the
- * switch away and write to the context object only occurs on the
- * next execution. Contexts are only unpinned on retirement of the
- * following request ensuring that we can always write to the object
- * on the context switch even after idling. Across suspend, we switch
- * to the kernel context and trash it as the save may not happen
- * before the hardware is powered down.
- */
- struct intel_context *last_retired_context;
-
- /* status_notifier: list of callbacks for context-switch changes */
- struct atomic_notifier_head context_status_notifier;
-
- struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
-#define I915_ENGINE_SUPPORTS_STATS BIT(1)
-#define I915_ENGINE_HAS_PREEMPTION BIT(2)
- unsigned int flags;
-
- /*
- * Table of commands the command parser needs to know about
- * for this engine.
- */
- DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
-
- /*
- * Table of registers allowed in commands that read/write registers.
- */
- const struct drm_i915_reg_table *reg_tables;
- int reg_table_count;
-
- /*
- * Returns the bitmask for the length field of the specified command.
- * Return 0 for an unrecognized/invalid command.
- *
- * If the command parser finds an entry for a command in the engine's
- * cmd_tables, it gets the command's length based on the table entry.
- * If not, it calls this function to determine the per-engine length
- * field encoding for the command (i.e. different opcode ranges use
- * certain bits to encode the command length in the header).
- */
- u32 (*get_cmd_length_mask)(u32 cmd_header);
-
- struct {
- /**
- * @lock: Lock protecting the below fields.
- */
- seqlock_t lock;
- /**
- * @enabled: Reference count indicating number of listeners.
- */
- unsigned int enabled;
- /**
- * @active: Number of contexts currently scheduled in.
- */
- unsigned int active;
- /**
- * @enabled_at: Timestamp when busy stats were enabled.
- */
- ktime_t enabled_at;
- /**
- * @start: Timestamp of the last idle to active transition.
- *
- * Idle is defined as active == 0, active is active > 0.
- */
- ktime_t start;
- /**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
- */
- ktime_t total;
- } stats;
-};
-
-static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
-}
-
-static inline bool
-intel_engine_supports_stats(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_SUPPORTS_STATS;
-}
-
-static inline bool
-intel_engine_has_preemption(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_HAS_PREEMPTION;
-}
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
static inline bool __execlists_need_preempt(int prio, int last)
{
@@ -650,7 +165,7 @@ void execlists_user_end(struct intel_engine_execlists *execlists);
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
static inline unsigned int
@@ -674,12 +189,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
return port;
}
-static inline unsigned int
-intel_engine_flag(const struct intel_engine_cs *engine)
-{
- return BIT(engine->id);
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -722,10 +231,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32))
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_HANGCHECK 0x34
+#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
@@ -743,13 +252,22 @@ int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct intel_ring *ring);
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-
int __must_check intel_ring_cacheline_align(struct i915_request *rq);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
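Rings are now reference counted: kref_init() happens at creation and intel_ring_free() is only reached through the final intel_ring_put(). Sharing a ring between owners becomes (illustrative only):

static struct intel_ring *share_ring(struct intel_ring *ring)
{
	/* each additional owner takes its own reference */
	return intel_ring_get(ring);
}

static void release_ring(struct intel_ring *ring)
{
	/* the last put invokes intel_ring_free() via the kref */
	intel_ring_put(ring);
}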
@@ -844,8 +362,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-
int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -863,44 +379,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
-{
- /*
- * We are only peeking at the tail of the submit queue (and not the
- * queue itself) in order to gain a hint as to the current active
- * state of the engine. Callers are not expected to be taking
- * engine->timeline->lock, nor are they expected to be concerned
- * with serialising this hint with anything, so document it as
- * a hint and nothing more.
- */
- return READ_ONCE(engine->timeline.seqno);
-}
-
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
- u32 seqno)
-{
- return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
-}
-
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno);
-}
-
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno - 1);
-}
-
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -910,7 +388,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -919,7 +397,7 @@ intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -960,14 +438,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
}
static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
GEM_BUG_ON(gtt_offset & (1 << 5));
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = value;
@@ -983,11 +461,11 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
}
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
+void intel_gt_resume(struct drm_i915_private *i915);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
@@ -1066,6 +544,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -1086,4 +567,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
+static inline u32
+intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return engine->hangcheck.next_seqno =
+ next_pseudo_random32(engine->hangcheck.next_seqno);
+}
+
+static inline u32
+intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
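The breadcrumbs converted earlier in this patch store a fresh pseudo-random value in the I915_GEM_HWS_HANGCHECK dword, so hangcheck can detect forward progress simply by observing that value change. A sketch of such a check (illustrative, not part of this patch):

static bool engine_shows_progress(struct intel_engine_cs *engine, u32 *last)
{
	u32 seqno = intel_engine_get_hangcheck_seqno(engine);
	bool progress = seqno != *last;

	*last = seqno;
	return progress;
}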
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index a017a4232c0f..6150e35bf7b5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,6 +32,10 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -60,31 +64,20 @@
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- .skip = 1,
- };
-
- save_stack_trace(&trace);
- if (trace.nr_entries &&
- trace.entries[trace.nr_entries - 1] == ULONG_MAX)
- trace.nr_entries--;
+ unsigned int n;
- return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+ return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void __print_depot_stack(depot_stack_handle_t stack,
char *buf, int sz, int indent)
{
- unsigned long entries[STACKDEPTH];
- struct stack_trace trace = {
- .entries = entries,
- .max_entries = ARRAY_SIZE(entries),
- };
+ unsigned long *entries;
+ unsigned int nr_entries;
- depot_fetch_stack(stack, &trace);
- snprint_stack_trace(buf, sz, &trace, indent);
+ nr_entries = stack_depot_fetch(stack, &entries);
+ stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}
static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
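The stackdepot conversion in the hunk above splits naturally into a save path and a print path; put together, the new API round trip looks like this (error handling elided, STACKDEPTH as defined in this file):

static void stack_depot_round_trip(char *buf, int sz)
{
	unsigned long entries[STACKDEPTH];
	unsigned long *stored;
	unsigned int n;
	depot_stack_handle_t handle;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1 /* skip caller */);
	handle = stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);

	n = stack_depot_fetch(handle, &stored);
	stack_trace_snprint(buf, sz, stored, n, 2 /* indent */);
}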
@@ -158,7 +151,7 @@ static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -194,7 +187,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
unsigned long i;
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -278,7 +271,9 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
if (dbg.count <= alloc)
break;
- s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+ s = krealloc(dbg.owners,
+ dbg.count * sizeof(*s),
+ GFP_NOWAIT | __GFP_NOWARN);
if (!s)
goto out;
@@ -565,7 +560,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(dev_priv,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore,
regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx),
HSW_PWR_WELL_CTL_STATE(pw_idx),
@@ -620,7 +615,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg),
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
@@ -1521,7 +1516,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
phy_status_mask,
phy_status,
@@ -1556,7 +1551,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy),
PHY_POWERGOOD(phy),
@@ -1760,7 +1755,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1772,7 +1767,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
mutex_unlock(&dev_priv->pcu_lock);
@@ -1793,20 +1788,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -3442,7 +3437,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3576,7 +3571,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
!(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
+ /*
+ * FIXME: for now pretend that we only have 1 slice, see
+ * intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -3591,7 +3590,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power disable timeout!\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
+ /*
+ * FIXME: for now pretend that the first slice is always
+ * enabled, see intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -3652,7 +3655,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- skl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3669,7 +3672,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- skl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -3714,7 +3717,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- bxt_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3731,7 +3734,7 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- bxt_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -3773,7 +3776,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- cnl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -3795,7 +3798,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- cnl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3837,7 +3840,7 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
- icl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -3862,7 +3865,7 @@ void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- icl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3993,6 +3996,36 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
cmn->desc->ops->disable(dev_priv, cmn);
}
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ WARN(!pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
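The ISP assert above keys off pci_dev_present() to detect whether the BIOS has hidden the ISP PCI function; a minimal sketch of that lookup pattern, with a made-up device ID purely for illustration (the real table above lists the VLV/CHV ISP IDs):

#include <linux/pci.h>

/* Hypothetical ID table, zero-terminated as pci_dev_present() expects. */
static const struct pci_device_id example_isp_like_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
	{ }
};

static bool example_device_visible(void)
{
	/* Nonzero when any device matching the table exists on the bus. */
	return pci_dev_present(example_isp_like_ids);
}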
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
@@ -4017,7 +4050,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- if (IS_ICELAKE(i915)) {
+ if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
@@ -4029,10 +4062,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
mutex_lock(&power_domains->lock);
chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
} else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
@@ -4162,7 +4198,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
intel_power_domains_verify_state(i915);
}
- if (IS_ICELAKE(i915))
+ if (INTEL_GEN(i915) >= 11)
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7b0884ba5a5..0e3d91d9ef13 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -25,16 +25,23 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
+
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdmi.h"
+#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
@@ -978,34 +985,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned int if_index,
+ u8 *data, unsigned int length)
{
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tx_rate, av_split;
+ int i;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_AV_SPLIT,
+ &av_split, 1))
+ return -ENXIO;
+
+ if (av_split < if_index)
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_TXRATE,
+ &tx_rate, 1))
+ return -ENXIO;
+
+ if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ return 0;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return -ENXIO;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ hbuf_size = min_t(unsigned int, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0))
+ return -ENXIO;
+ if (!intel_sdvo_read_response(intel_sdvo, &data[i],
+ min_t(unsigned int, 8, hbuf_size - i)))
+ return -ENXIO;
+ }
+
+ return hbuf_size;
+}
+
+static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
- u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
- union hdmi_infoframe frame;
+ &crtc_state->base.adjusted_mode;
int ret;
- ssize_t len;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ if (!crtc_state->has_hdmi_sink)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame,
conn_state->connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
+ if (ret)
return false;
- }
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ drm_hdmi_avi_infoframe_quant_range(frame,
conn_state->connector,
adjusted_mode,
- pipe_config->limited_color_range ?
+ crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
- if (len < 0)
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
+ return true;
+
+ if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ return false;
+
+ len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
+ if (WARN_ON(len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1013,6 +1095,40 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
sdvo_data, sizeof(sdvo_data));
}
+static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+ int ret;
+
+ if (!crtc_state->has_hdmi_sink)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ sdvo_data, sizeof(sdvo_data));
+ if (len < 0) {
+ DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+ return;
+ } else if (len == 0) {
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+}
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1193,6 +1309,12 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
+ pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1315,8 +1437,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo,
- crtc_state, conn_state);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1507,6 +1628,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
+ WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1519,9 +1644,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/intel_sdvo.h
new file mode 100644
index 000000000000..c9e05bcdd141
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SDVO_H__
+#define __INTEL_SDVO_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_i915_private;
+enum pipe;
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, enum port port);
+
+#endif /* __INTEL_SDVO_H__ */
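The AVI infoframe plumbing above leans on the generic helpers from <linux/hdmi.h>; as a reference, a small sketch of a pack/unpack round trip with those helpers (not i915 code, and the field choices are arbitrary):

#include <linux/hdmi.h>

static int avi_infoframe_roundtrip(void)
{
	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
	struct hdmi_avi_infoframe avi;
	union hdmi_infoframe decoded;
	ssize_t len;

	hdmi_avi_infoframe_init(&avi);
	avi.colorspace = HDMI_COLORSPACE_RGB;

	/* Validate the frame, fill in the checksum and serialise it. */
	len = hdmi_avi_infoframe_pack(&avi, buf, sizeof(buf));
	if (len < 0)
		return len;

	/* Parse it back; type, version and checksum are verified here. */
	return hdmi_infoframe_unpack(&decoded, buf, len);
}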
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 75c872bb8cc9..57de41b1f989 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
@@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
@@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b56a1a9ad01d..2913e89280d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,17 +29,36 @@
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
+
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_rect.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
+#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <drm/drm_color_mgmt.h>
+#include "intel_atomic_plane.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+bool is_planar_yuv_format(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -256,7 +275,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_rect *src = &plane_state->base.src;
- u32 src_x, src_y, src_w, src_h;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -274,18 +294,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (fb->format->is_yuv &&
- (src_x & 1 || src_w & 1)) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
- src_x, src_w);
+ if (!fb->format->is_yuv)
+ return 0;
+
+ /* YUV specific checks */
+ if (!rotated) {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ } else {
+ hsub = vsub = max(fb->format->hsub, fb->format->vsub);
+ }
+
+ if (src_x % hsub || src_w % hsub) {
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_x, src_w, hsub, rotated ? "rotated " : "");
return -EINVAL;
}
- if (fb->format->is_yuv &&
- fb->format->num_planes > 1 &&
- (src_y & 1 || src_h & 1)) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
- src_y, src_h);
+ if (src_y % vsub || src_h % vsub) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_y, src_h, vsub, rotated ? "rotated " : "");
return -EINVAL;
}
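Stated compactly, the rule the rewritten check enforces is: unrotated YUV planes must have src x/w aligned to the format's hsub and src y/h to its vsub, while 90/270-rotated planes use max(hsub, vsub) on both axes. A hypothetical restatement as a standalone predicate, for reference only:

#include <linux/kernel.h>

static bool yuv_src_aligned(u32 src_x, u32 src_y, u32 src_w, u32 src_h,
			    u32 hsub, u32 vsub, bool rotated)
{
	/* Rotation swaps the axes, so align to the worst-case subsampling. */
	if (rotated)
		hsub = vsub = max(hsub, vsub);

	return !(src_x % hsub) && !(src_w % hsub) &&
	       !(src_y % vsub) && !(src_h % vsub);
}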
@@ -335,8 +363,8 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
- !icl_is_hdr_plane(plane)) {
+ if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -518,7 +546,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(plane)) {
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
u32 cus_ctl = 0;
if (linked) {
@@ -542,7 +570,7 @@ skl_program_plane(struct intel_plane *plane,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
- if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
@@ -609,6 +637,9 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+
skl_write_plane_wm(plane, crtc_state);
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
@@ -741,7 +772,12 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return SP_GAMMA_ENABLE;
+ u32 sprctl = 0;
+
+ if (crtc_state->gamma_enable)
+ sprctl |= SP_GAMMA_ENABLE;
+
+ return sprctl;
}
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -916,12 +952,12 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 sprctl = 0;
- sprctl |= SPRITE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ sprctl |= SPRITE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
sprctl |= SPRITE_PIPE_CSC_ENABLE;
return sprctl;
@@ -1107,7 +1143,15 @@ g4x_sprite_max_stride(struct intel_plane *plane,
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return DVS_GAMMA_ENABLE;
+ u32 dvscntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dvscntr |= DVS_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+ return dvscntr;
}
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -1482,8 +1526,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
/*
* 90/270 is not allowed with RGB64 16:16:16:16 and
* Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
- * TBD: Add RGB64 case once its added in supported format
- * list.
*/
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -1491,6 +1533,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
break;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1551,10 +1602,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
return -EINVAL;
}
@@ -1790,6 +1841,52 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static const u32 icl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -1806,6 +1903,79 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
@@ -1945,10 +2115,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
@@ -2085,8 +2268,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->update_slave = icl_update_slave;
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_planar_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_planar_formats;
+ num_formats = ARRAY_SIZE(icl_planar_formats);
+ } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
+ formats = glk_planar_formats;
+ num_formats = ARRAY_SIZE(glk_planar_formats);
+ } else {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ }
+ } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_plane_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_plane_formats;
+ num_formats = ARRAY_SIZE(icl_plane_formats);
} else {
formats = skl_plane_formats;
num_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
new file mode 100644
index 000000000000..84be8686be16
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_H__
+#define __INTEL_SPRITE_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+bool is_planar_yuv_format(u32 pixelformat);
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+int intel_plane_check_stride(const struct intel_plane_state *plane_state);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* Don't need to do a gen check, these planes are only available on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
+static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return false;
+
+ return plane_id < PLANE_SPRITE2;
+}
+
+#endif /* __INTEL_SPRITE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3924c4944e1f..5dbba33f4202 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -33,9 +33,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_tv.h"
enum tv_margin {
TV_MARGIN_LEFT, TV_MARGIN_TOP,
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/intel_tv.h
new file mode 100644
index 000000000000..44518575ec5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TV_H__
+#define __INTEL_TV_H__
+
+struct drm_i915_private;
+
+void intel_tv_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_TV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e711eb3268bc..25b80ffe71ad 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -332,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- guc_disable_communication(guc);
-
intel_huc_sanitize(huc);
intel_guc_sanitize(guc);
@@ -377,7 +375,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -ETIMEDOUT)
+ if (ret == 0)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
@@ -451,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
guc_disable_communication(guc);
}
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @i915: device private
+ *
+ * Preparing for full gpu reset.
+ */
+void intel_uc_reset_prepare(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+
+ if (!USES_GUC(i915))
+ return;
+
+ guc_disable_communication(guc);
+ intel_uc_sanitize(i915);
+}
+
int intel_uc_suspend(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
@@ -468,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915)
return err;
}
- gen9_disable_guc_interrupts(i915);
+ guc_disable_communication(guc);
return 0;
}
@@ -484,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
- gen9_enable_guc_interrupts(i915);
+ guc_enable_communication(guc);
err = intel_guc_resume(guc);
if (err) {
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 870faf9011b9..c14729786652 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -38,6 +38,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
+void intel_uc_reset_prepare(struct drm_i915_private *i915);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 75646a1e0051..d1d51e1121e2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,17 +21,18 @@
* IN THE SOFTWARE.
*/
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_vgpu.h"
-
-#include <asm/iosf_mbi.h>
-#include <linux/pm_runtime.h>
+#include "intel_drv.h"
+#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
+#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static const char * const forcewake_domain_names[] = {
"render",
@@ -58,16 +59,20 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
return "unknown";
}
+#define fw_ack(d) readl((d)->reg_ack)
+#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+
static inline void
-fw_domain_reset(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
/*
* We don't really know if the powerwell for the forcewake domain we are
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ fw_clear(d, 0xffff);
}
static inline void
@@ -81,36 +86,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
}
static inline int
-__wait_for_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
const u32 ack,
const u32 value)
{
- return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ return wait_for_atomic((fw_ack(d) & ack) == value,
FORCEWAKE_ACK_TIMEOUT_MS);
}
static inline int
-wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, 0);
+ return __wait_for_ack(d, ack, 0);
}
static inline int
-wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_set(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, ack);
+ return __wait_for_ack(d, ack, ack);
}
static inline void
-fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_clear(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
@@ -121,8 +122,7 @@ enum ack_type {
};
static int
-fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
const enum ack_type type)
{
const u32 ack_bit = FORCEWAKE_KERNEL;
@@ -146,129 +146,122 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
pass = 1;
do {
- wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
/* Give gt some time to relax before the polling frenzy */
udelay(10 * pass);
- wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
- ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+ ack_detected = (fw_ack(d) & ack_bit) == value;
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
} while (!ack_detected && pass++ < 10);
DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
intel_uncore_forcewake_domain_to_str(d->id),
type == ACK_SET ? "set" : "clear",
- __raw_i915_read32(i915, d->reg_ack),
+ fw_ack(d),
pass);
return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
-fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
- fw_domain_wait_ack_clear(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(d);
}
static inline void
-fw_domain_get(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
+ fw_set(d, FORCEWAKE_KERNEL);
}
static inline void
-fw_domain_wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_set(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
-fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
- fw_domain_wait_ack_set(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
+ fw_domain_wait_ack_set(d);
}
static inline void
-fw_domain_put(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
+ fw_clear(d, FORCEWAKE_KERNEL);
}
static void
-fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_get_with_fallback(struct drm_i915_private *i915,
+fw_domains_get_with_fallback(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear_fallback(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear_fallback(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set_fallback(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set_fallback(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_put(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_put(d);
- i915->uncore.fw_domains_active &= ~fw_domains;
+ uncore->fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *i915,
+fw_domains_reset(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
@@ -277,61 +270,61 @@ fw_domains_reset(struct drm_i915_private *i915,
if (!fw_domains)
return;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_reset(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_reset(d);
}
-static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
u32 val;
- val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+ val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
return val;
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
/*
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+ WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
"GT thread status wait timed out\n");
}
-static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- fw_domains_get(dev_priv, fw_domains);
+ fw_domains_get(uncore, fw_domains);
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(uncore);
}
-static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
- u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+ u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
-static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
u32 n;
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(dev_priv))
- n = fifo_free_entries(dev_priv);
+ if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ n = fifo_free_entries(uncore);
else
- n = dev_priv->uncore.fifo_count;
+ n = uncore->fifo_count;
if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
- if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+ if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
@@ -339,7 +332,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
}
}
- dev_priv->uncore.fifo_count = n - 1;
+ uncore->fifo_count = n - 1;
}
static enum hrtimer_restart
@@ -347,30 +340,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv =
- container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
+ struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
unsigned long irqflags;
- assert_rpm_device_not_suspended(dev_priv);
+ assert_rpm_device_not_suspended(uncore->rpm);
if (xchg(&domain->active, false))
return HRTIMER_RESTART;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
if (--domain->wake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ uncore->funcs.force_wake_put(uncore, domain->mask);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
-intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
+intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -388,7 +380,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
active_domains = 0;
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -396,9 +388,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
intel_uncore_fw_release_timer(&domain->timer);
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
@@ -411,185 +403,134 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
break;
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
cond_resched();
}
WARN_ON(active_domains);
- fw = dev_priv->uncore.fw_domains_active;
+ fw = uncore->fw_domains_active;
if (fw)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+ uncore->funcs.force_wake_put(uncore, fw);
- fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
- assert_forcewakes_inactive(dev_priv);
+ fw_domains_reset(uncore, uncore->fw_domains);
+ assert_forcewakes_inactive(uncore);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return fw; /* track the lost user forcewake domains */
}
-static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
-{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
- const u32 cap = dev_priv->edram_cap;
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)] *
- 1024 * 1024;
-}
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
-{
- if (!HAS_EDRAM(dev_priv))
- return 0;
-
- /* The needed capability bits for size calculation
- * are not there with pre gen9 so return 128MB always.
- */
- if (INTEL_GEN(dev_priv) < 9)
- return 128 * 1024 * 1024;
-
- return gen9_edram_size(dev_priv);
-}
-
-static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9) {
- dev_priv->edram_cap = __raw_i915_read32(dev_priv,
- HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we do not have gt funcs
- * set up */
- } else {
- dev_priv->edram_cap = 0;
- }
-
- if (HAS_EDRAM(dev_priv))
- DRM_INFO("Found %lluMB of eDRAM\n",
- intel_uncore_edram_size(dev_priv) / (1024 * 1024));
-}
-
static bool
-fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 dbg;
- dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+ dbg = __raw_uncore_read32(uncore, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
}
static bool
-vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 cer;
- cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+ cer = __raw_uncore_read32(uncore, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;
- __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+ __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
return true;
}
static bool
-gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
u32 fifodbg;
- fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
- __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+ __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
return fifodbg;
}
static bool
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
- ret |= fpga_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
+ ret |= fpga_check_for_unclaimed_mmio(uncore);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret |= vlv_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_dbg_unclaimed(uncore))
+ ret |= vlv_check_for_unclaimed_mmio(uncore);
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- ret |= gen6_check_for_fifo_debug(dev_priv);
+ if (intel_uncore_has_fifo(uncore))
+ ret |= gen6_check_for_fifo_debug(uncore);
return ret;
}
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(dev_priv))
+ if (check_for_unclaimed_mmio(uncore))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev_priv)) {
- __raw_i915_write32(dev_priv, GTFIFOCTL,
- __raw_i915_read32(dev_priv, GTFIFOCTL) |
- GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
- GT_FIFO_CTL_RC6_POLICY_STALL);
+ if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ __raw_uncore_write32(uncore, GTFIFOCTL,
+ __raw_uncore_read32(uncore, GTFIFOCTL) |
+ GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+ GT_FIFO_CTL_RC6_POLICY_STALL);
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
- spin_lock_irq(&dev_priv->uncore.lock);
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- restore_forcewake);
-
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+ if (intel_uncore_has_fifo(uncore))
+ uncore->fifo_count = fifo_free_entries(uncore);
+ spin_unlock_irq(&uncore->lock);
}
iosf_mbi_punit_release();
}
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
{
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- dev_priv->uncore.fw_domains_saved =
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
}
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
- restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
- __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+ __intel_uncore_early_sanitize(uncore, restore_forcewake);
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
- i915_check_and_clear_faults(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -598,15 +539,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
intel_sanitize_gt_powersave(dev_priv);
}
-static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (domain->wake_count++) {
fw_domains &= ~domain->mask;
domain->active = true;
@@ -614,12 +555,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
}
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
/**
* intel_uncore_forcewake_get - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
* This function can be used to get GT's forcewake domain references.
@@ -630,100 +571,100 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the domains
 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
*/
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
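As a minimal usage sketch (not part of the patch, and assuming the i915 driver headers plus a valid struct intel_uncore), a caller now pins a domain, accesses registers through the uncore vfuncs, and drops the reference:

/* Illustrative only: the register argument is supplied by the caller. */
static u32 example_read_with_forcewake(struct intel_uncore *uncore,
				       i915_reg_t reg)
{
	u32 val;

	/* Pin the render power well so the register range is awake. */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);

	/* Traced read through the per-platform mmio vfuncs. */
	val = intel_uncore_read(uncore, reg);

	/* Drop our reference; the domain is auto-released by its timer. */
	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);

	return val;
}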
/**
* intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function is a wrapper around intel_uncore_forcewake_get() to acquire
* the GT powerwell and in the process disable our debugging for the
* duration of userspace's bypass.
*/
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!dev_priv->uncore.user_forcewake.count++) {
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+ spin_lock_irq(&uncore->lock);
+ if (!uncore->user_forcewake.count++) {
+ intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
/* Save and disable mmio debugging for the user bypass */
- dev_priv->uncore.user_forcewake.saved_mmio_check =
- dev_priv->uncore.unclaimed_mmio_check;
- dev_priv->uncore.user_forcewake.saved_mmio_debug =
+ uncore->user_forcewake.saved_mmio_check =
+ uncore->unclaimed_mmio_check;
+ uncore->user_forcewake.saved_mmio_debug =
i915_modparams.mmio_debug;
- dev_priv->uncore.unclaimed_mmio_check = 0;
+ uncore->unclaimed_mmio_check = 0;
i915_modparams.mmio_debug = 0;
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
/**
* intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function complements intel_uncore_forcewake_user_get() and releases
* the GT powerwell taken on behalf of the userspace bypass.
*/
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!--dev_priv->uncore.user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(dev_priv))
- dev_info(dev_priv->drm.dev,
+ spin_lock_irq(&uncore->lock);
+ if (!--uncore->user_forcewake.count) {
+ if (intel_uncore_unclaimed_mmio(uncore))
+ dev_info(uncore_to_i915(uncore)->drm.dev,
"Invalid mmio detected during user access\n");
- dev_priv->uncore.unclaimed_mmio_check =
- dev_priv->uncore.user_forcewake.saved_mmio_check;
+ uncore->unclaimed_mmio_check =
+ uncore->user_forcewake.saved_mmio_check;
i915_modparams.mmio_debug =
- dev_priv->uncore.user_forcewake.saved_mmio_debug;
+ uncore->user_forcewake.saved_mmio_debug;
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
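A brief sketch of how the user get/put pair is meant to bracket a userspace bypass window (illustrative, not part of the patch):

static void example_user_bypass(struct intel_uncore *uncore)
{
	/* Wake all domains and mute mmio debugging for the bypass. */
	intel_uncore_forcewake_user_get(uncore);

	/* ... userspace-driven register access happens here ... */

	/* Restore debugging and release the power wells. */
	intel_uncore_forcewake_user_put(uncore);
}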
/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
* See intel_uncore_forcewake_get(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
}
-static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -738,66 +679,66 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_put - release a forcewake domain reference
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to put references
*
* This function drops the device-level forcewakes for specified
* domains obtained by intel_uncore_forcewake_get().
*/
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
*
* See intel_uncore_forcewake_put(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
}
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- WARN(dev_priv->uncore.fw_domains_active,
+ WARN(uncore->fw_domains_active,
"Expected all fw_domains to be inactive, but %08x are still on\n",
- dev_priv->uncore.fw_domains_active);
+ uncore->fw_domains_active);
}
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- fw_domains &= dev_priv->uncore.fw_domains;
- WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+ fw_domains &= uncore->fw_domains;
+ WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -806,7 +747,7 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv,
#define GEN11_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-#define __gen6_reg_read_fw_domains(offset) \
+#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset)) \
@@ -846,13 +787,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
})
static enum forcewake_domains
-find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
entry = BSEARCH(offset,
- dev_priv->uncore.fw_domains_table,
- dev_priv->uncore.fw_domains_table_entries,
+ uncore->fw_domains_table,
+ uncore->fw_domains_table_entries,
fw_range_cmp);
if (!entry)
@@ -864,11 +805,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
* translate it here to the list of available domains.
*/
if (entry->domains == FORCEWAKE_ALL)
- return dev_priv->uncore.fw_domains;
+ return uncore->fw_domains;
- WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+ WARN(entry->domains & ~uncore->fw_domains,
"Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~dev_priv->uncore.fw_domains, offset);
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -892,19 +833,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_read_fw_domains(offset) \
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_read_fw_domains(offset) \
+#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -956,7 +897,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
-#define __gen8_reg_write_fw_domains(offset) \
+#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
@@ -986,19 +927,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_write_fw_domains(offset) \
+#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -1073,21 +1014,21 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
};
static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
+ilk_dummy_write(struct intel_uncore *uncore)
{
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
- __raw_i915_write32(dev_priv, MI_MODE, 0);
+ __raw_uncore_write32(uncore, MI_MODE, 0);
}
static void
-__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+__unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+ if (WARN(check_for_unclaimed_mmio(uncore) && !before,
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
@@ -1096,7 +1037,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
}
static inline void
-unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
@@ -1104,12 +1045,12 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
if (likely(!i915_modparams.mmio_debug))
return;
- __unclaimed_reg_debug(dev_priv, reg, read, before);
+ __unclaimed_reg_debug(uncore, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1117,18 +1058,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
#define __gen2_read(x) \
static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- ilk_dummy_write(dev_priv); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ ilk_dummy_write(uncore); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
@@ -1151,53 +1092,53 @@ __gen2_read(64)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, true, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, true)
#define GEN6_READ_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, true, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+static noinline void ___force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
fw_domain_arm_timer(domain);
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
if (WARN_ON(!fw_domains))
return;
/* Turn on all requested but inactive supported forcewake domains. */
- fw_domains &= dev_priv->uncore.fw_domains;
- fw_domains &= ~dev_priv->uncore.fw_domains_active;
+ fw_domains &= uncore->fw_domains;
+ fw_domains &= ~uncore->fw_domains_active;
if (fw_domains)
- ___force_wake_auto(dev_priv, fw_domains);
+ ___force_wake_auto(uncore, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
-func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __##func##_reg_read_fw_domains(offset); \
+ fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ __force_wake_auto(uncore, fw_engine); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
@@ -1225,24 +1166,24 @@ __gen6_read(64)
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
+ __assert_rpm_wakelock_held(uncore->rpm); \
#define GEN2_WRITE_FOOTER
#define __gen2_write(x) \
static void \
-gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
-gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- ilk_dummy_write(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ ilk_dummy_write(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
@@ -1263,33 +1204,33 @@ __gen2_write(32)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, false, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, false, true)
#define GEN6_WRITE_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, false, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+ unclaimed_reg_debug(uncore, reg, false, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags)
#define __gen6_write(x) \
static void \
-gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(offset)) \
- __gen6_gt_wait_for_fifo(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __gen6_gt_wait_for_fifo(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen_write(func, x) \
static void \
-func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __##func##_reg_write_fw_domains(offset); \
+ fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __force_wake_auto(uncore, fw_engine); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
@@ -1316,23 +1257,23 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_writeb = x##_write8; \
- (i915)->uncore.funcs.mmio_writew = x##_write16; \
- (i915)->uncore.funcs.mmio_writel = x##_write32; \
+ (uncore)->funcs.mmio_writeb = x##_write8; \
+ (uncore)->funcs.mmio_writew = x##_write16; \
+ (uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_readb = x##_read8; \
- (i915)->uncore.funcs.mmio_readw = x##_read16; \
- (i915)->uncore.funcs.mmio_readl = x##_read32; \
- (i915)->uncore.funcs.mmio_readq = x##_read64; \
+ (uncore)->funcs.mmio_readb = x##_read8; \
+ (uncore)->funcs.mmio_readw = x##_read16; \
+ (uncore)->funcs.mmio_readl = x##_read32; \
+ (uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id,
i915_reg_t reg_set,
i915_reg_t reg_ack)
@@ -1342,7 +1283,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
@@ -1350,8 +1291,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
WARN_ON(!i915_mmio_reg_valid(reg_ack));
d->wake_count = 0;
- d->reg_set = reg_set;
- d->reg_ack = reg_ack;
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
d->id = domain_id;
@@ -1371,12 +1312,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= BIT(domain_id);
+ uncore->fw_domains |= BIT(domain_id);
- fw_domain_reset(dev_priv, d);
+ fw_domain_reset(d);
}
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id)
{
struct intel_uncore_forcewake_domain *d;
@@ -1384,85 +1325,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
memset(d, 0, sizeof(*d));
- dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+ uncore->fw_domains &= ~BIT(domain_id);
}
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
- return;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
- if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.fw_reset = 0;
- dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
- dev_priv->uncore.fw_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
- dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
int i;
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(i915, _VCS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
FORCEWAKE_MEDIA_VDBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
}
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(i915, _VECS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ uncore->funcs.force_wake_get = fw_domains_get;
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_IVYBRIDGE(i915)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1474,9 +1406,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ uncore->funcs.force_wake_put = fw_domains_put;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1485,41 +1417,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* before the ecobus check.
*/
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- __raw_posting_read(dev_priv, ECOBUS);
+ __raw_uncore_write32(uncore, FORCEWAKE, 0);
+ __raw_posting_read(uncore, ECOBUS);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put(dev_priv, FORCEWAKE_RENDER);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+ ecobus = __raw_uncore_read32(uncore, ECOBUS);
+ fw_domains_put(uncore, FORCEWAKE_RENDER);
+ spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN(i915, 6)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
/* All future platforms are expected to require complex power gating */
- WARN_ON(dev_priv->uncore.fw_domains == 0);
+ WARN_ON(uncore->fw_domains == 0);
}
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
- dev_priv->uncore.fw_domains_table = \
+ (uncore)->fw_domains_table = \
(struct intel_forcewake_range *)(d); \
- dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+ (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1544,66 +1476,132 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* the access.
*/
disable_rpm_wakeref_asserts(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
break;
}
return NOTIFY_OK;
}
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- i915_check_vgpu(dev_priv);
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+ int mmio_bar;
+ int mmio_size;
- intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, 0);
+ mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_GEN(i915) < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ if (uncore->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_iounmap(pdev, uncore->regs);
+}
+
+void intel_uncore_init_early(struct intel_uncore *uncore)
+{
+ spin_lock_init(&uncore->lock);
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ i915_check_vgpu(i915);
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
- dev_priv->uncore.unclaimed_mmio_check = 1;
- dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+ intel_uncore_fw_domains_init(uncore);
+ __intel_uncore_early_sanitize(uncore, 0);
+
+ uncore->unclaimed_mmio_check = 1;
+ uncore->pmic_bus_access_nb.notifier_call =
i915_pmic_bus_access_notifier;
- if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
- } else if (IS_GEN(dev_priv, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
-
- if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ uncore->rpm = &i915->runtime_pm;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ if (IS_GEN(i915, 5)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+ if (IS_VALLEYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN(dev_priv, 8)) {
- if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN(i915, 8)) {
+ if (IS_CHERRYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
}
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ uncore->flags |= UNCORE_HAS_FIFO;
+
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
}
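The split init implied by the new entry points can be summarised with a hedged sketch (assuming the surrounding i915 probe code; error handling trimmed):

static int example_uncore_bringup(struct intel_uncore *uncore)
{
	int ret;

	intel_uncore_init_early(uncore);	/* lock only, before MMIO */

	ret = intel_uncore_init_mmio(uncore);	/* map BAR, set up fw domains */
	if (ret)
		return ret;

	/* Once engine masks are known, drop unused media domains. */
	intel_uncore_prune_mmio_domains(uncore);

	/* Teardown mirrors this with intel_uncore_fini_mmio(). */
	return 0;
}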
/*
@@ -1611,45 +1609,48 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
* the forcewake domains. Prune them, to make sure they only reference existing
* engines.
*/
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+ if (INTEL_GEN(i915) >= 11) {
+ enum forcewake_domains fw_domains = uncore->fw_domains;
enum forcewake_domain_id domain_id;
int i;
for (i = 0; i < I915_MAX_VCS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VCS(i)))
+ if (HAS_ENGINE(i915, _VCS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
for (i = 0; i < I915_MAX_VECS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VECS(i)))
+ if (HAS_ENGINE(i915, _VECS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
}
}
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev_priv);
+ intel_uncore_sanitize(uncore_to_i915(uncore));
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
+ uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
@@ -1717,7 +1718,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
/**
* __intel_wait_for_register_fw - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1741,7 +1742,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
@@ -1750,7 +1751,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
u32 *out_value)
{
u32 uninitialized_var(reg_value);
-#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
/* Catch any overuse of this function */
@@ -1772,7 +1773,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
/**
* __intel_wait_for_register - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1789,33 +1790,34 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms,
- u32 *out_value)
+int __intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
{
unsigned fw =
- intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
u32 reg_value;
int ret;
might_sleep_if(slow_timeout_ms);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw);
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
reg, mask, value,
fast_timeout_us, 0, &reg_value);
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irq(&uncore->lock);
if (ret && slow_timeout_ms)
- ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+ ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
+ reg),
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
@@ -1828,82 +1830,90 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
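A small sketch of the polling helper in its new form (illustrative; the register, bit and 10 ms timeout are placeholders):

static int example_wait_for_bit(struct intel_uncore *uncore,
				i915_reg_t reg, u32 bit)
{
	/* Poll until (reg & bit) == bit, taking forcewake internally;
	 * returns 0 on success or -ETIMEDOUT after 10 ms.
	 */
	return intel_wait_for_register(uncore, reg, bit, bit, 10);
}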
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(dev_priv);
+ return check_for_unclaimed_mmio(uncore);
}
bool
-intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
- if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+ if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- dev_priv->uncore.unclaimed_mmio_check--;
+ uncore->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv)) {
- fw_domains = __fwtable_reg_read_fw_domains(offset);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(offset);
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915)) {
+ fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (INTEL_GEN(i915) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
- fw_domains = __fwtable_reg_write_fw_domains(offset);
- } else if (IS_GEN(dev_priv, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(offset);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
+ fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN(i915, 8)) {
+ fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
@@ -1911,7 +1921,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
- * @dev_priv: pointer to struct drm_i915_private
+ * @uncore: pointer to struct intel_uncore
* @reg: register in question
* @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
*
@@ -1923,21 +1933,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
* callers to do FIFO management on their own or risk losing writes.
*/
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op)
{
enum forcewake_domains fw_domains = 0;
WARN_ON(!op);
- if (intel_vgpu_active(dev_priv))
+ if (!intel_uncore_has_forcewake(uncore))
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+ fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+ fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
return fw_domains;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e5e157d288de..d6af3de70121 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -28,10 +28,13 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "i915_reg.h"
struct drm_i915_private;
+struct i915_runtime_pm;
+struct intel_uncore;
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
@@ -62,25 +65,25 @@ enum forcewake_domains {
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ u16 (*mmio_readw)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ u32 (*mmio_readl)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ u64 (*mmio_readq)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ void (*mmio_writeb)(struct intel_uncore *uncore,
i915_reg_t r, u8 val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ void (*mmio_writew)(struct intel_uncore *uncore,
i915_reg_t r, u16 val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ void (*mmio_writel)(struct intel_uncore *uncore,
i915_reg_t r, u32 val, bool trace);
};
@@ -92,8 +95,18 @@ struct intel_forcewake_range {
};
struct intel_uncore {
+ void __iomem *regs;
+
+ struct i915_runtime_pm *rpm;
+
spinlock_t lock; /** lock is also taken in irq contexts. */
+ unsigned int flags;
+#define UNCORE_HAS_FORCEWAKE BIT(0)
+#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
+#define UNCORE_HAS_DBG_UNCLAIMED BIT(2)
+#define UNCORE_HAS_FIFO BIT(3)
+
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
@@ -106,18 +119,14 @@ struct intel_uncore {
enum forcewake_domains fw_domains_active;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
- u32 fw_set;
- u32 fw_clear;
- u32 fw_reset;
-
struct intel_uncore_forcewake_domain {
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
bool active;
struct hrtimer timer;
- i915_reg_t reg_set;
- i915_reg_t reg_ack;
+ u32 __iomem *reg_set;
+ u32 __iomem *reg_ack;
} fw_domain[FW_DOMAIN_ID_COUNT];
struct {
@@ -131,86 +140,257 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+
+#define for_each_fw_domain(domain__, uncore__, tmp__) \
+ for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
+
+static inline struct intel_uncore *
+forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
+{
+ return container_of(d, struct intel_uncore, fw_domain[d->id]);
+}
-#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
- for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
+static inline bool
+intel_uncore_has_forcewake(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FORCEWAKE;
+}
+static inline bool
+intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_fifo(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FIFO;
+}
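As a sketch of how callers are expected to branch on the new capability flags rather than on platform checks (illustrative only):

static bool example_needs_fifo_tracking(const struct intel_uncore *uncore)
{
	/* gen2-5 and vGPU configurations expose no forcewake at all. */
	if (!intel_uncore_has_forcewake(uncore))
		return false;

	/* Only gen6/7 keep the GT FIFO free-entry accounting. */
	return intel_uncore_has_fifo(uncore);
}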
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
-bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void intel_uncore_init_early(struct intel_uncore *uncore);
+int intel_uncore_init_mmio(struct intel_uncore *uncore);
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
+bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
+
+void assert_forcewakes_inactive(struct intel_uncore *uncore);
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int timeout_ms)
+static inline int
+intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
{
- return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+ return __intel_wait_for_register(uncore, reg, mask, value, 2,
timeout_ms, NULL);
}
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
+static inline int
+intel_wait_for_register_fw(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
unsigned int timeout_ms)
{
- return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+ return __intel_wait_for_register_fw(uncore, reg, mask, value,
2, timeout_ms, NULL);
}
+/* register access functions */
+#define __raw_read(x__, s__) \
+static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x__, s__) \
+static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
+#define __uncore_read(name__, x__, s__, trace__) \
+static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
+}
+
+#define __uncore_write(name__, x__, s__, trace__) \
+static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
+}
+
+__uncore_read(read8, 8, b, true)
+__uncore_read(read16, 16, w, true)
+__uncore_read(read, 32, l, true)
+__uncore_read(read16_notrace, 16, w, false)
+__uncore_read(read_notrace, 32, l, false)
+
+__uncore_write(write8, 8, b, true)
+__uncore_write(write16, 16, w, true)
+__uncore_write(write, 32, l, true)
+__uncore_write(write_notrace, 32, l, false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * uncore->funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
+ */
+__uncore_read(read64, 64, q, true)
+
+static inline u64
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ u32 upper, lower, old_upper, loop = 0;
+ upper = intel_uncore_read(uncore, upper_reg);
+ do {
+ old_upper = upper;
+ lower = intel_uncore_read(uncore, lower_reg);
+ upper = intel_uncore_read(uncore, upper_reg);
+ } while (upper != old_upper && loop++ < 2);
+ return (u64)upper << 32 | lower;
+}
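A usage sketch for the 2x32 helper above (illustrative; the low/high registers are placeholders supplied by the caller):

static u64 example_read_split_counter(struct intel_uncore *uncore,
				      i915_reg_t lo, i915_reg_t hi)
{
	/* The helper re-reads the high dword if it ticked over between
	 * the two 32-bit accesses, avoiding a torn 64-bit value.
	 */
	return intel_uncore_read64_2x32(uncore, lo, hi);
}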
+
+#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
+#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
+
+#undef __uncore_read
+#undef __uncore_write
+
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
+ * controlled.
+ *
+ * Think twice, and think again, before using these.
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&uncore->lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&uncore->lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, either by uncore->lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
+#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
+#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
+#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
+
+static inline void intel_uncore_rmw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write(uncore, reg, val);
+}
+
+static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read_fw(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write_fw(uncore, reg, val);
+}
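A short sketch of the read-modify-write helpers (illustrative; the bit masks are placeholders):

static void example_toggle_bits(struct intel_uncore *uncore,
				i915_reg_t reg, u32 set, u32 clear)
{
	/* Locked, forcewake-aware variant: clear 'clear', then set 'set'. */
	intel_uncore_rmw(uncore, reg, clear, set);

	/* intel_uncore_rmw_fw() is only safe where the caller already
	 * holds forcewake and uncore->lock (see the comment above).
	 */
}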
+
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bf3662ad5fed..fdbbb9a53804 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -772,6 +772,9 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index 23abf03736e7..3f9921ba4a76 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
-{
- unsigned long groups_per_line = 0;
- unsigned long groups_total = 0;
- unsigned long num_extra_mux_bits = 0;
- unsigned long slice_bits = 0;
- unsigned long hrd_delay = 0;
- unsigned long final_scale = 0;
- unsigned long rbs_min = 0;
-
- /* Number of groups used to code each line of a slice */
- groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
- DSC_RC_PIXELS_PER_GROUP);
-
- /* chunksize in Bytes */
- vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
- vdsc_cfg->bits_per_pixel,
- (8 * 16));
-
- if (vdsc_cfg->convert_rgb)
- num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4)
- - 2);
- else
- num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4) +
- 2 * (4 * vdsc_cfg->bits_per_component) - 2;
- /* Number of bits in one Slice */
- slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
-
- while ((num_extra_mux_bits > 0) &&
- ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
- num_extra_mux_bits--;
-
- if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
- vdsc_cfg->initial_scale_value = groups_per_line + 8;
-
- /* scale_decrement_interval calculation according to DSC spec 1.11 */
- if (vdsc_cfg->initial_scale_value > 8)
- vdsc_cfg->scale_decrement_interval = groups_per_line /
- (vdsc_cfg->initial_scale_value - 8);
- else
- vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
-
- vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
- (vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
-
- if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
- DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
- return -ERANGE;
- }
-
- final_scale = (vdsc_cfg->rc_model_size * 8) /
- (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
- if (vdsc_cfg->slice_height > 1)
- /*
- * NflBpgOffset is 16 bit value with 11 fractional bits
- * hence we multiply by 2^11 for preserving the
- * fractional part
- */
- vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
- (vdsc_cfg->slice_height - 1));
- else
- vdsc_cfg->nfl_bpg_offset = 0;
-
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
- /* Number of groups used to code the entire slice */
- groups_total = groups_per_line * vdsc_cfg->slice_height;
-
- /* slice_bpg_offset is 16 bit value with 11 fractional bits */
- vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
- vdsc_cfg->initial_offset +
- num_extra_mux_bits) << 11),
- groups_total);
-
- if (final_scale > 9) {
- /*
- * ScaleIncrementInterval =
- * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
- * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
- * we need divide by 2^11 from pstDscCfg values
- */
- vdsc_cfg->scale_increment_interval =
- (vdsc_cfg->final_offset * (1 << 11)) /
- ((vdsc_cfg->nfl_bpg_offset +
- vdsc_cfg->slice_bpg_offset) *
- (final_scale - 9));
- } else {
- /*
- * If finalScaleValue is less than or equal to 9, a value of 0 should
- * be used to disable the scale increment at the end of the slice
- */
- vdsc_cfg->scale_increment_interval = 0;
- }
-
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
- /*
- * DSC spec mentions that bits_per_pixel specifies the target
- * bits/pixel (bpp) rate that is used by the encoder,
- * in steps of 1/16 of a bit per pixel
- */
- rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
- DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel, 16) +
- groups_per_line * vdsc_cfg->first_line_bpg_offset;
-
- hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
- vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
- vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
-
- return 0;
-}
-
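The helper removed above now lives in DRM core as drm_dsc_compute_rc_parameters(), called below. As a worked example of its first two steps, take an illustrative 1920-pixel-wide slice at 8 bpp, i.e. bits_per_pixel = 8 * 16 = 128 in the spec's 1/16th-bpp fixed-point units, with the usual 3 pixels per DSC group:

groups_per_line  = DIV_ROUND_UP(1920, 3);             /* 640 groups per slice line */
slice_chunk_size = DIV_ROUND_UP(1920 * 128, 8 * 16);  /* 1920 bytes per chunk      */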
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
/* Gen 11 does not support YCbCr */
- vdsc_cfg->enable422 = false;
+ vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
vdsc_cfg->block_pred_enable =
@@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return intel_compute_rc_parameters(vdsc_cfg);
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
enum intel_display_power_domain
@@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
pps_val |= DSC_COLOR_SPACE_CONVERSION;
- if (vdsc_cfg->enable422)
+ if (vdsc_cfg->simple_422)
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
@@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
- drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
- drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
intel_dig_port->write_infoframe(encoder, crtc_state,
DP_SDP_PPS, &dp_dsc_pps_sdp,
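With this hunk the SDP is assembled entirely by the shared DRM DSC helpers: the DP 1.4 header is initialised on the pps_header member, the DSC 1.2 picture parameter set is packed into pps_payload, and the infoframe is then handed to write_infoframe() as before. In outline:

struct drm_dsc_pps_infoframe sdp;

drm_dsc_dp_pps_header_init(&sdp.pps_header);           /* DP 1.4 spec, Table 2-123 */
drm_dsc_pps_payload_pack(&sdp.pps_payload, vdsc_cfg);  /* DSC 1.2 spec, Table 4-1  */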
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 15f4a6dee5aa..9682dd575152 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -541,10 +541,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
- /* WaEnableStateCacheRedirectToCS:icl */
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
- GEN11_STATE_CACHE_REDIRECT_TO_CS);
-
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
@@ -555,6 +551,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN10_CACHE_MODE_SS,
0, /* write-only, so skip validation */
_MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+
+ /* WaDisableGPGPUMidThreadPreemption:icl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
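For context on the WA_SET_FIELD_MASKED addition above: GEN8_CS_CHICKEN1 is a masked register, so each write carries its own enable mask in the upper 16 bits and only the named bits change. Conceptually the value that reaches the MMIO write is built along these lines (a sketch of the masked-register convention, not code from this patch):

u32 mask  = GEN9_PREEMPT_GPGPU_LEVEL_MASK;
u32 value = GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL;
u32 mmio_val = mask << 16 | value;  /* upper half: write-enable, lower half: data */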
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -564,26 +565,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
wa_init_start(wal, "context");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- bdw_ctx_workarounds_init(engine);
- else if (IS_CHERRYVIEW(i915))
- chv_ctx_workarounds_init(engine);
- else if (IS_SKYLAKE(i915))
- skl_ctx_workarounds_init(engine);
- else if (IS_BROXTON(i915))
- bxt_ctx_workarounds_init(engine);
- else if (IS_KABYLAKE(i915))
- kbl_ctx_workarounds_init(engine);
- else if (IS_GEMINILAKE(i915))
- glk_ctx_workarounds_init(engine);
- else if (IS_COFFEELAKE(i915))
- cfl_ctx_workarounds_init(engine);
+ if (IS_GEN(i915, 11))
+ icl_ctx_workarounds_init(engine);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine);
- else if (IS_ICELAKE(i915))
- icl_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (INTEL_GEN(i915) < 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -724,9 +725,9 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
+wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
u32 mcr_slice_subslice_mask;
/*
@@ -742,14 +743,15 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
* something more complex that requires checking the range of every
* MMIO read).
*/
- if (INTEL_GEN(dev_priv) >= 10 &&
+ if (INTEL_GEN(i915) >= 10 &&
is_power_of_2(sseu->slice_mask)) {
/*
* read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
* enabled subslice, no need to redirect MCR packet
*/
u32 slice = fls(sseu->slice_mask);
- u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u32 fuse3 =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
u8 ss_mask = sseu->subslice_mask[slice];
u8 enabled_mask = (ss_mask | ss_mask >>
@@ -763,7 +765,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(i915) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
else
@@ -783,7 +785,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
wa_write_masked_or(wal,
GEN8_MCR_SELECTOR,
mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(dev_priv));
+ intel_calculate_mcr_s_ss_select(i915));
}
static void
@@ -862,26 +864,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
- else if (IS_BROXTON(i915))
- bxt_gt_workarounds_init(i915, wal);
- else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
- else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
- else if (IS_COFFEELAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ if (IS_GEN(i915, 11))
+ icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
- else if (IS_ICELAKE(i915))
- icl_gt_workarounds_init(i915, wal);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915, wal);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915, wal);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915, wal);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915, wal);
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915, wal);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
}
@@ -896,15 +894,14 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
}
static enum forcewake_domains
-wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
- const struct i915_wa_list *wal)
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
@@ -913,7 +910,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
}
static void
-wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw;
unsigned long flags;
@@ -923,27 +920,22 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
if (!wal->count)
return;
- fw = wal_get_fw_for_rmw(dev_priv, wal);
+ fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- u32 val = I915_READ_FW(wa->reg);
-
- val &= ~wa->mask;
- val |= wa->val;
-
- I915_WRITE_FW(wa->reg, val);
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
}
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+void intel_gt_apply_workarounds(struct drm_i915_private *i915)
{
- wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+ wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
static bool
@@ -960,7 +952,7 @@ wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
return true;
}
-static bool wa_list_verify(struct drm_i915_private *dev_priv,
+static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
{
@@ -969,15 +961,17 @@ static bool wa_list_verify(struct drm_i915_private *dev_priv,
bool ok = true;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+ ok &= wa_verify(wa,
+ intel_uncore_read(uncore, wa->reg),
+ wal->name, from);
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from)
{
- return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
+ return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
}
static void
@@ -1052,6 +1046,9 @@ static void icl_whitelist_build(struct i915_wa_list *w)
/* WaAllowUMDToModifySamplerMode:icl */
whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1059,30 +1056,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
wa_init_start(w, "whitelist");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_whitelist_build(w);
- else if (IS_BROXTON(i915))
- bxt_whitelist_build(w);
- else if (IS_KABYLAKE(i915))
- kbl_whitelist_build(w);
- else if (IS_GEMINILAKE(i915))
- glk_whitelist_build(w);
- else if (IS_COFFEELAKE(i915))
- cfl_whitelist_build(w);
+ if (IS_GEN(i915, 11))
+ icl_whitelist_build(w);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(w);
- else if (IS_ICELAKE(i915))
- icl_whitelist_build(w);
+ else if (IS_COFFEELAKE(i915))
+ cfl_whitelist_build(w);
+ else if (IS_GEMINILAKE(i915))
+ glk_whitelist_build(w);
+ else if (IS_KABYLAKE(i915))
+ kbl_whitelist_build(w);
+ else if (IS_BROXTON(i915))
+ bxt_whitelist_build(w);
+ else if (IS_SKYLAKE(i915))
+ skl_whitelist_build(w);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -1091,8 +1084,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
const struct i915_wa_list *wal = &engine->whitelist;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
struct i915_wa *wa;
unsigned int i;
@@ -1101,13 +1094,15 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
return;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(wa->reg));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(RING_NOPID(base)));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
}
static void
@@ -1115,7 +1110,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_ICELAKE(i915)) {
+ if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
@@ -1170,8 +1165,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ if (IS_GEN_RANGE(i915, 9, 11)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1236,7 +1231,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS)
+ if (engine->id == RCS0)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1256,7 +1251,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
- wa_list_apply(engine->i915, &engine->wa_list);
+ wa_list_apply(engine->uncore, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 7c734714b05e..34eee5ec511e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -9,18 +9,7 @@
#include <linux/slab.h>
-struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
-};
-
-struct i915_wa_list {
- const char *name;
- struct i915_wa *list;
- unsigned int count;
- unsigned int wa_count;
-};
+#include "intel_workarounds_types.h"
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -31,9 +20,9 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
-void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+void intel_gt_init_workarounds(struct drm_i915_private *i915);
+void intel_gt_apply_workarounds(struct drm_i915_private *i915);
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
new file mode 100644
index 000000000000..30918da180ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef __INTEL_WORKAROUNDS_TYPES_H__
+#define __INTEL_WORKAROUNDS_TYPES_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct i915_wa {
+        i915_reg_t reg;
+        u32 mask;
+        u32 val;
+};
+
+struct i915_wa_list {
+        const char *name;
+        struct i915_wa *list;
+        unsigned int count;
+        unsigned int wa_count;
+};
+
+#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
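Splitting the two structs into their own header lets other headers embed an i915_wa_list without pulling in the full workarounds API. Each entry records a register plus the bits to clear (mask) and the bits to set (val); applying one entry is the read-modify-write that wa_list_apply() now performs, sketched here:

static void apply_one_wa(struct intel_uncore *uncore, const struct i915_wa *wa)
{
        /* Same operation as the loop body in wa_list_apply() above. */
        intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
}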
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 391f3d9ffdf1..419fd4d6a8f0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876f..90721b54e7ae 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -1449,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1535,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
}
*vaddr = 0xdeadbeaf;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vm, NULL);
@@ -1653,7 +1650,7 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1709,16 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
+ mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
@@ -1734,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1764,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b923..27d8f853111b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae..6fd70d326468 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915)
static int pm_prepare(struct drm_i915_private *i915)
{
- int err = 0;
-
- if (i915_gem_suspend(i915)) {
- pr_err("i915_gem_suspend failed\n");
- err = -EINVAL;
- }
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static void pm_suspend(struct drm_i915_private *i915)
@@ -220,5 +215,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a..e43630b40fce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(&i915->gpu_error);
+ return !i915_terminally_wedged(i915);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS]);
+ return intel_engine_can_store_dword(i915->engine[RCS0]);
}
static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319..4e1b6efc6b22 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
}
for (n = 0; n < nctx; n++) {
- ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+ ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
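This hunk adds an explicit flush of the CPU map after the batch is written; later hunks in the series go further and drop the set-to-GTT-domain call entirely, converging on the pattern below. The helpers are the ones visible in these hunks; the 64-byte flush length is simply the small batch size these selftests write:

*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, 64);  /* flush CPU writes out of the cache */
i915_gem_object_unpin_map(obj);
i915_gem_chipset_flush(i915);             /* make the writes visible to the GPU */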
@@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}
-static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+static noinline int cpu_check(struct drm_i915_gem_object *obj,
+ unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], m);
+ pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ __builtin_return_address(0), idx,
+ n, real_page_count(obj), m, max,
+ map[m], m);
err = -EINVAL;
goto out_unmap;
}
@@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], STACK_MAGIC);
+ pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ __builtin_return_address(0), idx, n, m,
+ map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj = NULL;
- unsigned long ncontexts, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -495,44 +496,167 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+
+ ndwords++;
+ ncontexts++;
+ }
+
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
+
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_shared_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *parent;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_live_test t;
+ struct drm_file *file;
+ int err = 0;
+
+ /*
+ * Create a few different contexts with the same mm and write
+ * through each ctx using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
+ parent = live_context(i915, file);
+ if (IS_ERR(parent)) {
+ err = PTR_ERR(parent);
+ goto out_unlock;
+ }
+
+ if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ err = 0;
+ goto out_unlock;
+ }
+
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
- ncontexts = 0;
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- unsigned int id;
+ for_each_engine(engine, i915, id) {
+ unsigned long ncontexts, ndwords, dw;
+ struct drm_i915_gem_object *obj = NULL;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
- ctx = i915_gem_create_context(i915, file->driver_priv);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- for_each_engine(engine, i915, id) {
+ dw = 0;
+ ndwords = 0;
+ ncontexts = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
- if (!engine->context_size)
- continue; /* No logical context support in HW */
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_test;
+ }
- if (!intel_engine_can_store_dword(engine))
- continue;
+ __assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
}
@@ -544,35 +668,39 @@ static int igt_ctx_exec(void *arg)
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
+
ndwords++;
+ ncontexts++;
+
+ kernel_context_close(ctx);
}
- ncontexts++;
- }
- pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
- dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
- err = cpu_check(obj, rem);
- if (err)
- break;
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ goto out_test;
- dw += rem;
+ dw += rem;
+ }
}
-
-out_unlock:
+out_test:
if (igt_live_test_end(&t))
err = -EIO;
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
@@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -923,7 +1048,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_sseu default_sseu = intel_device_default_sseu(i915);
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_sseu pg_sseu;
@@ -962,11 +1087,12 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out_unlock;
}
+ i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1047,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1071,7 +1197,7 @@ static int igt_ctx_readonly(void *arg)
if (err)
goto out_unlock;
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1125,9 +1251,10 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
+ idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1137,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;
- err = cpu_check(obj, num_writes);
+ err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1201,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = value;
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1298,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = result;
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -1396,13 +1518,13 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_unlock;
- ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
}
- ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
@@ -1432,7 +1554,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= ~sizeof(u32);
+ offset &= -sizeof(u32);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
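The one-character change above fixes the alignment mask: ~sizeof(u32) only clears bit 2, whereas -sizeof(u32) in two's complement clears the low two bits and rounds the random offset down to a u32 boundary. Worked through with an arbitrary value:

u64 offset = 0x1007;
u64 old = offset & ~(u64)sizeof(u32);  /* 0x1007 & ...fffb == 0x1003, still misaligned */
u64 new = offset & -(u64)sizeof(u32);  /* 0x1007 & ...fffc == 0x1004, aligned down     */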
@@ -1458,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
intel_runtime_pm_put(i915, wakeref);
@@ -1472,10 +1594,10 @@ out_unlock:
}
static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
if (engines == ALL_ENGINES)
return "all";
@@ -1488,67 +1610,60 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx,
- unsigned int engines)
+ intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
- int err;
+ intel_engine_mask_t tmp;
+ int pass;
GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
+ for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+ bool from_idle = pass & 1;
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (!from_idle) {
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
- i915_request_add(rq);
- }
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
-
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!engine_has_kernel_context_barrier(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
+ i915_request_add(rq);
+ }
}
- }
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
+ err = i915_gem_switch_to_kernel_context(i915,
+ i915->gt.active_engines);
+ if (err)
+ return err;
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (engine->last_retired_context->gem_context != i915->kernel_context) {
- pr_err("engine %s not idling in kernel context!\n",
- engine->name);
+ if (!from_idle) {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (i915->gt.active_requests) {
+ pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+ i915->gt.active_requests,
+ pass, from_idle ? "idle" : "busy",
+ __engine_name(i915, engines),
+ is_power_of_2(engines) ? "" : "s");
return -EINVAL;
}
- }
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ /* XXX Bonus points for proving we are the kernel context! */
- if (i915->gt.active_requests) {
- pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
- i915->gt.active_requests);
- return -EINVAL;
+ mutex_unlock(&i915->drm.struct_mutex);
+ drain_delayed_work(&i915->gt.idle_work);
+ mutex_lock(&i915->drm.struct_mutex);
}
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!intel_engine_has_kernel_context(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
- }
- }
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
return 0;
}
@@ -1592,8 +1707,6 @@ static int igt_switch_to_kernel_context(void *arg)
out_unlock:
GEM_TRACE_DUMP_ON(err);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1715,117 @@ out_unlock:
return err;
}
+static void mock_barrier_task(void *data)
+{
+ unsigned int *counter = data;
+
+ ++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ unsigned int counter;
+ int err;
+
+ /*
+ * The context barrier provides us with a callback after it emits
+ * a request; useful for retiring old state after loading new.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, 0,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately with 0 engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately for all inactive engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(i915, wakeref)
+ rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ if (IS_ERR(rq)) {
+ pr_err("Request allocation failed!\n");
+ goto out;
+ }
+ i915_request_add(rq);
+ GEM_BUG_ON(list_empty(&ctx->active_engines));
+
+ counter = 0;
+ context_barrier_inject_fault = BIT(RCS0);
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ context_barrier_inject_fault = 0;
+ if (err == -ENXIO)
+ err = 0;
+ else
+ pr_err("Did not hit fault injection!\n");
+ if (counter != 0) {
+ pr_err("Invoked callback on error!\n");
+ err = -EIO;
+ }
+ if (err)
+ goto out;
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ mock_device_flush(i915);
+ if (counter == 0) {
+ pr_err("Did not retire on each active engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mock_context_close(ctx);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
int err;
@@ -1628,10 +1848,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
+ SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..2b943ee246c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
goto err;
}
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b9b0ea4e2404..89766688e420 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3850ef4a5ec8..9cca66e4420a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
goto err;
@@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1681,25 +1680,31 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7..971148fbe6f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
for (loop = 0; loop < 3; loop++) {
intel_wakeref_t wakeref;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c..e6ffe2240126 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -42,7 +42,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS],
+ request = mock_request(i915->engine[RCS0],
i915->kernel_context,
HZ / 10);
if (!request)
@@ -66,7 +66,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
@@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
}
- mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_device;
+ goto out_locked;
}
- mutex_lock(&i915->drm.struct_mutex);
i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
@@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ vip = mock_request(i915->engine[RCS0], ctx[1], 0);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
- pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
- vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ pr_err("timed out waiting for high priority request\n");
goto err;
}
@@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS],
+ .engine = i915->engine[RCS0],
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -622,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_chipset_flush(i915);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -780,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -802,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ i915_gem_chipset_flush(i915);
+
return vma;
err:
@@ -1219,7 +1212,7 @@ out_flush:
num_fences += atomic_long_read(&t[id].num_fences);
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+ num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
@@ -1246,7 +1239,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 10ef0e636a24..b18eaefef798 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -133,7 +133,7 @@ static int __run_selftests(const char *name,
if (signal_pending(current))
return -EINTR;
- pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ pr_info(DRIVER_NAME ": Running %s\n", st->name);
if (data)
err = st->live(data);
else
@@ -255,7 +255,7 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
- pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
err = st->func(data);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cdbc8f134e5e..cbf45d85cbff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -571,21 +571,27 @@ static int test_timer(void *arg)
unsigned long target, delay;
struct timed_fence tf;
+ preempt_disable();
timed_fence_init(&tf, target = jiffies);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with immediate expiration not signaled\n");
goto err;
}
+ preempt_enable();
timed_fence_fini(&tf);
for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ preempt_disable();
timed_fence_init(&tf, target = jiffies + delay);
if (i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
goto err;
}
+ preempt_enable();
i915_sw_fence_wait(&tf.fence);
+
+ preempt_disable();
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence not signaled after wait\n");
goto err;
@@ -595,13 +601,14 @@ static int test_timer(void *arg)
target, jiffies);
goto err;
}
-
+ preempt_enable();
timed_fence_fini(&tf);
}
return 0;
err:
+ preempt_enable();
timed_fence_fini(&tf);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 12ea69b1a1e5..bd96afcadfe7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, "mock", NULL);
+ tl = i915_timeline_create(state->i915, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
- tl = i915_timeline_create(i915, "live", NULL);
+ tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl))
return tl;
@@ -641,6 +641,118 @@ out:
#undef NUM_TIMELINES
}
+static int live_hwsp_wrap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_timeline *tl;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ tl = i915_timeline_create(i915, NULL);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out_rpm;
+ }
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out_free;
+
+ err = i915_timeline_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, i915, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ tl->seqno = -4u;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ i915_retire_requests(i915); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ i915_timeline_unpin(tl);
+out_free:
+ i915_timeline_put(tl);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -723,7 +835,11 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_recycle),
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
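
[Note on the hunk above] live_hwsp_wrap() primes tl->seqno to -4u so that the two i915_timeline_get_seqno() calls straddle the 32-bit wrap (hence the GEM_BUG_ON(seqno[1] >= seqno[0])), then checks that the pre-wrap HWSP cacheline still holds its value. A wrap is survivable at all because seqnos are ordered with wrap-safe modular arithmetic rather than a plain comparison. The standalone sketch below is modelled loosely on the driver's i915_seqno_passed() helper; the function name and constants here are illustrative only.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* "a has passed b" under modulo-2^32 arithmetic: the signed difference is
     * non-negative as long as the two values are within 2^31 of each other. */
    static int seqno_passed(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    int main(void)
    {
            uint32_t before_wrap = UINT32_C(0xfffffffe); /* tl->seqno near -4u */
            uint32_t after_wrap  = UINT32_C(0x00000002); /* allocated post-wrap */

            /* Plain comparison gets the ordering backwards across the wrap... */
            assert(!(after_wrap > before_wrap));
            /* ...while the modular comparison still sees after_wrap as newer. */
            assert(seqno_passed(after_wrap, before_wrap));
            assert(!seqno_passed(before_wrap, after_wrap));

            printf("wrap-safe seqno ordering holds across the 32-bit boundary\n");
            return 0;
    }
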
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cf1de82741fa..fc594b030f5a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
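
[Note on the hunk above] The i915_vma mock selftest stops embedding struct i915_ggtt in its stack frame and kmalloc()s it instead, trading stack footprint for one extra allocation-failure path (the new out_put label). The same pattern, reduced to a userspace sketch with a hypothetical oversized structure (struct big_state and its size are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for a structure too large to keep on the stack */
    struct big_state {
            unsigned char tables[64 * 1024];
            unsigned int users;
    };

    static int run_with_heap_state(void)
    {
            struct big_state *st;
            int err = 0;

            st = malloc(sizeof(*st));       /* was: struct big_state st; */
            if (!st)
                    return -1;              /* new failure path added by the move */

            memset(st, 0, sizeof(*st));
            st->users = 1;

            /* ... use st exactly as the stack-based version did ... */
            if (st->users != 1) {
                    err = -2;
                    goto out_free;
            }

    out_free:
            free(st);
            return err;
    }

    int main(void)
    {
            printf("run_with_heap_state() -> %d\n", run_with_heap_state());
            return 0;
    }
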
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..94aee4071a66 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
cond_resched();
if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915)) {
+ i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
pr_err("Failed to switch back to kernel context; declaring wedged\n");
i915_gem_set_wedged(i915);
}
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_terminally_wedged(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ebd9225684e..16890dfe74c0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
+ if (engine->emit_init_breadcrumb &&
+ rq->timeline->has_initial_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
cancel_rq:
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index c5e0a0e98fcb..b05a21eaa8f4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client,
dev_priv->preempt_context : dev_priv->kernel_context;
if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->engines != INTEL_INFO(dev_priv)->engine_mask ||
client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
@@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg)
for (i = 0; i < ATTEMPTS; i++) {
clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
i % GUC_CLIENT_PRIORITY_NUM,
dev_priv->kernel_context);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7b6f3bea9ef8..050bd1e19e02 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
@@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -242,6 +244,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
i915_gem_chipset_flush(h->i915);
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
timeout = -EIO;
i915_request_put(rq);
@@ -375,7 +383,7 @@ static int igt_global_reset(void *arg)
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
return err;
@@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg)
i915_gem_set_wedged(i915);
- mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+ GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_reset_failed(i915) ? -EIO : 0;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
+static int igt_reset_nop(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int reset_count, count;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ /* Check that we can reset during non-user portions of requests */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ count = 0;
+ do {
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id) {
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ igt_global_reset_lock(i915);
+ i915_reset(i915, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(i915);
+ if (i915_reset_failed(i915)) {
+ err = -EIO;
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) !=
+ reset_count + ++count) {
+ pr_err("Full GPU reset not recorded!\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: %d resets\n", __func__, count);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Check that we can engine-reset during non-user portions */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ unsigned int reset_count, reset_engine_count;
+ unsigned int count;
+ IGT_TIMEOUT(end_time);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+ count = 0;
+
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ err = i915_reset_engine(engine, NULL);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count + ++count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+ if (err)
+ break;
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
struct intel_engine_cs *engine;
@@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (active) {
@@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
- rq->fence.seqno,
- i915_request_global_seqno(rq));
+ rq->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
@@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
count++;
if (rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to complete request after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ break;
+ }
+
i915_request_put(rq);
}
@@ -837,7 +1074,7 @@ unwind:
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (flags & TEST_ACTIVE) {
@@ -887,7 +1124,8 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
u32 count = i915_reset_count(&i915->gpu_error);
@@ -905,7 +1143,7 @@ static int igt_reset_wait(void *arg)
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
@@ -917,7 +1155,7 @@ static int igt_reset_wait(void *arg)
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -963,7 +1201,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1034,13 +1272,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- igt_global_reset_lock(i915);
-
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -1066,7 +1302,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1138,7 +1374,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
out_reset:
- fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
+ igt_global_reset_lock(i915);
+ fake_hangcheck(rq->i915, rq->engine->mask);
+ igt_global_reset_unlock(i915);
if (tsk) {
struct igt_wedge_me w;
@@ -1159,9 +1397,8 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1317,7 +1554,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
+ reset_count = fake_hangcheck(i915, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1367,7 +1604,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1376,7 +1613,7 @@ unlock:
static int igt_handle_error(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1423,7 +1660,7 @@ static int igt_handle_error(void *arg)
/* Temporarily disable error capture */
error = xchg(&i915->gpu_error.first_error, (void *)-1);
- i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
+ i915_handle_error(i915, engine->mask, 0, NULL);
xchg(&i915->gpu_error.first_error, error);
@@ -1547,7 +1784,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1586,7 +1823,7 @@ static int igt_atomic_reset(void *arg)
/* Flush any requests before we get started and check basics */
force_reset(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
goto unlock;
if (intel_has_gpu_reset(i915)) {
@@ -1642,6 +1879,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_reset_nop),
+ SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
@@ -1660,7 +1899,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
wakeref = intel_runtime_pm_get(i915);
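
[Note on the hunks above] The new igt_reset_nop() and igt_reset_nop_engine() hinge on the reset bookkeeping in i915->gpu_error: every full GPU reset must advance the global reset count by exactly one, while an engine-only reset must advance just that engine's counter and leave the global one alone. A small standalone model of those invariants and the checks the selftests perform follows; the struct and function names are illustrative, not the driver's.

    #include <assert.h>
    #include <stdio.h>

    #define NUM_ENGINES 4

    struct reset_stats {
            unsigned int global;                  /* full-GPU resets */
            unsigned int engine[NUM_ENGINES];     /* per-engine resets */
    };

    static void full_reset(struct reset_stats *s)
    {
            s->global++;            /* a full reset is recorded once, globally */
    }

    static void engine_reset(struct reset_stats *s, int id)
    {
            s->engine[id]++;        /* must not touch the global count */
    }

    int main(void)
    {
            struct reset_stats s = {0};
            unsigned int reset_count = s.global;
            unsigned int engine_count = s.engine[0];

            /* igt_reset_nop-style check: one full reset, global count +1 */
            full_reset(&s);
            assert(s.global == reset_count + 1);

            /* igt_reset_nop_engine-style check: only engine[0] is bumped */
            engine_reset(&s, 0);
            assert(s.global == reset_count + 1);    /* unchanged by engine reset */
            assert(s.engine[0] == engine_count + 1);

            printf("reset accounting behaves as the selftests expect\n");
            return 0;
    }
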
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751..fbee030db940 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -10,6 +10,7 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
@@ -75,6 +76,185 @@ err_unlock:
return err;
}
+static int live_busywait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_unlock;
+ ctx_hi->sched.priority = INT_MAX;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = INT_MIN;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses an MI_STORE_DWORD_IMM to update the semaphore value,
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = i915_request_alloc(engine, ctx_lo);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = i915_request_alloc(engine, ctx_hi);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -88,6 +268,9 @@ static int live_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ pr_err("Logical preemption supported, but not exposed\n");
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
@@ -110,8 +293,17 @@ static int live_preempt(void *arg)
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -147,7 +339,8 @@ static int live_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -201,8 +394,17 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -241,7 +443,8 @@ static int live_late_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -335,6 +538,9 @@ static int live_suppress_self_preempt(void *arg)
struct i915_request *rq_a, *rq_b;
int depth;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin,
@@ -407,6 +613,171 @@ err_wedged:
goto err_client_b;
}
+static int __i915_sw_fence_call
+dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+static struct i915_request *dummy_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->engine = engine;
+
+ i915_sched_node_init(&rq->sched);
+
+ /* mark this request as permanently incomplete */
+ rq->fence.seqno = 1;
+ BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
+ rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ i915_sw_fence_init(&rq->submit, dummy_notify);
+ i915_sw_fence_commit(&rq->submit);
+
+ return rq;
+}
+
+static void dummy_request_free(struct i915_request *dummy)
+{
+ i915_request_mark_complete(dummy);
+ i915_sched_node_fini(&dummy->sched);
+ i915_sw_fence_fini(&dummy->submit);
+
+ dma_fence_free(&dummy->fence);
+}
+
+static int live_suppress_wait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct preempt_client client[4];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ int i;
+
+ /*
+ * Waiters are given a little priority nudge, but not enough
+ * to actually cause any preemption. Double check that we do
+ * not needlessly generate preempt-to-idle cycles.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
+ goto err_unlock;
+ if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ goto err_client_0;
+ if (preempt_client_init(i915, &client[2])) /* head of queue */
+ goto err_client_1;
+ if (preempt_client_init(i915, &client[3])) /* bystander */
+ goto err_client_2;
+
+ for_each_engine(engine, i915, id) {
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!engine->emit_init_breadcrumb)
+ continue;
+
+ for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
+ struct i915_request *rq[ARRAY_SIZE(client)];
+ struct i915_request *dummy;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ dummy = dummy_request(engine);
+ if (!dummy)
+ goto err_client_3;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ rq[i] = igt_spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq[i])) {
+ err = PTR_ERR(rq[i]);
+ goto err_wedged;
+ }
+
+ /* Disable NEWCLIENT promotion */
+ __i915_active_request_set(&rq[i]->timeline->last_request,
+ dummy);
+ i915_request_add(rq[i]);
+ }
+
+ dummy_request_free(dummy);
+
+ GEM_BUG_ON(i915_request_completed(rq[0]));
+ if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
+ pr_err("%s: First client failed to start\n",
+ engine->name);
+ goto err_wedged;
+ }
+ GEM_BUG_ON(!i915_request_started(rq[0]));
+
+ if (i915_request_wait(rq[depth],
+ I915_WAIT_LOCKED |
+ I915_WAIT_PRIORITY,
+ 1) != -ETIME) {
+ pr_err("%s: Waiter depth:%d completed!\n",
+ engine->name, depth);
+ goto err_wedged;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ err = -EINVAL;
+ goto err_client_3;
+ }
+ }
+ }
+
+ err = 0;
+err_client_3:
+ preempt_client_fini(&client[3]);
+err_client_2:
+ preempt_client_fini(&client[2]);
+err_client_1:
+ preempt_client_fini(&client[1]);
+err_client_0:
+ preempt_client_fini(&client[0]);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_3;
+}
+
static int live_chain_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -438,11 +809,39 @@ static int live_chain_preempt(void *arg)
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
- int count, i;
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
- for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
- struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+ rq = igt_spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ goto err_wedged;
+ }
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
@@ -484,6 +883,26 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+
+ rq = i915_request_alloc(engine, lo.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ goto err_wedged;
+ }
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
}
}
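
[Note on the hunk above] live_chain_preempt() no longer hard-codes the old "must fit ring!" limit of 32 requests; it measures one spinner request's footprint from its recorded head and wa_tail offsets (adding the ring size back when the request wrapped) and divides the ring size by that footprint to bound the chain length. The arithmetic, as a standalone sketch with made-up offsets and a 4 KiB ring:

    #include <assert.h>
    #include <stdio.h>

    /*
     * How many identical requests fit in a circular ring, given the head/tail
     * offsets recorded for one request that has already been emitted.
     */
    static int max_requests(int head, int wa_tail, int ring_size)
    {
            int footprint = wa_tail - head; /* bytes one request occupied */

            if (footprint < 0)              /* the request wrapped past the end */
                    footprint += ring_size;

            return ring_size / footprint;
    }

    int main(void)
    {
            /* emitted without wrapping: 192 bytes of a 4 KiB ring */
            assert(max_requests(256, 448, 4096) == 21);

            /* wrapped: head near the end, tail back at the start;
             * 4096 - 4032 + 128 = 192 bytes again */
            assert(max_requests(4032, 128, 4096) == 21);

            printf("both layouts give the same per-ring request budget\n");
            return 0;
    }
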
@@ -767,7 +1186,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -795,7 +1214,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -808,6 +1227,7 @@ static int live_preempt_smoke(void *arg)
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
+ struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -838,11 +1258,13 @@ static int live_preempt_smoke(void *arg)
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
- if (err)
+ if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ err = -EIO;
goto err_batch;
+ }
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
@@ -861,7 +1283,7 @@ static int live_preempt_smoke(void *arg)
}
err_ctx:
- if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
@@ -884,9 +1306,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
@@ -895,7 +1319,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
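
[Note on the hunks above] live_busywait_preempt() parks a low-priority batch on MI_SEMAPHORE_WAIT, polling a GGTT dword until it equals the value that a higher-priority batch later writes with MI_STORE_DWORD_IMM; if that store can never be scheduled past the spinner, the test hangs and fails. The CPU-side analogue of this poll-until-equal handshake, sketched with C11 atomics and two POSIX threads (purely illustrative; build with cc -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint semaphore;   /* plays the role of the GGTT dword */

    /* low-priority work: announce start, then busy-poll until released with 0 */
    static void *busywaiter(void *arg)
    {
            (void)arg;

            atomic_store_explicit(&semaphore, 1, memory_order_release);

            /* MI_SEMAPHORE_WAIT | POLL | SAD_EQ_SDD, in spirit */
            while (atomic_load_explicit(&semaphore, memory_order_acquire) != 0)
                    ;

            return NULL;
    }

    int main(void)
    {
            pthread_t lo;

            atomic_init(&semaphore, 0);
            pthread_create(&lo, NULL, busywaiter, NULL);

            /* like wait_for(READ_ONCE(*map), 10): wait until the spinner is armed */
            while (atomic_load_explicit(&semaphore, memory_order_acquire) == 0)
                    ;

            /* high-priority work: one store is all it takes to release the waiter */
            atomic_store_explicit(&semaphore, 0, memory_order_release);

            pthread_join(lo, NULL);
            printf("busywaiter released by the store; no hang\n");
            return 0;
    }
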
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 81d9d31042a9..e0d7ebecb215 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -119,9 +119,143 @@ int intel_uncore_mock_selftests(void)
return 0;
}
-static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+static int live_forcewake_ops(void *arg)
+{
+ static const struct reg {
+ const char *name;
+ unsigned long platforms;
+ unsigned int offset;
+ } registers[] = {
+ {
+ "RING_START",
+ INTEL_GEN_MASK(6, 7),
+ 0x38,
+ },
+ {
+ "RING_MI_MODE",
+ INTEL_GEN_MASK(8, BITS_PER_LONG),
+ 0x9c,
+ }
+ };
+ const struct reg *r;
+ struct drm_i915_private *i915 = arg;
+ struct intel_uncore_forcewake_domain *domain;
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ int err = 0;
+
+ GEM_BUG_ON(i915->gt.awake);
+
+ /* vlv/chv with their pcu behave differently wrt reads */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ pr_debug("PCU fakes forcewake badly; skipping\n");
+ return 0;
+ }
+
+ /*
+ * Not quite as reliable across the gen as one would hope.
+ *
+ * Either our theory of operation is incorrect, or there remain
+ * external parties interfering with the powerwells.
+ *
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110210
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ /* We have to pick carefully to get the exact behaviour we need */
+ for (r = registers; r->name; r++)
+ if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ break;
+ if (!r->name) {
+ pr_debug("Forcewaked register not known for %s; skipping\n",
+ intel_platform_name(INTEL_INFO(i915)->platform));
+ return 0;
+ }
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ for_each_fw_domain(domain, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (!hrtimer_cancel(&domain->timer))
+ continue;
+
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+
+ for_each_engine(engine, i915, id) {
+ i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ if (!engine->default_state)
+ continue;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
+ FW_REG_READ);
+ if (!fw_domains)
+ continue;
+
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ if (!domain->wake_count)
+ continue;
+
+ pr_err("fw_domain %s still active, aborting test!\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ intel_uncore_forcewake_get(uncore, fw_domains);
+ val = readl(reg);
+ intel_uncore_forcewake_put(uncore, fw_domains);
+
+ /* Flush the forcewake release (delayed onto a timer) */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+
+ preempt_disable();
+ err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+ preempt_enable();
+ if (err) {
+ pr_err("Failed to clear fw_domain %s\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ goto out_rpm;
+ }
+ }
+
+ if (!val) {
+ pr_err("%s:%s was zero while fw was held!\n",
+ engine->name, r->name);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ /* We then expect the read to return 0 outside of the fw */
+ if (wait_for(readl(reg) == 0, 100)) {
+ pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+ engine->name, r->name, readl(reg), fw_domains);
+ err = -ETIMEDOUT;
+ goto out_rpm;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
+static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long *valid;
u32 offset;
int err;
@@ -137,48 +271,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
- GFP_KERNEL);
+ valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
(void)I915_READ_FW(reg);
- if (!check_for_unclaimed_mmio(dev_priv))
+ if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
(void)I915_READ(reg);
- if (check_for_unclaimed_mmio(dev_priv)) {
+ if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
- kfree(valid);
+ bitmap_free(valid);
return err;
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_forcewake_ops),
+ SUBTEST(live_forcewake_domains),
+ };
+
int err;
/* Confirm the table we load is still valid */
@@ -188,9 +326,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = intel_uncore_check_forcewake_domains(i915);
- if (err)
- return err;
-
- return 0;
+ return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index b15c4f26c593..567b6f8dae86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,14 @@
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
+#include "mock_drm.h"
+
+static const struct wo_register {
+ enum intel_platform platform;
+ u32 reg;
+} wo_registers[] = {
+ { INTEL_GEMINILAKE, 0x731c }
+};
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (IS_ERR(result))
return result;
- i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
cs = i915_gem_object_pin_map(result, I915_MAP_WB);
if (IS_ERR(cs)) {
@@ -82,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_obj;
}
memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -181,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
goto out_put;
@@ -214,7 +223,7 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
+ i915_reset(engine->i915, engine->mask, "live_workarounds");
return 0;
}
@@ -236,15 +245,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref) {
- if (spin)
- rq = igt_spinner_create_request(spin,
- ctx, engine,
- MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
- }
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -273,7 +278,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
intel_wakeref_t wakeref;
@@ -282,11 +286,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
engine->whitelist.count, name);
- if (want_spin) {
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
- }
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
ctx = kernel_context(i915);
if (IS_ERR(ctx))
@@ -298,17 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out;
}
- err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
+ err = switch_to_scratch_context(engine, &spin);
if (err)
goto out;
with_intel_runtime_pm(i915, wakeref)
err = reset(engine);
- if (want_spin) {
- igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
- }
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
@@ -340,10 +340,379 @@ out:
return err;
}
+static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_obj;
+ }
+ memset(ptr, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+ if (rsvd == 0x0000ffff) {
+ old &= ~(new >> 16);
+ old |= new & (new >> 16);
+ } else {
+ old &= ~rsvd;
+ old |= new & rsvd;
+ }
+
+ return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+ enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+ if (wo_registers[i].platform == platform &&
+ wo_registers[i].reg == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static int check_dirty_whitelist(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ const u32 values[] = {
+ 0x00000000,
+ 0x01010101,
+ 0x10100101,
+ 0x03030303,
+ 0x30300303,
+ 0x05050505,
+ 0x50500505,
+ 0x0f0f0f0f,
+ 0xf00ff00f,
+ 0x10101010,
+ 0xf0f01010,
+ 0x30303030,
+ 0xa0a03030,
+ 0x50505050,
+ 0xc0c05050,
+ 0xf0f0f0f0,
+ 0x11111111,
+ 0x33333333,
+ 0x55555555,
+ 0x0000ffff,
+ 0x00ff00ff,
+ 0xff0000ff,
+ 0xffff00ff,
+ 0xffffffff,
+ };
+ struct i915_vma *scratch;
+ struct i915_vma *batch;
+ int err = 0, i, v;
+ u32 *cs, *results;
+
+ scratch = create_scratch(ctx);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_scratch;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ u64 addr = scratch->node.start;
+ struct i915_request *rq;
+ u32 srm, lrm, rsvd;
+ u32 expect;
+ int idx;
+
+ if (wo_register(engine, reg))
+ continue;
+
+ srm = MI_STORE_REGISTER_MEM;
+ lrm = MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ lrm++, srm++;
+
+ pr_debug("%s: Writing garbage to %x\n",
+ engine->name, reg);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_batch;
+ }
+
+ /* SRM original */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = ~values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+ /* LRM original -- don't leave garbage in the context! */
+ *cs++ = lrm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ 0);
+ if (err)
+ goto err_request;
+
+err_request:
+ i915_request_add(rq);
+ if (err)
+ goto out_batch;
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("%s: Futzing %x timedout; cancelling test\n",
+ engine->name, reg);
+ i915_gem_set_wedged(ctx->i915);
+ err = -EIO;
+ goto out_batch;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_batch;
+ }
+
+ GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
+ rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, ~values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ if (err) {
+ pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
+ engine->name, err, reg);
+
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = ~values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+
+ err = -EINVAL;
+ }
+out_unpin:
+ i915_gem_object_unpin_map(scratch->obj);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+out_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Can the user write to the whitelisted registers? */
+
+ if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ return 0;
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ file = mock_file(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (engine->whitelist.count == 0)
+ continue;
+
+ err = check_dirty_whitelist(ctx, engine);
+ if (err)
+ goto out_file;
+ }
+
+out_file:
+ mutex_unlock(&i915->drm.struct_mutex);
+ mock_file_free(i915, file);
+ mutex_lock(&i915->drm.struct_mutex);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
@@ -381,10 +750,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
+ ok &= wa_list_verify(engine->uncore,
+ &lists->engine[id].wa_list, str);
return ok;
}
@@ -513,13 +883,14 @@ err:
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
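
[Note on the hunks above] check_dirty_whitelist() predicts each readback with the reg_write() helper added earlier in this file: if probing with 0xffffffff reports rsvd == 0x0000ffff, the register follows the masked-write convention, where bits 31:16 of the written value select which of bits 15:0 actually change; any other rsvd is treated as a plain writable-bit mask. The helper is reproduced below in a standalone sketch with a couple of worked values (the example register contents are invented, not taken from real hardware):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* same update rule as the selftest's reg_write() */
    static uint32_t reg_write(uint32_t old, uint32_t new, uint32_t rsvd)
    {
            if (rsvd == 0x0000ffff) {
                    /* masked register: bits 31:16 enable writes to bits 15:0 */
                    old &= ~(new >> 16);
                    old |= new & (new >> 16);
            } else {
                    /* ordinary register: rsvd marks the writable bits */
                    old &= ~rsvd;
                    old |= new & rsvd;
            }

            return old;
    }

    int main(void)
    {
            /* masked register: clearing bit 0 needs the enable bit in 31:16 ... */
            assert(reg_write(0x0000abcd, 0x00010000, 0x0000ffff) == 0x0000abcc);
            /* ...and without any enable bits, even writing ~0 changes nothing */
            assert(reg_write(0x0000abcd, 0x0000ffff, 0x0000ffff) == 0x0000abcd);

            /* ordinary register: rsvd is simply the set of writable bits */
            assert(reg_write(0x12345678, 0xffffffff, 0x000000ff) == 0x123456ff);

            printf("reg_write() masking rules behave as expected\n");
            return 0;
    }
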
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd602..0426093bf1d9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,25 +40,31 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
-
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
if (name) {
+ struct i915_hw_ppgtt *ppgtt;
+
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;
- ctx->ppgtt = mock_ppgtt(i915, name);
- if (!ctx->ppgtt)
+ ppgtt = mock_ppgtt(i915, name);
+ if (!ppgtt)
goto err_put;
+
+ __set_ppgtt(ctx, ppgtt);
}
return ctx;
@@ -87,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file)
{
+ struct i915_gem_context *ctx;
+ int err;
+
lockdep_assert_held(&i915->drm.struct_mutex);
- return i915_gem_create_context(i915, file->driver_priv);
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ err = gem_context_register(ctx, file->driver_priv);
+ if (err < 0)
+ goto err_ctx;
+
+ return ctx;
+
+err_ctx:
+ context_close(ctx);
+ return ERR_PTR(err);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 08f0cab02e0f..61a8206ed677 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -50,13 +50,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915,
- &ring->timeline, engine->name,
- NULL)) {
+ if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
kfree(ring);
return NULL;
}
+ kref_init(&ring->base.ref);
ring->base.size = sz;
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
@@ -76,28 +75,26 @@ static void mock_ring_free(struct intel_ring *base)
kfree(ring);
}
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
- struct mock_request,
- link);
+ struct i915_request,
+ mock.link);
}
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
{
- list_del_init(&request->link);
- intel_engine_write_global_seqno(request->base.engine,
- request->base.global_seqno);
- i915_request_mark_complete(&request->base);
- GEM_BUG_ON(!i915_request_completed(&request->base));
+ list_del_init(&request->mock.link);
+ i915_request_mark_complete(request);
+ GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->base.engine);
+ intel_engine_queue_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
- struct mock_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -112,8 +109,9 @@ static void hw_delay_complete(struct timer_list *t)
* requeue the timer for the next delayed request.
*/
while ((request = first_request(engine))) {
- if (request->delay) {
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ if (request->mock.delay) {
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
break;
}
@@ -126,55 +124,43 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
mock_timeline_unpin(ce->ring->timeline);
- i915_gem_context_put(ce->gem_context);
}
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
{
- GEM_BUG_ON(ce->pin_count);
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->ring)
mock_ring_free(ce->ring);
-}
-static const struct intel_context_ops mock_context_ops = {
- .unpin = mock_context_unpin,
- .destroy = mock_context_destroy,
-};
+ intel_context_free(ce);
+}
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int err = -ENOMEM;
-
- if (ce->pin_count++)
- return ce;
-
if (!ce->ring) {
- ce->ring = mock_ring(engine);
+ ce->ring = mock_ring(ce->engine);
if (!ce->ring)
- goto err;
+ return -ENOMEM;
}
mock_timeline_pin(ce->ring->timeline);
+ return 0;
+}
- ce->ops = &mock_context_ops;
- i915_gem_context_get(ctx);
- return ce;
+static const struct intel_context_ops mock_context_ops = {
+ .pin = mock_context_pin,
+ .unpin = mock_context_unpin,
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
-}
+ .destroy = mock_context_destroy,
+};
static int mock_request_alloc(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
-
- INIT_LIST_HEAD(&mock->link);
- mock->delay = 0;
+ INIT_LIST_HEAD(&request->mock.link);
+ request->mock.delay = 0;
return 0;
}
@@ -192,25 +178,55 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
static void mock_submit_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
unsigned long flags;
i915_request_submit(request);
- GEM_BUG_ON(!request->global_seqno);
spin_lock_irqsave(&engine->hw_lock, flags);
- list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue) {
- if (mock->delay)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ list_add_tail(&request->mock.link, &engine->hw_queue);
+ if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+ if (request->mock.delay)
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
else
- advance(mock);
+ advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ if (!i915_request_signaled(request))
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_request_mark_complete(request);
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -227,18 +243,21 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
+ engine->base.mask = BIT(id);
engine->base.status_page.addr = (void *)(engine + 1);
- engine->base.context_pin = mock_context_pin;
+ engine->base.cops = &mock_context_ops;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- if (i915_timeline_init(i915,
- &engine->base.timeline,
- engine->base.name,
- NULL))
+ engine->base.reset.prepare = mock_reset_prepare;
+ engine->base.reset.reset = mock_reset;
+ engine->base.reset.finish = mock_reset_finish;
+ engine->base.cancel_requests = mock_cancel_requests;
+
+ if (i915_timeline_init(i915, &engine->base.timeline, NULL))
goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
@@ -249,7 +268,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ if (pin_context(i915->kernel_context, &engine->base,
+ &engine->base.kernel_context))
goto err_breadcrumbs;
return &engine->base;
@@ -266,19 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct mock_request *request, *rn;
+ struct i915_request *request, *rn;
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
- intel_engine_write_global_seqno(engine, 0);
}
void mock_engine_free(struct intel_engine_cs *engine)
@@ -293,7 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
if (ce)
intel_context_unpin(ce);
- __intel_context_unpin(engine->i915->kernel_context, engine);
+ intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
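The converted mock_context_destroy() above follows the usual kref release idiom: the callback receives the embedded struct kref and container_of() recovers the enclosing intel_context. A minimal standalone sketch of that idiom, using hypothetical demo_* names rather than the i915 types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical object embedding a kref, standing in for struct intel_context. */
struct demo_ctx {
	struct kref ref;
	/* ... payload ... */
};

static void demo_ctx_destroy(struct kref *ref)
{
	/* Recover the containing object from its embedded kref member. */
	struct demo_ctx *ctx = container_of(ref, struct demo_ctx, ref);

	kfree(ctx);
}

static void demo_ctx_put(struct demo_ctx *ctx)
{
	/* Invokes demo_ctx_destroy() once the last reference is dropped. */
	kref_put(&ctx->ref, demo_ctx_destroy);
}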
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 14ae46fda49f..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
- kmem_cache_destroy(i915->priorities);
- kmem_cache_destroy(i915->dependencies);
- kmem_cache_destroy(i915->requests);
- kmem_cache_destroy(i915->vmas);
- kmem_cache_destroy(i915->objects);
-
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
+
+ i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -184,11 +182,12 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(i915);
+ mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
@@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
- if (!i915->objects)
- goto err_wq;
-
- i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!i915->vmas)
- goto err_objects;
-
- i915->requests = KMEM_CACHE(mock_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!i915->requests)
- goto err_vmas;
-
- i915->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!i915->dependencies)
- goto err_requests;
-
- i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!i915->priorities)
- goto err_dependencies;
-
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
- mkwrite_device_info(i915)->ring_mask = BIT(0);
+ mkwrite_device_info(i915)->engine_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
goto err_unlock;
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
+ i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->engine[RCS0])
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -256,16 +230,6 @@ err_context:
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
- kmem_cache_destroy(i915->priorities);
-err_dependencies:
- kmem_cache_destroy(i915->dependencies);
-err_requests:
- kmem_cache_destroy(i915->requests);
-err_vmas:
- kmem_cache_destroy(i915->vmas);
-err_objects:
- kmem_cache_destroy(i915->objects);
-err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0dc29e242597..d1a7c9608712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine,
unsigned long delay)
{
struct i915_request *request;
- struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
- mock = container_of(request, typeof(*mock), base);
- mock->delay = delay;
-
- return &mock->base;
+ request->mock.delay = delay;
+ return request;
}
bool mock_cancel_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
bool was_queued;
spin_lock_irq(&engine->hw_lock);
- was_queued = !list_empty(&mock->link);
- list_del_init(&mock->link);
+ was_queued = !list_empty(&request->mock.link);
+ list_del_init(&request->mock.link);
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 995fb728380c..4acf0211df20 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -29,13 +29,6 @@
#include "../i915_request.h"
-struct mock_request {
- struct i915_request base;
-
- struct list_head link;
- unsigned long delay;
-};
-
struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index d2de9ece2118..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -14,8 +14,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->fence_context = context;
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..ff8999c63a12 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -26,21 +26,21 @@
#define __nop_write(x) \
static void \
-nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
__nop_write(8)
__nop_write(16)
__nop_write(32)
#define __nop_read(x) \
static u##x \
-nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
__nop_read(8)
__nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
- ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
}
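For reference, the reworked stamper macros above now take the intel_uncore pointer; __nop_write(8), for example, expands to

static void
nop_write8(struct intel_uncore *uncore, i915_reg_t reg, u8 val, bool trace) { }

so ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop) can point the uncore's MMIO write hooks at the no-op 8/16/32-bit variants.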
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 31c93c3ccd00..e0b1ec821960 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,16 +23,20 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <drm/drm_mipi_dsi.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -78,7 +82,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port), mask, mask,
100))
DRM_ERROR("DPI FIFOs are not empty\n");
@@ -148,7 +152,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
data_mask, 0,
50))
@@ -162,7 +166,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 0,
50)) {
@@ -174,7 +178,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port),
data_mask, data_mask,
50))
@@ -234,7 +238,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port), mask, mask,
100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@@ -375,16 +379,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED,
+ GLK_MIPIIO_PORT_POWERED,
+ 20))
DRM_ERROR("MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
- cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
- DEVICE_READY);
+ cold_boot |=
+ !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@@ -399,9 +405,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY,
+ GLK_PHY_STATUS_PORT_READY,
+ 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -425,8 +433,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE,
+ 0,
+ 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -449,17 +460,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE,
+ GLK_DATA_LANE_STOP_STATE,
+ 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
- AFE_LATCHOUT, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT,
+ AFE_LATCHOUT,
+ 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -559,7 +574,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -567,7 +582,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
@@ -588,7 +603,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -638,7 +653,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(dev_priv,
+ intel_wait_for_register(&dev_priv->uncore,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
@@ -1682,7 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@@ -1793,13 +1808,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1807,9 +1816,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
-
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 954d5a8c4fa7..5e7b1fb2db5d 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
0,
@@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
BXT_DSI_PLL_LOCKED,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index c935cbe059a7..3e8bece620df 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data)
}
/* Special case for LDB, one device for two channels */
- if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ if (of_node_name_eq(np, "lvds-channel")) {
np = of_get_parent(np);
of_node_put(np);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 54011df8c2e8..9cc1d678674f 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -295,7 +295,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
- DRM_BUS_FLAG_PIXDATA_POSEDGE);
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
new file mode 100644
index 000000000000..bb4ddc6bb0a6
--- /dev/null
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+config DRM_LIMA
+ tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
+ depends on DRM
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MMU
+ depends on COMMON_CLK
+ depends on OF
+ select DRM_SCHED
+ help
+ DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
new file mode 100644
index 000000000000..38cc70281ba5
--- /dev/null
+++ b/drivers/gpu/drm/lima/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+lima-y := \
+ lima_drv.o \
+ lima_device.o \
+ lima_pmu.o \
+ lima_l2_cache.o \
+ lima_mmu.o \
+ lima_gp.o \
+ lima_pp.o \
+ lima_gem.o \
+ lima_vm.o \
+ lima_sched.o \
+ lima_ctx.o \
+ lima_gem_prime.o \
+ lima_dlbu.o \
+ lima_bcast.o \
+ lima_object.o
+
+obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
new file mode 100644
index 000000000000..288398027bfa
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_bcast.h"
+#include "lima_regs.h"
+
+#define bcast_write(reg, data) writel(data, ip->iomem + reg)
+#define bcast_read(reg) readl(ip->iomem + reg)
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_bcast;
+ int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
+}
+
+int lima_bcast_init(struct lima_ip *ip)
+{
+ int i, mask = 0;
+
+ for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
+ if (ip->dev->ip[i].present)
+ mask |= 1 << (i - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
+ return 0;
+}
+
+void lima_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
new file mode 100644
index 000000000000..c47e58563d0a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_BCAST_H__
+#define __LIMA_BCAST_H__
+
+struct lima_ip;
+
+int lima_bcast_init(struct lima_ip *ip);
+void lima_bcast_fini(struct lima_ip *ip);
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
new file mode 100644
index 000000000000..22fff6caa961
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+
+#include "lima_device.h"
+#include "lima_ctx.h"
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
+{
+ struct lima_ctx *ctx;
+ int i, err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ kref_init(&ctx->refcnt);
+
+ for (i = 0; i < lima_pipe_num; i++) {
+ err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+ if (err)
+ goto err_out0;
+ }
+
+ err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err < 0)
+ goto err_out0;
+
+ return 0;
+
+err_out0:
+ for (i--; i >= 0; i--)
+ lima_sched_context_fini(dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+ return err;
+}
+
+static void lima_ctx_do_release(struct kref *ref)
+{
+ struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
+ int i;
+
+ for (i = 0; i < lima_pipe_num; i++)
+ lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+}
+
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_erase(&mgr->handles, id);
+ if (ctx)
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_load(&mgr->handles, id);
+ if (ctx)
+ kref_get(&ctx->refcnt);
+ mutex_unlock(&mgr->lock);
+ return ctx;
+}
+
+void lima_ctx_put(struct lima_ctx *ctx)
+{
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+}
+
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->lock);
+ xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
+}
+
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
+{
+ struct lima_ctx *ctx;
+ unsigned long id;
+
+ xa_for_each(&mgr->handles, id, ctx) {
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ }
+
+ xa_destroy(&mgr->handles);
+ mutex_destroy(&mgr->lock);
+}
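The context manager above is an allocating XArray: xa_alloc() hands out a free 32-bit id per context, xa_load() resolves an id under the manager lock, and xa_erase() detaches it before the final kref_put(). A minimal sketch of that allocate/lookup/erase lifecycle, with a made-up demo_entry payload in place of struct lima_ctx:

#include <linux/xarray.h>
#include <linux/slab.h>

/* Hypothetical payload tracked by id, standing in for struct lima_ctx. */
struct demo_entry {
	int value;
};

/* XA_FLAGS_ALLOC lets xa_alloc() pick the id for us. */
static DEFINE_XARRAY_ALLOC(demo_handles);

static int demo_store(struct demo_entry *e, u32 *id)
{
	/* Allocate a free id within the 32-bit range and store e under it. */
	return xa_alloc(&demo_handles, id, e, xa_limit_32b, GFP_KERNEL);
}

static struct demo_entry *demo_lookup(u32 id)
{
	return xa_load(&demo_handles, id);
}

static void demo_drop(u32 id)
{
	struct demo_entry *e = xa_erase(&demo_handles, id);

	kfree(e);
}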
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
new file mode 100644
index 000000000000..6154e5c9bfe4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_CTX_H__
+#define __LIMA_CTX_H__
+
+#include <linux/xarray.h>
+
+#include "lima_device.h"
+
+struct lima_ctx {
+ struct kref refcnt;
+ struct lima_device *dev;
+ struct lima_sched_context context[lima_pipe_num];
+ atomic_t guilty;
+};
+
+struct lima_ctx_mgr {
+ struct mutex lock;
+ struct xarray handles;
+};
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
+void lima_ctx_put(struct lima_ctx *ctx);
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
new file mode 100644
index 000000000000..570d0e93f9a9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_pp.h"
+#include "lima_mmu.h"
+#include "lima_pmu.h"
+#include "lima_l2_cache.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+
+struct lima_ip_desc {
+ char *name;
+ char *irq_name;
+ bool must_have[lima_gpu_num];
+ int offset[lima_gpu_num];
+
+ int (*init)(struct lima_ip *ip);
+ void (*fini)(struct lima_ip *ip);
+};
+
+#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
+ [lima_ip_##ipname] = { \
+ .name = #ipname, \
+ .irq_name = irq, \
+ .must_have = { \
+ [lima_gpu_mali400] = mst0, \
+ [lima_gpu_mali450] = mst1, \
+ }, \
+ .offset = { \
+ [lima_gpu_mali400] = off0, \
+ [lima_gpu_mali450] = off1, \
+ }, \
+ .init = lima_##func##_init, \
+ .fini = lima_##func##_fini, \
+ }
+
+static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
+ LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
+ LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
+ LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
+ LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
+ LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
+ LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
+ LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
+ LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
+ LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
+ LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
+ LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
+ LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
+ LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
+ LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
+ LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
+ LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
+ LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
+ LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
+ LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
+ LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
+ LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
+ LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
+ LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
+ LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
+};
+
+const char *lima_ip_name(struct lima_ip *ip)
+{
+ return lima_ip_desc[ip->id].name;
+}
+
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+ unsigned long bus_rate, gpu_rate;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
+ return PTR_ERR(dev->clk_bus);
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
+ return PTR_ERR(dev->clk_gpu);
+ }
+
+ bus_rate = clk_get_rate(dev->clk_bus);
+ dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
+
+ gpu_rate = clk_get_rate(dev->clk_gpu);
+ dev_info(dev->dev, "mod rate = %lu", gpu_rate);
+
+ err = clk_prepare_enable(dev->clk_bus);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(dev->clk_gpu);
+ if (err)
+ goto error_out0;
+
+ dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ goto error_out1;
+ } else if (dev->reset != NULL) {
+ err = reset_control_deassert(dev->reset);
+ if (err)
+ goto error_out1;
+ }
+
+ return 0;
+
+error_out1:
+ clk_disable_unprepare(dev->clk_gpu);
+error_out0:
+ clk_disable_unprepare(dev->clk_bus);
+ return err;
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ if (dev->reset != NULL)
+ reset_control_assert(dev->reset);
+ clk_disable_unprepare(dev->clk_gpu);
+ clk_disable_unprepare(dev->clk_bus);
+}
+
+static int lima_regulator_init(struct lima_device *dev)
+{
+ int ret;
+
+ dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
+ if (IS_ERR(dev->regulator)) {
+ ret = PTR_ERR(dev->regulator);
+ dev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_fini(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
+static int lima_init_ip(struct lima_device *dev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = dev->ip + index;
+ int offset = desc->offset[dev->id];
+ bool must = desc->must_have[dev->id];
+ int err;
+
+ if (offset < 0)
+ return 0;
+
+ ip->dev = dev;
+ ip->id = index;
+ ip->iomem = dev->iomem + offset;
+ if (desc->irq_name) {
+ err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (err < 0)
+ goto out;
+ ip->irq = err;
+ }
+
+ err = desc->init(ip);
+ if (!err) {
+ ip->present = true;
+ return 0;
+ }
+
+out:
+ return must ? err : 0;
+}
+
+static void lima_fini_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->fini(ip);
+}
+
+static int lima_init_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ int err;
+
+ err = lima_sched_pipe_init(pipe, "gp");
+ if (err)
+ return err;
+
+ pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
+ pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
+ pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
+
+ err = lima_gp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+ lima_gp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+static int lima_init_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ int err, i;
+
+ err = lima_sched_pipe_init(pipe, "pp");
+ if (err)
+ return err;
+
+ for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
+ struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
+ struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
+ struct lima_ip *l2_cache;
+
+ if (dev->id == lima_gpu_mali400)
+ l2_cache = dev->ip + lima_ip_l2_cache0;
+ else
+ l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
+
+ if (pp->present && ppmmu->present && l2_cache->present) {
+ pipe->mmu[pipe->num_mmu++] = ppmmu;
+ pipe->processor[pipe->num_processor++] = pp;
+ if (!pipe->l2_cache[i >> 2])
+ pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
+ }
+ }
+
+ if (dev->ip[lima_ip_bcast].present) {
+ pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
+ pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
+ }
+
+ err = lima_pp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ lima_pp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+int lima_device_init(struct lima_device *ldev)
+{
+ int err, i;
+ struct resource *res;
+
+ dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+
+ err = lima_clk_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "clk init fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "regulator init fail %d\n", err);
+ goto err_out0;
+ }
+
+ ldev->empty_vm = lima_vm_create(ldev);
+ if (!ldev->empty_vm) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ ldev->va_start = 0;
+ if (ldev->id == lima_gpu_mali450) {
+ ldev->va_end = LIMA_VA_RESERVE_START;
+ ldev->dlbu_cpu = dma_alloc_wc(
+ ldev->dev, LIMA_PAGE_SIZE,
+ &ldev->dlbu_dma, GFP_KERNEL);
+ if (!ldev->dlbu_cpu) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ } else
+ ldev->va_end = LIMA_VA_RESERVE_END;
+
+ res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
+ ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ if (IS_ERR(ldev->iomem)) {
+ dev_err(ldev->dev, "fail to ioremap iomem\n");
+ err = PTR_ERR(ldev->iomem);
+ goto err_out3;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_init_ip(ldev, i);
+ if (err)
+ goto err_out4;
+ }
+
+ err = lima_init_gp_pipe(ldev);
+ if (err)
+ goto err_out4;
+
+ err = lima_init_pp_pipe(ldev);
+ if (err)
+ goto err_out5;
+
+ return 0;
+
+err_out5:
+ lima_fini_gp_pipe(ldev);
+err_out4:
+ while (--i >= 0)
+ lima_fini_ip(ldev, i);
+err_out3:
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+err_out2:
+ lima_vm_put(ldev->empty_vm);
+err_out1:
+ lima_regulator_fini(ldev);
+err_out0:
+ lima_clk_fini(ldev);
+ return err;
+}
+
+void lima_device_fini(struct lima_device *ldev)
+{
+ int i;
+
+ lima_fini_pp_pipe(ldev);
+ lima_fini_gp_pipe(ldev);
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_fini_ip(ldev, i);
+
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+
+ lima_vm_put(ldev->empty_vm);
+
+ lima_regulator_fini(ldev);
+
+ lima_clk_fini(ldev);
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
new file mode 100644
index 000000000000..31158d86271c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DEVICE_H__
+#define __LIMA_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/delay.h>
+
+#include "lima_sched.h"
+
+enum lima_gpu_id {
+ lima_gpu_mali400 = 0,
+ lima_gpu_mali450,
+ lima_gpu_num,
+};
+
+enum lima_ip_id {
+ lima_ip_pmu,
+ lima_ip_gpmmu,
+ lima_ip_ppmmu0,
+ lima_ip_ppmmu1,
+ lima_ip_ppmmu2,
+ lima_ip_ppmmu3,
+ lima_ip_ppmmu4,
+ lima_ip_ppmmu5,
+ lima_ip_ppmmu6,
+ lima_ip_ppmmu7,
+ lima_ip_gp,
+ lima_ip_pp0,
+ lima_ip_pp1,
+ lima_ip_pp2,
+ lima_ip_pp3,
+ lima_ip_pp4,
+ lima_ip_pp5,
+ lima_ip_pp6,
+ lima_ip_pp7,
+ lima_ip_l2_cache0,
+ lima_ip_l2_cache1,
+ lima_ip_l2_cache2,
+ lima_ip_dlbu,
+ lima_ip_bcast,
+ lima_ip_pp_bcast,
+ lima_ip_ppmmu_bcast,
+ lima_ip_num,
+};
+
+struct lima_device;
+
+struct lima_ip {
+ struct lima_device *dev;
+ enum lima_ip_id id;
+ bool present;
+
+ void __iomem *iomem;
+ int irq;
+
+ union {
+ /* gp/pp */
+ bool async_reset;
+ /* l2 cache */
+ spinlock_t lock;
+ } data;
+};
+
+enum lima_pipe_id {
+ lima_pipe_gp,
+ lima_pipe_pp,
+ lima_pipe_num,
+};
+
+struct lima_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ enum lima_gpu_id id;
+ u32 gp_version;
+ u32 pp_version;
+ int num_pp;
+
+ void __iomem *iomem;
+ struct clk *clk_bus;
+ struct clk *clk_gpu;
+ struct reset_control *reset;
+ struct regulator *regulator;
+
+ struct lima_ip ip[lima_ip_num];
+ struct lima_sched_pipe pipe[lima_pipe_num];
+
+ struct lima_vm *empty_vm;
+ uint64_t va_start;
+ uint64_t va_end;
+
+ u32 *dlbu_cpu;
+ dma_addr_t dlbu_dma;
+};
+
+static inline struct lima_device *
+to_lima_dev(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+int lima_device_init(struct lima_device *ldev);
+void lima_device_fini(struct lima_device *ldev);
+
+const char *lima_ip_name(struct lima_ip *ip);
+
+typedef int (*lima_poll_func_t)(struct lima_ip *);
+
+static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
+ int sleep_us, int timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep_if(sleep_us);
+ while (1) {
+ if (func(ip))
+ return 0;
+
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return 0;
+}
+
+#endif
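lima_poll_timeout() above is a busy/sleep poll helper in the readx_poll_timeout() mould: it keeps calling the supplied predicate until it returns non-zero, or fails with -ETIMEDOUT once timeout_us elapses. A hedged usage sketch, with a hypothetical status register offset and ready bit:

#include <linux/io.h>

#include "lima_device.h"

/* Hypothetical predicate: non-zero once the IP reports ready. */
static int demo_ip_ready(struct lima_ip *ip)
{
	return readl(ip->iomem + 0x10) & 0x1;	/* assumed status reg/bit */
}

static int demo_wait_ready(struct lima_ip *ip)
{
	/* Sleep up to ~100us between polls, give up after 10ms. */
	return lima_poll_timeout(ip, demo_ip_ready, 100, 10000);
}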
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
new file mode 100644
index 000000000000..8399ceffb94b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_dlbu.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define dlbu_write(reg, data) writel(data, ip->iomem + reg)
+#define dlbu_read(reg) readl(ip->iomem + reg)
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+ int i, mask = 0;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask);
+}
+
+void lima_dlbu_disable(struct lima_device *dev)
+{
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0);
+}
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
+{
+ dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]);
+ dlbu_write(LIMA_DLBU_FB_DIM, reg[1]);
+ dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]);
+ dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
+
+ return 0;
+}
+
+void lima_dlbu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
new file mode 100644
index 000000000000..16f877984466
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DLBU_H__
+#define __LIMA_DLBU_H__
+
+struct lima_ip;
+struct lima_device;
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp);
+void lima_dlbu_disable(struct lima_device *dev);
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+
+int lima_dlbu_init(struct lima_ip *ip);
+void lima_dlbu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
new file mode 100644
index 000000000000..f9a281a62083
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+
+int lima_sched_timeout_ms;
+
+MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
+module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+
+static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_get_param *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->pad)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_LIMA_PARAM_GPU_ID:
+ switch (ldev->id) {
+ case lima_gpu_mali400:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
+ break;
+ case lima_gpu_mali450:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
+ break;
+ default:
+ args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
+ break;
+ }
+ break;
+
+ case DRM_LIMA_PARAM_NUM_PP:
+ args->value = ldev->pipe[lima_pipe_pp].num_processor;
+ break;
+
+ case DRM_LIMA_PARAM_GP_VERSION:
+ args->value = ldev->gp_version;
+ break;
+
+ case DRM_LIMA_PARAM_PP_VERSION:
+ args->value = ldev->pp_version;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_create *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->size == 0)
+ return -EINVAL;
+
+ return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
+}
+
+static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_info *args = data;
+
+ return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
+}
+
+static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_submit *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_sched_pipe *pipe;
+ struct lima_sched_task *task;
+ struct lima_ctx *ctx;
+ struct lima_submit submit = {0};
+ size_t size;
+ int err = 0;
+
+ if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
+ return -EINVAL;
+
+ if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
+ return -EINVAL;
+
+ pipe = ldev->pipe + args->pipe;
+ if (args->frame_size != pipe->frame_size)
+ return -EINVAL;
+
+ bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ size = args->nr_bos * sizeof(*submit.bos);
+ if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
+ err = -EFAULT;
+ goto out0;
+ }
+
+ task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
+ if (!task) {
+ err = -ENOMEM;
+ goto out0;
+ }
+
+ task->frame = task + 1;
+ if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
+ err = -EFAULT;
+ goto out1;
+ }
+
+ err = pipe->task_validate(pipe, task);
+ if (err)
+ goto out1;
+
+ ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
+ if (!ctx) {
+ err = -ENOENT;
+ goto out1;
+ }
+
+ submit.pipe = args->pipe;
+ submit.bos = bos;
+ submit.lbos = (void *)bos + size;
+ submit.nr_bos = args->nr_bos;
+ submit.task = task;
+ submit.ctx = ctx;
+ submit.flags = args->flags;
+ submit.in_sync[0] = args->in_sync[0];
+ submit.in_sync[1] = args->in_sync[1];
+ submit.out_sync = args->out_sync;
+
+ err = lima_gem_submit(file, &submit);
+
+ lima_ctx_put(ctx);
+out1:
+ if (err)
+ kmem_cache_free(pipe->task_slab, task);
+out0:
+ kvfree(bos);
+ return err;
+}
+
+static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_wait *args = data;
+
+ if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
+ return -EINVAL;
+
+ return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
+}
+
+static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
+}
+
+static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_free(&priv->ctx_mgr, args->id);
+}
+
+static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ int err;
+ struct lima_drm_priv *priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->vm = lima_vm_create(ldev);
+ if (!priv->vm) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ lima_ctx_mgr_init(&priv->ctx_mgr);
+
+ file->driver_priv = priv;
+ return 0;
+
+err_out0:
+ kfree(priv);
+ return err;
+}
+
+static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ lima_ctx_mgr_fini(&priv->ctx_mgr);
+ lima_vm_put(priv->vm);
+ kfree(priv);
+}
+
+static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations lima_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .mmap = lima_gem_mmap,
+};
+
+static struct drm_driver lima_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
+ .open = lima_drm_driver_open,
+ .postclose = lima_drm_driver_postclose,
+ .ioctls = lima_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
+ .fops = &lima_drm_driver_fops,
+ .gem_free_object_unlocked = lima_gem_free_object,
+ .gem_open_object = lima_gem_object_open,
+ .gem_close_object = lima_gem_object_close,
+ .gem_vm_ops = &lima_gem_vm_ops,
+ .name = "lima",
+ .desc = "lima DRM",
+ .date = "20190217",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
+ .gem_prime_mmap = lima_gem_prime_mmap,
+};
+
+static int lima_pdev_probe(struct platform_device *pdev)
+{
+ struct lima_device *ldev;
+ struct drm_device *ddev;
+ int err;
+
+ err = lima_sched_slab_init();
+ if (err)
+ return err;
+
+ ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ ldev->pdev = pdev;
+ ldev->dev = &pdev->dev;
+ ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+
+ platform_set_drvdata(pdev, ldev);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->dev_private = ldev;
+ ldev->ddev = ddev;
+
+ err = lima_device_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+ lima_device_fini(ldev);
+err_out1:
+ drm_dev_put(ddev);
+err_out0:
+ lima_sched_slab_fini();
+ return err;
+}
+
+static int lima_pdev_remove(struct platform_device *pdev)
+{
+ struct lima_device *ldev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = ldev->ddev;
+
+ drm_dev_unregister(ddev);
+ lima_device_fini(ldev);
+ drm_dev_put(ddev);
+ lima_sched_slab_fini();
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
+ { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver lima_platform_driver = {
+ .probe = lima_pdev_probe,
+ .remove = lima_pdev_remove,
+ .driver = {
+ .name = "lima",
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init lima_init(void)
+{
+ return platform_driver_register(&lima_platform_driver);
+}
+module_init(lima_init);
+
+static void __exit lima_exit(void)
+{
+ platform_driver_unregister(&lima_platform_driver);
+}
+module_exit(lima_exit);
+
+MODULE_AUTHOR("Lima Project Developers");
+MODULE_DESCRIPTION("Lima DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
new file mode 100644
index 000000000000..69c7344715c9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRV_H__
+#define __LIMA_DRV_H__
+
+#include <drm/drm_file.h>
+
+#include "lima_ctx.h"
+
+extern int lima_sched_timeout_ms;
+
+struct lima_vm;
+struct lima_bo;
+struct lima_sched_task;
+
+struct drm_lima_gem_submit_bo;
+
+struct lima_drm_priv {
+ struct lima_vm *vm;
+ struct lima_ctx_mgr ctx_mgr;
+};
+
+struct lima_submit {
+ struct lima_ctx *ctx;
+ int pipe;
+ u32 flags;
+
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_bo **lbos;
+ u32 nr_bos;
+
+ u32 in_sync[2];
+ u32 out_sync;
+
+ struct lima_sched_task *task;
+};
+
+static inline struct lima_drm_priv *
+to_lima_drm_priv(struct drm_file *file)
+{
+ return file->driver_priv;
+}
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
new file mode 100644
index 000000000000..477c0f766663
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/mm.h>
+#include <linux/sync_file.h>
+#include <linux/pfn_t.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
+{
+ int err;
+ struct lima_bo *bo;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ bo = lima_bo_create(ldev, size, flags, NULL, NULL);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put_unlocked(&bo->gem);
+
+ return err;
+}
+
+void lima_gem_free_object(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (!list_empty(&bo->va))
+ dev_err(obj->dev->dev, "lima gem free bo still has va\n");
+
+ lima_bo_destroy(bo);
+}
+
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ return lima_vm_bo_add(vm, bo, true);
+}
+
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ lima_vm_bo_del(vm, bo);
+}
+
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ int err;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = to_lima_bo(obj);
+
+ *va = lima_vm_get_va(vm, bo);
+
+ err = drm_gem_create_mmap_offset(obj);
+ if (!err)
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ drm_gem_object_put_unlocked(obj);
+ return err;
+}
+
+static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct lima_bo *bo = to_lima_bo(obj);
+ pfn_t pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
+}
+
+const struct vm_operations_struct lima_gem_vm_ops = {
+ .fault = lima_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+void lima_set_vma_flags(struct vm_area_struct *vma)
+{
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_page_prot = pgprot_writecombine(prot);
+}
+
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
+
+static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
+ bool write, bool explicit)
+{
+ int err = 0;
+
+ if (!write) {
+ err = reservation_object_reserve_shared(bo->gem.resv, 1);
+ if (err)
+ return err;
+ }
+
+ /* explicit sync uses the user-passed dependency fence */
+ if (explicit)
+ return 0;
+
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
+}
+
+static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i, ret = 0, contended, slow_locked = -1;
+
+ ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+ for (i = 0; i < nr_bos; i++) {
+ if (i == slow_locked) {
+ slow_locked = -1;
+ continue;
+ }
+
+ ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
+ if (ret < 0) {
+ contended = i;
+ goto err;
+ }
+ }
+
+ ww_acquire_done(ctx);
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+
+ if (slow_locked >= 0)
+ ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
+
+ if (ret == -EDEADLK) {
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(
+ &bos[contended]->gem.resv->lock, ctx);
+ if (!ret) {
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+ ww_acquire_fini(ctx);
+
+ return ret;
+}
+
+static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < nr_bos; i++)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+ ww_acquire_fini(ctx);
+}
+
+static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
+ struct dma_fence *fence = NULL;
+
+ if (!submit->in_sync[i])
+ continue;
+
+ err = drm_syncobj_find_fence(file, submit->in_sync[i],
+ 0, 0, &fence);
+ if (err)
+ return err;
+
+ err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ if (err) {
+ dma_fence_put(fence);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err = 0;
+ struct ww_acquire_ctx ctx;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ struct drm_syncobj *out_sync = NULL;
+ struct dma_fence *fence;
+ struct lima_bo **bos = submit->lbos;
+
+ if (submit->out_sync) {
+ out_sync = drm_syncobj_find(file, submit->out_sync);
+ if (!out_sync)
+ return -ENOENT;
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+
+ obj = drm_gem_object_lookup(file, submit->bos[i].handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ bo = to_lima_bo(obj);
+
+ /* increase the refcount of the GPU VA mapping so it cannot be unmapped
+ * while the task executes; the refcount is dropped when the task is done
+ */
+ err = lima_vm_bo_add(vm, bo, false);
+ if (err) {
+ drm_gem_object_put_unlocked(obj);
+ goto err_out0;
+ }
+
+ bos[i] = bo;
+ }
+
+ err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ if (err)
+ goto err_out0;
+
+ err = lima_sched_task_init(
+ submit->task, submit->ctx->context + submit->pipe,
+ bos, submit->nr_bos, vm);
+ if (err)
+ goto err_out1;
+
+ err = lima_gem_add_deps(file, submit);
+ if (err)
+ goto err_out2;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ err = lima_gem_sync_bo(
+ submit->task, bos[i],
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
+ submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
+ if (err)
+ goto err_out2;
+ }
+
+ fence = lima_sched_context_queue_task(
+ submit->ctx->context + submit->pipe, submit->task);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+ else
+ reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+ }
+
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+
+ for (i = 0; i < submit->nr_bos; i++)
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+
+ if (out_sync) {
+ drm_syncobj_replace_fence(out_sync, fence);
+ drm_syncobj_put(out_sync);
+ }
+
+ dma_fence_put(fence);
+
+ return 0;
+
+err_out2:
+ lima_sched_task_fini(submit->task);
+err_out1:
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+err_out0:
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (!bos[i])
+ break;
+ lima_vm_bo_del(vm, bos[i]);
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+ }
+ if (out_sync)
+ drm_syncobj_put(out_sync);
+ return err;
+}
+
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
+{
+ bool write = op & LIMA_GEM_WAIT_WRITE;
+ long ret, timeout;
+
+ if (!op)
+ return 0;
+
+ timeout = drm_timeout_abs_to_jiffies(timeout_ns);
+
+ ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+ if (ret == 0)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ return ret;
+}
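Aside, not part of the patch: the submit path above splits its reservation_object bookkeeping in two, reserving a shared slot in lima_gem_sync_bo() and publishing the scheduler fence only after the task is queued. The sketch below folds the same contract into one hypothetical helper, assuming the 5.2-era reservation API and that the caller holds the object's resv ww_mutex.

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* Hypothetical helper; caller must hold resv->lock (the ww_mutex). */
static int publish_job_fence(struct reservation_object *resv,
                             struct dma_fence *fence, bool write)
{
        int err;

        if (write) {
                /* a writer takes the exclusive slot */
                reservation_object_add_excl_fence(resv, fence);
                return 0;
        }

        /* a reader must reserve a shared slot before filling it */
        err = reservation_object_reserve_shared(resv, 1);
        if (err)
                return err;

        reservation_object_add_shared_fence(resv, fence);
        return 0;
}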
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
new file mode 100644
index 000000000000..556111a01135
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_H__
+#define __LIMA_GEM_H__
+
+struct lima_bo;
+struct lima_submit;
+
+extern const struct vm_operations_struct lima_gem_vm_ops;
+
+struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle);
+void lima_gem_free_object(struct drm_gem_object *obj);
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
+
+void lima_set_vma_flags(struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
new file mode 100644
index 000000000000..9c6d9f1dba55
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include "lima_device.h"
+#include "lima_object.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_bo *bo;
+
+ bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
+ attach->dmabuf->resv);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
+
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ return drm_prime_pages_to_sg(bo->pages, npages);
+}
+
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
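Aside, not part of the patch: these helpers only take effect once they are plugged into the driver's struct drm_driver, which happens outside this hunk. A hedged sketch of that wiring, assuming the 5.2-era drm_driver fields and omitting the other mandatory members (name, fops, ioctls, ...):

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>

#include "lima_gem_prime.h"

static struct drm_driver lima_drm_driver_sketch = {
        .driver_features           = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
        .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
        .gem_prime_import          = drm_gem_prime_import,
        .gem_prime_get_sg_table    = lima_gem_prime_get_sg_table,
        .gem_prime_export          = drm_gem_prime_export,
        .gem_prime_mmap            = lima_gem_prime_mmap,
        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
};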
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
new file mode 100644
index 000000000000..34b4d35c21e3
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_PRIME_H__
+#define __LIMA_GEM_PRIME_H__
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
new file mode 100644
index 000000000000..ccf49faedebf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_regs.h"
+
+#define gp_write(reg, data) writel(data, ip->iomem + reg)
+#define gp_read(reg) readl(ip->iomem + reg)
+
+static irqreturn_t lima_gp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ u32 state = gp_read(LIMA_GP_INT_STAT);
+ u32 status = gp_read(LIMA_GP_STATUS);
+ bool done = false;
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & LIMA_GP_IRQ_MASK_ERROR) {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+
+ /* mask all interrupts before hard reset */
+ gp_write(LIMA_GP_INT_MASK, 0);
+
+ pipe->error = true;
+ done = true;
+ } else {
+ bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
+ LIMA_GP_IRQ_PLBU_END_CMD_LST);
+ bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
+ LIMA_GP_STATUS_PLBU_ACTIVE);
+ done = valid && !active;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, state);
+
+ if (done)
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static void lima_gp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
+ v & LIMA_GP_IRQ_RESET_COMPLETED,
+ 0, 100);
+ if (err) {
+ dev_err(dev->dev, "gp soft reset time out\n");
+ return err;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+ ip->data.async_reset = false;
+ return 0;
+}
+
+static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ (void)pipe;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
+ f[LIMA_GP_VSCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
+ return -EINVAL;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
+ f[LIMA_GP_VSCL_END_ADDR >> 2] &&
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ u32 cmd = 0;
+ int i;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
+ f[LIMA_GP_VSCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_VS;
+ if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_PLBU;
+
+ /* before any hw ops, wait for the previous task's async soft reset to finish */
+ lima_gp_soft_reset_async_wait(ip);
+
+ for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
+ writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);
+
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ gp_write(LIMA_GP_CMD, cmd);
+}
+
+static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+{
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_gp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "gp hard reset timeout\n");
+ return ret;
+ }
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
+{
+ lima_gp_soft_reset_async(pipe->processor[0]);
+}
+
+static void lima_gp_task_error(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
+ gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
+
+ lima_gp_hard_reset(ip);
+}
+
+static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ lima_sched_pipe_task_done(pipe);
+}
+
+static void lima_gp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = gp_read(LIMA_GP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xA07:
+ name = "mali200";
+ break;
+ case 0xC07:
+ name = "mali300";
+ break;
+ case 0xB07:
+ name = "mali400";
+ break;
+ case 0xD07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+static struct kmem_cache *lima_gp_task_slab;
+static int lima_gp_task_slab_refcnt;
+
+int lima_gp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_gp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ err = lima_gp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "gp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->gp_version = gp_read(LIMA_GP_VERSION);
+
+ return 0;
+}
+
+void lima_gp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_gp_pipe_init(struct lima_device *dev)
+{
+ int frame_size = sizeof(struct drm_lima_gp_frame);
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+ if (!lima_gp_task_slab) {
+ lima_gp_task_slab = kmem_cache_create_usercopy(
+ "lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_gp_task_slab)
+ return -ENOMEM;
+ }
+ lima_gp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_gp_task_slab;
+
+ pipe->task_validate = lima_gp_task_validate;
+ pipe->task_run = lima_gp_task_run;
+ pipe->task_fini = lima_gp_task_fini;
+ pipe->task_error = lima_gp_task_error;
+ pipe->task_mmu_error = lima_gp_task_mmu_error;
+
+ return 0;
+}
+
+void lima_gp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_gp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_gp_task_slab);
+ lima_gp_task_slab = NULL;
+ }
+}
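Aside, not part of the patch: the soft reset wait above leans on readl_poll_timeout() with a 0 us sleep interval and a 100 us timeout. Roughly what that call does, unfolded into an explicit busy-poll loop (the real macro lives in <linux/iopoll.h> and also handles sleeping between reads when a non-zero interval is given):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ktime.h>

#include "lima_device.h"
#include "lima_regs.h"

static int lima_gp_wait_reset_expanded(struct lima_ip *ip)
{
        ktime_t timeout = ktime_add_us(ktime_get(), 100);
        u32 v;

        for (;;) {
                v = readl(ip->iomem + LIMA_GP_INT_RAWSTAT);
                if (v & LIMA_GP_IRQ_RESET_COMPLETED)
                        return 0;               /* condition met */
                if (ktime_compare(ktime_get(), timeout) > 0)
                        return -ETIMEDOUT;      /* 100 us elapsed */
                cpu_relax();                    /* sleep interval is 0: busy poll */
        }
}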
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
new file mode 100644
index 000000000000..516e5c1babbb
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GP_H__
+#define __LIMA_GP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_gp_init(struct lima_ip *ip);
+void lima_gp_fini(struct lima_ip *ip);
+
+int lima_gp_pipe_init(struct lima_device *dev);
+void lima_gp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
new file mode 100644
index 000000000000..6873a7af5a5c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_l2_cache.h"
+#include "lima_regs.h"
+
+#define l2_cache_write(reg, data) writel(data, ip->iomem + reg)
+#define l2_cache_read(reg) readl(ip->iomem + reg)
+
+static int lima_l2_cache_wait_idle(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v,
+ !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
+ 0, 1000);
+ if (err) {
+ dev_err(dev->dev, "l2 cache wait command timeout\n");
+ return err;
+ }
+ return 0;
+}
+
+int lima_l2_cache_flush(struct lima_ip *ip)
+{
+ int ret;
+
+ spin_lock(&ip->data.lock);
+ l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
+ ret = lima_l2_cache_wait_idle(ip);
+ spin_unlock(&ip->data.lock);
+ return ret;
+}
+
+int lima_l2_cache_init(struct lima_ip *ip)
+{
+ int i, err;
+ u32 size;
+ struct lima_device *dev = ip->dev;
+
+ /* l2_cache2 only exists when at least one of PP4-7 is present */
+ if (ip->id == lima_ip_l2_cache2) {
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present)
+ break;
+ }
+ if (i > lima_ip_pp7)
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ip->data.lock);
+
+ size = l2_cache_read(LIMA_L2_CACHE_SIZE);
+ dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ 1 << (((size >> 16) & 0xff) - 10),
+ 1 << ((size >> 8) & 0xff),
+ 1 << (size & 0xff),
+ 1 << ((size >> 24) & 0xff));
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+void lima_l2_cache_fini(struct lima_ip *ip)
+{
+
+}
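Aside, not part of the patch: a worked example of the LIMA_L2_CACHE_SIZE decode printed by lima_l2_cache_init(). The fields are log2-encoded: bits [31:24] external bus width, [23:16] total size in bytes, [15:8] associativity, [7:0] line size. The register value below is hypothetical.

#include <linux/kernel.h>

static void lima_l2_size_decode_example(void)
{
        u32 size = 0x07100306;  /* hypothetical readout */

        /* 1 << (0x10 - 10) = 64  -> "64K"                 */
        /* 1 << 0x03        = 8   -> "8-way"               */
        /* 1 << 0x06        = 64  -> "64byte cache line"   */
        /* 1 << 0x07        = 128 -> "128bit external bus" */
        pr_info("l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
                1 << (((size >> 16) & 0xff) - 10),
                1 << ((size >> 8) & 0xff),
                1 << (size & 0xff),
                1 << ((size >> 24) & 0xff));
}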
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
new file mode 100644
index 000000000000..c63fb676ff14
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_L2_CACHE_H__
+#define __LIMA_L2_CACHE_H__
+
+struct lima_ip;
+
+int lima_l2_cache_init(struct lima_ip *ip);
+void lima_l2_cache_fini(struct lima_ip *ip);
+
+int lima_l2_cache_flush(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
new file mode 100644
index 000000000000..8e1651d6a61f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_mmu.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+#define mmu_write(reg, data) writel(data, ip->iomem + reg)
+#define mmu_read(reg) readl(ip->iomem + reg)
+
+#define lima_mmu_send_command(cmd, addr, val, cond) \
+({ \
+ int __ret; \
+ \
+ mmu_write(LIMA_MMU_COMMAND, cmd); \
+ __ret = readl_poll_timeout(ip->iomem + (addr), val, \
+ cond, 0, 100); \
+ if (__ret) \
+ dev_err(dev->dev, \
+ "mmu command %x timeout\n", cmd); \
+ __ret; \
+})
+
+static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_INT_STATUS);
+ struct lima_sched_pipe *pipe;
+
+ /* for shared irq case */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & LIMA_MMU_INT_PAGE_FAULT) {
+ u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
+
+ dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
+ fault, LIMA_MMU_STATUS_BUS_ID(status),
+ status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
+ lima_ip_name(ip));
+ }
+
+ if (status & LIMA_MMU_INT_READ_BUS_ERROR)
+ dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+
+ /* mask all interrupts before resume */
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_INT_CLEAR, status);
+
+ pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
+ lima_sched_pipe_mmu_error(pipe);
+
+ return IRQ_HANDLED;
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
+ dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
+ return -EIO;
+ }
+
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
+ return err;
+ }
+
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+void lima_mmu_fini(struct lima_ip *ip)
+{
+
+}
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
+{
+ struct lima_device *dev = ip->dev;
+ u32 v;
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_STALL_ACTIVE);
+
+ if (vm)
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+
+ /* flush the TLB */
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ !(v & LIMA_MMU_STATUS_STALL_ACTIVE));
+}
+
+void lima_mmu_page_fault_resume(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_STATUS);
+ u32 v;
+
+ if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
+ dev_info(dev->dev, "mmu resume\n");
+
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ }
+}
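Aside, not part of the patch: lima_mmu_send_command() is a statement-expression macro, so each use both issues the command and yields the poll result. Roughly what the ENABLE_PAGING call in lima_mmu_init() expands to, written out as a plain function:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#include "lima_device.h"
#include "lima_regs.h"

static int lima_mmu_enable_paging_expanded(struct lima_ip *ip)
{
        struct lima_device *dev = ip->dev;
        u32 v;
        int ret;

        writel(LIMA_MMU_COMMAND_ENABLE_PAGING, ip->iomem + LIMA_MMU_COMMAND);
        ret = readl_poll_timeout(ip->iomem + LIMA_MMU_STATUS, v,
                                 v & LIMA_MMU_STATUS_PAGING_ENABLED,
                                 0, 100);
        if (ret)
                dev_err(dev->dev, "mmu command %x timeout\n",
                        LIMA_MMU_COMMAND_ENABLE_PAGING);
        return ret;
}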
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
new file mode 100644
index 000000000000..8c78319bcc8e
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_MMU_H__
+#define __LIMA_MMU_H__
+
+struct lima_ip;
+struct lima_vm;
+
+int lima_mmu_init(struct lima_ip *ip);
+void lima_mmu_fini(struct lima_ip *ip);
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
+void lima_mmu_page_fault_resume(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
new file mode 100644
index 000000000000..5c41f859a72f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <drm/drm_prime.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_object.h"
+
+void lima_bo_destroy(struct lima_bo *bo)
+{
+ if (bo->sgt) {
+ kfree(bo->pages);
+ drm_prime_gem_destroy(&bo->gem, bo->sgt);
+ } else {
+ if (bo->pages_dma_addr) {
+ int i, npages = bo->gem.size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (bo->pages_dma_addr[i])
+ dma_unmap_page(bo->gem.dev->dev,
+ bo->pages_dma_addr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ }
+
+ if (bo->pages)
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ }
+
+ kfree(bo->pages_dma_addr);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
+ struct reservation_object *resv)
+{
+ struct lima_bo *bo;
+ int err;
+
+ size = PAGE_ALIGN(size);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+ bo->gem.resv = resv;
+
+ err = drm_gem_object_init(dev->ddev, &bo->gem, size);
+ if (err) {
+ kfree(bo);
+ return ERR_PTR(err);
+ }
+
+ return bo;
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv)
+{
+ int i, err;
+ size_t npages;
+ struct lima_bo *bo, *ret;
+
+ bo = lima_bo_create_struct(dev, size, flags, resv);
+ if (IS_ERR(bo))
+ return bo;
+
+ npages = bo->gem.size >> PAGE_SHIFT;
+
+ bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!bo->pages_dma_addr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ if (sgt) {
+ bo->sgt = sgt;
+
+ bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
+ if (!bo->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ err = drm_prime_sg_to_page_addr_arrays(
+ sgt, bo->pages, bo->pages_dma_addr, npages);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_out;
+ }
+ } else {
+ mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages)) {
+ ret = ERR_CAST(bo->pages);
+ bo->pages = NULL;
+ goto err_out;
+ }
+
+ for (i = 0; i < npages; i++) {
+ dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = ERR_PTR(-EFAULT);
+ goto err_out;
+ }
+ bo->pages_dma_addr[i] = addr;
+ }
+
+ }
+
+ return bo;
+
+err_out:
+ lima_bo_destroy(bo);
+ return ret;
+}
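Aside, not part of the patch: in the non-imported path above, mapping_set_gfp_mask(..., GFP_DMA32) keeps the shmem pages below 4 GiB, which the 32-bit GPU page table entries filled in by lima_vm require; each page is then streamed with dma_map_page(). The pair below isolates that per-page pattern; both helper names are hypothetical.

#include <linux/dma-mapping.h>

/* Returns 0 on mapping failure; callers treat 0 as "not mapped",
 * matching the zero-initialised pages_dma_addr[] array above.
 */
static dma_addr_t lima_map_one_page(struct device *dev, struct page *page)
{
        dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);

        if (dma_mapping_error(dev, addr))
                return 0;
        return addr;
}

static void lima_unmap_one_page(struct device *dev, dma_addr_t addr)
{
        if (addr)
                dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}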
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
new file mode 100644
index 000000000000..6738724afb7b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_OBJECT_H__
+#define __LIMA_OBJECT_H__
+
+#include <drm/drm_gem.h>
+
+#include "lima_device.h"
+
+struct lima_bo {
+ struct drm_gem_object gem;
+
+ struct page **pages;
+ dma_addr_t *pages_dma_addr;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct lima_bo, gem);
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv);
+void lima_bo_destroy(struct lima_bo *bo);
+void *lima_bo_vmap(struct lima_bo *bo);
+void lima_bo_vunmap(struct lima_bo *bo);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
new file mode 100644
index 000000000000..571f6d661581
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_pmu.h"
+#include "lima_regs.h"
+
+#define pmu_write(reg, data) writel(data, ip->iomem + reg)
+#define pmu_read(reg) readl(ip->iomem + reg)
+
+static int lima_pmu_wait_cmd(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT,
+ v, v & LIMA_PMU_INT_CMD_MASK,
+ 100, 100000);
+ if (err) {
+ dev_err(dev->dev, "timeout wait pmd cmd\n");
+ return err;
+ }
+
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ return 0;
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ int err;
+ u32 stat;
+
+ pmu_write(LIMA_PMU_INT_MASK, 0);
+
+ /* If this delay value is too low, the GPU becomes unstable
+ * at high GPU clock frequencies.
+ */
+ pmu_write(LIMA_PMU_SW_DELAY, 0xffff);
+
+ /* status reg 1=off 0=on */
+ stat = pmu_read(LIMA_PMU_STATUS);
+
+ /* power up all ip */
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_UP, stat);
+ err = lima_pmu_wait_cmd(ip);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+
+}
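Aside, not part of the patch: lima_pmu_init() above simply powers up every domain that LIMA_PMU_STATUS reports as off (1 = off, 0 = on). A hypothetical selective variant, written as if appended to this file so it can reuse the pmu_read/pmu_write macros and the domain masks from lima_regs.h:

static int lima_pmu_power_up_gp_and_pp0(struct lima_ip *ip)
{
        u32 want = LIMA_PMU_POWER_GP0_MASK |
                   LIMA_PMU_POWER_L2_MASK |
                   LIMA_PMU_POWER_PP_MASK(0);
        u32 off = pmu_read(LIMA_PMU_STATUS) & want;

        if (!off)
                return 0;       /* everything wanted is already on */

        pmu_write(LIMA_PMU_POWER_UP, off);
        return lima_pmu_wait_cmd(ip);
}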
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
new file mode 100644
index 000000000000..a2a18775eb07
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PMU_H__
+#define __LIMA_PMU_H__
+
+struct lima_ip;
+
+int lima_pmu_init(struct lima_ip *ip);
+void lima_pmu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
new file mode 100644
index 000000000000..d29721e177bf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_pp.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define pp_write(reg, data) writel(data, ip->iomem + reg)
+#define pp_read(reg) readl(ip->iomem + reg)
+
+static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
+{
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (state & LIMA_PP_IRQ_MASK_ERROR) {
+ u32 status = pp_read(LIMA_PP_STATUS);
+
+ dev_err(dev->dev, "pp error irq state=%x status=%x\n",
+ state, status);
+
+ pipe->error = true;
+
+ /* mask all interrupts before hard reset */
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, state);
+}
+
+static irqreturn_t lima_pp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ u32 state = pp_read(LIMA_PP_INT_STATUS);
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ lima_pp_handle_irq(ip, state);
+
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
+{
+ int i;
+ irqreturn_t ret = IRQ_NONE;
+ struct lima_ip *pp_bcast = data;
+ struct lima_device *dev = pp_bcast->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+ u32 status, state;
+
+ if (pipe->done & (1 << i))
+ continue;
+
+ /* read status first: if the interrupt state changes in between,
+ * we might otherwise miss handling this processor's interrupt
+ */
+ status = pp_read(LIMA_PP_STATUS);
+ state = pp_read(LIMA_PP_INT_STATUS);
+
+ if (state) {
+ lima_pp_handle_irq(ip, state);
+ ret = IRQ_HANDLED;
+ } else {
+ if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
+ continue;
+ }
+
+ pipe->done |= (1 << i);
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+ }
+
+ return ret;
+}
+
+static void lima_pp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_pp_soft_reset_poll(struct lima_ip *ip)
+{
+ return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
+ pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
+}
+
+static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+ return ret;
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ int i, err = 0;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ if (ip->id == lima_ip_pp_bcast) {
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++)
+ err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
+ } else
+ err = lima_pp_soft_reset_async_wait_one(ip);
+
+ ip->data.async_reset = false;
+ return err;
+}
+
+static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
+{
+ int i, j, n = 0;
+
+ for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
+ writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
+ writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
+ }
+}
+
+static int lima_pp_hard_reset_poll(struct lima_ip *ip)
+{
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_pp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
+ ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp hard reset timeout\n");
+ return ret;
+ }
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_pp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = pp_read(LIMA_PP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xC807:
+ name = "mali200";
+ break;
+ case 0xCE07:
+ name = "mali300";
+ break;
+ case 0xCD07:
+ name = "mali400";
+ break;
+ case 0xCF07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+int lima_pp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_pp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ err = lima_pp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->pp_version = pp_read(LIMA_PP_VERSION);
+
+ return 0;
+}
+
+void lima_pp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_pp_bcast_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ return 0;
+}
+
+void lima_pp_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
+static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ u32 num_pp;
+
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+
+ if (f->_pad)
+ return -EINVAL;
+ } else {
+ struct drm_lima_m400_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+ }
+
+ if (num_pp == 0 || num_pp > pipe->num_processor)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_pp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *frame = task->frame;
+ struct lima_device *dev = pipe->bcast_processor->dev;
+ struct lima_ip *ip = pipe->bcast_processor;
+ int i;
+
+ pipe->done = 0;
+ atomic_set(&pipe->task, frame->num_pp);
+
+ if (frame->use_dlbu) {
+ lima_dlbu_enable(dev, frame->num_pp);
+
+ frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
+ lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
+ } else
+ lima_dlbu_disable(dev);
+
+ lima_bcast_enable(dev, frame->num_pp);
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
+ if (!frame->use_dlbu)
+ pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
+ }
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ } else {
+ struct drm_lima_m400_pp_frame *frame = task->frame;
+ int i;
+
+ atomic_set(&pipe->task, frame->num_pp);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ frame->frame[LIMA_PP_FRAME >> 2] =
+ frame->plbu_array_address[i];
+ frame->frame[LIMA_PP_STACK >> 2] =
+ frame->fragment_stack_address[i];
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ }
+ }
+}
+
+static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
+{
+ if (pipe->bcast_processor)
+ lima_pp_soft_reset_async(pipe->bcast_processor);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++)
+ lima_pp_soft_reset_async(pipe->processor[i]);
+ }
+}
+
+static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
+ i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));
+
+ lima_pp_hard_reset(ip);
+ }
+}
+
+static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+}
+
+static struct kmem_cache *lima_pp_task_slab;
+static int lima_pp_task_slab_refcnt;
+
+int lima_pp_pipe_init(struct lima_device *dev)
+{
+ int frame_size;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (dev->id == lima_gpu_mali400)
+ frame_size = sizeof(struct drm_lima_m400_pp_frame);
+ else
+ frame_size = sizeof(struct drm_lima_m450_pp_frame);
+
+ if (!lima_pp_task_slab) {
+ lima_pp_task_slab = kmem_cache_create_usercopy(
+ "lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_pp_task_slab)
+ return -ENOMEM;
+ }
+ lima_pp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_pp_task_slab;
+
+ pipe->task_validate = lima_pp_task_validate;
+ pipe->task_run = lima_pp_task_run;
+ pipe->task_fini = lima_pp_task_fini;
+ pipe->task_error = lima_pp_task_error;
+ pipe->task_mmu_error = lima_pp_task_mmu_error;
+
+ return 0;
+}
+
+void lima_pp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_pp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_pp_task_slab);
+ lima_pp_task_slab = NULL;
+ }
+}
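Aside, not part of the patch: the task slab created above whitelists only the frame area that trails struct lima_sched_task for usercopy (useroffset = sizeof(struct lima_sched_task), usersize = frame_size), so a copy_from_user() of the frame passes hardened-usercopy checks. A hedged sketch of the kind of copy this enables; the helper and the user pointer are hypothetical, and the real ioctl-side copy is outside this hunk.

#include <linux/uaccess.h>

#include "lima_sched.h"

static int lima_fill_task_frame(struct lima_sched_task *task,
                                void __user *uframe, size_t frame_size)
{
        /* task + 1 is the start of the whitelisted usercopy window */
        void *frame = task + 1;

        if (copy_from_user(frame, uframe, frame_size))
                return -EFAULT;

        task->frame = frame;
        return 0;
}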
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
new file mode 100644
index 000000000000..bf60c77b2633
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PP_H__
+#define __LIMA_PP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_pp_init(struct lima_ip *ip);
+void lima_pp_fini(struct lima_ip *ip);
+
+int lima_pp_bcast_init(struct lima_ip *ip);
+void lima_pp_bcast_fini(struct lima_ip *ip);
+
+int lima_pp_pipe_init(struct lima_device *dev);
+void lima_pp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
new file mode 100644
index 000000000000..ace8ecefbe90
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2010-2017 ARM Limited. All rights reserved.
+ * Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+ */
+
+#ifndef __LIMA_REGS_H__
+#define __LIMA_REGS_H__
+
+/* This file's register definition is collected from the
+ * official ARM Mali Utgard GPU kernel driver source code
+ */
+
+/* PMU regs */
+#define LIMA_PMU_POWER_UP 0x00
+#define LIMA_PMU_POWER_DOWN 0x04
+#define LIMA_PMU_POWER_GP0_MASK BIT(0)
+#define LIMA_PMU_POWER_L2_MASK BIT(1)
+#define LIMA_PMU_POWER_PP_MASK(i) BIT(2 + i)
+
+/*
+ * On Mali450 each block automatically starts up its corresponding L2
+ * and the PPs are not fully independent controllable.
+ * Instead PP0, PP1-3 and PP4-7 can be turned on or off.
+ */
+#define LIMA450_PMU_POWER_PP0_MASK BIT(1)
+#define LIMA450_PMU_POWER_PP13_MASK BIT(2)
+#define LIMA450_PMU_POWER_PP47_MASK BIT(3)
+
+#define LIMA_PMU_STATUS 0x08
+#define LIMA_PMU_INT_MASK 0x0C
+#define LIMA_PMU_INT_RAWSTAT 0x10
+#define LIMA_PMU_INT_CLEAR 0x18
+#define LIMA_PMU_INT_CMD_MASK BIT(0)
+#define LIMA_PMU_SW_DELAY 0x1C
+
+/* L2 cache regs */
+#define LIMA_L2_CACHE_SIZE 0x0004
+#define LIMA_L2_CACHE_STATUS 0x0008
+#define LIMA_L2_CACHE_STATUS_COMMAND_BUSY BIT(0)
+#define LIMA_L2_CACHE_STATUS_DATA_BUSY BIT(1)
+#define LIMA_L2_CACHE_COMMAND 0x0010
+#define LIMA_L2_CACHE_COMMAND_CLEAR_ALL BIT(0)
+#define LIMA_L2_CACHE_CLEAR_PAGE 0x0014
+#define LIMA_L2_CACHE_MAX_READS 0x0018
+#define LIMA_L2_CACHE_ENABLE 0x001C
+#define LIMA_L2_CACHE_ENABLE_ACCESS BIT(0)
+#define LIMA_L2_CACHE_ENABLE_READ_ALLOCATE BIT(1)
+#define LIMA_L2_CACHE_PERFCNT_SRC0 0x0020
+#define LIMA_L2_CACHE_PERFCNT_VAL0 0x0024
+#define LIMA_L2_CACHE_PERFCNT_SRC1 0x0028
+#define LIMA_L2_CACHE_PERFCNT_VAL1 0x002C
+
+/* GP regs */
+#define LIMA_GP_VSCL_START_ADDR 0x00
+#define LIMA_GP_VSCL_END_ADDR 0x04
+#define LIMA_GP_PLBUCL_START_ADDR 0x08
+#define LIMA_GP_PLBUCL_END_ADDR 0x0c
+#define LIMA_GP_PLBU_ALLOC_START_ADDR 0x10
+#define LIMA_GP_PLBU_ALLOC_END_ADDR 0x14
+#define LIMA_GP_CMD 0x20
+#define LIMA_GP_CMD_START_VS BIT(0)
+#define LIMA_GP_CMD_START_PLBU BIT(1)
+#define LIMA_GP_CMD_UPDATE_PLBU_ALLOC BIT(4)
+#define LIMA_GP_CMD_RESET BIT(5)
+#define LIMA_GP_CMD_FORCE_HANG BIT(6)
+#define LIMA_GP_CMD_STOP_BUS BIT(9)
+#define LIMA_GP_CMD_SOFT_RESET BIT(10)
+#define LIMA_GP_INT_RAWSTAT 0x24
+#define LIMA_GP_INT_CLEAR 0x28
+#define LIMA_GP_INT_MASK 0x2C
+#define LIMA_GP_INT_STAT 0x30
+#define LIMA_GP_IRQ_VS_END_CMD_LST BIT(0)
+#define LIMA_GP_IRQ_PLBU_END_CMD_LST BIT(1)
+#define LIMA_GP_IRQ_PLBU_OUT_OF_MEM BIT(2)
+#define LIMA_GP_IRQ_VS_SEM_IRQ BIT(3)
+#define LIMA_GP_IRQ_PLBU_SEM_IRQ BIT(4)
+#define LIMA_GP_IRQ_HANG BIT(5)
+#define LIMA_GP_IRQ_FORCE_HANG BIT(6)
+#define LIMA_GP_IRQ_PERF_CNT_0_LIMIT BIT(7)
+#define LIMA_GP_IRQ_PERF_CNT_1_LIMIT BIT(8)
+#define LIMA_GP_IRQ_WRITE_BOUND_ERR BIT(9)
+#define LIMA_GP_IRQ_SYNC_ERROR BIT(10)
+#define LIMA_GP_IRQ_AXI_BUS_ERROR BIT(11)
+#define LIMA_GP_IRQ_AXI_BUS_STOPPED BIT(12)
+#define LIMA_GP_IRQ_VS_INVALID_CMD BIT(13)
+#define LIMA_GP_IRQ_PLB_INVALID_CMD BIT(14)
+#define LIMA_GP_IRQ_RESET_COMPLETED BIT(19)
+#define LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW BIT(20)
+#define LIMA_GP_IRQ_SEMAPHORE_OVERFLOW BIT(21)
+#define LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS BIT(22)
+#define LIMA_GP_WRITE_BOUND_LOW 0x34
+#define LIMA_GP_PERF_CNT_0_ENABLE 0x3C
+#define LIMA_GP_PERF_CNT_1_ENABLE 0x40
+#define LIMA_GP_PERF_CNT_0_SRC 0x44
+#define LIMA_GP_PERF_CNT_1_SRC 0x48
+#define LIMA_GP_PERF_CNT_0_VALUE 0x4C
+#define LIMA_GP_PERF_CNT_1_VALUE 0x50
+#define LIMA_GP_PERF_CNT_0_LIMIT 0x54
+#define LIMA_GP_STATUS 0x68
+#define LIMA_GP_STATUS_VS_ACTIVE BIT(1)
+#define LIMA_GP_STATUS_BUS_STOPPED BIT(2)
+#define LIMA_GP_STATUS_PLBU_ACTIVE BIT(3)
+#define LIMA_GP_STATUS_BUS_ERROR BIT(6)
+#define LIMA_GP_STATUS_WRITE_BOUND_ERR BIT(8)
+#define LIMA_GP_VERSION 0x6C
+#define LIMA_GP_VSCL_START_ADDR_READ 0x80
+#define LIMA_GP_PLBCL_START_ADDR_READ 0x84
+#define LIMA_GP_CONTR_AXI_BUS_ERROR_STAT 0x94
+
+#define LIMA_GP_IRQ_MASK_ALL \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_VS_SEM_IRQ | \
+ LIMA_GP_IRQ_PLBU_SEM_IRQ | \
+ LIMA_GP_IRQ_HANG | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_PERF_CNT_0_LIMIT | \
+ LIMA_GP_IRQ_PERF_CNT_1_LIMIT | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_STOPPED | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_RESET_COMPLETED | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_USED \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_MASK_ERROR)
+
+/* PP regs */
+#define LIMA_PP_FRAME 0x0000
+#define LIMA_PP_RSW 0x0004
+#define LIMA_PP_STACK 0x0030
+#define LIMA_PP_STACK_SIZE 0x0034
+#define LIMA_PP_ORIGIN_OFFSET_X 0x0040
+#define LIMA_PP_WB(i) (0x0100 * (i + 1))
+#define LIMA_PP_WB_SOURCE_SELECT 0x0000
+#define LIMA_PP_WB_SOURCE_ADDR 0x0004
+
+#define LIMA_PP_VERSION 0x1000
+#define LIMA_PP_CURRENT_REND_LIST_ADDR 0x1004
+#define LIMA_PP_STATUS 0x1008
+#define LIMA_PP_STATUS_RENDERING_ACTIVE BIT(0)
+#define LIMA_PP_STATUS_BUS_STOPPED BIT(4)
+#define LIMA_PP_CTRL 0x100c
+#define LIMA_PP_CTRL_STOP_BUS BIT(0)
+#define LIMA_PP_CTRL_FLUSH_CACHES BIT(3)
+#define LIMA_PP_CTRL_FORCE_RESET BIT(5)
+#define LIMA_PP_CTRL_START_RENDERING BIT(6)
+#define LIMA_PP_CTRL_SOFT_RESET BIT(7)
+#define LIMA_PP_INT_RAWSTAT 0x1020
+#define LIMA_PP_INT_CLEAR 0x1024
+#define LIMA_PP_INT_MASK 0x1028
+#define LIMA_PP_INT_STATUS 0x102c
+#define LIMA_PP_IRQ_END_OF_FRAME BIT(0)
+#define LIMA_PP_IRQ_END_OF_TILE BIT(1)
+#define LIMA_PP_IRQ_HANG BIT(2)
+#define LIMA_PP_IRQ_FORCE_HANG BIT(3)
+#define LIMA_PP_IRQ_BUS_ERROR BIT(4)
+#define LIMA_PP_IRQ_BUS_STOP BIT(5)
+#define LIMA_PP_IRQ_CNT_0_LIMIT BIT(6)
+#define LIMA_PP_IRQ_CNT_1_LIMIT BIT(7)
+#define LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR BIT(8)
+#define LIMA_PP_IRQ_INVALID_PLIST_COMMAND BIT(9)
+#define LIMA_PP_IRQ_CALL_STACK_UNDERFLOW BIT(10)
+#define LIMA_PP_IRQ_CALL_STACK_OVERFLOW BIT(11)
+#define LIMA_PP_IRQ_RESET_COMPLETED BIT(12)
+#define LIMA_PP_WRITE_BOUNDARY_LOW 0x1044
+#define LIMA_PP_BUS_ERROR_STATUS 0x1050
+#define LIMA_PP_PERF_CNT_0_ENABLE 0x1080
+#define LIMA_PP_PERF_CNT_0_SRC 0x1084
+#define LIMA_PP_PERF_CNT_0_LIMIT 0x1088
+#define LIMA_PP_PERF_CNT_0_VALUE 0x108c
+#define LIMA_PP_PERF_CNT_1_ENABLE 0x10a0
+#define LIMA_PP_PERF_CNT_1_SRC 0x10a4
+#define LIMA_PP_PERF_CNT_1_LIMIT 0x10a8
+#define LIMA_PP_PERF_CNT_1_VALUE 0x10ac
+#define LIMA_PP_PERFMON_CONTR 0x10b0
+#define LIMA_PP_PERFMON_BASE 0x10b4
+
+#define LIMA_PP_IRQ_MASK_ALL \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_END_OF_TILE | \
+ LIMA_PP_IRQ_HANG | \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_BUS_STOP | \
+ LIMA_PP_IRQ_CNT_0_LIMIT | \
+ LIMA_PP_IRQ_CNT_1_LIMIT | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW | \
+ LIMA_PP_IRQ_RESET_COMPLETED)
+
+#define LIMA_PP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW)
+
+#define LIMA_PP_IRQ_MASK_USED \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_MASK_ERROR)
+
+/* MMU regs */
+#define LIMA_MMU_DTE_ADDR 0x0000
+#define LIMA_MMU_STATUS 0x0004
+#define LIMA_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define LIMA_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define LIMA_MMU_STATUS_IDLE BIT(3)
+#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_COMMAND 0x0008
+#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
+#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
+#define LIMA_MMU_COMMAND_ENABLE_STALL 0x02
+#define LIMA_MMU_COMMAND_DISABLE_STALL 0x03
+#define LIMA_MMU_COMMAND_ZAP_CACHE 0x04
+#define LIMA_MMU_COMMAND_PAGE_FAULT_DONE 0x05
+#define LIMA_MMU_COMMAND_HARD_RESET 0x06
+#define LIMA_MMU_PAGE_FAULT_ADDR 0x000C
+#define LIMA_MMU_ZAP_ONE_LINE 0x0010
+#define LIMA_MMU_INT_RAWSTAT 0x0014
+#define LIMA_MMU_INT_CLEAR 0x0018
+#define LIMA_MMU_INT_MASK 0x001C
+#define LIMA_MMU_INT_PAGE_FAULT BIT(0)
+#define LIMA_MMU_INT_READ_BUS_ERROR BIT(1)
+#define LIMA_MMU_INT_STATUS 0x0020
+
+#define LIMA_VM_FLAG_PRESENT BIT(0)
+#define LIMA_VM_FLAG_READ_PERMISSION BIT(1)
+#define LIMA_VM_FLAG_WRITE_PERMISSION BIT(2)
+#define LIMA_VM_FLAG_OVERRIDE_CACHE BIT(3)
+#define LIMA_VM_FLAG_WRITE_CACHEABLE BIT(4)
+#define LIMA_VM_FLAG_WRITE_ALLOCATE BIT(5)
+#define LIMA_VM_FLAG_WRITE_BUFFERABLE BIT(6)
+#define LIMA_VM_FLAG_READ_CACHEABLE BIT(7)
+#define LIMA_VM_FLAG_READ_ALLOCATE BIT(8)
+#define LIMA_VM_FLAG_MASK 0x1FF
+
+#define LIMA_VM_FLAGS_CACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION | \
+ LIMA_VM_FLAG_OVERRIDE_CACHE | \
+ LIMA_VM_FLAG_WRITE_CACHEABLE | \
+ LIMA_VM_FLAG_WRITE_BUFFERABLE | \
+ LIMA_VM_FLAG_READ_CACHEABLE | \
+ LIMA_VM_FLAG_READ_ALLOCATE)
+
+#define LIMA_VM_FLAGS_UNCACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION)
+
+/* DLBU regs */
+#define LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR 0x0000
+#define LIMA_DLBU_MASTER_TLLIST_VADDR 0x0004
+#define LIMA_DLBU_TLLIST_VBASEADDR 0x0008
+#define LIMA_DLBU_FB_DIM 0x000C
+#define LIMA_DLBU_TLLIST_CONF 0x0010
+#define LIMA_DLBU_START_TILE_POS 0x0014
+#define LIMA_DLBU_PP_ENABLE_MASK 0x0018
+
+/* BCAST regs */
+#define LIMA_BCAST_BROADCAST_MASK 0x0
+#define LIMA_BCAST_INTERRUPT_MASK 0x4
+
+#endif
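Aside, not part of the patch: a worked example of the LIMA_MMU_STATUS decode that lima_mmu_irq_handler() relies on, using a hypothetical status value.

#include <linux/kernel.h>

static void lima_mmu_status_decode_example(void)
{
        u32 status = 0xE2;      /* hypothetical readout: 1110 0010b */

        /* BIT(1) set -> a page fault is active                  */
        /* BIT(5) set -> the faulting access was a write         */
        /* bits 10:6  -> bus id = (0xE2 >> 6) & 0x1F = 3         */
        if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE)
                pr_info("page fault from bus id %u (%s)\n",
                        LIMA_MMU_STATUS_BUS_ID(status),
                        status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ?
                                "write" : "read");
}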
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
new file mode 100644
index 000000000000..d53bd45f8d96
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "lima_drv.h"
+#include "lima_sched.h"
+#include "lima_vm.h"
+#include "lima_mmu.h"
+#include "lima_l2_cache.h"
+#include "lima_object.h"
+
+struct lima_fence {
+ struct dma_fence base;
+ struct lima_sched_pipe *pipe;
+};
+
+static struct kmem_cache *lima_fence_slab;
+static int lima_fence_slab_refcnt;
+
+int lima_sched_slab_init(void)
+{
+ if (!lima_fence_slab) {
+ lima_fence_slab = kmem_cache_create(
+ "lima_fence", sizeof(struct lima_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!lima_fence_slab)
+ return -ENOMEM;
+ }
+
+ lima_fence_slab_refcnt++;
+ return 0;
+}
+
+void lima_sched_slab_fini(void)
+{
+ if (!--lima_fence_slab_refcnt) {
+ kmem_cache_destroy(lima_fence_slab);
+ lima_fence_slab = NULL;
+ }
+}
+
+static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct lima_fence, base);
+}
+
+static const char *lima_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "lima";
+}
+
+static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
+ return f->pipe->base.name;
+}
+
+static void lima_fence_release_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct lima_fence *fence = to_lima_fence(f);
+
+ kmem_cache_free(lima_fence_slab, fence);
+}
+
+static void lima_fence_release(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
+ call_rcu(&f->base.rcu, lima_fence_release_rcu);
+}
+
+static const struct dma_fence_ops lima_fence_ops = {
+ .get_driver_name = lima_fence_get_driver_name,
+ .get_timeline_name = lima_fence_get_timeline_name,
+ .release = lima_fence_release,
+};
+
+static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
+{
+ struct lima_fence *fence;
+
+ fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->pipe = pipe;
+ dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
+ pipe->fence_context, ++pipe->fence_seqno);
+
+ return fence;
+}
+
+static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
+{
+ return container_of(job, struct lima_sched_task, base);
+}
+
+static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct lima_sched_pipe, base);
+}
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm)
+{
+ int err, i;
+
+ task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
+ if (!task->bos)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bos; i++)
+ drm_gem_object_get(&bos[i]->gem);
+
+ err = drm_sched_job_init(&task->base, &context->base, vm);
+ if (err) {
+ kfree(task->bos);
+ return err;
+ }
+
+ task->num_bos = num_bos;
+ task->vm = lima_vm_get(vm);
+
+ xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+
+void lima_sched_task_fini(struct lima_sched_task *task)
+{
+ struct dma_fence *fence;
+ unsigned long index;
+ int i;
+
+ drm_sched_job_cleanup(&task->base);
+
+ xa_for_each(&task->deps, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&task->deps);
+
+ if (task->bos) {
+ for (i = 0; i < task->num_bos; i++)
+ drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ kfree(task->bos);
+ }
+
+ lima_vm_put(task->vm);
+}
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty)
+{
+ struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+
+ return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+}
+
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context)
+{
+ drm_sched_entity_fini(&context->base);
+}
+
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task)
+{
+ struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+
+ drm_sched_entity_push_job(&task->base, &context->base);
+ return fence;
+}
+
+static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+
+ if (!xa_empty(&task->deps))
+ return xa_erase(&task->deps, task->last_dep++);
+
+ return NULL;
+}
+
+static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_fence *fence;
+ struct dma_fence *ret;
+ struct lima_vm *vm = NULL, *last_vm = NULL;
+ int i;
+
+ /* after GPU reset */
+ if (job->s_fence->finished.error < 0)
+ return NULL;
+
+ fence = lima_fence_create(pipe);
+ if (!fence)
+ return NULL;
+ task->fence = &fence->base;
+
+ /* take a reference for the caller's use of the fence, otherwise
+ * the irq handler may release it before the caller gets to use it
+ */
+ ret = dma_fence_get(task->fence);
+
+ pipe->current_task = task;
+
+ /* This is needed for the MMU to work correctly, otherwise GP/PP
+ * will hang or page fault for unknown reasons after running for
+ * a while.
+ *
+ * Need to investigate:
+ * 1. is it related to the TLB?
+ * 2. how much performance is lost to the L2 cache flush?
+ * 3. can we call this function less often, since all GP/PP
+ * share the same L2 cache on mali400?
+ *
+ * TODO:
+ * 1. move this to task fini to save some wait time?
+ * 2. when GP/PP use different L2 caches, does PP need to wait
+ * for the GP L2 cache flush?
+ */
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (task->vm != pipe->current_vm) {
+ vm = lima_vm_get(task->vm);
+ last_vm = pipe->current_vm;
+ pipe->current_vm = task->vm;
+ }
+
+ if (pipe->bcast_mmu)
+ lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_switch_vm(pipe->mmu[i], vm);
+ }
+
+ if (last_vm)
+ lima_vm_put(last_vm);
+
+ pipe->error = false;
+ pipe->task_run(pipe, task);
+
+ return task->fence;
+}
+
+static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ drm_sched_stop(&pipe->base);
+
+ if (task)
+ drm_sched_increase_karma(&task->base);
+
+ pipe->task_error(pipe);
+
+ if (pipe->bcast_mmu)
+ lima_mmu_page_fault_resume(pipe->bcast_mmu);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_page_fault_resume(pipe->mmu[i]);
+ }
+
+ if (pipe->current_vm)
+ lima_vm_put(pipe->current_vm);
+
+ pipe->current_vm = NULL;
+ pipe->current_task = NULL;
+
+ drm_sched_resubmit_jobs(&pipe->base);
+ drm_sched_start(&pipe->base, true);
+}
+
+static void lima_sched_timedout_job(struct drm_sched_job *job)
+{
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_sched_task *task = to_lima_task(job);
+
+ DRM_ERROR("lima job timeout\n");
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+static void lima_sched_free_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_vm *vm = task->vm;
+ struct lima_bo **bos = task->bos;
+ int i;
+
+ dma_fence_put(task->fence);
+
+ for (i = 0; i < task->num_bos; i++)
+ lima_vm_bo_del(vm, bos[i]);
+
+ lima_sched_task_fini(task);
+ kmem_cache_free(pipe->task_slab, task);
+}
+
+static const struct drm_sched_backend_ops lima_sched_ops = {
+ .dependency = lima_sched_dependency,
+ .run_job = lima_sched_run_job,
+ .timedout_job = lima_sched_timedout_job,
+ .free_job = lima_sched_free_job,
+};
+
+static void lima_sched_error_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, error_work);
+ struct lima_sched_task *task = pipe->current_task;
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
+{
+ long timeout;
+
+ if (lima_sched_timeout_ms <= 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(lima_sched_timeout_ms);
+
+ pipe->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&pipe->fence_lock);
+
+ INIT_WORK(&pipe->error_work, lima_sched_error_work);
+
+ return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
+}
+
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
+{
+ drm_sched_fini(&pipe->base);
+}
+
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
+{
+ if (pipe->error)
+ schedule_work(&pipe->error_work);
+ else {
+ struct lima_sched_task *task = pipe->current_task;
+
+ pipe->task_fini(pipe);
+ dma_fence_signal(task->fence);
+ }
+}
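Aside, not part of the patch: the dependency xarray filled by the submit path is drained one fence per call through lima_sched_dependency() above, erasing index task->last_dep++ until the array is empty. The sketch below writes that contract out as a plain loop from the scheduler core's point of view; the real core waits asynchronously via fence callbacks rather than blocking.

#include <linux/dma-fence.h>
#include <linux/xarray.h>

#include "lima_sched.h"

static void lima_drain_deps_sketch(struct lima_sched_task *task)
{
        struct dma_fence *fence;

        /* deps are stored at increasing indices starting from 0 */
        while ((fence = xa_erase(&task->deps, task->last_dep++))) {
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
        }
}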
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
new file mode 100644
index 000000000000..928af91c1118
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_SCHED_H__
+#define __LIMA_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct lima_vm;
+
+struct lima_sched_task {
+ struct drm_sched_job base;
+
+ struct lima_vm *vm;
+ void *frame;
+
+ struct xarray deps;
+ unsigned long last_dep;
+
+ struct lima_bo **bos;
+ int num_bos;
+
+ /* pipe fence */
+ struct dma_fence *fence;
+};
+
+struct lima_sched_context {
+ struct drm_sched_entity base;
+};
+
+#define LIMA_SCHED_PIPE_MAX_MMU 8
+#define LIMA_SCHED_PIPE_MAX_L2_CACHE 2
+#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8
+
+struct lima_ip;
+
+struct lima_sched_pipe {
+ struct drm_gpu_scheduler base;
+
+ u64 fence_context;
+ u32 fence_seqno;
+ spinlock_t fence_lock;
+
+ struct lima_sched_task *current_task;
+ struct lima_vm *current_vm;
+
+ struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
+ int num_mmu;
+
+ struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
+ int num_l2_cache;
+
+ struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
+ int num_processor;
+
+ struct lima_ip *bcast_processor;
+ struct lima_ip *bcast_mmu;
+
+ u32 done;
+ bool error;
+ atomic_t task;
+
+ int frame_size;
+ struct kmem_cache *task_slab;
+
+ int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_fini)(struct lima_sched_pipe *pipe);
+ void (*task_error)(struct lima_sched_pipe *pipe);
+ void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+
+ struct work_struct error_work;
+};
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm);
+void lima_sched_task_fini(struct lima_sched_task *task);
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty);
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task);
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
+
+static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
+{
+ pipe->error = true;
+ pipe->task_mmu_error(pipe);
+}
+
+int lima_sched_slab_init(void);
+void lima_sched_slab_fini(void);
+
+#endif
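
A minimal sketch, not part of the patch, of how a hardware backend would plug into the interface above: the lima_xx_* names are hypothetical stand-ins for the real gp/pp backends elsewhere in the series, which fill in the struct lima_sched_pipe callbacks and report completion from their IRQ path.

#include "lima_sched.h"

static int lima_xx_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	/* reject frames the hardware cannot process */
	return 0;
}

static void lima_xx_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	/* program task->frame into the processor and kick it */
}

static void lima_xx_task_fini(struct lima_sched_pipe *pipe) {}
static void lima_xx_task_error(struct lima_sched_pipe *pipe) {}
static void lima_xx_task_mmu_error(struct lima_sched_pipe *pipe) {}

static int lima_xx_pipe_init(struct lima_sched_pipe *pipe)
{
	pipe->task_validate = lima_xx_task_validate;
	pipe->task_run = lima_xx_task_run;
	pipe->task_fini = lima_xx_task_fini;
	pipe->task_error = lima_xx_task_error;
	pipe->task_mmu_error = lima_xx_task_mmu_error;

	return lima_sched_pipe_init(pipe, "xx");
}

/* called from the backend IRQ handler once the hardware finishes */
static void lima_xx_task_irq_done(struct lima_sched_pipe *pipe, bool error)
{
	pipe->error = error;
	lima_sched_pipe_task_done(pipe);
}
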
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
new file mode 100644
index 000000000000..19e88ca16527
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_device.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+struct lima_bo_va {
+ struct list_head list;
+ unsigned int ref_count;
+
+ struct drm_mm_node node;
+
+ struct lima_vm *vm;
+};
+
+#define LIMA_VM_PD_SHIFT 22
+#define LIMA_VM_PT_SHIFT 12
+#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
+
+#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
+#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
+
+#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
+#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
+#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
+#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
+
+
+static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+{
+ u32 addr;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ vm->bts[pbe].cpu[bte] = 0;
+ }
+}
+
+static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
+ u32 start, u32 end)
+{
+ u64 addr;
+ int i = 0;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu) {
+ if (addr != start)
+ lima_vm_unmap_page_table(vm, start, addr - 1);
+ return -ENOMEM;
+ }
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
+ }
+ }
+
+ vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
+ }
+
+ return 0;
+}
+
+static struct lima_bo_va *
+lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va, *ret = NULL;
+
+ list_for_each_entry(bo_va, &bo->va, list) {
+ if (bo_va->vm == vm) {
+ ret = bo_va;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
+{
+ struct lima_bo_va *bo_va;
+ int err;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (bo_va) {
+ bo_va->ref_count++;
+ mutex_unlock(&bo->lock);
+ return 0;
+ }
+
+ /* do not create a new bo_va unless the caller asked for one */
+ if (!create) {
+ mutex_unlock(&bo->lock);
+ return -ENOENT;
+ }
+
+ bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
+ if (!bo_va) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ bo_va->vm = vm;
+ bo_va->ref_count = 1;
+
+ mutex_lock(&vm->lock);
+
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ if (err)
+ goto err_out1;
+
+ err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+ if (err)
+ goto err_out2;
+
+ mutex_unlock(&vm->lock);
+
+ list_add_tail(&bo_va->list, &bo->va);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out2:
+ drm_mm_remove_node(&bo_va->node);
+err_out1:
+ mutex_unlock(&vm->lock);
+ kfree(bo_va);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
+
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (--bo_va->ref_count > 0) {
+ mutex_unlock(&bo->lock);
+ return;
+ }
+
+ mutex_lock(&vm->lock);
+
+ lima_vm_unmap_page_table(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+
+ drm_mm_remove_node(&bo_va->node);
+
+ mutex_unlock(&vm->lock);
+
+ list_del(&bo_va->list);
+
+ mutex_unlock(&bo->lock);
+
+ kfree(bo_va);
+}
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+ u32 ret;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ ret = bo_va->node.start;
+
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+struct lima_vm *lima_vm_create(struct lima_device *dev)
+{
+ struct lima_vm *vm;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return NULL;
+
+ vm->dev = dev;
+ mutex_init(&vm->lock);
+ kref_init(&vm->refcount);
+
+ vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vm->pd.cpu)
+ goto err_out0;
+
+ if (dev->dlbu_cpu) {
+ int err = lima_vm_map_page_table(
+ vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
+ LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ if (err)
+ goto err_out1;
+ }
+
+ drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);
+
+ return vm;
+
+err_out1:
+ dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+err_out0:
+ kfree(vm);
+ return NULL;
+}
+
+void lima_vm_release(struct kref *kref)
+{
+ struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
+ int i;
+
+ drm_mm_takedown(&vm->mm);
+
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (vm->bts[i].cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ vm->bts[i].cpu, vm->bts[i].dma);
+ }
+
+ if (vm->pd.cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+
+ kfree(vm);
+}
+
+void lima_vm_print(struct lima_vm *vm)
+{
+ int i, j, k;
+ u32 *pd, *pt;
+
+ if (!vm->pd.cpu)
+ return;
+
+ pd = vm->pd.cpu;
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (!vm->bts[i].cpu)
+ continue;
+
+ pt = vm->bts[i].cpu;
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;
+
+ printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);
+
+ for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
+ u32 pte = *pt++;
+
+ if (pte)
+ printk(KERN_INFO " pt %03x:%08x\n", k, pte);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
new file mode 100644
index 000000000000..caee2f8a29b4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_VM_H__
+#define __LIMA_VM_H__
+
+#include <drm/drm_mm.h>
+#include <linux/kref.h>
+
+#define LIMA_PAGE_SIZE 4096
+#define LIMA_PAGE_MASK (LIMA_PAGE_SIZE - 1)
+#define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32))
+
+#define LIMA_VM_NUM_PT_PER_BT_SHIFT 3
+#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
+
+#define LIMA_VA_RESERVE_START 0xFFF00000
+#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
+#define LIMA_VA_RESERVE_END 0x100000000
+
+struct lima_device;
+
+struct lima_vm_page {
+ u32 *cpu;
+ dma_addr_t dma;
+};
+
+struct lima_vm {
+ struct mutex lock;
+ struct kref refcount;
+
+ struct drm_mm mm;
+
+ struct lima_device *dev;
+
+ struct lima_vm_page pd;
+ struct lima_vm_page bts[LIMA_VM_NUM_BT];
+};
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create);
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo);
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo);
+
+struct lima_vm *lima_vm_create(struct lima_device *dev);
+void lima_vm_release(struct kref *kref);
+
+static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
+{
+ kref_get(&vm->refcount);
+ return vm;
+}
+
+static inline void lima_vm_put(struct lima_vm *vm)
+{
+ kref_put(&vm->refcount, lima_vm_release);
+}
+
+void lima_vm_print(struct lima_vm *vm);
+
+#endif
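
A minimal usage sketch of the VM interface above, illustrative only: dev and bo are assumed to come from the rest of the driver (lima_device.h / lima_object.h), and error handling is reduced to the essentials.

#include "lima_vm.h"

static int lima_vm_usage_sketch(struct lima_device *dev, struct lima_bo *bo)
{
	struct lima_vm *vm;
	u32 va;
	int err;

	vm = lima_vm_create(dev);		/* refcount starts at 1 */
	if (!vm)
		return -ENOMEM;

	err = lima_vm_bo_add(vm, bo, true);	/* allocate a bo_va and map it */
	if (err)
		goto out_put;

	va = lima_vm_get_va(vm, bo);		/* GPU virtual address of bo */
	(void)va;

	lima_vm_bo_del(vm, bo);			/* unmap once the last ref drops */

out_put:
	lima_vm_put(vm);			/* final put frees the page tables */
	return err;
}
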
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 7709f2fbb9f7..d4ea82fc493b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
deleted file mode 100644
index 5de11aa7c775..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include "meson_drv.h"
-#include "meson_canvas.h"
-#include "meson_registers.h"
-
-/**
- * DOC: Canvas
- *
- * CANVAS is a memory zone where physical memory frames information
- * are stored for the VIU to scanout.
- */
-
-/* DMC Registers */
-#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
-#define CANVAS_WIDTH_LBIT 29
-#define CANVAS_WIDTH_LWID 3
-#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
-#define CANVAS_WIDTH_HBIT 0
-#define CANVAS_HEIGHT_BIT 9
-#define CANVAS_BLKMODE_BIT 24
-#define CANVAS_ENDIAN_BIT 26
-#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
-#define CANVAS_LUT_WR_EN (0x2 << 8)
-#define CANVAS_LUT_RD_EN (0x1 << 8)
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian)
-{
- unsigned int val;
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
- (((addr + 7) >> 3)) |
- (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
- ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
- CANVAS_WIDTH_HBIT) |
- (height << CANVAS_HEIGHT_BIT) |
- (wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT) |
- (endian << CANVAS_ENDIAN_BIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
- CANVAS_LUT_WR_EN | canvas_index);
-
- /* Force a read-back to make sure everything is flushed. */
- regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
-}
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
deleted file mode 100644
index 85dbf26e2826..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* Canvas LUT Memory */
-
-#ifndef __MESON_CANVAS_H
-#define __MESON_CANVAS_H
-
-#define MESON_CANVAS_ID_OSD1 0x4e
-#define MESON_CANVAS_ID_VD1_0 0x60
-#define MESON_CANVAS_ID_VD1_1 0x61
-#define MESON_CANVAS_ID_VD1_2 0x62
-
-/* Canvas configuration. */
-#define MESON_CANVAS_WRAP_NONE 0x00
-#define MESON_CANVAS_WRAP_X 0x01
-#define MESON_CANVAS_WRAP_Y 0x02
-
-#define MESON_CANVAS_BLKMODE_LINEAR 0x00
-#define MESON_CANVAS_BLKMODE_32x32 0x01
-#define MESON_CANVAS_BLKMODE_64x64 0x02
-
-#define MESON_CANVAS_ENDIAN_SWAP16 0x1
-#define MESON_CANVAS_ENDIAN_SWAP32 0x3
-#define MESON_CANVAS_ENDIAN_SWAP64 0x7
-#define MESON_CANVAS_ENDIAN_SWAP128 0xf
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian);
-
-#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 43e29984f8b1..5579f8ac3e3f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -37,15 +37,19 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
+#define MESON_G12A_VIU_OFFSET 0x5ec0
+
/* CRTC definition */
struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ void (*enable_osd1)(struct meson_drm *priv);
+ void (*enable_vd1)(struct meson_drm *priv);
+ unsigned int viu_offset;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -81,6 +85,44 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
+static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
+ /* Setup Blender */
+ writel(crtc_state->mode.hdisplay |
+ crtc_state->mode.vdisplay << 16,
+ priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.hdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_H_SCOPE));
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.vdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_V_SCOPE));
+ writel_relaxed(crtc_state->mode.hdisplay << 16 |
+ crtc_state->mode.vdisplay,
+ priv->io_base + _REG(VPP_OUT_H_V_SIZE));
+
+ drm_crtc_vblank_on(crtc);
+
+ priv->viu.osd1_enabled = true;
+}
+
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -111,6 +153,31 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
priv->viu.osd1_enabled = true;
}
+static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_crtc_vblank_off(crtc);
+
+ priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
+
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -174,6 +241,53 @@ static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
.atomic_disable = meson_crtc_atomic_disable,
};
+static const struct drm_crtc_helper_funcs meson_g12a_crtc_helper_funcs = {
+ .atomic_begin = meson_crtc_atomic_begin,
+ .atomic_flush = meson_crtc_atomic_flush,
+ .atomic_enable = meson_g12a_crtc_atomic_enable,
+ .atomic_disable = meson_g12a_crtc_atomic_disable,
+};
+
+static void meson_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_relaxed(priv->viu.osd_blend_din0_scope_h,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_H));
+ writel_relaxed(priv->viu.osd_blend_din0_scope_v,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_V));
+ writel_relaxed(priv->viu.osb_blend0_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND0_SIZE));
+ writel_relaxed(priv->viu.osb_blend1_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND1_SIZE));
+}
+
+static void meson_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_relaxed(((1 << 16) | /* post bld premult */
+ (1 << 8) | /* post src */
+ (1 << 4) | /* pre bld premult */
+ (1 << 0)),
+ priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+}
+
void meson_crtc_irq(struct meson_drm *priv)
{
struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
@@ -214,20 +328,14 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- if (priv->canvas)
- meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR, 0);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
priv->viu.osd1_addr, priv->viu.osd1_stride,
priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_osd1)
+ meson_crtc->enable_osd1(priv);
priv->viu.osd1_commit = false;
}
@@ -237,147 +345,164 @@ void meson_crtc_irq(struct meson_drm *priv)
switch (priv->viu.vd1_planes) {
case 3:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 2:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_1,
- priv->viu.vd1_addr1,
- priv->viu.vd1_stride1,
- priv->viu.vd1_height1,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 1:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_0,
- priv->viu.vd1_addr0,
- priv->viu.vd1_stride0,
- priv->viu.vd1_height0,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
};
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD1_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD2_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg2,
- priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG2));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD1_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_W));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD2_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_W));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_PSEL));
writel_relaxed(priv->viu.vd1_range_map_y,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_Y));
writel_relaxed(priv->viu.vd1_range_map_cb,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CB));
writel_relaxed(priv->viu.vd1_range_map_cr,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CR));
writel_relaxed(0x78404,
priv->io_base + _REG(VPP_SC_MISC));
writel_relaxed(priv->viu.vpp_pic_in_height,
@@ -423,11 +548,8 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
/* Enable VD1 */
- writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_vd1)
+ meson_crtc->enable_vd1(priv);
priv->viu.vd1_commit = false;
}
@@ -464,7 +586,16 @@ int meson_crtc_create(struct meson_drm *priv)
return ret;
}
- drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
+ meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+ drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
+ } else {
+ meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+ drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ }
priv->crtc = crtc;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 8a4ebcb6405c..72b01e6be0d9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -48,7 +48,6 @@
#include "meson_vpp.h"
#include "meson_viu.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
#define DRIVER_NAME "meson"
@@ -91,6 +90,18 @@ static irqreturn_t meson_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /*
+ * We need a 64-byte aligned stride and a page-aligned size
+ */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
@@ -113,7 +124,7 @@ static struct drm_driver meson_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
/* GEM Ops */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = meson_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -231,50 +242,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
priv->canvas = meson_canvas_get(dev);
- if (!IS_ERR(priv->canvas)) {
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
- if (ret)
- goto free_drm;
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
- goto free_drm;
- }
- } else {
- priv->canvas = NULL;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
+ if (IS_ERR(priv->canvas)) {
+ ret = PTR_ERR(priv->canvas);
+ goto free_drm;
+ }
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
- }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -467,6 +459,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu" },
{ .compatible = "amlogic,meson-gxl-vpu" },
{ .compatible = "amlogic,meson-gxm-vpu" },
+ { .compatible = "amlogic,meson-g12a-vpu" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
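
As a quick check of the stride and size the new meson_dumb_create() produces, here is a worked example with hypothetical buffer dimensions (not taken from the patch):

/*
 * 1366x768 XRGB8888 dumb buffer, bpp = 32:
 *   pitch = ALIGN(DIV_ROUND_UP(1366 * 32, 8), 64)
 *         = ALIGN(5464, 64)      = 5504 bytes
 *   size  = PAGE_ALIGN(5504 * 768)
 *         = PAGE_ALIGN(4227072)  = 4227072 bytes (already a 4 KiB multiple)
 */
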
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 4dccf4cd042a..9614baa836b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -29,7 +29,6 @@ struct meson_drm {
struct device *dev;
void __iomem *io_base;
struct regmap *hhi;
- struct regmap *dmc;
int vsync_irq;
struct meson_canvas *canvas;
@@ -63,6 +62,10 @@ struct meson_drm {
uint32_t osd_sc_h_phase_step;
uint32_t osd_sc_h_ctrl0;
uint32_t osd_sc_v_ctrl0;
+ uint32_t osd_blend_din0_scope_h;
+ uint32_t osd_blend_din0_scope_v;
+ uint32_t osb_blend0_size;
+ uint32_t osb_blend1_size;
bool vd1_enabled;
bool vd1_commit;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 563953ec6ad0..779da21143b9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/component.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -105,6 +106,7 @@
#define HDMITX_TOP_ADDR_REG 0x0
#define HDMITX_TOP_DATA_REG 0x4
#define HDMITX_TOP_CTRL_REG 0x8
+#define HDMITX_TOP_G12A_OFFSET 0x8000
/* Controller Communication Channel */
#define HDMITX_DWC_ADDR_REG 0x10
@@ -118,6 +120,8 @@
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
+#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
+#define HHI_HDMI_PHY_CNTL5 0x3b4 /* 0xed */
static DEFINE_SPINLOCK(reg_lock);
@@ -127,12 +131,26 @@ enum meson_venc_source {
MESON_VENC_SOURCE_ENCP = 2,
};
+struct meson_dw_hdmi;
+
+struct meson_dw_hdmi_data {
+ unsigned int (*top_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*top_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+ unsigned int (*dwc_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+};
+
struct meson_dw_hdmi {
struct drm_encoder encoder;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
void __iomem *hdmitx;
+ const struct meson_dw_hdmi_data *data;
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
@@ -174,6 +192,12 @@ static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_top_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readl(dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -191,18 +215,24 @@ static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_top_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writel(data, dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
/* Helper to change specific bits in PHY registers */
static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_top_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->top_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_top_write(dw_hdmi, addr, data);
+ dw_hdmi->data->top_write(dw_hdmi, addr, data);
}
static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
@@ -226,6 +256,12 @@ static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_dwc_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readb(dw_hdmi->hdmitx + addr);
+}
+
static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -243,18 +279,24 @@ static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writeb(data, dw_hdmi->hdmitx + addr);
+}
+
/* Helper to change specific bits in controller registers */
static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_dwc_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_dwc_write(dw_hdmi, addr, data);
+ dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
}
/* Bridge */
@@ -300,6 +342,24 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
}
+ } else if (dw_hdmi_is_compatible(dw_hdmi,
+ "amlogic,meson-g12a-dw-hdmi")) {
+ if (pixel_clock >= 371250) {
+ /* 5.94Gbps, 3.7125Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x37eb65c4);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x0000080b);
+ } else if (pixel_clock >= 297000) {
+ /* 2.97Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb6262);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ } else {
+ /* 1.485Gbps, and below */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb4242);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ }
}
}
@@ -375,7 +435,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
/* Bring out of reset */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
@@ -384,24 +444,25 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
0x3 << 4, 0x3 << 4);
/* Enable normal output to PHY */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
/* TMDS pattern setup (TOFIX Handle the YUV420 case) */
if (mode->clock > 340000) {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x03ff03ff);
} else {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0x001f001f);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x001f001f);
}
/* Load TMDS pattern */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
msleep(20);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode);
@@ -412,7 +473,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* BIT_INVERT */
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi"))
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
BIT(17), 0);
else
@@ -480,7 +542,7 @@ static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
- return !!dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
+ return !!dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
connector_status_connected : connector_status_disconnected;
}
@@ -490,11 +552,11 @@ static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
/* Setup HPD Filter */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
(0xa << 12) | 0xa0);
/* Clear interrupts */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
/* Unmask interrupts */
@@ -515,8 +577,8 @@ static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
struct meson_dw_hdmi *dw_hdmi = dev_id;
u32 stat;
- stat = dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
+ stat = dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
/* HPD Events, handle in the threaded interrupt handler */
if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
@@ -686,7 +748,9 @@ static const struct drm_encoder_helper_funcs
static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
unsigned int *result)
{
- *result = dw_hdmi_dwc_read(context, reg);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ *result = dw_hdmi->data->dwc_read(dw_hdmi, reg);
return 0;
@@ -695,7 +759,9 @@ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
unsigned int val)
{
- dw_hdmi_dwc_write(context, reg, val);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ dw_hdmi->data->dwc_write(dw_hdmi, reg, val);
return 0;
}
@@ -709,6 +775,20 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.fast_io = true,
};
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
+};
+
+static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+ .top_read = dw_hdmi_g12a_top_read,
+ .top_write = dw_hdmi_g12a_top_write,
+ .dwc_read = dw_hdmi_g12a_dwc_read,
+ .dwc_write = dw_hdmi_g12a_dwc_write,
+};
+
static bool meson_hdmi_connector_is_available(struct device *dev)
{
struct device_node *ep, *remote;
@@ -735,6 +815,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct meson_dw_hdmi_data *match;
struct meson_dw_hdmi *meson_dw_hdmi;
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
@@ -751,6 +832,12 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
GFP_KERNEL);
if (!meson_dw_hdmi)
@@ -758,6 +845,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->priv = priv;
meson_dw_hdmi->dev = dev;
+ meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
encoder = &meson_dw_hdmi->encoder;
@@ -858,24 +946,28 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
reset_control_reset(meson_dw_hdmi->hdmitx_phy);
/* Enable APB3 fail on error */
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
/* Bring out of reset */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
msleep(20);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
/* Enable HDMI-TX Interrupt */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
/* Bridge / Connector */
@@ -924,9 +1016,14 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
}
static const struct of_device_id meson_dw_hdmi_of_table[] = {
- { .compatible = "amlogic,meson-gxbb-dw-hdmi" },
- { .compatible = "amlogic,meson-gxl-dw-hdmi" },
- { .compatible = "amlogic,meson-gxm-dw-hdmi" },
+ { .compatible = "amlogic,meson-gxbb-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxl-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxm-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-g12a-dw-hdmi",
+ .data = &meson_dw_hdmi_g12a_data },
{ }
};
MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 0b81183125e3..03e2f0c1a2d5 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -21,9 +21,12 @@
#define __MESON_DW_HDMI_H
/*
- * Bit 7 RW Reserved. Default 1.
- * Bit 6 RW Reserved. Default 1.
- * Bit 5 RW Reserved. Default 1.
+ * Bit 15-10: RW Reserved. Default 1 starting from G12A
+ * Bit 9 RW sw_reset_i2c starting from G12A
+ * Bit 8 RW sw_reset_axiarb starting from G12A
+ * Bit 7 RW Reserved. Default 1, sw_reset_emp starting from G12A
+ * Bit 6 RW Reserved. Default 1, sw_reset_flt starting from G12A
+ * Bit 5 RW Reserved. Default 1, sw_reset_hdcp22 starting from G12A
* Bit 4 RW sw_reset_phyif: PHY interface. 1=Apply reset; 0=Release from reset.
* Default 1.
* Bit 3 RW sw_reset_intr: interrupt module. 1=Apply reset;
@@ -39,12 +42,16 @@
#define HDMITX_TOP_SW_RESET (0x000)
/*
+ * Bit 31 RW free_clk_en: 0=Enable clock gating for power saving; 1= Disable
* Bit 12 RW i2s_ws_inv:1=Invert i2s_ws; 0=No invert. Default 0.
* Bit 11 RW i2s_clk_inv: 1=Invert i2s_clk; 0=No invert. Default 0.
* Bit 10 RW spdif_clk_inv: 1=Invert spdif_clk; 0=No invert. Default 0.
* Bit 9 RW tmds_clk_inv: 1=Invert tmds_clk; 0=No invert. Default 0.
* Bit 8 RW pixel_clk_inv: 1=Invert pixel_clk; 0=No invert. Default 0.
- * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0.
+ * Bit 7 RW hdcp22_skpclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 6 RW hdcp22_esmclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 5 RW hdcp22_tmdsclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0. Reserved for G12A
* Bit 3 RW i2s_clk_en: 1=enable i2s_clk; 0=disable. Default 0.
* Bit 2 RW spdif_clk_en: 1=enable spdif_clk; 0=disable. Default 0.
* Bit 1 RW tmds_clk_en: 1=enable tmds_clk; 0=disable. Default 0.
@@ -53,6 +60,8 @@
#define HDMITX_TOP_CLK_CNTL (0x001)
/*
+ * Bit 31:28 RW rxsense_glitch_width: starting from G12A
+ * Bit 27:16 RW rxsense_valid_width: starting from G12A
* Bit 11: 0 RW hpd_valid_width: filter out width <= M*1024. Default 0.
* Bit 15:12 RW hpd_glitch_width: filter out glitch <= N. Default 0.
*/
@@ -61,6 +70,9 @@
/*
* intr_maskn: MASK_N, one bit per interrupt source.
* 1=Enable interrupt source; 0=Disable interrupt source. Default 0.
+ * [ 7] rxsense_fall starting from G12A
+ * [ 6] rxsense_rise starting from G12A
+ * [ 5] err_i2c_timeout starting from G12A
* [ 4] hdcp22_rndnum_err
* [ 3] nonce_rfrsh_rise
* [ 2] hpd_fall_intr
@@ -73,6 +85,9 @@
* Bit 30: 0 RW intr_stat: For each bit, write 1 to manually set the interrupt
* bit, read back the interrupt status.
* Bit 31 R IP interrupt status
+ * Bit 7 RW rxsense_fall starting from G12A
+ * Bit 6 RW rxsense_rise starting from G12A
+ * Bit 5 RW err_i2c_timeout starting from G12A
* Bit 2 RW hpd_fall
* Bit 1 RW hpd_rise
* Bit 0 RW IP interrupt
@@ -80,6 +95,9 @@
#define HDMITX_TOP_INTR_STAT (0x004)
/*
+ * [7] rxsense_fall starting from G12A
+ * [6] rxsense_rise starting from G12A
+ * [5] err_i2c_timeout starting from G12A
* [4] hdcp22_rndnum_err
* [3] nonce_rfrsh_rise
* [2] hpd_fall
@@ -91,6 +109,8 @@
#define HDMITX_TOP_INTR_CORE BIT(0)
#define HDMITX_TOP_INTR_HPD_RISE BIT(1)
#define HDMITX_TOP_INTR_HPD_FALL BIT(2)
+#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
+#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
* 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
@@ -140,7 +160,9 @@
*/
#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
-/* Bit 0 R filtered HPD status. */
+/* Bit 1 R filtered RxSense status
+ * Bit 0 R filtered HPD status.
+ */
#define HDMITX_TOP_STAT0 (0x00E)
#endif /* __MESON_DW_HDMI_H */
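
With the RxSense interrupt bits defined above, a handler could decode them the same way the existing code handles HPD. A hedged sketch, assuming it lives in meson_dw_hdmi.c where struct meson_dw_hdmi and its per-SoC accessors are defined; it is not the driver's actual handler:

static void meson_dw_hdmi_rxsense_sketch(struct meson_dw_hdmi *dw_hdmi)
{
	unsigned int stat;

	stat = dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);

	/* acknowledge what was latched, as dw_hdmi_top_irq() does */
	dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);

	if (stat & HDMITX_TOP_INTR_RXSENSE_RISE)
		DRM_DEBUG_DRIVER("RxSense asserted by the sink\n");

	if (stat & HDMITX_TOP_INTR_RXSENSE_FALL)
		DRM_DEBUG_DRIVER("RxSense released by the sink\n");
}
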
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 691a9fd16b36..bdbf925ff3e8 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -22,7 +22,6 @@
#include "meson_overlay.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* VD1_IF0_GEN_REG */
@@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
DRM_DEBUG_DRIVER("\n");
- /* Fallback is canvas provider is not available */
- if (!priv->canvas) {
- priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
- priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
- priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
- }
-
interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -524,8 +516,14 @@ static void meson_overlay_atomic_disable(struct drm_plane *plane,
priv->viu.vd1_enabled = false;
/* Disable VD1 */
- writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_GEN_REG + 0x17b0));
+ } else
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
}
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 6119a0224278..bf8f1fab63aa 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -38,7 +38,6 @@
#include "meson_plane.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* OSD_SCI_WH_M1 */
@@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
- if (priv->canvas)
- canvas_id_osd1 = priv->canvas_id_osd1;
- else
- canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+ canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
@@ -298,6 +294,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
+ priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
+ priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
+ priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
+ }
+
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -324,8 +327,12 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_drm *priv = meson_plane->priv;
/* Disable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ else
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 5c7e02c703bc..cfaf90501bb1 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -216,6 +216,29 @@
#define VIU_OSD2_FIFO_CTRL_STAT 0x1a4b
#define VIU_OSD2_TEST_RDDATA 0x1a4c
#define VIU_OSD2_PROT_CTRL 0x1a4e
+#define VIU_OSD2_MALI_UNPACK_CTRL 0x1abd
+#define VIU_OSD2_DIMM_CTRL 0x1acf
+
+#define VIU_OSD3_CTRL_STAT 0x3d80
+#define VIU_OSD3_CTRL_STAT2 0x3d81
+#define VIU_OSD3_COLOR_ADDR 0x3d82
+#define VIU_OSD3_COLOR 0x3d83
+#define VIU_OSD3_TCOLOR_AG0 0x3d84
+#define VIU_OSD3_TCOLOR_AG1 0x3d85
+#define VIU_OSD3_TCOLOR_AG2 0x3d86
+#define VIU_OSD3_TCOLOR_AG3 0x3d87
+#define VIU_OSD3_BLK0_CFG_W0 0x3d88
+#define VIU_OSD3_BLK0_CFG_W1 0x3d8c
+#define VIU_OSD3_BLK0_CFG_W2 0x3d90
+#define VIU_OSD3_BLK0_CFG_W3 0x3d94
+#define VIU_OSD3_BLK0_CFG_W4 0x3d98
+#define VIU_OSD3_BLK1_CFG_W4 0x3d99
+#define VIU_OSD3_BLK2_CFG_W4 0x3d9a
+#define VIU_OSD3_FIFO_CTRL_STAT 0x3d9c
+#define VIU_OSD3_TEST_RDDATA 0x3d9d
+#define VIU_OSD3_PROT_CTRL 0x3d9e
+#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
+#define VIU_OSD3_DIMM_CTRL 0x3da0
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
@@ -287,6 +310,27 @@
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
#define VD1_IF0_GEN_REG3 0x1aa7
+
+#define VIU_OSD_BLENDO_H_START_END 0x1aa9
+#define VIU_OSD_BLENDO_V_START_END 0x1aaa
+#define VIU_OSD_BLEND_GEN_CTRL0 0x1aab
+#define VIU_OSD_BLEND_GEN_CTRL1 0x1aac
+#define VIU_OSD_BLEND_DUMMY_DATA 0x1aad
+#define VIU_OSD_BLEND_CURRENT_XY 0x1aae
+
+#define VIU_OSD2_MATRIX_CTRL 0x1ab0
+#define VIU_OSD2_MATRIX_COEF00_01 0x1ab1
+#define VIU_OSD2_MATRIX_COEF02_10 0x1ab2
+#define VIU_OSD2_MATRIX_COEF11_12 0x1ab3
+#define VIU_OSD2_MATRIX_COEF20_21 0x1ab4
+#define VIU_OSD2_MATRIX_COEF22 0x1ab5
+#define VIU_OSD2_MATRIX_OFFSET0_1 0x1ab6
+#define VIU_OSD2_MATRIX_OFFSET2 0x1ab7
+#define VIU_OSD2_MATRIX_PRE_OFFSET0_1 0x1ab8
+#define VIU_OSD2_MATRIX_PRE_OFFSET2 0x1ab9
+#define VIU_OSD2_MATRIX_PROBE_COLOR 0x1aba
+#define VIU_OSD2_MATRIX_HL_COLOR 0x1abb
+#define VIU_OSD2_MATRIX_PROBE_POS 0x1abc
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -481,6 +525,82 @@
#define VPP_OSD_SCALE_COEF 0x1dcd
#define VPP_INT_LINE_NUM 0x1dce
+#define VPP_WRAP_OSD1_MATRIX_COEF00_01 0x3d60
+#define VPP_WRAP_OSD1_MATRIX_COEF02_10 0x3d61
+#define VPP_WRAP_OSD1_MATRIX_COEF11_12 0x3d62
+#define VPP_WRAP_OSD1_MATRIX_COEF20_21 0x3d63
+#define VPP_WRAP_OSD1_MATRIX_COEF22 0x3d64
+#define VPP_WRAP_OSD1_MATRIX_COEF13_14 0x3d65
+#define VPP_WRAP_OSD1_MATRIX_COEF23_24 0x3d66
+#define VPP_WRAP_OSD1_MATRIX_COEF15_25 0x3d67
+#define VPP_WRAP_OSD1_MATRIX_CLIP 0x3d68
+#define VPP_WRAP_OSD1_MATRIX_OFFSET0_1 0x3d69
+#define VPP_WRAP_OSD1_MATRIX_OFFSET2 0x3d6a
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1 0x3d6b
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2 0x3d6c
+#define VPP_WRAP_OSD1_MATRIX_EN_CTRL 0x3d6d
+
+#define VPP_WRAP_OSD2_MATRIX_COEF00_01 0x3d70
+#define VPP_WRAP_OSD2_MATRIX_COEF02_10 0x3d71
+#define VPP_WRAP_OSD2_MATRIX_COEF11_12 0x3d72
+#define VPP_WRAP_OSD2_MATRIX_COEF20_21 0x3d73
+#define VPP_WRAP_OSD2_MATRIX_COEF22 0x3d74
+#define VPP_WRAP_OSD2_MATRIX_COEF13_14 0x3d75
+#define VPP_WRAP_OSD2_MATRIX_COEF23_24 0x3d76
+#define VPP_WRAP_OSD2_MATRIX_COEF15_25 0x3d77
+#define VPP_WRAP_OSD2_MATRIX_CLIP 0x3d78
+#define VPP_WRAP_OSD2_MATRIX_OFFSET0_1 0x3d79
+#define VPP_WRAP_OSD2_MATRIX_OFFSET2 0x3d7a
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET0_1 0x3d7b
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET2 0x3d7c
+#define VPP_WRAP_OSD2_MATRIX_EN_CTRL 0x3d7d
+
+#define VPP_WRAP_OSD3_MATRIX_COEF00_01 0x3db0
+#define VPP_WRAP_OSD3_MATRIX_COEF02_10 0x3db1
+#define VPP_WRAP_OSD3_MATRIX_COEF11_12 0x3db2
+#define VPP_WRAP_OSD3_MATRIX_COEF20_21 0x3db3
+#define VPP_WRAP_OSD3_MATRIX_COEF22 0x3db4
+#define VPP_WRAP_OSD3_MATRIX_COEF13_14 0x3db5
+#define VPP_WRAP_OSD3_MATRIX_COEF23_24 0x3db6
+#define VPP_WRAP_OSD3_MATRIX_COEF15_25 0x3db7
+#define VPP_WRAP_OSD3_MATRIX_CLIP 0x3db8
+#define VPP_WRAP_OSD3_MATRIX_OFFSET0_1 0x3db9
+#define VPP_WRAP_OSD3_MATRIX_OFFSET2 0x3dba
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET0_1 0x3dbb
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
+#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+
+/* osd2 scaler */
+#define OSD2_VSC_PHASE_STEP 0x3d00
+#define OSD2_VSC_INI_PHASE 0x3d01
+#define OSD2_VSC_CTRL0 0x3d02
+#define OSD2_HSC_PHASE_STEP 0x3d03
+#define OSD2_HSC_INI_PHASE 0x3d04
+#define OSD2_HSC_CTRL0 0x3d05
+#define OSD2_HSC_INI_PAT_CTRL 0x3d06
+#define OSD2_SC_DUMMY_DATA 0x3d07
+#define OSD2_SC_CTRL0 0x3d08
+#define OSD2_SCI_WH_M1 0x3d09
+#define OSD2_SCO_H_START_END 0x3d0a
+#define OSD2_SCO_V_START_END 0x3d0b
+#define OSD2_SCALE_COEF_IDX 0x3d18
+#define OSD2_SCALE_COEF 0x3d19
+
+/* osd34 scaler */
+#define OSD34_SCALE_COEF_IDX 0x3d1e
+#define OSD34_SCALE_COEF 0x3d1f
+#define OSD34_VSC_PHASE_STEP 0x3d20
+#define OSD34_VSC_INI_PHASE 0x3d21
+#define OSD34_VSC_CTRL0 0x3d22
+#define OSD34_HSC_PHASE_STEP 0x3d23
+#define OSD34_HSC_INI_PHASE 0x3d24
+#define OSD34_HSC_CTRL0 0x3d25
+#define OSD34_HSC_INI_PAT_CTRL 0x3d26
+#define OSD34_SC_DUMMY_DATA 0x3d27
+#define OSD34_SC_CTRL0 0x3d28
+#define OSD34_SCI_WH_M1 0x3d29
+#define OSD34_SCO_H_START_END 0x3d2a
+#define OSD34_SCO_V_START_END 0x3d2b
/* viu2 */
#define VIU2_ADDR_START 0x1e00
#define VIU2_ADDR_END 0x1eff
@@ -1400,4 +1520,131 @@
#define OSDSR_YBIC_VCOEF0 0x3149
#define OSDSR_CBIC_VCOEF0 0x314a
+/* osd afbcd on gxtvbb */
+#define OSD1_AFBCD_ENABLE 0x31a0
+#define OSD1_AFBCD_MODE 0x31a1
+#define OSD1_AFBCD_SIZE_IN 0x31a2
+#define OSD1_AFBCD_HDR_PTR 0x31a3
+#define OSD1_AFBCD_FRAME_PTR 0x31a4
+#define OSD1_AFBCD_CHROMA_PTR 0x31a5
+#define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define OSD1_AFBCD_STATUS 0x31a8
+#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
+#define VIU_MISC_CTRL1 0x1a07
+
+/* add for gxm and 962e dv core2 */
+#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
+#define DOLBY_CORE2A_SWAP_CTRL2 0x3435
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_BLOCK_ID 0x3a00
+#define VPU_MAFBC_IRQ_RAW_STATUS 0x3a01
+#define VPU_MAFBC_IRQ_CLEAR 0x3a02
+#define VPU_MAFBC_IRQ_MASK 0x3a03
+#define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define VPU_MAFBC_COMMAND 0x3a05
+#define VPU_MAFBC_STATUS 0x3a06
+#define VPU_MAFBC_SURFACE_CFG 0x3a07
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
+#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
+#define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S0 0x3a16
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S0 0x3a17
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S0 0x3a18
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0 0x3a19
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
+#define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
+#define VPU_MAFBC_FORMAT_SPECIFIER_S1 0x3a32
+#define VPU_MAFBC_BUFFER_WIDTH_S1 0x3a33
+#define VPU_MAFBC_BUFFER_HEIGHT_S1 0x3a34
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S1 0x3a35
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S1 0x3a36
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S1 0x3a37
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S1 0x3a38
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S1 0x3a39
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S1 0x3a3a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S1 0x3a3b
+#define VPU_MAFBC_PREFETCH_CFG_S1 0x3a3c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S2 0x3a50
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S2 0x3a51
+#define VPU_MAFBC_FORMAT_SPECIFIER_S2 0x3a52
+#define VPU_MAFBC_BUFFER_WIDTH_S2 0x3a53
+#define VPU_MAFBC_BUFFER_HEIGHT_S2 0x3a54
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S2 0x3a55
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S2 0x3a56
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S2 0x3a57
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S2 0x3a58
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S2 0x3a59
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S2 0x3a5a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S2 0x3a5b
+#define VPU_MAFBC_PREFETCH_CFG_S2 0x3a5c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S3 0x3a70
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S3 0x3a71
+#define VPU_MAFBC_FORMAT_SPECIFIER_S3 0x3a72
+#define VPU_MAFBC_BUFFER_WIDTH_S3 0x3a73
+#define VPU_MAFBC_BUFFER_HEIGHT_S3 0x3a74
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S3 0x3a75
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S3 0x3a76
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S3 0x3a77
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S3 0x3a78
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S3 0x3a79
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S3 0x3a7a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S3 0x3a7b
+#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
+
+#define DOLBY_PATH_CTRL 0x1a0c
+#define OSD_PATH_MISC_CTRL 0x1a0e
+#define MALI_AFBCD_TOP_CTRL 0x1a0f
+
+#define VIU_OSD_BLEND_CTRL 0x39b0
+#define VIU_OSD_BLEND_CTRL1 0x39c0
+#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
+#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
+#define VIU_OSD_BLEND_DIN1_SCOPE_H 0x39b3
+#define VIU_OSD_BLEND_DIN1_SCOPE_V 0x39b4
+#define VIU_OSD_BLEND_DIN2_SCOPE_H 0x39b5
+#define VIU_OSD_BLEND_DIN2_SCOPE_V 0x39b6
+#define VIU_OSD_BLEND_DIN3_SCOPE_H 0x39b7
+#define VIU_OSD_BLEND_DIN3_SCOPE_V 0x39b8
+#define VIU_OSD_BLEND_DUMMY_DATA0 0x39b9
+#define VIU_OSD_BLEND_DUMMY_ALPHA 0x39ba
+#define VIU_OSD_BLEND_BLEND0_SIZE 0x39bb
+#define VIU_OSD_BLEND_BLEND1_SIZE 0x39bc
+#define VIU_OSD_BLEND_RO_CURRENT_XY 0x39bf
+
+#define VPP_OUT_H_V_SIZE 0x1da5
+
+#define VPP_VD2_HDR_IN_SIZE 0x1df0
+#define VPP_OSD1_IN_SIZE 0x1df1
+#define VPP_GCLK_CTRL2 0x1df2
+#define VD2_PPS_DUMMY_DATA 0x1df4
+#define VPP_OSD1_BLD_H_SCOPE 0x1df5
+#define VPP_OSD1_BLD_V_SCOPE 0x1df6
+#define VPP_OSD2_BLD_H_SCOPE 0x1df7
+#define VPP_OSD2_BLD_V_SCOPE 0x1df8
+#define VPP_WRBAK_CTRL 0x1df9
+#define VPP_SLEEP_CTRL 0x1dfa
+#define VD1_BLEND_SRC_CTRL 0x1dfb
+#define VD2_BLEND_SRC_CTRL 0x1dfc
+#define OSD1_BLEND_SRC_CTRL 0x1dfd
+#define OSD2_BLEND_SRC_CTRL 0x1dfe
+
+#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
+#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
+#define VPP_RDARB_MODE 0x3978
+#define VPP_RDARB_REQEN_SLV 0x3979
+#define VPU_RDARB_MODE_L2C1 0x279d
+
#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f6ba35a405f8..b39034745444 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -113,9 +113,12 @@
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_HDMI_PLL_CNTL7 0x338 /* 0xce offset in data sheet */
#define HDMI_PLL_RESET BIT(28)
+#define HDMI_PLL_RESET_G12A BIT(29)
#define HDMI_PLL_LOCK BIT(31)
+#define HDMI_PLL_LOCK_G12A (3 << 30)
#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
@@ -257,6 +260,10 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
@@ -271,11 +278,26 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, 0);
- }
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
- (val & HDMI_PLL_LOCK), 10, 0);
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x6a28dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x56540000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x3a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A) == HDMI_PLL_LOCK_G12A),
+ 10, 0);
+ }
/* Disable VCLK2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
@@ -288,8 +310,13 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
- regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
- VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
+ else
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+
/* enable vclk2 gate */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
@@ -396,8 +423,8 @@ struct meson_vclk_params {
},
[MESON_VCLK_HDMI_297000] = {
.pixel_freq = 297000,
- .pll_base_freq = 2970000,
- .pll_od1 = 1,
+ .pll_base_freq = 5940000,
+ .pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
@@ -476,32 +503,80 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
+
+ /* Enable and reset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x3 << 28, 0x3 << 28);
+
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+
+ /* G12A HDMI PLL needs specific parameters for 5.4GHz */
+ if (m >= 0xf7) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
+ } else {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0a691c00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x33771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39270000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x50540000);
+ }
+
+ do {
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, HDMI_PLL_RESET_G12A);
+
+ /* UN-Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, 0);
+
+ /* Poll for lock bits */
+ if (!regmap_read_poll_timeout(priv->hhi,
+ HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A)
+ == HDMI_PLL_LOCK_G12A),
+ 10, 100))
+ break;
+ } while (1);
}
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 16, pll_od_to_reg(od1) << 16);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 18, pll_od_to_reg(od2) << 18);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
-
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 20, pll_od_to_reg(od3) << 20);
}
#define XTAL_FREQ 24000
@@ -518,6 +593,7 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
#define HDMI_FRAC_MAX_GXBB 4096
#define HDMI_FRAC_MAX_GXL 1024
+#define HDMI_FRAC_MAX_G12A 131072
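
(A brief aside on the new constant: 131072 is 2^17, so the G12A fractional setting is expressed in 2^17 steps, versus 2^12 = 4096 on GXBB and 2^10 = 1024 on GXL/GXM; that finer granularity is presumably why the G12A frac values chosen further down in meson_vclk_set() are numerically much larger.)
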
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
@@ -534,6 +610,9 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
parent_freq *= 2;
}
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ frac_max = HDMI_FRAC_MAX_G12A;
+
/* We can have a perfect match !*/
if (pll_freq / m == parent_freq &&
pll_freq % m == 0)
@@ -559,7 +638,8 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
@@ -714,6 +794,23 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x140b4 : 0x18000;
+ break;
+ case 4320000:
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x1a3ee : 0;
+ break;
+ case 5940000:
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x8148 : 0x10000;
+ break;
+ }
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
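
As a quick illustration of the G12A lock handling introduced above (reset on bit 29, a two-bit lock field in bits 31:30, and the reset/poll cycle repeated until the PLL reports lock), here is a standalone sketch of the retry pattern with the register access stubbed out; none of these names come from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PLL_RESET_G12A  (1u << 29)
#define PLL_LOCK_G12A   (3u << 30)

/* Stand-in for the HHI PLL control register; a real driver goes through regmap. */
static uint32_t pll_cntl;
static int reset_cycles;

static void pll_update_bits(uint32_t mask, uint32_t val)
{
	pll_cntl = (pll_cntl & ~mask) | (val & mask);

	/* Pretend the PLL locks after the second reset/release cycle. */
	if (mask == PLL_RESET_G12A && val == 0 && ++reset_cycles >= 2)
		pll_cntl |= PLL_LOCK_G12A;
}

static bool pll_locked(void)
{
	return (pll_cntl & PLL_LOCK_G12A) == PLL_LOCK_G12A;
}

int main(void)
{
	do {
		pll_update_bits(PLL_RESET_G12A, PLL_RESET_G12A); /* assert reset */
		pll_update_bits(PLL_RESET_G12A, 0);              /* release reset */
	} while (!pll_locked());  /* retry until both lock bits are set */

	printf("locked after %d reset cycles\n", reset_cycles);
	return 0;
}

In the real driver the poll is bounded by regmap_read_poll_timeout() and the whole reset/poll cycle is retried on timeout, which is what the do/while loop in meson_hdmi_pll_set_params() above does.
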
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 66d73a932d19..6faca7313339 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -73,7 +73,9 @@
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -1675,8 +1677,13 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
/* Power Down Dacs */
writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index d622d817b6df..2c5341c881c4 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -37,7 +37,9 @@
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
struct meson_venc_cvbs {
struct drm_encoder encoder;
@@ -166,8 +168,13 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
@@ -179,13 +186,17 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
/* VDAC0 source is not from ATV */
writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
-
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ }
}
static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index e46e05f50bad..b59072342cae 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -25,7 +25,6 @@
#include "meson_viu.h"
#include "meson_vpp.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/**
@@ -91,8 +90,36 @@ static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
EOTF_COEFF_RIGHTSHIFT /* right shift */
};
-void meson_viu_set_osd_matrix(struct meson_drm *priv,
- enum viu_matrix_sel_e m_select,
+static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
+ int *m, bool csc_on)
+{
+ /* VPP WRAP OSD1 matrix */
+ writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1));
+ writel(m[2] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2));
+ writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF00_01));
+ writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF02_10));
+ writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
+ writel((m[11] & 0x1fff) << 16,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET0_1));
+ writel(m[20] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET2));
+
+ writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
+}
+
+static void meson_viu_set_osd_matrix(struct meson_drm *priv,
+ enum viu_matrix_sel_e m_select,
int *m, bool csc_on)
{
if (m_select == VIU_MATRIX_OSD) {
@@ -160,10 +187,10 @@ void meson_viu_set_osd_matrix(struct meson_drm *priv,
#define OSD_EOTF_LUT_SIZE 33
#define OSD_OETF_LUT_SIZE 41
-void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
- unsigned int *r_map, unsigned int *g_map,
- unsigned int *b_map,
- bool csc_on)
+static void
+meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
+ unsigned int *r_map, unsigned int *g_map,
+ unsigned int *b_map, bool csc_on)
{
unsigned int addr_port;
unsigned int data_port;
@@ -337,14 +364,24 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
meson_viu_load_matrix(priv);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
+ true);
/* Initialize OSD1 fifo control register */
reg = BIT(0) | /* Urgent DDR request priority */
- (4 << 5) | /* hold_fifo_lines */
- (3 << 10) | /* burst length 64 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24);
+ (4 << 5); /* hold_fifo_lines */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ reg |= (1 << 10) | /* burst length 32 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24) |
+ (1 << 31);
+ else
+ reg |= (3 << 10) | /* burst length 64 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24);
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
@@ -370,6 +407,30 @@ void meson_viu_init(struct meson_drm *priv)
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(4 << 29 |
+ 1 << 27 |
+ 1 << 26 | /* blend_din0 input to blend0 */
+ 1 << 25 | /* blend1_dout to blend2 */
+ 1 << 24 | /* blend1_din3 input to blend1 */
+ 1 << 20 |
+ 0 << 16 |
+ 1,
+ priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ writel_relaxed(3 << 8 |
+ 1 << 20,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
+ writel_relaxed(1 << 20,
+ priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
+ writel_bits_relaxed(0x3 << 2, 0x3 << 2,
+ priv->io_base + _REG(DOLBY_PATH_CTRL));
+ }
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index f9efb431e953..8c52a3455ef4 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -112,32 +112,39 @@ void meson_vpp_init(struct meson_drm *priv)
writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
- }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
- writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
- 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xfff << 20 | 0x1000,
+ priv->io_base + _REG(VPP_OFIFO_SIZE));
+ else
+ writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
+ 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
- /* Turn off preblend */
- writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Turn off POSTBLEND */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Force all planes off */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
- VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Setup default VD settings */
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ /* Turn off preblend */
+ writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Turn off POSTBLEND */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Force all planes off */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ }
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0aaedc554879..8c31e4422cae 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -113,7 +113,7 @@ struct mga_framebuffer {
};
struct mga_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct mga_framebuffer mfb;
void *sysram;
int size;
@@ -269,7 +269,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 6893934b26c0..5b7e64cac004 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -195,8 +195,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
goto err_alloc_fbi;
}
- info->par = mfbdev;
-
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
goto err_alloc_fbi;
@@ -209,17 +207,13 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
/* setup helper */
mfbdev->helper.fb = fb;
- strcpy(info->fix.id, "mgadrmfb");
-
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d96a9b32455e..bd42365a8aa8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -178,7 +178,6 @@ int mgag200_mm_init(struct mga_device *mdev)
ret = ttm_bo_device_init(&mdev->ttm.bdev,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -345,13 +344,8 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct mga_device *mdev;
+ struct drm_file *file_priv = filp->private_data;
+ struct mga_device *mdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- mdev = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 78c9e5a5e793..9f2029eca39f 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -21,6 +21,11 @@ config DRM_MSM
help
DRM/KMS driver for MSM/snapdragon.
+config DRM_MSM_GPU_STATE
+ bool
+ depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+ default y
+
config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 56a70c74af4e..7a05cbf2f820 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/gpu/drm/msm
-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
msm-y := \
adreno/adreno_device.o \
@@ -15,7 +15,6 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
- adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -96,6 +95,8 @@ msm-y := \
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d5f5e56422f5..e5fcefa49f19 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,9 +15,6 @@
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
@@ -30,94 +27,6 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
-{
- struct device *dev = &gpu->pdev->dev;
- const struct firmware *fw;
- struct device_node *np;
- struct resource r;
- phys_addr_t mem_phys;
- ssize_t mem_size;
- void *mem_region = NULL;
- int ret;
-
- if (!IS_ENABLED(CONFIG_ARCH_QCOM))
- return -EINVAL;
-
- np = of_get_child_by_name(dev->of_node, "zap-shader");
- if (!np)
- return -ENODEV;
-
- np = of_parse_phandle(np, "memory-region", 0);
- if (!np)
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- return ret;
-
- mem_phys = r.start;
- mem_size = resource_size(&r);
-
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
- if (IS_ERR(fw)) {
- DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return PTR_ERR(fw);
- }
-
- /* Figure out how much memory we need */
- mem_size = qcom_mdt_get_size(fw);
- if (mem_size < 0) {
- ret = mem_size;
- goto out;
- }
-
- /* Allocate memory for the firmware image */
- mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
- if (!mem_region) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Load the rest of the MDT
- *
- * Note that we could be dealing with two different paths, since
- * with upstream linux-firmware it would be in a qcom/ subdir..
- * adreno_request_fw() handles this, but qcom_mdt_load() does
- * not. But since we've already gotten thru adreno_request_fw()
- * we know which of the two cases it is:
- */
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- } else {
- char *newname;
-
- newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
-
- ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- kfree(newname);
- }
- if (ret)
- goto out;
-
- /* Send the image to the secure world */
- ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
- if (ret)
- DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
-
-out:
- if (mem_region)
- memunmap(mem_region);
-
- release_firmware(fw);
-
- return ret;
-}
-
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -563,8 +472,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -574,23 +481,9 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
if (loaded)
return a5xx_zap_shader_resume(gpu);
- /* We need SCM to be able to load the firmware */
- if (!qcom_scm_is_available()) {
- DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
- return -EPROBE_DEFER;
- }
-
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
- ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d1662a75c7ec..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -3,12 +3,31 @@
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
@@ -20,8 +39,7 @@ static irqreturn_t a6xx_gmu_irq(int irq, void *data)
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
@@ -45,8 +63,7 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
return IRQ_HANDLED;
@@ -165,10 +182,8 @@ static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
}
/* Wait for the GMU to get to its most idle state */
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-
return spin_until(a6xx_gmu_check_idle_level(gmu));
}
@@ -567,7 +582,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (!rpmh_init) {
a6xx_gmu_rpmh_init(gmu);
rpmh_init = true;
- } else if (state != GMU_RESET) {
+ } else {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -633,20 +648,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
-static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
-{
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
-
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
- ~A6XX_GMU_IRQ_MASK);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- ~A6XX_HFI_IRQ_MASK);
-
- enable_irq(gmu->gmu_irq);
- enable_irq(gmu->hfi_irq);
-}
-
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
disable_irq(gmu->gmu_irq);
@@ -656,21 +657,10 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int ret;
u32 val;
- /* Flush all the queues */
- a6xx_hfi_stop(gmu);
-
- /* Stop the interrupts */
- a6xx_gmu_irq_disable(gmu);
-
- /* Force off SPTP in case the GMU is managing it */
- a6xx_sptprac_disable(gmu);
-
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
@@ -680,37 +670,22 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
(val & 1), 100, 10000);
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
+}
- /* Force off the GX GSDC */
- regulator_force_disable(gmu->gx);
-
- /* Disable the resources */
- clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
- pm_runtime_put_sync(gmu->dev);
-
- /* Re-enable the resources */
- pm_runtime_get_sync(gmu->dev);
-
- /* Use a known rate to bring up the GMU */
- clk_set_rate(gmu->core_clk, 200000000);
- ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
-
- a6xx_gmu_irq_enable(gmu);
-
- ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
- if (!ret)
- ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
- /* Set the GPU back to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
-out:
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
- return ret;
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
@@ -723,19 +698,26 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
return 0;
+ gmu->hung = false;
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
+ if (ret) {
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
/* Set the bus quota to a reasonable value for boot */
icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
- a6xx_gmu_irq_enable(gmu);
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
@@ -746,14 +728,35 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
goto out;
ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
/* Set the GPU to the highest power frequency */
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_get(gmu->gxpd);
+
out:
- /* Make sure to turn off the boot OOB request on error */
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->dev);
+ }
return ret;
}
@@ -773,11 +776,12 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
/*
@@ -787,10 +791,19 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+ int ret = a6xx_gmu_wait_for_idle(gmu);
- /* Temporary until we can recover safely */
- BUG_ON(ret);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
+ == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
@@ -822,10 +835,37 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+}
+
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
icc_set_bw(gpu->icc_path, 0, 0);
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
@@ -948,25 +988,20 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
}
/* Return the 'arc-level' for the given frequency */
-static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_node *np;
- u32 val = 0;
+ unsigned int val;
if (!freq)
return 0;
- opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp))
return 0;
- np = dev_pm_opp_get_of_node(opp);
-
- if (np) {
- of_property_read_u32(np, "opp-level", &val);
- of_node_put(np);
- }
+ val = dev_pm_opp_get_level(opp);
dev_pm_opp_put(opp);
@@ -1002,7 +1037,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
u8 pindex = 0, sindex = 0;
- u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
/* Get the primary index that matches the arc level */
for (j = 0; j < pri_count; j++) {
@@ -1195,9 +1230,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
if (IS_ERR_OR_NULL(gmu->mmio))
return;
- pm_runtime_disable(gmu->dev);
a6xx_gmu_stop(a6xx_gpu);
+ pm_runtime_disable(gmu->dev);
+
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
a6xx_gmu_irq_disable(gmu);
a6xx_gmu_memory_free(gmu, gmu->hfi);
@@ -1223,7 +1264,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
- gmu->gx = devm_regulator_get(gmu->dev, "vdd");
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
@@ -1257,6 +1297,12 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c721d9165d8e..bedd8e6a63aa 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -27,9 +27,6 @@ struct a6xx_gmu_bo {
/* the GMU is coming up for the first time or back from a power collapse */
#define GMU_COLD_BOOT 1
-/* The GMU is being soft reset after a fault */
-#define GMU_RESET 2
-
/*
* These define the level of control that the GMU has - the higher the number
* the more things that the GMU hardware controls on its own.
@@ -52,11 +49,11 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct regulator *gx;
-
struct iommu_domain *domain;
u64 uncached_iova_base;
+ struct device *gxpd;
+
int idle_level;
struct a6xx_gmu_bo *hfi;
@@ -78,7 +75,7 @@ struct a6xx_gmu {
struct a6xx_hfi_queue queues[2];
- struct tasklet_struct hfi_tasklet;
+ bool hung;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index fefe773c989e..e74dce474250 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,8 @@
#include <linux/devfreq.h>
+#define GPU_PAS_ID 13
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -343,6 +345,20 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return 0;
}
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
@@ -491,7 +507,28 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+ * have no resource but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ }
out:
/*
@@ -678,13 +715,15 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- ret = a6xx_gmu_resume(a6xx_gpu);
-
gpu->needs_hw_init = true;
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ if (ret)
+ return ret;
+
msm_gpu_resume_devfreq(gpu);
- return ret;
+ return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
@@ -694,18 +733,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(a6xx_gpu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
return a6xx_gmu_stop(a6xx_gpu);
}
@@ -781,14 +808,16 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+#endif
},
.get_timestamp = a6xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 528a4cfe07cd..b46279eb18c5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,9 +46,8 @@ struct a6xx_gpu {
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 714ed6505e47..b907245d3d96 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -155,6 +155,7 @@ static const struct adreno_info gpulist[] = {
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
},
};
@@ -229,6 +230,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_sync(&pdev->dev);
DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 27898475cdf4..6f7f4114afcf 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -19,13 +19,148 @@
#include <linux/ascii85.h>
#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
+ /* Request the MDT file for the firmware */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir..
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Each GPU has a target specific zap shader firmware name to use */
+ if (!adreno_gpu->info->zapfw) {
+ zap_available = false;
+ DRM_DEV_ERROR(&pdev->dev,
+ "Zap shader firmware file not specified for this target\n");
+ return -ENODEV;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -63,6 +198,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ *value = gpu->global_faults;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 5db459bc28a7..0925606ec9b5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -252,6 +252,12 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+/*
+ * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
+ * out of secure mode
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b776fca571f3..dfdfa766da8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -46,6 +46,9 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -425,65 +428,6 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
- struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-
- /* Set up all the mixers and ctls reserved by this encoder */
- for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
- mixer = &cstate->mixers[i];
-
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
- /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->lm_ctl = last_valid_ctl;
- } else {
- mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->lm_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->lm_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- cstate->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->lm_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct drm_encoder *enc;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* Check for mixers on all encoders attached to this crtc */
- drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-}
-
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -533,10 +477,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!cstate->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
@@ -683,7 +624,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("frame done completion wait");
ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
- msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
if (!ret) {
DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
rc = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 5aa3307f3f0c..82bf16d61a45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -69,6 +69,9 @@
#define MAX_VDISPLAY_SPLIT 1080
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
/**
* enum dpu_enc_rc_events - events for resource control state machine
* @DPU_ENC_RC_EVENT_KICKOFF:
@@ -158,7 +161,7 @@ enum dpu_enc_rc_states {
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
@@ -196,7 +199,7 @@ struct dpu_encoder_virt {
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- atomic_t frame_done_timeout;
+ atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
struct timer_list vsync_event_timer;
@@ -520,8 +523,8 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
list_for_each_entry(cur_mode, &connector->modes, head) {
if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- cur_mode->vrefresh == adj_mode->vrefresh) {
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
@@ -959,10 +962,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter, ctl_iter;
+ struct drm_crtc *drm_crtc;
+ struct dpu_crtc_state *cstate;
+ struct dpu_rm_hw_iter hw_iter;
struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- int i = 0, ret;
+ struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm = 0, num_ctl = 0;
+ int i, j, ret;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -990,10 +997,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
+ drm_for_each_crtc(drm_crtc, drm_enc->dev)
+ if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
+ break;
+
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
@@ -1001,21 +1012,41 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
+ }
+
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
+ num_ctl++;
}
- dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
+ num_lm++;
}
+ cstate = to_dpu_crtc_state(drm_crtc->state);
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = hw_lm[i];
+ cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ }
+
+ cstate->num_mixers = num_lm;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1023,18 +1054,38 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i];
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
+
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
+
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n",
+ i);
+ goto error;
+ }
+
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
@@ -1042,6 +1093,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
}
dpu_enc->mode_set_complete = true;
+
+error:
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1182,7 +1236,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
}
/* after phys waits for frame-done, should be no more frames pending */
- if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&dpu_enc->frame_done_timer);
}
@@ -1339,7 +1393,7 @@ static void dpu_encoder_frame_done_callback(
}
if (!dpu_enc->frame_busy_mask[0]) {
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
del_timer(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
@@ -1547,8 +1601,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
if (!ctl)
continue;
- if (phys->split_role != ENC_ROLE_SLAVE)
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
+ if (!async && phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
+
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
@@ -1800,11 +1860,20 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- atomic_set(&dpu_enc->frame_done_timeout,
- DPU_FRAME_DONE_TIMEOUT * 1000 /
- drm_enc->crtc->state->adjusted_mode.vrefresh);
- mod_timer(&dpu_enc->frame_done_timer, jiffies +
- ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+ /*
+ * Asynchronous frames don't handle FRAME_DONE events. As such, they
+ * shouldn't enable the frame_done watchdog since it will always time
+ * out.
+ */
+ if (!async) {
+ unsigned long timeout_ms;
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+ }
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc, async);
@@ -2124,7 +2193,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
- } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
@@ -2170,7 +2239,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
spin_lock_init(&dpu_enc->enc_spinlock);
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
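
The frame-done watchdog is now derived from the refresh rate rather than reusing a fixed millisecond value; a small sketch of the conversion (the rates in the comment are only illustrative):

/*
 * Frames-to-milliseconds conversion used for the frame-done watchdog:
 * 5 frames is ~83 ms at 60 Hz and ~166 ms at 30 Hz.
 */
static unsigned long example_frame_done_timeout_ms(int vrefresh)
{
	return DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 / vrefresh;
}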
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index db94f3d3bea3..97fb868a4ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -200,6 +200,7 @@ struct dpu_encoder_irq {
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
@@ -228,6 +229,7 @@ struct dpu_encoder_phys {
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
@@ -251,19 +253,6 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
}
/**
- * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
- * mode specific operations
- * @base: Baseclass physical encoder structure
- * @hw_intf: Hardware interface to the intf registers
- * @timing_params: Current timing parameter
- */
-struct dpu_encoder_phys_vid {
- struct dpu_encoder_phys base;
- struct dpu_hw_intf *hw_intf;
- struct intf_timing_params timing_params;
-};
-
-/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a399e1edd313..973737fb5c9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -404,7 +404,8 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
return;
}
- tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
/* enable external TE after kickoff to avoid premature autorefresh */
tc_cfg.hw_vsync_mode = 0;
@@ -424,7 +425,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
- mode->vtotal, mode->vrefresh);
+ mode->vtotal, drm_mode_vrefresh(mode));
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
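
For reference, a worked example of the tearcheck vsync divider computed above (the clock rate, vtotal and refresh values are assumed, not taken from any particular panel):

/*
 * vsync_count = vsync_hz / (vtotal * vrefresh); e.g. with a 19.2 MHz vsync
 * clock, vtotal = 2000 lines and a 60 Hz mode:
 *   19200000 / (2000 * 60) = 160 counts per line.
 */
static u32 example_tearcheck_vsync_count(u32 vsync_hz, u32 vtotal, u32 vrefresh)
{
	return vsync_hz / (vtotal * vrefresh);
}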
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3c4eb470a82c..1b7a335a6140 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -18,14 +18,14 @@
#include "dpu_trace.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
@@ -44,7 +44,7 @@ static bool dpu_encoder_phys_vid_is_master(
}
static void drm_mode_to_intf_timing_params(
- const struct dpu_encoder_phys_vid *vid_enc,
+ const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
@@ -92,7 +92,7 @@ static void drm_mode_to_intf_timing_params(
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
@@ -143,11 +143,11 @@ static u32 get_vertical_total(const struct intf_timing_params *timing)
* lines based on the chip worst case latencies.
*/
static u32 programmable_fetch_get_num_lines(
- struct dpu_encoder_phys_vid *vid_enc,
+ struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
- vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
@@ -155,26 +155,26 @@ static u32 programmable_fetch_get_num_lines(
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
- DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
@@ -194,8 +194,6 @@ static u32 programmable_fetch_get_num_lines(
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
- struct dpu_encoder_phys_vid *vid_enc =
- to_dpu_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
@@ -203,10 +201,10 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
- if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
- vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
@@ -216,12 +214,12 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
f.fetch_start = vfp_fetch_start_vsync_counter;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
@@ -231,7 +229,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
struct drm_display_mode *adj_mode)
{
if (phys_enc)
- DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -242,7 +240,6 @@ static bool dpu_encoder_phys_vid_mode_fixup(
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct drm_display_mode mode;
struct intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
@@ -256,13 +253,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
}
mode = phys_enc->cached_mode;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
@@ -271,32 +267,30 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
- drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
- DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
- intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
-
- vid_enc->timing_params = timing_params;
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -353,22 +347,10 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
-static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_crtc_state *dpu_cstate;
-
- if (!phys_enc)
- return false;
-
- dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
-
- return dpu_cstate->num_ctls > 1;
-}
-
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+ return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
@@ -396,19 +378,15 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc || !phys_enc->dpu_kms) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
- DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
@@ -419,7 +397,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
bool enable)
{
int ret = 0;
- struct dpu_encoder_phys_vid *vid_enc;
int refcount;
if (!phys_enc) {
@@ -428,7 +405,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
}
refcount = atomic_read(&phys_enc->vblank_refcount);
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
@@ -453,7 +429,7 @@ end:
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
@@ -461,43 +437,17 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
- DPU_ERROR("invalid encoder/device\n");
- return;
- }
- priv = phys_enc->parent->dev->dev_private;
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == phys_enc->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- DPU_ERROR("hw_intf not assigned\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
- dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
@@ -510,12 +460,13 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
- DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -524,16 +475,13 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- kfree(vid_enc);
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
@@ -589,7 +537,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
@@ -597,7 +544,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR("invalid encoder/parameters\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
@@ -609,7 +555,7 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
- DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
}
@@ -618,7 +564,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
unsigned long lock_flags;
int ret;
@@ -629,16 +574,13 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
priv = phys_enc->parent->dev->dev_private;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "\n");
-
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
@@ -647,7 +589,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -666,7 +608,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret);
+ phys_enc->hw_intf->idx - INTF_0, ret);
}
}
@@ -677,25 +619,21 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
- struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
-
/*
* Video mode must flush CTL before enabling timing engine
* Video encoders need to turn on their interfaces now
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0);
+ phys_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -704,16 +642,13 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_vid *vid_enc;
int ret;
if (!phys_enc)
return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0,
+ phys_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
@@ -732,19 +667,16 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
return -EINVAL;
- return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
@@ -771,7 +703,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_phys_vid *vid_enc = NULL;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -780,18 +711,16 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
goto fail;
}
- vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
- if (!vid_enc) {
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
ret = -ENOMEM;
goto fail;
}
- phys_enc = &vid_enc->base;
-
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
@@ -825,13 +754,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
- DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
fail:
DPU_ERROR("failed to create encoder\n");
- if (vid_enc)
+ if (phys_enc)
dpu_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index ac75cfc267f4..31e9ef96ca5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -73,9 +73,6 @@
#define DPU_NAME_SIZE 12
-/* timeout in frames waiting for frame done */
-#define DPU_FRAME_DONE_TIMEOUT 60
-
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b01183b309b9..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -387,7 +387,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
- ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
ot_params.rd = true;
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* implicit fence and fb prepare by hand here.
*/
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
if (fence)
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 9bf9d6065c55..7b9edc21bc2c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -59,10 +59,10 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
- total_lines_x100 = mode->vtotal * mode->vrefresh;
+ total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
- __func__, mode->vtotal, mode->vrefresh);
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
if (!new_state->fb)
return 0;
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index fb423d309e91..67ef300559cf 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -75,7 +75,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv;
int ret;
- if (!gpu)
+ if (!gpu || !gpu->funcs->gpu_state_get)
return -ENODEV;
show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0bdd93648761..31deb87abfc6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -39,9 +39,10 @@
* MSM_GEM_INFO ioctl.
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 4
+#define MSM_VERSION_MINOR 5
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -457,6 +458,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_WORK(&priv->free_work, msm_gem_free_work);
+ init_llist_head(&priv->free_list);
+
INIT_LIST_HEAD(&priv->inactive_list);
drm_mode_config_init(ddev);
@@ -964,6 +968,11 @@ static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -984,6 +993,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1019,7 +1029,7 @@ static struct drm_driver msm_driver = {
.irq_uninstall = msm_irq_uninstall,
.enable_vblank = msm_enable_vblank,
.disable_vblank = msm_disable_vblank,
- .gem_free_object = msm_gem_free_object,
+ .gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
@@ -1027,7 +1037,6 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
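
A hedged user-space sketch of the SUBMITQUEUE_QUERY ioctl wired up above. The DRM_MSM_SUBMITQUEUE_QUERY command, MSM_SUBMITQUEUE_PARAM_FAULTS and the drm_msm_submitqueue_query layout come from the msm_drm.h uapi header, which is not part of this hunk, so treat the snippet as illustrative rather than authoritative.

/*
 * Query the per-queue fault counter added in this series. Passing len = 0
 * first asks the kernel how much data it would return.
 */
#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* uapi header, assumed to define the query ioctl */

static int example_query_queue_faults(int fd, uint32_t queue_id,
				      uint64_t *faults)
{
	struct drm_msm_submitqueue_query req = {
		.id = queue_id,
		.param = MSM_SUBMITQUEUE_PARAM_FAULTS,
		.len = 0,		/* probe: kernel fills in expected size */
	};
	int ret = drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_QUERY,
				      &req, sizeof(req));
	if (ret)
		return ret;

	req.data = (uintptr_t)faults;
	req.len = sizeof(*faults);	/* kernel clamps to its own size */
	return drmCommandWriteRead(fd, DRM_MSM_SUBMITQUEUE_QUERY,
				   &req, sizeof(req));
}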
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c56dade2c1dc..e20e6b429804 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -33,7 +33,7 @@
#include <linux/types.h>
#include <linux/of_graph.h>
#include <linux/of_device.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#include <linux/kthread.h>
#include <drm/drmP.h>
@@ -185,6 +185,10 @@ struct msm_drm_private {
/* list of GEM objects: */
struct list_head inactive_list;
+ /* worker for delayed free of objects: */
+ struct work_struct free_work;
+ struct llist_head free_list;
+
struct workqueue_struct *wq;
unsigned int num_planes;
@@ -292,7 +296,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -325,6 +328,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_free_work(struct work_struct *work);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
@@ -420,6 +424,8 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c03e860ba737..d088299babf3 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,13 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &msm_fb_ops;
- strcpy(fbi->fix.id, "msm");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = paddr;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18ca651ab942..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(msm_obj->resv);
+ fobj = reservation_object_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(msm_obj->resv);
+ fence = reservation_object_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(msm_obj->resv));
+ reservation_object_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(msm_obj->resv, fence);
+ reservation_object_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(msm_obj->resv, fence);
+ reservation_object_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ ret = reservation_object_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = msm_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
@@ -805,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+ vma->aspace != NULL ? vma->aspace->name : NULL,
vma->iova, vma->mapped ? "mapped" : "unmapped",
vma->inuse);
@@ -853,8 +852,18 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (llist_add(&msm_obj->freed, &priv->free_list))
+ queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+ struct drm_gem_object *obj = &msm_obj->base;
+ struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -883,15 +892,35 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->resv == &msm_obj->_resv)
- reservation_object_fini(msm_obj->resv);
-
drm_gem_object_release(obj);
mutex_unlock(&msm_obj->lock);
kfree(msm_obj);
}
+void msm_gem_free_work(struct work_struct *work)
+{
+ struct msm_drm_private *priv =
+ container_of(work, struct msm_drm_private, free_work);
+ struct drm_device *dev = priv->dev;
+ struct llist_node *freed;
+ struct msm_gem_object *msm_obj, *next;
+
+ while ((freed = llist_del_all(&priv->free_list))) {
+
+ mutex_lock(&dev->struct_mutex);
+
+ llist_for_each_entry_safe(msm_obj, next,
+ freed, freed)
+ free_object(msm_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (need_resched())
+ break;
+ }
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
@@ -945,12 +974,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv) {
- msm_obj->resv = resv;
- } else {
- msm_obj->resv = &msm_obj->_resv;
- reservation_object_init(msm_obj->resv);
- }
+ if (resv)
+ msm_obj->base.resv = resv;
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1026,6 +1051,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See comments above new_inode() why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
return obj;
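
The free path above defers the actual teardown to a workqueue via a lockless list; a generic sketch of that idiom follows (the types and names are illustrative, not from the driver):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct llist_node node;
	/* ... payload ... */
};

static LLIST_HEAD(example_free_list);

/*
 * Producers push nodes onto the lockless list; llist_add() returns true only
 * when the list was previously empty, so the worker is queued once per batch.
 */
static void example_queue_free(struct deferred_item *item,
			       struct workqueue_struct *wq,
			       struct work_struct *work)
{
	if (llist_add(&item->node, &example_free_list))
		queue_work(wq, work);
}

/* The worker drains everything in one go and frees outside the hot path. */
static void example_free_worker(struct work_struct *work)
{
	struct deferred_item *item, *next;
	struct llist_node *freed = llist_del_all(&example_free_list);

	llist_for_each_entry_safe(item, next, freed, node)
		kfree(item);
}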
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2064fac871b8..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -84,9 +84,7 @@ struct msm_gem_object {
struct list_head vmas; /* list of msm_gem_vma */
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
+ struct llist_node freed;
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
@@ -133,6 +131,7 @@ enum msm_gem_lock {
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_free_work(struct work_struct *work);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -163,7 +162,10 @@ struct msm_gem_submit {
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
- struct msm_gem_object *obj;
+ union {
+ struct msm_gem_object *obj;
+ uint32_t handle;
+ };
uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6da6c7..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- return msm_obj->resv;
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..1b681306aca3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -74,27 +74,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
kfree(submit);
}
-static inline unsigned long __must_check
-copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(from, n))
- return __copy_from_user_inatomic(to, from, n);
- return -EFAULT;
-}
-
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
- spin_lock(&file->table_lock);
- pagefault_disable();
-
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
@@ -103,15 +90,10 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
- pagefault_enable();
- spin_unlock(&file->table_lock);
- if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
- ret = -EFAULT;
- goto out;
- }
- spin_lock(&file->table_lock);
- pagefault_disable();
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
+ i = 0;
+ goto out;
}
/* at least one of READ and/or WRITE flags should be set: */
@@ -121,19 +103,28 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
!(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
- goto out_unlock;
+ i = 0;
+ goto out;
}
+ submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, submit_bo.handle);
+ obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -142,7 +133,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
- submit_bo.handle, i);
+ submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -155,7 +146,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- pagefault_enable();
spin_unlock(&file->table_lock);
out:
@@ -173,7 +163,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->resv->lock);
+ ww_mutex_unlock(&msm_obj->base.resv->lock);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -196,7 +186,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (ret)
goto fail;
@@ -218,7 +208,7 @@ fail:
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -244,7 +234,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv,
+ ret = reservation_object_reserve_shared(msm_obj->base.resv,
1);
if (ret)
return ret;
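
The submit_lookup_objects() rework above replaces the copy_from_user-under-spinlock trick with a simpler two-phase approach; a condensed sketch of the pattern (hypothetical example_* names, reference counting elided for brevity):

/*
 * Phase 1: copy all user descriptors while sleeping/faulting is still legal.
 * Phase 2: take the IDR spinlock once and resolve every handle, never
 * touching user memory while the lock is held.
 */
struct example_user_bo { u32 handle; u32 flags; };
struct example_bo { u32 handle; struct drm_gem_object *obj; };

static int example_lookup_all(struct drm_file *file,
			      const struct example_user_bo __user *ubos,
			      struct example_bo *bos, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct example_user_bo ubo;

		if (copy_from_user(&ubo, &ubos[i], sizeof(ubo)))
			return -EFAULT;
		bos[i].handle = ubo.handle;
	}

	spin_lock(&file->table_lock);
	for (i = 0; i < nr; i++) {
		bos[i].obj = idr_find(&file->object_idr, bos[i].handle);
		if (!bos[i].obj) {
			spin_unlock(&file->table_lock);
			return -EINVAL;
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}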
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 49c04829cf34..fcf7a83f0e6f 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
vma->mapped = true;
- if (aspace->mmu)
+ if (aspace && aspace->mmu)
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, prot);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 10babd18e286..bf4ee2766431 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -443,24 +443,15 @@ static void recover_worker(struct work_struct *work)
if (submit) {
struct task_struct *task;
+ /* Increment the fault counts */
+ gpu->global_faults++;
+ submit->queue->faults++;
+
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
-
- /*
- * So slightly annoying, in other paths like
- * mmap'ing gem buffers, mmap_sem is acquired
- * before struct_mutex, which means we can't
- * hold struct_mutex across the call to
- * get_cmdline(). But submits are retired
- * from the same in-order workqueue, so we can
- * safely drop the lock here without worrying
- * about the submit going away.
- */
- mutex_unlock(&dev->struct_mutex);
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
put_task_struct(task);
- mutex_lock(&dev->struct_mutex);
}
if (comm && cmd) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6241986bab51..f2739cd97cea 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -104,6 +104,9 @@ struct msm_gpu {
/* does gpu need hw_init? */
bool needs_hw_init;
+ /* number of GPU hangs (for all contexts) */
+ int global_faults;
+
/* worker for handling active-list retiring: */
struct work_struct retire_work;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4d62790cd425..12bb54cefd46 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,13 +38,8 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int ret;
- pm_runtime_get_sync(mmu->dev);
- ret = iommu_attach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
-
- return ret;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
@@ -52,9 +47,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
@@ -63,9 +56,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
-// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-// pm_runtime_put_sync(mmu->dev);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
@@ -75,9 +66,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
- pm_runtime_put_sync(mmu->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75b5b7f..f160ec40a39b 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -120,6 +120,47 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 0ee1ca8a316a..98e9bda91e80 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -253,12 +253,12 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
/*
- * DRM_BUS_FLAG_PIXDATA_ defines are controller centric,
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric,
* controllers VDCTRL0_DOTCLK is display centric.
* Drive on positive edge -> display samples on falling edge
- * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
*/
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 581404e6544d..378c5dd692b0 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,7 +1,7 @@
-ccflags-y += -I$(src)/include
-ccflags-y += -I$(src)/include/nvkm
-ccflags-y += -I$(src)/nvkm
-ccflags-y += -I$(src)
+ccflags-y += -I $(srctree)/$(src)/include
+ccflags-y += -I $(srctree)/$(src)/include/nvkm
+ccflags-y += -I $(srctree)/$(src)/nvkm
+ccflags-y += -I $(srctree)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 00cd9ab8948d..1f1395148ff0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -5,22 +5,31 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
- select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
select MXM_WMI if ACPI && X86
select POWER_SUPPLY
# Similar to i915, we need to select ACPI_VIDEO and it's dependencies
- select BACKLIGHT_LCD_SUPPORT if ACPI && X86
select BACKLIGHT_CLASS_DEVICE if ACPI && X86
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
- select DRM_VM
help
Choose this option for open-source NVIDIA support.
+config NOUVEAU_LEGACY_CTX_SUPPORT
+ bool "Nouveau legacy context support"
+ depends on DRM_NOUVEAU
+ select DRM_LEGACY
+ default y
+ help
+ There was a version of the nouveau DDX that relied on the legacy
+ context ioctls not returning an error. That DDX is long obsolete,
+ so this option provides a way to disable those ioctls. For uapi
+ compatibility with the old nouveau DDX this should stay enabled by
+ default, but modern distros should consider turning it off.
+
config NOUVEAU_PLATFORM_DRIVER
bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
NV50_DISP_INTERLOCK__SIZE
} type;
u32 data;
+ u32 wimm;
};
void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->set.or = head->func->or != NULL;
}
- if (asyh->state.mode_changed)
+ if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
+ asyh->or = armh->or;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
return ret;
}
+ wndw->interlock.wimm = wndw->interlock.data;
wndw->immd = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
- if (interlock) {
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
asyw->image.mode = 0;
asyw->image.interval = 1;
}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
- interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+ interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
wndw->immd->point(wndw, asyw);
wndw->immd->update(wndw, interlock);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index eef54e9b5d77..7957eafa5f0e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -38,6 +38,7 @@ struct nvkm_i2c_bus {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
};
int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
@@ -57,6 +58,7 @@ struct nvkm_i2c_aux {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
u32 intr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 55c0fa451163..832da8e0020d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -358,15 +358,6 @@ nouveau_display_hpd_work(struct work_struct *work)
#ifdef CONFIG_ACPI
-/*
- * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
- * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
- * This should be dropped once that is merged.
- */
-#ifndef ACPI_VIDEO_NOTIFY_PROBE
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#endif
-
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
void *data)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5020265bfbd9..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+ true, false, 0, &device);
if (ret)
return ret;
@@ -802,10 +803,15 @@ fail_display:
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
+ int ret = 0;
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->master.base);
+ if (ret) {
+ NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
+ return ret;
+ }
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -925,6 +931,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
@@ -941,6 +948,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
+ if (ret) {
+ NV_ERROR(drm, "resume failed with: %d\n", ret);
+ return ret;
+ }
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
@@ -1094,8 +1105,11 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
- DRIVER_KMS_LEGACY_CONTEXT,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+ | DRIVER_KMS_LEGACY_CONTEXT
+#endif
+ ,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
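The #if block above keeps DRIVER_KMS_LEGACY_CONTEXT out of driver_features unless the new Kconfig symbol is set. A hedged alternative sketch of the same conditional using IS_ENABLED(), so the feature mask stays a single constant expression; this is not what the patch does, only an equivalent formulation, and the variable name is illustrative:

#include <linux/kconfig.h>
#include <drm/drm_drv.h>

/* Illustrative: same feature set as driver_stub above, expressed without #if. */
static const u32 nouveau_driver_features_sketch =
	DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
	(IS_ENABLED(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT) ?
		DRIVER_KMS_LEGACY_CONTEXT : 0);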
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index da847244479d..35ff0ca01a3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -60,8 +60,6 @@
struct nouveau_channel;
struct platform_device;
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
#include "nouveau_fence.h"
#include "nouveau_bios.h"
#include "nouveau_vmm.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0d3cd4e05728..73cc3217068a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -365,14 +365,10 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_unlock;
}
- info->skip_vt_switch = 1;
-
- info->par = fbcon;
/* setup helper */
fbcon->helper.fb = &fb->base;
- strcpy(info->fix.id, "nouveaufb");
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
else
@@ -387,9 +383,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->base.pitches[0],
- fb->base.format->depth);
- drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index db9d52047ef8..73a7eeba3973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -32,7 +32,7 @@
#include "nouveau_display.h"
struct nouveau_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
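The "must be first" comment added above documents a layout contract: generic fbdev-helper code holds a struct drm_fb_helper pointer and expects it to coincide with the start of the driver's wrapper structure. A minimal, self-contained sketch of how such a contract can be enforced at build time; the type names here are illustrative, not from the patch:

#include <stddef.h>

struct helper { int dummy; };

struct wrapper {
	struct helper helper;		/* must be first */
	unsigned int saved_flags;
};

/* Fails to compile if someone reorders the members later. */
_Static_assert(offsetof(struct wrapper, helper) == 0,
	       "helper must remain the first member");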
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1543c2f8d3d3..f0daf958e03a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -168,9 +168,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return drm_legacy_mmap(filp, vma);
-
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
@@ -239,7 +236,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv167_chipset = {
+ .name = "TU117",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
u64 mmio_base, mmio_size;
u32 boot0, strap;
void __iomem *map;
- int ret = -EEXIST;
- int i;
+ int ret = -EEXIST, i;
+ unsigned chipset;
mutex_lock(&nv_devices_mutex);
if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
strap = ioread32_native(map + 0x101000);
iounmap(map);
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
+ else
+ override_boot0 = 0x20004000;
+ }
+
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
+
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
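For the NvChipset override path added above, the synthetic BOOT0 value and the decode that follows it round-trip cleanly. A hedged worked example using the new TU117 entry (chipset 0x167); this standalone check only mirrors the two bit manipulations shown in the hunk:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t chipset = 0x167;				/* TU117 */
	uint32_t boot0 = ((chipset & 0x1ff) << 20) | 0x000000a1;

	assert(boot0 == 0x167000a1);				/* encoded   */
	assert(((boot0 & 0x1ff00000) >> 20) == 0x167);		/* decoded   */
	assert((boot0 & 0x1f000000) != 0);	/* takes the new-style decode path */
	return 0;
}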
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
- failsafe = cfg;
+ if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ /* Try to respect sink limits too when selecting
+ * lowest link configuration.
+ */
+ if (!failsafe ||
+ (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+ failsafe = cfg;
+ }
+
if (failsafe && cfg[1].rate < dataKBps)
break;
}
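The added condition above makes the failsafe pick sink-aware: an entry the source can drive is always better than nothing, but once a candidate exists it is only replaced by entries that also fit the sink's lane count and bandwidth. A simplified, hedged sketch of that selection rule over a generic rate table; the real table layout, the early break on data rate, and the exact outp_* comparisons are not reproduced here:

struct link_cfg { int nr; int bw; int rate; };

static const struct link_cfg *
pick_failsafe(const struct link_cfg *tbl, int n,
	      int src_nr, int src_bw, int sink_nr, int sink_bw)
{
	const struct link_cfg *failsafe = NULL;
	int i;

	for (i = 0; i < n; i++) {	/* table assumed highest rate first */
		const struct link_cfg *c = &tbl[i];

		if (c->nr > src_nr || c->bw > src_bw)
			continue;	/* the source cannot drive this entry */

		/* Keep the first usable entry, then only prefer entries that
		 * also respect the sink's limits. */
		if (!failsafe || (c->nr <= sink_nr && c->bw <= sink_bw))
			failsafe = c;
	}
	return failsafe;
}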
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index d131cca999dd..10f2aa9f29a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -23,38 +23,55 @@ void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
*/
case 17:
subpack1_high = (raw_frame[16] << 16);
+ /* fall through */
case 16:
subpack1_high |= (raw_frame[15] << 8);
+ /* fall through */
case 15:
subpack1_high |= raw_frame[14];
+ /* fall through */
case 14:
subpack1_low = (raw_frame[13] << 24);
+ /* fall through */
case 13:
subpack1_low |= (raw_frame[12] << 16);
+ /* fall through */
case 12:
subpack1_low |= (raw_frame[11] << 8);
+ /* fall through */
case 11:
subpack1_low |= raw_frame[10];
+ /* fall through */
case 10:
subpack0_high = (raw_frame[9] << 16);
+ /* fall through */
case 9:
subpack0_high |= (raw_frame[8] << 8);
+ /* fall through */
case 8:
subpack0_high |= raw_frame[7];
+ /* fall through */
case 7:
subpack0_low = (raw_frame[6] << 24);
+ /* fall through */
case 6:
subpack0_low |= (raw_frame[5] << 16);
+ /* fall through */
case 5:
subpack0_low |= (raw_frame[4] << 8);
+ /* fall through */
case 4:
subpack0_low |= raw_frame[3];
+ /* fall through */
case 3:
header = (raw_frame[2] << 16);
+ /* fall through */
case 2:
header |= (raw_frame[1] << 8);
+ /* fall through */
case 1:
header |= raw_frame[0];
+ /* fall through */
case 0:
break;
}
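This hunk, and several that follow (dma/usernv04.c, fifo/nv04.c, fifo/nv40.c, bios/perf.c, bios/pll.c, clk/base.c, clk/mcp77.c, fb/ramnv40.c, mxm/nv50.c), only adds /* fall through */ annotations so that intentional case fall-through is not flagged once the tree is built with -Wimplicit-fallthrough. A small, hedged standalone example of the annotation in use; the claim here is that GCC 7+ accepts a comment of this form while an unannotated fall-through draws a warning, which is the usual behaviour at the warning level the kernel enables:

#include <stdio.h>

static void describe(int n)
{
	switch (n) {
	case 2:
		printf("at least two\n");
		/* fall through */
	case 1:
		printf("at least one\n");
		break;
	default:
		printf("something else\n");
	}
}

int main(void)
{
	describe(2);	/* prints both lines: case 2 deliberately falls into case 1 */
	return 0;
}

Removing the /* fall through */ comment above is what would trigger the warning the nouveau hunks are silencing.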
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
index 49ef7e57aad4..7f1adab21a5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
@@ -122,6 +122,7 @@ nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
break;
case NV_MEM_ACCESS_WO:
dmaobj->flags0 |= 0x00008000;
+ /* fall through */
case NV_MEM_ACCESS_RW:
dmaobj->flags2 |= 0x00000002;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index ad707ff176cc..93493b335d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
@@ -117,8 +117,10 @@ nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
switch (mthd) {
case 0x0000 ... 0x0000: /* subchannel's engine -> software */
nvkm_wr32(device, 0x003280, (engine &= ~mask));
+ /* fall through */
case 0x0180 ... 0x01fc: /* handle -> instance */
data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
+ /* fall through */
case 0x0100 ... 0x017c:
case 0x0200 ... 0x1ffc: /* pass method down to sw */
if (!(engine & mask) && sw)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
index 8c7ba32763c4..47c16821c37f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c
@@ -81,6 +81,7 @@ nv40_fifo_init(struct nvkm_fifo *base)
case 0x49:
case 0x4b:
nvkm_wr32(device, 0x002230, 0x00000001);
+ /* fall through */
case 0x40:
case 0x41:
case 0x42:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3ba7f50198b..a3dcb09a40ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -94,6 +94,8 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
return ret;
bar_len = device->func->resource_size(device, bar_nr);
+ if (!bar_len)
+ return -ENOMEM;
if (bar_nr == 3 && bar->bar2_halve)
bar_len >>= 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 157b076a1272..f23a0ccc2bec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
struct nvkm_device *device = bar->base.subdev.device;
static struct lock_class_key bar1_lock;
static struct lock_class_key bar2_lock;
- u64 start, limit;
+ u64 start, limit, size;
int ret;
ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- limit = start + device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, 3);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar2_lock, "bar2", &bar->bar2_vmm);
@@ -164,10 +167,15 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- limit = start + device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, 1);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar1_lock, "bar1", &bar->bar1_vmm);
+ if (ret)
+ return ret;
atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
bar->bar1_vmm->debug = bar->base.subdev.debug;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
index c3068358f695..7112992e0e38 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/perf.c
@@ -135,6 +135,7 @@ nvbios_perfEp(struct nvkm_bios *bios, int idx,
break;
case 0x30:
info->script = nvbios_rd16(bios, perf + 0x02);
+ /* fall through */
case 0x35:
info->fanspeed = nvbios_rd08(bios, perf + 0x06);
info->voltage = nvbios_rd08(bios, perf + 0x07);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index e6e804cee2bc..bda6cc9a7aaf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -134,6 +134,7 @@ pll_map(struct nvkm_bios *bios)
device->chipset == 0xaa ||
device->chipset == 0xac)
return g84_pll_mapping;
+ /* fall through */
default:
return NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index ba6a868d4c95..40e564524b7a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -90,6 +90,7 @@ nvkm_cstate_valid(struct nvkm_clk *clk, struct nvkm_cstate *cstate,
case NVKM_CLK_BOOST_NONE:
if (clk->base_khz && freq > clk->base_khz)
return false;
+ /* fall through */
case NVKM_CLK_BOOST_BIOS:
if (clk->boost_khz && freq > clk->boost_khz)
return false;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
index 1c21b8b53b78..4f000237796f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/mcp77.c
@@ -363,6 +363,7 @@ mcp77_clk_prog(struct nvkm_clk *base)
switch (clk->vsrc) {
case nv_clk_src_cclk:
mast |= 0x00400000;
+ /* fall through */
default:
nvkm_wr32(device, 0x4600, clk->vdiv);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 8bcb7e79a0cb..456aed1f2a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1070,7 +1070,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
nvkm_error(subdev, "unable to calc plls\n");
return -EINVAL;
}
- nvkm_debug(subdev, "sucessfully calced PLLs for clock %i kHz"
+ nvkm_debug(subdev, "successfully calced PLLs for clock %i kHz"
" (refclock: %i kHz)\n", next->freq, ret);
} else {
/* calculate refpll coefficients */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
index 2b12e388f47a..5f4c287d7943 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.c
@@ -131,11 +131,13 @@ nv40_ram_prog(struct nvkm_ram *base)
nvkm_mask(device, 0x00402c, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004048, ram->coef);
nvkm_wr32(device, 0x004030, ram->coef);
+ /* fall through */
case 0x43:
case 0x49:
case 0x4b:
nvkm_mask(device, 0x004038, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x00403c, ram->coef);
+ /* fall through */
default:
nvkm_mask(device, 0x004020, 0xc0771100, ram->ctrl);
nvkm_wr32(device, 0x004024, ram->coef);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 4c1f547da463..b4e7404fe660 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -105,9 +105,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
int ret;
+
AUX_TRACE(aux, "acquire");
mutex_lock(&aux->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+
+ if (aux->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&aux->mutex);
return ret;
@@ -145,6 +151,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
}
}
+void
+nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "init");
+ mutex_lock(&aux->mutex);
+ aux->enabled = true;
+ mutex_unlock(&aux->mutex);
+}
+
+void
+nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "fini");
+ mutex_lock(&aux->mutex);
+ aux->enabled = false;
+ mutex_unlock(&aux->mutex);
+}
+
int
nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 7d56c4ba693c..08f6b2ee64ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -16,6 +16,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_init(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 4f197b15acf6..ecacb22834d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_aux *aux;
u32 mask;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_fini(aux);
+ }
+
+ list_for_each_entry(bus, &i2c->bus, head) {
+ nvkm_i2c_bus_fini(bus);
+ }
+
if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
@@ -180,6 +190,7 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_aux *aux;
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_init(pad);
@@ -189,6 +200,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
nvkm_i2c_bus_init(bus);
}
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_init(aux);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
index 807a2b67bd64..ed50cc3736b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
BUS_TRACE(bus, "init");
if (bus->func->init)
bus->func->init(bus);
+
+ mutex_lock(&bus->mutex);
+ bus->enabled = true;
+ mutex_unlock(&bus->mutex);
+}
+
+void
+nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus)
+{
+ BUS_TRACE(bus, "fini");
+ mutex_lock(&bus->mutex);
+ bus->enabled = false;
+ mutex_unlock(&bus->mutex);
}
void
@@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
int ret;
+
BUS_TRACE(bus, "acquire");
mutex_lock(&bus->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+
+ if (bus->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&bus->mutex);
return ret;
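Taken together, the aux.c, base.c and bus.c hunks gate every acquire on an enabled flag that nvkm_i2c_fini() clears and nvkm_i2c_init() sets, so transfers attempted across suspend fail with -EIO instead of touching powered-down pads. A hedged, userspace-flavoured distillation of that pattern, with a pthread mutex standing in for the kernel mutex and illustrative names throughout:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct channel {
	pthread_mutex_t mutex;
	bool enabled;
};

/* init/fini counterparts: flip the flag under the lock. */
static void channel_set_enabled(struct channel *ch, bool enabled)
{
	pthread_mutex_lock(&ch->mutex);
	ch->enabled = enabled;
	pthread_mutex_unlock(&ch->mutex);
}

/* Acquire keeps the mutex held on success, mirroring the kernel code;
 * a disabled channel is refused immediately with -EIO. */
static int channel_acquire(struct channel *ch)
{
	pthread_mutex_lock(&ch->mutex);
	if (!ch->enabled) {
		pthread_mutex_unlock(&ch->mutex);
		return -EIO;
	}
	return 0;	/* caller releases the mutex when done */
}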
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index bea0dd33961e..465464bba58b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -18,6 +18,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_bus **);
void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *);
int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index fa93f964e6a4..41640e0584ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1783,7 +1783,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- if (inst && vmm->func->part) {
+ if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
index 844971e5e874..2a6150ab5611 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/nv50.c
@@ -159,6 +159,7 @@ mxm_dcb_sanitise_entry(struct nvkm_bios *bios, void *data, int idx, u16 pdcb)
break;
case 0x0e: /* eDP, falls through to DPint */
ctx.outp[1] |= 0x00010000;
+ /* fall through */
case 0x07: /* DP internal, wtf is this?? HP8670w */
ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
type = DCB_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index a349cb61961e..7b0bcb494b5c 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -6,23 +6,12 @@ config DRM_OMAP_ENCODER_OPA362
Driver for OPA362 external analog TV amplifier controlled
through a GPIO.
-config DRM_OMAP_ENCODER_TFP410
- tristate "TFP410 DPI to DVI Encoder"
- help
- Driver for TFP410 DPI to DVI encoder.
-
config DRM_OMAP_ENCODER_TPD12S015
tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
-config DRM_OMAP_CONNECTOR_DVI
- tristate "DVI Connector"
- depends on I2C
- help
- Driver for a generic DVI connector.
-
config DRM_OMAP_CONNECTOR_HDMI
tristate "HDMI Connector"
help
@@ -33,12 +22,6 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
help
Driver for a generic analog TV connector.
-config DRM_OMAP_PANEL_DPI
- tristate "Generic DPI panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DPI panels.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index d99659e1381b..1db34d4fed64 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 28a3ce8f88d2..6c0561101874 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -35,50 +35,9 @@ static void tvc_disconnect(struct omap_dss_device *src,
{
}
-static int tvc_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tvc_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static const struct omap_dss_device_ops tvc_ops = {
.connect = tvc_connect,
.disconnect = tvc_disconnect,
-
- .enable = tvc_enable,
- .disable = tvc_disable,
};
static int tvc_probe(struct platform_device *pdev)
@@ -97,6 +56,7 @@ static int tvc_probe(struct platform_device *pdev)
dssdev->ops = &tvc_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
@@ -109,12 +69,9 @@ static int tvc_probe(struct platform_device *pdev)
static int __exit tvc_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- tvc_disable(dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
deleted file mode 100644
index 24b14f44248e..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Generic DVI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <drm/drm_edid.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct i2c_adapter *i2c_adapter;
-
- struct gpio_desc *hpd_gpio;
-
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- bool hpd_enabled;
- /* mutex for hpd fields above */
- struct mutex hpd_lock;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int dvic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void dvic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int dvic_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void dvic_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int dvic_ddc_read(struct i2c_adapter *adapter,
- unsigned char *buf, u16 count, u8 offset)
-{
- int r, retries;
-
- for (retries = 3; retries > 0; retries--) {
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &offset,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = count,
- .buf = buf,
- }
- };
-
- r = i2c_transfer(adapter, msgs, 2);
- if (r == 2)
- return 0;
-
- if (r != -EAGAIN)
- break;
- }
-
- return r < 0 ? r : -EIO;
-}
-
-static int dvic_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r, l, bytes_read;
-
- l = min(EDID_LENGTH, len);
- r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
- if (r)
- return r;
-
- bytes_read = l;
-
- /* if there are extensions, read second block */
- if (len > EDID_LENGTH && edid[0x7e] > 0) {
- l = min(EDID_LENGTH, len - EDID_LENGTH);
-
- r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
- l, EDID_LENGTH);
- if (r)
- return r;
-
- bytes_read += l;
- }
-
- return bytes_read;
-}
-
-static bool dvic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- unsigned char out;
- int r;
-
- if (ddata->hpd_gpio)
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-
- if (!ddata->i2c_adapter)
- return true;
-
- r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0);
-
- return r == 0;
-}
-
-static void dvic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops dvic_ops = {
- .connect = dvic_connect,
- .disconnect = dvic_disconnect,
-
- .enable = dvic_enable,
- .disable = dvic_disable,
-
- .read_edid = dvic_read_edid,
- .detect = dvic_detect,
-
- .register_hpd_cb = dvic_register_hpd_cb,
- .unregister_hpd_cb = dvic_unregister_hpd_cb,
-};
-
-static irqreturn_t dvic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (dvic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int dvic_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct device_node *adapter_node;
- struct i2c_adapter *adapter;
- struct gpio_desc *gpio;
- int r;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "DVI HPD", ddata);
- if (r)
- return r;
- }
-
- adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
- if (adapter_node) {
- adapter = of_get_i2c_adapter_by_node(adapter_node);
- of_node_put(adapter_node);
- if (adapter == NULL) {
- dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- return 0;
-}
-
-static int dvic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = dvic_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &dvic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- if (ddata->hpd_gpio)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
- if (ddata->i2c_adapter)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_EDID;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit dvic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(&ddata->dssdev);
-
- dvic_disable(dssdev);
-
- i2c_put_adapter(ddata->i2c_adapter);
-
- mutex_destroy(&ddata->hpd_lock);
-
- return 0;
-}
-
-static const struct of_device_id dvic_of_match[] = {
- { .compatible = "omapdss,dvi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dvic_of_match);
-
-static struct platform_driver dvi_connector_driver = {
- .probe = dvic_probe,
- .remove = __exit_p(dvic_remove),
- .driver = {
- .name = "connector-dvi",
- .of_match_table = dvic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dvi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DVI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index e602fa4a50a4..68d6f6e44b03 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -41,44 +41,6 @@ static void hdmic_disconnect(struct omap_dss_device *src,
{
}
-static int hdmic_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void hdmic_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -113,9 +75,6 @@ static const struct omap_dss_device_ops hdmic_ops = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
- .enable = hdmic_enable,
- .disable = hdmic_disable,
-
.detect = hdmic_detect,
.register_hpd_cb = hdmic_register_hpd_cb,
.unregister_hpd_cb = hdmic_unregister_hpd_cb,
@@ -181,6 +140,7 @@ static int hdmic_probe(struct platform_device *pdev)
dssdev->ops = &hdmic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
dssdev->ops_flags = ddata->hpd_gpio
@@ -196,12 +156,9 @@ static int hdmic_probe(struct platform_device *pdev)
static int __exit hdmic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- hdmic_disable(dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 4fefd80f53bb..29a5a130ebd1 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -41,48 +41,20 @@ static void opa362_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int opa362_enable(struct omap_dss_device *dssdev)
+static void opa362_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(dssdev->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void opa362_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(dssdev->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static const struct omap_dss_device_ops opa362_ops = {
@@ -116,7 +88,6 @@ static int opa362_probe(struct platform_device *pdev)
dssdev->ops = &opa362_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
@@ -141,13 +112,7 @@ static int __exit opa362_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- opa362_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
+ opa362_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
deleted file mode 100644
index f1a748353279..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * TFP410 DPI-to-DVI encoder driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *pd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tfp410_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void tfp410_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static int tfp410_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void tfp410_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static const struct omap_dss_device_ops tfp410_ops = {
- .connect = tfp410_connect,
- .disconnect = tfp410_disconnect,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static int tfp410_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- /* Powerdown GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse powerdown gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->pd_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tfp410_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tfp410_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tfp410_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
- { .compatible = "omapdss,ti,tfp410", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tfp410_of_match);
-
-static struct platform_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = __exit_p(tfp410_remove),
- .driver = {
- .name = "tfp410",
- .of_match_table = tfp410_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tfp410_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 94de55fd8884..bc03752d2762 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -62,35 +62,6 @@ static void tpd_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int tpd_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tpd_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -124,8 +95,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
static const struct omap_dss_device_ops tpd_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
- .enable = tpd_enable,
- .disable = tpd_disable,
.detect = tpd_detect,
.register_hpd_cb = tpd_register_hpd_cb,
.unregister_hpd_cb = tpd_unregister_hpd_cb,
@@ -198,7 +167,6 @@ static int tpd_probe(struct platform_device *pdev)
dssdev->ops = &tpd_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
@@ -225,14 +193,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tpd_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
deleted file mode 100644
index 465120809eb3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Generic MIPI DPI Panel Driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-#include <linux/backlight.h>
-
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct backlight_device *backlight;
-
- struct gpio_desc *enable_gpio;
- struct regulator *vcc_supply;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int panel_dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void panel_dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int panel_dpi_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- r = regulator_enable(ddata->vcc_supply);
- if (r) {
- src->ops->disable(src);
- return r;
- }
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- backlight_enable(ddata->backlight);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void panel_dpi_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- backlight_disable(ddata->backlight);
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- regulator_disable(ddata->vcc_supply);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static const struct omap_dss_device_ops panel_dpi_ops = {
- .connect = panel_dpi_connect,
- .disconnect = panel_dpi_disconnect,
-
- .enable = panel_dpi_enable,
- .disable = panel_dpi_disable,
-
- .get_timings = panel_dpi_get_timings,
-};
-
-static int panel_dpi_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int r;
- struct display_timing timing;
- struct gpio_desc *gpio;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- /*
- * Many different panels are supported by this driver and there are
- * probably very different needs for their reset pins in regards to
- * timing and order relative to the enable gpio. So for now it's just
- * ensured that the reset line isn't active.
- */
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
- if (IS_ERR(ddata->vcc_supply))
- return PTR_ERR(ddata->vcc_supply);
-
- ddata->backlight = devm_of_find_backlight(&pdev->dev);
-
- if (IS_ERR(ddata->backlight))
- return PTR_ERR(ddata->backlight);
-
- r = of_get_display_timing(node, "panel-timing", &timing);
- if (r) {
- dev_err(&pdev->dev, "failed to get video timing\n");
- return r;
- }
-
- videomode_from_timing(&timing, &ddata->vm);
-
- return 0;
-}
-
-static int panel_dpi_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = panel_dpi_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &pdev->dev;
- dssdev->ops = &panel_dpi_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit panel_dpi_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- panel_dpi_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id panel_dpi_of_match[] = {
- { .compatible = "omapdss,panel-dpi", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, panel_dpi_of_match);
-
-static struct platform_driver panel_dpi_driver = {
- .probe = panel_dpi_probe,
- .remove = __exit_p(panel_dpi_remove),
- .driver = {
- .name = "panel-dpi",
- .of_match_table = panel_dpi_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(panel_dpi_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 29692a5217c5..741a5e324767 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -24,6 +24,8 @@
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_connector.h>
+
#include <video/mipi_display.h>
#include <video/of_display_timing.h>
@@ -41,6 +43,7 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
+ struct omap_dss_device *src;
struct videomode vm;
@@ -141,7 +144,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u8 buf[1];
@@ -157,14 +160,14 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 buf[2] = { dcs_cmd, param };
return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
@@ -173,7 +176,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 cmd;
int r;
@@ -228,7 +231,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
static int dsicm_set_update_window(struct panel_drv_data *ddata,
u16 x, u16 y, u16 w, u16 h)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u16 x1 = x;
u16 x2 = x + w - 1;
@@ -275,7 +278,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (ddata->ulps_enabled)
@@ -309,18 +312,13 @@ err:
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (!ddata->ulps_enabled)
return 0;
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err1;
- }
-
+ src->ops->enable(src);
src->ops->dsi.enable_hs(src, ddata->channel, true);
r = _dsicm_enable_te(ddata, true);
@@ -347,7 +345,7 @@ err2:
enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
ddata->ulps_enabled = false;
}
-err1:
+
dsicm_queue_ulps_work(ddata);
return r;
@@ -366,7 +364,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata)
static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r = 0;
int level;
@@ -414,7 +412,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 errors = 0;
int r;
@@ -446,7 +444,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
@@ -478,7 +476,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -528,7 +526,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -603,7 +601,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata)
static int dsicm_power_on(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
@@ -649,11 +647,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
goto err_vddi;
}
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err_vddi;
- }
+ src->ops->enable(src);
dsicm_hw_reset(ddata);
@@ -722,7 +716,7 @@ err_vpnl:
static void dsicm_power_off(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
src->ops->dsi.disable_video_output(src, ddata->channel);
@@ -776,6 +770,7 @@ static int dsicm_connect(struct omap_dss_device *src,
return r;
}
+ ddata->src = src;
return 0;
}
@@ -785,28 +780,17 @@ static void dsicm_disconnect(struct omap_dss_device *src,
struct panel_drv_data *ddata = to_panel_data(dst);
src->ops->dsi.release_vc(src, ddata->channel);
+ ddata->src = NULL;
}
-static int dsicm_enable(struct omap_dss_device *dssdev)
+static void dsicm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "enable\n");
-
mutex_lock(&ddata->lock);
- if (!omapdss_device_is_connected(dssdev)) {
- r = -ENODEV;
- goto err;
- }
-
- if (omapdss_device_is_enabled(dssdev)) {
- r = 0;
- goto err;
- }
-
src->ops->dsi.bus_lock(src);
r = dsicm_power_on(ddata);
@@ -816,27 +800,22 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
if (r)
goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
mutex_unlock(&ddata->lock);
dsicm_bl_power(ddata, true);
- return 0;
+ return;
err:
- dev_dbg(&ddata->pdev->dev, "enable failed\n");
+ dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
mutex_unlock(&ddata->lock);
- return r;
}
static void dsicm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "disable\n");
-
dsicm_bl_power(ddata, false);
mutex_lock(&ddata->lock);
@@ -845,23 +824,19 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
src->ops->dsi.bus_lock(src);
- if (omapdss_device_is_enabled(dssdev)) {
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
- }
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ dsicm_power_off(ddata);
src->ops->dsi.bus_unlock(src);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
mutex_unlock(&ddata->lock);
}
static void dsicm_framedone_cb(int err, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
src->ops->dsi.bus_unlock(src);
@@ -870,7 +845,7 @@ static void dsicm_framedone_cb(int err, void *data)
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int old;
int r;
@@ -896,7 +871,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
te_timeout_work.work);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
@@ -908,7 +883,7 @@ static int dsicm_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
@@ -954,7 +929,7 @@ err:
static int dsicm_sync(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "sync\n");
@@ -970,7 +945,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev)
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (enable)
@@ -990,7 +965,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
mutex_lock(&ddata->lock);
@@ -1041,7 +1016,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
int first = 1;
int plen;
@@ -1123,7 +1098,7 @@ static void dsicm_ulps_work(struct work_struct *work)
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
ulps_work.work);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
mutex_lock(&ddata->lock);
@@ -1140,29 +1115,32 @@ static void dsicm_ulps_work(struct work_struct *work)
mutex_unlock(&ddata->lock);
}
-static void dsicm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int dsicm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ connector->display_info.width_mm = ddata->width_mm;
+ connector->display_info.height_mm = ddata->height_mm;
+
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
int ret = 0;
- if (vm->hactive != ddata->vm.hactive)
+ if (mode->hdisplay != ddata->vm.hactive)
ret = -EINVAL;
- if (vm->vactive != ddata->vm.vactive)
+ if (mode->vdisplay != ddata->vm.vactive)
ret = -EINVAL;
if (ret) {
dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- vm->hactive, vm->vactive);
+ mode->hdisplay, mode->vdisplay);
dev_warn(dssdev->dev, "panel resolution: %d x %d",
ddata->vm.hactive, ddata->vm.vactive);
}
@@ -1170,15 +1148,6 @@ static int dsicm_check_timings(struct omap_dss_device *dssdev,
return ret;
}
-static void dsicm_get_size(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *width = ddata->width_mm;
- *height = ddata->height_mm;
-}
-
static const struct omap_dss_device_ops dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
@@ -1186,7 +1155,7 @@ static const struct omap_dss_device_ops dsicm_ops = {
.enable = dsicm_enable,
.disable = dsicm_disable,
- .get_timings = dsicm_get_timings,
+ .get_modes = dsicm_get_modes,
.check_timings = dsicm_check_timings,
};
@@ -1194,8 +1163,6 @@ static const struct omap_dss_driver dsicm_dss_driver = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_size = dsicm_get_size,
-
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
@@ -1305,8 +1272,10 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->ops = &dsicm_ops;
dssdev->driver = &dsicm_dss_driver;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -1385,8 +1354,9 @@ static int __exit dsicm_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- dsicm_disable(dssdev);
- omapdss_device_disconnect(dssdev->src, dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ dsicm_disable(dssdev);
+ omapdss_device_disconnect(ddata->src, dssdev);
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index f6ef8ff964dd..99f2350d462c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -123,52 +123,28 @@ static void lb035q02_disconnect(struct omap_dss_device *src,
{
}
-static int lb035q02_enable(struct omap_dss_device *dssdev)
+static void lb035q02_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void lb035q02_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int lb035q02_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops lb035q02_ops = {
@@ -178,7 +154,7 @@ static const struct omap_dss_device_ops lb035q02_ops = {
.enable = lb035q02_enable,
.disable = lb035q02_disable,
- .get_timings = lb035q02_get_timings,
+ .get_modes = lb035q02_get_modes,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -221,16 +197,19 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DE is active LOW
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index f445de6369f7..c2409815a204 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -118,50 +118,26 @@ static void nec_8048_disconnect(struct omap_dss_device *src,
{
}
-static int nec_8048_enable(struct omap_dss_device *dssdev)
+static void nec_8048_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
gpiod_set_value_cansleep(ddata->res_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void nec_8048_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
gpiod_set_value_cansleep(ddata->res_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int nec_8048_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops nec_8048_ops = {
@@ -171,7 +147,7 @@ static const struct omap_dss_device_ops nec_8048_ops = {
.enable = nec_8048_enable,
.disable = nec_8048_disable,
- .get_timings = nec_8048_get_timings,
+ .get_modes = nec_8048_get_modes,
};
static int nec_8048_probe(struct spi_device *spi)
@@ -216,10 +192,13 @@ static int nec_8048_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 64b1369cb274..9c545de430f6 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -62,29 +62,22 @@ static void sharp_ls_disconnect(struct omap_dss_device *src,
{
}
-static int sharp_ls_enable(struct omap_dss_device *dssdev)
+static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
- if (r != 0)
- return r;
+ if (r)
+ dev_err(dssdev->dev, "%s: failed to enable regulator\n",
+ __func__);
}
+}
- r = src->ops->enable(src);
- if (r) {
- regulator_disable(ddata->vcc);
- return r;
- }
+static void sharp_ls_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
@@ -94,19 +87,11 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void sharp_ls_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 0);
@@ -115,33 +100,35 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->resb_gpio, 0);
/* wait at least 5 vsyncs after disabling the LCD */
-
msleep(100);
+}
- src->ops->disable(src);
+static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
if (ddata->vcc)
regulator_disable(ddata->vcc);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops sharp_ls_ops = {
.connect = sharp_ls_connect,
.disconnect = sharp_ls_disconnect,
+ .pre_enable = sharp_ls_pre_enable,
.enable = sharp_ls_enable,
.disable = sharp_ls_disable,
+ .post_disable = sharp_ls_post_disable,
- .get_timings = sharp_ls_get_timings,
+ .get_modes = sharp_ls_get_modes,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -220,15 +207,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev->dev = &pdev->dev;
dssdev->ops = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -243,7 +233,10 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- sharp_ls_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev)) {
+ sharp_ls_disable(dssdev);
+ sharp_ls_post_disable(dssdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index e04663856b31..2038def14ba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -516,17 +516,9 @@ static void acx565akm_disconnect(struct omap_dss_device *src,
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- r = src->ops->enable(src);
- if (r) {
- pr_err("%s sdi enable failed\n", __func__);
- return r;
- }
-
/*FIXME tweak me */
msleep(50);
@@ -562,7 +554,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "%s\n", __func__);
@@ -585,56 +576,32 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
/* FIXME need to tweak this delay */
msleep(100);
-
- src->ops->disable(src);
}
-static int acx565akm_enable(struct omap_dss_device *dssdev)
+static void acx565akm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
mutex_lock(&ddata->mutex);
- r = acx565akm_panel_power_on(dssdev);
+ acx565akm_panel_power_on(dssdev);
mutex_unlock(&ddata->mutex);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void acx565akm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
mutex_lock(&ddata->mutex);
acx565akm_panel_power_off(dssdev);
mutex_unlock(&ddata->mutex);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int acx565akm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops acx565akm_ops = {
@@ -644,7 +611,7 @@ static const struct omap_dss_device_ops acx565akm_ops = {
.enable = acx565akm_enable,
.disable = acx565akm_disable,
- .get_timings = acx565akm_get_timings,
+ .get_modes = acx565akm_get_modes,
};
static int acx565akm_probe(struct spi_device *spi)
@@ -739,10 +706,13 @@ static int acx565akm_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -766,7 +736,8 @@ static int acx565akm_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- acx565akm_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ acx565akm_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 7ddc8c574a61..2ad161e33106 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -35,6 +35,8 @@ struct panel_drv_data {
struct videomode vm;
+ struct backlight_device *backlight;
+
struct spi_device *spi_dev;
};
@@ -169,24 +171,12 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
{
}
-static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
+ int r = 0;
- dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
- dssdev->state);
+ dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
/* three times command zero */
r |= jbt_ret_write_0(ddata, 0x00);
@@ -197,8 +187,8 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
usleep_range(1000, 2000);
if (r) {
- dev_warn(dssdev->dev, "transfer error\n");
- goto transfer_err;
+ dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
+ return;
}
/* deep standby out */
@@ -268,20 +258,17 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-transfer_err:
+ if (r)
+ dev_err(dssdev->dev, "%s: write error\n", __func__);
- return r ? -EIO : 0;
+ backlight_enable(ddata->backlight);
}
static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- if (!omapdss_device_is_enabled(dssdev))
- return;
+ backlight_disable(ddata->backlight);
dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
@@ -289,18 +276,14 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops td028ttec1_ops = {
@@ -310,7 +293,7 @@ static const struct omap_dss_device_ops td028ttec1_ops = {
.enable = td028ttec1_panel_enable,
.disable = td028ttec1_panel_disable,
- .get_timings = td028ttec1_panel_get_timings,
+ .get_modes = td028ttec1_panel_get_modes,
};
static int td028ttec1_panel_probe(struct spi_device *spi)
@@ -334,6 +317,10 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (ddata == NULL)
return -ENOMEM;
+ ddata->backlight = devm_of_find_backlight(&spi->dev);
+ if (IS_ERR(ddata->backlight))
+ return PTR_ERR(ddata->backlight);
+
dev_set_drvdata(&spi->dev, ddata);
ddata->spi_dev = spi;
@@ -344,15 +331,18 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 8440fcb744d9..0b692fc7e5ea 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -320,22 +320,11 @@ static void tpo_td043_disconnect(struct omap_dss_device *src,
{
}
-static int tpo_td043_enable(struct omap_dss_device *dssdev)
+static void tpo_td043_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
/*
* If we are resuming from system suspend, SPI clocks might not be
* enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -343,38 +332,27 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (!ddata->spi_suspended) {
r = tpo_td043_power_on(ddata);
if (r) {
- src->ops->disable(src);
- return r;
+ dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
+ __func__, r);
+ return;
}
}
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
if (!ddata->spi_suspended)
tpo_td043_power_off(ddata);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops tpo_td043_ops = {
@@ -384,7 +362,7 @@ static const struct omap_dss_device_ops tpo_td043_ops = {
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .get_timings = tpo_td043_get_timings,
+ .get_modes = tpo_td043_get_modes,
};
static int tpo_td043_probe(struct spi_device *spi)
@@ -442,15 +420,18 @@ static int tpo_td043_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -467,7 +448,8 @@ static int tpo_td043_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- tpo_td043_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ tpo_td043_disable(dssdev);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 472f56e3de70..f8dad99013e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include "dss.h"
#include "omapdss.h"
@@ -112,13 +113,12 @@ void omapdss_device_put(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_device_put);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port)
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
struct omap_dss_device *dssdev;
list_for_each_entry(dssdev, &omapdss_devices_list, list) {
- if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+ if (dssdev->dev->of_node == node)
return omapdss_device_get(dssdev);
}
@@ -126,13 +126,10 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
}
/*
- * Search for the next device starting at @from. The type argument specfies
- * which device types to consider when searching. Searching for multiple types
- * is supported by and'ing their type flags. Release the reference to the @from
- * device, and acquire a reference to the returned device if found.
+ * Search for the next output device starting at @from. Release the reference to
+ * the @from device, and acquire a reference to the returned device if found.
*/
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
{
struct omap_dss_device *dssdev;
struct list_head *list;
@@ -160,15 +157,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
goto done;
}
- /*
- * Accept display entities if the display type is requested,
- * and output entities if the output type is requested.
- */
- if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) &&
- !dssdev->output_type)
- goto done;
- if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id &&
- dssdev->next)
+ if (dssdev->id &&
+ (dssdev->next || dssdev->bridge || dssdev->panel))
goto done;
}
@@ -183,7 +173,12 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get_next);
+EXPORT_SYMBOL(omapdss_device_next_output);
+
+static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->dss;
+}
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
@@ -191,7 +186,19 @@ int omapdss_device_connect(struct dss_device *dss,
{
int ret;
- dev_dbg(dst->dev, "connect\n");
+ dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ /*
+ * The destination is NULL when the source is connected to a
+ * bridge or panel instead of a DSS device. Stop here, we will
+ * attach the bridge or panel later when we will have a DRM
+ * encoder.
+ */
+ return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ }
if (omapdss_device_is_connected(dst))
return -EBUSY;
@@ -204,12 +211,6 @@ int omapdss_device_connect(struct dss_device *dss,
return ret;
}
- if (src) {
- WARN_ON(src->dst);
- dst->src = src;
- src->dst = dst;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(omapdss_device_connect);
@@ -217,19 +218,20 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dev_dbg(dst->dev, "disconnect\n");
+ struct dss_device *dss = src ? src->dss : dst->dss;
- if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(dst->output_type);
+ dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ WARN_ON(!src->bridge && !src->panel);
return;
}
- if (src) {
- if (WARN_ON(dst != src->dst))
- return;
-
- dst->src = NULL;
- src->dst = NULL;
+ if (!dst->id && !omapdss_device_is_connected(dst)) {
+ WARN_ON(!dst->display);
+ return;
}
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
@@ -239,6 +241,58 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_pre_enable(dssdev->next);
+
+ if (dssdev->ops->pre_enable)
+ dssdev->ops->pre_enable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
+
+void omapdss_device_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->enable)
+ dssdev->ops->enable(dssdev);
+
+ omapdss_device_enable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_enable);
+
+void omapdss_device_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_disable(dssdev->next);
+
+ if (dssdev->ops->disable)
+ dssdev->ops->disable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_disable);
+
+void omapdss_device_post_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->post_disable)
+ dssdev->ops->post_disable(dssdev);
+
+ omapdss_device_post_disable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
+
/* -----------------------------------------------------------------------------
* Components Handling
*/
@@ -249,6 +303,7 @@ struct omapdss_comp_node {
struct list_head list;
struct device_node *node;
bool dss_core_component;
+ const char *compat;
};
static bool omapdss_list_contains(const struct device_node *node)
@@ -266,13 +321,20 @@ static bool omapdss_list_contains(const struct device_node *node)
static void omapdss_walk_device(struct device *dev, struct device_node *node,
bool dss_core)
{
+ struct omapdss_comp_node *comp;
struct device_node *n;
- struct omapdss_comp_node *comp = devm_kzalloc(dev, sizeof(*comp),
- GFP_KERNEL);
+ const char *compat;
+ int ret;
+ ret = of_property_read_string(node, "compatible", &compat);
+ if (ret < 0)
+ return;
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (comp) {
comp->node = node;
comp->dss_core_component = dss_core;
+ comp->compat = compat;
list_add(&comp->list, &omapdss_comp_list);
}
@@ -312,12 +374,8 @@ void omapdss_gather_components(struct device *dev)
omapdss_walk_device(dev, dev->of_node, true);
- for_each_available_child_of_node(dev->of_node, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
+ for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
- }
}
EXPORT_SYMBOL(omapdss_gather_components);
@@ -325,6 +383,8 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
+ if (!strstarts(comp->compat, "omapdss,"))
+ return true;
if (omapdss_device_is_registered(comp->node))
return true;
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 34b2a4ef63a4..e93f61a567a8 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
#include "omapdss.h"
static int disp_num_counter;
@@ -39,8 +42,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
if (id < 0)
id = disp_num_counter++;
- dssdev->alias_id = id;
-
/* Use 'label' property for name, if it exists */
of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
@@ -58,3 +59,22 @@ struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
return omapdss_device_get(output);
}
EXPORT_SYMBOL_GPL(omapdss_display_get);
+
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ drm_display_mode_from_videomode(vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ca4f3c4c6318..cc78dfa07f04 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,8 +47,8 @@ struct dpi_data {
struct mutex lock;
- struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
+ unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
@@ -347,16 +347,15 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- const struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
int r = 0;
if (dpi->pll)
r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- vm->pixelclock, &fck, &lck_div, &pck_div);
+ dpi->pixelclock, &fck, &lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
@@ -378,7 +377,7 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static int dpi_display_enable(struct omap_dss_device *dssdev)
+static void dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
@@ -386,12 +385,6 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dpi->lock);
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err_no_out_mgr;
- }
-
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
@@ -426,7 +419,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dpi->lock);
- return 0;
+ return;
err_mgr_enable:
err_set_mode:
@@ -439,9 +432,7 @@ err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
-err_no_out_mgr:
mutex_unlock(&dpi->lock);
- return r;
}
static void dpi_display_disable(struct omap_dss_device *dssdev)
@@ -467,7 +458,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -475,13 +466,13 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->vm = *vm;
+ dpi->pixelclock = mode->clock * 1000;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
int lck_div, pck_div;
@@ -490,20 +481,20 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (vm->hactive % 8 != 0)
+ if (mode->hdisplay % 8 != 0)
return -EINVAL;
- if (vm->pixelclock == 0)
+ if (mode->clock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
@@ -515,7 +506,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
return 0;
}
@@ -596,23 +587,15 @@ static int dpi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
- int r;
dpi_init_pll(dpi);
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dpi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -651,25 +634,15 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
- out->output_type = OMAP_DISPLAY_TYPE_DPI;
+ out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_ports = BIT(port_num);
out->ops = &dpi_ops;
out->owner = THIS_MODULE;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -681,9 +654,8 @@ static void dpi_uninit_output_port(struct device_node *port)
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static const struct soc_device_attribute dpi_soc_devices[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 64fb788b6647..5202862d89b5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
*/
dsi_enable_scp_clk(dsi);
- if (!dsi->vdds_dsi_enabled) {
- r = regulator_enable(dsi->vdds_dsi_reg);
- if (r)
- goto err0;
- dsi->vdds_dsi_enabled = true;
- }
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err0;
/* XXX PLL does not come out of reset without this... */
dispc_pck_free_enable(dsi->dss->dispc, 1);
@@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
return 0;
err1:
- if (dsi->vdds_dsi_enabled) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
err0:
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return r;
}
-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
+static void dsi_pll_disable(struct dss_pll *pll)
{
+ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+
dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
- if (disconnect_lanes) {
- WARN_ON(!dsi->vdds_dsi_enabled);
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+
+ regulator_disable(dsi->vdds_dsi_reg);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
- DSSDBG("PLL uninit done\n");
-}
-
-static void dsi_pll_disable(struct dss_pll *pll)
-{
- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
-
- dsi_pll_uninit(dsi, true);
+ DSSDBG("PLL disable done\n");
}
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
@@ -3753,19 +3739,13 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_dss_device *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = dsi_display_init_dispc(dsi);
if (r)
- goto err_init_dispc;
+ return r;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
switch (dsi->pix_fmt) {
@@ -3814,7 +3794,6 @@ err_mgr_enable:
}
err_pix_fmt:
dsi_display_uninit_dispc(dsi);
-err_init_dispc:
return r;
}
@@ -4096,11 +4075,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
r = dss_pll_enable(&dsi->pll);
if (r)
- goto err0;
+ return r;
r = dsi_configure_dsi_clocks(dsi);
if (r)
- goto err1;
+ goto err0;
dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
dsi->module_id == 0 ?
@@ -4108,6 +4087,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
DSSDBG("PLL OK\n");
+ if (!dsi->vdds_dsi_enabled) {
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err1;
+
+ dsi->vdds_dsi_enabled = true;
+ }
+
r = dsi_cio_init(dsi);
if (r)
goto err2;
@@ -4136,10 +4123,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
err3:
dsi_cio_uninit(dsi);
err2:
- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
err1:
- dss_pll_disable(&dsi->pll);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err0:
+ dss_pll_disable(&dsi->pll);
+
return r;
}
@@ -4158,13 +4148,18 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsi);
- dsi_pll_uninit(dsi, disconnect_lanes);
+ dss_pll_disable(&dsi->pll);
+
+ if (disconnect_lanes) {
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
+ }
}
-static int dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_display_enable(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- int r = 0;
+ int r;
DSSDBG("dsi_display_enable\n");
@@ -4184,14 +4179,13 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dsi->lock);
- return 0;
+ return;
err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
- return r;
}
static void dsi_display_disable(struct omap_dss_device *dssdev,
@@ -4888,21 +4882,12 @@ static int dsi_get_clocks(struct dsi_data *dsi)
static int dsi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dsi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -5138,29 +5123,19 @@ static int dsi_init_output(struct dsi_data *dsi)
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
- out->output_type = OMAP_DISPLAY_TYPE_DSI;
+ out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_NEGEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -5171,9 +5146,8 @@ static void dsi_uninit_output(struct dsi_data *dsi)
{
struct omap_dss_device *out = &dsi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int dsi_probe_of(struct dsi_data *dsi)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 0422597ac6b0..b2094055c5fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -12,71 +12,25 @@
* more details.
*/
-#include <linux/device.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/seq_file.h>
#include "omapdss.h"
-static struct device_node *
-dss_of_port_get_parent_device(struct device_node *port)
-{
- struct device_node *np;
- int i;
-
- if (!port)
- return NULL;
-
- np = of_get_parent(port);
-
- for (i = 0; i < 2 && np; ++i) {
- struct property *prop;
-
- prop = of_find_property(np, "compatible", NULL);
-
- if (prop)
- return np;
-
- np = of_get_next_parent(np);
- }
-
- return NULL;
-}
-
struct omap_dss_device *
omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
{
- struct device_node *src_node;
- struct device_node *src_port;
- struct device_node *ep;
- struct omap_dss_device *src;
- u32 port_number = 0;
+ struct device_node *remote_node;
+ struct omap_dss_device *dssdev;
- /* Get the endpoint... */
- ep = of_graph_get_endpoint_by_regs(node, port, 0);
- if (!ep)
+ remote_node = of_graph_get_remote_node(node, port, 0);
+ if (!remote_node)
return NULL;
- /* ... and its remote port... */
- src_port = of_graph_get_remote_port(ep);
- of_node_put(ep);
- if (!src_port)
- return NULL;
-
- /* ... and the remote port's number and parent... */
- of_property_read_u32(src_port, "reg", &port_number);
- src_node = dss_of_port_get_parent_device(src_port);
- of_node_put(src_port);
- if (!src_node)
- return ERR_PTR(-EINVAL);
-
- /* ... and finally the connected device. */
- src = omapdss_find_device_by_port(src_node, port_number);
- of_node_put(src_node);
+ dssdev = omapdss_find_device_by_node(remote_node);
+ of_node_put(remote_node);
- return src ? src : ERR_PTR(-EPROBE_DEFER);
+ return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 7553c7fc1c45..55e68863ef15 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1560,7 +1560,7 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
- for_each_dss_display(dssdev) {
+ for_each_dss_output(dssdev) {
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
dssdev->ops->disable(dssdev);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index aabdda394c9c..6339e2756b34 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -249,15 +249,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -312,26 +312,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
hdmi_wp_audio_enable(&hd->wp, false);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -351,12 +345,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -417,21 +407,12 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -698,7 +679,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -706,19 +687,9 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -729,9 +700,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9e8556f67a29..2955bbad13bb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -248,15 +248,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -320,26 +320,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -359,12 +353,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -422,21 +412,12 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -682,7 +663,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -690,19 +671,9 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -713,9 +684,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3bfb95d230e0..2b41c75ce988 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -184,6 +184,22 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
{},
};
+static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
+ { .compatible = "composite-video-connector" },
+ { .compatible = "hdmi-connector" },
+ { .compatible = "lgphilips,lb035q02" },
+ { .compatible = "nec,nl8048hl11" },
+ { .compatible = "panel-dsi-cm" },
+ { .compatible = "sharp,ls037v7dw01" },
+ { .compatible = "sony,acx565akm" },
+ { .compatible = "svideo-connector" },
+ { .compatible = "ti,opa362" },
+ { .compatible = "ti,tpd12s015" },
+ { .compatible = "toppoly,td028ttec1" },
+ { .compatible = "tpo,td028ttec1" },
+ { .compatible = "tpo,td043mtea1" },
+};
+
static int __init omapdss_boot_init(void)
{
struct device_node *dss, *child;
@@ -210,7 +226,7 @@ static int __init omapdss_boot_init(void)
n = list_first_entry(&dss_conv_list, struct dss_conv_node,
list);
- if (!n->root)
+ if (of_match_node(omapdss_of_fixups_whitelist, n->node))
omapdss_omapify_node(n->node);
list_del(&n->list);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 33e15cb77efa..0c734d1f89e1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -19,7 +19,6 @@
#define __OMAP_DRM_DSS_H
#include <linux/list.h>
-#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <video/videomode.h>
@@ -68,6 +67,7 @@ struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
struct hdmi_avi_infoframe;
+struct drm_connector;
enum omap_display_type {
OMAP_DISPLAY_TYPE_NONE = 0,
@@ -360,15 +360,15 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- int (*enable)(struct omap_dss_device *dssdev);
+ void (*pre_enable)(struct omap_dss_device *dssdev);
+ void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
+ void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
+ struct drm_display_mode *mode);
void (*set_timings)(struct omap_dss_device *dssdev,
- const struct videomode *vm);
+ const struct drm_display_mode *mode);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -380,6 +380,9 @@ struct omap_dss_device_ops {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ int (*get_modes)(struct omap_dss_device *dssdev,
+ struct drm_connector *connector);
+
union {
const struct omapdss_hdmi_ops hdmi;
const struct omapdss_dsi_ops dsi;
@@ -390,42 +393,40 @@ struct omap_dss_device_ops {
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
* @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
* @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID
+ * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
+ * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
OMAP_DSS_DEVICE_OP_HPD = BIT(1),
OMAP_DSS_DEVICE_OP_EDID = BIT(2),
-};
-
-enum omap_dss_device_type {
- OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0),
- OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1),
+ OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
struct omap_dss_device {
- struct kobject kobj;
struct device *dev;
struct module *owner;
struct dss_device *dss;
- struct omap_dss_device *src;
- struct omap_dss_device *dst;
struct omap_dss_device *next;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct list_head list;
- unsigned int alias_id;
-
+ /*
+ * DSS type that this device generates (for DSS internal devices) or
+ * requires (for external encoders, connectors and panels). Must be a
+ * non-zero (different than OMAP_DISPLAY_TYPE_NONE) value.
+ */
enum omap_display_type type;
+
/*
- * DSS output type that this device generates (for DSS internal devices)
- * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE
- * for display devices (connectors and panels) and to non-zero value for
- * all other devices.
+ * True if the device is a display (panel or connector) at the end of
+ * the pipeline, false otherwise.
*/
- enum omap_display_type output_type;
+ bool display;
const char *name;
@@ -434,9 +435,6 @@ struct omap_dss_device {
unsigned long ops_flags;
u32 bus_flags;
- /* helper variable for driver suspend/resume */
- bool activate_after_resume;
-
enum omap_display_caps caps;
enum omap_dss_display_state state;
@@ -445,7 +443,6 @@ struct omap_dss_device {
/* DISPC channel for this output */
enum omap_channel dispc_channel;
- bool dispc_channel_connected;
/* output instance */
enum omap_dss_output_id id;
@@ -465,9 +462,6 @@ struct omap_dss_driver {
int (*memory_read)(struct omap_dss_device *dssdev,
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
-
- void (*get_size)(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height);
};
struct dss_device *omapdss_get_dss(void);
@@ -477,32 +471,35 @@ static inline bool omapdss_is_initialized(void)
return !!omapdss_get_dss();
}
-#define for_each_dss_display(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL)
void omapdss_display_init(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm);
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
void omapdss_device_put(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port);
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type);
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
+void omapdss_device_enable(struct omap_dss_device *dssdev);
+void omapdss_device_disable(struct omap_dss_device *dssdev);
+void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL)
-int omapdss_output_validate(struct omap_dss_device *out);
+ while ((d = omapdss_device_next_output(d)) != NULL)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
+int omapdss_device_init_output(struct omap_dss_device *out);
+void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -511,11 +508,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
-{
- return dssdev->src;
-}
-
static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
{
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
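The omapdss.h changes above are the core of the rework: enable/disable is split into pre_enable/enable and disable/post_disable phases, the timing hooks move from struct videomode to struct drm_display_mode, and a get_modes hook is added. As a rough illustration of the resulting driver-side shape (not part of the patch; all example_* names and the 165 MHz limit are invented), an external encoder would fill the new ops roughly like this:

static void example_pre_enable(struct omap_dss_device *dssdev)
{
	/* Power up before the upstream DSS output starts streaming. */
}

static void example_enable(struct omap_dss_device *dssdev)
{
	/* Start the output once the upstream side is running. */
}

static void example_disable(struct omap_dss_device *dssdev)
{
	/* Stop the output while the upstream side is still active. */
}

static void example_post_disable(struct omap_dss_device *dssdev)
{
	/* Power down after the upstream DSS output has stopped. */
}

static int example_check_timings(struct omap_dss_device *dssdev,
				 struct drm_display_mode *mode)
{
	/* Timing hooks now operate on drm_display_mode (clock in kHz). */
	return mode->clock <= 165000 ? 0 : -EINVAL;
}

static const struct omap_dss_device_ops example_ops = {
	.pre_enable	= example_pre_enable,
	.enable		= example_enable,
	.disable	= example_disable,
	.post_disable	= example_post_disable,
	.check_timings	= example_check_timings,
};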
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 18505bc70f7e..10a9ee5cdc61 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -20,20 +20,48 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_panel.h>
#include "dss.h"
#include "omapdss.h"
-int omapdss_output_validate(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out)
{
- if (out->next && out->output_type != out->next->type) {
+ struct device_node *remote_node;
+
+ remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ if (!remote_node) {
+ dev_dbg(out->dev, "failed to find video sink\n");
+ return 0;
+ }
+
+ out->next = omapdss_find_device_by_node(remote_node);
+ out->bridge = of_drm_find_bridge(remote_node);
+ out->panel = of_drm_find_panel(remote_node);
+ if (IS_ERR(out->panel))
+ out->panel = NULL;
+
+ of_node_put(remote_node);
+
+ if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
+ omapdss_device_put(out->next);
+ out->next = NULL;
return -EINVAL;
}
- return 0;
+ return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(omapdss_device_init_output);
+
+void omapdss_device_cleanup_output(struct omap_dss_device *out)
+{
+ if (out->next)
+ omapdss_device_put(out->next);
}
-EXPORT_SYMBOL(omapdss_output_validate);
+EXPORT_SYMBOL(omapdss_device_cleanup_output);
int dss_install_mgr_ops(struct dss_device *dss,
const struct dss_mgr_ops *mgr_ops,
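omapdss_device_init_output() and omapdss_device_cleanup_output() above replace the per-driver sink lookup and validation that each encoder used to open-code. A condensed sketch of how an output driver pairs them around registration (the example_* names and the DPI type are placeholders, not from the patch; example_ops stands for an ops table like the one sketched earlier):

struct example_output {
	struct platform_device *pdev;
	struct omap_dss_device output;
};

static int example_init_output(struct example_output *ex)
{
	struct omap_dss_device *out = &ex->output;
	int r;

	out->dev = &ex->pdev->dev;
	out->id = OMAP_DSS_OUTPUT_DPI;		/* placeholder output id */
	out->type = OMAP_DISPLAY_TYPE_DPI;	/* placeholder display type */
	out->name = "example.0";
	out->ops = &example_ops;
	out->owner = THIS_MODULE;
	out->of_ports = BIT(0);

	/* Resolves the next dssdev, drm_bridge or drm_panel from the OF graph. */
	r = omapdss_device_init_output(out);
	if (r < 0)	/* includes -EPROBE_DEFER when the sink is not bound yet */
		return r;

	omapdss_device_register(out);
	return 0;
}

static void example_uninit_output(struct example_output *ex)
{
	omapdss_device_unregister(&ex->output);
	omapdss_device_cleanup_output(&ex->output);
}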
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index b2fe2387037a..7aae52984fed 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -37,7 +37,7 @@ struct sdi_device {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
+ unsigned long pixelclock;
int datapairs;
struct omap_dss_device output;
@@ -129,27 +129,22 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static int sdi_display_enable(struct omap_dss_device *dssdev)
+static void sdi_display_enable(struct omap_dss_device *dssdev)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
- if (!sdi->output.dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
- goto err_reg_enable;
+ return;
r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
- r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -185,7 +180,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
dss_sdi_disable(sdi->dss);
@@ -195,8 +190,6 @@ err_calc_clock_div:
dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
-err_reg_enable:
- return r;
}
static void sdi_display_disable(struct omap_dss_device *dssdev)
@@ -213,36 +206,37 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- sdi->vm = *vm;
+ sdi->pixelclock = mode->clock * 1000;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
+ unsigned long pixelclock = mode->clock * 1000;
unsigned long fck;
unsigned long pck;
int r;
- if (vm->pixelclock == 0)
+ if (pixelclock == 0)
return -EINVAL;
- r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
if (r)
return r;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != vm->pixelclock) {
+ if (pck != pixelclock) {
DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- vm->pixelclock, pck);
+ pixelclock, pck);
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
}
return 0;
@@ -251,21 +245,12 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
static int sdi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void sdi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -287,29 +272,19 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
- out->output_type = OMAP_DISPLAY_TYPE_SDI;
+ out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_ports = BIT(1);
out->ops = &sdi_ops;
out->owner = THIS_MODULE;
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */
- | DRM_BUS_FLAG_SYNC_POSEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 1);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -318,9 +293,8 @@ static int sdi_init_output(struct sdi_device *sdi)
static void sdi_uninit_output(struct sdi_device *sdi)
{
- if (sdi->output.next)
- omapdss_device_put(sdi->output.next);
omapdss_device_unregister(&sdi->output);
+ omapdss_device_cleanup_output(&sdi->output);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index b5f52727f8b1..da43b865d973 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -267,63 +267,40 @@ enum venc_videomode {
VENC_MODE_NTSC,
};
-static const struct videomode omap_dss_pal_vm = {
- .hactive = 720,
- .vactive = 574,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 12,
- .hback_porch = 68,
- .vsync_len = 5,
- .vfront_porch = 5,
- .vback_porch = 41,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_pal_mode = {
+ .hdisplay = 720,
+ .hsync_start = 732,
+ .hsync_end = 796,
+ .htotal = 864,
+ .vdisplay = 574,
+ .vsync_start = 579,
+ .vsync_end = 584,
+ .vtotal = 625,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static const struct videomode omap_dss_ntsc_vm = {
- .hactive = 720,
- .vactive = 482,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 16,
- .hback_porch = 58,
- .vsync_len = 6,
- .vfront_porch = 6,
- .vback_porch = 31,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_ntsc_mode = {
+ .hdisplay = 720,
+ .hsync_start = 736,
+ .hsync_end = 800,
+ .htotal = 858,
+ .vdisplay = 482,
+ .vsync_start = 488,
+ .vsync_end = 494,
+ .vtotal = 525,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static enum venc_videomode venc_get_videomode(const struct videomode *vm)
-{
- if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
- return VENC_MODE_UNKNOWN;
-
- if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
- vm->hactive == omap_dss_pal_vm.hactive &&
- vm->vactive == omap_dss_pal_vm.vactive)
- return VENC_MODE_PAL;
-
- if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
- vm->hactive == omap_dss_ntsc_vm.hactive &&
- vm->vactive == omap_dss_ntsc_vm.vactive)
- return VENC_MODE_NTSC;
-
- return VENC_MODE_UNKNOWN;
-}
-
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
- u32 wss_data;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -331,7 +308,7 @@ struct venc_device {
struct clk *tv_dac_clk;
- struct videomode vm;
+ const struct venc_config *config;
enum omap_dss_venc_type type;
bool invert_polarity;
bool requires_tv_dac_clk;
@@ -367,8 +344,7 @@ static void venc_write_config(struct venc_device *venc,
venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
- venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc->wss_data);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data);
venc_write_reg(venc, VENC_S_CARR, config->s_carr);
venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
@@ -452,18 +428,6 @@ static void venc_runtime_put(struct venc_device *venc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(const struct videomode *vm)
-{
- switch (venc_get_videomode(vm)) {
- default:
- WARN_ON_ONCE(1);
- case VENC_MODE_PAL:
- return &venc_config_pal_trm;
- case VENC_MODE_NTSC:
- return &venc_config_ntsc_trm;
- }
-}
-
static int venc_power_on(struct venc_device *venc)
{
u32 l;
@@ -474,7 +438,7 @@ static int venc_power_on(struct venc_device *venc)
goto err0;
venc_reset(venc);
- venc_write_config(venc, venc_timings_to_config(&venc->vm));
+ venc_write_config(venc, venc->config);
dss_set_venc_output(venc->dss, venc->type);
dss_set_dac_pwrdn_bgz(venc->dss, 1);
@@ -524,33 +488,17 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static int venc_display_enable(struct omap_dss_device *dssdev)
+static void venc_display_enable(struct omap_dss_device *dssdev)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
- int r;
DSSDBG("venc_display_enable\n");
mutex_lock(&venc->venc_lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("Failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
- r = venc_power_on(venc);
- if (r)
- goto err0;
-
- venc->wss_data = 0;
+ venc_power_on(venc);
mutex_unlock(&venc->venc_lock);
-
- return 0;
-err0:
- mutex_unlock(&venc->venc_lock);
- return r;
}
static void venc_display_disable(struct omap_dss_device *dssdev)
@@ -566,30 +514,70 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc->venc_lock);
}
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int venc_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- mutex_lock(&venc->venc_lock);
- *vm = venc->vm;
- mutex_unlock(&venc->venc_lock);
+ return ARRAY_SIZE(modes);
+}
+
+static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
+{
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return VENC_MODE_UNKNOWN;
+
+ if (mode->clock == omap_dss_pal_mode.clock &&
+ mode->hdisplay == omap_dss_pal_mode.hdisplay &&
+ mode->vdisplay == omap_dss_pal_mode.vdisplay)
+ return VENC_MODE_PAL;
+
+ if (mode->clock == omap_dss_ntsc_mode.clock &&
+ mode->hdisplay == omap_dss_ntsc_mode.hdisplay &&
+ mode->vdisplay == omap_dss_ntsc_mode.vdisplay)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
+ enum venc_videomode venc_mode = venc_get_videomode(mode);
DSSDBG("venc_set_timings\n");
mutex_lock(&venc->venc_lock);
- /* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc->vm, vm, sizeof(*vm)))
- venc->wss_data = 0;
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
- venc->vm = *vm;
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
@@ -597,22 +585,26 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
DSSDBG("venc_check_timings\n");
- switch (venc_get_videomode(vm)) {
+ switch (venc_get_videomode(mode)) {
case VENC_MODE_PAL:
- *vm = omap_dss_pal_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_pal_mode);
+ break;
case VENC_MODE_NTSC:
- *vm = omap_dss_ntsc_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_ntsc_mode);
+ break;
default:
return -EINVAL;
}
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(mode);
+ return 0;
}
static int venc_dump_regs(struct seq_file *s, void *p)
@@ -695,21 +687,12 @@ static int venc_get_clocks(struct venc_device *venc)
static int venc_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void venc_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -721,8 +704,9 @@ static const struct omap_dss_device_ops venc_ops = {
.disable = venc_display_disable,
.check_timings = venc_check_timings,
- .get_timings = venc_get_timings,
.set_timings = venc_set_timings,
+
+ .get_modes = venc_get_modes,
};
/* -----------------------------------------------------------------------------
@@ -776,26 +760,17 @@ static int venc_init_output(struct venc_device *venc)
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
- out->output_type = OMAP_DISPLAY_TYPE_VENC;
+ out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &venc_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -804,9 +779,8 @@ static int venc_init_output(struct venc_device *venc)
static void venc_uninit_output(struct venc_device *venc)
{
- if (venc->output.next)
- omapdss_device_put(venc->output.next);
omapdss_device_unregister(&venc->output);
+ omapdss_device_cleanup_output(&venc->output);
}
static int venc_probe_of(struct venc_device *venc)
@@ -878,8 +852,7 @@ static int venc_probe(struct platform_device *pdev)
mutex_init(&venc->venc_lock);
- venc->wss_data = 0;
- venc->vm = omap_dss_pal_vm;
+ venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 9da94d10782a..5967283934e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -30,24 +31,27 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
struct omap_dss_device *hpd;
bool hdmi_mode;
};
static void omap_connector_hpd_notify(struct drm_connector *connector,
- struct omap_dss_device *src,
enum drm_connector_status status)
{
- if (status == connector_status_disconnected) {
- /*
- * If the source is an HDMI encoder, notify it of disconnection.
- * This is required to let the HDMI encoder reset any internal
- * state related to connection status, such as the CEC address.
- */
- if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
- src->ops->hdmi.lost_hotplug)
- src->ops->hdmi.lost_hotplug(src);
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+
+ if (status != connector_status_disconnected)
+ return;
+
+ /*
+ * Notify all devices in the pipeline of disconnection. This is required
+ * to let the HDMI encoders reset their internal state related to
+ * connection status, such as the CEC address.
+ */
+ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
+ if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
+ dssdev->ops->hdmi.lost_hotplug(dssdev);
}
}
@@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data,
if (old_status == status)
return;
- omap_connector_hpd_notify(connector, omap_connector->hpd, status);
+ omap_connector_hpd_notify(connector, status);
drm_kms_helper_hotplug_event(dev);
}
@@ -103,20 +107,20 @@ omap_connector_find_device(struct drm_connector *connector,
enum omap_dss_device_ops_flag op)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops_flags & op)
- return dssdev;
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & op)
+ dssdev = d;
}
- return NULL;
+ return dssdev;
}
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
enum drm_connector_status status;
@@ -128,13 +132,12 @@ static enum drm_connector_status omap_connector_detect(
? connector_status_connected
: connector_status_disconnected;
- omap_connector_hpd_notify(connector, dssdev->src, status);
+ omap_connector_hpd_notify(connector, status);
} else {
- switch (omap_connector->display->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_DSI:
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
status = connector_status_connected;
break;
default:
@@ -143,7 +146,7 @@ static enum drm_connector_status omap_connector_detect(
}
}
- VERB("%s: %d (force=%d)", omap_connector->display->name, status, force);
+ VERB("%s: %d (force=%d)", connector->name, status, force);
return status;
}
@@ -152,7 +155,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
if (omap_connector->hpd) {
struct omap_dss_device *hpd = omap_connector->hpd;
@@ -166,7 +169,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
omapdss_device_put(omap_connector->output);
- omapdss_device_put(omap_connector->display);
kfree(omap_connector);
}
@@ -212,10 +214,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
- struct drm_display_mode *mode;
- struct videomode vm = {0};
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
/*
* If display exposes EDID, then we parse that in the normal way to
@@ -227,89 +227,71 @@ static int omap_connector_get_modes(struct drm_connector *connector)
return omap_connector_get_modes_edid(connector, dssdev);
/*
- * Otherwise we have either a fixed resolution panel or an output that
- * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus
- * unconnected, or analog TV). Start by querying the size.
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
*/
- dssdev = omap_connector->display;
- if (dssdev->driver && dssdev->driver->get_size)
- dssdev->driver->get_size(dssdev,
- &connector->display_info.width_mm,
- &connector->display_info.height_mm);
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_MODES);
+ if (dssdev)
+ return dssdev->ops->get_modes(dssdev, connector);
/*
- * Iterate over the pipeline to find the first device that can provide
- * timing information. If we can't find any, we just let the KMS core
- * add the default modes.
+ * Otherwise if the display pipeline uses a drm_panel, we delegate the
+ * operation to the panel API.
*/
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops->get_timings)
- break;
- }
- if (!dssdev)
- return 0;
+ if (omap_connector->output->panel)
+ return drm_panel_get_modes(omap_connector->output->panel);
- /* Add a single mode corresponding to the fixed panel timings. */
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
- dssdev->ops->get_timings(dssdev, &vm);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int ret;
- drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_copy(adjusted_mode, mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
+ for (; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
+
+ ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
+ if (ret)
+ return MODE_BAD;
+ }
- return 1;
+ return MODE_OK;
}
-static int omap_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- enum omap_channel channel = omap_connector->output->dispc_channel;
- struct omap_drm_private *priv = connector->dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = {0};
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *new_mode;
- int r, ret = MODE_BAD;
-
- drm_display_mode_to_videomode(mode, &vm);
- mode->vrefresh = drm_mode_vrefresh(mode);
+ struct drm_display_mode new_mode = { { 0 } };
+ enum drm_mode_status status;
- r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (r)
+ status = omap_connector_mode_fixup(omap_connector->output, mode,
+ &new_mode);
+ if (status != MODE_OK)
goto done;
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- r = dssdev->ops->check_timings(dssdev, &vm);
- if (r)
- goto done;
- }
-
- /* check if vrefresh is still valid */
- new_mode = drm_mode_duplicate(dev, mode);
- if (!new_mode)
- return MODE_BAD;
-
- new_mode->clock = vm.pixelclock / 1000;
- new_mode->vrefresh = 0;
- if (mode->vrefresh == drm_mode_vrefresh(new_mode))
- ret = MODE_OK;
- drm_mode_destroy(dev, new_mode);
+ /* Check if vrefresh is still valid. */
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
+ status = MODE_NOCLOCK;
done:
DBG("connector: mode %s: " DRM_MODE_FMT,
- (ret == MODE_OK) ? "valid" : "invalid",
+ (status == MODE_OK) ? "valid" : "invalid",
DRM_MODE_ARG(mode));
- return ret;
+ return status;
}
static const struct drm_connector_funcs omap_connector_funcs = {
@@ -326,9 +308,16 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *display)
+static int omap_connector_get_type(struct omap_dss_device *output)
{
- switch (display->type) {
+ struct omap_dss_device *display;
+ enum omap_display_type type;
+
+ display = omapdss_display_get(output);
+ type = display->type;
+ omapdss_device_put(display);
+
+ switch (type) {
case OMAP_DISPLAY_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DVI:
@@ -351,28 +340,26 @@ static int omap_connector_get_type(struct omap_dss_device *display)
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
struct omap_dss_device *dssdev;
- DBG("%s", display->name);
+ DBG("%s", output->name);
omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
omap_connector->output = omapdss_device_get(output);
- omap_connector->display = omapdss_device_get(display);
connector = &omap_connector->base;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(display));
+ omap_connector_get_type(output));
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
/*
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 854099801649..608085219336 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -22,6 +22,8 @@
#include <linux/types.h>
+enum drm_mode_status;
+
struct drm_connector;
struct drm_device;
struct drm_encoder;
@@ -29,12 +31,12 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder);
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void omap_connector_enable_hpd(struct drm_connector *connector);
void omap_connector_disable_hpd(struct drm_connector *connector);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index d99e24dcc0bf..5a29bf01c0e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
- if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
@@ -390,6 +390,15 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct videomode vm = {0};
+ int r;
+
+ drm_display_mode_to_videomode(mode, &vm);
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel,
+ &vm);
+ if (r)
+ return r;
/* Check for bandwidth limit */
if (priv->max_bandwidth) {
@@ -657,7 +666,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
- __func__, pipe->display->name);
+ __func__, pipe->output->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index f8292278f57d..1b9b6f5e48e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_panel.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -137,12 +138,13 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
- omapdss_device_put(pipe->display);
pipe->output = NULL;
- pipe->display = NULL;
}
memset(&priv->channels, 0, sizeof(priv->channels));
@@ -150,33 +152,17 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
priv->num_pipes = 0;
}
-static int omap_compare_pipes(const void *a, const void *b)
-{
- const struct omap_drm_pipeline *pipe1 = a;
- const struct omap_drm_pipeline *pipe2 = b;
-
- if (pipe1->display->alias_id > pipe2->display->alias_id)
- return 1;
- else if (pipe1->display->alias_id < pipe2->display->alias_id)
- return -1;
- return 0;
-}
-
static int omap_connect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
struct omap_dss_device *output = NULL;
- unsigned int i;
int r;
- if (!omapdss_stack_is_ready())
- return -EPROBE_DEFER;
-
for_each_dss_output(output) {
r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
omapdss_device_put(output);
- goto cleanup;
+ return r;
} else if (r) {
dev_warn(output->dev, "could not connect output %s\n",
output->name);
@@ -185,7 +171,6 @@ static int omap_connect_pipelines(struct drm_device *ddev)
pipe = &priv->pipes[priv->num_pipes++];
pipe->output = omapdss_device_get(output);
- pipe->display = omapdss_display_get(output);
if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
/* To balance the 'for_each_dss_output' loop */
@@ -195,36 +180,19 @@ static int omap_connect_pipelines(struct drm_device *ddev)
}
}
- /* Sort the list by DT aliases */
- sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
- omap_compare_pipes, NULL);
-
- /*
- * Populate the pipeline lookup table by DISPC channel. Only one display
- * is allowed per channel.
- */
- for (i = 0; i < priv->num_pipes; ++i) {
- struct omap_drm_pipeline *pipe = &priv->pipes[i];
- enum omap_channel channel = pipe->output->dispc_channel;
-
- if (WARN_ON(priv->channels[channel] != NULL)) {
- r = -EINVAL;
- goto cleanup;
- }
-
- priv->channels[channel] = pipe;
- }
-
return 0;
+}
-cleanup:
- /*
- * if we are deferring probe, we disconnect the devices we previously
- * connected
- */
- omap_disconnect_pipelines(ddev);
+static int omap_compare_pipelines(const void *a, const void *b)
+{
+ const struct omap_drm_pipeline *pipe1 = a;
+ const struct omap_drm_pipeline *pipe2 = b;
- return r;
+ if (pipe1->alias_id > pipe2->alias_id)
+ return 1;
+ else if (pipe1->alias_id < pipe2->alias_id)
+ return -1;
+ return 0;
}
static int omap_modeset_init_properties(struct drm_device *dev)
@@ -240,6 +208,30 @@ static int omap_modeset_init_properties(struct drm_device *dev)
return 0;
}
+static int omap_display_id(struct omap_dss_device *output)
+{
+ struct device_node *node = NULL;
+
+ if (output->next) {
+ struct omap_dss_device *display;
+
+ display = omapdss_display_get(output);
+ node = display->dev->of_node;
+ omapdss_device_put(display);
+ } else if (output->bridge) {
+ struct drm_bridge *bridge = output->bridge;
+
+ while (bridge->next)
+ bridge = bridge->next;
+
+ node = bridge->of_node;
+ } else if (output->panel) {
+ node = output->panel->dev->of_node;
+ }
+
+ return node ? of_alias_get_id(node, "display") : -ENODEV;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -249,6 +241,9 @@ static int omap_modeset_init(struct drm_device *dev)
int ret;
u32 plane_crtc_mask;
+ if (!omapdss_stack_is_ready())
+ return -EPROBE_DEFER;
+
drm_mode_config_init(dev);
ret = omap_modeset_init_properties(dev);
@@ -263,6 +258,10 @@ static int omap_modeset_init(struct drm_device *dev)
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
+ ret = omap_connect_pipelines(dev);
+ if (ret < 0)
+ return ret;
+
if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
@@ -288,33 +287,75 @@ static int omap_modeset_init(struct drm_device *dev)
priv->planes[priv->num_planes++] = plane;
}
- /* Create the CRTCs, encoders and connectors. */
+ /*
+ * Create the encoders, attach the bridges and get the pipeline alias
+ * IDs.
+ */
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- struct omap_dss_device *display = pipe->display;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
+ int id;
- encoder = omap_encoder_init(dev, pipe->output, display);
- if (!encoder)
+ pipe->encoder = omap_encoder_init(dev, pipe->output);
+ if (!pipe->encoder)
return -ENOMEM;
- connector = omap_connector_init(dev, pipe->output, display,
- encoder);
- if (!connector)
- return -ENOMEM;
+ if (pipe->output->bridge) {
+ ret = drm_bridge_attach(pipe->encoder,
+ pipe->output->bridge, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ id = omap_display_id(pipe->output);
+ pipe->alias_id = id >= 0 ? id : i;
+ }
+
+ /* Sort the pipelines by DT aliases. */
+ sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
+ omap_compare_pipelines, NULL);
+
+ /*
+ * Populate the pipeline lookup table by DISPC channel. Only one display
+ * is allowed per channel.
+ */
+ for (i = 0; i < priv->num_pipes; ++i) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ enum omap_channel channel = pipe->output->dispc_channel;
+
+ if (WARN_ON(priv->channels[channel] != NULL))
+ return -EINVAL;
+
+ priv->channels[channel] = pipe;
+ }
+
+ /* Create the connectors and CRTCs. */
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ struct drm_encoder *encoder = pipe->encoder;
+ struct drm_crtc *crtc;
+
+ if (!pipe->output->bridge) {
+ pipe->connector = omap_connector_init(dev, pipe->output,
+ encoder);
+ if (!pipe->connector)
+ return -ENOMEM;
+
+ drm_connector_attach_encoder(pipe->connector, encoder);
+
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
+ }
+ }
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = 1 << i;
-
pipe->crtc = crtc;
- pipe->encoder = encoder;
- pipe->connector = connector;
}
DBG("registered %u planes, %u crtcs/encoders/connectors\n",
@@ -351,10 +392,12 @@ static int omap_modeset_init(struct drm_device *dev)
static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_enable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -363,10 +406,12 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_disable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -551,10 +596,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_crtc_pre_init(priv);
- ret = omap_connect_pipelines(ddev);
- if (ret)
- goto err_crtc_uninit;
-
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
@@ -612,7 +653,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
-err_crtc_uninit:
omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
@@ -685,54 +725,12 @@ static int pdev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int omap_drm_suspend_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
- display->ops->disable(display);
- display->activate_after_resume = true;
- } else {
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
-static int omap_drm_resume_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->activate_after_resume) {
- display->ops->enable(display);
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
static int omap_drm_suspend(struct device *dev)
{
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_kms_helper_poll_disable(drm_dev);
-
- drm_modeset_lock_all(drm_dev);
- omap_drm_suspend_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int omap_drm_resume(struct device *dev)
@@ -740,11 +738,7 @@ static int omap_drm_resume(struct device *dev)
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_modeset_lock_all(drm_dev);
- omap_drm_resume_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- drm_kms_helper_poll_enable(drm_dev);
+ drm_mode_config_helper_resume(drm_dev);
return omap_gem_resume(drm_dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0c57d2814c51..3cca45cb25f3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -49,7 +49,7 @@ struct omap_drm_pipeline {
struct drm_encoder *encoder;
struct drm_connector *connector;
struct omap_dss_device *output;
- struct omap_dss_device *display;
+ unsigned int alias_id;
};
struct omap_drm_private {
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0d85b3a35767..40512419642b 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -37,7 +38,6 @@
struct omap_encoder {
struct drm_encoder base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
@@ -52,22 +52,43 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
-static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+static void omap_encoder_update_videomode_flags(struct videomode *vm,
+ u32 bus_flags)
+{
+ if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW |
+ DISPLAY_FLAGS_DE_HIGH))) {
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ vm->flags |= DISPLAY_FLAGS_DE_LOW;
+ else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+ }
+}
+
+static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
+ struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_connector *connector;
bool hdmi_mode;
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
if (dssdev->ops->hdmi.set_hdmi_mode)
dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
@@ -88,8 +109,18 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *output = omap_encoder->output;
struct omap_dss_device *dssdev;
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
struct videomode vm = { 0 };
+ u32 bus_flags;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ break;
+ }
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -102,66 +133,102 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- unsigned long bus_flags = dssdev->bus_flags;
-
- if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW |
- DISPLAY_FLAGS_DE_HIGH))) {
- if (bus_flags & DRM_BUS_FLAG_DE_LOW)
- vm.flags |= DISPLAY_FLAGS_DE_LOW;
- else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
- vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
- }
+ for (dssdev = output; dssdev; dssdev = dssdev->next)
+ omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
+
+ for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ if (!bridge->timings)
+ continue;
+
+ bus_flags = bridge->timings->input_bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
}
+ bus_flags = connector->display_info.bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
+
/* Set timings for all devices in the display pipeline. */
- dss_mgr_set_timings(omap_encoder->output, &vm);
+ dss_mgr_set_timings(output, &vm);
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ for (dssdev = output; dssdev; dssdev = dssdev->next) {
if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, &vm);
+ dssdev->ops->set_timings(dssdev, adjusted_mode);
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
+ if (output->type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
+
+ /* Disable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_disable(dssdev->panel);
+ drm_panel_unprepare(dssdev->panel);
+ }
+
+ /*
+ * Disable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_disable(dssdev->next);
+
+ /*
+ * Disable the internal encoder. This will disable the DSS output. The
+ * DSI is treated as an exception because DSI pipelines still use the
+ * legacy flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->disable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ }
- dssdev->ops->disable(dssdev);
+ /*
+ * Perform the post-disable operations on the chain of external devices
+ * to complete the display pipeline disable.
+ */
+ omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
- int r;
-
- r = dssdev->ops->enable(dssdev);
- if (r)
- dev_err(encoder->dev->dev,
- "Failed to enable display '%s': %d\n",
- dssdev->name, r);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
+
+ /* Prepare the chain of external devices for pipeline enable. */
+ omapdss_device_pre_enable(dssdev->next);
+
+ /*
+ * Enable the internal encoder. This will enable the DSS output. The
+ * DSI is treated as an exception because DSI pipelines still use the
+ * legacy flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->enable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ }
+
+ /*
+ * Enable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_enable(dssdev->next);
+
+ /* Enable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_prepare(dssdev->panel);
+ drm_panel_enable(dssdev->panel);
+ }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
@@ -169,35 +236,17 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum omap_channel channel = omap_encoder->output->dispc_channel;
- struct drm_device *dev = encoder->dev;
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = { 0 };
- int ret;
-
- drm_display_mode_to_videomode(&crtc_state->mode, &vm);
-
- ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (ret)
- goto done;
-
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, &vm);
- if (ret)
- goto done;
+ enum drm_mode_status status;
+
+ status = omap_connector_mode_fixup(omap_encoder->output,
+ &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (status != MODE_OK) {
+ dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
+ return -EINVAL;
}
- drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode);
-
-done:
- if (ret)
- dev_err(dev->dev, "invalid timings: %d\n", ret);
-
- return ret;
+ return 0;
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
@@ -209,8 +258,7 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display)
+ struct omap_dss_device *output)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
@@ -220,7 +268,6 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
goto fail;
omap_encoder->output = output;
- omap_encoder->display = display;
encoder = &omap_encoder->base;
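The mode_set hunk above merges bus flags from the dssdev chain, the bridges and the connector with a fill-in-if-unset rule: a later source only contributes a polarity when no earlier source has already set one, as the removed open-coded checks make explicit. Below is a minimal standalone sketch of that rule; the FLAG_DE_* names and merge_de_polarity() helper are illustrative stand-ins, not the kernel's DISPLAY_FLAGS_*/DRM_BUS_FLAG_* definitions or omap_encoder_update_videomode_flags() itself.

/*
 * Sketch of the "fill in only if unset" flag merging used above.
 * Flag names are illustrative, not the kernel's DISPLAY_FLAGS_* values.
 */
#include <stdio.h>

#define FLAG_DE_LOW   (1u << 0)
#define FLAG_DE_HIGH  (1u << 1)

static void merge_de_polarity(unsigned int *vm_flags, unsigned int bus_flags)
{
	/* An earlier source already decided the DE polarity: keep it. */
	if (*vm_flags & (FLAG_DE_LOW | FLAG_DE_HIGH))
		return;

	if (bus_flags & FLAG_DE_LOW)
		*vm_flags |= FLAG_DE_LOW;
	else if (bus_flags & FLAG_DE_HIGH)
		*vm_flags |= FLAG_DE_HIGH;
}

int main(void)
{
	unsigned int vm_flags = 0;

	/* Sources are visited in pipeline order; the first one wins. */
	merge_de_polarity(&vm_flags, FLAG_DE_HIGH);  /* encoder chain          */
	merge_de_polarity(&vm_flags, FLAG_DE_LOW);   /* bridge/connector: kept out */

	printf("DE polarity: %s\n",
	       (vm_flags & FLAG_DE_HIGH) ? "active high" : "active low");
	return 0;
}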
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
index a7b5dde63ecb..4aefb3142886 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.h
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -25,7 +25,6 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display);
+ struct omap_dss_device *output);
#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 851c59f07eb1..50aabd854f4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -183,13 +183,9 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &omap_fb_ops;
- strcpy(fbi->fix.id, MODULE_NAME);
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = dma_addr;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e070153ef21..e36dbb4df867 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -38,6 +38,15 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_FEIYANG_FY07024DI26A30D
+ tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the Feiyang
+ FY07024DI26A30-D MIPI-DSI LCD panel.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -149,6 +158,28 @@ config DRM_PANEL_RAYDIUM_RM68200
Say Y here if you want to enable support for Raydium RM68200
720x1280 DSI video mode panel.
+config DRM_PANEL_ROCKTECH_JH057N00900
+ tristate "Rocktech JH057N00900 MIPI touchscreen panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Rocktech
+ JH057N00900 MIPI DSI panel, as used e.g. in the Librem 5 devkit.
+ It has a 720x1440 resolution, a built-in backlight and a touch
+ controller.
+ Touch input support is provided by the goodix driver and needs to be
+ selected separately.
+
+config DRM_PANEL_RONBO_RB070D30
+ tristate "Ronbo Electronics RB070D30 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Ronbo Electronics
+ RB070D30 1024x600 DSI panel.
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index e7ab71968bbf..78e3dc376bdd 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
@@ -13,6 +14,8 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
+obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o
+obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index b428c4678106..a79908dfa3c8 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -191,7 +191,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 390,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
},
/*
* Sanyo ALR252RGT 240x320 portrait display found on the
@@ -215,7 +215,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 116,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
.ib2 = true,
},
};
@@ -264,8 +264,6 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
struct versatile_panel *vpanel = to_versatile_panel(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, vpanel->panel_type->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = vpanel->panel_type->width_mm;
connector->display_info.height_mm = vpanel->panel_type->height_mm;
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
new file mode 100644
index 000000000000..dabf59e0f56f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Amarula Solutions
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#define FEIYANG_INIT_CMD_LEN 2
+
+struct feiyang {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+ struct gpio_desc *reset;
+};
+
+static inline struct feiyang *panel_to_feiyang(struct drm_panel *panel)
+{
+ return container_of(panel, struct feiyang, panel);
+}
+
+struct feiyang_init_cmd {
+ u8 data[FEIYANG_INIT_CMD_LEN];
+};
+
+static const struct feiyang_init_cmd feiyang_init_cmds[] = {
+ { .data = { 0x80, 0x58 } },
+ { .data = { 0x81, 0x47 } },
+ { .data = { 0x82, 0xD4 } },
+ { .data = { 0x83, 0x88 } },
+ { .data = { 0x84, 0xA9 } },
+ { .data = { 0x85, 0xC3 } },
+ { .data = { 0x86, 0x82 } },
+};
+
+static int feiyang_prepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_enable(ctx->dvdd);
+ if (ret)
+ return ret;
+
+ /* T1 (dvdd start + dvdd rise) 0 < T1 <= 10ms */
+ msleep(10);
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret)
+ return ret;
+
+ /* T3 (dvdd rise + avdd start + avdd rise) T3 >= 20ms */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ /*
+ * T5 + T6 (avdd rise + video & logic signal rise)
+ * T5 >= 10ms, 0 < T6 <= 10ms
+ */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 1);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(feiyang_init_cmds); i++) {
+ const struct feiyang_init_cmd *cmd =
+ &feiyang_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data,
+ FEIYANG_INIT_CMD_LEN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int feiyang_enable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int feiyang_disable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int feiyang_unprepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ /* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ regulator_disable(ctx->avdd);
+
+ /* T11 (dvdd rise to fall) 0 < T11 <= 10ms */
+ msleep(10);
+
+ regulator_disable(ctx->dvdd);
+
+ return 0;
+}
+
+static const struct drm_display_mode feiyang_default_mode = {
+ .clock = 55000,
+
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 310,
+ .hsync_end = 1024 + 310 + 20,
+ .htotal = 1024 + 310 + 20 + 90,
+
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 2,
+ .vtotal = 600 + 12 + 2 + 21,
+ .vrefresh = 60,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int feiyang_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ feiyang_default_mode.hdisplay,
+ feiyang_default_mode.vdisplay,
+ feiyang_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs feiyang_funcs = {
+ .disable = feiyang_disable,
+ .unprepare = feiyang_unprepare,
+ .prepare = feiyang_prepare,
+ .enable = feiyang_enable,
+ .get_modes = feiyang_get_modes,
+};
+
+static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &feiyang_funcs;
+
+ ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
+ if (IS_ERR(ctx->dvdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get dvdd regulator\n");
+ return PTR_ERR(ctx->dvdd);
+ }
+
+ ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
+ if (IS_ERR(ctx->avdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get avdd regulator\n");
+ return PTR_ERR(ctx->avdd);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id feiyang_of_match[] = {
+ { .compatible = "feiyang,fy07024di26a30d", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, feiyang_of_match);
+
+static struct mipi_dsi_driver feiyang_driver = {
+ .probe = feiyang_dsi_probe,
+ .remove = feiyang_dsi_remove,
+ .driver = {
+ .name = "feiyang-fy07024di26a30d",
+ .of_match_table = feiyang_of_match,
+ },
+};
+module_mipi_dsi_driver(feiyang_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Feiyang FY07024DI26A30-D MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index bd38bf4f1ba6..a1c4cd2940fb 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -412,11 +412,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
if (ili->conf->dclk_active_high) {
reg = ILI9322_POL_DCLK;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
} else {
reg = 0;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
}
if (ili->conf->de_active_high) {
reg |= ILI9322_POL_DE;
@@ -662,8 +662,6 @@ static int ili9322_get_modes(struct drm_panel *panel)
struct ili9322 *ili = panel_to_ili9322(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "ILI9322 TFT LCD driver\0",
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = ili->conf->width_mm;
connector->display_info.height_mm = ili->conf->height_mm;
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 5e8d4523e9ed..a1d8d92fac2b 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -190,7 +190,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
num++;
}
- memcpy(connector->display_info.name, lcd_info->name, 32);
connector->display_info.width_mm = lcd_info->width_mm;
connector->display_info.height_mm = lcd_info->height_mm;
connector->display_info.bpc = lcd_info->bpc;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87fa316e1d7b..f27a7e426574 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -67,15 +67,15 @@ struct otm8009a {
};
static const struct drm_display_mode default_mode = {
- .clock = 32729,
+ .clock = 29700,
.hdisplay = 480,
- .hsync_start = 480 + 120,
- .hsync_end = 480 + 120 + 63,
- .htotal = 480 + 120 + 63 + 120,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
.vdisplay = 800,
- .vsync_start = 800 + 12,
- .vsync_end = 800 + 12 + 12,
- .vtotal = 800 + 12 + 12 + 12,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
.vrefresh = 50,
.flags = 0,
.width_mm = 52,
@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Send Command GRAM memory write (no parameters) */
dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+ /* Wait a short while to let the panel be ready before the 1st frame */
+ mdelay(10);
+
return 0;
}
@@ -433,7 +436,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "failed to request regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 77593533abcd..14186827e591 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -383,7 +383,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "cannot get regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot get regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
new file mode 100644
index 000000000000..d88ea8da2ec2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ *
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <linux/backlight.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#define DRV_NAME "panel-rocktech-jh057n00900"
+
+/* Manufacturer-specific commands sent via DSI */
+#define ST7703_CMD_ALL_PIXEL_OFF 0x22
+#define ST7703_CMD_ALL_PIXEL_ON 0x23
+#define ST7703_CMD_SETDISP 0xB2
+#define ST7703_CMD_SETRGBIF 0xB3
+#define ST7703_CMD_SETCYC 0xB4
+#define ST7703_CMD_SETBGP 0xB5
+#define ST7703_CMD_SETVCOM 0xB6
+#define ST7703_CMD_SETOTP 0xB7
+#define ST7703_CMD_SETPOWER_EXT 0xB8
+#define ST7703_CMD_SETEXTC 0xB9
+#define ST7703_CMD_SETMIPI 0xBA
+#define ST7703_CMD_SETVDC 0xBC
+#define ST7703_CMD_SETSCR 0xC0
+#define ST7703_CMD_SETPOWER 0xC1
+#define ST7703_CMD_SETPANEL 0xCC
+#define ST7703_CMD_SETGAMMA 0xE0
+#define ST7703_CMD_SETEQ 0xE3
+#define ST7703_CMD_SETGIP1 0xE9
+#define ST7703_CMD_SETGIP2 0xEA
+
+struct jh057n {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+ bool prepared;
+
+ struct dentry *debugfs;
+};
+
+static inline struct jh057n *panel_to_jh057n(struct drm_panel *panel)
+{
+ return container_of(panel, struct jh057n, panel);
+}
+
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int jh057n_init_sequence(struct jh057n *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor. Most of the commands
+ * resemble the ST7703's, but the number of parameters often doesn't
+ * match, so it's likely a clone.
+ */
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
+ 0xF1, 0x12, 0x83);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
+ 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
+ msleep(20);
+
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
+ dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
+ 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
+ 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
+ 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
+ 0x11, 0x18);
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode\n");
+ return ret;
+ }
+ /* Panel is operational 120 msec after reset */
+ msleep(60);
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int jh057n_enable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_enable(ctx->backlight);
+}
+
+static int jh057n_disable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_disable(ctx->backlight);
+}
+
+static int jh057n_unprepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ if (!ctx->prepared)
+ return 0;
+
+ mipi_dsi_dcs_set_display_off(dsi);
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int jh057n_prepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(20, 40);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = jh057n_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 90,
+ .hsync_end = 720 + 90 + 20,
+ .htotal = 720 + 90 + 20 + 20,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 20,
+ .vsync_end = 1440 + 20 + 4,
+ .vtotal = 1440 + 20 + 4 + 12,
+ .vrefresh = 60,
+ .clock = 75276,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
+static int jh057n_get_modes(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs jh057n_drm_funcs = {
+ .disable = jh057n_disable,
+ .unprepare = jh057n_unprepare,
+ .prepare = jh057n_prepare,
+ .enable = jh057n_enable,
+ .get_modes = jh057n_get_modes,
+};
+
+static int allpixelson_set(void *data, u64 val)
+{
+ struct jh057n *ctx = data;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Setting all pixels on\n");
+ dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
+ msleep(val * 1000);
+ /* Reset the panel to get video back */
+ drm_panel_disable(&ctx->panel);
+ drm_panel_unprepare(&ctx->panel);
+ drm_panel_prepare(&ctx->panel);
+ drm_panel_enable(&ctx->panel);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL,
+ allpixelson_set, "%llu\n");
+
+static int jh057n_debugfs_init(struct jh057n *ctx)
+{
+ struct dentry *f;
+
+ ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL);
+ if (!ctx->debugfs)
+ return -ENOMEM;
+
+ f = debugfs_create_file("allpixelson", 0600,
+ ctx->debugfs, ctx, &allpixelson_fops);
+ if (!f)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void jh057n_debugfs_remove(struct jh057n *ctx)
+{
+ debugfs_remove_recursive(ctx->debugfs);
+ ctx->debugfs = NULL;
+}
+
+static int jh057n_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct jh057n *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ctx->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &jh057n_drm_funcs;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ jh057n_debugfs_init(ctx);
+ return 0;
+}
+
+static void jh057n_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = jh057n_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = jh057n_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int jh057n_remove(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ jh057n_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ jh057n_debugfs_remove(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id jh057n_of_match[] = {
+ { .compatible = "rocktech,jh057n00900" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh057n_of_match);
+
+static struct mipi_dsi_driver jh057n_driver = {
+ .probe = jh057n_probe,
+ .remove = jh057n_remove,
+ .shutdown = jh057n_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = jh057n_of_match,
+ },
+};
+module_mipi_dsi_driver(jh057n_driver);
+
+MODULE_AUTHOR("Guido Günther <agx@sigxcpu.org>");
+MODULE_DESCRIPTION("DRM driver for Rocktech JH057N00900 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
new file mode 100644
index 000000000000..3c15764f0c03
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018-2019, Bridge Systems BV
+ * Copyright (C) 2018-2019, Bootlin
+ * Copyright (C) 2017, Free Electrons
+ *
+ * This file is based on panel-ilitek-ili9881c.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct rb070d30_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ struct {
+ struct gpio_desc *power;
+ struct gpio_desc *reset;
+ struct gpio_desc *updn;
+ struct gpio_desc *shlr;
+ } gpios;
+};
+
+static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rb070d30_panel, panel);
+}
+
+static int rb070d30_panel_prepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+ gpiod_set_value(ctx->gpios.power, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpios.reset, 1);
+ msleep(20);
+ return 0;
+}
+
+static int rb070d30_panel_unprepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ gpiod_set_value(ctx->gpios.reset, 0);
+ gpiod_set_value(ctx->gpios.power, 0);
+ regulator_disable(ctx->supply);
+
+ return 0;
+}
+
+static int rb070d30_panel_enable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ ret = backlight_enable(ctx->backlight);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ return ret;
+}
+
+static int rb070d30_panel_disable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+}
+
+/* Default timings */
+static const struct drm_display_mode default_mode = {
+ .clock = 51206,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 80,
+ .htotal = 1024 + 160 + 80 + 80,
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 10,
+ .vtotal = 600 + 12 + 10 + 13,
+ .vrefresh = 60,
+
+ .width_mm = 154,
+ .height_mm = 85,
+};
+
+static int rb070d30_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev,
+ "Failed to add mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&default_mode));
+ return -EINVAL;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs rb070d30_panel_funcs = {
+ .get_modes = rb070d30_panel_get_modes,
+ .prepare = rb070d30_panel_prepare,
+ .enable = rb070d30_panel_enable,
+ .disable = rb070d30_panel_disable,
+ .unprepare = rb070d30_panel_unprepare,
+};
+
+static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
+ if (IS_ERR(ctx->supply))
+ return PTR_ERR(ctx->supply);
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &rb070d30_panel_funcs;
+
+ ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->gpios.reset);
+ }
+
+ ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.power)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n");
+ return PTR_ERR(ctx->gpios.power);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.updn)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n");
+ return PTR_ERR(ctx->gpios.updn);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.shlr)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n");
+ return PTR_ERR(ctx->gpios.shlr);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
+ return PTR_ERR(ctx->backlight);
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id rb070d30_panel_of_match[] = {
+ { .compatible = "ronbo,rb070d30" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match);
+
+static struct mipi_dsi_driver rb070d30_panel_driver = {
+ .probe = rb070d30_panel_dsi_probe,
+ .remove = rb070d30_panel_dsi_remove,
+ .driver = {
+ .name = "panel-ronbo-rb070d30",
+ .of_match_table = rb070d30_panel_of_match,
+ },
+};
+module_mipi_dsi_driver(rb070d30_panel_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>");
+MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 33c22ee036f8..f75bef24e050 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -148,9 +148,6 @@ static int s6d16d0_get_modes(struct drm_panel *panel)
struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "Samsung S6D16D0\0",
- DRM_DISPLAY_INFO_LEN);
-
mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
if (!mode) {
DRM_ERROR("bad mode or failed to add mode\n");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 2d99e28ff117..bdcc5d80823d 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -328,7 +328,7 @@ static const struct seiko_panel_desc seiko_43wvf1g = {
.height = 57,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct of_device_id platform_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e8218f6a3f2..569be4efd8d1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -914,7 +914,7 @@ static const struct panel_desc cdtech_s043wq26h_ct7 = {
.width = 95,
.height = 54,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
@@ -1034,7 +1034,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
@@ -1119,7 +1119,7 @@ static const struct panel_desc edt_et057090dhu = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
@@ -1145,7 +1145,7 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct panel_desc edt_etm0700g0bdh6 = {
@@ -1157,7 +1157,7 @@ static const struct panel_desc edt_etm0700g0bdh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
@@ -1311,7 +1311,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1818,7 +1818,7 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode netron_dy_e231732_mode = {
@@ -1867,8 +1867,8 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
- DRM_BUS_FLAG_SYNC_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
@@ -2029,7 +2029,33 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
.height = 93,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+};
+
+static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 30,
+ .htotal = 800 + 210 + 30 + 16,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 13,
+ .vtotal = 480 + 22 + 13 + 10,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc osddisplays_osd070t1718_19ts = {
+ .modes = &osddisplays_osd070t1718_19ts_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2398,7 +2424,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.height = 116,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2421,7 +2447,7 @@ static const struct panel_desc tpk_f07a_0102 = {
.width = 152,
.height = 91,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f10a_0102_mode = {
@@ -2737,6 +2763,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
+ .compatible = "osddisplays,osd070t1718-19ts",
+ .data = &osddisplays_osd070t1718_19ts,
+ }, {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
@@ -2996,6 +3025,34 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
+static const struct drm_display_mode lg_acx467akm_7_mode = {
+ .clock = 150000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 2,
+ .hsync_end = 1080 + 2 + 2,
+ .htotal = 1080 + 2 + 2 + 2,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 2,
+ .vsync_end = 1920 + 2 + 2,
+ .vtotal = 1920 + 2 + 2 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi lg_acx467akm_7 = {
+ .desc = {
+ .modes = &lg_acx467akm_7_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 62,
+ .height = 110,
+ },
+ },
+ .flags = 0,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
@@ -3013,6 +3070,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "panasonic,vvx10f004b00",
.data = &panasonic_vvx10f004b00
}, {
+ .compatible = "lg,acx467akm-7",
+ .data = &lg_acx467akm_7
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 5a9f8f4d5d24..71591e5f5938 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -118,7 +118,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 10 + 1 + 35,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "640x480 RGB",
@@ -135,7 +135,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 18 + 1 + 27,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x272 RGB",
@@ -152,7 +152,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 272 + 2 + 1 + 12,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x640 RGB",
@@ -169,7 +169,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 640 + 4 + 1 + 8,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "400x240 RGB",
@@ -186,7 +186,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 240 + 2 + 1 + 20,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
};
@@ -390,8 +390,6 @@ static int tpg110_get_modes(struct drm_panel *panel)
struct tpg110 *tpg = to_tpg110(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, tpg->panel_mode->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = tpg->width;
connector->display_info.height_mm = tpg->height;
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
new file mode 100644
index 000000000000..591611dc4e34
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DRM_PANFROST
+ tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ depends on DRM
+ depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on MMU
+ select DRM_SCHED
+ select IOMMU_SUPPORT
+ select IOMMU_IO_PGTABLE_LPAE
+ select DRM_GEM_SHMEM_HELPER
+ help
+ DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
+ Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
new file mode 100644
index 000000000000..6de72d13c58f
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+panfrost-y := \
+ panfrost_drv.o \
+ panfrost_device.o \
+ panfrost_devfreq.o \
+ panfrost_gem.o \
+ panfrost_gpu.o \
+ panfrost_job.o \
+ panfrost_mmu.o
+
+obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
new file mode 100644
index 000000000000..c2e44add37d8
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -0,0 +1,27 @@
+- Thermal support.
+
+- Bifrost support:
+ - DT bindings (Neil, WIP)
+ - MMU page table format and address space setup
+ - Bifrost specific feature and issue handling
+ - Coherent DMA support
+
+- Support for 2MB pages. The io-pgtable code already supports this. Finishing
+ support involves either copying or adapting the iommu API to handle passing
+ aligned addresses and sizes to the io-pgtable code.
+
+- Per FD address space support. The h/w supports multiple address spaces.
+ The hard part is handling when more address spaces are needed than what
+ the h/w provides.
+
+- Support pinning pages on demand (GPU page faults).
+
+- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
+
+- Support for madvise and a shrinker.
+
+- Compute job support. So-called 'compute only' jobs need to be plumbed up to
+ userspace.
+
+- Performance counter support. (Boris)
+
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
new file mode 100644
index 000000000000..238bd1d89d43
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Collabora ltd. */
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+
+static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+ unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
+ unsigned long target_volt, target_rate;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ target_rate = dev_pm_opp_get_freq(opp);
+ target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (old_clk_rate == target_rate)
+ return 0;
+
+ /*
+ * If frequency scaling from low to high, adjust voltage first.
+ * If frequency scaling from high to low, adjust frequency first.
+ */
+ if (old_clk_rate < target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err) {
+ dev_err(dev, "Cannot set voltage %lu uV\n",
+ target_volt);
+ return err;
+ }
+ }
+
+ err = clk_set_rate(pfdev->clock, target_rate);
+ if (err) {
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
+ regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
+ pfdev->devfreq.cur_volt);
+ return err;
+ }
+
+ if (old_clk_rate > target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err)
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
+ }
+
+ pfdev->devfreq.cur_freq = target_rate;
+ pfdev->devfreq.cur_volt = target_volt;
+
+ return 0;
+}
+
+static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
+{
+ ktime_t now = ktime_get();
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ pfdev->devfreq.slot[i].busy_time = 0;
+ pfdev->devfreq.slot[i].idle_time = 0;
+ pfdev->devfreq.slot[i].time_last_update = now;
+ }
+}
+
+static int panfrost_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ panfrost_devfreq_update_utilization(pfdev, i);
+ }
+
+ status->current_frequency = clk_get_rate(pfdev->clock);
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
+ pfdev->devfreq.slot[0].idle_time));
+
+ status->busy_time = 0;
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
+ }
+
+ /* We're scheduling only to one core atm, so don't divide for now */
+ /* status->busy_time /= NUM_JOB_SLOTS; */
+
+ panfrost_devfreq_reset(pfdev);
+
+ dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
+ status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = pfdev->devfreq.cur_freq;
+
+ return 0;
+}
+
+static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = panfrost_devfreq_target,
+ .get_dev_status = panfrost_devfreq_get_dev_status,
+ .get_cur_freq = panfrost_devfreq_get_cur_freq,
+};
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev)
+{
+ int ret;
+ struct dev_pm_opp *opp;
+
+ if (!pfdev->regulator)
+ return 0;
+
+ ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+ if (ret == -ENODEV) /* Optional, continue without devfreq */
+ return 0;
+
+ panfrost_devfreq_reset(pfdev);
+
+ pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ dev_pm_opp_put(opp);
+
+ pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
+ &panfrost_devfreq_profile, "simple_ondemand", NULL);
+ if (IS_ERR(pfdev->devfreq.devfreq)) {
+ DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(pfdev->devfreq.devfreq);
+ pfdev->devfreq.devfreq = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev)
+{
+ int i;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ panfrost_devfreq_reset(pfdev);
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ pfdev->devfreq.slot[i].busy = false;
+
+ devfreq_resume_device(pfdev->devfreq.devfreq);
+}
+
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
+{
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ devfreq_suspend_device(pfdev->devfreq.devfreq);
+}
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ ktime_t now;
+ ktime_t last;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ now = ktime_get();
+ last = pfdev->devfreq.slot[slot].time_last_update;
+
+ /* If we last recorded a transition to busy, we have been idle since */
+ if (devfreq_slot->busy)
+ pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ else
+ pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+
+ pfdev->devfreq.slot[slot].time_last_update = now;
+}
+
+/* The job scheduler is expected to call this at every transition busy <-> idle */
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+
+ panfrost_devfreq_update_utilization(pfdev, slot);
+ devfreq_slot->busy = !devfreq_slot->busy;
+}
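The devfreq code above derives GPU load from busy/idle transitions: whenever a job slot changes state, the time elapsed since the previous update is credited to the state the slot was in, and the flag is flipped. A small user-space sketch of that accounting follows; the explicit timestamps stand in for ktime_get() and slot_stats stands in for struct panfrost_devfreq_slot.

/* Sketch of transition-based busy/idle accounting, in the spirit of
 * panfrost_devfreq_record_transition(); "now" values are fed in manually. */
#include <stdbool.h>
#include <stdio.h>

struct slot_stats {
	long long busy_time;   /* accumulated while busy */
	long long idle_time;   /* accumulated while idle */
	long long last_update; /* timestamp of the previous update */
	bool busy;
};

static void record_transition(struct slot_stats *s, long long now)
{
	/* Credit the elapsed interval to the state we were in until now. */
	if (s->busy)
		s->busy_time += now - s->last_update;
	else
		s->idle_time += now - s->last_update;

	s->last_update = now;
	s->busy = !s->busy;
}

int main(void)
{
	struct slot_stats s = { .last_update = 0, .busy = false };

	record_transition(&s, 10); /* idle 0..10, a job starts    */
	record_transition(&s, 25); /* busy 10..25, the job ends   */
	record_transition(&s, 30); /* idle 25..30, next job starts */

	printf("busy=%lld idle=%lld\n", s.busy_time, s.idle_time);
	/* prints busy=15 idle=15 */
	return 0;
}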
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
new file mode 100644
index 000000000000..eb999531ed90
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_DEVFREQ_H__
+#define __PANFROST_DEVFREQ_H__
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev);
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+
+#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
new file mode 100644
index 000000000000..3b2bced1b015
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_gpu.h"
+#include "panfrost_job.h"
+#include "panfrost_mmu.h"
+
+static int panfrost_reset_init(struct panfrost_device *pfdev)
+{
+ int err;
+
+ pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+ if (IS_ERR(pfdev->rstc)) {
+ dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
+ return PTR_ERR(pfdev->rstc);
+ }
+
+ err = reset_control_deassert(pfdev->rstc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_reset_fini(struct panfrost_device *pfdev)
+{
+ reset_control_assert(pfdev->rstc);
+}
+
+static int panfrost_clk_init(struct panfrost_device *pfdev)
+{
+ int err;
+ unsigned long rate;
+
+ pfdev->clock = devm_clk_get(pfdev->dev, NULL);
+ if (IS_ERR(pfdev->clock)) {
+ dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
+ return PTR_ERR(pfdev->clock);
+ }
+
+ rate = clk_get_rate(pfdev->clock);
+ dev_info(pfdev->dev, "clock rate = %lu\n", rate);
+
+ err = clk_prepare_enable(pfdev->clock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_clk_fini(struct panfrost_device *pfdev)
+{
+ clk_disable_unprepare(pfdev->clock);
+}
+
+static int panfrost_regulator_init(struct panfrost_device *pfdev)
+{
+ int ret;
+
+ pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
+ if (IS_ERR(pfdev->regulator)) {
+ ret = PTR_ERR(pfdev->regulator);
+ pfdev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(pfdev->regulator);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void panfrost_regulator_fini(struct panfrost_device *pfdev)
+{
+ if (pfdev->regulator)
+ regulator_disable(pfdev->regulator);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev)
+{
+ int err;
+ struct resource *res;
+
+ mutex_init(&pfdev->sched_lock);
+ mutex_init(&pfdev->reset_lock);
+ INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+
+ spin_lock_init(&pfdev->hwaccess_lock);
+
+ err = panfrost_clk_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "clk init failed %d\n", err);
+ return err;
+ }
+
+ err = panfrost_regulator_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "regulator init failed %d\n", err);
+ goto err_out0;
+ }
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto err_out1;
+ }
+
+ res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
+ pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ if (IS_ERR(pfdev->iomem)) {
+ dev_err(pfdev->dev, "failed to ioremap iomem\n");
+ err = PTR_ERR(pfdev->iomem);
+ goto err_out2;
+ }
+
+ err = panfrost_gpu_init(pfdev);
+ if (err)
+ goto err_out2;
+
+ err = panfrost_mmu_init(pfdev);
+ if (err)
+ goto err_out3;
+
+ err = panfrost_job_init(pfdev);
+ if (err)
+ goto err_out4;
+
+ /* runtime PM will wake us up later */
+ panfrost_gpu_power_off(pfdev);
+
+ pm_runtime_set_active(pfdev->dev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+err_out4:
+ panfrost_mmu_fini(pfdev);
+err_out3:
+ panfrost_gpu_fini(pfdev);
+err_out2:
+ panfrost_reset_fini(pfdev);
+err_out1:
+ panfrost_regulator_fini(pfdev);
+err_out0:
+ panfrost_clk_fini(pfdev);
+ return err;
+}
+
+void panfrost_device_fini(struct panfrost_device *pfdev)
+{
+ panfrost_job_fini(pfdev);
+ panfrost_mmu_fini(pfdev);
+ panfrost_gpu_fini(pfdev);
+ panfrost_reset_fini(pfdev);
+ panfrost_regulator_fini(pfdev);
+ panfrost_clk_fini(pfdev);
+}
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code)
+{
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00: return "NOT_STARTED/IDLE/OK";
+ case 0x01: return "DONE";
+ case 0x02: return "INTERRUPTED";
+ case 0x03: return "STOPPED";
+ case 0x04: return "TERMINATED";
+ case 0x08: return "ACTIVE";
+ /* Job exceptions */
+ case 0x40: return "JOB_CONFIG_FAULT";
+ case 0x41: return "JOB_POWER_FAULT";
+ case 0x42: return "JOB_READ_FAULT";
+ case 0x43: return "JOB_WRITE_FAULT";
+ case 0x44: return "JOB_AFFINITY_FAULT";
+ case 0x48: return "JOB_BUS_FAULT";
+ case 0x50: return "INSTR_INVALID_PC";
+ case 0x51: return "INSTR_INVALID_ENC";
+ case 0x52: return "INSTR_TYPE_MISMATCH";
+ case 0x53: return "INSTR_OPERAND_FAULT";
+ case 0x54: return "INSTR_TLS_FAULT";
+ case 0x55: return "INSTR_BARRIER_FAULT";
+ case 0x56: return "INSTR_ALIGN_FAULT";
+ case 0x58: return "DATA_INVALID_FAULT";
+ case 0x59: return "TILE_RANGE_FAULT";
+ case 0x5A: return "ADDR_RANGE_FAULT";
+ case 0x60: return "OUT_OF_MEMORY";
+ /* GPU exceptions */
+ case 0x80: return "DELAYED_BUS_FAULT";
+ case 0x88: return "SHAREABILITY_FAULT";
+ /* MMU exceptions */
+ case 0xC1: return "TRANSLATION_FAULT_LEVEL1";
+ case 0xC2: return "TRANSLATION_FAULT_LEVEL2";
+ case 0xC3: return "TRANSLATION_FAULT_LEVEL3";
+ case 0xC4: return "TRANSLATION_FAULT_LEVEL4";
+ case 0xC8: return "PERMISSION_FAULT";
+ case 0xC9 ... 0xCF: return "PERMISSION_FAULT";
+ case 0xD1: return "TRANSTAB_BUS_FAULT_LEVEL1";
+ case 0xD2: return "TRANSTAB_BUS_FAULT_LEVEL2";
+ case 0xD3: return "TRANSTAB_BUS_FAULT_LEVEL3";
+ case 0xD4: return "TRANSTAB_BUS_FAULT_LEVEL4";
+ case 0xD8: return "ACCESS_FLAG";
+ case 0xD9 ... 0xDF: return "ACCESS_FLAG";
+ case 0xE0 ... 0xE7: return "ADDRESS_SIZE_FAULT";
+ case 0xE8 ... 0xEF: return "MEMORY_ATTRIBUTES_FAULT";
+ }
+
+ return "UNKNOWN";
+}
+
+#ifdef CONFIG_PM
+int panfrost_device_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_gpu_power_on(pfdev);
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_job_enable_interrupts(pfdev);
+ panfrost_devfreq_resume(pfdev);
+
+ return 0;
+}
+
+int panfrost_device_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ if (!panfrost_job_is_idle(pfdev))
+ return -EBUSY;
+
+ panfrost_devfreq_suspend(pfdev);
+ panfrost_gpu_power_off(pfdev);
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
new file mode 100644
index 000000000000..56f452dfb490
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_DEVICE_H__
+#define __PANFROST_DEVICE_H__
+
+#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_mmu;
+struct panfrost_job_slot;
+struct panfrost_job;
+
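+/* Hardware job slots: 0 = fragment, 1 = vertex/tiler, 2 = compute */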
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_features {
+ u16 id;
+ u16 revision;
+
+ u64 shader_present;
+ u64 tiler_present;
+ u64 l2_present;
+ u64 stack_present;
+ u32 as_present;
+ u32 js_present;
+
+ u32 l2_features;
+ u32 core_features;
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 thread_features;
+ u32 max_threads;
+ u32 thread_max_workgroup_sz;
+ u32 thread_max_barrier_sz;
+ u32 coherency_features;
+ u32 texture_features[4];
+ u32 js_features[16];
+
+ u32 nr_core_groups;
+
+ unsigned long hw_features[64 / BITS_PER_LONG];
+ unsigned long hw_issues[64 / BITS_PER_LONG];
+};
+
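+/* Per-slot busy/idle time accounting used for devfreq utilization */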
+struct panfrost_devfreq_slot {
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ bool busy;
+};
+
+struct panfrost_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ spinlock_t hwaccess_lock;
+
+ struct drm_mm mm;
+ spinlock_t mm_lock;
+
+ void __iomem *iomem;
+ struct clk *clock;
+ struct regulator *regulator;
+ struct reset_control *rstc;
+
+ struct panfrost_features features;
+
+ struct panfrost_mmu *mmu;
+ struct panfrost_job_slot *js;
+
+ struct panfrost_job *jobs[NUM_JOB_SLOTS];
+ struct list_head scheduled_jobs;
+
+ struct mutex sched_lock;
+ struct mutex reset_lock;
+
+ struct {
+ struct devfreq *devfreq;
+ struct thermal_cooling_device *cooling;
+ unsigned long cur_freq;
+ unsigned long cur_volt;
+ struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ } devfreq;
+};
+
+struct panfrost_file_priv {
+ struct panfrost_device *pfdev;
+
+ struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+};
+
+static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
+{
+ return ddev->dev_private;
+}
+
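+/*
+ * Compare the probed GPU product ID against @id. IDs with the top nibble
+ * set (newer GPU generations) keep only the arch-major and product-major
+ * fields for the comparison; older IDs are compared as-is.
+ */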
+static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
+{
+ s32 match_id = pfdev->features.id;
+
+ if (match_id & 0xf000)
+ match_id &= 0xf00f;
+ return match_id - id;
+}
+
+static inline bool panfrost_model_eq(struct panfrost_device *pfdev, s32 id)
+{
+ return !panfrost_model_cmp(pfdev, id);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev);
+void panfrost_device_fini(struct panfrost_device *pfdev);
+
+int panfrost_device_resume(struct device *dev);
+int panfrost_device_suspend(struct device *dev);
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
new file mode 100644
index 000000000000..d11e2281dde6
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pagemap.h>
+#include <linux/pm_runtime.h>
+#include <drm/panfrost_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+#include "panfrost_job.h"
+#include "panfrost_gpu.h"
+
+static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
+{
+ struct drm_panfrost_get_param *param = data;
+ struct panfrost_device *pfdev = ddev->dev_private;
+
+ if (param->pad != 0)
+ return -EINVAL;
+
+ switch (param->param) {
+ case DRM_PANFROST_PARAM_GPU_PROD_ID:
+ param->value = pfdev->features.id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_panfrost_create_bo *args = data;
+
+ if (!args->size || args->flags || args->pad)
+ return -EINVAL;
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
+ &args->handle);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
+ if (ret)
+ goto err_free;
+
+ args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+
+ return 0;
+
+err_free:
+ drm_gem_object_put_unlocked(&shmem->base);
+ return ret;
+}
+
+/**
+ * panfrost_lookup_bos() - Sets up job->bos[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve handles from userspace to BOs and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ job->bo_count = args->bo_handle_count;
+
+ if (!job->bo_count)
+ return 0;
+
+ job->implicit_fences = kvmalloc_array(job->bo_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->implicit_fences)
+ return -ENOMEM;
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+}
+
+/**
+ * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve syncobjs from userspace to fences and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the fences on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_copy_in_sync(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ u32 *handles;
+ int ret = 0;
+ int i;
+
+ job->in_fence_count = args->in_sync_count;
+
+ if (!job->in_fence_count)
+ return 0;
+
+ job->in_fences = kvmalloc_array(job->in_fence_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->in_fences) {
+ DRM_DEBUG("Failed to allocate job in fences\n");
+ return -ENOMEM;
+ }
+
+ handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
+ goto fail;
+ }
+
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->in_syncs,
+ job->in_fence_count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in syncobj handles\n");
+ goto fail;
+ }
+
+ for (i = 0; i < job->in_fence_count; i++) {
+ ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
+ &job->in_fences[i]);
+ if (ret == -EINVAL)
+ goto fail;
+ }
+
+fail:
+ kvfree(handles);
+ return ret;
+}
+
+static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_panfrost_submit *args = data;
+ struct drm_syncobj *sync_out = NULL;
+ struct panfrost_job *job;
+ int ret = 0;
+
+ if (!args->jc)
+ return -EINVAL;
+
+ if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
+ return -EINVAL;
+
+ if (args->out_sync > 0) {
+ sync_out = drm_syncobj_find(file, args->out_sync);
+ if (!sync_out)
+ return -ENODEV;
+ }
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job) {
+ ret = -ENOMEM;
+ goto fail_out_sync;
+ }
+
+ kref_init(&job->refcount);
+
+ job->pfdev = pfdev;
+ job->jc = args->jc;
+ job->requirements = args->requirements;
+ job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+ job->file_priv = file->driver_priv;
+
+ ret = panfrost_copy_in_sync(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_lookup_bos(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_job_push(job);
+ if (ret)
+ goto fail_job;
+
+ /* Update the return sync object for the job */
+ if (sync_out)
+ drm_syncobj_replace_fence(sync_out, job->render_done_fence);
+
+fail_job:
+ panfrost_job_put(job);
+fail_out_sync:
+ if (sync_out)
+ drm_syncobj_put(sync_out);
+
+ return ret;
+}
+
+static int
+panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ long ret;
+ struct drm_panfrost_wait_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+ true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_mmap_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (args->flags != 0) {
+ DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret == 0)
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_get_bo_offset *args = data;
+ struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+ bo = to_panfrost_bo(gem_obj);
+
+ args->offset = bo->node.start << PAGE_SHIFT;
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return 0;
+}
+
+static int
+panfrost_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *panfrost_priv;
+
+ panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
+ if (!panfrost_priv)
+ return -ENOMEM;
+
+ panfrost_priv->pfdev = pfdev;
+ file->driver_priv = panfrost_priv;
+
+ return panfrost_job_open(panfrost_priv);
+}
+
+static void
+panfrost_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+
+ panfrost_job_close(panfrost_priv);
+
+ kfree(panfrost_priv);
+}
+
+/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
+ * address space. Note that render nodes would be able to submit jobs that
+ * could access BOs from clients authenticated with the master node.
+ */
+static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
+#define PANFROST_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
+
+ PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
+ PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
+};
+
+DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+
+static struct drm_driver panfrost_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_SYNCOBJ,
+ .open = panfrost_open,
+ .postclose = panfrost_postclose,
+ .ioctls = panfrost_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
+ .fops = &panfrost_drm_driver_fops,
+ .name = "panfrost",
+ .desc = "panfrost DRM",
+ .date = "20180908",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = panfrost_gem_create_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+};
+
+static int panfrost_probe(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev;
+ struct drm_device *ddev;
+ int err;
+
+ pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
+ if (!pfdev)
+ return -ENOMEM;
+
+ pfdev->pdev = pdev;
+ pfdev->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, pfdev);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->dev_private = pfdev;
+ pfdev->ddev = ddev;
+
+ spin_lock_init(&pfdev->mm_lock);
+
+ /* 4G is enough for now; can be extended to 48-bit later */
+ drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+
+ pm_runtime_use_autosuspend(pfdev->dev);
+ pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+ pm_runtime_enable(pfdev->dev);
+
+ err = panfrost_device_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out0;
+ }
+
+ err = panfrost_devfreq_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core and expose it to userspace
+ * (this is a render-only device, so there are no connectors).
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out1;
+
+ return 0;
+
+err_out1:
+ panfrost_device_fini(pfdev);
+err_out0:
+ pm_runtime_disable(pfdev->dev);
+ drm_dev_put(ddev);
+ return err;
+}
+
+static int panfrost_remove(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = pfdev->ddev;
+
+ drm_dev_unregister(ddev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ drm_dev_put(ddev);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-t604" },
+ { .compatible = "arm,mali-t624" },
+ { .compatible = "arm,mali-t628" },
+ { .compatible = "arm,mali-t720" },
+ { .compatible = "arm,mali-t760" },
+ { .compatible = "arm,mali-t820" },
+ { .compatible = "arm,mali-t830" },
+ { .compatible = "arm,mali-t860" },
+ { .compatible = "arm,mali-t880" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
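+/* System sleep reuses the runtime-PM callbacks via pm_runtime_force_*() */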
+static const struct dev_pm_ops panfrost_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
+};
+
+static struct platform_driver panfrost_driver = {
+ .probe = panfrost_probe,
+ .remove = panfrost_remove,
+ .driver = {
+ .name = "panfrost",
+ .pm = &panfrost_pm_ops,
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(panfrost_driver);
+
+MODULE_AUTHOR("Panfrost Project Developers");
+MODULE_DESCRIPTION("Panfrost DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
new file mode 100644
index 000000000000..5056777c7744
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_FEATURES_H__
+#define __PANFROST_FEATURES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+enum panfrost_hw_feature {
+ HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ HW_FEATURE_XAFFINITY,
+ HW_FEATURE_OUT_OF_ORDER_EXEC,
+ HW_FEATURE_MRT,
+ HW_FEATURE_BRNDOUT_CC,
+ HW_FEATURE_INTERPIPE_REG_ALIASING,
+ HW_FEATURE_LD_ST_TILEBUFFER,
+ HW_FEATURE_MSAA_16X,
+ HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ HW_FEATURE_T7XX_PAIRING_RULES,
+ HW_FEATURE_LD_ST_LEA_TEX,
+ HW_FEATURE_LINEAR_FILTER_FLOAT,
+ HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ HW_FEATURE_TEST4_DATUM_MODE,
+ HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ HW_FEATURE_BRNDOUT_KILL,
+ HW_FEATURE_WARPING,
+ HW_FEATURE_V4,
+ HW_FEATURE_FLUSH_REDUCTION,
+ HW_FEATURE_PROTECTED_MODE,
+ HW_FEATURE_COHERENCY_REG,
+ HW_FEATURE_PROTECTED_DEBUG_MODE,
+ HW_FEATURE_AARCH64_MMU,
+ HW_FEATURE_TLS_HASHING,
+ HW_FEATURE_THREAD_GROUP_SPLIT,
+ HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+};
+
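+/* Per-model feature masks, built from the enum above with BIT_ULL() */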
+#define hw_features_t600 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t620 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t720 (\
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_OPTIMIZED_COVERAGE_MASK) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+
+#define hw_features_t760 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+/* T860 */
+#define hw_features_t860 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t880 hw_features_t860
+
+#define hw_features_t830 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t820 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_g71 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g72 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g51 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g52 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g76 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+#define hw_features_g31 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+static inline bool panfrost_has_hw_feature(struct panfrost_device *pfdev,
+ enum panfrost_hw_feature feat)
+{
+ return test_bit(feat, pfdev->features.hw_features);
+}
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
new file mode 100644
index 000000000000..a5528a360ef4
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/panfrost_drm.h>
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+/* Called by the DRM core on the last userspace/kernel unreference of the
+ * BO.
+ */
+static void panfrost_gem_free_object(struct drm_gem_object *obj)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_device *pfdev = obj->dev->dev_private;
+
+ panfrost_mmu_unmap(bo);
+
+ spin_lock(&pfdev->mm_lock);
+ drm_mm_remove_node(&bo->node);
+ spin_unlock(&pfdev->mm_lock);
+
+ drm_gem_shmem_free_object(obj);
+}
+
+static const struct drm_gem_object_funcs panfrost_gem_funcs = {
+ .free = panfrost_gem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * panfrost_gem_create_object - Implementation of driver->gem_create_object.
+ * @dev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
+{
+ int ret;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->base.base.funcs = &panfrost_gem_funcs;
+
+ spin_lock(&pfdev->mm_lock);
+ ret = drm_mm_insert_node(&pfdev->mm, &obj->node,
+ roundup(size, PAGE_SIZE) >> PAGE_SHIFT);
+ spin_unlock(&pfdev->mm_lock);
+ if (ret)
+ goto free_obj;
+
+ return &obj->base.base;
+
+free_obj:
+ kfree(obj);
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_object *obj;
+ struct panfrost_gem_object *pobj;
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ pobj = to_panfrost_bo(obj);
+
+ obj->resv = attach->dmabuf->resv;
+
+ panfrost_mmu_map(pobj);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
new file mode 100644
index 000000000000..045000eb5fcf
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_GEM_H__
+#define __PANFROST_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+struct panfrost_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct drm_mm_node node;
+};
+
+static inline
+struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
+}
+
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
new file mode 100644
index 000000000000..58ef25573cda
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#include "panfrost_device.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define gpu_read(dev, reg) readl(dev->iomem + reg)
+
+static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 state = gpu_read(pfdev, GPU_INT_STAT);
+ u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & GPU_IRQ_MASK_ERROR) {
+ u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
+ address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
+
+ dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ fault_status & 0xFF, panfrost_exception_name(pfdev, fault_status),
+ address);
+
+ if (state & GPU_IRQ_MULTIPLE_FAULT)
+ dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, state);
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
+ val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+
+ if (ret) {
+ dev_err(pfdev->dev, "gpu soft reset timed out\n");
+ return ret;
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
+ gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ return 0;
+}
+
+static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+{
+ u32 quirks = 0;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
+ panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
+ quirks |= SC_LS_PAUSEBUFFER_DISABLE;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
+ quirks |= SC_SDC_DISABLE_OQ_DISCARD;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
+ quirks |= SC_ENABLE_TEXGRD_FLAGS;
+
+ if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
+ if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
+ quirks |= SC_LS_ATTR_CHECK_DISABLE;
+ else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
+ quirks |= SC_LS_ALLOW_ATTR_TYPES;
+ }
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
+ quirks |= SC_TLS_HASH_ENABLE;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);
+
+
+ quirks = gpu_read(pfdev, GPU_TILER_CONFIG);
+
+ /* Set tiler clock gate override if required */
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
+ quirks |= TC_CLOCK_GATE_OVERRIDE;
+
+ gpu_write(pfdev, GPU_TILER_CONFIG, quirks);
+
+
+ quirks = gpu_read(pfdev, GPU_L2_MMU_CONFIG);
+
+ /* Limit read & write ID width for AXI */
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+ quirks &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
+ else
+ quirks &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+
+ gpu_write(pfdev, GPU_L2_MMU_CONFIG, quirks);
+
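+ /* Job manager (JM_CONFIG) quirks, keyed on GPU model and revision */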
+ quirks = 0;
+ if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
+ pfdev->features.revision >= 0x2000)
+ quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
+ else if (panfrost_model_eq(pfdev, 0x6000) &&
+ pfdev->features.coherency_features == COHERENCY_ACE)
+ quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
+ JM_FORCE_COHERENCY_FEATURES_SHIFT;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_JM_CONFIG, quirks);
+}
+
+#define MAX_HW_REVS 6
+
+struct panfrost_model {
+ const char *name;
+ u32 id;
+ u32 id_mask;
+ u64 features;
+ u64 issues;
+ struct {
+ u32 revision;
+ u64 issues;
+ } revs[MAX_HW_REVS];
+};
+
+#define GPU_MODEL(_name, _id, ...) \
+{\
+ .name = __stringify(_name), \
+ .id = _id, \
+ .features = hw_features_##_name, \
+ .issues = hw_issues_##_name, \
+ .revs = { __VA_ARGS__ }, \
+}
+
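+/*
+ * Revisions are encoded as major << 12 | minor << 4 | status, mirroring the
+ * low 16 bits of GPU_ID; each entry selects the hw_issues_<name>_r<rev>p<p>
+ * list (plus an optional status suffix) for that exact revision.
+ */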
+#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
+{\
+ .revision = (_rev) << 12 | (_p) << 4 | (_s), \
+ .issues = hw_issues_##name##_r##_rev##p##_p##stat, \
+}
+#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
+
+static const struct panfrost_model gpu_models[] = {
+ /* T60x has an oddball version */
+ GPU_MODEL(t600, 0x600,
+ GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
+ GPU_MODEL(t620, 0x620,
+ GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
+ GPU_MODEL(t720, 0x720),
+ GPU_MODEL(t760, 0x750,
+ GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
+ GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
+ GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
+ GPU_MODEL(t820, 0x820),
+ GPU_MODEL(t830, 0x830),
+ GPU_MODEL(t860, 0x860),
+ GPU_MODEL(t880, 0x880),
+
+ GPU_MODEL(g71, 0x6000,
+ GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
+ GPU_MODEL(g72, 0x6001),
+ GPU_MODEL(g51, 0x7000),
+ GPU_MODEL(g76, 0x7001),
+ GPU_MODEL(g52, 0x7002),
+ GPU_MODEL(g31, 0x7003,
+ GPU_REV(g31, 1, 0)),
+};
+
+static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+{
+ u32 gpu_id, num_js, major, minor, status, rev;
+ const char *name = "unknown";
+ u64 hw_feat = 0;
+ u64 hw_issues = hw_issues_all;
+ const struct panfrost_model *model;
+ int i;
+
+ pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
+ pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
+ pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
+ pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
+ pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
+ pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
+
+ pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);
+
+ pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
+ num_js = hweight32(pfdev->features.js_present);
+ for (i = 0; i < num_js; i++)
+ pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));
+
+ pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
+ pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;
+
+ pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
+ pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;
+
+ pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
+ pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
+ pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);
+
+ pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
+ pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
+
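+ /* GPU_ID: product ID in the upper 16 bits, revision (major/minor/status)
+ * in the lower 16 bits.
+ */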
+ gpu_id = gpu_read(pfdev, GPU_ID);
+ pfdev->features.revision = gpu_id & 0xffff;
+ pfdev->features.id = gpu_id >> 16;
+
+ /* The T60x has an oddball ID value. Fix it up to the standard Midgard
+ * format so we (and userspace) don't have to special case it.
+ */
+ if (pfdev->features.id == 0x6956)
+ pfdev->features.id = 0x0600;
+
+ major = (pfdev->features.revision >> 12) & 0xf;
+ minor = (pfdev->features.revision >> 4) & 0xff;
+ status = pfdev->features.revision & 0xf;
+ rev = pfdev->features.revision;
+
+ gpu_id = pfdev->features.id;
+
+ for (model = gpu_models; model->name; model++) {
+ int best = -1;
+
+ if (!panfrost_model_eq(pfdev, model->id))
+ continue;
+
+ name = model->name;
+ hw_feat = model->features;
+ hw_issues |= model->issues;
+ for (i = 0; i < MAX_HW_REVS; i++) {
+ if (model->revs[i].revision == rev) {
+ best = i;
+ break;
+ } else if (model->revs[i].revision == (rev & ~0xf))
+ best = i;
+ }
+
+ if (best >= 0)
+ hw_issues |= model->revs[best].issues;
+
+ break;
+ }
+
+ bitmap_from_u64(pfdev->features.hw_features, hw_feat);
+ bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
+
+ dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ name, gpu_id, major, minor, status);
+ dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
+ pfdev->features.hw_features,
+ pfdev->features.hw_issues);
+
+ dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
+ pfdev->features.l2_features,
+ pfdev->features.core_features,
+ pfdev->features.tiler_features,
+ pfdev->features.mem_features,
+ pfdev->features.mmu_features,
+ pfdev->features.as_present,
+ pfdev->features.js_present);
+
+ dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
+ pfdev->features.shader_present, pfdev->features.l2_present);
+}
+
+void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ /* Just turn on everything for now */
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+ val, val == pfdev->features.l2_present, 100, 1000);
+
+ gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
+ val, val == pfdev->features.stack_present, 100, 1000);
+
+ gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ val, val == pfdev->features.shader_present, 100, 1000);
+
+ gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ val, val == pfdev->features.tiler_present, 100, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu");
+}
+
+void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+{
+ gpu_write(pfdev, TILER_PWROFF_LO, 0);
+ gpu_write(pfdev, SHADER_PWROFF_LO, 0);
+ gpu_write(pfdev, STACK_PWROFF_LO, 0);
+ gpu_write(pfdev, L2_PWROFF_LO, 0);
+}
+
+int panfrost_gpu_init(struct panfrost_device *pfdev)
+{
+ int err, irq;
+
+ err = panfrost_gpu_soft_reset(pfdev);
+ if (err)
+ return err;
+
+ panfrost_gpu_init_features(pfdev);
+
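+ /* The DMA mask width comes from bits 15:8 of MMU_FEATURES (the PA-bits
+ * field).
+ */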
+ dma_set_mask_and_coherent(pfdev->dev,
+ DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ IRQF_SHARED, "gpu", pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "failed to request gpu irq");
+ return err;
+ }
+
+ panfrost_gpu_init_quirks(pfdev);
+ panfrost_gpu_power_on(pfdev);
+
+ return 0;
+}
+
+void panfrost_gpu_fini(struct panfrost_device *pfdev)
+{
+ panfrost_gpu_power_off(pfdev);
+}
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
+{
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ return gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
+ return 0;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
new file mode 100644
index 000000000000..4112412087b2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_GPU_H__
+#define __PANFROST_GPU_H__
+
+struct panfrost_device;
+
+int panfrost_gpu_init(struct panfrost_device *pfdev);
+void panfrost_gpu_fini(struct panfrost_device *pfdev);
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
+void panfrost_gpu_power_on(struct panfrost_device *pfdev);
+void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
new file mode 100644
index 000000000000..cec6dcdadb5c
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_ISSUES_H__
+#define __PANFROST_ISSUES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+/*
+ * This is not a complete list of issues, but only the ones the driver needs
+ * to care about.
+ */
+enum panfrost_hw_issue {
+ HW_ISSUE_6367,
+ HW_ISSUE_6787,
+ HW_ISSUE_8186,
+ HW_ISSUE_8245,
+ HW_ISSUE_8316,
+ HW_ISSUE_8394,
+ HW_ISSUE_8401,
+ HW_ISSUE_8408,
+ HW_ISSUE_8443,
+ HW_ISSUE_8987,
+ HW_ISSUE_9435,
+ HW_ISSUE_9510,
+ HW_ISSUE_9630,
+ HW_ISSUE_10327,
+ HW_ISSUE_10649,
+ HW_ISSUE_10676,
+ HW_ISSUE_10797,
+ HW_ISSUE_10817,
+ HW_ISSUE_10883,
+ HW_ISSUE_10959,
+ HW_ISSUE_10969,
+ HW_ISSUE_11020,
+ HW_ISSUE_11024,
+ HW_ISSUE_11035,
+ HW_ISSUE_11056,
+ HW_ISSUE_T76X_3542,
+ HW_ISSUE_T76X_3953,
+ HW_ISSUE_TMIX_8463,
+ GPUCORE_1619,
+ HW_ISSUE_TMIX_8438,
+ HW_ISSUE_TGOX_R1_1234,
+ HW_ISSUE_END
+};
+
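+/*
+ * hw_issues_all applies to every GPU; the per-model and per-revision masks
+ * below are OR'ed on top when features are probed.
+ */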
+#define hw_issues_all (\
+ BIT_ULL(HW_ISSUE_9435))
+
+#define hw_issues_t600 (\
+ BIT_ULL(HW_ISSUE_6367) | \
+ BIT_ULL(HW_ISSUE_6787) | \
+ BIT_ULL(HW_ISSUE_8408) | \
+ BIT_ULL(HW_ISSUE_9510) | \
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11035) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t600_r0p0_15dev0 (\
+ BIT_ULL(HW_ISSUE_8186) | \
+ BIT_ULL(HW_ISSUE_8245) | \
+ BIT_ULL(HW_ISSUE_8316) | \
+ BIT_ULL(HW_ISSUE_8394) | \
+ BIT_ULL(HW_ISSUE_8401) | \
+ BIT_ULL(HW_ISSUE_8443) | \
+ BIT_ULL(HW_ISSUE_8987) | \
+ BIT_ULL(HW_ISSUE_9630) | \
+ BIT_ULL(HW_ISSUE_10969) | \
+ BIT_ULL(GPUCORE_1619))
+
+#define hw_issues_t620 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_10959) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t620_r0p1 (\
+ BIT_ULL(HW_ISSUE_10327) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10817) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_11035))
+
+#define hw_issues_t620_r1p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024))
+
+#define hw_issues_t720 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10797) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760_r0p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1_50rel0 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p2 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p3 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t820 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t830 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t860 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t880 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g31 0
+
+#define hw_issues_g31_r1p0 (\
+ BIT_ULL(HW_ISSUE_TGOX_R1_1234))
+
+#define hw_issues_g51 0
+
+#define hw_issues_g52 0
+
+#define hw_issues_g71 (\
+ BIT_ULL(HW_ISSUE_TMIX_8463) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g71_r0p0_05dev0 (\
+ BIT_ULL(HW_ISSUE_T76X_3953))
+
+#define hw_issues_g72 0
+
+#define hw_issues_g76 0
+
+static inline bool panfrost_has_hw_issue(struct panfrost_device *pfdev,
+ enum panfrost_hw_issue issue)
+{
+ return test_bit(issue, pfdev->features.hw_issues);
+}
+
+#endif /* __PANFROST_ISSUES_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
new file mode 100644
index 000000000000..a5716c8fe8b3
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reservation.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panfrost_drm.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gem.h"
+#include "panfrost_regs.h"
+#include "panfrost_gpu.h"
+#include "panfrost_mmu.h"
+
+#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
+#define job_read(dev, reg) readl(dev->iomem + (reg))
+
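+/*
+ * Per-slot scheduler queue: one drm_gpu_scheduler plus a private fence
+ * timeline (context and sequence number) for each hardware job slot.
+ */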
+struct panfrost_queue_state {
+ struct drm_gpu_scheduler sched;
+
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+struct panfrost_job_slot {
+ struct panfrost_queue_state queue[NUM_JOB_SLOTS];
+ spinlock_t job_lock;
+};
+
+static struct panfrost_job *
+to_panfrost_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct panfrost_job, base);
+}
+
+struct panfrost_fence {
+ struct dma_fence base;
+ struct drm_device *dev;
+ /* panfrost seqno for signaled() test */
+ u64 seqno;
+ int queue;
+};
+
+static inline struct panfrost_fence *
+to_panfrost_fence(struct dma_fence *fence)
+{
+ return (struct panfrost_fence *)fence;
+}
+
+static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "panfrost";
+}
+
+static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct panfrost_fence *f = to_panfrost_fence(fence);
+
+ switch (f->queue) {
+ case 0:
+ return "panfrost-js-0";
+ case 1:
+ return "panfrost-js-1";
+ case 2:
+ return "panfrost-js-2";
+ default:
+ return NULL;
+ }
+}
+
+static const struct dma_fence_ops panfrost_fence_ops = {
+ .get_driver_name = panfrost_fence_get_driver_name,
+ .get_timeline_name = panfrost_fence_get_timeline_name,
+};
+
+static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
+{
+ struct panfrost_fence *fence;
+ struct panfrost_job_slot *js = pfdev->js;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ fence->dev = pfdev->ddev;
+ fence->queue = js_num;
+ fence->seqno = ++js->queue[js_num].emit_seqno;
+ dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
+ js->queue[js_num].fence_context, fence->seqno);
+
+ return &fence->base;
+}
+
+static int panfrost_job_get_slot(struct panfrost_job *job)
+{
+ /* JS0: fragment jobs.
+ * JS1: vertex/tiler jobs
+ * JS2: compute jobs
+ */
+ if (job->requirements & PANFROST_JD_REQ_FS)
+ return 0;
+
+/* Not exposed to userspace yet */
+#if 0
+ if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
+ if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
+ (job->pfdev->features.nr_core_groups == 2))
+ return 2;
+ if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
+ return 2;
+ }
+#endif
+ return 1;
+}
+
+static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
+ u32 requirements,
+ int js)
+{
+ u64 affinity;
+
+ /*
+ * Use all cores for now.
+ * Eventually we may need to support tiler only jobs and h/w with
+ * multiple (2) coherent core groups
+ */
+ affinity = pfdev->features.shader_present;
+
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+}
+
+static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ unsigned long flags;
+ u32 cfg;
+ u64 jc_head = job->jc;
+ int ret;
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
+ goto end;
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+
+ panfrost_job_write_affinity(pfdev, job->requirements, js);
+
+ /*
+ * Start the MMU, set medium priority, and do a cache clean/flush on
+ * job start and end.
+ */
+ /* TODO: different address spaces */
+ cfg = JS_CONFIG_THREAD_PRI(8) |
+ JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
+ JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
+ cfg |= JS_CONFIG_START_MMU;
+
+ job_write(pfdev, JS_CONFIG_NEXT(js), cfg);
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);
+
+ /* GO ! */
+ dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
+ job, js, jc_head);
+
+ job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+end:
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
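+/*
+ * Collect the exclusive (write) fence of each BO so the scheduler can
+ * wait for earlier users of the buffers before running the job.
+ */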
+static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence **implicit_fences)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+}
+
+static void panfrost_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
+}
+
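+/*
+ * Queue a job on the scheduler entity of its slot: lock the BO
+ * reservations, initialise the scheduler job, collect the implicit
+ * fences, push the job and attach the render-done fence to the BOs.
+ */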
+int panfrost_job_push(struct panfrost_job *job)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ mutex_lock(&pfdev->sched_lock);
+
+ ret = drm_gem_lock_reservations(job->bos, job->bo_count,
+ &acquire_ctx);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ return ret;
+ }
+
+ ret = drm_sched_job_init(&job->base, entity, NULL);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ goto unlock;
+ }
+
+ job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ panfrost_acquire_object_fences(job->bos, job->bo_count,
+ job->implicit_fences);
+
+ drm_sched_entity_push_job(&job->base, entity);
+
+ mutex_unlock(&pfdev->sched_lock);
+
+ panfrost_attach_object_fences(job->bos, job->bo_count,
+ job->render_done_fence);
+
+unlock:
+ drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
+
+ return ret;
+}
+
+static void panfrost_job_cleanup(struct kref *ref)
+{
+ struct panfrost_job *job = container_of(ref, struct panfrost_job,
+ refcount);
+ unsigned int i;
+
+ if (job->in_fences) {
+ for (i = 0; i < job->in_fence_count; i++)
+ dma_fence_put(job->in_fences[i]);
+ kvfree(job->in_fences);
+ }
+ if (job->implicit_fences) {
+ for (i = 0; i < job->bo_count; i++)
+ dma_fence_put(job->implicit_fences[i]);
+ kvfree(job->implicit_fences);
+ }
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->render_done_fence);
+
+ if (job->bos) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put_unlocked(job->bos[i]);
+ kvfree(job->bos);
+ }
+
+ kfree(job);
+}
+
+void panfrost_job_put(struct panfrost_job *job)
+{
+ kref_put(&job->refcount, panfrost_job_cleanup);
+}
+
+static void panfrost_job_free(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ panfrost_job_put(job);
+}
+
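+/*
+ * Called repeatedly by the scheduler to collect the job's dependencies:
+ * explicit in-fences are returned first, then the per-BO implicit
+ * fences, and finally NULL once everything has been consumed.
+ */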
+static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct dma_fence *fence;
+ unsigned int i;
+
+ /* Explicit fences */
+ for (i = 0; i < job->in_fence_count; i++) {
+ if (job->in_fences[i]) {
+ fence = job->in_fences[i];
+ job->in_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ /* Implicit fences, max. one per BO */
+ for (i = 0; i < job->bo_count; i++) {
+ if (job->implicit_fences[i]) {
+ fence = job->implicit_fences[i];
+ job->implicit_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct dma_fence *fence = NULL;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ pfdev->jobs[slot] = job;
+
+ fence = panfrost_fence_create(pfdev, slot);
+ if (IS_ERR(fence))
+ return NULL;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ panfrost_job_hw_submit(job, slot);
+
+ return fence;
+}
+
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
+{
+ int j;
+ u32 irq_mask = 0;
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ irq_mask |= MK_JS_MASK(j);
+ }
+
+ job_write(pfdev, JOB_INT_CLEAR, irq_mask);
+ job_write(pfdev, JOB_INT_MASK, irq_mask);
+}
+
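+/*
+ * Timeout handling: stop the schedulers of all job slots, soft-reset
+ * the GPU, restore the MMU, power and interrupts, then resubmit the
+ * pending jobs and restart the schedulers.
+ */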
+static void panfrost_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int js = panfrost_job_get_slot(job);
+ int i;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(job->done_fence))
+ return;
+
+ dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ js,
+ job_read(pfdev, JS_STATUS(js)),
+ job_read(pfdev, JS_HEAD_LO(js)),
+ job_read(pfdev, JS_TAIL_LO(js)),
+ sched_job);
+
+ mutex_lock(&pfdev->reset_lock);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_stop(&pfdev->js->queue[i].sched);
+
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
+
+ /* panfrost_core_dump(pfdev); */
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_gpu_power_on(pfdev);
+ panfrost_job_enable_interrupts(pfdev);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+
+ /* restart scheduler after GPU is usable again */
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_start(&pfdev->js->queue[i].sched, true);
+
+ mutex_unlock(&pfdev->reset_lock);
+}
+
+static const struct drm_sched_backend_ops panfrost_sched_ops = {
+ .dependency = panfrost_job_dependency,
+ .run_job = panfrost_job_run,
+ .timedout_job = panfrost_job_timedout,
+ .free_job = panfrost_job_free
+};
+
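+/*
+ * Job interrupt handler: for every slot with a pending bit, clear the
+ * interrupt, report faults to the scheduler and signal the done fence
+ * on completion.
+ */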
+static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = job_read(pfdev, JOB_INT_STAT);
+ int j;
+
+ dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+
+ for (j = 0; status; j++) {
+ u32 mask = MK_JS_MASK(j);
+
+ if (!(status & mask))
+ continue;
+
+ job_write(pfdev, JOB_INT_CLEAR, mask);
+
+ if (status & JOB_INT_MASK_ERR(j)) {
+ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
+
+ dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
+ j,
+ panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
+ job_read(pfdev, JS_HEAD_LO(j)),
+ job_read(pfdev, JS_TAIL_LO(j)));
+
+ drm_sched_fault(&pfdev->js->queue[j].sched);
+ }
+
+ if (status & JOB_INT_MASK_DONE(j)) {
+ panfrost_devfreq_record_transition(pfdev, j);
+ dma_fence_signal(pfdev->jobs[j]->done_fence);
+ }
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
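+/*
+ * Request the "job" interrupt and create one scheduler per job slot,
+ * each with a 500 ms job timeout.
+ */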
+int panfrost_job_init(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js;
+ int ret, j, irq;
+
+ pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
+ if (!js)
+ return -ENOMEM;
+
+ spin_lock_init(&js->job_lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
+ IRQF_SHARED, "job", pfdev);
+ if (ret) {
+ dev_err(pfdev->dev, "failed to request job irq");
+ return ret;
+ }
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ js->queue[j].fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&js->queue[j].sched,
+ &panfrost_sched_ops,
+ 1, 0, msecs_to_jiffies(500),
+ "pan_js");
+ if (ret) {
+ dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+ }
+
+ panfrost_job_enable_interrupts(pfdev);
+
+ return 0;
+
+err_sched:
+ for (j--; j >= 0; j--)
+ drm_sched_fini(&js->queue[j].sched);
+
+ return ret;
+}
+
+void panfrost_job_fini(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int j;
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++)
+ drm_sched_fini(&js->queue[j].sched);
+
+}
+
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+{
+ struct panfrost_device *pfdev = panfrost_priv->pfdev;
+ struct panfrost_job_slot *js = pfdev->js;
+ struct drm_sched_rq *rq;
+ int ret, i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ if (WARN_ON(ret))
+ return ret;
+ }
+ return 0;
+}
+
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+{
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+}
+
+int panfrost_job_is_idle(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ return false;
+
+ /* Check whether the hardware is idle */
+ if (pfdev->devfreq.slot[i].busy)
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
new file mode 100644
index 000000000000..62454128a792
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_JOB_H__
+#define __PANFROST_JOB_H__
+
+#include <uapi/drm/panfrost_drm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_gem_object;
+struct panfrost_file_priv;
+
+struct panfrost_job {
+ struct drm_sched_job base;
+
+ struct kref refcount;
+
+ struct panfrost_device *pfdev;
+ struct panfrost_file_priv *file_priv;
+
+ /* Optional fences userspace can pass in for the job to depend on. */
+ struct dma_fence **in_fences;
+ u32 in_fence_count;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ __u64 jc;
+ __u32 requirements;
+ __u32 flush_id;
+
+ /* Exclusive fences we have taken from the BOs to wait for */
+ struct dma_fence **implicit_fences;
+ struct drm_gem_object **bos;
+ u32 bo_count;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *render_done_fence;
+};
+
+int panfrost_job_init(struct panfrost_device *pfdev);
+void panfrost_job_fini(struct panfrost_device *pfdev);
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_push(struct panfrost_job *job);
+void panfrost_job_put(struct panfrost_job *job);
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+int panfrost_job_is_idle(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
new file mode 100644
index 000000000000..762b1bd2a8c2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "panfrost_device.h"
+#include "panfrost_mmu.h"
+#include "panfrost_gem.h"
+#include "panfrost_features.h"
+#include "panfrost_regs.h"
+
+#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define mmu_read(dev, reg) readl(dev->iomem + reg)
+
+struct panfrost_mmu {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct mutex lock;
+};
+
+static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
+{
+ int ret;
+ u32 val;
+
+ /*
+ * Wait for the MMU status to indicate there is no active command, in
+ * case one is pending.
+ */
+ ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
+ val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
+
+ return ret;
+}
+
+static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(pfdev, as_nr);
+ if (!status)
+ mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
+
+ return status;
+}
+
+static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size)
+{
+ u8 region_width;
+ u64 region = iova & PAGE_MASK;
+ /*
+ * fls returns:
+ * 1 .. 32
+ *
+ * 10 + fls(num_pages)
+ * results in the range (11 .. 42)
+ */
+
+ size = round_up(size, PAGE_SIZE);
+
+ region_width = 10 + fls(size >> PAGE_SHIFT);
+ if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
+ /* not pow2, so must go up to the next pow2 */
+ region_width += 1;
+ }
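+ /* The region width is encoded in the low bits of the lock address */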
+ region |= region_width;
+
+ /* Lock the region that needs to be updated */
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
+}
+
+
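+/*
+ * Lock the target region (for everything but UNLOCK), issue the AS
+ * command and wait for the MMU to become idle again.
+ */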
+static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size, u32 op)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ if (op != AS_COMMAND_UNLOCK)
+ lock_region(pfdev, as_nr, iova, size);
+
+ /* Run the MMU operation */
+ write_cmd(pfdev, as_nr, op);
+
+ /* Wait for the flush to complete */
+ ret = wait_ready(pfdev, as_nr);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+ u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
+ u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+
+ /* Need to revisit mem attrs.
+ * NC (non-cacheable) is the default; the Mali driver uses inner WT
+ * (write-through).
+ */
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
+
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
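+/*
+ * Use a 2MB block mapping when the address is 2MB aligned and at least
+ * 2MB remains to be mapped; fall back to 4KB pages otherwise.
+ */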
+static size_t get_pgsize(u64 addr, size_t size)
+{
+ if (addr & (SZ_2M - 1) || size < SZ_2M)
+ return SZ_4K;
+
+ return SZ_2M;
+}
+
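+/*
+ * Map a GEM object into the GPU address space: walk its sg table, map
+ * each segment through the io-pgtable ops, then flush the page tables
+ * for the whole range.
+ */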
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
+ unsigned long paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+ dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+
+ while (len) {
+ size_t pgsize = get_pgsize(iova | paddr, len);
+
+ ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+ iova += pgsize;
+ paddr += pgsize;
+ len -= pgsize;
+ }
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+}
+
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ size_t len = bo->node.size << PAGE_SHIFT;
+ size_t unmapped_len = 0;
+ int ret;
+
+ dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ while (unmapped_len < len) {
+ size_t unmapped_page;
+ size_t pgsize = get_pgsize(iova, len - unmapped_len);
+
+ unmapped_page = ops->unmap(ops, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped_len += unmapped_page;
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
+static void mmu_tlb_inv_context_s1(void *cookie)
+{
+ struct panfrost_device *pfdev = cookie;
+
+ mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+}
+
+static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{}
+
+static void mmu_tlb_sync_context(void *cookie)
+{
+ //struct panfrost_device *pfdev = cookie;
+ // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
+}
+
+static const struct iommu_gather_ops mmu_tlb_ops = {
+ .tlb_flush_all = mmu_tlb_inv_context_s1,
+ .tlb_add_flush = mmu_tlb_inv_range_nosync,
+ .tlb_sync = mmu_tlb_sync_context,
+};
+
+static const char *access_type_name(struct panfrost_device *pfdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
+ return "ATOMIC";
+ else
+ return "UNKNOWN";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
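+/*
+ * MMU interrupt handler: decode and log the fault information for each
+ * faulting address space. Page faults are only reported and cleared,
+ * not resolved.
+ */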
+static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = mmu_read(pfdev, MMU_INT_STAT);
+ int i;
+
+ if (!status)
+ return IRQ_NONE;
+
+ dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+
+ for (i = 0; status; i++) {
+ u32 mask = BIT(i) | BIT(i + 16);
+ u64 addr;
+ u32 fault_status;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+
+ if (!(status & mask))
+ continue;
+
+ fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
+ addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
+ addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
+
+ /* decode the fault status */
+ exception_type = fault_status & 0xFF;
+ access_type = (fault_status >> 8) & 0x3;
+ source_id = (fault_status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_mmu_init(struct panfrost_device *pfdev)
+{
+ struct io_pgtable_ops *pgtbl_ops;
+ int err, irq;
+
+ pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
+ if (!pfdev->mmu)
+ return -ENOMEM;
+
+ mutex_init(&pfdev->mmu->lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ IRQF_SHARED, "mmu", pfdev);
+
+ if (err) {
+ dev_err(pfdev->dev, "failed to request mmu irq");
+ return err;
+ }
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = pfdev->dev,
+ };
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
+ pfdev);
+ if (!pgtbl_ops)
+ return -ENOMEM;
+
+ pfdev->mmu->pgtbl_ops = pgtbl_ops;
+
+ panfrost_mmu_enable(pfdev, 0);
+
+ return 0;
+}
+
+void panfrost_mmu_fini(struct panfrost_device *pfdev)
+{
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ mmu_disable(pfdev, 0);
+
+ free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
new file mode 100644
index 000000000000..f5878d86a5ce
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_MMU_H__
+#define __PANFROST_MMU_H__
+
+struct panfrost_gem_object;
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo);
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+
+int panfrost_mmu_init(struct panfrost_device *pfdev);
+void panfrost_mmu_fini(struct panfrost_device *pfdev);
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
new file mode 100644
index 000000000000..578c5fc2188b
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/*
+ * Register definitions based on mali_midg_regmap.h
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ */
+#ifndef __PANFROST_REGS_H__
+#define __PANFROST_REGS_H__
+
+#define GPU_ID 0x00
+#define GPU_L2_FEATURES 0x004 /* (RO) Level 2 cache features */
+#define GPU_CORE_FEATURES 0x008 /* (RO) Shader Core Features */
+#define GPU_TILER_FEATURES 0x00C /* (RO) Tiler Features */
+#define GPU_MEM_FEATURES 0x010 /* (RO) Memory system features */
+#define GROUPS_L2_COHERENT BIT(0) /* Cores groups are l2 coherent */
+
+#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
+#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
+
+#define GPU_INT_RAWSTAT 0x20
+#define GPU_INT_CLEAR 0x24
+#define GPU_INT_MASK 0x28
+#define GPU_INT_STAT 0x2c
+#define GPU_IRQ_FAULT BIT(0)
+#define GPU_IRQ_MULTIPLE_FAULT BIT(7)
+#define GPU_IRQ_RESET_COMPLETED BIT(8)
+#define GPU_IRQ_POWER_CHANGED BIT(9)
+#define GPU_IRQ_POWER_CHANGED_ALL BIT(10)
+#define GPU_IRQ_PERFCNT_SAMPLE_COMPLETED BIT(16)
+#define GPU_IRQ_CLEAN_CACHES_COMPLETED BIT(17)
+#define GPU_IRQ_MASK_ALL \
+ (GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT |\
+ GPU_IRQ_RESET_COMPLETED |\
+ GPU_IRQ_POWER_CHANGED |\
+ GPU_IRQ_POWER_CHANGED_ALL |\
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |\
+ GPU_IRQ_CLEAN_CACHES_COMPLETED)
+#define GPU_IRQ_MASK_ERROR \
+ ( \
+ GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT)
+#define GPU_CMD 0x30
+#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_STATUS 0x34
+#define GPU_LATEST_FLUSH_ID 0x38
+#define GPU_FAULT_STATUS 0x3C
+#define GPU_FAULT_ADDRESS_LO 0x40
+#define GPU_FAULT_ADDRESS_HI 0x44
+
+#define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
+#define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define GPU_THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
+#define GPU_THREAD_FEATURES 0x0AC /* (RO) Thread features */
+#define GPU_THREAD_TLS_ALLOC 0x310 /* (RO) Number of threads per core that
+ * TLS must be allocated for */
+
+#define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4))
+#define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4))
+
+#define GPU_SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define GPU_SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
+#define GPU_TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define GPU_TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */
+
+#define GPU_L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define GPU_L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */
+
+#define GPU_COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */
+#define COHERENCY_ACE_LITE BIT(0)
+#define COHERENCY_ACE BIT(1)
+
+#define GPU_STACK_PRESENT_LO 0xE00 /* (RO) Core stack present bitmap, low word */
+#define GPU_STACK_PRESENT_HI 0xE04 /* (RO) Core stack present bitmap, high word */
+
+#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO 0xE10 /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI 0xE14 /* (RO) Core stack ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO 0xE20 /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI 0xE24 /* (RO) Core stack power on bitmap, high word */
+
+
+#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO 0xE30 /* (RO) Core stack power off bitmap, low word */
+#define STACK_PWROFF_HI 0xE34 /* (RO) Core stack power off bitmap, high word */
+
+
+#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO 0xE40 /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PWRTRANS_HI 0xE44 /* (RO) Core stack power transition bitmap, high word */
+
+
+#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */
+
+#define GPU_JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */
+#define GPU_SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */
+#define GPU_TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define GPU_L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT 23
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT 24
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT 26
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT 12
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT 15
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+/* SHADER_CONFIG register */
+#define SC_ALT_COUNTERS BIT(3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL BIT(4)
+#define SC_SDC_DISABLE_OQ_DISCARD BIT(6)
+#define SC_LS_ALLOW_ATTR_TYPES BIT(16)
+#define SC_LS_PAUSEBUFFER_DISABLE BIT(16)
+#define SC_TLS_HASH_ENABLE BIT(17)
+#define SC_LS_ATTR_CHECK_DISABLE BIT(18)
+#define SC_ENABLE_TEXGRD_FLAGS BIT(25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+#define TC_CLOCK_GATE_OVERRIDE BIT(0)
+
+/* JM_CONFIG register */
+#define JM_TIMESTAMP_OVERRIDE BIT(0)
+#define JM_CLOCK_GATE_OVERRIDE BIT(1)
+#define JM_JOB_THROTTLE_ENABLE BIT(2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT 3
+#define JM_MAX_JOB_THROTTLE_LIMIT 0x3F
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT 2
+#define JM_IDVS_GROUP_SIZE_SHIFT 16
+#define JM_MAX_IDVS_GROUP_SIZE 0x3F
+
+
+/* Job Control regs */
+#define JOB_INT_RAWSTAT 0x1000
+#define JOB_INT_CLEAR 0x1004
+#define JOB_INT_MASK 0x1008
+#define JOB_INT_STAT 0x100c
+#define JOB_INT_JS_STATE 0x1010
+#define JOB_INT_THROTTLE 0x1014
+
+#define MK_JS_MASK(j) (0x10001 << (j))
+#define JOB_INT_MASK_ERR(j) BIT((j) + 16)
+#define JOB_INT_MASK_DONE(j) BIT(j)
+
+#define JS_BASE 0x1800
+#define JS_HEAD_LO(n) (JS_BASE + ((n) * 0x80) + 0x00)
+#define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04)
+#define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08)
+#define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c)
+#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10)
+#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14)
+#define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18)
+#define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c)
+#define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20)
+#define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24)
+#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40)
+#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44)
+#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50)
+#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54)
+#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58)
+#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60)
+#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70)
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_CLEAN BIT(8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU BIT(10)
+#define JS_CONFIG_JOB_CHAIN_FLAG BIT(11)
+#define JS_CONFIG_END_FLUSH_CLEAN BIT(12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION BIT(14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK BIT(15)
+#define JS_CONFIG_THREAD_PRI(n) ((n) << 16)
+
+#define JS_COMMAND_NOP 0x00
+#define JS_COMMAND_START 0x01
+#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_STATUS_EVENT_ACTIVE 0x08
+
+
+/* MMU regs */
+#define MMU_INT_RAWSTAT 0x2000
+#define MMU_INT_CLEAR 0x2004
+#define MMU_INT_MASK 0x2008
+#define MMU_INT_STAT 0x200c
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP 0x00 /* NOP Operation */
+#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs
+ (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1s cache then
+ flush all L2 caches then issue a flush region command to all MMUs */
+
+#define MMU_AS(as) (0x2400 + ((as) << 6))
+
+#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x00) /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS(as) (MMU_AS(as) + 0x1C) /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20) /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24) /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS(as) (MMU_AS(as) + 0x28) /* (RO) Status flags for address space n */
+/* Additional Bifrost AS registers */
+#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffffffffffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY 0x2
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE 0x3
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x3
+#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+
+#endif
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 754f6b25f265..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -188,7 +188,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
tim2 |= TIM2_IOE;
if (connector->display_info.bus_flags &
- DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
tim2 |= TIM2_IPC;
}
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
return PTR_ERR(parent);
}
+
+ spin_lock_init(&priv->tim2_lock);
+
/* If the clock divider is broken, use the parent directly */
if (priv->variant->broken_clockdivider) {
priv->clk = parent;
return 0;
}
parent_name = __clk_get_name(parent);
-
- spin_lock_init(&priv->tim2_lock);
div->init = &init;
ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index b9baefdba38a..1c318ad32a8c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
ret = vexpress_muxfpga_init();
if (ret) {
dev_err(dev, "unable to initialize muxfpga driver\n");
+ of_node_put(np);
return ret;
}
@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
pdev = of_find_device_by_node(np);
if (!pdev) {
dev_err(dev, "can't find the sysreg device, deferring\n");
+ of_node_put(np);
return -EPROBE_DEFER;
}
map = dev_get_drvdata(&pdev->dev);
if (!map) {
dev_err(dev, "sysreg has not yet probed\n");
platform_device_put(pdev);
+ of_node_put(np);
return -EPROBE_DEFER;
}
} else {
map = syscon_node_to_regmap(np);
}
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 08c725544a2f..8b319ebbb0fb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
{
struct qxl_device *qdev = plane->dev->dev_private;
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
- struct qxl_bo *bo_old, *primary;
+ struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
@@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
};
uint32_t dumb_shadow_offset = 0;
- if (old_state->fb) {
- bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
- } else {
- bo_old = NULL;
- }
-
primary = bo->shadow ? bo->shadow : bo;
if (!primary->is_primary) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 578d867a81d5..f33e349c4ec5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -255,10 +255,14 @@ static struct drm_driver qxl_driver = {
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
+ .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
.gem_prime_vmap = qxl_gem_prime_vmap,
.gem_prime_vunmap = qxl_gem_prime_vunmap,
.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 4a0331b3ff7d..2896bb6fdbf4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -65,9 +65,6 @@
extern int qxl_num_crtc;
extern int qxl_max_ioctls;
-#define DRM_FILE_OFFSET 0x100000000ULL
-#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
-
#define QXL_INTERRUPT_MASK (\
QXL_INTERRUPT_DISPLAY |\
QXL_INTERRUPT_CURSOR |\
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 8b448eca1cd9..114653b471c6 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -42,6 +42,18 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
qxl_bo_unpin(bo);
}
+struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+struct drm_gem_object *qxl_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 92f5db5b296f..0234f8556ada 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -63,15 +63,10 @@ static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct qxl_device *qdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct qxl_device *qdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- qdev = file_priv->minor->dev->dev_private;
if (qdev == NULL) {
DRM_ERROR(
"filp->private_data->minor->dev->dev_private == NULL\n");
@@ -328,7 +323,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 53f29a115104..0a9312ea250a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
pr_warn("radeon: No coherent DMA available\n");
}
- rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = min(max(den/post_div, 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *ref_div = (*ref_div * fb_div_max)/(*fb_div);
*fb_div = fb_div_max;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index afef2d9fccd8..173deb463414 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <drm/drm_legacy.h>
-#include <drm/ati_pcigart.h>
#include "radeon_family.h"
/* General customization:
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1179034024ae..1298b84cb1c7 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -42,7 +42,7 @@
* the helper contains a pointer to radeon framebuffer baseclass.
*/
struct radeon_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer fb;
struct radeon_device *rdev;
};
@@ -244,7 +244,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
+ /* radeon resume is fragile and needs a vt switch to help it along */
+ info->skip_vt_switch = false;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
if (ret) {
@@ -259,10 +260,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
- strcpy(info->fix.id, "radeondrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
@@ -271,7 +268,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
info->screen_base = rbo->kptr;
info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index b3019505065a..c9bd1278f573 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -133,7 +133,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
- if (range->blockable)
+ if (mmu_notifier_range_blockable(range))
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
@@ -144,7 +144,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
- if (!range->blockable) {
+ if (!mmu_notifier_range_blockable(range)) {
ret = -EAGAIN;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9920a6fc11bf..5d42f8d8e68d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,8 +45,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
@@ -253,14 +251,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -300,14 +296,12 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -792,7 +786,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -901,16 +894,10 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct radeon_device *rdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct radeon_device *rdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- rdev = file_priv->minor->dev->dev_private;
if (rdev == NULL) {
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7c36e2777a15..1529849e217e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -36,3 +36,7 @@ config DRM_RCAR_VSP
depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
help
Enable support to expose the R-Car VSP Compositor as KMS planes.
+
+config DRM_RCAR_WRITEBACK
+ bool
+ default y if ARM64
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 2a3b8d7972b5..6c2ed9c46467 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_encoder.o \
rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_plane.o
+ rcar_du_plane.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7790.dtb.o \
@@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7795.dtb.o \
rcar_du_of_lvds_r8a7796.dtb.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 4cdea14d552f..2da46e3dc4ae 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -32,21 +32,21 @@
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +54,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -62,7 +62,7 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
@@ -157,10 +157,9 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
}
done:
- dev_dbg(rcrtc->group->dev->dev,
+ dev_dbg(rcrtc->dev->dev,
"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
- dpll->output, dpll->fdpll, dpll->n, dpll->m,
- best_diff);
+ dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
struct du_clk_params {
@@ -212,7 +211,7 @@ static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
u32 dsmr;
u32 escr;
@@ -277,7 +276,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_escr_divider(rcrtc->extclock, mode_clock,
ESCR_DCLKSEL_DCLKIN, &params);
- dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n",
+ dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
params.rate);
@@ -285,7 +284,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
+ dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
@@ -333,7 +332,7 @@ plane_format(struct rcar_du_plane *plane)
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned int num_planes = 0;
unsigned int dptsr_planes;
unsigned int hwplanes = 0;
@@ -463,7 +462,7 @@ static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
if (wait_event_timeout(rcrtc->flip_wait,
!rcar_du_crtc_page_flip_pending(rcrtc),
@@ -493,7 +492,7 @@ static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Enable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
/* Turn vertical blanking interrupt reporting on. */
@@ -564,7 +563,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
struct drm_crtc *crtc = &rcrtc->crtc;
u32 status;
@@ -617,7 +616,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
drm_crtc_vblank_off(crtc);
/* Disable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
/*
@@ -627,7 +626,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
* TODO: Find another way to stop the display for DUs that don't support
* TVM sync.
*/
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
DSYSR_TVM_SWITCH);
@@ -648,8 +647,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
rstate->outputs = 0;
drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct rcar_du_encoder *renc;
+ /* Skip the writeback encoder. */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ continue;
+
+ renc = to_rcar_encoder(encoder);
rstate->outputs |= BIT(renc->output);
}
@@ -661,7 +665,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_get(rcrtc);
@@ -689,7 +693,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
@@ -735,7 +739,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
*/
rcar_du_crtc_get(rcrtc);
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -757,15 +761,16 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_flush(rcrtc);
}
-enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+static enum drm_mode_status
+rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
unsigned int vbp;
@@ -797,7 +802,7 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
const char **sources;
unsigned int count;
int i = -1;
@@ -981,8 +986,8 @@ static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
return 0;
}
-const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count)
+static const char *const *
+rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -1079,7 +1084,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
struct rcar_du_crtc *rcrtc = arg;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
irqreturn_t ret = IRQ_NONE;
u32 status;
@@ -1171,6 +1176,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
init_waitqueue_head(&rcrtc->vblank_wait);
spin_lock_init(&rcrtc->vblank_lock);
+ rcrtc->dev = rcdu;
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index bcb35b0b7612..3b7fc668996f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_writeback.h>
#include <media/vsp1.h>
@@ -24,10 +25,11 @@ struct rcar_du_vsp;
/**
* struct rcar_du_crtc - the CRTC, representing a DU superposition processor
* @crtc: base DRM CRTC
+ * @dev: the DU device
* @clock: the CRTC functional clock
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
- * @index: CRTC software and hardware index
+ * @index: CRTC hardware index
* @initialized: whether the CRTC has been initialized and clocks enabled
* @dsysr: cached value of the DSYSR register
* @vblank_enable: whether vblank events are enabled on this CRTC
@@ -39,10 +41,12 @@ struct rcar_du_vsp;
* @group: CRTC group this CRTC belongs to
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
+ * @writeback: the writeback connector
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
+ struct rcar_du_device *dev;
struct clk *clock;
struct clk *extclock;
unsigned int mmio_offset;
@@ -65,9 +69,12 @@ struct rcar_du_crtc {
const char *const *sources;
unsigned int sources_count;
+
+ struct drm_writeback_connector writeback;
};
-#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback)
/**
* struct rcar_du_crtc_state - Driver-specific CRTC state
@@ -97,8 +104,6 @@ enum rcar_du_output {
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex);
-void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
-void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
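Both container_of()-based macros added above in rcar_du_crtc.h (to_rcar_crtc() and the new wb_to_rcar_crtc()) rely on the writeback connector being embedded directly in struct rcar_du_crtc: a pointer to the embedded member can be converted back to the enclosing CRTC without any lookup table. A minimal standalone sketch of that pattern, with hypothetical structures and a simplified container_of() (the kernel's version adds type checking), is:

/* Sketch of the container_of() pattern behind wb_to_rcar_crtc(); the
 * structures are hypothetical and container_of() is simplified here. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct writeback_connector { int dummy; };

struct crtc {
        int index;
        struct writeback_connector writeback;   /* embedded member */
};

static struct crtc *wb_to_crtc(struct writeback_connector *wb)
{
        return container_of(wb, struct crtc, writeback);
}

int main(void)
{
        struct crtc c = { .index = 2 };

        /* Recover the enclosing CRTC from the embedded connector pointer. */
        printf("crtc index = %d\n", wb_to_crtc(&c.writeback)->index);
        return 0;
}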
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 8ee4e762f4e5..6c91753af7bc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -28,13 +28,33 @@ static const struct drm_encoder_funcs encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_encoder *encoder;
- struct drm_bridge *bridge = NULL;
+ struct drm_bridge *bridge;
int ret;
renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
@@ -48,11 +68,33 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
- /* Locate the DRM bridge from the encoder DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if ((output == RCAR_DU_OUTPUT_DPAD0 ||
+ output == RCAR_DU_OUTPUT_DPAD1) &&
+ rcar_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel)) {
+ ret = PTR_ERR(panel);
+ goto done;
+ }
+
+ bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto done;
+ }
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
+ goto done;
+ }
}
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 3b7d50a8fb9b..f8f7fff34dff 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -26,6 +26,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -34,60 +35,70 @@
static const struct rcar_du_format_info rcar_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
.fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
@@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
*/
{
.fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
+ .v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
},
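With the new .v4l2 field, every DRM fourcc in the table above carries its matching V4L2 pixel format, so the separate translation table removed from rcar_du_vsp.c later in this diff becomes a plain lookup. A hedged sketch of such a lookup, modeled on the driver's existing rcar_du_format_info() helper (not part of this hunk), could look like:

/* Sketch: fourcc -> format info lookup over a table like the one above.
 * rcar_du_format_infos and ARRAY_SIZE() are assumed to be in scope. */
static const struct rcar_du_format_info *format_info_lookup(u32 fourcc)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
                if (rcar_du_format_infos[i].fourcc == fourcc)
                        return &rcar_du_format_infos[i];
        }

        return NULL;    /* unsupported format */
}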
@@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
+ /* Create the writeback connectors. */
+ if (rcdu->info->gen >= 3) {
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i];
+
+ ret = rcar_du_writeback_init(rcdu, rcrtc);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
/*
* Initialize the default DPAD0 source to the index of the first DU
* channel that can be connected to DPAD0. The exact value doesn't
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index e171527abdaa..0346504d8c59 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -19,6 +19,7 @@ struct rcar_du_device;
struct rcar_du_format_info {
u32 fourcc;
+ u32 v4l2;
unsigned int bpp;
unsigned int planes;
unsigned int pnmr;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 0878accbd134..5e4faf258c31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -26,16 +27,19 @@
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
-static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
+static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc)
{
struct rcar_du_crtc *crtc = private;
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
- if (completed)
+ if (status & VSP1_DU_STATUS_COMPLETE)
rcar_du_crtc_finish_page_flip(crtc);
+ if (status & VSP1_DU_STATUS_WRITEBACK)
+ rcar_du_writeback_complete(crtc);
drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
@@ -43,7 +47,7 @@ static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = crtc->group->dev;
+ struct rcar_du_device *rcdu = crtc->dev;
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
@@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
state = to_rcar_crtc_state(crtc->crtc.state);
cfg.crc = state->crc;
+ rcar_du_writeback_setup(crtc, &cfg.writeback);
+
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
-/* Keep the two tables in sync. */
-static const u32 formats_kms[] = {
+static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
@@ -139,40 +144,13 @@ static const u32 formats_kms[] = {
DRM_FORMAT_YVU444,
};
-static const u32 formats_v4l2[] = {
- V4L2_PIX_FMT_RGB332,
- V4L2_PIX_FMT_ARGB444,
- V4L2_PIX_FMT_XRGB444,
- V4L2_PIX_FMT_ARGB555,
- V4L2_PIX_FMT_XRGB555,
- V4L2_PIX_FMT_RGB565,
- V4L2_PIX_FMT_RGB24,
- V4L2_PIX_FMT_BGR24,
- V4L2_PIX_FMT_ARGB32,
- V4L2_PIX_FMT_XRGB32,
- V4L2_PIX_FMT_ABGR32,
- V4L2_PIX_FMT_XBGR32,
- V4L2_PIX_FMT_UYVY,
- V4L2_PIX_FMT_YUYV,
- V4L2_PIX_FMT_YVYU,
- V4L2_PIX_FMT_NV12M,
- V4L2_PIX_FMT_NV21M,
- V4L2_PIX_FMT_NV16M,
- V4L2_PIX_FMT_NV61M,
- V4L2_PIX_FMT_YUV420M,
- V4L2_PIX_FMT_YVU420M,
- V4L2_PIX_FMT_YUV422M,
- V4L2_PIX_FMT_YVU422M,
- V4L2_PIX_FMT_YUV444M,
- V4L2_PIX_FMT_YVU444M,
-};
-
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
+ const struct rcar_du_format_info *format;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
.pitch = fb->pitches[0],
@@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
- if (formats_kms[i] == state->format->fourcc) {
- cfg.pixelformat = formats_v4l2[i];
- break;
- }
- }
+ format = rcar_du_format_info(state->format->fourcc);
+ cfg.pixelformat = format->v4l2;
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
-static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
{
- struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
- struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i;
int ret;
- /*
- * There's no need to prepare (and unprepare) the framebuffer when the
- * plane is not visible, as it will not be displayed.
- */
- if (!state->visible)
- return 0;
-
- for (i = 0; i < rstate->format->planes; ++i) {
- struct drm_gem_cma_object *gem =
- drm_fb_cma_get_gem_obj(state->fb, i);
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct sg_table *sgt = &sg_tables[i];
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
gem->base.size);
@@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = drm_gem_fb_prepare_fb(plane, state);
- if (ret)
- goto fail;
-
return 0;
fail:
while (i--) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
@@ -257,24 +217,52 @@ fail:
return ret;
}
-static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
- unsigned int i;
+ int ret;
+ /*
+ * There's no need to prepare (and unprepare) the framebuffer when the
+ * plane is not visible, as it will not be displayed.
+ */
if (!state->visible)
- return;
+ return 0;
+
+ ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables);
+ if (ret < 0)
+ return ret;
+
+ return drm_gem_fb_prepare_fb(plane, state);
+}
+
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ unsigned int i;
- for (i = 0; i < rstate->format->planes; ++i) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
+static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
+
+ if (!state->visible)
+ return;
+
+ rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables);
+}
+
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
- formats_kms,
- ARRAY_SIZE(formats_kms),
+ rcar_du_vsp_formats,
+ ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index db232037f24a..9b4724159378 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -12,8 +12,10 @@
#include <drm/drm_plane.h>
+struct drm_framebuffer;
struct rcar_du_format_info;
struct rcar_du_vsp;
+struct sg_table;
struct rcar_du_vsp_plane {
struct drm_plane plane;
@@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
#else
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
struct device_node *np,
@@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { };
+static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ return -ENXIO;
+}
+static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+}
#endif
#endif /* __RCAR_DU_VSP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
new file mode 100644
index 000000000000..989a0be94131
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_writeback.c -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+
+/**
+ * struct rcar_du_wb_conn_state - Driver-specific writeback connector state
+ * @state: base DRM connector state
+ * @format: format of the writeback framebuffer
+ */
+struct rcar_du_wb_conn_state {
+ struct drm_connector_state state;
+ const struct rcar_du_format_info *format;
+};
+
+#define to_rcar_wb_conn_state(s) \
+ container_of(s, struct rcar_du_wb_conn_state, state)
+
+/**
+ * struct rcar_du_wb_job - Driver-private data for writeback jobs
+ * @sg_tables: scatter-gather tables for the framebuffer memory
+ */
+struct rcar_du_wb_job {
+ struct sg_table sg_tables[3];
+};
+
+static int rcar_du_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob;
+ int ret;
+
+ if (!job->fb)
+ return 0;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ /* Map the framebuffer to the VSP. */
+ ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ if (ret < 0) {
+ kfree(rjob);
+ return ret;
+ }
+
+ job->priv = rjob;
+ return 0;
+}
+
+static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob = job->priv;
+
+ if (!job->fb)
+ return;
+
+ rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ kfree(rjob);
+}
+
+static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = {
+ .get_modes = rcar_du_wb_conn_get_modes,
+ .prepare_writeback_job = rcar_du_wb_prepare_job,
+ .cleanup_writeback_job = rcar_du_wb_cleanup_job,
+};
+
+static struct drm_connector_state *
+rcar_du_wb_conn_duplicate_state(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *copy;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->state);
+
+ return &copy->state;
+}
+
+static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(to_rcar_wb_conn_state(state));
+}
+
+static void rcar_du_wb_conn_reset(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *state;
+
+ if (connector->state) {
+ rcar_du_wb_conn_destroy_state(connector, connector->state);
+ connector->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ __drm_atomic_helper_connector_reset(connector, &state->state);
+}
+
+static const struct drm_connector_funcs rcar_du_wb_conn_funcs = {
+ .reset = rcar_du_wb_conn_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = rcar_du_wb_conn_duplicate_state,
+ .atomic_destroy_state = rcar_du_wb_conn_destroy_state,
+};
+
+static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rcar_du_wb_conn_state *wb_state =
+ to_rcar_wb_conn_state(conn_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
+ /*
+ * Verify that the framebuffer format is supported and that its size
+ * matches the current mode.
+ */
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n",
+ __func__, fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ wb_state->format = rcar_du_format_info(fb->format->format);
+ if (wb_state->format == NULL) {
+ dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
+ fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = {
+ .atomic_check = rcar_du_wb_enc_atomic_check,
+};
+
+/*
+ * Only RGB formats are currently supported as the VSP outputs RGB to the DU
+ * and can't convert to YUV separately for writeback.
+ */
+static const u32 writeback_formats[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
+
+ wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc);
+ drm_connector_helper_add(&wb_conn->base,
+ &rcar_du_wb_conn_helper_funcs);
+
+ return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ &rcar_du_wb_conn_funcs,
+ &rcar_du_wb_enc_helper_funcs,
+ writeback_formats,
+ ARRAY_SIZE(writeback_formats));
+}
+
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+ struct rcar_du_wb_conn_state *wb_state;
+ struct drm_connector_state *state;
+ struct rcar_du_wb_job *rjob;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+
+ state = rcrtc->writeback.base.state;
+ if (!state || !state->writeback_job || !state->writeback_job->fb)
+ return;
+
+ fb = state->writeback_job->fb;
+ rjob = state->writeback_job->priv;
+ wb_state = to_rcar_wb_conn_state(state);
+
+ cfg->pixelformat = wb_state->format->v4l2;
+ cfg->pitch = fb->pitches[0];
+
+ for (i = 0; i < wb_state->format->planes; ++i)
+ cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl)
+ + fb->offsets[i];
+
+ drm_writeback_queue_job(&rcrtc->writeback, state);
+}
+
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+ drm_writeback_signal_completion(&rcrtc->writeback, 0);
+}
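From userspace, the connector registered by rcar_du_writeback_init() behaves like any other DRM writeback connector: a client binds it to the CRTC and attaches a destination framebuffer to its WRITEBACK_FB_ID property in an atomic commit, and the VSP then writes the composed frame into that buffer. A rough libdrm sketch, with error handling trimmed and the connector/CRTC/framebuffer IDs and property IDs assumed to have been discovered beforehand (e.g. via drmModeObjectGetProperties()):

/* Hedged sketch: queue one writeback frame through the atomic API. */
#include <errno.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int queue_writeback(int fd, uint32_t conn_id, uint32_t crtc_id,
                           uint32_t fb_id, uint32_t crtc_id_prop,
                           uint32_t wb_fb_id_prop)
{
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -ENOMEM;

        /* Bind the writeback connector to the CRTC and attach the target FB. */
        drmModeAtomicAddProperty(req, conn_id, crtc_id_prop, crtc_id);
        drmModeAtomicAddProperty(req, conn_id, wb_fb_id_prop, fb_id);

        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
        drmModeAtomicFree(req);
        return ret;
}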
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.h b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
new file mode 100644
index 000000000000..fa87ebf8d21f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_du_writeback.h -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __RCAR_DU_WRITEBACK_H__
+#define __RCAR_DU_WRITEBACK_H__
+
+#include <drm/drm_plane.h>
+
+struct rcar_du_crtc;
+struct rcar_du_device;
+struct vsp1_du_atomic_pipe_config;
+
+#ifdef CONFIG_DRM_RCAR_WRITEBACK
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc);
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg);
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc);
+#else
+static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ return -ENXIO;
+}
+static inline void
+rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+}
+static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+}
+#endif
+
+#endif /* __RCAR_DU_WRITEBACK_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 7ef97b2a6eda..620b51aab291 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
* divider.
*/
fout = fvco / (1 << e) / div7;
- div = DIV_ROUND_CLOSEST(fout, target);
+ div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
diff = abs(fout / div - target);
if (diff < pll->diff) {
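The max(1UL, ...) clamp added above guards against a zero divider: whenever the post-divider output fout is less than half of the target rate, DIV_ROUND_CLOSEST() rounds down to 0 and the following fout / div would divide by zero. A small standalone illustration, using the usual rounding arithmetic of that macro for positive operands (reproduced here only for the example):

/* Why the clamp matters: DIV_ROUND_CLOSEST() can legitimately return 0. */
#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_CLOSEST(x, d)        (((x) + (d) / 2) / (d))

int main(void)
{
        long fout = 20000000, target = 50000000;
        long div = DIV_ROUND_CLOSEST(fout, target);     /* fout < target / 2 -> 0 */

        printf("unclamped div = %ld\n", div);

        div = div > 1 ? div : 1;                        /* the max(1UL, ...) clamp */
        printf("clamped div = %ld, diff = %ld\n", div, labs(fout / div - target));
        return 0;
}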
@@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
- /* Turn on the LVDS PHY. */
+ /*
+ * Turn on the LVDS PHY. On D3, the LVEN and LVRES bits must be
+ * set at the same time, so don't write the register yet.
+ */
lvdcr0 |= LVDCR0_LVEN;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
@@ -531,11 +535,16 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ int min_freq;
+
/*
* The internal LVDS encoder has a restricted clock frequency operating
- * range (31MHz to 148.5MHz). Clamp the clock accordingly.
+ * range, from 5MHz to 148.5MHz on D3 and E3, and from 31MHz to
+ * 148.5MHz on all other platforms. Clamp the clock accordingly.
*/
- adjusted_mode->clock = clamp(adjusted_mode->clock, 31000, 148500);
+ min_freq = lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL ? 5000 : 31000;
+ adjusted_mode->clock = clamp(adjusted_mode->clock, min_freq, 148500);
return true;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 1e75196f9659..2cdf3b62d559 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -77,4 +77,12 @@ config ROCKCHIP_RGB
Some Rockchip CRTCs, like rv1108, can directly output parallel
and serial RGB format to panel or connect to a conversion chip.
say Y to enable its driver.
+
+config ROCKCHIP_RK3066_HDMI
+ bool "Rockchip specific extensions for RK3066 HDMI"
+ depends on DRM_ROCKCHIP
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the RK3066 HDMI driver. If you want to enable
+ HDMI on RK3066 based SoCs, you should select this option.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f6fc9d5dd0ad..524684ba7f6a 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -15,5 +15,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
+rockchipdrm-$(CONFIG_ROCKCHIP_RK3066_HDMI) += rk3066_hdmi.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
new file mode 100644
index 000000000000..85fc5f01f761
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "rk3066_hdmi.h"
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DEFAULT_PLLA_RATE 30000000
+
+struct hdmi_data_info {
+ int vic; /* The CEA Video ID (VIC) of the current drm display mode. */
+ bool sink_is_hdmi;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+};
+
+struct rk3066_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ u8 ddc_addr;
+ u8 segment_addr;
+ u8 stat;
+
+ struct mutex i2c_lock; /* For i2c operation. */
+ struct completion cmpltn;
+};
+
+struct rk3066_hdmi {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct regmap *grf_regmap;
+ int irq;
+ struct clk *hclk;
+ void __iomem *regs;
+
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct rk3066_hdmi_i2c *i2c;
+ struct i2c_adapter *ddc;
+
+ unsigned int tmdsclk;
+
+ struct hdmi_data_info hdmi_data;
+ struct drm_display_mode previous_mode;
+};
+
+#define to_rk3066_hdmi(x) container_of(x, struct rk3066_hdmi, x)
+
+static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->regs + offset);
+}
+
+static inline void hdmi_writeb(struct rk3066_hdmi *hdmi, u16 offset, u32 val)
+{
+ writel_relaxed(val, hdmi->regs + offset);
+}
+
+static inline void hdmi_modb(struct rk3066_hdmi *hdmi, u16 offset,
+ u32 msk, u32 val)
+{
+ u8 temp = hdmi_readb(hdmi, offset) & ~msk;
+
+ temp |= val & msk;
+ hdmi_writeb(hdmi, offset, temp);
+}
+
+static void rk3066_hdmi_i2c_init(struct rk3066_hdmi *hdmi)
+{
+ int ddc_bus_freq;
+
+ ddc_bus_freq = (hdmi->tmdsclk >> 2) / HDMI_SCL_RATE;
+
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
+
+ /* Clear the EDID interrupt flag and mute the interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, HDMI_INTR_EDID_MASK);
+}
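The divider programmed into HDMI_DDC_BUS_FREQ_L/H above scales the DDC state machine clock, which appears to run at tmdsclk / 4, down to the 50 kHz SCL rate defined by HDMI_SCL_RATE. With the 30 MHz DEFAULT_PLLA_RATE this gives (30 MHz / 4) / 50 kHz = 150, i.e. 0x96 in the low byte and 0x00 in the high byte. A quick standalone check of that arithmetic:

/* Worked example of the DDC divider computation, using values from this file. */
#include <stdio.h>

int main(void)
{
        unsigned int tmdsclk = 30000000;        /* DEFAULT_PLLA_RATE */
        unsigned int scl_rate = 50 * 1000;      /* HDMI_SCL_RATE     */
        unsigned int ddc_bus_freq = (tmdsclk >> 2) / scl_rate;

        /* Split into the low/high byte registers as rk3066_hdmi_i2c_init() does. */
        printf("divider = %u (L = 0x%02x, H = 0x%02x)\n", ddc_bus_freq,
               ddc_bus_freq & 0xFF, (ddc_bus_freq >> 8) & 0xFF);
        return 0;
}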
+
+static inline u8 rk3066_hdmi_get_power_mode(struct rk3066_hdmi *hdmi)
+{
+ return hdmi_readb(hdmi, HDMI_SYS_CTRL) & HDMI_SYS_POWER_MODE_MASK;
+}
+
+static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
+{
+ u8 current_mode, next_mode;
+ u8 i = 0;
+
+ current_mode = rk3066_hdmi_get_power_mode(hdmi);
+
+ DRM_DEV_DEBUG(hdmi->dev, "mode :%d\n", mode);
+ DRM_DEV_DEBUG(hdmi->dev, "current_mode :%d\n", current_mode);
+
+ if (current_mode == mode)
+ return;
+
+ do {
+ if (current_mode > mode) {
+ next_mode = current_mode / 2;
+ } else {
+ if (current_mode < HDMI_SYS_POWER_MODE_A)
+ next_mode = HDMI_SYS_POWER_MODE_A;
+ else
+ next_mode = current_mode * 2;
+ }
+
+ DRM_DEV_DEBUG(hdmi->dev, "%d: next_mode :%d\n", i, next_mode);
+
+ if (next_mode != HDMI_SYS_POWER_MODE_D) {
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_MASK, next_mode);
+ } else {
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLL_RESET_MASK);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLLB_RESET);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D);
+ }
+ current_mode = next_mode;
+ i = i + 1;
+ } while ((next_mode != mode) && (i < 5));
+
+ /*
+ * When the IP controller isn't configured with accurate video timing,
+ * DDC_CLK should be equal to the PLLA frequency, which is 30MHz,
+ * so we need to init the TMDS rate to the PCLK rate and reconfigure
+ * the DDC clock.
+ */
+ if (mode < HDMI_SYS_POWER_MODE_D)
+ hdmi->tmdsclk = DEFAULT_PLLA_RATE;
+}
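The do/while loop above never jumps straight to the requested power state; it halves or doubles the current mode value one step at a time (the modes are single-bit values, A = 0x10 through E = 0x80, defined in rk3066_hdmi.h later in this patch), giving at most a handful of intermediate register writes. A trace of just the stepping logic, going from mode A to mode E:

/* Trace of the halve/double stepping in rk3066_hdmi_set_power_mode();
 * register writes omitted, mode values taken from rk3066_hdmi.h. */
#include <stdio.h>

int main(void)
{
        unsigned int cur = 0x10;        /* HDMI_SYS_POWER_MODE_A */
        unsigned int target = 0x80;     /* HDMI_SYS_POWER_MODE_E */
        unsigned int i = 0;

        do {
                cur = cur > target ? cur / 2 : cur * 2;
                printf("step %u: 0x%02x\n", ++i, cur);  /* 0x20, 0x40, 0x80 */
        } while (cur != target && i < 5);

        return 0;
}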
+
+static int
+rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
+ union hdmi_infoframe *frame, u32 frame_index,
+ u32 mask, u32 disable, u32 enable)
+{
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
+
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
+
+ if (setup_rc >= 0) {
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
+
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
+ packed_frame[i]);
+
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ }
+
+ return setup_rc;
+}
+
+static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int rc;
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &hdmi->connector, mode);
+
+ if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+ frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
+ frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+
+ return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
+ HDMI_INFOFRAME_AVI, 0, 0, 0);
+}
+
+static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int value, vsync_offset;
+
+ /* Set the details for the external polarity and interlace mode. */
+ value = HDMI_EXT_VIDEO_SET_EN;
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH : HDMI_VIDEO_HSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH : HDMI_VIDEO_VSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
+ HDMI_VIDEO_MODE_INTERLACE : HDMI_VIDEO_MODE_PROGRESSIVE;
+
+ if (hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3)
+ vsync_offset = 6;
+ else
+ vsync_offset = 0;
+
+ value |= vsync_offset << HDMI_VIDEO_VSYNC_OFFSET_SHIFT;
+ hdmi_writeb(hdmi, HDMI_EXT_VIDEO_PARA, value);
+
+ /* Set the details for the external video timing. */
+ value = mode->htotal;
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+ value = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal;
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_VBLANK_L, value & 0xFF);
+
+ value = mode->vtotal - mode->vsync_start + vsync_offset;
+ hdmi_writeb(hdmi, HDMI_EXT_VDELAY, value & 0xFF);
+
+ value = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_VDURATION, value & 0xFF);
+
+ return 0;
+}
+
+static void
+rk3066_hdmi_phy_write(struct rk3066_hdmi *hdmi, u16 offset, u8 value)
+{
+ hdmi_writeb(hdmi, offset, value);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_PLL_RESET_MASK, HDMI_SYS_PLL_RESET);
+ usleep_range(90, 100);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_PLL_RESET_MASK, 0);
+ usleep_range(900, 1000);
+}
+
+static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
+{
+ /* TMDS uses the same frequency as dclk. */
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x22);
+
+ /*
+ * The semi-public documentation does not describe the hdmi registers
+ * used by the function rk3066_hdmi_phy_write(), so we keep using
+ * these magic values for now.
+ */
+ if (hdmi->tmdsclk > 100000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x0E);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xDA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA1);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x22);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else if (hdmi->tmdsclk > 50000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x06);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xCA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA3);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x02);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xC2);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA2);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ }
+}
+
+static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
+ hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
+
+ if (hdmi->hdmi_data.vic == 6 || hdmi->hdmi_data.vic == 7 ||
+ hdmi->hdmi_data.vic == 21 || hdmi->hdmi_data.vic == 22 ||
+ hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3 ||
+ hdmi->hdmi_data.vic == 17 || hdmi->hdmi_data.vic == 18)
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ hdmi->tmdsclk = mode->clock * 1000;
+
+ /* Mute video and audio output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2, HDMI_VIDEO_AUDIO_DISABLE_MASK,
+ HDMI_AUDIO_DISABLE | HDMI_VIDEO_DISABLE);
+
+ /* Set power state to mode B. */
+ if (rk3066_hdmi_get_power_mode(hdmi) != HDMI_SYS_POWER_MODE_B)
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+
+ /* Input video mode is RGB 24 bit. Use external data enable signal. */
+ hdmi_modb(hdmi, HDMI_AV_CTRL1,
+ HDMI_VIDEO_DE_MASK, HDMI_VIDEO_EXTERNAL_DE);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL1,
+ HDMI_VIDEO_OUTPUT_RGB444 |
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT |
+ HDMI_VIDEO_INPUT_COLOR_RGB);
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x20);
+
+ rk3066_hdmi_config_video_timing(hdmi, mode);
+
+ if (hdmi->hdmi_data.sink_is_hdmi) {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
+ HDMI_VIDEO_MODE_HDMI);
+ rk3066_hdmi_config_avi(hdmi, mode);
+ } else {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
+ }
+
+ rk3066_hdmi_config_phy(hdmi);
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_E);
+
+ /*
+ * When the IP controller is configured with accurate video
+ * timing, the TMDS clock source should be switched to
+ * DCLK_LCDC, so we need to init the TMDS rate to the pixel mode
+ * clock rate and reconfigure the DDC clock.
+ */
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute video output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK, HDMI_AUDIO_DISABLE);
+ return 0;
+}
+
+static void
+rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ /* Store the display mode for plugin/DPMS poweron events. */
+ memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+}
+
+static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+ int mux, val;
+
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ if (mux)
+ val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
+ else
+ val = HDMI_VIDEO_SEL << 16;
+
+ regmap_write(hdmi->grf_regmap, GRF_SOC_CON0, val);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
+ (mux) ? "1" : "0");
+
+ rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_E) {
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK);
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK,
+ HDMI_AUDIO_CP_LOGIC_RESET);
+ usleep_range(500, 510);
+ }
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+}
+
+static bool
+rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static int
+rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
+static const
+struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
+ .enable = rk3066_hdmi_encoder_enable,
+ .disable = rk3066_hdmi_encoder_disable,
+ .mode_fixup = rk3066_hdmi_encoder_mode_fixup,
+ .mode_set = rk3066_hdmi_encoder_mode_set,
+ .atomic_check = rk3066_hdmi_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static enum drm_connector_status
+rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status
+rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ u32 vic = drm_match_cea_mode(mode);
+
+ if (vic > 1)
+ return MODE_OK;
+ else
+ return MODE_BAD;
+}
+
+static struct drm_encoder *
+rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return &hdmi->encoder;
+}
+
+static int
+rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ if (maxX > 1920)
+ maxX = 1920;
+ if (maxY > 1080)
+ maxY = 1080;
+
+ return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+}
+
+static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
+ .fill_modes = rk3066_hdmi_probe_single_connector_modes,
+ .detect = rk3066_hdmi_connector_detect,
+ .destroy = rk3066_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const
+struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
+ .get_modes = rk3066_hdmi_connector_get_modes,
+ .mode_valid = rk3066_hdmi_connector_mode_valid,
+ .best_encoder = rk3066_hdmi_connector_best_encoder,
+};
+
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+ struct device *dev = hdmi->dev;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(&hdmi->connector,
+ &rk3066_hdmi_connector_helper_funcs);
+ drm_connector_init(drm, &hdmi->connector,
+ &rk3066_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u8 interrupt;
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_A)
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_POWER_MODE_B);
+
+ interrupt = hdmi_readb(hdmi, HDMI_INTR_STATUS1);
+ if (interrupt)
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, interrupt);
+
+ if (interrupt & HDMI_INTR_EDID_MASK) {
+ hdmi->i2c->stat = interrupt;
+ complete(&hdmi->i2c->cmpltn);
+ }
+
+ if (interrupt & (HDMI_INTR_HOTPLUG | HDMI_INTR_MSENS))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3066_hdmi_i2c_read(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ int length = msgs->len;
+ u8 *buf = msgs->buf;
+ int ret;
+
+ ret = wait_for_completion_timeout(&hdmi->i2c->cmpltn, HZ / 10);
+ if (!ret || hdmi->i2c->stat & HDMI_INTR_EDID_ERR)
+ return -EAGAIN;
+
+ while (length--)
+ *buf++ = hdmi_readb(hdmi, HDMI_DDC_READ_FIFO_ADDR);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ /*
+ * The DDC module only supports reading EDID messages, so
+ * we assume that each word written to this i2c adapter
+ * is the offset of the EDID word address.
+ */
+ if (msgs->len != 1 ||
+ (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
+ return -EINVAL;
+
+ reinit_completion(&hdmi->i2c->cmpltn);
+
+ if (msgs->addr == DDC_SEGMENT_ADDR)
+ hdmi->i2c->segment_addr = msgs->buf[0];
+ if (msgs->addr == DDC_ADDR)
+ hdmi->i2c->ddc_addr = msgs->buf[0];
+
+ /* Set edid word address 0x00/0x80. */
+ hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
+
+ /* Set edid segment pointer. */
+ hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct rk3066_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct rk3066_hdmi_i2c *i2c = hdmi->i2c;
+ int i, ret = 0;
+
+ mutex_lock(&i2c->i2c_lock);
+
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1,
+ HDMI_INTR_EDID_MASK, HDMI_INTR_EDID_MASK);
+ i2c->stat = 0;
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = rk3066_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = rk3066_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+
+ mutex_unlock(&i2c->i2c_lock);
+
+ return ret;
+}
+
+static u32 rk3066_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm rk3066_hdmi_algorithm = {
+ .master_xfer = rk3066_hdmi_i2c_xfer,
+ .functionality = rk3066_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct rk3066_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->i2c_lock);
+ init_completion(&i2c->cmpltn);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->dev.of_node = hdmi->dev->of_node;
+ adap->algo = &rk3066_hdmi_algorithm;
+ strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
+ adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ DRM_DEV_DEBUG(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
+static int rk3066_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct rk3066_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm_dev = drm;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ hdmi->regs = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(hdmi->hclk)) {
+ DRM_DEV_ERROR(dev, "unable to get HDMI hclk clock\n");
+ return PTR_ERR(hdmi->hclk);
+ }
+
+ ret = clk_prepare_enable(hdmi->hclk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "cannot enable HDMI hclk clock: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmi->grf_regmap)) {
+ DRM_DEV_ERROR(dev, "unable to get rockchip,grf\n");
+ ret = PTR_ERR(hdmi->grf_regmap);
+ goto err_disable_hclk;
+ }
+
+ /* internal hclk = hdmi_hclk / 25 */
+ hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
+
+ hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc)) {
+ ret = PTR_ERR(hdmi->ddc);
+ hdmi->ddc = NULL;
+ goto err_disable_hclk;
+ }
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+ usleep_range(999, 1000);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK2, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK3, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK4, 0);
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+
+ ret = rk3066_hdmi_register(drm, hdmi);
+ if (ret)
+ goto err_disable_i2c;
+
+ dev_set_drvdata(dev, hdmi);
+
+ ret = devm_request_threaded_irq(dev, irq, rk3066_hdmi_hardirq,
+ rk3066_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request hdmi irq: %d\n", ret);
+ goto err_cleanup_hdmi;
+ }
+
+ return 0;
+
+err_cleanup_hdmi:
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+err_disable_i2c:
+ i2c_put_adapter(hdmi->ddc);
+err_disable_hclk:
+ clk_disable_unprepare(hdmi->hclk);
+
+ return ret;
+}
+
+static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+ i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->hclk);
+}
+
+static const struct component_ops rk3066_hdmi_ops = {
+ .bind = rk3066_hdmi_bind,
+ .unbind = rk3066_hdmi_unbind,
+};
+
+static int rk3066_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &rk3066_hdmi_ops);
+}
+
+static int rk3066_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &rk3066_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rk3066_hdmi_dt_ids[] = {
+ { .compatible = "rockchip,rk3066-hdmi" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
+
+struct platform_driver rk3066_hdmi_driver = {
+ .probe = rk3066_hdmi_probe,
+ .remove = rk3066_hdmi_remove,
+ .driver = {
+ .name = "rockchip-rk3066-hdmi",
+ .of_match_table = rk3066_hdmi_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
new file mode 100644
index 000000000000..39a31c62a428
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#ifndef __RK3066_HDMI_H__
+#define __RK3066_HDMI_H__
+
+#define GRF_SOC_CON0 0x150
+#define HDMI_VIDEO_SEL BIT(14)
+
+#define DDC_SEGMENT_ADDR 0x30
+#define HDMI_SCL_RATE (50 * 1000)
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_SYS_CTRL 0x000
+#define HDMI_LR_SWAP_N3 0x004
+#define HDMI_N2 0x008
+#define HDMI_N1 0x00c
+#define HDMI_SPDIF_FS_CTS_INT3 0x010
+#define HDMI_CTS_INT2 0x014
+#define HDMI_CTS_INT1 0x018
+#define HDMI_CTS_EXT3 0x01c
+#define HDMI_CTS_EXT2 0x020
+#define HDMI_CTS_EXT1 0x024
+#define HDMI_AUDIO_CTRL1 0x028
+#define HDMI_AUDIO_CTRL2 0x02c
+#define HDMI_I2S_AUDIO_CTRL 0x030
+#define HDMI_I2S_SWAP 0x040
+#define HDMI_AUDIO_STA_BIT_CTRL1 0x044
+#define HDMI_AUDIO_STA_BIT_CTRL2 0x048
+#define HDMI_AUDIO_SRC_NUM_AND_LENGTH 0x050
+#define HDMI_AV_CTRL1 0x054
+#define HDMI_VIDEO_CTRL1 0x058
+#define HDMI_DEEP_COLOR_MODE 0x05c
+
+#define HDMI_EXT_VIDEO_PARA 0x0c0
+#define HDMI_EXT_HTOTAL_L 0x0c4
+#define HDMI_EXT_HTOTAL_H 0x0c8
+#define HDMI_EXT_HBLANK_L 0x0cc
+#define HDMI_EXT_HBLANK_H 0x0d0
+#define HDMI_EXT_HDELAY_L 0x0d4
+#define HDMI_EXT_HDELAY_H 0x0d8
+#define HDMI_EXT_HDURATION_L 0x0dc
+#define HDMI_EXT_HDURATION_H 0x0e0
+#define HDMI_EXT_VTOTAL_L 0x0e4
+#define HDMI_EXT_VTOTAL_H 0x0e8
+#define HDMI_AV_CTRL2 0x0ec
+#define HDMI_EXT_VBLANK_L 0x0f4
+#define HDMI_EXT_VBLANK_H 0x10c
+#define HDMI_EXT_VDELAY 0x0f8
+#define HDMI_EXT_VDURATION 0x0fc
+
+#define HDMI_CP_MANU_SEND_CTRL 0x100
+#define HDMI_CP_AUTO_SEND_CTRL 0x104
+#define HDMI_AUTO_CHECKSUM_OPT 0x108
+
+#define HDMI_VIDEO_CTRL2 0x114
+
+#define HDMI_PHY_OPTION 0x144
+
+#define HDMI_CP_BUF_INDEX 0x17c
+#define HDMI_CP_BUF_ACC_HB0 0x180
+#define HDMI_CP_BUF_ACC_HB1 0x184
+#define HDMI_CP_BUF_ACC_HB2 0x188
+#define HDMI_CP_BUF_ACC_PB0 0x18c
+
+#define HDMI_DDC_READ_FIFO_ADDR 0x200
+#define HDMI_DDC_BUS_FREQ_L 0x204
+#define HDMI_DDC_BUS_FREQ_H 0x208
+#define HDMI_DDC_BUS_CTRL 0x2dc
+#define HDMI_DDC_I2C_LEN 0x278
+#define HDMI_DDC_I2C_OFFSET 0x280
+#define HDMI_DDC_I2C_CTRL 0x284
+#define HDMI_DDC_I2C_READ_BUF0 0x288
+#define HDMI_DDC_I2C_READ_BUF1 0x28c
+#define HDMI_DDC_I2C_READ_BUF2 0x290
+#define HDMI_DDC_I2C_READ_BUF3 0x294
+#define HDMI_DDC_I2C_WRITE_BUF0 0x298
+#define HDMI_DDC_I2C_WRITE_BUF1 0x29c
+#define HDMI_DDC_I2C_WRITE_BUF2 0x2a0
+#define HDMI_DDC_I2C_WRITE_BUF3 0x2a4
+#define HDMI_DDC_I2C_WRITE_BUF4 0x2ac
+#define HDMI_DDC_I2C_WRITE_BUF5 0x2b0
+#define HDMI_DDC_I2C_WRITE_BUF6 0x2b4
+
+#define HDMI_INTR_MASK1 0x248
+#define HDMI_INTR_MASK2 0x24c
+#define HDMI_INTR_STATUS1 0x250
+#define HDMI_INTR_STATUS2 0x254
+#define HDMI_INTR_MASK3 0x258
+#define HDMI_INTR_MASK4 0x25c
+#define HDMI_INTR_STATUS3 0x260
+#define HDMI_INTR_STATUS4 0x264
+
+#define HDMI_HDCP_CTRL 0x2bc
+
+#define HDMI_EDID_SEGMENT_POINTER 0x310
+#define HDMI_EDID_WORD_ADDR 0x314
+#define HDMI_EDID_FIFO_ADDR 0x318
+
+#define HDMI_HPG_MENS_STA 0x37c
+
+#define HDMI_INTERNAL_CLK_DIVIDER 0x800
+
+enum {
+ /* HDMI_SYS_CTRL */
+ HDMI_SYS_POWER_MODE_MASK = 0xf0,
+ HDMI_SYS_POWER_MODE_A = 0x10,
+ HDMI_SYS_POWER_MODE_B = 0x20,
+ HDMI_SYS_POWER_MODE_D = 0x40,
+ HDMI_SYS_POWER_MODE_E = 0x80,
+ HDMI_SYS_PLL_RESET_MASK = 0x0c,
+ HDMI_SYS_PLL_RESET = 0x0c,
+ HDMI_SYS_PLLB_RESET = 0x08,
+
+ /* HDMI_LR_SWAP_N3 */
+ HDMI_AUDIO_LR_SWAP_MASK = 0xf0,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET0 = 0x10,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET1 = 0x20,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET2 = 0x40,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET3 = 0x80,
+ HDMI_AUDIO_N_19_16_MASK = 0x0f,
+
+ /* HDMI_AUDIO_CTRL1 */
+ HDMI_AUDIO_EXTERNAL_CTS = BIT(7),
+ HDMI_AUDIO_INPUT_IIS = 0,
+ HDMI_AUDIO_INPUT_SPDIF = 0x08,
+ HDMI_AUDIO_INPUT_MCLK_ACTIVE = 0x04,
+ HDMI_AUDIO_INPUT_MCLK_DEACTIVE = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_128X = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_256X = 1,
+ HDMI_AUDIO_INPUT_MCLK_RATE_384X = 2,
+ HDMI_AUDIO_INPUT_MCLK_RATE_512X = 3,
+
+ /* HDMI_I2S_AUDIO_CTRL */
+ HDMI_AUDIO_I2S_FORMAT_STANDARD = 0,
+ HDMI_AUDIO_I2S_CHANNEL_1_2 = 0x04,
+ HDMI_AUDIO_I2S_CHANNEL_3_4 = 0x0c,
+ HDMI_AUDIO_I2S_CHANNEL_5_6 = 0x1c,
+ HDMI_AUDIO_I2S_CHANNEL_7_8 = 0x3c,
+
+ /* HDMI_AV_CTRL1 */
+ HDMI_AUDIO_SAMPLE_FRE_MASK = 0xf0,
+ HDMI_AUDIO_SAMPLE_FRE_32000 = 0x30,
+ HDMI_AUDIO_SAMPLE_FRE_44100 = 0,
+ HDMI_AUDIO_SAMPLE_FRE_48000 = 0x20,
+ HDMI_AUDIO_SAMPLE_FRE_88200 = 0x80,
+ HDMI_AUDIO_SAMPLE_FRE_96000 = 0xa0,
+ HDMI_AUDIO_SAMPLE_FRE_176400 = 0xc0,
+ HDMI_AUDIO_SAMPLE_FRE_192000 = 0xe0,
+ HDMI_AUDIO_SAMPLE_FRE_768000 = 0x90,
+
+ HDMI_VIDEO_INPUT_FORMAT_MASK = 0x0e,
+ HDMI_VIDEO_INPUT_RGB_YCBCR444 = 0,
+ HDMI_VIDEO_INPUT_YCBCR422 = 0x02,
+ HDMI_VIDEO_DE_MASK = 0x1,
+ HDMI_VIDEO_INTERNAL_DE = 0,
+ HDMI_VIDEO_EXTERNAL_DE = 0x01,
+
+ /* HDMI_VIDEO_CTRL1 */
+ HDMI_VIDEO_OUTPUT_FORMAT_MASK = 0xc0,
+ HDMI_VIDEO_OUTPUT_RGB444 = 0,
+ HDMI_VIDEO_OUTPUT_YCBCR444 = 0x40,
+ HDMI_VIDEO_OUTPUT_YCBCR422 = 0x80,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_MASK = 0x30,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_12BIT = 0,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_10BIT = 0x10,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT = 0x30,
+ HDMI_VIDEO_INPUT_COLOR_MASK = 1,
+ HDMI_VIDEO_INPUT_COLOR_RGB = 0,
+ HDMI_VIDEO_INPUT_COLOR_YCBCR = 1,
+
+ /* HDMI_EXT_VIDEO_PARA */
+ HDMI_VIDEO_VSYNC_OFFSET_SHIFT = 4,
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH = BIT(3),
+ HDMI_VIDEO_VSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH = BIT(2),
+ HDMI_VIDEO_HSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_MODE_INTERLACE = BIT(1),
+ HDMI_VIDEO_MODE_PROGRESSIVE = 0,
+ HDMI_EXT_VIDEO_SET_EN = BIT(0),
+
+ /* HDMI_CP_AUTO_SEND_CTRL */
+
+ /* HDMI_VIDEO_CTRL2 */
+ HDMI_VIDEO_AV_MUTE_MASK = 0xc0,
+ HDMI_VIDEO_CLR_AV_MUTE = BIT(7),
+ HDMI_VIDEO_SET_AV_MUTE = BIT(6),
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK = BIT(2),
+ HDMI_AUDIO_CP_LOGIC_RESET = BIT(2),
+ HDMI_VIDEO_AUDIO_DISABLE_MASK = 0x3,
+ HDMI_AUDIO_DISABLE = BIT(1),
+ HDMI_VIDEO_DISABLE = BIT(0),
+
+ /* HDMI_CP_BUF_INDEX */
+ HDMI_INFOFRAME_VSI = 0x05,
+ HDMI_INFOFRAME_AVI = 0x06,
+ HDMI_INFOFRAME_AAI = 0x08,
+
+ /* HDMI_INTR_MASK1 */
+ /* HDMI_INTR_STATUS1 */
+ HDMI_INTR_HOTPLUG = BIT(7),
+ HDMI_INTR_MSENS = BIT(6),
+ HDMI_INTR_VSYNC = BIT(5),
+ HDMI_INTR_AUDIO_FIFO_FULL = BIT(4),
+ HDMI_INTR_EDID_MASK = 0x6,
+ HDMI_INTR_EDID_READY = BIT(2),
+ HDMI_INTR_EDID_ERR = BIT(1),
+
+ /* HDMI_HDCP_CTRL */
+ HDMI_VIDEO_MODE_MASK = BIT(1),
+ HDMI_VIDEO_MODE_HDMI = BIT(1),
+
+ /* HDMI_HPG_MENS_STA */
+ HDMI_HPG_IN_STATUS_HIGH = BIT(7),
+ HDMI_MSENS_IN_STATUS_HIGH = BIT(6),
+};
+
+#endif /* __RK3066_HDMI_H__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index d7fa17f12769..cb938d3cd3c2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,6 +448,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -457,6 +465,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
+ .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
@@ -486,6 +495,8 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
+ ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
+ CONFIG_ROCKCHIP_RK3066_HDMI);
ret = platform_register_drivers(rockchip_sub_drivers,
num_rockchip_sub_drivers);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ce48568ec8a0..e4bc4322bc3f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -73,4 +73,5 @@ extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
extern struct platform_driver vop_platform_driver;
+extern struct platform_driver rk3066_hdmi_driver;
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 8ce68bd508be..30459de66b67 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -90,12 +90,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- fbi->par = helper;
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
@@ -110,8 +108,6 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
rk_obj->kvaddr,
offset, size);
- fbi->skip_vt_switch = true;
-
return 0;
out:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index a8db758d523e..a2ebb08990e9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -221,26 +221,13 @@ static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- unsigned int i, count = obj->size >> PAGE_SHIFT;
+ unsigned int count = obj->size >> PAGE_SHIFT;
unsigned long user_count = vma_pages(vma);
- unsigned long uaddr = vma->vm_start;
- unsigned long offset = vma->vm_pgoff;
- unsigned long end = user_count + offset;
- int ret;
if (user_count == 0)
return -ENXIO;
- if (end > count)
- return -ENXIO;
- for (i = offset; i < end; i++) {
- ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
- if (ret)
- return ret;
- uaddr += PAGE_SIZE;
- }
-
- return 0;
+ return vm_map_pages(vma, rk_obj->pages, count);
}
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0d4ade9d4722..20a9c296d027 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1041,6 +1041,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
uint32_t pin_pol, val;
+ int dither_bpc = s->output_bpc ? s->output_bpc : 10;
int ret;
mutex_lock(&vop->vop_lock);
@@ -1098,11 +1099,19 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
!(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
s->output_mode = ROCKCHIP_OUT_MODE_P888;
- if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
+ if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8)
VOP_REG_SET(vop, common, pre_dither_down, 1);
else
VOP_REG_SET(vop, common, pre_dither_down, 0);
+ if (dither_bpc == 6) {
+ VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO);
+ VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666);
+ VOP_REG_SET(vop, common, dither_down_en, 1);
+ } else {
+ VOP_REG_SET(vop, common, dither_down_en, 0);
+ }
+
VOP_REG_SET(vop, common, out_mode, s->output_mode);
VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 04ed401d2325..e64351dab610 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -71,7 +71,9 @@ struct vop_common {
struct vop_reg dsp_blank;
struct vop_reg data_blank;
struct vop_reg pre_dither_down;
- struct vop_reg dither_down;
+ struct vop_reg dither_down_sel;
+ struct vop_reg dither_down_mode;
+ struct vop_reg dither_down_en;
struct vop_reg dither_up;
struct vop_reg gate_en;
struct vop_reg mmu_en;
@@ -287,6 +289,16 @@ enum scale_down_mode {
SCALE_DOWN_AVG = 0x1
};
+enum dither_down_mode {
+ RGB888_TO_RGB565 = 0x0,
+ RGB888_TO_RGB666 = 0x1
+};
+
+enum dither_down_mode_sel {
+ DITHER_DOWN_ALLEGRO = 0x0,
+ DITHER_DOWN_FRC = 0x1
+};
+
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index bd76328c0fdb..e732b73033c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -137,6 +137,9 @@ static const struct vop_common rk3036_common = {
.standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
+ .dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
.cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
};
@@ -200,6 +203,9 @@ static const struct vop_common px30_common = {
.standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
.out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
.dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
+ .dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8),
+ .dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7),
+ .dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6),
.cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
};
@@ -365,6 +371,8 @@ static const struct vop_common rk3066_common = {
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
+ .dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
};
@@ -458,6 +466,9 @@ static const struct vop_common rk3188_common = {
.standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
+ .dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
};
@@ -585,8 +596,10 @@ static const struct vop_common rk3288_common = {
.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+ .dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
- .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
@@ -878,7 +891,10 @@ static const struct vop_misc rk3328_misc = {
static const struct vop_common rk3328_common = {
.standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
- .dither_down = VOP_REG(RK3328_DSP_CTRL1, 0xf, 1),
+ .dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
+ .pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index fbed2c90fd51..286a0eeefcb6 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored)
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
- unsigned long *bitmap = NULL;
+ unsigned long *bitmap;
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
@@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1717,7 +1716,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
@@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1818,7 +1816,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 61bbe8e8bcc5..e2a6c82c8252 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -4,7 +4,6 @@ config DRM_SHMOBILE
depends on DRM && ARM
depends on ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 35367ada3bc1..d15b10de1da6 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -6,7 +6,7 @@ config DRM_STM
select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
- select FB_PROVIDE_GET_FB_UNMAPPED_AREA
+ select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
help
Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 0a7f933ab007..5834ef56fbaa 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -129,6 +129,40 @@ static void drv_unload(struct drm_device *ddev)
drm_mode_config_cleanup(ddev);
}
+static __maybe_unused int drv_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+ struct drm_atomic_state *state;
+
+ drm_kms_helper_poll_disable(ddev);
+ state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(state);
+ }
+ ldev->suspend_state = state;
+ ltdc_suspend(ddev);
+
+ return 0;
+}
+
+static __maybe_unused int drv_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ ltdc_resume(ddev);
+ drm_atomic_helper_resume(ddev, ldev->suspend_state);
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
+};
+
static int stm_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -186,6 +220,7 @@ static struct platform_driver stm_drm_platform_driver = {
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
+ .pm = &drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a672b59a2226..1bef73e8c8fe 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -356,12 +356,40 @@ static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_disable_unprepare(dsi->pllref_clk);
+
+ return 0;
+}
+
+static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_prepare_enable(dsi->pllref_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mipi_dsi_stm_suspend,
+ dw_mipi_dsi_stm_resume)
+};
+
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
.remove = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
+ .pm = &dw_mipi_dsi_stm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index b1741a9d5be2..32fd6a3b37fb 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1062,6 +1062,30 @@ static int ltdc_get_caps(struct drm_device *ddev)
return 0;
}
+void ltdc_suspend(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+ clk_disable_unprepare(ldev->pixel_clk);
+}
+
+int ltdc_resume(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = clk_prepare_enable(ldev->pixel_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ltdc_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index e46f477a8494..a1ad0ae3b006 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -36,6 +36,7 @@ struct ltdc_device {
u32 error_status;
u32 irq_status;
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
+ struct drm_atomic_state *suspend_state;
};
bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
@@ -45,5 +46,7 @@ bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
+void ltdc_suspend(struct drm_device *ddev);
+int ltdc_resume(struct drm_device *ddev);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4c0d51f73237..4e5922c89d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -361,13 +361,6 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
- /*
- * backend DMA accesses DRAM directly, bypassing the system
- * bus. As such, the address range is different and the buffer
- * address needs to be corrected.
- */
- paddr -= PHYS_OFFSET;
-
if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
@@ -720,33 +713,22 @@ static int sun4i_backend_free_sat(struct device *dev) {
*/
static int sun4i_backend_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* input is port 0 */
- port = of_graph_get_port_by_id(node, 0);
- if (!port)
+ /* Input port is 0, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 0, -1);
+ if (!ep)
return -EINVAL;
- /* try finding an upstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (ret)
- continue;
-
- ret = reg;
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
/* TODO: This needs to take multiple pipelines into account */
@@ -814,6 +796,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, backend);
spin_lock_init(&backend->frontend_lock);
+ if (of_find_property(dev->of_node, "interconnects", NULL)) {
+ /*
+ * This assumes we have the same DMA constraints for all the
+ * devices in our pipeline (all the backends, but also the
+ * frontends). This sounds bad, but it has always been the case
+ * for us, and DRM doesn't do per-device allocation either, so
+ * we would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If we don't have the interconnect property, most likely
+ * because of an old DT, we need to set the DMA offset by hand
+ * on our device since the RAM mapping is at 0 for the DMA bus,
+ * unlike the CPU.
+ */
+ drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ }
+
backend->engine.node = dev->of_node;
backend->engine.ops = &sun4i_backend_engine_ops;
backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index b685ee11623d..b08c4453d47c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -269,6 +269,7 @@ struct sun4i_hdmi {
struct clk *tmds_clk;
struct i2c_adapter *i2c;
+ struct i2c_adapter *ddc_i2c;
/* Regmap fields for I2C adapter */
struct regmap_field *field_ddc_en;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index d18862629301..8c122e637697 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -217,7 +217,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- edid = drm_get_edid(connector, hdmi->i2c);
+ edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
if (!edid)
return 0;
@@ -233,6 +233,28 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
return ret;
}
+static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
.get_modes = sun4i_hdmi_get_modes,
};
@@ -580,6 +602,15 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_mod_clk;
}
+ hdmi->ddc_i2c = sun4i_hdmi_get_ddc(dev);
+ if (IS_ERR(hdmi->ddc_i2c)) {
+ ret = PTR_ERR(hdmi->ddc_i2c);
+ if (ret == -ENODEV)
+ hdmi->ddc_i2c = NULL;
+ else
+ goto err_del_i2c_adapter;
+ }
+
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
ret = drm_encoder_init(drm,
@@ -589,14 +620,14 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
if (!hdmi->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
@@ -635,6 +666,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_connector:
cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
+err_put_ddc_i2c:
+ i2c_put_adapter(hdmi->ddc_i2c);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
err_disable_mod_clk:
@@ -655,6 +688,7 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
clk_disable_unprepare(hdmi->bus_clk);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index fb985ba1a176..2598741a00a6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -11,6 +11,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "sun4i_hdmi.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 147b97ed1a09..3a3ba99fed22 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -20,7 +20,7 @@ struct sun4i_lvds {
struct drm_connector connector;
struct drm_encoder encoder;
- struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
};
static inline struct sun4i_lvds *
@@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
{
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(lvds->panel);
}
static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
@@ -54,9 +53,8 @@ static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(lvds->panel);
drm_connector_cleanup(connector);
}
@@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_prepare(lvds->panel);
+ drm_panel_enable(lvds->panel);
}
}
static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_disable(lvds->panel);
+ drm_panel_unprepare(lvds->panel);
}
}
@@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
if (!lvds)
return -ENOMEM;
- lvds->tcon = tcon;
encoder = &lvds->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &lvds->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
return 0;
@@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The LVDS encoder can only work with the TCON channel 0 */
lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (lvds->panel) {
drm_connector_helper_add(&lvds->connector,
&sun4i_lvds_con_helper_funcs);
ret = drm_connector_init(drm, &lvds->connector,
@@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
- ret = drm_panel_attach(tcon->panel, &lvds->connector);
+ ret = drm_panel_attach(lvds->panel, &lvds->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index cae19e7bbeaa..d9e2502b49fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -27,6 +27,8 @@ struct sun4i_rgb {
struct drm_encoder encoder;
struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
static inline struct sun4i_rgb *
@@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(rgb->panel);
}
+/*
+ * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the
+ * CVT spec reuses that tolerance in its examples, so it looks to be a
+ * good default tolerance for the EDID-based modes. Define it as 5 per
+ * mille to avoid floating point operations.
+ */
+#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5
+
static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
@@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
- unsigned long rate = mode->clock * 1000;
- long rounded_rate;
+ unsigned long long rate = mode->clock * 1000;
+ unsigned long long lowest, highest;
+ unsigned long long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
@@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ /*
+ * TODO: We should use the struct display_timing if available
+ * and/or try to stretch the timings within that
+ * tolerance to take care of panels that we wouldn't be
+ * able to find an exact match for.
+ */
+ if (rgb->panel) {
+ DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks");
+ goto out;
+ }
+
+ /*
+ * That shouldn't ever happen unless something is really wrong, but it
+ * doesn't hurt to check.
+ */
+ if (!rgb->bridge)
+ goto out;
+
tcon->dclk_min_div = 6;
tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
- if (rounded_rate < rate)
+
+ lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(lowest, 1000);
+ if (rounded_rate < lowest)
return MODE_CLOCK_LOW;
- if (rounded_rate > rate)
+ highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(highest, 1000);
+ if (rounded_rate > highest)
return MODE_CLOCK_HIGH;
+out:
DRM_DEBUG_DRIVER("Clock rate OK\n");
return MODE_OK;
@@ -114,9 +148,8 @@ static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(rgb->panel);
drm_connector_cleanup(connector);
}
@@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_prepare(rgb->panel);
+ drm_panel_enable(rgb->panel);
}
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_disable(rgb->panel);
+ drm_panel_unprepare(rgb->panel);
}
}
@@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
int ret;
@@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &rgb->panel, &rgb->bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
@@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (rgb->panel) {
drm_connector_helper_add(&rgb->connector,
&sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector,
@@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
- ret = drm_panel_attach(tcon->panel, &rgb->connector);
+ ret = drm_panel_attach(rgb->panel, &rgb->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
}
}
- if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (rgb->bridge) {
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 7136fc91c603..9d8d8124b1f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -236,8 +236,8 @@ static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
return NULL;
}
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- const struct drm_encoder *encoder)
+static void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
+ const struct drm_encoder *encoder)
{
int ret = -ENOTSUPP;
@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
u32 block_space, start_delay;
u32 tcon_div;
- tcon->dclk_min_div = 4;
- tcon->dclk_max_div = 127;
+ tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
+ tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -561,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b5214d71610f..84cfb1952ff7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -257,8 +257,6 @@ struct sun4i_tcon {
struct reset_control *lcd_rst;
struct reset_control *lvds_rst;
- struct drm_panel *panel;
-
/* Platform adjustments */
const struct sun4i_tcon_quirks *quirks;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 318994cd1b85..6ff585055a07 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,7 +24,9 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "sun4i_crtc.h"
#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
#include <video/mipi_display.h>
@@ -33,6 +35,8 @@
#define SUN6I_DSI_CTL_EN BIT(0)
#define SUN6I_DSI_BASIC_CTL_REG 0x00c
+#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4)
+#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3)
#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
@@ -153,6 +157,8 @@
#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
+#define SUN6I_DSI_SYNC_POINT 40
+
enum sun6i_dsi_start_inst {
DSI_START_LPRX,
DSI_START_LPTX,
@@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
+ u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
+ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+
+ if (delay > mode->vtotal)
+ delay = delay % mode->vtotal;
+
+ return max_t(u16, delay, 1);
+}
+
+static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+
+ return mode->htotal * Bpp / device->lanes;
+}
+
+static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num, u16 edge1)
+{
+ u16 edge0 = edge1;
+
+ edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8;
+
+ if (edge0 > line_num)
+ return edge0 - line_num;
+
+ return 1;
+}
+
+static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ unsigned int hbp = mode->htotal - mode->hsync_end;
+ u16 edge1;
+
+ edge1 = SUN6I_DSI_SYNC_POINT;
+ edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes;
+
+ if (edge1 > line_num)
+ return line_num;
+
+ return edge1;
}
static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
@@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
u32 val = 0;
- if ((mode->hsync_end - mode->hdisplay) > 20) {
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ u16 line_num = sun6i_dsi_get_line_num(dsi, mode);
+ u16 edge0, edge1;
+
+ edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num);
+ edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG,
+ SUN6I_DSI_BURST_DRQ_EDGE0(edge0) |
+ SUN6I_DSI_BURST_DRQ_EDGE1(edge1));
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG,
+ SUN6I_DSI_BURST_LINE_NUM(line_num) |
+ SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
+
+ val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
+ } else if ((mode->hsync_end - mode->hdisplay) > 20) {
/* Maaaaaagic */
u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
@@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
+ struct mipi_dsi_device *device = dsi->device;
u16 delay = 50 - 1;
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ delay = (mode->htotal - mode->hdisplay) * 150;
+ delay /= (mode->clock / 1000) * 8;
+ delay -= 50;
+ }
+
+ regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG,
+ 2 << (4 * DSI_INST_ID_LP11) |
+ 3 << (4 * DSI_INST_ID_DLY));
+
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
SUN6I_DSI_INST_LOOP_NUM_N1(delay));
@@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
{
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
- u16 hbp, hfp, hsa, hblk, vblk;
+ u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
+ u32 basic_ctl = 0;
size_t bytes;
u8 *buffer;
/* Do all timing calculations up front to allocate buffer space */
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hblk = mode->hdisplay * Bpp;
+ basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST |
+ SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS |
+ SUN6I_DSI_BASIC_CTL_HBP_DIS;
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ if (device->lanes == 4)
+ basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL |
+ SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc);
+ } else {
+ /*
+ * A sync period is composed of a blanking packet (4
+ * bytes + payload + 2 bytes) and a sync event packet
+ * (4 bytes). Its minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
-
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
-
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
-
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * The blanking is set using a sync event (4 bytes)
+ * and a blanking packet (4 bytes + payload + 2
+ * bytes). Its minimal size is therefore 10 bytes.
+ */
+#define HBLK_PACKET_OVERHEAD 10
+ hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
+ HBLK_PACKET_OVERHEAD);
+
+ /*
+ * And I'm not entirely sure what vblk is about. The driver in
+ * the Allwinner BSP uses a rather convoluted calculation
+ * there only for 4 lanes. However, using 0 (the non-4-lane
+ * case) even with a 4-lane screen seems to work...
+ */
+ vblk = 0;
+ }
/* How many bytes do we need to send all payloads? */
bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
@@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
if (WARN_ON(!buffer))
return;
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl);
regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
@@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
mode->vsync_start) |
- SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
- mode->vdisplay));
+ SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal -
+ mode->vsync_end));
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index a07090579f84..5c3ad5be0690 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -13,6 +13,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
+#define SUN6I_DSI_TCON_DIV 4
+
struct sun6i_dsi {
struct drm_connector connector;
struct drm_encoder encoder;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 30a2eff55687..fd20a928cf4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = {
static int sun8i_mixer_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* output is port 1 */
- port = of_graph_get_port_by_id(node, 1);
- if (!port)
+ /* Output port is 1, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 1, -1);
+ if (!ep)
return -EINVAL;
- /* try to find downstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (!ret) {
- of_node_put(remote);
- of_node_put(ep);
- of_node_put(port);
-
- return reg;
- }
-
- of_node_put(remote);
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
static int sun8i_mixer_bind(struct device *dev, struct device *master,
@@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = 0,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 432000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ccsc = 0,
.mod_rate = 150000000,
};
@@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
@@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.is_de3 = true,
.mod_rate = 600000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 913d14ce68b0..80e084caa084 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -159,6 +159,7 @@ struct de2_fmt_info {
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
* @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @scanline_yuv: size of a scanline for the VI scaler for YUV formats.
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -167,6 +168,7 @@ struct sun8i_mixer_cfg {
int ccsc;
unsigned long mod_rate;
unsigned int is_de3 : 1;
+ unsigned int scanline_yuv;
};
struct sun8i_mixer {
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index b1e7c76e9c17..3267d0f9b9b2 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -269,12 +269,12 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
return 0;
}
-const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
.has_tcon_tv1 = true,
.has_dsi = true,
};
-const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
/* Nothing special */
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 8a0616238467..bb8e026d6405 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
+ u32 hn = 0, hm = 0;
+ u32 vn = 0, vm = 0;
bool subsampled;
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
@@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
subsampled = format->hsub > 1 || format->vsub > 1;
if (insize != outsize || subsampled || hphase || vphase) {
- u32 hscale, vscale;
+ unsigned int scanline, required;
+ struct drm_display_mode *mode;
+ u32 hscale, vscale, fps;
+ u64 ability;
DRM_DEBUG_DRIVER("HW scaling is enabled\n");
- hscale = state->src_w / state->crtc_w;
- vscale = state->src_h / state->crtc_h;
+ mode = &plane->state->crtc->state->mode;
+ fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal);
+ ability = clk_get_rate(mixer->mod_clk);
+ /* The BSP algorithm assumes 80% efficiency of the VI scaler unit */
+ ability *= 80;
+ do_div(ability, mode->vdisplay * fps * max(src_w, dst_w));
+
+ required = src_h * 100 / dst_h;
+
+ if (ability < required) {
+ DRM_DEBUG_DRIVER("Using vertical coarse scaling\n");
+ vm = src_h;
+ vn = (u32)ability * dst_h / 100;
+ src_h = vn;
+ }
+
+ /* It seems that every RGB scaler has a buffer for 2048 pixels */
+ scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
+
+ if (src_w > scanline) {
+ DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
+ hm = src_w;
+ hn = scanline;
+ src_w = hn;
+ }
+
+ hscale = (src_w << 16) / dst_w;
+ vscale = (src_h << 16) / dst_h;
sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
dst_h, hscale, vscale, hphase, vphase,
@@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
sun8i_vi_scaler_enable(mixer, channel, false);
}
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 8a5e6d01c85d..a223a4839f45 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -24,6 +24,14 @@
((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
((base) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \
+ ((base) + 0xf0)
+#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \
+ ((base) + 0xf4)
+#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \
+ ((base) + 0xf8)
+#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \
+ ((base) + 0xfc)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
@@ -33,6 +41,9 @@
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
+#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16)
+#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0)
+
struct sun8i_mixer;
struct sun8i_vi_layer {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 0a4ce05e00ab..1dd83a757dba 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -255,11 +255,9 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
helper->fb = fb;
helper->fbdev = info;
- info->par = helper;
info->fbops = &tegra_fb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+ drm_fb_helper_fill_info(info, helper, sizes);
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 4f80100ff5f3..4cce11fd8836 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -204,7 +204,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -230,7 +230,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
}
err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (err == 0) {
err = -EFAULT;
goto free_sgt;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 40057106f5f3..5be5a0817dfe 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2871,6 +2871,13 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible.
*/
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+ err);
+ return err;
+ }
+
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
@@ -2894,6 +2901,8 @@ static int tegra_sor_init(struct host1x_client *client)
err);
return err;
}
+
+ reset_control_release(sor->rst);
}
err = clk_prepare_enable(sor->clk_safe);
@@ -3331,7 +3340,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ sor->rst = devm_reset_control_get_exclusive_released(&pdev->dev, "sor");
if (IS_ERR(sor->rst)) {
err = PTR_ERR(sor->rst);
@@ -3519,6 +3528,8 @@ static int tegra_sor_suspend(struct device *dev)
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
+
+ reset_control_release(sor->rst);
}
usleep_range(1000, 2000);
@@ -3542,9 +3553,17 @@ static int tegra_sor_resume(struct device *dev)
usleep_range(1000, 2000);
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+
err = reset_control_deassert(sor->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
+ reset_control_release(sor->rst);
clk_disable_unprepare(sor->clk);
return err;
}
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 52598049c096..cb7df2086aee 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -8,7 +8,6 @@ config DRM_TILCDC
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
- select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have a TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
index fb221e6f8885..6f8f764560e0 100644
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ b/drivers/gpu/drm/tinydrm/core/Makefile
@@ -1,3 +1,3 @@
-tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o
+tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
deleted file mode 100644
index 554abd5d3b53..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides driver helpers for very simple display hardware.
- *
- * It is based on &drm_simple_display_pipe coupled with a &drm_connector which
- * has only one fixed &drm_display_mode. The framebuffers are backed by the
- * cma helper and have support for framebuffer flushing (dirty).
- * fbdev support is also included.
- *
- */
-
-/**
- * DOC: core
- *
- * The driver allocates &tinydrm_device, initializes it using
- * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init()
- * and registers the DRM device using devm_tinydrm_register().
- */
-
-static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- struct drm_device *drm;
-
- /*
- * We don't embed drm_device, because that prevent us from using
- * devm_kzalloc() to allocate tinydrm_device in the driver since
- * drm_dev_put() frees the structure. The devm_ functions provide
- * for easy error handling.
- */
- drm = drm_dev_alloc(driver, parent);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- tdev->drm = drm;
- drm->dev_private = tdev;
- drm_mode_config_init(drm);
- drm->mode_config.funcs = &tinydrm_mode_config_funcs;
- drm->mode_config.allow_fb_modifiers = true;
-
- return 0;
-}
-
-static void tinydrm_fini(struct tinydrm_device *tdev)
-{
- drm_mode_config_cleanup(tdev->drm);
- tdev->drm->dev_private = NULL;
- drm_dev_put(tdev->drm);
-}
-
-static void devm_tinydrm_release(void *data)
-{
- tinydrm_fini(data);
-}
-
-/**
- * devm_tinydrm_init - Initialize tinydrm device
- * @parent: Parent device object
- * @tdev: tinydrm device
- * @driver: DRM driver
- *
- * This function initializes @tdev, the underlying DRM device and it's
- * mode_config. Resources will be automatically freed on driver detach (devres)
- * using drm_mode_config_cleanup() and drm_dev_put().
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- int ret;
-
- ret = tinydrm_init(parent, tdev, driver);
- if (ret)
- return ret;
-
- ret = devm_add_action(parent, devm_tinydrm_release, tdev);
- if (ret)
- tinydrm_fini(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_init);
-
-static int tinydrm_register(struct tinydrm_device *tdev)
-{
- struct drm_device *drm = tdev->drm;
- int ret;
-
- ret = drm_dev_register(tdev->drm, 0);
- if (ret)
- return ret;
-
- ret = drm_fbdev_generic_setup(drm, 0);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
-
- return 0;
-}
-
-static void tinydrm_unregister(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
- drm_dev_unregister(tdev->drm);
-}
-
-static void devm_tinydrm_register_release(void *data)
-{
- tinydrm_unregister(data);
-}
-
-/**
- * devm_tinydrm_register - Register tinydrm device
- * @tdev: tinydrm device
- *
- * This function registers the underlying DRM device and fbdev.
- * These resources will be automatically unregistered on driver detach (devres)
- * and the display pipeline will be disabled.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_register(struct tinydrm_device *tdev)
-{
- struct device *dev = tdev->drm->dev;
- int ret;
-
- ret = tinydrm_register(tdev);
- if (ret)
- return ret;
-
- ret = devm_add_action(dev, devm_tinydrm_register_release, tdev);
- if (ret)
- tinydrm_unregister(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_register);
-
-/**
- * tinydrm_shutdown - Shutdown tinydrm
- * @tdev: tinydrm device
- *
- * This function makes sure that the display pipeline is disabled.
- * Used by drivers in their shutdown callback to turn off the display
- * on machine shutdown and reboot.
- */
-void tinydrm_shutdown(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
-}
-EXPORT_SYMBOL(tinydrm_shutdown);
-
-MODULE_LICENSE("GPL");
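
Note: the file removed above provided the devm_tinydrm_init() / devm_tinydrm_register() wrappers documented in its DOC comments. The driver conversions further down replace that flow with the core DRM helpers directly; a condensed sketch of the new probe sequence, distilled from the hx8357d and ili9225 hunks below (my_* identifiers are placeholders, not part of the patch):

	static int my_probe(struct spi_device *spi)
	{
		struct mipi_dbi *mipi;
		struct drm_device *drm;
		int ret;

		mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);	/* freed by ->release */
		if (!mipi)
			return -ENOMEM;

		drm = &mipi->drm;
		ret = devm_drm_dev_init(&spi->dev, drm, &my_driver);
		if (ret) {
			kfree(mipi);
			return ret;
		}
		drm_mode_config_init(drm);

		/* ... gpios, regulator, mipi_dbi_spi_init(), mipi_dbi_init() ... */

		drm_mode_config_reset(drm);
		ret = drm_dev_register(drm, 0);
		if (ret)
			return ret;

		spi_set_drvdata(spi, drm);
		drm_fbdev_generic_setup(drm, 0);
		return 0;
	}
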
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 2737b6fdadc8..6d540d93758f 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -26,164 +26,6 @@ static unsigned int spi_max;
module_param(spi_max, uint, 0400);
MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
-/**
- * tinydrm_memcpy - Copy clip buffer
- * @dst: Destination buffer
- * @vaddr: Source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
- unsigned int pitch = fb->pitches[0];
- void *src = vaddr + (clip->y1 * pitch) + (clip->x1 * cpp);
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y;
-
- for (y = clip->y1; y < clip->y2; y++) {
- memcpy(dst, src, len);
- src += pitch;
- dst += len;
- }
-}
-EXPORT_SYMBOL(tinydrm_memcpy);
-
-/**
- * tinydrm_swab16 - Swap bytes into clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: RGB565 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u16);
- unsigned int x, y;
- u16 *src, *buf;
-
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++)
- *dst++ = swab16(*src++);
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_swab16);
-
-/**
- * tinydrm_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- * @swap: Swap bytes
- *
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
- */
-void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- u32 *src, *buf;
- u16 val16;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- val16 = ((*src & 0x00F80000) >> 8) |
- ((*src & 0x0000FC00) >> 5) |
- ((*src & 0x000000F8) >> 3);
- src++;
- if (swap)
- *dst++ = swab16(val16);
- else
- *dst++ = val16;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
-
-/**
- * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
- * @dst: 8-bit grayscale destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- *
- * Drm doesn't have native monochrome or grayscale support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
- *
- * Monochrome drivers will use the most significant bit,
- * where 1 means foreground color and 0 background color.
- *
- * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
- */
-void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- void *buf;
- u32 *src;
-
- if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
- return;
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- u8 r = (*src & 0x00ff0000) >> 16;
- u8 g = (*src & 0x0000ff00) >> 8;
- u8 b = *src & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dst++ = (3 * r + 6 * g + b) / 10;
- src++;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
-
#if IS_ENABLED(CONFIG_SPI)
/**
@@ -365,3 +207,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
EXPORT_SYMBOL(tinydrm_spi_transfer);
#endif /* CONFIG_SPI */
+
+MODULE_LICENSE("GPL");
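
Note: the conversion helpers deleted above reappear as the drm_fb_*() functions (drm_fb_memcpy(), drm_fb_swab16(), drm_fb_xrgb8888_to_rgb565(), drm_fb_xrgb8888_to_gray8()) that the mipi-dbi, repaper and st7586 hunks below now call via <drm/drm_format_helper.h>. The per-pixel math is unchanged; extracted from the removed code for reference:

	/* XRGB8888 -> RGB565: keep the top 5/6/5 bits of each channel */
	u16 rgb565 = ((xrgb & 0x00f80000) >> 8) |
		     ((xrgb & 0x0000fc00) >> 5) |
		     ((xrgb & 0x000000f8) >> 3);

	/* XRGB8888 -> 8-bit grayscale, ITU BT.601 luma approximated as 3:6:1 */
	u8 r = (xrgb & 0x00ff0000) >> 16;
	u8 g = (xrgb & 0x0000ff00) >> 8;
	u8 b =  xrgb & 0x000000ff;
	u8 gray = (3 * r + 6 * g + b) / 10;
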
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index bb5b1c1e21ba..bb8a7ed8ddf6 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -13,7 +13,7 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
struct tinydrm_connector {
struct drm_connector base;
@@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
/**
* tinydrm_display_pipe_init - Initialize display pipe
- * @tdev: tinydrm device
+ * @drm: DRM device
+ * @pipe: Display pipe
* @funcs: Display pipe functions
* @connector_type: Connector type
* @formats: Array of supported formats (DRM_FORMAT\_\*)
@@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
* Returns:
* Zero on success, negative error code on failure.
*/
-int
-tinydrm_display_pipe_init(struct tinydrm_device *tdev,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation)
+int tinydrm_display_pipe_init(struct drm_device *drm,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
{
- struct drm_device *drm = tdev->drm;
struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
@@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
if (IS_ERR(connector))
return PTR_ERR(connector);
- return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
+ return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
format_count, modifiers, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index 8bbd0beafc6a..5773d0fb6ca1 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -171,6 +175,8 @@ out_enable:
}
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
@@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
};
static const struct drm_display_mode yx350hv15_mode = {
- TINYDRM_MODE(320, 480, 60, 75),
+ DRM_SIMPLE_MODE(320, 480, 60, 75),
};
DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
@@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
@@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
@@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
- &hx8357d_driver, &yx350hv15_mode, rotation);
+ ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void hx8357d_shutdown(struct spi_device *spi)
+static int hx8357d_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver hx8357d_spi_driver = {
@@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = {
},
.id_table = hx8357d_id,
.probe = hx8357d_probe,
+ .remove = hx8357d_remove,
.shutdown = hx8357d_shutdown,
};
module_spi_driver(hx8357d_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 43a3b68d90a2..4b1a587c0134 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,9 +20,11 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
- struct device *dev = tdev->drm->dev;
+ struct device *dev = pipe->crtc.dev->dev;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int ret, idx;
u8 am_id;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
mipi_dbi_hw_reset(mipi);
@@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
- return;
+ goto out_exit;
}
ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
@@ -280,15 +288,23 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi->enabled = true;
ili9225_fb_dirty(fb, &rect);
+out_exit:
+ drm_dev_exit(idx);
}
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
DRM_DEBUG_KMS("\n");
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
if (!mipi->enabled)
return;
@@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
mipi->enabled = false;
}
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
};
static const struct drm_display_mode ili9225_mode = {
- TINYDRM_MODE(176, 220, 35, 44),
+ DRM_SIMPLE_MODE(176, 220, 35, 44),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
@@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
@@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi)
/* override the command function set in mipi_dbi_spi_init() */
mipi->command = ili9225_dbi_command;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs,
- &ili9225_driver, &ili9225_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
- return devm_tinydrm_register(&mipi->tinydrm);
+ return 0;
}
-static void ili9225_shutdown(struct spi_device *spi)
+static int ili9225_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void ili9225_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9225_spi_driver = {
@@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = {
},
.id_table = ili9225_id,
.probe = ili9225_probe,
+ .remove = ili9225_remove,
.shutdown = ili9225_shutdown,
};
module_spi_driver(ili9225_spi_driver);
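
Note: a pattern repeated across ili9225 and the other panel drivers in this series is that paths touching the hardware are now bracketed by drm_dev_enter()/drm_dev_exit(), while the new ->remove() callbacks call drm_dev_unplug() instead of tearing the device down immediately. A flush that races with unbind then simply bails out rather than talking to an SPI device that is already gone. In outline (my_fb_dirty is a hypothetical stand-in, not from the patch):

	static void my_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
	{
		int idx;

		if (!drm_dev_enter(fb->dev, &idx))
			return;		/* device unplugged, nothing to flush */

		/* ... clip, convert and send the damaged area over SPI ... */

		drm_dev_exit(idx);
	}
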
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 713bb2dd7e04..4ade9e4b924f 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -127,6 +131,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
@@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
};
static const struct drm_display_mode yx240qv29_mode = {
- TINYDRM_MODE(240, 320, 37, 49),
+ DRM_SIMPLE_MODE(240, 320, 37, 49),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
@@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
@@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
- &ili9341_driver, &yx240qv29_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void ili9341_shutdown(struct spi_device *spi)
+static int ili9341_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9341_spi_driver = {
@@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = {
},
.id_table = ili9341_id,
.probe = ili9341_probe,
+ .remove = ili9341_remove,
.shutdown = ili9341_shutdown,
};
module_spi_driver(ili9341_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 82a92ec9ae3c..8e169846fbd8 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,7 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -135,6 +139,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
@@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
};
static const struct drm_display_mode mi0283qt_mode = {
- TINYDRM_MODE(320, 240, 58, 43),
+ DRM_SIMPLE_MODE(320, 240, 58, 43),
};
DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
@@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
@@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs,
- &mi0283qt_driver, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ spi_set_drvdata(spi, drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void mi0283qt_shutdown(struct spi_device *spi)
+static int mi0283qt_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
- tinydrm_shutdown(&mipi->tinydrm);
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
}
-static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+static void mi0283qt_shutdown(struct spi_device *spi)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
- return drm_mode_config_helper_suspend(mipi->tinydrm.drm);
+static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+{
+ return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
-
- drm_mode_config_helper_resume(mipi->tinydrm.drm);
+ drm_mode_config_helper_resume(dev_get_drvdata(dev));
return 0;
}
@@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = {
},
.id_table = mi0283qt_id,
.probe = mi0283qt_probe,
+ .remove = mi0283qt_remove,
.shutdown = mi0283qt_shutdown,
};
module_spi_driver(mi0283qt_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 918f77c7de34..85761b4abb83 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
@@ -153,16 +154,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
*/
int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
{
+ u8 *cmdbuf;
int ret;
+ /* SPI requires dma-safe buffers */
+ cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
+ if (!cmdbuf)
+ return -ENOMEM;
+
mutex_lock(&mipi->cmdlock);
- ret = mipi->command(mipi, cmd, data, len);
+ ret = mipi->command(mipi, cmdbuf, data, len);
mutex_unlock(&mipi->cmdlock);
+ kfree(cmdbuf);
+
return ret;
}
EXPORT_SYMBOL(mipi_dbi_command_buf);
+/* This should only be used by mipi_dbi_command() */
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
+
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
@@ -192,12 +219,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- tinydrm_swab16(dst, src, fb, clip);
+ drm_fb_swab16(dst, src, fb, clip);
else
- tinydrm_memcpy(dst, src, fb, clip);
+ drm_fb_memcpy(dst, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- tinydrm_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
@@ -216,18 +243,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -254,6 +283,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
/**
@@ -308,19 +339,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
.y1 = 0,
.y2 = fb->height,
};
+ int idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return;
mipi->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
backlight_enable(mipi->backlight);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
static void mipi_dbi_blank(struct mipi_dbi *mipi)
{
- struct drm_device *drm = mipi->tinydrm.drm;
+ struct drm_device *drm = &mipi->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
size_t len = width * height * 2;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
memset(mipi->tx_buf, 0, len);
@@ -330,6 +371,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
(height >> 8) & 0xFF, (height - 1) & 0xFF);
mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)mipi->tx_buf, len);
+
+ drm_dev_exit(idx);
}
/**
@@ -342,8 +385,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ if (!mipi->enabled)
+ return;
DRM_DEBUG_KMS("\n");
@@ -359,6 +404,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const uint32_t mipi_dbi_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -366,31 +417,27 @@ static const uint32_t mipi_dbi_formats[] = {
/**
* mipi_dbi_init - MIPI DBI initialization
- * @dev: Parent device
* @mipi: &mipi_dbi structure to initialize
- * @pipe_funcs: Display pipe functions
- * @driver: DRM driver
+ * @funcs: Display pipe functions
* @mode: Display mode
* @rotation: Initial rotation in degrees Counter Clock Wise
*
- * This function initializes a &mipi_dbi structure and it's underlying
- * @tinydrm_device. It also sets up the display pipeline.
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi.tx_buf is allocated.
*
* Supported formats: Native RGB565 and emulated XRGB8888.
*
- * Objects created by this function will be automatically freed on driver
- * detach (devres).
- *
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
+int mipi_dbi_init(struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
- struct tinydrm_device *tdev = &mipi->tinydrm;
+ struct drm_device *drm = &mipi->drm;
int ret;
if (!mipi->command)
@@ -398,16 +445,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
mutex_init(&mipi->cmdlock);
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
if (!mipi->tx_buf)
return -ENOMEM;
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
/* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
mipi_dbi_formats,
ARRAY_SIZE(mipi_dbi_formats), mode,
@@ -415,21 +458,40 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- tdev->drm->mode_config.preferred_depth = 16;
+ drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
+ drm->mode_config.preferred_depth = 16;
mipi->rotation = rotation;
- drm_mode_config_reset(tdev->drm);
-
DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
+ drm->mode_config.preferred_depth, rotation);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_init);
/**
+ * mipi_dbi_release - DRM driver release helper
+ * @drm: DRM device
+ *
+ * This function finalizes and frees &mipi_dbi.
+ *
+ * Drivers can use this as their &drm_driver->release callback.
+ */
+void mipi_dbi_release(struct drm_device *drm)
+{
+ struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(dbi);
+}
+EXPORT_SYMBOL(mipi_dbi_release);
+
+/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @mipi: MIPI DBI structure
*
@@ -481,7 +543,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on);
static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
{
- struct device *dev = mipi->tinydrm.drm->dev;
+ struct device *dev = mipi->drm.dev;
int ret;
if (mipi->regulator) {
@@ -774,18 +836,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *parameters, size_t num)
{
- unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+ unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return -ENOTSUPP;
- MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
- ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+ ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
@@ -794,7 +856,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
/* MIPI DBI Type C Option 3 */
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
u8 *data, size_t len)
{
struct spi_device *spi = mipi->spi;
@@ -803,7 +865,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
struct spi_transfer tr[2] = {
{
.speed_hz = speed_hz,
- .tx_buf = &cmd,
+ .tx_buf = cmd,
.len = 1,
}, {
.speed_hz = speed_hz,
@@ -821,8 +883,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
* Support non-standard 24-bit and 32-bit Nokia read commands which
* start with a dummy clock, so we need to read an extra byte.
*/
- if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
- cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+ if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
+ *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
if (!(len == 3 || len == 4))
return -EINVAL;
@@ -852,7 +914,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
}
- MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
err_free:
kfree(buf);
@@ -860,7 +922,7 @@ err_free:
return ret;
}
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *par, size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -868,18 +930,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
u32 speed_hz;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
- MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -926,7 +988,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in
* drm_gem_cma_create(). The dma_addr returned will be a physical
- * adddress which might be different from the bus address, but this is
+ * address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
* re-maps it on the SPI master device using the DMA streaming API
@@ -976,11 +1038,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
unsigned int i;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
buf = memdup_user_nul(ubuf, count);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto err_exit;
+ }
/* strip trailing whitespace */
for (i = count - 1; i > 0; i--)
@@ -1016,6 +1083,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
err_free:
kfree(buf);
+err_exit:
+ drm_dev_exit(idx);
return ret < 0 ? ret : count;
}
@@ -1024,8 +1093,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
struct mipi_dbi *mipi = m->private;
u8 cmd, val[4];
+ int ret, idx;
size_t len;
- int ret;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
for (cmd = 0; cmd < 255; cmd++) {
if (!mipi_dbi_command_is_read(mipi, cmd))
@@ -1056,6 +1128,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
seq_printf(m, "%*phN\n", (int)len, val);
}
+ drm_dev_exit(idx);
+
return 0;
}
@@ -1088,8 +1162,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
*/
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
- struct tinydrm_device *tdev = minor->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
if (mipi->read_commands)
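
Note: passing the command as a u8 * rather than a plain u8 goes together with the kmemdup() added to mipi_dbi_command_buf(): the SPI core may DMA directly from the supplied buffer, and neither stack variables nor function arguments are DMA-safe. The comment on the new mipi_dbi_command_stackbuf() helper says it exists for the mipi_dbi_command() macro, whose stack-built parameter array is now copied into a DMA-safe heap buffer first. Roughly what that path amounts to (sketch; the macro itself lives in the tinydrm/mipi-dbi.h header, which this hunk does not show):

	/* expansion of something like:
	 *   mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
	 */
	u8 d[] = { addr_mode };				/* stack data        */
	mipi_dbi_command_stackbuf(mipi, MIPI_DCS_SET_ADDRESS_MODE,
				  d, ARRAY_SIZE(d));	/* kmemdup()ed inside */
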
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index b037c6540cf3..370629e2de94 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,14 +26,17 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#define REPAPER_RID_G2_COG_ID 0x12
@@ -59,7 +62,8 @@ enum repaper_epd_border_byte {
};
struct repaper_epd {
- struct tinydrm_device tinydrm;
+ struct drm_device drm;
+ struct drm_simple_display_pipe pipe;
struct spi_device *spi;
struct gpio_desc *panel_on;
@@ -88,10 +92,9 @@ struct repaper_epd {
bool partial;
};
-static inline struct repaper_epd *
-epd_from_tinydrm(struct tinydrm_device *tdev)
+static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
{
- return container_of(tdev, struct repaper_epd, tinydrm);
+ return container_of(drm, struct repaper_epd, drm);
}
static int repaper_spi_transfer(struct spi_device *spi, u8 header,
@@ -529,11 +532,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(fb->dev);
struct drm_rect clip;
+ int idx, ret = 0;
u8 *buf = NULL;
- int ret = 0;
+
+ if (!epd->enabled)
+ return 0;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return -ENODEV;
/* repaper can't do partial updates */
clip.x1 = 0;
@@ -541,17 +549,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
clip.y1 = 0;
clip.y2 = fb->height;
- if (!epd->enabled)
- return 0;
-
repaper_get_temperature(epd);
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_exit;
+ }
if (import_attach) {
ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
@@ -560,7 +567,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
goto out_free;
}
- tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+ drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
if (import_attach) {
ret = dma_buf_end_cpu_access(import_attach->dmabuf,
@@ -620,6 +627,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
out_free:
kfree(buf);
+out_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -645,12 +654,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
struct device *dev = &spi->dev;
bool dc_ok = false;
- int i, ret;
+ int i, ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_DRIVER("\n");
@@ -689,7 +700,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!i) {
DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
- return;
+ goto out_exit;
}
repaper_read_id(spi);
@@ -700,7 +711,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
dev_err(dev, "wrong COG ID 0x%02x\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
/* Disable OE */
@@ -713,7 +724,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
- return;
+ goto out_exit;
}
/* Power saving mode */
@@ -753,7 +764,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
if (ret & 0x40) {
@@ -765,7 +776,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!dc_ok) {
DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
- return;
+ goto out_exit;
}
/*
@@ -776,15 +787,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
epd->enabled = true;
epd->partial = false;
+out_exit:
+ drm_dev_exit(idx);
}
static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
unsigned int line;
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
+ if (!epd->enabled)
+ return;
+
DRM_DEBUG_DRIVER("\n");
epd->enabled = false;
@@ -855,33 +877,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void repaper_release(struct drm_device *drm)
+{
+ struct repaper_epd *epd = drm_to_epd(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(epd);
+}
+
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const struct drm_display_mode repaper_e1144cs021_mode = {
- TINYDRM_MODE(128, 96, 29, 22),
+ DRM_SIMPLE_MODE(128, 96, 29, 22),
};
static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x0f, 0xff, 0x00 };
static const struct drm_display_mode repaper_e1190cs021_mode = {
- TINYDRM_MODE(144, 128, 36, 32),
+ DRM_SIMPLE_MODE(144, 128, 36, 32),
};
static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
0xfc, 0x00, 0x00, 0xff };
static const struct drm_display_mode repaper_e2200cs021_mode = {
- TINYDRM_MODE(200, 96, 46, 22),
+ DRM_SIMPLE_MODE(200, 96, 46, 22),
};
static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x01, 0xff, 0xe0, 0x00 };
static const struct drm_display_mode repaper_e2271cs021_mode = {
- TINYDRM_MODE(264, 176, 57, 38),
+ DRM_SIMPLE_MODE(264, 176, 57, 38),
};
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
@@ -893,6 +932,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
+ .release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
@@ -925,11 +965,11 @@ static int repaper_probe(struct spi_device *spi)
const struct spi_device_id *spi_id;
const struct of_device_id *match;
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
+ struct drm_device *drm;
int ret;
match = of_match_device(repaper_of_match, dev);
@@ -949,10 +989,21 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL);
+ epd = kzalloc(sizeof(*epd), GFP_KERNEL);
if (!epd)
return -ENOMEM;
+ drm = &epd->drm;
+
+ ret = devm_drm_dev_init(dev, drm, &repaper_driver);
+ if (ret) {
+ kfree(epd);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &repaper_mode_config_funcs;
+
epd->spi = spi;
epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
@@ -1063,32 +1114,41 @@ static int repaper_probe(struct spi_device *spi)
if (!epd->current_frame)
return -ENOMEM;
- tdev = &epd->tinydrm;
-
- ret = devm_tinydrm_init(dev, tdev, &repaper_driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
repaper_formats,
ARRAY_SIZE(repaper_formats), mode, 0);
if (ret)
return ret;
- drm_mode_config_reset(tdev->drm);
- spi_set_drvdata(spi, tdev);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- return devm_tinydrm_register(tdev);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void repaper_shutdown(struct spi_device *spi)
+static int repaper_remove(struct spi_device *spi)
{
- struct tinydrm_device *tdev = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(tdev);
+static void repaper_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver repaper_spi_driver = {
@@ -1099,6 +1159,7 @@ static struct spi_driver repaper_spi_driver = {
},
.id_table = repaper_id,
.probe = repaper_probe,
+ .remove = repaper_remove,
.shutdown = repaper_shutdown,
};
module_spi_driver(repaper_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 01a8077954b3..36bb16a15f7e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,9 +17,12 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
@@ -75,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
if (!buf)
return;
- tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -116,14 +119,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int start, end;
- int ret = 0;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ int start, end, idx, ret = 0;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
rect->x2 = roundup(rect->x2, 3);
@@ -151,6 +155,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -175,8 +181,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -184,14 +189,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int idx, ret;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
@@ -244,12 +252,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
st7586_fb_dirty(fb, &rect);
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+out_exit:
+ drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
DRM_DEBUG_KMS("\n");
@@ -264,46 +280,6 @@ static const u32 st7586_formats[] = {
DRM_FORMAT_XRGB8888,
};
-static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver, const struct drm_display_mode *mode,
- unsigned int rotation)
-{
- size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
- struct tinydrm_device *tdev = &mipi->tinydrm;
- int ret;
-
- mutex_init(&mipi->cmdlock);
-
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
- return -ENOMEM;
-
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- st7586_formats,
- ARRAY_SIZE(st7586_formats),
- mode, rotation);
- if (ret)
- return ret;
-
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
-
- tdev->drm->mode_config.preferred_depth = 32;
- mipi->rotation = rotation;
-
- drm_mode_config_reset(tdev->drm);
-
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
-
- return 0;
-}
-
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
@@ -311,8 +287,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const struct drm_display_mode st7586_mode = {
- TINYDRM_MODE(178, 128, 37, 27),
+ DRM_SIMPLE_MODE(178, 128, 37, 27),
};
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
@@ -321,6 +303,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
@@ -345,15 +328,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *a0;
u32 rotation = 0;
+ size_t bufsize;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7586_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.preferred_depth = 32;
+ drm->mode_config.funcs = &st7586_mode_config_funcs;
+
+ mutex_init(&mipi->cmdlock);
+
+ bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -367,6 +370,7 @@ static int st7586_probe(struct spi_device *spi)
}
device_property_read_u32(dev, "rotation", &rotation);
+ mipi->rotation = rotation;
ret = mipi_dbi_spi_init(spi, mipi, a0);
if (ret)
@@ -384,21 +388,44 @@ static int st7586_probe(struct spi_device *spi)
*/
mipi->swap_bytes = true;
- ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
- &st7586_mode, rotation);
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ st7586_formats, ARRAY_SIZE(st7586_formats),
+ &st7586_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ drm->mode_config.preferred_depth, rotation);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7586_shutdown(struct spi_device *spi)
+static int st7586_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void st7586_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7586_spi_driver = {
@@ -409,6 +436,7 @@ static struct spi_driver st7586_spi_driver = {
},
.id_table = st7586_id,
.probe = st7586_probe,
+ .remove = st7586_remove,
.shutdown = st7586_shutdown,
};
module_spi_driver(st7586_spi_driver);
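A minimal sketch (not part of the patch) of the drm_dev_enter()/drm_dev_exit() guard the converted st7586 callbacks above now use; my_panel_fb_dirty is a placeholder name:

#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>

/* Illustrative only: skip hardware access once drm_dev_unplug() ran. */
static void my_panel_fb_dirty(struct drm_framebuffer *fb)
{
	int idx;

	if (!drm_dev_enter(fb->dev, &idx))
		return;		/* device is gone, nothing to flush */

	/* ... touch the SPI/MMIO hardware here ... */

	drm_dev_exit(idx);
}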
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3bab9a9569a6..ce9109e613e0 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,9 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int ret;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ int ret, idx;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
msleep(150);
@@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(20);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
@@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
};
static const struct drm_display_mode jd_t18003_t01_mode = {
- TINYDRM_MODE(128, 160, 28, 35),
+ DRM_SIMPLE_MODE(128, 160, 28, 35),
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
@@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi)
/* Cannot read from Adafruit 1.8" display via SPI */
mipi->read_commands = NULL;
- ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
- &st7735r_driver, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7735r_shutdown(struct spi_device *spi)
+static int st7735r_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7735r_spi_driver = {
@@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = {
},
.id_table = st7735r_id,
.probe = st7735r_probe,
+ .remove = st7735r_remove,
.shutdown = st7735r_shutdown,
};
module_spi_driver(st7735r_spi_driver);
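Both panel drivers now share the same lifecycle: allocate a driver struct that embeds drm_device, bind it with devm_drm_dev_init(), register, and unplug on remove. A hedged sketch of that shape with hypothetical names (struct my_panel, my_panel_drm_driver assumed to be defined elsewhere):

#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>

struct my_panel {
	struct drm_device drm;		/* embedded, not a pointer */
	/* ... controller state ... */
};

static int my_panel_probe(struct spi_device *spi)
{
	struct my_panel *panel = kzalloc(sizeof(*panel), GFP_KERNEL);
	int ret;

	if (!panel)
		return -ENOMEM;

	ret = devm_drm_dev_init(&spi->dev, &panel->drm, &my_panel_drm_driver);
	if (ret) {
		kfree(panel);		/* init failed, nothing owns it yet */
		return ret;
	}

	/* ... mode config, display pipe setup ... */

	ret = drm_dev_register(&panel->drm, 0);
	if (ret)
		return ret;

	spi_set_drvdata(spi, &panel->drm);
	return 0;
}

static int my_panel_remove(struct spi_device *spi)
{
	struct drm_device *drm = spi_get_drvdata(spi);

	drm_dev_unplug(drm);		/* reject further hardware access */
	drm_atomic_helper_shutdown(drm);
	return 0;
}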
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a01669b159a..2845fceb2fbd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1626,7 +1626,6 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
- uint64_t file_page_offset,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
@@ -1648,8 +1647,9 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
- 0x10000000);
+ drm_vma_offset_manager_init(&bdev->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e86a29a1e51f..6dacff49c1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -432,6 +432,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+ return -EINVAL;
+
bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
if (unlikely(!bo))
return -EINVAL;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 93860346c426..0075eb9a0b52 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -188,13 +188,11 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 9a0909decb36..8617958b7ae6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -81,7 +81,7 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
- pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+ pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
@@ -448,7 +448,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+ pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
@@ -523,7 +523,7 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
- return ttm_mem_global_free_zone(glob, NULL, amount);
+ return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
@@ -622,10 +622,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
{
/**
* Normal allocations of kernel memory are registered in
- * all zones.
+ * the kernel zone.
*/
- return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
+ return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index e8723a2412a6..d775d10dbe6a 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -149,7 +149,8 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
/* Vsync IRQ at start of Vsync at first */
ctrl1 |= TVE200_VSTSTYPE_VSYNC;
- if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (connector->display_info.bus_flags &
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl1 |= TVE200_CTRL_TVCLKP;
if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index ff47f890e6ad..312bf324841a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -48,10 +48,16 @@ static const struct file_operations udl_driver_fops = {
.llseek = noop_llseek,
};
+static void udl_driver_release(struct drm_device *dev)
+{
+ udl_fini(dev);
+ udl_modeset_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(dev);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = udl_driver_load,
- .unload = udl_driver_unload,
.release = udl_driver_release,
/* gem hooks */
@@ -75,28 +81,56 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct udl_device *udl_driver_create(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct udl_device *udl;
+ int r;
+
+ udl = kzalloc(sizeof(*udl), GFP_KERNEL);
+ if (!udl)
+ return ERR_PTR(-ENOMEM);
+
+ r = drm_dev_init(&udl->drm, &driver, &interface->dev);
+ if (r) {
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ udl->udev = udev;
+ udl->drm.dev_private = udl;
+
+ r = udl_init(udl);
+ if (r) {
+ drm_dev_fini(&udl->drm);
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ usb_set_intfdata(interface, udl);
+ return udl;
+}
+
static int udl_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct drm_device *dev;
int r;
+ struct udl_device *udl;
- dev = drm_dev_alloc(&driver, &interface->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ udl = udl_driver_create(interface);
+ if (IS_ERR(udl))
+ return PTR_ERR(udl);
- r = drm_dev_register(dev, (unsigned long)udev);
+ r = drm_dev_register(&udl->drm, 0);
if (r)
goto err_free;
- usb_set_intfdata(interface, dev);
- DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+ DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
return 0;
err_free:
- drm_dev_put(dev);
+ drm_dev_put(&udl->drm);
return r;
}
@@ -108,6 +142,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 4ae67d882eae..35c1f33fbc1a 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,8 +50,8 @@ struct urb_list {
struct udl_fbdev;
struct udl_device {
+ struct drm_device drm;
struct device *dev;
- struct drm_device *ddev;
struct usb_device *udev;
struct drm_crtc *crtc;
@@ -71,6 +71,8 @@ struct udl_device {
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
+#define to_udl(x) container_of(x, struct udl_device, drm)
+
struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
@@ -102,9 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
-int udl_driver_load(struct drm_device *dev, unsigned long flags);
-void udl_driver_unload(struct drm_device *dev);
-void udl_driver_release(struct drm_device *dev);
+int udl_init(struct udl_device *udl);
+void udl_fini(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
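With drm_device embedded in udl_device, the dev_private lookups become a type-safe container_of() upcast. A tiny sketch of the pattern with demo names, not taken from the patch:

#include <linux/kernel.h>
#include <drm/drm_drv.h>

struct demo_device {
	struct drm_device drm;
	int private_state;
};

static inline struct demo_device *to_demo(struct drm_device *dev)
{
	return container_of(dev, struct demo_device, drm);
}

/* Any callback that receives a struct drm_device can now upcast: */
static void demo_callback(struct drm_device *dev)
{
	struct demo_device *demo = to_demo(dev);

	demo->private_state++;
}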
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dd9ffded223b..b9b67a546d4c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -32,7 +32,7 @@ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
struct udl_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct udl_framebuffer ufb;
int fb_count;
};
@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
{
struct drm_device *dev = fb->base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int i, ret;
char *cmd;
cycles_t start_cycles, end_cycles;
@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
{
struct udl_fbdev *ufbdev = info->par;
struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
/* If the USB device is gone, we don't accept new opens */
- if (drm_dev_is_unplugged(udl->ddev))
+ if (drm_dev_is_unplugged(&udl->drm))
return -ENODEV;
ufbdev->fb_count++;
@@ -392,7 +392,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_gfree;
}
- info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
@@ -402,15 +401,12 @@ static int udlfb_create(struct drm_fb_helper *helper,
ufbdev->helper.fb = fb;
- strcpy(info->fix.id, "udldrmfb");
-
info->screen_base = ufbdev->ufb.obj->vmapping;
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
info->fbops = &udlfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
@@ -441,7 +437,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
int udl_fbdev_init(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int bpp_sel = fb_bpp;
struct udl_fbdev *ufbdev;
int ret;
@@ -480,7 +476,7 @@ free:
void udl_fbdev_cleanup(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
if (!udl->fbdev)
return;
@@ -491,7 +487,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
void udl_fbdev_unplug(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct udl_fbdev *ufbdev;
if (!udl->fbdev)
return;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index bb7b58407039..3b3e17652bb2 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
{
struct udl_gem_object *gobj;
struct drm_gem_object *obj;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
mutex_lock(&udl->gem_lock);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1f8ef34ade24..6743eaef4594 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -30,7 +30,7 @@
static int udl_parse_vendor_descriptor(struct drm_device *dev,
struct usb_device *usbdev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
char *desc;
char *buf;
char *desc_end;
@@ -166,7 +166,7 @@ void udl_urb_completion(struct urb *urb)
static void udl_free_urb_list(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int count = udl->urbs.count;
struct list_head *node;
struct urb_node *unode;
@@ -199,7 +199,7 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -263,7 +263,7 @@ retry:
struct urb *udl_get_urb(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
struct list_head *entry;
struct urb_node *unode;
@@ -296,7 +296,7 @@ error:
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret;
BUG_ON(len > udl->urbs.size);
@@ -311,20 +311,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
-int udl_driver_load(struct drm_device *dev, unsigned long flags)
+int udl_init(struct udl_device *udl)
{
- struct usb_device *udev = (void*)flags;
- struct udl_device *udl;
+ struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
DRM_DEBUG("\n");
- udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
- if (!udl)
- return -ENOMEM;
-
- udl->udev = udev;
- udl->ddev = dev;
- dev->dev_private = udl;
mutex_init(&udl->gem_lock);
@@ -358,7 +350,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
- kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
@@ -369,9 +360,9 @@ int udl_drop_usb(struct drm_device *dev)
return 0;
}
-void udl_driver_unload(struct drm_device *dev)
+void udl_fini(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
drm_kms_helper_poll_fini(dev);
@@ -379,12 +370,4 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
- kfree(udl);
-}
-
-void udl_driver_release(struct drm_device *dev)
-{
- udl_modeset_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
}
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 1552bf552c94..75a74c45f109 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
depends on COMMON_CLK
depends on MMU
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option if you have a system that has a Broadcom
V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a08766d39eab..a22b75a3a533 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
- struct drm_device *dev = obj->dev;
- int npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
-
- mutex_lock(&bo->lock);
- if (bo->pages_refcount++ != 0)
- goto unlock;
-
- if (!obj->import_attach) {
- bo->pages = drm_gem_get_pages(obj);
- if (IS_ERR(bo->pages)) {
- ret = PTR_ERR(bo->pages);
- goto unlock;
- }
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
- if (IS_ERR(bo->sgt)) {
- ret = PTR_ERR(bo->sgt);
- goto put_pages;
- }
-
- /* Map the pages for use by the GPU. */
- dma_map_sg(dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages)
- goto put_pages;
-
- drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
- NULL, npages);
-
- /* Note that dma-bufs come in mapped. */
- }
-
- mutex_unlock(&bo->lock);
-
- return 0;
-
-put_pages:
- drm_gem_put_pages(obj, bo->pages, true, true);
- bo->pages = NULL;
-unlock:
- bo->pages_refcount--;
- mutex_unlock(&bo->lock);
- return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
-
- mutex_lock(&bo->lock);
- if (--bo->pages_refcount == 0) {
- if (!obj->import_attach) {
- dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- drm_gem_put_pages(obj, bo->pages, true, true);
- } else {
- kfree(bo->pages);
- }
- }
- mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- size_t size = roundup(unaligned_size, PAGE_SIZE);
- int ret;
-
- if (size == 0)
- return ERR_PTR(-EINVAL);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
- obj = &bo->base;
-
- INIT_LIST_HEAD(&bo->vmas);
- INIT_LIST_HEAD(&bo->unref_head);
- mutex_init(&bo->lock);
-
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto free_bo;
-
- spin_lock(&v3d->mm_lock);
- ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
- obj->size >> PAGE_SHIFT,
- GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
- spin_unlock(&v3d->mm_lock);
- if (ret)
- goto free_obj;
-
- return bo;
-
-free_obj:
- drm_gem_object_release(obj);
-free_bo:
- kfree(bo);
- return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- int ret;
-
- bo = v3d_bo_create_struct(dev, unaligned_size);
- if (IS_ERR(bo))
- return bo;
- obj = &bo->base;
-
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
- ret = v3d_bo_get_pages(bo);
- if (ret)
- goto free_mm;
-
- v3d_mmu_insert_ptes(bo);
-
- mutex_lock(&v3d->bo_lock);
- v3d->bo_stats.num_allocated++;
- v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
- mutex_unlock(&v3d->bo_lock);
-
- return bo;
-
-free_mm:
- spin_lock(&v3d->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&v3d->mm_lock);
-
- drm_gem_object_release(obj);
- kfree(bo);
- return ERR_PTR(ret);
-}
-
/* Called by DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ v3d_mmu_remove_ptes(bo);
+
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
- reservation_object_fini(&bo->_resv);
-
- v3d_bo_put_pages(bo);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, bo->sgt);
-
- v3d_mmu_remove_ptes(bo);
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);
- mutex_destroy(&bo->lock);
+ /* GPU execution may have dirtied any pages in the BO. */
+ bo->base.pages_mark_dirty_on_put = true;
- drm_gem_object_release(obj);
- kfree(bo);
+ drm_gem_shmem_free_object(obj);
}
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+ .free = v3d_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
- struct v3d_bo *bo = to_v3d_bo(obj);
+ struct v3d_bo *bo;
+ struct drm_gem_object *obj;
- return bo->resv;
-}
+ if (size == 0)
+ return NULL;
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ obj = &bo->base.base;
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct v3d_bo *bo = to_v3d_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
+ obj->funcs = &v3d_gem_funcs;
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+ INIT_LIST_HEAD(&bo->unref_head);
- return vmf_insert_mixed(vma, vmf->address, pfn);
+ return &bo->base.base;
}
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
{
+ struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ struct sg_table *sgt;
int ret;
- ret = drm_gem_mmap(filp, vma);
+ /* So far we pin the BO in the MMU for its lifetime, so use
+ * shmem's helper for getting a lifetime sgt.
+ */
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ spin_lock(&v3d->mm_lock);
+ /* Allocate the object's space in the GPU's page tables.
+ * Inserting PTEs will happen later, but the offset is for the
+ * lifetime of the BO.
+ */
+ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+ obj->size >> PAGE_SHIFT,
+ GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+ spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
- v3d_set_mmap_vma_flags(vma);
+ /* Track stats for /debug/dri/n/bo_stats. */
+ mutex_lock(&v3d->bo_lock);
+ v3d->bo_stats.num_allocated++;
+ v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+ mutex_unlock(&v3d->bo_lock);
- return ret;
+ v3d_mmu_insert_ptes(bo);
+
+ return 0;
}
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+ size_t unaligned_size)
{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct v3d_bo *bo;
int ret;
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- v3d_set_mmap_vma_flags(vma);
+ shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+ if (IS_ERR(shmem_obj))
+ return ERR_CAST(shmem_obj);
+ bo = to_v3d_bo(&shmem_obj->base);
- return 0;
-}
+ ret = v3d_bo_create_finish(&shmem_obj->base);
+ if (ret)
+ goto free_obj;
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct v3d_bo *bo = to_v3d_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ return bo;
- return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ERR_PTR(ret);
}
struct drm_gem_object *
@@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct v3d_bo *bo;
-
- bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
- obj = &bo->base;
-
- bo->resv = attach->dmabuf->resv;
+ int ret;
- bo->sgt = sgt;
- obj->import_attach = attach;
- v3d_bo_get_pages(bo);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return obj;
- v3d_mmu_insert_ptes(bo);
+ ret = v3d_bo_create_finish(obj);
+ if (ret) {
+ drm_gem_shmem_free_object(obj);
+ return ERR_PTR(ret);
+ }
return obj;
}
@@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
- ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base);
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
{
struct drm_v3d_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- int ret;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
- ret = drm_gem_create_mmap_offset(gem_obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_put_unlocked(gem_obj);
- return ret;
+ return 0;
}
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
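v3d now builds its BOs on top of the shmem GEM helpers: the driver object embeds drm_gem_shmem_object as its first member, supplies a gem_create_object hook, and lets the helpers manage pages, sg tables and mmap. A rough sketch of that shape (demo_bo and friends are placeholders; the real hook above also installs its drm_gem_object_funcs):

#include <linux/slab.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct demo_bo {
	struct drm_gem_shmem_object base;	/* must be first */
	struct drm_mm_node node;		/* driver-private GPU VA */
};

static inline struct demo_bo *to_demo_bo(struct drm_gem_object *obj)
{
	return container_of(obj, struct demo_bo, base.base);
}

/* .gem_create_object hook: allocate the wrapper, helpers do the rest. */
static struct drm_gem_object *demo_create_object(struct drm_device *dev,
						 size_t size)
{
	struct demo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;

	return &bo->base.base;
}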
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index eb2b2d2f8553..a24af2d2f574 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
+ int ret;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index f0afcec72c34..a06b05f714a5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * Currently only single-core rendering using the binner and renderer,
+ * along with TFU (texture formatting unit) rendering, is supported.
+ * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
*/
#include <linux/clk.h>
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
@@ -101,6 +102,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
args->value = V3D_CORE_READ(0, offset);
@@ -160,17 +163,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-static const struct file_operations v3d_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = v3d_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -188,12 +181,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
-static const struct vm_operations_struct v3d_vm_ops = {
- .fault = v3d_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
@@ -207,17 +194,11 @@ static struct drm_driver v3d_drm_driver = {
.debugfs_init = v3d_debugfs_init,
#endif
- .gem_free_object_unlocked = v3d_free_object,
- .gem_vm_ops = &v3d_vm_ops,
-
+ .gem_create_object = v3d_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_res_obj = v3d_prime_res_obj,
- .gem_prime_get_sg_table = v3d_prime_get_sg_table,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
- .gem_prime_mmap = v3d_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -265,10 +246,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->pdev = pdev;
drm = &v3d->drm;
- ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
- if (ret)
- goto dev_free;
-
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
goto dev_free;
@@ -283,6 +260,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+ v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(v3d->reset)) {
+ ret = PTR_ERR(v3d->reset);
+
+ if (ret == -EPROBE_DEFER)
+ goto dev_free;
+
+ v3d->reset = NULL;
+ ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+ if (ret) {
+ dev_err(dev,
+ "Failed to get reset control or bridge regs\n");
+ goto dev_free;
+ }
+ }
+
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
@@ -312,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto dev_destroy;
- v3d_irq_init(v3d);
+ ret = v3d_irq_init(v3d);
+ if (ret)
+ goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret)
- goto gem_destroy;
+ goto irq_disable;
return 0;
+irq_disable:
+ v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
dev_destroy:
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index fdda3037f7af..e9d4a2fdcf44 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/reservation.h>
#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"
@@ -34,6 +34,7 @@ struct v3d_dev {
* and revision.
*/
int ver;
+ bool single_irq_line;
struct device *dev;
struct platform_device *pdev;
@@ -42,6 +43,7 @@ struct v3d_dev {
void __iomem *bridge_regs;
void __iomem *gca_regs;
struct clk *clk;
+ struct reset_control *reset;
/* Virtual and DMA addresses of the single shared page table. */
volatile u32 *pt;
@@ -109,34 +111,15 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};
-/* Tracks a mapping of a BO into a per-fd address space */
-struct v3d_vma {
- struct v3d_page_table *pt;
- struct list_head list; /* entry in v3d_bo.vmas */
-};
-
struct v3d_bo {
- struct drm_gem_object base;
-
- struct mutex lock;
+ struct drm_gem_shmem_object base;
struct drm_mm_node node;
- u32 pages_refcount;
- struct page **pages;
- struct sg_table *sgt;
- void *vaddr;
-
- struct list_head vmas; /* list of v3d_vma */
-
/* List entry for the BO's position in
* v3d_exec_info->unref_list
*/
struct list_head unref_head;
-
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
};
static inline struct v3d_bo *
@@ -180,7 +163,7 @@ struct v3d_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
@@ -227,7 +210,7 @@ struct v3d_tfu_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
struct v3d_dev *v3d;
@@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
}
/* v3d_bo.c */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
@@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
/* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 803f31467ec1..93ff8fcbe475 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
@@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+ if (v3d->ver < 40)
+ V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
* the whole thing.
@@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d)
}
static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
{
int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
@@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d)
V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+ if (v3d->reset)
+ reset_control_reset(v3d->reset);
+ else
+ v3d_reset_by_bridge(v3d);
v3d_init_hw_state(v3d);
}
@@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ reservation_object_add_excl_fence(bos[i]->base.base.resv,
+ fence);
}
}
@@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int i;
-
- for (i = 0; i < bo_count; i++)
- ww_mutex_unlock(&bos[i]->resv->lock);
-
- ww_acquire_fini(acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
+ acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int contended_lock = -1;
int i, ret;
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
- acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++)
- ww_mutex_unlock(&bos[j]->resv->lock);
-
- if (contended_lock != -1 && contended_lock >= i) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ww_mutex_unlock(&bo->resv->lock);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ bo_count, acquire_ctx);
+ if (ret)
+ return ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+ ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
+ 1);
if (ret) {
v3d_unlock_bo_reservations(bos, bo_count,
acquire_ctx);
@@ -371,18 +340,18 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->bin.in_fence);
dma_fence_put(exec->render.in_fence);
- dma_fence_put(exec->bin.done_fence);
- dma_fence_put(exec->render.done_fence);
+ dma_fence_put(exec->bin.irq_fence);
+ dma_fence_put(exec->render.irq_fence);
dma_fence_put(exec->bin_done_fence);
dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
kvfree(exec->bo);
list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -405,11 +374,11 @@ v3d_tfu_job_cleanup(struct kref *ref)
unsigned int i;
dma_fence_put(job->in_fence);
- dma_fence_put(job->done_fence);
+ dma_fence_put(job->irq_fence);
for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
if (job->bo[i])
- drm_gem_object_put_unlocked(&job->bo[i]->base);
+ drm_gem_object_put_unlocked(&job->bo[i]->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
{
int ret;
struct drm_v3d_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct v3d_bo *bo;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
@@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_v3d_bo(gem_obj);
-
- ret = reservation_object_wait_timeout_rcu(bo->resv,
- true, true,
- timeout_jiffies);
-
- if (ret == 0)
- ret = -ETIME;
- else if (ret > 0)
- ret = 0;
+ ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
- drm_gem_object_put_unlocked(gem_obj);
-
return ret;
}
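The open-coded ww_mutex loop is replaced by core helpers; a hedged sketch of how they pair up, with placeholder names:

#include <linux/ww_mutex.h>
#include <drm/drm_gem.h>

static int demo_submit(struct drm_gem_object **objs, int count,
		       struct ww_acquire_ctx *ctx)
{
	int ret;

	/* Takes every reservation lock, handling -EDEADLK back-off. */
	ret = drm_gem_lock_reservations(objs, count, ctx);
	if (ret)
		return ret;

	/* ... push the job, attach fences to objs[i]->resv ... */

	drm_gem_unlock_reservations(objs, count, ctx);
	return 0;
}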
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 69338da70ddc..aa0a180ae700 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -27,6 +27,9 @@
V3D_HUB_INT_MMU_CAP | \
V3D_HUB_INT_TFUC))
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg);
+
static void
v3d_overflow_mem_work(struct work_struct *work)
{
@@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct drm_gem_object *obj;
unsigned long irqflags;
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
+ obj = &bo->base.base;
/* We lost a race, and our work task came in after the bin job
* completed and exited. This can happen because the HW
@@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work)
goto out;
}
- drm_gem_object_get(&bo->base);
+ drm_gem_object_get(obj);
list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
- V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+ V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(obj);
}
static irqreturn_t
@@ -82,7 +87,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_OUTOMEM) {
/* Note that the OOM status is edge signaled, so the
* interrupt won't happen again until the we actually
- * add more memory.
+ * add more memory. Also, as of V3D 4.1, FLDONE won't
+ * be reported until any OOM state has been cleared.
*/
schedule_work(&v3d->overflow_mem_work);
status = IRQ_HANDLED;
@@ -90,7 +96,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->bin.done_fence);
+ to_v3d_fence(v3d->bin_job->bin.irq_fence);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -99,7 +105,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->render.done_fence);
+ to_v3d_fence(v3d->render_job->render.irq_fence);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -112,6 +118,12 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_GMPV)
dev_err(v3d->dev, "GMP violation\n");
+ /* V3D 4.2 wires the hub and core IRQs together, so if we
+ * didn't see the common one then check hub for MMU IRQs.
+ */
+ if (v3d->single_irq_line && status == IRQ_NONE)
+ return v3d_hub_irq(irq, arg);
+
return status;
}
@@ -129,7 +141,7 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->done_fence);
+ to_v3d_fence(v3d->tfu_job->irq_fence);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -156,10 +168,10 @@ v3d_hub_irq(int irq, void *arg)
return status;
}
-void
+int
v3d_irq_init(struct v3d_dev *v3d)
{
- int ret, core;
+ int irq1, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -170,16 +182,37 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
- v3d_hub_irq, IRQF_SHARED,
- "v3d_hub", v3d);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
- v3d_irq, IRQF_SHARED,
- "v3d_core0", v3d);
- if (ret)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ irq1 = platform_get_irq(v3d->pdev, 1);
+ if (irq1 == -EPROBE_DEFER)
+ return irq1;
+ if (irq1 > 0) {
+ ret = devm_request_irq(v3d->dev, irq1,
+ v3d_irq, IRQF_SHARED,
+ "v3d_core0", v3d);
+ if (ret)
+ goto fail;
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_hub_irq, IRQF_SHARED,
+ "v3d_hub", v3d);
+ if (ret)
+ goto fail;
+ } else {
+ v3d->single_irq_line = true;
+
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_irq, IRQF_SHARED,
+ "v3d", v3d);
+ if (ret)
+ goto fail;
+ }
v3d_irq_enable(v3d);
+ return 0;
+
+fail:
+ if (ret != -EPROBE_DEFER)
+ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ return ret;
}
void
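The IRQ setup now tolerates a missing second interrupt line and propagates -EPROBE_DEFER. A condensed sketch of that probing pattern (demo names, handler bodies omitted):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int demo_irq_init(struct platform_device *pdev, void *priv,
			 irq_handler_t hub_handler, irq_handler_t core_handler)
{
	int irq1 = platform_get_irq(pdev, 1);
	int ret;

	if (irq1 == -EPROBE_DEFER)
		return irq1;	/* irqchip not ready yet, retry probe later */

	if (irq1 > 0) {
		/* two lines: request the core and hub IRQs separately */
		ret = devm_request_irq(&pdev->dev, irq1, core_handler,
				       IRQF_SHARED, "demo-core", priv);
		if (ret)
			return ret;
		return devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
					hub_handler, IRQF_SHARED, "demo-hub",
					priv);
	}

	/* single wired line: the core handler chains to the hub handler */
	return devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
				core_handler, IRQF_SHARED, "demo", priv);
}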
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index b00f97c31b70..7a21f1787ab1 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+ struct drm_gem_shmem_object *shmem_obj = &bo->base;
+ struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
unsigned int count;
struct scatterlist *sgl;
- for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+ for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
@@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
}
WARN_ON_ONCE(page - bo->node.start !=
- bo->base.size >> V3D_MMU_PAGE_SHIFT);
+ shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->dev, "MMU flush timeout\n");
@@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
- u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+ struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
+ u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
u32 page;
for (page = bo->node.start; page < bo->node.start + npages; page++)
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6ccdee9d47bd..8e88af237610 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -216,6 +216,8 @@
# define V3D_IDENT2_BCG_INT BIT(28)
#define V3D_CTL_MISCCFG 0x00018
+# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1)
+# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1
# define V3D_MISCCFG_OVRTMUOUT BIT(0)
#define V3D_CTL_L2CACTL 0x00020
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4704b2df3688..e740f3b99aa5 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -156,9 +156,9 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
if (IS_ERR(fence))
return NULL;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
job->start, job->end);
@@ -199,9 +199,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
return NULL;
v3d->tfu_job = job;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
@@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
-
- drm_sched_stop(sched);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ drm_sched_stop(&v3d->queue[q].sched);
- if(sched_job)
- drm_sched_increase_karma(sched_job);
- }
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
v3d_reset(v3d);
for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(sched_job->sched);
+ drm_sched_resubmit_jobs(&v3d->queue[q].sched);
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
index 1f4182e2e980..d6ab955c0768 100644
--- a/drivers/staging/vboxvideo/Kconfig
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config DRM_VBOXVIDEO
tristate "Virtual Box Graphics Card"
depends on DRM && X86 && PCI
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 1224f313af0c..1224f313af0c 100644
--- a/drivers/staging/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
index 361d3193258e..361d3193258e 100644
--- a/drivers/staging/vboxvideo/hgsmi_base.c
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
index 4e93418d6a13..4e93418d6a13 100644
--- a/drivers/staging/vboxvideo/hgsmi_ch_setup.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
index 9b83f4ff3faf..9b83f4ff3faf 100644
--- a/drivers/staging/vboxvideo/hgsmi_channels.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
index 6c8df1cdb087..6c8df1cdb087 100644
--- a/drivers/staging/vboxvideo/hgsmi_defs.h
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
index 7580b9002379..7580b9002379 100644
--- a/drivers/staging/vboxvideo/modesetting.c
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index e7755a179850..fb6a0f0b8167 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -200,36 +200,11 @@ static const struct file_operations vbox_fops = {
.read = drm_read,
};
-static int vbox_master_set(struct drm_device *dev,
- struct drm_file *file_priv, bool from_open)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- /*
- * We do not yet know whether the new owner can handle hotplug, so we
- * do not advertise dynamic modes on the first query and send a
- * tentative hotplug notification after that to see if they query again.
- */
- vbox->initial_mode_queried = false;
-
- return 0;
-}
-
-static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct vbox_private *vbox = dev->dev_private;
-
- /* See vbox_master_set() */
- vbox->initial_mode_queried = false;
-}
-
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- .master_set = vbox_master_set,
- .master_drop = vbox_master_drop,
.fops = &vbox_fops,
.irq_handler = vbox_irq_handler,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index aa40e5cc2861..ece31f395540 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/string.h>
-#include <linux/version.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
@@ -83,12 +82,6 @@ struct vbox_private {
} ttm;
struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
- /*
- * We decide whether or not user-space supports display hot-plug
- * depending on whether they react to a hot-plug event after the initial
- * mode query.
- */
- bool initial_mode_queried;
struct work_struct hotplug_work;
u32 input_mapping_width;
u32 input_mapping_height;
@@ -209,8 +202,6 @@ int vbox_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
u32 handle, u64 *offset);
-#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
-
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
index 83a04afd1766..b724fe7c0c30 100644
--- a/drivers/staging/vboxvideo/vbox_fb.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c
@@ -90,13 +90,9 @@ int vboxfb_create(struct drm_fb_helper *helper,
if (IS_ERR(info->screen_base))
return PTR_ERR(info->screen_base);
- info->par = helper;
-
fb = &vbox->afb.base;
helper->fb = fb;
- strcpy(info->fix.id, "vboxdrmfb");
-
info->fbops = &vboxfb_ops;
/*
@@ -106,9 +102,7 @@ int vboxfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, helper, sizes);
gpu_addr = vbox_bo_gpu_offset(bo);
info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
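
Note on the vbox_fb.c hunk above: drm_fb_helper_fill_info() is the consolidated helper that replaces the separate drm_fb_helper_fill_fix()/drm_fb_helper_fill_var() calls, and the removed info->par and fix.id assignments suggest it covers those as well. A minimal sketch of the call shape; my_fb_create() is a made-up helper name, not the driver's actual callback:

/* Illustrative sketch only, not part of the patch. */
#include <linux/fb.h>
#include <drm/drm_fb_helper.h>

static int my_fb_create(struct drm_fb_helper *helper,
                        struct drm_fb_helper_surface_size *sizes,
                        struct fb_info *info)
{
        /* One call fills the fbdev fix/var fields previously set by hand. */
        drm_fb_helper_fill_info(info, helper, sizes);
        return 0;
}
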
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
index 94b60654a012..94b60654a012 100644
--- a/drivers/staging/vboxvideo/vbox_hgsmi.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 195484713365..16a1e29f5292 100644
--- a/drivers/staging/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -105,6 +105,7 @@ static void validate_or_set_position_hints(struct vbox_private *vbox)
/* Query the host for the most recent video mode hints. */
static void vbox_update_mode_hints(struct vbox_private *vbox)
{
+ struct drm_connector_list_iter conn_iter;
struct drm_device *dev = &vbox->ddev;
struct drm_connector *connector;
struct vbox_connector *vbox_conn;
@@ -122,8 +123,10 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
}
validate_or_set_position_hints(vbox);
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
vbox_conn = to_vbox_connector(connector);
hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
@@ -152,7 +155,8 @@ static void vbox_update_mode_hints(struct vbox_private *vbox)
vbox_conn->vbox_crtc->disconnected = disconnected;
}
- drm_modeset_unlock_all(dev);
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static void vbox_hotplug_worker(struct work_struct *work)
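
The vbox_irq.c hunk above drops drm_modeset_lock_all() in favour of holding only mode_config.connection_mutex while walking connectors with the drm_connector_list_iter API. For reference, the bare iteration pattern looks like this; walk_connectors() is an invented name for illustration:

/* Illustrative sketch only, not part of the patch. */
#include <drm/drm_device.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_lock.h>

static void walk_connectors(struct drm_device *dev)
{
        struct drm_connector_list_iter conn_iter;
        struct drm_connector *connector;

        /* Only the connection_mutex is needed, not the whole lock set. */
        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                /* per-connector work goes here */
        }
        drm_connector_list_iter_end(&conn_iter);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
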
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index e1fb70a42d32..f4d02de5518a 100644
--- a/drivers/staging/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -32,9 +32,9 @@ void vbox_report_caps(struct vbox_private *vbox)
u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
- if (vbox->initial_mode_queried)
- caps |= VBVACAPS_VIDEO_MODE_HINTS;
-
+ /* The host only accepts VIDEO_MODE_HINTS if it is sent separately. */
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
hgsmi_send_caps_info(vbox->guest_pool, caps);
}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 213551394495..58cea131470e 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -57,8 +57,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
- vbox_write_ioport(
- VBE_DISPI_INDEX_X_OFFSET,
+ vbox_write_ioport(VBE_DISPI_INDEX_X_OFFSET,
vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
vbox_crtc->fb_offset / pitch + vbox_crtc->y);
@@ -736,29 +735,12 @@ static int vbox_get_modes(struct drm_connector *connector)
vbox_connector = to_vbox_connector(connector);
vbox = connector->dev->dev_private;
- /*
- * Heuristic: we do not want to tell the host that we support dynamic
- * resizing unless we feel confident that the user space client using
- * the video driver can handle hot-plug events. So the first time modes
- * are queried after a "master" switch we tell the host that we do not,
- * and immediately after we send the client a hot-plug notification as
- * a test to see if they will respond and query again.
- * That is also the reason why capabilities are reported to the host at
- * this place in the code rather than elsewhere.
- * We need to report the flags location before reporting the IRQ
- * capability.
- */
+
hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
HOST_FLAGS_OFFSET);
if (vbox_connector->vbox_crtc->crtc_id == 0)
vbox_report_caps(vbox);
- if (!vbox->initial_mode_queried) {
- if (vbox_connector->vbox_crtc->crtc_id == 0) {
- vbox->initial_mode_queried = true;
- vbox_report_hotplug(vbox);
- }
- return drm_add_modes_noedid(connector, 800, 600);
- }
+
num_modes = drm_add_modes_noedid(connector, 2560, 1600);
preferred_width = vbox_connector->mode_hint.width ?
vbox_connector->mode_hint.width : 1024;
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
index d61985b0c6eb..702b1aa53494 100644
--- a/drivers/staging/vboxvideo/vbox_prime.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c
@@ -16,7 +16,7 @@
int vbox_gem_prime_pin(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return -ENOSYS;
+ return -ENODEV;
}
void vbox_gem_prime_unpin(struct drm_gem_object *obj)
@@ -27,7 +27,7 @@ void vbox_gem_prime_unpin(struct drm_gem_object *obj)
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
struct drm_gem_object *vbox_gem_prime_import_sg_table(
@@ -35,13 +35,13 @@ struct drm_gem_object *vbox_gem_prime_import_sg_table(
struct sg_table *table)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENODEV);
}
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -52,5 +52,5 @@ void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
{
WARN_ONCE(1, "not implemented");
- return -ENOSYS;
+ return -ENODEV;
}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 30f270027acf..9d78438c2877 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -156,7 +156,7 @@ int vbox_mm_init(struct vbox_private *vbox)
ret = ttm_bo_device_init(&vbox->ttm.bdev,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, true);
+ true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
return ret;
@@ -357,14 +357,8 @@ int vbox_bo_push_sysram(struct vbox_bo *bo)
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct vbox_private *vbox;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- vbox = file_priv->minor->dev->dev_private;
+ struct drm_file *file_priv = filp->private_data;
+ struct vbox_private *vbox = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..0592004f71aa 100644
--- a/drivers/staging/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
index 55fcee3a6470..55fcee3a6470 100644
--- a/drivers/staging/vboxvideo/vboxvideo_guest.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
index 427235869297..427235869297 100644
--- a/drivers/staging/vboxvideo/vboxvideo_vbe.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c
index 36bc9824ec3f..36bc9824ec3f 100644
--- a/drivers/staging/vboxvideo/vbva_base.c
+++ b/drivers/gpu/drm/vboxvideo/vbva_base.c
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 8dcce7182bb7..88ebd681d7eb 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -40,7 +40,7 @@ static bool is_user_label(int label)
return label >= VC4_BO_TYPE_COUNT;
}
-static void vc4_bo_stats_dump(struct vc4_dev *vc4)
+static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
int i;
@@ -48,58 +48,35 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
if (!vc4->bo_labels[i].num_allocated)
continue;
- DRM_INFO("%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
+ drm_printf(p, "%30s: %6dkb BOs (%d)\n",
+ vc4->bo_labels[i].name,
+ vc4->bo_labels[i].size_allocated / 1024,
+ vc4->bo_labels[i].num_allocated);
}
mutex_lock(&vc4->purgeable.lock);
if (vc4->purgeable.num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
if (vc4->purgeable.purged_num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
mutex_unlock(&vc4->purgeable.lock);
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
+static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- mutex_lock(&vc4->bo_lock);
- for (i = 0; i < vc4->num_labels; i++) {
- if (!vc4->bo_labels[i].num_allocated)
- continue;
-
- seq_printf(m, "%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
- }
- mutex_unlock(&vc4->bo_lock);
-
- mutex_lock(&vc4->purgeable.lock);
- if (vc4->purgeable.num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (vc4->purgeable.purged_num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
- mutex_unlock(&vc4->purgeable.lock);
+ vc4_bo_stats_print(&p, vc4);
return 0;
}
-#endif
/* Takes ownership of *name and returns the appropriate slot for it in
* the bo_labels[] array, extending it as necessary.
@@ -201,8 +178,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- reservation_object_fini(&bo->_resv);
-
drm_gem_cma_free_object(obj);
}
@@ -427,8 +402,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
return &bo->base.base;
}
@@ -479,8 +452,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
+ struct drm_printer p = drm_info_printer(vc4->dev->dev);
DRM_ERROR("Failed to allocate from CMA:\n");
- vc4_bo_stats_dump(vc4);
+ vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&cma_obj->base);
@@ -684,13 +658,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- return bo->resv;
-}
-
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
@@ -822,14 +789,12 @@ vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct vc4_bo *bo;
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return obj;
- bo = to_vc4_bo(obj);
- bo->resv = attach->dmabuf->resv;
+ obj->resv = attach->dmabuf->resv;
return obj;
}
@@ -1038,6 +1003,8 @@ int vc4_bo_cache_init(struct drm_device *dev)
mutex_init(&vc4->bo_lock);
+ vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
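
The vc4_bo.c conversion above follows the drm_printer pattern: one print routine takes a struct drm_printer and can be driven either from a debugfs seq_file (drm_seq_file_printer()) or towards the kernel log (drm_info_printer()). A self-contained sketch of that split; foo_dev and the foo_* functions are invented names:

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/seq_file.h>
#include <drm/drm_print.h>

struct foo_dev {
        unsigned int num_allocated;
};

static void foo_stats_print(struct drm_printer *p, struct foo_dev *foo)
{
        drm_printf(p, "allocated: %u\n", foo->num_allocated);
}

/* debugfs path */
static int foo_stats_debugfs(struct seq_file *m, struct foo_dev *foo)
{
        struct drm_printer p = drm_seq_file_printer(m);

        foo_stats_print(&p, foo);
        return 0;
}

/* error/dmesg path */
static void foo_report(struct foo_dev *foo, struct device *dev)
{
        struct drm_printer p = drm_info_printer(dev);

        foo_stats_print(&p, foo);
}
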
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 1baa10e94484..5e09389e1514 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <drm/drm_fb_cma_helper.h>
@@ -67,67 +68,22 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
-#define CRTC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} crtc_regs[] = {
- CRTC_REG(PV_CONTROL),
- CRTC_REG(PV_V_CONTROL),
- CRTC_REG(PV_VSYNCD_EVEN),
- CRTC_REG(PV_HORZA),
- CRTC_REG(PV_HORZB),
- CRTC_REG(PV_VERTA),
- CRTC_REG(PV_VERTB),
- CRTC_REG(PV_VERTA_EVEN),
- CRTC_REG(PV_VERTB_EVEN),
- CRTC_REG(PV_INTEN),
- CRTC_REG(PV_INTSTAT),
- CRTC_REG(PV_STAT),
- CRTC_REG(PV_HACT_ACT),
+static const struct debugfs_reg32 crtc_regs[] = {
+ VC4_REG32(PV_CONTROL),
+ VC4_REG32(PV_V_CONTROL),
+ VC4_REG32(PV_VSYNCD_EVEN),
+ VC4_REG32(PV_HORZA),
+ VC4_REG32(PV_HORZB),
+ VC4_REG32(PV_VERTA),
+ VC4_REG32(PV_VERTB),
+ VC4_REG32(PV_VERTA_EVEN),
+ VC4_REG32(PV_VERTB_EVEN),
+ VC4_REG32(PV_INTEN),
+ VC4_REG32(PV_INTSTAT),
+ VC4_REG32(PV_STAT),
+ VC4_REG32(PV_HACT_ACT),
};
-static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- crtc_regs[i].reg, crtc_regs[i].name,
- CRTC_READ(crtc_regs[i].reg));
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc_index = (uintptr_t)node->info_ent->data;
- struct drm_crtc *crtc;
- struct vc4_crtc *vc4_crtc;
- int i;
-
- i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (i == crtc_index)
- break;
- i++;
- }
- if (!crtc)
- return 0;
- vc4_crtc = to_vc4_crtc(crtc);
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- crtc_regs[i].name, crtc_regs[i].reg,
- CRTC_READ(crtc_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -434,8 +390,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool debug_dump_regs = false;
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
if (vc4_crtc->channel == 2) {
@@ -476,8 +434,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
vc4_crtc_lut_load(crtc);
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
}
@@ -834,6 +794,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
+
+ /* Wait for the page flip to unmask the underrun to ensure that
+ * the display list was updated by the hardware. Before that
+ * happens, the HVS will be using the previous display list with
+ * the CRTC and encoder already reconfigured, leading to
+ * underruns. This can be seen when reconfiguring the CRTC.
+ */
+ vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -1075,6 +1043,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
+ .debugfs_name = "crtc0_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
@@ -1083,6 +1052,7 @@ static const struct vc4_crtc_data pv0_data = {
static const struct vc4_crtc_data pv1_data = {
.hvs_channel = 2,
+ .debugfs_name = "crtc1_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
@@ -1091,6 +1061,7 @@ static const struct vc4_crtc_data pv1_data = {
static const struct vc4_crtc_data pv2_data = {
.hvs_channel = 1,
+ .debugfs_name = "crtc2_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
@@ -1169,11 +1140,16 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
vc4_crtc->data = match->data;
+ vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
return PTR_ERR(vc4_crtc->regs);
+ vc4_crtc->regset.base = vc4_crtc->regs;
+ vc4_crtc->regset.regs = crtc_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
+
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
* but to do that we would need to compute the bandwidth
@@ -1247,6 +1223,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, vc4_crtc);
+ vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name,
+ &vc4_crtc->regset);
+
return 0;
err_destroy_planes:
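
The vc4_crtc.c change above, and the matching ones in the DPI, DSI, HDMI and HVS files below, replace per-driver register-name tables and dump loops with struct debugfs_reg32 arrays printed through drm_print_regset32(). A minimal sketch of the pattern; REG32() mirrors the VC4_REG32() macro added in vc4_drv.h, and MY_CTRL/MY_STAT/my_dump_regs() are placeholders:

/* Illustrative sketch only, not part of the patch. */
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <drm/drm_print.h>

#define REG32(reg)      { .name = #reg, .offset = reg }

#define MY_CTRL 0x0000
#define MY_STAT 0x0004

static const struct debugfs_reg32 my_regs[] = {
        REG32(MY_CTRL),
        REG32(MY_STAT),
};

static void my_dump_regs(void __iomem *mmio, struct drm_printer *p)
{
        struct debugfs_regset32 regset = {
                .base = mmio,
                .regs = my_regs,
                .nregs = ARRAY_SIZE(my_regs),
        };

        /* Replaces the old hand-rolled "name (offset): value" loops. */
        drm_print_regset32(p, &regset);
}
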
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 7a0003de71ab..f9dec08267dc 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -15,26 +15,82 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-static const struct drm_info_list vc4_debugfs_list[] = {
- {"bo_stats", vc4_bo_stats_debugfs, 0},
- {"dpi_regs", vc4_dpi_debugfs_regs, 0},
- {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
- {"vec_regs", vc4_vec_debugfs_regs, 0},
- {"txp_regs", vc4_txp_debugfs_regs, 0},
- {"hvs_regs", vc4_hvs_debugfs_regs, 0},
- {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
- {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
- {"v3d_ident", vc4_v3d_debugfs_ident, 0},
- {"v3d_regs", vc4_v3d_debugfs_regs, 0},
+struct vc4_debugfs_info_entry {
+ struct list_head link;
+ struct drm_info_list info;
};
-#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
-
+/**
+ * Called at drm_dev_register() time on each of the minors registered
+ * by the DRM device, to attach the debugfs files.
+ */
int
vc4_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
+ struct vc4_debugfs_info_entry *entry;
+ struct dentry *dentry;
+
+ dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+ if (!dentry)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &vc4->debugfs_list, link) {
+ int ret = drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct debugfs_regset32 *regset = node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_print_regset32(&p, regset);
+
+ return 0;
+}
+
+/**
+ * Registers a debugfs file with a callback function for a vc4 component.
+ *
+ * This is like drm_debugfs_create_files(), but that can only be
+ * called on a given DRM minor, while the various VC4 components want to
+ * register their debugfs files during the component bind process. We
+ * track the request and delay it to be called on each minor during
+ * vc4_debugfs_init().
+ */
+void vc4_debugfs_add_file(struct drm_device *dev,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ struct vc4_debugfs_info_entry *entry =
+ devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->info.name = name;
+ entry->info.show = show;
+ entry->info.data = data;
+
+ list_add(&entry->link, &vc4->debugfs_list);
+}
+
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *name,
+ struct debugfs_regset32 *regset)
+{
+ vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
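
For context on the new vc4_debugfs.c scheme above: components record entries during bind, vc4_debugfs_init() later turns each one into a debugfs file via drm_debugfs_create_files(), and the show() callback recovers its private data through the drm_info_node that DRM passes in m->private. A stripped-down sketch of that plumbing; my_show(), my_file and my_debugfs_init() are invented names:

/* Illustrative sketch only, not part of the patch. */
#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_print.h>

static int my_show(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = m->private;
        void *data = node->info_ent->data;      /* what was passed as .data */
        struct drm_printer p = drm_seq_file_printer(m);

        drm_printf(&p, "data=%p\n", data);
        return 0;
}

static const struct drm_info_list my_file = {
        .name = "my_file", .show = my_show,
};

static int my_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(&my_file, 1,
                                        minor->debugfs_root, minor);
}
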
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 169521e547ba..34f90ca8f479 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -101,6 +101,8 @@ struct vc4_dpi {
struct clk *pixel_clock;
struct clk *core_clock;
+
+ struct debugfs_regset32 regset;
};
#define DPI_READ(offset) readl(dpi->regs + (offset))
@@ -118,37 +120,11 @@ to_vc4_dpi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi_encoder, base.base);
}
-#define DPI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dpi_regs[] = {
- DPI_REG(DPI_C),
- DPI_REG(DPI_ID),
+static const struct debugfs_reg32 dpi_regs[] = {
+ VC4_REG32(DPI_C),
+ VC4_REG32(DPI_ID),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_dpi *dpi = vc4->dpi;
- int i;
-
- if (!dpi)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- dpi_regs[i].name, dpi_regs[i].reg,
- DPI_READ(dpi_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -314,6 +290,9 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
return PTR_ERR(dpi->regs);
+ dpi->regset.base = dpi->regs;
+ dpi->regset.regs = dpi_regs;
+ dpi->regset.nregs = ARRAY_SIZE(dpi_regs);
if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -352,6 +331,8 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
vc4->dpi = dpi;
+ vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5fcd2f0da7f7..6d9be20a32be 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -72,30 +72,30 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (!vc4->v3d)
+ return -ENODEV;
+
switch (args->param) {
case DRM_VC4_PARAM_V3D_IDENT0:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
case DRM_VC4_PARAM_SUPPORTS_ETC1:
@@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = vc4_prime_export,
- .gem_prime_res_obj = vc4_prime_res_obj,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
.gem_prime_vmap = vc4_prime_vmap,
@@ -252,6 +251,7 @@ static int vc4_drm_bind(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm;
struct vc4_dev *vc4;
+ struct device_node *node;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
@@ -260,12 +260,19 @@ static int vc4_drm_bind(struct device *dev)
if (!vc4)
return -ENOMEM;
+ /* If VC4 V3D is missing, don't advertise render nodes. */
+ node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
+ if (!node || !of_device_is_available(node))
+ vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
+ of_node_put(node);
+
drm = drm_dev_alloc(&vc4_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
platform_set_drvdata(pdev, drm);
vc4->dev = drm;
drm->dev_private = vc4;
+ INIT_LIST_HEAD(&vc4->debugfs_list);
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -281,13 +288,15 @@ static int vc4_drm_bind(struct device *dev)
drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
- ret = drm_dev_register(drm, 0);
+ ret = vc4_kms_load(drm);
if (ret < 0)
goto unbind_all;
- vc4_kms_load(drm);
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto unbind_all;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_generic_setup(drm, 16);
return 0;
@@ -312,6 +321,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_mode_config_cleanup(drm);
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 2c635f001c71..4f13f6262491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -7,7 +7,6 @@
*/
#include <linux/mm_types.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_encoder.h>
@@ -185,10 +184,20 @@ struct vc4_dev {
/* Bitmask of the current bin_alloc used for overflow memory. */
uint32_t bin_alloc_overflow;
+ /* Incremented when an underrun error happened after an atomic commit.
+ * This is particularly useful to detect when a specific modeset is too
+ * demanding in terms of memory or HVS bandwidth, which is hard to guess
+ * at atomic check time.
+ */
+ atomic_t underrun;
+
struct work_struct overflow_mem_work;
int power_refcount;
+ /* Set to true when the load tracker is active. */
+ bool load_tracker_enabled;
+
/* Mutex controlling the power refcount. */
struct mutex power_lock;
@@ -201,6 +210,12 @@ struct vc4_dev {
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj load_tracker;
+
+ /* List of vc4_debugfs_info_entry for adding to debugfs once
+ * the minor is available (after drm_dev_register()).
+ */
+ struct list_head debugfs_list;
};
static inline struct vc4_dev *
@@ -240,10 +255,6 @@ struct vc4_bo {
*/
struct vc4_validated_shader_info *validated_shader;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
* for user-allocated labels.
*/
@@ -290,6 +301,7 @@ struct vc4_v3d {
struct platform_device *pdev;
void __iomem *regs;
struct clk *clk;
+ struct debugfs_regset32 regset;
};
struct vc4_hvs {
@@ -306,6 +318,7 @@ struct vc4_hvs {
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
+ struct debugfs_regset32 regset;
};
struct vc4_plane {
@@ -376,6 +389,16 @@ struct vc4_plane_state {
* when async update is not possible.
*/
bool dlist_initialized;
+
+ /* Load of this plane on the HVS block. The load is expressed in HVS
+ * cycles/sec.
+ */
+ u64 hvs_load;
+
+ /* Memory bandwidth needed for this plane. This is expressed in
+ * bytes/sec.
+ */
+ u64 membus_load;
};
static inline struct vc4_plane_state *
@@ -411,10 +434,12 @@ struct vc4_crtc_data {
int hvs_channel;
enum vc4_encoder_type encoder_types[4];
+ const char *debugfs_name;
};
struct vc4_crtc {
struct drm_crtc base;
+ struct platform_device *pdev;
const struct vc4_crtc_data *data;
void __iomem *regs;
@@ -431,6 +456,8 @@ struct vc4_crtc {
u32 cob_size;
struct drm_pending_vblank_event *event;
+
+ struct debugfs_regset32 regset;
};
static inline struct vc4_crtc *
@@ -444,6 +471,8 @@ to_vc4_crtc(struct drm_crtc *crtc)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
+#define VC4_REG32(reg) { .name = #reg, .offset = reg }
+
struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
@@ -685,7 +714,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -693,7 +721,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
-int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -701,7 +728,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -714,17 +740,37 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset);
+#else
+static inline void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+}
+
+static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset)
+{
+}
+#endif
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;
@@ -752,15 +798,12 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
@@ -772,7 +815,8 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
@@ -787,9 +831,10 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
+int vc4_v3d_pm_get(struct vc4_dev *vc4);
+void vc4_v3d_pm_put(struct vc4_dev *vc4);
/* vc4_validate.c */
int
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 11702e1d9011..2ea4e20b7b8a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -41,6 +41,7 @@
#include <linux/component.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
@@ -545,6 +546,8 @@ struct vc4_dsi {
struct completion xfer_completion;
int xfer_result;
+
+ struct debugfs_regset32 regset;
};
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
@@ -605,113 +608,56 @@ to_vc4_dsi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi_encoder, base.base);
}
-#define DSI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dsi0_regs[] = {
- DSI_REG(DSI0_CTRL),
- DSI_REG(DSI0_STAT),
- DSI_REG(DSI0_HSTX_TO_CNT),
- DSI_REG(DSI0_LPRX_TO_CNT),
- DSI_REG(DSI0_TA_TO_CNT),
- DSI_REG(DSI0_PR_TO_CNT),
- DSI_REG(DSI0_DISP0_CTRL),
- DSI_REG(DSI0_DISP1_CTRL),
- DSI_REG(DSI0_INT_STAT),
- DSI_REG(DSI0_INT_EN),
- DSI_REG(DSI0_PHYC),
- DSI_REG(DSI0_HS_CLT0),
- DSI_REG(DSI0_HS_CLT1),
- DSI_REG(DSI0_HS_CLT2),
- DSI_REG(DSI0_HS_DLT3),
- DSI_REG(DSI0_HS_DLT4),
- DSI_REG(DSI0_HS_DLT5),
- DSI_REG(DSI0_HS_DLT6),
- DSI_REG(DSI0_HS_DLT7),
- DSI_REG(DSI0_PHY_AFEC0),
- DSI_REG(DSI0_PHY_AFEC1),
- DSI_REG(DSI0_ID),
+static const struct debugfs_reg32 dsi0_regs[] = {
+ VC4_REG32(DSI0_CTRL),
+ VC4_REG32(DSI0_STAT),
+ VC4_REG32(DSI0_HSTX_TO_CNT),
+ VC4_REG32(DSI0_LPRX_TO_CNT),
+ VC4_REG32(DSI0_TA_TO_CNT),
+ VC4_REG32(DSI0_PR_TO_CNT),
+ VC4_REG32(DSI0_DISP0_CTRL),
+ VC4_REG32(DSI0_DISP1_CTRL),
+ VC4_REG32(DSI0_INT_STAT),
+ VC4_REG32(DSI0_INT_EN),
+ VC4_REG32(DSI0_PHYC),
+ VC4_REG32(DSI0_HS_CLT0),
+ VC4_REG32(DSI0_HS_CLT1),
+ VC4_REG32(DSI0_HS_CLT2),
+ VC4_REG32(DSI0_HS_DLT3),
+ VC4_REG32(DSI0_HS_DLT4),
+ VC4_REG32(DSI0_HS_DLT5),
+ VC4_REG32(DSI0_HS_DLT6),
+ VC4_REG32(DSI0_HS_DLT7),
+ VC4_REG32(DSI0_PHY_AFEC0),
+ VC4_REG32(DSI0_PHY_AFEC1),
+ VC4_REG32(DSI0_ID),
};
-static const struct {
- u32 reg;
- const char *name;
-} dsi1_regs[] = {
- DSI_REG(DSI1_CTRL),
- DSI_REG(DSI1_STAT),
- DSI_REG(DSI1_HSTX_TO_CNT),
- DSI_REG(DSI1_LPRX_TO_CNT),
- DSI_REG(DSI1_TA_TO_CNT),
- DSI_REG(DSI1_PR_TO_CNT),
- DSI_REG(DSI1_DISP0_CTRL),
- DSI_REG(DSI1_DISP1_CTRL),
- DSI_REG(DSI1_INT_STAT),
- DSI_REG(DSI1_INT_EN),
- DSI_REG(DSI1_PHYC),
- DSI_REG(DSI1_HS_CLT0),
- DSI_REG(DSI1_HS_CLT1),
- DSI_REG(DSI1_HS_CLT2),
- DSI_REG(DSI1_HS_DLT3),
- DSI_REG(DSI1_HS_DLT4),
- DSI_REG(DSI1_HS_DLT5),
- DSI_REG(DSI1_HS_DLT6),
- DSI_REG(DSI1_HS_DLT7),
- DSI_REG(DSI1_PHY_AFEC0),
- DSI_REG(DSI1_PHY_AFEC1),
- DSI_REG(DSI1_ID),
+static const struct debugfs_reg32 dsi1_regs[] = {
+ VC4_REG32(DSI1_CTRL),
+ VC4_REG32(DSI1_STAT),
+ VC4_REG32(DSI1_HSTX_TO_CNT),
+ VC4_REG32(DSI1_LPRX_TO_CNT),
+ VC4_REG32(DSI1_TA_TO_CNT),
+ VC4_REG32(DSI1_PR_TO_CNT),
+ VC4_REG32(DSI1_DISP0_CTRL),
+ VC4_REG32(DSI1_DISP1_CTRL),
+ VC4_REG32(DSI1_INT_STAT),
+ VC4_REG32(DSI1_INT_EN),
+ VC4_REG32(DSI1_PHYC),
+ VC4_REG32(DSI1_HS_CLT0),
+ VC4_REG32(DSI1_HS_CLT1),
+ VC4_REG32(DSI1_HS_CLT2),
+ VC4_REG32(DSI1_HS_DLT3),
+ VC4_REG32(DSI1_HS_DLT4),
+ VC4_REG32(DSI1_HS_DLT5),
+ VC4_REG32(DSI1_HS_DLT6),
+ VC4_REG32(DSI1_HS_DLT7),
+ VC4_REG32(DSI1_PHY_AFEC0),
+ VC4_REG32(DSI1_PHY_AFEC1),
+ VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_dump_regs(struct vc4_dsi *dsi)
-{
- int i;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- int dsi_index = (uintptr_t)node->info_ent->data;
- struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
- int i;
-
- if (!dsi)
- return 0;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-
- return 0;
-}
-#endif
-
static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -900,8 +846,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
}
if (debug_dump_regs) {
- DRM_INFO("DSI regs before:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs before:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
/* Round up the clk_set_rate() request slightly, since
@@ -1135,8 +1082,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
drm_bridge_enable(dsi->bridge);
if (debug_dump_regs) {
- DRM_INFO("DSI regs after:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs after:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
}
@@ -1527,6 +1475,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
+ dsi->regset.base = dsi->regs;
+ if (dsi->port == 0) {
+ dsi->regset.regs = dsi0_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
+ } else {
+ dsi->regset.regs = dsi1_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
+ }
+
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
DSI_PORT_READ(ID), DSI_ID_VALUE);
@@ -1662,6 +1619,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
dsi->encoder->bridge = NULL;
+ if (dsi->port == 0)
+ vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
+ else
+ vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index aea2b8dfec17..d9311be32a4f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -74,6 +74,11 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
kernel_state = vc4->hang_state;
if (!kernel_state) {
@@ -536,7 +541,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- reservation_object_add_shared_fence(bo->resv, exec->fence);
+ reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -547,7 +552,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- reservation_object_add_excl_fence(bo->resv, exec->fence);
+ reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
}
}
@@ -559,7 +564,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
int i;
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct drm_gem_object *bo = &exec->bo[i]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -581,13 +586,13 @@ vc4_lock_bo_reservations(struct drm_device *dev,
{
int contended_lock = -1;
int i, ret;
- struct vc4_bo *bo;
+ struct drm_gem_object *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -600,19 +605,19 @@ retry:
if (i == contended_lock)
continue;
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = to_vc4_bo(&exec->bo[j]->base);
+ bo = &exec->bo[j]->base;
ww_mutex_unlock(&bo->resv->lock);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -633,7 +638,7 @@ retry:
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = reservation_object_reserve_shared(bo->resv, 1);
if (ret) {
@@ -964,12 +969,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* Release the reference we had on the perf monitor. */
vc4_perfmon_put(exec->perfmon);
- mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0) {
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
- }
- mutex_unlock(&vc4->power_lock);
+ vc4_v3d_pm_put(vc4);
kfree(exec);
}
@@ -1124,6 +1124,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct dma_fence *in_fence;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
VC4_SUBMIT_CL_FIXED_RCL_ORDER |
VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
@@ -1143,17 +1148,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- mutex_lock(&vc4->power_lock);
- if (vc4->power_refcount++ == 0) {
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0) {
- mutex_unlock(&vc4->power_lock);
- vc4->power_refcount--;
- kfree(exec);
- return ret;
- }
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret) {
+ kfree(exec);
+ return ret;
}
- mutex_unlock(&vc4->power_lock);
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);
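
Not shown in this diff: the bodies of vc4_v3d_pm_get()/vc4_v3d_pm_put() that the vc4_drv.c and vc4_gem.c hunks above now call. Judging from the inline power_refcount/pm_runtime code they replace, a plausible shape is the sketch below; the real helpers live in vc4_v3d.c and may differ in detail:

/* Reconstructed sketch, not taken from the patch; treat as an assumption. */
#include <linux/pm_runtime.h>
#include "vc4_drv.h"

int vc4_v3d_pm_get(struct vc4_dev *vc4)
{
        mutex_lock(&vc4->power_lock);
        if (vc4->power_refcount++ == 0) {
                int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);

                if (ret < 0) {
                        vc4->power_refcount--;
                        mutex_unlock(&vc4->power_lock);
                        return ret;
                }
        }
        mutex_unlock(&vc4->power_lock);

        return 0;
}

void vc4_v3d_pm_put(struct vc4_dev *vc4)
{
        mutex_lock(&vc4->power_lock);
        if (--vc4->power_refcount == 0) {
                pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
                pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
        }
        mutex_unlock(&vc4->power_lock);
}
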
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 88fd5df7e7dc..99fc8569e0f5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,9 @@ struct vc4_hdmi {
struct clk *pixel_clock;
struct clk *hsm_clock;
+
+ struct debugfs_regset32 hdmi_regset;
+ struct debugfs_regset32 hd_regset;
};
#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset)
@@ -134,103 +137,69 @@ to_vc4_hdmi_connector(struct drm_connector *connector)
return container_of(connector, struct vc4_hdmi_connector, base);
}
-#define HDMI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hdmi_regs[] = {
- HDMI_REG(VC4_HDMI_CORE_REV),
- HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
- HDMI_REG(VC4_HDMI_HOTPLUG_INT),
- HDMI_REG(VC4_HDMI_HOTPLUG),
- HDMI_REG(VC4_HDMI_MAI_CHANNEL_MAP),
- HDMI_REG(VC4_HDMI_MAI_CONFIG),
- HDMI_REG(VC4_HDMI_MAI_FORMAT),
- HDMI_REG(VC4_HDMI_AUDIO_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_HORZA),
- HDMI_REG(VC4_HDMI_HORZB),
- HDMI_REG(VC4_HDMI_FIFO_CTL),
- HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL),
- HDMI_REG(VC4_HDMI_VERTA0),
- HDMI_REG(VC4_HDMI_VERTA1),
- HDMI_REG(VC4_HDMI_VERTB0),
- HDMI_REG(VC4_HDMI_VERTB1),
- HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
- HDMI_REG(VC4_HDMI_TX_PHY_CTL0),
-
- HDMI_REG(VC4_HDMI_CEC_CNTRL_1),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_2),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_3),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_4),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_5),
- HDMI_REG(VC4_HDMI_CPU_STATUS),
- HDMI_REG(VC4_HDMI_CPU_MASK_STATUS),
-
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_4),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_4),
+static const struct debugfs_reg32 hdmi_regs[] = {
+ VC4_REG32(VC4_HDMI_CORE_REV),
+ VC4_REG32(VC4_HDMI_SW_RESET_CONTROL),
+ VC4_REG32(VC4_HDMI_HOTPLUG_INT),
+ VC4_REG32(VC4_HDMI_HOTPLUG),
+ VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP),
+ VC4_REG32(VC4_HDMI_MAI_CONFIG),
+ VC4_REG32(VC4_HDMI_MAI_FORMAT),
+ VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_HORZA),
+ VC4_REG32(VC4_HDMI_HORZB),
+ VC4_REG32(VC4_HDMI_FIFO_CTL),
+ VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL),
+ VC4_REG32(VC4_HDMI_VERTA0),
+ VC4_REG32(VC4_HDMI_VERTA1),
+ VC4_REG32(VC4_HDMI_VERTB0),
+ VC4_REG32(VC4_HDMI_VERTB1),
+ VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL),
+ VC4_REG32(VC4_HDMI_TX_PHY_CTL0),
+
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_1),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_2),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_3),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_4),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_5),
+ VC4_REG32(VC4_HDMI_CPU_STATUS),
+ VC4_REG32(VC4_HDMI_CPU_MASK_STATUS),
+
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_4),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_4),
};
-static const struct {
- u32 reg;
- const char *name;
-} hd_regs[] = {
- HDMI_REG(VC4_HD_M_CTL),
- HDMI_REG(VC4_HD_MAI_CTL),
- HDMI_REG(VC4_HD_MAI_THR),
- HDMI_REG(VC4_HD_MAI_FMT),
- HDMI_REG(VC4_HD_MAI_SMP),
- HDMI_REG(VC4_HD_VID_CTL),
- HDMI_REG(VC4_HD_CSC_CTL),
- HDMI_REG(VC4_HD_FRAME_COUNT),
+static const struct debugfs_reg32 hd_regs[] = {
+ VC4_REG32(VC4_HD_M_CTL),
+ VC4_REG32(VC4_HD_MAI_CTL),
+ VC4_REG32(VC4_HD_MAI_THR),
+ VC4_REG32(VC4_HD_MAI_FMT),
+ VC4_REG32(VC4_HD_MAI_SMP),
+ VC4_REG32(VC4_HD_VID_CTL),
+ VC4_REG32(VC4_HD_CSC_CTL),
+ VC4_REG32(VC4_HD_FRAME_COUNT),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hdmi_regs[i].name, hdmi_regs[i].reg,
- HDMI_READ(hdmi_regs[i].reg));
- }
+ struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hd_regs[i].name, hd_regs[i].reg,
- HD_READ(hd_regs[i].reg));
- }
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
-
-static void vc4_hdmi_dump_regs(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hdmi_regs[i].reg, hdmi_regs[i].name,
- HDMI_READ(hdmi_regs[i].reg));
- }
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hd_regs[i].reg, hd_regs[i].name,
- HD_READ(hd_regs[i].reg));
- }
-}
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
@@ -561,8 +530,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs before:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs before:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL, 0);
@@ -637,8 +609,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs after:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs after:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL,
@@ -1333,6 +1308,13 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
+ hdmi->hdmi_regset.base = hdmi->hdmicore_regs;
+ hdmi->hdmi_regset.regs = hdmi_regs;
+ hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs);
+ hdmi->hd_regset.base = hdmi->hd_regs;
+ hdmi->hd_regset.regs = hd_regs;
+ hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs);
+
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
@@ -1448,6 +1430,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_destroy_encoder;
+ vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi);
+
return 0;
#ifdef CONFIG_DRM_VC4_HDMI_CEC
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 5d8c749c9749..f746e9a7a88c 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -22,58 +22,52 @@
* each CRTC.
*/
+#include <drm/drm_atomic_helper.h>
#include <linux/component.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
-#define HVS_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hvs_regs[] = {
- HVS_REG(SCALER_DISPCTRL),
- HVS_REG(SCALER_DISPSTAT),
- HVS_REG(SCALER_DISPID),
- HVS_REG(SCALER_DISPECTRL),
- HVS_REG(SCALER_DISPPROF),
- HVS_REG(SCALER_DISPDITHER),
- HVS_REG(SCALER_DISPEOLN),
- HVS_REG(SCALER_DISPLIST0),
- HVS_REG(SCALER_DISPLIST1),
- HVS_REG(SCALER_DISPLIST2),
- HVS_REG(SCALER_DISPLSTAT),
- HVS_REG(SCALER_DISPLACT0),
- HVS_REG(SCALER_DISPLACT1),
- HVS_REG(SCALER_DISPLACT2),
- HVS_REG(SCALER_DISPCTRL0),
- HVS_REG(SCALER_DISPBKGND0),
- HVS_REG(SCALER_DISPSTAT0),
- HVS_REG(SCALER_DISPBASE0),
- HVS_REG(SCALER_DISPCTRL1),
- HVS_REG(SCALER_DISPBKGND1),
- HVS_REG(SCALER_DISPSTAT1),
- HVS_REG(SCALER_DISPBASE1),
- HVS_REG(SCALER_DISPCTRL2),
- HVS_REG(SCALER_DISPBKGND2),
- HVS_REG(SCALER_DISPSTAT2),
- HVS_REG(SCALER_DISPBASE2),
- HVS_REG(SCALER_DISPALPHA2),
- HVS_REG(SCALER_OLEDOFFS),
- HVS_REG(SCALER_OLEDCOEF0),
- HVS_REG(SCALER_OLEDCOEF1),
- HVS_REG(SCALER_OLEDCOEF2),
+static const struct debugfs_reg32 hvs_regs[] = {
+ VC4_REG32(SCALER_DISPCTRL),
+ VC4_REG32(SCALER_DISPSTAT),
+ VC4_REG32(SCALER_DISPID),
+ VC4_REG32(SCALER_DISPECTRL),
+ VC4_REG32(SCALER_DISPPROF),
+ VC4_REG32(SCALER_DISPDITHER),
+ VC4_REG32(SCALER_DISPEOLN),
+ VC4_REG32(SCALER_DISPLIST0),
+ VC4_REG32(SCALER_DISPLIST1),
+ VC4_REG32(SCALER_DISPLIST2),
+ VC4_REG32(SCALER_DISPLSTAT),
+ VC4_REG32(SCALER_DISPLACT0),
+ VC4_REG32(SCALER_DISPLACT1),
+ VC4_REG32(SCALER_DISPLACT2),
+ VC4_REG32(SCALER_DISPCTRL0),
+ VC4_REG32(SCALER_DISPBKGND0),
+ VC4_REG32(SCALER_DISPSTAT0),
+ VC4_REG32(SCALER_DISPBASE0),
+ VC4_REG32(SCALER_DISPCTRL1),
+ VC4_REG32(SCALER_DISPBKGND1),
+ VC4_REG32(SCALER_DISPSTAT1),
+ VC4_REG32(SCALER_DISPBASE1),
+ VC4_REG32(SCALER_DISPCTRL2),
+ VC4_REG32(SCALER_DISPBKGND2),
+ VC4_REG32(SCALER_DISPSTAT2),
+ VC4_REG32(SCALER_DISPBASE2),
+ VC4_REG32(SCALER_DISPALPHA2),
+ VC4_REG32(SCALER_OLEDOFFS),
+ VC4_REG32(SCALER_OLEDCOEF0),
+ VC4_REG32(SCALER_OLEDCOEF1),
+ VC4_REG32(SCALER_OLEDCOEF2),
};
void vc4_hvs_dump_state(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
int i;
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hvs_regs[i].reg, hvs_regs[i].name,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_print_regset32(&p, &vc4->hvs->regset);
DRM_INFO("HVS ctx:\n");
for (i = 0; i < 64; i += 4) {
@@ -86,23 +80,17 @@ void vc4_hvs_dump_state(struct drm_device *dev)
}
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hvs_regs[i].name, hvs_regs[i].reg,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));
return 0;
}
-#endif
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
@@ -166,6 +154,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
return 0;
}
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
+
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPSTAT,
+ SCALER_DISPSTAT_EUFLOW(channel));
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
+
+static void vc4_hvs_report_underrun(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ atomic_inc(&vc4->underrun);
+ DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
+}
+
+static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+{
+ struct drm_device *dev = data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ irqreturn_t irqret = IRQ_NONE;
+ int channel;
+ u32 control;
+ u32 status;
+
+ status = HVS_READ(SCALER_DISPSTAT);
+ control = HVS_READ(SCALER_DISPCTRL);
+
+ for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+ /* Interrupt masking is not always honored, so check it here. */
+ if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+ control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
+ vc4_hvs_mask_underrun(dev, channel);
+ vc4_hvs_report_underrun(dev);
+
+ irqret = IRQ_HANDLED;
+ }
+ }
+
+ /* Clear every per-channel interrupt flag. */
+ HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
+ SCALER_DISPSTAT_IRQMASK(1) |
+ SCALER_DISPSTAT_IRQMASK(2));
+
+ return irqret;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -185,6 +234,10 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
+ hvs->regset.base = hvs->regs;
+ hvs->regset.regs = hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
+
hvs->dlist = hvs->regs + SCALER_DLIST_START;
spin_lock_init(&hvs->mm_lock);
@@ -219,15 +272,40 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
+ dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
+ SCALER_DISPCTRL_DISPEIRQ(1) |
+ SCALER_DISPCTRL_DISPEIRQ(2);
/* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
* be unused.
*/
dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER_DISPCTRL_SLVWREIRQ |
+ SCALER_DISPCTRL_SLVRDEIRQ |
+ SCALER_DISPCTRL_DSPEIEOF(0) |
+ SCALER_DISPCTRL_DSPEIEOF(1) |
+ SCALER_DISPCTRL_DSPEIEOF(2) |
+ SCALER_DISPCTRL_DSPEIEOLN(0) |
+ SCALER_DISPCTRL_DSPEIEOLN(1) |
+ SCALER_DISPCTRL_DSPEIEOLN(2) |
+ SCALER_DISPCTRL_DSPEISLUR(0) |
+ SCALER_DISPCTRL_DSPEISLUR(1) |
+ SCALER_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+
+ vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
+ vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
+ NULL);
+
return 0;
}
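
The bind hunk above switches the HVS over to the generic regset pattern: a table of { offset, name } pairs plus the mapped base, walked by drm_print_regset32() and the debugfs helper instead of a hand-rolled loop. The same conversion is repeated for TXP, V3D and VEC further down. A small userspace sketch of the idea, with a fake register array standing in for the MMIO mapping and only two registers listed:

#include <stdio.h>

struct reg32 { unsigned int offset; const char *name; };

#define REG32(r)  { r, #r }

#define SCALER_DISPCTRL  0x0000
#define SCALER_DISPSTAT  0x0004

static const struct reg32 hvs_regs[] = {
        REG32(SCALER_DISPCTRL),
        REG32(SCALER_DISPSTAT),
};

static unsigned int fake_mmio[2] = { 0x80000000, 0x00000100 };  /* invented values */

static void print_regset(const struct reg32 *regs, int nregs,
                         const unsigned int *base)
{
        for (int i = 0; i < nregs; i++)
                printf("%s (0x%04x): 0x%08x\n", regs[i].name,
                       regs[i].offset, base[regs[i].offset / 4]);
}

int main(void)
{
        print_regset(hvs_regs, 2, fake_mmio);
        return 0;
}
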
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 4cd2ccfe15f4..ffd0a4388752 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -229,6 +229,9 @@ vc4_irq_preinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
init_waitqueue_head(&vc4->job_wait_queue);
INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
@@ -243,6 +246,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return 0;
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -254,6 +260,9 @@ vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
/* Disable sending interrupts for our driver's IRQs. */
V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 91b8c72ff361..295dacc8bcb9 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_load_tracker_state {
+ struct drm_private_state base;
+ u64 hvs_load;
+ u64 membus_load;
+};
+
+static struct vc4_load_tracker_state *
+to_vc4_load_tracker_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_load_tracker_state, base);
+}
+
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
@@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+ continue;
+
+ vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
+ vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+ }
drm_atomic_helper_wait_for_fences(dev, state, false);
@@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return 0;
}
+static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct vc4_load_tracker_state *load_state;
+ struct drm_private_state *priv_state;
+ struct drm_plane *plane;
+ int i;
+
+ priv_state = drm_atomic_get_private_obj_state(state,
+ &vc4->load_tracker);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ load_state = to_vc4_load_tracker_state(priv_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct vc4_plane_state *vc4_plane_state;
+
+ if (old_plane_state->fb && old_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(old_plane_state);
+ load_state->membus_load -= vc4_plane_state->membus_load;
+ load_state->hvs_load -= vc4_plane_state->hvs_load;
+ }
+
+ if (new_plane_state->fb && new_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(new_plane_state);
+ load_state->membus_load += vc4_plane_state->membus_load;
+ load_state->hvs_load += vc4_plane_state->hvs_load;
+ }
+ }
+
+ /* Don't check the load when the tracker is disabled. */
+ if (!vc4->load_tracker_enabled)
+ return 0;
+
+ /* The absolute limit is 2Gbyte/sec, but let's take a margin to let
+ * the system work when other blocks are accessing the memory.
+ */
+ if (load_state->membus_load > SZ_1G + SZ_512M)
+ return -ENOSPC;
+
+ /* The HVS clock is supposed to run at 250 MHz; take a margin and
+ * assume a usable maximum of 240M cycles per second.
+ */
+ if (load_state->hvs_load > 240000000ULL)
+ return -ENOSPC;
+
+ return 0;
+}
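
The check above accumulates the membus/HVS load deltas of every touched plane into the shared private state and rejects the commit when either budget is exceeded (roughly 1.5 GB/s on the memory bus, 240M HVS cycles per second). A standalone sketch of the accept/reject arithmetic, using a hypothetical single 1920x1080@60 XRGB plane (about 497.7 MB/s and 31.1M cycles/s with no scaling) and an obviously oversubscribed configuration:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#define SZ_512M  (512u * 1024 * 1024)
#define SZ_1G    (1024u * 1024 * 1024)

static int check_load(uint64_t membus_load, uint64_t hvs_load)
{
        if (membus_load > (uint64_t)SZ_1G + SZ_512M)
                return -ENOSPC;          /* memory bus budget exceeded */
        if (hvs_load > 240000000ULL)
                return -ENOSPC;          /* HVS cycle budget exceeded */
        return 0;
}

int main(void)
{
        /* 1920*1080*4*60 bytes/s and 1920*1080*60/4 cycles/s */
        printf("single plane: %d\n", check_load(497664000ULL, 31104000ULL));
        /* made-up oversubscribed numbers */
        printf("too much:     %d\n", check_load(2000000000ULL, 300000000ULL));
        return 0;
}
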
+
+static struct drm_private_state *
+vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
+{
+ struct vc4_load_tracker_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ load_state = to_vc4_load_tracker_state(state);
+ kfree(load_state);
+}
+
+static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
+ .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
+ .atomic_destroy_state = vc4_load_tracker_destroy_state,
+};
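
The duplicate/destroy hooks above follow the standard DRM private-object pattern: duplication is a kmemdup() of the derived state, after which the helper fixes up the embedded base. The userspace equivalent of that duplication step, with an invented state layout:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

struct load_state {
        uint64_t hvs_load;
        uint64_t membus_load;
};

static struct load_state *duplicate_state(const struct load_state *old)
{
        struct load_state *copy = malloc(sizeof(*copy));

        if (!copy)
                return NULL;
        memcpy(copy, old, sizeof(*copy));   /* this is what kmemdup() boils down to */
        return copy;
}

int main(void)
{
        struct load_state cur = { .hvs_load = 123, .membus_load = 456 };
        struct load_state *copied = duplicate_state(&cur);

        if (copied)
                printf("copy: hvs=%llu membus=%llu\n",
                       (unsigned long long)copied->hvs_load,
                       (unsigned long long)copied->membus_load);
        free(copied);
        return 0;
}
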
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
@@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
if (ret < 0)
return ret;
- return drm_atomic_helper_check(dev, state);
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ return vc4_load_tracker_atomic_check(state);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
@@ -407,13 +512,20 @@ int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_ctm_state *ctm_state;
+ struct vc4_load_tracker_state *load_state;
int ret;
+ /* Start with the load tracker enabled. Can be disabled through the
+ * debugfs load_tracker file.
+ */
+ vc4->load_tracker_enabled = true;
+
sema_init(&vc4->async_modeset, 1);
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
+ dev->irq_enabled = true;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -436,6 +548,15 @@ int vc4_kms_load(struct drm_device *dev)
drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state) {
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 495150415020..f4aa75efd16b 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -100,12 +100,18 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_create *req = data;
struct vc4_perfmon *perfmon;
unsigned int i;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
/* Number of monitored counters cannot exceed HW limits. */
if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
!req->ncounters)
@@ -146,10 +152,16 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
mutex_unlock(&vc4file->perfmon.lock);
@@ -164,11 +176,17 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_get_values *req = data;
struct vc4_perfmon *perfmon;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, req->id);
vc4_perfmon_get(perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d098337c10e9..4d918d3e4858 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static void vc4_plane_calc_load(struct drm_plane_state *state)
+{
+ unsigned int hvs_load_shift, vrefresh, i;
+ struct drm_framebuffer *fb = state->fb;
+ struct vc4_plane_state *vc4_state;
+ struct drm_crtc_state *crtc_state;
+ unsigned int vscale_factor;
+
+ vc4_state = to_vc4_plane_state(state);
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
+
+ /* The HVS is able to process 2 pixels/cycle when scaling the source,
+ * 4 pixels/cycle otherwise.
+ * Alpha blending step seems to be pipelined and it's always operating
+ * at 4 pixels/cycle, so the limiting aspect here seems to be the
+ * scaler block.
+ * HVS load is expressed in clk-cycles/sec (AKA Hz).
+ */
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ hvs_load_shift = 1;
+ else
+ hvs_load_shift = 2;
+
+ vc4_state->membus_load = 0;
+ vc4_state->hvs_load = 0;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ /* Even if the bandwidth/plane required for a single frame is
+ *
+ * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
+ *
+ * when downscaling, we have to read more pixels per line in
+ * the time frame reserved for a single line, so the bandwidth
+ * demand can be momentarily higher. To account for that, we
+ * calculate the down-scaling factor and multiply the plane
+ * load by this number. We're likely over-estimating the read
+ * demand, but that's better than under-estimating it.
+ */
+ vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
+ vc4_state->crtc_h);
+ vc4_state->membus_load += vc4_state->src_w[i] *
+ vc4_state->src_h[i] * vscale_factor *
+ fb->format->cpp[i];
+ vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
+ }
+
+ vc4_state->hvs_load *= vrefresh;
+ vc4_state->hvs_load >>= hvs_load_shift;
+ vc4_state->membus_load *= vrefresh;
+}
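
vc4_plane_calc_load() above turns plane geometry into two numbers: a memory-bus load in bytes/s, inflated by DIV_ROUND_UP(src_h, crtc_h) when downscaling, and an HVS load in cycles/s, i.e. output pixels per frame times the refresh rate divided by 2 or 4 pixels per cycle. A worked example in plain C for a hypothetical 1920x1080 XRGB plane downscaled to 960x540 on a 60 Hz CRTC:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int src_w = 1920, src_h = 1080, cpp = 4;
        unsigned int crtc_w = 960, crtc_h = 540, vrefresh = 60;
        unsigned int hvs_load_shift = 1;    /* scaling => 2 pixels/cycle */

        uint64_t vscale = DIV_ROUND_UP(src_h, crtc_h);               /* 2 */
        uint64_t membus = (uint64_t)src_w * src_h * vscale * cpp * vrefresh;
        uint64_t hvs    = ((uint64_t)crtc_w * crtc_h * vrefresh) >> hvs_load_shift;

        printf("membus_load = %llu bytes/s\n", (unsigned long long)membus);
        printf("hvs_load    = %llu cycles/s\n", (unsigned long long)hvs);
        return 0;
}
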
+
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
*/
vc4_state->dlist_initialized = 1;
+ vc4_plane_calc_load(state);
+
return 0;
}
@@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- fence = reservation_object_get_excl_rcu(bo->resv);
+ fence = reservation_object_get_excl_rcu(bo->base.base.resv);
drm_atomic_set_fence_for_plane(state, fence);
if (plane->state->fb == state->fb)
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 931088014272..c0c5fadaf7e3 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -212,11 +212,11 @@
#define PV_HACT_ACT 0x30
+#define SCALER_CHANNELS_COUNT 3
+
#define SCALER_DISPCTRL 0x00000000
/* Global register for clock gating the HVS */
# define SCALER_DISPCTRL_ENABLE BIT(31)
-# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
-# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
@@ -224,45 +224,25 @@
* SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
* always enabled.
*/
-# define SCALER_DISPCTRL_DSP0EISLUR BIT(13)
-# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12)
-# define SCALER_DISPCTRL_DSP2EIEOF BIT(11)
-# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10)
-# define SCALER_DISPCTRL_DSP1EIEOF BIT(9)
+# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
/* Enables Display 0 end-of-line-N contribution to
* SCALER_DISPSTAT_IRQDISP0
*/
-# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8)
+# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
-# define SCALER_DISPCTRL_DSP0EIEOF BIT(7)
+# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
-# define SCALER_DISPCTRL_DISP2EIRQ BIT(3)
-# define SCALER_DISPCTRL_DISP1EIRQ BIT(2)
/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
* bits and short frames.
*/
-# define SCALER_DISPCTRL_DISP0EIRQ BIT(1)
+# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x))
/* Enables interrupt generation on scaler profiler interrupt. */
# define SCALER_DISPCTRL_SCLEIRQ BIT(0)
#define SCALER_DISPSTAT 0x00000004
-# define SCALER_DISPSTAT_COBLOW2 BIT(29)
-# define SCALER_DISPSTAT_EOLN2 BIT(28)
-# define SCALER_DISPSTAT_ESFRAME2 BIT(27)
-# define SCALER_DISPSTAT_ESLINE2 BIT(26)
-# define SCALER_DISPSTAT_EUFLOW2 BIT(25)
-# define SCALER_DISPSTAT_EOF2 BIT(24)
-
-# define SCALER_DISPSTAT_COBLOW1 BIT(21)
-# define SCALER_DISPSTAT_EOLN1 BIT(20)
-# define SCALER_DISPSTAT_ESFRAME1 BIT(19)
-# define SCALER_DISPSTAT_ESLINE1 BIT(18)
-# define SCALER_DISPSTAT_EUFLOW1 BIT(17)
-# define SCALER_DISPSTAT_EOF1 BIT(16)
-
# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14)
# define SCALER_DISPSTAT_RESP_SHIFT 14
# define SCALER_DISPSTAT_RESP_OKAY 0
@@ -270,23 +250,26 @@
# define SCALER_DISPSTAT_RESP_SLVERR 2
# define SCALER_DISPSTAT_RESP_DECERR 3
-# define SCALER_DISPSTAT_COBLOW0 BIT(13)
+# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8))
/* Set when the DISPEOLN line is done compositing. */
-# define SCALER_DISPSTAT_EOLN0 BIT(12)
+# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8))
/* Set when VSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESFRAME0 BIT(11)
+# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8))
/* Set when HSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESLINE0 BIT(10)
+# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8))
/* Set when the downstream tries to read from the display FIFO
* while it's empty.
*/
-# define SCALER_DISPSTAT_EUFLOW0 BIT(9)
+# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8))
/* Set when the display mode changes from RUN to EOF */
-# define SCALER_DISPSTAT_EOF0 BIT(8)
+# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8))
+
+# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \
+ 8 + ((x) * 8))
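
The parameterized DISPSTAT macros above encode the fact that each channel owns an 8-bit group starting at bit 8 + 8*x, with IRQMASK(x) spanning bits 8+8*x..13+8*x. A quick sketch that prints the resulting masks for the three channels (GENMASK re-implemented locally, assuming 32-bit unsigned int):

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

#define DISPSTAT_EUFLOW(x)   BIT(9 + ((x) * 8))
#define DISPSTAT_IRQMASK(x)  GENMASK(13 + ((x) * 8), 8 + ((x) * 8))

int main(void)
{
        for (int ch = 0; ch < 3; ch++)
                printf("ch%d: EUFLOW=0x%08x IRQMASK=0x%08x\n",
                       ch, DISPSTAT_EUFLOW(ch), DISPSTAT_IRQMASK(ch));
        return 0;
}
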
/* Set on AXI invalid DMA ID error. */
# define SCALER_DISPSTAT_DMA_ERROR BIT(7)
@@ -298,12 +281,10 @@
* SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY.
*/
# define SCALER_DISPSTAT_IRQDMA BIT(4)
-# define SCALER_DISPSTAT_IRQDISP2 BIT(3)
-# define SCALER_DISPSTAT_IRQDISP1 BIT(2)
/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their
* corresponding interrupt bit is enabled in DISPCTRL.
*/
-# define SCALER_DISPSTAT_IRQDISP0 BIT(1)
+# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x))
/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */
# define SCALER_DISPSTAT_IRQSCL BIT(0)
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 273984f71ae2..3c918eeaf56e 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -148,6 +148,12 @@ static void emit_tile(struct vc4_exec_info *exec,
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ /* Exec previous load. */
+ vc4_tile_coordinates(setup, x, y);
+ vc4_store_before_load(setup);
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
@@ -156,12 +162,6 @@ static void emit_tile(struct vc4_exec_info *exec,
&args->zs_read, x, y) |
VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
} else {
- if (setup->color_read) {
- /* Exec previous load. */
- vc4_tile_coordinates(setup, x, y);
- vc4_store_before_load(setup);
- }
-
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
rcl_u32(setup, setup->zs_read->paddr +
@@ -291,16 +291,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
} else {
- if (setup->color_read &&
- !(args->color_read.flags &
- VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
- loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
- loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
- }
loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index aa279b5b0de7..c8b89a78f9f4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -148,6 +148,7 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
+ struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -160,40 +161,14 @@ static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
return container_of(conn, struct vc4_txp, connector.base);
}
-#define TXP_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} txp_regs[] = {
- TXP_REG(TXP_DST_PTR),
- TXP_REG(TXP_DST_PITCH),
- TXP_REG(TXP_DIM),
- TXP_REG(TXP_DST_CTRL),
- TXP_REG(TXP_PROGRESS),
+static const struct debugfs_reg32 txp_regs[] = {
+ VC4_REG32(TXP_DST_PTR),
+ VC4_REG32(TXP_DST_PITCH),
+ VC4_REG32(TXP_DIM),
+ VC4_REG32(TXP_DST_CTRL),
+ VC4_REG32(TXP_PROGRESS),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_txp *txp = vc4->txp;
- int i;
-
- if (!txp)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- txp_regs[i].name, txp_regs[i].reg,
- TXP_READ(txp_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static int vc4_txp_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -249,7 +224,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *conn_state)
{
struct drm_crtc_state *crtc_state;
- struct drm_gem_cma_object *gem;
struct drm_framebuffer *fb;
int i;
@@ -275,8 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
if (i == ARRAY_SIZE(drm_fmts))
return -EINVAL;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
/* Pitch must be aligned on 16 bytes. */
if (fb->pitches[0] & GENMASK(3, 0))
return -EINVAL;
@@ -327,7 +299,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
- drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+ drm_writeback_queue_job(&txp->connector, conn_state);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -413,6 +385,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
+ txp->regset.base = txp->regs;
+ txp->regset.regs = txp_regs;
+ txp->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -431,6 +406,8 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, txp);
vc4->txp = txp;
+ vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e47e29426078..a4b6859e3af6 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -22,129 +22,145 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-#ifdef CONFIG_DEBUG_FS
-#define REGDEF(reg) { reg, #reg }
-static const struct {
- uint32_t reg;
- const char *name;
-} vc4_reg_defs[] = {
- REGDEF(V3D_IDENT0),
- REGDEF(V3D_IDENT1),
- REGDEF(V3D_IDENT2),
- REGDEF(V3D_SCRATCH),
- REGDEF(V3D_L2CACTL),
- REGDEF(V3D_SLCACTL),
- REGDEF(V3D_INTCTL),
- REGDEF(V3D_INTENA),
- REGDEF(V3D_INTDIS),
- REGDEF(V3D_CT0CS),
- REGDEF(V3D_CT1CS),
- REGDEF(V3D_CT0EA),
- REGDEF(V3D_CT1EA),
- REGDEF(V3D_CT0CA),
- REGDEF(V3D_CT1CA),
- REGDEF(V3D_CT00RA0),
- REGDEF(V3D_CT01RA0),
- REGDEF(V3D_CT0LC),
- REGDEF(V3D_CT1LC),
- REGDEF(V3D_CT0PC),
- REGDEF(V3D_CT1PC),
- REGDEF(V3D_PCS),
- REGDEF(V3D_BFC),
- REGDEF(V3D_RFC),
- REGDEF(V3D_BPCA),
- REGDEF(V3D_BPCS),
- REGDEF(V3D_BPOA),
- REGDEF(V3D_BPOS),
- REGDEF(V3D_BXCF),
- REGDEF(V3D_SQRSV0),
- REGDEF(V3D_SQRSV1),
- REGDEF(V3D_SQCNTL),
- REGDEF(V3D_SRQPC),
- REGDEF(V3D_SRQUA),
- REGDEF(V3D_SRQUL),
- REGDEF(V3D_SRQCS),
- REGDEF(V3D_VPACNTL),
- REGDEF(V3D_VPMBASE),
- REGDEF(V3D_PCTRC),
- REGDEF(V3D_PCTRE),
- REGDEF(V3D_PCTR(0)),
- REGDEF(V3D_PCTRS(0)),
- REGDEF(V3D_PCTR(1)),
- REGDEF(V3D_PCTRS(1)),
- REGDEF(V3D_PCTR(2)),
- REGDEF(V3D_PCTRS(2)),
- REGDEF(V3D_PCTR(3)),
- REGDEF(V3D_PCTRS(3)),
- REGDEF(V3D_PCTR(4)),
- REGDEF(V3D_PCTRS(4)),
- REGDEF(V3D_PCTR(5)),
- REGDEF(V3D_PCTRS(5)),
- REGDEF(V3D_PCTR(6)),
- REGDEF(V3D_PCTRS(6)),
- REGDEF(V3D_PCTR(7)),
- REGDEF(V3D_PCTRS(7)),
- REGDEF(V3D_PCTR(8)),
- REGDEF(V3D_PCTRS(8)),
- REGDEF(V3D_PCTR(9)),
- REGDEF(V3D_PCTRS(9)),
- REGDEF(V3D_PCTR(10)),
- REGDEF(V3D_PCTRS(10)),
- REGDEF(V3D_PCTR(11)),
- REGDEF(V3D_PCTRS(11)),
- REGDEF(V3D_PCTR(12)),
- REGDEF(V3D_PCTRS(12)),
- REGDEF(V3D_PCTR(13)),
- REGDEF(V3D_PCTRS(13)),
- REGDEF(V3D_PCTR(14)),
- REGDEF(V3D_PCTRS(14)),
- REGDEF(V3D_PCTR(15)),
- REGDEF(V3D_PCTRS(15)),
- REGDEF(V3D_DBGE),
- REGDEF(V3D_FDBGO),
- REGDEF(V3D_FDBGB),
- REGDEF(V3D_FDBGR),
- REGDEF(V3D_FDBGS),
- REGDEF(V3D_ERRSTAT),
+static const struct debugfs_reg32 v3d_regs[] = {
+ VC4_REG32(V3D_IDENT0),
+ VC4_REG32(V3D_IDENT1),
+ VC4_REG32(V3D_IDENT2),
+ VC4_REG32(V3D_SCRATCH),
+ VC4_REG32(V3D_L2CACTL),
+ VC4_REG32(V3D_SLCACTL),
+ VC4_REG32(V3D_INTCTL),
+ VC4_REG32(V3D_INTENA),
+ VC4_REG32(V3D_INTDIS),
+ VC4_REG32(V3D_CT0CS),
+ VC4_REG32(V3D_CT1CS),
+ VC4_REG32(V3D_CT0EA),
+ VC4_REG32(V3D_CT1EA),
+ VC4_REG32(V3D_CT0CA),
+ VC4_REG32(V3D_CT1CA),
+ VC4_REG32(V3D_CT00RA0),
+ VC4_REG32(V3D_CT01RA0),
+ VC4_REG32(V3D_CT0LC),
+ VC4_REG32(V3D_CT1LC),
+ VC4_REG32(V3D_CT0PC),
+ VC4_REG32(V3D_CT1PC),
+ VC4_REG32(V3D_PCS),
+ VC4_REG32(V3D_BFC),
+ VC4_REG32(V3D_RFC),
+ VC4_REG32(V3D_BPCA),
+ VC4_REG32(V3D_BPCS),
+ VC4_REG32(V3D_BPOA),
+ VC4_REG32(V3D_BPOS),
+ VC4_REG32(V3D_BXCF),
+ VC4_REG32(V3D_SQRSV0),
+ VC4_REG32(V3D_SQRSV1),
+ VC4_REG32(V3D_SQCNTL),
+ VC4_REG32(V3D_SRQPC),
+ VC4_REG32(V3D_SRQUA),
+ VC4_REG32(V3D_SRQUL),
+ VC4_REG32(V3D_SRQCS),
+ VC4_REG32(V3D_VPACNTL),
+ VC4_REG32(V3D_VPMBASE),
+ VC4_REG32(V3D_PCTRC),
+ VC4_REG32(V3D_PCTRE),
+ VC4_REG32(V3D_PCTR(0)),
+ VC4_REG32(V3D_PCTRS(0)),
+ VC4_REG32(V3D_PCTR(1)),
+ VC4_REG32(V3D_PCTRS(1)),
+ VC4_REG32(V3D_PCTR(2)),
+ VC4_REG32(V3D_PCTRS(2)),
+ VC4_REG32(V3D_PCTR(3)),
+ VC4_REG32(V3D_PCTRS(3)),
+ VC4_REG32(V3D_PCTR(4)),
+ VC4_REG32(V3D_PCTRS(4)),
+ VC4_REG32(V3D_PCTR(5)),
+ VC4_REG32(V3D_PCTRS(5)),
+ VC4_REG32(V3D_PCTR(6)),
+ VC4_REG32(V3D_PCTRS(6)),
+ VC4_REG32(V3D_PCTR(7)),
+ VC4_REG32(V3D_PCTRS(7)),
+ VC4_REG32(V3D_PCTR(8)),
+ VC4_REG32(V3D_PCTRS(8)),
+ VC4_REG32(V3D_PCTR(9)),
+ VC4_REG32(V3D_PCTRS(9)),
+ VC4_REG32(V3D_PCTR(10)),
+ VC4_REG32(V3D_PCTRS(10)),
+ VC4_REG32(V3D_PCTR(11)),
+ VC4_REG32(V3D_PCTRS(11)),
+ VC4_REG32(V3D_PCTR(12)),
+ VC4_REG32(V3D_PCTRS(12)),
+ VC4_REG32(V3D_PCTR(13)),
+ VC4_REG32(V3D_PCTRS(13)),
+ VC4_REG32(V3D_PCTR(14)),
+ VC4_REG32(V3D_PCTRS(14)),
+ VC4_REG32(V3D_PCTR(15)),
+ VC4_REG32(V3D_PCTRS(15)),
+ VC4_REG32(V3D_DBGE),
+ VC4_REG32(V3D_FDBGO),
+ VC4_REG32(V3D_FDBGB),
+ VC4_REG32(V3D_FDBGR),
+ VC4_REG32(V3D_FDBGS),
+ VC4_REG32(V3D_ERRSTAT),
};
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
- V3D_READ(vc4_reg_defs[i].reg));
+ int ret = vc4_v3d_pm_get(vc4);
+
+ if (ret == 0) {
+ uint32_t ident1 = V3D_READ(V3D_IDENT1);
+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+ seq_printf(m, "Revision: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
+ seq_printf(m, "Slices: %d\n", nslc);
+ seq_printf(m, "TMUs: %d\n", nslc * tups);
+ seq_printf(m, "QPUs: %d\n", nslc * qups);
+ seq_printf(m, "Semaphores: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ vc4_v3d_pm_put(vc4);
}
return 0;
}
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+/**
+ * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
+ * get the pm_runtime refcount to 0 in vc4_reset().
+ */
+int
+vc4_v3d_pm_get(struct vc4_dev *vc4)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- uint32_t ident1 = V3D_READ(V3D_IDENT1);
- uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
- uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
- uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
-
- seq_printf(m, "Revision: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
- seq_printf(m, "Slices: %d\n", nslc);
- seq_printf(m, "TMUs: %d\n", nslc * tups);
- seq_printf(m, "QPUs: %d\n", nslc * qups);
- seq_printf(m, "Semaphores: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ mutex_lock(&vc4->power_lock);
+ if (vc4->power_refcount++ == 0) {
+ int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+
+ if (ret < 0) {
+ vc4->power_refcount--;
+ mutex_unlock(&vc4->power_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&vc4->power_lock);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
+
+void
+vc4_v3d_pm_put(struct vc4_dev *vc4)
+{
+ mutex_lock(&vc4->power_lock);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
+ mutex_unlock(&vc4->power_lock);
+}
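
vc4_v3d_pm_get()/vc4_v3d_pm_put() wrap the runtime-PM get/put in a driver-side refcount guarded by power_lock, so the count can be driven to zero reliably during reset. The same refcount-under-a-lock pattern sketched in userspace, with printf standing in for the pm_runtime_* calls and a pthread mutex for the kernel lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t power_lock = PTHREAD_MUTEX_INITIALIZER;
static int power_refcount;

static int pm_get(void)
{
        pthread_mutex_lock(&power_lock);
        if (power_refcount++ == 0)
                printf("first user: power the block up\n");
        pthread_mutex_unlock(&power_lock);
        return 0;
}

static void pm_put(void)
{
        pthread_mutex_lock(&power_lock);
        if (--power_refcount == 0)
                printf("last user: allow autosuspend\n");
        pthread_mutex_unlock(&power_lock);
}

int main(void)
{
        pm_get();
        pm_get();   /* nested user keeps the block powered */
        pm_put();
        pm_put();   /* refcount hits zero, power may drop */
        return 0;
}
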
static void vc4_v3d_init_hw(struct drm_device *dev)
{
@@ -354,6 +370,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
v3d->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(v3d->regs))
return PTR_ERR(v3d->regs);
+ v3d->regset.base = v3d->regs;
+ v3d->regset.regs = v3d_regs;
+ v3d->regset.nregs = ARRAY_SIZE(v3d_regs);
vc4->v3d = v3d;
v3d->vc4 = vc4;
@@ -409,6 +428,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
+ vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
+ vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
+
return 0;
}
@@ -452,7 +474,7 @@ static int vc4_v3d_dev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id vc4_v3d_dt_match[] = {
+const struct of_device_id vc4_v3d_dt_match[] = {
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
{ .compatible = "brcm,vc4-v3d" },
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 858c3a483229..0a27e48fab31 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -176,6 +176,8 @@ struct vc4_vec {
struct clk *clock;
const struct vc4_vec_tv_mode *tv_mode;
+
+ struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
@@ -223,59 +225,33 @@ struct vc4_vec_tv_mode {
void (*mode_set)(struct vc4_vec *vec);
};
-#define VEC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} vec_regs[] = {
- VEC_REG(VEC_WSE_CONTROL),
- VEC_REG(VEC_WSE_WSS_DATA),
- VEC_REG(VEC_WSE_VPS_DATA1),
- VEC_REG(VEC_WSE_VPS_CONTROL),
- VEC_REG(VEC_REVID),
- VEC_REG(VEC_CONFIG0),
- VEC_REG(VEC_SCHPH),
- VEC_REG(VEC_CLMP0_START),
- VEC_REG(VEC_CLMP0_END),
- VEC_REG(VEC_FREQ3_2),
- VEC_REG(VEC_FREQ1_0),
- VEC_REG(VEC_CONFIG1),
- VEC_REG(VEC_CONFIG2),
- VEC_REG(VEC_INTERRUPT_CONTROL),
- VEC_REG(VEC_INTERRUPT_STATUS),
- VEC_REG(VEC_FCW_SECAM_B),
- VEC_REG(VEC_SECAM_GAIN_VAL),
- VEC_REG(VEC_CONFIG3),
- VEC_REG(VEC_STATUS0),
- VEC_REG(VEC_MASK0),
- VEC_REG(VEC_CFG),
- VEC_REG(VEC_DAC_TEST),
- VEC_REG(VEC_DAC_CONFIG),
- VEC_REG(VEC_DAC_MISC),
+static const struct debugfs_reg32 vec_regs[] = {
+ VC4_REG32(VEC_WSE_CONTROL),
+ VC4_REG32(VEC_WSE_WSS_DATA),
+ VC4_REG32(VEC_WSE_VPS_DATA1),
+ VC4_REG32(VEC_WSE_VPS_CONTROL),
+ VC4_REG32(VEC_REVID),
+ VC4_REG32(VEC_CONFIG0),
+ VC4_REG32(VEC_SCHPH),
+ VC4_REG32(VEC_CLMP0_START),
+ VC4_REG32(VEC_CLMP0_END),
+ VC4_REG32(VEC_FREQ3_2),
+ VC4_REG32(VEC_FREQ1_0),
+ VC4_REG32(VEC_CONFIG1),
+ VC4_REG32(VEC_CONFIG2),
+ VC4_REG32(VEC_INTERRUPT_CONTROL),
+ VC4_REG32(VEC_INTERRUPT_STATUS),
+ VC4_REG32(VEC_FCW_SECAM_B),
+ VC4_REG32(VEC_SECAM_GAIN_VAL),
+ VC4_REG32(VEC_CONFIG3),
+ VC4_REG32(VEC_STATUS0),
+ VC4_REG32(VEC_MASK0),
+ VC4_REG32(VEC_CFG),
+ VC4_REG32(VEC_DAC_TEST),
+ VC4_REG32(VEC_DAC_CONFIG),
+ VC4_REG32(VEC_DAC_MISC),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_vec *vec = vc4->vec;
- int i;
-
- if (!vec)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(vec_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vec_regs[i].name, vec_regs[i].reg,
- VEC_READ(vec_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
{
VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
@@ -587,6 +563,9 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vec->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vec->regs))
return PTR_ERR(vec->regs);
+ vec->regset.base = vec->regs;
+ vec->regset.regs = vec_regs;
+ vec->regset.nregs = ARRAY_SIZE(vec_regs);
vec->clock = devm_clk_get(dev, NULL);
if (IS_ERR(vec->clock)) {
@@ -612,6 +591,8 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vc4->vec = vec;
+ vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 8bf3a7c23ed3..062067438f1d 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -243,7 +243,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (NULL == vsg->pages)
return -ENOMEM;
ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->num_pages,
+ vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 73dc99046c43..ed0fcda713c3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,6 +28,30 @@
#include "virtgpu_drv.h"
+static void virtio_add_bool(struct seq_file *m, const char *name,
+ bool value)
+{
+ seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
+}
+
+static void virtio_add_int(struct seq_file *m, const char *name,
+ int value)
+{
+ seq_printf(m, "%-16s : %d\n", name, value);
+}
+
+static int virtio_gpu_features(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+ virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ return 0;
+}
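
virtio_gpu_features() prints one "name : value" line per capability through the two small helpers above; the %-16s conversion keeps the value column aligned. A trivial userspace rendition of that formatting, with made-up values:

#include <stdbool.h>
#include <stdio.h>

static void add_bool(const char *name, bool value)
{
        printf("%-16s : %s\n", name, value ? "yes" : "no");
}

static void add_int(const char *name, int value)
{
        printf("%-16s : %d\n", name, value);
}

int main(void)
{
        add_bool("virgl", true);
        add_bool("edid", false);
        add_int("cap sets", 2);
        add_int("scanouts", 1);
        return 0;
}
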
+
static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{
@@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
}
static struct drm_info_list virtio_gpu_debugfs_list[] = {
- { "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+ { "virtio-gpu-features", virtio_gpu_features },
+ { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 653ec7d0bf4d..86843a4d6102 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -385,5 +385,6 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
+ drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index af92964b6889..c50868753132 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -209,8 +209,6 @@ static struct drm_driver driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = virtgpu_gem_prime_pin,
- .gem_prime_unpin = virtgpu_gem_prime_unpin,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d577cb76f5ad..b69ae10ca238 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -50,6 +50,23 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
+struct virtio_gpu_object_params {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ unsigned long size;
+ bool dumb;
+ /* 3d */
+ bool virgl;
+ uint32_t target;
+ uint32_t bind;
+ uint32_t depth;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t nr_samples;
+ uint32_t flags;
+};
+
struct virtio_gpu_object {
struct drm_gem_object gem_base;
uint32_t hw_res_handle;
@@ -204,6 +221,9 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head);
+void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -217,16 +237,17 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned);
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -243,9 +264,8 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -304,7 +324,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -332,6 +353,7 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -342,8 +364,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
@@ -352,8 +375,6 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 21bd4c4a32d1..87d1966192f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,7 +36,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_signaled(struct dma_fence *f)
+bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -62,7 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_signaled,
+ .signaled = virtio_fence_signaled,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f06586393974..1e49e08dd545 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -34,15 +34,16 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
virtio_gpu_object_unref(&obj);
}
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned)
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
- ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
if (ret)
return ERR_PTR(ret);
@@ -51,7 +52,7 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
@@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, size, false, false);
+ obj = virtio_gpu_alloc_object(dev, params, NULL);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -82,12 +83,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
+ struct virtio_gpu_object_params params = { 0 };
int ret;
uint32_t pitch;
- uint32_t format;
if (args->bpp != 32)
return -EINVAL;
@@ -96,22 +95,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+ params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
+ params.width = args->width;
+ params.height = args->height;
+ params.size = args->size;
+ params.dumb = true;
+ ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
&args->handle);
if (ret)
goto fail;
- format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- obj = gem_to_virtio_gpu_obj(gobj);
- virtio_gpu_cmd_create_resource(vgdev, obj, format,
- args->width, args->height);
-
- /* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, NULL);
- if (ret)
- goto fail;
-
- obj->dumb = true;
args->pitch = pitch;
return ret;
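
The rewritten dumb_create path above computes the pitch, rounds the total size up to a page, and hands everything to virtio_gpu_gem_create() through the new params struct. The sizing arithmetic in isolation, with the 4-bytes-per-pixel pitch following from the 32 bpp check in the hunk and PAGE_SIZE assumed to be 4096 for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096u
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        uint32_t width = 1000, height = 600, bpp = 32;
        uint32_t pitch = width * (bpp / 8);                  /* 4000 bytes */
        uint32_t size  = ALIGN(pitch * height, PAGE_SIZE);   /* 2400000 -> 2400256 */

        printf("pitch = %u bytes, size = %u bytes (%u pages)\n",
               pitch, size, size / PAGE_SIZE);
        return 0;
}
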
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 14ce8188c052..949a264985fc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,8 +54,8 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head)
{
struct ttm_operation_ctx ctx = { false, false };
struct ttm_validate_buffer *buf;
@@ -79,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
return 0;
}
-static void virtio_gpu_unref_list(struct list_head *head)
+void virtio_gpu_unref_list(struct list_head *head)
{
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
@@ -275,16 +275,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
+ struct virtio_gpu_fence *fence;
int ret;
struct virtio_gpu_object *qobj;
struct drm_gem_object *obj;
uint32_t handle = 0;
- uint32_t size;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct virtio_gpu_fence *fence = NULL;
- struct ww_acquire_ctx ticket;
- struct virtio_gpu_resource_create_3d rc_3d;
+ struct virtio_gpu_object_params params = { 0 };
if (vgdev->has_virgl_3d == false) {
if (rc->depth > 1)
@@ -299,94 +295,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- size = rc->size;
-
+ params.format = rc->format;
+ params.width = rc->width;
+ params.height = rc->height;
+ params.size = rc->size;
+ if (vgdev->has_virgl_3d) {
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ }
/* allocate a single page size object */
- if (size == 0)
- size = PAGE_SIZE;
+ if (params.size == 0)
+ params.size = PAGE_SIZE;
- qobj = virtio_gpu_alloc_object(dev, size, false, false);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence)
+ return -ENOMEM;
+ qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ dma_fence_put(&fence->f);
if (IS_ERR(qobj))
return PTR_ERR(qobj);
obj = &qobj->gem_base;
- if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
- rc->width, rc->height);
-
- ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
- } else {
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&qobj->gem_base);
- mainbuf.bo = &qobj->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret) {
- DRM_DEBUG("failed to validate\n");
- goto fail_unref;
- }
-
- rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
- rc_3d.target = cpu_to_le32(rc->target);
- rc_3d.format = cpu_to_le32(rc->format);
- rc_3d.bind = cpu_to_le32(rc->bind);
- rc_3d.width = cpu_to_le32(rc->width);
- rc_3d.height = cpu_to_le32(rc->height);
- rc_3d.depth = cpu_to_le32(rc->depth);
- rc_3d.array_size = cpu_to_le32(rc->array_size);
- rc_3d.last_level = cpu_to_le32(rc->last_level);
- rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
- rc_3d.flags = cpu_to_le32(rc->flags);
-
- fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto fail_backoff;
- }
-
- virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
- ret = virtio_gpu_object_attach(vgdev, qobj, fence);
- if (ret) {
- dma_fence_put(&fence->f);
- goto fail_backoff;
- }
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
- }
-
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
-
drm_gem_object_release(obj);
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return ret;
}
drm_gem_object_put_unlocked(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
-
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return 0;
-fail_backoff:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-fail_unref:
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
-//fail_obj:
-// drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e7e946035027..b2da31310d24 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/ttm/ttm_execbuf_util.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
@@ -74,39 +76,34 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
- bool pinned)
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
u32 c = 1;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
vgbo->placement.placement = &vgbo->placement_code;
vgbo->placement.busy_placement = &vgbo->placement_code;
vgbo->placement_code.fpfn = 0;
vgbo->placement_code.lpfn = 0;
vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_NO_EVICT;
vgbo->placement.num_placement = c;
vgbo->placement.num_busy_placement = c;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_object *bo;
- enum ttm_bo_type type;
size_t acc_size;
int ret;
- if (kernel)
- type = ttm_bo_type_kernel;
- else
- type = ttm_bo_type_device;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+ acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
sizeof(struct virtio_gpu_object));
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
@@ -117,23 +114,62 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
kfree(bo);
return ret;
}
- size = roundup(size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
+ params->size = roundup(params->size, PAGE_SIZE);
+ ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
if (ret != 0) {
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
return ret;
}
- bo->dumb = false;
- virtio_gpu_init_ttm_placement(bo, pinned);
+ bo->dumb = params->dumb;
+
+ if (params->virgl) {
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ } else {
+ virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ }
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, acc_size,
- NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+ virtio_gpu_init_ttm_placement(bo);
+ ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
+ ttm_bo_type_device, &bo->placement, 0,
+ true, acc_size, NULL, NULL,
+ &virtio_gpu_ttm_bo_destroy);
/* ttm_bo_init failure will call the destroy */
if (ret != 0)
return ret;
+ if (fence) {
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct list_head validate_list;
+ struct ttm_validate_buffer mainbuf;
+ struct ww_acquire_ctx ticket;
+ unsigned long irq_flags;
+ bool signaled;
+
+ INIT_LIST_HEAD(&validate_list);
+ memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+ /* use a gem reference since unref list undoes them */
+ drm_gem_object_get(&bo->gem_base);
+ mainbuf.bo = &bo->tbo;
+ list_add(&mainbuf.head, &validate_list);
+
+ ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+ if (ret == 0) {
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ signaled = virtio_fence_signaled(&fence->f);
+ if (!signaled)
+ /* virtio create command still in flight */
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list,
+ &fence->f);
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+ if (signaled)
+ /* virtio create command finished */
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
+ }
+ virtio_gpu_unref_list(&validate_list);
+ }
+
*bo_ptr = bo;
return 0;
}
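
The tail of virtio_gpu_object_create() above only fences the buffer if the create command is still in flight: the signaled check and the decision to fence or back off happen under the fence driver lock so the IRQ path cannot signal in between. An abstract userspace sketch of that decision, with a pthread mutex standing in for the spinlock and the buffer/fence machinery reduced to a flag:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static bool fence_signaled;        /* set by the (simulated) IRQ path */

static void attach_or_backoff(void)
{
        bool signaled;

        pthread_mutex_lock(&fence_lock);
        signaled = fence_signaled;
        if (!signaled)
                printf("create still in flight: fence the buffer\n");
        pthread_mutex_unlock(&fence_lock);

        if (signaled)
                printf("create already done: back off the reservation\n");
}

int main(void)
{
        attach_or_backoff();            /* fence path */
        fence_signaled = true;
        attach_or_backoff();            /* backoff path */
        return 0;
}
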
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index eb51a78e1199..8fbf71bd0c5e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -28,20 +28,16 @@
* device that might share buffers with virtgpu
*/
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
-}
+ if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
+ /* should not happen */
+ return ERR_PTR(-EINVAL);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- return ERR_PTR(-ENODEV);
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
@@ -68,7 +64,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
}
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *area)
+ struct vm_area_struct *vma)
{
- return -ENODEV;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
+ return drm_gem_prime_mmap(obj, vma);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 4bfbf25fabff..300ef3a83538 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -37,8 +37,6 @@
#include <linux/delay.h>
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
@@ -116,10 +114,6 @@ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct virtio_gpu_device *vgdev;
-
- vgdev = virtio_gpu_get_vgdev(bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -194,42 +188,45 @@ static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
*/
struct virtio_gpu_ttm_tt {
struct ttm_dma_tt ttm;
- struct virtio_gpu_device *vgdev;
- u64 offset;
+ struct virtio_gpu_object *obj;
};
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages)
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
-
- /* Not implemented */
+ virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
return 0;
}
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
{
- /* Not implemented */
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
+
+ virtio_gpu_object_detach(vgdev, gtt->obj);
return 0;
}
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func virtio_gpu_backend_func = {
- .bind = &virtio_gpu_ttm_backend_bind,
- .unbind = &virtio_gpu_ttm_backend_unbind,
- .destroy = &virtio_gpu_ttm_backend_destroy,
+static struct ttm_backend_func virtio_gpu_tt_func = {
+ .bind = &virtio_gpu_ttm_tt_bind,
+ .unbind = &virtio_gpu_ttm_tt_unbind,
+ .destroy = &virtio_gpu_ttm_tt_destroy,
};
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -242,8 +239,8 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_backend_func;
- gtt->vgdev = vgdev;
+ gtt->ttm.ttm.func = &virtio_gpu_tt_func;
+ gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
@@ -251,58 +248,11 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm.ttm;
}
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
-{
- int ret;
-
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret)
- return ret;
-
- virtio_gpu_move_null(bo, new_mem);
- return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
- bool evict,
- struct ttm_mem_reg *new_mem)
-{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
- if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
- if (bo->hw_res_handle)
- virtio_gpu_object_detach(vgdev, bo);
-
- } else if (new_mem->placement & TTM_PL_FLAG_TT) {
- if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, NULL);
- }
- }
-}
-
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
@@ -314,11 +264,9 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.init_mem_type = &virtio_gpu_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
- .move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
@@ -330,7 +278,7 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
r = ttm_bo_device_init(&vgdev->mman.bdev,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
goto err_dev_init;
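
For illustration only, and not part of the patch: a minimal user-space sketch of the container_of() pattern the virtio hunks above now rely on. The bind/unbind hooks receive only the embedded ttm_tt, and the driver recovers its wrapper object (and from that the device) by subtracting the member offset. The names toy_tt, toy_gpu_tt and toy_bind are invented for the example.

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(), expressed with offsetof(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_tt {                 /* stands in for struct ttm_tt */
        unsigned long num_pages;
};

struct toy_gpu_tt {             /* stands in for struct virtio_gpu_ttm_tt */
        struct toy_tt tt;       /* embedded member, like gtt->ttm.ttm */
        int obj_id;             /* stands in for gtt->obj */
};

/* Called with only the embedded member, like the ->bind()/->unbind() hooks. */
static void toy_bind(struct toy_tt *tt)
{
        struct toy_gpu_tt *gtt = container_of(tt, struct toy_gpu_tt, tt);

        printf("binding object %d (%lu pages)\n", gtt->obj_id, tt->num_pages);
}

int main(void)
{
        struct toy_gpu_tt gtt = { .tt = { .num_pages = 4 }, .obj_id = 7 };

        toy_bind(&gtt.tt);      /* recovers &gtt from &gtt.tt */
        return 0;
}
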
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6bc2008b0d0d..e62fe24b1a2e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -376,9 +376,8 @@ retry:
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -388,11 +387,11 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->format = cpu_to_le32(format);
- cmd_p->width = cpu_to_le32(width);
- cmd_p->height = cpu_to_le32(height);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -828,7 +827,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -836,11 +836,21 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
- *cmd_p = *rc_3d;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
- cmd_p->hdr.flags = 0;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
+
+ cmd_p->target = cpu_to_le32(params->target);
+ cmd_p->bind = cpu_to_le32(params->bind);
+ cmd_p->depth = cpu_to_le32(params->depth);
+ cmd_p->array_size = cpu_to_le32(params->array_size);
+ cmd_p->last_level = cpu_to_le32(params->last_level);
+ cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+ cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -924,8 +934,8 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, nents;
- if (!obj->created)
- return 0;
+ if (WARN_ON_ONCE(!obj->created))
+ return -EINVAL;
if (!obj->pages) {
int ret;
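
As a rough sketch of the refactor above, which folds the separate format/width/height arguments into a single params struct and converts each field to little endian before it is queued: the program below uses htole32() from glibc's <endian.h> as a stand-in for the kernel's cpu_to_le32(), and all struct and function names are invented for the example.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct virtio_gpu_object_params (host byte order). */
struct toy_params {
        uint32_t format;
        uint32_t width;
        uint32_t height;
};

/* Stand-in for the 2D create command as it is queued (little endian). */
struct toy_create_2d {
        uint32_t resource_id;
        uint32_t format;
        uint32_t width;
        uint32_t height;
};

static void toy_fill_cmd(struct toy_create_2d *cmd, uint32_t res_id,
                         const struct toy_params *p)
{
        cmd->resource_id = htole32(res_id);
        cmd->format      = htole32(p->format);
        cmd->width       = htole32(p->width);
        cmd->height      = htole32(p->height);
}

int main(void)
{
        struct toy_params p = { .format = 67, .width = 1024, .height = 768 };
        struct toy_create_2d cmd;

        toy_fill_cmd(&cmd, 5, &p);
        printf("wire width field: 0x%08x\n", (unsigned int)cmd.width);
        return 0;
}
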
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 8a9aeb0a9ea8..bb66dbcd5e3f 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -219,6 +219,8 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->state_lock);
vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+ if (!vkms_out->crc_workq)
+ return -ENOMEM;
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 0b9ee7fb45d6..66e14e38d5e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -499,12 +499,9 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -534,12 +531,9 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -576,12 +570,9 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
cmd->header.size = sizeof(cmd->body);
@@ -610,12 +601,10 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
@@ -641,12 +630,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
cmd->header.size = sizeof(cmd->body);
@@ -768,12 +754,9 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader"
- " resource binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -807,12 +790,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX render-target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -894,12 +874,9 @@ static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX SO target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
cmd->header.size = sizeof(cmd->body) + so_target_size;
@@ -1011,12 +988,9 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
cmd->header.size = sizeof(cmd->body) + set_vb_size;
@@ -1167,12 +1141,10 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX index buffer "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
cmd->header.size = sizeof(cmd->body);
if (rebind) {
@@ -1269,6 +1241,32 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
vmw_binding_drop(entry);
}
+/**
+ * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
+ * @binding_type: The binding type
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * context binding referencing it, we need to determine whether that resource
+ * will be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently rendertarget-, depth-stencil-, and
+ * stream-output-target bindings are capable of dirtying their resources.
+ *
+ * Return: Whether the binding type dirties the resource its binding points to.
+ */
+u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
+{
+ static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Review this function as new bindings are added. */
+ BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+ return is_binding_dirtying[binding_type];
+}
+
/*
* This function is unused at run-time, and only used to hold various build
* asserts important for code optimization assumptions.
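
A stand-alone sketch of the lookup-table idiom used by vmw_binding_dirtying() above: an array indexed by the enum via C99 designated initializers, with a compile-time size check playing the role of BUILD_BUG_ON(). The enum values and names are invented for the example and do not mirror the real vmw_ctx_binding_type list.

#include <assert.h>
#include <stdio.h>

enum toy_binding_type {
        TOY_BIND_SHADER,
        TOY_BIND_RENDER_TARGET,
        TOY_BIND_DEPTH_STENCIL,
        TOY_BIND_MAX
};

#define TOY_DIRTY_NONE 0u
#define TOY_DIRTY_SET  1u

/* Entries not listed default to 0, i.e. TOY_DIRTY_NONE. */
static const unsigned int toy_is_binding_dirtying[TOY_BIND_MAX] = {
        [TOY_BIND_RENDER_TARGET] = TOY_DIRTY_SET,
        [TOY_BIND_DEPTH_STENCIL] = TOY_DIRTY_SET,
};

/* Review the table when binding types are added (BUILD_BUG_ON analog). */
static_assert(TOY_BIND_MAX == 3, "update toy_is_binding_dirtying[]");

static unsigned int toy_binding_dirtying(enum toy_binding_type t)
{
        return toy_is_binding_dirtying[t];
}

int main(void)
{
        printf("shader dirties: %u, render target dirties: %u\n",
               toy_binding_dirtying(TOY_BIND_SHADER),
               toy_binding_dirtying(TOY_BIND_RENDER_TARGET));
        return 0;
}
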
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index 6a2a9d69043b..f6ab79d23923 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -205,5 +205,7 @@ extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+extern u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type);
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 70dab55e7888..56979e412ca8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -393,6 +393,7 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
+ WARN_ONCE(true, "Command buffer error.\n");
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
@@ -511,17 +512,14 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart[SVGA_CB_CONTEXT_MAX];
bool send_fence = false;
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
bool global_block = false;
- for_each_cmdbuf_ctx(man, i, ctx) {
+ for_each_cmdbuf_ctx(man, i, ctx)
INIT_LIST_HEAD(&restart_head[i]);
- restart[i] = false;
- }
mutex_lock(&man->error_mutex);
spin_lock(&man->lock);
@@ -533,23 +531,23 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
const char *cmd_name;
list_del_init(&entry->list);
- restart[entry->cb_context] = true;
global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
- DRM_ERROR("Unknown command causing device error.\n");
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Unknown command causing device error.\n");
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
- DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
- DRM_ERROR("Command size is %lu\n",
- (unsigned long) error_cmd_size);
+ VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+ cmd_name);
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
new_start_offset = cb_hdr->errorOffset + error_cmd_size;
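
The WARN_ONCE() added in this file fires only on the first occurrence of the condition. A very loose user-space approximation of that "report once, stay quiet afterwards" behaviour (the real macro also dumps a stack trace) can be built from a static flag, using the GCC-style ##__VA_ARGS__ the kernel itself relies on; toy_process_error() is invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Loose analog of WARN_ONCE(): print the message only the first time. */
#define toy_warn_once(fmt, ...)                                 \
        do {                                                    \
                static bool __warned;                           \
                if (!__warned) {                                \
                        __warned = true;                        \
                        fprintf(stderr, fmt, ##__VA_ARGS__);    \
                }                                               \
        } while (0)

static void toy_process_error(int status)
{
        toy_warn_once("command buffer error (status %d)\n", status);
        /* ...then queue the buffer for restart, as the work function does. */
}

int main(void)
{
        toy_process_error(3);
        toy_process_error(3);   /* second call is silent */
        return 0;
}
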
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 14bd760a62fd..63f111068a44 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -156,12 +156,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
cmd->header.size = sizeof(cmd->body);
@@ -210,7 +207,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ if (IS_ERR(uctx->cotables[i])) {
ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
@@ -259,9 +256,8 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
@@ -311,10 +307,8 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -345,12 +339,10 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -391,10 +383,8 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -441,12 +431,9 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -487,10 +474,8 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -521,12 +506,9 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -615,10 +597,8 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -665,12 +645,9 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -751,7 +728,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
int ret;
if (!dev_priv->has_dx && dx) {
- DRM_ERROR("DX contexts not supported by device.\n");
+ VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 44f3f6f107d3..b4f6e1217c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -171,12 +171,9 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
lockdep_assert_held(&bo->resv->lock.base);
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
@@ -262,12 +259,9 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
- if (!cmd1) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "unbinding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (!cmd1)
return -ENOMEM;
- }
vcotbl->size_read_back = 0;
if (readback) {
@@ -351,13 +345,10 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "readback.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1bfa353d995c..bf6c3500d363 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -828,7 +828,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index accb2fafe2f1..96983c47fb40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -48,7 +48,6 @@
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
-#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
@@ -700,6 +699,8 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
@@ -812,7 +813,6 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
-extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
@@ -828,6 +828,18 @@ extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
bool interruptible);
+#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+({ \
+ vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
+ __func__, (unsigned int) __bytes); \
+ NULL; \
+ }); \
+})
+
+#define VMW_FIFO_RESERVE(__priv, __bytes) \
+ VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
@@ -1312,6 +1324,20 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+/* VMW logging */
+
+/**
+ * VMW_DEBUG_USER - Debug output for user-space debugging.
+ *
+ * @fmt: printf() like format string.
+ *
+ * This macro is for logging user-space error and debugging messages, e.g.
+ * command buffer execution errors due to malformed commands, invalid context,
+ * etc.
+ */
+#define VMW_DEBUG_USER(fmt, ...) \
+ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
/**
* Inline helper functions
*/
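
The VMW_FIFO_RESERVE_DX()/VMW_FIFO_RESERVE() helpers above centralise the "log an error and hand back NULL" handling that every caller used to open-code. A minimal stand-alone sketch of the same shape, relying on GNU statement expressions and the `x ? : y` extension (so it needs GCC or clang); toy_reserve() is a made-up stand-in for vmw_fifo_reserve_dx().

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the FIFO reserve call; returns NULL to simulate failure. */
static void *toy_reserve(size_t bytes)
{
        (void)bytes;
        return NULL;
}

/*
 * Same shape as VMW_FIFO_RESERVE(): on failure the error is logged once,
 * inside the macro, and NULL is propagated to the caller.
 */
#define TOY_RESERVE(bytes)                                              \
({                                                                      \
        toy_reserve(bytes) ? : ({                                       \
                fprintf(stderr, "reserve failed in %s for %zu bytes\n", \
                        __func__, (size_t)(bytes));                     \
                NULL;                                                   \
        });                                                             \
})

int main(void)
{
        void *cmd = TOY_RESERVE(64);

        if (!cmd)
                return EXIT_FAILURE;
        return EXIT_SUCCESS;
}
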
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 88b8178d4687..2ff7ba04d8c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -36,6 +36,25 @@
#define VMW_RES_HT_ORDER 12
/*
+ * Helper macro to get dx_ctx_node if available, otherwise print an error
+ * message. This is for use in command verifier functions, where the command is
+ * invalid if dx_ctx_node is not set.
+ */
+#define VMW_GET_CTX_NODE(__sw_context) \
+({ \
+ __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
+ VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
+ __sw_context->dx_ctx_node; \
+ }); \
+})
+
+#define VMW_DECLARE_CMD_VAR(__var, __type) \
+ struct { \
+ SVGA3dCmdHeader header; \
+ __type body; \
+ } __var
+
+/**
* struct vmw_relocation - Buffer object relocation
*
* @head: List head for the command submission context's relocation list
@@ -59,9 +78,8 @@ struct vmw_relocation {
* command stream is replaced with the actual id after validation.
* @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
* with a NOP.
- * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
- * after validation is -1, the command is replaced with a NOP. Otherwise no
- * action.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
+ * validation is -1, the command is replaced with a NOP. Otherwise no action.
*/
enum vmw_resource_relocation_type {
vmw_res_rel_normal,
@@ -75,8 +93,8 @@ enum vmw_resource_relocation_type {
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of single byte entries into the command buffer where the
- * id that needs fixup is located.
+ * @offset: Offset of single byte entries into the command buffer where the id
+ * that needs fixup is located.
* @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
@@ -86,8 +104,9 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
-/*
+/**
* struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ *
* @head: List head of context list
* @ctx: The context resource
* @cur: The context's persistent binding state
@@ -142,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b)
/**
* vmw_execbuf_bindings_commit - Commit modified binding state
+ *
* @sw_context: The command submission context
- * @backoff: Whether this is part of the error path and binding state
- * changes should be ignored
+ * @backoff: Whether this is part of the error path and binding state changes
+ * should be ignored
*/
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff)
@@ -154,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
list_for_each_entry(entry, &sw_context->ctx_list, head) {
if (!backoff)
vmw_binding_state_commit(entry->cur, entry->staged);
+
if (entry->staged != sw_context->staged_bindings)
vmw_binding_state_free(entry->staged);
else
@@ -166,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
/**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ *
* @sw_context: The command submission context
*/
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
@@ -176,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
- * added to the validate list.
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
+ * the validate list.
*
* @dev_priv: Pointer to the device private:
* @sw_context: The command submission context
@@ -195,11 +217,8 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
goto out_err;
if (!sw_context->staged_bindings) {
- sw_context->staged_bindings =
- vmw_binding_state_alloc(dev_priv);
+ sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(sw_context->staged_bindings)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(sw_context->staged_bindings);
sw_context->staged_bindings = NULL;
goto out_err;
@@ -209,8 +228,6 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
if (sw_context->staged_bindings_inuse) {
node->staged = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(node->staged);
node->staged = NULL;
goto out_err;
@@ -225,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
list_add_tail(&node->head, &sw_context->ctx_list);
return 0;
+
out_err:
return ret;
}
/**
- * vmw_execbuf_res_size - calculate extra size fore the resource validation
- * node
+ * vmw_execbuf_res_size - calculate extra size for the resource validation node
+ *
* @dev_priv: Pointer to the device private struct.
* @res_type: The resource type.
*
- * Guest-backed contexts and DX contexts require extra size to store
- * execbuf private information in the validation node. Typically the
- * binding manager associated data structures.
+ * Guest-backed contexts and DX contexts require extra size to store execbuf
+ * private information in the validation node. Typically the binding manager
+ * associated data structures.
*
* Returns: The extra size requirement based on resource type.
*/
@@ -254,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
*
* @rcache: Pointer to the entry to update.
* @res: Pointer to the resource.
- * @private: Pointer to the execbuf-private space in the resource
- * validation node.
+ * @private: Pointer to the execbuf-private space in the resource validation
+ * node.
*/
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
struct vmw_resource *res,
@@ -268,17 +286,19 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
}
/**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an
- * unreferenced rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+ * rcu-protected pointer to the validation list.
+ *
* @sw_context: Pointer to the software context.
* @res: Unreferenced rcu-protected pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
- * Returns: 0 on success. Negative error code on failure. Typical error
- * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
- * doomed.
+ * Returns: 0 on success. Negative error code on failure. Typical error codes
+ * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
*/
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -290,13 +310,17 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
rcache = &sw_context->res_cache[res_type];
if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
vmw_user_resource_noref_release();
return 0;
}
priv_size = vmw_execbuf_res_size(dev_priv, res_type);
ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
- (void **)&ctx_info, &first_usage);
+ dirty, (void **)&ctx_info,
+ &first_usage);
vmw_user_resource_noref_release();
if (ret)
return ret;
@@ -304,8 +328,10 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
if (priv_size && first_usage) {
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
ctx_info);
- if (ret)
+ if (ret) {
+ VMW_DEBUG_USER("Failed first usage context setup.\n");
return ret;
+ }
}
vmw_execbuf_rcache_update(rcache, res, ctx_info);
@@ -315,13 +341,16 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
/**
* vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
* validation list if it's not already on it
+ *
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
* Returns: Zero on success. Negative error code on failure.
*/
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_res_cache_entry *rcache;
enum vmw_res_type res_type = vmw_res_type(res);
@@ -329,10 +358,15 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
int ret;
rcache = &sw_context->res_cache[res_type];
- if (likely(rcache->valid && rcache->res == res))
+ if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
return 0;
+ }
- ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+ &ptr, NULL);
if (ret)
return ret;
@@ -342,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
}
/**
- * vmw_view_res_val_add - Add a view and the surface it's pointing to
- * to the validation list
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
+ * validation list
*
* @sw_context: The software context holding the validation list.
* @view: Pointer to the view resource.
@@ -356,27 +390,29 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
int ret;
/*
- * First add the resource the view is pointing to, otherwise
- * it may be swapped out when the view is validated.
+ * First add the resource the view is pointing to, otherwise it may be
+ * swapped out when the view is validated.
*/
- ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+ vmw_view_dirtying(view));
if (ret)
return ret;
- return vmw_execbuf_res_noctx_val_add(sw_context, view);
+ return vmw_execbuf_res_noctx_val_add(sw_context, view,
+ VMW_RES_DIRTY_NONE);
}
/**
- * vmw_view_id_val_add - Look up a view and add it and the surface it's
- * pointing to to the validation list.
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
+ * to, to the validation list.
*
* @sw_context: The software context holding the validation list.
* @view_type: The view type to look up.
* @id: view id of the view.
*
- * The view is represented by a view id and the DX context it's created on,
- * or scheduled for creation on. If there is no DX context set, the function
- * will return an -EINVAL error pointer.
+ * The view is represented by a view id and the DX context it's created on, or
+ * scheduled for creation on. If there is no DX context set, the function will
+ * return an -EINVAL error pointer.
*
* Returns: Unreferenced pointer to the resource on success, negative error
* pointer on failure.
@@ -389,10 +425,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return ERR_PTR(-EINVAL);
- }
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
@@ -413,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
- * This function puts all resources that were previously bound to @ctx on
- * the resource validation list. This is part of the context state reemission
+ * This function puts all resources that were previously bound to @ctx on the
+ * resource validation list. This is part of the context state reemission
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -433,13 +467,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
}
}
-
/* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
@@ -448,8 +482,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_execbuf_res_noctx_val_add(sw_context,
- entry->res);
+ ret = vmw_execbuf_res_noctx_val_add
+ (sw_context, entry->res,
+ vmw_binding_dirtying(entry->bt));
if (unlikely(ret != 0))
break;
}
@@ -472,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
*
* @list: Pointer to head of relocation list.
* @res: The resource.
- * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is one byte.
+ * @offset: Offset into the command buffer currently being parsed where the id
+ * that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
@@ -486,7 +521,7 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) {
- DRM_ERROR("Failed to allocate a resource relocation.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
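
A stand-alone sketch of the relocation idea handled by the surrounding hunks: while parsing, only the byte offset of each resource id inside the command buffer is recorded, and the final ids are patched in after validation, just before submission. The list handling is reduced to a fixed array and every name is invented for the example.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One pending fixup: where in the buffer the id lives and its final value. */
struct toy_relocation {
        size_t   offset;        /* byte offset of the u32 to patch */
        uint32_t final_id;      /* known only after validation */
};

/* Patch all recorded ids into the buffer that will actually be submitted. */
static void toy_relocations_apply(uint8_t *cb,
                                  const struct toy_relocation *rel,
                                  size_t count)
{
        size_t i;

        for (i = 0; i < count; i++)
                memcpy(cb + rel[i].offset, &rel[i].final_id,
                       sizeof(rel[i].final_id));
}

int main(void)
{
        /* Pretend command stream: [cmd id][resource id][cmd id][resource id] */
        uint32_t cb[4] = { 0x100, 0xdeadbeef, 0x200, 0xdeadbeef };
        struct toy_relocation rels[2] = {
                { .offset = 1 * sizeof(uint32_t), .final_id = 7 },
                { .offset = 3 * sizeof(uint32_t), .final_id = 9 },
        };

        toy_relocations_apply((uint8_t *)cb, rels, 2);
        printf("patched ids: %u %u\n",
               (unsigned int)cb[1], (unsigned int)cb[3]);
        return 0;
}
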
@@ -506,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
static void vmw_resource_relocations_free(struct list_head *list)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(list);
}
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
- * @cb: Pointer to the start of the command buffer bein patch. This need
- * not be the same buffer as the one being parsed when the relocation
- * list was built, but the contents must be the same modulo the
- * resource ids.
+ * @cb: Pointer to the start of the command buffer being patched. This need not be
+ * the same buffer as the one being parsed when the relocation list was built,
+ * but the contents must be the same modulo the resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
@@ -560,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
}
/**
- * vmw_resources_reserve - Reserve all resources on the sw_context's
- * resource list.
+ * vmw_resources_reserve - Reserve all resources on the sw_context's resource
+ * list.
*
* @sw_context: Pointer to the software context.
*
- * Note that since vmware's command submission currently is protected by
- * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
- * since only a single thread at once will attempt this.
+ * Note that since vmware's command submission currently is protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
+ * only a single thread at once will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
@@ -592,22 +625,24 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
+ * resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
+ * @dirty: Whether to change dirty status.
* @converter: User-space visible type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
+ * @id_loc: Pointer to the location in the command buffer currently being parsed
+ * from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated on
+ * exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
+ u32 dirty,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource **p_res)
@@ -621,7 +656,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (*id_loc == SVGA3D_INVALID_ID) {
if (res_type == vmw_res_context) {
- DRM_ERROR("Illegal context invalid id.\n");
+ VMW_DEBUG_USER("Illegal context invalid id.\n");
return -EINVAL;
}
return 0;
@@ -629,6 +664,9 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
res = rcache->res;
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
} else {
unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
@@ -638,13 +676,13 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, *id_loc, converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned int) *id_loc);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+ (unsigned int) *id_loc);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
if (unlikely(ret != 0))
return ret;
@@ -675,23 +713,16 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_buffer_object *dx_query_mob;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindAllQuery body;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
-
- if (cmd == NULL) {
- DRM_ERROR("Failed to rebind queries.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ if (cmd == NULL)
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -705,8 +736,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
}
/**
- * vmw_rebind_contexts - Rebind all resources previously bound to
- * referenced contexts.
+ * vmw_rebind_contexts - Rebind all resources previously bound to referenced
+ * contexts.
*
* @sw_context: Pointer to the software context.
*
@@ -721,21 +752,23 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to rebind context.\n");
+ VMW_DEBUG_USER("Failed to rebind context.\n");
return ret;
}
ret = vmw_rebind_all_dx_query(val->ctx);
- if (ret != 0)
+ if (ret != 0) {
+ VMW_DEBUG_USER("Failed to rebind queries.\n");
return ret;
+ }
}
return 0;
}
/**
- * vmw_view_bindings_add - Add an array of view bindings to a context
- * binding state tracker.
+ * vmw_view_bindings_add - Add an array of view bindings to a context binding
+ * state tracker.
*
* @sw_context: The execbuf state used for this command.
* @view_type: View type for the bindings.
@@ -752,13 +785,11 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
u32 i;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
@@ -768,7 +799,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
view = vmw_view_id_val_add(sw_context, view_type,
view_ids[i]);
if (IS_ERR(view)) {
- DRM_ERROR("View not found.\n");
+ VMW_DEBUG_USER("View not found.\n");
return PTR_ERR(view);
}
}
@@ -798,19 +829,18 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- uint32_t cid;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
+ container_of(header, typeof(*cmd), header);
- cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->cid, NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body, NULL);
}
/**
* vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource
+ *
* @sw_context: Pointer to the command submission context
* @res: The resource
*
@@ -818,8 +848,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
* context's resource cache and hence the last resource of that type to be
* processed by the validation code.
*
- * Return: a pointer to the private metadata of the resource, or NULL
- * if it wasn't found
+ * Return: a pointer to the private metadata of the resource, or NULL if it
+ * wasn't found
*/
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
@@ -835,36 +865,32 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
return NULL;
}
-
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_RT_MAX) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal render target type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.target.sid,
- &res);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.target.sid, &res);
if (unlikely(ret))
return ret;
@@ -890,44 +916,38 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (ret)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBufferCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest, NULL);
}
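
The VMW_DECLARE_CMD_VAR() helper introduced at the top of this file, used in the verifier hunks above, replaces the per-command anonymous "struct { header; body }" boilerplate and pairs with container_of(header, typeof(*cmd), header). A stand-alone sketch of that declaration pattern, assuming GCC/clang for typeof; the command and header types are invented for the example.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_header {
        unsigned int id;
        unsigned int size;
};

struct toy_set_shader {
        unsigned int cid;
        unsigned int shid;
};

/* Declare __var as an anonymous { header, body } pair, like the macro above. */
#define TOY_DECLARE_CMD_VAR(__var, __type)      \
        struct {                                \
                struct toy_header header;       \
                __type body;                    \
        } __var

/* Command verifiers receive only the header pointer, as in the patch. */
static void toy_check(struct toy_header *header)
{
        TOY_DECLARE_CMD_VAR(*cmd, struct toy_set_shader) =
                container_of(header, typeof(*cmd), header);

        printf("cid %u shid %u\n", cmd->body.cid, cmd->body.shid);
}

int main(void)
{
        TOY_DECLARE_CMD_VAR(cmd, struct toy_set_shader) = {
                .header = { .id = 1, .size = sizeof(struct toy_set_shader) },
                .body   = { .cid = 2, .shid = 9 },
        };

        toy_check(&cmd.header);
        return 0;
}
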
@@ -935,21 +955,18 @@ static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXPredCopyRegion body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dstSid, NULL);
}
@@ -957,20 +974,18 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceStretchBlt body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
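
The recurring change in these verifier hunks is the new dirty argument: the source surface of a copy or blit is added to the validation list with VMW_RES_DIRTY_NONE, the destination with VMW_RES_DIRTY_SET, so later validation knows which resources the GPU will write. A toy sketch of that bookkeeping, with invented names and a bare struct instead of the real validation list:

#include <stdbool.h>
#include <stdio.h>

#define TOY_DIRTY_NONE 0u
#define TOY_DIRTY_SET  1u

struct toy_res {
        const char *name;
        bool        dirty;      /* will the GPU write this resource? */
};

/* Analog of adding a resource to the validation list with a dirty flag;
 * repeated adds only ever escalate from clean to dirty. */
static void toy_res_check(struct toy_res *res, unsigned int dirty)
{
        if (dirty)
                res->dirty = true;
}

int main(void)
{
        struct toy_res src = { .name = "src", .dirty = false };
        struct toy_res dst = { .name = "dst", .dirty = false };

        /* Mirrors the copy checks: the source is read, the destination written. */
        toy_res_check(&src, TOY_DIRTY_NONE);
        toy_res_check(&dst, TOY_DIRTY_SET);

        printf("%s dirty=%d, %s dirty=%d\n",
               src.name, src.dirty, dst.name, dst.dirty);
        return 0;
}
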
@@ -978,15 +993,11 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
@@ -994,17 +1005,12 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- } *cmd;
-
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.sid,
- NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.sid, NULL);
}
/**
@@ -1014,11 +1020,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch.
+ * This function checks whether @new_query_bo is suitable for holding query
+ * results, and if another buffer currently is pinned for query results. If so,
+ * the function prepares the state of @sw_context for switching pinned buffers
+ * after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *new_query_bo,
@@ -1034,7 +1039,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(new_query_bo->base.num_pages > 4)) {
- DRM_ERROR("Query buffer too large.\n");
+ VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1053,13 +1058,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
-
}
return 0;
}
-
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
@@ -1068,11 +1071,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
*
* This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronus unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
+ * object following that query wait has signaled, we are sure that all preceding
+ * queries have finished, and the old query buffer can be unpinned. However,
+ * since both the new query buffer and the old one are fenced with that fence,
+ * we can do an asynchronous unpin now, and be sure that the old query buffer
+ * won't be moved until the fence has signaled.
*
* As mentioned above, both the new - and old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
@@ -1084,7 +1087,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* The validate list should still hold references to all
* contexts here.
*/
-
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
@@ -1097,7 +1099,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
}
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
@@ -1111,10 +1113,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
/*
* We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
+ * don't need to validate it when emitting dummy queries
+ * in context destroy paths.
*/
-
if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true);
@@ -1131,22 +1132,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
- * handle to a MOB id.
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the buffer object identified by the
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a MOB id. The translation does not take place immediately, but
- * during a call to vmw_apply_relocations(). This function builds a relocation
- * list and a list of buffers to validate. The former needs to be freed using
- * either vmw_apply_relocations() or vmw_free_relocations(). The latter
- * needs to be freed using vmw_clear_validations.
+ * during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate. The
+ * former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
*/
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1161,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use MOB buffer.\n");
+ VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
@@ -1184,19 +1187,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
- * handle to a valid SVGAGuestPtr
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the DMA buffer identified by the user-space
+ * handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
+ *
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
@@ -1215,7 +1219,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use GMR region.\n");
+ VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
@@ -1236,10 +1240,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
}
-
-
/**
- * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1251,67 +1253,52 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_define_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineQuery q;
- } *cmd;
-
- int ret;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *cotable_res;
+ int ret;
-
- if (ctx_node == NULL) {
- DRM_ERROR("DX Context not set for query.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
- if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
- cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
- ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
}
-
-
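The new VMW_DECLARE_CMD_VAR() and VMW_GET_CTX_NODE() helpers used above are defined elsewhere in the series; a rough sketch of what they presumably expand to (an assumption, not visible in these hunks):

/* Declares the usual header + body command wrapper in one line. */
#define VMW_DECLARE_CMD_VAR(__var, __type)	\
	struct {				\
		SVGA3dCmdHeader header;		\
		__type body;			\
	} __var

/*
 * Returns sw_context->dx_ctx_node and, when it is NULL, presumably also
 * logs the "DX Context not set" message that the removed DRM_ERROR()
 * calls used to print, which is why callers can simply return -EINVAL.
 */
#define VMW_GET_CTX_NODE(__sw_context)				\
({								\
	if (!(__sw_context)->dx_ctx_node)			\
		VMW_DEBUG_USER("DX Context not set.\n");	\
	(__sw_context)->dx_ctx_node;				\
})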
/**
- * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
- * The query bind operation will eventually associate the query ID
- * with its backing MOB. In this function, we take the user mode
- * MOB ID and use vmw_translate_mob_ptr() to translate it to its
- * kernel mode equivalent.
+ * The query bind operation will eventually associate the query ID with its
+ * backing MOB. In this function, we take the user mode MOB ID and use
+ * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
*/
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_bind_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindQuery q;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
struct vmw_buffer_object *vmw_bo;
- int ret;
-
+ int ret;
- cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
/*
* Look up the buffer pointed to by q.mobid, put it on the relocation
* list so its kernel mode MOB ID can be filled in later
*/
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (ret != 0)
@@ -1322,10 +1309,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return 0;
}
-
-
/**
- * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1335,21 +1320,16 @@ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_gb_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_gb_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1359,38 +1339,30 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
+ container_of(header, typeof(*cmd), header);
if (unlikely(dev_priv->has_mob)) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
}
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1401,19 +1373,15 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1424,7 +1392,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1435,27 +1403,21 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
@@ -1466,8 +1428,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1477,7 +1438,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1488,19 +1449,15 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1509,7 +1466,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1520,27 +1477,21 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
@@ -1551,8 +1502,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1565,54 +1515,52 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
SVGA3dCmdSurfaceDMASuffix *suffix;
uint32_t bo_size;
+ bool dirty;
- cmd = container_of(header, struct vmw_dma_cmd, header);
- suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ cmd = container_of(header, typeof(*cmd), header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
header->size - sizeof(*suffix));
/* Make sure device and verifier stays in sync. */
if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
- DRM_ERROR("Invalid DMA suffix size.\n");
+ VMW_DEBUG_USER("Invalid DMA suffix size.\n");
return -EINVAL;
}
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->dma.guest.ptr,
- &vmw_bo);
+ &cmd->body.guest.ptr, &vmw_bo);
if (unlikely(ret != 0))
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
- if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
- DRM_ERROR("Invalid DMA offset.\n");
+ if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
+ VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
}
- bo_size -= cmd->dma.guest.ptr.offset;
+ bo_size -= cmd->body.guest.ptr.offset;
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
+ dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+ VMW_RES_DIRTY_SET : 0;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->dma.host.sid,
- NULL);
+ dirty, user_surface_converter,
+ &cmd->body.host.sid, NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
- DRM_ERROR("could not find surface for DMA.\n");
+ VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
- header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
return 0;
}
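The dirty argument now threaded through vmw_cmd_res_check() is defined outside these hunks; the values below and their semantics are an assumption inferred from the callers in this file: SET when the command may write the resource (such as a DMA into host VRAM), NONE for read-only references, CLEAR when the command leaves the resource in sync with (or invalidated against) its backing MOB so pending dirty state can be dropped.

/* Assumed flag values, for illustration only. */
#define VMW_RES_DIRTY_NONE   0        /* command only reads the resource      */
#define VMW_RES_DIRTY_SET    BIT(0)   /* command may write the resource       */
#define VMW_RES_DIRTY_CLEAR  BIT(1)   /* resource ends up in sync with backup */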
@@ -1621,10 +1569,7 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_draw_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDrawPrimitives body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
@@ -1636,16 +1581,17 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_draw_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
if (unlikely(cmd->body.numVertexDecls > maxnum)) {
- DRM_ERROR("Illegal number of vertex declarations.\n");
+ VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
return -EINVAL;
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1655,13 +1601,14 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
- DRM_ERROR("Illegal number of index ranges.\n");
+ VMW_DEBUG_USER("Illegal number of index ranges.\n");
return -EINVAL;
}
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1670,30 +1617,24 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
return 0;
}
-
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_tex_state_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetTextureState state;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
- ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ ((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_tex_state_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->state.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -1702,12 +1643,13 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
continue;
if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) cur_state->stage);
+ VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
+ (unsigned int) cur_state->stage);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cur_state->value, &res);
if (unlikely(ret != 0))
@@ -1744,12 +1686,10 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
- return vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
+ return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
&vmw_bo);
}
-
/**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching
@@ -1761,14 +1701,13 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- uint32_t *buf_id,
+ struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_buffer_object *vbo;
@@ -1788,7 +1727,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
return 0;
}
-
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1801,34 +1739,31 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv
- *converter,
- uint32_t *res_id,
- uint32_t *buf_id,
+ *converter, uint32_t *res_id, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &res);
+ VMW_RES_DIRTY_NONE, converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
- buf_id, backup_offset);
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
+ backup_offset);
}
/**
- * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
- * command
+ * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1838,22 +1773,16 @@ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, &cmd->body.mobid,
- 0);
+ user_surface_converter, &cmd->body.sid,
+ &cmd->body.mobid, 0);
}
/**
- * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
- * command
+ * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1863,21 +1792,16 @@ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
- * command
+ * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1887,21 +1811,16 @@ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
- * command
+ * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1911,20 +1830,16 @@ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1935,20 +1850,16 @@ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1959,21 +1870,17 @@ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_surface - Validate an
- * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1983,22 +1890,16 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
-
/**
- * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
- * command
+ * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2008,20 +1909,16 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_define_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
int ret;
size_t size;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_define_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2029,24 +1926,20 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(ctx),
- cmd->body.shid, cmd + 1,
- cmd->body.type, size,
- &sw_context->staged_cmd_res);
+ ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
+ cmd->body.shid, cmd + 1, cmd->body.type,
+ size, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
- * command
+ * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2056,42 +1949,34 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_destroy_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
int ret;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_destroy_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
+ cmd->body.type, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
- * command
+ * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2101,27 +1986,23 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *ctx, *res = NULL;
struct vmw_ctx_validation_info *ctx_info;
int ret;
- cmd = container_of(header, struct vmw_set_shader_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2130,21 +2011,20 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type);
-
+ cmd->body.shid, cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (unlikely(ret != 0))
return ret;
}
}
if (IS_ERR_OR_NULL(res)) {
- ret = vmw_cmd_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+ VMW_RES_DIRTY_NONE,
+ user_shader_converter, &cmd->body.shid,
+ &res);
if (unlikely(ret != 0))
return ret;
}
@@ -2157,14 +2037,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_info->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
+
return 0;
}
/**
- * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
- * command
+ * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2174,18 +2053,14 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_const_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShaderConst body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
int ret;
- cmd = container_of(header, struct vmw_set_shader_const_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
if (unlikely(ret != 0))
return ret;
@@ -2196,8 +2071,7 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
- * command
+ * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2207,22 +2081,16 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBShader body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &cmd->body.mobid,
- cmd->body.offsetInBytes);
+ user_shader_converter, &cmd->body.shid,
+ &cmd->body.mobid, cmd->body.offsetInBytes);
}
/**
- * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * vmw_cmd_dx_set_single_constant_buffer - Validate
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2234,23 +2102,18 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetSingleConstantBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_cb binding;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
@@ -2265,21 +2128,21 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
- DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
- (unsigned) cmd->body.type,
- (unsigned) binding.slot);
+ VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+ (unsigned int) cmd->body.type,
+ (unsigned int) binding.slot);
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+ binding.slot);
return 0;
}
/**
- * vmw_cmd_dx_set_shader_res - Validate an
- * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2289,17 +2152,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShaderResources body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+ container_of(header, typeof(*cmd), header);
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Invalid shader binding.\n");
+ VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2311,8 +2172,7 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
- * command
+ * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2322,36 +2182,32 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader for binding.\n");
+ VMW_DEBUG_USER("Could not find shader for binding.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret)
return ret;
}
@@ -2361,15 +2217,14 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
return 0;
}
/**
- * vmw_cmd_dx_set_vertex_buffers - Validates an
- * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2379,7 +2234,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource *res;
struct {
@@ -2389,22 +2244,21 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dVertexBuffer);
if ((u64)num + (u64)cmd->body.startBuffer >
(u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
- DRM_ERROR("Invalid number of vertex buffers.\n");
+ VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
@@ -2417,15 +2271,14 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
}
/**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
+ * vmw_cmd_dx_ia_set_vertex_buffers - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2436,23 +2289,18 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetIndexBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
@@ -2469,8 +2317,8 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_rendertarget - Validate an
- * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2480,32 +2328,29 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetRenderTargets body;
- } *cmd = container_of(header, typeof(*cmd), header);
- int ret;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
+ container_of(header, typeof(*cmd), header);
u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dRenderTargetViewId);
+ int ret;
if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
- DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
return -EINVAL;
}
- ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
- vmw_ctx_binding_ds, 0,
- &cmd->body.depthStencilViewId, 1, 0);
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
+ 0, &cmd->body.depthStencilViewId, 1, 0);
if (ret)
return ret;
return vmw_view_bindings_add(sw_context, vmw_view_rt,
- vmw_ctx_binding_dx_rt, 0,
- (void *)&cmd[1], num_rt_view, 0);
+ vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
+ num_rt_view, 0);
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_rendertarget_view - Validate
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2516,17 +2361,15 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearRenderTargetView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
cmd->body.renderTargetViewId));
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_rendertarget_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2537,10 +2380,8 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearDepthStencilView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
cmd->body.depthStencilViewId));
@@ -2550,14 +2391,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
/*
- * This is based on the fact that all affected define commands have
- * the same initial command body layout.
+ * This is based on the fact that all affected define commands have the
+ * same initial command body layout.
*/
struct {
SVGA3dCmdHeader header;
@@ -2565,17 +2406,16 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
uint32 sid;
} *cmd;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
view_type = vmw_view_cmd_to_type(header->id);
if (view_type == vmw_view_max)
return -EINVAL;
+
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
@@ -2585,19 +2425,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- return vmw_view_add(sw_context->man,
- ctx_node->ctx,
- srf,
- view_type,
- cmd->defined_id,
- header,
+ return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
+ cmd->defined_id, header,
header->size + sizeof(*header),
&sw_context->staged_cmd_res);
}
/**
- * vmw_cmd_dx_set_so_targets - Validate an
- * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2607,7 +2442,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_so binding;
struct vmw_resource *res;
struct {
@@ -2617,22 +2452,20 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
- num = (cmd->header.size - sizeof(cmd->body)) /
- sizeof(SVGA3dSoTarget);
+ num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
if (num > SVGA3D_DX_MAX_SOTARGETS) {
- DRM_ERROR("Invalid DX SO binding.\n");
+ VMW_DEBUG_USER("Invalid DX SO binding.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_SET,
user_surface_converter,
&cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
@@ -2645,8 +2478,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
@@ -2656,7 +2488,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
@@ -2669,10 +2501,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
enum vmw_so_type so_type;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
@@ -2683,8 +2513,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_check_subresource - Validate an
- * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2714,7 +2544,7 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
}
@@ -2722,32 +2552,30 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
return 0;
}
/**
- * vmw_cmd_dx_view_remove - validate a view remove command and
- * schedule the view resource for removal.
+ * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
+ * resource for removal.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*
- * Check that the view exists, and if it was not created using this
- * command batch, conditionally make this command a NOP.
+ * Check that the view exists, and if it was not created using this command
+ * batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
@@ -2756,15 +2584,11 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- ret = vmw_view_remove(sw_context->man,
- cmd->body.view_id, view_type,
- &sw_context->staged_cmd_res,
- &view);
+ ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res, &view);
if (ret || !view)
return ret;
@@ -2774,16 +2598,14 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid
* device errors.
*/
- return vmw_resource_relocation_add(sw_context,
- view,
+ return vmw_resource_relocation_add(sw_context, view,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_cond_nop);
}
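The vmw_res_rel_cond_nop relocation recorded above is resolved when the batch is committed, outside these hunks; conceptually (hypothetical helper, assumed behavior) it amounts to:

/* Conceptual sketch only; name and parameters are hypothetical. */
static void apply_cond_nop(u32 *cmd_stream, size_t cmd_offset_in_dwords,
			   bool view_destroyed)
{
	/* A destroyed view turns its destroy command into a device no-op. */
	if (view_destroyed)
		cmd_stream[cmd_offset_in_dwords] = SVGA_3D_CMD_NOP;
}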
/**
- * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
- * command
+ * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2793,18 +2615,14 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
@@ -2817,8 +2635,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
- * command
+ * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2828,29 +2645,22 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDestroyShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
&sw_context->staged_cmd_res);
- if (ret)
- DRM_ERROR("Could not find shader to remove.\n");
return ret;
}
/**
- * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
- * command
+ * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2862,36 +2672,37 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
{
struct vmw_resource *ctx;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter,
- &cmd->body.cid, &ctx);
+ VMW_RES_DIRTY_SET,
+ user_context_converter, &cmd->body.cid,
+ &ctx);
if (ret)
return ret;
} else {
- if (!sw_context->dx_ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ struct vmw_ctx_validation_info *ctx_node =
+ VMW_GET_CTX_NODE(sw_context);
+
+ if (!ctx_node)
return -EINVAL;
- }
- ctx = sw_context->dx_ctx_node->ctx;
+
+ ctx = ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid, 0);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader to bind.\n");
+ VMW_DEBUG_USER("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret) {
- DRM_ERROR("Error creating resource validation node.\n");
+ VMW_DEBUG_USER("Error creating resource validation node.\n");
return ret;
}
@@ -2901,7 +2712,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2911,18 +2722,16 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXGenMips body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
cmd->body.shaderResourceViewId));
}
/**
- * vmw_cmd_dx_transfer_from_buffer -
- * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ * vmw_cmd_dx_transfer_from_buffer - Validate
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2932,26 +2741,23 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXTransferFromBuffer body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
+ container_of(header, typeof(*cmd), header);
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.destSid, NULL);
}
/**
- * vmw_cmd_intra_surface_copy -
- * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2961,20 +2767,17 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdIntraSurfaceCopy body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
+ container_of(header, typeof(*cmd), header);
if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
return -EINVAL;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.surface.sid, NULL);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.surface.sid, NULL);
}
-
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -2997,18 +2800,18 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
break;
default:
- DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
return -EINVAL;
}
if (*size > size_remaining) {
- DRM_ERROR("Invalid SVGA command (size mismatch):"
- " %u.\n", cmd_id);
+ VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
+ cmd_id);
return -EINVAL;
}
if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
return -EPERM;
}
@@ -3196,9 +2999,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
false, false, true),
- /*
- * DX commands
- */
+ /* SM commands */
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
@@ -3380,8 +3181,8 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
}
static int vmw_cmd_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
+ struct vmw_sw_context *sw_context, void *buf,
+ uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
@@ -3420,31 +3221,33 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_new;
ret = entry->func(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- goto out_invalid;
+ if (unlikely(ret != 0)) {
+ VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
+ cmd_id + SVGA_3D_CMD_BASE, ret);
+ return ret;
+ }
return 0;
out_invalid:
- DRM_ERROR("Invalid SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_privileged:
- DRM_ERROR("Privileged SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EPERM;
out_old:
- DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_new:
- DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
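Throughout this series, DRM_ERROR() calls that can be triggered by malformed user-space submissions are converted to VMW_DEBUG_USER(). The macro is introduced elsewhere in the series and not shown here; a plausible sketch, assuming it routes such messages through the DRM debug machinery so they no longer flood the kernel log by default:

/* Assumed shape of the helper (the real one is added to vmwgfx_drv.h). */
#define VMW_DEBUG_USER(fmt, ...)					\
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

With that shape, enabling the DRM driver debug category (e.g. drm.debug=0x02) would surface the messages again when debugging user-space command streams.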
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf,
+ struct vmw_sw_context *sw_context, void *buf,
uint32_t size)
{
int32_t cur_size = size;
@@ -3462,7 +3265,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
}
if (unlikely(cur_size != 0)) {
- DRM_ERROR("Command verifier out of sync.\n");
+ VMW_DEBUG_USER("Command verifier out of sync.\n");
return -EINVAL;
}
@@ -3472,7 +3275,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(&sw_context->bo_relocations);
}
@@ -3520,7 +3322,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
- DRM_ERROR("Failed to allocate command bounce buffer.\n");
+ VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
sw_context->cmd_bounce_size = 0;
return -ENOMEM;
}
@@ -3535,8 +3337,8 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 * If this fails for some reason, we sync the fifo and return NULL.
* It is then safe to fence buffers with a NULL pointer.
*
- * If @p_handle is not NULL @file_priv must also not be NULL. Creates
- * a userspace handle if @p_handle is not NULL, otherwise not.
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
+ * userspace handle if @p_handle is not NULL, otherwise not.
*/
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
@@ -3553,7 +3355,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
}
@@ -3564,9 +3366,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
if (unlikely(ret != 0 && !synced)) {
- (void) vmw_fallback_wait(dev_priv, false, false,
- sequence, false,
- VMW_FENCE_WAIT_TIMEOUT);
+ (void) vmw_fallback_wait(dev_priv, false, false, sequence,
+ false, VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL;
}
@@ -3574,36 +3375,32 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
}
/**
- * vmw_execbuf_copy_fence_user - copy fence object information to
- * user-space.
+ * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
*
* @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation.
- * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
- * which the information should be copied.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
+ * the information should be copied.
 * @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
* @out_fence_fd: exported file descriptor for the fence. -1 if not used
* @sync_file: Only used to clean up in case of an error in this function.
*
- * This function copies fence information to user-space. If copying fails,
- * The user-space struct drm_vmw_fence_rep::error member is hopefully
- * left untouched, and if it's preloaded with an -EFAULT by user-space,
- * the error will hopefully be detected.
- * Also if copying fails, user-space will be unable to signal the fence
- * object so we wait for it immediately, and then unreference the
- * user-space reference.
+ * This function copies fence information to user-space. If copying fails, the
+ * user-space struct drm_vmw_fence_rep::error member is hopefully left
+ * untouched, and if it's preloaded with an -EFAULT by user-space, the error
+ * will hopefully be detected.
+ *
+ * Also if copying fails, user-space will be unable to signal the fence object
+ * so we wait for it immediately, and then unreference the user-space reference.
*/
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
+ struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file)
+ struct vmw_fence_obj *fence, uint32_t fence_handle,
+ int32_t out_fence_fd, struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3624,16 +3421,16 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
/*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in. Typically
- * user-space would have pre-set that member to -EFAULT.
+ * copy_to_user errors will be detected by user space not seeing
+ * fence_rep::error filled in. Typically user-space would have pre-set
+ * that member to -EFAULT.
*/
ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep));
/*
- * User-space lost the fence object. We need to sync
- * and unreference the handle.
+ * User-space lost the fence object. We need to sync and unreference the
+ * handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
if (sync_file)
@@ -3644,42 +3441,39 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1;
}
- ttm_ref_object_base_unref(vmw_fp->tfile,
- fence_handle, TTM_REF_USAGE);
- DRM_ERROR("Fence copy error. Syncing.\n");
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+ TTM_REF_USAGE);
+ VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
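For reference, the -EFAULT convention described in the comment above is completed on the user-space side; a hypothetical caller (not part of this series) would look roughly like:

/* Hypothetical user-space sketch of the fence_rep -EFAULT convention. */
struct drm_vmw_fence_rep fence_rep;

memset(&fence_rep, 0, sizeof(fence_rep));
fence_rep.error = -EFAULT; /* stays -EFAULT if the kernel copy_to_user() fails */

arg.fence_rep = (unsigned long long)(uintptr_t)&fence_rep;
/* ... submit the execbuf ioctl ... */
if (fence_rep.error != 0) {
	/* Fence information was lost; the kernel has already synced for us. */
}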
/**
- * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
- * the fifo.
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
*
* @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command batch
- * pointed to by @kernel_commands will have been modified.
+ * Side effects: If this function returns 0, then the command batch pointed to
+ * by @kernel_commands will have been modified.
*/
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (!cmd) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+
+ if (!cmd)
return -ENOMEM;
- }
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
@@ -3691,16 +3485,16 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
}
/**
- * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
- * the command buffer manager.
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
+ * command buffer manager.
*
* @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command buffer
- * represented by @header will have been modified.
+ * Side effects: If this function returns 0, then the command buffer represented
+ * by @header will have been modified.
*/
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header,
@@ -3709,8 +3503,8 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
{
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
- void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
- id, false, header);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
+ header);
vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
@@ -3730,22 +3524,23 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
* @header: Out parameter returning the opaque pointer to the command buffer.
*
* This function checks whether we can use the command buffer manager for
- * submission and if so, creates a command buffer of suitable size and
- * copies the user data into that buffer.
+ * submission and if so, creates a command buffer of suitable size and copies
+ * the user data into that buffer.
*
* On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL.
- * If command buffers could not be used, the function will return the value
- * of @kernel_commands on function call. That value may be NULL. In that case,
- * the value of *@header will be set to NULL.
+ *
+ * If command buffers could not be used, the function will return the value of
+ * @kernel_commands on function call. That value may be NULL. In that case, the
+ * value of *@header will be set to NULL.
+ *
* If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
*/
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_cmdbuf_header **header)
{
size_t cmdbuf_size;
@@ -3753,7 +3548,7 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
*header = NULL;
if (command_size > SVGA_CB_MAX_SIZE) {
- DRM_ERROR("Command buffer is too large.\n");
+ VMW_DEBUG_USER("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
@@ -3763,15 +3558,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
- kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
- true, header);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
+ header);
if (IS_ERR(kernel_commands))
return kernel_commands;
- ret = copy_from_user(kernel_commands, user_commands,
- command_size);
+ ret = copy_from_user(kernel_commands, user_commands, command_size);
if (ret) {
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header);
*header = NULL;
return ERR_PTR(-EFAULT);
@@ -3799,13 +3593,13 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, handle,
user_context_converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or user DX context 0x%08x.\n",
- (unsigned) handle);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
+ (unsigned int) handle);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
@@ -3817,19 +3611,16 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
+ void __user *user_commands, void *kernel_commands,
+ uint32_t command_size, uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence,
- uint32_t flags)
+ struct vmw_fence_obj **out_fence, uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_cmdbuf_header *header;
- uint32_t handle;
+ uint32_t handle = 0;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
@@ -3840,7 +3631,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
- DRM_ERROR("Failed to get a fence file descriptor.\n");
+ VMW_DEBUG_USER("Failed to get a fence fd.\n");
return out_fence_fd;
}
}
@@ -3873,18 +3664,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_unlock;
-
- ret = copy_from_user(sw_context->cmd_bounce,
- user_commands, command_size);
-
+ ret = copy_from_user(sw_context->cmd_bounce, user_commands,
+ command_size);
if (unlikely(ret != 0)) {
ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
goto out_unlock;
}
+
kernel_commands = sw_context->cmd_bounce;
- } else if (!header)
+ } else if (!header) {
sw_context->kernel = true;
+ }
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
@@ -3897,6 +3688,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->res_relocations);
INIT_LIST_HEAD(&sw_context->bo_relocations);
+
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
@@ -3904,8 +3696,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
+
sw_context->res_ht_initialized = true;
}
+
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
@@ -3932,6 +3726,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+
vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
@@ -3959,17 +3754,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
(user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync. The error will be propagated to
* user-space in @fence_rep
*/
-
if (ret != 0)
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
@@ -3977,21 +3770,19 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_validation_bo_fence(sw_context->ctx, fence);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
/*
- * If anything fails here, give up trying to export the fence
- * and do a sync since the user mode will not be able to sync
- * the fence itself. This ensures we are still functionally
- * correct.
+ * If anything fails here, give up trying to export the fence and do a
+ * sync since the user mode will not be able to sync the fence itself.
+ * This ensures we are still functionally correct.
*/
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
sync_file = sync_file_create(&fence->base);
if (!sync_file) {
- DRM_ERROR("Unable to create sync file for fence\n");
+ VMW_DEBUG_USER("Sync file create failed for fence\n");
put_unused_fd(out_fence_fd);
out_fence_fd = -1;
@@ -4004,8 +3795,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle,
- out_fence_fd, sync_file);
+ user_fence_rep, fence, handle, out_fence_fd,
+ sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4019,8 +3810,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
@@ -4035,8 +3826,7 @@ out_err_nores:
vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
@@ -4045,8 +3835,8 @@ out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
out_free_header:
@@ -4064,13 +3854,13 @@ out_free_fence_fd:
*
* @dev_priv: The device private structure.
*
- * This function is called to idle the fifo and unpin the query buffer
- * if the normal way to do this hits an error, which should typically be
- * extremely rare.
+ * This function is called to idle the fifo and unpin the query buffer if the
+ * normal way to do this hits an error, which should typically be extremely
+ * rare.
*/
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
- DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+ VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
@@ -4082,28 +3872,27 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
+ * bo.
*
* @dev_priv: The device private structure.
- * @fence: If non-NULL should point to a struct vmw_fence_obj issued
- * _after_ a query barrier that flushes all queries touching the current
- * buffer pointed to by @dev_priv->pinned_bo
+ * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
+ * query barrier that flushes all queries touching the current buffer pointed to
+ * by @dev_priv->pinned_bo
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do safe
+ * unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*
- * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
- * before calling this function.
+ * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
+ * calling this function.
*/
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
@@ -4153,35 +3942,32 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
+
out_unlock:
return;
-
out_no_emit:
vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
vmw_validation_unref_lists(&val_ctx);
vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
-
}
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
*
* @dev_priv: The device private structure.
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do safe
+ * unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*/
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
@@ -4203,8 +3989,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
- DRM_ERROR("Invalid command size, ioctl %d\n",
- DRM_VMW_EXECBUF);
+ VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
return -EINVAL;
}
@@ -4212,23 +3998,19 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return -EFAULT;
/*
- * Extend the ioctl argument while
- * maintaining backwards compatibility:
- * We take different code paths depending on the value of
- * arg.version.
+ * Extend the ioctl argument while maintaining backwards compatibility:
+ * We take different code paths depending on the value of arg.version.
*/
-
if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
arg.version == 0)) {
- DRM_ERROR("Incorrect execbuf version.\n");
+ VMW_DEBUG_USER("Incorrect execbuf version.\n");
return -EINVAL;
}
if (arg.version > 1 &&
copy_from_user(&arg.context_handle,
(void __user *) (data + copy_offset[0]),
- copy_offset[arg.version - 1] -
- copy_offset[0]) != 0)
+ copy_offset[arg.version - 1] - copy_offset[0]) != 0)
return -EFAULT;
switch (arg.version) {
@@ -4240,13 +4022,12 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
break;
}
-
/* If imported a fence FD from elsewhere, then wait on it */
if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
in_fence = sync_file_get_fence(arg.imported_fence_fd);
if (!in_fence) {
- DRM_ERROR("Cannot get imported fence\n");
+ VMW_DEBUG_USER("Cannot get imported fence\n");
return -EINVAL;
}
@@ -4264,8 +4045,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL,
- arg.flags);
+ NULL, arg.flags);
+
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 2a9112515f46..972e8fda6d35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -642,12 +642,11 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
- unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned int fb_bpp, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
- fb_depth = 24;
/* XXX As shouldn't these be as well. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
@@ -655,7 +654,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
- fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index d0fd147ef75f..ff3586cb6851 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -395,12 +395,8 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
- if (IS_ERR_OR_NULL(ret)) {
- DRM_ERROR("Fifo reserve failure of %u bytes.\n",
- (unsigned) bytes);
- dump_stack();
+ if (IS_ERR_OR_NULL(ret))
return NULL;
- }
return ret;
}
@@ -544,7 +540,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = vmw_fifo_reserve(dev_priv, bytes);
+ fm = VMW_FIFO_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -603,12 +599,9 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -652,12 +645,9 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -699,8 +689,3 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
-
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
-{
- return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
-}
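The removed wrapper above, together with the per-call-site "Fifo reserve failure" prints dropped throughout this series, is subsumed by the VMW_FIFO_RESERVE() / VMW_FIFO_RESERVE_DX() macros. Their definitions are not in this diff; a sketch of the assumed shape, with the failure message emitted once in the macro rather than at every caller:

/* Assumed definitions -- the real ones are added to vmwgfx_drv.h. */
#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)			\
({									\
	void *__reserved = vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id); \
	if (!__reserved)						\
		DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",	\
			  __func__, (unsigned int) (__bytes));		\
	__reserved;							\
})

#define VMW_FIFO_RESERVE(__priv, __bytes)				\
	VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)

Call sites then only have to test the returned pointer for NULL, which is what the converted hunks below do.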
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 007a0cc7f232..ae7acc6f3dda 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -110,11 +110,10 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = vmw_fifo_reserve(dev_priv, define_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("GMR2 unbind failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ if (unlikely(cmd == NULL))
return;
- }
+
define_cmd.gmrId = gmr_id;
define_cmd.numPages = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 172a6ba6539c..a15375eb476e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -188,7 +188,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
- DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
@@ -268,7 +268,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Variable clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -291,7 +291,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
@@ -351,7 +351,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Argument clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -374,14 +374,14 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->bo) {
- DRM_ERROR("Framebuffer not buffer backed.\n");
+ VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ed2f67822f45..b97bc8e5944b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -64,11 +64,9 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = vmw_fifo_reserve(dev_priv, cmd_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, sizeof(*cmd));
@@ -1202,7 +1200,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
vmw_bo_unreference(&res->backup);
res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
- vmw_resource_unreserve(res, false, NULL, 0);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return 0;
@@ -2468,13 +2466,11 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
dirty->fifo_reserve_size);
- if (!dirty->cmd) {
- DRM_ERROR("Couldn't reserve fifo space "
- "for dirty blits.\n");
+ if (!dirty->cmd)
return -ENOMEM;
- }
+
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
dirty->num_hits = 0;
@@ -2604,12 +2600,9 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd) {
- DRM_ERROR("Couldn't reserve fifo space for proxy surface "
- "update.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd)
return -ENOMEM;
- }
for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
box = &cmd->body.box;
@@ -2827,7 +2820,8 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
container_of(update->vfb, typeof(*vfbs), base);
ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
- 0, NULL, NULL);
+ 0, VMW_RES_DIRTY_NONE, NULL,
+ NULL);
}
if (ret)
@@ -2838,7 +2832,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 16be515c4c0f..25e6343bcf21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -554,11 +554,9 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d83cc66e1210..406edc8cef35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -146,9 +146,8 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -202,12 +201,9 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable "
- "takedown.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
@@ -614,16 +610,14 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object unbinding.\n");
- } else {
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+
if (bo) {
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
@@ -683,12 +677,9 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
goto out_no_cmd_space;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
cmd->header.size = sizeof(cmd->body);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 9f1b9d289bec..d5ef8cf802de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -124,7 +124,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+ cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -194,7 +194,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a7c30e567f09..711f8fd0dd45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -365,14 +365,6 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
list_add_tail(&res->mob_head, &res->backup->res_list);
}
- /*
- * Only do this on write operations, and move to
- * vmw_resource_unreserve if it can be called after
- * backup buffers have been unreserved. Otherwise
- * sort out locking.
- */
- res->res_dirty = true;
-
return 0;
out_bind_failed:
@@ -386,6 +378,8 @@ out_bind_failed:
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
+ * @dirty_set: Change dirty status of the resource.
+ * @dirty: When changing dirty status indicates the new status.
* @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
* switched. May be NULL.
@@ -395,6 +389,8 @@ out_bind_failed:
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
@@ -422,6 +418,9 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup)
res->backup_offset = new_backup_offset;
+ if (dirty_set)
+ res->res_dirty = dirty;
+
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
@@ -696,7 +695,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!res->func->unbind)
continue;
- (void) res->func->unbind(res, true, &val_buf);
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
list_del_init(&res->mob_head);
@@ -731,12 +730,9 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for "
- "query MOB read back.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -932,7 +928,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
res->pin_count++;
out_no_validate:
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_write_unlock(&dev_priv->reservation_sem);
@@ -968,7 +964,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
ttm_bo_unreserve(&vbo->base);
}
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_read_unlock(&dev_priv->reservation_sem);
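Callers that do not want to touch the dirty state pass dirty_set == false, as the pin/unpin paths above now do. A caller that has just written to the resource through command submission would instead mark it dirty; an illustrative sketch under the new prototype:

/* Illustrative only: unreserve after a write, keeping the current backup. */
vmw_resource_unreserve(res,
		       true,   /* dirty_set: update the dirty state */
		       true,   /* dirty: the device has written to the resource */
		       false,  /* switch_backup: backup buffer unchanged */
		       NULL, 0);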
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index cd586c52af7e..9a2a3836d89a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -130,12 +130,9 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* The hardware has hung, nothing we can do about it here. */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
@@ -182,12 +179,9 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* the hardware has hung, nothing we can do about it here */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
@@ -998,11 +992,9 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (!cmd) {
- DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
@@ -1148,7 +1140,8 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bf32fe446219..d310d21f0d54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -218,10 +218,8 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -254,12 +252,9 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -285,12 +280,9 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -328,10 +320,8 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -400,13 +390,9 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- shader->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -491,12 +477,9 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -865,14 +848,13 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find buffer for shader "
- "creation.\n");
+ VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
}
if ((u64)buffer->base.num_pages * PAGE_SIZE <
(u64)size + (u64)offset) {
- DRM_ERROR("Illegal buffer- or shader size.\n");
+ VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
@@ -886,7 +868,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type = SVGA3D_SHADERTYPE_PS;
break;
default:
- DRM_ERROR("Illegal shader type.\n");
+ VMW_DEBUG_USER("Illegal shader type.\n");
ret = -EINVAL;
goto out_bad_arg;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6a6865384e91..73e9a487e659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -239,17 +239,17 @@ vmw_simple_resource_lookup(struct ttm_object_file *tfile,
base = ttm_base_object_lookup(tfile, handle);
if (!base) {
- DRM_ERROR("Invalid %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-ESRCH);
}
if (ttm_base_object_type(base) != func->ttm_res_type) {
ttm_base_object_unref(&base);
- DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index bc8bb690f1ea..63807361e16f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,13 +170,12 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
- view->ctx->id);
+ cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view creation.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
+
memcpy(cmd, &view->cmd, view->cmd_size);
WARN_ON(cmd->body.view_id != view->view_id);
/* Sid may have changed due to surface eviction. */
@@ -214,12 +213,9 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ if (!cmd)
return -ENOMEM;
- }
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
@@ -338,12 +334,12 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
if (cmd_size != vmw_view_define_sizes[view_type] +
sizeof(SVGA3dCmdHeader)) {
- DRM_ERROR("Illegal view create command size.\n");
+ VMW_DEBUG_USER("Illegal view create command size.\n");
return -EINVAL;
}
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view add view id.\n");
+ VMW_DEBUG_USER("Illegal view add view id.\n");
return -EINVAL;
}
@@ -352,8 +348,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for view creation\n");
return ret;
}
@@ -413,7 +408,7 @@ int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view remove view id.\n");
+ VMW_DEBUG_USER("Illegal view remove view id.\n");
return -EINVAL;
}
@@ -497,6 +492,30 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
vmw_view_key(user_key, view_type));
}
+/**
+ * vmw_view_dirtying - Return whether a view type is dirtying its resource
+ * @res: Pointer to the view
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * view pointing to it, we need to determine whether that resource will
+ * be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently only rendertarget- and depth-stencil views are
+ * capable of dirtying their resource.
+ *
+ * Return: Whether the view type of @res dirties the resource it points to.
+ */
+u32 vmw_view_dirtying(struct vmw_resource *res)
+{
+ static u32 view_is_dirtying[vmw_view_max] = {
+ [vmw_view_rt] = VMW_RES_DIRTY_SET,
+ [vmw_view_ds] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Update this function as we add more view types */
+ BUILD_BUG_ON(vmw_view_max != 3);
+ return view_is_dirtying[vmw_view(res)->view_type];
+}
+
const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
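An illustrative sketch of how a validator is expected to consume the new helper when adding the surface behind a view, using the vmw_execbuf_res_noctx_val_add() signature introduced earlier in this series (the exact call site lives in vmwgfx_execbuf.c and is not shown in this hunk):

/* Illustrative only: let the view type decide the dirty flag. */
ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view_res),
				    vmw_view_dirtying(view_res));

Render-target and depth-stencil views report VMW_RES_DIRTY_SET; all other view types fall through to the zero-initialized array entry, which is assumed to correspond to VMW_RES_DIRTY_NONE.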
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index b80c7252f2fd..12565047bc55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -157,4 +157,5 @@ extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key);
+extern u32 vmw_view_dirtying(struct vmw_resource *res);
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 096c2941a8e4..f803bb5e782b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -111,7 +111,7 @@ struct vmw_stdu_update_gb_image {
*/
struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
- const struct vmw_surface *display_srf;
+ struct vmw_surface *display_srf;
enum stdu_content_type content_fb_type;
s32 display_width, display_height;
@@ -167,12 +167,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -229,12 +226,9 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space binding a screen target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -296,12 +290,9 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
0, stdu->display_width,
@@ -335,12 +326,9 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -533,6 +521,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
@@ -629,20 +618,16 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x2 = diff.rect.x2;
region.y1 = diff.rect.y1;
region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(
- (struct vmw_resource *) &stdu->display_srf->res,
- (const struct drm_clip_rect *) &region, 1, 1);
+ ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
+ 1, 1);
if (ret)
goto out_cleanup;
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (!cmd) {
- DRM_ERROR("Cannot reserve FIFO space to update STDU");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
goto out_cleanup;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
region.x1, region.x2,
@@ -820,6 +805,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.dest.sid = stdu->display_srf->res.id;
update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+ stdu->display_srf->res.res_dirty = true;
} else {
update = dirty->cmd;
commit_size = sizeof(*update);
@@ -876,7 +862,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index ef09f7edf931..219471903bc1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -342,12 +342,9 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(!cmd))
return;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
@@ -414,10 +411,8 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -468,12 +463,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "DMA.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
+
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
@@ -556,12 +549,9 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
@@ -748,11 +738,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format for surface creation.\n");
- DRM_ERROR("Format requested is: %d\n", req->format);
+ VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
+ req->format);
return -EINVAL;
}
@@ -764,8 +753,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for surface.\n");
goto out_unlock;
}
@@ -939,12 +927,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(!base)) {
- DRM_ERROR("Could not find surface to reference.\n");
+ VMW_DEBUG_USER("Could not find surface to reference.\n");
goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
- DRM_ERROR("Referenced object is not a surface.\n");
+ VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
@@ -1022,8 +1010,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(user_sizes, &srf->base_size,
sizeof(srf->base_size));
if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
+ VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
+ srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1088,12 +1076,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -1171,12 +1157,9 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd1)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "binding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd1))
return -ENOMEM;
- }
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
@@ -1221,12 +1204,9 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
if (readback) {
cmd1 = (void *) cmd;
@@ -1280,10 +1260,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -1405,16 +1383,16 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (for_scanout) {
if (!svga3dsurface_is_screen_target_format(format)) {
- DRM_ERROR("Invalid Screen Target surface format.");
+ VMW_DEBUG_USER("Invalid Screen Target surface format.");
return -EINVAL;
}
if (size.width > dev_priv->texture_max_width ||
size.height > dev_priv->texture_max_height) {
- DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
- size.width, size.height,
- dev_priv->texture_max_width,
- dev_priv->texture_max_height);
+ VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
+ size.width, size.height,
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1422,14 +1400,14 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
desc = svga3dsurface_get_desc(format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format.\n");
+ VMW_DEBUG_USER("Invalid surface format.\n");
return -EINVAL;
}
}
/* array_size must be null for non-GL3 host. */
if (array_size > 0 && !dev_priv->has_dx) {
- DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+ VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
return -EINVAL;
}
@@ -1651,7 +1629,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (ret == 0) {
if (res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
- DRM_ERROR("Surface backup buffer too small.\n");
+ VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
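
The hunks above collapse the repeated vmw_fifo_reserve() + DRM_ERROR + early-return pattern into a single VMW_FIFO_RESERVE() call whose definition is not part of this excerpt. The following is only a stand-alone C sketch of that reserve-and-log-once wrapper idea, using made-up names (fake_private, FAKE_FIFO_RESERVE) rather than the driver's actual macro:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver types; not the vmwgfx definitions. */
struct fake_private { size_t fifo_free; };

static void *fake_fifo_reserve(struct fake_private *priv, size_t bytes)
{
	if (bytes > priv->fifo_free)
		return NULL;               /* out of FIFO space */
	priv->fifo_free -= bytes;
	return calloc(1, bytes);           /* command buffer placeholder */
}

/*
 * Wrapper in the spirit of VMW_FIFO_RESERVE(): reserve and, on failure,
 * emit one standardized log line instead of a hand-written error message
 * at every call site.
 */
#define FAKE_FIFO_RESERVE(priv, bytes)					\
	({								\
		void *__cmd = fake_fifo_reserve((priv), (bytes));	\
		if (!__cmd)						\
			fprintf(stderr, "%s: out of FIFO space (%zu bytes)\n", \
				__func__, (size_t)(bytes));		\
		__cmd;							\
	})

int main(void)
{
	struct fake_private priv = { .fifo_free = 64 };
	void *cmd = FAKE_FIFO_RESERVE(&priv, 128);  /* fails and logs once */

	if (!cmd)
		return 1;      /* mirrors the -ENOMEM early returns above */
	free(cmd);
	return 0;
}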
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e6d75e377dd8..8bafa6eac5a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -30,16 +30,9 @@
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct vmw_private *dev_priv;
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e9944ac2e057..f611b2290a1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -76,6 +76,8 @@ struct vmw_validation_res_node {
u32 switching_backup : 1;
u32 first_usage : 1;
u32 reserved : 1;
+ u32 dirty : 1;
+ u32 dirty_set : 1;
unsigned long private[0];
};
@@ -299,6 +301,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
* @ctx: The validation context.
* @res: The resource.
* @priv_size: Size of private, additional metadata.
+ * @dirty: Whether to change dirty status.
* @p_node: Output pointer of additional metadata address.
* @first_usage: Whether this was the first time this resource was seen.
*
@@ -307,6 +310,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage)
{
@@ -321,8 +325,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
if (!node) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
return -ENOMEM;
}
@@ -358,6 +361,11 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
}
out_fill:
+ if (dirty) {
+ node->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+ }
if (first_usage)
*first_usage = node->first_usage;
if (p_node)
@@ -367,6 +375,29 @@ out_fill:
}
/**
+ * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @dirty: Dirty information VMW_RES_DIRTY_XX
+ */
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty)
+{
+ struct vmw_validation_res_node *val;
+
+ if (!dirty)
+ return;
+
+ val = container_of(val_private, typeof(*val), private);
+ val->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+}
+
+/**
* vmw_validation_res_switch_backup - Register a backup MOB switch during
* validation.
* @ctx: The validation context.
@@ -450,15 +481,23 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
struct vmw_validation_res_node *val;
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
-
- list_for_each_entry(val, &ctx->resource_list, head) {
- if (val->reserved)
- vmw_resource_unreserve(val->res,
- !backoff &&
- val->switching_backup,
- val->new_backup,
- val->new_backup_offset);
- }
+ if (backoff)
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ false, false, false,
+ NULL, 0);
+ }
+ else
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ val->dirty_set,
+ val->dirty,
+ val->switching_backup,
+ val->new_backup,
+ val->new_backup_offset);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 3b396fea40d7..523f6ac5c335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -33,6 +33,10 @@
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#define VMW_RES_DIRTY_NONE 0
+#define VMW_RES_DIRTY_SET BIT(0)
+#define VMW_RES_DIRTY_CLEAR BIT(1)
+
/**
* struct vmw_validation_mem - Custom interface to provide memory reservations
* for the validation code.
@@ -237,6 +241,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage);
void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
@@ -261,4 +266,6 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty);
#endif
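
The VMW_RES_DIRTY_* flags above feed the two validation-node bits added earlier: dirty_set records that a state was requested at all, dirty records which one, and later requests intentionally overwrite earlier ones. A minimal stand-alone sketch of that bookkeeping, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

#define RES_DIRTY_NONE  0u
#define RES_DIRTY_SET   (1u << 0)
#define RES_DIRTY_CLEAR (1u << 1)

/* Mimics the two bookkeeping bits carried per validation node. */
struct node {
	uint32_t dirty     : 1;  /* desired dirty state */
	uint32_t dirty_set : 1;  /* whether any state was requested */
};

static void set_dirty(struct node *n, uint32_t dirty)
{
	if (!dirty)
		return;                 /* RES_DIRTY_NONE: leave untouched */
	n->dirty_set = 1;
	/* Later callers intentionally overwrite earlier requests. */
	n->dirty = (dirty & RES_DIRTY_SET) ? 1 : 0;
}

int main(void)
{
	struct node n = { 0 };

	set_dirty(&n, RES_DIRTY_NONE);   /* no-op */
	set_dirty(&n, RES_DIRTY_SET);    /* dirty_set=1, dirty=1 */
	set_dirty(&n, RES_DIRTY_CLEAR);  /* dirty_set=1, dirty=0 */
	printf("dirty_set=%u dirty=%u\n", n.dirty_set, n.dirty);
	return 0;
}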
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3e78a832d7f9..84aa4d61dc42 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
drm_kms_helper_poll_fini(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
front_info->drm_info = NULL;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 53c376d55fcf..a24548489dde 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
static int gem_mmap_obj(struct xen_gem_object *xen_obj,
struct vm_area_struct *vma)
{
- unsigned long addr = vma->vm_start;
- int i;
+ int ret;
/*
* clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -252,18 +251,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
* FIXME: as we insert all the pages now then no .fault handler must
* be called, so don't provide one
*/
- for (i = 0; i < xen_obj->num_pages; i++) {
- int ret;
-
- ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
- if (ret < 0) {
- DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
- return ret;
- }
+ ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+ if (ret < 0)
+ DRM_ERROR("Failed to map pages into vma: %d\n", ret);
- addr += PAGE_SIZE;
- }
- return 0;
+ return ret;
}
int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 4ca0cdfa6b33..c3c390ca3690 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -231,6 +231,16 @@ config HID_COUGAR
Supported devices:
- Cougar 500k Gaming Keyboard
+config HID_MACALLY
+ tristate "Macally devices"
+ depends on HID
+ help
+ Support for Macally devices that are not fully compliant with the
+ HID standard.
+
+ Supported devices:
+ - Macally ikey keyboard
+
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
@@ -511,6 +521,7 @@ config HID_LOGITECH
config HID_LOGITECH_DJ
tristate "Logitech Unifying receivers full support"
+ depends on USB_HID
depends on HIDRAW
depends on HID_LOGITECH
select HID_LOGITECH_HIDPP
@@ -1003,6 +1014,22 @@ config HID_UDRAW_PS3
Say Y here if you want to use the THQ uDraw gaming tablet for
the PS3.
+config HID_U2FZERO
+ tristate "U2F Zero LED and RNG support"
+ depends on USB_HID
+ depends on LEDS_CLASS
+ depends on HW_RANDOM
+ help
+ Support for the LED of the U2F Zero device.
+
+ U2F Zero supports custom commands for blinking the LED
+ and getting data from the internal hardware RNG.
+ The internal hardware can be used to feed the entropy pool.
+
+ U2F Zero only supports blinking its LED, so this driver doesn't
+ allow setting the brightness to anything but 1, which will
+ trigger a single blink and immediately reset back to 0.
+
config HID_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 170163b41303..cc5d827c9164 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
+obj-$(CONFIG_HID_MACALLY) += hid-macally.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
obj-$(CONFIG_HID_MALTRON) += hid-maltron.o
obj-$(CONFIG_HID_MAYFLASH) += hid-mf.o
@@ -109,6 +110,7 @@ obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
+obj-$(CONFIG_HID_U2FZERO) += hid-u2fzero.o
hid-uclogic-objs := hid-uclogic-core.o \
hid-uclogic-rdesc.o \
hid-uclogic-params.o
@@ -134,3 +136,4 @@ obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID) += i2c-hid/
obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 860e21ec6a49..92387fc0bf18 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
+#include <linux/async.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -218,13 +219,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
* Add a usage to the temporary parser table.
*/
-static int hid_add_usage(struct hid_parser *parser, unsigned usage)
+static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
+ parser->local.usage_size[parser->local.usage_index] = size;
parser->local.collection_index[parser->local.usage_index] =
parser->collection_stack_ptr ?
parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
@@ -486,10 +488,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
- return hid_add_usage(parser, data);
+ return hid_add_usage(parser, data, item->size);
case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
@@ -498,9 +497,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
parser->local.usage_minimum = data;
return 0;
@@ -511,9 +507,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
return 0;
}
- if (item->size <= 2)
- data = (parser->global.usage_page << 16) + data;
-
count = data - parser->local.usage_minimum;
if (count + parser->local.usage_index >= HID_MAX_USAGES) {
/*
@@ -533,7 +526,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
for (n = parser->local.usage_minimum; n <= data; n++)
- if (hid_add_usage(parser, n)) {
+ if (hid_add_usage(parser, n, item->size)) {
dbg_hid("hid_add_usage failed\n");
return -1;
}
@@ -548,6 +541,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
/*
+ * Concatenate Usage Pages into Usages where relevant:
+ * As per specification, 6.2.2.8: "When the parser encounters a main item it
+ * concatenates the last declared Usage Page with a Usage to form a complete
+ * usage value."
+ */
+
+static void hid_concatenate_usage_page(struct hid_parser *parser)
+{
+ int i;
+
+ for (i = 0; i < parser->local.usage_index; i++)
+ if (parser->local.usage_size[i] <= 2)
+ parser->local.usage[i] += parser->global.usage_page << 16;
+}
+
+/*
* Process a main item.
*/
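
The new hid_concatenate_usage_page() applies HID spec 6.2.2.8 at Main-item time: usages declared with a 1- or 2-byte Usage item get the last Usage Page OR-ed into their upper 16 bits, while 4-byte extended usages are left untouched. A stand-alone sketch of that rule with made-up values:

#include <stdint.h>
#include <stdio.h>

#define HID_MAX_USAGES_DEMO 4

/* Toy parser state: collected usages plus the byte size of each Usage item. */
struct demo_local {
	uint32_t usage[HID_MAX_USAGES_DEMO];
	uint8_t  usage_size[HID_MAX_USAGES_DEMO];
	unsigned usage_index;
};

/* Same rule as hid_concatenate_usage_page(): only 8/16-bit usages receive
 * the Usage Page in their upper 16 bits; extended 32-bit usages already
 * carry their page. */
static void concat_usage_page(struct demo_local *local, uint32_t usage_page)
{
	for (unsigned i = 0; i < local->usage_index; i++)
		if (local->usage_size[i] <= 2)
			local->usage[i] += usage_page << 16;
}

int main(void)
{
	struct demo_local local = {
		.usage       = { 0x0030, 0x000c0238 }, /* X, plus an extended AC Pan */
		.usage_size  = { 1, 4 },
		.usage_index = 2,
	};

	concat_usage_page(&local, 0x0001);   /* Generic Desktop page */
	printf("0x%08x 0x%08x\n",
	       (unsigned)local.usage[0], (unsigned)local.usage[1]);
	/* prints 0x00010030 0x000c0238 */
	return 0;
}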
@@ -556,6 +565,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int ret;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -765,6 +776,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
__u32 data;
int i;
+ hid_concatenate_usage_page(parser);
+
data = item_udata(item);
switch (item->tag) {
@@ -1624,7 +1637,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
* Implement a generic .request() callback, using .raw_request()
* DO NOT USE in hid drivers directly, but through hid_hw_request instead.
*/
-void __hid_request(struct hid_device *hid, struct hid_report *report,
+int __hid_request(struct hid_device *hid, struct hid_report *report,
int reqtype)
{
char *buf;
@@ -1633,7 +1646,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
- return;
+ return -ENOMEM;
len = hid_report_len(report);
@@ -1650,8 +1663,11 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
if (reqtype == HID_REQ_GET_REPORT)
hid_input_report(hid, report->type, buf, ret, 0);
+ ret = 0;
+
out:
kfree(buf);
+ return ret;
}
EXPORT_SYMBOL_GPL(__hid_request);
@@ -2349,6 +2365,15 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
+ /*
+ * Try loading the module for the device before the add, so that we do
+ * not first have hid-generic binding only to have it replaced
+ * immediately afterwards with a specialized driver.
+ */
+ if (!current_is_async())
+ request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
+ hdev->group, hdev->vendor, hdev->product);
+
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index adce58f24f76..1c8c72093b5a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_CP2112 0xea90
+#define USB_DEVICE_ID_U2F_ZERO 0x8acf
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -762,8 +763,12 @@
#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
+#define USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER 0xc51b
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER 0xc52b
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -1034,6 +1039,7 @@
#define USB_DEVICE_ID_SINO_LITE_CONTROLLER 0x3008
#define USB_VENDOR_ID_SOLID_YEAR 0x060b
+#define USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD 0x0001
#define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD 0x500a
#define USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD 0x700a
@@ -1083,7 +1089,6 @@
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
-#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b607286a0bc8..abdb01879caa 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1051,6 +1051,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
case 0x28c: map_key_clear(KEY_SEND); break;
+ case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
@@ -1557,52 +1559,71 @@ static void hidinput_close(struct input_dev *dev)
hid_hw_close(hid);
}
-static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+static bool __hidinput_change_resolution_multipliers(struct hid_device *hid,
+ struct hid_report *report, bool use_logical_max)
{
- struct hid_report_enum *rep_enum;
- struct hid_report *rep;
struct hid_usage *usage;
+ bool update_needed = false;
int i, j;
- rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
- list_for_each_entry(rep, &rep_enum->report_list, list) {
- bool update_needed = false;
+ if (report->maxfield == 0)
+ return false;
- if (rep->maxfield == 0)
- continue;
+ /*
+ * If we have more than one feature within this report we
+ * need to fill in the bits from the others before we can
+ * overwrite the ones for the Resolution Multiplier.
+ */
+ if (report->maxfield > 1) {
+ hid_hw_request(hid, report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+ }
- /*
- * If we have more than one feature within this report we
- * need to fill in the bits from the others before we can
- * overwrite the ones for the Resolution Multiplier.
+ for (i = 0; i < report->maxfield; i++) {
+ __s32 value = use_logical_max ?
+ report->field[i]->logical_maximum :
+ report->field[i]->logical_minimum;
+
+ /* There is no good reason for a Resolution
+ * Multiplier to have a count other than 1.
+ * Ignore that case.
*/
- if (rep->maxfield > 1) {
- hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
- hid_hw_wait(hid);
- }
+ if (report->field[i]->report_count != 1)
+ continue;
- for (i = 0; i < rep->maxfield; i++) {
- __s32 logical_max = rep->field[i]->logical_maximum;
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ usage = &report->field[i]->usage[j];
- /* There is no good reason for a Resolution
- * Multiplier to have a count other than 1.
- * Ignore that case.
- */
- if (rep->field[i]->report_count != 1)
+ if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
continue;
- for (j = 0; j < rep->field[i]->maxusage; j++) {
- usage = &rep->field[i]->usage[j];
+ report->field[i]->value[j] = value;
+ update_needed = true;
+ }
+ }
+
+ return update_needed;
+}
- if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
- continue;
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ int ret;
- *rep->field[i]->value = logical_max;
- update_needed = true;
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ bool update_needed = __hidinput_change_resolution_multipliers(hid,
+ rep, true);
+
+ if (update_needed) {
+ ret = __hid_request(hid, rep, HID_REQ_SET_REPORT);
+ if (ret) {
+ __hidinput_change_resolution_multipliers(hid,
+ rep, false);
+ return;
}
}
- if (update_needed)
- hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
}
/* refresh our structs */
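
The reworked hidinput_change_resolution_multipliers() first fills the Resolution Multiplier fields with logical_maximum and, if the SET_REPORT issued through the now int-returning __hid_request() fails, refills them with logical_minimum so the cached values match what the device is actually using. A hedged sketch of that set-then-roll-back flow with hypothetical helpers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical field: not the hid_field layout, just enough to show the flow. */
struct demo_field {
	int logical_minimum;
	int logical_maximum;
	int value;
};

static bool fill(struct demo_field *f, bool use_max)
{
	int v = use_max ? f->logical_maximum : f->logical_minimum;

	if (f->value == v)
		return false;          /* nothing to send */
	f->value = v;
	return true;                   /* an update is needed */
}

/* Pretend SET_REPORT that fails, standing in for __hid_request(). */
static int send_report(const struct demo_field *f)
{
	(void)f;
	return -5;                     /* I/O-error style failure */
}

int main(void)
{
	struct demo_field f = { .logical_minimum = 0, .logical_maximum = 120 };

	if (fill(&f, true) && send_report(&f) != 0)
		fill(&f, false);       /* roll back so cached state stays honest */

	printf("value=%d\n", f.value); /* 0: device never accepted the multiplier */
	return 0;
}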
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 5d419a95b6c2..36d725fdb199 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -876,8 +876,6 @@ static const struct hid_device_id lg_devices[] = {
.driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER),
.driver_data = LG_RDESC | LG_WIRELESS },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2),
- .driver_data = LG_RDESC | LG_WIRELESS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER),
.driver_data = LG_BAD_RELATIVE_KEYS },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 826fa1e1c8d9..b1e894618eed 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -25,13 +25,14 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/usb.h>
#include <linux/kfifo.h>
+#include <linux/delay.h>
+#include <linux/usb.h> /* For to_usb_interface for kvm extra intf check */
#include <asm/unaligned.h>
#include "hid-ids.h"
#define DJ_MAX_PAIRED_DEVICES 6
-#define DJ_MAX_NUMBER_NOTIFICATIONS 8
+#define DJ_MAX_NUMBER_NOTIFS 8
#define DJ_RECEIVER_INDEX 0
#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6
@@ -74,7 +75,6 @@
/* Device Un-Paired Notification */
#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
-
/* Connection Status Notification */
#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
#define CONNECTION_STATUS_PARAM_STATUS 0x00
@@ -94,12 +94,43 @@
#define REPORT_TYPE_LEDS 0x0E
/* RF Report types bitfield */
-#define STD_KEYBOARD 0x00000002
-#define STD_MOUSE 0x00000004
-#define MULTIMEDIA 0x00000008
-#define POWER_KEYS 0x00000010
-#define MEDIA_CENTER 0x00000100
-#define KBD_LEDS 0x00004000
+#define STD_KEYBOARD BIT(1)
+#define STD_MOUSE BIT(2)
+#define MULTIMEDIA BIT(3)
+#define POWER_KEYS BIT(4)
+#define MEDIA_CENTER BIT(8)
+#define KBD_LEDS BIT(14)
+/* Fake (bitnr > NUMBER_OF_HID_REPORTS) bit to track HID++ capability */
+#define HIDPP BIT_ULL(63)
+
+/* HID++ Device Connected Notification */
+#define REPORT_TYPE_NOTIF_DEVICE_CONNECTED 0x41
+#define HIDPP_PARAM_PROTO_TYPE 0x00
+#define HIDPP_PARAM_DEVICE_INFO 0x01
+#define HIDPP_PARAM_EQUAD_LSB 0x02
+#define HIDPP_PARAM_EQUAD_MSB 0x03
+#define HIDPP_PARAM_27MHZ_DEVID 0x03
+#define HIDPP_DEVICE_TYPE_MASK GENMASK(3, 0)
+#define HIDPP_LINK_STATUS_MASK BIT(6)
+#define HIDPP_MANUFACTURER_MASK BIT(7)
+
+#define HIDPP_DEVICE_TYPE_KEYBOARD 1
+#define HIDPP_DEVICE_TYPE_MOUSE 2
+
+#define HIDPP_SET_REGISTER 0x80
+#define HIDPP_GET_LONG_REGISTER 0x83
+#define HIDPP_REG_CONNECTION_STATE 0x02
+#define HIDPP_REG_PAIRING_INFORMATION 0xB5
+#define HIDPP_PAIRING_INFORMATION 0x20
+#define HIDPP_FAKE_DEVICE_ARRIVAL 0x02
+
+enum recvr_type {
+ recvr_type_dj,
+ recvr_type_hidpp,
+ recvr_type_gaming_hidpp,
+ recvr_type_27mhz,
+ recvr_type_bluetooth,
+};
struct dj_report {
u8 report_id;
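
reports_supported becomes a u64 so the real RF report-type bits (STD_KEYBOARD = BIT(1), STD_MOUSE = BIT(2), ...) can coexist with the fake HIDPP = BIT_ULL(63) capability flag, which sits above any valid report ID. A small stand-alone sketch of that bookkeeping and of the BIT(report) membership test used when forwarding input reports:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)      (1UL << (n))
#define BIT_ULL(n)  (1ULL << (n))

#define STD_KEYBOARD BIT(1)
#define STD_MOUSE    BIT(2)
#define MULTIMEDIA   BIT(3)
/* Fake bit, above any real report ID, used only as a capability flag. */
#define HIDPP        BIT_ULL(63)

int main(void)
{
	uint64_t reports_supported = STD_MOUSE | HIDPP;
	uint8_t report_id = 0x02;      /* incoming RF mouse report */

	/* Same membership test as the input-forwarding path below. */
	if (reports_supported & BIT(report_id))
		printf("forward report 0x%02x to this child device\n", report_id);
	if (reports_supported & HIDPP)
		printf("device also speaks HID++\n");
	return 0;
}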
@@ -108,23 +139,51 @@ struct dj_report {
u8 report_params[DJREPORT_SHORT_LENGTH - 3];
};
+struct hidpp_event {
+ u8 report_id;
+ u8 device_index;
+ u8 sub_id;
+ u8 params[HIDPP_REPORT_LONG_LENGTH - 3U];
+} __packed;
+
struct dj_receiver_dev {
- struct hid_device *hdev;
+ struct hid_device *mouse;
+ struct hid_device *keyboard;
+ struct hid_device *hidpp;
struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
DJ_DEVICE_INDEX_MIN];
+ struct list_head list;
+ struct kref kref;
struct work_struct work;
struct kfifo notif_fifo;
+ unsigned long last_query; /* in jiffies */
+ bool ready;
+ enum recvr_type type;
+ unsigned int unnumbered_application;
spinlock_t lock;
- bool querying_devices;
};
struct dj_device {
struct hid_device *hdev;
struct dj_receiver_dev *dj_receiver_dev;
- u32 reports_supported;
+ u64 reports_supported;
u8 device_index;
};
+#define WORKITEM_TYPE_EMPTY 0
+#define WORKITEM_TYPE_PAIRED 1
+#define WORKITEM_TYPE_UNPAIRED 2
+#define WORKITEM_TYPE_UNKNOWN 255
+
+struct dj_workitem {
+ u8 type; /* WORKITEM_TYPE_* */
+ u8 device_index;
+ u8 device_type;
+ u8 quad_id_msb;
+ u8 quad_id_lsb;
+ u64 reports_supported;
+};
+
/* Keyboard descriptor (1) */
static const char kbd_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (generic Desktop) */
@@ -200,6 +259,131 @@ static const char mse_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Mouse descriptor (2) for 27 MHz receiver, only 8 buttons */
+static const char mse_27mhz_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Mouse descriptor (2) for Bluetooth receiver, low-res hwheel, 12 buttons */
+static const char mse_bluetooth_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x08, /* USAGE_MAX (8) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x08, /* REPORT_COUNT (8) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0xF8, /* LOGICAL_MIN (-2047) */
+ 0x26, 0xFF, 0x07, /* LOGICAL_MAX (2047) */
+ 0x75, 0x0C, /* REPORT_SIZE (12) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x15, 0xF9, /* LOGICAL_MIN (-7) */
+ 0x25, 0x07, /* LOGICAL_MAX (7) */
+ 0x75, 0x04, /* REPORT_SIZE (4) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x09, /* USAGE_MIN (9) */
+ 0x29, 0x0C, /* USAGE_MAX (12) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x95, 0x04, /* REPORT_COUNT (4) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
+/* Gaming Mouse descriptor (2) */
+static const char mse_high_res_descriptor[] = {
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID = 2 */
+ 0x09, 0x01, /* USAGE (pointer) */
+ 0xA1, 0x00, /* COLLECTION (physical) */
+ 0x05, 0x09, /* USAGE_PAGE (buttons) */
+ 0x19, 0x01, /* USAGE_MIN (1) */
+ 0x29, 0x10, /* USAGE_MAX (16) */
+ 0x15, 0x00, /* LOGICAL_MIN (0) */
+ 0x25, 0x01, /* LOGICAL_MAX (1) */
+ 0x95, 0x10, /* REPORT_COUNT (16) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (data var abs) */
+ 0x05, 0x01, /* USAGE_PAGE (generic desktop) */
+ 0x16, 0x01, 0x80, /* LOGICAL_MIN (-32767) */
+ 0x26, 0xFF, 0x7F, /* LOGICAL_MAX (32767) */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x81, 0x06, /* INPUT */
+ 0x15, 0x81, /* LOGICAL_MIN (-127) */
+ 0x25, 0x7F, /* LOGICAL_MAX (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x09, 0x38, /* USAGE (wheel) */
+ 0x81, 0x06, /* INPUT */
+ 0x05, 0x0C, /* USAGE_PAGE(consumer) */
+ 0x0A, 0x38, 0x02, /* USAGE(AC Pan) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x81, 0x06, /* INPUT */
+ 0xC0, /* END_COLLECTION */
+ 0xC0, /* END_COLLECTION */
+};
+
/* Consumer Control descriptor (3) */
static const char consumer_descriptor[] = {
0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
@@ -308,7 +492,7 @@ static const char hidpp_descriptor[] = {
/* Make sure all descriptors are present here */
#define MAX_RDESC_SIZE \
(sizeof(kbd_descriptor) + \
- sizeof(mse_descriptor) + \
+ sizeof(mse_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
sizeof(syscontrol_descriptor) + \
sizeof(media_descriptor) + \
@@ -341,51 +525,160 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
static struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+static void delayedwork_callback(struct work_struct *work);
+
+static LIST_HEAD(dj_hdev_list);
+static DEFINE_MUTEX(dj_hdev_list_lock);
+
+/*
+ * dj/HID++ receivers are really a single logical entity, but for BIOS/Windows
+ * compatibility they have multiple USB interfaces. On HID++ receivers we need
+ * to listen for input reports on both interfaces. The functions below are used
+ * to create a single struct dj_receiver_dev for all interfaces belonging to
+ * a single USB-device / receiver.
+ */
+static struct dj_receiver_dev *dj_find_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type)
+{
+ struct dj_receiver_dev *djrcv_dev;
+ char sep;
+
+ /*
+ * The bluetooth receiver contains a built-in hub and has separate
+ * USB-devices for the keyboard and mouse interfaces.
+ */
+ sep = (type == recvr_type_bluetooth) ? '.' : '/';
+
+ /* Try to find an already-probed interface from the same device */
+ list_for_each_entry(djrcv_dev, &dj_hdev_list, list) {
+ if (djrcv_dev->mouse &&
+ hid_compare_device_paths(hdev, djrcv_dev->mouse, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->keyboard &&
+ hid_compare_device_paths(hdev, djrcv_dev->keyboard, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ if (djrcv_dev->hidpp &&
+ hid_compare_device_paths(hdev, djrcv_dev->hidpp, sep)) {
+ kref_get(&djrcv_dev->kref);
+ return djrcv_dev;
+ }
+ }
+
+ return NULL;
+}
+
+static void dj_release_receiver_dev(struct kref *kref)
+{
+ struct dj_receiver_dev *djrcv_dev = container_of(kref, struct dj_receiver_dev, kref);
+
+ list_del(&djrcv_dev->list);
+ kfifo_free(&djrcv_dev->notif_fifo);
+ kfree(djrcv_dev);
+}
+
+static void dj_put_receiver_dev(struct hid_device *hdev)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ if (djrcv_dev->mouse == hdev)
+ djrcv_dev->mouse = NULL;
+ if (djrcv_dev->keyboard == hdev)
+ djrcv_dev->keyboard = NULL;
+ if (djrcv_dev->hidpp == hdev)
+ djrcv_dev->hidpp = NULL;
+
+ kref_put(&djrcv_dev->kref, dj_release_receiver_dev);
+
+ mutex_unlock(&dj_hdev_list_lock);
+}
+
+static struct dj_receiver_dev *dj_get_receiver_dev(struct hid_device *hdev,
+ enum recvr_type type,
+ unsigned int application,
+ bool is_hidpp)
+{
+ struct dj_receiver_dev *djrcv_dev;
+
+ mutex_lock(&dj_hdev_list_lock);
+
+ djrcv_dev = dj_find_receiver_dev(hdev, type);
+ if (!djrcv_dev) {
+ djrcv_dev = kzalloc(sizeof(*djrcv_dev), GFP_KERNEL);
+ if (!djrcv_dev)
+ goto out;
+
+ INIT_WORK(&djrcv_dev->work, delayedwork_callback);
+ spin_lock_init(&djrcv_dev->lock);
+ if (kfifo_alloc(&djrcv_dev->notif_fifo,
+ DJ_MAX_NUMBER_NOTIFS * sizeof(struct dj_workitem),
+ GFP_KERNEL)) {
+ kfree(djrcv_dev);
+ djrcv_dev = NULL;
+ goto out;
+ }
+ kref_init(&djrcv_dev->kref);
+ list_add_tail(&djrcv_dev->list, &dj_hdev_list);
+ djrcv_dev->last_query = jiffies;
+ djrcv_dev->type = type;
+ }
+
+ if (application == HID_GD_KEYBOARD)
+ djrcv_dev->keyboard = hdev;
+ if (application == HID_GD_MOUSE)
+ djrcv_dev->mouse = hdev;
+ if (is_hidpp)
+ djrcv_dev->hidpp = hdev;
+
+ hid_set_drvdata(hdev, djrcv_dev);
+out:
+ mutex_unlock(&dj_hdev_list_lock);
+ return djrcv_dev;
+}
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
struct dj_device *dj_dev;
unsigned long flags;
spin_lock_irqsave(&djrcv_dev->lock, flags);
- dj_dev = djrcv_dev->paired_dj_devices[dj_report->device_index];
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ dj_dev = djrcv_dev->paired_dj_devices[workitem->device_index];
+ djrcv_dev->paired_dj_devices[workitem->device_index] = NULL;
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
} else {
- dev_err(&djrcv_dev->hdev->dev, "%s: can't destroy a NULL device\n",
+ hid_err(djrcv_dev->hidpp, "%s: can't destroy a NULL device\n",
__func__);
}
}
static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+ struct dj_workitem *workitem)
{
/* Called in delayed work context */
- struct hid_device *djrcv_hdev = djrcv_dev->hdev;
- struct usb_interface *intf = to_usb_interface(djrcv_hdev->dev.parent);
- struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct hid_device *djrcv_hdev = djrcv_dev->hidpp;
struct hid_device *dj_hiddev;
struct dj_device *dj_dev;
+ u8 device_index = workitem->device_index;
+ unsigned long flags;
/* Device index goes from 1 to 6, we need 3 bytes to store the
 * colon, the index, and a null terminator
*/
unsigned char tmpstr[3];
- if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
- SPFUNCTION_DEVICE_LIST_EMPTY) {
- dbg_hid("%s: device list is empty\n", __func__);
- djrcv_dev->querying_devices = false;
- return;
- }
-
- if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* We are the only one ever adding a device, no need to lock */
+ if (djrcv_dev->paired_dj_devices[device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
return;
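
As the comment above explains, all USB interfaces of one receiver share a single dj_receiver_dev: the first interface to probe allocates it, sibling interfaces find it by device path and take a reference, and the last put frees it. A stand-alone sketch of that find-or-create-with-refcount pattern in plain C (no kernel list_head/kref, names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified shared object: one per physical receiver. */
struct receiver {
	char path[32];          /* shared USB-device path prefix */
	int  refcount;
	struct receiver *next;
};

static struct receiver *receivers;  /* would be protected by a mutex */

static struct receiver *get_receiver(const char *path)
{
	struct receiver *r;

	for (r = receivers; r; r = r->next)
		if (strcmp(r->path, path) == 0) {
			r->refcount++;  /* already created by a sibling interface */
			return r;
		}

	r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	snprintf(r->path, sizeof(r->path), "%s", path);
	r->refcount = 1;
	r->next = receivers;
	receivers = r;
	return r;
}

static void put_receiver(struct receiver *r)
{
	if (--r->refcount)
		return;
	/* unlink and free on the last put */
	for (struct receiver **pp = &receivers; *pp; pp = &(*pp)->next)
		if (*pp == r) {
			*pp = r->next;
			break;
		}
	free(r);
}

int main(void)
{
	struct receiver *kbd_if = get_receiver("usb-1.2");  /* keyboard interface probes first */
	struct receiver *hpp_if = get_receiver("usb-1.2");  /* HID++ interface joins the same object */

	printf("shared=%d refcount=%d\n", kbd_if == hpp_if, kbd_if->refcount);
	put_receiver(hpp_if);
	put_receiver(kbd_if);   /* last reference frees the object */
	return 0;
}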
@@ -393,8 +686,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev = hid_allocate_device();
if (IS_ERR(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: hid_allocate_dev failed\n", __func__);
return;
}
@@ -402,48 +694,67 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
dj_hiddev->dev.parent = &djrcv_hdev->dev;
dj_hiddev->bus = BUS_USB;
- dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
- dj_hiddev->product =
- (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]
- << 8) |
- dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
- snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
- "Logitech Unifying Device. Wireless PID:%04x",
- dj_hiddev->product);
-
- dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
-
- usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
- snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
+ dj_hiddev->vendor = djrcv_hdev->vendor;
+ dj_hiddev->product = (workitem->quad_id_msb << 8) |
+ workitem->quad_id_lsb;
+ if (workitem->device_type) {
+ const char *type_str = "Device";
+
+ switch (workitem->device_type) {
+ case 0x01: type_str = "Keyboard"; break;
+ case 0x02: type_str = "Mouse"; break;
+ case 0x03: type_str = "Numpad"; break;
+ case 0x04: type_str = "Presenter"; break;
+ case 0x07: type_str = "Remote Control"; break;
+ case 0x08: type_str = "Trackball"; break;
+ case 0x09: type_str = "Touchpad"; break;
+ }
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Wireless %s PID:%04x",
+ type_str, dj_hiddev->product);
+ } else {
+ snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
+ "Logitech Unifying Device. Wireless PID:%04x",
+ dj_hiddev->product);
+ }
+
+ if (djrcv_dev->type == recvr_type_27mhz)
+ dj_hiddev->group = HID_GROUP_LOGITECH_27MHZ_DEVICE;
+ else
+ dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
+
+ memcpy(dj_hiddev->phys, djrcv_hdev->phys, sizeof(djrcv_hdev->phys));
+ snprintf(tmpstr, sizeof(tmpstr), ":%d", device_index);
strlcat(dj_hiddev->phys, tmpstr, sizeof(dj_hiddev->phys));
dj_dev = kzalloc(sizeof(struct dj_device), GFP_KERNEL);
if (!dj_dev) {
- dev_err(&djrcv_hdev->dev, "%s: failed allocating dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed allocating dj_dev\n", __func__);
goto dj_device_allocate_fail;
}
- dj_dev->reports_supported = get_unaligned_le32(
- dj_report->report_params + DEVICE_PAIRED_RF_REPORT_TYPE);
+ dj_dev->reports_supported = workitem->reports_supported;
dj_dev->hdev = dj_hiddev;
dj_dev->dj_receiver_dev = djrcv_dev;
- dj_dev->device_index = dj_report->device_index;
+ dj_dev->device_index = device_index;
dj_hiddev->driver_data = dj_dev;
- djrcv_dev->paired_dj_devices[dj_report->device_index] = dj_dev;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = dj_dev;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (hid_add_device(dj_hiddev)) {
- dev_err(&djrcv_hdev->dev, "%s: failed adding dj_device\n",
- __func__);
+ hid_err(djrcv_hdev, "%s: failed adding dj_device\n", __func__);
goto hid_add_device_fail;
}
return;
hid_add_device_fail:
- djrcv_dev->paired_dj_devices[dj_report->device_index] = NULL;
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->paired_dj_devices[device_index] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
kfree(dj_dev);
dj_device_allocate_fail:
hid_destroy_device(dj_hiddev);
@@ -454,7 +765,7 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_receiver_dev *djrcv_dev =
container_of(work, struct dj_receiver_dev, work);
- struct dj_report dj_report;
+ struct dj_workitem workitem;
unsigned long flags;
int count;
int retval;
@@ -463,69 +774,231 @@ static void delayedwork_callback(struct work_struct *work)
spin_lock_irqsave(&djrcv_dev->lock, flags);
- count = kfifo_out(&djrcv_dev->notif_fifo, &dj_report,
- sizeof(struct dj_report));
-
- if (count != sizeof(struct dj_report)) {
- dev_err(&djrcv_dev->hdev->dev, "%s: workitem triggered without "
- "notifications available\n", __func__);
+ /*
+ * Since we attach to multiple interfaces, we may get scheduled before
+ * we are bound to the HID++ interface, catch this.
+ */
+ if (!djrcv_dev->ready) {
+ pr_warn("%s: delayedwork queued before hidpp interface was enumerated\n",
+ __func__);
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return;
}
- if (!kfifo_is_empty(&djrcv_dev->notif_fifo)) {
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was "
- "already queued\n", __func__);
- }
+ count = kfifo_out(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+
+ if (count != sizeof(workitem)) {
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
}
+ if (!kfifo_is_empty(&djrcv_dev->notif_fifo))
+ schedule_work(&djrcv_dev->work);
+
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
- switch (dj_report.report_type) {
- case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
- logi_dj_recv_add_djhid_device(djrcv_dev, &dj_report);
+ switch (workitem.type) {
+ case WORKITEM_TYPE_PAIRED:
+ logi_dj_recv_add_djhid_device(djrcv_dev, &workitem);
break;
- case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
- logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
+ case WORKITEM_TYPE_UNPAIRED:
+ logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem);
break;
- default:
- /* A normal report (i. e. not belonging to a pair/unpair notification)
- * arriving here, means that the report arrived but we did not have a
- * paired dj_device associated to the report's device_index, this
- * means that the original "device paired" notification corresponding
- * to this dj_device never arrived to this driver. The reason is that
- * hid-core discards all packets coming from a device while probe() is
- * executing. */
- if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
- /* ok, we don't know the device, just re-ask the
- * receiver for the list of connected devices. */
+ case WORKITEM_TYPE_UNKNOWN:
retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (!retval) {
- /* everything went fine, so just leave */
- break;
- }
- dev_err(&djrcv_dev->hdev->dev,
- "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
+ if (retval) {
+ hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n",
+ __func__, retval);
}
- dbg_hid("%s: unexpected report type\n", __func__);
+ break;
+ case WORKITEM_TYPE_EMPTY:
+ dbg_hid("%s: device list is empty\n", __func__);
+ break;
}
}
+/*
+ * Sometimes we receive reports for which we do not have a paired dj_device
+ * associated with the device_index or report-type to forward the report to.
+ * This means that the original "device paired" notification corresponding
+ * to the dj_device never arrived to this driver. Possible reasons for this are:
+ * 1) hid-core discards all packets coming from a device during probe().
+ * 2) if the receiver is plugged into a KVM switch then the pairing reports
+ * are only forwarded to it if the focus is on this PC.
+ * This function deals with this by re-asking the receiver for the list of
+ * connected devices in the delayed work callback.
+ * This function MUST be called with djrcv->lock held.
+ */
+static void logi_dj_recv_queue_unknown_work(struct dj_receiver_dev *djrcv_dev)
+{
+ struct dj_workitem workitem = { .type = WORKITEM_TYPE_UNKNOWN };
+
+ /* Rate limit queries done because of unhandled reports to 2/sec */
+ if (time_before(jiffies, djrcv_dev->last_query + HZ / 2))
+ return;
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
+
static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_workitem workitem = {
+ .device_index = dj_report->device_index,
+ };
+
+ switch (dj_report->report_type) {
+ case REPORT_TYPE_NOTIF_DEVICE_PAIRED:
+ workitem.type = WORKITEM_TYPE_PAIRED;
+ if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
+ SPFUNCTION_DEVICE_LIST_EMPTY) {
+ workitem.type = WORKITEM_TYPE_EMPTY;
+ break;
+ }
+ /* fall-through */
+ case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED:
+ workitem.quad_id_msb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB];
+ workitem.quad_id_lsb =
+ dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
+ workitem.reports_supported = get_unaligned_le32(
+ dj_report->report_params +
+ DEVICE_PAIRED_RF_REPORT_TYPE);
+ workitem.reports_supported |= HIDPP;
+ if (dj_report->report_type == REPORT_TYPE_NOTIF_DEVICE_UNPAIRED)
+ workitem.type = WORKITEM_TYPE_UNPAIRED;
+ break;
+ default:
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ return;
+ }
- kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
- if (schedule_work(&djrcv_dev->work) == 0) {
- dbg_hid("%s: did not schedule the work item, was already "
- "queued\n", __func__);
+static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_DEVICE_TYPE_MASK;
+ workitem->quad_id_msb = hidpp_report->params[HIDPP_PARAM_EQUAD_MSB];
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_EQUAD_LSB];
+ switch (workitem->device_type) {
+ case REPORT_TYPE_KEYBOARD:
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | MEDIA_CENTER |
+ HIDPP;
+ break;
+ case REPORT_TYPE_MOUSE:
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ }
+}
+
+static void logi_hidpp_dev_conn_notif_27mhz(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report,
+ struct dj_workitem *workitem)
+{
+ workitem->type = WORKITEM_TYPE_PAIRED;
+ workitem->quad_id_lsb = hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID];
+ switch (hidpp_report->device_index) {
+ case 1: /* Index 1 is always a mouse */
+ case 2: /* Index 2 is always a mouse */
+ workitem->device_type = HIDPP_DEVICE_TYPE_MOUSE;
+ workitem->reports_supported |= STD_MOUSE | HIDPP;
+ break;
+ case 3: /* Index 3 is always the keyboard */
+ case 4: /* Index 4 is used for an optional separate numpad */
+ workitem->device_type = HIDPP_DEVICE_TYPE_KEYBOARD;
+ workitem->reports_supported |= STD_KEYBOARD | MULTIMEDIA |
+ POWER_KEYS | HIDPP;
+ break;
+ default:
+ hid_warn(hdev, "%s: unexpected device-index %d", __func__,
+ hidpp_report->device_index);
}
}
+static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ struct hidpp_event *hidpp_report)
+{
+ /* We are called from atomic context (tasklet && djrcv->lock held) */
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ const char *device_type = "UNKNOWN";
+ struct dj_workitem workitem = {
+ .type = WORKITEM_TYPE_EMPTY,
+ .device_index = hidpp_report->device_index,
+ };
+
+ switch (hidpp_report->params[HIDPP_PARAM_PROTO_TYPE]) {
+ case 0x01:
+ device_type = "Bluetooth";
+ /* Bluetooth connect packet contents is the same as (e)QUAD */
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
+ HIDPP_MANUFACTURER_MASK)) {
+ hid_info(hdev, "Non Logitech device connected on slot %d\n",
+ hidpp_report->device_index);
+ workitem.reports_supported &= ~HIDPP;
+ }
+ break;
+ case 0x02:
+ device_type = "27 Mhz";
+ logi_hidpp_dev_conn_notif_27mhz(hdev, hidpp_report, &workitem);
+ break;
+ case 0x03:
+ device_type = "QUAD or eQUAD";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x04:
+ device_type = "eQUAD step 4 DJ";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x05:
+ device_type = "DFU Lite";
+ break;
+ case 0x06:
+ device_type = "eQUAD step 4 Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x07:
+ device_type = "eQUAD step 4 Gaming";
+ break;
+ case 0x08:
+ device_type = "eQUAD step 4 for gamepads";
+ break;
+ case 0x0a:
+ device_type = "eQUAD nano Lite";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ break;
+ case 0x0c:
+ device_type = "eQUAD Lightspeed";
+ logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
+ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ }
+
+ if (workitem.type == WORKITEM_TYPE_EMPTY) {
+ hid_warn(hdev,
+ "unusable device of type %s (0x%02x) connected on slot %d",
+ device_type,
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+ return;
+ }
+
+ hid_info(hdev, "device of type %s (0x%02x) connected on slot %d",
+ device_type, hidpp_report->params[HIDPP_PARAM_PROTO_TYPE],
+ hidpp_report->device_index);
+
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ schedule_work(&djrcv_dev->work);
+}
+
static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
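
logi_dj_recv_queue_unknown_work() above throttles the "unknown report, re-query the receiver" fallback to at most two queries per second by comparing jiffies with last_query + HZ/2. A user-space sketch of the same throttle using a monotonic clock (helper names are made up):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static struct timespec last_query;

/* Allow at most one query per half second, like last_query + HZ/2. */
static bool query_allowed(void)
{
	struct timespec now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ms = (now.tv_sec - last_query.tv_sec) * 1000 +
		     (now.tv_nsec - last_query.tv_nsec) / 1000000;
	if (elapsed_ms < 500)
		return false;           /* too soon: drop the request */
	last_query = now;
	return true;
}

int main(void)
{
	/* Two back-to-back unknown reports: only the first triggers a query. */
	printf("first:  %s\n", query_allowed() ? "query" : "skip");
	printf("second: %s\n", query_allowed() ? "query" : "skip");
	return 0;
}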
@@ -552,8 +1025,8 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
- struct dj_report *dj_report)
+static void logi_dj_recv_forward_dj(struct dj_receiver_dev *djrcv_dev,
+ struct dj_report *dj_report)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
struct dj_device *dj_device;
@@ -573,18 +1046,48 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
}
}
-static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data,
- int size)
+static void logi_dj_recv_forward_report(struct dj_device *dj_dev, u8 *data,
+ int size)
{
/* We are called from atomic context (tasklet && djrcv->lock held) */
if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1))
dbg_hid("hid_input_report error\n");
}
+static void logi_dj_recv_forward_input_report(struct hid_device *hdev,
+ u8 *data, int size)
+{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ struct dj_device *dj_dev;
+ unsigned long flags;
+ u8 report = data[0];
+ int i;
+
+ if (report > REPORT_TYPE_RFREPORT_LAST) {
+ hid_err(hdev, "Unexpected input report number %d\n", report);
+ return;
+ }
+
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ dj_dev = djrcv_dev->paired_dj_devices[i];
+ if (dj_dev && (dj_dev->reports_supported & BIT(report))) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ return;
+ }
+ }
+
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
+ dbg_hid("No dj-devs handling input report number %d\n", report);
+}
+
static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct hid_report *report;
struct hid_report_enum *output_report_enum;
u8 *data = (u8 *)(&dj_report->device_index);
@@ -594,7 +1097,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
if (!report) {
- dev_err(&hdev->dev, "%s: unable to find dj report\n", __func__);
+ hid_err(hdev, "%s: unable to find dj report\n", __func__);
return -ENODEV;
}
@@ -606,14 +1109,40 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
return 0;
}
+static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
+{
+ const u8 template[] = {REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_SET_REGISTER,
+ HIDPP_REG_CONNECTION_STATE,
+ HIDPP_FAKE_DEVICE_ARRIVAL,
+ 0x00, 0x00};
+ u8 *hidpp_report;
+ int retval;
+
+ hidpp_report = kmemdup(template, sizeof(template), GFP_KERNEL);
+ if (!hidpp_report)
+ return -ENOMEM;
+
+ retval = hid_hw_raw_request(djrcv_dev->hidpp,
+ REPORT_ID_HIDPP_SHORT,
+ hidpp_report, sizeof(template),
+ HID_OUTPUT_REPORT,
+ HID_REQ_SET_REPORT);
+
+ kfree(hidpp_report);
+ return 0;
+}
+
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
struct dj_report *dj_report;
int retval;
- /* no need to protect djrcv_dev->querying_devices */
- if (djrcv_dev->querying_devices)
- return 0;
+ djrcv_dev->last_query = jiffies;
+
+ if (djrcv_dev->type != recvr_type_dj)
+ return logi_dj_recv_query_hidpp_devices(djrcv_dev);
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
@@ -630,27 +1159,33 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct hid_device *hdev = djrcv_dev->hdev;
+ struct hid_device *hdev = djrcv_dev->hidpp;
struct dj_report *dj_report;
u8 *buf;
- int retval;
+ int retval = 0;
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
- dj_report->report_id = REPORT_ID_DJ_SHORT;
- dj_report->device_index = 0xFF;
- dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
- /*
- * Ugly sleep to work around a USB 3.0 bug when the receiver is still
- * processing the "switch-to-dj" command while we send an other command.
- * 50 msec should gives enough time to the receiver to be ready.
- */
- msleep(50);
+ if (djrcv_dev->type == recvr_type_dj) {
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] =
+ (u8)timeout;
+
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+
+ /*
+ * Ugly sleep to work around a USB 3.0 bug when the receiver is
+ * still processing the "switch-to-dj" command while we send
+ * another command.
+ * 50 msec should give the receiver enough time to be ready.
+ */
+ msleep(50);
+ }
/*
* Magical bits to set up hidpp notifications when the dj devices
@@ -682,22 +1217,28 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_ll_open(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
return 0;
}
static void logi_dj_ll_close(struct hid_device *hid)
{
- dbg_hid("%s:%s\n", __func__, hid->phys);
+ dbg_hid("%s: %s\n", __func__, hid->phys);
}
/*
* Register 0xB5 is "pairing information". It is solely intended for the
* receiver, so do not overwrite the device index.
*/
-static u8 unifying_pairing_query[] = {0x10, 0xff, 0x83, 0xb5};
-static u8 unifying_pairing_answer[] = {0x11, 0xff, 0x83, 0xb5};
+static u8 unifying_pairing_query[] = { REPORT_ID_HIDPP_SHORT,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
+static u8 unifying_pairing_answer[] = { REPORT_ID_HIDPP_LONG,
+ HIDPP_RECEIVER_INDEX,
+ HIDPP_GET_LONG_REGISTER,
+ HIDPP_REG_PAIRING_INFORMATION };
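For reference, the short HID++ 1.0 frame these arrays describe is seven bytes on the wire (HIDPP_REPORT_SHORT_LENGTH): report id, device index, sub-id, register address and three parameter bytes. A small standalone sketch using the literal values of the pairing query above (formerly written as {0x10, 0xff, 0x83, 0xb5}):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Pairing-information query laid out as a full 7-byte short frame;
         * the trailing three parameter bytes are unused for this request. */
        const uint8_t query[7] = {
                0x10,   /* REPORT_ID_HIDPP_SHORT */
                0xff,   /* HIDPP_RECEIVER_INDEX: address the receiver itself */
                0x83,   /* HIDPP_GET_LONG_REGISTER */
                0xb5,   /* HIDPP_REG_PAIRING_INFORMATION */
                0x00, 0x00, 0x00,
        };
        int i;

        for (i = 0; i < 7; i++)
                printf("%02x ", query[i]);
        printf("\n");
        return 0;
}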
static int logi_dj_ll_raw_request(struct hid_device *hid,
unsigned char reportnum, __u8 *buf,
@@ -721,13 +1262,23 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
buf[4] = (buf[4] & 0xf0) | (djdev->device_index - 1);
else
buf[1] = djdev->device_index;
- return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf,
+ return hid_hw_raw_request(djrcv_dev->hidpp, reportnum, buf,
count, report_type, reqtype);
}
if (buf[0] != REPORT_TYPE_LEDS)
return -EINVAL;
+ if (djrcv_dev->type != recvr_type_dj && count >= 2) {
+ if (!djrcv_dev->keyboard) {
+ hid_warn(hid, "Received REPORT_TYPE_LEDS request before the keyboard interface was enumerated\n");
+ return 0;
+ }
+ /* usbhid overrides the report ID and ignores the first byte */
+ return hid_hw_raw_request(djrcv_dev->keyboard, 0, buf, count,
+ report_type, reqtype);
+ }
+
out_buf = kzalloc(DJREPORT_SHORT_LENGTH, GFP_ATOMIC);
if (!out_buf)
return -ENOMEM;
@@ -739,7 +1290,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
out_buf[1] = djdev->device_index;
memcpy(out_buf + 2, buf, count);
- ret = hid_hw_raw_request(djrcv_dev->hdev, out_buf[0], out_buf,
+ ret = hid_hw_raw_request(djrcv_dev->hidpp, out_buf[0], out_buf,
DJREPORT_SHORT_LENGTH, report_type, reqtype);
kfree(out_buf);
@@ -769,41 +1320,55 @@ static int logi_dj_ll_parse(struct hid_device *hid)
return -ENOMEM;
if (djdev->reports_supported & STD_KEYBOARD) {
- dbg_hid("%s: sending a kbd descriptor, reports_supported: %x\n",
+ dbg_hid("%s: sending a kbd descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
- dbg_hid("%s: sending a mouse descriptor, reports_supported: "
- "%x\n", __func__, djdev->reports_supported);
- rdcat(rdesc, &rsize, mse_descriptor, sizeof(mse_descriptor));
+ dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
+ __func__, djdev->reports_supported);
+ if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
+ rdcat(rdesc, &rsize, mse_high_res_descriptor,
+ sizeof(mse_high_res_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
+ rdcat(rdesc, &rsize, mse_27mhz_descriptor,
+ sizeof(mse_27mhz_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_bluetooth)
+ rdcat(rdesc, &rsize, mse_bluetooth_descriptor,
+ sizeof(mse_bluetooth_descriptor));
+ else
+ rdcat(rdesc, &rsize, mse_descriptor,
+ sizeof(mse_descriptor));
}
if (djdev->reports_supported & MULTIMEDIA) {
- dbg_hid("%s: sending a multimedia report descriptor: %x\n",
+ dbg_hid("%s: sending a multimedia report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, consumer_descriptor, sizeof(consumer_descriptor));
}
if (djdev->reports_supported & POWER_KEYS) {
- dbg_hid("%s: sending a power keys report descriptor: %x\n",
+ dbg_hid("%s: sending a power keys report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, syscontrol_descriptor, sizeof(syscontrol_descriptor));
}
if (djdev->reports_supported & MEDIA_CENTER) {
- dbg_hid("%s: sending a media center report descriptor: %x\n",
+ dbg_hid("%s: sending a media center report descriptor: %llx\n",
__func__, djdev->reports_supported);
rdcat(rdesc, &rsize, media_descriptor, sizeof(media_descriptor));
}
if (djdev->reports_supported & KBD_LEDS) {
- dbg_hid("%s: need to send kbd leds report descriptor: %x\n",
+ dbg_hid("%s: need to send kbd leds report descriptor: %llx\n",
__func__, djdev->reports_supported);
}
- rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor));
+ if (djdev->reports_supported & HIDPP) {
+ rdcat(rdesc, &rsize, hidpp_descriptor,
+ sizeof(hidpp_descriptor));
+ }
retval = hid_parse_report(hid, rdesc, rsize);
kfree(rdesc);
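The child descriptor above is assembled by appending per-function fragments with rdcat(). The helper itself is defined earlier in hid-logitech-dj.c and is not part of this hunk; a minimal standalone sketch with the same call shape, using made-up fragment bytes:

#include <stdio.h>
#include <string.h>

/* Append a fragment behind the already-assembled descriptor and grow the
 * running size; this mirrors the rdcat() call sites above. */
static void rdcat_sketch(unsigned char *rdesc, unsigned int *rsize,
                         const unsigned char *fragment, unsigned int size)
{
        memcpy(rdesc + *rsize, fragment, size);
        *rsize += size;
}

int main(void)
{
        unsigned char rdesc[16];
        unsigned int rsize = 0;
        const unsigned char frag_a[] = { 0x05, 0x01 }; /* illustrative bytes */
        const unsigned char frag_b[] = { 0x09, 0x06 };

        rdcat_sketch(rdesc, &rsize, frag_a, sizeof(frag_a));
        rdcat_sketch(rdesc, &rsize, frag_b, sizeof(frag_b));
        printf("assembled %u bytes\n", rsize);  /* prints 4 */
        return 0;
}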
@@ -866,7 +1431,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
* so ignore those reports too.
*/
if (dj_report->device_index != DJ_RECEIVER_INDEX)
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ hid_err(hdev, "%s: invalid device index:%d\n",
__func__, dj_report->device_index);
return false;
}
@@ -893,7 +1458,7 @@ static int logi_dj_dj_event(struct hid_device *hdev,
}
break;
default:
- logi_dj_recv_forward_report(djrcv_dev, dj_report);
+ logi_dj_recv_forward_dj(djrcv_dev, dj_report);
}
out:
@@ -907,9 +1472,10 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
int size)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
- struct dj_report *dj_report = (struct dj_report *) data;
+ struct hidpp_event *hidpp_report = (struct hidpp_event *) data;
+ struct dj_device *dj_dev;
unsigned long flags;
- u8 device_index = dj_report->device_index;
+ u8 device_index = hidpp_report->device_index;
if (device_index == HIDPP_RECEIVER_INDEX) {
/* special case were the device wants to know its unifying
@@ -937,21 +1503,42 @@ static int logi_dj_hidpp_event(struct hid_device *hdev,
* This driver can ignore safely the receiver notifications,
* so ignore those reports too.
*/
- dev_err(&hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
+ hid_err(hdev, "%s: invalid device index:%d\n", __func__,
+ hidpp_report->device_index);
return false;
}
spin_lock_irqsave(&djrcv_dev->lock, flags);
- if (!djrcv_dev->paired_dj_devices[device_index])
- /* received an event for an unknown device, bail out */
- goto out;
+ dj_dev = djrcv_dev->paired_dj_devices[device_index];
+
+ /*
+ * With 27 MHz receivers we do not get an explicit unpair event, so
+ * remove the old device if the user has paired a *different* device.
+ */
+ if (djrcv_dev->type == recvr_type_27mhz && dj_dev &&
+ hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED &&
+ hidpp_report->params[HIDPP_PARAM_PROTO_TYPE] == 0x02 &&
+ hidpp_report->params[HIDPP_PARAM_27MHZ_DEVID] !=
+ dj_dev->hdev->product) {
+ struct dj_workitem workitem = {
+ .device_index = hidpp_report->device_index,
+ .type = WORKITEM_TYPE_UNPAIRED,
+ };
+ kfifo_in(&djrcv_dev->notif_fifo, &workitem, sizeof(workitem));
+ /* logi_hidpp_recv_queue_notif will queue the work */
+ dj_dev = NULL;
+ }
- logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index],
- data, size);
+ if (dj_dev) {
+ logi_dj_recv_forward_report(dj_dev, data, size);
+ } else {
+ if (hidpp_report->sub_id == REPORT_TYPE_NOTIF_DEVICE_CONNECTED)
+ logi_hidpp_recv_queue_notif(hdev, hidpp_report);
+ else
+ logi_dj_recv_queue_unknown_work(djrcv_dev);
+ }
-out:
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
return false;
@@ -961,112 +1548,176 @@ static int logi_dj_raw_event(struct hid_device *hdev,
struct hid_report *report, u8 *data,
int size)
{
+ struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
dbg_hid("%s, size:%d\n", __func__, size);
+ if (!djrcv_dev)
+ return 0;
+
+ if (!hdev->report_enum[HID_INPUT_REPORT].numbered) {
+
+ if (djrcv_dev->unnumbered_application == HID_GD_KEYBOARD) {
+ /*
+ * For the keyboard, we can reuse the same report by
+ * using the second byte which is constant in the USB
+ * HID report descriptor.
+ */
+ data[1] = data[0];
+ data[0] = REPORT_TYPE_KEYBOARD;
+
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
+ /* restore previous state */
+ data[0] = data[1];
+ data[1] = 0;
+ }
+ /* The 27 MHz mouse-only receiver sends unnumbered mouse data */
+ if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
+ size == 6) {
+ u8 mouse_report[7];
+
+ /* Prepend report id */
+ mouse_report[0] = REPORT_TYPE_MOUSE;
+ memcpy(mouse_report + 1, data, 6);
+ logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
+ }
+
+ return false;
+ }
+
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
if (size != DJREPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ hid_err(hdev, "Short DJ report bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_dj_event(hdev, report, data, size);
+ case REPORT_ID_DJ_LONG:
+ if (size != DJREPORT_LONG_LENGTH) {
+ hid_err(hdev, "Long DJ report bad size (%d)", size);
return false;
}
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
if (size != HIDPP_REPORT_SHORT_LENGTH) {
- dev_err(&hdev->dev,
- "Short HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Short HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
if (size != HIDPP_REPORT_LONG_LENGTH) {
- dev_err(&hdev->dev,
- "Long HID++ report of bad size (%d)", size);
+ hid_err(hdev, "Long HID++ report bad size (%d)", size);
return false;
}
return logi_dj_hidpp_event(hdev, report, data, size);
}
+ logi_dj_recv_forward_input_report(hdev, data, size);
+
return false;
}
static int logi_dj_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
struct dj_receiver_dev *djrcv_dev;
+ struct usb_interface *intf;
+ unsigned int no_dj_interfaces = 0;
+ bool has_hidpp = false;
+ unsigned long flags;
int retval;
- dbg_hid("%s called for ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Call into usbhid to fetch the HID descriptors of the current
+ * interface, then call into hid-core to parse the fetched
+ * descriptors.
+ */
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "%s: parse failed\n", __func__);
+ return retval;
+ }
- /* Ignore interfaces 0 and 1, they will not carry any data, dont create
- * any hid_device for them */
- if (intf->cur_altsetting->desc.bInterfaceNumber !=
- LOGITECH_DJ_INTERFACE_NUMBER) {
- dbg_hid("%s: ignoring ifnum %d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
+ /*
+ * Some KVMs add an extra interface for e.g. mouse emulation. If we
+ * treat these as logitech-dj interfaces then this causes input events
+ * reported through this extra interface to not be reported correctly.
+ * To avoid this, we treat these as generic-hid devices.
+ */
+ switch (id->driver_data) {
+ case recvr_type_dj: no_dj_interfaces = 3; break;
+ case recvr_type_hidpp: no_dj_interfaces = 2; break;
+ case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
+ case recvr_type_27mhz: no_dj_interfaces = 2; break;
+ case recvr_type_bluetooth: no_dj_interfaces = 2; break;
+ }
+ if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ intf = to_usb_interface(hdev->dev.parent);
+ if (intf && intf->altsetting->desc.bInterfaceNumber >=
+ no_dj_interfaces) {
+ hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+ }
+
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+
+ /* no input reports, bail out */
+ if (list_empty(&rep_enum->report_list))
return -ENODEV;
+
+ /*
+ * Check for the HID++ application.
+ * Note: we should theoretically check for HID++ and DJ
+ * collections, but this will do.
+ */
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ if (rep->application == 0xff000001)
+ has_hidpp = true;
}
- /* Treat interface 2 */
+ /*
+ * Ignore interfaces without DJ/HID++ collection, they will not carry
+ * any data, so don't create any hid_device for them.
+ */
+ if (!has_hidpp && id->driver_data == recvr_type_dj)
+ return -ENODEV;
- djrcv_dev = kzalloc(sizeof(struct dj_receiver_dev), GFP_KERNEL);
+ /* get the current application attached to the node */
+ rep = list_first_entry(&rep_enum->report_list, struct hid_report, list);
+ djrcv_dev = dj_get_receiver_dev(hdev, id->driver_data,
+ rep->application, has_hidpp);
if (!djrcv_dev) {
- dev_err(&hdev->dev,
- "%s:failed allocating dj_receiver_dev\n", __func__);
+ hid_err(hdev, "%s: dj_get_receiver_dev failed\n", __func__);
return -ENOMEM;
}
- djrcv_dev->hdev = hdev;
- INIT_WORK(&djrcv_dev->work, delayedwork_callback);
- spin_lock_init(&djrcv_dev->lock);
- if (kfifo_alloc(&djrcv_dev->notif_fifo,
- DJ_MAX_NUMBER_NOTIFICATIONS * sizeof(struct dj_report),
- GFP_KERNEL)) {
- dev_err(&hdev->dev,
- "%s:failed allocating notif_fifo\n", __func__);
- kfree(djrcv_dev);
- return -ENOMEM;
- }
- hid_set_drvdata(hdev, djrcv_dev);
- /* Call to usbhid to fetch the HID descriptors of interface 2 and
- * subsequently call to the hid/hid-core to parse the fetched
- * descriptors, this will in turn create the hidraw and hiddev nodes
- * for interface 2 of the receiver */
- retval = hid_parse(hdev);
- if (retval) {
- dev_err(&hdev->dev,
- "%s:parse of interface 2 failed\n", __func__);
- goto hid_parse_fail;
- }
-
- if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
- 0, DJREPORT_SHORT_LENGTH - 1)) {
- retval = -ENODEV;
- goto hid_parse_fail;
- }
+ if (!rep_enum->numbered)
+ djrcv_dev->unnumbered_application = rep->application;
/* Starts the usb device and connects to upper interfaces hiddev and
* hidraw */
- retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ retval = hid_hw_start(hdev, HID_CONNECT_HIDRAW|HID_CONNECT_HIDDEV);
if (retval) {
- dev_err(&hdev->dev,
- "%s:hid_hw_start returned error\n", __func__);
+ hid_err(hdev, "%s: hid_hw_start returned error\n", __func__);
goto hid_hw_start_fail;
}
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
- if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
- __func__, retval);
- goto switch_to_dj_mode_fail;
+ if (has_hidpp) {
+ retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ __func__, retval);
+ goto switch_to_dj_mode_fail;
+ }
}
/* This is enabling the polling urb on the IN endpoint */
retval = hid_hw_open(hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ hid_err(hdev, "%s: hid_hw_open returned error:%d\n",
__func__, retval);
goto llopen_failed;
}
@@ -1074,11 +1725,16 @@ static int logi_dj_probe(struct hid_device *hdev,
/* Allow incoming packets to arrive: */
hid_device_io_start(hdev);
- retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (retval < 0) {
- dev_err(&hdev->dev, "%s:logi_dj_recv_query_paired_devices "
- "error:%d\n", __func__, retval);
- goto logi_dj_recv_query_paired_devices_failed;
+ if (has_hidpp) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = true;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (retval < 0) {
+ hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
+ __func__, retval);
+ goto logi_dj_recv_query_paired_devices_failed;
+ }
}
return retval;
@@ -1091,12 +1747,8 @@ switch_to_dj_mode_fail:
hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
return retval;
-
}
#ifdef CONFIG_PM
@@ -1105,10 +1757,12 @@ static int logi_dj_reset_resume(struct hid_device *hdev)
int retval;
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
+ if (!djrcv_dev || djrcv_dev->hidpp != hdev)
+ return 0;
+
retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
if (retval < 0) {
- dev_err(&hdev->dev,
- "%s:logi_dj_recv_switch_to_dj_mode returned error:%d\n",
+ hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
__func__, retval);
}
@@ -1120,39 +1774,83 @@ static void logi_dj_remove(struct hid_device *hdev)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
struct dj_device *dj_dev;
+ unsigned long flags;
int i;
dbg_hid("%s\n", __func__);
+ if (!djrcv_dev)
+ return hid_hw_stop(hdev);
+
+ /*
+ * This ensures that if the work gets requeued from another
+ * interface of the same receiver it will be a no-op.
+ */
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
+ djrcv_dev->ready = false;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
+
cancel_work_sync(&djrcv_dev->work);
hid_hw_close(hdev);
hid_hw_stop(hdev);
- /* I suppose that at this point the only context that can access
- * the djrecv_data is this thread as the work item is guaranteed to
- * have finished and no more raw_event callbacks should arrive after
- * the remove callback was triggered so no locks are put around the
- * code below */
+ /*
+ * For proper operation we need access to all interfaces, so we destroy
+ * the paired devices when we're unbound from any interface.
+ *
+ * Note we may still be bound to other interfaces, sharing the same
+ * djrcv_dev, so we need locking here.
+ */
for (i = 0; i < (DJ_MAX_PAIRED_DEVICES + DJ_DEVICE_INDEX_MIN); i++) {
+ spin_lock_irqsave(&djrcv_dev->lock, flags);
dj_dev = djrcv_dev->paired_dj_devices[i];
+ djrcv_dev->paired_dj_devices[i] = NULL;
+ spin_unlock_irqrestore(&djrcv_dev->lock, flags);
if (dj_dev != NULL) {
hid_destroy_device(dj_dev->hdev);
kfree(dj_dev);
- djrcv_dev->paired_dj_devices[i] = NULL;
}
}
- kfifo_free(&djrcv_dev->notif_fifo);
- kfree(djrcv_dev);
- hid_set_drvdata(hdev, NULL);
+ dj_put_receiver_dev(hdev);
}
static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER),
+ .driver_data = recvr_type_dj},
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
+ USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
+ .driver_data = recvr_type_dj},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech Nano (non DJ) receiver */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
+ .driver_data = recvr_type_hidpp},
+ { /* Logitech gaming receiver (0xc539) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING),
+ .driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_S510_RECEIVER_2),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech 27 MHz HID++ 1.0 mouse-only receiver (0xc51b) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_27MHZ_MOUSE_RECEIVER),
+ .driver_data = recvr_type_27mhz},
+ { /* Logitech MX5000 HID++ / bluetooth receiver keyboard intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70e),
+ .driver_data = recvr_type_bluetooth},
+ { /* Logitech MX5000 HID++ / bluetooth receiver mouse intf. */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ 0xc70a),
+ .driver_data = recvr_type_bluetooth},
{}
};
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 199cc256e9d9..72fc9c0566db 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -51,7 +51,11 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_REPORT_SHORT_LENGTH 7
#define HIDPP_REPORT_LONG_LENGTH 20
-#define HIDPP_REPORT_VERY_LONG_LENGTH 64
+#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64
+
+#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03
+#define HIDPP_SUB_ID_ROLLER 0x05
+#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06
#define HIDPP_QUIRK_CLASS_WTP BIT(0)
#define HIDPP_QUIRK_CLASS_M560 BIT(1)
@@ -68,6 +72,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+#define HIDPP_QUIRK_HIDPP_WHEELS BIT(29)
+#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(30)
+#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(31)
+
+/* These are just aliases for now */
+#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
+#define HIDPP_QUIRK_KBD_ZOOM_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
/* Convenience constant to check for any high-res support. */
#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
@@ -106,13 +117,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
struct fap {
u8 feature_index;
u8 funcindex_clientid;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct rap {
u8 sub_id;
u8 reg_address;
- u8 params[HIDPP_REPORT_VERY_LONG_LENGTH - 4U];
+ u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U];
};
struct hidpp_report {
@@ -149,7 +160,6 @@ struct hidpp_battery {
* @last_time: last event time, used to reset remainder after inactivity
*/
struct hidpp_scroll_counter {
- struct input_dev *dev;
int wheel_multiplier;
int remainder;
int direction;
@@ -158,10 +168,12 @@ struct hidpp_scroll_counter {
struct hidpp_device {
struct hid_device *hid_dev;
+ struct input_dev *input;
struct mutex send_mutex;
void *send_receive_buf;
char *name; /* will never be NULL and should not be freed */
wait_queue_head_t wait;
+ int very_long_report_length;
bool answer_available;
u8 protocol_major;
u8 protocol_minor;
@@ -206,8 +218,6 @@ static int __hidpp_send_report(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int fields_count, ret;
- hidpp = hid_get_drvdata(hdev);
-
switch (hidpp_report->report_id) {
case REPORT_ID_HIDPP_SHORT:
fields_count = HIDPP_REPORT_SHORT_LENGTH;
@@ -216,7 +226,7 @@ static int __hidpp_send_report(struct hid_device *hdev,
fields_count = HIDPP_REPORT_LONG_LENGTH;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- fields_count = HIDPP_REPORT_VERY_LONG_LENGTH;
+ fields_count = hidpp->very_long_report_length;
break;
default:
return -ENODEV;
@@ -342,7 +352,7 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
max_count = HIDPP_REPORT_LONG_LENGTH - 4;
break;
case REPORT_ID_HIDPP_VERY_LONG:
- max_count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ max_count = hidpp_dev->very_long_report_length - 4;
break;
default:
return -EINVAL;
@@ -434,14 +444,15 @@ static void hidpp_prefix_name(char **name, int name_length)
* emit low-resolution scroll events when appropriate for
* backwards-compatibility with userspace input libraries.
*/
-static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter,
+static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev,
+ struct hidpp_scroll_counter *counter,
int hi_res_value)
{
int low_res_value, remainder, direction;
unsigned long long now, previous;
hi_res_value = hi_res_value * 120/counter->wheel_multiplier;
- input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value);
+ input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value);
remainder = counter->remainder;
direction = hi_res_value > 0 ? 1 : -1;
@@ -475,7 +486,7 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
low_res_value = remainder / 120;
if (low_res_value == 0)
low_res_value = (hi_res_value > 0 ? 1 : -1);
- input_report_rel(counter->dev, REL_WHEEL, low_res_value);
+ input_report_rel(input_dev, REL_WHEEL, low_res_value);
remainder -= low_res_value * 120;
}
counter->remainder = remainder;
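A standalone model of the conversion performed above, assuming a plain accumulate-and-emit scheme: hi-res deltas are normalised so one detent is 120 units and reported as REL_WHEEL_HI_RES, while a remainder counter emits a legacy REL_WHEEL detent once a full notch has accumulated. The driver additionally triggers half-way to the next notch and resets the remainder on direction changes and after inactivity, which this sketch leaves out.

#include <stdio.h>
#include <stdlib.h>

struct scroll_model {
        int wheel_multiplier;   /* hi-res units reported per detent */
        int remainder;          /* accumulated 1/120-notch units */
};

static void model_handle_scroll(struct scroll_model *m, int hi_res_value)
{
        /* normalise so that one detent is always 120 units */
        hi_res_value = hi_res_value * 120 / m->wheel_multiplier;
        printf("REL_WHEEL_HI_RES %d\n", hi_res_value);

        m->remainder += hi_res_value;
        if (abs(m->remainder) >= 120) {
                int low_res = m->remainder / 120;

                printf("REL_WHEEL %d\n", low_res);
                m->remainder -= low_res * 120;
        }
}

int main(void)
{
        struct scroll_model m = { .wheel_multiplier = 8, .remainder = 0 };
        const int deltas[] = { 1, 2, 3, 2, 8, 8, 8 };
        unsigned int i;

        for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
                model_handle_scroll(&m, deltas[i]);
        return 0;
}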
@@ -491,14 +502,16 @@ static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *coun
#define HIDPP_GET_LONG_REGISTER 0x83
/**
- * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * hidpp10_set_register - Modify a HID++ 1.0 register.
* @hidpp_dev: the device to set the register on.
* @register_address: the address of the register to modify.
* @byte: the byte of the register to modify. Should be less than 3.
+ * @mask: mask of the bits to modify
+ * @value: new values for the bits in mask
* Return: 0 if successful, otherwise a negative error code.
*/
-static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
- u8 register_address, u8 byte, u8 bit)
+static int hidpp10_set_register(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 mask, u8 value)
{
struct hidpp_report response;
int ret;
@@ -514,7 +527,8 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
memcpy(params, response.rap.params, 3);
- params[byte] |= BIT(bit);
+ params[byte] &= ~mask;
+ params[byte] |= value & mask;
return hidpp_send_rap_command_sync(hidpp_dev,
REPORT_ID_HIDPP_SHORT,
@@ -523,20 +537,28 @@ static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
params, 3, &response);
}
-
-#define HIDPP_REG_GENERAL 0x00
+#define HIDPP_REG_ENABLE_REPORTS 0x00
+#define HIDPP_ENABLE_CONSUMER_REPORT BIT(0)
+#define HIDPP_ENABLE_WHEEL_REPORT BIT(2)
+#define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3)
+#define HIDPP_ENABLE_BAT_REPORT BIT(4)
+#define HIDPP_ENABLE_HWHEEL_REPORT BIT(5)
static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT);
}
#define HIDPP_REG_FEATURES 0x01
+#define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1)
+#define HIDPP_ENABLE_FAST_SCROLL BIT(6)
/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
{
- return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
+ return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0,
+ HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL);
}
#define HIDPP_REG_BATTERY_STATUS 0x07
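hidpp10_set_register() generalises the old set-bit helper into a read-modify-write: the three parameter bytes are read back, one byte is patched under a mask, and the register is written again. A small standalone sketch of the byte arithmetic, using the BIT(4) battery bit and the BIT(2)/BIT(5) wheel bits from the defines above:

#include <stdio.h>
#include <stdint.h>

static void patch_byte(uint8_t params[3], int byte, uint8_t mask, uint8_t value)
{
        params[byte] &= ~mask;
        params[byte] |= value & mask;
}

int main(void)
{
        uint8_t params[3] = { 0x00, 0x00, 0x00 };       /* as read back */

        /* the old hidpp10_set_register_bit(dev, reg, 0, 4) becomes the
         * special case mask == value == BIT(4): */
        patch_byte(params, 0, 1u << 4, 1u << 4);

        /* enabling wheel + hwheel reports in one write: */
        patch_byte(params, 0, (1u << 2) | (1u << 5), (1u << 2) | (1u << 5));

        printf("params[0] = 0x%02x\n", params[0]);      /* prints 0x34 */
        return 0;
}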
@@ -741,6 +763,9 @@ static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev)
if (2 + len > sizeof(response.rap.params))
return NULL;
+ if (len < 4) /* logitech devices are usually at least Xddd */
+ return NULL;
+
name = kzalloc(len + 1, GFP_KERNEL);
if (!name)
return NULL;
@@ -836,18 +861,21 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
{
+ const u8 ping_byte = 0x5a;
+ u8 ping_data[3] = { 0, 0, ping_byte };
struct hidpp_report response;
int ret;
- ret = hidpp_send_fap_command_sync(hidpp,
+ ret = hidpp_send_rap_command_sync(hidpp,
+ REPORT_ID_HIDPP_SHORT,
HIDPP_PAGE_ROOT_IDX,
CMD_ROOT_GET_PROTOCOL_VERSION,
- NULL, 0, &response);
+ ping_data, sizeof(ping_data), &response);
if (ret == HIDPP_ERROR_INVALID_SUBID) {
hidpp->protocol_major = 1;
hidpp->protocol_minor = 0;
- return 0;
+ goto print_version;
}
/* the device might not be connected */
@@ -862,21 +890,19 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
if (ret)
return ret;
- hidpp->protocol_major = response.fap.params[0];
- hidpp->protocol_minor = response.fap.params[1];
+ if (response.rap.params[2] != ping_byte) {
+ hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
+ __func__, response.rap.params[2], ping_byte);
+ return -EPROTO;
+ }
- return ret;
-}
+ hidpp->protocol_major = response.rap.params[0];
+ hidpp->protocol_minor = response.rap.params[1];
-static bool hidpp_is_connected(struct hidpp_device *hidpp)
-{
- int ret;
-
- ret = hidpp_root_get_protocol_version(hidpp);
- if (!ret)
- hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
- return ret == 0;
+print_version:
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ return 0;
}
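A standalone model of the decision made by the version probe above: HID++ 1.0 receivers reject the ping with an "invalid sub-id" error, while anything newer must echo the ping byte and carries major/minor in the first two answer parameters. The "device might not be connected" path is left out of this sketch.

#include <stdio.h>

struct probe_result {
        int got_invalid_subid;          /* answered HIDPP_ERROR_INVALID_SUBID */
        unsigned char params[3];        /* rap params of the answer otherwise */
};

static int decode_version(const struct probe_result *r, unsigned char ping_byte,
                          int *major, int *minor)
{
        if (r->got_invalid_subid) {
                *major = 1;
                *minor = 0;
                return 0;
        }
        if (r->params[2] != ping_byte)
                return -1;      /* ping mismatch, like the -EPROTO case above */
        *major = r->params[0];
        *minor = r->params[1];
        return 0;
}

int main(void)
{
        struct probe_result answer = { 0, { 4, 5, 0x5a } };
        int major, minor;

        if (!decode_version(&answer, 0x5a, &major, &minor))
                printf("HID++ %d.%d\n", major, minor);  /* HID++ 4.5 */
        return 0;
}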
/* -------------------------------------------------------------------------- */
@@ -932,7 +958,7 @@ static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
switch (response.report_id) {
case REPORT_ID_HIDPP_VERY_LONG:
- count = HIDPP_REPORT_VERY_LONG_LENGTH - 4;
+ count = hidpp->very_long_report_length - 4;
break;
case REPORT_ID_HIDPP_LONG:
count = HIDPP_REPORT_LONG_LENGTH - 4;
@@ -1012,7 +1038,11 @@ static int hidpp_map_battery_level(int capacity)
{
if (capacity < 11)
return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
- else if (capacity < 31)
+ /*
+ * The spec says this should be < 31 but some devices report 30
+ * with brand new batteries and Windows reports 30 as "Good".
+ */
+ else if (capacity < 30)
return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (capacity < 81)
return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
@@ -2211,7 +2241,6 @@ static int hidpp_ff_deinit(struct hid_device *hid)
#define WTP_MANUAL_RESOLUTION 39
struct wtp_data {
- struct input_dev *input;
u16 x_size, y_size;
u8 finger_count;
u8 mt_feature_index;
@@ -2229,7 +2258,7 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
static void wtp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
struct wtp_data *wd = hidpp->private_data;
@@ -2255,31 +2284,30 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER |
INPUT_MT_DROP_UNUSED);
-
- wd->input = input_dev;
}
-static void wtp_touch_event(struct wtp_data *wd,
+static void wtp_touch_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy_finger *touch_report)
{
+ struct wtp_data *wd = hidpp->private_data;
int slot;
if (!touch_report->finger_id || touch_report->contact_type)
/* no actual data */
return;
- slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id);
+ slot = input_mt_get_slot_by_key(hidpp->input, touch_report->finger_id);
- input_mt_slot(wd->input, slot);
- input_mt_report_slot_state(wd->input, MT_TOOL_FINGER,
+ input_mt_slot(hidpp->input, slot);
+ input_mt_report_slot_state(hidpp->input, MT_TOOL_FINGER,
touch_report->contact_status);
if (touch_report->contact_status) {
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_X,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_X,
touch_report->x);
- input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y,
+ input_event(hidpp->input, EV_ABS, ABS_MT_POSITION_Y,
wd->flip_y ? wd->y_size - touch_report->y :
touch_report->y);
- input_event(wd->input, EV_ABS, ABS_MT_PRESSURE,
+ input_event(hidpp->input, EV_ABS, ABS_MT_PRESSURE,
touch_report->area);
}
}
@@ -2287,19 +2315,18 @@ static void wtp_touch_event(struct wtp_data *wd,
static void wtp_send_raw_xy_event(struct hidpp_device *hidpp,
struct hidpp_touchpad_raw_xy *raw)
{
- struct wtp_data *wd = hidpp->private_data;
int i;
for (i = 0; i < 2; i++)
- wtp_touch_event(wd, &(raw->fingers[i]));
+ wtp_touch_event(hidpp, &(raw->fingers[i]));
if (raw->end_of_frame &&
!(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS))
- input_event(wd->input, EV_KEY, BTN_LEFT, raw->button);
+ input_event(hidpp->input, EV_KEY, BTN_LEFT, raw->button);
if (raw->end_of_frame || raw->finger_count <= 2) {
- input_mt_sync_frame(wd->input);
- input_sync(wd->input);
+ input_mt_sync_frame(hidpp->input);
+ input_sync(hidpp->input);
}
}
@@ -2349,7 +2376,7 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
struct hidpp_report *report = (struct hidpp_report *)data;
struct hidpp_touchpad_raw_xy raw;
- if (!wd || !wd->input)
+ if (!wd || !hidpp->input)
return 1;
switch (data[0]) {
@@ -2360,11 +2387,11 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
return 1;
}
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
- input_event(wd->input, EV_KEY, BTN_LEFT,
+ input_event(hidpp->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
- input_event(wd->input, EV_KEY, BTN_RIGHT,
+ input_event(hidpp->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
- input_sync(wd->input);
+ input_sync(hidpp->input);
return 0;
} else {
if (size < 21)
@@ -2482,10 +2509,6 @@ static int wtp_connect(struct hid_device *hdev, bool connected)
static const u8 m560_config_parameter[] = {0x00, 0xaf, 0x03};
-struct m560_private_data {
- struct input_dev *input;
-};
-
/* how buttons are mapped in the report */
#define M560_MOUSE_BTN_LEFT 0x01
#define M560_MOUSE_BTN_RIGHT 0x02
@@ -2513,28 +2536,12 @@ static int m560_send_config_command(struct hid_device *hdev, bool connected)
);
}
-static int m560_allocate(struct hid_device *hdev)
-{
- struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *d;
-
- d = devm_kzalloc(&hdev->dev, sizeof(struct m560_private_data),
- GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- hidpp->private_data = d;
-
- return 0;
-};
-
static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct m560_private_data *mydata = hidpp->private_data;
/* sanity check */
- if (!mydata || !mydata->input) {
+ if (!hidpp->input) {
hid_err(hdev, "error in parameter\n");
return -EINVAL;
}
@@ -2561,24 +2568,24 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[5]) {
case 0xaf:
- input_report_key(mydata->input, BTN_MIDDLE, 1);
+ input_report_key(hidpp->input, BTN_MIDDLE, 1);
break;
case 0xb0:
- input_report_key(mydata->input, BTN_FORWARD, 1);
+ input_report_key(hidpp->input, BTN_FORWARD, 1);
break;
case 0xae:
- input_report_key(mydata->input, BTN_BACK, 1);
+ input_report_key(hidpp->input, BTN_BACK, 1);
break;
case 0x00:
- input_report_key(mydata->input, BTN_BACK, 0);
- input_report_key(mydata->input, BTN_FORWARD, 0);
- input_report_key(mydata->input, BTN_MIDDLE, 0);
+ input_report_key(hidpp->input, BTN_BACK, 0);
+ input_report_key(hidpp->input, BTN_FORWARD, 0);
+ input_report_key(hidpp->input, BTN_MIDDLE, 0);
break;
default:
hid_err(hdev, "error in report\n");
return 0;
}
- input_sync(mydata->input);
+ input_sync(hidpp->input);
} else if (data[0] == 0x02) {
/*
@@ -2592,59 +2599,55 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
int v;
- input_report_key(mydata->input, BTN_LEFT,
+ input_report_key(hidpp->input, BTN_LEFT,
!!(data[1] & M560_MOUSE_BTN_LEFT));
- input_report_key(mydata->input, BTN_RIGHT,
+ input_report_key(hidpp->input, BTN_RIGHT,
!!(data[1] & M560_MOUSE_BTN_RIGHT));
if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) {
- input_report_rel(mydata->input, REL_HWHEEL, -1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, -1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
-120);
} else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) {
- input_report_rel(mydata->input, REL_HWHEEL, 1);
- input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ input_report_rel(hidpp->input, REL_HWHEEL, 1);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES,
120);
}
v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
- input_report_rel(mydata->input, REL_X, v);
+ input_report_rel(hidpp->input, REL_X, v);
v = hid_snto32(hid_field_extract(hdev, data+3, 12, 12), 12);
- input_report_rel(mydata->input, REL_Y, v);
+ input_report_rel(hidpp->input, REL_Y, v);
v = hid_snto32(data[6], 8);
if (v != 0)
- hidpp_scroll_counter_handle_scroll(
+ hidpp_scroll_counter_handle_scroll(hidpp->input,
&hidpp->vertical_wheel_counter, v);
- input_sync(mydata->input);
+ input_sync(hidpp->input);
}
return 1;
}
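The X/Y deltas above are packed as two 12-bit two's-complement fields, extracted with hid_field_extract() and sign-extended with hid_snto32(); both helpers come from hid-core and are only assumed here. A standalone model of the sign extension:

#include <stdio.h>
#include <stdint.h>

static int snto32_12(uint32_t raw)
{
        raw &= 0xfff;
        return (raw & 0x800) ? (int)raw - 0x1000 : (int)raw;
}

int main(void)
{
        printf("%d %d %d\n", snto32_12(0x001), snto32_12(0xfff), snto32_12(0x800));
        /* prints: 1 -1 -2048 */
        return 0;
}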
static void m560_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input_dev, bool origin_is_hid_core)
+ struct input_dev *input_dev)
{
- struct m560_private_data *mydata = hidpp->private_data;
-
- mydata->input = input_dev;
-
- __set_bit(EV_KEY, mydata->input->evbit);
- __set_bit(BTN_MIDDLE, mydata->input->keybit);
- __set_bit(BTN_RIGHT, mydata->input->keybit);
- __set_bit(BTN_LEFT, mydata->input->keybit);
- __set_bit(BTN_BACK, mydata->input->keybit);
- __set_bit(BTN_FORWARD, mydata->input->keybit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
- __set_bit(EV_REL, mydata->input->evbit);
- __set_bit(REL_X, mydata->input->relbit);
- __set_bit(REL_Y, mydata->input->relbit);
- __set_bit(REL_WHEEL, mydata->input->relbit);
- __set_bit(REL_HWHEEL, mydata->input->relbit);
- __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit);
- __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit);
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
}
static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -2747,6 +2750,175 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* HID++1.0 devices which use HID++ reports for their wheels */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_wheel_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT,
+ HIDPP_ENABLE_WHEEL_REPORT | HIDPP_ENABLE_HWHEEL_REPORT);
+}
+
+static int hidpp10_wheel_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ s8 value, hvalue;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT || data[2] != HIDPP_SUB_ID_ROLLER)
+ return 0;
+
+ value = data[3];
+ hvalue = data[4];
+
+ input_report_rel(hidpp->input, REL_WHEEL, value);
+ input_report_rel(hidpp->input, REL_WHEEL_HI_RES, value * 120);
+ input_report_rel(hidpp->input, REL_HWHEEL, hvalue);
+ input_report_rel(hidpp->input, REL_HWHEEL_HI_RES, hvalue * 120);
+ input_sync(hidpp->input);
+
+ return 1;
+}
+
+static void hidpp10_wheel_populate_input(struct hidpp_device *hidpp,
+ struct input_dev *input_dev)
+{
+ __set_bit(EV_REL, input_dev->evbit);
+ __set_bit(REL_WHEEL, input_dev->relbit);
+ __set_bit(REL_WHEEL_HI_RES, input_dev->relbit);
+ __set_bit(REL_HWHEEL, input_dev->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, input_dev->relbit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 mice which use HID++ reports for extra mouse buttons */
+/* -------------------------------------------------------------------------- */
+static int hidpp10_extra_mouse_buttons_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT,
+ HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT);
+}
+
+static int hidpp10_extra_mouse_buttons_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ int i;
+
+ if (!hidpp->input)
+ return -EINVAL;
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_MOUSE_EXTRA_BTNS)
+ return 0;
+
+ /*
+ * Buttons are either delivered through the regular mouse report *or*
+ * through the extra buttons report. For button 6 at least, which of
+ * the two carries it differs per receiver firmware version. Even receivers with
+ * the same usb-id show different behavior, so we handle both cases.
+ */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MOUSE + i,
+ (data[3] & (1 << i)));
+
+ /* Some mice report events on button 9+, use BTN_MISC */
+ for (i = 0; i < 8; i++)
+ input_report_key(hidpp->input, BTN_MISC + i,
+ (data[4] & (1 << i)));
+
+ input_sync(hidpp->input);
+ return 1;
+}
+
+static void hidpp10_extra_mouse_buttons_populate_input(
+ struct hidpp_device *hidpp, struct input_dev *input_dev)
+{
+ /* BTN_MOUSE - BTN_MOUSE+7 are set already by the descriptor */
+ __set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
+ __set_bit(BTN_4, input_dev->keybit);
+ __set_bit(BTN_5, input_dev->keybit);
+ __set_bit(BTN_6, input_dev->keybit);
+ __set_bit(BTN_7, input_dev->keybit);
+}
+
+/* -------------------------------------------------------------------------- */
+/* HID++1.0 kbds which only report 0x10xx consumer usages through sub-id 0x03 */
+/* -------------------------------------------------------------------------- */
+
+/* Find the consumer-page input report desc and change Maximums to 0x107f */
+static u8 *hidpp10_consumer_keys_report_fixup(struct hidpp_device *hidpp,
+ u8 *_rdesc, unsigned int *rsize)
+{
+ /* Note 0 terminated so we can use strnstr to search for this. */
+ const char consumer_rdesc_start[] = {
+ 0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
+ 0x09, 0x01, /* USAGE (Consumer Control) */
+ 0xA1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x03, /* REPORT_ID = 3 */
+ 0x75, 0x10, /* REPORT_SIZE (16) */
+ 0x95, 0x02, /* REPORT_COUNT (2) */
+ 0x15, 0x01, /* LOGICAL_MIN (1) */
+ 0x26, 0x00 /* LOGICAL_MAX (... */
+ };
+ char *consumer_rdesc, *rdesc = (char *)_rdesc;
+ unsigned int size;
+
+ consumer_rdesc = strnstr(rdesc, consumer_rdesc_start, *rsize);
+ size = *rsize - (consumer_rdesc - rdesc);
+ if (consumer_rdesc && size >= 25) {
+ consumer_rdesc[15] = 0x7f;
+ consumer_rdesc[16] = 0x10;
+ consumer_rdesc[20] = 0x7f;
+ consumer_rdesc[21] = 0x10;
+ }
+ return _rdesc;
+}
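The two patched byte pairs are the little-endian data bytes of two-byte HID items: 0x26 is a two-byte LOGICAL_MAXIMUM, and the second pair presumably belongs to the matching two-byte USAGE_MAXIMUM of the keyboard's real descriptor, which is not shown in this patch. Writing 0x7f, 0x10 therefore raises the maximum to 0x107f; a one-line standalone check of the decoding:

#include <stdio.h>
#include <stdint.h>

static unsigned int hid_item_u16(uint8_t lo, uint8_t hi)
{
        return lo | ((unsigned int)hi << 8);
}

int main(void)
{
        printf("0x%04x\n", hid_item_u16(0x7f, 0x10));   /* prints 0x107f */
        return 0;
}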
+
+static int hidpp10_consumer_keys_connect(struct hidpp_device *hidpp)
+{
+ return hidpp10_set_register(hidpp, HIDPP_REG_ENABLE_REPORTS, 0,
+ HIDPP_ENABLE_CONSUMER_REPORT,
+ HIDPP_ENABLE_CONSUMER_REPORT);
+}
+
+static int hidpp10_consumer_keys_raw_event(struct hidpp_device *hidpp,
+ u8 *data, int size)
+{
+ u8 consumer_report[5];
+
+ if (size < 7)
+ return 0;
+
+ if (data[0] != REPORT_ID_HIDPP_SHORT ||
+ data[2] != HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS)
+ return 0;
+
+ /*
+ * Build a normal consumer report (3) out of the data; this detour
+ * is necessary to get some keyboards to report their 0x10xx usages.
+ */
+ consumer_report[0] = 0x03;
+ memcpy(&consumer_report[1], &data[3], 4);
+ /* We are called from atomic context */
+ hid_report_raw_event(hidpp->hid_dev, HID_INPUT_REPORT,
+ consumer_report, 5, 1);
+
+ return 1;
+}
+
+/* -------------------------------------------------------------------------- */
/* High-resolution scroll wheels */
/* -------------------------------------------------------------------------- */
@@ -2781,12 +2953,31 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
+static u8 *hidpp_report_fixup(struct hid_device *hdev, u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+
+ if (!hidpp)
+ return rdesc;
+
+ /* For 27 MHz keyboards the quirk gets set after hid_parse. */
+ if (hdev->group == HID_GROUP_LOGITECH_27MHZ_DEVICE ||
+ (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS))
+ rdesc = hidpp10_consumer_keys_report_fixup(hidpp, rdesc, rsize);
+
+ return rdesc;
+}
+
static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
return wtp_input_mapping(hdev, hi, field, usage, bit, max);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560 &&
@@ -2802,6 +2993,9 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return 0;
+
/* Ensure that Logitech G920 is not given a default fuzz/flat value */
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
if (usage->type == EV_ABS && (usage->code == ABS_X ||
@@ -2816,15 +3010,20 @@ static int hidpp_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static void hidpp_populate_input(struct hidpp_device *hidpp,
- struct input_dev *input, bool origin_is_hid_core)
+ struct input_dev *input)
{
+ hidpp->input = input;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
- wtp_populate_input(hidpp, input, origin_is_hid_core);
+ wtp_populate_input(hidpp, input);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
- m560_populate_input(hidpp, input, origin_is_hid_core);
+ m560_populate_input(hidpp, input);
- if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
- hidpp->vertical_wheel_counter.dev = input;
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS)
+ hidpp10_wheel_populate_input(hidpp, input);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS)
+ hidpp10_extra_mouse_buttons_populate_input(hidpp, input);
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2833,7 +3032,10 @@ static int hidpp_input_configured(struct hid_device *hdev,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
struct input_dev *input = hidinput->input;
- hidpp_populate_input(hidpp, input, true);
+ if (!hidpp)
+ return 0;
+
+ hidpp_populate_input(hidpp, input);
return 0;
}
@@ -2893,6 +3095,24 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return ret;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_raw_event(hidpp, data, size);
+ if (ret != 0)
+ return ret;
+ }
+
return 0;
}
@@ -2902,10 +3122,13 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
int ret = 0;
+ if (!hidpp)
+ return 0;
+
/* Generic HID++ processing. */
switch (data[0]) {
case REPORT_ID_HIDPP_VERY_LONG:
- if (size != HIDPP_REPORT_VERY_LONG_LENGTH) {
+ if (size != hidpp->very_long_report_length) {
hid_err(hdev, "received hid++ report of bad size (%d)",
size);
return 1;
@@ -2950,17 +3173,22 @@ static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
* restriction imposed in hidpp_usages.
*/
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ struct hidpp_scroll_counter *counter;
+
+ if (!hidpp)
+ return 0;
+
+ counter = &hidpp->vertical_wheel_counter;
/* A scroll event may occur before the multiplier has been retrieved or
* the input device set, or high-res scroll enabling may fail. In such
* cases we must return early (falling back to default behaviour) to
* avoid a crash in hidpp_scroll_counter_handle_scroll.
*/
if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
- || counter->dev == NULL || counter->wheel_multiplier == 0)
+ || hidpp->input == NULL || counter->wheel_multiplier == 0)
return 0;
- hidpp_scroll_counter_handle_scroll(counter, value);
+ hidpp_scroll_counter_handle_scroll(hidpp->input, counter, value);
return 1;
}
@@ -3132,32 +3360,45 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
+ ret = hidpp10_wheel_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS) {
+ ret = hidpp10_extra_mouse_buttons_connect(hidpp);
+ if (ret)
+ return;
+ }
+
+ if (hidpp->quirks & HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS) {
+ ret = hidpp10_consumer_keys_connect(hidpp);
+ if (ret)
+ return;
+ }
+
/* the device is already connected, we can ask for its name and
* protocol */
if (!hidpp->protocol_major) {
- ret = !hidpp_is_connected(hidpp);
+ ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
hid_err(hdev, "Can not get the protocol version.\n");
return;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
}
if (hidpp->name == hdev->name && hidpp->protocol_major >= 2) {
name = hidpp_get_device_name(hidpp);
- if (!name) {
- hid_err(hdev,
- "unable to retrieve the name of the device");
- return;
- }
-
- devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
- kfree(name);
- if (!devm_name)
- return;
+ if (name) {
+ devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s", name);
+ kfree(name);
+ if (!devm_name)
+ return;
- hidpp->name = devm_name;
+ hidpp->name = devm_name;
+ }
}
hidpp_initialize_battery(hidpp);
@@ -3188,7 +3429,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
- hidpp_populate_input(hidpp, input, false);
+ hidpp_populate_input(hidpp, input);
ret = input_register_device(input);
if (ret)
@@ -3208,6 +3449,60 @@ static const struct attribute_group ps_attribute_group = {
.attrs = sysfs_attrs
};
+static int hidpp_get_report_length(struct hid_device *hdev, int id)
+{
+ struct hid_report_enum *re;
+ struct hid_report *report;
+
+ re = &(hdev->report_enum[HID_OUTPUT_REPORT]);
+ report = re->report_id_hash[id];
+ if (!report)
+ return 0;
+
+ return report->field[0]->report_count + 1;
+}
+
+static bool hidpp_validate_report(struct hid_device *hdev, int id,
+ int expected_length, bool optional)
+{
+ int report_length;
+
+ if (id >= HID_MAX_IDS || id < 0) {
+ hid_err(hdev, "invalid HID report id %u\n", id);
+ return false;
+ }
+
+ report_length = hidpp_get_report_length(hdev, id);
+ if (!report_length)
+ return optional;
+
+ if (report_length < expected_length) {
+ hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+ return false;
+ }
+
+ return true;
+}
+
+static bool hidpp_validate_device(struct hid_device *hdev)
+{
+ return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
+ HIDPP_REPORT_SHORT_LENGTH, false) &&
+ hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
+ HIDPP_REPORT_LONG_LENGTH, true);
+}
+
+static bool hidpp_application_equals(struct hid_device *hdev,
+ unsigned int application)
+{
+ struct list_head *report_list;
+ struct hid_report *report;
+
+ report_list = &hdev->report_enum[HID_INPUT_REPORT].report_list;
+ report = list_first_entry_or_null(report_list, struct hid_report, list);
+ return report && report->application == application;
+}
+
static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
struct hidpp_device *hidpp;
@@ -3215,20 +3510,48 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
- hidpp = devm_kzalloc(&hdev->dev, sizeof(struct hidpp_device),
- GFP_KERNEL);
+ /* report_fixup needs drvdata to be set before we call hid_parse */
+ hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
if (!hidpp)
return -ENOMEM;
hidpp->hid_dev = hdev;
hidpp->name = hdev->name;
+ hidpp->quirks = id->driver_data;
hid_set_drvdata(hdev, hidpp);
- hidpp->quirks = id->driver_data;
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "%s:parse failed\n", __func__);
+ return ret;
+ }
+
+ /*
+ * Make sure the device is HID++ capable, otherwise treat as generic HID
+ */
+ if (!hidpp_validate_device(hdev)) {
+ hid_set_drvdata(hdev, NULL);
+ devm_kfree(&hdev->dev, hidpp);
+ return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ }
+
+ hidpp->very_long_report_length =
+ hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
+ if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+ hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_MOUSE))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_WHEELS |
+ HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS;
+
+ if (id->group == HID_GROUP_LOGITECH_27MHZ_DEVICE &&
+ hidpp_application_equals(hdev, HID_GD_KEYBOARD))
+ hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
+
if (disable_raw_mode) {
hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
@@ -3237,15 +3560,11 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_allocate(hdev, id);
if (ret)
- goto allocate_fail;
- } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560) {
- ret = m560_allocate(hdev);
- if (ret)
- goto allocate_fail;
+ return ret;
} else if (hidpp->quirks & HIDPP_QUIRK_CLASS_K400) {
ret = k400_allocate(hdev);
if (ret)
- goto allocate_fail;
+ return ret;
}
INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -3258,93 +3577,79 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_warn(hdev, "Cannot allocate sysfs group for %s\n",
hdev->name);
- ret = hid_parse(hdev);
+ /*
+ * Plain USB connections need to actually call start and open
+ * on the transport driver to allow incoming data.
+ */
+ ret = hid_hw_start(hdev, 0);
if (ret) {
- hid_err(hdev, "%s:parse failed\n", __func__);
- goto hid_parse_fail;
+ hid_err(hdev, "hw start failed\n");
+ goto hid_hw_start_fail;
}
- if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
- connect_mask &= ~HID_CONNECT_HIDINPUT;
-
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- goto hid_hw_start_fail;
- }
- ret = hid_hw_open(hdev);
- if (ret < 0) {
- dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
- __func__, ret);
- hid_hw_stop(hdev);
- goto hid_hw_start_fail;
- }
+ ret = hid_hw_open(hdev);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ __func__, ret);
+ hid_hw_stop(hdev);
+ goto hid_hw_open_fail;
}
-
/* Allow incoming packets */
hid_device_io_start(hdev);
if (hidpp->quirks & HIDPP_QUIRK_UNIFYING)
hidpp_unifying_init(hidpp);
- connected = hidpp_is_connected(hidpp);
+ connected = hidpp_root_get_protocol_version(hidpp) == 0;
atomic_set(&hidpp->connected, connected);
if (!(hidpp->quirks & HIDPP_QUIRK_UNIFYING)) {
if (!connected) {
ret = -ENODEV;
hid_err(hdev, "Device not connected");
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- hid_info(hdev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
-
hidpp_overwrite_name(hdev);
}
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
ret = wtp_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
} else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
ret = g920_get_config(hidpp);
if (ret)
- goto hid_hw_open_failed;
+ goto hid_hw_init_fail;
}
- /* Block incoming packets */
- hid_device_io_stop(hdev);
+ hidpp_connect_event(hidpp);
- if (!(hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
- goto hid_hw_start_fail;
- }
- }
+ /* Reset the HID node state */
+ hid_device_io_stop(hdev);
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
- /* Allow incoming packets */
- hid_device_io_start(hdev);
+ if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
- hidpp_connect_event(hidpp);
+ /* Now export the actual inputs and hidraw nodes to the world */
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
return ret;
-hid_hw_open_failed:
- hid_device_io_stop(hdev);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
- hid_hw_close(hdev);
- hid_hw_stop(hdev);
- }
+hid_hw_init_fail:
+ hid_hw_close(hdev);
+hid_hw_open_fail:
+ hid_hw_stop(hdev);
hid_hw_start_fail:
-hid_parse_fail:
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
-allocate_fail:
- hid_set_drvdata(hdev, NULL);
return ret;
}
@@ -3352,12 +3657,14 @@ static void hidpp_remove(struct hid_device *hdev)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ if (!hidpp)
+ return hid_hw_stop(hdev);
+
sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
hidpp_ff_deinit(hdev);
- hid_hw_close(hdev);
- }
+
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
mutex_destroy(&hidpp->send_mutex);
@@ -3367,6 +3674,10 @@ static void hidpp_remove(struct hid_device *hdev)
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
USB_VENDOR_ID_LOGITECH, (product))
+#define L27MHZ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_27MHZ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
LDJ_DEVICE(0x4011),
@@ -3418,11 +3729,37 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Solar Keyboard Logitech K750 */
LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
+ { /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
+ LDJ_DEVICE(0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ LDJ_DEVICE(HID_ANY_ID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ { /* Keyboard LX501 (Y-RR53) */
+ L27MHZ_DEVICE(0x0049),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+ { /* Keyboard MX3000 (Y-RAM74) */
+ L27MHZ_DEVICE(0x0057),
+ .driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
+ { /* Keyboard MX3200 (Y-RAV80) */
+ L27MHZ_DEVICE(0x005c),
+ .driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
+
+ { L27MHZ_DEVICE(HID_ANY_ID) },
+
+ { /* Logitech G403 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+ { /* Logitech G700 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G900 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+
+ { /* MX5000 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{}
};
@@ -3436,6 +3773,7 @@ static const struct hid_usage_id hidpp_usages[] = {
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
+ .report_fixup = hidpp_report_fixup,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
diff --git a/drivers/hid/hid-macally.c b/drivers/hid/hid-macally.c
new file mode 100644
index 000000000000..9a4fc7dffb14
--- /dev/null
+++ b/drivers/hid/hid-macally.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for quirky Macally devices
+ *
+ * Copyright (c) 2019 Alex Henrie <alexhenrie24@gmail.com>
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+MODULE_AUTHOR("Alex Henrie <alexhenrie24@gmail.com>");
+MODULE_DESCRIPTION("Macally devices");
+MODULE_LICENSE("GPL");
+
+/*
+ * The Macally ikey keyboard says that its logical and usage maximums are both
+ * 101, but the power key is 102 and the equals key is 103
+ */
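+/*
+ * Note that 0x65 and 0x67 below are simply 101 and 103 in hexadecimal, so
+ * the fixup raises both maxima just far enough to cover those two keys.
+ */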
+static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+ hid_info(hdev,
+ "fixing up Macally ikey keyboard report descriptor\n");
+ rdesc[53] = rdesc[59] = 0x67;
+ }
+ return rdesc;
+}
+
+static struct hid_device_id macally_id_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+ USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, macally_id_table);
+
+static struct hid_driver macally_driver = {
+ .name = "macally",
+ .id_table = macally_id_table,
+ .report_fixup = macally_report_fixup,
+};
+
+module_hid_driver(macally_driver);
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index c1b29a9eb41a..482c24f0e078 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -28,6 +28,7 @@
#include <linux/completion.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/string.h>
#include "hid-picolcd.h"
@@ -275,27 +276,20 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
{
struct picolcd_data *data = dev_get_drvdata(dev);
struct hid_report *report = NULL;
- size_t cnt = count;
int timeout = data->opmode_delay;
unsigned long flags;
- if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) {
+ if (sysfs_streq(buf, "lcd")) {
if (data->status & PICOLCD_BOOTLOADER)
report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev);
- buf += 3;
- cnt -= 3;
- } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) {
+ } else if (sysfs_streq(buf, "bootloader")) {
if (!(data->status & PICOLCD_BOOTLOADER))
report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev);
- buf += 10;
- cnt -= 10;
- }
- if (!report || report->maxfield != 1)
+ } else {
return -EINVAL;
+ }
- while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
- cnt--;
- if (cnt != 0)
+ if (!report || report->maxfield != 1)
return -EINVAL;
spin_lock_irqsave(&data->lock, flags);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 77ffba48cc73..fea7f7ff5ab1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -432,7 +432,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_LOGITECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
@@ -464,13 +463,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
#endif
#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index bb012bc032e0..462e653a7bbb 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -157,8 +157,7 @@ static int usage_id_cmp(const void *p1, const void *p2)
static ssize_t enable_sensor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", sensor_inst->enable);
}
@@ -237,8 +236,7 @@ static ssize_t enable_sensor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int value;
int ret = -EINVAL;
@@ -283,8 +281,7 @@ static const struct attribute_group enable_sensor_attr_group = {
static ssize_t show_value(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
struct hid_sensor_hub_attribute_info *attribute;
int index, usage, field_index;
char name[HID_CUSTOM_NAME_LENGTH];
@@ -392,8 +389,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
static ssize_t store_value(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct hid_sensor_custom *sensor_inst = platform_get_drvdata(pdev);
+ struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
int index, field_index, usage;
char name[HID_CUSTOM_NAME_LENGTH];
int value;
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
new file mode 100644
index 000000000000..95e0807878c7
--- /dev/null
+++ b/drivers/hid/hid-u2fzero.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * U2F Zero LED and RNG driver
+ *
+ * Copyright 2018 Andrej Shadura <andrew@shadura.me>
+ * Loosely based on drivers/hid/hid-led.c
+ * and drivers/usb/misc/chaoskey.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/hid.h>
+#include <linux/hidraw.h>
+#include <linux/hw_random.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+
+#include "usbhid/usbhid.h"
+#include "hid-ids.h"
+
+#define DRIVER_SHORT "u2fzero"
+
+#define HID_REPORT_SIZE 64
+
+/* We only use broadcast (CID-less) messages */
+#define CID_BROADCAST 0xffffffff
+
+struct u2f_hid_msg {
+ u32 cid;
+ union {
+ struct {
+ u8 cmd;
+ u8 bcnth;
+ u8 bcntl;
+ u8 data[HID_REPORT_SIZE - 7];
+ } init;
+ struct {
+ u8 seq;
+ u8 data[HID_REPORT_SIZE - 5];
+ } cont;
+ };
+} __packed;
+
+struct u2f_hid_report {
+ u8 report_type;
+ struct u2f_hid_msg msg;
+} __packed;
+
+#define U2F_HID_MSG_LEN(f) (size_t)(((f).init.bcnth << 8) + (f).init.bcntl)
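+/*
+ * For example, a message carrying bcnth = 0x01 and bcntl = 0x20 declares a
+ * payload length of (0x01 << 8) + 0x20 = 288 bytes.
+ */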
+
+/* Custom extensions to the U2FHID protocol */
+#define U2F_CUSTOM_GET_RNG 0x21
+#define U2F_CUSTOM_WINK 0x24
+
+struct u2fzero_device {
+ struct hid_device *hdev;
+ struct urb *urb; /* URB for the RNG data */
+ struct led_classdev ldev; /* Embedded struct for led */
+ struct hwrng hwrng; /* Embedded struct for hwrng */
+ char *led_name;
+ char *rng_name;
+ u8 *buf_out;
+ u8 *buf_in;
+ struct mutex lock;
+ bool present;
+};
+
+static int u2fzero_send(struct u2fzero_device *dev, struct u2f_hid_report *req)
+{
+ int ret;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ mutex_unlock(&dev->lock);
+
+ if (ret < 0)
+ return ret;
+
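+	/*
+	 * hid_hw_output_report() returns the number of bytes written on
+	 * success; a short write is mapped to -EMSGSIZE.
+	 */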
+ return ret == sizeof(struct u2f_hid_msg) ? 0 : -EMSGSIZE;
+}
+
+struct u2fzero_transfer_context {
+ struct completion done;
+ int status;
+};
+
+static void u2fzero_read_callback(struct urb *urb)
+{
+ struct u2fzero_transfer_context *ctx = urb->context;
+
+ ctx->status = urb->status;
+ complete(&ctx->done);
+}
+
+static int u2fzero_recv(struct u2fzero_device *dev,
+ struct u2f_hid_report *req,
+ struct u2f_hid_msg *resp)
+{
+ int ret;
+ struct hid_device *hdev = dev->hdev;
+ struct u2fzero_transfer_context ctx;
+
+ mutex_lock(&dev->lock);
+
+ memcpy(dev->buf_out, req, sizeof(struct u2f_hid_report));
+
+ dev->urb->context = &ctx;
+ init_completion(&ctx.done);
+
+ ret = usb_submit_urb(dev->urb, GFP_NOIO);
+ if (unlikely(ret)) {
+ hid_err(hdev, "usb_submit_urb failed: %d", ret);
+ goto err;
+ }
+
+ ret = hid_hw_output_report(dev->hdev, dev->buf_out,
+ sizeof(struct u2f_hid_msg));
+
+ if (ret < 0) {
+ hid_err(hdev, "hid_hw_output_report failed: %d", ret);
+ goto err;
+ }
+
+	ret = wait_for_completion_timeout(
+		&ctx.done, msecs_to_jiffies(USB_CTRL_SET_TIMEOUT));
+	if (ret == 0) {
+		/* wait_for_completion_timeout() returns 0 on timeout */
+		usb_kill_urb(dev->urb);
+		hid_err(hdev, "urb submission timed out");
+		ret = -ETIMEDOUT;
+	} else {
+		ret = dev->urb->actual_length;
+		memcpy(resp, dev->buf_in, ret);
+	}
+
+err:
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int u2fzero_blink(struct led_classdev *ldev)
+{
+ struct u2fzero_device *dev = container_of(ldev,
+ struct u2fzero_device, ldev);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_WINK,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ return u2fzero_send(dev, &req);
+}
+
+static int u2fzero_brightness_set(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ ldev->brightness = LED_OFF;
+ if (brightness)
+ return u2fzero_blink(ldev);
+ else
+ return 0;
+}
+
+static int u2fzero_rng_read(struct hwrng *rng, void *data,
+ size_t max, bool wait)
+{
+ struct u2fzero_device *dev = container_of(rng,
+ struct u2fzero_device, hwrng);
+ struct u2f_hid_report req = {
+ .report_type = 0,
+ .msg.cid = CID_BROADCAST,
+ .msg.init = {
+ .cmd = U2F_CUSTOM_GET_RNG,
+ .bcnth = 0,
+ .bcntl = 0,
+ .data = {0},
+ }
+ };
+ struct u2f_hid_msg resp;
+ int ret;
+ size_t actual_length;
+
+ if (!dev->present) {
+ hid_dbg(dev->hdev, "device not present");
+ return 0;
+ }
+
+ ret = u2fzero_recv(dev, &req, &resp);
+ if (ret < 0)
+ return 0;
+
+ /* only take the minimum amount of data it is safe to take */
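+	/*
+	 * That is the smallest of: the bytes actually returned by the URB
+	 * (minus the header up to init.data), the length the device claims
+	 * in bcnth/bcntl, and the number of bytes the hwrng core asked for.
+	 */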
+ actual_length = min3((size_t)ret - offsetof(struct u2f_hid_msg,
+ init.data), U2F_HID_MSG_LEN(resp), max);
+
+ memcpy(data, resp.init.data, actual_length);
+
+ return actual_length;
+}
+
+static int u2fzero_init_led(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->led_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s%u", DRIVER_SHORT, minor);
+ if (dev->led_name == NULL)
+ return -ENOMEM;
+
+ dev->ldev.name = dev->led_name;
+ dev->ldev.max_brightness = LED_ON;
+ dev->ldev.flags = LED_HW_PLUGGABLE;
+ dev->ldev.brightness_set_blocking = u2fzero_brightness_set;
+
+ return devm_led_classdev_register(&dev->hdev->dev, &dev->ldev);
+}
+
+static int u2fzero_init_hwrng(struct u2fzero_device *dev,
+ unsigned int minor)
+{
+ dev->rng_name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
+ "%s-rng%u", DRIVER_SHORT, minor);
+ if (dev->rng_name == NULL)
+ return -ENOMEM;
+
+ dev->hwrng.name = dev->rng_name;
+ dev->hwrng.read = u2fzero_rng_read;
+ dev->hwrng.quality = 1;
+
+ return devm_hwrng_register(&dev->hdev->dev, &dev->hwrng);
+}
+
+static int u2fzero_fill_in_urb(struct u2fzero_device *dev)
+{
+ struct hid_device *hdev = dev->hdev;
+ struct usb_device *udev;
+ struct usbhid_device *usbhid = hdev->driver_data;
+ unsigned int pipe_in;
+ struct usb_host_endpoint *ep;
+
+ if (dev->hdev->bus != BUS_USB)
+ return -EINVAL;
+
+ udev = hid_to_usb_dev(hdev);
+
+ if (!usbhid->urbout || !usbhid->urbin)
+ return -ENODEV;
+
+ ep = usb_pipe_endpoint(udev, usbhid->urbin->pipe);
+ if (!ep)
+ return -ENODEV;
+
+ dev->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!dev->urb)
+ return -ENOMEM;
+
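+	/*
+	 * The pipe type is encoded in bits 30-31 of the pipe value, so the
+	 * line below keeps the device/endpoint/direction bits of the existing
+	 * IN pipe and only forces the type to interrupt.
+	 */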
+ pipe_in = (usbhid->urbin->pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+
+ usb_fill_int_urb(dev->urb,
+ udev,
+ pipe_in,
+ dev->buf_in,
+ HID_REPORT_SIZE,
+ u2fzero_read_callback,
+ NULL,
+ ep->desc.bInterval);
+
+ return 0;
+}
+
+static int u2fzero_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct u2fzero_device *dev;
+ unsigned int minor;
+ int ret;
+
+ if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ return -EINVAL;
+
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL)
+ return -ENOMEM;
+
+ dev->buf_out = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_report), GFP_KERNEL);
+ if (dev->buf_out == NULL)
+ return -ENOMEM;
+
+ dev->buf_in = devm_kmalloc(&hdev->dev,
+ sizeof(struct u2f_hid_msg), GFP_KERNEL);
+ if (dev->buf_in == NULL)
+ return -ENOMEM;
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ dev->hdev = hdev;
+ hid_set_drvdata(hdev, dev);
+ mutex_init(&dev->lock);
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret)
+ return ret;
+
+ u2fzero_fill_in_urb(dev);
+
+ dev->present = true;
+
+ minor = ((struct hidraw *) hdev->hidraw)->minor;
+
+ ret = u2fzero_init_led(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero LED initialised\n");
+
+ ret = u2fzero_init_hwrng(dev, minor);
+ if (ret) {
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ hid_info(hdev, "U2F Zero RNG initialised\n");
+
+ return 0;
+}
+
+static void u2fzero_remove(struct hid_device *hdev)
+{
+ struct u2fzero_device *dev = hid_get_drvdata(hdev);
+
+ mutex_lock(&dev->lock);
+ dev->present = false;
+ mutex_unlock(&dev->lock);
+
+ hid_hw_stop(hdev);
+ usb_poison_urb(dev->urb);
+ usb_free_urb(dev->urb);
+}
+
+static const struct hid_device_id u2fzero_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_U2F_ZERO) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, u2fzero_table);
+
+static struct hid_driver u2fzero_driver = {
+ .name = "hid-" DRIVER_SHORT,
+ .probe = u2fzero_probe,
+ .remove = u2fzero_remove,
+ .id_table = u2fzero_table,
+};
+
+module_hid_driver(u2fzero_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrej Shadura <andrew@shadura.me>");
+MODULE_DESCRIPTION("U2F Zero LED and RNG driver");
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4d1f24ee249c..90164fed08d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,8 +184,6 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_NO_RUNTIME_PM },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
- { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
- I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 519e4c8b53c4..786adbc97ac5 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -14,4 +14,19 @@ config INTEL_ISH_HID
Broxton and Kaby Lake.
Say Y here if you want to support Intel ISH. If unsure, say N.
+
+config INTEL_ISH_FIRMWARE_DOWNLOADER
+ tristate "Host Firmware Load feature for Intel ISH"
+ depends on INTEL_ISH_HID
+ depends on X86
+ help
+ The Integrated Sensor Hub (ISH) enables the kernel to offload
+ sensor polling and algorithm processing to a dedicated low power
+ processor in the chipset.
+
+	  The Host Firmware Load feature adds support for loading the ISH
+	  firmware from the host file system at boot.
+
+	  Say M here if you want to support the Host Firmware Loading feature
+	  for Intel ISH. If unsure, say N.
endmenu
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index 825b70af672f..f0a82b1c7cb9 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -20,4 +20,7 @@ obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
intel-ishtp-hid-objs := ishtp-hid.o
intel-ishtp-hid-objs += ishtp-hid-client.o
-ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
+obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
+intel-ishtp-loader-objs += ishtp-fw-loader.o
+
+ccflags-y += -I $(srctree)/$(src)/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 08a8327dfd22..523c0cbd44a4 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -31,6 +31,7 @@
#define CNL_H_DEVICE_ID 0xA37C
#define ICL_MOBILE_DEVICE_ID 0x34FC
#define SPT_H_DEVICE_ID 0xA135
+#define CML_LP_DEVICE_ID 0x02FC
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a6e1ee744f4d..ac0a179daf23 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -40,6 +40,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
new file mode 100644
index 000000000000..22ba21457035
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISH-TP client driver for ISH firmware loading
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/property.h>
+#include <asm/cacheflush.h>
+
+/* Number of times we attempt to load the firmware before giving up */
+#define MAX_LOAD_ATTEMPTS 3
+
+/* ISH TX/RX ring buffer pool size */
+#define LOADER_CL_RX_RING_SIZE 1
+#define LOADER_CL_TX_RING_SIZE 1
+
+/*
+ * ISH Shim firmware loader reserves 4 Kb buffer in SRAM. The buffer is
+ * used to temporarily hold the data transferred from host to Shim
+ * firmware loader. Reason for the odd size of 3968 bytes? Each IPC
+ * transfer is 128 bytes (= 4 bytes header + 124 bytes payload). So the
+ * 4 Kb buffer can hold maximum of 32 IPC transfers, which means we can
+ * have a max payload of 3968 bytes (= 32 x 124 payload).
+ */
+#define LOADER_SHIM_IPC_BUF_SIZE 3968
+
+/**
+ * enum ish_loader_commands - ISH loader host commands.
+ * @LOADER_CMD_XFER_QUERY:	Query the Shim firmware loader for
+ *				capabilities.
+ * @LOADER_CMD_XFER_FRAGMENT:	Transfer one firmware image fragment at a
+ *				time. The command may be executed
+ *				multiple times until the entire firmware
+ *				image is downloaded to SRAM.
+ * @LOADER_CMD_START:		Start executing the main firmware.
+ */
+enum ish_loader_commands {
+ LOADER_CMD_XFER_QUERY = 0,
+ LOADER_CMD_XFER_FRAGMENT,
+ LOADER_CMD_START,
+};
+
+/* Command bit mask */
+#define CMD_MASK GENMASK(6, 0)
+#define IS_RESPONSE BIT(7)
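+/*
+ * A response from the loader carries the request's command code in the low
+ * seven bits with IS_RESPONSE set; e.g. the reply to LOADER_CMD_XFER_QUERY
+ * (0x00) arrives with a command byte of 0x80.
+ */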
+
+/*
+ * ISH firmware max delay for one transmit failure is 1 Hz,
+ * and firmware will retry 2 times, so 3 Hz is used for timeout.
+ */
+#define ISHTP_SEND_TIMEOUT (3 * HZ)
+
+/*
+ * Loader transfer modes:
+ *
+ * LOADER_XFER_MODE_ISHTP mode uses the existing ISH-TP mechanism to
+ * transfer data. This may use IPC or DMA if supported in firmware.
+ * The buffer size is limited to 4 Kb by the IPC/ISH-TP protocol for
+ * both IPC & DMA (legacy).
+ *
+ * LOADER_XFER_MODE_DIRECT_DMA - firmware loading is a bit different
+ * from sensor data streaming. Here we download a large (300+ Kb)
+ * image directly to ISH SRAM memory. There is little benefit in
+ * DMA'ing a 300 Kb image within the 4 Kb chunk limit. Hence, we
+ * introduce this "direct dma" mode, where we do not use ISH-TP for
+ * DMA, but instead manage the DMA directly in the kernel driver and
+ * the Shim firmware loader (allocate a buffer, break the image into
+ * chunks and transfer). This overcomes the 4 Kb limit and optimizes
+ * the data flow path in firmware.
+ */
+#define LOADER_XFER_MODE_DIRECT_DMA BIT(0)
+#define LOADER_XFER_MODE_ISHTP BIT(1)
+
+/* ISH Transport Loader client unique GUID */
+static const guid_t loader_ishtp_guid =
+ GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+
+#define FILENAME_SIZE 256
+
+/*
+ * The firmware loading latency is lowest if we can DMA the entire ISH
+ * firmware image in one go. This requires allocating a large DMA
+ * buffer in the kernel, which could be problematic on some platforms.
+ * So here we limit the DMA buffer size via a module_param. We default
+ * to 4 pages, but users can set a higher limit if appropriate for
+ * their platform.
+ */
+static int dma_buf_size_limit = 4 * PAGE_SIZE;
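+/*
+ * The limit can be raised at load time through the module parameter
+ * declared at the bottom of this file, e.g. (illustrative value):
+ *	modprobe intel-ishtp-loader dma_buf_size_limit=65536
+ */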
+
+/**
+ * struct loader_msg_hdr - Header for ISH Loader commands.
+ * @command: LOADER_CMD* commands. Bit 7 is the response.
+ * @status:	Command response status. Non-zero indicates an
+ *		error condition.
+ *
+ * This structure is used as header for every command/data sent/received
+ * between Host driver and ISH Shim firmware loader.
+ */
+struct loader_msg_hdr {
+ u8 command;
+ u8 reserved[2];
+ u8 status;
+} __packed;
+
+struct loader_xfer_query {
+ struct loader_msg_hdr hdr;
+ u32 image_size;
+} __packed;
+
+struct ish_fw_version {
+ u16 major;
+ u16 minor;
+ u16 hotfix;
+ u16 build;
+} __packed;
+
+union loader_version {
+ u32 value;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 hotfix;
+ u8 build;
+ };
+} __packed;
+
+struct loader_capability {
+ u32 max_fw_image_size;
+ u32 xfer_mode;
+ u32 max_dma_buf_size; /* only for dma mode, multiples of cacheline */
+} __packed;
+
+struct shim_fw_info {
+ struct ish_fw_version ish_fw_version;
+ u32 protocol_version;
+ union loader_version ldr_version;
+ struct loader_capability ldr_capability;
+} __packed;
+
+struct loader_xfer_query_response {
+ struct loader_msg_hdr hdr;
+ struct shim_fw_info fw_info;
+} __packed;
+
+struct loader_xfer_fragment {
+ struct loader_msg_hdr hdr;
+ u32 xfer_mode;
+ u32 offset;
+ u32 size;
+ u32 is_last;
+} __packed;
+
+struct loader_xfer_ipc_fragment {
+ struct loader_xfer_fragment fragment;
+ u8 data[] ____cacheline_aligned; /* variable length payload here */
+} __packed;
+
+struct loader_xfer_dma_fragment {
+ struct loader_xfer_fragment fragment;
+ u64 ddr_phys_addr;
+} __packed;
+
+struct loader_start {
+ struct loader_msg_hdr hdr;
+} __packed;
+
+/**
+ * struct response_info - Encapsulate firmware response related
+ * information for passing between function
+ * loader_cl_send() and process_recv() callback.
+ * @data:		Copy the data received from firmware here.
+ * @max_size:		Max size allocated for the @data buffer. If the
+ *			received data exceeds this value, we log an
+ *			error.
+ * @size:		Actual size of data received from firmware.
+ * @error:		Returns 0 for success, negative error code for a
+ *			failure in function process_recv().
+ * @received:		Set to true on receiving a valid firmware
+ *			response to a host command.
+ * @wait_queue:		Wait queue for Host firmware loading, where the
+ *			client sends a message to ISH firmware and waits
+ *			for the response.
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ int error;
+ bool received;
+ wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH-TP Client Data.
+ * @work_ishtp_reset: Work queue for reset handling.
+ * @work_fw_load: Work queue for host firmware loading.
+ * @flag_retry:		Flag indicating that host firmware loading should
+ *			be retried.
+ * @retry_count:	Counts the number of retries.
+ *
+ * This structure is used to store data per client.
+ */
+struct ishtp_cl_data {
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ /*
+ * Used for passing firmware response information between
+ * loader_cl_send() and process_recv() callback.
+ */
+ struct response_info response;
+
+ struct work_struct work_ishtp_reset;
+ struct work_struct work_fw_load;
+
+ /*
+	 * In certain failure scenarios, it makes sense to reset the ISH
+ * subsystem and retry Host firmware loading (e.g. bad message
+ * packet, ENOMEM, etc.). On the other hand, failures due to
+ * protocol mismatch, etc., are not recoverable. We do not
+ * retry them.
+ *
+ * If set, the flag indicates that we should re-try the
+ * particular failure.
+ */
+ bool flag_retry;
+ int retry_count;
+};
+
+#define IPC_FRAGMENT_DATA_PREAMBLE \
+ offsetof(struct loader_xfer_ipc_fragment, data)
+
+#define cl_data_to_dev(client_data) ishtp_device((client_data)->cl_device)
+
+/**
+ * get_firmware_variant() - Gets the filename of firmware image to be
+ * loaded based on platform variant.
+ * @client_data:	Client data instance.
+ * @filename:	Returns firmware filename.
+ *
+ * Queries the firmware-name device property string.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int get_firmware_variant(struct ishtp_cl_data *client_data,
+ char *filename)
+{
+ int rv;
+ const char *val;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+
+ rv = device_property_read_string(devc, "firmware-name", &val);
+ if (rv < 0) {
+ dev_err(devc,
+ "Error: ISH firmware-name device property required\n");
+ return rv;
+ }
+ return snprintf(filename, FILENAME_SIZE, "intel/%s", val);
+}
+
+/**
+ * loader_cl_send() - Send a message from host to firmware
+ * @client_data: Client data instance
+ * @out_msg:	Message buffer to be sent to firmware
+ * @out_size:	Size of the outgoing message
+ * @in_msg:	Message buffer where the incoming data is copied.
+ *		This buffer is allocated by the calling function.
+ * @in_size:	Max size of the incoming message
+ *
+ * Return: Number of bytes copied in the in_msg on success, negative
+ * error code on failure.
+ */
+static int loader_cl_send(struct ishtp_cl_data *client_data,
+ u8 *out_msg, size_t out_size,
+ u8 *in_msg, size_t in_size)
+{
+ int rv;
+ struct loader_msg_hdr *out_hdr = (struct loader_msg_hdr *)out_msg;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ out_hdr->command & CMD_MASK,
+ out_hdr->command & IS_RESPONSE ? 1 : 0,
+ out_hdr->status);
+
+	/* Set up the incoming buffer and size */
+ client_data->response.data = in_msg;
+ client_data->response.max_size = in_size;
+ client_data->response.error = 0;
+ client_data->response.received = false;
+
+ rv = ishtp_cl_send(loader_ishtp_cl, out_msg, out_size);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data),
+ "ishtp_cl_send error %d\n", rv);
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(client_data->response.wait_queue,
+ client_data->response.received,
+ ISHTP_SEND_TIMEOUT);
+ if (!client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Timed out for response to command=%02lx",
+ out_hdr->command & CMD_MASK);
+ return -ETIMEDOUT;
+ }
+
+ if (client_data->response.error < 0)
+ return client_data->response.error;
+
+ return client_data->response.size;
+}
+
+/**
+ * process_recv() - Receive and parse incoming packet
+ * @loader_ishtp_cl: Client instance to get stats
+ * @rb_in_proc: ISH received message buffer
+ *
+ * Parse the incoming packet. If it is a response packet then it will
+ * update the received flag and wake up the caller waiting for the response.
+ */
+static void process_recv(struct ishtp_cl *loader_ishtp_cl,
+ struct ishtp_cl_rb *rb_in_proc)
+{
+ struct loader_msg_hdr *hdr;
+ size_t data_len = rb_in_proc->buf_idx;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ /* Sanity check */
+ if (!client_data->response.data) {
+ dev_err(cl_data_to_dev(client_data),
+ "Receiving buffer is null. Should be allocated by calling function\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+
+ if (client_data->response.received) {
+ dev_err(cl_data_to_dev(client_data),
+ "Previous firmware message not yet processed\n");
+ client_data->response.error = -EINVAL;
+ goto end;
+ }
+ /*
+ * All firmware messages have a header. Check buffer size
+ * before accessing elements inside.
+ */
+ if (!rb_in_proc->buffer.data) {
+ dev_warn(cl_data_to_dev(client_data),
+ "rb_in_proc->buffer.data returned null");
+ client_data->response.error = -EBADMSG;
+ goto end;
+ }
+
+ if (data_len < sizeof(struct loader_msg_hdr)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %zu is less than header %zu\n",
+ data_len, sizeof(struct loader_msg_hdr));
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ hdr = (struct loader_msg_hdr *)rb_in_proc->buffer.data;
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "%s: command=%02lx is_response=%u status=%02x\n",
+ __func__,
+ hdr->command & CMD_MASK,
+ hdr->command & IS_RESPONSE ? 1 : 0,
+ hdr->status);
+
+ if (((hdr->command & CMD_MASK) != LOADER_CMD_XFER_QUERY) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_XFER_FRAGMENT) &&
+ ((hdr->command & CMD_MASK) != LOADER_CMD_START)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid command=%02lx\n",
+ hdr->command & CMD_MASK);
+ client_data->response.error = -EPROTO;
+ goto end;
+ }
+
+ if (data_len > client_data->response.max_size) {
+ dev_err(cl_data_to_dev(client_data),
+ "Received buffer size %zu is larger than allocated buffer %zu\n",
+ data_len, client_data->response.max_size);
+ client_data->response.error = -EMSGSIZE;
+ goto end;
+ }
+
+ /* We expect only "response" messages from firmware */
+ if (!(hdr->command & IS_RESPONSE)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Invalid response to command\n");
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ if (hdr->status) {
+ dev_err(cl_data_to_dev(client_data),
+ "Loader returned status %d\n",
+ hdr->status);
+ client_data->response.error = -EIO;
+ goto end;
+ }
+
+ /* Update the actual received buffer size */
+ client_data->response.size = data_len;
+
+ /*
+ * Copy the buffer received in firmware response for the
+ * calling thread.
+ */
+ memcpy(client_data->response.data,
+ rb_in_proc->buffer.data, data_len);
+
+ /* Set flag before waking up the caller */
+ client_data->response.received = true;
+
+end:
+ /* Free the buffer */
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ rb_in_proc = NULL;
+
+ /* Wake the calling thread */
+ wake_up_interruptible(&client_data->response.wait_queue);
+}
+
+/**
+ * loader_cl_event_cb() - bus driver callback for incoming message
+ * @cl_device: Pointer to the ishtp client device for which this
+ *	message is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv().
+ */
+static void loader_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_rb *rb_in_proc;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(loader_ishtp_cl)) != NULL) {
+ /* Process the data packet from firmware */
+ process_recv(loader_ishtp_cl, rb_in_proc);
+ }
+}
+
+/**
+ * ish_query_loader_prop() - Query ISH Shim firmware loader
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * This function queries the ISH Shim firmware loader for capabilities.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_query_loader_prop(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ struct shim_fw_info *fw_info)
+{
+ int rv;
+ struct loader_xfer_query ldr_xfer_query;
+ struct loader_xfer_query_response ldr_xfer_query_resp;
+
+ memset(&ldr_xfer_query, 0, sizeof(ldr_xfer_query));
+ ldr_xfer_query.hdr.command = LOADER_CMD_XFER_QUERY;
+ ldr_xfer_query.image_size = fw->size;
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_query,
+ sizeof(ldr_xfer_query),
+ (u8 *)&ldr_xfer_query_resp,
+ sizeof(ldr_xfer_query_resp));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ return rv;
+ }
+
+ /* On success, the return value is the received buffer size */
+ if (rv != sizeof(struct loader_xfer_query_response)) {
+ dev_err(cl_data_to_dev(client_data),
+ "data size %d is not equal to size of loader_xfer_query_response %zu\n",
+ rv, sizeof(struct loader_xfer_query_response));
+ client_data->flag_retry = true;
+ return -EMSGSIZE;
+ }
+
+ /* Save fw_info for use outside this function */
+ *fw_info = ldr_xfer_query_resp.fw_info;
+
+ /* Loader firmware properties */
+ dev_dbg(cl_data_to_dev(client_data),
+ "ish_fw_version: major=%d minor=%d hotfix=%d build=%d protocol_version=0x%x loader_version=%d\n",
+ fw_info->ish_fw_version.major,
+ fw_info->ish_fw_version.minor,
+ fw_info->ish_fw_version.hotfix,
+ fw_info->ish_fw_version.build,
+ fw_info->protocol_version,
+ fw_info->ldr_version.value);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "loader_capability: max_fw_image_size=0x%x xfer_mode=%d max_dma_buf_size=0x%x dma_buf_size_limit=0x%x\n",
+ fw_info->ldr_capability.max_fw_image_size,
+ fw_info->ldr_capability.xfer_mode,
+ fw_info->ldr_capability.max_dma_buf_size,
+ dma_buf_size_limit);
+
+ /* Sanity checks */
+ if (fw_info->ldr_capability.max_fw_image_size < fw->size) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH firmware size %zu is greater than Shim firmware loader max supported %d\n",
+ fw->size,
+ fw_info->ldr_capability.max_fw_image_size);
+ return -ENOSPC;
+ }
+
+ /* For DMA the buffer size should be multiple of cacheline size */
+ if ((fw_info->ldr_capability.xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) &&
+ (fw_info->ldr_capability.max_dma_buf_size % L1_CACHE_BYTES)) {
+ dev_err(cl_data_to_dev(client_data),
+ "Shim firmware loader buffer size %d should be multiple of cacheline\n",
+ fw_info->ldr_capability.max_dma_buf_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_fw_xfer_ishtp() - Loads ISH firmware using ishtp interface
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ *
+ * This function uses ISH-TP to transfer ISH firmware from host to
+ * ISH SRAM. Lower layers may use IPC or DMA depending on firmware
+ * support.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_ishtp(struct ishtp_cl_data *client_data,
+ const struct firmware *fw)
+{
+ int rv;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_xfer_ipc_fragment *ldr_xfer_ipc_frag;
+ struct loader_msg_hdr ldr_xfer_ipc_ack;
+
+ payload_max_size =
+ LOADER_SHIM_IPC_BUF_SIZE - IPC_FRAGMENT_DATA_PREAMBLE;
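+	/* i.e. the SRAM staging buffer size minus the space taken by the fragment header */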
+
+ ldr_xfer_ipc_frag = kzalloc(LOADER_SHIM_IPC_BUF_SIZE, GFP_KERNEL);
+ if (!ldr_xfer_ipc_frag) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ ldr_xfer_ipc_frag->fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_ipc_frag->fragment.xfer_mode = LOADER_XFER_MODE_ISHTP;
+
+ /* Break the firmware image into fragments and send as ISH-TP payload */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_ipc_frag->fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_ipc_frag->fragment.is_last = 1;
+ }
+
+ ldr_xfer_ipc_frag->fragment.offset = fragment_offset;
+ ldr_xfer_ipc_frag->fragment.size = fragment_size;
+ memcpy(ldr_xfer_ipc_frag->data,
+ &fw->data[fragment_offset],
+ fragment_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=ipc offset=0x%08x size=0x%08x is_last=%d\n",
+ ldr_xfer_ipc_frag->fragment.offset,
+ ldr_xfer_ipc_frag->fragment.size,
+ ldr_xfer_ipc_frag->fragment.is_last);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)ldr_xfer_ipc_frag,
+ IPC_FRAGMENT_DATA_PREAMBLE + fragment_size,
+ (u8 *)&ldr_xfer_ipc_ack,
+ sizeof(ldr_xfer_ipc_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ kfree(ldr_xfer_ipc_frag);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Free ISH buffer if not done already, in error case */
+ kfree(ldr_xfer_ipc_frag);
+ return rv;
+}
+
+/**
+ * ish_fw_xfer_direct_dma() - Loads ISH firmware using direct dma
+ * @client_data: Client data instance
+ * @fw: Pointer to firmware data struct in host memory
+ * @fw_info: Loader firmware properties
+ *
+ * Host firmware load is a unique case where we need to download
+ * a large firmware image (200+ Kb). This function implements
+ * direct DMA transfer in kernel and ISH firmware. This allows
+ * us to overcome the ISH-TP 4 Kb limit, and allows us to DMA
+ * directly to ISH UMA at location of choice.
+ * Function depends on corresponding support in ISH firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_xfer_direct_dma(struct ishtp_cl_data *client_data,
+ const struct firmware *fw,
+ const struct shim_fw_info fw_info)
+{
+ int rv;
+ void *dma_buf;
+ dma_addr_t dma_buf_phy;
+ u32 fragment_offset, fragment_size, payload_max_size;
+ struct loader_msg_hdr ldr_xfer_dma_frag_ack;
+ struct loader_xfer_dma_fragment ldr_xfer_dma_frag;
+ struct device *devc = ishtp_get_pci_device(client_data->cl_device);
+ u32 shim_fw_buf_size =
+ fw_info.ldr_capability.max_dma_buf_size;
+
+ /*
+ * payload_max_size should be set to minimum of
+ * (1) Size of firmware to be loaded,
+ * (2) Max DMA buffer size supported by Shim firmware,
+ * (3) DMA buffer size limit set by boot_param dma_buf_size_limit.
+ */
+ payload_max_size = min3(fw->size,
+ (size_t)shim_fw_buf_size,
+ (size_t)dma_buf_size_limit);
+
+ /*
+	 * The buffer size should be a multiple of the cacheline size;
+	 * if it is not, round down to the previous cacheline boundary.
+ */
+ payload_max_size &= ~(L1_CACHE_BYTES - 1);
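+	/* e.g. with 64-byte cachelines, a 4000-byte limit rounds down to 3968 */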
+
+ dma_buf = kmalloc(payload_max_size, GFP_KERNEL | GFP_DMA32);
+ if (!dma_buf) {
+ client_data->flag_retry = true;
+ return -ENOMEM;
+ }
+
+ dma_buf_phy = dma_map_single(devc, dma_buf, payload_max_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(devc, dma_buf_phy)) {
+ dev_err(cl_data_to_dev(client_data), "DMA map failed\n");
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_err_dma_buf_release;
+ }
+
+ ldr_xfer_dma_frag.fragment.hdr.command = LOADER_CMD_XFER_FRAGMENT;
+ ldr_xfer_dma_frag.fragment.xfer_mode = LOADER_XFER_MODE_DIRECT_DMA;
+ ldr_xfer_dma_frag.ddr_phys_addr = (u64)dma_buf_phy;
+
+	/* Send the firmware image in chunks of payload_max_size */
+ fragment_offset = 0;
+ while (fragment_offset < fw->size) {
+ if (fragment_offset + payload_max_size < fw->size) {
+ fragment_size = payload_max_size;
+ ldr_xfer_dma_frag.fragment.is_last = 0;
+ } else {
+ fragment_size = fw->size - fragment_offset;
+ ldr_xfer_dma_frag.fragment.is_last = 1;
+ }
+
+ ldr_xfer_dma_frag.fragment.offset = fragment_offset;
+ ldr_xfer_dma_frag.fragment.size = fragment_size;
+ memcpy(dma_buf, &fw->data[fragment_offset], fragment_size);
+
+ dma_sync_single_for_device(devc, dma_buf_phy,
+ payload_max_size,
+ DMA_TO_DEVICE);
+
+ /*
+		 * Flush the cache here because dma_sync_single_for_device()
+		 * does not do it for x86.
+ */
+ clflush_cache_range(dma_buf, payload_max_size);
+
+ dev_dbg(cl_data_to_dev(client_data),
+ "xfer_mode=dma offset=0x%08x size=0x%x is_last=%d ddr_phys_addr=0x%016llx\n",
+ ldr_xfer_dma_frag.fragment.offset,
+ ldr_xfer_dma_frag.fragment.size,
+ ldr_xfer_dma_frag.fragment.is_last,
+ ldr_xfer_dma_frag.ddr_phys_addr);
+
+ rv = loader_cl_send(client_data,
+ (u8 *)&ldr_xfer_dma_frag,
+ sizeof(ldr_xfer_dma_frag),
+ (u8 *)&ldr_xfer_dma_frag_ack,
+ sizeof(ldr_xfer_dma_frag_ack));
+ if (rv < 0) {
+ client_data->flag_retry = true;
+ goto end_err_resp_buf_release;
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+ kfree(dma_buf);
+ return 0;
+
+end_err_resp_buf_release:
+ /* Free ISH buffer if not done already, in error case */
+ dma_unmap_single(devc, dma_buf_phy, payload_max_size, DMA_TO_DEVICE);
+end_err_dma_buf_release:
+ kfree(dma_buf);
+ return rv;
+}
+
+/**
+ * ish_fw_start() - Start executing ISH main firmware
+ * @client_data: client data instance
+ *
+ * This function sends message to Shim firmware loader to start
+ * the execution of ISH main firmware.
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int ish_fw_start(struct ishtp_cl_data *client_data)
+{
+ struct loader_start ldr_start;
+ struct loader_msg_hdr ldr_start_ack;
+
+ memset(&ldr_start, 0, sizeof(ldr_start));
+ ldr_start.hdr.command = LOADER_CMD_START;
+ return loader_cl_send(client_data,
+ (u8 *)&ldr_start,
+ sizeof(ldr_start),
+ (u8 *)&ldr_start_ack,
+ sizeof(ldr_start_ack));
+}
+
+/**
+ * load_fw_from_host() - Loads ISH firmware from host
+ * @client_data: Client data instance
+ *
+ * This function loads the ISH firmware to ISH SRAM and starts execution
+ *
+ * Return: 0 for success, negative error code for failure.
+ */
+static int load_fw_from_host(struct ishtp_cl_data *client_data)
+{
+ int rv;
+ u32 xfer_mode;
+ char *filename;
+ const struct firmware *fw;
+ struct shim_fw_info fw_info;
+ struct ishtp_cl *loader_ishtp_cl = client_data->loader_ishtp_cl;
+
+ client_data->flag_retry = false;
+
+ filename = kzalloc(FILENAME_SIZE, GFP_KERNEL);
+ if (!filename) {
+ client_data->flag_retry = true;
+ rv = -ENOMEM;
+ goto end_error;
+ }
+
+ /* Get filename of the ISH firmware to be loaded */
+ rv = get_firmware_variant(client_data, filename);
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ rv = request_firmware(&fw, filename, cl_data_to_dev(client_data));
+ if (rv < 0)
+ goto end_err_filename_buf_release;
+
+ /* Step 1: Query Shim firmware loader properties */
+
+ rv = ish_query_loader_prop(client_data, fw, &fw_info);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+ /* Step 2: Send the main firmware image to be loaded, to ISH SRAM */
+
+ xfer_mode = fw_info.ldr_capability.xfer_mode;
+ if (xfer_mode & LOADER_XFER_MODE_DIRECT_DMA) {
+ rv = ish_fw_xfer_direct_dma(client_data, fw, fw_info);
+ } else if (xfer_mode & LOADER_XFER_MODE_ISHTP) {
+ rv = ish_fw_xfer_ishtp(client_data, fw);
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "No transfer mode selected in firmware\n");
+ rv = -EINVAL;
+ }
+ if (rv < 0)
+ goto end_err_fw_release;
+
+	/* Step 3: Start ISH main firmware execution */
+
+ rv = ish_fw_start(client_data);
+ if (rv < 0)
+ goto end_err_fw_release;
+
+	dev_info(cl_data_to_dev(client_data), "ISH firmware %s loaded\n",
+		 filename);
+	release_firmware(fw);
+	kfree(filename);
+ return 0;
+
+end_err_fw_release:
+ release_firmware(fw);
+end_err_filename_buf_release:
+ kfree(filename);
+end_error:
+ /* Keep a count of retries, and give up after 3 attempts */
+ if (client_data->flag_retry &&
+ client_data->retry_count++ < MAX_LOAD_ATTEMPTS) {
+ dev_warn(cl_data_to_dev(client_data),
+			 "ISH host firmware load failed %d. Resetting ISH and trying again...\n",
+ rv);
+ ish_hw_reset(ishtp_get_ishtp_device(loader_ishtp_cl));
+ } else {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH host firmware load failed %d\n", rv);
+ }
+ return rv;
+}
+
+static void load_fw_from_host_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_fw_load);
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_init() - Init function for ISH-TP client
+ * @loader_ishtp_cl: ISH-TP client instance
+ * @reset: true if called for init after reset
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
+{
+ int rv;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_data *client_data =
+ ishtp_get_client_data(loader_ishtp_cl);
+
+ dev_dbg(cl_data_to_dev(client_data), "reset flag: %d\n", reset);
+
+ rv = ishtp_cl_link(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ /* Connect to firmware client */
+ ishtp_set_tx_ring_size(loader_ishtp_cl, LOADER_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(loader_ishtp_cl, LOADER_CL_RX_RING_SIZE);
+
+ fw_client =
+ ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
+ &loader_ishtp_guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(client_data),
+ "ISH client uuid not found\n");
+ rv = -ENOENT;
+ goto err_cl_unlink;
+ }
+
+ ishtp_cl_set_fw_client_id(loader_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(loader_ishtp_cl);
+ if (rv < 0) {
+ dev_err(cl_data_to_dev(client_data), "Client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ dev_dbg(cl_data_to_dev(client_data), "Client connected\n");
+
+ ishtp_register_event_cb(client_data->cl_device, loader_cl_event_cb);
+
+ return 0;
+
+err_cl_unlink:
+ ishtp_cl_unlink(loader_ishtp_cl);
+ return rv;
+}
+
+static void loader_deinit(struct ishtp_cl *loader_ishtp_cl)
+{
+ ishtp_set_connection_state(loader_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(loader_ishtp_cl);
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+
+ /* Disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(loader_ishtp_cl);
+}
+
+static void reset_handler(struct work_struct *work)
+{
+ int rv;
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+
+ client_data = container_of(work, struct ishtp_cl_data,
+ work_ishtp_reset);
+
+ loader_ishtp_cl = client_data->loader_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ /* Unlink, flush queues & start again */
+ ishtp_cl_unlink(loader_ishtp_cl);
+ ishtp_cl_flush_queues(loader_ishtp_cl);
+ ishtp_cl_free(loader_ishtp_cl);
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ rv = loader_init(loader_ishtp_cl, 1);
+ if (rv < 0) {
+ dev_err(ishtp_device(cl_device), "Reset Failed\n");
+ return;
+ }
+
+ /* ISH firmware loading from host */
+ load_fw_from_host(client_data);
+}
+
+/**
+ * loader_ishtp_cl_probe() - ISH-TP client driver probe
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device create on ISH-TP bus
+ *
+ * Return: 0 for success, negative error code for failure
+ */
+static int loader_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *loader_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ loader_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!loader_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, loader_ishtp_cl);
+ ishtp_set_client_data(loader_ishtp_cl, client_data);
+ client_data->loader_ishtp_cl = loader_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->response.wait_queue);
+
+ INIT_WORK(&client_data->work_ishtp_reset,
+ reset_handler);
+ INIT_WORK(&client_data->work_fw_load,
+ load_fw_from_host_handler);
+
+ rv = loader_init(loader_ishtp_cl, 0);
+ if (rv < 0) {
+ ishtp_cl_free(loader_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ client_data->retry_count = 0;
+
+ /* ISH firmware loading from host */
+ schedule_work(&client_data->work_fw_load);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_remove() - ISH-TP client driver remove
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device remove on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ /*
+ * The sequence of the following two cancel_work_sync() is
+	 * important. The work_fw_load can in turn schedule
+ * work_ishtp_reset, so first cancel work_fw_load then
+ * cancel work_ishtp_reset.
+ */
+ cancel_work_sync(&client_data->work_fw_load);
+ cancel_work_sync(&client_data->work_ishtp_reset);
+ loader_deinit(loader_ishtp_cl);
+ ishtp_put_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * loader_ishtp_cl_reset() - ISH-TP client driver reset
+ * @cl_device: ISH-TP client device instance
+ *
+ * This function gets called on device reset on ISH-TP bus
+ *
+ * Return: 0
+ */
+static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *loader_ishtp_cl = ishtp_get_drvdata(cl_device);
+
+ client_data = ishtp_get_client_data(loader_ishtp_cl);
+
+ schedule_work(&client_data->work_ishtp_reset);
+
+ return 0;
+}
+
+static struct ishtp_cl_driver loader_ishtp_cl_driver = {
+ .name = "ish-loader",
+ .guid = &loader_ishtp_guid,
+ .probe = loader_ishtp_cl_probe,
+ .remove = loader_ishtp_cl_remove,
+ .reset = loader_ishtp_cl_reset,
+};
+
+static int __init ish_loader_init(void)
+{
+ return ishtp_cl_driver_register(&loader_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ish_loader_exit(void)
+{
+ ishtp_cl_driver_unregister(&loader_ishtp_cl_driver);
+}
+
+late_initcall(ish_loader_init);
+module_exit(ish_loader_exit);
+
+module_param(dma_buf_size_limit, int, 0644);
+MODULE_PARM_DESC(dma_buf_size_limit, "Limit the DMA buf size to this value in bytes");
+
+MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
+MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 30fe0c5e6fad..56777a43e69c 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -15,15 +15,16 @@
#include <linux/module.h>
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <linux/sched.h>
-#include "ishtp/ishtp-dev.h"
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
#define HID_CL_TX_RING_SIZE 16
+#define cl_data_to_dev(client_data) ishtp_device(client_data->cl_device)
+
/**
* report_bad_packets() - Report bad packets
* @hid_ishtp_cl: Client instance to get stats
@@ -37,9 +38,9 @@ static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
size_t cur_pos, size_t payload_len)
{
struct hostif_msg *recv_msg = recv_buf;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
- dev_err(&client_data->cl_device->dev, "[hid-ish]: BAD packet %02X\n"
+ dev_err(cl_data_to_dev(client_data), "[hid-ish]: BAD packet %02X\n"
"total_bad=%u cur_pos=%u\n"
"[%02X %02X %02X %02X]\n"
"payload_len=%u\n"
@@ -69,13 +70,15 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
unsigned char *payload;
struct device_info *dev_info;
int i, j;
- size_t payload_len, total_len, cur_pos;
+ size_t payload_len, total_len, cur_pos, raw_len;
int report_type;
struct report_list *reports_list;
char *reports;
size_t report_len;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int curr_hid_dev = client_data->cur_hid_dev;
+ struct ishtp_hid_data *hid_data = NULL;
+ struct hid_device *hid = NULL;
payload = recv_buf + sizeof(struct hostif_msg_hdr);
total_len = data_len;
@@ -83,12 +86,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
do {
if (cur_pos + sizeof(struct hostif_msg) > total_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: error, received %u which is less than data header %u\n",
(unsigned int)data_len,
(unsigned int)sizeof(struct hostif_msg_hdr));
++client_data->bad_recv_cnt;
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -101,7 +104,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -116,18 +119,18 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
client_data->hid_dev_count = (unsigned int)*payload;
if (!client_data->hid_devices)
client_data->hid_devices = devm_kcalloc(
- &client_data->cl_device->dev,
+ cl_data_to_dev(client_data),
client_data->hid_dev_count,
sizeof(struct device_info),
GFP_KERNEL);
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"Mem alloc failed for hid device info\n");
wake_up_interruptible(&client_data->init_wait);
break;
@@ -135,7 +138,7 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
for (i = 0; i < client_data->hid_dev_count; ++i) {
if (1 + sizeof(struct device_info) * i >=
payload_len) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: [ENUM_DEVICES]: content size %zu is bigger than payload_len %zu\n",
1 + sizeof(struct device_info)
* i, payload_len);
@@ -165,12 +168,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->hid_descr[curr_hid_dev])
client_data->hid_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->hid_descr[curr_hid_dev]) {
memcpy(client_data->hid_descr[curr_hid_dev],
@@ -190,12 +193,12 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
report_bad_packet(hid_ishtp_cl, recv_msg,
cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
if (!client_data->report_descr[curr_hid_dev])
client_data->report_descr[curr_hid_dev] =
- devm_kmalloc(&client_data->cl_device->dev,
+ devm_kmalloc(cl_data_to_dev(client_data),
payload_len, GFP_KERNEL);
if (client_data->report_descr[curr_hid_dev]) {
memcpy(client_data->report_descr[curr_hid_dev],
@@ -219,18 +222,31 @@ do_get_report:
/* Get index of device that matches this id */
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (recv_msg->hdr.device_id ==
- client_data->hid_devices[i].dev_id)
- if (client_data->hid_sensor_hubs[i]) {
- hid_input_report(
- client_data->hid_sensor_hubs[
- i],
- report_type, payload,
- payload_len, 0);
- ishtp_hid_wakeup(
- client_data->hid_sensor_hubs[
- i]);
+ client_data->hid_devices[i].dev_id) {
+ hid = client_data->hid_sensor_hubs[i];
+ if (!hid)
break;
+
+ hid_data = hid->driver_data;
+ if (hid_data->raw_get_req) {
+ raw_len =
+ (hid_data->raw_buf_size <
+ payload_len) ?
+ hid_data->raw_buf_size :
+ payload_len;
+
+ memcpy(hid_data->raw_buf,
+ payload, raw_len);
+ } else {
+ hid_input_report
+ (hid, report_type,
+ payload, payload_len,
+ 0);
}
+
+ ishtp_hid_wakeup(hid);
+ break;
+ }
}
break;
@@ -295,7 +311,7 @@ do_get_report:
++client_data->bad_recv_cnt;
report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
payload_len);
- ish_hw_reset(hid_ishtp_cl->dev);
+ ish_hw_reset(ishtp_get_ishtp_device(hid_ishtp_cl));
break;
}
@@ -475,7 +491,7 @@ int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int retry_count;
int rv;
@@ -501,18 +517,18 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
sizeof(struct hostif_msg));
}
if (!client_data->enum_devices_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out waiting for enum_devices\n");
return -ETIMEDOUT;
}
if (!client_data->hid_devices) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to allocate HID dev structures\n");
return -ENOMEM;
}
client_data->num_hid_devices = client_data->hid_dev_count;
- dev_info(&hid_ishtp_cl->device->dev,
+ dev_info(ishtp_device(client_data->cl_device),
"[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
client_data->num_hid_devices);
@@ -531,7 +547,7 @@ static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get HID descriptor */
@@ -549,13 +565,13 @@ static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
client_data->hid_descr_done,
3 * HZ);
if (!client_data->hid_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for hid_descr_done\n");
return -EIO;
}
if (!client_data->hid_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: allocation HID desc fail\n");
return -ENOMEM;
}
@@ -578,7 +594,7 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
int index)
{
struct hostif_msg msg;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
int rv;
/* Get report descriptor */
@@ -596,12 +612,12 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
client_data->report_descr_done,
3 * HZ);
if (!client_data->report_descr_done) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: timed out for report descr\n");
return -EIO;
}
if (!client_data->report_descr[index]) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: failed to alloc report descr\n");
return -ENOMEM;
}
@@ -626,42 +642,42 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
struct ishtp_fw_client *fw_client;
int i;
int rv;
- dev_dbg(&client_data->cl_device->dev, "%s\n", __func__);
+ dev_dbg(cl_data_to_dev(client_data), "%s\n", __func__);
hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
- rv = ishtp_cl_link(hid_ishtp_cl, ISHTP_HOST_CLIENT_ID_ANY);
+ rv = ishtp_cl_link(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ishtp_cl_link failed\n");
return -ENOMEM;
}
client_data->init_done = 0;
- dev = hid_ishtp_cl->dev;
+ dev = ishtp_get_ishtp_device(hid_ishtp_cl);
/* Connect to FW client */
- hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
- hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
+ ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
if (!fw_client) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
return -ENOENT;
}
-
- hid_ishtp_cl->fw_client_id = fw_client->client_id;
- hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
+ ishtp_cl_set_fw_client_id(hid_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_CONNECTING);
rv = ishtp_cl_connect(hid_ishtp_cl);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"client connect fail\n");
goto err_cl_unlink;
}
@@ -669,7 +685,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_trace(client_data, "%s client connected\n", __func__);
/* Register read callback */
- ishtp_register_event_cb(hid_ishtp_cl->device, ish_cl_event_cb);
+ ishtp_register_event_cb(client_data->cl_device, ish_cl_event_cb);
rv = ishtp_enum_enum_devices(hid_ishtp_cl);
if (rv)
@@ -692,7 +708,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
if (!reset) {
rv = ishtp_hid_probe(i, client_data);
if (rv) {
- dev_err(&client_data->cl_device->dev,
+ dev_err(cl_data_to_dev(client_data),
"[hid-ish]: HID probe for #%u failed: %d\n",
i, rv);
goto err_cl_disconnect;
@@ -707,7 +723,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
return 0;
err_cl_disconnect:
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
err_cl_unlink:
ishtp_cl_unlink(hid_ishtp_cl);
@@ -744,16 +760,16 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
+ dev_dbg(ishtp_device(client_data->cl_device), "%s\n", __func__);
hid_ishtp_cl_deinit(hid_ishtp_cl);
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->num_hid_devices = 0;
@@ -762,15 +778,17 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
if (!rv)
break;
- dev_err(&client_data->cl_device->dev, "Retry reset init\n");
+ dev_err(cl_data_to_dev(client_data), "Retry reset init\n");
}
if (rv) {
- dev_err(&client_data->cl_device->dev, "Reset Failed\n");
+ dev_err(cl_data_to_dev(client_data), "Reset Failed\n");
hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
__func__, hid_ishtp_cl);
}
}
+void (*hid_print_trace)(void *unused, const char *format, ...);
+
/**
* hid_ishtp_cl_probe() - ISHTP client driver probe
* @cl_device: ISHTP client device instance
@@ -788,21 +806,18 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!cl_device)
return -ENODEV;
- if (!guid_equal(&hid_ishtp_guid,
- &cl_device->fw_client->props.protocol_name))
- return -ENODEV;
-
- client_data = devm_kzalloc(&cl_device->dev, sizeof(*client_data),
+ client_data = devm_kzalloc(ishtp_device(cl_device),
+ sizeof(*client_data),
GFP_KERNEL);
if (!client_data)
return -ENOMEM;
- hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device);
if (!hid_ishtp_cl)
return -ENOMEM;
ishtp_set_drvdata(cl_device, hid_ishtp_cl);
- hid_ishtp_cl->client_data = client_data;
+ ishtp_set_client_data(hid_ishtp_cl, client_data);
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -811,6 +826,8 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+ hid_print_trace = ishtp_trace_callback(cl_device);
+
rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
if (rv) {
ishtp_cl_free(hid_ishtp_cl);
@@ -832,13 +849,13 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- dev_dbg(&cl_device->dev, "%s\n", __func__);
- hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
+ ishtp_set_connection_state(hid_ishtp_cl, ISHTP_CL_DISCONNECTING);
ishtp_cl_disconnect(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
@@ -862,7 +879,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -872,8 +889,6 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
return 0;
}
-#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
-
/**
* hid_ishtp_cl_suspend() - ISHTP client driver suspend
* @device: device instance
@@ -884,9 +899,9 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_suspend(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -905,9 +920,9 @@ static int hid_ishtp_cl_suspend(struct device *device)
*/
static int hid_ishtp_cl_resume(struct device *device)
{
- struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
- struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_cl_data *client_data = ishtp_get_client_data(hid_ishtp_cl);
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
@@ -922,6 +937,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
static struct ishtp_cl_driver hid_ishtp_cl_driver = {
.name = "ish-hid",
+ .guid = &hid_ishtp_guid,
.probe = hid_ishtp_cl_probe,
.remove = hid_ishtp_cl_remove,
.reset = hid_ishtp_cl_reset,
@@ -933,7 +949,7 @@ static int __init ish_hid_init(void)
int rv;
/* Register ISHTP client device driver with ISHTP Bus */
- rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver);
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver, THIS_MODULE);
return rv;
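
Editor's note: an illustrative sketch, not part of this patch, of how a client driver looks once the bus matches on GUID and the module owner is passed explicitly at registration. All example_* names and the GUID value are hypothetical; only the ishtp_cl_driver fields and the ishtp_cl_driver_register()/ishtp_cl_driver_unregister() calls come from this series.

#include <linux/module.h>
#include <linux/uuid.h>
#include <linux/intel-ish-client-if.h>

static const guid_t example_ishtp_guid =
	GUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc);

static int example_probe(struct ishtp_cl_device *cl_device)
{
	/* allocate an ishtp_cl, link and connect here */
	return 0;
}

static int example_remove(struct ishtp_cl_device *cl_device)
{
	return 0;
}

static struct ishtp_cl_driver example_ishtp_cl_driver = {
	.name	= "example-ishtp-client",
	.guid	= &example_ishtp_guid,	/* compared by ishtp_cl_bus_match() */
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	/* the module owner is now passed explicitly, not via a wrapper macro */
	return ishtp_cl_driver_register(&example_ishtp_cl_driver, THIS_MODULE);
}
module_init(example_init);

static void __exit example_exit(void)
{
	ishtp_cl_driver_unregister(&example_ishtp_cl_driver);
}
module_exit(example_exit);
MODULE_LICENSE("GPL");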
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index bc4c536f3c0d..62c03561adaa 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -14,8 +14,8 @@
*/
#include <linux/hid.h>
+#include <linux/intel-ish-client-if.h>
#include <uapi/linux/input.h>
-#include "ishtp/client.h"
#include "ishtp-hid.h"
/**
@@ -59,10 +59,46 @@ static void ishtp_hid_close(struct hid_device *hid)
{
}
-static int ishtp_raw_request(struct hid_device *hdev, unsigned char reportnum,
- __u8 *buf, size_t len, unsigned char rtype, int reqtype)
+static int ishtp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
{
- return 0;
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ char *ishtp_buf = NULL;
+ size_t ishtp_buf_len;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ if (rtype == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_buf = buf;
+ hid_data->raw_buf_size = len;
+ hid_data->raw_get_req = true;
+
+ hid_ishtp_get_report(hid, reportnum, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ ishtp_buf_len = len + header_size;
+ ishtp_buf = kzalloc(ishtp_buf_len + 7, GFP_KERNEL);
+ if (!ishtp_buf)
+ return -ENOMEM;
+
+ memcpy(ishtp_buf + header_size, buf, len);
+ hid_ishtp_set_feature(hid, ishtp_buf, ishtp_buf_len, reportnum);
+ kfree(ishtp_buf);
+ break;
+ }
+
+ hid_hw_wait(hid);
+
+ return len;
}
/**
@@ -87,6 +123,7 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
hid_data->request_done = false;
switch (reqtype) {
case HID_REQ_GET_REPORT:
+ hid_data->raw_get_req = false;
hid_ishtp_get_report(hid, rep->id, rep->type);
break;
case HID_REQ_SET_REPORT:
@@ -116,7 +153,6 @@ static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
static int ishtp_wait_for_response(struct hid_device *hid)
{
struct ishtp_hid_data *hid_data = hid->driver_data;
- struct ishtp_cl_data *client_data = hid_data->client_data;
int rv;
hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
@@ -204,7 +240,8 @@ int ishtp_hid_probe(unsigned int cur_hid_dev,
hid->ll_driver = &ishtp_hid_ll_driver;
hid->bus = BUS_INTEL_ISHTP;
- hid->dev.parent = &client_data->cl_device->dev;
+ hid->dev.parent = ishtp_device(client_data->cl_device);
+
hid->version = le16_to_cpu(ISH_HID_VERSION);
hid->vendor = le16_to_cpu(client_data->hid_devices[cur_hid_dev].vid);
hid->product = le16_to_cpu(client_data->hid_devices[cur_hid_dev].pid);
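
Editor's note: a minimal usage sketch, not part of this patch, of the path the new ishtp_raw_request() enables. example_get_feature(), the report number, and the buffer size are hypothetical; hid_hw_raw_request() is the standard HID core entry point that lands in the ->raw_request callback above.

#include <linux/hid.h>
#include <linux/slab.h>

static int example_get_feature(struct hid_device *hid)
{
	u8 *buf;
	int ret;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * For HID_REQ_GET_REPORT, ishtp_raw_request() records buf/len in
	 * hid_data->raw_buf / raw_buf_size and sets raw_get_req, so
	 * process_recv() copies the response straight into this buffer
	 * before hid_hw_wait() returns.
	 */
	ret = hid_hw_raw_request(hid, 1, buf, 64,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);

	kfree(buf);
	return ret < 0 ? ret : 0;
}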
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 1cd07a441cd4..e27d3d6c1920 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -24,9 +24,9 @@
#define IS_RESPONSE 0x80
/* Used to dump to Linux trace buffer, if enabled */
-#define hid_ishtp_trace(client, ...) \
- client->cl_device->ishtp_dev->print_log(\
- client->cl_device->ishtp_dev, __VA_ARGS__)
+extern void (*hid_print_trace)(void *unused, const char *format, ...);
+#define hid_ishtp_trace(client, ...) \
+ (hid_print_trace)(NULL, __VA_ARGS__)
/* ISH Transport protocol (ISHTP in short) GUID */
static const guid_t hid_ishtp_guid =
@@ -159,6 +159,9 @@ struct ishtp_cl_data {
* @client_data: Link to the client instance
* @hid_wait: Completion waitq
*
+ * @raw_get_req: Flag indicating raw get request ongoing
+ * @raw_buf: raw request buffer filled on receiving get report
+ * @raw_buf_size: raw request buffer size
+ *
* Used to tie hid->driver_data to the driver client instance
*/
struct ishtp_hid_data {
@@ -166,6 +169,11 @@ struct ishtp_hid_data {
bool request_done;
struct ishtp_cl_data *client_data;
wait_queue_head_t hid_wait;
+
+ /* raw request */
+ bool raw_get_req;
+ u8 *raw_buf;
+ size_t raw_buf_size;
};
/* Interface functions between HID LL driver and ISH TP client */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index d5f4b6438d86..fb8ca12955b4 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -171,6 +171,19 @@ struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
EXPORT_SYMBOL(ishtp_fw_cl_get_client);
/**
+ * ishtp_get_fw_client_id() - Get fw client id
+ * @fw_client: fw client instance
+ *
+ * This interface is used to get the fw client id.
+ *
+ * Return: firmware client id.
+ */
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client)
+{
+ return fw_client->client_id;
+}
+EXPORT_SYMBOL(ishtp_get_fw_client_id);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -220,6 +233,26 @@ static int ishtp_cl_device_probe(struct device *dev)
}
/**
+ * ishtp_cl_bus_match() - Bus match() callback
+ * @dev: the device structure
+ * @drv: the driver structure
+ *
+ * This is a bus match callback, called when a new ishtp_cl_device is
+ * registered during ishtp bus client enumeration. Use the guid_t in
+ * drv and dev to decide whether they match or not.
+ *
+ * Return: 1 if dev & drv match, 0 otherwise.
+ */
+static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
+
+ return guid_equal(driver->guid,
+ &device->fw_client->props.protocol_name);
+}
+
+/**
* ishtp_cl_device_remove() - Bus remove() callback
* @dev: the device structure
*
@@ -372,6 +405,7 @@ static struct bus_type ishtp_cl_bus_type = {
.name = "ishtp",
.dev_groups = ishtp_cl_dev_groups,
.probe = ishtp_cl_device_probe,
+ .match = ishtp_cl_bus_match,
.remove = ishtp_cl_device_remove,
.pm = &ishtp_cl_bus_dev_pm_ops,
.uevent = ishtp_cl_uevent,
@@ -445,6 +479,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
}
ishtp_device_ready = true;
+ dev_set_drvdata(&device->dev, device);
return device;
}
@@ -464,7 +499,7 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
}
/**
- * __ishtp_cl_driver_register() - Client driver register
+ * ishtp_cl_driver_register() - Client driver register
* @driver: the client driver instance
* @owner: Owner of this driver module
*
@@ -473,8 +508,8 @@ static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
*
* Return: Return value of driver_register or -ENODEV if not ready
*/
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner)
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
{
int err;
@@ -491,7 +526,7 @@ int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
return 0;
}
-EXPORT_SYMBOL(__ishtp_cl_driver_register);
+EXPORT_SYMBOL(ishtp_cl_driver_register);
/**
* ishtp_cl_driver_unregister() - Client driver unregister
@@ -807,6 +842,59 @@ int ishtp_use_dma_transfer(void)
}
/**
+ * ishtp_device() - Return device pointer
+ * @device: ishtp_cl_device instance
+ *
+ * This interface is used to return device pointer from ishtp_cl_device
+ * instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_device(struct ishtp_cl_device *device)
+{
+ return &device->dev;
+}
+EXPORT_SYMBOL(ishtp_device);
+
+/**
+ * ishtp_get_pci_device() - Return PCI device dev pointer
+ * @device: ishtp_cl_device instance
+ *
+ * This interface is used to return the PCI device pointer
+ * from an ishtp_cl_device instance.
+ *
+ * Return: device *.
+ */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
+{
+ return device->ishtp_dev->devc;
+}
+EXPORT_SYMBOL(ishtp_get_pci_device);
+
+/**
+ * ishtp_trace_callback() - Return trace callback
+ * @cl_device: ishtp client device instance
+ *
+ * This interface is used to return trace callback function pointer.
+ *
+ * Return: void *.
+ */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->print_log;
+}
+EXPORT_SYMBOL(ishtp_trace_callback);
+
+/**
+ * ish_hw_reset() - Call HW reset IPC callback
+ * @dev: ishtp device instance
+ *
+ * This interface is used to reset HW in case of error.
+ *
+ * Return: value from IPC hw_reset callback
+ */
+int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+EXPORT_SYMBOL(ish_hw_reset);
+
+/**
* ishtp_bus_register() - Function to register bus
*
* This register ishtp bus
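
Editor's note: an illustrative sketch, not part of this patch, of how a client driver is expected to consume the helpers exported above instead of dereferencing struct ishtp_cl_device directly. struct example_client_data and both functions are hypothetical; the accessors are the ones added here.

#include <linux/device.h>
#include <linux/intel-ish-client-if.h>

struct example_client_data {
	struct ishtp_cl_device *cl_device;
};

/* mirrors the cl_data_to_dev() helper used by hid-ishtp-client.c above */
static inline struct device *example_cl_dev(struct example_client_data *cd)
{
	return ishtp_device(cd->cl_device);
}

static void example_handle_bad_packet(struct example_client_data *cd,
				      struct ishtp_cl *cl)
{
	dev_err(example_cl_dev(cd), "protocol error, resetting ISH\n");
	ish_hw_reset(ishtp_get_ishtp_device(cl));
}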
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index 4cf7ad586c37..93d516f5a853 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/intel-ish-client-if.h>
struct ishtp_cl;
struct ishtp_cl_device;
@@ -52,25 +53,6 @@ struct ishtp_cl_device {
void (*event_cb)(struct ishtp_cl_device *device);
};
-/**
- * struct ishtp_cl_device - ISHTP device handle
- * @driver: driver instance on a bus
- * @name: Name of the device for probe
- * @probe: driver callback for device probe
- * @remove: driver callback on device removal
- *
- * Client drivers defines to get probed/removed for ISHTP client device.
- */
-struct ishtp_cl_driver {
- struct device_driver driver;
- const char *name;
- int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
- int (*reset)(struct ishtp_cl_device *dev);
- const struct dev_pm_ops *pm;
-};
-
-
int ishtp_bus_new_client(struct ishtp_device *dev);
void ishtp_remove_all_clients(struct ishtp_device *dev);
int ishtp_cl_device_bind(struct ishtp_cl *cl);
@@ -98,22 +80,5 @@ void ishtp_recv(struct ishtp_device *dev);
void ishtp_reset_handler(struct ishtp_device *dev);
void ishtp_reset_compl_handler(struct ishtp_device *dev);
-void ishtp_put_device(struct ishtp_cl_device *);
-void ishtp_get_device(struct ishtp_cl_device *);
-
-void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
-void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
-
-int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
- struct module *owner);
-#define ishtp_cl_driver_register(driver) \
- __ishtp_cl_driver_register(driver, THIS_MODULE)
-void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
-
-int ishtp_register_event_cb(struct ishtp_cl_device *device,
- void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const guid_t *cuuid);
-struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
- const guid_t *uuid);
-
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index faeccdb1475b..b7ac5e3b1e82 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -126,7 +126,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
*
* Return: The allocated client instance or NULL on failure
*/
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device)
{
struct ishtp_cl *cl;
@@ -134,7 +134,7 @@ struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
if (!cl)
return NULL;
- ishtp_cl_init(cl, dev);
+ ishtp_cl_init(cl, cl_device->ishtp_dev);
return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);
@@ -168,9 +168,6 @@ EXPORT_SYMBOL(ishtp_cl_free);
/**
* ishtp_cl_link() - Reserve a host id and link the client instance
* @cl: client device instance
- * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
- * id from the available can be used
- *
*
* This allocates a single bit in the hostmap. This function will make sure
* that not many client sessions are opened at the same time. Once allocated
@@ -179,11 +176,11 @@ EXPORT_SYMBOL(ishtp_cl_free);
*
* Return: 0 or error code on failure
*/
-int ishtp_cl_link(struct ishtp_cl *cl, int id)
+int ishtp_cl_link(struct ishtp_cl *cl)
{
struct ishtp_device *dev;
- unsigned long flags, flags_cl;
- int ret = 0;
+ unsigned long flags, flags_cl;
+ int id, ret = 0;
if (WARN_ON(!cl || !cl->dev))
return -EINVAL;
@@ -197,10 +194,7 @@ int ishtp_cl_link(struct ishtp_cl *cl, int id)
goto unlock_dev;
}
- /* If Id is not assigned get one*/
- if (id == ISHTP_HOST_CLIENT_ID_ANY)
- id = find_first_zero_bit(dev->host_clients_map,
- ISHTP_CLIENTS_MAX);
+ id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX);
if (id >= ISHTP_CLIENTS_MAX) {
spin_unlock_irqrestore(&dev->device_lock, flags);
@@ -1069,3 +1063,45 @@ void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
eoi:
return;
}
+
+void *ishtp_get_client_data(struct ishtp_cl *cl)
+{
+ return cl->client_data;
+}
+EXPORT_SYMBOL(ishtp_get_client_data);
+
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data)
+{
+ cl->client_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_client_data);
+
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl)
+{
+ return cl->dev;
+}
+EXPORT_SYMBOL(ishtp_get_ishtp_device);
+
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->tx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_tx_ring_size);
+
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size)
+{
+ cl->rx_ring_size = size;
+}
+EXPORT_SYMBOL(ishtp_set_rx_ring_size);
+
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
+{
+ cl->state = state;
+}
+EXPORT_SYMBOL(ishtp_set_connection_state);
+
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
+{
+ cl->fw_client_id = fw_client_id;
+}
+EXPORT_SYMBOL(ishtp_cl_set_fw_client_id);
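
Editor's note: a sketch, not part of this patch, of the connect sequence a client driver follows with the new setters and the simplified ishtp_cl_link(). It mirrors hid_ishtp_cl_init() earlier in this diff; example_cl_connect(), the GUID argument, and the ring sizes are hypothetical placeholders.

#include <linux/errno.h>
#include <linux/intel-ish-client-if.h>

static int example_cl_connect(struct ishtp_cl *cl, struct ishtp_device *dev,
			      const guid_t *guid)
{
	struct ishtp_fw_client *fw_client;
	int rv;

	rv = ishtp_cl_link(cl);		/* host client id chosen internally now */
	if (rv)
		return rv;

	ishtp_set_tx_ring_size(cl, 4);
	ishtp_set_rx_ring_size(cl, 4);

	fw_client = ishtp_fw_cl_get_client(dev, guid);
	if (!fw_client) {
		ishtp_cl_unlink(cl);
		return -ENOENT;
	}

	ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
	ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

	rv = ishtp_cl_connect(cl);
	if (rv)
		ishtp_cl_unlink(cl);

	return rv;
}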
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index e0df3eb611e6..6ed00947d6bc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -19,15 +19,6 @@
#include <linux/types.h>
#include "ishtp-dev.h"
-/* Client state */
-enum cl_state {
- ISHTP_CL_INITIALIZING = 0,
- ISHTP_CL_CONNECTING,
- ISHTP_CL_CONNECTED,
- ISHTP_CL_DISCONNECTING,
- ISHTP_CL_DISCONNECTED
-};
-
/* Tx and Rx ring size */
#define CL_DEF_RX_RING_SIZE 2
#define CL_DEF_TX_RING_SIZE 2
@@ -169,19 +160,4 @@ static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
(cl1->fw_client_id == cl2->fw_client_id);
}
-/* exported functions from ISHTP under client management scope */
-struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
-void ishtp_cl_free(struct ishtp_cl *cl);
-int ishtp_cl_link(struct ishtp_cl *cl, int id);
-void ishtp_cl_unlink(struct ishtp_cl *cl);
-int ishtp_cl_disconnect(struct ishtp_cl *cl);
-int ishtp_cl_connect(struct ishtp_cl *cl);
-int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
-int ishtp_cl_flush_queues(struct ishtp_cl *cl);
-
-/* exported functions from ISHTP client buffer management scope */
-int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
-struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
-
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index e54ce1ef27dd..3cfef084b9fc 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -79,32 +79,6 @@ struct ishtp_fw_client {
uint8_t client_id;
};
-/**
- * struct ishtp_msg_data - ISHTP message data struct
- * @size: Size of data in the *data
- * @data: Pointer to data
- */
-struct ishtp_msg_data {
- uint32_t size;
- unsigned char *data;
-};
-
-/*
- * struct ishtp_cl_rb - request block structure
- * @list: Link to list members
- * @cl: ISHTP client instance
- * @buffer: message header
- * @buf_idx: Index into buffer
- * @read_time: unused at this time
- */
-struct ishtp_cl_rb {
- struct list_head list;
- struct ishtp_cl *cl;
- struct ishtp_msg_data buffer;
- unsigned long buf_idx;
- unsigned long read_time;
-};
-
/*
* Control info for IPC messages ISHTP/IPC sending FIFO -
* list with inline data buffer
@@ -264,11 +238,6 @@ static inline int ish_ipc_reset(struct ishtp_device *dev)
return dev->ops->ipc_reset(dev);
}
-static inline int ish_hw_reset(struct ishtp_device *dev)
-{
- return dev->ops->hw_reset(dev);
-}
-
/* Exported function */
void ishtp_device_init(struct ishtp_device *dev);
int ishtp_start(struct ishtp_device *dev);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 840634e0f1e3..dbaead0a5371 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -632,7 +632,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
INIT_WORK(&uhid->worker, uhid_device_add_worker);
file->private_data = uhid;
- nonseekable_open(inode, file);
+ stream_open(inode, file);
return 0;
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 62703b354d6d..3fc0b247a807 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -336,6 +336,8 @@ static struct vmbus_channel *alloc_channel(void)
tasklet_init(&channel->callback_event,
vmbus_on_event, (unsigned long)channel);
+ hv_ringbuffer_pre_init(channel);
+
return channel;
}
@@ -345,6 +347,7 @@ static struct vmbus_channel *alloc_channel(void)
static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
+ vmbus_remove_channel_attr_group(channel);
kobject_put(&channel->kobj);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 632d25674e7f..45653029ee18 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -408,7 +408,6 @@ int hv_synic_cleanup(unsigned int cpu)
clockevents_unbind_device(hv_cpu->clk_evt, cpu);
hv_ce_shutdown(hv_cpu->clk_evt);
- put_cpu_ptr(hv_cpu);
}
hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index cb86b133eb4d..e5467b821f41 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -193,6 +193,7 @@ extern void hv_synic_clockevents_cleanup(void);
/* Interface */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 pagecnt);
@@ -321,6 +322,8 @@ void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
struct vmbus_channel *channel);
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+
struct vmbus_channel *relid2channel(u32 relid);
void vmbus_free_channels(void);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 9e8b31ccc142..121a01c43298 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -166,14 +166,18 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
}
/* Get various debug metrics for the specified ring buffer. */
-int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
{
u32 bytes_avail_towrite;
u32 bytes_avail_toread;
- if (!ring_info->ring_buffer)
+ mutex_lock(&ring_info->ring_buffer_mutex);
+
+ if (!ring_info->ring_buffer) {
+ mutex_unlock(&ring_info->ring_buffer_mutex);
return -EINVAL;
+ }
hv_get_ringbuffer_availbytes(ring_info,
&bytes_avail_toread,
@@ -184,10 +188,19 @@ int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
debug_info->current_write_index = ring_info->ring_buffer->write_index;
debug_info->current_interrupt_mask
= ring_info->ring_buffer->interrupt_mask;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
+
return 0;
}
EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
+/* Initialize a channel's ring buffer info mutex locks */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
+{
+ mutex_init(&channel->inbound.ring_buffer_mutex);
+ mutex_init(&channel->outbound.ring_buffer_mutex);
+}
+
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt)
@@ -197,8 +210,6 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
- memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
-
/*
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
@@ -232,6 +243,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
reciprocal_value(ring_info->ring_size / 10);
ring_info->ring_datasize = ring_info->ring_size -
sizeof(struct hv_ring_buffer);
+ ring_info->priv_read_index = 0;
spin_lock_init(&ring_info->ring_lock);
@@ -241,8 +253,10 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
+ mutex_lock(&ring_info->ring_buffer_mutex);
vunmap(ring_info->ring_buffer);
ring_info->ring_buffer = NULL;
+ mutex_unlock(&ring_info->ring_buffer_mutex);
}
/* Write to the ring buffer. */
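
Editor's note: the locking rule introduced above, restated as a sketch that is not part of this patch: every reader of ring_info->ring_buffer that can race with hv_ringbuffer_cleanup() takes ring_buffer_mutex and re-checks the pointer, because cleanup now unmaps the ring under the same mutex. example_read_intr_mask() is hypothetical; the mutex member is the one added by this series.

#include <linux/hyperv.h>
#include <linux/mutex.h>

static int example_read_intr_mask(struct hv_ring_buffer_info *rbi, u32 *mask)
{
	mutex_lock(&rbi->ring_buffer_mutex);

	/* hv_ringbuffer_cleanup() may already have unmapped the ring */
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	*mask = rbi->ring_buffer->interrupt_mask;
	mutex_unlock(&rbi->ring_buffer_mutex);

	return 0;
}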
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 000b53e5a17a..aa25f3bcbdea 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -630,7 +630,36 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_driver_override.attr,
NULL,
};
-ATTRIBUTE_GROUPS(vmbus_dev);
+
+/*
+ * Device-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ const struct hv_device *hv_dev = device_to_hv_device(dev);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!hv_dev->channel->offermsg.monitor_allocated &&
+ (attr == &dev_attr_monitor_id.attr ||
+ attr == &dev_attr_server_monitor_pending.attr ||
+ attr == &dev_attr_client_monitor_pending.attr ||
+ attr == &dev_attr_server_monitor_latency.attr ||
+ attr == &dev_attr_client_monitor_latency.attr ||
+ attr == &dev_attr_server_monitor_conn_id.attr ||
+ attr == &dev_attr_client_monitor_conn_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group vmbus_dev_group = {
+ .attrs = vmbus_dev_attrs,
+ .is_visible = vmbus_dev_attr_is_visible
+};
+__ATTRIBUTE_GROUPS(vmbus_dev);
/*
* vmbus_uevent - add uevent for our device
@@ -1381,7 +1410,7 @@ static void vmbus_chan_release(struct kobject *kobj)
struct vmbus_chan_attribute {
struct attribute attr;
- ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
+ ssize_t (*show)(struct vmbus_channel *chan, char *buf);
ssize_t (*store)(struct vmbus_channel *chan,
const char *buf, size_t count);
};
@@ -1400,15 +1429,12 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
{
const struct vmbus_chan_attribute *attribute
= container_of(attr, struct vmbus_chan_attribute, attr);
- const struct vmbus_channel *chan
+ struct vmbus_channel *chan
= container_of(kobj, struct vmbus_channel, kobj);
if (!attribute->show)
return -EIO;
- if (chan->state != CHANNEL_OPENED_STATE)
- return -EINVAL;
-
return attribute->show(chan, buf);
}
@@ -1416,45 +1442,81 @@ static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
};
-static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->outbound;
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);
-static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->inbound;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);
-static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->inbound;
+ struct hv_ring_buffer_info *rbi = &channel->inbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);
-static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
- const struct hv_ring_buffer_info *rbi = &channel->outbound;
+ struct hv_ring_buffer_info *rbi = &channel->outbound;
+ ssize_t ret;
+
+ mutex_lock(&rbi->ring_buffer_mutex);
+ if (!rbi->ring_buffer) {
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return -EINVAL;
+ }
- return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+ ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
+ mutex_unlock(&rbi->ring_buffer_mutex);
+ return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
-static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
+static ssize_t show_target_cpu(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
static VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
-static ssize_t channel_pending_show(const struct vmbus_channel *channel,
+static ssize_t channel_pending_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
@@ -1463,7 +1525,7 @@ static ssize_t channel_pending_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
-static ssize_t channel_latency_show(const struct vmbus_channel *channel,
+static ssize_t channel_latency_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
@@ -1472,19 +1534,19 @@ static ssize_t channel_latency_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
-static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
-static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
+static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
-static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
+static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1492,7 +1554,7 @@ static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
-static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
+static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1500,7 +1562,7 @@ static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
-static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
+static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1508,7 +1570,7 @@ static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
-static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
+static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -1516,14 +1578,14 @@ static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel,
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
-static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel,
+static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);
-static ssize_t subchannel_id_show(const struct vmbus_channel *channel,
+static ssize_t subchannel_id_show(struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%u\n",
@@ -1550,10 +1612,34 @@ static struct attribute *vmbus_chan_attrs[] = {
NULL
};
+/*
+ * Channel-level attribute_group callback function. Returns the permission for
+ * each attribute, and returns 0 if an attribute is not visible.
+ */
+static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ const struct vmbus_channel *channel =
+ container_of(kobj, struct vmbus_channel, kobj);
+
+ /* Hide the monitor attributes if the monitor mechanism is not used. */
+ if (!channel->offermsg.monitor_allocated &&
+ (attr == &chan_attr_pending.attr ||
+ attr == &chan_attr_latency.attr ||
+ attr == &chan_attr_monitor_id.attr))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group vmbus_chan_group = {
+ .attrs = vmbus_chan_attrs,
+ .is_visible = vmbus_chan_attr_is_visible
+};
+
static struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
- .default_attrs = vmbus_chan_attrs,
};
/*
@@ -1561,6 +1647,7 @@ static struct kobj_type vmbus_chan_ktype = {
*/
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
+ const struct device *device = &dev->device;
struct kobject *kobj = &channel->kobj;
u32 relid = channel->offermsg.child_relid;
int ret;
@@ -1571,12 +1658,31 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
if (ret)
return ret;
+ ret = sysfs_create_group(kobj, &vmbus_chan_group);
+
+ if (ret) {
+ /*
+ * The calling functions' error handling paths will clean up the
+ * empty channel directory.
+ */
+ dev_err(device, "Unable to set up channel sysfs files\n");
+ return ret;
+ }
+
kobject_uevent(kobj, KOBJ_ADD);
return 0;
}
/*
+ * vmbus_remove_channel_attr_group - remove the channel's attribute group
+ */
+void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
+{
+ sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
+}
+
+/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
*/
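
Editor's note: a sketch, not part of this patch, of the general shape of the ->is_visible conversion used above: the attributes stay in one flat array and the callback hides the ones that do not apply, instead of conditionally registering them. All example_* names are hypothetical.

#include <linux/device.h>
#include <linux/sysfs.h>

static bool example_feature_present;

static ssize_t always_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(always);

static ssize_t optional_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(optional);

static struct attribute *example_attrs[] = {
	&dev_attr_always.attr,
	&dev_attr_optional.attr,
	NULL
};

static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	/* returning 0 makes sysfs skip creating this file entirely */
	if (attr == &dev_attr_optional.attr && !example_feature_present)
		return 0;

	return attr->mode;	/* otherwise keep the declared permissions */
}

static const struct attribute_group example_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_is_visible,
};
__ATTRIBUTE_GROUPS(example);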
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index d0f1dfe2bcbb..1915a18b537b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -17,7 +17,7 @@ menuconfig HWMON
To find out which specific driver(s) you need, use the
sensors-detect script from the lm_sensors package. Read
- <file:Documentation/hwmon/userspace-tools> for details.
+ <file:Documentation/hwmon/userspace-tools.rst> for details.
This support can also be built as a module. If so, the module
will be called hwmon.
@@ -59,7 +59,7 @@ config SENSORS_ABITUGURU
chip can be found on Abit uGuru featuring motherboards (most modern
Abit motherboards from before end 2005). For more info and a list
of which motherboards have which revision see
- Documentation/hwmon/abituguru
+ Documentation/hwmon/abituguru.rst
This driver can also be built as a module. If so, the module
will be called abituguru.
@@ -73,7 +73,7 @@ config SENSORS_ABITUGURU3
and their settings is supported. The third revision of the Abit
uGuru chip can be found on recent Abit motherboards (since end
2005). For more info and a list of which motherboards have which
- revision see Documentation/hwmon/abituguru3
+ revision see Documentation/hwmon/abituguru3.rst
This driver can also be built as a module. If so, the module
will be called abituguru3.
@@ -643,7 +643,7 @@ config SENSORS_CORETEMP
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
- are supported. Check Documentation/hwmon/coretemp for details.
+ are supported. Check Documentation/hwmon/coretemp.rst for details.
config SENSORS_IT87
tristate "ITE IT87xx and compatibles"
@@ -705,6 +705,16 @@ config SENSORS_LINEAGE
This driver can also be built as a module. If so, the module
will be called lineage-pem.
+config SENSORS_LOCHNAGAR
+ tristate "Lochnagar Hardware Monitor"
+ depends on MFD_LOCHNAGAR
+ help
+ If you say yes here you get support for Lochnagar 2 temperature,
+ voltage and current sensing abilities.
+
+ This driver can also be built as a module. If so, the module
+ will be called lochnagar-hwmon.
+
config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
@@ -898,6 +908,7 @@ config SENSORS_MAX6642
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C
+ depends on THERMAL || THERMAL=n
help
If you say yes here you get support for the MAX6650 / MAX6651
sensor chips.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index f5c7b442e69e..8db472ea04f0 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
+obj-$(CONFIG_SENSORS_LOCHNAGAR) += lochnagar-hwmon.o
obj-$(CONFIG_SENSORS_LM63) += lm63.o
obj-$(CONFIG_SENSORS_LM70) += lm70.o
obj-$(CONFIG_SENSORS_LM73) += lm73.o
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index f13806d731fa..b176da8e92a7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -215,7 +215,7 @@ static const struct i2c_device_id ad7414_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
-static const struct of_device_id ad7414_of_match[] = {
+static const struct of_device_id __maybe_unused ad7414_of_match[] = {
{ .compatible = "ad,ad7414" },
{ },
};
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index ca794bf904de..e640be442dae 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -521,7 +521,7 @@ static const struct i2c_device_id adc128_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adc128_id);
-static const struct of_device_id adc128_of_match[] = {
+static const struct of_device_id __maybe_unused adc128_of_match[] = {
{ .compatible = "ti,adc128d818" },
{ },
};
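
Editor's note: a sketch, not part of this patch, of the pattern behind the __maybe_unused annotations added to these of_device_id tables: when a driver wraps the table in of_match_ptr() and CONFIG_OF is disabled, the macro evaluates to NULL and the table becomes unreferenced, so the annotation silences the "defined but not used" warning without an #ifdef. The vendor string and driver name below are hypothetical.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id __maybe_unused example_of_match[] = {
	{ .compatible = "vendor,example-sensor" },
	{ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name		= "example-sensor",
		/* NULL when CONFIG_OF=n, leaving the table otherwise unused */
		.of_match_table	= of_match_ptr(example_of_match),
	},
	/* .probe and .id_table omitted from this sketch */
};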
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index 1e4dad36f5ef..ec437cc9e739 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -174,7 +174,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
*/
static ssize_t
-show_in(struct device *dev, struct device_attribute *attr, char *buf)
+in_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -183,7 +183,7 @@ show_in(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -192,7 +192,7 @@ show_in_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -201,7 +201,7 @@ show_in_max(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp(struct device *dev, struct device_attribute *attr, char *buf)
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -209,7 +209,7 @@ show_temp(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
+temp_min_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
@@ -217,15 +217,15 @@ show_temp_min(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *attr, char *buf)
+temp_max_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -245,8 +245,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -266,22 +266,28 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -301,8 +307,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = dev_get_drvdata(dev);
@@ -322,15 +329,12 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \
- show_temp_max, set_temp_max, offset - 1)
-set_temp(1);
-set_temp(2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
static ssize_t
alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -341,21 +345,21 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1025_data *data = adm1025_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
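
Editor's note: a condensed sketch, not part of this patch, of the contract behind the SENSOR_DEVICE_ATTR_RO/RW conversion in this driver: the macros expect callbacks named <prefix>_show and <prefix>_store, which is why show_in()/set_in() become in_show()/in_store() and the explicit S_IRUGO/S_IWUSR flags disappear from the declarations. The example_ names are hypothetical.

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int index = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%d\n", index);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	/* parse and apply the new value here */
	return count;
}

/* creates sensor_dev_attr_example1 with mode 0644 and .index = 1 */
static SENSOR_DEVICE_ATTR_RW(example1, example, 1);
/* creates sensor_dev_attr_example2 with mode 0444 and .index = 2 */
static SENSOR_DEVICE_ATTR_RO(example2, example, 2);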
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index e43f09a07cd0..d34a68a11036 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -477,24 +477,24 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
return data;
}
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -513,16 +513,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -542,48 +542,72 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in, \
- NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-
-in_reg(0);
-in_reg(1);
-in_reg(2);
-in_reg(3);
-in_reg(4);
-in_reg(5);
-in_reg(6);
-in_reg(7);
-in_reg(8);
-in_reg(9);
-in_reg(10);
-in_reg(11);
-in_reg(12);
-in_reg(13);
-in_reg(14);
-in_reg(15);
-
-static ssize_t show_in16(struct device *dev, struct device_attribute *attr,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
+static SENSOR_DEVICE_ATTR_RO(in9_input, in, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_min, in_min, 9);
+static SENSOR_DEVICE_ATTR_RW(in9_max, in_max, 9);
+static SENSOR_DEVICE_ATTR_RO(in10_input, in, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_min, in_min, 10);
+static SENSOR_DEVICE_ATTR_RW(in10_max, in_max, 10);
+static SENSOR_DEVICE_ATTR_RO(in11_input, in, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_min, in_min, 11);
+static SENSOR_DEVICE_ATTR_RW(in11_max, in_max, 11);
+static SENSOR_DEVICE_ATTR_RO(in12_input, in, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_min, in_min, 12);
+static SENSOR_DEVICE_ATTR_RW(in12_max, in_max, 12);
+static SENSOR_DEVICE_ATTR_RO(in13_input, in, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_min, in_min, 13);
+static SENSOR_DEVICE_ATTR_RW(in13_max, in_max, 13);
+static SENSOR_DEVICE_ATTR_RO(in14_input, in, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_min, in_min, 14);
+static SENSOR_DEVICE_ATTR_RW(in14_max, in_max, 14);
+static SENSOR_DEVICE_ATTR_RO(in15_input, in, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_min, in_min, 15);
+static SENSOR_DEVICE_ATTR_RW(in15_max, in_max, 15);
+
+static ssize_t in16_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in[16]) -
NEG12_OFFSET);
}
-static ssize_t show_in16_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_min[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -603,15 +627,16 @@ static ssize_t set_in16_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_in16_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in16_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(16, data->in_max[16])
- NEG12_OFFSET);
}
-static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in16_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1026_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -632,17 +657,14 @@ static ssize_t set_in16_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in16, NULL, 16);
-static SENSOR_DEVICE_ATTR(in16_min, S_IRUGO | S_IWUSR, show_in16_min,
- set_in16_min, 16);
-static SENSOR_DEVICE_ATTR(in16_max, S_IRUGO | S_IWUSR, show_in16_max,
- set_in16_max, 16);
-
+static SENSOR_DEVICE_ATTR_RO(in16_input, in16, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_min, in16_min, 16);
+static SENSOR_DEVICE_ATTR_RW(in16_max, in16_max, 16);
/* Now add fan read/write functions */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -650,8 +672,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -659,8 +681,9 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -681,20 +704,22 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan, NULL, \
- offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-fan_offset(1);
-fan_offset(2);
-fan_offset(3);
-fan_offset(4);
-fan_offset(5);
-fan_offset(6);
-fan_offset(7);
-fan_offset(8);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
+static SENSOR_DEVICE_ATTR_RO(fan5_input, fan, 4);
+static SENSOR_DEVICE_ATTR_RW(fan5_min, fan_min, 4);
+static SENSOR_DEVICE_ATTR_RO(fan6_input, fan, 5);
+static SENSOR_DEVICE_ATTR_RW(fan6_min, fan_min, 5);
+static SENSOR_DEVICE_ATTR_RO(fan7_input, fan, 6);
+static SENSOR_DEVICE_ATTR_RW(fan7_min, fan_min, 6);
+static SENSOR_DEVICE_ATTR_RO(fan8_input, fan, 7);
+static SENSOR_DEVICE_ATTR_RW(fan8_min, fan_min, 7);
/* Adjust fan_min to account for new fan divisor */
static void fixup_fan_min(struct device *dev, int fan, int old_div)
@@ -715,16 +740,17 @@ static void fixup_fan_min(struct device *dev, int fan, int old_div)
}
/* Now add fan_div read/write functions */
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", data->fan_div[nr]);
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -765,38 +791,35 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset_div(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-fan_offset_div(1);
-fan_offset_div(2);
-fan_offset_div(3);
-fan_offset_div(4);
-fan_offset_div(5);
-fan_offset_div(6);
-fan_offset_div(7);
-fan_offset_div(8);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan4_div, fan_div, 3);
+static SENSOR_DEVICE_ATTR_RW(fan5_div, fan_div, 4);
+static SENSOR_DEVICE_ATTR_RW(fan6_div, fan_div, 5);
+static SENSOR_DEVICE_ATTR_RW(fan7_div, fan_div, 6);
+static SENSOR_DEVICE_ATTR_RW(fan8_div, fan_div, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -816,16 +839,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -846,30 +870,27 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static ssize_t show_temp_offset(struct device *dev,
- struct device_attribute *attr, char *buf)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -890,16 +911,13 @@ static ssize_t set_temp_offset(struct device *dev,
return count;
}
-#define temp_offset_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1);
-
-temp_offset_reg(1);
-temp_offset_reg(2);
-temp_offset_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
-static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_hyst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -907,8 +925,9 @@ static ssize_t show_temp_auto_point1_temp_hyst(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(
ADM1026_FAN_ACTIVATION_TEMP_HYST + data->temp_tmin[nr]));
}
-static ssize_t show_temp_auto_point2_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point2_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -916,16 +935,18 @@ static ssize_t show_temp_auto_point2_temp(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr] +
ADM1026_FAN_CONTROL_TEMP_RANGE));
}
-static ssize_t show_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_point1_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_tmin[nr]));
}
-static ssize_t set_temp_auto_point1_temp(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_point1_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -946,18 +967,18 @@ static ssize_t set_temp_auto_point1_temp(struct device *dev,
return count;
}
-#define temp_auto_point(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp, \
- S_IRUGO | S_IWUSR, show_temp_auto_point1_temp, \
- set_temp_auto_point1_temp, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point1_temp_hyst, S_IRUGO,\
- show_temp_auto_point1_temp_hyst, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_point2_temp, S_IRUGO, \
- show_temp_auto_point2_temp, NULL, offset - 1);
-
-temp_auto_point(1);
-temp_auto_point(2);
-temp_auto_point(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_point1_temp, temp_auto_point1_temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_auto_point2_temp, temp_auto_point2_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_point1_temp, temp_auto_point1_temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_auto_point2_temp, temp_auto_point2_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_point1_temp, temp_auto_point1_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point1_temp_hyst,
+ temp_auto_point1_temp_hyst, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_auto_point2_temp, temp_auto_point2_temp, 2);
static ssize_t show_temp_crit_enable(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -988,24 +1009,24 @@ static ssize_t set_temp_crit_enable(struct device *dev,
return count;
}
-#define temp_crit_enable(offset) \
-static DEVICE_ATTR(temp##offset##_crit_enable, S_IRUGO | S_IWUSR, \
- show_temp_crit_enable, set_temp_crit_enable);
+static DEVICE_ATTR(temp1_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp2_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
+static DEVICE_ATTR(temp3_crit_enable, 0644, show_temp_crit_enable,
+ set_temp_crit_enable);
-temp_crit_enable(1);
-temp_crit_enable(2);
-temp_crit_enable(3);
-
-static ssize_t show_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_crit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -1026,13 +1047,9 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_crit_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1);
-
-temp_crit_reg(1);
-temp_crit_reg(2);
-temp_crit_reg(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1110,7 +1127,7 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct adm1026_data *data = adm1026_update_device(dev);
@@ -1118,34 +1135,34 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%ld\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in15_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in16_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 19);
-static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 20);
-static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 21);
-static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO, show_alarm, NULL, 22);
-static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO, show_alarm, NULL, 23);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 24);
-static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 25);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 26);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in9_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in11_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in12_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in13_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(in14_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(in15_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in16_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 19);
+static SENSOR_DEVICE_ATTR_RO(fan5_alarm, alarm, 20);
+static SENSOR_DEVICE_ATTR_RO(fan6_alarm, alarm, 21);
+static SENSOR_DEVICE_ATTR_RO(fan7_alarm, alarm, 22);
+static SENSOR_DEVICE_ATTR_RO(fan8_alarm, alarm, 23);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 24);
+static SENSOR_DEVICE_ATTR_RO(in10_alarm, alarm, 25);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 26);
static ssize_t alarm_mask_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1188,7 +1205,6 @@ static ssize_t alarm_mask_store(struct device *dev,
static DEVICE_ATTR_RW(alarm_mask);
-
static ssize_t gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1371,23 +1387,23 @@ static ssize_t pwm1_enable_store(struct device *dev,
/* enable PWM fan control */
static DEVICE_ATTR_RW(pwm1);
-static DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
-static DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm2, 0644, pwm1_show, pwm1_store);
+static DEVICE_ATTR(pwm3, 0644, pwm1_show, pwm1_store);
static DEVICE_ATTR_RW(pwm1_enable);
-static DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm2_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
-static DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, pwm1_enable_show,
+static DEVICE_ATTR(pwm3_enable, 0644, pwm1_enable_show,
pwm1_enable_store);
static DEVICE_ATTR_RW(temp1_auto_point1_pwm);
-static DEVICE_ATTR(temp2_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
-static DEVICE_ATTR(temp3_auto_point1_pwm, S_IRUGO | S_IWUSR,
- temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp2_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
+static DEVICE_ATTR(temp3_auto_point1_pwm, 0644,
+ temp1_auto_point1_pwm_show, temp1_auto_point1_pwm_store);
static DEVICE_ATTR_RO(temp1_auto_point2_pwm);
-static DEVICE_ATTR(temp2_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp2_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
-static DEVICE_ATTR(temp3_auto_point2_pwm, S_IRUGO, temp1_auto_point2_pwm_show,
+static DEVICE_ATTR(temp3_auto_point2_pwm, 0444, temp1_auto_point2_pwm_show,
NULL);
static struct attribute *adm1026_attributes[] = {
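A detail worth noting in the pwm block above: DEVICE_ATTR_RW(x) hard-codes the callback names to x_show()/x_store(), so it can only be used for pwm1. pwm2 and pwm3 reuse pwm1's handlers and therefore keep the explicit DEVICE_ATTR() form, trading only the S_IRUGO | S_IWUSR symbols for the plain octal 0644 that checkpatch prefers. Compressed sketch of that split, using the names from the driver:

static DEVICE_ATTR_RW(pwm1);                            /* binds pwm1_show/pwm1_store */
static DEVICE_ATTR(pwm2, 0644, pwm1_show, pwm1_store);  /* shares pwm1's callbacks */
static DEVICE_ATTR(pwm3, 0644, pwm1_show, pwm1_store);  /* likewise */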
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index e561279aea21..388060ff85e7 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -166,7 +166,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
*/
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -175,7 +175,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -193,7 +193,7 @@ show_fan(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm1029_data *data = adm1029_update_device(dev);
@@ -203,8 +203,9 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct adm1029_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -254,26 +255,26 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *devattr,
}
/* Access rights on sysfs. */
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, 0444, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, 0444, show_temp, NULL, 2);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_max, 0444, show_temp, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_max, 0444, show_temp, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp3_max, 0444, show_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max, temp, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_max, temp, 4);
+static SENSOR_DEVICE_ATTR_RO(temp3_max, temp, 5);
-static SENSOR_DEVICE_ATTR(temp1_min, 0444, show_temp, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp2_min, 0444, show_temp, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_min, 0444, show_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_min, temp, 6);
+static SENSOR_DEVICE_ATTR_RO(temp2_min, temp, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_min, temp, 8);
-static SENSOR_DEVICE_ATTR(fan1_input, 0444, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, 0444, show_fan, NULL, 1);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
-static SENSOR_DEVICE_ATTR(fan1_min, 0444, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan2_min, 0444, show_fan, NULL, 3);
+static SENSOR_DEVICE_ATTR_RO(fan1_min, fan, 2);
+static SENSOR_DEVICE_ATTR_RO(fan2_min, fan, 3);
-static SENSOR_DEVICE_ATTR(fan1_div, 0644, show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, 0644, show_fan_div, set_fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static struct attribute *adm1029_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index bcf508269fd6..6b46d8eaa775 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -331,7 +331,7 @@ get_fan_auto_nearest(struct adm1031_data *data, int chan, u8 val, u8 reg)
return -EINVAL;
}
-static ssize_t show_fan_auto_channel(struct device *dev,
+static ssize_t fan_auto_channel_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -340,8 +340,8 @@ static ssize_t show_fan_auto_channel(struct device *dev,
}
static ssize_t
-set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+fan_auto_channel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -392,13 +392,11 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(auto_fan1_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_channel, S_IRUGO | S_IWUSR,
- show_fan_auto_channel, set_fan_auto_channel, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_channel, fan_auto_channel, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_channel, fan_auto_channel, 1);
/* Auto Temps */
-static ssize_t show_auto_temp_off(struct device *dev,
+static ssize_t auto_temp_off_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -406,7 +404,7 @@ static ssize_t show_auto_temp_off(struct device *dev,
return sprintf(buf, "%d\n",
AUTO_TEMP_OFF_FROM_REG(data->auto_temp[nr]));
}
-static ssize_t show_auto_temp_min(struct device *dev,
+static ssize_t auto_temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -415,8 +413,8 @@ static ssize_t show_auto_temp_min(struct device *dev,
AUTO_TEMP_MIN_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -436,7 +434,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_temp_max(struct device *dev,
+static ssize_t auto_temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -445,8 +443,8 @@ static ssize_t show_auto_temp_max(struct device *dev,
AUTO_TEMP_MAX_FROM_REG(data->auto_temp[nr]));
}
static ssize_t
-set_auto_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+auto_temp_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -468,28 +466,26 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define auto_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_off, S_IRUGO, \
- show_auto_temp_off, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_auto_temp_min, set_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(auto_temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_auto_temp_max, set_auto_temp_max, offset - 1)
-
-auto_temp_reg(1);
-auto_temp_reg(2);
-auto_temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(auto_temp1_off, auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_min, auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_temp1_max, auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(auto_temp2_off, auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_min, auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_temp2_max, auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(auto_temp3_off, auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_min, auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(auto_temp3_max, auto_temp_max, 2);
/* pwm */
-static ssize_t show_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -517,12 +513,10 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(auto_fan1_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(auto_fan2_min_pwm, S_IRUGO | S_IWUSR,
- show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(auto_fan1_min_pwm, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(auto_fan2_min_pwm, pwm, 1);
/* Fans */
@@ -572,9 +566,8 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
return res;
}
-
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -585,15 +578,15 @@ static ssize_t show_fan(struct device *dev,
return sprintf(buf, "%d\n", value);
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -601,8 +594,9 @@ static ssize_t show_fan_min(struct device *dev,
FAN_FROM_REG(data->fan_min[nr],
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -625,8 +619,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -673,21 +668,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-
-fan_offset(1);
-fan_offset(2);
-
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Temps */
-static ssize_t show_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
@@ -697,7 +687,7 @@ static ssize_t show_temp(struct device *dev,
(((data->ext_temp[nr] >> ((nr - 1) * 3)) & 7));
return sprintf(buf, "%d\n", TEMP_FROM_REG_EXT(data->temp[nr], ext));
}
-static ssize_t show_temp_offset(struct device *dev,
+static ssize_t temp_offset_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -705,30 +695,30 @@ static ssize_t show_temp_offset(struct device *dev,
return sprintf(buf, "%d\n",
TEMP_OFFSET_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t show_temp_min(struct device *dev,
+static ssize_t temp_min_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev,
+static ssize_t temp_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_crit(struct device *dev,
+static ssize_t temp_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr]));
}
-static ssize_t set_temp_offset(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -748,8 +738,9 @@ static ssize_t set_temp_offset(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -769,8 +760,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -790,8 +782,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_crit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct adm1031_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -812,21 +805,21 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
return count;
}
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \
- show_temp_crit, set_temp_crit, offset - 1)
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_crit, temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_crit, temp_crit, 2);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -838,29 +831,29 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", (data->alarm >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 14);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_fault, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_max_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(temp2_min_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp1_min_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(fan2_fault, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp3_max_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(temp3_min_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 13);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 14);
/* Update Interval */
static const unsigned int update_intervals[] = {
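All of the per-bit alarm and fault files above funnel into the single alarm_show() callback; the bit number travels in the index field of struct sensor_device_attribute and is recovered with to_sensor_dev_attr(), a container_of() wrapper. Approximate shape of that helper (see <linux/hwmon-sysfs.h> for the real definition):

struct sensor_device_attribute {
        struct device_attribute dev_attr;
        int index;
};
#define to_sensor_dev_attr(_dev_attr) \
        container_of(_dev_attr, struct sensor_device_attribute, dev_attr)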
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 255413fdbde9..000b20f1db71 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -269,16 +269,16 @@ static ssize_t temp1_input_show(struct device *dev,
return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
-static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t max_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[attr->index] * 1000);
}
-static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t max_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -299,14 +299,12 @@ static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
}
static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
- show_max, set_max, 0);
-static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
- show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, max, 1);
/* voltage */
-static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -314,8 +312,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *devattr,
attr->index));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -323,8 +321,8 @@ static ssize_t show_in_min(struct device *dev,
attr->index));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t in_max_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -332,9 +330,9 @@ static ssize_t show_in_max(struct device *dev,
attr->index));
}
-static ssize_t set_in_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -354,9 +352,9 @@ static ssize_t set_in_min(struct device *dev,
return count;
}
-static ssize_t set_in_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -376,24 +374,28 @@ static ssize_t set_in_max(struct device *dev,
return count;
}
-#define vin(nr) \
-static SENSOR_DEVICE_ATTR(in##nr##_input, S_IRUGO, \
- show_in, NULL, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, nr); \
-static SENSOR_DEVICE_ATTR(in##nr##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, nr);
-
-vin(0);
-vin(1);
-vin(2);
-vin(3);
-vin(4);
-vin(5);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
/* fans */
-static ssize_t show_fan(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -401,8 +403,8 @@ static ssize_t show_fan(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -410,8 +412,8 @@ static ssize_t show_fan_min(struct device *dev,
1 << data->fan_div[attr->index]));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = adm9240_update_device(dev);
@@ -429,9 +431,9 @@ static ssize_t show_fan_div(struct device *dev,
* - otherwise: select fan clock divider to suit fan speed low limit,
* measurement code may adjust registers to ensure fan speed reading
*/
-static ssize_t set_fan_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adm9240_data *data = dev_get_drvdata(dev);
@@ -489,16 +491,12 @@ static ssize_t set_fan_min(struct device *dev,
return count;
}
-#define fan(nr) \
-static SENSOR_DEVICE_ATTR(fan##nr##_input, S_IRUGO, \
- show_fan, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_div, S_IRUGO, \
- show_fan_div, NULL, nr - 1); \
-static SENSOR_DEVICE_ATTR(fan##nr##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, nr - 1);
-
-fan(1);
-fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_div, fan_div, 1);
/* alarms */
static ssize_t alarms_show(struct device *dev,
@@ -509,22 +507,22 @@ static ssize_t alarms_show(struct device *dev,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct adm9240_data *data = adm9240_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
/* vid */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -564,9 +562,8 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t chassis_clear(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t alarm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct adm9240_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -583,8 +580,7 @@ static ssize_t chassis_clear(struct device *dev,
return count;
}
-static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
- chassis_clear, 12);
+static SENSOR_DEVICE_ATTR_RW(intrusion0_alarm, alarm, 12);
static struct attribute *adm9240_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -632,7 +628,6 @@ static struct attribute *adm9240_attrs[] = {
ATTRIBUTE_GROUPS(adm9240);
-
/*** sensor chip detect and driver install ***/
/* Return 0 if detection is successful, -ENODEV otherwise */
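The intrusion0_alarm conversion above differs slightly from the plain renames: the old chassis_clear() store handler becomes the generic alarm_store() so that SENSOR_DEVICE_ATTR_RW(intrusion0_alarm, alarm, 12) can bind both callbacks by name. A minimal sketch of the store side, assuming the usual hwmon ABI where writing 0 clears the intrusion latch (hardware access elided):

static ssize_t alarm_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        unsigned long val;

        if (kstrtoul(buf, 10, &val) || val != 0)
                return -EINVAL;         /* only "0" (clear) is accepted */

        /* ... clear the chassis-intrusion latch over I2C, as in the driver ... */
        return count;
}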
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index c21b0529adb2..412ab7015d75 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -307,7 +307,7 @@ static const struct i2c_device_id ads1015_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
-static const struct of_device_id ads1015_of_match[] = {
+static const struct of_device_id __maybe_unused ads1015_of_match[] = {
{
.compatible = "ti,ads1015",
.data = (void *)ads1015
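The __maybe_unused annotations in this and the following OF match tables address a build-coverage detail rather than runtime behaviour: when a driver registers the table through of_match_ptr() and the kernel is built with CONFIG_OF=n, of_match_ptr() evaluates to NULL, the table is no longer referenced anywhere, and the compiler can then warn about an unused const variable. Illustrative shape, with hypothetical "demo" names:

static const struct of_device_id __maybe_unused demo_of_match[] = {
        { .compatible = "vendor,demo" },
        { }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct i2c_driver demo_driver = {
        .driver = {
                .name           = "demo",
                .of_match_table = of_match_ptr(demo_of_match), /* NULL if !CONFIG_OF */
        },
};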
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index 12c56d3783ed..03d6e782777a 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -8,7 +8,7 @@
*
* ADS7830 support, by Guillaume Roguez <guillaume.roguez@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -200,7 +200,7 @@ static const struct i2c_device_id ads7828_device_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, ads7828_device_ids);
-static const struct of_device_id ads7828_of_match[] = {
+static const struct of_device_id __maybe_unused ads7828_of_match[] = {
{
.compatible = "ti,ads7828",
.data = (void *)ads7828
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index b939f8a115ba..44a827b031cb 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -639,40 +639,22 @@ static int adt7411_init_device(struct adt7411_data *data)
return i2c_smbus_write_byte_data(data->client, ADT7411_REG_CFG1, val);
}
-static const u32 adt7411_in_config[] = {
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_in = {
- .type = hwmon_in,
- .config = adt7411_in_config,
-};
-
-static const u32 adt7411_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM,
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
- HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info adt7411_temp = {
- .type = hwmon_temp,
- .config = adt7411_temp_config,
-};
-
static const struct hwmon_channel_info *adt7411_info[] = {
- &adt7411_in,
- &adt7411_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_ALARM),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX | HWMON_T_MAX_ALARM | HWMON_T_FAULT),
NULL
};
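
HWMON_CHANNEL_INFO(type, ...) is a helper macro from <linux/hwmon.h> that builds the per-type config array and the struct hwmon_channel_info in a single expression, which is why the separate adt7411_in_config[]/adt7411_temp_config[] tables could be dropped above. A minimal sketch, assuming a hypothetical device with one temperature channel:

	#include <linux/hwmon.h>

	/*
	 * Each HWMON_CHANNEL_INFO() entry describes one sensor type; the list
	 * is NULL-terminated, exactly as in the converted drivers in this series.
	 */
	static const struct hwmon_channel_info *foo_info[] = {
		HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
		HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
		NULL
	};
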
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 0dbb8df74e44..7caec127df86 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -168,7 +168,7 @@ static const struct i2c_device_id adt7475_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adt7475_id);
-static const struct of_device_id adt7475_of_match[] = {
+static const struct of_device_id __maybe_unused adt7475_of_match[] = {
{
.compatible = "adi,adt7473",
.data = (void *)adt7473
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index c4dd6301e7c8..0daf0b32aa4a 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -830,10 +830,8 @@ static int aspeed_create_pwm_cooling(struct device *dev,
}
snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &aspeed_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &aspeed_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 73c681162653..623736d2a7c1 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -96,17 +96,23 @@ superio_select(int base, int ld)
outb(ld, base + 1);
}
-static inline void
+static inline int
superio_enter(int base)
{
+ if (!request_muxed_region(base, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, base);
outb(0x87, base);
+
+ return 0;
}
static inline void
superio_exit(int base)
{
outb(0xaa, base);
+ release_region(base, 2);
}
/*
@@ -1561,7 +1567,7 @@ exit:
static int __init f71805f_find(int sioaddr, unsigned short *address,
struct f71805f_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 devid;
static const char * const names[] = {
@@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
"F71872F/FG or F71806F/FG",
};
- superio_enter(sioaddr);
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID)
goto exit;
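
Because superio_enter() can now fail with -EBUSY when the shared Super-I/O ports cannot be reserved, callers must check its return value and must not call superio_exit() on the failure path, since the region was never acquired. A condensed sketch of the calling convention used by f71805f_find() above, relying on the superio_enter()/superio_exit() helpers shown in this hunk; the chip-probing body is omitted:

	static int __init foo_find(int sioaddr)
	{
		int err;

		err = superio_enter(sioaddr);	/* reserves the 2-byte I/O region */
		if (err)
			return err;		/* nothing held, so no superio_exit() */

		err = -ENODEV;
		/* ... read and match the device ID here ... */

		superio_exit(sioaddr);		/* releases the muxed region */
		return err;
	}
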
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 042a166e1858..8fb54079fac8 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -837,7 +837,7 @@ static int watchdog_open(struct inode *inode, struct file *filp)
watchdog_trigger(data);
filp->private_data = data;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int watchdog_release(struct inode *inode, struct file *filp)
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index f1bf67aca9e8..3f6e5b4e3997 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -498,6 +498,11 @@ static const struct of_device_id of_gpio_fan_match[] = {
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
+static void gpio_fan_stop(void *data)
+{
+ set_fan_speed(data, 0);
+}
+
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
@@ -532,6 +537,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
err = fan_ctrl_init(fan_data);
if (err)
return err;
+ devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
}
/* Make this driver part of hwmon class. */
@@ -543,32 +549,20 @@ static int gpio_fan_probe(struct platform_device *pdev)
return PTR_ERR(fan_data->hwmon_dev);
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(np,
- "gpio-fan",
- fan_data,
- &gpio_fan_cool_ops);
+ fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
+ "gpio-fan", fan_data, &gpio_fan_cool_ops);
dev_info(dev, "GPIO fan initialized\n");
return 0;
}
-static int gpio_fan_remove(struct platform_device *pdev)
+static void gpio_fan_shutdown(struct platform_device *pdev)
{
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
- if (!IS_ERR(fan_data->cdev))
- thermal_cooling_device_unregister(fan_data->cdev);
-
if (fan_data->gpios)
set_fan_speed(fan_data, 0);
-
- return 0;
-}
-
-static void gpio_fan_shutdown(struct platform_device *pdev)
-{
- gpio_fan_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
@@ -602,7 +596,6 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
- .remove = gpio_fan_remove,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index d167fcfec765..2bf9599b34a1 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -252,7 +252,7 @@ static const struct i2c_device_id hih6130_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hih6130_id);
-static const struct of_device_id hih6130_of_match[] = {
+static const struct of_device_id __maybe_unused hih6130_of_match[] = {
{ .compatible = "honeywell,hih6130", },
{ }
};
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c22dc1e07911..e694c46ff039 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -118,9 +118,7 @@ static DEFINE_IDA(hwmon_ida);
* The complex conditional is necessary to avoid a cyclic dependency
* between hwmon and thermal_sys modules.
*/
-#if IS_REACHABLE(CONFIG_THERMAL) && defined(CONFIG_THERMAL_OF) && \
- (!defined(CONFIG_THERMAL_HWMON) || \
- !(defined(MODULE) && IS_MODULE(CONFIG_THERMAL)))
+#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
@@ -324,6 +322,11 @@ static const char * const hwmon_chip_attrs[] = {
[hwmon_chip_power_reset_history] = "power_reset_history",
[hwmon_chip_update_interval] = "update_interval",
[hwmon_chip_alarms] = "alarms",
+ [hwmon_chip_samples] = "samples",
+ [hwmon_chip_curr_samples] = "curr_samples",
+ [hwmon_chip_in_samples] = "in_samples",
+ [hwmon_chip_power_samples] = "power_samples",
+ [hwmon_chip_temp_samples] = "temp_samples",
};
static const char * const hwmon_temp_attr_templates[] = {
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 5c3c08449de7..1770423f7a80 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -92,6 +92,9 @@ static int iio_hwmon_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < st->num_channels; i++) {
+ const char *prefix;
+ int n;
+
a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
if (a == NULL)
return -ENOMEM;
@@ -103,28 +106,28 @@ static int iio_hwmon_probe(struct platform_device *pdev)
switch (type) {
case IIO_VOLTAGE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "in%d_input",
- in_i++);
+ n = in_i++;
+ prefix = "in";
break;
case IIO_TEMP:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "temp%d_input",
- temp_i++);
+ n = temp_i++;
+ prefix = "temp";
break;
case IIO_CURRENT:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "curr%d_input",
- curr_i++);
+ n = curr_i++;
+ prefix = "curr";
break;
case IIO_HUMIDITYRELATIVE:
- a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
- "humidity%d_input",
- humidity_i++);
+ n = humidity_i++;
+ prefix = "humidity";
break;
default:
return -EINVAL;
}
+
+ a->dev_attr.attr.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s%d_input",
+ prefix, n);
if (a->dev_attr.attr.name == NULL)
return -ENOMEM;
diff --git a/drivers/hwmon/ina209.c b/drivers/hwmon/ina209.c
index e3854463db84..6a4ec2f2ddb2 100644
--- a/drivers/hwmon/ina209.c
+++ b/drivers/hwmon/ina209.c
@@ -587,7 +587,7 @@ static const struct i2c_device_id ina209_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina209_id);
-static const struct of_device_id ina209_of_match[] = {
+static const struct of_device_id __maybe_unused ina209_of_match[] = {
{ .compatible = "ti,ina209" },
{ },
};
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 290379c49be9..42df51f8cdf2 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -507,7 +507,7 @@ static const struct i2c_device_id ina2xx_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ina2xx_id);
-static const struct of_device_id ina2xx_of_match[] = {
+static const struct of_device_id __maybe_unused ina2xx_of_match[] = {
{
.compatible = "ti,ina219",
.data = (void *)ina219
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index 3626b87a5fd2..e0637fed9585 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -22,6 +22,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/util_macros.h>
#define INA3221_DRIVER_NAME "ina3221"
@@ -51,6 +52,9 @@
#define INA3221_CONFIG_VBUS_CT_SHIFT 6
#define INA3221_CONFIG_VBUS_CT_MASK GENMASK(8, 6)
#define INA3221_CONFIG_VBUS_CT(x) (((x) & GENMASK(8, 6)) >> 6)
+#define INA3221_CONFIG_AVG_SHIFT 9
+#define INA3221_CONFIG_AVG_MASK GENMASK(11, 9)
+#define INA3221_CONFIG_AVG(x) (((x) & GENMASK(11, 9)) >> 9)
#define INA3221_CONFIG_CHs_EN_MASK GENMASK(14, 12)
#define INA3221_CONFIG_CHx_EN(x) BIT(14 - (x))
@@ -135,17 +139,42 @@ static const u16 ina3221_conv_time[] = {
140, 204, 332, 588, 1100, 2116, 4156, 8244,
};
-static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+/* Lookup table for the number of samples used in averaging mode */
+static const int ina3221_avg_samples[] = {
+ 1, 4, 16, 64, 128, 256, 512, 1024,
+};
+
+/* Converting update_interval in msec to conversion time in usec */
+static inline u32 ina3221_interval_ms_to_conv_time(u16 config, int interval)
+{
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
+
+ /* Bisect the result to Bus and Shunt conversion times */
+ return DIV_ROUND_CLOSEST(interval * 1000 / 2, channels * samples);
+}
+
+/* Converting CONFIG register value to update_interval in usec */
+static inline u32 ina3221_reg_to_interval_us(u16 config)
{
- u32 channels = hweight16(ina->reg_config & INA3221_CONFIG_CHs_EN_MASK);
- u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(ina->reg_config);
- u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(ina->reg_config);
+ u32 channels = hweight16(config & INA3221_CONFIG_CHs_EN_MASK);
+ u32 vbus_ct_idx = INA3221_CONFIG_VBUS_CT(config);
+ u32 vsh_ct_idx = INA3221_CONFIG_VSH_CT(config);
+ u32 samples_idx = INA3221_CONFIG_AVG(config);
+ u32 samples = ina3221_avg_samples[samples_idx];
u32 vbus_ct = ina3221_conv_time[vbus_ct_idx];
u32 vsh_ct = ina3221_conv_time[vsh_ct_idx];
- u32 wait, cvrf;
/* Calculate total conversion time */
- wait = channels * (vbus_ct + vsh_ct);
+ return channels * (vbus_ct + vsh_ct) * samples;
+}
+
+static inline int ina3221_wait_for_data(struct ina3221_data *ina)
+{
+ u32 wait, cvrf;
+
+ wait = ina3221_reg_to_interval_us(ina->reg_config);
/* Polling the CVRF bit to make sure read data is ready */
return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
@@ -176,6 +205,26 @@ static const u8 ina3221_in_reg[] = {
INA3221_SHUNT3,
};
+static int ina3221_read_chip(struct device *dev, u32 attr, long *val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int regval;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ regval = INA3221_CONFIG_AVG(ina->reg_config);
+ *val = ina3221_avg_samples[regval];
+ return 0;
+ case hwmon_chip_update_interval:
+ /* Return in msec */
+ *val = ina3221_reg_to_interval_us(ina->reg_config);
+ *val = DIV_ROUND_CLOSEST(*val, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_read_in(struct device *dev, u32 attr, int channel, long *val)
{
const bool is_shunt = channel > INA3221_CHANNEL3;
@@ -279,6 +328,48 @@ static int ina3221_read_curr(struct device *dev, u32 attr,
}
}
+static int ina3221_write_chip(struct device *dev, u32 attr, long val)
+{
+ struct ina3221_data *ina = dev_get_drvdata(dev);
+ int ret, idx;
+ u32 tmp;
+
+ switch (attr) {
+ case hwmon_chip_samples:
+ idx = find_closest(val, ina3221_avg_samples,
+ ARRAY_SIZE(ina3221_avg_samples));
+
+ tmp = (ina->reg_config & ~INA3221_CONFIG_AVG_MASK) |
+ (idx << INA3221_CONFIG_AVG_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ case hwmon_chip_update_interval:
+ tmp = ina3221_interval_ms_to_conv_time(ina->reg_config, val);
+ idx = find_closest(tmp, ina3221_conv_time,
+ ARRAY_SIZE(ina3221_conv_time));
+
+ /* Update Bus and Shunt voltage conversion times */
+ tmp = INA3221_CONFIG_VBUS_CT_MASK | INA3221_CONFIG_VSH_CT_MASK;
+ tmp = (ina->reg_config & ~tmp) |
+ (idx << INA3221_CONFIG_VBUS_CT_SHIFT) |
+ (idx << INA3221_CONFIG_VSH_CT_SHIFT);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
+ if (ret)
+ return ret;
+
+ /* Update reg_config accordingly */
+ ina->reg_config = tmp;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina3221_write_curr(struct device *dev, u32 attr,
int channel, long val)
{
@@ -309,6 +400,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
struct ina3221_data *ina = dev_get_drvdata(dev);
u16 config, mask = INA3221_CONFIG_CHx_EN(channel);
u16 config_old = ina->reg_config & mask;
+ u32 tmp;
int ret;
config = enable ? mask : 0;
@@ -327,14 +419,13 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
}
/* Enable or disable the channel */
- ret = regmap_update_bits(ina->regmap, INA3221_CONFIG, mask, config);
+ tmp = (ina->reg_config & ~mask) | (config & mask);
+ ret = regmap_write(ina->regmap, INA3221_CONFIG, tmp);
if (ret)
goto fail;
/* Cache the latest config register value */
- ret = regmap_read(ina->regmap, INA3221_CONFIG, &ina->reg_config);
- if (ret)
- goto fail;
+ ina->reg_config = tmp;
/* For disabling routine, decrease refcount or suspend() at last */
if (!enable)
@@ -361,6 +452,9 @@ static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_read_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_read_in(dev, attr, channel - 1, val);
@@ -387,6 +481,9 @@ static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
mutex_lock(&ina->lock);
switch (type) {
+ case hwmon_chip:
+ ret = ina3221_write_chip(dev, attr, val);
+ break;
case hwmon_in:
/* 0-align channel ID */
ret = ina3221_write_enable(dev, channel - 1, val);
@@ -423,6 +520,14 @@ static umode_t ina3221_is_visible(const void *drvdata,
const struct ina3221_input *input = NULL;
switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_samples:
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ return 0;
+ }
case hwmon_in:
/* Ignore in0_ */
if (channel == 0)
@@ -458,44 +563,29 @@ static umode_t ina3221_is_visible(const void *drvdata,
}
}
-static const u32 ina3221_in_config[] = {
- /* 0: dummy, skipped in is_visible */
- HWMON_I_INPUT,
- /* 1-3: input voltage Channels */
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
- /* 4-6: shunt voltage Channels */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_in = {
- .type = hwmon_in,
- .config = ina3221_in_config,
-};
-
#define INA3221_HWMON_CURR_CONFIG (HWMON_C_INPUT | \
HWMON_C_CRIT | HWMON_C_CRIT_ALARM | \
HWMON_C_MAX | HWMON_C_MAX_ALARM)
-static const u32 ina3221_curr_config[] = {
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- INA3221_HWMON_CURR_CONFIG,
- 0
-};
-
-static const struct hwmon_channel_info ina3221_curr = {
- .type = hwmon_curr,
- .config = ina3221_curr_config,
-};
-
static const struct hwmon_channel_info *ina3221_info[] = {
- &ina3221_in,
- &ina3221_curr,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_SAMPLES,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(in,
+ /* 0: dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ /* 1-3: input voltage Channels */
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_ENABLE | HWMON_I_LABEL,
+ /* 4-6: shunt voltage Channels */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG,
+ INA3221_HWMON_CURR_CONFIG),
NULL
};
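
The new hwmon_chip_samples write path relies on find_closest() from <linux/util_macros.h>, which returns the index of the array element nearest to the requested value, so any user input is snapped to one of the discrete averaging settings. A small sketch using the same table as ina3221_avg_samples above:

	#include <linux/kernel.h>
	#include <linux/util_macros.h>

	static const int avg_samples[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

	/*
	 * Writing 100 to the "samples" attribute yields index 4: 128 is the
	 * closest supported value, and that index is then shifted into the
	 * CONFIG register's AVG field.
	 */
	static int samples_to_index(long val)
	{
		return find_closest(val, avg_samples, ARRAY_SIZE(avg_samples));
	}
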
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 4fa482ae0eb5..6b57a6d4e626 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -451,20 +451,12 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
return -ENODEV;
}
-static const u32 jc42_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | HWMON_T_CRIT |
- HWMON_T_MAX_HYST | HWMON_T_CRIT_HYST |
- HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info jc42_temp = {
- .type = hwmon_temp,
- .config = jc42_temp_config,
-};
-
static const struct hwmon_channel_info *jc42_info[] = {
- &jc42_temp,
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_CRIT | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM),
NULL
};
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index 2d40a2e771d7..7d5947595b45 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -94,7 +94,6 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct jz4740_hwmon *hwmon;
struct device *hwmon_dev;
- struct resource *mem;
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
@@ -109,8 +108,7 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
return hwmon->irq;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hwmon->base = devm_ioremap_resource(&pdev->dev, mem);
+ hwmon->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hwmon->base))
return PTR_ERR(hwmon->base);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index eac54b9cdeec..8848fbe5fd16 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1153,7 +1153,7 @@ static const struct i2c_device_id lm63_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
-static const struct of_device_id lm63_of_match[] = {
+static const struct of_device_id __maybe_unused lm63_of_match[] = {
{
.compatible = "national,lm63",
.data = (void *)lm63
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 447af07450f1..423a382420b9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -59,6 +59,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
tmp175,
tmp275,
tmp75,
+ tmp75b,
tmp75c,
};
@@ -194,35 +195,11 @@ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-/*-----------------------------------------------------------------------*/
-
-/* device probe and removal */
-
-/* chip configuration */
-
-static const u32 lm75_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm75_chip = {
- .type = hwmon_chip,
- .config = lm75_chip_config,
-};
-
-static const u32 lm75_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info lm75_temp = {
- .type = hwmon_temp,
- .config = lm75_temp_config,
-};
-
static const struct hwmon_channel_info *lm75_info[] = {
- &lm75_chip,
- &lm75_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -378,6 +355,11 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
data->resolution = 12;
data->sample_time = MSEC_PER_SEC / 2;
break;
+ case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
+ clr_mask |= 1 << 15 | 0x3 << 13;
+ data->resolution = 12;
+ data->sample_time = MSEC_PER_SEC / 37;
+ break;
case tmp75c:
clr_mask |= 1 << 5; /* not one-shot mode */
data->resolution = 12;
@@ -438,12 +420,13 @@ static const struct i2c_device_id lm75_ids[] = {
{ "tmp175", tmp175, },
{ "tmp275", tmp275, },
{ "tmp75", tmp75, },
+ { "tmp75b", tmp75b, },
{ "tmp75c", tmp75c, },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm75_ids);
-static const struct of_device_id lm75_of_match[] = {
+static const struct of_device_id __maybe_unused lm75_of_match[] = {
{
.compatible = "adi,adt75",
.data = (void *)adt75
@@ -537,6 +520,10 @@ static const struct of_device_id lm75_of_match[] = {
.data = (void *)tmp75
},
{
+ .compatible = "ti,tmp75b",
+ .data = (void *)tmp75b
+ },
+ {
.compatible = "ti,tmp75c",
.data = (void *)tmp75c
},
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 0cb7ff613b80..eb95947673de 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -73,7 +73,6 @@ enum chips { lm78, lm79 };
#define LM78_REG_CHIPID 0x49
#define LM78_REG_I2C_ADDR 0x48
-
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants.
@@ -147,15 +146,13 @@ struct lm78_data {
u16 alarms; /* Register encoding, combined */
};
-
static int lm78_read_value(struct lm78_data *data, u8 reg);
static int lm78_write_value(struct lm78_data *data, u8 reg, u8 value);
static struct lm78_data *lm78_update_device(struct device *dev);
static void lm78_init_device(struct lm78_data *data);
-
/* 7 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -163,7 +160,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[attr->index]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -171,7 +168,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[attr->index]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -179,8 +176,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[attr->index]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -199,8 +196,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -219,21 +216,27 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
-show_in_offset(5);
-show_in_offset(6);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -300,7 +303,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 3 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -310,7 +313,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -320,8 +323,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -340,7 +343,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -354,8 +357,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm78_data *data = dev_get_drvdata(dev);
@@ -413,22 +416,17 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
/* Fan 3 divisor is locked in H/W */
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
- show_fan_div, set_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_div, fan_div, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *da,
@@ -448,24 +446,24 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *da,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm78_data *data = lm78_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
static struct attribute *lm78_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index a95d48316f06..80db367b4c54 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -165,7 +165,6 @@ static inline u16 FAN_TO_REG(unsigned long val)
#define PWM_TO_REG(val) clamp_val(val, 0, 255)
#define PWM_FROM_REG(val) (val)
-
/*
* ZONEs have the following parameters:
* Limit (low) temp, 1. degC
@@ -563,24 +562,25 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
/* 4 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr]));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -599,16 +599,14 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-show_fan_offset(1);
-show_fan_offset(2);
-show_fan_offset(3);
-show_fan_offset(4);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RO(fan4_input, fan, 3);
+static SENSOR_DEVICE_ATTR_RW(fan4_min, fan_min, 3);
/* vid, vrm, alarms */
@@ -667,44 +665,44 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 18);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 18);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp1_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 12);
+static SENSOR_DEVICE_ATTR_RO(fan4_alarm, alarm, 13);
/* pwm */
-static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr]));
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -723,8 +721,8 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t pwm_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -745,8 +743,9 @@ static ssize_t show_pwm_enable(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", enable);
}
-static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t pwm_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -788,8 +787,8 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
return count;
}
-static ssize_t show_pwm_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -804,8 +803,9 @@ static ssize_t show_pwm_freq(struct device *dev,
return sprintf(buf, "%d\n", freq);
}
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_freq_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -841,22 +841,20 @@ static ssize_t set_pwm_freq(struct device *dev,
return count;
}
-#define show_pwm_reg(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- show_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- show_pwm_enable, set_pwm_enable, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_freq, S_IRUGO | S_IWUSR, \
- show_pwm_freq, set_pwm_freq, offset - 1)
-
-show_pwm_reg(1);
-show_pwm_reg(2);
-show_pwm_reg(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -864,16 +862,16 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
data->in_ext[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_min[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -892,16 +890,16 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", INS_FROM_REG(nr, data->in_max[nr]));
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -920,27 +918,35 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_reg(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-show_in_reg(0);
-show_in_reg(1);
-show_in_reg(2);
-show_in_reg(3);
-show_in_reg(4);
-show_in_reg(5);
-show_in_reg(6);
-show_in_reg(7);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temps */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -948,16 +954,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
data->temp_ext[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -979,16 +986,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1010,31 +1018,30 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1);
-
-show_temp_reg(1);
-show_temp_reg(2);
-show_temp_reg(3);
-
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
/* Automatic PWM control */
-static ssize_t show_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_channels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", ZONE_FROM_REG(data->autofan[nr].config));
}
-static ssize_t set_pwm_auto_channels(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_channels_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1055,16 +1062,17 @@ static ssize_t set_pwm_auto_channels(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->autofan[nr].min_pwm));
}
-static ssize_t set_pwm_auto_pwm_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1084,16 +1092,18 @@ static ssize_t set_pwm_auto_pwm_min(struct device *dev,
return count;
}
-static ssize_t show_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t pwm_auto_pwm_minctl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", data->autofan[nr].min_off);
}
-static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t pwm_auto_pwm_minctl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1117,25 +1127,21 @@ static ssize_t set_pwm_auto_pwm_minctl(struct device *dev,
return count;
}
-#define pwm_auto(offset) \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels, \
- S_IRUGO | S_IWUSR, show_pwm_auto_channels, \
- set_pwm_auto_channels, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_min, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_min, \
- set_pwm_auto_pwm_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_auto_pwm_minctl, \
- S_IRUGO | S_IWUSR, show_pwm_auto_pwm_minctl, \
- set_pwm_auto_pwm_minctl, offset - 1)
-
-pwm_auto(1);
-pwm_auto(2);
-pwm_auto(3);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_channels, pwm_auto_channels, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_min, pwm_auto_pwm_min, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_auto_pwm_minctl, pwm_auto_pwm_minctl, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_channels, pwm_auto_channels, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_min, pwm_auto_pwm_min, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_auto_pwm_minctl, pwm_auto_pwm_minctl, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_channels, pwm_auto_channels, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_min, pwm_auto_pwm_min, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_auto_pwm_minctl, pwm_auto_pwm_minctl, 2);
/* Temperature settings for automatic PWM control */
-static ssize_t show_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1143,8 +1149,9 @@ static ssize_t show_temp_auto_temp_off(struct device *dev,
HYST_FROM_REG(data->zone[nr].hyst));
}
-static ssize_t set_temp_auto_temp_off(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1172,16 +1179,18 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_min_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
}
-static ssize_t set_temp_auto_temp_min(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_min_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1210,8 +1219,9 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
@@ -1219,8 +1229,9 @@ static ssize_t show_temp_auto_temp_max(struct device *dev,
RANGE_FROM_REG(data->zone[nr].range));
}
-static ssize_t set_temp_auto_temp_max(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_max_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1245,16 +1256,18 @@ static ssize_t set_temp_auto_temp_max(struct device *dev,
return count;
}
-static ssize_t show_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t temp_auto_temp_crit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = lm85_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
}
-static ssize_t set_temp_auto_temp_crit(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t temp_auto_temp_crit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm85_data *data = dev_get_drvdata(dev);
@@ -1274,23 +1287,18 @@ static ssize_t set_temp_auto_temp_crit(struct device *dev,
return count;
}
-#define temp_auto(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_off, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_off, \
- set_temp_auto_temp_off, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_min, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_min, \
- set_temp_auto_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_max, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_max, \
- set_temp_auto_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_auto_temp_crit, \
- S_IRUGO | S_IWUSR, show_temp_auto_temp_crit, \
- set_temp_auto_temp_crit, offset - 1);
-
-temp_auto(1);
-temp_auto(2);
-temp_auto(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_off, temp_auto_temp_off, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_min, temp_auto_temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_max, temp_auto_temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_auto_temp_crit, temp_auto_temp_crit, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_off, temp_auto_temp_off, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_min, temp_auto_temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_max, temp_auto_temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_auto_temp_crit, temp_auto_temp_crit, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_off, temp_auto_temp_off, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_min, temp_auto_temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_max, temp_auto_temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_auto_temp_crit, temp_auto_temp_crit, 2);
static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
@@ -1642,7 +1650,7 @@ static const struct i2c_device_id lm85_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
-static const struct of_device_id lm85_of_match[] = {
+static const struct of_device_id __maybe_unused lm85_of_match[] = {
{
.compatible = "adi,adm1027",
.data = (void *)adm1027
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index b48d30760388..644e61eae574 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -276,8 +276,8 @@ static struct lm87_data *lm87_update_device(struct device *dev)
* Sysfs stuff
*/
-static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_input_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -286,8 +286,8 @@ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
data->in_scale[nr]));
}
-static ssize_t show_in_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -296,8 +296,8 @@ static ssize_t show_in_min(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t show_in_max(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -306,8 +306,8 @@ static ssize_t show_in_max(struct device *dev,
data->in_scale[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -327,8 +327,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -348,23 +348,32 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_in(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-set_in(0);
-set_in(1);
-set_in(2);
-set_in(3);
-set_in(4);
-set_in(5);
-set_in(6);
-set_in(7);
-
-static ssize_t show_temp_input(struct device *dev,
+static SENSOR_DEVICE_ATTR_RO(in0_input, in_input, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+
+static ssize_t temp_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -373,7 +382,7 @@ static ssize_t show_temp_input(struct device *dev,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_low(struct device *dev,
+static ssize_t temp_low_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -383,7 +392,7 @@ static ssize_t show_temp_low(struct device *dev,
TEMP_FROM_REG(data->temp_low[nr]));
}
-static ssize_t show_temp_high(struct device *dev,
+static ssize_t temp_high_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -393,8 +402,9 @@ static ssize_t show_temp_high(struct device *dev,
TEMP_FROM_REG(data->temp_high[nr]));
}
-static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -413,8 +423,9 @@ static ssize_t set_temp_low(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_high_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -433,16 +444,15 @@ static ssize_t set_temp_high(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_temp(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_high, set_temp_high, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_low, set_temp_low, offset - 1)
-set_temp(1);
-set_temp(2);
-set_temp(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_input, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_low, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_high, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_input, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_low, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_high, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_input, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_low, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_high, 2);
static ssize_t temp1_crit_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -460,9 +470,9 @@ static ssize_t temp2_crit_show(struct device *dev,
static DEVICE_ATTR_RO(temp1_crit);
static DEVICE_ATTR_RO(temp2_crit);
-static DEVICE_ATTR(temp3_crit, S_IRUGO, temp2_crit_show, NULL);
+static DEVICE_ATTR(temp3_crit, 0444, temp2_crit_show, NULL);
-static ssize_t show_fan_input(struct device *dev,
+static ssize_t fan_input_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
@@ -472,8 +482,8 @@ static ssize_t show_fan_input(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -482,8 +492,8 @@ static ssize_t show_fan_min(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
@@ -492,8 +502,9 @@ static ssize_t show_fan_div(struct device *dev,
FAN_DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -519,8 +530,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct i2c_client *client = dev_get_drvdata(dev);
struct lm87_data *data = i2c_get_clientdata(client);
@@ -575,15 +587,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-#define set_fan(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1)
-set_fan(1);
-set_fan(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -653,28 +662,28 @@ static ssize_t aout_output_store(struct device *dev,
}
static DEVICE_ATTR_RW(aout_output);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm87_data *data = lm87_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 14);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 14);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 15);
/*
* Real code
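
A note on the rename pattern in the lm85 and lm87 hunks above: the show_*/set_*
callbacks become *_show/*_store because the SENSOR_DEVICE_ATTR_RO/_RW helpers
derive the callback names from their second argument. Roughly (simplified from
<linux/hwmon-sysfs.h>):

	#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)			\
		SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)

	#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)			\
		SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

So a declaration such as

	static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);

expects callbacks named in_min_show() and in_min_store(), each picking up the
channel number from to_sensor_dev_attr(attr)->index, which is exactly how the
converted functions above are written.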
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 480d70a51778..b99eda01696e 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -236,7 +236,7 @@ static const struct i2c_device_id lm90_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
-static const struct of_device_id lm90_of_match[] = {
+static const struct of_device_id __maybe_unused lm90_of_match[] = {
{
.compatible = "adi,adm1032",
.data = (void *)adm1032
@@ -1720,16 +1720,6 @@ static void lm90_regulator_disable(void *regulator)
regulator_disable(regulator);
}
-static const u32 lm90_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS,
- 0
-};
-
-static const struct hwmon_channel_info lm90_chip_info = {
- .type = hwmon_chip,
- .config = lm90_chip_config,
-};
-
static const struct hwmon_ops lm90_ops = {
.is_visible = lm90_is_visible,
@@ -1792,7 +1782,8 @@ static int lm90_probe(struct i2c_client *client,
data->chip.ops = &lm90_ops;
data->chip.info = data->info;
- data->info[0] = &lm90_chip_info;
+ data->info[0] = HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS);
data->info[1] = &data->temp_info;
info = &data->temp_info;
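
The lm90 hunk above is the first of several below (lm95241, lm95245, ltc4245
and more) that drop the open-coded config array plus hwmon_channel_info pair in
favour of the HWMON_CHANNEL_INFO() macro from <linux/hwmon.h>, which builds both
objects in place and appends the 0 terminator itself. A sketch of the
equivalence, with placeholder "demo" names rather than any one driver's:

	/* Old style: two named objects per channel type. */
	static const u32 demo_chip_config[] = {
		HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS,
		0
	};

	static const struct hwmon_channel_info demo_chip_info = {
		.type = hwmon_chip,
		.config = demo_chip_config,
	};

	/* New style: the same information in a single expression. */
	data->info[0] = HWMON_CHANNEL_INFO(chip,
		HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL | HWMON_C_ALARMS);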
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 3ff188937158..6c5215e6d448 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -418,33 +418,15 @@ static void lm95241_init_client(struct i2c_client *client,
data->model);
}
-static const u32 lm95241_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_chip = {
- .type = hwmon_chip,
- .config = lm95241_chip_config,
-};
-
-static const u32 lm95241_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_TYPE |
- HWMON_T_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info lm95241_temp = {
- .type = hwmon_temp,
- .config = lm95241_temp_config,
-};
-
static const struct hwmon_channel_info *lm95241_info[] = {
- &lm95241_chip,
- &lm95241_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_TYPE | HWMON_T_FAULT),
NULL
};
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index e4cac3a04536..8d08ca8bbdf8 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -92,19 +92,6 @@ static const unsigned short normal_i2c[] = {
#define LM95235_REVISION 0xB1
#define LM95245_REVISION 0xB3
-static const u8 lm95245_reg_address[] = {
- LM95245_REG_R_LOCAL_TEMPH_S,
- LM95245_REG_R_LOCAL_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_S,
- LM95245_REG_R_REMOTE_TEMPL_S,
- LM95245_REG_R_REMOTE_TEMPH_U,
- LM95245_REG_R_REMOTE_TEMPL_U,
- LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
- LM95245_REG_RW_REMOTE_TCRIT_LIMIT,
- LM95245_REG_RW_COMMON_HYSTERESIS,
- LM95245_REG_R_STATUS1,
-};
-
/* Client data (each client gets its own) */
struct lm95245_data {
struct regmap *regmap;
@@ -545,32 +532,16 @@ static const struct regmap_config lm95245_regmap_config = {
.use_single_write = true,
};
-static const u32 lm95245_chip_config[] = {
- HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_chip = {
- .type = hwmon_chip,
- .config = lm95245_chip_config,
-};
-
-static const u32 lm95245_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_CRIT_ALARM,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
- HWMON_T_CRIT_HYST | HWMON_T_FAULT | HWMON_T_MAX_ALARM |
- HWMON_T_CRIT_ALARM | HWMON_T_TYPE | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info lm95245_temp = {
- .type = hwmon_temp,
- .config = lm95245_temp_config,
-};
-
static const struct hwmon_channel_info *lm95245_info[] = {
- &lm95245_chip,
- &lm95245_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_HYST |
+ HWMON_T_CRIT_ALARM,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_FAULT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM |
+ HWMON_T_TYPE | HWMON_T_OFFSET),
NULL
};
@@ -623,7 +594,7 @@ static const struct i2c_device_id lm95245_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lm95245_id);
-static const struct of_device_id lm95245_of_match[] = {
+static const struct of_device_id __maybe_unused lm95245_of_match[] = {
{ .compatible = "national,lm95235" },
{ .compatible = "national,lm95245" },
{ },
diff --git a/drivers/hwmon/lochnagar-hwmon.c b/drivers/hwmon/lochnagar-hwmon.c
new file mode 100644
index 000000000000..8b19adf2eeb0
--- /dev/null
+++ b/drivers/hwmon/lochnagar-hwmon.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lochnagar hardware monitoring features
+ *
+ * Copyright (c) 2016-2019 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ *
+ * Author: Lucas Tanure <tanureal@opensource.cirrus.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/math64.h>
+#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar2_regs.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define LN2_MAX_NSAMPLE 1023
+#define LN2_SAMPLE_US 1670
+
+#define LN2_CURR_UNITS 1000
+#define LN2_VOLT_UNITS 1000
+#define LN2_TEMP_UNITS 1000
+#define LN2_PWR_UNITS 1000000
+
+static const char * const lochnagar_chan_names[] = {
+ "DBVDD1",
+ "1V8 DSP",
+ "1V8 CDC",
+ "VDDCORE DSP",
+ "AVDD 1V8",
+ "SYSVDD",
+ "VDDCORE CDC",
+ "MICVDD",
+};
+
+struct lochnagar_hwmon {
+ struct regmap *regmap;
+
+ long power_nsamples[ARRAY_SIZE(lochnagar_chan_names)];
+
+ /* Lock to ensure only a single sensor is read at a time */
+ struct mutex sensor_lock;
+};
+
+enum lochnagar_measure_mode {
+ LN2_CURR = 0,
+ LN2_VOLT,
+ LN2_TEMP,
+};
+
+/**
+ * float_to_long - Convert an IEEE 754 reading from the hardware to an integer
+ *
+ * @data: Value read from the hardware
+ * @precision: Units to multiply up to, e.g. 1000 = milli, 1000000 = micro
+ *
+ * Return: Converted integer reading
+ *
+ * Depending on the measurement type, the hardware returns an IEEE 754
+ * floating point value in either volts, amps or celsius. This function
+ * converts that into an integer in a smaller unit such as micro-amps
+ * or milli-celsius. The hardware does not return NaN, so that case does
+ * not need to be handled.
+ */
+static long float_to_long(u32 data, u32 precision)
+{
+ u64 man = data & 0x007FFFFF;
+ int exp = ((data & 0x7F800000) >> 23) - 127 - 23;
+ bool negative = data & 0x80000000;
+ long result;
+
+ man = (man + (1 << 23)) * precision;
+
+ if (fls64(man) + exp > (int)sizeof(long) * 8 - 1)
+ result = LONG_MAX;
+ else if (exp < 0)
+ result = (man + (1ull << (-exp - 1))) >> -exp;
+ else
+ result = man << exp;
+
+ return negative ? -result : result;
+}
+
+static int do_measurement(struct regmap *regmap, int chan,
+ enum lochnagar_measure_mode mode, int nsamples)
+{
+ unsigned int val;
+ int ret;
+
+ chan = 1 << (chan + LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT);
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL1,
+ LOCHNAGAR2_IMON_ENA_MASK | chan | mode);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL2, nsamples);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_CONFIGURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3,
+ LOCHNAGAR2_IMON_MEASURE_MASK);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The actual measurement time is ~1.67 ms per sample; approximate this
+ * with a 1.5 ms per sample msleep and then poll for success for up to
+ * ~0.17 ms * 1023 (LN2_MAX_NSAMPLE). Normally, for smaller values
+ * of nsamples the poll will complete on the first loop due to
+ * other latency in the system.
+ */
+ msleep((nsamples * 3) / 2);
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL3, val,
+ val & LOCHNAGAR2_IMON_DONE_MASK,
+ 5000, 200000);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL3, 0);
+}
+
+static int request_data(struct regmap *regmap, int chan, u32 *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4,
+ LOCHNAGAR2_IMON_DATA_REQ_MASK |
+ chan << LOCHNAGAR2_IMON_CH_SEL_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(regmap, LOCHNAGAR2_IMON_CTRL4, val,
+ val & LOCHNAGAR2_IMON_DATA_RDY_MASK,
+ 1000, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA1, &val);
+ if (ret < 0)
+ return ret;
+
+ *data = val << 16;
+
+ ret = regmap_read(regmap, LOCHNAGAR2_IMON_DATA2, &val);
+ if (ret < 0)
+ return ret;
+
+ *data |= val;
+
+ return regmap_write(regmap, LOCHNAGAR2_IMON_CTRL4, 0);
+}
+
+static int read_sensor(struct device *dev, int chan,
+ enum lochnagar_measure_mode mode, int nsamples,
+ unsigned int precision, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ struct regmap *regmap = priv->regmap;
+ u32 data;
+ int ret;
+
+ mutex_lock(&priv->sensor_lock);
+
+ ret = do_measurement(regmap, chan, mode, nsamples);
+ if (ret < 0) {
+ dev_err(dev, "Failed to perform measurement: %d\n", ret);
+ goto error;
+ }
+
+ ret = request_data(regmap, chan, &data);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read measurement: %d\n", ret);
+ goto error;
+ }
+
+ *val = float_to_long(data, precision);
+
+error:
+ mutex_unlock(&priv->sensor_lock);
+
+ return ret;
+}
+
+static int read_power(struct device *dev, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int nsamples = priv->power_nsamples[chan];
+ u64 power;
+ int ret;
+
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan])) {
+ power = 5 * LN2_PWR_UNITS;
+ } else {
+ ret = read_sensor(dev, chan, LN2_VOLT, 1, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
+ power = abs(*val);
+ }
+
+ ret = read_sensor(dev, chan, LN2_CURR, nsamples, LN2_PWR_UNITS, val);
+ if (ret < 0)
+ return ret;
+
+ power *= abs(*val);
+ power = DIV_ROUND_CLOSEST_ULL(power, LN2_PWR_UNITS);
+
+ if (power > LONG_MAX)
+ *val = LONG_MAX;
+ else
+ *val = power;
+
+ return 0;
+}
+
+static umode_t lochnagar_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type,
+ u32 attr, int chan)
+{
+ switch (type) {
+ case hwmon_in:
+ if (!strcmp("SYSVDD", lochnagar_chan_names[chan]))
+ return 0;
+ break;
+ case hwmon_power:
+ if (attr == hwmon_power_average_interval)
+ return 0644;
+ break;
+ default:
+ break;
+ }
+
+ return 0444;
+}
+
+static int lochnagar_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long *val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+ int interval;
+
+ switch (type) {
+ case hwmon_in:
+ return read_sensor(dev, chan, LN2_VOLT, 1, LN2_VOLT_UNITS, val);
+ case hwmon_curr:
+ return read_sensor(dev, chan, LN2_CURR, 1, LN2_CURR_UNITS, val);
+ case hwmon_temp:
+ return read_sensor(dev, chan, LN2_TEMP, 1, LN2_TEMP_UNITS, val);
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_average:
+ return read_power(dev, chan, val);
+ case hwmon_power_average_interval:
+ interval = priv->power_nsamples[chan] * LN2_SAMPLE_US;
+ *val = DIV_ROUND_CLOSEST(interval, 1000);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int chan, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ case hwmon_curr:
+ case hwmon_power:
+ *str = lochnagar_chan_names[chan];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lochnagar_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int chan, long val)
+{
+ struct lochnagar_hwmon *priv = dev_get_drvdata(dev);
+
+ if (type != hwmon_power || attr != hwmon_power_average_interval)
+ return -EOPNOTSUPP;
+
+ val = clamp_t(long, val, 1, (LN2_MAX_NSAMPLE * LN2_SAMPLE_US) / 1000);
+ val = DIV_ROUND_CLOSEST(val * 1000, LN2_SAMPLE_US);
+
+ priv->power_nsamples[chan] = val;
+
+ return 0;
+}
+
+static const struct hwmon_ops lochnagar_ops = {
+ .is_visible = lochnagar_is_visible,
+ .read = lochnagar_read,
+ .read_string = lochnagar_read_string,
+ .write = lochnagar_write,
+};
+
+static const struct hwmon_channel_info *lochnagar_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr, HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL,
+ HWMON_P_AVERAGE | HWMON_P_AVERAGE_INTERVAL |
+ HWMON_P_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info lochnagar_chip_info = {
+ .ops = &lochnagar_ops,
+ .info = lochnagar_info,
+};
+
+static const struct of_device_id lochnagar_of_match[] = {
+ { .compatible = "cirrus,lochnagar2-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lochnagar_of_match);
+
+static int lochnagar_hwmon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ struct lochnagar_hwmon *priv;
+ int i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->sensor_lock);
+
+ priv->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!priv->regmap) {
+ dev_err(dev, "No register map found\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->power_nsamples); i++)
+ priv->power_nsamples[i] = 96;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "Lochnagar", priv,
+ &lochnagar_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver lochnagar_hwmon_driver = {
+ .driver = {
+ .name = "lochnagar-hwmon",
+ .of_match_table = lochnagar_of_match,
+ },
+ .probe = lochnagar_hwmon_probe,
+};
+module_platform_driver(lochnagar_hwmon_driver);
+
+MODULE_AUTHOR("Lucas Tanure <tanureal@opensource.cirrus.com>");
+MODULE_DESCRIPTION("Lochnagar hardware monitoring features");
+MODULE_LICENSE("GPL");
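
The float_to_long() helper in this new driver is the only place in the series
that unpacks floating-point readings by hand, so a small stand-alone check may
help. The sketch below is user-space code, not part of the patch, and it omits
the LONG_MAX overflow clamp that the kernel version applies via fls64():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Same unpack-scale-round steps as float_to_long() above. */
	static long float_to_long_demo(uint32_t data, uint32_t precision)
	{
		uint64_t man = data & 0x007FFFFF;
		int exp = ((data & 0x7F800000) >> 23) - 127 - 23;
		int negative = !!(data & 0x80000000);
		long result;

		man = (man + (1ULL << 23)) * precision;

		if (exp < 0)
			result = (man + (1ULL << (-exp - 1))) >> -exp;
		else
			result = man << exp;

		return negative ? -result : result;
	}

	int main(void)
	{
		float readings[] = { 1.0f, 3.14159274f, -0.5f };
		uint32_t raw;

		for (size_t i = 0; i < sizeof(readings) / sizeof(readings[0]); i++) {
			memcpy(&raw, &readings[i], sizeof(raw));
			/* With precision 1000: 1.0 -> 1000, pi -> 3142, -0.5 -> -500 */
			printf("%f -> %ld\n", readings[i],
			       float_to_long_demo(raw, 1000));
		}
		return 0;
	}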
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 76c6fda76d95..2b5cd37e6dc3 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -208,7 +208,7 @@ static const struct i2c_device_id ltc4151_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ltc4151_id);
-static const struct of_device_id ltc4151_match[] = {
+static const struct of_device_id __maybe_unused ltc4151_match[] = {
{ .compatible = "lltc,ltc4151" },
{},
};
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 34d0653ca607..26542635de9b 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -390,57 +390,30 @@ static umode_t ltc4245_is_visible(const void *_data,
}
}
-static const u32 ltc4245_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT | HWMON_I_MIN_ALARM,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_in = {
- .type = hwmon_in,
- .config = ltc4245_in_config,
-};
-
-static const u32 ltc4245_curr_config[] = {
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- HWMON_C_INPUT | HWMON_C_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_curr = {
- .type = hwmon_curr,
- .config = ltc4245_curr_config,
-};
-
-static const u32 ltc4245_power_config[] = {
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- HWMON_P_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltc4245_power = {
- .type = hwmon_power,
- .config = ltc4245_power_config,
-};
-
static const struct hwmon_channel_info *ltc4245_info[] = {
- &ltc4245_in,
- &ltc4245_curr,
- &ltc4245_power,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT | HWMON_I_MIN_ALARM,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM,
+ HWMON_C_INPUT | HWMON_C_MAX_ALARM),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT,
+ HWMON_P_INPUT),
NULL
};
diff --git a/drivers/hwmon/ltq-cputemp.c b/drivers/hwmon/ltq-cputemp.c
index 1d33f94594c1..570791f0e024 100644
--- a/drivers/hwmon/ltq-cputemp.c
+++ b/drivers/hwmon/ltq-cputemp.c
@@ -77,29 +77,11 @@ static umode_t ltq_is_visible(const void *_data, enum hwmon_sensor_types type,
}
}
-static const u32 ltq_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ltq_chip = {
- .type = hwmon_chip,
- .config = ltq_chip_config,
-};
-
-static const u32 ltq_temp_config[] = {
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info ltq_temp = {
- .type = hwmon_temp,
- .config = ltq_temp_config,
-};
-
static const struct hwmon_channel_info *ltq_info[] = {
- &ltq_chip,
- &ltq_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index 3d9e210beedf..dd6a35219a18 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#include <linux/kernel.h>
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index 722bcbb9865a..0b0b04d36931 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -400,45 +400,27 @@ static umode_t max31790_is_visible(const void *data,
}
}
-static const u32 max31790_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info max31790_fan = {
- .type = hwmon_fan,
- .config = max31790_fan_config,
-};
-
-static const u32 max31790_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info max31790_pwm = {
- .type = hwmon_pwm,
- .config = max31790_pwm_config,
-};
-
static const struct hwmon_channel_info *max31790_info[] = {
- &max31790_fan,
- &max31790_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_TARGET | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
NULL
};
diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c
index 35555f0eefb9..1810a753f1a3 100644
--- a/drivers/hwmon/max6621.c
+++ b/drivers/hwmon/max6621.c
@@ -458,37 +458,19 @@ static const struct regmap_config max6621_regmap_config = {
.num_reg_defaults = ARRAY_SIZE(max6621_regmap_default),
};
-static u32 max6621_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info max6621_chip = {
- .type = hwmon_chip,
- .config = max6621_chip_config,
-};
-
-static const u32 max6621_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- HWMON_T_INPUT | HWMON_T_LABEL,
- 0
-};
-
-static const struct hwmon_channel_info max6621_temp = {
- .type = hwmon_temp,
- .config = max6621_temp_config,
-};
-
static const struct hwmon_channel_info *max6621_info[] = {
- &max6621_chip,
- &max6621_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
NULL
};
@@ -570,7 +552,7 @@ static const struct i2c_device_id max6621_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6621_id);
-static const struct of_device_id max6621_of_match[] = {
+static const struct of_device_id __maybe_unused max6621_of_match[] = {
{ .compatible = "maxim,max6621" },
{ }
};
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 61135a2d0cff..939953240827 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -40,6 +40,7 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
/*
* Insmod parameters
@@ -113,6 +114,7 @@ module_param(clock, int, 0444);
struct max6650_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
+ struct thermal_cooling_device *cooling_dev;
struct mutex update_lock;
int nr_fans;
char valid; /* zero until following fields are valid */
@@ -125,6 +127,7 @@ struct max6650_data {
u8 count;
u8 dac;
u8 alarm;
+ unsigned long cooling_dev_state;
};
static const u8 tach_reg[] = {
@@ -134,7 +137,7 @@ static const u8 tach_reg[] = {
MAX6650_REG_TACH3,
};
-static const struct of_device_id max6650_dt_match[] = {
+static const struct of_device_id __maybe_unused max6650_dt_match[] = {
{
.compatible = "maxim,max6650",
.data = (void *)1
@@ -694,6 +697,63 @@ static int max6650_init_client(struct max6650_data *data,
return 0;
}
+#if IS_ENABLED(CONFIG_THERMAL)
+
+static int max6650_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = 255;
+
+ return 0;
+}
+
+static int max6650_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct max6650_data *data = cdev->devdata;
+
+ *state = data->cooling_dev_state;
+
+ return 0;
+}
+
+static int max6650_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct max6650_data *data = cdev->devdata;
+ struct i2c_client *client = data->client;
+ int err;
+
+ state = clamp_val(state, 0, 255);
+
+ mutex_lock(&data->update_lock);
+
+ if (data->config & MAX6650_CFG_V12)
+ data->dac = 180 - (180 * state)/255;
+ else
+ data->dac = 76 - (76 * state)/255;
+
+ err = i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);
+
+ if (!err) {
+ max6650_set_operating_mode(data, state ?
+ MAX6650_CFG_MODE_OPEN_LOOP :
+ MAX6650_CFG_MODE_OFF);
+ data->cooling_dev_state = state;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return err < 0 ? err : 0;
+}
+
+static const struct thermal_cooling_device_ops max6650_cooling_ops = {
+ .get_max_state = max6650_get_max_state,
+ .get_cur_state = max6650_get_cur_state,
+ .set_cur_state = max6650_set_cur_state,
+};
+#endif
+
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -709,6 +769,7 @@ static int max6650_probe(struct i2c_client *client,
return -ENOMEM;
data->client = client;
+ i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->nr_fans = of_id ? (int)(uintptr_t)of_id->data : id->driver_data;
@@ -727,7 +788,31 @@ static int max6650_probe(struct i2c_client *client,
hwmon_dev = devm_hwmon_device_register_with_groups(dev,
client->name, data,
data->groups);
- return PTR_ERR_OR_ZERO(hwmon_dev);
+ err = PTR_ERR_OR_ZERO(hwmon_dev);
+ if (err)
+ return err;
+
+#if IS_ENABLED(CONFIG_THERMAL)
+ data->cooling_dev =
+ thermal_of_cooling_device_register(client->dev.of_node,
+ client->name, data,
+ &max6650_cooling_ops);
+ if (IS_ERR(data->cooling_dev))
+ dev_warn(&client->dev,
+ "thermal cooling device register failed: %ld\n",
+ PTR_ERR(data->cooling_dev));
+#endif
+ return 0;
+}
+
+static int max6650_remove(struct i2c_client *client)
+{
+ struct max6650_data *data = i2c_get_clientdata(client);
+
+ if (!IS_ERR(data->cooling_dev))
+ thermal_cooling_device_unregister(data->cooling_dev);
+
+ return 0;
}
static const struct i2c_device_id max6650_id[] = {
@@ -743,6 +828,7 @@ static struct i2c_driver max6650_driver = {
.of_match_table = of_match_ptr(max6650_dt_match),
},
.probe = max6650_probe,
+ .remove = max6650_remove,
.id_table = max6650_id,
};
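
For the cooling device added to max6650 above, the state requested by the
thermal core (0..255) is mapped linearly onto the speed-control DAC: register
value 180 at state 0 for 12 V fans (76 for 5 V fans), down to 0 at full state,
with the chip switched to open-loop mode whenever the state is non-zero. A
quick user-space check of that mapping, assuming the 12 V case:

	#include <stdio.h>

	int main(void)
	{
		/* Mirrors max6650_set_cur_state() with MAX6650_CFG_V12 set. */
		for (unsigned long state = 0; state <= 255; state += 85) {
			unsigned int dac = 180 - (180 * state) / 255;

			/* state 0 -> dac 180 (off), state 255 -> dac 0 (full) */
			printf("state %3lu -> dac %u\n", state, dac);
		}
		return 0;
	}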
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index da43f7ae3de1..328793eaee3c 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -650,7 +650,7 @@ static const struct i2c_device_id max6697_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max6697_id);
-static const struct of_device_id max6697_of_match[] = {
+static const struct of_device_id __maybe_unused max6697_of_match[] = {
{
.compatible = "maxim,max6581",
.data = (void *)max6581
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index c29a4c3c6b9e..f540f938fcd9 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -101,7 +101,7 @@ static int menf21bmc_hwmon_get_volt_limits(struct menf21bmc_hwmon *drv_data)
}
static ssize_t
-show_label(struct device *dev, struct device_attribute *devattr, char *buf)
+label_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
@@ -109,7 +109,7 @@ show_label(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_in(struct device *dev, struct device_attribute *devattr, char *buf)
+in_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = menf21bmc_hwmon_update(dev);
@@ -121,7 +121,7 @@ show_in(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_min(struct device *dev, struct device_attribute *devattr, char *buf)
+min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -130,7 +130,7 @@ show_min(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_max(struct device *dev, struct device_attribute *devattr, char *buf)
+max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct menf21bmc_hwmon *drv_data = dev_get_drvdata(dev);
@@ -138,21 +138,26 @@ show_max(struct device *dev, struct device_attribute *devattr, char *buf)
return sprintf(buf, "%d\n", drv_data->in_max[attr->index]);
}
-#define create_voltage_sysfs(idx) \
-static SENSOR_DEVICE_ATTR(in##idx##_input, S_IRUGO, \
- show_in, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_min, S_IRUGO, \
- show_min, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_max, S_IRUGO, \
- show_max, NULL, idx); \
-static SENSOR_DEVICE_ATTR(in##idx##_label, S_IRUGO, \
- show_label, NULL, idx);
-
-create_voltage_sysfs(0);
-create_voltage_sysfs(1);
-create_voltage_sysfs(2);
-create_voltage_sysfs(3);
-create_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_min, min, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_max, max, 0);
+static SENSOR_DEVICE_ATTR_RO(in0_label, label, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_min, min, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_max, max, 1);
+static SENSOR_DEVICE_ATTR_RO(in1_label, label, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_min, min, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_max, max, 2);
+static SENSOR_DEVICE_ATTR_RO(in2_label, label, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_min, min, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_max, max, 3);
+static SENSOR_DEVICE_ATTR_RO(in3_label, label, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_min, min, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_max, max, 4);
+static SENSOR_DEVICE_ATTR_RO(in4_label, label, 4);
static struct attribute *menf21bmc_hwmon_attrs[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index db8c6de0b6a0..ed8d59d4eecb 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -27,7 +27,9 @@
#define MLXREG_FAN_SPEED_MAX (MLXREG_FAN_MAX_STATE * 2)
#define MLXREG_FAN_SPEED_MIN_LEVEL 2 /* 20 percent */
#define MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF 44
-#define MLXREG_FAN_TACHO_DIVIDER_DEF 1132
+#define MLXREG_FAN_TACHO_DIV_MIN 283
+#define MLXREG_FAN_TACHO_DIV_DEF (MLXREG_FAN_TACHO_DIV_MIN * 4)
+#define MLXREG_FAN_TACHO_DIV_SCALE_MAX 64
/*
* FAN datasheet defines the formula for RPM calculations as RPM = 15/t-high.
* The logic in a programmable device measures the time t-high by sampling the
@@ -227,40 +229,22 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
-static const u32 mlxreg_fan_hwmon_fan_config[] = {
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- HWMON_F_INPUT | HWMON_F_FAULT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_fan = {
- .type = hwmon_fan,
- .config = mlxreg_fan_hwmon_fan_config,
-};
-
-static const u32 mlxreg_fan_hwmon_pwm_config[] = {
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info mlxreg_fan_hwmon_pwm = {
- .type = hwmon_pwm,
- .config = mlxreg_fan_hwmon_pwm_config,
-};
-
static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
- &mlxreg_fan_hwmon_fan,
- &mlxreg_fan_hwmon_pwm,
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT),
NULL
};
@@ -360,15 +344,57 @@ static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.set_cur_state = mlxreg_fan_set_cur_state,
};
+static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
+ return !!(regval & data->bit);
+}
+
+static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->capability, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query capability register 0x%08x\n",
+ data->capability);
+ return err;
+ }
+
+ /*
+ * Set the divider value according to the capability register if it
+ * contains a valid value; otherwise use the default value. The purpose
+ * of this validation is to protect against old hardware, in which this
+ * register can return zero.
+ */
+ if (regval > 0 && regval <= MLXREG_FAN_TACHO_DIV_SCALE_MAX)
+ fan->divider = regval * MLXREG_FAN_TACHO_DIV_MIN;
+
+ return 0;
+}
+
static int mlxreg_fan_config(struct mlxreg_fan *fan,
struct mlxreg_core_platform_data *pdata)
{
struct mlxreg_core_data *data = pdata->data;
bool configured = false;
int tacho_num = 0, i;
+ int err;
fan->samples = MLXREG_FAN_TACHO_SAMPLES_PER_PULSE_DEF;
- fan->divider = MLXREG_FAN_TACHO_DIVIDER_DEF;
+ fan->divider = MLXREG_FAN_TACHO_DIV_DEF;
for (i = 0; i < pdata->counter; i++, data++) {
if (strnstr(data->label, "tacho", sizeof(data->label))) {
if (tacho_num == MLXREG_FAN_MAX_TACHO) {
@@ -376,6 +402,17 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
data->label);
return -EINVAL;
}
+
+ if (data->capability) {
+ err = mlxreg_fan_connect_verify(fan, data);
+ if (err < 0)
+ return err;
+ else if (!err) {
+ tacho_num++;
+ continue;
+ }
+ }
+
fan->tacho[tacho_num].reg = data->reg;
fan->tacho[tacho_num].mask = data->mask;
fan->tacho[tacho_num++].connected = true;
@@ -394,13 +431,21 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
return -EINVAL;
}
/* Validate that conf parameters are not zeros. */
- if (!data->mask || !data->bit) {
+ if (!data->mask && !data->bit && !data->capability) {
dev_err(fan->dev, "invalid conf entry params: %s\n",
data->label);
return -EINVAL;
}
- fan->samples = data->mask;
- fan->divider = data->bit;
+ if (data->capability) {
+ err = mlxreg_fan_speed_divider_get(fan, data);
+ if (err)
+ return err;
+ } else {
+ if (data->mask)
+ fan->samples = data->mask;
+ if (data->bit)
+ fan->divider = data->bit;
+ }
configured = true;
} else {
dev_err(fan->dev, "invalid label: %s\n", data->label);
@@ -420,42 +465,42 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_probe(struct platform_device *pdev)
{
struct mlxreg_core_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct mlxreg_fan *fan;
struct device *hwm;
int err;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ fan = devm_kzalloc(dev, sizeof(*fan), GFP_KERNEL);
if (!fan)
return -ENOMEM;
- fan->dev = &pdev->dev;
+ fan->dev = dev;
fan->regmap = pdata->regmap;
- platform_set_drvdata(pdev, fan);
err = mlxreg_fan_config(fan, pdata);
if (err)
return err;
- hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ hwm = devm_hwmon_device_register_with_info(dev, "mlxreg_fan",
fan,
&mlxreg_fan_hwmon_chip_info,
NULL);
if (IS_ERR(hwm)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(hwm);
}
if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
- &mlxreg_fan_cooling_ops);
+ fan->cdev = devm_thermal_of_cooling_device_register(dev,
+ NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
if (IS_ERR(fan->cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
+ dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(fan->cdev);
}
}
@@ -463,22 +508,11 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return 0;
}
-static int mlxreg_fan_remove(struct platform_device *pdev)
-{
- struct mlxreg_fan *fan = platform_get_drvdata(pdev);
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- thermal_cooling_device_unregister(fan->cdev);
-
- return 0;
-}
-
static struct platform_driver mlxreg_fan_driver = {
.driver = {
.name = "mlxreg-fan",
},
.probe = mlxreg_fan_probe,
- .remove = mlxreg_fan_remove,
};
module_platform_driver(mlxreg_fan_driver);
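
Two things change in mlxreg-fan above: the cooling device moves to
devm_thermal_of_cooling_device_register() (so the .remove callback goes away),
and the tachometer divider can now be taken from a capability register, where a
raw value of 1..64 scales the minimum divider of 283 and anything else falls
back to the 1132 default (283 * 4). A small user-space sketch of that selection
logic, mirroring mlxreg_fan_speed_divider_get():

	#include <stdio.h>

	#define TACHO_DIV_MIN		283
	#define TACHO_DIV_DEF		(TACHO_DIV_MIN * 4)	/* 1132 */
	#define TACHO_DIV_SCALE_MAX	64

	static unsigned int pick_divider(unsigned int regval)
	{
		if (regval > 0 && regval <= TACHO_DIV_SCALE_MAX)
			return regval * TACHO_DIV_MIN;

		return TACHO_DIV_DEF;	/* e.g. old hardware returning zero */
	}

	int main(void)
	{
		unsigned int samples[] = { 0, 4, 64, 65 };

		for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			printf("regval %u -> divider %u\n", samples[i],
			       pick_divider(samples[i]));
		return 0;
	}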
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index 82c7de7b4639..04516789b070 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -400,89 +400,53 @@ static int nct7904_detect(struct i2c_client *client,
return 0;
}
-static const u32 nct7904_in_config[] = {
- HWMON_I_INPUT, /* dummy, skipped in is_visible */
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- HWMON_I_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_in = {
- .type = hwmon_in,
- .config = nct7904_in_config,
-};
-
-static const u32 nct7904_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_fan = {
- .type = hwmon_fan,
- .config = nct7904_fan_config,
-};
-
-static const u32 nct7904_pwm_config[] = {
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_pwm = {
- .type = hwmon_pwm,
- .config = nct7904_pwm_config,
-};
-
-static const u32 nct7904_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- HWMON_T_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info nct7904_temp = {
- .type = hwmon_temp,
- .config = nct7904_temp_config,
-};
-
static const struct hwmon_channel_info *nct7904_info[] = {
- &nct7904_in,
- &nct7904_fan,
- &nct7904_pwm,
- &nct7904_temp,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT, /* dummy, skipped in is_visible */
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT),
NULL
};
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index b3b907bdfb63..09aaefa6fdb8 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -629,51 +629,33 @@ static umode_t npcm7xx_is_visible(const void *data,
}
}
-static const u32 npcm7xx_pwm_config[] = {
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_pwm = {
- .type = hwmon_pwm,
- .config = npcm7xx_pwm_config,
-};
-
-static const u32 npcm7xx_fan_config[] = {
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- HWMON_F_INPUT,
- 0
-};
-
-static const struct hwmon_channel_info npcm7xx_fan = {
- .type = hwmon_fan,
- .config = npcm7xx_fan_config,
-};
-
static const struct hwmon_channel_info *npcm7xx_info[] = {
- &npcm7xx_pwm,
- &npcm7xx_fan,
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT,
+ HWMON_F_INPUT),
NULL
};
@@ -864,10 +846,8 @@ static int npcm7xx_create_pwm_cooling(struct device *dev,
snprintf(cdev->name, THERMAL_NAME_LENGTH, "%pOFn%d", child,
pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &npcm7xx_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &npcm7xx_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
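
The npcm750 hunk above and the mlxreg-fan hunks earlier switch to
devm_thermal_of_cooling_device_register(): the cooling device is then
unregistered automatically when the driver unbinds, which is why
mlxreg_fan_remove() could be deleted. A minimal sketch of the pattern with a
hypothetical "demo" platform driver, not taken from either driver:

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/thermal.h>

	static int demo_get_max_state(struct thermal_cooling_device *cdev,
				      unsigned long *state)
	{
		*state = 10;
		return 0;
	}

	static int demo_get_cur_state(struct thermal_cooling_device *cdev,
				      unsigned long *state)
	{
		*state = 0;
		return 0;
	}

	static int demo_set_cur_state(struct thermal_cooling_device *cdev,
				      unsigned long state)
	{
		return 0;
	}

	static const struct thermal_cooling_device_ops demo_cooling_ops = {
		.get_max_state = demo_get_max_state,
		.get_cur_state = demo_get_cur_state,
		.set_cur_state = demo_set_cur_state,
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct thermal_cooling_device *cdev;

		/* devm-managed: no matching unregister call is needed */
		cdev = devm_thermal_of_cooling_device_register(&pdev->dev,
							       pdev->dev.of_node,
							       "demo-fan", NULL,
							       &demo_cooling_ops);
		return PTR_ERR_OR_ZERO(cdev);
	}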
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index f9abeeeead9e..fd47c36a52bc 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -629,29 +629,9 @@ static umode_t ntc_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 ntc_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info ntc_chip = {
- .type = hwmon_chip,
- .config = ntc_chip_config,
-};
-
-static const u32 ntc_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_TYPE,
- 0
-};
-
-static const struct hwmon_channel_info ntc_temp = {
- .type = hwmon_temp,
- .config = ntc_temp_config,
-};
-
static const struct hwmon_channel_info *ntc_info[] = {
- &ntc_chip,
- &ntc_temp,
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_TYPE),
NULL
};
diff --git a/drivers/hwmon/occ/Kconfig b/drivers/hwmon/occ/Kconfig
index 66686628fb53..1658634a053e 100644
--- a/drivers/hwmon/occ/Kconfig
+++ b/drivers/hwmon/occ/Kconfig
@@ -5,11 +5,14 @@
config SENSORS_OCC_P8_I2C
tristate "POWER8 OCC through I2C"
depends on I2C
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER8 processor. Communications with
- the OCC are established through I2C bus.
+ On-Chip Controller (OCC) on a POWER8 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P8, not the POWER processor itself. Communications with the OCC are
+ established through an I2C bus.
This driver can also be built as a module. If so, the module will be
called occ-p8-hwmon.
@@ -17,15 +20,17 @@ config SENSORS_OCC_P8_I2C
config SENSORS_OCC_P9_SBE
tristate "POWER9 OCC through SBE"
depends on FSI_OCC
+ depends on ARM || ARM64 || COMPILE_TEST
select SENSORS_OCC
help
This option enables support for monitoring sensors provided by the
- On-Chip Controller (OCC) on a POWER9 processor. Communications with
- the OCC are established through SBE fifo on an FSI bus.
+ On-Chip Controller (OCC) on a POWER9 processor. However, this driver
+ can only run on a baseboard management controller (BMC) connected to
+ the P9, not the POWER processor itself. Communications with the OCC are
+ established through the SBE FIFO on an FSI bus.
This driver can also be built as a module. If so, the module will be
called occ-p9-hwmon.
config SENSORS_OCC
- bool "POWER On-Chip Controller"
- depends on SENSORS_OCC_P8_I2C || SENSORS_OCC_P9_SBE
+ tristate
diff --git a/drivers/hwmon/occ/Makefile b/drivers/hwmon/occ/Makefile
index 3fec12ddc7e7..493588d5a9d3 100644
--- a/drivers/hwmon/occ/Makefile
+++ b/drivers/hwmon/occ/Makefile
@@ -1,5 +1,7 @@
-occ-p8-hwmon-objs := common.o sysfs.o p8_i2c.o
-occ-p9-hwmon-objs := common.o sysfs.o p9_sbe.o
+occ-hwmon-common-objs := common.o sysfs.o
+occ-p8-hwmon-objs := p8_i2c.o
+occ-p9-hwmon-objs := p9_sbe.o
+obj-$(CONFIG_SENSORS_OCC) += occ-hwmon-common.o
obj-$(CONFIG_SENSORS_OCC_P8_I2C) += occ-p8-hwmon.o
obj-$(CONFIG_SENSORS_OCC_P9_SBE) += occ-p9-hwmon.o
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 4679acb4918e..13a6290c8d25 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -2,11 +2,13 @@
// Copyright IBM Corp 2019
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <asm/unaligned.h>
@@ -139,6 +141,7 @@ static int occ_poll(struct occ *occ)
/* mutex should already be locked if necessary */
rc = occ->send_cmd(occ, cmd);
if (rc) {
+ occ->last_error = rc;
if (occ->error_count++ > OCC_ERROR_COUNT_THRESHOLD)
occ->error = rc;
@@ -147,6 +150,7 @@ static int occ_poll(struct occ *occ)
/* clear error since communication was successful */
occ->error_count = 0;
+ occ->last_error = 0;
occ->error = 0;
/* check for safe state */
@@ -208,6 +212,8 @@ int occ_update_response(struct occ *occ)
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
+ } else {
+ rc = occ->last_error;
}
mutex_unlock(&occ->lock);
@@ -1099,3 +1105,8 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
+EXPORT_SYMBOL_GPL(occ_setup);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("Common OCC hwmon code");
+MODULE_LICENSE("GPL");
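
With SENSORS_OCC turned into a hidden tristate, common.o and sysfs.o now build into their own occ-hwmon-common module, which is why the shared entry points gain EXPORT_SYMBOL_GPL() and the file gains MODULE_AUTHOR()/MODULE_LICENSE() boilerplate. A minimal, hypothetical sketch of the consumer side (the real p8_i2c.c/p9_sbe.c probe paths differ):

/* hypothetical consumer, built as a separate module like occ-p8-hwmon.ko */
#include <linux/module.h>
#include "common.h"

static int example_bind(struct device *dev, struct occ *occ)
{
        occ->bus_dev = dev;
        /* resolved against EXPORT_SYMBOL_GPL(occ_setup) in occ-hwmon-common.ko */
        return occ_setup(occ, "example_occ_hwmon");
}

MODULE_LICENSE("GPL");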
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index ed2cf4245295..fc13f3c73c47 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -106,7 +106,8 @@ struct occ {
struct attribute_group group;
const struct attribute_group *groups[2];
- int error; /* latest transfer error */
+ int error; /* final transfer error after retry */
+ int last_error; /* latest transfer error */
unsigned int error_count; /* number of xfr errors observed */
unsigned long last_safe; /* time OCC entered "safe" state */
diff --git a/drivers/hwmon/occ/sysfs.c b/drivers/hwmon/occ/sysfs.c
index fe3d15e416e7..c73be0747e66 100644
--- a/drivers/hwmon/occ/sysfs.c
+++ b/drivers/hwmon/occ/sysfs.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
@@ -42,16 +43,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
val = !!(header->status & OCC_STAT_ACTIVE);
break;
case 2:
- val = !!(header->status & OCC_EXT_STAT_DVFS_OT);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_OT);
break;
case 3:
- val = !!(header->status & OCC_EXT_STAT_DVFS_POWER);
+ val = !!(header->ext_status & OCC_EXT_STAT_DVFS_POWER);
break;
case 4:
- val = !!(header->status & OCC_EXT_STAT_MEM_THROTTLE);
+ val = !!(header->ext_status & OCC_EXT_STAT_MEM_THROTTLE);
break;
case 5:
- val = !!(header->status & OCC_EXT_STAT_QUICK_DROP);
+ val = !!(header->ext_status & OCC_EXT_STAT_QUICK_DROP);
break;
case 6:
val = header->occ_state;
@@ -62,9 +63,6 @@ static ssize_t occ_sysfs_show(struct device *dev,
else
val = 1;
break;
- case 8:
- val = occ->error;
- break;
default:
return -EINVAL;
}
@@ -72,6 +70,16 @@ static ssize_t occ_sysfs_show(struct device *dev,
return snprintf(buf, PAGE_SIZE - 1, "%d\n", val);
}
+static ssize_t occ_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct occ *occ = dev_get_drvdata(dev);
+
+ occ_update_response(occ);
+
+ return snprintf(buf, PAGE_SIZE - 1, "%d\n", occ->error);
+}
+
static SENSOR_DEVICE_ATTR(occ_master, 0444, occ_sysfs_show, NULL, 0);
static SENSOR_DEVICE_ATTR(occ_active, 0444, occ_sysfs_show, NULL, 1);
static SENSOR_DEVICE_ATTR(occ_dvfs_overtemp, 0444, occ_sysfs_show, NULL, 2);
@@ -80,7 +88,7 @@ static SENSOR_DEVICE_ATTR(occ_mem_throttle, 0444, occ_sysfs_show, NULL, 4);
static SENSOR_DEVICE_ATTR(occ_quick_pwr_drop, 0444, occ_sysfs_show, NULL, 5);
static SENSOR_DEVICE_ATTR(occ_state, 0444, occ_sysfs_show, NULL, 6);
static SENSOR_DEVICE_ATTR(occs_present, 0444, occ_sysfs_show, NULL, 7);
-static SENSOR_DEVICE_ATTR(occ_error, 0444, occ_sysfs_show, NULL, 8);
+static DEVICE_ATTR_RO(occ_error);
static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_master.dev_attr.attr,
@@ -91,7 +99,7 @@ static struct attribute *occ_attributes[] = {
&sensor_dev_attr_occ_quick_pwr_drop.dev_attr.attr,
&sensor_dev_attr_occ_state.dev_attr.attr,
&sensor_dev_attr_occs_present.dev_attr.attr,
- &sensor_dev_attr_occ_error.dev_attr.attr,
+ &dev_attr_occ_error.attr,
NULL
};
@@ -155,7 +163,7 @@ void occ_sysfs_poll_done(struct occ *occ)
}
if (occ->error && occ->error != occ->prev_error) {
- name = sensor_dev_attr_occ_error.dev_attr.attr.name;
+ name = dev_attr_occ_error.attr.name;
sysfs_notify(&occ->bus_dev->kobj, NULL, name);
}
@@ -177,3 +185,4 @@ void occ_shutdown(struct occ *occ)
{
sysfs_remove_group(&occ->bus_dev->kobj, &occ_sysfs);
}
+EXPORT_SYMBOL_GPL(occ_shutdown);
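
The occ_error attribute switches from the index-multiplexed occ_sysfs_show() to a dedicated DEVICE_ATTR_RO(), which binds by name: DEVICE_ATTR_RO(occ_error) expects a show routine called occ_error_show() and emits a struct device_attribute named dev_attr_occ_error, which is why the attribute table and the sysfs_notify() call now reference dev_attr_occ_error.attr. A minimal sketch of the pattern with a hypothetical attribute named foo:

static ssize_t foo_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}
/* expands to struct device_attribute dev_attr_foo = __ATTR_RO(foo),
 * i.e. mode 0444 wired to foo_show() by name */
static DEVICE_ATTR_RO(foo);

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};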
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index d1a3f2040c00..58eee8fa3e6d 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
#define LD_IN 1
#define LD_TEMP 1
+static inline int superio_enter(int sioaddr)
+{
+ if (!request_muxed_region(sioaddr, 2, DRVNAME))
+ return -EBUSY;
+ return 0;
+}
+
static inline void superio_outb(int sioaddr, int reg, int val)
{
outb(reg, sioaddr);
@@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
{
outb(0x02, sioaddr);
outb(0x02, sioaddr + 1);
+ release_region(sioaddr, 2);
}
/*
@@ -1195,7 +1203,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
{
u16 val;
u8 cfg, cfg_b;
- int i, err = 0;
+ int i, err;
+
+ err = superio_enter(sioaddr);
+ if (err)
+ return err;
/* Identify device */
val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
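
request_muxed_region() is what lets several Super-I/O drivers share the same index/data port pair: the call waits for other muxed holders of the range and fails only if it cannot be obtained, and superio_exit() now releases it again. The resulting enter/exit discipline looks like this minimal sketch (example_sio_read_devid() is a hypothetical helper reusing the pc87427 accessors above):

static int example_sio_read_devid(int sioaddr)
{
        int err, devid;

        err = superio_enter(sioaddr);   /* request_muxed_region() */
        if (err)
                return err;             /* -EBUSY, nothing to release */

        devid = superio_inb(sioaddr, SIOREG_DEVID);

        superio_exit(sioaddr);          /* writes the exit key, then release_region() */
        return devid;
}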
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 629cb45f8557..7edab7e30eaf 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -54,6 +54,24 @@ config SENSORS_IR35221
This driver can also be built as a module. If so, the module will
be called ir35221.
+config SENSORS_IR38064
+ tristate "Infineon IR38064"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ IR38064.
+
+ This driver can also be built as a module. If so, the module will
+ be called ir38064.
+
+config SENSORS_ISL68137
+ tristate "Intersil ISL68137"
+ help
+ If you say yes here you get hardware monitoring support for Intersil
+ ISL68137.
+
+ This driver can also be built as a module. If so, the module will
+ be called isl68137.
+
config SENSORS_LM25066
tristate "National Semiconductor LM25066 and compatibles"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index ea0e39518c21..2219b9300316 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,8 @@ obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
obj-$(CONFIG_SENSORS_ADM1275) += adm1275.o
obj-$(CONFIG_SENSORS_IBM_CFFPS) += ibm-cffps.o
obj-$(CONFIG_SENSORS_IR35221) += ir35221.o
+obj-$(CONFIG_SENSORS_IR38064) += ir38064.o
+obj-$(CONFIG_SENSORS_ISL68137) += isl68137.o
obj-$(CONFIG_SENSORS_LM25066) += lm25066.o
obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o
obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
new file mode 100644
index 000000000000..1820f5077f66
--- /dev/null
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Infineon IR38064
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ * VOUT_MODE is not supported by the device. The driver fakes VOUT linear16
+ * mode with exponent value -8 as direct mode with m=256/b=0/R=0.
+ *
+ * The device supports VOUT_PEAK, IOUT_PEAK, and TEMPERATURE_PEAK, however
+ * this driver does not currently support them.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info ir38064_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .m[PSC_VOLTAGE_OUT] = 256,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT,
+};
+
+static int ir38064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ir38064_info);
+}
+
+static const struct i2c_device_id ir38064_id[] = {
+ {"ir38064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ir38064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ir38064_driver = {
+ .driver = {
+ .name = "ir38064",
+ },
+ .probe = ir38064_probe,
+ .remove = pmbus_do_remove,
+ .id_table = ir38064_id,
+};
+
+module_i2c_driver(ir38064_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon IR38064");
+MODULE_LICENSE("GPL");
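
The m=256/b=0/R=0 coefficients make the direct-mode conversion reproduce the value the hardware actually encodes: linear16 with an implied exponent of -8 means volts = mantissa / 256, and the PMBus core's direct formula X = (Y * 10^-R - b) / m gives the same number. A small worked example, assuming the core's usual milli-unit scaling for voltage channels:

#include <stdio.h>

int main(void)
{
        unsigned int raw = 0x0180;      /* READ_VOUT mantissa = 384 */

        /* linear16, exponent -8:  384 / 256        = 1.5 V
         * direct, m=256,b=0,R=0:  384 * 1000 / 256 = 1500 mV,
         * which is what inN_input then reports */
        printf("%u mV\n", raw * 1000 / 256);
        return 0;
}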
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
new file mode 100644
index 000000000000..515596c92fe1
--- /dev/null
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Intersil ISL68137
+ *
+ * Copyright (c) 2017 Google Inc
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "pmbus.h"
+
+#define ISL68137_VOUT_AVS 0x30
+
+static ssize_t isl68137_avs_enable_show_page(struct i2c_client *client,
+ int page,
+ char *buf)
+{
+ int val = pmbus_read_byte_data(client, page, PMBUS_OPERATION);
+
+ return sprintf(buf, "%d\n",
+ (val & ISL68137_VOUT_AVS) == ISL68137_VOUT_AVS ? 1 : 0);
+}
+
+static ssize_t isl68137_avs_enable_store_page(struct i2c_client *client,
+ int page,
+ const char *buf, size_t count)
+{
+ int rc, op_val;
+ bool result;
+
+ rc = kstrtobool(buf, &result);
+ if (rc)
+ return rc;
+
+ op_val = result ? ISL68137_VOUT_AVS : 0;
+
+ /*
+ * Writes to VOUT setpoint over AVSBus will persist after the VRM is
+ * switched to PMBus control. Switching back to AVSBus control
+ * restores this persisted setpoint rather than re-initializing to
+ * PMBus VOUT_COMMAND. Writing VOUT_COMMAND first over PMBus before
+ * enabling AVS control is the workaround.
+ */
+ if (op_val == ISL68137_VOUT_AVS) {
+ rc = pmbus_read_word_data(client, page, PMBUS_VOUT_COMMAND);
+ if (rc < 0)
+ return rc;
+
+ rc = pmbus_write_word_data(client, page, PMBUS_VOUT_COMMAND,
+ rc);
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = pmbus_update_byte_data(client, page, PMBUS_OPERATION,
+ ISL68137_VOUT_AVS, op_val);
+
+ return (rc < 0) ? rc : count;
+}
+
+static ssize_t isl68137_avs_enable_show(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_show_page(client, attr->index, buf);
+}
+
+static ssize_t isl68137_avs_enable_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+ return isl68137_avs_enable_store_page(client, attr->index, buf, count);
+}
+
+static SENSOR_DEVICE_ATTR_RW(avs0_enable, isl68137_avs_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(avs1_enable, isl68137_avs_enable, 1);
+
+static struct attribute *enable_attrs[] = {
+ &sensor_dev_attr_avs0_enable.dev_attr.attr,
+ &sensor_dev_attr_avs1_enable.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group enable_group = {
+ .attrs = enable_attrs,
+};
+
+static const struct attribute_group *attribute_groups[] = {
+ &enable_group,
+ NULL,
+};
+
+static struct pmbus_driver_info isl68137_info = {
+ .pages = 2,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_IN] = 1,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 2,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 1,
+ .m[PSC_POWER] = 1,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+ | PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_POUT,
+ .groups = attribute_groups,
+};
+
+static int isl68137_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &isl68137_info);
+}
+
+static const struct i2c_device_id isl68137_id[] = {
+ {"isl68137", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, isl68137_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver isl68137_driver = {
+ .driver = {
+ .name = "isl68137",
+ },
+ .probe = isl68137_probe,
+ .remove = pmbus_do_remove,
+ .id_table = isl68137_id,
+};
+
+module_i2c_driver(isl68137_driver);
+
+MODULE_AUTHOR("Maxim Sloyko <maxims@google.com>");
+MODULE_DESCRIPTION("PMBus driver for Intersil ISL68137");
+MODULE_LICENSE("GPL");
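
The avs0_enable/avs1_enable files are ordinary device attributes attached to the hwmon device through the new info->groups hook (see the pmbus.h and pmbus_core.c changes further down), so they sit next to the generated inN_*/currN_* files. A hypothetical userspace sketch; the hwmon index, and therefore the path, is an assumption that depends on probe order:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical path; substitute the hwmon node of the ISL68137 */
        int fd = open("/sys/class/hwmon/hwmon3/avs0_enable", O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, "1", 1);      /* rail 0: hand VOUT control back to AVSBus */
        close(fd);
        return 0;
}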
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 53db78753a0d..715d4ab57516 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -26,6 +26,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/log2.h>
#include "pmbus.h"
enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
@@ -39,12 +40,15 @@ enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_CLEAR_PIN_PEAK 0xd6
#define LM25066_DEVICE_SETUP 0xd9
#define LM25066_READ_AVG_VIN 0xdc
+#define LM25066_SAMPLES_FOR_AVG 0xdb
#define LM25066_READ_AVG_VOUT 0xdd
#define LM25066_READ_AVG_IIN 0xde
#define LM25066_READ_AVG_PIN 0xdf
#define LM25066_DEV_SETUP_CL BIT(4) /* Current limit */
+#define LM25066_SAMPLES_FOR_AVG_MAX 4096
+
/* LM25056 only */
#define LM25056_VAUX_OV_WARN_LIMIT 0xe3
@@ -284,6 +288,12 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = 0;
break;
+ case PMBUS_VIRT_SAMPLES:
+ ret = pmbus_read_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG);
+ if (ret < 0)
+ break;
+ ret = 1 << ret;
+ break;
default:
ret = -ENODATA;
break;
@@ -398,6 +408,11 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
case PMBUS_VIRT_RESET_PIN_HISTORY:
ret = pmbus_write_byte(client, 0, LM25066_CLEAR_PIN_PEAK);
break;
+ case PMBUS_VIRT_SAMPLES:
+ word = clamp_val(word, 1, LM25066_SAMPLES_FOR_AVG_MAX);
+ ret = pmbus_write_byte_data(client, 0, LM25066_SAMPLES_FOR_AVG,
+ ilog2(word));
+ break;
default:
ret = -ENODATA;
break;
@@ -438,7 +453,7 @@ static int lm25066_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VMON
| PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | PMBUS_HAVE_SAMPLES;
if (data->id == lm25056) {
info->func[0] |= PMBUS_HAVE_STATUS_VMON;
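
The LM25066 stores the averaging depth as a power-of-two exponent, which is why the read path returns 1 << ret and the write path stores ilog2(word); requests that are not a power of two are silently rounded down. A small worked sketch of the round trip, assuming the driver's clamp range of 1..4096:

#include <stdio.h>

/* mirrors ilog2() followed by 1 << n: floor to the nearest power of two */
static unsigned int samples_roundtrip(unsigned int requested)
{
        unsigned int exp = 0;

        while ((2u << exp) <= requested)        /* floor(log2(requested)) */
                exp++;
        return 1u << exp;                       /* what a later read reports */
}

int main(void)
{
        /* writing 100 reads back as 64; 4096 (the clamp ceiling) is exact */
        printf("%u %u\n", samples_roundtrip(100), samples_roundtrip(4096));
        return 0;
}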
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 1d24397d36ec..59f85658313c 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -217,6 +217,20 @@ enum pmbus_regs {
PMBUS_VIRT_PWM_ENABLE_2,
PMBUS_VIRT_PWM_ENABLE_3,
PMBUS_VIRT_PWM_ENABLE_4,
+
+ /* Samples for average
+ *
+ * Drivers wanting to expose functionality for changing the number of
+ * samples used for average values should implement support in
+ * {read,write}_word_data callback for either PMBUS_VIRT_SAMPLES if it
+ * applies to all types of measurements, or any number of specific
+ * PMBUS_VIRT_*_SAMPLES registers to allow for individual control.
+ */
+ PMBUS_VIRT_SAMPLES,
+ PMBUS_VIRT_IN_SAMPLES,
+ PMBUS_VIRT_CURR_SAMPLES,
+ PMBUS_VIRT_POWER_SAMPLES,
+ PMBUS_VIRT_TEMP_SAMPLES,
};
/*
@@ -371,6 +385,7 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_VMON BIT(19)
#define PMBUS_HAVE_PWM12 BIT(20)
#define PMBUS_HAVE_PWM34 BIT(21)
+#define PMBUS_HAVE_SAMPLES BIT(22)
#define PMBUS_PAGE_VIRTUAL BIT(31)
@@ -417,6 +432,9 @@ struct pmbus_driver_info {
/* Regulator functionality, if supported by this chip driver. */
int num_regulators;
const struct regulator_desc *reg_desc;
+
+ /* custom attributes */
+ const struct attribute_group **groups;
};
/* Regulator ops */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 2e2b5851139c..32a74b8be6bd 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -103,7 +103,7 @@ struct pmbus_data {
int max_attributes;
int num_attributes;
struct attribute_group group;
- const struct attribute_group *groups[2];
+ const struct attribute_group **groups;
struct dentry *debugfs; /* debugfs device directory */
struct pmbus_sensor *sensors;
@@ -1073,7 +1073,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
name, seq, type);
boolean->s1 = s1;
boolean->s2 = s2;
- pmbus_attr_init(a, boolean->name, S_IRUGO, pmbus_show_boolean, NULL,
+ pmbus_attr_init(a, boolean->name, 0444, pmbus_show_boolean, NULL,
(reg << 16) | mask);
return pmbus_add_attribute(data, &a->dev_attr.attr);
@@ -1107,7 +1107,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
sensor->update = update;
sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
- readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
+ readonly ? 0444 : 0644,
pmbus_show_sensor, pmbus_set_sensor);
if (pmbus_add_attribute(data, &a->attr))
@@ -1139,7 +1139,7 @@ static int pmbus_add_label(struct pmbus_data *data,
snprintf(label->label, sizeof(label->label), "%s%d", lstring,
index);
- pmbus_dev_attr_init(a, label->name, S_IRUGO, pmbus_show_label, NULL);
+ pmbus_dev_attr_init(a, label->name, 0444, pmbus_show_label, NULL);
return pmbus_add_attribute(data, &a->attr);
}
@@ -1901,6 +1901,112 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
return 0;
}
+struct pmbus_samples_attr {
+ int reg;
+ char *name;
+};
+
+struct pmbus_samples_reg {
+ int page;
+ struct pmbus_samples_attr *attr;
+ struct device_attribute dev_attr;
+};
+
+static struct pmbus_samples_attr pmbus_samples_registers[] = {
+ {
+ .reg = PMBUS_VIRT_SAMPLES,
+ .name = "samples",
+ }, {
+ .reg = PMBUS_VIRT_IN_SAMPLES,
+ .name = "in_samples",
+ }, {
+ .reg = PMBUS_VIRT_CURR_SAMPLES,
+ .name = "curr_samples",
+ }, {
+ .reg = PMBUS_VIRT_POWER_SAMPLES,
+ .name = "power_samples",
+ }, {
+ .reg = PMBUS_VIRT_TEMP_SAMPLES,
+ .name = "temp_samples",
+ }
+};
+
+#define to_samples_reg(x) container_of(x, struct pmbus_samples_reg, dev_attr)
+
+static ssize_t pmbus_show_samples(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ int val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ val = _pmbus_read_word_data(client, reg->page, reg->attr->reg);
+ if (val < 0)
+ return val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_set_samples(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ int ret;
+ long val;
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+
+ if (kstrtol(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
+
+ return ret ? : count;
+}
+
+static int pmbus_add_samples_attr(struct pmbus_data *data, int page,
+ struct pmbus_samples_attr *attr)
+{
+ struct pmbus_samples_reg *reg;
+
+ reg = devm_kzalloc(data->dev, sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ reg->attr = attr;
+ reg->page = page;
+
+ pmbus_dev_attr_init(&reg->dev_attr, attr->name, 0644,
+ pmbus_show_samples, pmbus_set_samples);
+
+ return pmbus_add_attribute(data, &reg->dev_attr.attr);
+}
+
+static int pmbus_add_samples_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int s;
+
+ if (!(info->func[0] & PMBUS_HAVE_SAMPLES))
+ return 0;
+
+ for (s = 0; s < ARRAY_SIZE(pmbus_samples_registers); s++) {
+ struct pmbus_samples_attr *attr;
+ int ret;
+
+ attr = &pmbus_samples_registers[s];
+ if (!pmbus_check_word_register(client, 0, attr->reg))
+ continue;
+
+ ret = pmbus_add_samples_attr(data, 0, attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int pmbus_find_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1932,6 +2038,10 @@ static int pmbus_find_attributes(struct i2c_client *client,
/* Fans */
ret = pmbus_add_fan_attributes(client, data);
+ if (ret)
+ return ret;
+
+ ret = pmbus_add_samples_attributes(client, data);
return ret;
}
@@ -2305,6 +2415,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct device *dev = &client->dev;
const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
struct pmbus_data *data;
+ size_t groups_num = 0;
int ret;
if (!info)
@@ -2319,6 +2430,15 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
if (!data)
return -ENOMEM;
+ if (info->groups)
+ while (info->groups[groups_num])
+ groups_num++;
+
+ data->groups = devm_kcalloc(dev, groups_num + 2, sizeof(void *),
+ GFP_KERNEL);
+ if (!data->groups)
+ return -ENOMEM;
+
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->dev = dev;
@@ -2346,6 +2466,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
}
data->groups[0] = &data->group;
+ memcpy(data->groups + 1, info->groups, sizeof(void *) * groups_num);
data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(data->hwmon_dev)) {
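
The probe change sizes data->groups as groups_num + 2 so there is always room for the core-generated group at index 0, the driver-supplied groups copied in behind it, and the NULL terminator that hwmon_device_register_with_groups() expects (devm_kcalloc() zero-fills, so the terminator comes for free). A standalone sketch of the same assembly logic, with strings standing in for the attribute_group pointers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *generated = "core-generated group";         /* &data->group */
        const char *extra[] = { "enable_group", NULL };         /* info->groups */
        size_t groups_num = 0, i;

        while (extra[groups_num])
                groups_num++;

        /* groups_num + 2: slot 0, one per extra group, plus the terminator */
        const char **groups = calloc(groups_num + 2, sizeof(*groups));

        if (!groups)
                return 1;
        groups[0] = generated;
        memcpy(groups + 1, extra, sizeof(*groups) * groups_num);

        for (i = 0; groups[i]; i++)
                printf("groups[%zu] = %s\n", i, groups[i]);
        free(groups);
        return 0;
}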
diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
index 2bc352c5357f..3fd5105ee9ae 100644
--- a/drivers/hwmon/pmbus/tps53679.c
+++ b/drivers/hwmon/pmbus/tps53679.c
@@ -97,7 +97,7 @@ static const struct i2c_device_id tps53679_id[] = {
MODULE_DEVICE_TABLE(i2c, tps53679_id);
-static const struct of_device_id tps53679_of_match[] = {
+static const struct of_device_id __maybe_unused tps53679_of_match[] = {
{.compatible = "ti,tps53679"},
{}
};
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ae93885fccd8..40a3e3d0e661 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -151,7 +151,7 @@ static const struct i2c_device_id ucd9000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9000_id);
-static const struct of_device_id ucd9000_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9000_of_match[] = {
{
.compatible = "ti,ucd9000",
.data = (void *)ucd9000
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index 3ed94585837a..fec7656700db 100644
--- a/drivers/hwmon/pmbus/ucd9200.c
+++ b/drivers/hwmon/pmbus/ucd9200.c
@@ -47,7 +47,7 @@ static const struct i2c_device_id ucd9200_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ucd9200_id);
-static const struct of_device_id ucd9200_of_match[] = {
+static const struct of_device_id __maybe_unused ucd9200_of_match[] = {
{
.compatible = "ti,cd9200",
.data = (void *)ucd9200
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 167221c7628a..5fb2745f0226 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -18,6 +18,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -26,6 +27,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
+#include <linux/timer.h>
#define MAX_PWM 255
@@ -33,6 +35,14 @@ struct pwm_fan_ctx {
struct mutex lock;
struct pwm_device *pwm;
struct regulator *reg_en;
+
+ int irq;
+ atomic_t pulses;
+ unsigned int rpm;
+ u8 pulses_per_revolution;
+ ktime_t sample_start;
+ struct timer_list rpm_timer;
+
unsigned int pwm_value;
unsigned int pwm_fan_state;
unsigned int pwm_fan_max_state;
@@ -40,6 +50,32 @@ struct pwm_fan_ctx {
struct thermal_cooling_device *cdev;
};
+/* This handler assumes self resetting edge triggered interrupt. */
+static irqreturn_t pulse_handler(int irq, void *dev_id)
+{
+ struct pwm_fan_ctx *ctx = dev_id;
+
+ atomic_inc(&ctx->pulses);
+
+ return IRQ_HANDLED;
+}
+
+static void sample_timer(struct timer_list *t)
+{
+ struct pwm_fan_ctx *ctx = from_timer(ctx, t, rpm_timer);
+ int pulses;
+ u64 tmp;
+
+ pulses = atomic_read(&ctx->pulses);
+ atomic_sub(pulses, &ctx->pulses);
+ tmp = (u64)pulses * ktime_ms_delta(ktime_get(), ctx->sample_start) * 60;
+ do_div(tmp, ctx->pulses_per_revolution * 1000);
+ ctx->rpm = tmp;
+
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
+}
+
static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
unsigned long period;
@@ -100,15 +136,45 @@ static ssize_t pwm_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", ctx->pwm_value);
}
+static ssize_t rpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", ctx->rpm);
+}
static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, rpm, 0);
static struct attribute *pwm_fan_attrs[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
NULL,
};
-ATTRIBUTE_GROUPS(pwm_fan);
+static umode_t pwm_fan_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+
+ /* Hide fan_input in case no interrupt is available */
+ if (n == 1 && ctx->irq <= 0)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group pwm_fan_group = {
+ .attrs = pwm_fan_attrs,
+ .is_visible = pwm_fan_attrs_visible,
+};
+
+static const struct attribute_group *pwm_fan_groups[] = {
+ &pwm_fan_group,
+ NULL,
+};
/* thermal cooling device callbacks */
static int pwm_fan_get_max_state(struct thermal_cooling_device *cdev,
@@ -207,33 +273,51 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
+static void pwm_fan_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void pwm_fan_pwm_disable(void *__ctx)
+{
+ struct pwm_fan_ctx *ctx = __ctx;
+ pwm_disable(ctx->pwm);
+ del_timer_sync(&ctx->rpm_timer);
+}
+
static int pwm_fan_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
struct pwm_state state = { };
+ u32 ppr = 2;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
+ ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
if (IS_ERR(ctx->pwm)) {
ret = PTR_ERR(ctx->pwm);
if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+ dev_err(dev, "Could not get PWM: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, ctx);
- ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq == -EPROBE_DEFER)
+ return ctx->irq;
+
+ ctx->reg_en = devm_regulator_get_optional(dev, "fan");
if (IS_ERR(ctx->reg_en)) {
if (PTR_ERR(ctx->reg_en) != -ENODEV)
return PTR_ERR(ctx->reg_en);
@@ -242,10 +326,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(ctx->reg_en);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable fan supply: %d\n", ret);
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
return ret;
}
+ devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
+ ctx->reg_en);
}
ctx->pwm_value = MAX_PWM;
@@ -257,62 +342,57 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure PWM\n");
- goto err_reg_disable;
+ dev_err(dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
+ }
+ timer_setup(&ctx->rpm_timer, sample_timer, 0);
+ devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
+
+ of_property_read_u32(dev->of_node, "pulses-per-revolution", &ppr);
+ ctx->pulses_per_revolution = ppr;
+ if (!ctx->pulses_per_revolution) {
+ dev_err(dev, "pulses-per-revolution can't be zero.\n");
+ return -EINVAL;
+ }
+
+ if (ctx->irq > 0) {
+ ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
+ pdev->name, ctx);
+ if (ret) {
+ dev_err(dev, "Failed to request interrupt: %d\n", ret);
+ return ret;
+ }
+ ctx->sample_start = ktime_get();
+ mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
- hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
+ hwmon = devm_hwmon_device_register_with_groups(dev, "pwmfan",
ctx, pwm_fan_groups);
if (IS_ERR(hwmon)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
- ret = PTR_ERR(hwmon);
- goto err_pwm_disable;
+ dev_err(dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwmon);
}
- ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
+ ret = pwm_fan_of_get_cooling_data(dev, ctx);
if (ret)
return ret;
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
- cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
- "pwm-fan", ctx,
- &pwm_fan_cooling_ops);
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ dev->of_node, "pwm-fan", ctx, &pwm_fan_cooling_ops);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev,
- "Failed to register pwm-fan as cooling device");
ret = PTR_ERR(cdev);
- goto err_pwm_disable;
+ dev_err(dev,
+ "Failed to register pwm-fan as cooling device: %d\n",
+ ret);
+ return ret;
}
ctx->cdev = cdev;
thermal_cdev_update(cdev);
}
return 0;
-
-err_pwm_disable:
- state.enabled = false;
- pwm_apply_state(ctx->pwm, &state);
-
-err_reg_disable:
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return ret;
-}
-
-static int pwm_fan_remove(struct platform_device *pdev)
-{
- struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
-
- thermal_cooling_device_unregister(ctx->cdev);
- if (ctx->pwm_value)
- pwm_disable(ctx->pwm);
-
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -380,7 +460,6 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
- .remove = pwm_fan_remove,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
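
Dropping pwm_fan_remove() works because every resource now has devm-managed teardown: the regulator and the PWM/timer pair get devm_add_action_or_reset() callbacks, and the IRQ, hwmon device and cooling device already use devm_* registration, so cleanup runs in reverse order on unbind or on any later probe failure. A minimal sketch of the action pattern, using a hypothetical clock resource rather than this driver's regulator or PWM:

static void example_clk_off(void *data)         /* hypothetical */
{
        clk_disable_unprepare(data);
}

static int example_probe_step(struct device *dev, struct clk *clk)
{
        int ret = clk_prepare_enable(clk);

        if (ret)
                return ret;
        /* undone automatically on probe failure or driver unbind,
         * in reverse registration order */
        return devm_add_action_or_reset(dev, example_clk_off, clk);
}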
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 0d0457245e7d..efe4bb1ff221 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -86,18 +86,9 @@ static umode_t rpi_is_visible(const void *_data, enum hwmon_sensor_types type,
return 0444;
}
-static const u32 rpi_in_config[] = {
- HWMON_I_LCRIT_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info rpi_in = {
- .type = hwmon_in,
- .config = rpi_in_config,
-};
-
static const struct hwmon_channel_info *rpi_info[] = {
- &rpi_in,
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_LCRIT_ALARM),
NULL
};
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 0c4710d35d16..0d65aa5985e2 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -98,7 +98,7 @@ static int s3c_hwmon_read_ch(struct device *dev,
static ssize_t s3c_hwmon_show_raw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct s3c_hwmon *adc = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *adc = dev_get_drvdata(dev);
struct sensor_device_attribute *sa = to_sensor_dev_attr(attr);
int ret;
@@ -164,7 +164,7 @@ static ssize_t s3c_hwmon_ch_show(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
+ struct s3c_hwmon *hwmon = dev_get_drvdata(dev);
struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
struct s3c_hwmon_chcfg *cfg;
int ret;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 39b41e35c2bf..7f4a63959730 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -10,7 +10,7 @@
*
* Copyright (c) 2007 Wouter Horre
*
- * For further information, see the Documentation/hwmon/sht15 file.
+ * For further information, see the Documentation/hwmon/sht15.rst file.
*/
#include <linux/interrupt.h>
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6d789aab54c9..44451b913292 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -67,7 +67,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -222,7 +221,7 @@ static struct platform_driver sis5595_driver = {
};
/* 4 Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -231,7 +230,7 @@ static ssize_t show_in(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr]));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -240,7 +239,7 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr]));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -249,8 +248,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr]));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -269,8 +268,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -289,19 +288,21 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* Temperature */
static ssize_t temp1_input_show(struct device *dev,
@@ -368,7 +369,7 @@ static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -378,7 +379,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -388,8 +389,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -408,7 +409,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
return count;
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
@@ -423,8 +424,8 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count)
{
struct sis5595_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
@@ -480,16 +481,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -500,21 +497,21 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sis5595_data *data = sis5595_update_device(dev);
int nr = to_sensor_dev_attr(da)->index;
return sprintf(buf, "%u\n", (data->alarms >> nr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 15);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 15);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -673,7 +670,6 @@ static int sis5595_remove(struct platform_device *pdev)
return 0;
}
-
/* ISA access must be locked explicitly. */
static int sis5595_read_value(struct sis5595_data *data, u8 reg)
{
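
The show_in -> in_show style renames in this file (and in smsc47m1/smsc47m192 below) are forced by the SENSOR_DEVICE_ATTR_RO/_RW helpers, which paste _show/_store onto the function prefix instead of taking explicit callbacks. Approximately, from include/linux/hwmon-sysfs.h:

/* approximate definitions; exact macro bodies may differ slightly */
#define SENSOR_DEVICE_ATTR_RO(_name, _func, _index)                     \
        SENSOR_DEVICE_ATTR(_name, 0444, _func##_show, NULL, _index)
#define SENSOR_DEVICE_ATTR_RW(_name, _func, _index)                     \
        SENSOR_DEVICE_ATTR(_name, 0644, _func##_show, _func##_store, _index)

/* so SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0) requires in_min_show()
 * and in_min_store(), which is exactly what the renames provide */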
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index c0775084dde0..60e193f2e970 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -72,14 +72,19 @@ static inline void superio_select(int ld)
superio_outb(0x07, ld);
}
-static inline void superio_enter(void)
+static inline int superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_DEVID 0x20
@@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
u8 id, rev;
char *name;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
switch (id) {
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c7b6a425e2c0..5f92eab24c62 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -73,16 +73,21 @@ superio_inb(int reg)
/* logical device for fans is 0x0A */
#define superio_select() superio_outb(0x07, 0x0A)
-static inline void
+static inline int
superio_enter(void)
{
+ if (!request_muxed_region(REG, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x55, REG);
+ return 0;
}
static inline void
superio_exit(void)
{
outb(0xAA, REG);
+ release_region(REG, 2);
}
#define SUPERIO_REG_ACT 0x30
@@ -202,8 +207,8 @@ static struct smsc47m1_data *smsc47m1_update_device(struct device *dev,
return data;
}
-static ssize_t get_fan(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -221,8 +226,8 @@ static ssize_t get_fan(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_min(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_min_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -232,32 +237,32 @@ static ssize_t get_fan_min(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", rpm);
}
-static ssize_t get_fan_div(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_div_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[attr->index]));
}
-static ssize_t get_fan_alarm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t fan_alarm_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
int bitnr = to_sensor_dev_attr(devattr)->index;
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t get_pwm(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[attr->index]));
}
-static ssize_t get_pwm_en(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static ssize_t pwm_en_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = smsc47m1_update_device(dev, 0);
@@ -271,8 +276,9 @@ static ssize_t alarms_show(struct device *dev,
return sprintf(buf, "%d\n", data->alarms);
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -307,8 +313,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
* of least surprise; the user doesn't expect the fan minimum to change just
* because the divider changed.
*/
-static ssize_t set_fan_div(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -370,8 +377,8 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -396,8 +403,9 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
return count;
}
-static ssize_t set_pwm_en(struct device *dev, struct device_attribute
- *devattr, const char *buf, size_t count)
+static ssize_t pwm_en_store(struct device *dev,
+ struct device_attribute *devattr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct smsc47m1_data *data = dev_get_drvdata(dev);
@@ -422,23 +430,24 @@ static ssize_t set_pwm_en(struct device *dev, struct device_attribute
return count;
}
-#define fan_present(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, get_fan, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- get_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- get_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_alarm, S_IRUGO, get_fan_alarm, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
- get_pwm, set_pwm, offset - 1); \
-static SENSOR_DEVICE_ATTR(pwm##offset##_enable, S_IRUGO | S_IWUSR, \
- get_pwm_en, set_pwm_en, offset - 1)
-
-fan_present(1);
-fan_present(2);
-fan_present(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, fan_alarm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_en, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, fan_alarm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_en, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, fan_alarm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_en, 2);
static DEVICE_ATTR_RO(alarms);
@@ -531,8 +540,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
unsigned short addr;
+ int err;
+
+ err = superio_enter();
+ if (err)
+ return err;
- superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
/*
@@ -608,13 +621,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
{
if ((sio_data->activate & 0x01) == 0) {
- superio_enter();
- superio_select();
-
- pr_info("Disabling device\n");
- superio_outb(SUPERIO_REG_ACT, sio_data->activate);
-
- superio_exit();
+ if (!superio_enter()) {
+ superio_select();
+ pr_info("Disabling device\n");
+ superio_outb(SUPERIO_REG_ACT, sio_data->activate);
+ superio_exit();
+ } else {
+ pr_warn("Failed to disable device\n");
+ }
}
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 6989408033ec..e5d9222b22f1 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -179,8 +179,8 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
}
/* Voltages */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -188,8 +188,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -197,8 +197,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -206,8 +206,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -228,8 +228,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -250,26 +250,34 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-
-show_in_offset(0)
-show_in_offset(1)
-show_in_offset(2)
-show_in_offset(3)
-show_in_offset(4)
-show_in_offset(5)
-show_in_offset(6)
-show_in_offset(7)
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -277,8 +285,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -286,8 +294,8 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -295,8 +303,9 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -317,8 +326,9 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -339,8 +349,8 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_offset(struct device *dev, struct device_attribute
- *attr, char *buf)
+static ssize_t temp_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -348,8 +358,9 @@ static ssize_t show_temp_offset(struct device *dev, struct device_attribute
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
-static ssize_t set_temp_offset(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t temp_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -385,19 +396,18 @@ static ssize_t set_temp_offset(struct device *dev, struct device_attribute
return count;
}
-#define show_temp_index(index) \
-static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \
- show_temp, NULL, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, index-1); \
-static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \
- show_temp_offset, set_temp_offset, index-1);
-
-show_temp_index(1)
-show_temp_index(2)
-show_temp_index(3)
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_offset, temp_offset, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_offset, temp_offset, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_offset, temp_offset, 2);
/* VID */
static ssize_t cpu0_vid_show(struct device *dev,
@@ -434,8 +444,8 @@ static ssize_t vrm_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vrm);
/* Alarms */
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -443,19 +453,19 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0x0010);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 0x0020);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0x0040);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 0x4000);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 0x8000);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0x0001);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0x0002);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 0x0004);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 0x0008);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 0x0100);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 0x0200);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 0x0400);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 0x0800);
static struct attribute *smsc47m192_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
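/*
 * A minimal sketch (not part of the patch; names such as demo_data are
 * illustrative) of the convention the SENSOR_DEVICE_ATTR_RO/_RW conversions
 * above rely on: SENSOR_DEVICE_ATTR_RO(<attr>, <func>, <index>) binds the
 * sysfs file "<attr>" to <func>_show(), the _RW variant additionally to
 * <func>_store(), and <index> is recovered with to_sensor_dev_attr().
 */
#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct demo_data {
	int in[8];			/* cached voltage readings, mV */
};

static ssize_t in_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;	/* third macro argument */
	struct demo_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", data->in[nr]);
}

/* read-only (0444) "in0_input" attribute calling in_show() with index 0 */
static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);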
diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c
index 90b60297f2f7..f670796b609a 100644
--- a/drivers/hwmon/stts751.c
+++ b/drivers/hwmon/stts751.c
@@ -85,7 +85,7 @@ static const struct i2c_device_id stts751_id[] = {
{ }
};
-static const struct of_device_id stts751_of_match[] = {
+static const struct of_device_id __maybe_unused stts751_of_match[] = {
{ .compatible = "stts751" },
{ },
};
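/*
 * Sketch of why the of_device_id tables in these drivers gain
 * __maybe_unused (driver name and compatible string below are hypothetical):
 * the tables are referenced through of_match_ptr(), which evaluates to NULL
 * when CONFIG_OF is disabled, so without the annotation the then-unreferenced
 * table would trigger -Wunused-const-variable.
 */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id __maybe_unused demo_of_match[] = {
	{ .compatible = "vendor,demo-sensor" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct i2c_driver demo_driver = {
	.driver = {
		.name = "demo-sensor",
		/* NULL -- and demo_of_match unused -- when CONFIG_OF=n */
		.of_match_table = of_match_ptr(demo_of_match),
	},
	/* .probe/.remove/.id_table omitted from this sketch */
};
module_i2c_driver(demo_driver);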
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index 6a0ee903127e..ae9942331cae 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -128,16 +128,16 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
return data;
}
-static ssize_t show_analog_out(struct device *dev,
+static ssize_t analog_out_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->analog_out);
}
-static ssize_t set_analog_out(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t analog_out_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct thmc50_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
@@ -166,14 +166,14 @@ static ssize_t set_analog_out(struct device *dev,
}
/* There is only one PWM mode = DC */
-static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t pwm_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "0\n");
}
/* Temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
@@ -181,16 +181,17 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", data->temp_input[nr] * 1000);
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_min[nr] * 1000);
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -210,16 +211,17 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[nr] * 1000);
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = dev_get_drvdata(dev);
@@ -239,16 +241,15 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_critical(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct thmc50_data *data = thmc50_update_device(dev);
return sprintf(buf, "%d\n", data->temp_critical[nr] * 1000);
}
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
@@ -257,29 +258,27 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}
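/*
 * Sketch (struct and function names illustrative) of the two index
 * conventions the alarm_show() callbacks in this patch rely on:
 * smsc47m192 passes a bit *mask* as the attribute index, while thmc50,
 * via686a, vt8231 and w83627hf pass a bit *number*.
 */
#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct demo_data {
	u16 alarms;			/* cached alarm status bits */
};

/* index is a mask, e.g. SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0x0010) */
static ssize_t alarm_mask_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct demo_data *data = dev_get_drvdata(dev);
	int mask = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (data->alarms & mask) ? 1 : 0);
}

/* index is a bit number, e.g. SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4) */
static ssize_t alarm_bit_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct demo_data *data = dev_get_drvdata(dev);
	int bitnr = to_sensor_dev_attr(attr)->index;

	return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}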
-#define temp_reg(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp, \
- NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_crit, S_IRUGO, \
- show_temp_critical, NULL, offset - 1);
-
-temp_reg(1);
-temp_reg(2);
-temp_reg(3);
-
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_analog_out,
- set_analog_out, 0);
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_min, temp_min, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_critical, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_min, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_crit, temp_critical, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_min, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RO(temp3_crit, temp_critical, 2);
+
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp2_fault, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(temp3_fault, alarm, 2);
+
+static SENSOR_DEVICE_ATTR_RW(pwm1, analog_out, 0);
+static SENSOR_DEVICE_ATTR_RO(pwm1_mode, pwm_mode, 0);
static struct attribute *thmc50_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 35523d315f25..f4ee55615dea 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -150,29 +150,11 @@ static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp102_chip_config[] = {
- HWMON_C_REGISTER_TZ,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_chip = {
- .type = hwmon_chip,
- .config = tmp102_chip_config,
-};
-
-static u32 tmp102_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST,
- 0
-};
-
-static const struct hwmon_channel_info tmp102_temp = {
- .type = hwmon_temp,
- .config = tmp102_temp_config,
-};
-
static const struct hwmon_channel_info *tmp102_info[] = {
- &tmp102_chip,
- &tmp102_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
@@ -321,7 +303,7 @@ static const struct i2c_device_id tmp102_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp102_id);
-static const struct of_device_id tmp102_of_match[] = {
+static const struct of_device_id __maybe_unused tmp102_of_match[] = {
{ .compatible = "ti,tmp102" },
{ },
};
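/*
 * Sketch of the HWMON_CHANNEL_INFO() shorthand used in the tmp102 (and
 * tmp108) hunks above: it replaces each hand-rolled u32 config array plus
 * struct hwmon_channel_info pair with a single entry. Everything named
 * "demo_*" is illustrative; the hwmon core symbols are real.
 */
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/types.h>

static umode_t demo_is_visible(const void *data, enum hwmon_sensor_types type,
			       u32 attr, int channel)
{
	return 0444;			/* expose everything read-only */
}

static int demo_read(struct device *dev, enum hwmon_sensor_types type,
		     u32 attr, int channel, long *val)
{
	*val = 25000;			/* a real driver reads the chip here */
	return 0;
}

static const struct hwmon_ops demo_hwmon_ops = {
	.is_visible = demo_is_visible,
	.read = demo_read,
};

static const struct hwmon_channel_info *demo_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
	NULL
};

/* passed to [devm_]hwmon_device_register_with_info() at probe time */
static const struct hwmon_chip_info demo_chip_info = {
	.ops = &demo_hwmon_ops,
	.info = demo_info,
};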
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index bda0fdc1eb53..a91726d33da8 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -170,7 +170,7 @@ static const struct i2c_device_id tmp103_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp103_id);
-static const struct of_device_id tmp103_of_match[] = {
+static const struct of_device_id __maybe_unused tmp103_of_match[] = {
{ .compatible = "ti,tmp103" },
{ },
};
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index 429bfeae4ca8..2447af704424 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -281,30 +281,13 @@ static umode_t tmp108_is_visible(const void *data, enum hwmon_sensor_types type,
}
}
-static u32 tmp108_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_chip = {
- .type = hwmon_chip,
- .config = tmp108_chip_config,
-};
-
-static u32 tmp108_temp_config[] = {
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_HYST
- | HWMON_T_MAX_HYST | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM,
- 0
-};
-
-static const struct hwmon_channel_info tmp108_temp = {
- .type = hwmon_temp,
- .config = tmp108_temp_config,
-};
-
static const struct hwmon_channel_info *tmp108_info[] = {
- &tmp108_chip,
- &tmp108_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN |
+ HWMON_T_MIN_HYST | HWMON_T_MAX_HYST |
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM),
NULL
};
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 2732a71f3b39..5e63010dd3a0 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -70,7 +70,7 @@ static const struct i2c_device_id tmp421_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
-static const struct of_device_id tmp421_of_match[] = {
+static const struct of_device_id __maybe_unused tmp421_of_match[] = {
{
.compatible = "ti,tmp421",
.data = (void *)2
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 81f35e3a06b8..259725dec37e 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -47,7 +47,6 @@
#include <linux/acpi.h>
#include <linux/io.h>
-
/*
* If force_addr is set to anything different from 0, we forcibly enable
* the device at the given address.
@@ -355,32 +354,32 @@ static void via686a_init_device(struct via686a_data *data);
/* following are the sysfs callback functions */
/* 7 voltage sensors */
-static ssize_t show_in(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in[nr], nr));
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_min[nr], nr));
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t in_max_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", IN_FROM_REG(data->in_max[nr], nr));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -398,8 +397,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t in_max_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -417,44 +416,48 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_in_offset(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset);
-show_in_offset(0);
-show_in_offset(1);
-show_in_offset(2);
-show_in_offset(3);
-show_in_offset(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
/* 3 temperatures */
-static ssize_t show_temp(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG10(data->temp[nr]));
}
-static ssize_t show_temp_over(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_over_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_over[nr]));
}
-static ssize_t show_temp_hyst(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t temp_hyst_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp_hyst[nr]));
}
-static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_over_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -472,8 +475,9 @@ static ssize_t set_temp_over(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t temp_hyst_store(struct device *dev,
+ struct device_attribute *da, const char *buf,
+ size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -491,29 +495,28 @@ static ssize_t set_temp_hyst(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-#define show_temp_offset(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_over, set_temp_over, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_hyst, set_temp_hyst, offset - 1);
-show_temp_offset(1);
-show_temp_offset(2);
-show_temp_offset(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_over, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_over, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_over, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_hyst, 2);
/* 2 Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -521,15 +524,15 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *da,
FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *da,
- char *buf) {
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *da,
+ char *buf) {
struct via686a_data *data = via686a_update_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_min_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -546,8 +549,8 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *da,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
- const char *buf, size_t count) {
+static ssize_t fan_div_store(struct device *dev, struct device_attribute *da,
+ const char *buf, size_t count) {
struct via686a_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
int nr = attr->index;
@@ -568,16 +571,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
return count;
}
-#define show_fan_offset(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1);
-
-show_fan_offset(1);
-show_fan_offset(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -589,23 +588,23 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct via686a_data *data = via686a_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 15);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 15);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
@@ -676,7 +675,6 @@ static struct platform_driver via686a_driver = {
.remove = via686a_remove,
};
-
/* This is called when the module is loaded */
static int via686a_probe(struct platform_device *pdev)
{
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 3a6bfa51cb94..95d5e8ec8b7f 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
outb(ldn, sio_cip + 1);
}
-static inline void superio_enter(int sio_cip)
+static inline int superio_enter(int sio_cip)
{
+ if (!request_muxed_region(sio_cip, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio_cip);
outb(0x87, sio_cip);
+
+ return 0;
}
static inline void superio_exit(int sio_cip)
{
outb(0xaa, sio_cip);
+ release_region(sio_cip, 2);
}
/* ---------------------------------------------------------------------
@@ -1282,11 +1288,14 @@ EXIT:
static int __init vt1211_find(int sio_cip, unsigned short *address)
{
- int err = -ENODEV;
+ int err;
int devid;
- superio_enter(sio_cip);
+ err = superio_enter(sio_cip);
+ if (err)
+ return err;
+ err = -ENODEV;
devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
if (devid != SIO_VT1211_ID)
goto EXIT;
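/*
 * Sketch of the superio_enter()/superio_exit() change above (DEMO_DRVNAME
 * and the standalone form are illustrative): the Super-I/O index/data port
 * pair is shared between drivers, so it is now claimed with
 * request_muxed_region() -- which may sleep until another holder releases
 * it -- and dropped again with release_region() in superio_exit().
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define DEMO_DRVNAME	"demo_superio"

static int superio_enter(int sio_cip)
{
	if (!request_muxed_region(sio_cip, 2, DEMO_DRVNAME))
		return -EBUSY;

	/* magic sequence unlocking the config space on these chips */
	outb(0x87, sio_cip);
	outb(0x87, sio_cip);

	return 0;
}

static void superio_exit(int sio_cip)
{
	outb(0xaa, sio_cip);		/* lock the config space again */
	release_region(sio_cip, 2);
}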
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 367b5eb53fb6..e2f1a80367e2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -192,8 +192,8 @@ static inline void vt8231_write_value(struct vt8231_data *data, u8 reg,
}
/* following are the sysfs callback functions */
-static ssize_t show_in(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -202,8 +202,8 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -212,8 +212,8 @@ static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", ((data->in_min[nr] - 3) * 10000) / 958);
}
-static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t in_max_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -222,8 +222,8 @@ static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", (((data->in_max[nr] - 3) * 10000) / 958));
}
-static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_min_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -242,8 +242,8 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t in_max_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -330,19 +330,21 @@ static ssize_t in5_max_store(struct device *dev,
return count;
}
-#define define_voltage_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
- show_in_min, set_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
- show_in_max, set_in_max, offset)
-
-define_voltage_sysfs(0);
-define_voltage_sysfs(1);
-define_voltage_sysfs(2);
-define_voltage_sysfs(3);
-define_voltage_sysfs(4);
+static SENSOR_DEVICE_ATTR_RO(in0_input, in, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_min, in_min, 0);
+static SENSOR_DEVICE_ATTR_RW(in0_max, in_max, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_input, in, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
static DEVICE_ATTR_RO(in5_input);
static DEVICE_ATTR_RW(in5_min);
@@ -407,8 +409,8 @@ static ssize_t temp1_max_hyst_store(struct device *dev,
return count;
}
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -416,8 +418,8 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
-static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_max_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -425,8 +427,8 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_max[nr]));
}
-static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t temp_min_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -434,8 +436,9 @@ static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", TEMP_MAXMIN_FROM_REG(data->temp_min[nr]));
}
-static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_max_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -453,8 +456,9 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t temp_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -477,27 +481,30 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
* Note that these map the Linux temperature sensor numbering (1-6) to the VIA
* temperature sensor numbering (0-5)
*/
-#define define_temperature_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \
- show_temp_max, set_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO | S_IWUSR, \
- show_temp_min, set_temp_min, offset - 1)
static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RW(temp1_max);
static DEVICE_ATTR_RW(temp1_max_hyst);
-define_temperature_sysfs(2);
-define_temperature_sysfs(3);
-define_temperature_sysfs(4);
-define_temperature_sysfs(5);
-define_temperature_sysfs(6);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_min, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_min, 2);
+static SENSOR_DEVICE_ATTR_RO(temp4_input, temp, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max, temp_max, 3);
+static SENSOR_DEVICE_ATTR_RW(temp4_max_hyst, temp_min, 3);
+static SENSOR_DEVICE_ATTR_RO(temp5_input, temp, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max, temp_max, 4);
+static SENSOR_DEVICE_ATTR_RW(temp5_max_hyst, temp_min, 4);
+static SENSOR_DEVICE_ATTR_RO(temp6_input, temp, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max, temp_max, 5);
+static SENSOR_DEVICE_ATTR_RW(temp6_max_hyst, temp_min, 5);
/* Fans */
-static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -506,8 +513,8 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -516,8 +523,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr])));
}
-static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t fan_div_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -525,8 +532,9 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_min_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
@@ -545,8 +553,9 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t fan_div_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct vt8231_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -593,17 +602,12 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-
-#define define_fan_sysfs(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \
- show_fan_div, set_fan_div, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, set_fan_min, offset - 1)
-
-define_fan_sysfs(1);
-define_fan_sysfs(2);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
/* Alarms */
static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
@@ -614,27 +618,27 @@ static ssize_t alarms_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(alarms);
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+static ssize_t alarm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct vt8231_data *data = vt8231_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(temp4_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(temp5_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(temp6_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
static ssize_t name_show(struct device *dev, struct device_attribute
*devattr, char *buf)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 8ac89d0781cc..7ca53a28c305 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -130,17 +130,23 @@ superio_select(struct w83627hf_sio_data *sio, int ld)
outb(ld, sio->sioaddr + 1);
}
-static inline void
+static inline int
superio_enter(struct w83627hf_sio_data *sio)
{
+ if (!request_muxed_region(sio->sioaddr, 2, DRVNAME))
+ return -EBUSY;
+
outb(0x87, sio->sioaddr);
outb(0x87, sio->sioaddr);
+
+ return 0;
}
static inline void
superio_exit(struct w83627hf_sio_data *sio)
{
outb(0xAA, sio->sioaddr);
+ release_region(sio->sioaddr, 2);
}
#define W627_DEVID 0x52
@@ -396,7 +402,6 @@ struct w83627hf_data {
#endif
};
-
static int w83627hf_probe(struct platform_device *pdev);
static int w83627hf_remove(struct platform_device *pdev);
@@ -482,28 +487,28 @@ static struct platform_driver w83627hf_driver = {
};
static ssize_t
-show_in_input(struct device *dev, struct device_attribute *devattr, char *buf)
+in_input_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in[nr]));
}
static ssize_t
-show_in_min(struct device *dev, struct device_attribute *devattr, char *buf)
+in_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_min[nr]));
}
static ssize_t
-show_in_max(struct device *dev, struct device_attribute *devattr, char *buf)
+in_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
return sprintf(buf, "%ld\n", (long)IN_FROM_REG(data->in_max[nr]));
}
static ssize_t
-store_in_min(struct device *dev, struct device_attribute *devattr,
+in_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -522,7 +527,7 @@ store_in_min(struct device *dev, struct device_attribute *devattr,
return count;
}
static ssize_t
-store_in_max(struct device *dev, struct device_attribute *devattr,
+in_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -540,22 +545,31 @@ store_in_max(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_vin_decl(offset) \
-static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
- show_in_input, NULL, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO|S_IWUSR, \
- show_in_min, store_in_min, offset); \
-static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO|S_IWUSR, \
- show_in_max, store_in_max, offset);
-
-sysfs_vin_decl(1);
-sysfs_vin_decl(2);
-sysfs_vin_decl(3);
-sysfs_vin_decl(4);
-sysfs_vin_decl(5);
-sysfs_vin_decl(6);
-sysfs_vin_decl(7);
-sysfs_vin_decl(8);
+
+static SENSOR_DEVICE_ATTR_RO(in1_input, in_input, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_min, in_min, 1);
+static SENSOR_DEVICE_ATTR_RW(in1_max, in_max, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_input, in_input, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_min, in_min, 2);
+static SENSOR_DEVICE_ATTR_RW(in2_max, in_max, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_input, in_input, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_min, in_min, 3);
+static SENSOR_DEVICE_ATTR_RW(in3_max, in_max, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_input, in_input, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_min, in_min, 4);
+static SENSOR_DEVICE_ATTR_RW(in4_max, in_max, 4);
+static SENSOR_DEVICE_ATTR_RO(in5_input, in_input, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_min, in_min, 5);
+static SENSOR_DEVICE_ATTR_RW(in5_max, in_max, 5);
+static SENSOR_DEVICE_ATTR_RO(in6_input, in_input, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_min, in_min, 6);
+static SENSOR_DEVICE_ATTR_RW(in6_max, in_max, 6);
+static SENSOR_DEVICE_ATTR_RO(in7_input, in_input, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_min, in_min, 7);
+static SENSOR_DEVICE_ATTR_RW(in7_max, in_max, 7);
+static SENSOR_DEVICE_ATTR_RO(in8_input, in_input, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_min, in_min, 8);
+static SENSOR_DEVICE_ATTR_RW(in8_max, in_max, 8);
/* use a different set of functions for in0 */
static ssize_t show_in_0(struct w83627hf_data *data, char *buf, u8 reg)
@@ -661,7 +675,8 @@ static DEVICE_ATTR_RW(in0_min);
static DEVICE_ATTR_RW(in0_max);
static ssize_t
-show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_input_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -669,7 +684,7 @@ show_fan_input(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_min_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -677,7 +692,7 @@ show_fan_min(struct device *dev, struct device_attribute *devattr, char *buf)
(long)DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t
-store_fan_min(struct device *dev, struct device_attribute *devattr,
+fan_min_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -697,18 +712,16 @@ store_fan_min(struct device *dev, struct device_attribute *devattr,
mutex_unlock(&data->update_lock);
return count;
}
-#define sysfs_fan_decl(offset) \
-static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
- show_fan_input, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \
- show_fan_min, store_fan_min, offset - 1);
-sysfs_fan_decl(1);
-sysfs_fan_decl(2);
-sysfs_fan_decl(3);
+static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_input, 0);
+static SENSOR_DEVICE_ATTR_RW(fan1_min, fan_min, 0);
+static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_input, 1);
+static SENSOR_DEVICE_ATTR_RW(fan2_min, fan_min, 1);
+static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_input, 2);
+static SENSOR_DEVICE_ATTR_RW(fan3_min, fan_min, 2);
static ssize_t
-show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
+temp_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -719,8 +732,7 @@ show_temp(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-show_temp_max(struct device *dev, struct device_attribute *devattr,
- char *buf)
+temp_max_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -731,7 +743,7 @@ show_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -743,7 +755,7 @@ show_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max(struct device *dev, struct device_attribute *devattr,
+temp_max_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -765,7 +777,7 @@ store_temp_max(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
+temp_max_hyst_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -786,17 +798,15 @@ store_temp_max_hyst(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_decl(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_input, S_IRUGO, \
- show_temp, NULL, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max, S_IRUGO|S_IWUSR, \
- show_temp_max, store_temp_max, offset - 1); \
-static SENSOR_DEVICE_ATTR(temp##offset##_max_hyst, S_IRUGO|S_IWUSR, \
- show_temp_max_hyst, store_temp_max_hyst, offset - 1);
-
-sysfs_temp_decl(1);
-sysfs_temp_decl(2);
-sysfs_temp_decl(3);
+static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, temp_max, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_max_hyst, temp_max_hyst, 0);
+static SENSOR_DEVICE_ATTR_RO(temp2_input, temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max, temp_max, 1);
+static SENSOR_DEVICE_ATTR_RW(temp2_max_hyst, temp_max_hyst, 1);
+static SENSOR_DEVICE_ATTR_RO(temp3_input, temp, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max, temp_max, 2);
+static SENSOR_DEVICE_ATTR_RW(temp3_max_hyst, temp_max_hyst, 2);
static ssize_t
cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -841,27 +851,27 @@ alarms_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(alarms);
static ssize_t
-show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+alarm_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17);
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13);
+static SENSOR_DEVICE_ATTR_RO(in0_alarm, alarm, 0);
+static SENSOR_DEVICE_ATTR_RO(in1_alarm, alarm, 1);
+static SENSOR_DEVICE_ATTR_RO(in2_alarm, alarm, 2);
+static SENSOR_DEVICE_ATTR_RO(in3_alarm, alarm, 3);
+static SENSOR_DEVICE_ATTR_RO(in4_alarm, alarm, 8);
+static SENSOR_DEVICE_ATTR_RO(in5_alarm, alarm, 9);
+static SENSOR_DEVICE_ATTR_RO(in6_alarm, alarm, 10);
+static SENSOR_DEVICE_ATTR_RO(in7_alarm, alarm, 16);
+static SENSOR_DEVICE_ATTR_RO(in8_alarm, alarm, 17);
+static SENSOR_DEVICE_ATTR_RO(fan1_alarm, alarm, 6);
+static SENSOR_DEVICE_ATTR_RO(fan2_alarm, alarm, 7);
+static SENSOR_DEVICE_ATTR_RO(fan3_alarm, alarm, 11);
+static SENSOR_DEVICE_ATTR_RO(temp1_alarm, alarm, 4);
+static SENSOR_DEVICE_ATTR_RO(temp2_alarm, alarm, 5);
+static SENSOR_DEVICE_ATTR_RO(temp3_alarm, alarm, 13);
static ssize_t
beep_mask_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -902,7 +912,7 @@ beep_mask_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(beep_mask);
static ssize_t
-show_beep(struct device *dev, struct device_attribute *attr, char *buf)
+beep_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83627hf_data *data = w83627hf_update_device(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -910,8 +920,8 @@ show_beep(struct device *dev, struct device_attribute *attr, char *buf)
}
static ssize_t
-store_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+beep_store(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct w83627hf_data *data = dev_get_drvdata(dev);
int bitnr = to_sensor_dev_attr(attr)->index;
@@ -959,41 +969,25 @@ store_beep(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 0);
-static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 1);
-static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 2);
-static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 3);
-static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 8);
-static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 9);
-static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 10);
-static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 16);
-static SENSOR_DEVICE_ATTR(in8_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 17);
-static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 6);
-static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 7);
-static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 11);
-static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 4);
-static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 5);
-static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 13);
-static SENSOR_DEVICE_ATTR(beep_enable, S_IRUGO | S_IWUSR,
- show_beep, store_beep, 15);
+static SENSOR_DEVICE_ATTR_RW(in0_beep, beep, 0);
+static SENSOR_DEVICE_ATTR_RW(in1_beep, beep, 1);
+static SENSOR_DEVICE_ATTR_RW(in2_beep, beep, 2);
+static SENSOR_DEVICE_ATTR_RW(in3_beep, beep, 3);
+static SENSOR_DEVICE_ATTR_RW(in4_beep, beep, 8);
+static SENSOR_DEVICE_ATTR_RW(in5_beep, beep, 9);
+static SENSOR_DEVICE_ATTR_RW(in6_beep, beep, 10);
+static SENSOR_DEVICE_ATTR_RW(in7_beep, beep, 16);
+static SENSOR_DEVICE_ATTR_RW(in8_beep, beep, 17);
+static SENSOR_DEVICE_ATTR_RW(fan1_beep, beep, 6);
+static SENSOR_DEVICE_ATTR_RW(fan2_beep, beep, 7);
+static SENSOR_DEVICE_ATTR_RW(fan3_beep, beep, 11);
+static SENSOR_DEVICE_ATTR_RW(temp1_beep, beep, 4);
+static SENSOR_DEVICE_ATTR_RW(temp2_beep, beep, 5);
+static SENSOR_DEVICE_ATTR_RW(temp3_beep, beep, 13);
+static SENSOR_DEVICE_ATTR_RW(beep_enable, beep, 15);
static ssize_t
-show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
+fan_div_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1007,7 +1001,7 @@ show_fan_div(struct device *dev, struct device_attribute *devattr, char *buf)
* because the divisor changed.
*/
static ssize_t
-store_fan_div(struct device *dev, struct device_attribute *devattr,
+fan_div_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1047,15 +1041,12 @@ store_fan_div(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 0);
-static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 1);
-static SENSOR_DEVICE_ATTR(fan3_div, S_IRUGO|S_IWUSR,
- show_fan_div, store_fan_div, 2);
+static SENSOR_DEVICE_ATTR_RW(fan1_div, fan_div, 0);
+static SENSOR_DEVICE_ATTR_RW(fan2_div, fan_div, 1);
+static SENSOR_DEVICE_ATTR_RW(fan3_div, fan_div, 2);
static ssize_t
-show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1063,7 +1054,7 @@ show_pwm(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm(struct device *dev, struct device_attribute *devattr,
+pwm_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1096,12 +1087,13 @@ store_pwm(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO|S_IWUSR, show_pwm, store_pwm, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1, pwm, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2, pwm, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3, pwm, 2);
static ssize_t
-show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_enable_show(struct device *dev, struct device_attribute *devattr,
+ char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1109,8 +1101,8 @@ show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_enable(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+pwm_enable_store(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = dev_get_drvdata(dev);
@@ -1134,15 +1126,12 @@ store_pwm_enable(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO|S_IWUSR, show_pwm_enable,
- store_pwm_enable, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_enable, pwm_enable, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_enable, pwm_enable, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, pwm_enable, 2);
static ssize_t
-show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
+pwm_freq_show(struct device *dev, struct device_attribute *devattr, char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
struct w83627hf_data *data = w83627hf_update_device(dev);
@@ -1155,7 +1144,7 @@ show_pwm_freq(struct device *dev, struct device_attribute *devattr, char *buf)
}
static ssize_t
-store_pwm_freq(struct device *dev, struct device_attribute *devattr,
+pwm_freq_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1186,15 +1175,12 @@ store_pwm_freq(struct device *dev, struct device_attribute *devattr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 0);
-static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 1);
-static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO|S_IWUSR,
- show_pwm_freq, store_pwm_freq, 2);
+static SENSOR_DEVICE_ATTR_RW(pwm1_freq, pwm_freq, 0);
+static SENSOR_DEVICE_ATTR_RW(pwm2_freq, pwm_freq, 1);
+static SENSOR_DEVICE_ATTR_RW(pwm3_freq, pwm_freq, 2);
static ssize_t
-show_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_show(struct device *dev, struct device_attribute *devattr,
char *buf)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1203,7 +1189,7 @@ show_temp_type(struct device *dev, struct device_attribute *devattr,
}
static ssize_t
-store_temp_type(struct device *dev, struct device_attribute *devattr,
+temp_type_store(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(devattr)->index;
@@ -1258,13 +1244,9 @@ store_temp_type(struct device *dev, struct device_attribute *devattr,
return count;
}
-#define sysfs_temp_type(offset) \
-static SENSOR_DEVICE_ATTR(temp##offset##_type, S_IRUGO | S_IWUSR, \
- show_temp_type, store_temp_type, offset - 1);
-
-sysfs_temp_type(1);
-sysfs_temp_type(2);
-sysfs_temp_type(3);
+static SENSOR_DEVICE_ATTR_RW(temp1_type, temp_type, 0);
+static SENSOR_DEVICE_ATTR_RW(temp2_type, temp_type, 1);
+static SENSOR_DEVICE_ATTR_RW(temp3_type, temp_type, 2);
static ssize_t
name_show(struct device *dev, struct device_attribute *devattr, char *buf)
@@ -1278,7 +1260,7 @@ static DEVICE_ATTR_RO(name);
static int __init w83627hf_find(int sioaddr, unsigned short *addr,
struct w83627hf_sio_data *sio_data)
{
- int err = -ENODEV;
+ int err;
u16 val;
static __initconst char *const names[] = {
@@ -1290,7 +1272,11 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
};
sio_data->sioaddr = sioaddr;
- superio_enter(sio_data);
+ err = superio_enter(sio_data);
+ if (err)
+ return err;
+
+ err = -ENODEV;
val = force_id ? force_id : superio_inb(sio_data, DEVID);
switch (val) {
case W627_DEVID:
@@ -1595,7 +1581,6 @@ static int w83627hf_remove(struct platform_device *pdev)
return 0;
}
-
/* Registers 0x50-0x5f are banked */
static inline void w83627hf_set_bank(struct w83627hf_data *data, u16 reg)
{
@@ -1644,9 +1629,21 @@ static int w83627thf_read_gpio5(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff, sel;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_GPIO5);
+ res = 0xff;
+
/* Make sure these GPIO pins are enabled */
if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
@@ -1677,7 +1674,17 @@ static int w83687thf_read_vid(struct platform_device *pdev)
struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff;
- superio_enter(sio_data);
+ if (superio_enter(sio_data)) {
+ /*
+ * Some other driver reserved the address space for itself.
+ * We don't want to fail driver instantiation because of that,
+ * so display a warning and keep going.
+ */
+ dev_warn(&pdev->dev,
+ "Can not read VID data: Failed to enable SuperIO access\n");
+ return res;
+ }
+
superio_select(sio_data, W83627HF_LD_HWM);
/* Make sure these GPIO pins are enabled */
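
The attribute conversion above relies on the hwmon SENSOR_DEVICE_ATTR_RW()
helper, which derives the callbacks from its second argument by appending
_show/_store; that is why the handlers are renamed to the <func>_show /
<func>_store pattern. A minimal sketch of the convention, using a
hypothetical "foo" attribute and driver data (not part of this patch):

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>

/* Hypothetical per-device state, for illustration only. */
struct foo_data {
	u8 foo[2];
};

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int nr = to_sensor_dev_attr(attr)->index;	/* last macro argument */
	struct foo_data *data = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", data->foo[nr]);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct foo_data *data = dev_get_drvdata(dev);
	int nr = to_sensor_dev_attr(attr)->index;
	u8 val;
	int err;

	err = kstrtou8(buf, 10, &val);
	if (err)
		return err;

	data->foo[nr] = val;
	return count;
}

/* Expands to 0644 attributes wired to foo_show()/foo_store(). */
static SENSOR_DEVICE_ATTR_RW(foo1, foo, 0);
static SENSOR_DEVICE_ATTR_RW(foo2, foo, 1);
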
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
index e858093ac806..d4105321e462 100644
--- a/drivers/hwmon/w83773g.c
+++ b/drivers/hwmon/w83773g.c
@@ -44,7 +44,7 @@ static const struct i2c_device_id w83773_id[] = {
MODULE_DEVICE_TABLE(i2c, w83773_id);
-static const struct of_device_id w83773_of_match[] = {
+static const struct of_device_id __maybe_unused w83773_of_match[] = {
{
.compatible = "nuvoton,w83773g"
},
@@ -237,31 +237,13 @@ static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
return 0;
}
-static const u32 w83773_chip_config[] = {
- HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
- 0
-};
-
-static const struct hwmon_channel_info w83773_chip = {
- .type = hwmon_chip,
- .config = w83773_chip_config,
-};
-
-static const u32 w83773_temp_config[] = {
- HWMON_T_INPUT,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
- 0
-};
-
-static const struct hwmon_channel_info w83773_temp = {
- .type = hwmon_temp,
- .config = w83773_temp_config,
-};
-
static const struct hwmon_channel_info *w83773_info[] = {
- &w83773_chip,
- &w83773_temp,
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET),
NULL
};
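
HWMON_CHANNEL_INFO() builds a struct hwmon_channel_info together with its
zero-terminated config array in a single compound literal, which is what
makes the open-coded w83773_chip/w83773_temp tables above redundant. A
minimal sketch of how such a table is typically wired up, with hypothetical
foo_* names and trivial callbacks:

#include <linux/hwmon.h>

static umode_t foo_is_visible(const void *data, enum hwmon_sensor_types type,
			      u32 attr, int channel)
{
	return 0444;
}

static int foo_read(struct device *dev, enum hwmon_sensor_types type,
		    u32 attr, int channel, long *val)
{
	*val = 0;	/* a real driver would read the hardware here */
	return 0;
}

static const struct hwmon_ops foo_hwmon_ops = {
	.is_visible = foo_is_visible,
	.read = foo_read,
};

static const struct hwmon_channel_info *foo_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL),
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT,
			   HWMON_T_INPUT | HWMON_T_OFFSET),
	NULL
};

static const struct hwmon_chip_info foo_chip_info = {
	.ops = &foo_hwmon_ops,
	.info = foo_info,
};
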
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 0af0f6283b35..e94ae1bb3cf0 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1341,7 +1341,7 @@ static int watchdog_open(struct inode *inode, struct file *filp)
/* Store pointer to data into filp's private data */
filp->private_data = data;
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
static int watchdog_close(struct inode *inode, struct file *filp)
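
stream_open() marks the file as stream-like: there is no meaningful file
position, so the VFS can skip the f_pos serialization that read/write would
otherwise still go through after nonseekable_open(). A minimal sketch of the
pattern for a character device open handler (foo_* names are illustrative
only):

#include <linux/fs.h>
#include <linux/module.h>

static int foo_open(struct inode *inode, struct file *filp)
{
	/* Stash device state for the other file operations if needed. */
	filp->private_data = NULL;

	/* Stream semantics: no llseek, reads/writes ignore f_pos. */
	return stream_open(inode, filp);
}

static ssize_t foo_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	/* A watchdog-style device would kick its timer here. */
	return count;
}

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open = foo_open,
	.write = foo_write,
	.llseek = no_llseek,
};
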
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ad34380cac49..18e8d03321d6 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -75,20 +75,13 @@ config CORESIGHT_SOURCE_ETM4X
bool "CoreSight Embedded Trace Macrocell 4.x driver"
depends on ARM64
select CORESIGHT_LINKS_AND_SINKS
+ select PID_IN_CONTEXTIDR
help
This driver provides support for the ETM4.x tracer module, tracing the
instructions that a processor is executing. This is primarily useful
for instruction level tracing. Depending on the implemented version
data tracing may also be available.
-config CORESIGHT_DYNAMIC_REPLICATOR
- bool "CoreSight Programmable Replicator driver"
- depends on CORESIGHT_LINKS_AND_SINKS
- help
- This enables support for dynamic CoreSight replicator link driver.
- The programmable ATB replicator allows independent filtering of the
- trace data based on the traceid.
-
config CORESIGHT_STM
bool "CoreSight System Trace Macrocell driver"
depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 41870ded51a3..3b435aa42af5 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
coresight-etm4x-sysfs.o
-obj-$(CONFIG_CORESIGHT_DYNAMIC_REPLICATOR) += coresight-dynamic-replicator.o
obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 170fbb66bda2..4ea68a3522e9 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -485,12 +485,12 @@ static int catu_disable(struct coresight_device *csdev, void *__unused)
return rc;
}
-const struct coresight_ops_helper catu_helper_ops = {
+static const struct coresight_ops_helper catu_helper_ops = {
.enable = catu_enable,
.disable = catu_disable,
};
-const struct coresight_ops catu_ops = {
+static const struct coresight_ops catu_ops = {
.helper_ops = &catu_helper_ops,
};
@@ -557,8 +557,9 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->csdev = coresight_register(&catu_desc);
if (IS_ERR(drvdata->csdev))
ret = PTR_ERR(drvdata->csdev);
+ else
+ pm_runtime_put(&adev->dev);
out:
- pm_runtime_put(&adev->dev);
return ret;
}
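
The probe change above defers pm_runtime_put() to the success path, so the
runtime PM reference held while probing is only released once
coresight_register() has actually succeeded. A rough sketch of the resulting
probe shape, with a hypothetical foo_* driver and the setup details elided:

#include <linux/amba/bus.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct foo_drvdata {
	struct coresight_device *csdev;
};

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	struct coresight_desc desc = { 0 };
	struct foo_drvdata *drvdata;

	drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	/* ... map registers, fill in desc ... */

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
	} else {
		/* Device fully registered: let it runtime suspend again. */
		pm_runtime_put(&adev->dev);
	}

	return ret;
}
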
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 1b281f0dcccc..1d2ad183fd92 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -109,11 +109,6 @@ static inline bool coresight_is_catu_device(struct coresight_device *csdev)
return true;
}
-#ifdef CONFIG_CORESIGHT_CATU
extern const struct etr_buf_operations etr_catu_buf_ops;
-#else
-/* Dummy declaration for the CATU ops */
-static const struct etr_buf_operations etr_catu_buf_ops;
-#endif
#endif
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
deleted file mode 100644
index 299667b887fc..000000000000
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/amba/bus.h>
-#include <linux/clk.h>
-#include <linux/coresight.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-
-#include "coresight-priv.h"
-
-#define REPLICATOR_IDFILTER0 0x000
-#define REPLICATOR_IDFILTER1 0x004
-
-/**
- * struct replicator_state - specifics associated to a replicator component
- * @base: memory mapped base address for this component.
- * @dev: the device entity associated with this component
- * @atclk: optional clock for the core parts of the replicator.
- * @csdev: component vitals needed by the framework
- */
-struct replicator_state {
- void __iomem *base;
- struct device *dev;
- struct clk *atclk;
- struct coresight_device *csdev;
-};
-
-/*
- * replicator_reset : Reset the replicator configuration to sane values.
- */
-static void replicator_reset(struct replicator_state *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- if (!coresight_claim_device_unlocked(drvdata->base)) {
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(drvdata->base);
- }
-
- CS_LOCK(drvdata->base);
-}
-
-static int replicator_enable(struct coresight_device *csdev, int inport,
- int outport)
-{
- int rc = 0;
- u32 reg;
- struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- CS_UNLOCK(drvdata->base);
-
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- rc = coresight_claim_device_unlocked(drvdata->base);
-
- /* Ensure that the outport is enabled. */
- if (!rc) {
- writel_relaxed(0x00, drvdata->base + reg);
- dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
- }
-
- CS_LOCK(drvdata->base);
-
- return rc;
-}
-
-static void replicator_disable(struct coresight_device *csdev, int inport,
- int outport)
-{
- u32 reg;
- struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- switch (outport) {
- case 0:
- reg = REPLICATOR_IDFILTER0;
- break;
- case 1:
- reg = REPLICATOR_IDFILTER1;
- break;
- default:
- WARN_ON(1);
- return;
- }
-
- CS_UNLOCK(drvdata->base);
-
- /* disable the flow of ATB data through port */
- writel_relaxed(0xff, drvdata->base + reg);
-
- if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
- (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
- coresight_disclaim_device_unlocked(drvdata->base);
- CS_LOCK(drvdata->base);
-
- dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
-}
-
-static const struct coresight_ops_link replicator_link_ops = {
- .enable = replicator_enable,
- .disable = replicator_disable,
-};
-
-static const struct coresight_ops replicator_cs_ops = {
- .link_ops = &replicator_link_ops,
-};
-
-#define coresight_replicator_reg(name, offset) \
- coresight_simple_reg32(struct replicator_state, name, offset)
-
-coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
-coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
-
-static struct attribute *replicator_mgmt_attrs[] = {
- &dev_attr_idfilter0.attr,
- &dev_attr_idfilter1.attr,
- NULL,
-};
-
-static const struct attribute_group replicator_mgmt_group = {
- .attrs = replicator_mgmt_attrs,
- .name = "mgmt",
-};
-
-static const struct attribute_group *replicator_groups[] = {
- &replicator_mgmt_group,
- NULL,
-};
-
-static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
-{
- int ret;
- struct device *dev = &adev->dev;
- struct resource *res = &adev->res;
- struct coresight_platform_data *pdata = NULL;
- struct replicator_state *drvdata;
- struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
- void __iomem *base;
-
- if (np) {
- pdata = of_get_coresight_platform_data(dev, np);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
- }
-
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->dev = &adev->dev;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- drvdata->base = base;
- dev_set_drvdata(dev, drvdata);
- pm_runtime_put(&adev->dev);
-
- desc.type = CORESIGHT_DEV_TYPE_LINK;
- desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
- desc.ops = &replicator_cs_ops;
- desc.pdata = adev->dev.platform_data;
- desc.dev = &adev->dev;
- desc.groups = replicator_groups;
- drvdata->csdev = coresight_register(&desc);
-
- if (!IS_ERR(drvdata->csdev)) {
- replicator_reset(drvdata);
- return 0;
- }
- return PTR_ERR(drvdata->csdev);
-}
-
-#ifdef CONFIG_PM
-static int replicator_runtime_suspend(struct device *dev)
-{
- struct replicator_state *drvdata = dev_get_drvdata(dev);
-
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
-
- return 0;
-}
-
-static int replicator_runtime_resume(struct device *dev)
-{
- struct replicator_state *drvdata = dev_get_drvdata(dev);
-
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops replicator_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(replicator_runtime_suspend,
- replicator_runtime_resume,
- NULL)
-};
-
-static const struct amba_id replicator_ids[] = {
- {
- .id = 0x000bb909,
- .mask = 0x000fffff,
- },
- {
- /* Coresight SoC-600 */
- .id = 0x000bb9ec,
- .mask = 0x000fffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver replicator_driver = {
- .drv = {
- .name = "coresight-dynamic-replicator",
- .pm = &replicator_dev_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = replicator_probe,
- .id_table = replicator_ids,
-};
-builtin_amba_driver(replicator_driver);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 105782ea64c7..4ee4c80a4354 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,6 +5,7 @@
* Description: CoreSight Embedded Trace Buffer driver
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -71,6 +72,8 @@
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
* @reading: synchronise user space access to etb buffer.
+ * @pid: Process ID of the process being monitored by the session
+ * that is using this component.
* @buf: area of memory where ETB buffer content gets sent.
* @mode: this ETB is being used.
* @buffer_depth: size of @buf.
@@ -84,6 +87,7 @@ struct etb_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
local_t reading;
+ pid_t pid;
u8 *buf;
u32 mode;
u32 buffer_depth;
@@ -93,17 +97,9 @@ struct etb_drvdata {
static int etb_set_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
-static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
+static inline unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
- u32 depth = 0;
-
- pm_runtime_get_sync(drvdata->dev);
-
- /* RO registers don't need locking */
- depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
-
- pm_runtime_put(drvdata->dev);
- return depth;
+ return readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
}
static void __etb_enable_hw(struct etb_drvdata *drvdata)
@@ -159,14 +155,15 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
goto out;
}
- /* Nothing to do, the tracer is already enabled. */
- if (drvdata->mode == CS_MODE_SYSFS)
- goto out;
+ if (drvdata->mode == CS_MODE_DISABLED) {
+ ret = etb_enable_hw(drvdata);
+ if (ret)
+ goto out;
- ret = etb_enable_hw(drvdata);
- if (!ret)
drvdata->mode = CS_MODE_SYSFS;
+ }
+ atomic_inc(csdev->refcnt);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
@@ -175,29 +172,52 @@ out:
static int etb_enable_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
+ pid_t pid;
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* No need to continue if the component is already in use. */
- if (drvdata->mode != CS_MODE_DISABLED) {
+ /* No need to continue if the component is already in use by sysFS. */
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Get a handle on the pid of the process to monitor */
+ pid = task_pid_nr(handle->event->owner);
+
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
ret = -EBUSY;
goto out;
}
/*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ goto out;
+ }
+
+ /*
* We don't have an internal state to clean up if we fail to setup
* the perf buffer. So we can perform the step before we turn the
* ETB on and leave without cleaning up.
*/
- ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
+ ret = etb_set_buffer(csdev, handle);
if (ret)
goto out;
ret = etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -325,27 +345,35 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
coresight_disclaim_device(drvdata->base);
}
-static void etb_disable(struct coresight_device *csdev)
+static int etb_disable(struct coresight_device *csdev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- /* Disable the ETB only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- etb_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ etb_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "ETB disabled\n");
+ return 0;
}
-static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite)
+static void *etb_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
{
- int node;
+ int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
@@ -404,7 +432,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
u32 status, read_data;
- unsigned long offset, to_read;
+ unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -413,6 +441,12 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1)
+ goto out;
+
__etb_disable_hw(drvdata);
CS_UNLOCK(drvdata->base);
@@ -523,6 +557,8 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
}
__etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
@@ -720,7 +756,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
- pm_runtime_put(&adev->dev);
if (drvdata->buffer_depth & 0x80000000)
return -EINVAL;
@@ -730,6 +765,9 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata->buf)
return -ENOMEM;
+ /* This device is not associated with a session */
+ drvdata->pid = -1;
+
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &etb_cs_ops;
@@ -747,6 +785,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
goto err_misc_register;
+ pm_runtime_put(&adev->dev);
return 0;
err_misc_register:
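
The ETB changes above make the sink shareable between the perf events of a
single trace session: the owner pid is recorded on first enable, further
enables from the same owner only take another reference, and a different
owner gets -EBUSY. A condensed sketch of that idiom with a hypothetical sink
structure (locking and hardware programming simplified):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct foo_sink {
	spinlock_t lock;
	atomic_t refcnt;
	pid_t pid;		/* -1 while the sink is unused */
	bool enabled;
};

static int foo_sink_enable_perf(struct foo_sink *sink,
				struct perf_output_handle *handle)
{
	pid_t pid = task_pid_nr(handle->event->owner);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sink->lock, flags);

	if (sink->pid != -1 && sink->pid != pid) {
		/* Already claimed by another session. */
		ret = -EBUSY;
		goto out;
	}

	if (sink->pid == pid) {
		/* Same session: no HW programming, just another reference. */
		atomic_inc(&sink->refcnt);
		goto out;
	}

	/* First user: program the hardware and claim the sink. */
	sink->enabled = true;
	sink->pid = pid;
	atomic_inc(&sink->refcnt);
out:
	spin_unlock_irqrestore(&sink->lock, flags);
	return ret;
}
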
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 4d5a2b9f9d6a..3c6294432748 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -29,6 +29,7 @@ static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
+PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
@@ -36,6 +37,7 @@ PMU_FORMAT_ATTR(sinkid, "config2:0-31");
static struct attribute *etm_config_formats_attr[] = {
&format_attr_cycacc.attr,
+ &format_attr_contextid.attr,
&format_attr_timestamp.attr,
&format_attr_retstack.attr,
&format_attr_sinkid.attr,
@@ -118,23 +120,34 @@ out:
return ret;
}
+static void free_sink_buffer(struct etm_event_data *event_data)
+{
+ int cpu;
+ cpumask_t *mask = &event_data->mask;
+ struct coresight_device *sink;
+
+ if (WARN_ON(cpumask_empty(mask)))
+ return;
+
+ if (!event_data->snk_config)
+ return;
+
+ cpu = cpumask_first(mask);
+ sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
+ sink_ops(sink)->free_buffer(event_data->snk_config);
+}
+
static void free_event_data(struct work_struct *work)
{
int cpu;
cpumask_t *mask;
struct etm_event_data *event_data;
- struct coresight_device *sink;
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
/* Free the sink buffers, if there are any */
- if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
- cpu = cpumask_first(mask);
- sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
- if (sink_ops(sink)->free_buffer)
- sink_ops(sink)->free_buffer(event_data->snk_config);
- }
+ free_sink_buffer(event_data);
for_each_cpu(cpu, mask) {
struct list_head **ppath;
@@ -213,7 +226,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = coresight_get_enabled_sink(true);
}
- if (!sink || !sink_ops(sink)->alloc_buffer)
+ if (!sink)
goto err;
mask = &event_data->mask;
@@ -259,9 +272,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
if (cpu >= nr_cpu_ids)
goto err;
+ if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
+ goto err;
+
/* Allocate the sink buffer for this session */
event_data->snk_config =
- sink_ops(sink)->alloc_buffer(sink, cpu, pages,
+ sink_ops(sink)->alloc_buffer(sink, event, pages,
nr_pages, overwrite);
if (!event_data->snk_config)
goto err;
@@ -566,7 +582,8 @@ static int __init etm_perf_init(void)
{
int ret;
- etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;
+ etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
+ PERF_PMU_CAP_ITRACE);
etm_pmu.attr_groups = etm_pmu_attr_groups;
etm_pmu.task_ctx_nr = perf_sw_context;
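
PMU_FORMAT_ATTR() is what turns the new "contextid" option into a sysfs
format file (under the cs_etm PMU's format/ directory) that perf tooling can
use to set the corresponding attr.config bit. A minimal sketch of the
mechanism for a hypothetical PMU, with FOO_OPT_CTXTID standing in for the
real bit definition:

#include <linux/bits.h>
#include <linux/perf_event.h>
#include <linux/stringify.h>
#include <linux/sysfs.h>

#define FOO_OPT_CTXTID	14	/* hypothetical attr.config bit */

/* Creates format_attr_contextid, shown as "config:14" in sysfs. */
PMU_FORMAT_ATTR(contextid, "config:" __stringify(FOO_OPT_CTXTID));

static struct attribute *foo_format_attrs[] = {
	&format_attr_contextid.attr,
	NULL,
};

static const struct attribute_group foo_format_group = {
	.name = "format",
	.attrs = foo_format_attrs,
};

/* The event configuration path then simply tests the bit: */
static bool foo_wants_contextid(struct perf_event *event)
{
	return event->attr.config & BIT(FOO_OPT_CTXTID);
}
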
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 08ce37c9475d..8bb0092c7ec2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -138,8 +138,11 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
drvdata->base + TRCCNTVRn(i));
}
- /* Resource selector pair 0 is always implemented and reserved */
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /*
+ * Resource selector pair 0 is always implemented and reserved. As
+ * such start at 2.
+ */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
writel_relaxed(config->res_ctrl[i],
drvdata->base + TRCRSCTLRn(i));
@@ -201,6 +204,91 @@ static void etm4_enable_hw_smp_call(void *info)
arg->rc = etm4_enable_hw(arg->drvdata);
}
+/*
+ * The goal of function etm4_config_timestamp_event() is to configure a
+ * counter that will tell the tracer to emit a timestamp packet when it
+ * reaches zero. This is done in order to get a more fine grained idea
+ * of when instructions are executed so that they can be correlated
+ * with execution on other CPUs.
+ *
+ * To do this the counter itself is configured to self reload and
+ * TRCRSCTLR1 (always true) is used to get the counter to decrement. From
+ * there a resource selector is configured with the counter and the
+ * timestamp control register to use the resource selector to trigger the
+ * event that will insert a timestamp packet in the stream.
+ */
+static int etm4_config_timestamp_event(struct etmv4_drvdata *drvdata)
+{
+ int ctridx, ret = -EINVAL;
+ int counter, rselector;
+ u32 val = 0;
+ struct etmv4_config *config = &drvdata->config;
+
+ /* No point in trying if we don't have at least one counter */
+ if (!drvdata->nr_cntr)
+ goto out;
+
+ /* Find a counter that hasn't been initialised */
+ for (ctridx = 0; ctridx < drvdata->nr_cntr; ctridx++)
+ if (config->cntr_val[ctridx] == 0)
+ break;
+
+ /* All the counters have been configured already, bail out */
+ if (ctridx == drvdata->nr_cntr) {
+ pr_debug("%s: no available counter found\n", __func__);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /*
+ * Searching for an available resource selector to use, starting at
+ * '2' since every implementation has at least 2 resource selectors.
+ * ETMIDR4 gives the number of resource selector _pairs_,
+ * hence multiply by 2.
+ */
+ for (rselector = 2; rselector < drvdata->nr_resource * 2; rselector++)
+ if (!config->res_ctrl[rselector])
+ break;
+
+ if (rselector == drvdata->nr_resource * 2) {
+ pr_debug("%s: no available resource selector found\n",
+ __func__);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Remember what counter we used */
+ counter = 1 << ctridx;
+
+ /*
+ * Initialise original and reload counter value to the smallest
+ * possible value in order to get as much precision as we can.
+ */
+ config->cntr_val[ctridx] = 1;
+ config->cntrldvr[ctridx] = 1;
+
+ /* Set the trace counter control register */
+ val = 0x1 << 16 | /* Bit 16, reload counter automatically */
+ 0x0 << 7 | /* Select single resource selector */
+ 0x1; /* Resource selector 1, i.e always true */
+
+ config->cntr_ctrl[ctridx] = val;
+
+ val = 0x2 << 16 | /* Group 0b0010 - Counter and sequencers */
+ counter << 0; /* Counter to use */
+
+ config->res_ctrl[rselector] = val;
+
+ val = 0x0 << 7 | /* Select single resource selector */
+ rselector; /* Resource selector */
+
+ config->ts_ctrl = val;
+
+ ret = 0;
+out:
+ return ret;
+}
+
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
struct perf_event *event)
{
@@ -236,9 +324,29 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
/* TRM: Must program this for cycacc to work */
config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
}
- if (attr->config & BIT(ETM_OPT_TS))
+ if (attr->config & BIT(ETM_OPT_TS)) {
+ /*
+ * Configure timestamps to be emitted at regular intervals in
+ * order to correlate instructions executed on different CPUs
+ * (CPU-wide trace scenarios).
+ */
+ ret = etm4_config_timestamp_event(drvdata);
+
+ /*
+ * No need to go further if timestamp intervals can't
+ * be configured.
+ */
+ if (ret)
+ goto out;
+
/* bit[11], Global timestamp tracing bit */
config->cfg |= BIT(11);
+ }
+
+ if (attr->config & BIT(ETM_OPT_CTXTID))
+ /* bit[6], Context ID tracing bit */
+ config->cfg |= BIT(ETM4_CFG_BIT_CTXTID);
+
/* return stack - enable if selected and supported */
if ((attr->config & BIT(ETM_OPT_RETSTK)) && drvdata->retstack)
/* bit[12], Return stack enable bit */
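
The register values assembled by etm4_config_timestamp_event() are easier to
follow with concrete numbers. Assuming, purely for illustration, that the
first free counter is index 0 and the first free resource selector is 2, the
fields described in the comments above combine as follows:

#include <linux/types.h>

/* TRCCNTCTLR0: bit 16 reloads the counter automatically, the low bits
 * select a single resource selector, namely selector 1 (always true).
 */
static const u32 cntr_ctrl = 0x1 << 16 | 0x0 << 7 | 0x1;	/* 0x00010001 */

/* TRCRSCTLR2: group 0b0010 (counters and sequencers), counter 0 selected. */
static const u32 res_ctrl = 0x2 << 16 | (1 << 0);		/* 0x00020001 */

/* TRCTSCTLR: single resource selector, selector number 2. */
static const u32 ts_ctrl = 0x0 << 7 | 2;			/* 0x00000002 */
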
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 927925151509..16b0c0e1e43a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -12,6 +12,8 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
@@ -43,7 +45,7 @@ struct funnel_drvdata {
unsigned long priority;
};
-static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
int rc = 0;
@@ -71,17 +73,19 @@ done:
static int funnel_enable(struct coresight_device *csdev, int inport,
int outport)
{
- int rc;
+ int rc = 0;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- rc = funnel_enable_hw(drvdata, inport);
+ if (drvdata->base)
+ rc = dynamic_funnel_enable_hw(drvdata, inport);
if (!rc)
dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
return rc;
}
-static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
+static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
+ int inport)
{
u32 functl;
@@ -103,7 +107,8 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
{
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- funnel_disable_hw(drvdata, inport);
+ if (drvdata->base)
+ dynamic_funnel_disable_hw(drvdata, inport);
dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
@@ -177,54 +182,70 @@ static struct attribute *coresight_funnel_attrs[] = {
};
ATTRIBUTE_GROUPS(coresight_funnel);
-static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
+static int funnel_probe(struct device *dev, struct resource *res)
{
int ret;
void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
- struct resource *res = &adev->res;
struct coresight_desc desc = { 0 };
- struct device_node *np = adev->dev.of_node;
+ struct device_node *np = dev->of_node;
if (np) {
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
}
+ if (of_device_is_compatible(np, "arm,coresight-funnel"))
+ pr_warn_once("Uses OBSOLETE CoreSight funnel binding\n");
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ drvdata->dev = dev;
+ drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
- dev_set_drvdata(dev, drvdata);
- /* Validity for the resource is already checked by the AMBA core */
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ /*
+ * Map the device base for dynamic-funnel, which has been
+ * validated by AMBA core.
+ */
+ if (res) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_disable_clk;
+ }
+ drvdata->base = base;
+ desc.groups = coresight_funnel_groups;
+ }
- drvdata->base = base;
- pm_runtime_put(&adev->dev);
+ dev_set_drvdata(dev, drvdata);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &funnel_cs_ops;
desc.pdata = pdata;
desc.dev = dev;
- desc.groups = coresight_funnel_groups;
drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto out_disable_clk;
+ }
+
+ pm_runtime_put(dev);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+out_disable_clk:
+ if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+ return ret;
}
#ifdef CONFIG_PM
@@ -253,7 +274,48 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
SET_RUNTIME_PM_OPS(funnel_runtime_suspend, funnel_runtime_resume, NULL)
};
-static const struct amba_id funnel_ids[] = {
+static int static_funnel_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Static funnels do not have a programming base */
+ ret = funnel_probe(&pdev->dev, NULL);
+
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id static_funnel_match[] = {
+ {.compatible = "arm,coresight-static-funnel"},
+ {}
+};
+
+static struct platform_driver static_funnel_driver = {
+ .probe = static_funnel_probe,
+ .driver = {
+ .name = "coresight-static-funnel",
+ .of_match_table = static_funnel_match,
+ .pm = &funnel_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(static_funnel_driver);
+
+static int dynamic_funnel_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ return funnel_probe(&adev->dev, &adev->res);
+}
+
+static const struct amba_id dynamic_funnel_ids[] = {
{
.id = 0x000bb908,
.mask = 0x000fffff,
@@ -266,14 +328,14 @@ static const struct amba_id funnel_ids[] = {
{ 0, 0},
};
-static struct amba_driver funnel_driver = {
+static struct amba_driver dynamic_funnel_driver = {
.drv = {
- .name = "coresight-funnel",
+ .name = "coresight-dynamic-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
.suppress_bind_attrs = true,
},
- .probe = funnel_probe,
- .id_table = funnel_ids,
+ .probe = dynamic_funnel_probe,
+ .id_table = dynamic_funnel_ids,
};
-builtin_amba_driver(funnel_driver);
+builtin_amba_driver(dynamic_funnel_driver);
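
Splitting the funnel into a platform ("static") and an AMBA ("dynamic")
front end while keeping one probe works by passing the MMIO resource down as
an optional argument: NULL means a non-programmable component, so the
register mapping and the mgmt attribute group are simply skipped. A
stripped-down sketch of that shape, with hypothetical foo_* names and the
error handling trimmed:

#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_drvdata {
	struct device *dev;
	void __iomem *base;	/* stays NULL for the static variant */
};

static int foo_probe(struct device *dev, struct resource *res)
{
	struct foo_drvdata *drvdata;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = dev;

	/* Only the dynamic (AMBA) variant has programmable registers. */
	if (res) {
		drvdata->base = devm_ioremap_resource(dev, res);
		if (IS_ERR(drvdata->base))
			return PTR_ERR(drvdata->base);
	}

	dev_set_drvdata(dev, drvdata);
	return 0;
}

static int foo_static_probe(struct platform_device *pdev)
{
	return foo_probe(&pdev->dev, NULL);
}

static int foo_dynamic_probe(struct amba_device *adev, const struct amba_id *id)
{
	return foo_probe(&adev->dev, &adev->res);
}
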
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index feac98315471..8c9ce74498e1 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Replicator driver
*/
+#include <linux/amba/bus.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -18,25 +19,117 @@
#include "coresight-priv.h"
+#define REPLICATOR_IDFILTER0 0x000
+#define REPLICATOR_IDFILTER1 0x004
+
/**
* struct replicator_drvdata - specifics associated to a replicator component
+ * @base: memory mapped base address for this component. Also indicates
+ * whether this one is programmable or not.
* @dev: the device entity associated with this component
* @atclk: optional clock for the core parts of the replicator.
* @csdev: component vitals needed by the framework
*/
struct replicator_drvdata {
+ void __iomem *base;
struct device *dev;
struct clk *atclk;
struct coresight_device *csdev;
};
+static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ if (!coresight_claim_device_unlocked(drvdata->base)) {
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ coresight_disclaim_device_unlocked(drvdata->base);
+ }
+
+ CS_LOCK(drvdata->base);
+}
+
+/*
+ * replicator_reset : Reset the replicator configuration to sane values.
+ */
+static inline void replicator_reset(struct replicator_drvdata *drvdata)
+{
+ if (drvdata->base)
+ dynamic_replicator_reset(drvdata);
+}
+
+static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
+ int inport, int outport)
+{
+ int rc = 0;
+ u32 reg;
+
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ rc = coresight_claim_device_unlocked(drvdata->base);
+
+ /* Ensure that the outport is enabled. */
+ if (!rc)
+ writel_relaxed(0x00, drvdata->base + reg);
+ CS_LOCK(drvdata->base);
+
+ return rc;
+}
+
static int replicator_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc = 0;
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
- return 0;
+ if (drvdata->base)
+ rc = dynamic_replicator_enable(drvdata, inport, outport);
+ if (!rc)
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
+ return rc;
+}
+
+static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
+ int inport, int outport)
+{
+ u32 reg;
+
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ /* disable the flow of ATB data through port */
+ writel_relaxed(0xff, drvdata->base + reg);
+
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ coresight_disclaim_device_unlocked(drvdata->base);
+ CS_LOCK(drvdata->base);
}
static void replicator_disable(struct coresight_device *csdev, int inport,
@@ -44,6 +137,8 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ if (drvdata->base)
+ dynamic_replicator_disable(drvdata, inport, outport);
dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
@@ -56,58 +151,110 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
-static int replicator_probe(struct platform_device *pdev)
+#define coresight_replicator_reg(name, offset) \
+ coresight_simple_reg32(struct replicator_drvdata, name, offset)
+
+coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0);
+coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1);
+
+static struct attribute *replicator_mgmt_attrs[] = {
+ &dev_attr_idfilter0.attr,
+ &dev_attr_idfilter1.attr,
+ NULL,
+};
+
+static const struct attribute_group replicator_mgmt_group = {
+ .attrs = replicator_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *replicator_groups[] = {
+ &replicator_mgmt_group,
+ NULL,
+};
+
+static int replicator_probe(struct device *dev, struct resource *res)
{
- int ret;
- struct device *dev = &pdev->dev;
+ int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
if (np) {
pdata = of_get_coresight_platform_data(dev, np);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- pdev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
}
+ if (of_device_is_compatible(np, "arm,coresight-replicator"))
+ pr_warn_once("Uses OBSOLETE CoreSight replicator binding\n");
+
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &pdev->dev;
- drvdata->atclk = devm_clk_get(&pdev->dev, "atclk"); /* optional */
+ drvdata->dev = dev;
+ drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
}
- pm_runtime_get_noresume(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
- platform_set_drvdata(pdev, drvdata);
+
+ /*
+ * Map the device base for dynamic-replicator, which has been
+ * validated by AMBA core
+ */
+ if (res) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_disable_clk;
+ }
+ drvdata->base = base;
+ desc.groups = replicator_groups;
+ }
+
+ dev_set_drvdata(dev, drvdata);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
desc.ops = &replicator_cs_ops;
- desc.pdata = pdev->dev.platform_data;
- desc.dev = &pdev->dev;
+ desc.pdata = dev->platform_data;
+ desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto out_disable_pm;
+ goto out_disable_clk;
}
- pm_runtime_put(&pdev->dev);
-
- return 0;
+ replicator_reset(drvdata);
+ pm_runtime_put(dev);
-out_disable_pm:
- if (!IS_ERR(drvdata->atclk))
+out_disable_clk:
+ if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int static_replicator_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Static replicators do not have a programming base */
+ ret = replicator_probe(&pdev->dev, NULL);
+
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
return ret;
}
@@ -139,18 +286,49 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
replicator_runtime_resume, NULL)
};
-static const struct of_device_id replicator_match[] = {
+static const struct of_device_id static_replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
+ {.compatible = "arm,coresight-static-replicator"},
{}
};
-static struct platform_driver replicator_driver = {
- .probe = replicator_probe,
+static struct platform_driver static_replicator_driver = {
+ .probe = static_replicator_probe,
.driver = {
- .name = "coresight-replicator",
- .of_match_table = replicator_match,
+ .name = "coresight-static-replicator",
+ .of_match_table = static_replicator_match,
+ .pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(static_replicator_driver);
+
+static int dynamic_replicator_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ return replicator_probe(&adev->dev, &adev->res);
+}
+
+static const struct amba_id dynamic_replicator_ids[] = {
+ {
+ .id = 0x000bb909,
+ .mask = 0x000fffff,
+ },
+ {
+ /* Coresight SoC-600 */
+ .id = 0x000bb9ec,
+ .mask = 0x000fffff,
+ },
+ { 0, 0 },
+};
+
+static struct amba_driver dynamic_replicator_driver = {
+ .drv = {
+ .name = "coresight-dynamic-replicator",
.pm = &replicator_dev_pm_ops,
.suppress_bind_attrs = true,
},
+ .probe = dynamic_replicator_probe,
+ .id_table = dynamic_replicator_ids,
};
-builtin_platform_driver(replicator_driver);
+builtin_amba_driver(dynamic_replicator_driver);
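
The programmable replicator blocks a port by writing 0xff to its ID filter
and opens it by writing 0x00, so "both filters at 0xff" doubles as the idle
condition that decides when the device is claimed or disclaimed in the code
above. A small helper expressing that invariant (the helper name is made up,
the register offsets match the driver):

#include <linux/io.h>
#include <linux/types.h>

#define REPLICATOR_IDFILTER0	0x000
#define REPLICATOR_IDFILTER1	0x004

static bool replicator_is_idle(void __iomem *base)
{
	/*
	 * 0xff in an ID filter rejects every trace ID on that port, so the
	 * replicator carries no trace when both ports are fully filtered.
	 */
	return readl_relaxed(base + REPLICATOR_IDFILTER0) == 0xff &&
	       readl_relaxed(base + REPLICATOR_IDFILTER1) == 0xff;
}
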
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index a5f053f2db2c..2527b5d3b65e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
@@ -180,8 +181,10 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ atomic_inc(csdev->refcnt);
goto out;
+ }
/*
* If drvdata::buf isn't NULL, memory was allocated for a previous
@@ -200,11 +203,13 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
}
ret = tmc_etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
- else
+ atomic_inc(csdev->refcnt);
+ } else {
/* Free up the buffer if we failed to enable */
used = false;
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -218,6 +223,7 @@ out:
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
+ pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
@@ -228,19 +234,42 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
if (drvdata->reading)
break;
/*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETB/ETF is already
- * operated from sysFS.
+ * No need to continue if the ETB/ETF is already operated
+ * from sysFS.
*/
- if (drvdata->mode != CS_MODE_DISABLED)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ ret = -EBUSY;
+ break;
+ }
+
+ /* Get a handle on the pid of the process to monitor */
+ pid = task_pid_nr(handle->event->owner);
+
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
+ ret = -EBUSY;
break;
+ }
ret = tmc_set_etf_buffer(csdev, handle);
if (ret)
break;
+
+ /*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ break;
+ }
+
ret = tmc_etb_enable_hw(drvdata);
- if (!ret)
+ if (!ret) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -273,26 +302,34 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
return 0;
}
-static void tmc_disable_etf_sink(struct coresight_device *csdev)
+static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
+
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return;
+ return -EBUSY;
}
- /* Disable the TMC only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- tmc_etb_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ tmc_etb_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
+
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ return 0;
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
@@ -337,10 +374,11 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
}
-static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite)
+static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
{
- int node;
+ int node, cpu = event->cpu;
struct cs_buffers *buf;
if (cpu == -1)
@@ -400,7 +438,7 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
u32 *buf_ptr;
u64 read_ptr, write_ptr;
u32 status;
- unsigned long offset, to_read;
+ unsigned long offset, to_read = 0, flags;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -411,6 +449,12 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
return 0;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1)
+ goto out;
+
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
@@ -504,6 +548,8 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
to_read = buf->nr_pages << PAGE_SHIFT;
}
CS_LOCK(drvdata->base);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
return to_read;
}
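
Disabling mirrors the enable path above: every successful enable took a
reference, so the hardware is only torn down when the last user goes away;
otherwise the call returns -EBUSY and the sink keeps running. A compact
sketch of that refcount discipline, using the same hypothetical foo_sink
shape as before:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_sink {
	spinlock_t lock;
	atomic_t refcnt;
	pid_t pid;
	bool enabled;
};

static int foo_sink_disable(struct foo_sink *sink)
{
	unsigned long flags;

	spin_lock_irqsave(&sink->lock, flags);

	if (atomic_dec_return(&sink->refcnt)) {
		/* Still in use by another event of the same session. */
		spin_unlock_irqrestore(&sink->lock, flags);
		return -EBUSY;
	}

	/* Last user: stop the hardware and release the session claim. */
	sink->enabled = false;
	sink->pid = -1;

	spin_unlock_irqrestore(&sink->lock, flags);
	return 0;
}
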
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index f684283890d3..df6e4b0b84e9 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -4,10 +4,15 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/atomic.h>
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
@@ -23,14 +28,18 @@ struct etr_flat_buf {
/*
* etr_perf_buffer - Perf buffer used for ETR
+ * @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
+ * @pid - The PID this etr_perf_buffer belongs to.
* @snapshot - Perf session mode
* @head - handle->head at the beginning of the session.
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
*/
struct etr_perf_buffer {
+ struct tmc_drvdata *drvdata;
struct etr_buf *etr_buf;
+ pid_t pid;
bool snapshot;
unsigned long head;
int nr_pages;
@@ -772,7 +781,8 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
static const struct etr_buf_operations *etr_buf_ops[] = {
[ETR_MODE_FLAT] = &etr_flat_buf_ops,
[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
- [ETR_MODE_CATU] = &etr_catu_buf_ops,
+ [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
+ ? &etr_catu_buf_ops : NULL,
};
static inline int tmc_etr_mode_alloc_buf(int mode,
@@ -786,7 +796,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
case ETR_MODE_FLAT:
case ETR_MODE_ETR_SG:
case ETR_MODE_CATU:
- if (etr_buf_ops[mode]->alloc)
+ if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
node, pages);
if (!rc)
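
With the dummy etr_catu_buf_ops stub removed from coresight-catu.h earlier
in this series, the ops table simply holds NULL when CATU support is
compiled out and the allocation path checks the slot before dereferencing
it. A generic sketch of the IS_ENABLED() idiom with hypothetical foo_* names:

#include <linux/errno.h>
#include <linux/kconfig.h>

struct foo_backend_ops {
	int (*alloc)(void *ctx);
};

/* Only built (and linked) when CONFIG_FOO_CATU=y. */
extern const struct foo_backend_ops foo_catu_ops;

enum { FOO_MODE_FLAT, FOO_MODE_CATU, FOO_MODE_MAX };

static const struct foo_backend_ops *foo_ops[FOO_MODE_MAX] = {
	/* NULL rather than a stub when the optional backend is absent. */
	[FOO_MODE_CATU] = IS_ENABLED(CONFIG_FOO_CATU) ? &foo_catu_ops : NULL,
};

static int foo_alloc(int mode, void *ctx)
{
	if (!foo_ops[mode] || !foo_ops[mode]->alloc)
		return -EINVAL;

	return foo_ops[mode]->alloc(ctx);
}
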
@@ -1124,8 +1134,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched, even if the buffer size has changed.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS) {
+ atomic_inc(csdev->refcnt);
goto out;
+ }
/*
* If we don't have a buffer or it doesn't match the requested size,
@@ -1138,8 +1150,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
}
ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
- if (!ret)
+ if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
+ atomic_inc(csdev->refcnt);
+ }
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1154,23 +1168,23 @@ out:
}
/*
- * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
+ * alloc_etr_buf: Allocate ETR buffer for use by perf.
* The size of the hardware buffer is dependent on the size configured
* via sysfs and the perf ring buffer size. We prefer to allocate the
* largest possible size, scaling down the size by half until it
* reaches a minimum limit (1M), beyond which we give up.
*/
-static struct etr_perf_buffer *
-tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
- void **pages, bool snapshot)
+static struct etr_buf *
+alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
{
+ int node, cpu = event->cpu;
struct etr_buf *etr_buf;
- struct etr_perf_buffer *etr_perf;
unsigned long size;
- etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
- if (!etr_perf)
- return ERR_PTR(-ENOMEM);
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
/*
* Try to match the perf ring buffer size if it is larger
@@ -1195,32 +1209,160 @@ tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
size /= 2;
} while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
+ return ERR_PTR(-ENOMEM);
+
+done:
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf_cpu_wide(struct tmc_drvdata *drvdata,
+ struct perf_event *event, int nr_pages,
+ void **pages, bool snapshot)
+{
+ int ret;
+ pid_t pid = task_pid_nr(event->owner);
+ struct etr_buf *etr_buf;
+
+retry:
+ /*
+ * An etr_perf_buffer is associated with an event and holds a reference
+ * to the AUX ring buffer that was created for that event. In CPU-wide
+ * N:1 mode multiple events (one per CPU), each with its own AUX ring
+ * buffer, share a sink. As such an etr_perf_buffer is created for each
+ * event but a single etr_buf associated with the ETR is shared between
+ * them. The last event in a trace session will copy the content of the
+ * etr_buf to its AUX ring buffer. Ring buffer associated to other
+ * events are simply not used an freed as events are destoyed. We still
+ * need to allocate a ring buffer for each event since we don't know
+ * which event will be last.
+ */
+
+ /*
+ * The first thing to do here is check if an etr_buf has already been
+ * allocated for this session. If so it is shared with this event,
+ * otherwise it is created.
+ */
+ mutex_lock(&drvdata->idr_mutex);
+ etr_buf = idr_find(&drvdata->idr, pid);
+ if (etr_buf) {
+ refcount_inc(&etr_buf->refcount);
+ mutex_unlock(&drvdata->idr_mutex);
+ return etr_buf;
+ }
+
+ /* If we made it here no buffer has been allocated, do so now. */
+ mutex_unlock(&drvdata->idr_mutex);
+
+ etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (IS_ERR(etr_buf))
+ return etr_buf;
+
+ refcount_set(&etr_buf->refcount, 1);
+
+ /* Now that we have a buffer, add it to the IDR. */
+ mutex_lock(&drvdata->idr_mutex);
+ ret = idr_alloc(&drvdata->idr, etr_buf, pid, pid + 1, GFP_KERNEL);
+ mutex_unlock(&drvdata->idr_mutex);
+
+ /* Another event with this session ID has allocated this buffer. */
+ if (ret == -ENOSPC) {
+ tmc_free_etr_buf(etr_buf);
+ goto retry;
+ }
+
+ /* The IDR can't allocate room for a new session, abandon ship. */
+ if (ret == -ENOMEM) {
+ tmc_free_etr_buf(etr_buf);
+ return ERR_PTR(ret);
+ }
+
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf_per_thread(struct tmc_drvdata *drvdata,
+ struct perf_event *event, int nr_pages,
+ void **pages, bool snapshot)
+{
+ struct etr_buf *etr_buf;
+
+ /*
+ * In per-thread mode the etr_buf isn't shared, so just go ahead
+ * with memory allocation.
+ */
+ etr_buf = alloc_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (IS_ERR(etr_buf))
+ goto out;
+
+ refcount_set(&etr_buf->refcount, 1);
+out:
+ return etr_buf;
+}
+
+static struct etr_buf *
+get_perf_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+{
+ if (event->cpu == -1)
+ return get_perf_etr_buf_per_thread(drvdata, event, nr_pages,
+ pages, snapshot);
+
+ return get_perf_etr_buf_cpu_wide(drvdata, event, nr_pages,
+ pages, snapshot);
+}
+
+static struct etr_perf_buffer *
+tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
+ int nr_pages, void **pages, bool snapshot)
+{
+ int node, cpu = event->cpu;
+ struct etr_buf *etr_buf;
+ struct etr_perf_buffer *etr_perf;
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+ if (!etr_perf)
+ return ERR_PTR(-ENOMEM);
+
+ etr_buf = get_perf_etr_buf(drvdata, event, nr_pages, pages, snapshot);
+ if (!IS_ERR(etr_buf))
+ goto done;
+
kfree(etr_perf);
return ERR_PTR(-ENOMEM);
done:
+ /*
+ * Keep a reference to the ETR this buffer has been allocated for
+ * in order to have access to the IDR in tmc_free_etr_buffer().
+ */
+ etr_perf->drvdata = drvdata;
etr_perf->etr_buf = etr_buf;
+
return etr_perf;
}
static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
- int cpu, void **pages, int nr_pages,
- bool snapshot)
+ struct perf_event *event, void **pages,
+ int nr_pages, bool snapshot)
{
struct etr_perf_buffer *etr_perf;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (cpu == -1)
- cpu = smp_processor_id();
-
- etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
+ etr_perf = tmc_etr_setup_perf_buf(drvdata, event,
nr_pages, pages, snapshot);
if (IS_ERR(etr_perf)) {
dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
return NULL;
}
+ etr_perf->pid = task_pid_nr(event->owner);
etr_perf->snapshot = snapshot;
etr_perf->nr_pages = nr_pages;
etr_perf->pages = pages;
@@ -1231,9 +1373,33 @@ static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
static void tmc_free_etr_buffer(void *config)
{
struct etr_perf_buffer *etr_perf = config;
+ struct tmc_drvdata *drvdata = etr_perf->drvdata;
+ struct etr_buf *buf, *etr_buf = etr_perf->etr_buf;
+
+ if (!etr_buf)
+ goto free_etr_perf_buffer;
+
+ mutex_lock(&drvdata->idr_mutex);
+ /* If we are not the last one to use the buffer, don't touch it. */
+ if (!refcount_dec_and_test(&etr_buf->refcount)) {
+ mutex_unlock(&drvdata->idr_mutex);
+ goto free_etr_perf_buffer;
+ }
+
+ /* We are the last one, remove from the IDR and free the buffer. */
+ buf = idr_remove(&drvdata->idr, etr_perf->pid);
+ mutex_unlock(&drvdata->idr_mutex);
+
+ /*
+ * Something went very wrong if the buffer associated with this ID
+ * is not the same in the IDR. Leak to avoid use after free.
+ */
+ if (buf && WARN_ON(buf != etr_buf))
+ goto free_etr_perf_buffer;
+
+ tmc_free_etr_buf(etr_perf->etr_buf);
- if (etr_perf->etr_buf)
- tmc_free_etr_buf(etr_perf->etr_buf);
+free_etr_perf_buffer:
kfree(etr_perf);
}
@@ -1308,6 +1474,13 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
struct etr_buf *etr_buf = etr_perf->etr_buf;
spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't do anything if another tracer is using this sink */
+ if (atomic_read(csdev->refcnt) != 1) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+
if (WARN_ON(drvdata->perf_data != etr_perf)) {
lost = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1347,17 +1520,15 @@ out:
static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
{
int rc = 0;
+ pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct perf_output_handle *handle = data;
struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
spin_lock_irqsave(&drvdata->spinlock, flags);
- /*
- * There can be only one writer per sink in perf mode. If the sink
- * is already open in SYSFS mode, we can't use it.
- */
- if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
+ /* Don't use this sink if it is already claimed by sysfs */
+ if (drvdata->mode == CS_MODE_SYSFS) {
rc = -EBUSY;
goto unlock_out;
}
@@ -1367,11 +1538,34 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
+ /* Get a handle on the pid of the process to monitor */
+ pid = etr_perf->pid;
+
+ /* Do not proceed if this device is associated with another session */
+ if (drvdata->pid != -1 && drvdata->pid != pid) {
+ rc = -EBUSY;
+ goto unlock_out;
+ }
+
etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
drvdata->perf_data = etr_perf;
+
+ /*
+ * No HW configuration is needed if the sink is already in
+ * use for this session.
+ */
+ if (drvdata->pid == pid) {
+ atomic_inc(csdev->refcnt);
+ goto unlock_out;
+ }
+
rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
- if (!rc)
+ if (!rc) {
+ /* Associate with monitored process. */
+ drvdata->pid = pid;
drvdata->mode = CS_MODE_PERF;
+ atomic_inc(csdev->refcnt);
+ }
unlock_out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1392,26 +1586,34 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev,
return -EINVAL;
}
-static void tmc_disable_etr_sink(struct coresight_device *csdev)
+static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
+
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return;
+ return -EBUSY;
}
- /* Disable the TMC only if it needs to */
- if (drvdata->mode != CS_MODE_DISABLED) {
- tmc_etr_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ if (atomic_dec_return(csdev->refcnt)) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
}
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ tmc_etr_disable_hw(drvdata);
+ /* Dissociate from monitored process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
+
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
+ return 0;
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 2a02da3d630f..3f718729d741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -8,10 +8,12 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -340,6 +342,8 @@ static inline bool tmc_etr_can_use_sg(struct tmc_drvdata *drvdata)
static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
u32 devid, void *dev_caps)
{
+ int rc;
+
u32 dma_mask = 0;
/* Set the unadvertised capabilities */
@@ -369,7 +373,10 @@ static int tmc_etr_setup_caps(struct tmc_drvdata *drvdata,
dma_mask = 40;
}
- return dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+ rc = dma_set_mask_and_coherent(drvdata->dev, DMA_BIT_MASK(dma_mask));
+ if (rc)
+ dev_err(drvdata->dev, "Failed to setup DMA mask: %d\n", rc);
+ return rc;
}
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
@@ -415,6 +422,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
drvdata->memwidth = tmc_get_memwidth(devid);
+ /* This device is not associated with a session */
+ drvdata->pid = -1;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
if (np)
@@ -427,8 +436,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
- pm_runtime_put(&adev->dev);
-
desc.pdata = pdata;
desc.dev = dev;
desc.groups = coresight_tmc_groups;
@@ -447,6 +454,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
coresight_get_uci_data(id));
if (ret)
goto out;
+ idr_init(&drvdata->idr);
+ mutex_init(&drvdata->idr_mutex);
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
@@ -471,6 +480,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
ret = misc_register(&drvdata->miscdev);
if (ret)
coresight_unregister(drvdata->csdev);
+ else
+ pm_runtime_put(&adev->dev);
out:
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 487c53701e9c..503f1b3a3741 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -8,7 +8,10 @@
#define _CORESIGHT_TMC_H
#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -133,6 +136,7 @@ struct etr_buf_operations;
/**
* struct etr_buf - Details of the buffer used by ETR
+ * @refcount : Number of sources currently using this etr_buf.
* @mode : Mode of the ETR buffer, contiguous, Scatter Gather etc.
* @full : Trace data overflow
* @size : Size of the buffer.
@@ -143,6 +147,7 @@ struct etr_buf_operations;
* @private : Backend specific information for the buf
*/
struct etr_buf {
+ refcount_t refcount;
enum etr_mode mode;
bool full;
ssize_t size;
@@ -160,6 +165,8 @@ struct etr_buf {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
+ * @pid: Process ID of the process being monitored by the session
+ * that is using this component.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.
@@ -170,6 +177,8 @@ struct etr_buf {
* @trigger_cntr: amount of words to store after a trigger.
* @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
* device configuration register (DEVID)
+ * @idr: Holds etr_bufs allocated for this ETR.
+ * @idr_mutex: Access serialisation for idr.
* @perf_data: PERF buffer for ETR.
* @sysfs_data: SYSFS buffer for ETR.
*/
@@ -179,6 +188,7 @@ struct tmc_drvdata {
struct coresight_device *csdev;
struct miscdevice miscdev;
spinlock_t spinlock;
+ pid_t pid;
bool reading;
union {
char *buf; /* TMC ETB */
@@ -191,6 +201,8 @@ struct tmc_drvdata {
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
u32 etr_caps;
+ struct idr idr;
+ struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
void *perf_data;
};
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index b2f72a1fa402..63d9af31f57f 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -5,6 +5,7 @@
* Description: CoreSight Trace Port Interface Unit driver
*/
+#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -73,7 +74,7 @@ static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_enable_hw(drvdata);
-
+ atomic_inc(csdev->refcnt);
dev_dbg(drvdata->dev, "TPIU enabled\n");
return 0;
}
@@ -94,13 +95,17 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tpiu_disable(struct coresight_device *csdev)
+static int tpiu_disable(struct coresight_device *csdev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ if (atomic_dec_return(csdev->refcnt))
+ return -EBUSY;
+
tpiu_disable_hw(drvdata);
dev_dbg(drvdata->dev, "TPIU disabled\n");
+ return 0;
}
static const struct coresight_ops_sink tpiu_sink_ops = {
@@ -153,8 +158,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
- pm_runtime_put(&adev->dev);
-
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
desc.ops = &tpiu_cs_ops;
@@ -162,7 +165,12 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+ if (!IS_ERR(drvdata->csdev)) {
+ pm_runtime_put(&adev->dev);
+ return 0;
+ }
+
+ return PTR_ERR(drvdata->csdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 29cef898afba..4b130281236a 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -225,26 +225,28 @@ static int coresight_enable_sink(struct coresight_device *csdev,
* We need to make sure the "new" session is compatible with the
* existing "mode" of operation.
*/
- if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
- csdev->enable = true;
- }
+ if (!sink_ops(csdev)->enable)
+ return -EINVAL;
- atomic_inc(csdev->refcnt);
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret)
+ return ret;
+ csdev->enable = true;
return 0;
}
static void coresight_disable_sink(struct coresight_device *csdev)
{
- if (atomic_dec_return(csdev->refcnt) == 0) {
- if (sink_ops(csdev)->disable) {
- sink_ops(csdev)->disable(csdev);
- csdev->enable = false;
- }
- }
+ int ret;
+
+ if (!sink_ops(csdev)->disable)
+ return;
+
+ ret = sink_ops(csdev)->disable(csdev);
+ if (ret)
+ return;
+ csdev->enable = false;
}
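With this change the core no longer keeps its own sink reference count; each sink's enable/disable ops do, and -EBUSY from .disable means "still in use, leave the hardware alone". A minimal sketch of that contract (illustrative only, not a driver in this patch):

    static int example_sink_disable(struct coresight_device *csdev)
    {
        /* another session still owns the sink: keep it running */
        if (atomic_dec_return(csdev->refcnt))
            return -EBUSY;

        /* last user gone: actually stop the hardware */
        example_sink_disable_hw(csdev);
        return 0;
    }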
static int coresight_enable_link(struct coresight_device *csdev,
@@ -973,7 +975,6 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
- kfree(csdev->conns);
kfree(csdev->refcnt);
kfree(csdev);
}
diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c
index 87bc3744755f..87f9024e4bbb 100644
--- a/drivers/hwtracing/intel_th/acpi.c
+++ b/drivers/hwtracing/intel_th/acpi.c
@@ -37,15 +37,21 @@ MODULE_DEVICE_TABLE(acpi, intel_th_acpi_ids);
static int intel_th_acpi_probe(struct platform_device *pdev)
{
struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct resource resource[TH_MMIO_END];
const struct acpi_device_id *id;
struct intel_th *th;
+ int i, r;
id = acpi_match_device(intel_th_acpi_ids, &pdev->dev);
if (!id)
return -ENODEV;
- th = intel_th_alloc(&pdev->dev, (void *)id->driver_data,
- pdev->resource, pdev->num_resources, -1);
+ for (i = 0, r = 0; i < pdev->num_resources && r < TH_MMIO_END; i++)
+ if (pdev->resource[i].flags &
+ (IORESOURCE_IRQ | IORESOURCE_MEM))
+ resource[r++] = pdev->resource[i];
+
+ th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 7c1acc2f801c..033dce563c99 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -430,9 +430,9 @@ static const struct intel_th_subdevice {
.nres = 1,
.res = {
{
- /* Handle TSCU from GTH driver */
+ /* Handle TSCU and CTS from GTH driver */
.start = REG_GTH_OFFSET,
- .end = REG_TSCU_OFFSET + REG_TSCU_LENGTH - 1,
+ .end = REG_CTS_OFFSET + REG_CTS_LENGTH - 1,
.flags = IORESOURCE_MEM,
},
},
@@ -491,7 +491,7 @@ static const struct intel_th_subdevice {
.flags = IORESOURCE_MEM,
},
{
- .start = 1, /* use resource[1] */
+ .start = TH_MMIO_SW,
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -501,6 +501,24 @@ static const struct intel_th_subdevice {
.type = INTEL_TH_SOURCE,
},
{
+ .nres = 2,
+ .res = {
+ {
+ .start = REG_STH_OFFSET,
+ .end = REG_STH_OFFSET + REG_STH_LENGTH - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = TH_MMIO_RTIT,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+ },
+ .id = -1,
+ .name = "rtit",
+ .type = INTEL_TH_SOURCE,
+ },
+ {
.nres = 1,
.res = {
{
@@ -584,7 +602,6 @@ intel_th_subdevice_alloc(struct intel_th *th,
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
- bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -594,18 +611,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
thdev->drvdata = th->drvdata;
- for (r = 0; r < th->num_resources; r++)
- if (th->resource[r].flags & IORESOURCE_MEM_64) {
- is64bit = true;
- break;
- }
-
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = 0; /* cut subdevices' MMIO from resource[0] */
+ int bar = TH_MMIO_CONFIG;
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -614,8 +625,9 @@ intel_th_subdevice_alloc(struct intel_th *th,
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
- if (is64bit)
- bar *= 2;
+ err = -ENODEV;
+ if (bar >= th->num_resources)
+ goto fail_put_device;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
@@ -627,7 +639,12 @@ intel_th_subdevice_alloc(struct intel_th *th,
dev_dbg(th->dev, "%s:%d @ %pR\n",
subdev->name, r, &res[r]);
} else if (res[r].flags & IORESOURCE_IRQ) {
- res[r].start = th->irq;
+ /*
+ * Only pass on the IRQ if we have useful interrupts:
+ * the ones that can be configured via MINTCTL.
+ */
+ if (INTEL_TH_CAP(th, has_mintctl) && th->irq != -1)
+ res[r].start = th->irq;
}
}
@@ -758,8 +775,13 @@ static int intel_th_populate(struct intel_th *th)
thdev = intel_th_subdevice_alloc(th, subdev);
/* note: caller should free subdevices from th::thdev[] */
- if (IS_ERR(thdev))
+ if (IS_ERR(thdev)) {
+ /* ENODEV for individual subdevices is allowed */
+ if (PTR_ERR(thdev) == -ENODEV)
+ continue;
+
return PTR_ERR(thdev);
+ }
th->thdev[th->num_thdevs++] = thdev;
}
@@ -809,26 +831,40 @@ static const struct file_operations intel_th_output_fops = {
.llseek = noop_llseek,
};
+static irqreturn_t intel_th_irq(int irq, void *data)
+{
+ struct intel_th *th = data;
+ irqreturn_t ret = IRQ_NONE;
+ struct intel_th_driver *d;
+ int i;
+
+ for (i = 0; i < th->num_thdevs; i++) {
+ if (th->thdev[i]->type != INTEL_TH_OUTPUT)
+ continue;
+
+ d = to_intel_th_driver(th->thdev[i]->dev.driver);
+ if (d && d->irq)
+ ret |= d->irq(th->thdev[i]);
+ }
+
+ if (ret == IRQ_NONE)
+ pr_warn_ratelimited("nobody cared for irq\n");
+
+ return ret;
+}
+
/**
* intel_th_alloc() - allocate a new Intel TH device and its subdevices
* @dev: parent device
- * @devres: parent's resources
- * @ndevres: number of resources
+ * @devres: resources indexed by th_mmio_idx
* @irq: irq number
*/
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq)
+ struct resource *devres, unsigned int ndevres)
{
+ int err, r, nr_mmios = 0;
struct intel_th *th;
- int err, r;
-
- if (irq == -1)
- for (r = 0; r < ndevres; r++)
- if (devres[r].flags & IORESOURCE_IRQ) {
- irq = devres[r].start;
- break;
- }
th = kzalloc(sizeof(*th), GFP_KERNEL);
if (!th)
@@ -846,12 +882,32 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
err = th->major;
goto err_ida;
}
+ th->irq = -1;
th->dev = dev;
th->drvdata = drvdata;
- th->resource = devres;
- th->num_resources = ndevres;
- th->irq = irq;
+ for (r = 0; r < ndevres; r++)
+ switch (devres[r].flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_MEM:
+ th->resource[nr_mmios++] = devres[r];
+ break;
+ case IORESOURCE_IRQ:
+ err = devm_request_irq(dev, devres[r].start,
+ intel_th_irq, IRQF_SHARED,
+ dev_name(dev), th);
+ if (err)
+ goto err_chrdev;
+
+ if (th->irq == -1)
+ th->irq = devres[r].start;
+ break;
+ default:
+ dev_warn(dev, "Unknown resource type %lx\n",
+ devres[r].flags);
+ break;
+ }
+
+ th->num_resources = nr_mmios;
dev_set_drvdata(dev, th);
@@ -868,6 +924,10 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
return th;
+err_chrdev:
+ __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
+ "intel_th/output");
+
err_ida:
ida_simple_remove(&intel_th_ida, th->id);
@@ -928,6 +988,27 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
EXPORT_SYMBOL_GPL(intel_th_trace_enable);
/**
+ * intel_th_trace_switch() - execute a switch sequence
+ * @thdev: output device that requests tracing switch
+ */
+int intel_th_trace_switch(struct intel_th_device *thdev)
+{
+ struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent);
+ struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+
+ if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
+ return -EINVAL;
+
+ hubdrv->trig_switch(hub, &thdev->output);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_th_trace_switch);
+
+/**
* intel_th_trace_disable() - disable tracing for an output device
* @thdev: output device that requests tracing be disabled
*/
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index edc52d75e6bd..fa9d34af87ac 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -308,6 +308,11 @@ static int intel_th_gth_reset(struct gth_device *gth)
iowrite32(0, gth->base + REG_GTH_SCR);
iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+ /* setup CTS for single trigger */
+ iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
+ iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
+ CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);
+
return 0;
}
@@ -457,6 +462,68 @@ static int intel_th_output_attributes(struct gth_device *gth)
}
/**
+ * intel_th_gth_stop() - stop tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ * @capture_done: set when no more traces will be captured
+ *
+ * This will stop tracing using the force storeEn off signal and wait for the
+ * pipelines to be empty for the corresponding output port.
+ */
+static void intel_th_gth_stop(struct gth_device *gth,
+ struct intel_th_output *output,
+ bool capture_done)
+{
+ struct intel_th_device *outdev =
+ container_of(output, struct intel_th_device, output);
+ struct intel_th_driver *outdrv =
+ to_intel_th_driver(outdev->dev.driver);
+ unsigned long count;
+ u32 reg;
+ u32 scr2 = 0xfc | (capture_done ? 1 : 0);
+
+ iowrite32(0, gth->base + REG_GTH_SCR);
+ iowrite32(scr2, gth->base + REG_GTH_SCR2);
+
+ /* wait on pipeline empty for the given port */
+ for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
+ count && !(reg & BIT(output->port)); count--) {
+ reg = ioread32(gth->base + REG_GTH_STAT);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
+ output->port);
+
+ /* wait on output pipeline empty */
+ if (outdrv->wait_empty)
+ outdrv->wait_empty(outdev);
+
+ /* clear force capture done for next captures */
+ iowrite32(0xfc, gth->base + REG_GTH_SCR2);
+}
+
+/**
+ * intel_th_gth_start() - start tracing to an output device
+ * @gth: GTH device
+ * @output: output device's descriptor
+ *
+ * This will start tracing using the force storeEn signal.
+ */
+static void intel_th_gth_start(struct gth_device *gth,
+ struct intel_th_output *output)
+{
+ u32 scr = 0xfc0000;
+
+ if (output->multiblock)
+ scr |= 0xff;
+
+ iowrite32(scr, gth->base + REG_GTH_SCR);
+ iowrite32(0, gth->base + REG_GTH_SCR2);
+}
+
+/**
* intel_th_gth_disable() - disable tracing to an output device
* @thdev: GTH device
* @output: output device's descriptor
@@ -469,7 +536,6 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
struct intel_th_output *output)
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
- unsigned long count;
int master;
u32 reg;
@@ -482,22 +548,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
}
spin_unlock(&gth->gth_lock);
- iowrite32(0, gth->base + REG_GTH_SCR);
- iowrite32(0xfd, gth->base + REG_GTH_SCR2);
-
- /* wait on pipeline empty for the given port */
- for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
- count && !(reg & BIT(output->port)); count--) {
- reg = ioread32(gth->base + REG_GTH_STAT);
- cpu_relax();
- }
-
- /* clear force capture done for next captures */
- iowrite32(0xfc, gth->base + REG_GTH_SCR2);
-
- if (!count)
- dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n",
- output->port);
+ intel_th_gth_stop(gth, output, true);
reg = ioread32(gth->base + REG_GTH_SCRPD0);
reg &= ~output->scratchpad;
@@ -526,8 +577,8 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
struct intel_th *th = to_intel_th(thdev);
- u32 scr = 0xfc0000, scrpd;
int master;
+ u32 scrpd;
spin_lock(&gth->gth_lock);
for_each_set_bit(master, gth->output[output->port].master,
@@ -535,9 +586,6 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
gth_master_set(gth, master, output->port);
}
- if (output->multiblock)
- scr |= 0xff;
-
output->active = true;
spin_unlock(&gth->gth_lock);
@@ -548,8 +596,38 @@ static void intel_th_gth_enable(struct intel_th_device *thdev,
scrpd |= output->scratchpad;
iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);
- iowrite32(scr, gth->base + REG_GTH_SCR);
- iowrite32(0, gth->base + REG_GTH_SCR2);
+ intel_th_gth_start(gth, output);
+}
+
+/**
+ * intel_th_gth_switch() - execute a switch sequence
+ * @thdev: GTH device
+ * @output: output device's descriptor
+ *
+ * This will execute a switch sequence that will trigger a switch window
+ * when tracing to MSC in multi-block mode.
+ */
+static void intel_th_gth_switch(struct intel_th_device *thdev,
+ struct intel_th_output *output)
+{
+ struct gth_device *gth = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ /* trigger */
+ iowrite32(0, gth->base + REG_CTS_CTL);
+ iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
+ /* wait on trigger status */
+ for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
+ count && !(reg & BIT(4)); count--) {
+ reg = ioread32(gth->base + REG_CTS_STAT);
+ cpu_relax();
+ }
+ if (!count)
+ dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");
+
+ intel_th_gth_stop(gth, output, false);
+ intel_th_gth_start(gth, output);
}
/**
@@ -735,6 +813,7 @@ static struct intel_th_driver intel_th_gth_driver = {
.unassign = intel_th_gth_unassign,
.set_output = intel_th_gth_set_output,
.enable = intel_th_gth_enable,
+ .trig_switch = intel_th_gth_switch,
.disable = intel_th_gth_disable,
.driver = {
.name = "gth",
diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h
index 6f2b0b930875..bfcc0fd01177 100644
--- a/drivers/hwtracing/intel_th/gth.h
+++ b/drivers/hwtracing/intel_th/gth.h
@@ -49,6 +49,12 @@ enum {
REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */
REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */
REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */
+
+ /* Common Capture Sequencer (CTS) registers */
+ REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */
+ REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */
+ REG_CTS_STAT = 0x32a0, /* cts_status */
+ REG_CTS_CTL = 0x32a4, /* cts_control */
};
/* waiting for Pipeline Empty bit(s) to assert for GTH */
@@ -57,4 +63,17 @@ enum {
#define TSUCTRL_CTCRESYNC BIT(0)
#define TSCUSTAT_CTCSYNCING BIT(1)
+/* waiting for Trigger status to assert for CTS */
+#define CTS_TRIG_WAITLOOP_DEPTH 10000
+
+#define CTS_EVENT_ENABLE_IF_ANYTHING BIT(31)
+#define CTS_ACTION_CONTROL_STATE_OFF 27
+#define CTS_ACTION_CONTROL_SET_STATE(x) \
+ (((x) & 0x1f) << CTS_ACTION_CONTROL_STATE_OFF)
+#define CTS_ACTION_CONTROL_TRIGGER BIT(4)
+
+#define CTS_STATE_IDLE 0x10u
+
+#define CTS_CTL_SEQUENCER_ENABLE BIT(0)
+
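For reference, the single-trigger value programmed into REG_CTS_C0S0_ACT by intel_th_gth_reset() expands from these macros as follows (plain arithmetic, nothing new):

    /* CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE)           */
    /*   = (0x10 & 0x1f) << 27 = 0x80000000                   */
    /* | CTS_ACTION_CONTROL_TRIGGER = BIT(4) = 0x10           */
    /*   => 0x80000010 written to REG_CTS_C0S0_ACT            */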
#endif /* __INTEL_TH_GTH_H__ */
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 780206dc9012..0df480072b6c 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -8,6 +8,8 @@
#ifndef __INTEL_TH_H__
#define __INTEL_TH_H__
+#include <linux/irqreturn.h>
+
/* intel_th_device device types */
enum {
/* Devices that generate trace data */
@@ -18,6 +20,8 @@ enum {
INTEL_TH_SWITCH,
};
+struct intel_th_device;
+
/**
* struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices
* @port: output port number, assigned by the switch
@@ -25,6 +29,7 @@ enum {
* @scratchpad: scratchpad bits to flag when this output is enabled
* @multiblock: true for multiblock output configuration
* @active: true when this output is enabled
+ * @wait_empty: wait for device pipeline to be empty
*
* Output port descriptor, used by switch driver to tell which output
* port this output device corresponds to. Filled in at output device's
@@ -42,10 +47,12 @@ struct intel_th_output {
/**
* struct intel_th_drvdata - describes hardware capabilities and quirks
* @tscu_enable: device needs SW to enable time stamping unit
+ * @has_mintctl: device has interrupt control (MINTCTL) register
* @host_mode_only: device can only operate in 'host debugger' mode
*/
struct intel_th_drvdata {
unsigned int tscu_enable : 1,
+ has_mintctl : 1,
host_mode_only : 1;
};
@@ -157,10 +164,13 @@ struct intel_th_driver {
struct intel_th_device *othdev);
void (*enable)(struct intel_th_device *thdev,
struct intel_th_output *output);
+ void (*trig_switch)(struct intel_th_device *thdev,
+ struct intel_th_output *output);
void (*disable)(struct intel_th_device *thdev,
struct intel_th_output *output);
/* output ops */
- void (*irq)(struct intel_th_device *thdev);
+ irqreturn_t (*irq)(struct intel_th_device *thdev);
+ void (*wait_empty)(struct intel_th_device *thdev);
int (*activate)(struct intel_th_device *thdev);
void (*deactivate)(struct intel_th_device *thdev);
/* file_operations for those who want a device node */
@@ -213,21 +223,23 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
struct intel_th *
intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata,
- struct resource *devres, unsigned int ndevres, int irq);
+ struct resource *devres, unsigned int ndevres);
void intel_th_free(struct intel_th *th);
int intel_th_driver_register(struct intel_th_driver *thdrv);
void intel_th_driver_unregister(struct intel_th_driver *thdrv);
int intel_th_trace_enable(struct intel_th_device *thdev);
+int intel_th_trace_switch(struct intel_th_device *thdev);
int intel_th_trace_disable(struct intel_th_device *thdev);
int intel_th_set_output(struct intel_th_device *thdev,
unsigned int master);
int intel_th_output_enable(struct intel_th *th, unsigned int otype);
-enum {
+enum th_mmio_idx {
TH_MMIO_CONFIG = 0,
- TH_MMIO_SW = 2,
+ TH_MMIO_SW = 1,
+ TH_MMIO_RTIT = 2,
TH_MMIO_END,
};
@@ -237,6 +249,9 @@ enum {
#define TH_CONFIGURABLE_MASTERS 256
#define TH_MSC_MAX 2
+/* Maximum IRQ vectors */
+#define TH_NVEC_MAX 8
+
/**
* struct intel_th - Intel TH controller
* @dev: driver core's device
@@ -244,7 +259,7 @@ enum {
* @hub: "switch" subdevice (GTH)
* @resource: resources of the entire controller
* @num_thdevs: number of devices in the @thdev array
- * @num_resources: number or resources in the @resource array
+ * @num_resources: number of resources in the @resource array
* @irq: irq number
* @id: this Intel TH controller's device ID in the system
* @major: device node major for output devices
@@ -256,7 +271,7 @@ struct intel_th {
struct intel_th_device *hub;
struct intel_th_drvdata *drvdata;
- struct resource *resource;
+ struct resource resource[TH_MMIO_END];
int (*activate)(struct intel_th *);
void (*deactivate)(struct intel_th *);
unsigned int num_thdevs;
@@ -296,6 +311,9 @@ enum {
REG_TSCU_OFFSET = 0x2000,
REG_TSCU_LENGTH = 0x1000,
+ REG_CTS_OFFSET = 0x3000,
+ REG_CTS_LENGTH = 0x1000,
+
/* Software Trace Hub (STH) [0x4000..0x4fff] */
REG_STH_OFFSET = 0x4000,
REG_STH_LENGTH = 0x2000,
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index ba7aaf421f36..81bb54fa3ce8 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -29,28 +29,18 @@
#define msc_dev(x) (&(x)->thdev->dev)
/**
- * struct msc_block - multiblock mode block descriptor
- * @bdesc: pointer to hardware descriptor (beginning of the block)
- * @addr: physical address of the block
- */
-struct msc_block {
- struct msc_block_desc *bdesc;
- dma_addr_t addr;
-};
-
-/**
* struct msc_window - multiblock mode window descriptor
* @entry: window list linkage (msc::win_list)
* @pgoff: page offset into the buffer that this window starts at
* @nr_blocks: number of blocks (pages) in this window
- * @block: array of block descriptors
+ * @sgt: scatterlist table of block descriptors
*/
struct msc_window {
struct list_head entry;
unsigned long pgoff;
unsigned int nr_blocks;
struct msc *msc;
- struct msc_block block[0];
+ struct sg_table sgt;
};
/**
@@ -84,6 +74,8 @@ struct msc_iter {
* @reg_base: register window base address
* @thdev: intel_th_device pointer
* @win_list: list of windows in multiblock mode
+ * @single_sgt: single mode buffer
+ * @cur_win: current window
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
@@ -101,9 +93,12 @@ struct msc_iter {
*/
struct msc {
void __iomem *reg_base;
+ void __iomem *msu_base;
struct intel_th_device *thdev;
struct list_head win_list;
+ struct sg_table single_sgt;
+ struct msc_window *cur_win;
unsigned long nr_pages;
unsigned long single_sz;
unsigned int single_wrap : 1;
@@ -120,7 +115,8 @@ struct msc {
/* config */
unsigned int enabled : 1,
- wrap : 1;
+ wrap : 1,
+ do_irq : 1;
unsigned int mode;
unsigned int burst_len;
unsigned int index;
@@ -139,6 +135,49 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
return false;
}
+static inline struct msc_block_desc *
+msc_win_block(struct msc_window *win, unsigned int block)
+{
+ return sg_virt(&win->sgt.sgl[block]);
+}
+
+static inline dma_addr_t
+msc_win_baddr(struct msc_window *win, unsigned int block)
+{
+ return sg_dma_address(&win->sgt.sgl[block]);
+}
+
+static inline unsigned long
+msc_win_bpfn(struct msc_window *win, unsigned int block)
+{
+ return msc_win_baddr(win, block) >> PAGE_SHIFT;
+}
+
+/**
+ * msc_is_last_win() - check if a window is the last one for a given MSC
+ * @win: window
+ * Return: true if @win is the last window in MSC's multiblock buffer
+ */
+static inline bool msc_is_last_win(struct msc_window *win)
+{
+ return win->entry.next == &win->msc->win_list;
+}
+
+/**
+ * msc_next_window() - return next window in the multiblock buffer
+ * @win: current window
+ *
+ * Return: window following the current one
+ */
+static struct msc_window *msc_next_window(struct msc_window *win)
+{
+ if (msc_is_last_win(win))
+ return list_first_entry(&win->msc->win_list, struct msc_window,
+ entry);
+
+ return list_next_entry(win, entry);
+}
+
/**
* msc_oldest_window() - locate the window with oldest data
* @msc: MSC device
@@ -150,9 +189,7 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
*/
static struct msc_window *msc_oldest_window(struct msc *msc)
{
- struct msc_window *win;
- u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
- unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
+ struct msc_window *win, *next = msc_next_window(msc->cur_win);
unsigned int found = 0;
if (list_empty(&msc->win_list))
@@ -164,18 +201,18 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
* something like 2, in which case we're good
*/
list_for_each_entry(win, &msc->win_list, entry) {
- if (win->block[0].addr == win_addr)
+ if (win == next)
found++;
/* skip the empty ones */
- if (msc_block_is_empty(win->block[0].bdesc))
+ if (msc_block_is_empty(msc_win_block(win, 0)))
continue;
if (found)
return win;
}
- return list_entry(msc->win_list.next, struct msc_window, entry);
+ return list_first_entry(&msc->win_list, struct msc_window, entry);
}
/**
@@ -187,7 +224,7 @@ static struct msc_window *msc_oldest_window(struct msc *msc)
static unsigned int msc_win_oldest_block(struct msc_window *win)
{
unsigned int blk;
- struct msc_block_desc *bdesc = win->block[0].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, 0);
/* without wrapping, first block is the oldest */
if (!msc_block_wrapped(bdesc))
@@ -198,7 +235,7 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
* oldest data for this window.
*/
for (blk = 0; blk < win->nr_blocks; blk++) {
- bdesc = win->block[blk].bdesc;
+ bdesc = msc_win_block(win, blk);
if (msc_block_last_written(bdesc))
return blk;
@@ -207,34 +244,9 @@ static unsigned int msc_win_oldest_block(struct msc_window *win)
return 0;
}
-/**
- * msc_is_last_win() - check if a window is the last one for a given MSC
- * @win: window
- * Return: true if @win is the last window in MSC's multiblock buffer
- */
-static inline bool msc_is_last_win(struct msc_window *win)
-{
- return win->entry.next == &win->msc->win_list;
-}
-
-/**
- * msc_next_window() - return next window in the multiblock buffer
- * @win: current window
- *
- * Return: window following the current one
- */
-static struct msc_window *msc_next_window(struct msc_window *win)
-{
- if (msc_is_last_win(win))
- return list_entry(win->msc->win_list.next, struct msc_window,
- entry);
-
- return list_entry(win->entry.next, struct msc_window, entry);
-}
-
static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
- return iter->win->block[iter->block].bdesc;
+ return msc_win_block(iter->win, iter->block);
}
static void msc_iter_init(struct msc_iter *iter)
@@ -467,13 +479,47 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
offsetof(struct msc_block_desc, hw_tag);
for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(&bdesc->hw_tag, 0, hw_sz);
}
}
}
+static int intel_th_msu_init(struct msc *msc)
+{
+ u32 mintctl, msusts;
+
+ if (!msc->do_irq)
+ return 0;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl |= msc->index ? M1BLIE : M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+ if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
+ dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
+ msc->do_irq = 0;
+ return 0;
+ }
+
+ msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+ return 0;
+}
+
+static void intel_th_msu_deinit(struct msc *msc)
+{
+ u32 mintctl;
+
+ if (!msc->do_irq)
+ return;
+
+ mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+ mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
+ iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+}
+
/**
* msc_configure() - set up MSC hardware
* @msc: the MSC device to configure
@@ -531,23 +577,14 @@ static int msc_configure(struct msc *msc)
*/
static void msc_disable(struct msc *msc)
{
- unsigned long count;
u32 reg;
lockdep_assert_held(&msc->buf_mutex);
intel_th_trace_disable(msc->thdev);
- for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
- count && !(reg & MSCSTS_PLE); count--) {
- reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
- cpu_relax();
- }
-
- if (!count)
- dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
-
if (msc->mode == MSC_MODE_SINGLE) {
+ reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);
reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
@@ -617,22 +654,45 @@ static void intel_th_msc_deactivate(struct intel_th_device *thdev)
*/
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
unsigned int order = get_order(size);
struct page *page;
+ int ret;
if (!size)
return 0;
+ ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto err_out;
+
+ ret = -ENOMEM;
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
- return -ENOMEM;
+ goto err_free_sgt;
split_page(page, order);
- msc->nr_pages = size >> PAGE_SHIFT;
+ sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+ ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+ DMA_FROM_DEVICE);
+ if (ret < 0)
+ goto err_free_pages;
+
+ msc->nr_pages = nr_pages;
msc->base = page_address(page);
- msc->base_addr = page_to_phys(page);
+ msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
return 0;
+
+err_free_pages:
+ __free_pages(page, order);
+
+err_free_sgt:
+ sg_free_table(&msc->single_sgt);
+
+err_out:
+ return ret;
}
/**
@@ -643,6 +703,10 @@ static void msc_buffer_contig_free(struct msc *msc)
{
unsigned long off;
+ dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+ 1, DMA_FROM_DEVICE);
+ sg_free_table(&msc->single_sgt);
+
for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
struct page *page = virt_to_page(msc->base + off);
@@ -669,6 +733,40 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}
+static int __msc_buffer_win_alloc(struct msc_window *win,
+ unsigned int nr_blocks)
+{
+ struct scatterlist *sg_ptr;
+ void *block;
+ int i, ret;
+
+ ret = sg_alloc_table(&win->sgt, nr_blocks, GFP_KERNEL);
+ if (ret)
+ return -ENOMEM;
+
+ for_each_sg(win->sgt.sgl, sg_ptr, nr_blocks, i) {
+ block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
+ PAGE_SIZE, &sg_dma_address(sg_ptr),
+ GFP_KERNEL);
+ if (!block)
+ goto err_nomem;
+
+ sg_set_buf(sg_ptr, block, PAGE_SIZE);
+ }
+
+ return nr_blocks;
+
+err_nomem:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ msc_win_block(win, i),
+ msc_win_baddr(win, i));
+
+ sg_free_table(&win->sgt);
+
+ return -ENOMEM;
+}
+
/**
* msc_buffer_win_alloc() - alloc a window for a multiblock mode
* @msc: MSC device
@@ -682,44 +780,49 @@ static struct page *msc_buffer_contig_get_page(struct msc *msc,
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
struct msc_window *win;
- unsigned long size = PAGE_SIZE;
- int i, ret = -ENOMEM;
+ int ret = -ENOMEM, i;
if (!nr_blocks)
return 0;
- win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
- GFP_KERNEL);
+ /*
+ * This limitation holds as long as we need random access to the
+ * blocks. When that changes, this can go away.
+ */
+ if (nr_blocks > SG_MAX_SINGLE_ALLOC)
+ return -EINVAL;
+
+ win = kzalloc(sizeof(*win), GFP_KERNEL);
if (!win)
return -ENOMEM;
+ win->msc = msc;
+
if (!list_empty(&msc->win_list)) {
- struct msc_window *prev = list_entry(msc->win_list.prev,
- struct msc_window, entry);
+ struct msc_window *prev = list_last_entry(&msc->win_list,
+ struct msc_window,
+ entry);
+ /* This works as long as blocks are page-sized */
win->pgoff = prev->pgoff + prev->nr_blocks;
}
- for (i = 0; i < nr_blocks; i++) {
- win->block[i].bdesc =
- dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
- &win->block[i].addr, GFP_KERNEL);
-
- if (!win->block[i].bdesc)
- goto err_nomem;
+ ret = __msc_buffer_win_alloc(win, nr_blocks);
+ if (ret < 0)
+ goto err_nomem;
#ifdef CONFIG_X86
+ for (i = 0; i < ret; i++)
/* Set the page as uncached */
- set_memory_uc((unsigned long)win->block[i].bdesc, 1);
+ set_memory_uc((unsigned long)msc_win_block(win, i), 1);
#endif
- }
- win->msc = msc;
- win->nr_blocks = nr_blocks;
+ win->nr_blocks = ret;
if (list_empty(&msc->win_list)) {
- msc->base = win->block[0].bdesc;
- msc->base_addr = win->block[0].addr;
+ msc->base = msc_win_block(win, 0);
+ msc->base_addr = msc_win_baddr(win, 0);
+ msc->cur_win = win;
}
list_add_tail(&win->entry, &msc->win_list);
@@ -728,19 +831,25 @@ static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
return 0;
err_nomem:
- for (i--; i >= 0; i--) {
-#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
- dma_free_coherent(msc_dev(msc)->parent->parent, size,
- win->block[i].bdesc, win->block[i].addr);
- }
kfree(win);
return ret;
}
+static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+{
+ int i;
+
+ for (i = 0; i < win->nr_blocks; i++) {
+ struct page *page = sg_page(&win->sgt.sgl[i]);
+
+ page->mapping = NULL;
+ dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+ msc_win_block(win, i), msc_win_baddr(win, i));
+ }
+ sg_free_table(&win->sgt);
+}
+
/**
* msc_buffer_win_free() - free a window from MSC's window list
* @msc: MSC device
@@ -761,17 +870,13 @@ static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
msc->base_addr = 0;
}
- for (i = 0; i < win->nr_blocks; i++) {
- struct page *page = virt_to_page(win->block[i].bdesc);
-
- page->mapping = NULL;
#ifdef CONFIG_X86
- /* Reset the page to write-back before releasing */
- set_memory_wb((unsigned long)win->block[i].bdesc, 1);
+ for (i = 0; i < win->nr_blocks; i++)
+ /* Reset the page to write-back */
+ set_memory_wb((unsigned long)msc_win_block(win, i), 1);
#endif
- dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
- win->block[i].bdesc, win->block[i].addr);
- }
+
+ __msc_buffer_win_free(msc, win);
kfree(win);
}
@@ -798,19 +903,18 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (msc_is_last_win(win)) {
sw_tag |= MSC_SW_TAG_LASTWIN;
- next_win = list_entry(msc->win_list.next,
- struct msc_window, entry);
+ next_win = list_first_entry(&msc->win_list,
+ struct msc_window, entry);
} else {
- next_win = list_entry(win->entry.next,
- struct msc_window, entry);
+ next_win = list_next_entry(win, entry);
}
for (blk = 0; blk < win->nr_blocks; blk++) {
- struct msc_block_desc *bdesc = win->block[blk].bdesc;
+ struct msc_block_desc *bdesc = msc_win_block(win, blk);
memset(bdesc, 0, sizeof(*bdesc));
- bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_win = msc_win_bpfn(next_win, 0);
/*
* Similarly to last window, last block should point
@@ -818,11 +922,9 @@ static void msc_buffer_relink(struct msc *msc)
*/
if (blk == win->nr_blocks - 1) {
sw_tag |= MSC_SW_TAG_LASTBLK;
- bdesc->next_blk =
- win->block[0].addr >> PAGE_SHIFT;
+ bdesc->next_blk = msc_win_bpfn(win, 0);
} else {
- bdesc->next_blk =
- win->block[blk + 1].addr >> PAGE_SHIFT;
+ bdesc->next_blk = msc_win_bpfn(win, blk + 1);
}
bdesc->sw_tag = sw_tag;
@@ -997,7 +1099,7 @@ static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
found:
pgoff -= win->pgoff;
- return virt_to_page(win->block[pgoff].bdesc);
+ return sg_page(&win->sgt.sgl[pgoff]);
}
/**
@@ -1250,6 +1352,22 @@ static const struct file_operations intel_th_msc_fops = {
.owner = THIS_MODULE,
};
+static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ unsigned long count;
+ u32 reg;
+
+ for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
+ count && !(reg & MSCSTS_PLE); count--) {
+ reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
+ cpu_relax();
+ }
+
+ if (!count)
+ dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
+}
+
static int intel_th_msc_init(struct msc *msc)
{
atomic_set(&msc->user_count, -1);
@@ -1266,6 +1384,39 @@ static int intel_th_msc_init(struct msc *msc)
return 0;
}
+static void msc_win_switch(struct msc *msc)
+{
+ struct msc_window *last, *first;
+
+ first = list_first_entry(&msc->win_list, struct msc_window, entry);
+ last = list_last_entry(&msc->win_list, struct msc_window, entry);
+
+ if (msc_is_last_win(msc->cur_win))
+ msc->cur_win = first;
+ else
+ msc->cur_win = list_next_entry(msc->cur_win, entry);
+
+ msc->base = msc_win_block(msc->cur_win, 0);
+ msc->base_addr = msc_win_baddr(msc->cur_win, 0);
+
+ intel_th_trace_switch(msc->thdev);
+}
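For orientation, the window-switch plumbing added in this series connects as follows; all names come from hunks in this diff (the sysfs trigger appears a few hunks below):

    /*
     * win_switch_store()                        sysfs "win_switch" write (below)
     *   -> msc_win_switch(msc)                  advance cur_win, base, base_addr
     *     -> intel_th_trace_switch(msc->thdev)  core.c helper
     *       -> hubdrv->trig_switch()            intel_th_gth_switch() in gth.c:
     *          pulse the CTS sequencer, then intel_th_gth_stop(.., false)
     *          and intel_th_gth_start() to reopen the window
     */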
+
+static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
+{
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+ u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+
+ if (!(msusts & mask)) {
+ if (msc->enabled)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
static const char * const msc_mode[] = {
[MSC_MODE_SINGLE] = "single",
[MSC_MODE_MULTI] = "multi",
@@ -1440,10 +1591,38 @@ free_win:
static DEVICE_ATTR_RW(nr_pages);
+static ssize_t
+win_switch_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct msc *msc = dev_get_drvdata(dev);
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return -EINVAL;
+
+ mutex_lock(&msc->buf_mutex);
+ if (msc->mode != MSC_MODE_MULTI)
+ ret = -ENOTSUPP;
+ else
+ msc_win_switch(msc);
+ mutex_unlock(&msc->buf_mutex);
+
+ return ret ? ret : size;
+}
+
+static DEVICE_ATTR_WO(win_switch);
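From user space, a switch is requested by writing 1 to the new win_switch attribute; any other value returns -EINVAL, and the write is rejected with -ENOTSUPP unless the MSC is in multi mode. A hypothetical user-space snippet, assuming the first MSC shows up as 0-msc0 on the intel_th bus (the exact path depends on the system):

    /* Hypothetical user-space trigger; path and error handling simplified. */
    #include <fcntl.h>
    #include <unistd.h>

    static void request_window_switch(void)
    {
        int fd = open("/sys/bus/intel_th/devices/0-msc0/win_switch", O_WRONLY);

        if (fd >= 0) {
            write(fd, "1", 1);
            close(fd);
        }
    }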
+
static struct attribute *msc_output_attrs[] = {
&dev_attr_wrap.attr,
&dev_attr_mode.attr,
&dev_attr_nr_pages.attr,
+ &dev_attr_win_switch.attr,
NULL,
};
@@ -1471,10 +1650,19 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
if (!msc)
return -ENOMEM;
+ res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
+ if (!res)
+ msc->do_irq = 1;
+
msc->index = thdev->id;
msc->thdev = thdev;
msc->reg_base = base + msc->index * 0x100;
+ msc->msu_base = base;
+
+ err = intel_th_msu_init(msc);
+ if (err)
+ return err;
err = intel_th_msc_init(msc);
if (err)
@@ -1491,6 +1679,7 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
int ret;
intel_th_msc_deactivate(thdev);
+ intel_th_msu_deinit(msc);
/*
* Buffers should not be used at this point except if the
@@ -1504,6 +1693,8 @@ static void intel_th_msc_remove(struct intel_th_device *thdev)
static struct intel_th_driver intel_th_msc_driver = {
.probe = intel_th_msc_probe,
.remove = intel_th_msc_remove,
+ .irq = intel_th_msc_interrupt,
+ .wait_empty = intel_th_msc_wait_empty,
.activate = intel_th_msc_activate,
.deactivate = intel_th_msc_deactivate,
.fops = &intel_th_msc_fops,
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9cc8aced6116..574c16004cb2 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -11,6 +11,7 @@
enum {
REG_MSU_MSUPARAMS = 0x0000,
REG_MSU_MSUSTS = 0x0008,
+ REG_MSU_MINTCTL = 0x0004, /* MSU-global interrupt control */
REG_MSU_MSC0CTL = 0x0100, /* MSC0 control */
REG_MSU_MSC0STS = 0x0104, /* MSC0 status */
REG_MSU_MSC0BAR = 0x0108, /* MSC0 output base address */
@@ -28,6 +29,8 @@ enum {
/* MSUSTS bits */
#define MSUSTS_MSU_INT BIT(0)
+#define MSUSTS_MSC0BLAST BIT(16)
+#define MSUSTS_MSC1BLAST BIT(24)
/* MSCnCTL bits */
#define MSC_EN BIT(0)
@@ -36,6 +39,11 @@ enum {
#define MSC_MODE (BIT(4) | BIT(5))
#define MSC_LEN (BIT(8) | BIT(9) | BIT(10))
+/* MINTCTL bits */
+#define MICDE BIT(0)
+#define M0BLIE BIT(16)
+#define M1BLIE BIT(24)
+
/* MSC operating modes (MSC_MODE) */
enum {
MSC_MODE_SINGLE = 0,
@@ -87,7 +95,7 @@ static inline unsigned long msc_data_sz(struct msc_block_desc *bdesc)
static inline bool msc_block_wrapped(struct msc_block_desc *bdesc)
{
- if (bdesc->hw_tag & MSC_HW_TAG_BLOCKWRAP)
+ if (bdesc->hw_tag & (MSC_HW_TAG_BLOCKWRAP | MSC_HW_TAG_WINWRAP))
return true;
return false;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 1cf6290d6435..f1228708f2a2 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -17,7 +17,13 @@
#define DRIVER_NAME "intel_th_pci"
-#define BAR_MASK (BIT(TH_MMIO_CONFIG) | BIT(TH_MMIO_SW))
+enum {
+ TH_PCI_CONFIG_BAR = 0,
+ TH_PCI_STH_SW_BAR = 2,
+ TH_PCI_RTIT_BAR = 4,
+};
+
+#define BAR_MASK (BIT(TH_PCI_CONFIG_BAR) | BIT(TH_PCI_STH_SW_BAR))
#define PCI_REG_NPKDSC 0x80
#define NPKDSC_TSACT BIT(5)
@@ -66,8 +72,12 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct intel_th_drvdata *drvdata = (void *)id->driver_data;
+ struct resource resource[TH_MMIO_END + TH_NVEC_MAX] = {
+ [TH_MMIO_CONFIG] = pdev->resource[TH_PCI_CONFIG_BAR],
+ [TH_MMIO_SW] = pdev->resource[TH_PCI_STH_SW_BAR],
+ };
+ int err, r = TH_MMIO_SW + 1, i;
struct intel_th *th;
- int err;
err = pcim_enable_device(pdev);
if (err)
@@ -77,8 +87,19 @@ static int intel_th_pci_probe(struct pci_dev *pdev,
if (err)
return err;
- th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
- DEVICE_COUNT_RESOURCE, pdev->irq);
+ if (pdev->resource[TH_PCI_RTIT_BAR].start) {
+ resource[TH_MMIO_RTIT] = pdev->resource[TH_PCI_RTIT_BAR];
+ r++;
+ }
+
+ err = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
+ if (err > 0)
+ for (i = 0; i < err; i++, r++) {
+ resource[r].flags = IORESOURCE_IRQ;
+ resource[r].start = pci_irq_vector(pdev, i);
+ }
+
+ th = intel_th_alloc(&pdev->dev, drvdata, resource, r);
if (IS_ERR(th))
return PTR_ERR(th);
@@ -95,10 +116,13 @@ static void intel_th_pci_remove(struct pci_dev *pdev)
struct intel_th *th = pci_get_drvdata(pdev);
intel_th_free(th);
+
+ pci_free_irq_vectors(pdev);
}
static const struct intel_th_drvdata intel_th_2x = {
.tscu_enable = 1,
+ .has_mintctl = 1,
};
static const struct pci_device_id intel_th_pci_id_table[] = {
@@ -165,6 +189,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Comet Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index c7ba8acfd4d5..e55b902560de 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -166,11 +166,10 @@ stm_master(struct stm_device *stm, unsigned int idx)
static int stp_master_alloc(struct stm_device *stm, unsigned int idx)
{
struct stp_master *master;
- size_t size;
- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
- size += sizeof(struct stp_master);
- master = kzalloc(size, GFP_ATOMIC);
+ master = kzalloc(struct_size(master, chan_map,
+ BITS_TO_LONGS(stm->data->sw_nchannels)),
+ GFP_ATOMIC);
if (!master)
return -ENOMEM;
@@ -218,8 +217,8 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
- output->nr_chans = 0;
master->nr_free += output->nr_chans;
+ output->nr_chans = 0;
}
/*
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 5e5990a83da5..913db013fe90 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -603,6 +603,23 @@ bailout:
return ret;
}
+/*
+ * We print a warning when we are not flagged to support atomic transfers but
+ * will try anyhow. That's what the I2C core would do as well. Sadly, we can't
+ * modify the algorithm struct at probe time because this struct is exported
+ * 'const'.
+ */
+static int bit_xfer_atomic(struct i2c_adapter *i2c_adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+
+ if (!adap->can_do_atomic)
+ dev_warn(&i2c_adap->dev, "not flagged for atomic transfers\n");
+
+ return bit_xfer(i2c_adap, msgs, num);
+}
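A bus driver that genuinely supports atomic transfers avoids the warning by setting the new can_do_atomic flag in its algo_data. A hedged sketch, with every mybus_* callback hypothetical:

    /* Sketch only: the mybus_* callbacks are placeholders. */
    static struct i2c_algo_bit_data mybus_bit_data = {
        .setsda        = mybus_setsda,
        .setscl        = mybus_setscl,
        .getsda        = mybus_getsda,
        .getscl        = mybus_getscl,
        .udelay        = 5,             /* roughly 100 kHz */
        .timeout       = HZ / 10,
        .can_do_atomic = true,
    };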
+
static u32 bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
@@ -615,8 +632,9 @@ static u32 bit_func(struct i2c_adapter *adap)
/* -----exported algorithm data: ------------------------------------- */
const struct i2c_algorithm i2c_bit_algo = {
- .master_xfer = bit_xfer,
- .functionality = bit_func,
+ .master_xfer = bit_xfer,
+ .master_xfer_atomic = bit_xfer_atomic,
+ .functionality = bit_func,
};
EXPORT_SYMBOL(i2c_bit_algo);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f8979abb9a19..26186439db6b 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,6 +77,16 @@ config I2C_AMD8111
This driver can also be built as a module. If so, the module
will be called i2c-amd8111.
+config I2C_AMD_MP2
+ tristate "AMD MP2 PCIe"
+ depends on PCI && ACPI
+ help
+ If you say yes to this option, support will be included for the AMD
+ MP2 PCIe I2C adapter.
+
+ This driver can also be built as modules. If so, the modules will
+ be called i2c-amd-mp2-pci and i2c-amd-mp2-plat.
+
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
@@ -176,6 +186,7 @@ config I2C_PIIX4
AMD Hudson-2
AMD ML
AMD CZ
+ Hygon CZ
Serverworks OSB4
Serverworks CSB5
Serverworks CSB6
@@ -388,6 +399,19 @@ config I2C_AT91
the latency to fill the transmission register is too long. If you
are facing this situation, use the i2c-gpio driver.
+config I2C_AT91_SLAVE_EXPERIMENTAL
+ tristate "Microchip AT91 I2C experimental slave mode"
+ depends on I2C_AT91
+ select I2C_SLAVE
+ help
+ If you say yes to this option, support for the slave mode will be
+ added. Caution: do not use it in production. This feature has not
+ been heavily tested yet, help wanted.
+ There are known bugs:
+ - It can hang, on a SAMA5D4, after several transfers.
+ - There are some mismatches with a SAMA5D4 as slave and a SAMA5D2 as
+ master.
+
config I2C_AU1550
tristate "Au1550/Au1200/Au1300 SMBus interface"
depends on MIPS_ALCHEMY
@@ -425,6 +449,7 @@ config I2C_BCM_IPROC
tristate "Broadcom iProc I2C controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
default ARCH_BCM_IPROC
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
Broadcom iProc I2C controller.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 5f0cb6915969..a3245231b0b7 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -33,8 +33,13 @@ obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o
+obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o
obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
+i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o
+ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y)
+ i2c-at91-objs += i2c-at91-slave.o
+endif
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o
obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
new file mode 100644
index 000000000000..455e1f36a2a3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * AMD MP2 PCIe communication driver
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "i2c-amd-mp2.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+static void amd_mp2_c2p_mutex_lock(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ /* there is only one data mailbox for two i2c adapters */
+ mutex_lock(&privdata->c2p_lock);
+ privdata->c2p_lock_busid = i2c_common->bus_id;
+}
+
+static void amd_mp2_c2p_mutex_unlock(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (unlikely(privdata->c2p_lock_busid != i2c_common->bus_id)) {
+ dev_warn(ndev_dev(privdata),
+ "bus %d attempting to unlock C2P locked by bus %d\n",
+ i2c_common->bus_id, privdata->c2p_lock_busid);
+ return;
+ }
+
+ mutex_unlock(&privdata->c2p_lock);
+}
+
+static int amd_mp2_cmd(struct amd_i2c_common *i2c_common,
+ union i2c_cmd_base i2c_cmd_base)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ void __iomem *reg;
+
+ i2c_common->reqcmd = i2c_cmd_base.s.i2c_cmd;
+
+ reg = privdata->mmio + ((i2c_cmd_base.s.bus_id == 1) ?
+ AMD_C2P_MSG1 : AMD_C2P_MSG0);
+ writel(i2c_cmd_base.ul, reg);
+
+ return 0;
+}
+
+int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ union i2c_cmd_base i2c_cmd_base;
+
+ dev_dbg(ndev_dev(privdata), "%s id: %d\n", __func__,
+ i2c_common->bus_id);
+
+ i2c_cmd_base.ul = 0;
+ i2c_cmd_base.s.i2c_cmd = enable ? i2c_enable : i2c_disable;
+ i2c_cmd_base.s.bus_id = i2c_common->bus_id;
+ i2c_cmd_base.s.i2c_speed = i2c_common->i2c_speed;
+
+ amd_mp2_c2p_mutex_lock(i2c_common);
+
+ return amd_mp2_cmd(i2c_common, i2c_cmd_base);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_bus_enable_set);
+
+static void amd_mp2_cmd_rw_fill(struct amd_i2c_common *i2c_common,
+ union i2c_cmd_base *i2c_cmd_base,
+ enum i2c_cmd reqcmd)
+{
+ i2c_cmd_base->s.i2c_cmd = reqcmd;
+ i2c_cmd_base->s.bus_id = i2c_common->bus_id;
+ i2c_cmd_base->s.i2c_speed = i2c_common->i2c_speed;
+ i2c_cmd_base->s.slave_addr = i2c_common->msg->addr;
+ i2c_cmd_base->s.length = i2c_common->msg->len;
+}
+
+int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ union i2c_cmd_base i2c_cmd_base;
+
+ amd_mp2_cmd_rw_fill(i2c_common, &i2c_cmd_base, reqcmd);
+ amd_mp2_c2p_mutex_lock(i2c_common);
+
+ if (i2c_common->msg->len <= 32) {
+ i2c_cmd_base.s.mem_type = use_c2pmsg;
+ if (reqcmd == i2c_write)
+ memcpy_toio(privdata->mmio + AMD_C2P_MSG2,
+ i2c_common->msg->buf,
+ i2c_common->msg->len);
+ } else {
+ i2c_cmd_base.s.mem_type = use_dram;
+ writeq((u64)i2c_common->dma_addr,
+ privdata->mmio + AMD_C2P_MSG2);
+ }
+
+ return amd_mp2_cmd(i2c_common, i2c_cmd_base);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_rw);
+
+static void amd_mp2_pci_check_rw_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ int len = i2c_common->eventval.r.length;
+ u32 slave_addr = i2c_common->eventval.r.slave_addr;
+ bool err = false;
+
+ if (unlikely(len != i2c_common->msg->len)) {
+ dev_err(ndev_dev(privdata),
+ "length %d in event doesn't match buffer length %d!\n",
+ len, i2c_common->msg->len);
+ err = true;
+ }
+
+ if (unlikely(slave_addr != i2c_common->msg->addr)) {
+ dev_err(ndev_dev(privdata),
+ "unexpected slave address %x (expected: %x)!\n",
+ slave_addr, i2c_common->msg->addr);
+ err = true;
+ }
+
+ if (!err)
+ i2c_common->cmd_success = true;
+}
+
+static void __amd_mp2_process_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+ enum status_type sts = i2c_common->eventval.r.status;
+ enum response_type res = i2c_common->eventval.r.response;
+ int len = i2c_common->eventval.r.length;
+
+ if (res != command_success) {
+ if (res != command_failed)
+ dev_err(ndev_dev(privdata), "invalid response to i2c command!\n");
+ return;
+ }
+
+ switch (i2c_common->reqcmd) {
+ case i2c_read:
+ if (sts == i2c_readcomplete_event) {
+ amd_mp2_pci_check_rw_event(i2c_common);
+ if (len <= 32)
+ memcpy_fromio(i2c_common->msg->buf,
+ privdata->mmio + AMD_C2P_MSG2,
+ len);
+ } else if (sts != i2c_readfail_event) {
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after read (%d)!\n", sts);
+ }
+ break;
+ case i2c_write:
+ if (sts == i2c_writecomplete_event)
+ amd_mp2_pci_check_rw_event(i2c_common);
+ else if (sts != i2c_writefail_event)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after write (%d)!\n", sts);
+ break;
+ case i2c_enable:
+ if (sts == i2c_busenable_complete)
+ i2c_common->cmd_success = true;
+ else if (sts != i2c_busenable_failed)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after bus enable (%d)!\n",
+ sts);
+ break;
+ case i2c_disable:
+ if (sts == i2c_busdisable_complete)
+ i2c_common->cmd_success = true;
+ else if (sts != i2c_busdisable_failed)
+ dev_err(ndev_dev(privdata),
+ "invalid i2c status after bus disable (%d)!\n",
+ sts);
+ break;
+ default:
+ break;
+ }
+}
+
+void amd_mp2_process_event(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (unlikely(i2c_common->reqcmd == i2c_none)) {
+ dev_warn(ndev_dev(privdata),
+ "received msg but no cmd was sent (bus = %d)!\n",
+ i2c_common->bus_id);
+ return;
+ }
+
+ __amd_mp2_process_event(i2c_common);
+
+ i2c_common->reqcmd = i2c_none;
+ amd_mp2_c2p_mutex_unlock(i2c_common);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_process_event);
+
+static irqreturn_t amd_mp2_irq_isr(int irq, void *dev)
+{
+ struct amd_mp2_dev *privdata = dev;
+ struct amd_i2c_common *i2c_common;
+ u32 val;
+ unsigned int bus_id;
+ void __iomem *reg;
+ enum irqreturn ret = IRQ_NONE;
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (!i2c_common)
+ continue;
+
+ reg = privdata->mmio + ((bus_id == 0) ?
+ AMD_P2C_MSG1 : AMD_P2C_MSG2);
+ val = readl(reg);
+ if (val != 0) {
+ writel(0, reg);
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ i2c_common->eventval.ul = val;
+ i2c_common->cmd_completion(i2c_common);
+
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ if (ret != IRQ_HANDLED) {
+ val = readl(privdata->mmio + AMD_P2C_MSG_INTEN);
+ if (val != 0) {
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ dev_warn(ndev_dev(privdata),
+ "received irq without message\n");
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+void amd_mp2_rw_timeout(struct amd_i2c_common *i2c_common)
+{
+ i2c_common->reqcmd = i2c_none;
+ amd_mp2_c2p_mutex_unlock(i2c_common);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_rw_timeout);
+
+int amd_mp2_register_cb(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ if (i2c_common->bus_id > 1)
+ return -EINVAL;
+
+ if (privdata->busses[i2c_common->bus_id]) {
+ dev_err(ndev_dev(privdata),
+ "Bus %d already taken!\n", i2c_common->bus_id);
+ return -EINVAL;
+ }
+
+ privdata->busses[i2c_common->bus_id] = i2c_common;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_mp2_register_cb);
+
+int amd_mp2_unregister_cb(struct amd_i2c_common *i2c_common)
+{
+ struct amd_mp2_dev *privdata = i2c_common->mp2_dev;
+
+ privdata->busses[i2c_common->bus_id] = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(amd_mp2_unregister_cb);
+
+static void amd_mp2_clear_reg(struct amd_mp2_dev *privdata)
+{
+ int reg;
+
+ for (reg = AMD_C2P_MSG0; reg <= AMD_C2P_MSG9; reg += 4)
+ writel(0, privdata->mmio + reg);
+
+ for (reg = AMD_P2C_MSG1; reg <= AMD_P2C_MSG2; reg += 4)
+ writel(0, privdata->mmio + reg);
+}
+
+static int amd_mp2_pci_init(struct amd_mp2_dev *privdata,
+ struct pci_dev *pci_dev)
+{
+ int rc;
+
+ pci_set_drvdata(pci_dev, privdata);
+
+ rc = pcim_enable_device(pci_dev);
+ if (rc) {
+ dev_err(ndev_dev(privdata), "Failed to enable MP2 PCI device\n");
+ goto err_pci_enable;
+ }
+
+ rc = pcim_iomap_regions(pci_dev, 1 << 2, pci_name(pci_dev));
+ if (rc) {
+ dev_err(ndev_dev(privdata), "I/O memory remapping failed\n");
+ goto err_pci_enable;
+ }
+ privdata->mmio = pcim_iomap_table(pci_dev)[2];
+
+ pci_set_master(pci_dev);
+
+ rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto err_dma_mask;
+ }
+
+ /* Set up intx irq */
+ writel(0, privdata->mmio + AMD_P2C_MSG_INTEN);
+ pci_intx(pci_dev, 1);
+ rc = devm_request_irq(&pci_dev->dev, pci_dev->irq, amd_mp2_irq_isr,
+ IRQF_SHARED, dev_name(&pci_dev->dev), privdata);
+ if (rc)
+ dev_err(&pci_dev->dev, "Failure requesting irq %i: %d\n",
+ pci_dev->irq, rc);
+
+ return rc;
+
+err_dma_mask:
+ pci_clear_master(pci_dev);
+err_pci_enable:
+ pci_set_drvdata(pci_dev, NULL);
+ return rc;
+}
+
+static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct amd_mp2_dev *privdata;
+ int rc;
+
+ privdata = devm_kzalloc(&pci_dev->dev, sizeof(*privdata), GFP_KERNEL);
+ if (!privdata)
+ return -ENOMEM;
+
+ rc = amd_mp2_pci_init(privdata, pci_dev);
+ if (rc)
+ return rc;
+
+ mutex_init(&privdata->c2p_lock);
+ privdata->pci_dev = pci_dev;
+
+ pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
+ pm_runtime_use_autosuspend(&pci_dev->dev);
+ pm_runtime_put_autosuspend(&pci_dev->dev);
+ pm_runtime_allow(&pci_dev->dev);
+
+ privdata->probed = true;
+
+ dev_info(&pci_dev->dev, "MP2 device registered.\n");
+ return 0;
+}
+
+static void amd_mp2_pci_remove(struct pci_dev *pci_dev)
+{
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
+
+ pci_intx(pci_dev, 0);
+ pci_clear_master(pci_dev);
+
+ amd_mp2_clear_reg(privdata);
+}
+
+#ifdef CONFIG_PM
+static int amd_mp2_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+ struct amd_i2c_common *i2c_common;
+ unsigned int bus_id;
+ int ret = 0;
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (i2c_common)
+ i2c_common->suspend(i2c_common);
+ }
+
+ ret = pci_save_state(pci_dev);
+ if (ret) {
+ dev_err(ndev_dev(privdata),
+ "pci_save_state failed = %d\n", ret);
+ return ret;
+ }
+
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static int amd_mp2_pci_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct amd_mp2_dev *privdata = pci_get_drvdata(pci_dev);
+ struct amd_i2c_common *i2c_common;
+ unsigned int bus_id;
+ int ret = 0;
+
+ pci_restore_state(pci_dev);
+ ret = pci_enable_device(pci_dev);
+ if (ret < 0) {
+ dev_err(ndev_dev(privdata),
+ "pci_enable_device failed = %d\n", ret);
+ return ret;
+ }
+
+ for (bus_id = 0; bus_id < 2; bus_id++) {
+ i2c_common = privdata->busses[bus_id];
+ if (i2c_common) {
+ ret = i2c_common->resume(i2c_common);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static UNIVERSAL_DEV_PM_OPS(amd_mp2_pci_pm_ops, amd_mp2_pci_suspend,
+ amd_mp2_pci_resume, NULL);
+#endif /* CONFIG_PM */
+
+static const struct pci_device_id amd_mp2_pci_tbl[] = {
+ {PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_MP2)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, amd_mp2_pci_tbl);
+
+static struct pci_driver amd_mp2_pci_driver = {
+ .name = "i2c_amd_mp2",
+ .id_table = amd_mp2_pci_tbl,
+ .probe = amd_mp2_pci_probe,
+ .remove = amd_mp2_pci_remove,
+#ifdef CONFIG_PM
+ .driver = {
+ .pm = &amd_mp2_pci_pm_ops,
+ },
+#endif
+};
+module_pci_driver(amd_mp2_pci_driver);
+
+static int amd_mp2_device_match(struct device *dev, void *data)
+{
+ return 1;
+}
+
+struct amd_mp2_dev *amd_mp2_find_device(void)
+{
+ struct device *dev;
+ struct pci_dev *pci_dev;
+
+ dev = driver_find_device(&amd_mp2_pci_driver.driver, NULL, NULL,
+ amd_mp2_device_match);
+ if (!dev)
+ return NULL;
+
+ pci_dev = to_pci_dev(dev);
+ return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+}
+EXPORT_SYMBOL_GPL(amd_mp2_find_device);
+
+MODULE_DESCRIPTION("AMD(R) PCI-E MP2 I2C Controller Driver");
+MODULE_AUTHOR("Shyam Sundar S K <Shyam-sundar.S-k@amd.com>");
+MODULE_AUTHOR("Elie Morisse <syniurge@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i2c/busses/i2c-amd-mp2-plat.c b/drivers/i2c/busses/i2c-amd-mp2-plat.c
new file mode 100644
index 000000000000..f5b3f00c6559
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2-plat.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * AMD MP2 platform driver
+ *
+ * Set up the I2C adapters enumerated in the ACPI namespace.
+ * MP2 controllers have 2 separate busses, so up to 2 I2C adapters may be listed.
+ *
+ * Authors: Nehal Bakulchandra Shah <Nehal-bakulchandra.shah@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "i2c-amd-mp2.h"
+
+#define AMD_MP2_I2C_MAX_RW_LENGTH ((1 << 12) - 1)
+#define AMD_I2C_TIMEOUT (msecs_to_jiffies(250))
+
+/**
+ * struct amd_i2c_dev - MP2 bus/i2c adapter context
+ * @common: shared context with the MP2 PCI driver
+ * @pdev: platform driver node
+ * @adap: i2c adapter
+ * @cmd_complete: xfer completion object
+ */
+struct amd_i2c_dev {
+ struct amd_i2c_common common;
+ struct platform_device *pdev;
+ struct i2c_adapter adap;
+ struct completion cmd_complete;
+};
+
+#define amd_i2c_dev_common(__common) \
+ container_of(__common, struct amd_i2c_dev, common)
+
+static int i2c_amd_dma_map(struct amd_i2c_common *i2c_common)
+{
+ struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev;
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+ enum dma_data_direction dma_direction =
+ i2c_common->msg->flags & I2C_M_RD ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ i2c_common->dma_buf = i2c_get_dma_safe_msg_buf(i2c_common->msg, 0);
+ i2c_common->dma_addr = dma_map_single(dev_pci, i2c_common->dma_buf,
+ i2c_common->msg->len,
+ dma_direction);
+
+ if (unlikely(dma_mapping_error(dev_pci, i2c_common->dma_addr))) {
+ dev_err(&i2c_dev->pdev->dev,
+ "Error while mapping dma buffer %p\n",
+ i2c_common->dma_buf);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void i2c_amd_dma_unmap(struct amd_i2c_common *i2c_common)
+{
+ struct device *dev_pci = &i2c_common->mp2_dev->pci_dev->dev;
+ enum dma_data_direction dma_direction =
+ i2c_common->msg->flags & I2C_M_RD ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ dma_unmap_single(dev_pci, i2c_common->dma_addr,
+ i2c_common->msg->len, dma_direction);
+
+ i2c_put_dma_safe_msg_buf(i2c_common->dma_buf, i2c_common->msg, true);
+}
+
+static void i2c_amd_start_cmd(struct amd_i2c_dev *i2c_dev)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ reinit_completion(&i2c_dev->cmd_complete);
+ i2c_common->cmd_success = false;
+}
+
+static void i2c_amd_cmd_completion(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+ union i2c_event *event = &i2c_common->eventval;
+
+ if (event->r.status == i2c_readcomplete_event)
+ dev_dbg(&i2c_dev->pdev->dev, "%s readdata:%*ph\n",
+ __func__, event->r.length,
+ i2c_common->msg->buf);
+
+ complete(&i2c_dev->cmd_complete);
+}
+
+static int i2c_amd_check_cmd_completion(struct amd_i2c_dev *i2c_dev)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+ unsigned long timeout;
+
+ timeout = wait_for_completion_timeout(&i2c_dev->cmd_complete,
+ i2c_dev->adap.timeout);
+
+ if ((i2c_common->reqcmd == i2c_read ||
+ i2c_common->reqcmd == i2c_write) &&
+ i2c_common->msg->len > 32)
+ i2c_amd_dma_unmap(i2c_common);
+
+ if (timeout == 0) {
+ amd_mp2_rw_timeout(i2c_common);
+ return -ETIMEDOUT;
+ }
+
+ amd_mp2_process_event(i2c_common);
+
+ if (!i2c_common->cmd_success)
+ return -EIO;
+
+ return 0;
+}
+
+static int i2c_amd_enable_set(struct amd_i2c_dev *i2c_dev, bool enable)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ i2c_amd_start_cmd(i2c_dev);
+ amd_mp2_bus_enable_set(i2c_common, enable);
+
+ return i2c_amd_check_cmd_completion(i2c_dev);
+}
+
+static int i2c_amd_xfer_msg(struct amd_i2c_dev *i2c_dev, struct i2c_msg *pmsg)
+{
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ i2c_amd_start_cmd(i2c_dev);
+ i2c_common->msg = pmsg;
+
+ if (pmsg->len > 32)
+ if (i2c_amd_dma_map(i2c_common))
+ return -EIO;
+
+ if (pmsg->flags & I2C_M_RD)
+ amd_mp2_rw(i2c_common, i2c_read);
+ else
+ amd_mp2_rw(i2c_common, i2c_write);
+
+ return i2c_amd_check_cmd_completion(i2c_dev);
+}
+
+static int i2c_amd_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct amd_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ struct i2c_msg *pmsg;
+ int err;
+
+ /* the adapter might have been deleted while waiting for the bus lock */
+ if (unlikely(!i2c_dev->common.mp2_dev))
+ return -EINVAL;
+
+ amd_mp2_pm_runtime_get(i2c_dev->common.mp2_dev);
+
+ for (i = 0; i < num; i++) {
+ pmsg = &msgs[i];
+ err = i2c_amd_xfer_msg(i2c_dev, pmsg);
+ if (err)
+ break;
+ }
+
+ amd_mp2_pm_runtime_put(i2c_dev->common.mp2_dev);
+ return err ? err : num;
+}
+
+static u32 i2c_amd_func(struct i2c_adapter *a)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm i2c_amd_algorithm = {
+ .master_xfer = i2c_amd_xfer,
+ .functionality = i2c_amd_func,
+};
+
+#ifdef CONFIG_PM
+static int i2c_amd_suspend(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+
+ i2c_amd_enable_set(i2c_dev, false);
+ return 0;
+}
+
+static int i2c_amd_resume(struct amd_i2c_common *i2c_common)
+{
+ struct amd_i2c_dev *i2c_dev = amd_i2c_dev_common(i2c_common);
+
+ return i2c_amd_enable_set(i2c_dev, true);
+}
+#endif
+
+static enum speed_enum i2c_amd_get_bus_speed(struct platform_device *pdev)
+{
+ u32 acpi_speed;
+ int i;
+ static const u32 supported_speeds[] = {
+ 0, 100000, 400000, 1000000, 1400000, 3400000
+ };
+
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ /* round down to the nearest supported speed */
+ for (i = 1; i < ARRAY_SIZE(supported_speeds); i++) {
+ if (acpi_speed < supported_speeds[i])
+ break;
+ }
+ acpi_speed = supported_speeds[i - 1];
+
+ switch (acpi_speed) {
+ case 100000:
+ return speed100k;
+ case 400000:
+ return speed400k;
+ case 1000000:
+ return speed1000k;
+ case 1400000:
+ return speed1400k;
+ case 3400000:
+ return speed3400k;
+ default:
+ return speed400k;
+ }
+}
+
+static const struct i2c_adapter_quirks amd_i2c_dev_quirks = {
+ .max_read_len = AMD_MP2_I2C_MAX_RW_LENGTH,
+ .max_write_len = AMD_MP2_I2C_MAX_RW_LENGTH,
+};
+
+static int i2c_amd_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct amd_i2c_dev *i2c_dev;
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct acpi_device *adev;
+ struct amd_mp2_dev *mp2_dev;
+ const char *uid;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return -ENODEV;
+
+ /* The ACPI namespace doesn't contain information about which MP2 PCI
+ * device an AMDI0011 ACPI device is related to, so assume that there's
+ * only one MP2 PCI device per system.
+ */
+ mp2_dev = amd_mp2_find_device();
+ if (!mp2_dev || !mp2_dev->probed)
+ /* The MP2 PCI device should get probed later */
+ return -EPROBE_DEFER;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ i2c_dev->common.mp2_dev = mp2_dev;
+ i2c_dev->pdev = pdev;
+ platform_set_drvdata(pdev, i2c_dev);
+
+ i2c_dev->common.cmd_completion = &i2c_amd_cmd_completion;
+#ifdef CONFIG_PM
+ i2c_dev->common.suspend = &i2c_amd_suspend;
+ i2c_dev->common.resume = &i2c_amd_resume;
+#endif
+
+ uid = adev->pnp.unique_id;
+ if (!uid) {
+ dev_err(&pdev->dev, "missing UID/bus id!\n");
+ return -EINVAL;
+ } else if (strcmp(uid, "0") == 0) {
+ i2c_dev->common.bus_id = 0;
+ } else if (strcmp(uid, "1") == 0) {
+ i2c_dev->common.bus_id = 1;
+ } else {
+ dev_err(&pdev->dev, "incorrect UID/bus id \"%s\"!\n", uid);
+ return -EINVAL;
+ }
+ dev_dbg(&pdev->dev, "bus id is %u\n", i2c_dev->common.bus_id);
+
+ /* Register the adapter */
+ amd_mp2_pm_runtime_get(mp2_dev);
+
+ i2c_dev->common.reqcmd = i2c_none;
+ if (amd_mp2_register_cb(&i2c_dev->common))
+ return -EINVAL;
+ device_link_add(&i2c_dev->pdev->dev, &mp2_dev->pci_dev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+
+ i2c_dev->common.i2c_speed = i2c_amd_get_bus_speed(pdev);
+
+ /* Setup i2c adapter description */
+ i2c_dev->adap.owner = THIS_MODULE;
+ i2c_dev->adap.algo = &i2c_amd_algorithm;
+ i2c_dev->adap.quirks = &amd_i2c_dev_quirks;
+ i2c_dev->adap.dev.parent = &pdev->dev;
+ i2c_dev->adap.algo_data = i2c_dev;
+ i2c_dev->adap.timeout = AMD_I2C_TIMEOUT;
+ ACPI_COMPANION_SET(&i2c_dev->adap.dev, ACPI_COMPANION(&pdev->dev));
+ i2c_dev->adap.dev.of_node = pdev->dev.of_node;
+ snprintf(i2c_dev->adap.name, sizeof(i2c_dev->adap.name),
+ "AMD MP2 i2c bus %u", i2c_dev->common.bus_id);
+ i2c_set_adapdata(&i2c_dev->adap, i2c_dev);
+
+ init_completion(&i2c_dev->cmd_complete);
+
+ /* Enable the bus */
+ if (i2c_amd_enable_set(i2c_dev, true))
+ dev_err(&pdev->dev, "initial bus enable failed\n");
+
+ /* Attach to the i2c layer */
+ ret = i2c_add_adapter(&i2c_dev->adap);
+
+ amd_mp2_pm_runtime_put(mp2_dev);
+
+ if (ret < 0)
+ dev_err(&pdev->dev, "i2c add adapter failed = %d\n", ret);
+
+ return ret;
+}
+
+static int i2c_amd_remove(struct platform_device *pdev)
+{
+ struct amd_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ struct amd_i2c_common *i2c_common = &i2c_dev->common;
+
+ i2c_lock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
+
+ i2c_amd_enable_set(i2c_dev, false);
+ amd_mp2_unregister_cb(i2c_common);
+ i2c_common->mp2_dev = NULL;
+
+ i2c_unlock_bus(&i2c_dev->adap, I2C_LOCK_ROOT_ADAPTER);
+
+ i2c_del_adapter(&i2c_dev->adap);
+ return 0;
+}
+
+static const struct acpi_device_id i2c_amd_acpi_match[] = {
+ { "AMDI0011" },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, i2c_amd_acpi_match);
+
+static struct platform_driver i2c_amd_plat_driver = {
+ .probe = i2c_amd_probe,
+ .remove = i2c_amd_remove,
+ .driver = {
+ .name = "i2c_amd_mp2",
+ .acpi_match_table = ACPI_PTR(i2c_amd_acpi_match),
+ },
+};
+module_platform_driver(i2c_amd_plat_driver);
+
+MODULE_DESCRIPTION("AMD(R) MP2 I2C Platform Driver");
+MODULE_AUTHOR("Nehal Shah <nehal-bakulchandra.shah@amd.com>");
+MODULE_AUTHOR("Elie Morisse <syniurge@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h
new file mode 100644
index 000000000000..058362edebaa
--- /dev/null
+++ b/drivers/i2c/busses/i2c-amd-mp2.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * AMD MP2 I2C adapter driver
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Elie Morisse <syniurge@gmail.com>
+ */
+
+#ifndef I2C_AMD_PCI_MP2_H
+#define I2C_AMD_PCI_MP2_H
+
+#include <linux/i2c.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#define PCI_DEVICE_ID_AMD_MP2 0x15E6
+
+struct amd_i2c_common;
+struct amd_mp2_dev;
+
+enum {
+ /* MP2 C2P Message Registers */
+ AMD_C2P_MSG0 = 0x10500, /* MP2 Message for I2C0 */
+ AMD_C2P_MSG1 = 0x10504, /* MP2 Message for I2C1 */
+ AMD_C2P_MSG2 = 0x10508, /* DRAM Address Lo / Data 0 */
+ AMD_C2P_MSG3 = 0x1050c, /* DRAM Address HI / Data 1 */
+ AMD_C2P_MSG4 = 0x10510, /* Data 2 */
+ AMD_C2P_MSG5 = 0x10514, /* Data 3 */
+ AMD_C2P_MSG6 = 0x10518, /* Data 4 */
+ AMD_C2P_MSG7 = 0x1051c, /* Data 5 */
+ AMD_C2P_MSG8 = 0x10520, /* Data 6 */
+ AMD_C2P_MSG9 = 0x10524, /* Data 7 */
+
+ /* MP2 P2C Message Registers */
+ AMD_P2C_MSG0 = 0x10680, /* Do not use */
+ AMD_P2C_MSG1 = 0x10684, /* I2C0 interrupt register */
+ AMD_P2C_MSG2 = 0x10688, /* I2C1 interrupt register */
+ AMD_P2C_MSG3 = 0x1068C, /* MP2 debug info */
+ AMD_P2C_MSG_INTEN = 0x10690, /* MP2 interrupt gen register */
+ AMD_P2C_MSG_INTSTS = 0x10694, /* Interrupt status */
+};
+
+/* Command register data structures */
+
+#define i2c_none (-1)
+enum i2c_cmd {
+ i2c_read = 0,
+ i2c_write,
+ i2c_enable,
+ i2c_disable,
+ number_of_sensor_discovered,
+ is_mp2_active,
+ invalid_cmd = 0xF,
+};
+
+enum speed_enum {
+ speed100k = 0,
+ speed400k = 1,
+ speed1000k = 2,
+ speed1400k = 3,
+ speed3400k = 4
+};
+
+enum mem_type {
+ use_dram = 0,
+ use_c2pmsg = 1,
+};
+
+/**
+ * union i2c_cmd_base : bit access of C2P commands
+ * @i2c_cmd: bit 0..3 i2c R/W command
+ * @bus_id: bit 4..7 i2c bus index
+ * @slave_addr: bit 8..15 slave address
+ * @length: bit 16..27 read/write length
+ * @i2c_speed: bit 28..30 bus speed
+ * @mem_type: bit 31 0-DRAM; 1-C2P msg o/p
+ */
+union i2c_cmd_base {
+ u32 ul;
+ struct {
+ enum i2c_cmd i2c_cmd : 4;
+ u8 bus_id : 4;
+ u32 slave_addr : 8;
+ u32 length : 12;
+ enum speed_enum i2c_speed : 3;
+ enum mem_type mem_type : 1;
+ } s;
+};
+
+enum response_type {
+ invalid_response = 0,
+ command_success = 1,
+ command_failed = 2,
+};
+
+enum status_type {
+ i2c_readcomplete_event = 0,
+ i2c_readfail_event = 1,
+ i2c_writecomplete_event = 2,
+ i2c_writefail_event = 3,
+ i2c_busenable_complete = 4,
+ i2c_busenable_failed = 5,
+ i2c_busdisable_complete = 6,
+ i2c_busdisable_failed = 7,
+ invalid_data_length = 8,
+ invalid_slave_address = 9,
+ invalid_i2cbus_id = 10,
+ invalid_dram_addr = 11,
+ invalid_command = 12,
+ mp2_active = 13,
+ numberof_sensors_discovered_resp = 14,
+ i2c_bus_notinitialized
+};
+
+/**
+ * union i2c_event : bit access of P2C events
+ * @response: bit 0..1 i2c response type
+ * @status: bit 2..6 status_type
+ * @mem_type: bit 7 0-DRAM; 1-C2P msg o/p
+ * @bus_id: bit 8..11 i2c bus id
+ * @length: bit 12..23 message length
+ * @slave_addr: bit 24-31 slave address
+ */
+union i2c_event {
+ u32 ul;
+ struct {
+ enum response_type response : 2;
+ enum status_type status : 5;
+ enum mem_type mem_type : 1;
+ u8 bus_id : 4;
+ u32 length : 12;
+ u32 slave_addr : 8;
+ } r;
+};
+
+/**
+ * struct amd_i2c_common - per bus/i2c adapter context, shared
+ * between the pci and the platform driver
+ * @eventval: MP2 event value set by the IRQ handler
+ * @mp2_dev: MP2 pci device this adapter is part of
+ * @msg: i2c message
+ * @cmd_completion: function called by the IRQ handler to signal
+ * the platform driver
+ * @reqcmd: requested i2c command type
+ * @cmd_success: set to true if the MP2 responded to a command with
+ * the expected status and response type
+ * @bus_id: bus index
+ * @i2c_speed: i2c bus speed determined by the slowest slave
+ * @dma_buf: if msg length > 32, holds the DMA buffer virtual address
+ * @dma_addr: if msg length > 32, holds the DMA buffer address
+ */
+struct amd_i2c_common {
+ union i2c_event eventval;
+ struct amd_mp2_dev *mp2_dev;
+ struct i2c_msg *msg;
+ void (*cmd_completion)(struct amd_i2c_common *i2c_common);
+ enum i2c_cmd reqcmd;
+ u8 cmd_success;
+ u8 bus_id;
+ enum speed_enum i2c_speed;
+ u8 *dma_buf;
+ dma_addr_t dma_addr;
+#ifdef CONFIG_PM
+ int (*suspend)(struct amd_i2c_common *i2c_common);
+ int (*resume)(struct amd_i2c_common *i2c_common);
+#endif /* CONFIG_PM */
+};
+
+/**
+ * struct amd_mp2_dev - per PCI device context
+ * @pci_dev: PCI driver node
+ * @busses: MP2 devices may have up to two busses,
+ * each bus corresponding to an i2c adapter
+ * @mmio: ioremapped registers
+ * @c2p_lock: controls access to the C2P mailbox shared between
+ * the two adapters
+ * @c2p_lock_busid: id of the adapter which locked c2p_lock
+ */
+struct amd_mp2_dev {
+ struct pci_dev *pci_dev;
+ struct amd_i2c_common *busses[2];
+ void __iomem *mmio;
+ struct mutex c2p_lock;
+ u8 c2p_lock_busid;
+ unsigned int probed;
+};
+
+#define ndev_pdev(ndev) ((ndev)->pci_dev)
+#define ndev_name(ndev) pci_name(ndev_pdev(ndev))
+#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev)
+#define work_amd_i2c_common(__work) \
+ container_of(__work, struct amd_i2c_common, work.work)
+
+/* PCIe communication driver */
+
+int amd_mp2_rw(struct amd_i2c_common *i2c_common, enum i2c_cmd reqcmd);
+int amd_mp2_bus_enable_set(struct amd_i2c_common *i2c_common, bool enable);
+
+void amd_mp2_process_event(struct amd_i2c_common *i2c_common);
+
+void amd_mp2_rw_timeout(struct amd_i2c_common *i2c_common);
+
+int amd_mp2_register_cb(struct amd_i2c_common *i2c_common);
+int amd_mp2_unregister_cb(struct amd_i2c_common *i2c_common);
+
+struct amd_mp2_dev *amd_mp2_find_device(void);
+
+static inline void amd_mp2_pm_runtime_get(struct amd_mp2_dev *mp2_dev)
+{
+ pm_runtime_get_sync(&mp2_dev->pci_dev->dev);
+}
+
+static inline void amd_mp2_pm_runtime_put(struct amd_mp2_dev *mp2_dev)
+{
+ pm_runtime_mark_last_busy(&mp2_dev->pci_dev->dev);
+ pm_runtime_put_autosuspend(&mp2_dev->pci_dev->dev);
+}
+
+#endif
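The whole C2P command is a single 32-bit word whose layout is given by the kernel-doc above. A hedged illustration of the packing (the helper is hypothetical, and the driver relies on the compiler's bitfield layout matching what the MP2 firmware expects): a 2-byte read from slave 0x50 on bus 1 at 400 kHz, answered through the C2P message registers, packs to 0x90025010.

#include <linux/types.h>

#include "i2c-amd-mp2.h"

static u32 demo_pack_read_cmd(void)
{
	union i2c_cmd_base cmd;

	cmd.ul = 0;
	cmd.s.i2c_cmd = i2c_read;	/* bits 0..3   = 0x0  */
	cmd.s.bus_id = 1;		/* bits 4..7   = 0x1  */
	cmd.s.slave_addr = 0x50;	/* bits 8..15  = 0x50 */
	cmd.s.length = 2;		/* bits 16..27 = 0x002 */
	cmd.s.i2c_speed = speed400k;	/* bits 28..30 = 0x1  */
	cmd.s.mem_type = use_c2pmsg;	/* bit 31      = 0x1  */

	return cmd.ul;			/* 0x90025010 */
}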
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
new file mode 100644
index 000000000000..8d55cdd69ff4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2011 Weinmann Medical GmbH
+ * Author: Nikolaus Voss <n.voss@weinmann.de>
+ *
+ * Evolved from original work by:
+ * Copyright (C) 2004 Rick Bronson
+ * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
+ *
+ * Borrowed heavily from original work by:
+ * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "i2c-at91.h"
+
+unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
+{
+ return readl_relaxed(dev->base + reg);
+}
+
+void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
+{
+ writel_relaxed(val, dev->base + reg);
+}
+
+void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
+}
+
+void at91_twi_irq_save(struct at91_twi_dev *dev)
+{
+ dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
+ at91_disable_twi_interrupts(dev);
+}
+
+void at91_twi_irq_restore(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_IER, dev->imr);
+}
+
+void at91_init_twi_bus(struct at91_twi_dev *dev)
+{
+ at91_disable_twi_interrupts(dev);
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
+ if (dev->slave_detected)
+ at91_init_twi_bus_slave(dev);
+ else
+ at91_init_twi_bus_master(dev);
+}
+
+static struct at91_twi_pdata at91rm9200_config = {
+ .clk_max_div = 5,
+ .clk_offset = 3,
+ .has_unre_flag = true,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9261_config = {
+ .clk_max_div = 5,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9260_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9g20_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata at91sam9g10_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static const struct platform_device_id at91_twi_devtypes[] = {
+ {
+ .name = "i2c-at91rm9200",
+ .driver_data = (unsigned long) &at91rm9200_config,
+ }, {
+ .name = "i2c-at91sam9261",
+ .driver_data = (unsigned long) &at91sam9261_config,
+ }, {
+ .name = "i2c-at91sam9260",
+ .driver_data = (unsigned long) &at91sam9260_config,
+ }, {
+ .name = "i2c-at91sam9g20",
+ .driver_data = (unsigned long) &at91sam9g20_config,
+ }, {
+ .name = "i2c-at91sam9g10",
+ .driver_data = (unsigned long) &at91sam9g10_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+#if defined(CONFIG_OF)
+static struct at91_twi_pdata at91sam9x5_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = false,
+};
+
+static struct at91_twi_pdata sama5d4_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = false,
+ .has_alt_cmd = false,
+ .has_hold_field = true,
+};
+
+static struct at91_twi_pdata sama5d2_config = {
+ .clk_max_div = 7,
+ .clk_offset = 4,
+ .has_unre_flag = true,
+ .has_alt_cmd = true,
+ .has_hold_field = true,
+};
+
+static const struct of_device_id atmel_twi_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-i2c",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9260-i2c",
+ .data = &at91sam9260_config,
+ }, {
+ .compatible = "atmel,at91sam9261-i2c",
+ .data = &at91sam9261_config,
+ }, {
+ .compatible = "atmel,at91sam9g20-i2c",
+ .data = &at91sam9g20_config,
+ }, {
+ .compatible = "atmel,at91sam9g10-i2c",
+ .data = &at91sam9g10_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-i2c",
+ .data = &at91sam9x5_config,
+ }, {
+ .compatible = "atmel,sama5d4-i2c",
+ .data = &sama5d4_config,
+ }, {
+ .compatible = "atmel,sama5d2-i2c",
+ .data = &sama5d2_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
+#endif
+
+static struct at91_twi_pdata *at91_twi_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ return (struct at91_twi_pdata *)match->data;
+ }
+ return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
+}
+
+static int at91_twi_probe(struct platform_device *pdev)
+{
+ struct at91_twi_dev *dev;
+ struct resource *mem;
+ int rc;
+ u32 phy_addr;
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+ phy_addr = mem->start;
+
+ dev->pdata = at91_twi_get_driver_data(pdev);
+ if (!dev->pdata)
+ return -ENODEV;
+
+ dev->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
+
+ dev->irq = platform_get_irq(pdev, 0);
+ if (dev->irq < 0)
+ return dev->irq;
+
+ platform_set_drvdata(pdev, dev);
+
+ dev->clk = devm_clk_get(dev->dev, NULL);
+ if (IS_ERR(dev->clk)) {
+ dev_err(dev->dev, "no clock defined\n");
+ return -ENODEV;
+ }
+ clk_prepare_enable(dev->clk);
+
+ snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
+ i2c_set_adapdata(&dev->adapter, dev);
+ dev->adapter.owner = THIS_MODULE;
+ dev->adapter.class = I2C_CLASS_DEPRECATED;
+ dev->adapter.dev.parent = dev->dev;
+ dev->adapter.nr = pdev->id;
+ dev->adapter.timeout = AT91_I2C_TIMEOUT;
+ dev->adapter.dev.of_node = pdev->dev.of_node;
+
+ dev->slave_detected = i2c_detect_slave_mode(&pdev->dev);
+
+ if (dev->slave_detected)
+ rc = at91_twi_probe_slave(pdev, phy_addr, dev);
+ else
+ rc = at91_twi_probe_master(pdev, phy_addr, dev);
+ if (rc)
+ return rc;
+
+ at91_init_twi_bus(dev);
+
+ pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_enable(dev->dev);
+
+ rc = i2c_add_numbered_adapter(&dev->adapter);
+ if (rc) {
+ clk_disable_unprepare(dev->clk);
+
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
+ return rc;
+ }
+
+ dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
+ at91_twi_read(dev, AT91_TWI_VER));
+ return 0;
+}
+
+static int at91_twi_remove(struct platform_device *pdev)
+{
+ struct at91_twi_dev *dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&dev->adapter);
+ clk_disable_unprepare(dev->clk);
+
+ pm_runtime_disable(dev->dev);
+ pm_runtime_set_suspended(dev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int at91_twi_runtime_suspend(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(twi_dev->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int at91_twi_runtime_resume(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(twi_dev->clk);
+}
+
+static int at91_twi_suspend_noirq(struct device *dev)
+{
+ if (!pm_runtime_status_suspended(dev))
+ at91_twi_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int at91_twi_resume_noirq(struct device *dev)
+{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_status_suspended(dev)) {
+ ret = at91_twi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_request_autosuspend(dev);
+
+ at91_init_twi_bus(twi_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops at91_twi_pm = {
+ .suspend_noirq = at91_twi_suspend_noirq,
+ .resume_noirq = at91_twi_resume_noirq,
+ .runtime_suspend = at91_twi_runtime_suspend,
+ .runtime_resume = at91_twi_runtime_resume,
+};
+
+#define at91_twi_pm_ops (&at91_twi_pm)
+#else
+#define at91_twi_pm_ops NULL
+#endif
+
+static struct platform_driver at91_twi_driver = {
+ .probe = at91_twi_probe,
+ .remove = at91_twi_remove,
+ .id_table = at91_twi_devtypes,
+ .driver = {
+ .name = "at91_i2c",
+ .of_match_table = of_match_ptr(atmel_twi_dt_ids),
+ .pm = at91_twi_pm_ops,
+ },
+};
+
+static int __init at91_twi_init(void)
+{
+ return platform_driver_register(&at91_twi_driver);
+}
+
+static void __exit at91_twi_exit(void)
+{
+ platform_driver_unregister(&at91_twi_driver);
+}
+
+subsys_initcall(at91_twi_init);
+module_exit(at91_twi_exit);
+
+MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
+MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91-master.c
index 3f3e8b3bf5ff..e87232f2e708 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
*
@@ -10,11 +11,6 @@
*
* Borrowed heavily from original work by:
* Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/clk.h>
@@ -25,159 +21,16 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
-
-#define DEFAULT_TWI_CLK_HZ 100000 /* max 400 Kbits/s */
-#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
-#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
-#define AUTOSUSPEND_TIMEOUT 2000
-#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
-
-/* AT91 TWI register definitions */
-#define AT91_TWI_CR 0x0000 /* Control Register */
-#define AT91_TWI_START BIT(0) /* Send a Start Condition */
-#define AT91_TWI_STOP BIT(1) /* Send a Stop Condition */
-#define AT91_TWI_MSEN BIT(2) /* Master Transfer Enable */
-#define AT91_TWI_MSDIS BIT(3) /* Master Transfer Disable */
-#define AT91_TWI_SVEN BIT(4) /* Slave Transfer Enable */
-#define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */
-#define AT91_TWI_QUICK BIT(6) /* SMBus quick command */
-#define AT91_TWI_SWRST BIT(7) /* Software Reset */
-#define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */
-#define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */
-#define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */
-#define AT91_TWI_RHRCLR BIT(25) /* Receive Holding Register Clear */
-#define AT91_TWI_LOCKCLR BIT(26) /* Lock Clear */
-#define AT91_TWI_FIFOEN BIT(28) /* FIFO Enable */
-#define AT91_TWI_FIFODIS BIT(29) /* FIFO Disable */
-
-#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
-#define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
-#define AT91_TWI_MREAD BIT(12) /* Master Read Direction */
-
-#define AT91_TWI_IADR 0x000c /* Internal Address Register */
-
-#define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
-#define AT91_TWI_CWGR_HOLD_MAX 0x1f
-#define AT91_TWI_CWGR_HOLD(x) (((x) & AT91_TWI_CWGR_HOLD_MAX) << 24)
-
-#define AT91_TWI_SR 0x0020 /* Status Register */
-#define AT91_TWI_TXCOMP BIT(0) /* Transmission Complete */
-#define AT91_TWI_RXRDY BIT(1) /* Receive Holding Register Ready */
-#define AT91_TWI_TXRDY BIT(2) /* Transmit Holding Register Ready */
-#define AT91_TWI_OVRE BIT(6) /* Overrun Error */
-#define AT91_TWI_UNRE BIT(7) /* Underrun Error */
-#define AT91_TWI_NACK BIT(8) /* Not Acknowledged */
-#define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */
-
-#define AT91_TWI_INT_MASK \
- (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
-
-#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
-#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
-#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
-#define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
-#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
-
-#define AT91_TWI_ACR 0x0040 /* Alternative Command Register */
-#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
-#define AT91_TWI_ACR_DIR BIT(8)
-
-#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
-#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
-#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
-#define AT91_TWI_FMR_RXRDYM(mode) (((mode) & 0x3) << 4)
-#define AT91_TWI_FMR_RXRDYM_MASK (0x3 << 4)
-#define AT91_TWI_ONE_DATA 0x0
-#define AT91_TWI_TWO_DATA 0x1
-#define AT91_TWI_FOUR_DATA 0x2
-
-#define AT91_TWI_FLR 0x0054 /* FIFO Level Register */
-
-#define AT91_TWI_FSR 0x0060 /* FIFO Status Register */
-#define AT91_TWI_FIER 0x0064 /* FIFO Interrupt Enable Register */
-#define AT91_TWI_FIDR 0x0068 /* FIFO Interrupt Disable Register */
-#define AT91_TWI_FIMR 0x006c /* FIFO Interrupt Mask Register */
-
-#define AT91_TWI_VER 0x00fc /* Version Register */
-
-struct at91_twi_pdata {
- unsigned clk_max_div;
- unsigned clk_offset;
- bool has_unre_flag;
- bool has_alt_cmd;
- bool has_hold_field;
- struct at_dma_slave dma_slave;
-};
-
-struct at91_twi_dma {
- struct dma_chan *chan_rx;
- struct dma_chan *chan_tx;
- struct scatterlist sg[2];
- struct dma_async_tx_descriptor *data_desc;
- enum dma_data_direction direction;
- bool buf_mapped;
- bool xfer_in_progress;
-};
-struct at91_twi_dev {
- struct device *dev;
- void __iomem *base;
- struct completion cmd_complete;
- struct clk *clk;
- u8 *buf;
- size_t buf_len;
- struct i2c_msg *msg;
- int irq;
- unsigned imr;
- unsigned transfer_status;
- struct i2c_adapter adapter;
- unsigned twi_cwgr_reg;
- struct at91_twi_pdata *pdata;
- bool use_dma;
- bool use_alt_cmd;
- bool recv_len_abort;
- u32 fifo_size;
- struct at91_twi_dma dma;
-};
-
-static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
-{
- return readl_relaxed(dev->base + reg);
-}
-
-static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
-{
- writel_relaxed(val, dev->base + reg);
-}
-
-static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
-{
- at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
-}
+#include "i2c-at91.h"
-static void at91_twi_irq_save(struct at91_twi_dev *dev)
+void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
- dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
- at91_disable_twi_interrupts(dev);
-}
-
-static void at91_twi_irq_restore(struct at91_twi_dev *dev)
-{
- at91_twi_write(dev, AT91_TWI_IER, dev->imr);
-}
-
-static void at91_init_twi_bus(struct at91_twi_dev *dev)
-{
- at91_disable_twi_interrupts(dev);
- at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
/* FIFO should be enabled immediately after the software reset */
if (dev->fifo_size)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
@@ -190,16 +43,18 @@ static void at91_init_twi_bus(struct at91_twi_dev *dev)
* Calculate symmetric clock as stated in datasheet:
* twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
*/
-static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
+static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
int ckdiv, cdiv, div, hold = 0;
struct at91_twi_pdata *pdata = dev->pdata;
int offset = pdata->clk_offset;
int max_ckdiv = pdata->clk_max_div;
- u32 twd_hold_time_ns = 0;
+ struct i2c_timings timings, *t = &timings;
+
+ i2c_parse_fw_timings(dev->dev, t, true);
div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
- 2 * twi_clk) - offset);
+ 2 * t->bus_freq_hz) - offset);
ckdiv = fls(div >> 8);
cdiv = div >> ckdiv;
@@ -211,15 +66,12 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
}
if (pdata->has_hold_field) {
- of_property_read_u32(dev->dev->of_node, "i2c-sda-hold-time-ns",
- &twd_hold_time_ns);
-
/*
* hold time = HOLD + 3 x T_peripheral_clock
* Use clk rate in kHz to prevent overflows when computing
* hold.
*/
- hold = DIV_ROUND_UP(twd_hold_time_ns
+ hold = DIV_ROUND_UP(t->sda_hold_ns
* (clk_get_rate(dev->clk) / 1000), 1000000);
hold -= 3;
if (hold < 0)
@@ -236,7 +88,7 @@ static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
| AT91_TWI_CWGR_HOLD(hold);
dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns)\n",
- cdiv, ckdiv, hold, twd_hold_time_ns);
+ cdiv, ckdiv, hold, t->sda_hold_ns);
}
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
@@ -833,124 +685,6 @@ static const struct i2c_algorithm at91_twi_algorithm = {
.functionality = at91_twi_func,
};
-static struct at91_twi_pdata at91rm9200_config = {
- .clk_max_div = 5,
- .clk_offset = 3,
- .has_unre_flag = true,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9261_config = {
- .clk_max_div = 5,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9260_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9g20_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata at91sam9g10_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static const struct platform_device_id at91_twi_devtypes[] = {
- {
- .name = "i2c-at91rm9200",
- .driver_data = (unsigned long) &at91rm9200_config,
- }, {
- .name = "i2c-at91sam9261",
- .driver_data = (unsigned long) &at91sam9261_config,
- }, {
- .name = "i2c-at91sam9260",
- .driver_data = (unsigned long) &at91sam9260_config,
- }, {
- .name = "i2c-at91sam9g20",
- .driver_data = (unsigned long) &at91sam9g20_config,
- }, {
- .name = "i2c-at91sam9g10",
- .driver_data = (unsigned long) &at91sam9g10_config,
- }, {
- /* sentinel */
- }
-};
-
-#if defined(CONFIG_OF)
-static struct at91_twi_pdata at91sam9x5_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = false,
-};
-
-static struct at91_twi_pdata sama5d4_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = false,
- .has_alt_cmd = false,
- .has_hold_field = true,
-};
-
-static struct at91_twi_pdata sama5d2_config = {
- .clk_max_div = 7,
- .clk_offset = 4,
- .has_unre_flag = true,
- .has_alt_cmd = true,
- .has_hold_field = true,
-};
-
-static const struct of_device_id atmel_twi_dt_ids[] = {
- {
- .compatible = "atmel,at91rm9200-i2c",
- .data = &at91rm9200_config,
- } , {
- .compatible = "atmel,at91sam9260-i2c",
- .data = &at91sam9260_config,
- } , {
- .compatible = "atmel,at91sam9261-i2c",
- .data = &at91sam9261_config,
- } , {
- .compatible = "atmel,at91sam9g20-i2c",
- .data = &at91sam9g20_config,
- } , {
- .compatible = "atmel,at91sam9g10-i2c",
- .data = &at91sam9g10_config,
- }, {
- .compatible = "atmel,at91sam9x5-i2c",
- .data = &at91sam9x5_config,
- }, {
- .compatible = "atmel,sama5d4-i2c",
- .data = &sama5d4_config,
- }, {
- .compatible = "atmel,sama5d2-i2c",
- .data = &sama5d2_config,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
-#endif
-
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
int ret = 0;
@@ -1033,74 +767,24 @@ error:
return ret;
}
-static struct at91_twi_pdata *at91_twi_get_driver_data(
- struct platform_device *pdev)
+int at91_twi_probe_master(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
{
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
- if (!match)
- return NULL;
- return (struct at91_twi_pdata *)match->data;
- }
- return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
-}
-
-static int at91_twi_probe(struct platform_device *pdev)
-{
- struct at91_twi_dev *dev;
- struct resource *mem;
int rc;
- u32 phy_addr;
- u32 bus_clk_rate;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
init_completion(&dev->cmd_complete);
- dev->dev = &pdev->dev;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
- phy_addr = mem->start;
-
- dev->pdata = at91_twi_get_driver_data(pdev);
- if (!dev->pdata)
- return -ENODEV;
-
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
- dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq < 0)
- return dev->irq;
rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
- dev_name(dev->dev), dev);
+ dev_name(dev->dev), dev);
if (rc) {
dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
return rc;
}
- platform_set_drvdata(pdev, dev);
-
- dev->clk = devm_clk_get(dev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(dev->dev, "no clock defined\n");
- return -ENODEV;
- }
- rc = clk_prepare_enable(dev->clk);
- if (rc)
- return rc;
-
if (dev->dev->of_node) {
rc = at91_twi_configure_dma(dev, phy_addr);
- if (rc == -EPROBE_DEFER) {
- clk_disable_unprepare(dev->clk);
+ if (rc == -EPROBE_DEFER)
return rc;
- }
}
if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
@@ -1108,144 +792,10 @@ static int at91_twi_probe(struct platform_device *pdev)
dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
}
- rc = of_property_read_u32(dev->dev->of_node, "clock-frequency",
- &bus_clk_rate);
- if (rc)
- bus_clk_rate = DEFAULT_TWI_CLK_HZ;
-
- at91_calc_twi_clock(dev, bus_clk_rate);
- at91_init_twi_bus(dev);
+ at91_calc_twi_clock(dev);
- snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
- i2c_set_adapdata(&dev->adapter, dev);
- dev->adapter.owner = THIS_MODULE;
- dev->adapter.class = I2C_CLASS_DEPRECATED;
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.quirks = &at91_twi_quirks;
- dev->adapter.dev.parent = dev->dev;
- dev->adapter.nr = pdev->id;
- dev->adapter.timeout = AT91_I2C_TIMEOUT;
- dev->adapter.dev.of_node = pdev->dev.of_node;
-
- pm_runtime_set_autosuspend_delay(dev->dev, AUTOSUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_active(dev->dev);
- pm_runtime_enable(dev->dev);
-
- rc = i2c_add_numbered_adapter(&dev->adapter);
- if (rc) {
- clk_disable_unprepare(dev->clk);
-
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
-
- return rc;
- }
-
- dev_info(dev->dev, "AT91 i2c bus driver (hw version: %#x).\n",
- at91_twi_read(dev, AT91_TWI_VER));
- return 0;
-}
-
-static int at91_twi_remove(struct platform_device *pdev)
-{
- struct at91_twi_dev *dev = platform_get_drvdata(pdev);
-
- i2c_del_adapter(&dev->adapter);
- clk_disable_unprepare(dev->clk);
-
- pm_runtime_disable(dev->dev);
- pm_runtime_set_suspended(dev->dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int at91_twi_runtime_suspend(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
-
- clk_disable_unprepare(twi_dev->clk);
-
- pinctrl_pm_select_sleep_state(dev);
-
- return 0;
-}
-
-static int at91_twi_runtime_resume(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
-
- pinctrl_pm_select_default_state(dev);
-
- return clk_prepare_enable(twi_dev->clk);
-}
-
-static int at91_twi_suspend_noirq(struct device *dev)
-{
- if (!pm_runtime_status_suspended(dev))
- at91_twi_runtime_suspend(dev);
-
- return 0;
-}
-
-static int at91_twi_resume_noirq(struct device *dev)
-{
- struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
- int ret;
-
- if (!pm_runtime_status_suspended(dev)) {
- ret = at91_twi_runtime_resume(dev);
- if (ret)
- return ret;
- }
-
- pm_runtime_mark_last_busy(dev);
- pm_request_autosuspend(dev);
-
- at91_init_twi_bus(twi_dev);
return 0;
}
-
-static const struct dev_pm_ops at91_twi_pm = {
- .suspend_noirq = at91_twi_suspend_noirq,
- .resume_noirq = at91_twi_resume_noirq,
- .runtime_suspend = at91_twi_runtime_suspend,
- .runtime_resume = at91_twi_runtime_resume,
-};
-
-#define at91_twi_pm_ops (&at91_twi_pm)
-#else
-#define at91_twi_pm_ops NULL
-#endif
-
-static struct platform_driver at91_twi_driver = {
- .probe = at91_twi_probe,
- .remove = at91_twi_remove,
- .id_table = at91_twi_devtypes,
- .driver = {
- .name = "at91_i2c",
- .of_match_table = of_match_ptr(atmel_twi_dt_ids),
- .pm = at91_twi_pm_ops,
- },
-};
-
-static int __init at91_twi_init(void)
-{
- return platform_driver_register(&at91_twi_driver);
-}
-
-static void __exit at91_twi_exit(void)
-{
- platform_driver_unregister(&at91_twi_driver);
-}
-
-subsys_initcall(at91_twi_init);
-module_exit(at91_twi_exit);
-
-MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
-MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
new file mode 100644
index 000000000000..d6eeea5166c0
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i2c slave support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2017 Juergen Fitschen <me@jue.yt>
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+
+#include "i2c-at91.h"
+
+static irqreturn_t atmel_twi_interrupt_slave(int irq, void *dev_id)
+{
+ struct at91_twi_dev *dev = dev_id;
+ const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
+ const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);
+ u8 value;
+
+ if (!irqstatus)
+ return IRQ_NONE;
+
+ /* slave address has been detected on I2C bus */
+ if (irqstatus & AT91_TWI_SVACC) {
+ if (status & AT91_TWI_SVREAD) {
+ i2c_slave_event(dev->slave,
+ I2C_SLAVE_READ_REQUESTED, &value);
+ writeb_relaxed(value, dev->base + AT91_TWI_THR);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_TXRDY | AT91_TWI_EOSACC);
+ } else {
+ i2c_slave_event(dev->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
+ at91_twi_write(dev, AT91_TWI_IER,
+ AT91_TWI_RXRDY | AT91_TWI_EOSACC);
+ }
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_SVACC);
+ }
+
+ /* byte transmitted to remote master */
+ if (irqstatus & AT91_TWI_TXRDY) {
+ i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &value);
+ writeb_relaxed(value, dev->base + AT91_TWI_THR);
+ }
+
+ /* byte received from remote master */
+ if (irqstatus & AT91_TWI_RXRDY) {
+ value = readb_relaxed(dev->base + AT91_TWI_RHR);
+ i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
+ }
+
+ /* master sent stop */
+ if (irqstatus & AT91_TWI_EOSACC) {
+ at91_twi_write(dev, AT91_TWI_IDR,
+ AT91_TWI_TXRDY | AT91_TWI_RXRDY | AT91_TWI_EOSACC);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC);
+ i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &value);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int at91_reg_slave(struct i2c_client *slave)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter);
+
+ if (dev->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ /* Make sure twi_clk doesn't get turned off! */
+ pm_runtime_get_sync(dev->dev);
+
+ dev->slave = slave;
+ dev->smr = AT91_TWI_SMR_SADR(slave->addr);
+
+ at91_init_twi_bus(dev);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_SVACC);
+
+ dev_info(dev->dev, "entered slave mode (ADR=%d)\n", slave->addr);
+
+ return 0;
+}
+
+static int at91_unreg_slave(struct i2c_client *slave)
+{
+ struct at91_twi_dev *dev = i2c_get_adapdata(slave->adapter);
+
+ WARN_ON(!dev->slave);
+
+ dev_info(dev->dev, "leaving slave mode\n");
+
+ dev->slave = NULL;
+ dev->smr = 0;
+
+ at91_init_twi_bus(dev);
+
+ pm_runtime_put(dev->dev);
+
+ return 0;
+}
+
+static u32 at91_twi_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
+ | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+}
+
+static const struct i2c_algorithm at91_twi_algorithm_slave = {
+ .reg_slave = at91_reg_slave,
+ .unreg_slave = at91_unreg_slave,
+ .functionality = at91_twi_func,
+};
+
+int at91_twi_probe_slave(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
+{
+ int rc;
+
+ rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt_slave,
+ 0, dev_name(dev->dev), dev);
+ if (rc) {
+ dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
+ return rc;
+ }
+
+ dev->adapter.algo = &at91_twi_algorithm_slave;
+
+ return 0;
+}
+
+void at91_init_twi_bus_slave(struct at91_twi_dev *dev)
+{
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSDIS);
+ if (dev->slave_detected && dev->smr) {
+ at91_twi_write(dev, AT91_TWI_SMR, dev->smr);
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVEN);
+ }
+}
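
The new i2c-at91-slave.c only implements the controller side of slave mode; the bytes actually served to the remote master come from a separate slave backend that registers a callback through i2c_slave_register(). Below is a minimal sketch of such a backend, loosely modeled on the in-tree i2c-slave-eeprom driver; the demo_* names and the single-byte behaviour are illustrative and not part of this series.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical single-byte slave backend; all demo_* names are illustrative. */
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>

static u8 demo_reg;	/* one byte of state exposed to the remote master */

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		demo_reg = *val;	/* byte written by the remote master */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = demo_reg;	/* byte to transmit to the remote master */
		break;
	default:			/* WRITE_REQUESTED, STOP: nothing to do */
		break;
	}
	return 0;
}

static int demo_slave_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
{
	return i2c_slave_register(client, demo_slave_cb);
}

static int demo_slave_remove(struct i2c_client *client)
{
	return i2c_slave_unregister(client);
}

static const struct i2c_device_id demo_slave_id[] = {
	{ "demo-slave", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, demo_slave_id);

static struct i2c_driver demo_slave_driver = {
	.driver = {
		.name = "demo-slave",
	},
	.probe = demo_slave_probe,
	.remove = demo_slave_remove,
	.id_table = demo_slave_id,
};
module_i2c_driver(demo_slave_driver);

MODULE_LICENSE("GPL");

The bus driver's interrupt handler above then drives this callback through i2c_slave_event() for each address match, byte and stop condition.
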
diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h
new file mode 100644
index 000000000000..499b506f6128
--- /dev/null
+++ b/drivers/i2c/busses/i2c-at91.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
+ *
+ * Copyright (C) 2011 Weinmann Medical GmbH
+ * Author: Nikolaus Voss <n.voss@weinmann.de>
+ *
+ * Evolved from original work by:
+ * Copyright (C) 2004 Rick Bronson
+ * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
+ *
+ * Borrowed heavily from original work by:
+ * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/platform_device.h>
+
+#define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
+#define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
+#define AUTOSUSPEND_TIMEOUT 2000
+#define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256
+
+/* AT91 TWI register definitions */
+#define AT91_TWI_CR 0x0000 /* Control Register */
+#define AT91_TWI_START BIT(0) /* Send a Start Condition */
+#define AT91_TWI_STOP BIT(1) /* Send a Stop Condition */
+#define AT91_TWI_MSEN BIT(2) /* Master Transfer Enable */
+#define AT91_TWI_MSDIS BIT(3) /* Master Transfer Disable */
+#define AT91_TWI_SVEN BIT(4) /* Slave Transfer Enable */
+#define AT91_TWI_SVDIS BIT(5) /* Slave Transfer Disable */
+#define AT91_TWI_QUICK BIT(6) /* SMBus quick command */
+#define AT91_TWI_SWRST BIT(7) /* Software Reset */
+#define AT91_TWI_ACMEN BIT(16) /* Alternative Command Mode Enable */
+#define AT91_TWI_ACMDIS BIT(17) /* Alternative Command Mode Disable */
+#define AT91_TWI_THRCLR BIT(24) /* Transmit Holding Register Clear */
+#define AT91_TWI_RHRCLR BIT(25) /* Receive Holding Register Clear */
+#define AT91_TWI_LOCKCLR BIT(26) /* Lock Clear */
+#define AT91_TWI_FIFOEN BIT(28) /* FIFO Enable */
+#define AT91_TWI_FIFODIS BIT(29) /* FIFO Disable */
+
+#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
+#define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
+#define AT91_TWI_MREAD BIT(12) /* Master Read Direction */
+
+#define AT91_TWI_SMR 0x0008 /* Slave Mode Register */
+#define AT91_TWI_SMR_SADR_MAX 0x007f
+#define AT91_TWI_SMR_SADR(x) (((x) & AT91_TWI_SMR_SADR_MAX) << 16)
+
+#define AT91_TWI_IADR 0x000c /* Internal Address Register */
+
+#define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
+#define AT91_TWI_CWGR_HOLD_MAX 0x1f
+#define AT91_TWI_CWGR_HOLD(x) (((x) & AT91_TWI_CWGR_HOLD_MAX) << 24)
+
+#define AT91_TWI_SR 0x0020 /* Status Register */
+#define AT91_TWI_TXCOMP BIT(0) /* Transmission Complete */
+#define AT91_TWI_RXRDY BIT(1) /* Receive Holding Register Ready */
+#define AT91_TWI_TXRDY BIT(2) /* Transmit Holding Register Ready */
+#define AT91_TWI_SVREAD BIT(3) /* Slave Read */
+#define AT91_TWI_SVACC BIT(4) /* Slave Access */
+#define AT91_TWI_OVRE BIT(6) /* Overrun Error */
+#define AT91_TWI_UNRE BIT(7) /* Underrun Error */
+#define AT91_TWI_NACK BIT(8) /* Not Acknowledged */
+#define AT91_TWI_EOSACC BIT(11) /* End Of Slave Access */
+#define AT91_TWI_LOCK BIT(23) /* TWI Lock due to Frame Errors */
+
+#define AT91_TWI_INT_MASK \
+ (AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK \
+ | AT91_TWI_SVACC | AT91_TWI_EOSACC)
+
+#define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
+#define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
+#define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
+#define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
+#define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
+
+#define AT91_TWI_ACR 0x0040 /* Alternative Command Register */
+#define AT91_TWI_ACR_DATAL(len) ((len) & 0xff)
+#define AT91_TWI_ACR_DIR BIT(8)
+
+#define AT91_TWI_FMR 0x0050 /* FIFO Mode Register */
+#define AT91_TWI_FMR_TXRDYM(mode) (((mode) & 0x3) << 0)
+#define AT91_TWI_FMR_TXRDYM_MASK (0x3 << 0)
+#define AT91_TWI_FMR_RXRDYM(mode) (((mode) & 0x3) << 4)
+#define AT91_TWI_FMR_RXRDYM_MASK (0x3 << 4)
+#define AT91_TWI_ONE_DATA 0x0
+#define AT91_TWI_TWO_DATA 0x1
+#define AT91_TWI_FOUR_DATA 0x2
+
+#define AT91_TWI_FLR 0x0054 /* FIFO Level Register */
+
+#define AT91_TWI_FSR 0x0060 /* FIFO Status Register */
+#define AT91_TWI_FIER 0x0064 /* FIFO Interrupt Enable Register */
+#define AT91_TWI_FIDR 0x0068 /* FIFO Interrupt Disable Register */
+#define AT91_TWI_FIMR 0x006c /* FIFO Interrupt Mask Register */
+
+#define AT91_TWI_VER 0x00fc /* Version Register */
+
+struct at91_twi_pdata {
+ unsigned clk_max_div;
+ unsigned clk_offset;
+ bool has_unre_flag;
+ bool has_alt_cmd;
+ bool has_hold_field;
+ struct at_dma_slave dma_slave;
+};
+
+struct at91_twi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sg[2];
+ struct dma_async_tx_descriptor *data_desc;
+ enum dma_data_direction direction;
+ bool buf_mapped;
+ bool xfer_in_progress;
+};
+
+struct at91_twi_dev {
+ struct device *dev;
+ void __iomem *base;
+ struct completion cmd_complete;
+ struct clk *clk;
+ u8 *buf;
+ size_t buf_len;
+ struct i2c_msg *msg;
+ int irq;
+ unsigned imr;
+ unsigned transfer_status;
+ struct i2c_adapter adapter;
+ unsigned twi_cwgr_reg;
+ struct at91_twi_pdata *pdata;
+ bool use_dma;
+ bool use_alt_cmd;
+ bool recv_len_abort;
+ u32 fifo_size;
+ struct at91_twi_dma dma;
+ bool slave_detected;
+#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
+ unsigned smr;
+ struct i2c_client *slave;
+#endif
+};
+
+unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg);
+void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val);
+void at91_disable_twi_interrupts(struct at91_twi_dev *dev);
+void at91_twi_irq_save(struct at91_twi_dev *dev);
+void at91_twi_irq_restore(struct at91_twi_dev *dev);
+void at91_init_twi_bus(struct at91_twi_dev *dev);
+
+void at91_init_twi_bus_master(struct at91_twi_dev *dev);
+int at91_twi_probe_master(struct platform_device *pdev, u32 phy_addr,
+ struct at91_twi_dev *dev);
+
+#ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL
+void at91_init_twi_bus_slave(struct at91_twi_dev *dev);
+int at91_twi_probe_slave(struct platform_device *pdev, u32 phy_addr,
+ struct at91_twi_dev *dev);
+
+#else
+static inline void at91_init_twi_bus_slave(struct at91_twi_dev *dev) {}
+static inline int at91_twi_probe_slave(struct platform_device *pdev,
+ u32 phy_addr, struct at91_twi_dev *dev)
+{
+ return -EINVAL;
+}
+
+#endif
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index bf564391091f..1c7b41f45c83 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -99,6 +99,7 @@
* @adapter: core i2c abstraction
* @i2c_clk: clock reference for i2c input clock
* @bus_clk_rate: current i2c bus clock rate
+ * @last: a flag indicating if this is the last message in a transfer
*/
struct axxia_i2c_dev {
void __iomem *base;
@@ -112,6 +113,7 @@ struct axxia_i2c_dev {
struct i2c_adapter adapter;
struct clk *i2c_clk;
u32 bus_clk_rate;
+ bool last;
};
static void i2c_int_disable(struct axxia_i2c_dev *idev, u32 mask)
@@ -324,15 +326,14 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
/* Stop completed */
i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SNS) {
+ } else if (status & (MST_STATUS_SNS | MST_STATUS_SS)) {
/* Transfer done */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
+ int mask = idev->last ? ~0 : ~MST_STATUS_TSS;
+
+ i2c_int_disable(idev, mask);
if (i2c_m_rd(idev->msg_r) && idev->msg_xfrd_r < idev->msg_r->len)
axxia_i2c_empty_rx_fifo(idev);
complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SS) {
- /* Auto/Sequence transfer done */
- complete(&idev->msg_complete);
} else if (status & MST_STATUS_TSS) {
/* Transfer timeout */
idev->msg_err = -ETIMEDOUT;
@@ -405,6 +406,7 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
idev->msg_r = &msgs[1];
idev->msg_xfrd = 0;
idev->msg_xfrd_r = 0;
+ idev->last = true;
axxia_i2c_fill_tx_fifo(idev);
writel(CMD_SEQUENCE, idev->base + MST_COMMAND);
@@ -415,10 +417,6 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
time_left = wait_for_completion_timeout(&idev->msg_complete,
I2C_XFER_TIMEOUT);
- i2c_int_disable(idev, int_mask);
-
- axxia_i2c_empty_rx_fifo(idev);
-
if (idev->msg_err == -ENXIO) {
if (axxia_i2c_handle_seq_nak(idev))
axxia_i2c_init(idev);
@@ -438,9 +436,10 @@ static int axxia_i2c_xfer_seq(struct axxia_i2c_dev *idev, struct i2c_msg msgs[])
return idev->msg_err;
}
-static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
+static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg,
+ bool last)
{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SNS;
+ u32 int_mask = MST_STATUS_ERR;
u32 rx_xfer, tx_xfer;
unsigned long time_left;
unsigned int wt_value;
@@ -449,6 +448,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
idev->msg_r = msg;
idev->msg_xfrd = 0;
idev->msg_xfrd_r = 0;
+ idev->last = last;
reinit_completion(&idev->msg_complete);
axxia_i2c_set_addr(idev, msg);
@@ -478,8 +478,13 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
if (idev->msg_err)
goto out;
- /* Start manual mode */
- writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ if (!last) {
+ writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ int_mask |= MST_STATUS_SNS;
+ } else {
+ writel(CMD_AUTO, idev->base + MST_COMMAND);
+ int_mask |= MST_STATUS_SS;
+ }
writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
@@ -507,28 +512,6 @@ out:
return idev->msg_err;
}
-static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
-{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
- unsigned long time_left;
-
- reinit_completion(&idev->msg_complete);
-
- /* Issue stop */
- writel(0xb, idev->base + MST_COMMAND);
- i2c_int_enable(idev, int_mask);
- time_left = wait_for_completion_timeout(&idev->msg_complete,
- I2C_STOP_TIMEOUT);
- i2c_int_disable(idev, int_mask);
- if (time_left == 0)
- return -ETIMEDOUT;
-
- if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
- dev_warn(idev->dev, "busy after stop\n");
-
- return 0;
-}
-
/* This function checks if the msgs[] array contains messages compatible with
* Sequence mode of operation. This mode assumes there will be exactly one
* write of non-zero length followed by exactly one read of non-zero length,
@@ -558,9 +541,7 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
i2c_int_enable(idev, MST_STATUS_TSS);
for (i = 0; ret == 0 && i < num; ++i)
- ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
-
- axxia_i2c_stop(idev);
+ ret = axxia_i2c_xfer_msg(idev, &msgs[i], i == (num - 1));
return ret ? : i;
}
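
For context, the "sequence" path this axxia change optimizes corresponds to the common write-then-read transaction shape issued by client drivers. A hedged sketch of such a transfer follows, assuming a generic client with placeholder names (demo_read_reg, adap, addr); with this patch the controller appends the STOP for the final message itself (CMD_AUTO) instead of requiring a separate manual stop.

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/* Hypothetical helper; adap, addr and reg are placeholders. */
static int demo_read_reg(struct i2c_adapter *adap, u16 addr, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr  = addr,
			.flags = 0,		/* first: write the register index */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = addr,
			.flags = I2C_M_RD,	/* second and last: read one byte */
			.len   = 1,
			.buf   = val,
		},
	};
	int ret;

	/*
	 * Exactly one write followed by one read is the shape the axxia
	 * driver maps onto its hardware sequence command; the read is the
	 * last message, so the controller issues the STOP on its own.
	 */
	ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	if (ret < 0)
		return ret;

	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}
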
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 4c8c3bc4669c..a845b8decac8 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -17,17 +17,38 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define IDM_CTRL_DIRECT_OFFSET 0x00
#define CFG_OFFSET 0x00
#define CFG_RESET_SHIFT 31
#define CFG_EN_SHIFT 30
+#define CFG_SLAVE_ADDR_0_SHIFT 28
#define CFG_M_RETRY_CNT_SHIFT 16
#define CFG_M_RETRY_CNT_MASK 0x0f
#define TIM_CFG_OFFSET 0x04
#define TIM_CFG_MODE_400_SHIFT 31
+#define TIM_RAND_SLAVE_STRETCH_SHIFT 24
+#define TIM_RAND_SLAVE_STRETCH_MASK 0x7f
+#define TIM_PERIODIC_SLAVE_STRETCH_SHIFT 16
+#define TIM_PERIODIC_SLAVE_STRETCH_MASK 0x7f
+
+#define S_CFG_SMBUS_ADDR_OFFSET 0x08
+#define S_CFG_EN_NIC_SMB_ADDR3_SHIFT 31
+#define S_CFG_NIC_SMB_ADDR3_SHIFT 24
+#define S_CFG_NIC_SMB_ADDR3_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR2_SHIFT 23
+#define S_CFG_NIC_SMB_ADDR2_SHIFT 16
+#define S_CFG_NIC_SMB_ADDR2_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR1_SHIFT 15
+#define S_CFG_NIC_SMB_ADDR1_SHIFT 8
+#define S_CFG_NIC_SMB_ADDR1_MASK 0x7f
+#define S_CFG_EN_NIC_SMB_ADDR0_SHIFT 7
+#define S_CFG_NIC_SMB_ADDR0_SHIFT 0
+#define S_CFG_NIC_SMB_ADDR0_MASK 0x7f
#define M_FIFO_CTRL_OFFSET 0x0c
#define M_FIFO_RX_FLUSH_SHIFT 31
@@ -37,6 +58,14 @@
#define M_FIFO_RX_THLD_SHIFT 8
#define M_FIFO_RX_THLD_MASK 0x3f
+#define S_FIFO_CTRL_OFFSET 0x10
+#define S_FIFO_RX_FLUSH_SHIFT 31
+#define S_FIFO_TX_FLUSH_SHIFT 30
+#define S_FIFO_RX_CNT_SHIFT 16
+#define S_FIFO_RX_CNT_MASK 0x7f
+#define S_FIFO_RX_THLD_SHIFT 8
+#define S_FIFO_RX_THLD_MASK 0x3f
+
#define M_CMD_OFFSET 0x30
#define M_CMD_START_BUSY_SHIFT 31
#define M_CMD_STATUS_SHIFT 25
@@ -46,6 +75,8 @@
#define M_CMD_STATUS_NACK_ADDR 0x2
#define M_CMD_STATUS_NACK_DATA 0x3
#define M_CMD_STATUS_TIMEOUT 0x4
+#define M_CMD_STATUS_FIFO_UNDERRUN 0x5
+#define M_CMD_STATUS_RX_FIFO_FULL 0x6
#define M_CMD_PROTOCOL_SHIFT 9
#define M_CMD_PROTOCOL_MASK 0xf
#define M_CMD_PROTOCOL_BLK_WR 0x7
@@ -54,17 +85,36 @@
#define M_CMD_RD_CNT_SHIFT 0
#define M_CMD_RD_CNT_MASK 0xff
+#define S_CMD_OFFSET 0x34
+#define S_CMD_START_BUSY_SHIFT 31
+#define S_CMD_STATUS_SHIFT 23
+#define S_CMD_STATUS_MASK 0x07
+#define S_CMD_STATUS_SUCCESS 0x0
+#define S_CMD_STATUS_TIMEOUT 0x5
+
#define IE_OFFSET 0x38
#define IE_M_RX_FIFO_FULL_SHIFT 31
#define IE_M_RX_THLD_SHIFT 30
#define IE_M_START_BUSY_SHIFT 28
#define IE_M_TX_UNDERRUN_SHIFT 27
+#define IE_S_RX_FIFO_FULL_SHIFT 26
+#define IE_S_RX_THLD_SHIFT 25
+#define IE_S_RX_EVENT_SHIFT 24
+#define IE_S_START_BUSY_SHIFT 23
+#define IE_S_TX_UNDERRUN_SHIFT 22
+#define IE_S_RD_EVENT_SHIFT 21
#define IS_OFFSET 0x3c
#define IS_M_RX_FIFO_FULL_SHIFT 31
#define IS_M_RX_THLD_SHIFT 30
#define IS_M_START_BUSY_SHIFT 28
#define IS_M_TX_UNDERRUN_SHIFT 27
+#define IS_S_RX_FIFO_FULL_SHIFT 26
+#define IS_S_RX_THLD_SHIFT 25
+#define IS_S_RX_EVENT_SHIFT 24
+#define IS_S_START_BUSY_SHIFT 23
+#define IS_S_TX_UNDERRUN_SHIFT 22
+#define IS_S_RD_EVENT_SHIFT 21
#define M_TX_OFFSET 0x40
#define M_TX_WR_STATUS_SHIFT 31
@@ -78,19 +128,71 @@
#define M_RX_DATA_SHIFT 0
#define M_RX_DATA_MASK 0xff
+#define S_TX_OFFSET 0x48
+#define S_TX_WR_STATUS_SHIFT 31
+#define S_TX_DATA_SHIFT 0
+#define S_TX_DATA_MASK 0xff
+
+#define S_RX_OFFSET 0x4c
+#define S_RX_STATUS_SHIFT 30
+#define S_RX_STATUS_MASK 0x03
+#define S_RX_PEC_ERR_SHIFT 29
+#define S_RX_DATA_SHIFT 0
+#define S_RX_DATA_MASK 0xff
+
#define I2C_TIMEOUT_MSEC 50000
#define M_TX_RX_FIFO_SIZE 64
+#define M_RX_FIFO_MAX_THLD_VALUE (M_TX_RX_FIFO_SIZE - 1)
+
+#define M_RX_MAX_READ_LEN 255
+#define M_RX_FIFO_THLD_VALUE 50
+
+#define IE_M_ALL_INTERRUPT_SHIFT 27
+#define IE_M_ALL_INTERRUPT_MASK 0x1e
+
+#define SLAVE_READ_WRITE_BIT_MASK 0x1
+#define SLAVE_READ_WRITE_BIT_SHIFT 0x1
+#define SLAVE_MAX_SIZE_TRANSACTION 64
+#define SLAVE_CLOCK_STRETCH_TIME 25
+
+#define IE_S_ALL_INTERRUPT_SHIFT 21
+#define IE_S_ALL_INTERRUPT_MASK 0x3f
+
+enum i2c_slave_read_status {
+ I2C_SLAVE_RX_FIFO_EMPTY = 0,
+ I2C_SLAVE_RX_START,
+ I2C_SLAVE_RX_DATA,
+ I2C_SLAVE_RX_END,
+};
+
+enum i2c_slave_xfer_dir {
+ I2C_SLAVE_DIR_READ = 0,
+ I2C_SLAVE_DIR_WRITE,
+ I2C_SLAVE_DIR_NONE,
+};
enum bus_speed_index {
I2C_SPD_100K = 0,
I2C_SPD_400K,
};
+enum bcm_iproc_i2c_type {
+ IPROC_I2C,
+ IPROC_I2C_NIC
+};
+
struct bcm_iproc_i2c_dev {
struct device *device;
+ enum bcm_iproc_i2c_type type;
int irq;
void __iomem *base;
+ void __iomem *idm_base;
+
+ u32 ape_addr_mask;
+
+ /* lock for indirect access through IDM */
+ spinlock_t idm_lock;
struct i2c_adapter adapter;
unsigned int bus_speed;
@@ -100,68 +202,332 @@ struct bcm_iproc_i2c_dev {
struct i2c_msg *msg;
+ struct i2c_client *slave;
+ enum i2c_slave_xfer_dir xfer_dir;
+
/* bytes that have been transferred */
unsigned int tx_bytes;
+ /* bytes that have been read */
+ unsigned int rx_bytes;
+ unsigned int thld_bytes;
};
/*
* Can be expanded in the future if more interrupt status bits are utilized
*/
-#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT))
+#define ISR_MASK (BIT(IS_M_START_BUSY_SHIFT) | BIT(IS_M_TX_UNDERRUN_SHIFT)\
+ | BIT(IS_M_RX_THLD_SHIFT))
-static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+#define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
+ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT))
+
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+ bool enable);
+
+static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset)
{
- struct bcm_iproc_i2c_dev *iproc_i2c = data;
- u32 status = readl(iproc_i2c->base + IS_OFFSET);
+ u32 val;
- status &= ISR_MASK;
+ if (iproc_i2c->idm_base) {
+ spin_lock(&iproc_i2c->idm_lock);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ val = readl(iproc_i2c->base + offset);
+ spin_unlock(&iproc_i2c->idm_lock);
+ } else {
+ val = readl(iproc_i2c->base + offset);
+ }
- if (!status)
- return IRQ_NONE;
+ return val;
+}
- /* TX FIFO is empty and we have more data to send */
- if (status & BIT(IS_M_TX_UNDERRUN_SHIFT)) {
- struct i2c_msg *msg = iproc_i2c->msg;
- unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
- unsigned int i;
- u32 val;
-
- /* can only fill up to the FIFO size */
- tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
- for (i = 0; i < tx_bytes; i++) {
- /* start from where we left over */
- unsigned int idx = iproc_i2c->tx_bytes + i;
+static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset, u32 val)
+{
+ if (iproc_i2c->idm_base) {
+ spin_lock(&iproc_i2c->idm_lock);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ writel(val, iproc_i2c->base + offset);
+ spin_unlock(&iproc_i2c->idm_lock);
+ } else {
+ writel(val, iproc_i2c->base + offset);
+ }
+}
- val = msg->buf[idx];
+static void bcm_iproc_i2c_slave_init(
+ struct bcm_iproc_i2c_dev *iproc_i2c, bool need_reset)
+{
+ u32 val;
- /* mark the last byte */
- if (idx == msg->len - 1) {
- u32 tmp;
+ if (need_reset) {
+ /* put controller in reset */
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ val |= BIT(CFG_RESET_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
- val |= BIT(M_TX_WR_STATUS_SHIFT);
+ /* wait 100 usec per spec */
+ udelay(100);
+
+ /* bring controller out of reset */
+ val &= ~(BIT(CFG_RESET_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
+ }
+
+ /* flush TX/RX FIFOs */
+ val = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
+
+ /* Maximum slave stretch time */
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~(TIM_RAND_SLAVE_STRETCH_MASK << TIM_RAND_SLAVE_STRETCH_SHIFT);
+ val |= (SLAVE_CLOCK_STRETCH_TIME << TIM_RAND_SLAVE_STRETCH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
+
+ /* Configure the slave address */
+ val = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ val |= BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ val &= ~(S_CFG_NIC_SMB_ADDR3_MASK << S_CFG_NIC_SMB_ADDR3_SHIFT);
+ val |= (iproc_i2c->slave->addr << S_CFG_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, val);
+
+ /* clear all pending slave interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE);
+
+ /* Enable interrupt register for any READ event */
+ val = BIT(IE_S_RD_EVENT_SHIFT);
+ /* Enable interrupt register to indicate a valid byte in receive fifo */
+ val |= BIT(IE_S_RX_EVENT_SHIFT);
+ /* Enable interrupt register for the Slave BUSY command */
+ val |= BIT(IE_S_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+}
+
+static void bcm_iproc_i2c_check_slave_status(
+ struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ u32 val;
+
+ val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+ val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+
+ if (val == S_CMD_STATUS_TIMEOUT) {
+ dev_err(iproc_i2c->device, "slave random stretch time timeout\n");
+
+ /* re-initialize i2c for recovery */
+ bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+ bcm_iproc_i2c_slave_init(iproc_i2c, true);
+ bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+ }
+}
+
+static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ u8 value;
+ u32 val;
+ u32 rd_status;
+ u32 tmp;
+
+ /* Start of a transaction: check the address and record the direction */
+ if (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_NONE) {
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ rd_status = (tmp >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
+ /* check whether this marks the start of a new request */
+ if (((rd_status == I2C_SLAVE_RX_START) &&
+ (status & BIT(IS_S_RX_EVENT_SHIFT))) ||
+ ((rd_status == I2C_SLAVE_RX_END) &&
+ (status & BIT(IS_S_RD_EVENT_SHIFT)))) {
+
+ /* The last bit is the W/R bit.
+ * If it is 1, this is a read request (by the master).
+ */
+ iproc_i2c->xfer_dir = tmp & SLAVE_READ_WRITE_BIT_MASK;
+ if (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_WRITE)
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_REQUESTED, &value);
+ else
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &value);
+ }
+ }
+
+ /* read request from master */
+ if ((status & BIT(IS_S_RD_EVENT_SHIFT)) &&
+ (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_WRITE)) {
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_READ_PROCESSED, &value);
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
+
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
+ }
+
+ /* write request from master */
+ if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
+ (iproc_i2c->xfer_dir == I2C_SLAVE_DIR_READ)) {
+ val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ /* It is a write request from the master to the slave.
+ * Read the data present in the receive FIFO.
+ */
+ value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
+ i2c_slave_event(iproc_i2c->slave,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
+
+ /* check the status for the last byte of the transaction */
+ rd_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
+ if (rd_status == I2C_SLAVE_RX_END)
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+
+ dev_dbg(iproc_i2c->device, "\nread value = 0x%x\n", value);
+ }
+
+ /* Stop */
+ if (status & BIT(IS_S_START_BUSY_SHIFT)) {
+ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+ iproc_i2c->xfer_dir = I2C_SLAVE_DIR_NONE;
+ }
+
+ /* clear interrupt status */
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+
+ bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ return true;
+}
+
+static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+
+ /* Read valid data from RX FIFO */
+ while (iproc_i2c->rx_bytes < msg->len) {
+ if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
+ & M_FIFO_RX_CNT_MASK))
+ break;
+
+ msg->buf[iproc_i2c->rx_bytes] =
+ (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
+ M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+ iproc_i2c->rx_bytes++;
+ }
+}
+
+static void bcm_iproc_i2c_send(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+ unsigned int tx_bytes = msg->len - iproc_i2c->tx_bytes;
+ unsigned int i;
+ u32 val;
+
+ /* can only fill up to the FIFO size */
+ tx_bytes = min_t(unsigned int, tx_bytes, M_TX_RX_FIFO_SIZE);
+ for (i = 0; i < tx_bytes; i++) {
+ /* start from where we left over */
+ unsigned int idx = iproc_i2c->tx_bytes + i;
+
+ val = msg->buf[idx];
+
+ /* mark the last byte */
+ if (idx == msg->len - 1) {
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
+
+ if (iproc_i2c->irq) {
+ u32 tmp;
/*
- * Since this is the last byte, we should
- * now disable TX FIFO underrun interrupt
+ * Since this is the last byte, we should now
+ * disable TX FIFO underrun interrupt
*/
- tmp = readl(iproc_i2c->base + IE_OFFSET);
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
tmp &= ~BIT(IE_M_TX_UNDERRUN_SHIFT);
- writel(tmp, iproc_i2c->base + IE_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
+ tmp);
}
+ }
+
+ /* load data into TX FIFO */
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
+ }
- /* load data into TX FIFO */
- writel(val, iproc_i2c->base + M_TX_OFFSET);
+ /* update number of transferred bytes */
+ iproc_i2c->tx_bytes += tx_bytes;
+}
+
+static void bcm_iproc_i2c_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+ struct i2c_msg *msg = iproc_i2c->msg;
+ u32 bytes_left, val;
+
+ bcm_iproc_i2c_read_valid_bytes(iproc_i2c);
+ bytes_left = msg->len - iproc_i2c->rx_bytes;
+ if (bytes_left == 0) {
+ if (iproc_i2c->irq) {
+ /* finished reading all data, disable rx thld event */
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~BIT(IS_M_RX_THLD_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
}
- /* update number of transferred bytes */
- iproc_i2c->tx_bytes += tx_bytes;
+ } else if (bytes_left < iproc_i2c->thld_bytes) {
+ /* set bytes left as threshold */
+ val = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET);
+ val &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT);
+ val |= (bytes_left << M_FIFO_RX_THLD_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ iproc_i2c->thld_bytes = bytes_left;
}
+ /*
+ * Otherwise bytes_left >= iproc_i2c->thld_bytes, so the RX FIFO
+ * threshold already programmed (iproc_i2c->thld_bytes) does not
+ * need to change.
+ */
+}
+
+static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 status)
+{
+ /* TX FIFO is empty and we have more data to send */
+ if (status & BIT(IS_M_TX_UNDERRUN_SHIFT))
+ bcm_iproc_i2c_send(iproc_i2c);
+ /* RX FIFO threshold is reached and data needs to be read out */
+ if (status & BIT(IS_M_RX_THLD_SHIFT))
+ bcm_iproc_i2c_read(iproc_i2c);
+
+ /* transfer is done */
if (status & BIT(IS_M_START_BUSY_SHIFT)) {
iproc_i2c->xfer_is_done = 1;
- complete(&iproc_i2c->done);
+ if (iproc_i2c->irq)
+ complete(&iproc_i2c->done);
}
+}
- writel(status, iproc_i2c->base + IS_OFFSET);
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = data;
+ u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
+ bool ret;
+ u32 sl_status = status & ISR_MASK_SLAVE;
+
+ if (sl_status) {
+ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
+ if (ret)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+ }
+
+ status &= ISR_MASK;
+ if (!status)
+ return IRQ_NONE;
+
+ /* process all master based events */
+ bcm_iproc_i2c_process_m_event(iproc_i2c, status);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
return IRQ_HANDLED;
}
@@ -171,26 +537,29 @@ static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
u32 val;
/* put controller in reset */
- val = readl(iproc_i2c->base + CFG_OFFSET);
- val |= 1 << CFG_RESET_SHIFT;
- val &= ~(1 << CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+ val |= BIT(CFG_RESET_SHIFT);
+ val &= ~(BIT(CFG_EN_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
/* wait 100 usec per spec */
udelay(100);
/* bring controller out of reset */
- val &= ~(1 << CFG_RESET_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ val &= ~(BIT(CFG_RESET_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
/* flush TX/RX FIFOs and set RX FIFO threshold to zero */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+ val = (BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT));
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
/* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ val &= ~(IE_M_ALL_INTERRUPT_MASK <<
+ IE_M_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
/* clear all pending interrupts */
- writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, 0xffffffff);
return 0;
}
@@ -200,12 +569,12 @@ static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
{
u32 val;
- val = readl(iproc_i2c->base + CFG_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
if (enable)
val |= BIT(CFG_EN_SHIFT);
else
val &= ~BIT(CFG_EN_SHIFT);
- writel(val, iproc_i2c->base + CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, CFG_OFFSET, val);
}
static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
@@ -213,7 +582,7 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
{
u32 val;
- val = readl(iproc_i2c->base + M_CMD_OFFSET);
+ val = iproc_i2c_rd_reg(iproc_i2c, M_CMD_OFFSET);
val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
switch (val) {
@@ -236,6 +605,14 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
dev_dbg(iproc_i2c->device, "bus timeout\n");
return -ETIMEDOUT;
+ case M_CMD_STATUS_FIFO_UNDERRUN:
+ dev_dbg(iproc_i2c->device, "FIFO under-run\n");
+ return -ENXIO;
+
+ case M_CMD_STATUS_RX_FIFO_FULL:
+ dev_dbg(iproc_i2c->device, "RX FIFO full\n");
+ return -ETIMEDOUT;
+
default:
dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
@@ -248,18 +625,76 @@ static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
}
}
+static int bcm_iproc_i2c_xfer_wait(struct bcm_iproc_i2c_dev *iproc_i2c,
+ struct i2c_msg *msg,
+ u32 cmd)
+{
+ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
+ u32 val, status;
+ int ret;
+
+ iproc_i2c_wr_reg(iproc_i2c, M_CMD_OFFSET, cmd);
+
+ if (iproc_i2c->irq) {
+ time_left = wait_for_completion_timeout(&iproc_i2c->done,
+ time_left);
+ /* disable all interrupts */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ /* read it back to flush the write */
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ /* make sure the interrupt handler isn't running */
+ synchronize_irq(iproc_i2c->irq);
+
+ } else { /* polling mode */
+ unsigned long timeout = jiffies + time_left;
+
+ do {
+ status = iproc_i2c_rd_reg(iproc_i2c,
+ IS_OFFSET) & ISR_MASK;
+ bcm_iproc_i2c_process_m_event(iproc_i2c, status);
+ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
+
+ if (time_after(jiffies, timeout)) {
+ time_left = 0;
+ break;
+ }
+
+ cpu_relax();
+ cond_resched();
+ } while (!iproc_i2c->xfer_is_done);
+ }
+
+ if (!time_left && !iproc_i2c->xfer_is_done) {
+ dev_err(iproc_i2c->device, "transaction timed out\n");
+
+ /* flush both TX/RX FIFOs */
+ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ return -ETIMEDOUT;
+ }
+
+ ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+ if (ret) {
+ /* flush both TX/RX FIFOs */
+ val = BIT(M_FIFO_RX_FLUSH_SHIFT) | BIT(M_FIFO_TX_FLUSH_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, val);
+ return ret;
+ }
+
+ return 0;
+}
+
static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
struct i2c_msg *msg)
{
- int ret, i;
+ int i;
u8 addr;
- u32 val;
+ u32 val, tmp, val_intr_en;
unsigned int tx_bytes;
- unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MSEC);
/* check if bus is busy */
- if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
- BIT(M_CMD_START_BUSY_SHIFT))) {
+ if (!!(iproc_i2c_rd_reg(iproc_i2c,
+ M_CMD_OFFSET) & BIT(M_CMD_START_BUSY_SHIFT))) {
dev_warn(iproc_i2c->device, "bus is busy\n");
return -EBUSY;
}
@@ -268,7 +703,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* format and load slave address into the TX FIFO */
addr = i2c_8bit_addr_from_msg(msg);
- writel(addr, iproc_i2c->base + M_TX_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, addr);
/*
* For a write transaction, load data into the TX FIFO. Only allow
@@ -282,15 +717,17 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
/* mark the last byte */
if (i == msg->len - 1)
- val |= 1 << M_TX_WR_STATUS_SHIFT;
+ val |= BIT(M_TX_WR_STATUS_SHIFT);
- writel(val, iproc_i2c->base + M_TX_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, M_TX_OFFSET, val);
}
iproc_i2c->tx_bytes = tx_bytes;
}
/* mark as incomplete before starting the transaction */
- reinit_completion(&iproc_i2c->done);
+ if (iproc_i2c->irq)
+ reinit_completion(&iproc_i2c->done);
+
iproc_i2c->xfer_is_done = 0;
/*
@@ -298,7 +735,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
* transaction is done, i.e., the internal start_busy bit, transitions
* from 1 to 0.
*/
- val = BIT(IE_M_START_BUSY_SHIFT);
+ val_intr_en = BIT(IE_M_START_BUSY_SHIFT);
/*
* If TX data size is larger than the TX FIFO, need to enable TX
@@ -307,9 +744,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
if (!(msg->flags & I2C_M_RD) &&
msg->len > iproc_i2c->tx_bytes)
- val |= BIT(IE_M_TX_UNDERRUN_SHIFT);
-
- writel(val, iproc_i2c->base + IE_OFFSET);
+ val_intr_en |= BIT(IE_M_TX_UNDERRUN_SHIFT);
/*
* Now we can activate the transfer. For a read operation, specify the
@@ -317,54 +752,31 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
*/
val = BIT(M_CMD_START_BUSY_SHIFT);
if (msg->flags & I2C_M_RD) {
+ iproc_i2c->rx_bytes = 0;
+ if (msg->len > M_RX_FIFO_MAX_THLD_VALUE)
+ iproc_i2c->thld_bytes = M_RX_FIFO_THLD_VALUE;
+ else
+ iproc_i2c->thld_bytes = msg->len;
+
+ /* set threshold value */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET);
+ tmp &= ~(M_FIFO_RX_THLD_MASK << M_FIFO_RX_THLD_SHIFT);
+ tmp |= iproc_i2c->thld_bytes << M_FIFO_RX_THLD_SHIFT;
+ iproc_i2c_wr_reg(iproc_i2c, M_FIFO_CTRL_OFFSET, tmp);
+
+ /* enable the RX threshold interrupt */
+ val_intr_en |= BIT(IE_M_RX_THLD_SHIFT);
+
val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
(msg->len << M_CMD_RD_CNT_SHIFT);
} else {
val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
}
- writel(val, iproc_i2c->base + M_CMD_OFFSET);
-
- time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
-
- /* disable all interrupts */
- writel(0, iproc_i2c->base + IE_OFFSET);
- /* read it back to flush the write */
- readl(iproc_i2c->base + IE_OFFSET);
-
- /* make sure the interrupt handler isn't running */
- synchronize_irq(iproc_i2c->irq);
- if (!time_left && !iproc_i2c->xfer_is_done) {
- dev_err(iproc_i2c->device, "transaction timed out\n");
+ if (iproc_i2c->irq)
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val_intr_en);
- /* flush FIFOs */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
- (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
- return -ETIMEDOUT;
- }
-
- ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
- if (ret) {
- /* flush both TX/RX FIFOs */
- val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
- (1 << M_FIFO_TX_FLUSH_SHIFT);
- writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
- return ret;
- }
-
- /*
- * For a read operation, we now need to load the data from FIFO
- * into the memory buffer
- */
- if (msg->flags & I2C_M_RD) {
- for (i = 0; i < msg->len; i++) {
- msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
- M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
- }
- }
-
- return 0;
+ return bcm_iproc_i2c_xfer_wait(iproc_i2c, msg, val);
}
static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
@@ -387,17 +799,23 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+
+ if (adap->algo->reg_slave)
+ val |= I2C_FUNC_SLAVE;
+
+ return val;
}
-static const struct i2c_algorithm bcm_iproc_algo = {
+static struct i2c_algorithm bcm_iproc_algo = {
.master_xfer = bcm_iproc_i2c_xfer,
.functionality = bcm_iproc_i2c_functionality,
+ .reg_slave = bcm_iproc_i2c_reg_slave,
+ .unreg_slave = bcm_iproc_i2c_unreg_slave,
};
-static const struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
- /* need to reserve one byte in the FIFO for the slave address */
- .max_read_len = M_TX_RX_FIFO_SIZE - 1,
+static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = {
+ .max_read_len = M_RX_MAX_READ_LEN,
};
static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -425,10 +843,10 @@ static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
}
iproc_i2c->bus_speed = bus_speed;
- val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
- val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
- writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
@@ -449,6 +867,8 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iproc_i2c);
iproc_i2c->device = &pdev->dev;
+ iproc_i2c->type =
+ (enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev);
init_completion(&iproc_i2c->done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -456,6 +876,29 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
if (IS_ERR(iproc_i2c->base))
return PTR_ERR(iproc_i2c->base);
+ if (iproc_i2c->type == IPROC_I2C_NIC) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ iproc_i2c->idm_base = devm_ioremap_resource(iproc_i2c->device,
+ res);
+ if (IS_ERR(iproc_i2c->idm_base))
+ return PTR_ERR(iproc_i2c->idm_base);
+
+ ret = of_property_read_u32(iproc_i2c->device->of_node,
+ "brcm,ape-hsls-addr-mask",
+ &iproc_i2c->ape_addr_mask);
+ if (ret < 0) {
+ dev_err(iproc_i2c->device,
+ "'brcm,ape-hsls-addr-mask' missing\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&iproc_i2c->idm_lock);
+
+ /* no slave support */
+ bcm_iproc_algo.reg_slave = NULL;
+ bcm_iproc_algo.unreg_slave = NULL;
+ }
+
ret = bcm_iproc_i2c_init(iproc_i2c);
if (ret)
return ret;
@@ -465,17 +908,20 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
return ret;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(iproc_i2c->device, "no irq resource\n");
- return irq;
- }
- iproc_i2c->irq = irq;
+ if (irq > 0) {
+ ret = devm_request_irq(iproc_i2c->device, irq,
+ bcm_iproc_i2c_isr, 0, pdev->name,
+ iproc_i2c);
+ if (ret < 0) {
+ dev_err(iproc_i2c->device,
+ "unable to request irq %i\n", irq);
+ return ret;
+ }
- ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
- pdev->name, iproc_i2c);
- if (ret < 0) {
- dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
- return ret;
+ iproc_i2c->irq = irq;
+ } else {
+ dev_warn(iproc_i2c->device,
+ "no irq resource, falling back to poll mode\n");
}
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
@@ -495,10 +941,15 @@ static int bcm_iproc_i2c_remove(struct platform_device *pdev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
- /* make sure there's no pending interrupt when we remove the adapter */
- writel(0, iproc_i2c->base + IE_OFFSET);
- readl(iproc_i2c->base + IE_OFFSET);
- synchronize_irq(iproc_i2c->irq);
+ if (iproc_i2c->irq) {
+ /*
+ * Make sure there's no pending interrupt when we remove the
+ * adapter
+ */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ synchronize_irq(iproc_i2c->irq);
+ }
i2c_del_adapter(&iproc_i2c->adapter);
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -512,10 +963,15 @@ static int bcm_iproc_i2c_suspend(struct device *dev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev);
- /* make sure there's no pending interrupt when we go into suspend */
- writel(0, iproc_i2c->base + IE_OFFSET);
- readl(iproc_i2c->base + IE_OFFSET);
- synchronize_irq(iproc_i2c->irq);
+ if (iproc_i2c->irq) {
+ /*
+ * Make sure there's no pending interrupt when we go into
+ * suspend
+ */
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, 0);
+ iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ synchronize_irq(iproc_i2c->irq);
+ }
/* now disable the controller */
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
@@ -538,10 +994,10 @@ static int bcm_iproc_i2c_resume(struct device *dev)
return ret;
/* configure to the desired bus speed */
- val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
- val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+ val = iproc_i2c_rd_reg(iproc_i2c, TIM_CFG_OFFSET);
+ val &= ~BIT(TIM_CFG_MODE_400_SHIFT);
val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
- writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+ iproc_i2c_wr_reg(iproc_i2c, TIM_CFG_OFFSET, val);
bcm_iproc_i2c_enable_disable(iproc_i2c, true);
@@ -558,8 +1014,54 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
#define BCM_IPROC_I2C_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
+
+static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (iproc_i2c->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+ return 0;
+}
+
+static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+{
+ u32 tmp;
+ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
+
+ if (!iproc_i2c->slave)
+ return -EINVAL;
+
+ iproc_i2c->slave = NULL;
+
+ /* disable all slave interrupts */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+ tmp &= ~(IE_S_ALL_INTERRUPT_MASK <<
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp);
+
+ return 0;
+}
+
static const struct of_device_id bcm_iproc_i2c_of_match[] = {
- { .compatible = "brcm,iproc-i2c" },
+ {
+ .compatible = "brcm,iproc-i2c",
+ .data = (int *)IPROC_I2C,
+ }, {
+ .compatible = "brcm,iproc-nic-i2c",
+ .data = (int *)IPROC_I2C_NIC,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
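
The iproc changes select per-compatible behaviour (IPROC_I2C vs IPROC_I2C_NIC) through the .data field of the of_device_id table and of_device_get_match_data(). The sketch below shows the same pattern in isolation, assuming hypothetical vendor,demo-* compatibles; it stores a small per-variant struct in .data instead of a cast enum, which avoids integer/pointer cast concerns but is otherwise the same technique.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_variant {
	bool indirect_regs;	/* registers reached through an IDM window */
	bool has_slave;		/* slave mode supported on this variant */
};

static const struct demo_variant demo_plain = { .has_slave = true };
static const struct demo_variant demo_nic = { .indirect_regs = true };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-i2c",     .data = &demo_plain },
	{ .compatible = "vendor,demo-nic-i2c", .data = &demo_nic },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_variant *variant;

	/* Returns the .data of the matching of_device_id entry, or NULL. */
	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -EINVAL;

	if (variant->indirect_regs) {
		/* map the extra IDM resource, lock around register access */
	}
	if (!variant->has_slave) {
		/* clear reg_slave/unreg_slave from the algorithm */
	}

	return 0;
}
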
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index f4d862234980..506991596b68 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -165,7 +165,6 @@ static const struct bsc_clk_param bsc_clk[] = {
struct brcmstb_i2c_dev {
struct device *device;
void __iomem *base;
- void __iomem *irq_base;
int irq;
struct bsc_regs *bsc_regmap;
struct i2c_adapter adapter;
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index a4730111d290..2de7452fcd6d 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -251,13 +251,27 @@ unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare)
{
+ int ret;
+
if (IS_ERR(dev->clk))
return PTR_ERR(dev->clk);
- if (prepare)
- return clk_prepare_enable(dev->clk);
+ if (prepare) {
+ /* Optional interface clock */
+ ret = clk_prepare_enable(dev->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dev->clk);
+ if (ret)
+ clk_disable_unprepare(dev->pclk);
+
+ return ret;
+ }
clk_disable_unprepare(dev->clk);
+ clk_disable_unprepare(dev->pclk);
+
return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_prepare_clk);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 6b4ef1d38fb2..67edbbde1070 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -177,6 +177,7 @@
* @base: IO registers pointer
* @cmd_complete: tx completion indicator
* @clk: input reference clock
+ * @pclk: clock required to access the registers
* @slave: represent an I2C slave device
* @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
@@ -227,6 +228,7 @@ struct dw_i2c_dev {
void __iomem *ext;
struct completion cmd_complete;
struct clk *clk;
+ struct clk *pclk;
struct reset_control *rst;
struct i2c_client *slave;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index bb8e3f149979..d464799e40a3 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -426,8 +426,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
pm_runtime_get_sync(dev->dev);
- if (dev->suspended) {
- dev_err(dev->dev, "Error %s call while suspended\n", __func__);
+ if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 416f89b8f881..ddfb81872906 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -344,6 +344,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
else
i2c_dw_configure_master(dev);
+ /* Optional interface clock */
+ dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(dev->pclk))
+ return PTR_ERR(dev->pclk);
+
dev->clk = devm_clk_get(&pdev->dev, NULL);
if (!i2c_dw_prepare_clk(dev, true)) {
u64 clk_khz;
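
The designware changes add an optional interface clock ("pclk") that must be running before the functional clock is enabled and before registers are touched. A minimal sketch of that idiom follows, assuming a hypothetical platform driver; devm_clk_get_optional() returns NULL rather than an error when the clock is not described, and clk_prepare_enable(NULL) is a no-op, so the same code works with or without the extra clock.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

struct demo_dev {
	struct clk *clk;	/* functional/reference clock, required */
	struct clk *pclk;	/* interface (register) clock, optional */
};

static int demo_clks_get_and_enable(struct platform_device *pdev,
				    struct demo_dev *d)
{
	int ret;

	/* NULL (not an error) when no "pclk" clock is described. */
	d->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
	if (IS_ERR(d->pclk))
		return PTR_ERR(d->pclk);

	d->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(d->clk))
		return PTR_ERR(d->clk);

	/* Bring up the interface clock first so registers are reachable. */
	ret = clk_prepare_enable(d->pclk);
	if (ret)
		return ret;

	/* Then the functional clock; roll back pclk on failure. */
	ret = clk_prepare_enable(d->clk);
	if (ret)
		clk_disable_unprepare(d->pclk);

	return ret;
}
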
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index bba5c4627de3..9684a0ac2a6d 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -413,6 +413,8 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (gpiod_cansleep(priv->sda) || gpiod_cansleep(priv->scl))
dev_warn(dev, "Slow GPIO pins might wreak havoc into I2C/SMBus bus timing");
+ else
+ bit_data->can_do_atomic = true;
bit_data->setsda = i2c_gpio_setsda_val;
bit_data->setscl = i2c_gpio_setscl_val;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 06c4c767af32..dc00fabc919a 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -639,8 +639,7 @@ static int lpi2c_imx_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int lpi2c_runtime_suspend(struct device *dev)
+static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
@@ -650,7 +649,7 @@ static int lpi2c_runtime_suspend(struct device *dev)
return 0;
}
-static int lpi2c_runtime_resume(struct device *dev)
+static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
int ret;
@@ -671,10 +670,6 @@ static const struct dev_pm_ops lpi2c_pm_ops = {
SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
lpi2c_runtime_resume, NULL)
};
-#define IMX_LPI2C_PM (&lpi2c_pm_ops)
-#else
-#define IMX_LPI2C_PM NULL
-#endif
static struct platform_driver lpi2c_imx_driver = {
.probe = lpi2c_imx_probe,
@@ -682,7 +677,7 @@ static struct platform_driver lpi2c_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = lpi2c_imx_of_match,
- .pm = IMX_LPI2C_PM,
+ .pm = &lpi2c_pm_ops,
},
};
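
The lpi2c change replaces the #ifdef CONFIG_PM_SLEEP guard with __maybe_unused callbacks and an unconditional .pm pointer; the SET_*_PM_OPS macros already compile out when power management is disabled. A small sketch of the pattern with illustrative demo_* names (the sleep ops chosen here are just one common combination, not taken from this driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused demo_runtime_suspend(struct device *dev)
{
	/* gate clocks, select the sleep pinctrl state, ... */
	return 0;
}

static int __maybe_unused demo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore the default pinctrl state, ... */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};

static int demo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove = demo_remove,
	.driver = {
		.name = "demo",
		/*
		 * Assigned unconditionally: with CONFIG_PM disabled the
		 * macros above expand to nothing, and __maybe_unused keeps
		 * the unreferenced callbacks from triggering warnings.
		 */
		.pm = &demo_pm_ops,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
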
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index c0c3043b5d61..fd70b110e8f4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -515,9 +515,9 @@ static int i2c_imx_clk_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
{
struct clk_notifier_data *ndata = data;
- struct imx_i2c_struct *i2c_imx = container_of(&ndata->clk,
+ struct imx_i2c_struct *i2c_imx = container_of(nb,
struct imx_i2c_struct,
- clk);
+ clk_change_nb);
if (action & POST_RATE_CHANGE)
i2c_imx_set_clk(i2c_imx, ndata->new_rate);
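
The i.MX fix above recovers the driver structure from the notifier_block embedded in it; the old code passed &ndata->clk to container_of(), which points into clk framework data and yields a bogus pointer. A minimal sketch of the correct idiom, with illustrative demo_* names (registration from probe is only indicated in a comment):

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct demo_i2c {
	struct clk *clk;
	struct notifier_block clk_change_nb;
	unsigned long bitrate;
};

static int demo_clk_notifier_call(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct clk_notifier_data *ndata = data;
	/*
	 * The notifier_block is embedded in the driver state, so the
	 * enclosing structure must be recovered from nb itself, not from
	 * &ndata->clk, which points into clk framework data.
	 */
	struct demo_i2c *i2c = container_of(nb, struct demo_i2c,
					    clk_change_nb);

	if (action & POST_RATE_CHANGE)
		i2c->bitrate = ndata->new_rate;	/* recompute dividers here */

	return NOTIFY_OK;
}

/*
 * Registered from probe with:
 *	clk_notifier_register(i2c->clk, &i2c->clk_change_nb);
 */
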
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 5c754bf659e2..f64c1d72f73f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/io.h>
-#include <linux/acpi.h>
/* SCH SMBus address offsets */
#define SMBHSTCNT (0 + sch_smba)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 684d651612b3..745b0d03e883 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#define I2C_RS_TRANSFER (1 << 4)
+#define I2C_ARB_LOST (1 << 3)
#define I2C_HS_NACKERR (1 << 2)
#define I2C_ACKERR (1 << 1)
#define I2C_TRANSAC_COMP (1 << 0)
@@ -76,6 +77,8 @@
#define I2C_CONTROL_DIR_CHANGE (0x1 << 4)
#define I2C_CONTROL_ACKERR_DET_EN (0x1 << 5)
#define I2C_CONTROL_TRANSFER_LEN_CHANGE (0x1 << 6)
+#define I2C_CONTROL_DMAACK_EN (0x1 << 8)
+#define I2C_CONTROL_ASYNC_MODE (0x1 << 9)
#define I2C_CONTROL_WRAPPER (0x1 << 0)
#define I2C_DRV_NAME "i2c-mt65xx"
@@ -106,40 +109,97 @@ enum mtk_trans_op {
};
enum I2C_REGS_OFFSET {
- OFFSET_DATA_PORT = 0x0,
- OFFSET_SLAVE_ADDR = 0x04,
- OFFSET_INTR_MASK = 0x08,
- OFFSET_INTR_STAT = 0x0c,
- OFFSET_CONTROL = 0x10,
- OFFSET_TRANSFER_LEN = 0x14,
- OFFSET_TRANSAC_LEN = 0x18,
- OFFSET_DELAY_LEN = 0x1c,
- OFFSET_TIMING = 0x20,
- OFFSET_START = 0x24,
- OFFSET_EXT_CONF = 0x28,
- OFFSET_FIFO_STAT = 0x30,
- OFFSET_FIFO_THRESH = 0x34,
- OFFSET_FIFO_ADDR_CLR = 0x38,
- OFFSET_IO_CONFIG = 0x40,
- OFFSET_RSV_DEBUG = 0x44,
- OFFSET_HS = 0x48,
- OFFSET_SOFTRESET = 0x50,
- OFFSET_DCM_EN = 0x54,
- OFFSET_PATH_DIR = 0x60,
- OFFSET_DEBUGSTAT = 0x64,
- OFFSET_DEBUGCTRL = 0x68,
- OFFSET_TRANSFER_LEN_AUX = 0x6c,
- OFFSET_CLOCK_DIV = 0x70,
+ OFFSET_DATA_PORT,
+ OFFSET_SLAVE_ADDR,
+ OFFSET_INTR_MASK,
+ OFFSET_INTR_STAT,
+ OFFSET_CONTROL,
+ OFFSET_TRANSFER_LEN,
+ OFFSET_TRANSAC_LEN,
+ OFFSET_DELAY_LEN,
+ OFFSET_TIMING,
+ OFFSET_START,
+ OFFSET_EXT_CONF,
+ OFFSET_FIFO_STAT,
+ OFFSET_FIFO_THRESH,
+ OFFSET_FIFO_ADDR_CLR,
+ OFFSET_IO_CONFIG,
+ OFFSET_RSV_DEBUG,
+ OFFSET_HS,
+ OFFSET_SOFTRESET,
+ OFFSET_DCM_EN,
+ OFFSET_PATH_DIR,
+ OFFSET_DEBUGSTAT,
+ OFFSET_DEBUGCTRL,
+ OFFSET_TRANSFER_LEN_AUX,
+ OFFSET_CLOCK_DIV,
+ OFFSET_LTIMING,
+};
+
+static const u16 mt_i2c_regs_v1[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_SLAVE_ADDR] = 0x4,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_FIFO_STAT] = 0x30,
+ [OFFSET_FIFO_THRESH] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_IO_CONFIG] = 0x40,
+ [OFFSET_RSV_DEBUG] = 0x44,
+ [OFFSET_HS] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_DCM_EN] = 0x54,
+ [OFFSET_PATH_DIR] = 0x60,
+ [OFFSET_DEBUGSTAT] = 0x64,
+ [OFFSET_DEBUGCTRL] = 0x68,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x6c,
+ [OFFSET_CLOCK_DIV] = 0x70,
+};
+
+static const u16 mt_i2c_regs_v2[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+ [OFFSET_SLAVE_ADDR] = 0x4,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+ [OFFSET_TRANSFER_LEN] = 0x14,
+ [OFFSET_TRANSAC_LEN] = 0x18,
+ [OFFSET_DELAY_LEN] = 0x1c,
+ [OFFSET_TIMING] = 0x20,
+ [OFFSET_START] = 0x24,
+ [OFFSET_EXT_CONF] = 0x28,
+ [OFFSET_LTIMING] = 0x2c,
+ [OFFSET_HS] = 0x30,
+ [OFFSET_IO_CONFIG] = 0x34,
+ [OFFSET_FIFO_ADDR_CLR] = 0x38,
+ [OFFSET_TRANSFER_LEN_AUX] = 0x44,
+ [OFFSET_CLOCK_DIV] = 0x48,
+ [OFFSET_SOFTRESET] = 0x50,
+ [OFFSET_DEBUGSTAT] = 0xe0,
+ [OFFSET_DEBUGCTRL] = 0xe8,
+ [OFFSET_FIFO_STAT] = 0xf4,
+ [OFFSET_FIFO_THRESH] = 0xf8,
+ [OFFSET_DCM_EN] = 0xf88,
};
struct mtk_i2c_compatible {
const struct i2c_adapter_quirks *quirks;
+ const u16 *regs;
unsigned char pmic_i2c: 1;
unsigned char dcm: 1;
unsigned char auto_restart: 1;
unsigned char aux_len_reg: 1;
unsigned char support_33bits: 1;
unsigned char timing_adjust: 1;
+ unsigned char dma_sync: 1;
+ unsigned char ltiming_adjust: 1;
};
struct mtk_i2c {
@@ -153,6 +213,7 @@ struct mtk_i2c {
struct clk *clk_main; /* main clock for i2c bus */
struct clk *clk_dma; /* DMA clock for i2c via DMA */
struct clk *clk_pmic; /* PMIC clock for i2c from PMIC */
+ struct clk *clk_arb; /* Arbitrator clock for i2c */
bool have_pmic; /* can use i2c pins from PMIC */
bool use_push_pull; /* IO config push-pull mode */
@@ -162,6 +223,7 @@ struct mtk_i2c {
enum mtk_trans_op op;
u16 timing_reg;
u16 high_speed_reg;
+ u16 ltiming_reg;
unsigned char auto_restart;
bool ignore_restart_irq;
const struct mtk_i2c_compatible *dev_comp;
@@ -181,51 +243,78 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
};
static const struct mtk_i2c_compatible mt2712_compat = {
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 1,
.timing_adjust = 1,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt6577_compat = {
.quirks = &mt6577_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 0,
.aux_len_reg = 0,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt6589_compat = {
.quirks = &mt6577_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 1,
.dcm = 0,
.auto_restart = 0,
.aux_len_reg = 0,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt7622_compat = {
.quirks = &mt7622_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 0,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
};
static const struct mtk_i2c_compatible mt8173_compat = {
+ .regs = mt_i2c_regs_v1,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
.aux_len_reg = 1,
.support_33bits = 1,
.timing_adjust = 0,
+ .dma_sync = 0,
+ .ltiming_adjust = 0,
+};
+
+static const struct mtk_i2c_compatible mt8183_compat = {
+ .regs = mt_i2c_regs_v2,
+ .pmic_i2c = 0,
+ .dcm = 0,
+ .auto_restart = 1,
+ .aux_len_reg = 1,
+ .support_33bits = 1,
+ .timing_adjust = 1,
+ .dma_sync = 1,
+ .ltiming_adjust = 1,
};
static const struct of_device_id mtk_i2c_of_match[] = {
@@ -234,10 +323,22 @@ static const struct of_device_id mtk_i2c_of_match[] = {
{ .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat },
{ .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
{ .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
+ { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
{}
};
MODULE_DEVICE_TABLE(of, mtk_i2c_of_match);
+static u16 mtk_i2c_readw(struct mtk_i2c *i2c, enum I2C_REGS_OFFSET reg)
+{
+ return readw(i2c->base + i2c->dev_comp->regs[reg]);
+}
+
+static void mtk_i2c_writew(struct mtk_i2c *i2c, u16 val,
+ enum I2C_REGS_OFFSET reg)
+{
+ writew(val, i2c->base + i2c->dev_comp->regs[reg]);
+}
+
static int mtk_i2c_clock_enable(struct mtk_i2c *i2c)
{
int ret;
@@ -255,8 +356,18 @@ static int mtk_i2c_clock_enable(struct mtk_i2c *i2c)
if (ret)
goto err_pmic;
}
+
+ if (i2c->clk_arb) {
+ ret = clk_prepare_enable(i2c->clk_arb);
+ if (ret)
+ goto err_arb;
+ }
+
return 0;
+err_arb:
+ if (i2c->have_pmic)
+ clk_disable_unprepare(i2c->clk_pmic);
err_pmic:
clk_disable_unprepare(i2c->clk_main);
err_main:
@@ -267,6 +378,9 @@ err_main:
static void mtk_i2c_clock_disable(struct mtk_i2c *i2c)
{
+ if (i2c->clk_arb)
+ clk_disable_unprepare(i2c->clk_arb);
+
if (i2c->have_pmic)
clk_disable_unprepare(i2c->clk_pmic);
@@ -278,31 +392,36 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writew(I2C_SOFT_RST, i2c->base + OFFSET_SOFTRESET);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
/* Set ioconfig */
if (i2c->use_push_pull)
- writew(I2C_IO_CONFIG_PUSH_PULL, i2c->base + OFFSET_IO_CONFIG);
+ mtk_i2c_writew(i2c, I2C_IO_CONFIG_PUSH_PULL, OFFSET_IO_CONFIG);
else
- writew(I2C_IO_CONFIG_OPEN_DRAIN, i2c->base + OFFSET_IO_CONFIG);
+ mtk_i2c_writew(i2c, I2C_IO_CONFIG_OPEN_DRAIN, OFFSET_IO_CONFIG);
if (i2c->dev_comp->dcm)
- writew(I2C_DCM_DISABLE, i2c->base + OFFSET_DCM_EN);
+ mtk_i2c_writew(i2c, I2C_DCM_DISABLE, OFFSET_DCM_EN);
if (i2c->dev_comp->timing_adjust)
- writew(I2C_DEFAULT_CLK_DIV - 1, i2c->base + OFFSET_CLOCK_DIV);
+ mtk_i2c_writew(i2c, I2C_DEFAULT_CLK_DIV - 1, OFFSET_CLOCK_DIV);
- writew(i2c->timing_reg, i2c->base + OFFSET_TIMING);
- writew(i2c->high_speed_reg, i2c->base + OFFSET_HS);
+ mtk_i2c_writew(i2c, i2c->timing_reg, OFFSET_TIMING);
+ mtk_i2c_writew(i2c, i2c->high_speed_reg, OFFSET_HS);
+ if (i2c->dev_comp->ltiming_adjust)
+ mtk_i2c_writew(i2c, i2c->ltiming_reg, OFFSET_LTIMING);
/* If use i2c pin from PMIC mt6397 side, need set PATH_DIR first */
if (i2c->have_pmic)
- writew(I2C_CONTROL_WRAPPER, i2c->base + OFFSET_PATH_DIR);
+ mtk_i2c_writew(i2c, I2C_CONTROL_WRAPPER, OFFSET_PATH_DIR);
control_reg = I2C_CONTROL_ACKERR_DET_EN |
I2C_CONTROL_CLK_EXT_EN | I2C_CONTROL_DMA_EN;
- writew(control_reg, i2c->base + OFFSET_CONTROL);
- writew(I2C_DELAY_LEN, i2c->base + OFFSET_DELAY_LEN);
+ if (i2c->dev_comp->dma_sync)
+ control_reg |= I2C_CONTROL_DMAACK_EN | I2C_CONTROL_ASYNC_MODE;
+
+ mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
+ mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
udelay(50);
@@ -390,6 +509,8 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
unsigned int clk_src;
unsigned int step_cnt;
unsigned int sample_cnt;
+ unsigned int l_step_cnt;
+ unsigned int l_sample_cnt;
unsigned int target_speed;
int ret;
@@ -399,11 +520,11 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
if (target_speed > MAX_FS_MODE_SPEED) {
/* Set master code speed register */
ret = mtk_i2c_calculate_speed(i2c, clk_src, MAX_FS_MODE_SPEED,
- &step_cnt, &sample_cnt);
+ &l_step_cnt, &l_sample_cnt);
if (ret < 0)
return ret;
- i2c->timing_reg = (sample_cnt << 8) | step_cnt;
+ i2c->timing_reg = (l_sample_cnt << 8) | l_step_cnt;
/* Set the high speed mode register */
ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
@@ -413,6 +534,10 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
i2c->high_speed_reg = I2C_TIME_DEFAULT_VALUE |
(sample_cnt << 12) | (step_cnt << 8);
+
+ if (i2c->dev_comp->ltiming_adjust)
+ i2c->ltiming_reg = (l_sample_cnt << 6) | l_step_cnt |
+ (sample_cnt << 12) | (step_cnt << 9);
} else {
ret = mtk_i2c_calculate_speed(i2c, clk_src, target_speed,
&step_cnt, &sample_cnt);
@@ -423,6 +548,9 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
/* Disable the high speed transaction */
i2c->high_speed_reg = I2C_TIME_CLR_VALUE;
+
+ if (i2c->dev_comp->ltiming_adjust)
+ i2c->ltiming_reg = (sample_cnt << 6) | step_cnt;
}
return 0;
@@ -454,7 +582,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
reinit_completion(&i2c->msg_complete);
- control_reg = readw(i2c->base + OFFSET_CONTROL) &
+ control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
if ((i2c->speed_hz > MAX_FS_MODE_SPEED) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;
@@ -462,40 +590,41 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
if (i2c->op == I2C_MASTER_WRRD)
control_reg |= I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS;
- writew(control_reg, i2c->base + OFFSET_CONTROL);
+ mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
/* set start condition */
if (i2c->speed_hz <= I2C_DEFAULT_SPEED)
- writew(I2C_ST_START_CON, i2c->base + OFFSET_EXT_CONF);
+ mtk_i2c_writew(i2c, I2C_ST_START_CON, OFFSET_EXT_CONF);
else
- writew(I2C_FS_START_CON, i2c->base + OFFSET_EXT_CONF);
+ mtk_i2c_writew(i2c, I2C_FS_START_CON, OFFSET_EXT_CONF);
addr_reg = i2c_8bit_addr_from_msg(msgs);
- writew(addr_reg, i2c->base + OFFSET_SLAVE_ADDR);
+ mtk_i2c_writew(i2c, addr_reg, OFFSET_SLAVE_ADDR);
/* Clear interrupt status */
- writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_STAT);
- writew(I2C_FIFO_ADDR_CLR, i2c->base + OFFSET_FIFO_ADDR_CLR);
+ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_STAT);
+
+ mtk_i2c_writew(i2c, I2C_FIFO_ADDR_CLR, OFFSET_FIFO_ADDR_CLR);
/* Enable interrupt */
- writew(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP, i2c->base + OFFSET_INTR_MASK);
+ mtk_i2c_writew(i2c, restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP, OFFSET_INTR_MASK);
/* Set transfer and transaction len */
if (i2c->op == I2C_MASTER_WRRD) {
if (i2c->dev_comp->aux_len_reg) {
- writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
- writew((msgs + 1)->len, i2c->base +
- OFFSET_TRANSFER_LEN_AUX);
+ mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, (msgs + 1)->len,
+ OFFSET_TRANSFER_LEN_AUX);
} else {
- writew(msgs->len | ((msgs + 1)->len) << 8,
- i2c->base + OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, msgs->len | ((msgs + 1)->len) << 8,
+ OFFSET_TRANSFER_LEN);
}
- writew(I2C_WRRD_TRANAC_VALUE, i2c->base + OFFSET_TRANSAC_LEN);
+ mtk_i2c_writew(i2c, I2C_WRRD_TRANAC_VALUE, OFFSET_TRANSAC_LEN);
} else {
- writew(msgs->len, i2c->base + OFFSET_TRANSFER_LEN);
- writew(num, i2c->base + OFFSET_TRANSAC_LEN);
+ mtk_i2c_writew(i2c, msgs->len, OFFSET_TRANSFER_LEN);
+ mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN);
}
/* Prepare buffer data to start transfer */
@@ -607,14 +736,14 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
if (left_num >= 1)
start_reg |= I2C_RS_MUL_CNFG;
}
- writew(start_reg, i2c->base + OFFSET_START);
+ mtk_i2c_writew(i2c, start_reg, OFFSET_START);
ret = wait_for_completion_timeout(&i2c->msg_complete,
i2c->adap.timeout);
/* Clear interrupt mask */
- writew(~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
- I2C_TRANSAC_COMP), i2c->base + OFFSET_INTR_MASK);
+ mtk_i2c_writew(i2c, ~(restart_flag | I2C_HS_NACKERR | I2C_ACKERR |
+ I2C_ARB_LOST | I2C_TRANSAC_COMP), OFFSET_INTR_MASK);
if (i2c->op == I2C_MASTER_WR) {
dma_unmap_single(i2c->dev, wpaddr,
@@ -724,8 +853,8 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
if (i2c->auto_restart)
restart_flag = I2C_RS_TRANSFER;
- intr_stat = readw(i2c->base + OFFSET_INTR_STAT);
- writew(intr_stat, i2c->base + OFFSET_INTR_STAT);
+ intr_stat = mtk_i2c_readw(i2c, OFFSET_INTR_STAT);
+ mtk_i2c_writew(i2c, intr_stat, OFFSET_INTR_STAT);
/*
* when occurs ack error, i2c controller generate two interrupts
@@ -737,8 +866,8 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
if (i2c->ignore_restart_irq && (i2c->irq_stat & restart_flag)) {
i2c->ignore_restart_irq = false;
i2c->irq_stat = 0;
- writew(I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG | I2C_TRANSAC_START,
- i2c->base + OFFSET_START);
+ mtk_i2c_writew(i2c, I2C_RS_MUL_CNFG | I2C_RS_MUL_TRIG |
+ I2C_TRANSAC_START, OFFSET_START);
} else {
if (i2c->irq_stat & (I2C_TRANSAC_COMP | restart_flag))
complete(&i2c->msg_complete);
@@ -839,6 +968,10 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c->clk_dma);
}
+ i2c->clk_arb = devm_clk_get(&pdev->dev, "arb");
+ if (IS_ERR(i2c->clk_arb))
+ i2c->clk_arb = NULL;
+
clk = i2c->clk_main;
if (i2c->have_pmic) {
i2c->clk_pmic = devm_clk_get(&pdev->dev, "pmic");
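[Note on the MediaTek hunks above: mtk_i2c_readw()/mtk_i2c_writew() decouple the driver from any fixed register layout. Each compatible entry points at an offset table (mt_i2c_regs_v1 or mt_i2c_regs_v2) and every access indexes that table by a symbolic register name, which is how the MT8183 register map can coexist with the older layout. The following is a minimal, self-contained C sketch of that lookup pattern only; the enum names, offsets and struct names are invented for illustration and are not the driver's real values.]

	#include <stdint.h>
	#include <stdio.h>

	enum reg_id { REG_CONTROL, REG_TIMING, REG_MAX };

	/* hypothetical per-variant offset tables; the real ones are mt_i2c_regs_v1/v2 */
	static const uint16_t regs_v1[REG_MAX] = { [REG_CONTROL] = 0x10, [REG_TIMING] = 0x20 };
	static const uint16_t regs_v2[REG_MAX] = { [REG_CONTROL] = 0x40, [REG_TIMING] = 0x44 };

	struct fake_i2c {
		uint16_t base[0x80];	/* stands in for the ioremapped register block */
		const uint16_t *regs;	/* chosen at probe time from the compatible data */
	};

	static void fake_writew(struct fake_i2c *i2c, uint16_t val, enum reg_id r)
	{
		/* the driver does writew(val, i2c->base + i2c->dev_comp->regs[r]) */
		i2c->base[i2c->regs[r] / 2] = val;
	}

	int main(void)
	{
		struct fake_i2c v1 = { .regs = regs_v1 }, v2 = { .regs = regs_v2 };

		fake_writew(&v1, 0x1234, REG_TIMING);	/* lands at offset 0x20 */
		fake_writew(&v2, 0x1234, REG_TIMING);	/* same call, offset 0x44 */
		printf("%#x %#x\n", v1.base[0x20 / 2], v2.base[0x44 / 2]);
		return 0;
	}

[The same call sites work for every SoC; adding a new register map only means adding another offset table and a compatible entry.]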
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0ed5a41804dc..4f30a43b63da 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1070,8 +1070,7 @@ static int nmk_i2c_remove(struct amba_device *adev)
/* disable the controller */
i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
clk_disable_unprepare(dev->clk);
- if (res)
- release_mem_region(res->start, resource_size(res));
+ release_mem_region(res->start, resource_size(res));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 4e67d5ed480e..1c8f708f212b 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -253,6 +253,12 @@ static const struct pci_device_id gpu_i2c_ids[] = {
};
MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+static const struct property_entry ccgx_props[] = {
+ /* Use FW built for NVIDIA (nv) only */
+ PROPERTY_ENTRY_U16("ccgx,firmware-build", ('n' << 8) | 'v'),
+ { }
+};
+
static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
{
struct i2c_client *ccgx_client;
@@ -267,6 +273,7 @@ static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
sizeof(i2cd->gpu_ccgx_ucsi->type));
i2cd->gpu_ccgx_ucsi->addr = 0x8;
i2cd->gpu_ccgx_ucsi->irq = irq;
+ i2cd->gpu_ccgx_ucsi->properties = ccgx_props;
ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
if (!ccgx_client)
return -ENODEV;
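[Note on the nvidia-gpu hunk above: the CCGx firmware-build selection is passed to the instantiated client as a build-time device property through i2c_board_info.properties, with the u16 value packing the ASCII characters 'n' and 'v'. Below is a hedged kernel-C fragment of the general board-info-plus-properties idiom; the chip name, property name and address are invented, and it assumes the usual <linux/property.h> helpers.]

	#include <linux/i2c.h>
	#include <linux/property.h>

	static const struct property_entry example_props[] = {
		PROPERTY_ENTRY_U16("vendor,example-build", ('n' << 8) | 'v'),
		{ }
	};

	static struct i2c_client *example_instantiate(struct i2c_adapter *adap)
	{
		struct i2c_board_info info = {
			I2C_BOARD_INFO("example-chip", 0x08),
			.properties = example_props,
		};

		/* deprecated NULL-returning API, matching what the hunk above uses */
		return i2c_new_device(adap, &info);
	}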
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 4e1a077fb688..c3dabee0aa35 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -26,8 +26,6 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
-#define OCORES_FLAG_POLL BIT(0)
-
/*
* 'process_lock' exists because ocores_process() and ocores_process_timeout()
* can't run in parallel.
@@ -37,7 +35,6 @@ struct ocores_i2c {
int iobase;
u32 reg_shift;
u32 reg_io_width;
- unsigned long flags;
wait_queue_head_t wait;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -403,11 +400,7 @@ static int ocores_xfer_polling(struct i2c_adapter *adap,
static int ocores_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
- struct ocores_i2c *i2c = i2c_get_adapdata(adap);
-
- if (i2c->flags & OCORES_FLAG_POLL)
- return ocores_xfer_polling(adap, msgs, num);
- return ocores_xfer_core(i2c, msgs, num, false);
+ return ocores_xfer_core(i2c_get_adapdata(adap), msgs, num, false);
}
static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
@@ -447,8 +440,9 @@ static u32 ocores_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-static const struct i2c_algorithm ocores_algorithm = {
+static struct i2c_algorithm ocores_algorithm = {
.master_xfer = ocores_xfer,
+ .master_xfer_atomic = ocores_xfer_polling,
.functionality = ocores_func,
};
@@ -673,13 +667,13 @@ static int ocores_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq == -ENXIO) {
- i2c->flags |= OCORES_FLAG_POLL;
+ ocores_algorithm.master_xfer = ocores_xfer_polling;
} else {
if (irq < 0)
return irq;
}
- if (!(i2c->flags & OCORES_FLAG_POLL)) {
+ if (ocores_algorithm.master_xfer != ocores_xfer_polling) {
ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
pdev->name, i2c);
if (ret) {
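[Note on the ocores hunks above: the OCORES_FLAG_POLL flag is dropped by making the i2c_algorithm table non-const so probe can patch .master_xfer to the polling implementation when no interrupt line exists, while ocores_xfer_polling is also wired up as .master_xfer_atomic. A tiny self-contained C sketch of that "patch the shared ops table at init" shape, with invented names:]

	#include <stdio.h>

	struct msg { int len; };

	static int xfer_irq(struct msg *m)  { return printf("irq xfer, len %d\n", m->len); }
	static int xfer_poll(struct msg *m) { return printf("polled xfer, len %d\n", m->len); }

	/* a single, shared ops table: patching it at probe affects every user of it */
	static struct ops {
		int (*master_xfer)(struct msg *m);
	} algo = { .master_xfer = xfer_irq };

	int main(void)
	{
		int have_irq = 0;	/* e.g. platform_get_irq() returned -ENXIO */
		struct msg m = { .len = 4 };

		if (!have_irq)
			algo.master_xfer = xfer_poll;	/* what the ocores probe now does */

		return algo.master_xfer(&m) < 0;
	}

[Worth noting, as a reading of the change rather than anything stated in it: because the ops table is shared rather than per-instance, this stays correct only as long as all instances on a system agree on the transfer mode, whereas the old flag kept that state per device.]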
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index cd9c65f3d404..faa0394048a0 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -269,6 +269,8 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_IP_V2_IRQENABLE_CLR] = 0x30,
};
+static int omap_i2c_xfer_data(struct omap_i2c_dev *omap);
+
static inline void omap_i2c_write_reg(struct omap_i2c_dev *omap,
int reg, u16 val)
{
@@ -648,15 +650,28 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *omap, u8 size, bool is_rx)
(1000 * omap->speed / 8);
}
+static void omap_i2c_wait(struct omap_i2c_dev *omap)
+{
+ u16 stat;
+ u16 mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ int count = 0;
+
+ do {
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+ count++;
+ } while (!(stat & mask) && count < 5);
+}
+
/*
* Low level master read/write transaction.
*/
static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
- struct i2c_msg *msg, int stop)
+ struct i2c_msg *msg, int stop, bool polling)
{
struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
unsigned long timeout;
u16 w;
+ int ret;
dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
@@ -680,7 +695,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
omap_i2c_write_reg(omap, OMAP_I2C_BUF_REG, w);
- reinit_completion(&omap->cmd_complete);
+ if (!polling)
+ reinit_completion(&omap->cmd_complete);
omap->cmd_err = 0;
w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
@@ -732,8 +748,18 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
- timeout = wait_for_completion_timeout(&omap->cmd_complete,
- OMAP_I2C_TIMEOUT);
+ if (!polling) {
+ timeout = wait_for_completion_timeout(&omap->cmd_complete,
+ OMAP_I2C_TIMEOUT);
+ } else {
+ do {
+ omap_i2c_wait(omap);
+ ret = omap_i2c_xfer_data(omap);
+ } while (ret == -EAGAIN);
+
+ timeout = !ret;
+ }
+
if (timeout == 0) {
dev_err(omap->dev, "controller timed out\n");
omap_i2c_reset(omap);
@@ -772,7 +798,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* to do the work during IRQ processing.
*/
static int
-omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+omap_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg msgs[], int num,
+ bool polling)
{
struct omap_i2c_dev *omap = i2c_get_adapdata(adap);
int i;
@@ -794,7 +821,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
omap->set_mpu_wkup_lat(omap->dev, omap->latency);
for (i = 0; i < num; i++) {
- r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
+ r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)),
+ polling);
if (r != 0)
break;
}
@@ -813,6 +841,18 @@ out:
return r;
}
+static int
+omap_i2c_xfer_irq(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ return omap_i2c_xfer_common(adap, msgs, num, false);
+}
+
+static int
+omap_i2c_xfer_polling(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+ return omap_i2c_xfer_common(adap, msgs, num, true);
+}
+
static u32
omap_i2c_func(struct i2c_adapter *adap)
{
@@ -1035,10 +1075,8 @@ omap_i2c_isr(int irq, void *dev_id)
return ret;
}
-static irqreturn_t
-omap_i2c_isr_thread(int this_irq, void *dev_id)
+static int omap_i2c_xfer_data(struct omap_i2c_dev *omap)
{
- struct omap_i2c_dev *omap = dev_id;
u16 bits;
u16 stat;
int err = 0, count = 0;
@@ -1056,7 +1094,8 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
if (!stat) {
/* my work here is done */
- goto out;
+ err = -EAGAIN;
+ break;
}
dev_dbg(omap->dev, "IRQ (ISR = 0x%04x)\n", stat);
@@ -1165,14 +1204,25 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
}
} while (stat);
- omap_i2c_complete_cmd(omap, err);
+ return err;
+}
+
+static irqreturn_t
+omap_i2c_isr_thread(int this_irq, void *dev_id)
+{
+ int ret;
+ struct omap_i2c_dev *omap = dev_id;
+
+ ret = omap_i2c_xfer_data(omap);
+ if (ret != -EAGAIN)
+ omap_i2c_complete_cmd(omap, ret);
-out:
return IRQ_HANDLED;
}
static const struct i2c_algorithm omap_i2c_algo = {
- .master_xfer = omap_i2c_xfer,
+ .master_xfer = omap_i2c_xfer_irq,
+ .master_xfer_atomic = omap_i2c_xfer_polling,
.functionality = omap_i2c_func,
};
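[Note on the OMAP hunks above: the body of the threaded IRQ handler is extracted into omap_i2c_xfer_data(), which returns -EAGAIN when no status bit was pending; the same function can then be driven either from the IRQ thread or from the polling loop used by the new atomic path. A rough self-contained C sketch of that shape, with the hardware event source simulated and all names invented:]

	#include <errno.h>
	#include <stdio.h>

	static int pending = 3;	/* simulated "events still to be serviced" */

	/* returns -EAGAIN when nothing was pending, 0 once everything is handled */
	static int xfer_data(void)
	{
		if (!pending)
			return -EAGAIN;		/* spurious wakeup: no status bits set */
		while (pending)
			printf("handled event %d\n", pending--);
		return 0;
	}

	/* IRQ-thread flavour: only complete the waiter if there was real work */
	static void isr_thread(void)
	{
		if (xfer_data() != -EAGAIN)
			printf("complete(&cmd_complete)\n");
	}

	/* polling flavour used by the atomic path */
	static void poll_until_done(void)
	{
		int ret;

		do {
			/* the driver busy-waits on a status bit here (omap_i2c_wait) */
			ret = xfer_data();
		} while (ret == -EAGAIN);
	}

	int main(void)
	{
		isr_thread();		/* IRQ-driven completion */
		pending = 2;
		poll_until_done();	/* polled completion, no interrupts needed */
		return 0;
	}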
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 90946a8b9a75..e9a0514ae166 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -19,6 +19,7 @@
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
AMD Hudson-2, ML, CZ
+ Hygon CZ
SMSC Victory66
Note: we assume there can only be one device, with one or more
@@ -289,7 +290,9 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
PIIX4_dev->revision >= 0x41) ||
(PIIX4_dev->vendor == PCI_VENDOR_ID_AMD &&
PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
- PIIX4_dev->revision >= 0x49))
+ PIIX4_dev->revision >= 0x49) ||
+ (PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON &&
+ PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS))
smb_en = 0x00;
else
smb_en = (aux) ? 0x28 : 0x2c;
@@ -361,7 +364,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_smba, i2ccfg >> 4);
/* Find which register is used for port selection */
- if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
+ if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
+ PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
switch (PIIX4_dev->device) {
case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
@@ -794,6 +798,7 @@ static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
@@ -904,11 +909,13 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if ((dev->vendor == PCI_VENDOR_ID_ATI &&
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
- dev->vendor == PCI_VENDOR_ID_AMD) {
+ dev->vendor == PCI_VENDOR_ID_AMD ||
+ dev->vendor == PCI_VENDOR_ID_HYGON) {
bool notify_imc = false;
is_sb800 = true;
- if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ if ((dev->vendor == PCI_VENDOR_ID_AMD ||
+ dev->vendor == PCI_VENDOR_ID_HYGON) &&
dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
u8 imc;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a7578f6da979..d39a4606f72d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -85,6 +85,7 @@
/* ICFBSCR */
#define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */
+#define RCAR_MIN_DMA_LEN 8
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
@@ -398,7 +399,7 @@ static void rcar_i2c_dma_callback(void *data)
rcar_i2c_dma_unmap(priv);
}
-static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
+static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
{
struct device *dev = rcar_i2c_priv_to_dev(priv);
struct i2c_msg *msg = priv->msg;
@@ -412,9 +413,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
int len;
/* Do various checks to see if DMA is feasible at all */
- if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
- (read && priv->flags & ID_P_NO_RXDMA))
- return;
+ if (IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN ||
+ !(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA))
+ return false;
if (read) {
/*
@@ -434,7 +435,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(dev, "dma map failed, using PIO\n");
- return;
+ return false;
}
sg_dma_len(&priv->sg) = len;
@@ -448,7 +449,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
if (!txdesc) {
dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
rcar_i2c_cleanup_dma(priv);
- return;
+ return false;
}
txdesc->callback = rcar_i2c_dma_callback;
@@ -458,7 +459,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
if (dma_submit_error(cookie)) {
dev_dbg(dev, "submitting dma failed, using PIO\n");
rcar_i2c_cleanup_dma(priv);
- return;
+ return false;
}
/* Enable DMA Master Received/Transmitted */
@@ -468,6 +469,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICDMAER, TMDMAE);
dma_async_issue_pending(chan);
+ return true;
}
static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
@@ -478,6 +480,10 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
if (!(msr & MDE))
return;
+ /* Check if DMA can be enabled and take over */
+ if (priv->pos == 1 && rcar_i2c_dma(priv))
+ return;
+
if (priv->pos < msg->len) {
/*
* Prepare next data to ICRXTX register.
@@ -488,13 +494,6 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
*/
rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]);
priv->pos++;
-
- /*
- * Try to use DMA to transmit the rest of the data if
- * address transfer phase just finished.
- */
- if (msr & MAT)
- rcar_i2c_dma(priv);
} else {
/*
* The last data was pushed to ICRXTX on _PREV_ empty irq.
@@ -921,6 +920,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
struct i2c_timings i2c_t;
int irq, ret;
+ /* Otherwise logic will break because some bytes must always use PIO */
+ BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
+
priv = devm_kzalloc(dev, sizeof(struct rcar_i2c_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
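[Note on the rcar hunks above: rcar_i2c_dma() now returns bool so the send path can try DMA exactly once the second byte is due (priv->pos == 1) and fall back to PIO on any refusal, and the BUILD_BUG_ON_MSG documents that RCAR_MIN_DMA_LEN can never be tuned below the bytes that must always go out by PIO. A small self-contained C sketch of both ideas together, assuming C11 static_assert; the threshold value and names are illustrative only:]

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define MIN_DMA_LEN 8	/* shorter messages stay on PIO */

	/* userspace analogue of BUILD_BUG_ON_MSG: some bytes are always PIO */
	static_assert(MIN_DMA_LEN >= 3, "invalid min DMA length");

	static bool try_dma(int remaining, bool dma_safe)
	{
		if (remaining < MIN_DMA_LEN || !dma_safe)
			return false;	/* caller silently falls back to PIO */
		printf("DMA takes over for %d bytes\n", remaining);
		return true;
	}

	static void send_byte(int pos)
	{
		printf("PIO byte %d\n", pos);
	}

	int main(void)
	{
		int len = 16, pos = 0;

		send_byte(pos++);		/* byte 0 always goes out by PIO */
		while (pos < len) {
			if (pos == 1 && try_dma(len - pos, true))
				break;		/* rest handed to the DMA engine */
			send_byte(pos++);
		}
		return 0;
	}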
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index b75ff144b570..f31413fd9521 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -43,6 +43,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define RIIC_ICCR1 0x00
#define RIIC_ICCR2 0x04
@@ -112,12 +113,10 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct riic_dev *riic = i2c_get_adapdata(adap);
unsigned long time_left;
- int i, ret;
+ int i;
u8 start_bit;
- ret = clk_prepare_enable(riic->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(adap->dev.parent);
if (readb(riic->base + RIIC_ICCR2) & ICCR2_BBSY) {
riic->err = -EBUSY;
@@ -150,7 +149,7 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
out:
- clk_disable_unprepare(riic->clk);
+ pm_runtime_put(adap->dev.parent);
return riic->err ?: num;
}
@@ -281,20 +280,18 @@ static const struct i2c_algorithm riic_algo = {
static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
{
- int ret;
+ int ret = 0;
unsigned long rate;
int total_ticks, cks, brl, brh;
- ret = clk_prepare_enable(riic->clk);
- if (ret)
- return ret;
+ pm_runtime_get_sync(riic->adapter.dev.parent);
if (t->bus_freq_hz > 400000) {
dev_err(&riic->adapter.dev,
"unsupported bus speed (%dHz). 400000 max\n",
t->bus_freq_hz);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
rate = clk_get_rate(riic->clk);
@@ -332,8 +329,8 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
if (brl > (0x1F + 3)) {
dev_err(&riic->adapter.dev, "invalid speed (%lu). Too slow.\n",
(unsigned long)t->bus_freq_hz);
- clk_disable_unprepare(riic->clk);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
brh = total_ticks - brl;
@@ -378,9 +375,9 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
- clk_disable_unprepare(riic->clk);
-
- return 0;
+out:
+ pm_runtime_put(riic->adapter.dev.parent);
+ return ret;
}
static struct riic_irq_desc riic_irqs[] = {
@@ -439,28 +436,36 @@ static int riic_i2c_probe(struct platform_device *pdev)
i2c_parse_fw_timings(&pdev->dev, &i2c_t, true);
+ pm_runtime_enable(&pdev->dev);
+
ret = riic_init_hw(riic, &i2c_t);
if (ret)
- return ret;
-
+ goto out;
ret = i2c_add_adapter(adap);
if (ret)
- return ret;
+ goto out;
platform_set_drvdata(pdev, riic);
dev_info(&pdev->dev, "registered with %dHz bus speed\n",
i2c_t.bus_freq_hz);
return 0;
+
+out:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
}
static int riic_i2c_remove(struct platform_device *pdev)
{
struct riic_dev *riic = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
writeb(0, riic->base + RIIC_ICIER);
+ pm_runtime_put(&pdev->dev);
i2c_del_adapter(&riic->adapter);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
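[Note on the riic hunks above: the explicit clk_prepare_enable()/clk_disable_unprepare() pairs are replaced by runtime PM, i.e. pm_runtime_enable() at probe, pm_runtime_get_sync()/pm_runtime_put() around each block of register access, and pm_runtime_disable() on every path that undoes probe. A hedged kernel-C fragment of the bracketing idiom only, not the literal driver code; the function name and the failure condition are invented:]

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	/* keep the controller powered only while its registers are being touched */
	static int example_hw_setup(struct device *dev, unsigned int hz)
	{
		int ret = 0;

		pm_runtime_get_sync(dev);

		if (hz > 400000) {	/* stand-in for a configuration check */
			ret = -EINVAL;
			goto out;	/* error paths still drop the PM reference */
		}

		/* ... program dividers and enable the interface ... */

	out:
		pm_runtime_put(dev);
		return ret;
	}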
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 4284fc991cfd..48337bef5b87 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -476,8 +476,12 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
list_add_tail(&v->node,
&solutions);
+ break;
}
}
+
+ if (p_prev == p)
+ break;
}
}
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 5503fa171df0..743c161b22c5 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -328,12 +328,6 @@ static int stu300_start_and_await_event(struct stu300_dev *dev,
{
int ret;
- if (unlikely(irqs_disabled())) {
- /* TODO: implement polling for this case if need be. */
- WARN(1, "irqs are disabled, cannot poll for event\n");
- return -EIO;
- }
-
/* Lock command issue, fill in an event we wait for */
spin_lock_irq(&dev->cmd_issue_lock);
init_completion(&dev->cmd_complete);
@@ -380,13 +374,6 @@ static int stu300_await_event(struct stu300_dev *dev,
{
int ret;
- if (unlikely(irqs_disabled())) {
- /* TODO: implement polling for this case if need be. */
- dev_err(&dev->pdev->dev, "irqs are disabled on this "
- "system!\n");
- return -EIO;
- }
-
/* Is it already here? */
spin_lock_irq(&dev->cmd_issue_lock);
dev->cmd_err = STU300_ERROR_NONE;
@@ -846,6 +833,13 @@ static int stu300_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
return num;
}
+static int stu300_xfer_todo(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ /* TODO: implement polling for this case if need be. */
+ WARN(1, "%s: atomic transfers not implemented\n", dev_name(&adap->dev));
+ return -EOPNOTSUPP;
+}
+
static u32 stu300_func(struct i2c_adapter *adap)
{
/* This is the simplest thing you can think of... */
@@ -853,8 +847,9 @@ static u32 stu300_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm stu300_algo = {
- .master_xfer = stu300_xfer,
- .functionality = stu300_func,
+ .master_xfer = stu300_xfer,
+ .master_xfer_atomic = stu300_xfer_todo,
+ .functionality = stu300_func,
};
static const struct i2c_adapter_quirks stu300_quirks = {
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index d18b0941b71a..f14d4b3fab44 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -597,6 +597,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
i2c->adapter = synquacer_i2c_ops;
i2c_set_adapdata(&i2c->adapter, i2c);
i2c->adapter.dev.parent = &pdev->dev;
+ i2c->adapter.dev.of_node = pdev->dev.of_node;
+ ACPI_COMPANION_SET(&i2c->adapter.dev, ACPI_COMPANION(&pdev->dev));
i2c->adapter.nr = pdev->id;
init_completion(&i2c->completion);
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index f6cd35d0a2ac..9bb085793a0c 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -207,7 +207,8 @@ static int tegra_bpmp_i2c_msg_len_check(struct i2c_msg *msgs, unsigned int num)
static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
struct mrq_i2c_request *request,
- struct mrq_i2c_response *response)
+ struct mrq_i2c_response *response,
+ bool atomic)
{
struct tegra_bpmp_message msg;
int err;
@@ -222,7 +223,7 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
msg.rx.data = response;
msg.rx.size = sizeof(*response);
- if (irqs_disabled())
+ if (atomic)
err = tegra_bpmp_transfer_atomic(i2c->bpmp, &msg);
else
err = tegra_bpmp_transfer(i2c->bpmp, &msg);
@@ -230,8 +231,9 @@ static int tegra_bpmp_i2c_msg_xfer(struct tegra_bpmp_i2c *i2c,
return err;
}
-static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
- struct i2c_msg *msgs, int num)
+static int tegra_bpmp_i2c_xfer_common(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num,
+ bool atomic)
{
struct tegra_bpmp_i2c *i2c = i2c_get_adapdata(adapter);
struct mrq_i2c_response response;
@@ -253,7 +255,7 @@ static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
return err;
}
- err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response);
+ err = tegra_bpmp_i2c_msg_xfer(i2c, &request, &response, atomic);
if (err < 0) {
dev_err(i2c->dev, "failed to transfer message: %d\n", err);
return err;
@@ -268,6 +270,18 @@ static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
return num;
}
+static int tegra_bpmp_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, false);
+}
+
+static int tegra_bpmp_i2c_xfer_atomic(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ return tegra_bpmp_i2c_xfer_common(adapter, msgs, num, true);
+}
+
static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
@@ -276,6 +290,7 @@ static u32 tegra_bpmp_i2c_func(struct i2c_adapter *adapter)
static const struct i2c_algorithm tegra_bpmp_i2c_algo = {
.master_xfer = tegra_bpmp_i2c_xfer,
+ .master_xfer_atomic = tegra_bpmp_i2c_xfer_atomic,
.functionality = tegra_bpmp_i2c_func,
};
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 38af18645133..d389d4fb0623 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, scl = 1, ret;
+ int i = 0, scl = 1, ret = 0;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
@@ -327,6 +327,8 @@ static int i2c_device_probe(struct device *dev)
if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
dev_dbg(dev, "Using Host Notify IRQ\n");
+ /* Keep adapter active when Host Notify is required */
+ pm_runtime_get_sync(&client->adapter->dev);
irq = i2c_smbus_host_notify_to_irq(client);
} else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
@@ -431,6 +433,8 @@ static int i2c_device_remove(struct device *dev)
device_init_wakeup(&client->dev, false);
client->irq = client->init_irq;
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY)
+ pm_runtime_put(&client->adapter->dev);
return status;
}
@@ -710,7 +714,7 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
}
/**
- * i2c_new_device - instantiate an i2c device
+ * i2c_new_client_device - instantiate an i2c device
* @adap: the adapter managing the device
* @info: describes one I2C device; bus_num is ignored
* Context: can sleep
@@ -723,17 +727,17 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
* before any i2c_adapter could exist.
*
* This returns the new i2c client, which may be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
*/
-struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+static struct i2c_client *
+i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
struct i2c_client *client;
int status;
client = kzalloc(sizeof *client, GFP_KERNEL);
if (!client)
- return NULL;
+ return ERR_PTR(-ENOMEM);
client->adapter = adap;
@@ -799,7 +803,31 @@ out_err:
client->name, client->addr, status);
out_err_silent:
kfree(client);
- return NULL;
+ return ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(i2c_new_client_device);
+
+/**
+ * i2c_new_device - instantiate an i2c device
+ * @adap: the adapter managing the device
+ * @info: describes one I2C device; bus_num is ignored
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as
+ * @i2c_new_client_device, it just returns NULL instead of an ERR_PTR in case of
+ * an error for compatibility with current I2C API. It will be removed once all
+ * users are converted.
+ *
+ * This returns the new i2c client, which may be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *
+i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+ struct i2c_client *ret;
+
+ ret = i2c_new_client_device(adap, info);
+ return IS_ERR(ret) ? NULL : ret;
}
EXPORT_SYMBOL_GPL(i2c_new_device);
@@ -850,7 +878,7 @@ static struct i2c_driver dummy_driver = {
};
/**
- * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * i2c_new_dummy_device - return a new i2c device bound to a dummy driver
* @adapter: the adapter managing the device
* @address: seven bit address to be used
* Context: can sleep
@@ -865,18 +893,86 @@ static struct i2c_driver dummy_driver = {
* different driver.
*
* This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
*/
-struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+static struct i2c_client *
+i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address)
{
struct i2c_board_info info = {
I2C_BOARD_INFO("dummy", address),
};
- return i2c_new_device(adapter, &info);
+ return i2c_new_client_device(adapter, &info);
+}
+EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
+
+/**
+ * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as @i2c_new_dummy_device,
+ * it just returns NULL instead of an ERR_PTR in case of an error for
+ * compatibility with current I2C API. It will be removed once all users are
+ * converted.
+ *
+ * This returns the new i2c client, which should be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+{
+ struct i2c_client *ret;
+
+ ret = i2c_new_dummy_device(adapter, address);
+ return IS_ERR(ret) ? NULL : ret;
}
EXPORT_SYMBOL_GPL(i2c_new_dummy);
+struct i2c_dummy_devres {
+ struct i2c_client *client;
+};
+
+static void devm_i2c_release_dummy(struct device *dev, void *res)
+{
+ struct i2c_dummy_devres *this = res;
+
+ i2c_unregister_device(this->client);
+}
+
+/**
+ * devm_i2c_new_dummy_device - return a new i2c device bound to a dummy driver
+ * @dev: device the managed resource is bound to
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This is the device-managed version of @i2c_new_dummy_device. It returns the
+ * new i2c client or an ERR_PTR in case of an error.
+ */
+struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
+ struct i2c_adapter *adapter,
+ u16 address)
+{
+ struct i2c_dummy_devres *dr;
+ struct i2c_client *client;
+
+ dr = devres_alloc(devm_i2c_release_dummy, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ client = i2c_new_dummy_device(adapter, address);
+ if (IS_ERR(client)) {
+ devres_free(dr);
+ } else {
+ dr->client = client;
+ devres_add(dev, dr);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device);
+
/**
* i2c_new_secondary_device - Helper to get the instantiated secondary address
* and create the associated device
@@ -996,9 +1092,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
- client = i2c_new_device(adap, &info);
- if (!client)
- return -EINVAL;
+ client = i2c_new_client_device(adap, &info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
/* Keep track of the added device */
mutex_lock(&adap->userspace_clients_lock);
@@ -1867,8 +1963,10 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
- if (WARN_ON(test_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags)))
- return -ESHUTDOWN;
+
+ ret = __i2c_check_suspended(adap);
+ if (ret)
+ return ret;
if (adap->quirks && i2c_check_for_quirks(adap, msgs, num))
return -EOPNOTSUPP;
@@ -1890,7 +1988,11 @@ int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (ret = 0, try = 0; try <= adap->retries; try++) {
- ret = adap->algo->master_xfer(adap, msgs, num);
+ if (i2c_in_atomic_xfer_mode() && adap->algo->master_xfer_atomic)
+ ret = adap->algo->master_xfer_atomic(adap, msgs, num);
+ else
+ ret = adap->algo->master_xfer(adap, msgs, num);
+
if (ret != -EAGAIN)
break;
if (time_after(jiffies, orig_jiffies + adap->timeout))
@@ -1946,14 +2048,9 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
* one (discarding status on the second message) or errno
* (discarding status on the first one).
*/
- if (in_atomic() || irqs_disabled()) {
- ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT);
- if (!ret)
- /* I2C activity is ongoing. */
- return -EAGAIN;
- } else {
- i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
- }
+ ret = __i2c_lock_bus_helper(adap);
+ if (ret)
+ return ret;
ret = __i2c_transfer(adap, msgs, num);
i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
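[Note on the i2c-core-base hunks above: the new i2c_new_client_device()/i2c_new_dummy_device() report failures as ERR_PTR() values instead of NULL, and devm_i2c_new_dummy_device() ties the dummy client's lifetime to devres so callers can drop their explicit i2c_unregister_device(). A hedged kernel-C sketch of how a client driver would consume the devm variant, assuming its prototype is available from <linux/i2c.h> as the export suggests; the probe function and the secondary address 0x6b are invented for illustration:]

	#include <linux/err.h>
	#include <linux/i2c.h>

	/* example: claim a second address on the same bus for a register page */
	static int example_probe(struct i2c_client *client)
	{
		struct i2c_client *page;

		page = devm_i2c_new_dummy_device(&client->dev, client->adapter, 0x6b);
		if (IS_ERR(page))
			return PTR_ERR(page);	/* ERR_PTR style: no NULL checks */

		/* no explicit i2c_unregister_device(): devres drops it on unbind */
		return 0;
	}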
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index 132119112596..788d42f2aad9 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -20,6 +20,8 @@
#include <linux/i2c-smbus.h>
#include <linux/slab.h>
+#include "i2c-core.h"
+
#define CREATE_TRACE_POINTS
#include <trace/events/smbus.h>
@@ -530,7 +532,10 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
{
s32 res;
- i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+ res = __i2c_lock_bus_helper(adapter);
+ if (res)
+ return res;
+
res = __i2c_smbus_xfer(adapter, addr, flags, read_write,
command, protocol, data);
i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
@@ -543,10 +548,17 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
unsigned short flags, char read_write,
u8 command, int protocol, union i2c_smbus_data *data)
{
+ int (*xfer_func)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
unsigned long orig_jiffies;
int try;
s32 res;
+ res = __i2c_check_suspended(adapter);
+ if (res)
+ return res;
+
/* If enabled, the following two tracepoints are conditional on
* read_write and protocol.
*/
@@ -557,13 +569,20 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
- if (adapter->algo->smbus_xfer) {
+ xfer_func = adapter->algo->smbus_xfer;
+ if (i2c_in_atomic_xfer_mode()) {
+ if (adapter->algo->smbus_xfer_atomic)
+ xfer_func = adapter->algo->smbus_xfer_atomic;
+ else if (adapter->algo->master_xfer_atomic)
+ xfer_func = NULL; /* fallback to I2C emulation */
+ }
+
+ if (xfer_func) {
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
for (res = 0, try = 0; try <= adapter->retries; try++) {
- res = adapter->algo->smbus_xfer(adapter, addr, flags,
- read_write, command,
- protocol, data);
+ res = xfer_func(adapter, addr, flags, read_write,
+ command, protocol, data);
if (res != -EAGAIN)
break;
if (time_after(jiffies,
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 37576f50fe20..c88cfef81343 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -29,6 +29,42 @@ extern int __i2c_first_dynamic_bus_num;
int i2c_check_7bit_addr_validity_strict(unsigned short addr);
+/*
+ * We only allow atomic transfers for very late communication, e.g. to send
+ * the powerdown command to a PMIC. Atomic transfers are a corner case and not
+ * for generic use!
+ */
+static inline bool i2c_in_atomic_xfer_mode(void)
+{
+ return system_state > SYSTEM_RUNNING && irqs_disabled();
+}
+
+static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
+{
+ int ret = 0;
+
+ if (i2c_in_atomic_xfer_mode()) {
+ WARN(!adap->algo->master_xfer_atomic && !adap->algo->smbus_xfer_atomic,
+ "No atomic I2C transfer handler for '%s'\n", dev_name(&adap->dev));
+ ret = i2c_trylock_bus(adap, I2C_LOCK_SEGMENT) ? 0 : -EAGAIN;
+ } else {
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
+ }
+
+ return ret;
+}
+
+static inline int __i2c_check_suspended(struct i2c_adapter *adap)
+{
+ if (test_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags)) {
+ if (!test_and_set_bit(I2C_ALF_SUSPEND_REPORTED, &adap->locked_flags))
+ dev_WARN(&adap->dev, "Transfer while suspended\n");
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_ACPI
const struct acpi_device_id *
i2c_acpi_match_device(const struct acpi_device_id *matches,
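[Note on the i2c-core.h hunks above: these helpers are the core of the atomic-transfer support. i2c_in_atomic_xfer_mode() only triggers once the system is past SYSTEM_RUNNING with interrupts off (late shutdown/poweroff), __i2c_lock_bus_helper() then switches to trylock semantics and warns when an adapter offers no atomic callback, and the transfer paths prefer master_xfer_atomic/smbus_xfer_atomic when present. A condensed self-contained C sketch of that dispatch decision, with the kernel specifics stubbed out and all names invented:]

	#include <stdbool.h>
	#include <stdio.h>

	struct algo {
		int (*xfer)(int n);
		int (*xfer_atomic)(int n);	/* optional: only some drivers provide it */
	};

	static bool in_atomic_xfer_mode(void)
	{
		/* kernel check: system_state > SYSTEM_RUNNING && irqs_disabled() */
		return true;	/* pretend we are in the late shutdown path */
	}

	static int xfer_irq(int n)  { return printf("irq xfer of %d msgs\n", n); }
	static int xfer_poll(int n) { return printf("polled xfer of %d msgs\n", n); }

	static int transfer(const struct algo *a, int n)
	{
		if (in_atomic_xfer_mode() && a->xfer_atomic)
			return a->xfer_atomic(n);	/* no sleeping, no IRQs needed */
		return a->xfer(n);			/* normal path (core would warn here) */
	}

	int main(void)
	{
		struct algo with = { .xfer = xfer_irq, .xfer_atomic = xfer_poll };
		struct algo without = { .xfer = xfer_irq };

		transfer(&with, 1);
		transfer(&without, 1);
		return 0;
	}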
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index f330690b4125..603252fa1284 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -310,12 +310,18 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
else
priv->algo.master_xfer = __i2c_mux_master_xfer;
}
+ if (parent->algo->master_xfer_atomic)
+ priv->algo.master_xfer_atomic = priv->algo.master_xfer;
+
if (parent->algo->smbus_xfer) {
if (muxc->mux_locked)
priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
else
priv->algo.smbus_xfer = __i2c_mux_smbus_xfer;
}
+ if (parent->algo->smbus_xfer_atomic)
+ priv->algo.smbus_xfer_atomic = priv->algo.smbus_xfer;
+
priv->algo.functionality = i2c_mux_functionality;
/* Now fill out new adapter structure */
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 035032e20327..4eecffc26527 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -99,6 +99,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
/* Now fill out current adapter structure. cur_chan must be up to date */
priv->algo.master_xfer = i2c_demux_master_xfer;
+ if (adap->algo->master_xfer_atomic)
+ priv->algo.master_xfer_atomic = i2c_demux_master_xfer;
priv->algo.functionality = i2c_demux_functionality;
snprintf(priv->cur_adap.name, sizeof(priv->cur_adap.name),
@@ -219,8 +221,8 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
- + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, struct_size(priv, chan, num_chan),
+ GFP_KERNEL);
props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 9e75d6b9140b..50e1fb4aedf5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -22,7 +22,6 @@
#include <linux/i2c-mux.h>
#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/platform_data/pca954x.h>
#include <linux/slab.h>
/*
@@ -287,10 +286,8 @@ static int pca9541_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
- struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
struct i2c_mux_core *muxc;
struct pca9541 *data;
- int force;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -306,9 +303,6 @@ static int pca9541_probe(struct i2c_client *client,
/* Create mux adapter */
- force = 0;
- if (pdata)
- force = pdata->modes[0].adap_id;
muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data),
I2C_MUX_ARBITRATOR,
pca9541_select_chan, pca9541_release_chan);
@@ -320,7 +314,7 @@ static int pca9541_probe(struct i2c_client *client,
i2c_set_clientdata(client, muxc);
- ret = i2c_mux_add_adapter(muxc, force, 0, 0);
+ ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
if (ret)
return ret;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index bfabf985e830..923aa3a5a3dc 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -46,10 +46,10 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/platform_data/pca954x.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <dt-bindings/mux/mux.h>
#define PCA954X_MAX_NCHANS 8
@@ -85,7 +85,9 @@ struct pca954x {
const struct chip_desc *chip;
u8 last_chan; /* last register value */
- u8 deselect;
+ /* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */
+ s8 idle_state;
+
struct i2c_client *client;
struct irq_domain *irq;
@@ -254,15 +256,71 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
{
struct pca954x *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
+ s8 idle_state;
+
+ idle_state = READ_ONCE(data->idle_state);
+ if (idle_state >= 0)
+ /* Set the mux back to a predetermined channel */
+ return pca954x_select_chan(muxc, idle_state);
+
+ if (idle_state == MUX_IDLE_DISCONNECT) {
+ /* Deselect active channel */
+ data->last_chan = 0;
+ return pca954x_reg_write(muxc->parent, client,
+ data->last_chan);
+ }
- if (!(data->deselect & (1 << chan)))
- return 0;
+ /* otherwise leave as-is */
- /* Deselect active channel */
- data->last_chan = 0;
- return pca954x_reg_write(muxc->parent, client, data->last_chan);
+ return 0;
+}
+
+static ssize_t idle_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+
+ return sprintf(buf, "%d\n", READ_ONCE(data->idle_state));
}
+static ssize_t idle_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+ int val;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != MUX_IDLE_AS_IS && val != MUX_IDLE_DISCONNECT &&
+ (val < 0 || val >= data->chip->nchans))
+ return -EINVAL;
+
+ i2c_lock_bus(muxc->parent, I2C_LOCK_SEGMENT);
+
+ WRITE_ONCE(data->idle_state, val);
+ /*
+ * Set the mux into a state consistent with the new
+ * idle_state.
+ */
+ if (data->last_chan || val != MUX_IDLE_DISCONNECT)
+ ret = pca954x_deselect_mux(muxc, 0);
+
+ i2c_unlock_bus(muxc->parent, I2C_LOCK_SEGMENT);
+
+ return ret < 0 ? ret : count;
+}
+
+static DEVICE_ATTR_RW(idle_state);
+
static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
{
struct pca954x *data = dev_id;
@@ -329,8 +387,11 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
static void pca954x_cleanup(struct i2c_mux_core *muxc)
{
struct pca954x *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
int c, irq;
+ device_remove_file(&client->dev, &dev_attr_idle_state);
+
if (data->irq) {
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_find_mapping(data->irq, c);
@@ -348,14 +409,13 @@ static int pca954x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
- struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device_node *np = dev->of_node;
bool idle_disconnect_dt;
struct gpio_desc *gpio;
- int num, force, class;
struct i2c_mux_core *muxc;
struct pca954x *data;
+ int num;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
@@ -412,9 +472,12 @@ static int pca954x_probe(struct i2c_client *client,
}
data->last_chan = 0; /* force the first selection */
+ data->idle_state = MUX_IDLE_AS_IS;
idle_disconnect_dt = np &&
of_property_read_bool(np, "i2c-mux-idle-disconnect");
+ if (idle_disconnect_dt)
+ data->idle_state = MUX_IDLE_DISCONNECT;
ret = pca954x_irq_setup(muxc);
if (ret)
@@ -422,24 +485,7 @@ static int pca954x_probe(struct i2c_client *client,
/* Now create an adapter for each channel */
for (num = 0; num < data->chip->nchans; num++) {
- bool idle_disconnect_pd = false;
-
- force = 0; /* dynamic adap number */
- class = 0; /* no class by default */
- if (pdata) {
- if (num < pdata->num_modes) {
- /* force static number */
- force = pdata->modes[num].adap_id;
- class = pdata->modes[num].class;
- } else
- /* discard unconfigured channels */
- break;
- idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
- }
- data->deselect |= (idle_disconnect_pd ||
- idle_disconnect_dt) << num;
-
- ret = i2c_mux_add_adapter(muxc, force, num, class);
+ ret = i2c_mux_add_adapter(muxc, 0, num, 0);
if (ret)
goto fail_cleanup;
}
@@ -453,6 +499,12 @@ static int pca954x_probe(struct i2c_client *client,
goto fail_cleanup;
}
+ /*
+ * The attr probably isn't going to be needed in most cases,
+ * so don't fail completely on error.
+ */
+ device_create_file(dev, &dev_attr_idle_state);
+
dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n",
num, data->chip->muxtype == pca954x_ismux
? "mux" : "switch", client->name);
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 1412abcff010..5f4bd52121fe 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -385,8 +385,9 @@ static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
return;
ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
- *ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
- *ptr |= status << (bitpos % BITS_PER_LONG);
+ *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK <<
+ (bitpos % BITS_PER_LONG));
+ *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG);
}
static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
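[Note on the i3c hunk above: casting the mask and the status to unsigned long before shifting matters because both start out as int-sized values, so without the casts the shift happens in 32-bit arithmetic, which is undefined for shift counts of 32 or more and corrupts slots that land in the upper half of a long on 64-bit builds. A small self-contained C demonstration of the same fix; the 2-bit-per-slot layout mirrors the addrslots encoding, the values themselves are arbitrary:]

	#include <stdint.h>
	#include <stdio.h>

	#define SLOT_MASK 0x3U	/* two status bits per address slot */

	static void set_slot(unsigned long *word, unsigned int bitpos, unsigned int status)
	{
		/*
		 * Without the casts the shifts are done as 32-bit int/unsigned:
		 * undefined for bitpos >= 32, so high slots would be corrupted.
		 */
		*word &= ~((unsigned long)SLOT_MASK << bitpos);
		*word |= (unsigned long)status << bitpos;
	}

	int main(void)
	{
		unsigned long slots = 0;

		set_slot(&slots, 62, 0x2);	/* a slot in the upper half of the word */
		printf("%lx\n", slots);		/* 8000000000000000 on 64-bit */
		return 0;
	}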
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 10c26ffaa8ef..1d83c97431c7 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -841,11 +841,6 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i3c_nxfers; i++) {
- if (i3c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i3c_nxfers; i++) {
if (i3c_xfers[i].rnw)
nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
else
@@ -974,11 +969,6 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
return -ENOTSUPP;
for (i = 0; i < i2c_nxfers; i++) {
- if (i2c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
- return -ENOTSUPP;
- }
-
- for (i = 0; i < i2c_nxfers; i++) {
if (i2c_xfers[i].flags & I2C_M_RD)
nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
else
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1f03884a6808..3b15adc6ce98 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1797,6 +1797,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 4a6e1a413ead..46f2df288c6a 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -82,8 +82,9 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
/*
* ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so disk->events
- * is cleared to 0 and the following function is used only to trigger
+ * makes it impossible to use for proper event reporting, so
+ * DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags
+ * and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 04e008e8f6f9..f233b34ea0c0 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -299,8 +299,9 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
/*
* The following is used to force revalidation on the first open on
* removeable devices, and never gets reported to userland as
- * genhd->events is 0. This is intended as removeable ide disk
- * can't really detect MEDIA_CHANGE events.
+ * DISK_EVENT_FLAG_UEVENT isn't set in genhd->event_flags.
+ * This is intended as removable ide disk can't really detect
+ * MEDIA_CHANGE events.
*/
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -416,6 +417,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 5aeaca24a28f..4ad824984581 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1437,6 +1437,9 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
ide_hwif_t *hwif, *mate = NULL;
int i, j = 0;
+ pr_warn("legacy IDE will be removed in 2021, please switch to libata\n"
+ "Report any missing HW support to linux-ide@vger.kernel.org\n");
+
ide_host_for_each_port(i, hwif, host) {
if (hwif == NULL) {
mate = NULL;
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 67d4a7d4acc8..88d132edc4e3 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -156,7 +156,6 @@ static u16 tx4939ide_check_error_ints(ide_hwif_t *hwif)
u16 sysctl = tx4939ide_readw(base, TX4939IDE_Sys_Ctl);
tx4939ide_writew(sysctl | 0x4000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* wait 12GBUSCLK (typ. 60ns @ GBUS200MHz, max 270ns) */
ndelay(270);
tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl);
@@ -396,7 +395,6 @@ static void tx4939ide_init_hwif(ide_hwif_t *hwif)
/* Soft Reset */
tx4939ide_writew(0x8000, base, TX4939IDE_Sys_Ctl);
- mmiowb();
/* at least 20 GBUSCLK (typ. 100ns @ GBUS200MHz, max 450ns) */
ndelay(450);
tx4939ide_writew(0x0000, base, TX4939IDE_Sys_Ctl);
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index d08aeb41cd07..a22cbee593fe 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -4,7 +4,6 @@
menuconfig IIO
tristate "Industrial I/O support"
- select ANON_INODES
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
@@ -39,28 +38,28 @@ config IIO_TRIGGER
data now' interrupt.
config IIO_CONSUMERS_PER_TRIGGER
- int "Maximum number of consumers per trigger"
- depends on IIO_TRIGGER
- default "2"
- help
- This value controls the maximum number of consumers that a
- given trigger may handle. Default is 2.
+ int "Maximum number of consumers per trigger"
+ depends on IIO_TRIGGER
+ default "2"
+ help
+ This value controls the maximum number of consumers that a
+ given trigger may handle. Default is 2.
config IIO_SW_DEVICE
tristate "Enable software IIO device support"
select IIO_CONFIGFS
help
- Provides IIO core support for software devices. A software
- device can be created via configfs or directly by a driver
- using the API provided.
+ Provides IIO core support for software devices. A software
+ device can be created via configfs or directly by a driver
+ using the API provided.
config IIO_SW_TRIGGER
tristate "Enable software triggers support"
select IIO_CONFIGFS
help
- Provides IIO core support for software triggers. A software
- trigger can be created via configfs or directly by a driver
- using the API provided.
+ Provides IIO core support for software triggers. A software
+ trigger can be created via configfs or directly by a driver
+ using the API provided.
config IIO_TRIGGERED_EVENT
tristate
@@ -74,7 +73,6 @@ source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
-source "drivers/iio/counter/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
source "drivers/iio/frequency/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index cb5993251381..bff682ad1cfb 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -20,7 +20,6 @@ obj-y += amplifiers/
obj-y += buffer/
obj-y += chemical/
obj-y += common/
-obj-y += counter/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 898839ca164a..62a970a20219 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -6,28 +6,28 @@
menu "Accelerometers"
config ADIS16201
- tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16201 dual-axis
- digital inclinometer and accelerometer.
+ tristate "Analog Devices ADIS16201 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16201 dual-axis
+ digital inclinometer and accelerometer.
- To compile this driver as a module, say M here: the module will
- be called adis16201.
+ To compile this driver as a module, say M here: the module will
+ be called adis16201.
config ADIS16209
- tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
- and accelerometer.
+ tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say Y here to build support for Analog Devices adis16209 dual-axis digital inclinometer
+ and accelerometer.
- To compile this driver as a module, say M here: the module will be
- called adis16209.
+ To compile this driver as a module, say M here: the module will be
+ called adis16209.
config ADXL345
tristate
@@ -100,16 +100,16 @@ config BMA180
module will be called bma180.
config BMA220
- tristate "Bosch BMA220 3-Axis Accelerometer Driver"
+ tristate "Bosch BMA220 3-Axis Accelerometer Driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
- help
- Say yes here to add support for the Bosch BMA220 triaxial
- acceleration sensor.
+ help
+ Say yes here to add support for the Bosch BMA220 triaxial
+ acceleration sensor.
- To compile this driver as a module, choose M here: the
- module will be called bma220_spi.
+ To compile this driver as a module, choose M here: the
+ module will be called bma220_spi.
config BMC150_ACCEL
tristate "Bosch BMC150 Accelerometer Driver"
@@ -223,7 +223,7 @@ config IIO_ST_ACCEL_3AXIS
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
- LNG2DM, LIS3DE
+ LNG2DM, LIS3DE, LIS2DE12
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index cb9765a3de60..f9720a1e8a7c 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -116,6 +116,7 @@ struct bma180_data {
struct i2c_client *client;
struct iio_trigger *trig;
const struct bma180_part_info *part_info;
+ struct iio_mount_matrix orientation;
struct mutex mutex;
bool sleep_state;
int scale;
@@ -561,6 +562,15 @@ static int bma180_set_power_mode(struct iio_dev *indio_dev,
return ret;
}
+static const struct iio_mount_matrix *
+bma180_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
static const struct iio_enum bma180_power_mode_enum = {
.items = bma180_power_modes,
.num_items = ARRAY_SIZE(bma180_power_modes),
@@ -571,7 +581,8 @@ static const struct iio_enum bma180_power_mode_enum = {
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
- { },
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
+ { }
};
#define BMA180_ACC_CHANNEL(_axis, _bits) { \
@@ -722,6 +733,11 @@ static int bma180_probe(struct i2c_client *client,
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
+ ret = iio_read_mount_matrix(&client->dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
ret = data->part_info->chip_config(data);
if (ret < 0)
goto err_chip_disable;
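The mount-matrix support added above only exposes the sensor's 3x3 orientation matrix to userspace (typically as an in_accel_mount_matrix attribute); applying it to samples is left to the consumer. A minimal userspace sketch, assuming the nine row-major coefficients have already been parsed out of that attribute (the helper name is made up):

/* Rotate a raw (x, y, z) sample into the board frame with a row-major
 * 3x3 mount matrix m. Illustrative helper, not part of the patch. */
static void apply_mount_matrix(const double m[9], const double in[3],
			       double out[3])
{
	int i;

	for (i = 0; i < 3; i++)
		out[i] = m[3 * i] * in[0] + m[3 * i + 1] * in[1] +
			 m[3 * i + 2] * in[2];
}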
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 383c802eb5b8..44d7c49fe2a1 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -204,6 +204,7 @@ struct bmc150_accel_data {
int ev_enable_state;
int64_t timestamp, old_timestamp; /* Only used in hw fifo mode. */
const struct bmc150_accel_chip_info *chip_info;
+ struct iio_mount_matrix orientation;
};
static const struct {
@@ -393,7 +394,7 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
if (ret < 0) {
dev_err(dev,
- "Failed: bmc150_accel_set_power_state for %d\n", on);
+ "Failed: %s for %d\n", __func__, on);
if (on)
pm_runtime_put_noidle(dev);
@@ -796,6 +797,20 @@ static ssize_t bmc150_accel_get_fifo_state(struct device *dev,
return sprintf(buf, "%d\n", state);
}
+static const struct iio_mount_matrix *
+bmc150_accel_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bmc150_accel_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bmc150_accel_get_mount_matrix),
+ { }
+};
+
static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
static IIO_CONST_ATTR(hwfifo_watermark_max,
__stringify(BMC150_ACCEL_FIFO_LENGTH));
@@ -978,6 +993,7 @@ static const struct iio_event_spec bmc150_accel_event = {
.shift = 16 - (bits), \
.endianness = IIO_LE, \
}, \
+ .ext_info = bmc150_accel_ext_info, \
.event_spec = &bmc150_accel_event, \
.num_event_specs = 1 \
}
@@ -1555,6 +1571,11 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
data->regmap = regmap;
+ ret = iio_read_mount_matrix(dev, "mount-matrix",
+ &data->orientation);
+ if (ret)
+ return ret;
+
ret = bmc150_accel_chip_init(data);
if (ret < 0)
return ret;
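The dev_err() change above (and the matching one in kxcjk-1013.c further down) swaps a hard-coded function name for __func__, so the message stays accurate across renames and identical format strings can be shared. The pattern in isolation, with a made-up function:

#include <linux/device.h>
#include <linux/errno.h>

static int example_set_power(struct device *dev, bool on)
{
	/* __func__ expands to the enclosing function's name at compile time. */
	dev_err(dev, "Failed: %s for %d\n", __func__, on);
	return -EIO;
}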
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 063e89eff791..46bb2e421bb9 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for older Chrome OS EC accelerometer
*
* Copyright 2017 Google, Inc
*
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver uses the memory mapper cros-ec interface to communicate
* with the Chrome OS EC about accelerometer data.
* Accelerometer access is presented through iio sysfs.
@@ -29,7 +21,6 @@
#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/platform_device.h>
#define DRV_NAME "cros-ec-accel-legacy"
@@ -353,7 +344,7 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
struct iio_dev *indio_dev;
struct cros_ec_accel_legacy_state *state;
- int ret, i;
+ int ret;
if (!ec || !ec->ec_dev) {
dev_warn(&pdev->dev, "No EC device found.\n");
@@ -381,20 +372,17 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
* Present the channel using HTML5 standard:
* need to invert X and Y and invert some lid axis.
*/
- for (i = X ; i < MAX_AXIS; i++) {
- switch (i) {
- case X:
- ec_accel_channels[X].scan_index = Y;
- case Y:
- ec_accel_channels[Y].scan_index = X;
- case Z:
- ec_accel_channels[Z].scan_index = Z;
- }
- if (state->sensor_num == MOTIONSENSE_LOC_LID && i != Y)
- state->sign[i] = -1;
- else
- state->sign[i] = 1;
- }
+ ec_accel_channels[X].scan_index = Y;
+ ec_accel_channels[Y].scan_index = X;
+ ec_accel_channels[Z].scan_index = Z;
+
+ state->sign[Y] = 1;
+
+ if (state->sensor_num == MOTIONSENSE_LOC_LID)
+ state->sign[X] = state->sign[Z] = -1;
+ else
+ state->sign[X] = state->sign[Z] = 1;
+
indio_dev->num_channels = ARRAY_SIZE(ec_accel_channels);
indio_dev->dev.parent = &pdev->dev;
indio_dev->info = &cros_ec_accel_legacy_info;
@@ -419,5 +407,5 @@ module_platform_driver(cros_ec_accel_platform_driver);
MODULE_DESCRIPTION("ChromeOS EC legacy accelerometer driver");
MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);
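For context on the probe hunk above: the deleted switch had no break statements, so on the first pass (i == X) execution fell through and all three scan_index assignments ran anyway; the straight-line replacement keeps the same behaviour while making the sign handling explicit. A condensed view (loc stands in for state->sensor_num):

/*
 * before: for each axis i:
 *             sign[i] = (loc == MOTIONSENSE_LOC_LID && i != Y) ? -1 : 1;
 * after:  sign[Y] = 1;
 *         sign[X] = sign[Z] = (loc == MOTIONSENSE_LOC_LID) ? -1 : 1;
 */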
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 50f3ff386bea..2922a2e88a1b 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -451,7 +451,7 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
}
if (ret < 0) {
dev_err(&data->client->dev,
- "Failed: kxcjk1013_set_power_state for %d\n", on);
+ "Failed: %s for %d\n", __func__, on);
if (on)
pm_runtime_put_noidle(&data->client->dev);
return ret;
@@ -1491,6 +1491,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1013", KXCJK1013},
{"KXCJ1008", KXCJ91008},
{"KXCJ9000", KXCJ91008},
+ {"KIOX0008", KXCJ91008},
{"KIOX0009", KXTJ21009},
{"KIOX000A", KXCJ91008},
{"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
@@ -1512,10 +1513,20 @@ static const struct i2c_device_id kxcjk1013_id[] = {
MODULE_DEVICE_TABLE(i2c, kxcjk1013_id);
+static const struct of_device_id kxcjk1013_of_match[] = {
+ { .compatible = "kionix,kxcjk1013", },
+ { .compatible = "kionix,kxcj91008", },
+ { .compatible = "kionix,kxtj21009", },
+ { .compatible = "kionix,kxtf9", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, kxcjk1013_of_match);
+
static struct i2c_driver kxcjk1013_driver = {
.driver = {
.name = KXCJK1013_DRV_NAME,
.acpi_match_table = ACPI_PTR(kx_acpi_match),
+ .of_match_table = kxcjk1013_of_match,
.pm = &kxcjk1013_pm_ops,
},
.probe = kxcjk1013_probe,
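With the of_device_id table and the .of_match_table hook added above, the driver can also be bound from a device tree. An illustrative node, shown here as a comment (the bus placement and I2C address are assumptions; only the compatible string comes from the patch):

/*
 *	accelerometer@f {
 *		compatible = "kionix,kxcjk1013";
 *		reg = <0x0f>;
 *	};
 */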
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 0c0df4fce420..70c60db62247 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -420,9 +420,7 @@ int kxsd9_common_probe(struct device *dev,
indio_dev->available_scan_masks = kxsd9_scan_masks;
/* Read the mounting matrix, if present */
- ret = of_iio_read_mount_matrix(dev,
- "mount-matrix",
- &st->orientation);
+ ret = iio_read_mount_matrix(dev, "mount-matrix", &st->orientation);
if (ret)
return ret;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 302781126bc6..00e100fc845a 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1580,7 +1580,7 @@ static int mma8452_probe(struct i2c_client *client,
case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
- /* else: fall through */
+ /* fall through */
default:
ret = -ENODEV;
goto disable_regulators;
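The reworded comment above matters for GCC's -Wimplicit-fallthrough, which the kernel tree was converging on at the time: the warning is only suppressed by comments matching its expected patterns, and a plain /* fall through */ is recognized where the old "else: fall through" wording apparently was not. The convention in isolation (the switch is illustrative; only FXLS8471_DEVICE_ID comes from the hunk above):

	switch (device_id) {
	case FXLS8471_DEVICE_ID:
		if (id_ok)
			break;
		/* fall through */
	default:
		ret = -ENODEV;
		break;
	}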
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index fd53258656ca..9d25955af58e 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -34,6 +34,7 @@ enum st_accel_type {
LIS3LV02DL,
LIS2DW12,
LIS3DHH,
+ LIS2DE12,
ST_ACCEL_MAX,
};
@@ -57,6 +58,7 @@ enum st_accel_type {
#define LIS2DW12_ACCEL_DEV_NAME "lis2dw12"
#define LIS3DHH_ACCEL_DEV_NAME "lis3dhh"
#define LIS3DE_ACCEL_DEV_NAME "lis3de"
+#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
/**
* struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index a3c0916479fa..5ff04d9755d5 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -831,6 +831,82 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.multi_read_bit = false,
.bootime = 2,
},
+ {
+ .wai = 0x33,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = LIS2DE12_ACCEL_DEV_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_8bit_channels,
+ .odr = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .odr_avl = {
+ { .hz = 1, .value = 0x01, },
+ { .hz = 10, .value = 0x02, },
+ { .hz = 25, .value = 0x03, },
+ { .hz = 50, .value = 0x04, },
+ { .hz = 100, .value = 0x05, },
+ { .hz = 200, .value = 0x06, },
+ { .hz = 400, .value = 0x07, },
+ { .hz = 1620, .value = 0x08, },
+ { .hz = 5376, .value = 0x09, },
+ },
+ },
+ .pw = {
+ .addr = 0x20,
+ .mask = 0xf0,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = 0x23,
+ .mask = 0x30,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_2G,
+ .value = 0x00,
+ .gain = IIO_G_TO_M_S_2(15600),
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_4G,
+ .value = 0x01,
+ .gain = IIO_G_TO_M_S_2(31200),
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_8G,
+ .value = 0x02,
+ .gain = IIO_G_TO_M_S_2(62500),
+ },
+ [3] = {
+ .num = ST_ACCEL_FS_AVL_16G,
+ .value = 0x03,
+ .gain = IIO_G_TO_M_S_2(187500),
+ },
+ },
+ },
+ .drdy_irq = {
+ .int1 = {
+ .addr = 0x22,
+ .mask = 0x10,
+ },
+ .addr_ihl = 0x25,
+ .mask_ihl = 0x02,
+ .stat_drdy = {
+ .addr = ST_SENSORS_DEFAULT_STAT_ADDR,
+ .mask = 0x07,
+ },
+ },
+ .sim = {
+ .addr = 0x23,
+ .value = BIT(0),
+ },
+ .multi_read_bit = true,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
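The full-scale gains in the new LIS2DE12 entry are the datasheet sensitivities of its 8-bit output expressed in micro-g per digit, which IIO_G_TO_M_S_2() then turns into the scale reported to userspace. A quick check of the ±2 g case (the other ranges follow the same pattern with 31.2, 62.5 and 187.5 mg/digit):

	±2 g over a signed 8-bit sample:  2 g / 128 ≈ 15.6 mg/digit
	                                  15.6 mg = 15600 µg  ->  IIO_G_TO_M_S_2(15600)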
@@ -992,7 +1068,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev,
goto out;
val = elements[i].integer.value;
- if (val < 0 || val > 2)
+ if (val > 2)
goto out;
/* Avoiding full matrix multiplication, we simply reorder the
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index de8ae4327094..4e54477ccf24 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -102,6 +102,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis3de",
.data = LIS3DE_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lis2de12",
+ .data = LIS2DE12_ACCEL_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -140,6 +144,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS3LV02DL_ACCEL_DEV_NAME },
{ LIS2DW12_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
+ { LIS2DE12_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 76db6e5cc296..2036eca546fd 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -124,6 +124,18 @@ config AD7768_1
To compile this driver as a module, choose M here: the module will be
called ad7768-1.
+config AD7780
+ tristate "Analog Devices AD7780 and similar ADCs driver"
+ depends on SPI
+ depends on GPIOLIB || COMPILE_TEST
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7170, AD7171,
+ AD7780 and AD7781 SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7780.
+
config AD7791
tristate "Analog Devices AD7791 ADC driver"
depends on SPI
@@ -390,7 +402,7 @@ config HX711
This driver uses two GPIOs, one acts as the clock and controls the
channel selection and gain, the other one is used for the measurement
- data
+ data
Currently the raw value is read from the chip and delivered.
To get an actual weight one needs to subtract the
@@ -541,7 +553,7 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
-config MAX9611
+config MAX9611
tristate "Maxim max9611/max9612 ADC driver"
depends on I2C
help
@@ -585,17 +597,17 @@ config MCP3911
called mcp3911.
config MEDIATEK_MT6577_AUXADC
- tristate "MediaTek AUXADC driver"
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_IOMEM
- help
- Say yes here to enable support for MediaTek mt65xx AUXADC.
+ tristate "MediaTek AUXADC driver"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to enable support for MediaTek mt65xx AUXADC.
- The driver supports immediate mode operation to read from one of sixteen
- channels (external or internal).
+ The driver supports immediate mode operation to read from one of sixteen
+ channels (external or internal).
- This driver can also be built as a module. If so, the module will be
- called mt6577_auxadc.
+ This driver can also be built as a module. If so, the module will be
+ called mt6577_auxadc.
config MEN_Z188_ADC
tristate "MEN 16z188 ADC IP Core support"
@@ -809,7 +821,9 @@ config STM32_DFSDM_ADC
depends on (ARCH_STM32 && OF) || COMPILE_TEST
select STM32_DFSDM_CORE
select REGMAP_MMIO
+ select IIO_BUFFER
select IIO_BUFFER_HW_CONSUMER
+ select IIO_TRIGGERED_BUFFER
help
Select this option to support ADC Sigma delta modulator for
STMicroelectronics STM32 digital filter for sigma delta converter.
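On the two added select lines: Kconfig's select forces a symbol on without satisfying that symbol's own dependencies, so a configuration enabling this driver could previously request IIO_BUFFER_HW_CONSUMER while IIO_BUFFER stayed off, and Kconfig would flag unmet direct dependencies. Selecting IIO_BUFFER (and IIO_TRIGGERED_BUFFER) here closes that gap. The general shape of the pitfall, with made-up symbols:

# Illustrative symbols only.
config FOO
	tristate "Helper feature"
	depends on CORE

config BAR
	tristate "Some driver"
	# 'select FOO' does not turn CORE on, because select ignores the
	# dependencies of the selected symbol; without the second select,
	# Kconfig reports "unmet direct dependencies" for FOO.
	select FOO
	select CORE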
@@ -956,7 +970,7 @@ config TI_ADS1015
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
- depends on SPI
+ depends on SPI && GPIOLIB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -967,6 +981,16 @@ config TI_ADS7950
To compile this driver as a module, choose M here: the
module will be called ti-ads7950.
+config TI_ADS8344
+ tristate "Texas Instruments ADS8344"
+ depends on SPI && OF
+ help
+ If you say yes here you get support for Texas Instruments ADS8344
+ ADC chips
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads8344.
+
config TI_ADS8688
tristate "Texas Instruments ADS8688"
depends on SPI && OF
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 6fcebd167524..ef9cc485fb67 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD7606_IFACE_SPI) += ad7606_spi.o
obj-$(CONFIG_AD7606) += ad7606.o
obj-$(CONFIG_AD7766) += ad7766.o
obj-$(CONFIG_AD7768_1) += ad7768-1.o
+obj-$(CONFIG_AD7780) += ad7780.o
obj-$(CONFIG_AD7791) += ad7791.o
obj-$(CONFIG_AD7793) += ad7793.o
obj-$(CONFIG_AD7887) += ad7887.o
@@ -87,6 +88,7 @@ obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
+obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 7d5e5311d8de..659ef37d5fe8 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -411,7 +411,7 @@ static int ad7124_init_channel_vref(struct ad7124_state *st,
dev_err(&st->sd.spi->dev,
"Error, trying to use external voltage reference without a %s regulator.\n",
ad7124_ref_names[refsel]);
- return PTR_ERR(st->vref[refsel]);
+ return PTR_ERR(st->vref[refsel]);
}
st->channel_config[channel_number].vref_mv =
regulator_get_voltage(st->vref[refsel]);
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index ebb8de03bbce..24c70c3cefb4 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -31,7 +31,7 @@
* Scales are computed as 5000/32768 and 10000/32768 respectively,
* so that when applied to the raw values they provide mV values
*/
-static const unsigned int scale_avail[2] = {
+static const unsigned int ad7606_scale_avail[2] = {
152588, 305176
};
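A quick check of the scale comment above, assuming a 16-bit bipolar sample (32768 counts per side of the range):

	±5 V range :  5000 mV / 32768 ≈ 0.152588 mV per LSB  ->  152588
	±10 V range: 10000 mV / 32768 ≈ 0.305176 mV per LSB  ->  305176

The fractional part is kept in micro-units, which is what read_raw() reports through IIO_VAL_INT_PLUS_MICRO with val = 0 and val2 taken from this table.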
@@ -39,6 +39,10 @@ static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
+static const unsigned int ad7616_oversampling_avail[8] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+};
+
static int ad7606_reset(struct ad7606_state *st)
{
if (st->gpio_reset) {
@@ -154,7 +158,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = scale_avail[st->range];
+ *val2 = st->scale_avail[st->range];
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*val = st->oversampling;
@@ -163,21 +167,31 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static ssize_t in_voltage_scale_available_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ad7606_show_avail(char *buf, const unsigned int *vals,
+ unsigned int n, bool micros)
{
- int i, len = 0;
-
- for (i = 0; i < ARRAY_SIZE(scale_avail); i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
- scale_avail[i]);
+ size_t len = 0;
+ int i;
+ for (i = 0; i < n; i++) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ micros ? "0.%06u " : "%u ", vals[i]);
+ }
buf[len - 1] = '\n';
return len;
}
+static ssize_t in_voltage_scale_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_show_avail(buf, st->scale_avail, st->num_scales, true);
+}
+
static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0);
static int ad7606_write_raw(struct iio_dev *indio_dev,
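With this refactor both availability attributes are produced by the same helper; given the tables in this patch, the formatted strings come out roughly as:

	in_voltage_scale_available       ->  "0.152588 0.305176\n"
	oversampling_ratio_available     ->  "1 2 4 8 16 32 64\n"      (AD7606 family)
	                                     "1 2 4 8 16 32 64 128\n"  (AD7616)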
@@ -193,7 +207,7 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SCALE:
mutex_lock(&st->lock);
- i = find_closest(val2, scale_avail, ARRAY_SIZE(scale_avail));
+ i = find_closest(val2, st->scale_avail, st->num_scales);
gpiod_set_value(st->gpio_range, i);
st->range = i;
mutex_unlock(&st->lock);
@@ -202,15 +216,20 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
if (val2)
return -EINVAL;
- i = find_closest(val, ad7606_oversampling_avail,
- ARRAY_SIZE(ad7606_oversampling_avail));
+ i = find_closest(val, st->oversampling_avail,
+ st->num_os_ratios);
values[0] = i;
mutex_lock(&st->lock);
gpiod_set_array_value(ARRAY_SIZE(values), st->gpio_os->desc,
st->gpio_os->info, values);
- st->oversampling = ad7606_oversampling_avail[i];
+
+ /* AD7616 requires a reset to update value */
+ if (st->chip_info->os_req_reset)
+ ad7606_reset(st);
+
+ st->oversampling = st->oversampling_avail[i];
mutex_unlock(&st->lock);
return 0;
@@ -219,11 +238,23 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
}
}
-static IIO_CONST_ATTR(oversampling_ratio_available, "1 2 4 8 16 32 64");
+static ssize_t ad7606_oversampling_ratio_avail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_show_avail(buf, st->oversampling_avail,
+ st->num_os_ratios, false);
+}
+
+static IIO_DEVICE_ATTR(oversampling_ratio_available, 0444,
+ ad7606_oversampling_ratio_avail, NULL, 0);
static struct attribute *ad7606_attributes_os_and_range[] = {
&iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -232,7 +263,7 @@ static const struct attribute_group ad7606_attribute_group_os_and_range = {
};
static struct attribute *ad7606_attributes_os[] = {
- &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_oversampling_ratio_available.dev_attr.attr,
NULL,
};
@@ -292,6 +323,36 @@ static const struct iio_chan_spec ad7606_channels[] = {
AD7606_CHANNEL(7),
};
+/*
+ * The current assumption this driver makes for the AD7616 is that it is
+ * working in Hardware Mode with the Serial, Burst and Sequencer modes
+ * activated. To activate them, the following pins must be pulled high:
+ * -SER/PAR
+ * -SEQEN
+ * and the following pins must be pulled low:
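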
+ * -WR/BURST
+ * -DB4/SER1W
+ */
+static const struct iio_chan_spec ad7616_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+ AD7606_CHANNEL(0),
+ AD7606_CHANNEL(1),
+ AD7606_CHANNEL(2),
+ AD7606_CHANNEL(3),
+ AD7606_CHANNEL(4),
+ AD7606_CHANNEL(5),
+ AD7606_CHANNEL(6),
+ AD7606_CHANNEL(7),
+ AD7606_CHANNEL(8),
+ AD7606_CHANNEL(9),
+ AD7606_CHANNEL(10),
+ AD7606_CHANNEL(11),
+ AD7606_CHANNEL(12),
+ AD7606_CHANNEL(13),
+ AD7606_CHANNEL(14),
+ AD7606_CHANNEL(15),
+};
+
static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
/* More devices added in future */
[ID_AD7605_4] = {
@@ -301,17 +362,27 @@ static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
[ID_AD7606_8] = {
.channels = ad7606_channels,
.num_channels = 9,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
},
[ID_AD7606_6] = {
.channels = ad7606_channels,
.num_channels = 7,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
},
[ID_AD7606_4] = {
.channels = ad7606_channels,
.num_channels = 5,
- .has_oversampling = true,
+ .oversampling_avail = ad7606_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
+ },
+ [ID_AD7616] = {
+ .channels = ad7616_channels,
+ .num_channels = 17,
+ .oversampling_avail = ad7616_oversampling_avail,
+ .oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
+ .os_req_reset = true,
},
};
@@ -343,7 +414,7 @@ static int ad7606_request_gpios(struct ad7606_state *st)
if (IS_ERR(st->gpio_frstdata))
return PTR_ERR(st->gpio_frstdata);
- if (!st->chip_info->has_oversampling)
+ if (!st->chip_info->oversampling_num)
return 0;
st->gpio_os = devm_gpiod_get_array_optional(dev,
@@ -467,6 +538,8 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
/* tied to logic low, analog input range is +/- 5V */
st->range = 0;
st->oversampling = 1;
+ st->scale_avail = ad7606_scale_avail;
+ st->num_scales = ARRAY_SIZE(ad7606_scale_avail);
st->reg = devm_regulator_get(dev, "avcc");
if (IS_ERR(st->reg))
@@ -484,6 +557,11 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
st->chip_info = &ad7606_chip_info_tbl[id];
+ if (st->chip_info->oversampling_num) {
+ st->oversampling_avail = st->chip_info->oversampling_avail;
+ st->num_os_ratios = st->chip_info->oversampling_num;
+ }
+
ret = ad7606_request_gpios(st);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 5d12410f68e1..f9ef52131e74 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -12,12 +12,17 @@
* struct ad7606_chip_info - chip specific information
* @channels: channel specification
* @num_channels: number of channels
- * @has_oversampling: whether the device has oversampling support
+ * @oversampling_avail: pointer to the array which stores the available
+ * oversampling ratios.
+ * @oversampling_num: number of elements stored in oversampling_avail array
+ * @os_req_reset: some devices require a reset to update oversampling
*/
struct ad7606_chip_info {
const struct iio_chan_spec *channels;
unsigned int num_channels;
- bool has_oversampling;
+ const unsigned int *oversampling_avail;
+ unsigned int oversampling_num;
+ bool os_req_reset;
};
/**
@@ -29,6 +34,11 @@ struct ad7606_chip_info {
* @range voltage range selection, selects which scale to apply
* @oversampling oversampling selection
* @base_address address from where to read data in parallel operation
+ * @scale_avail pointer to the array which stores the available scales
+ * @num_scales number of elements stored in the scale_avail array
+ * @oversampling_avail pointer to the array which stores the available
+ * oversampling ratios.
+ * @num_os_ratios number of elements stored in oversampling_avail array
* @lock protect sensor state from concurrent accesses to GPIOs
* @gpio_convst GPIO descriptor for conversion start signal (CONVST)
* @gpio_reset GPIO descriptor for device hard-reset
@@ -50,6 +60,10 @@ struct ad7606_state {
unsigned int range;
unsigned int oversampling;
void __iomem *base_address;
+ const unsigned int *scale_avail;
+ unsigned int num_scales;
+ const unsigned int *oversampling_avail;
+ unsigned int num_os_ratios;
struct mutex lock; /* protect sensor state */
struct gpio_desc *gpio_convst;
@@ -64,9 +78,9 @@ struct ad7606_state {
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
- * 8 * 16-bit samples + 64-bit timestamp
+ * 16 * 16-bit samples + 64-bit timestamp
*/
- unsigned short data[12] ____cacheline_aligned;
+ unsigned short data[20] ____cacheline_aligned;
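A quick check of the resized buffer against the updated comment:

	16 samples * sizeof(u16) + sizeof(s64) timestamp = 32 + 8 = 40 bytes = 20 u16 slots

and the 32-byte sample block keeps the 64-bit timestamp naturally 8-byte aligned, which the IIO core requires for the timestamp within a scan.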